From a2770e98a6b53e6ec5d502c1e2228078ee149b3b Mon Sep 17 00:00:00 2001
From: Bjorn Munch
Date: Mon, 2 May 2016 09:26:00 +0200
Subject: Raise version number after cloning 5.5.50

---
 VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/VERSION b/VERSION
index 58a6b369de9..db9d497c141 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
 MYSQL_VERSION_MAJOR=5
 MYSQL_VERSION_MINOR=5
-MYSQL_VERSION_PATCH=50
+MYSQL_VERSION_PATCH=51
 MYSQL_VERSION_EXTRA=
-- cgit v1.2.1

From 818b3a91231119663a95b854ab1e0e2d7a2d3feb Mon Sep 17 00:00:00 2001
From: Sujatha Sivakumar
Date: Wed, 4 May 2016 14:06:45 +0530
Subject: Bug#12818255: READ-ONLY OPTION DOES NOT ALLOW INSERTS/UPDATES ON TEMPORARY TABLES

Bug#14294223: CHANGES NOT ALLOWED TO TEMPORARY TABLES ON READ-ONLY SERVERS

Problem:
========
On a 5.5.14 server running with --read-only, a non-SUPER user can create
temporary tables but cannot insert into or update them. Such statements fail
with:
Error 1290: The MySQL server is running with the --read-only option so it
cannot execute this statement.

Analysis:
=========
The bug occurs only when the binary log is enabled and binlog-format is
STATEMENT or MIXED. A standalone server without the binary log, or one using
row-based logging, works fine.

How a standalone server and row-based replication work:
========================================================
A standalone server and row-based replication mark a transaction as
read_write only when it modifies non-temporary tables. Because of this, when
the code enters the commit phase it checks whether the transaction is
read_write; if it is and the global read-only mode is enabled, the
transaction fails with the 'server is running in read-only mode' error.

With statement-based logging, a binlog handler is created at the time the
statement is written to the binary log, and that handler is always marked
read_write. For temporary tables the storage engine does not mark the
transaction as read_write, but the transaction started by the binlog handler
is still considered read_write. Hence, at commit time the code finds a
handler with a read_write transaction even though only a temporary table was
modified, and the server raises the error when global read-only mode is
enabled.

Fix:
====
At commit time in "ha_commit_trans", when a read_write transaction is found,
check whether it comes from a handler other than binlog_handler. The
statement is blocked only when a genuine read_write transaction has been
registered by a storage engine apart from binlog_handler.
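As a condensed illustration of the scenario described above (a sketch, not
part of the patch; it assumes a server started with --log-bin and
binlog_format=STATEMENT and a non-SUPER connection, and the table name t1 is
illustrative):

    -- issued by an administrator
    SET GLOBAL read_only = 1;

    -- issued by the non-SUPER user
    CREATE TEMPORARY TABLE t1 (a INT) ENGINE=InnoDB;
    INSERT INTO t1 VALUES (10);
    -- before this fix: Error 1290: The MySQL server is running with the
    -- --read-only option so it cannot execute this statement
    -- with this fix: the row is inserted, because only the binlog handler
    -- (and no storage engine) marked the transaction as read_write

The new test file binlog_dmls_on_tmp_tables_readonly.test below exercises the
same pattern with autocommit both on and off, and with read_only toggled in
the middle of an open transaction.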
--- .../r/binlog_dmls_on_tmp_tables_readonly.result | 58 ++++++++++++++ .../t/binlog_dmls_on_tmp_tables_readonly.test | 90 ++++++++++++++++++++++ sql/handler.cc | 2 +- sql/log.cc | 8 +- sql/log.h | 2 +- sql/log_event.cc | 3 +- 6 files changed, 155 insertions(+), 8 deletions(-) create mode 100644 mysql-test/suite/binlog/r/binlog_dmls_on_tmp_tables_readonly.result create mode 100644 mysql-test/suite/binlog/t/binlog_dmls_on_tmp_tables_readonly.test diff --git a/mysql-test/suite/binlog/r/binlog_dmls_on_tmp_tables_readonly.result b/mysql-test/suite/binlog/r/binlog_dmls_on_tmp_tables_readonly.result new file mode 100644 index 00000000000..1dfac08e762 --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_dmls_on_tmp_tables_readonly.result @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS t1 ; +# READ_ONLY does nothing to SUPER users +# so we use a non-SUPER one: +GRANT CREATE, SELECT, DROP ON *.* TO test@localhost; +connect con1,localhost,test,,test; +connection default; +SET GLOBAL READ_ONLY=1; +connection con1; +CREATE TEMPORARY TABLE t1 (a INT) ENGINE=INNODB; +# Test INSERTS with autocommit being off and on. +BEGIN; +INSERT INTO t1 VALUES (10); +COMMIT; +INSERT INTO t1 VALUES (20); +# Test UPDATES with autocommit being off and on. +BEGIN; +UPDATE t1 SET a=30 WHERE a=10; +COMMIT; +UPDATE t1 SET a=40 WHERE a=20; +connection default; +SET GLOBAL READ_ONLY=0; +# Test scenario where global read_only is enabled in the middle of transaction. +# Test INSERT operations on temporary tables, INSERTs should be successful even +# when global read_only is enabled. +connection con1; +BEGIN; +INSERT INTO t1 VALUES(50); +connection default; +SET GLOBAL READ_ONLY=1; +connection con1; +SELECT @@GLOBAL.READ_ONLY; +@@GLOBAL.READ_ONLY +1 +COMMIT; +connection default; +SET GLOBAL READ_ONLY=0; +# Test UPDATE operations on temporary tables, UPDATEs should be successful even +# when global read_only is enabled. +connection con1; +BEGIN; +UPDATE t1 SET a=60 WHERE a=50; +connection default; +SET GLOBAL READ_ONLY=1; +connection con1; +SELECT @@GLOBAL.READ_ONLY; +@@GLOBAL.READ_ONLY +1 +COMMIT; +SELECT * FROM t1; +a +30 +40 +60 +# Clean up +connection default; +SET GLOBAL READ_ONLY=0; +disconnect con1; +DROP USER test@localhost; diff --git a/mysql-test/suite/binlog/t/binlog_dmls_on_tmp_tables_readonly.test b/mysql-test/suite/binlog/t/binlog_dmls_on_tmp_tables_readonly.test new file mode 100644 index 00000000000..30a6471bf61 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_dmls_on_tmp_tables_readonly.test @@ -0,0 +1,90 @@ +# ==== Purpose ==== +# +# Check that DMLs are allowed on temporary tables, when server is in read only +# mode and binary log is enabled with binlog-format being stmt/mixed mode. +# +# ==== Implementation ==== +# +# Start the server with binary log being enabled. Mark the server as read only. +# Create a non-SUPER user and let the user to create a temporary table and +# perform DML operations on that temporary table. DMLs should not be blocked +# with a 'server read-only mode' error. 
+# +# ==== References ==== +# +# Bug#12818255: READ-ONLY OPTION DOES NOT ALLOW INSERTS/UPDATES ON TEMPORARY +# TABLES +# Bug#14294223: CHANGES NOT ALLOWED TO TEMPORARY TABLES ON READ-ONLY SERVERS +############################################################################### +--source include/have_log_bin.inc +--disable_warnings +DROP TABLE IF EXISTS t1 ; +--enable_warnings + +--enable_connect_log +--echo # READ_ONLY does nothing to SUPER users +--echo # so we use a non-SUPER one: +GRANT CREATE, SELECT, DROP ON *.* TO test@localhost; + +connect (con1,localhost,test,,test); + +connection default; +SET GLOBAL READ_ONLY=1; + +connection con1; +CREATE TEMPORARY TABLE t1 (a INT) ENGINE=INNODB; + +--echo # Test INSERTS with autocommit being off and on. +BEGIN; +INSERT INTO t1 VALUES (10); +COMMIT; +INSERT INTO t1 VALUES (20); + +--echo # Test UPDATES with autocommit being off and on. +BEGIN; +UPDATE t1 SET a=30 WHERE a=10; +COMMIT; +UPDATE t1 SET a=40 WHERE a=20; + +connection default; +SET GLOBAL READ_ONLY=0; + +--echo # Test scenario where global read_only is enabled in the middle of transaction. +--echo # Test INSERT operations on temporary tables, INSERTs should be successful even +--echo # when global read_only is enabled. +connection con1; +BEGIN; +INSERT INTO t1 VALUES(50); + +connection default; +SET GLOBAL READ_ONLY=1; + +connection con1; +SELECT @@GLOBAL.READ_ONLY; +COMMIT; + +connection default; +SET GLOBAL READ_ONLY=0; + +--echo # Test UPDATE operations on temporary tables, UPDATEs should be successful even +--echo # when global read_only is enabled. +connection con1; +BEGIN; +UPDATE t1 SET a=60 WHERE a=50; + +connection default; +SET GLOBAL READ_ONLY=1; + +connection con1; +SELECT @@GLOBAL.READ_ONLY; +COMMIT; + +SELECT * FROM t1; + +--echo # Clean up +connection default; +SET GLOBAL READ_ONLY=0; + +disconnect con1; +DROP USER test@localhost; +--disable_connect_log diff --git a/sql/handler.cc b/sql/handler.cc index 9d57cba73dc..79cf7ac2fd9 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1279,7 +1279,7 @@ int ha_commit_trans(THD *thd, bool all) DEBUG_SYNC(thd, "ha_commit_trans_after_acquire_commit_lock"); } - if (rw_trans && + if (rw_trans && stmt_has_updated_trans_table(ha_info) && opt_readonly && !(thd->security_ctx->master_access & SUPER_ACL) && !thd->slave_thread) diff --git a/sql/log.cc b/sql/log.cc index a7f05905514..e0ba93b0959 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -4562,17 +4562,15 @@ trans_has_updated_trans_table(const THD* thd) This function checks if a transactional table was updated by the current statement. - @param thd The client thread that executed the current statement. + @param ha_list Registered storage engine handler list. @return @c true if a transactional table was updated, @c false otherwise. 
*/ bool -stmt_has_updated_trans_table(const THD *thd) +stmt_has_updated_trans_table(Ha_trx_info* ha_list) { Ha_trx_info *ha_info; - - for (ha_info= thd->transaction.stmt.ha_list; ha_info; - ha_info= ha_info->next()) + for (ha_info= ha_list; ha_info; ha_info= ha_info->next()) { if (ha_info->is_trx_read_write() && ha_info->ht() != binlog_hton) return (TRUE); diff --git a/sql/log.h b/sql/log.h index 7d1c3161ac2..dd09cb41026 100644 --- a/sql/log.h +++ b/sql/log.h @@ -25,7 +25,7 @@ class Master_info; class Format_description_log_event; bool trans_has_updated_trans_table(const THD* thd); -bool stmt_has_updated_trans_table(const THD *thd); +bool stmt_has_updated_trans_table(Ha_trx_info* ha_list); bool use_trans_cache(const THD* thd, bool is_transactional); bool ending_trans(THD* thd, const bool all); bool ending_single_stmt_trans(THD* thd, const bool all); diff --git a/sql/log_event.cc b/sql/log_event.cc index 702cf1d575a..5dbeb1eb4b9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -2637,7 +2637,8 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, { cache_type= Log_event::EVENT_NO_CACHE; } - else if (using_trans || trx_cache || stmt_has_updated_trans_table(thd) || + else if (using_trans || trx_cache || + stmt_has_updated_trans_table(thd->transaction.stmt.ha_list) || thd->lex->is_mixed_stmt_unsafe(thd->in_multi_stmt_transaction_mode(), thd->variables.binlog_direct_non_trans_update, trans_has_updated_trans_table(thd), -- cgit v1.2.1 From 93845e1718e4460e829a3e942bf5202e6a733db2 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Sun, 8 May 2016 21:19:51 +0300 Subject: Initial commit for mdev-9864 containing only test files --- mysql-test/r/cte_recursive.result | 289 ++++++++++++++++++++++++++++++++++++++ mysql-test/t/cte_recursive.test | 241 +++++++++++++++++++++++++++++++ 2 files changed, 530 insertions(+) create mode 100644 mysql-test/r/cte_recursive.result create mode 100644 mysql-test/t/cte_recursive.test diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result new file mode 100644 index 00000000000..7408bc56e63 --- /dev/null +++ b/mysql-test/r/cte_recursive.result @@ -0,0 +1,289 @@ +create table t1 (a int, b varchar(32)); +insert into t1 values +(4,'aaaa' ), (7,'bb'), (1,'ccc'), (4,'dd'); +insert into t1 values +(3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); +with recursive +a1(a,b) as +(select * from t1 where t1.a>3 +union +select * from b1 where b1.a >3 +union +select * from c1 where c1.a>3), +b1(a,b) as +(select * from a1 where a1.b > 'ccc' +union +select * from c1 where c1.b > 'ddd'), +c1(a,b) as +(select * from a1 where a1.a<6 and a1.b< 'zz' +union +select * from b1 where b1.b > 'auu') +select * from c1; +ERROR HY000: No anchors for recursive WITH element 'b1' +drop table t1; +create table folks(id int, name char(32), dob date, father int, mother int); +insert into folks values +(100, 'Vasya', '2000-01-01', 20, 30), +(20, 'Dad', '1970-02-02', 10, 9), +(30, 'Mom', '1975-03-03', 8, 7), +(10, 'Grandpa Bill', '1940-04-05', null, null), +(9, 'Grandma Ann', '1941-10-15', null, null), +(25, 'Uncle Jim', '1968-11-18', 8, 7), +(98, 'Sister Amy', '2001-06-20', 20, 30), +(8, 'Grandma Sally', '1943-08-23', 5, 6), +(6, 'Grandgrandma Martha', '1923-05-17', null, null), +(67, 'Cousin Eddie', '1992-02-28', 25, 27), +(27, 'Auntie Melinda', '1971-03-29', null, null); +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.id, p.name, p.dob, p.father, p.mother +from folks as p, ancestors AS a 
+where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select p.* +from folks as p, ancestors AS a +where p.id = a.father or p.id = a.mother +union +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Cousin Eddie' + union +select p.* +from folks as p, ancestors as a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +67 Cousin Eddie 1992-02-28 25 27 +25 Uncle Jim 1968-11-18 8 7 +27 Auntie Melinda 1971-03-29 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' or name='Sister Amy' + union +select p.* +from folks as p, ancestors as a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +prev_gen +as +( +select folks.* +from folks, prev_gen +where folks.id=prev_gen.father or folks.id=prev_gen.mother +union +select * +from folks +where name='Vasya' +), +ancestors +as +( +select * +from folks +where name='Vasya' + union +select * +from ancestors +union +select * +from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; +name dob +Vasya 2000-01-01 +Dad 1970-02-02 +Mom 1975-03-03 +Grandpa Bill 1940-04-05 +Grandma Ann 1941-10-15 +Grandma Sally 1943-08-23 +Grandgrandma Martha 1923-05-17 +with recursive +descendants +as +( +select * +from folks +where name = 'Grandpa Bill' + union +select folks.* +from folks, descendants as d +where d.id=folks.father or d.id=folks.mother +) +select * from descendants; +id name dob father mother +10 Grandpa Bill 1940-04-05 NULL NULL +20 Dad 1970-02-02 10 9 +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +with recursive +descendants +as +( +select * +from folks +where name = 'Grandma Sally' + union +select folks.* +from folks, descendants as d +where d.id=folks.father or d.id=folks.mother +) +select * from descendants; +id name dob father mother +8 Grandma Sally 1943-08-23 5 6 +30 Mom 1975-03-03 8 7 +25 Uncle Jim 1968-11-18 8 7 +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +67 Cousin Eddie 1992-02-28 25 27 +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father OR p.id = a.mother +) +select * +from ancestors t1, ancestors t2 +where exists (select * from ancestors a +where a.father=t1.id AND a.mother=t2.id); +id name dob father mother id name dob father mother +20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL +with 
+ancestor_couples(husband, h_dob, wife, w_dob) +as +( +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father OR p.id = a.mother +) +select t1.name, t1.dob, t2.name, t2.dob +from ancestors t1, ancestors t2 +where exists (select * from ancestors a +where a.father=t1.id AND a.mother=t2.id) +) +select * from ancestor_couples; +husband h_dob wife w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father +union +select p.* +from folks as p, ancestors AS a +where p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +9 Grandma Ann 1941-10-15 NULL NULL +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) +as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +union +select h.*, w.* +from folks v, folks h, folks w +where v.name = 'Vasya' and +(v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +h_name h_dob w_name w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +drop table folks; diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test new file mode 100644 index 00000000000..34eee6d3bf2 --- /dev/null +++ b/mysql-test/t/cte_recursive.test @@ -0,0 +1,241 @@ +create table t1 (a int, b varchar(32)); +insert into t1 values +(4,'aaaa' ), (7,'bb'), (1,'ccc'), (4,'dd'); +insert into t1 values +(3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); + +--ERROR 1984 +with recursive +a1(a,b) as +(select * from t1 where t1.a>3 +union +select * from b1 where b1.a >3 +union +select * from c1 where c1.a>3), +b1(a,b) as +(select * from a1 where a1.b > 'ccc' +union +select * from c1 where c1.b > 'ddd'), +c1(a,b) as +(select * from a1 where a1.a<6 and a1.b< 'zz' +union +select * from b1 where b1.b > 'auu') +select * from c1; + +drop table t1; + +create table folks(id int, name char(32), dob date, father int, mother int); + +insert into folks values +(100, 'Vasya', '2000-01-01', 20, 30), +(20, 'Dad', '1970-02-02', 10, 9), +(30, 'Mom', '1975-03-03', 8, 7), +(10, 'Grandpa Bill', '1940-04-05', null, null), +(9, 'Grandma Ann', '1941-10-15', null, null), +(25, 'Uncle Jim', '1968-11-18', 8, 7), +(98, 'Sister Amy', '2001-06-20', 20, 30), +(8, 'Grandma Sally', '1943-08-23', 5, 6), +(6, 'Grandgrandma Martha', '1923-05-17', null, null), +(67, 'Cousin Eddie', '1992-02-28', 25, 27), +(27, 'Auntie Melinda', '1971-03-29', null, null); + + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +ancestors +as +( + select p.* + from folks as p, ancestors AS a + where p.id = a.father or p.id 
= a.mother + union + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Cousin Eddie' + union + select p.* + from folks as p, ancestors as a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' or name='Sister Amy' + union + select p.* + from folks as p, ancestors as a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +prev_gen +as +( + select folks.* + from folks, prev_gen + where folks.id=prev_gen.father or folks.id=prev_gen.mother + union + select * + from folks + where name='Vasya' +), +ancestors +as +( + select * + from folks + where name='Vasya' + union + select * + from ancestors + union + select * + from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; + + +with recursive +descendants +as +( + select * + from folks + where name = 'Grandpa Bill' + union + select folks.* + from folks, descendants as d + where d.id=folks.father or d.id=folks.mother +) +select * from descendants; + +with recursive +descendants +as +( + select * + from folks + where name = 'Grandma Sally' + union + select folks.* + from folks, descendants as d + where d.id=folks.father or d.id=folks.mother +) +select * from descendants; + + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father OR p.id = a.mother +) +select * + from ancestors t1, ancestors t2 + where exists (select * from ancestors a + where a.father=t1.id AND a.mother=t2.id); + +with +ancestor_couples(husband, h_dob, wife, w_dob) +as +( +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father OR p.id = a.mother +) +select t1.name, t1.dob, t2.name, t2.dob + from ancestors t1, ancestors t2 + where exists (select * from ancestors a + where a.father=t1.id AND a.mother=t2.id) +) +select * from ancestor_couples; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father + union + select p.* + from folks as p, ancestors AS a + where p.id = a.mother +) +select * from ancestors; + +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id + union + select h.*, w.* + from folks v, folks h, folks w + where v.name = 'Vasya' and + (v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select h_id, h_name, h_dob, h_father, h_mother + from ancestor_couples + union + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + +drop table folks; + -- cgit v1.2.1 From e09b1f2a226bf2763b211f74908a6486b83ebed1 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Sun, 8 May 2016 21:50:13 +0300 Subject: Initial commit just to make a branch for mdev-9864 --- sql/sql_select.h | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/sql_select.h b/sql/sql_select.h index 87de9316c3a..86c5ef87d89 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -202,6 
+202,7 @@ class JOIN_TAB_RANGE; typedef struct st_join_table { st_join_table() {} /* Remove gcc warning */ TABLE *table; + TABLE_LIST *tab_list; KEYUSE *keyuse; /**< pointer to first used key */ KEY *hj_key; /**< descriptor of the used best hash join key not supported by any index */ -- cgit v1.2.1 From be1d06c8a5f843e775374e5ec148aaee56970bdc Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Sun, 8 May 2016 23:04:41 +0300 Subject: Merge branch '10.2' into 10.2-mdev9864 --- .gitignore | 1 + client/mysqltest.cc | 2 +- config.h.cmake | 1 + configure.cmake | 11 + extra/comp_err.c | 147 +- include/my_sys.h | 6 +- include/mysql/plugin.h | 4 +- include/mysql/plugin_audit.h.pp | 3 +- include/mysql/plugin_auth.h.pp | 3 +- include/mysql/plugin_encryption.h.pp | 3 +- include/mysql/plugin_ftparser.h.pp | 3 +- include/mysql/plugin_password_validation.h.pp | 3 +- libmysql/errmsg.c | 2 +- libmysql/get_password.c | 2 +- libmysqld/CMakeLists.txt | 1 + libmysqld/lib_sql.cc | 3 + mysql-test/include/crash_mysqld.inc | 18 + mysql-test/include/galera_connect.inc | 5 +- mysql-test/r/analyze_format_json.result | 21 +- mysql-test/r/analyze_stmt_orderby.result | 65 +- mysql-test/r/connect2.result | 1 + mysql-test/r/cte_nonrecursive.result | 13 + mysql-test/r/cte_recursive.result | 289 ++ mysql-test/r/ctype_eucjpms.result | 21 + mysql-test/r/ctype_gbk_export_import.result | 3903 ++++++++++++++++++++ mysql-test/r/ctype_ujis.result | 21 + mysql-test/r/ctype_utf8.result | 33 + mysql-test/r/ctype_utf8mb4.result | 27 + mysql-test/r/delayed.result | 34 + mysql-test/r/derived_opt.result | 4 +- mysql-test/r/distinct.result | 18 +- mysql-test/r/explain_json.result | 12 +- mysql-test/r/func_analyse.result | 26 +- mysql-test/r/func_group.result | 2 +- mysql-test/r/group_by.result | 49 +- mysql-test/r/having.result | 10 +- mysql-test/r/join_cache.result | 4 +- mysql-test/r/join_outer.result | 12 +- mysql-test/r/join_outer_jcl6.result | 12 +- mysql-test/r/limit.result | 4 +- mysql-test/r/limit_rows_examined.result | 29 +- mysql-test/r/mrr_derived_crash_4610.result | 4 +- mysql-test/r/myisam.result | 8 +- mysql-test/r/order_by_optimizer.result | 30 + mysql-test/r/parser.result | 4 +- mysql-test/r/select_found.result | 20 +- mysql-test/r/show_explain.result | 2 +- mysql-test/r/subselect.result | 2 +- mysql-test/r/subselect4.result | 6 +- mysql-test/r/subselect_no_exists_to_in.result | 2 +- mysql-test/r/subselect_no_mat.result | 2 +- mysql-test/r/subselect_no_opts.result | 2 +- mysql-test/r/subselect_no_scache.result | 2 +- mysql-test/r/subselect_no_semijoin.result | 2 +- mysql-test/r/win.result | 1961 ++++++++++ mysql-test/r/win_avg.result | 95 + mysql-test/r/win_bit.result | 117 + mysql-test/r/win_ntile.result | 435 +++ mysql-test/r/win_orderby.result | 26 + mysql-test/r/win_percent_cume.result | 62 + mysql-test/r/win_rank.result | 104 + mysql-test/r/win_sum.result | 95 + mysql-test/std_data/bad2_master.info | 35 + mysql-test/std_data/bad3_master.info | 37 + mysql-test/std_data/bad4_master.info | 35 + mysql-test/std_data/bad5_master.info | 35 + mysql-test/std_data/bad6_master.info | 36 + mysql-test/std_data/bad_master.info | 35 + mysql-test/std_data/loaddata/mdev9823.ujis.txt | 11 + mysql-test/std_data/loaddata/mdev9823.utf8mb4.txt | 12 + mysql-test/std_data/loaddata/mdev9874.xml | 1 + .../galera/include/galera_sst_set_mysqldump.inc | 1 + mysql-test/suite/galera/r/binlog_checksum.result | 7 + mysql-test/suite/galera/r/create.result | 4 + .../suite/galera/r/enforce_storage_engine.result | 3 + 
mysql-test/suite/galera/r/fk.result | 7 + .../galera/r/galera_account_management.result | 16 + .../galera/r/galera_alter_engine_innodb.result | 1 + .../galera/r/galera_alter_engine_myisam.result | 2 + .../suite/galera/r/galera_alter_table_force.result | 1 + .../galera/r/galera_applier_ftwrl_table.result | 5 + .../r/galera_applier_ftwrl_table_alter.result | 3 + mysql-test/suite/galera/r/galera_as_master.result | 6 + .../suite/galera/r/galera_as_master_large.result | 6 + mysql-test/suite/galera/r/galera_as_slave.result | 9 + .../suite/galera/r/galera_as_slave_gtid.result | 8 + .../r/galera_bf_abort_flush_for_export.result | 3 + .../suite/galera/r/galera_bf_abort_ftwrl.result | 3 + .../suite/galera/r/galera_bf_abort_get_lock.result | 4 + .../galera/r/galera_bf_abort_lock_table.result | 3 + .../suite/galera/r/galera_bf_abort_sleep.result | 3 + .../suite/galera/r/galera_binlog_cache_size.result | 1 + .../suite/galera/r/galera_binlog_checksum.result | 3 + .../r/galera_binlog_event_max_size_min.result | 1 + .../suite/galera/r/galera_concurrent_ctas.result | 2 + .../suite/galera/r/galera_create_function.result | 6 + .../suite/galera/r/galera_create_procedure.result | 6 + .../suite/galera/r/galera_create_table_like.result | 2 + .../suite/galera/r/galera_create_trigger.result | 2 + .../suite/galera/r/galera_delete_limit.result | 5 + mysql-test/suite/galera/r/galera_drop_multi.result | 2 + mysql-test/suite/galera/r/galera_enum.result | 9 + mysql-test/suite/galera/r/galera_events.result | 9 + .../suite/galera/r/galera_fk_cascade_delete.result | 2 + .../suite/galera/r/galera_fk_cascade_update.result | 2 + .../suite/galera/r/galera_fk_conflict.result | 4 + .../suite/galera/r/galera_fk_mismatch.result | 3 + .../suite/galera/r/galera_fk_multicolumn.result | 2 + .../suite/galera/r/galera_fk_multitable.result | 2 + mysql-test/suite/galera/r/galera_fk_no_pk.result | 3 + .../galera/r/galera_fk_selfreferential.result | 2 + mysql-test/suite/galera/r/galera_fk_setnull.result | 4 + .../galera/r/galera_forced_binlog_format.result | 3 + mysql-test/suite/galera/r/galera_ftwrl.result | 3 + .../suite/galera/r/galera_gcs_fc_limit.result | 6 + mysql-test/suite/galera/r/galera_gtid.result | 2 + .../suite/galera/r/galera_insert_ignore.result | 9 + .../suite/galera/r/galera_insert_multi.result | 19 + .../suite/galera/r/galera_ist_mysqldump.result | 1 + .../suite/galera/r/galera_kill_applier.result | 1 + mysql-test/suite/galera/r/galera_kill_ddl.result | 6 + .../suite/galera/r/galera_kill_smallchanges.result | 6 + mysql-test/suite/galera/r/galera_lock_table.result | 4 + mysql-test/suite/galera/r/galera_log_bin.result | 4 + .../suite/galera/r/galera_many_indexes.result | 10 + mysql-test/suite/galera/r/galera_mdl_race.result | 10 + .../suite/galera/r/galera_multi_database.result | 5 + .../suite/galera/r/galera_myisam_autocommit.result | 1 + .../galera/r/galera_myisam_transactions.result | 3 + mysql-test/suite/galera/r/galera_nopk_bit.result | 6 + mysql-test/suite/galera/r/galera_nopk_blob.result | 6 + .../galera/r/galera_nopk_large_varchar.result | 6 + .../suite/galera/r/galera_nopk_unicode.result | 5 + .../r/galera_parallel_apply_lock_table.result | 5 + .../suite/galera/r/galera_pc_ignore_sb.result | 6 + .../suite/galera/r/galera_pk_bigint_signed.result | 6 + .../galera/r/galera_pk_bigint_unsigned.result | 6 + .../galera/r/galera_prepared_statement.result | 3 + .../suite/galera/r/galera_query_cache.result | 5 + .../galera/r/galera_query_cache_sync_wait.result | 12 + mysql-test/suite/galera/r/galera_read_only.result | 6 + 
.../galera/r/galera_repl_key_format_flat16.result | 3 + .../suite/galera/r/galera_repl_max_ws_size.result | 1 + .../suite/galera/r/galera_restart_nochanges.result | 5 + mysql-test/suite/galera/r/galera_roles.result | 15 + mysql-test/suite/galera/r/galera_rsu_error.result | 3 + mysql-test/suite/galera/r/galera_rsu_simple.result | 4 + .../suite/galera/r/galera_rsu_wsrep_desync.result | 8 + mysql-test/suite/galera/r/galera_sbr.result | 3 + mysql-test/suite/galera/r/galera_sbr_binlog.result | 3 + .../suite/galera/r/galera_serializable.result | 10 + mysql-test/suite/galera/r/galera_server.result | 6 + .../suite/galera/r/galera_split_brain.result | 6 + .../suite/galera/r/galera_sql_log_bin_zero.result | 2 + mysql-test/suite/galera/r/galera_ssl.result | 1 + .../suite/galera/r/galera_ssl_compression.result | 4 + .../suite/galera/r/galera_sst_mysqldump.result | 1 + .../galera/r/galera_sst_mysqldump_with_key.result | 1 + .../suite/galera/r/galera_status_cluster.result | 2 + .../galera/r/galera_status_local_index.result | 3 + .../suite/galera/r/galera_suspend_slave.result | 7 + .../suite/galera/r/galera_sync_wait_show.result | 15 + .../r/galera_toi_alter_auto_increment.result | 14 + .../suite/galera/r/galera_toi_ddl_locking.result | 8 + .../galera/r/galera_toi_ddl_nonconflicting.result | 4 + .../galera/r/galera_toi_ddl_sequential.result | 4 + mysql-test/suite/galera/r/galera_toi_ftwrl.result | 3 + .../galera/r/galera_toi_lock_exclusive.result | 5 + .../suite/galera/r/galera_toi_lock_shared.result | 3 + mysql-test/suite/galera/r/galera_truncate.result | 7 + .../galera/r/galera_truncate_temporary.result | 9 + .../galera/r/galera_unicode_identifiers.result | 4 + mysql-test/suite/galera/r/galera_unicode_pk.result | 9 + .../suite/galera/r/galera_update_limit.result | 5 + .../suite/galera/r/galera_v1_row_events.result | 3 + .../suite/galera/r/galera_var_OSU_method.result | 8 + .../suite/galera/r/galera_var_OSU_method2.result | 7 + .../r/galera_var_auto_inc_control_off.result | 10 + .../galera/r/galera_var_certify_nonPK_off.result | 5 + .../galera/r/galera_var_cluster_address.result | 11 + .../suite/galera/r/galera_var_desync_on.result | 5 + .../suite/galera/r/galera_var_dirty_reads.result | 4 + .../suite/galera/r/galera_var_fkchecks.result | 2 + .../galera/r/galera_var_gtid_domain_id.result | 9 + .../r/galera_var_innodb_disallow_writes.result | 5 + .../suite/galera/r/galera_var_log_bin.result | 3 + .../suite/galera/r/galera_var_max_ws_size.result | 1 + .../r/galera_var_mysql_replication_bundle.result | 5 + .../suite/galera/r/galera_var_node_address.result | 5 + .../r/galera_var_replicate_myisam_off.result | 2 + .../galera/r/galera_var_replicate_myisam_on.result | 22 + .../suite/galera/r/galera_var_slave_threads.result | 8 + .../suite/galera/r/galera_var_sync_wait.result | 7 + .../suite/galera/r/galera_var_wsrep_on_off.result | 3 + mysql-test/suite/galera/r/galera_wan.result | 5 + .../suite/galera/r/galera_wsrep_new_cluster.result | 1 + .../galera/r/galera_zero_length_column.result | 3 + mysql-test/suite/galera/r/grant.result | 4 + mysql-test/suite/galera/r/lp1276424.result | 1 + mysql-test/suite/galera/r/lp1438990.result | 1 + mysql-test/suite/galera/r/mdev_9290.result | 5 + mysql-test/suite/galera/r/mysql-wsrep#110.result | 3 + mysql-test/suite/galera/r/mysql-wsrep#198.result | 10 + mysql-test/suite/galera/r/mysql-wsrep#201.result | 1 + mysql-test/suite/galera/r/mysql-wsrep#237.result | 8 + mysql-test/suite/galera/r/mysql-wsrep#247.result | 1 + mysql-test/suite/galera/r/mysql-wsrep#31.result | 3 + 
mysql-test/suite/galera/r/mysql-wsrep#33.result | 1 + mysql-test/suite/galera/r/partition.result | 17 + mysql-test/suite/galera/r/rename.result | 7 + mysql-test/suite/galera/r/rpl_row_annotate.result | 7 + mysql-test/suite/galera/r/sql_log_bin.result | 4 + mysql-test/suite/galera/r/unique_key.result | 8 + mysql-test/suite/galera/r/view.result | 4 + mysql-test/suite/galera/t/mysql-wsrep#237.test | 2 + mysql-test/suite/maria/maria.result | 4 +- .../suite/rpl/r/rpl_begin_commit_rollback.result | 2 - .../suite/rpl/r/rpl_upgrade_master_info.result | 102 + .../suite/rpl/t/rpl_begin_commit_rollback.test | 2 - .../suite/rpl/t/rpl_upgrade_master_info.test | 163 + mysql-test/t/connect2.test | 3 +- mysql-test/t/cte_nonrecursive.test | 14 + mysql-test/t/cte_recursive.test | 241 ++ mysql-test/t/ctype_eucjpms.test | 16 + mysql-test/t/ctype_gbk_export_import.test | 161 + mysql-test/t/ctype_ujis.test | 17 + mysql-test/t/ctype_utf8.test | 16 + mysql-test/t/ctype_utf8mb4.test | 17 + mysql-test/t/delayed.test | 41 + mysql-test/t/func_analyse.test | 28 +- mysql-test/t/group_by.test | 35 + mysql-test/t/limit_rows_examined.test | 4 +- mysql-test/t/myisam.test | 2 + mysql-test/t/order_by_optimizer.test | 34 + mysql-test/t/parser.test | 4 +- mysql-test/t/subselect.test | 2 +- mysql-test/t/win.test | 1203 ++++++ mysql-test/t/win_avg.test | 47 + mysql-test/t/win_bit.test | 89 + mysql-test/t/win_ntile.test | 171 + mysql-test/t/win_orderby.test | 32 + mysql-test/t/win_percent_cume.test | 36 + mysql-test/t/win_rank.test | 58 + mysql-test/t/win_sum.test | 47 + mysys/errors.c | 2 +- mysys/my_default.c | 2 +- mysys/my_error.c | 18 +- .../file_key_management_plugin.cc | 8 +- plugin/wsrep_info/plugin.cc | 14 +- scripts/mysql_install_db.sh | 5 +- scripts/mysql_system_tables.sql | 6 + sql/CMakeLists.txt | 3 +- sql/derror.cc | 255 +- sql/derror.h | 3 +- sql/field.cc | 3 +- sql/filesort.cc | 88 +- sql/filesort.h | 71 +- sql/handler.cc | 11 +- sql/item.cc | 53 +- sql/item.h | 74 +- sql/item_buff.cc | 89 +- sql/item_cmpfunc.cc | 4 +- sql/item_cmpfunc.h | 10 +- sql/item_func.cc | 15 +- sql/item_func.h | 4 +- sql/item_row.cc | 2 +- sql/item_row.h | 4 +- sql/item_subselect.cc | 47 +- sql/item_sum.cc | 183 +- sql/item_sum.h | 82 +- sql/item_windowfunc.cc | 242 ++ sql/item_windowfunc.h | 757 ++++ sql/lex.h | 14 + sql/log.cc | 6 + sql/log.h | 1 + sql/log_event.cc | 2 +- sql/mysqld.cc | 122 +- sql/mysqld.h | 1 + sql/opt_range.cc | 27 +- sql/opt_subselect.cc | 36 +- sql/opt_sum.cc | 4 +- sql/opt_table_elimination.cc | 2 +- sql/records.cc | 6 +- sql/rpl_gtid.cc | 3 +- sql/rpl_gtid.h | 3 +- sql/rpl_mi.cc | 81 +- sql/rpl_parallel.cc | 1 - sql/rpl_rli.cc | 1 + sql/rpl_rli.h | 9 +- sql/set_var.cc | 7 +- sql/set_var.h | 1 + sql/share/errmsg-utf8.txt | 47 + sql/slave.cc | 1 - sql/sql_analyze_stmt.cc | 72 - sql/sql_analyze_stmt.h | 171 - sql/sql_array.h | 14 + sql/sql_base.cc | 55 +- sql/sql_base.h | 10 +- sql/sql_cache.cc | 2 +- sql/sql_class.cc | 15 +- sql/sql_class.h | 44 +- sql/sql_connect.cc | 2 - sql/sql_cte.cc | 5 +- sql/sql_delete.cc | 27 +- sql/sql_derived.cc | 8 +- sql/sql_do.cc | 2 +- sql/sql_explain.cc | 250 +- sql/sql_explain.h | 92 +- sql/sql_insert.cc | 38 +- sql/sql_lex.cc | 50 +- sql/sql_lex.h | 42 +- sql/sql_list.h | 12 +- sql/sql_load.cc | 310 +- sql/sql_locale.h | 2 +- sql/sql_parse.cc | 69 +- sql/sql_plugin.cc | 10 +- sql/sql_prepare.cc | 13 +- sql/sql_priv.h | 1 + sql/sql_repl.cc | 1 + sql/sql_select.cc | 3714 ++++++++++--------- sql/sql_select.h | 376 +- sql/sql_show.cc | 23 +- sql/sql_show.h | 2 +- sql/sql_table.cc | 82 
+- sql/sql_test.cc | 58 +- sql/sql_trigger.cc | 1 - sql/sql_union.cc | 39 +- sql/sql_update.cc | 28 +- sql/sql_view.cc | 2 + sql/sql_window.cc | 2129 +++++++++++ sql/sql_window.h | 230 ++ sql/sql_yacc.yy | 338 +- sql/sys_vars.cc | 3 +- sql/table.cc | 6 +- sql/table.h | 65 +- sql/unireg.h | 19 +- sql/wsrep_mysqld.cc | 15 +- sql/wsrep_sst.cc | 2 +- sql/wsrep_utils.h | 2 +- storage/innobase/dict/dict0boot.cc | 20 +- storage/innobase/fil/fil0fil.cc | 6 +- storage/innobase/include/log0crypt.h | 10 +- storage/innobase/include/ut0ut.h | 25 +- storage/innobase/lock/lock0lock.cc | 8 +- storage/innobase/log/log0crypt.cc | 43 +- storage/innobase/log/log0recv.cc | 2 + storage/innobase/ut/ut0ut.cc | 17 +- storage/mroonga/ha_mroonga.cpp | 2 +- storage/mroonga/mrn_table.cpp | 2 +- storage/spider/spd_db_mysql.cc | 2 +- storage/tokudb/ha_tokudb.h | 6 + .../mysql-test/tokudb/t/change_column_all.py | 1 + .../mysql-test/tokudb_bugs/r/simple_icp.result | 4 +- .../r/part_supported_sql_func_tokudb.result | 1055 ++---- .../r/partition_alter1_1_2_tokudb.result | 296 +- .../r/partition_alter1_1_tokudb.result | 264 +- .../r/partition_alter1_2_tokudb.result | 456 +-- .../r/partition_alter2_1_1_tokudb.result | 280 +- .../r/partition_alter2_1_2_tokudb.result | 280 +- .../r/partition_alter2_2_1_tokudb.result | 280 +- .../r/partition_alter2_2_2_tokudb.result | 280 +- .../tokudb_parts/r/partition_alter4_tokudb.result | 1608 ++++---- .../tokudb_parts/r/partition_basic_tokudb.result | 392 +- .../tokudb_parts/r/partition_debug_tokudb.result | 327 +- storage/xtradb/dict/dict0boot.cc | 20 +- storage/xtradb/fil/fil0fil.cc | 8 +- storage/xtradb/include/log0crypt.h | 10 +- storage/xtradb/include/ut0ut.h | 29 +- storage/xtradb/lock/lock0lock.cc | 8 +- storage/xtradb/log/log0crypt.cc | 43 +- storage/xtradb/log/log0recv.cc | 2 + storage/xtradb/os/os0file.cc | 2 +- storage/xtradb/ut/ut0ut.cc | 17 +- strings/conf_to_src.c | 8 +- strings/ctype-eucjpms.c | 1 + strings/ctype-mb.ic | 6 + strings/ctype-ujis.c | 1 + tests/mysql_client_test.c | 5 +- unittest/sql/explain_filename-t.cc | 15 +- win/packaging/extra.wxs.in | 11 - 380 files changed, 21915 insertions(+), 6186 deletions(-) create mode 100644 mysql-test/include/crash_mysqld.inc create mode 100644 mysql-test/r/cte_recursive.result create mode 100644 mysql-test/r/ctype_gbk_export_import.result create mode 100644 mysql-test/r/order_by_optimizer.result create mode 100644 mysql-test/r/win.result create mode 100644 mysql-test/r/win_avg.result create mode 100644 mysql-test/r/win_bit.result create mode 100644 mysql-test/r/win_ntile.result create mode 100644 mysql-test/r/win_orderby.result create mode 100644 mysql-test/r/win_percent_cume.result create mode 100644 mysql-test/r/win_rank.result create mode 100644 mysql-test/r/win_sum.result create mode 100644 mysql-test/std_data/bad2_master.info create mode 100644 mysql-test/std_data/bad3_master.info create mode 100644 mysql-test/std_data/bad4_master.info create mode 100644 mysql-test/std_data/bad5_master.info create mode 100644 mysql-test/std_data/bad6_master.info create mode 100644 mysql-test/std_data/bad_master.info create mode 100644 mysql-test/std_data/loaddata/mdev9823.ujis.txt create mode 100644 mysql-test/std_data/loaddata/mdev9823.utf8mb4.txt create mode 100644 mysql-test/std_data/loaddata/mdev9874.xml create mode 100644 mysql-test/suite/rpl/r/rpl_upgrade_master_info.result create mode 100644 mysql-test/suite/rpl/t/rpl_upgrade_master_info.test create mode 100644 mysql-test/t/cte_recursive.test create mode 100644 
mysql-test/t/ctype_gbk_export_import.test create mode 100644 mysql-test/t/order_by_optimizer.test create mode 100644 mysql-test/t/win.test create mode 100644 mysql-test/t/win_avg.test create mode 100644 mysql-test/t/win_bit.test create mode 100644 mysql-test/t/win_ntile.test create mode 100644 mysql-test/t/win_orderby.test create mode 100644 mysql-test/t/win_percent_cume.test create mode 100644 mysql-test/t/win_rank.test create mode 100644 mysql-test/t/win_sum.test create mode 100644 sql/item_windowfunc.cc create mode 100644 sql/item_windowfunc.h create mode 100644 sql/sql_window.cc create mode 100644 sql/sql_window.h diff --git a/.gitignore b/.gitignore index 4c7efe9a119..f2064537da4 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ Docs/INFO_SRC Makefile TAGS Testing/ +tmp/ VERSION.dep configure client/async_example diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 13c642df932..f09ad3107cc 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -703,7 +703,7 @@ public: DBUG_ASSERT(ds->str); #ifdef EXTRA_DEBUG - DBUG_PRINT("QQ", ("str: %*s", (int) ds->length, ds->str)); + DBUG_PRINT("extra", ("str: %*s", (int) ds->length, ds->str)); #endif if (fwrite(ds->str, 1, ds->length, m_file) != ds->length) diff --git a/config.h.cmake b/config.h.cmake index 048ce816a43..f35e33ebe1a 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -189,6 +189,7 @@ #cmakedefine HAVE_PREAD 1 #cmakedefine HAVE_PAUSE_INSTRUCTION 1 #cmakedefine HAVE_FAKE_PAUSE_INSTRUCTION 1 +#cmakedefine HAVE_HMT_PRIORITY_INSTRUCTION 1 #cmakedefine HAVE_RDTSCLL 1 #cmakedefine HAVE_READ_REAL_TIME 1 #cmakedefine HAVE_PTHREAD_ATTR_CREATE 1 diff --git a/configure.cmake b/configure.cmake index 294c5e24ca8..4470bee3223 100644 --- a/configure.cmake +++ b/configure.cmake @@ -784,6 +784,17 @@ IF(NOT CMAKE_CROSSCOMPILING AND NOT MSVC) } " HAVE_FAKE_PAUSE_INSTRUCTION) ENDIF() + IF (NOT HAVE_PAUSE_INSTRUCTION) + CHECK_C_SOURCE_COMPILES(" + #include + int main() + { + __ppc_set_ppr_low(); + __ppc_set_ppr_med(); + return 0; + } + " HAVE_HMT_PRIORITY_INSTRUCTION) + ENDIF() ENDIF() CHECK_SYMBOL_EXISTS(tcgetattr "termios.h" HAVE_TCGETATTR 1) diff --git a/extra/comp_err.c b/extra/comp_err.c index 3fc4b05fa61..2bab6b60be7 100644 --- a/extra/comp_err.c +++ b/extra/comp_err.c @@ -32,9 +32,11 @@ #include #include -#define MAX_ROWS 2000 +#define MAX_ROWS 3000 +#define ERRORS_PER_RANGE 1000 +#define MAX_SECTIONS 4 #define HEADER_LENGTH 32 /* Length of header in errmsg.sys */ -#define ERRMSG_VERSION 3 /* Version number of errmsg.sys */ +#define ERRMSG_VERSION 4 /* Version number of errmsg.sys */ #define DEFAULT_CHARSET_DIR "../sql/share/charsets" #define ER_PREFIX "ER_" #define ER_PREFIX2 "MARIA_ER_" @@ -53,6 +55,8 @@ static char *default_dbug_option= (char*) "d:t:O,/tmp/comp_err.trace"; uchar file_head[]= { 254, 254, 2, ERRMSG_VERSION }; /* Store positions to each error message row to store in errmsg.sys header */ uint file_pos[MAX_ROWS+1]; +uint section_count,section_start; +uchar section_header[MAX_SECTIONS*2]; const char *empty_string= ""; /* For empty states */ /* @@ -131,7 +135,7 @@ static struct my_option my_long_options[]= }; -static struct errors *generate_empty_message(uint dcode); +static struct errors *generate_empty_message(uint dcode, my_bool skip); static struct languages *parse_charset_string(char *str); static struct errors *parse_error_string(char *ptr, int er_count); static struct message *parse_message_string(struct message *new_message, @@ -140,8 +144,9 @@ static struct message *find_message(struct errors *err, 
const char *lang, my_bool no_default); static int check_message_format(struct errors *err, const char* mess); -static int parse_input_file(const char *file_name, struct errors **top_error, - struct languages **top_language); +static uint parse_input_file(const char *file_name, struct errors **top_error, + struct languages **top_language, + uint *error_count); static int get_options(int *argc, char ***argv); static void print_version(void); static void usage(void); @@ -158,14 +163,15 @@ static char *find_end_of_word(char *str); static void clean_up(struct languages *lang_head, struct errors *error_head); static int create_header_files(struct errors *error_head); static int create_sys_files(struct languages *lang_head, - struct errors *error_head, uint row_count); + struct errors *error_head, uint max_error, + uint error_count); int main(int argc, char *argv[]) { MY_INIT(argv[0]); { - uint row_count; + uint max_error, error_count; struct errors *error_head; struct languages *lang_head; DBUG_ENTER("main"); @@ -173,32 +179,39 @@ int main(int argc, char *argv[]) charsets_dir= DEFAULT_CHARSET_DIR; my_umask_dir= 0777; if (get_options(&argc, &argv)) - DBUG_RETURN(1); - if (!(row_count= parse_input_file(TXTFILE, &error_head, &lang_head))) + goto err; + if (!(max_error= parse_input_file(TXTFILE, &error_head, &lang_head, + &error_count))) { fprintf(stderr, "Failed to parse input file %s\n", TXTFILE); - DBUG_RETURN(1); + goto err; } if (lang_head == NULL || error_head == NULL) { fprintf(stderr, "Failed to parse input file %s\n", TXTFILE); - DBUG_RETURN(1); + goto err; } if (create_header_files(error_head)) { fprintf(stderr, "Failed to create header files\n"); - DBUG_RETURN(1); + goto err; } - if (create_sys_files(lang_head, error_head, row_count)) + if (create_sys_files(lang_head, error_head, max_error, error_count)) { fprintf(stderr, "Failed to create sys files\n"); - DBUG_RETURN(1); + goto err; } clean_up(lang_head, error_head); DBUG_LEAVE; /* Can't use dbug after my_end() */ my_end(info_flag ? MY_CHECK_ERROR | MY_GIVE_INFO : 0); return 0; + +err: + clean_up(lang_head, error_head); + DBUG_LEAVE; /* Can't use dbug after my_end() */ + my_end(info_flag ? 
MY_CHECK_ERROR | MY_GIVE_INFO : 0); + exit(1); } } @@ -226,6 +239,7 @@ static void print_escaped_string(FILE *f, const char *str) static int create_header_files(struct errors *error_head) { uint er_last= 0; + uint section= 1; FILE *er_definef, *sql_statef, *er_namef; struct errors *tmp_error; struct message *er_msg; @@ -266,8 +280,19 @@ static int create_header_files(struct errors *error_head) if (!tmp_error->er_name) continue; /* Placeholder for gap */ - if (tmp_error->d_code > current_d_code + 1) + while (tmp_error->d_code > current_d_code + 1) + { + uint next_range= (((current_d_code + ERRORS_PER_RANGE) / + ERRORS_PER_RANGE) * ERRORS_PER_RANGE); + + fprintf(er_definef, "#define ER_ERROR_LAST_SECTION_%d %d\n", section, + current_d_code); fprintf(er_definef, "\n/* New section */\n\n"); + fprintf(er_definef, "#define ER_ERROR_FIRST_SECTION_%d %d\n", section+1, + MY_MIN(tmp_error->d_code, next_range)); + section++; + current_d_code= MY_MIN(tmp_error->d_code, next_range); + } current_d_code= tmp_error->d_code; fprintf(er_definef, "#define %s %u\n", tmp_error->er_name, @@ -297,17 +322,18 @@ static int create_header_files(struct errors *error_head) static int create_sys_files(struct languages *lang_head, - struct errors *error_head, uint row_count) + struct errors *error_head, + uint max_error, + uint error_count) { FILE *to; uint csnum= 0, length, i, row_nr; - uchar head[32]; + uchar head[HEADER_LENGTH]; char outfile[FN_REFLEN], *outfile_end; long start_pos; struct message *tmp; struct languages *tmp_lang; struct errors *tmp_error; - MY_STAT stat_info; DBUG_ENTER("create_sys_files"); @@ -331,7 +357,7 @@ static int create_sys_files(struct languages *lang_head, { if (my_mkdir(outfile, 0777,MYF(0)) < 0) { - fprintf(stderr, "Can't create output directory for %s\n", + fprintf(stderr, "Can't creqate output directory for %s\n", outfile); DBUG_RETURN(1); } @@ -343,8 +369,8 @@ static int create_sys_files(struct languages *lang_head, DBUG_RETURN(1); /* 2 is for 2 bytes to store row position / error message */ - start_pos= (long) (HEADER_LENGTH + row_count * 2); - fseek(to, start_pos, 0); + start_pos= (long) (HEADER_LENGTH + (error_count + section_count) * 2); + my_fseek(to, start_pos, 0, MYF(0)); row_nr= 0; for (tmp_error= error_head; tmp_error; tmp_error= tmp_error->next_error) { @@ -358,29 +384,38 @@ static int create_sys_files(struct languages *lang_head, "language\n", tmp_error->er_name, tmp_lang->lang_short_name); goto err; } - if (copy_rows(to, tmp->text, row_nr, start_pos)) + if (tmp->text) /* If not skipped row */ { - fprintf(stderr, "Failed to copy rows to %s\n", outfile); - goto err; + if (copy_rows(to, tmp->text, row_nr, start_pos)) + { + fprintf(stderr, "Failed to copy rows to %s\n", outfile); + goto err; + } + row_nr++; } - row_nr++; } + DBUG_ASSERT(error_count == row_nr); /* continue with header of the errmsg.sys file */ - length= ftell(to) - HEADER_LENGTH - row_count * 2; + length= (my_ftell(to, MYF(0)) - HEADER_LENGTH - + (error_count + section_count) * 2); bzero((uchar*) head, HEADER_LENGTH); - bmove((uchar *) head, (uchar *) file_head, 4); + bmove((uchar*) head, (uchar*) file_head, 4); head[4]= 1; int4store(head + 6, length); - int2store(head + 10, row_count); + int2store(head + 10, max_error); /* Max error */ + int2store(head + 12, row_nr); + int2store(head + 14, section_count); head[30]= csnum; my_fseek(to, 0l, MY_SEEK_SET, MYF(0)); - if (my_fwrite(to, (uchar*) head, HEADER_LENGTH, MYF(MY_WME | MY_FNABP))) + if (my_fwrite(to, (uchar*) head, HEADER_LENGTH, MYF(MY_WME | MY_FNABP)) || 
+ my_fwrite(to, (uchar*) section_header, section_count*2, + MYF(MY_WME | MY_FNABP))) goto err; - file_pos[row_count]= (ftell(to) - start_pos); - for (i= 0; i < row_count; i++) + file_pos[row_nr]= (ftell(to) - start_pos); + for (i= 0; i < row_nr; i++) { /* Store length of each string */ int2store(head, file_pos[i+1] - file_pos[i]); @@ -437,24 +472,29 @@ static void clean_up(struct languages *lang_head, struct errors *error_head) } -static int parse_input_file(const char *file_name, struct errors **top_error, - struct languages **top_lang) +static uint parse_input_file(const char *file_name, struct errors **top_error, + struct languages **top_lang, uint *error_count) { FILE *file; char *str, buff[1000]; struct errors *current_error= 0, **tail_error= top_error; struct message current_message; - uint rcount= 0; + uint rcount= 0, skiped_errors= 0; my_bool er_offset_found= 0; DBUG_ENTER("parse_input_file"); *top_error= 0; *top_lang= 0; + *error_count= 0; + section_start= er_offset; + section_count= 0; + if (!(file= my_fopen(file_name, O_RDONLY | O_SHARE, MYF(MY_WME)))) DBUG_RETURN(0); while ((str= fgets(buff, sizeof(buff), file))) { + my_bool skip; if (is_prefix(str, "language")) { if (!(*top_lang= parse_charset_string(str))) @@ -464,18 +504,34 @@ static int parse_input_file(const char *file_name, struct errors **top_error, } continue; } - if (is_prefix(str, "start-error-number")) + skip= 0; + if (is_prefix(str, "start-error-number") || + (skip= is_prefix(str, "skip-to-error-number"))) { uint tmp_er_offset; + if (!(tmp_er_offset= parse_error_offset(str))) { fprintf(stderr, "Failed to parse the error offset string!\n"); DBUG_RETURN(0); } + if (skip) + { + if (section_count >= MAX_SECTIONS-1) + { + fprintf(stderr, "Found too many skip-to-error-number entries. " + "We only support %d entries\n", MAX_SECTIONS); + DBUG_RETURN(0); + } + int2store(section_header + section_count*2, + er_offset +rcount - section_start); + section_count++; + section_start= tmp_er_offset; + } if (!er_offset_found) { er_offset_found= 1; - er_offset= tmp_er_offset; + er_offset= section_start= tmp_er_offset; } else { @@ -487,7 +543,8 @@ static int parse_input_file(const char *file_name, struct errors **top_error, } for ( ; er_offset + rcount < tmp_er_offset ; rcount++) { - current_error= generate_empty_message(er_offset + rcount); + skiped_errors+= skip != 0; + current_error= generate_empty_message(er_offset + rcount, skip); *tail_error= current_error; tail_error= ¤t_error->next_error; } @@ -559,6 +616,11 @@ static int parse_input_file(const char *file_name, struct errors **top_error, fprintf(stderr, "Wrong input file format. 
Stop!\nLine: %s\n", str); DBUG_RETURN(0); } + int2store(section_header + section_count*2, + er_offset + rcount - section_start); + section_count++; + *error_count= rcount - skiped_errors; + *tail_error= 0; /* Mark end of list */ my_fclose(file, MYF(0)); @@ -887,7 +949,7 @@ static struct message *parse_message_string(struct message *new_message, } -static struct errors *generate_empty_message(uint d_code) +static struct errors *generate_empty_message(uint d_code, my_bool skip) { struct errors *new_error; struct message message; @@ -896,7 +958,8 @@ static struct errors *generate_empty_message(uint d_code) if (!(new_error= (struct errors *) my_malloc(sizeof(*new_error), MYF(MY_WME)))) return(0); - if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 1, MYF(0))) + if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 1, + MYF(0))) return(0); /* OOM: Fatal error */ new_error->er_name= NULL; @@ -904,8 +967,10 @@ static struct errors *generate_empty_message(uint d_code) new_error->sql_code1= empty_string; new_error->sql_code2= empty_string; + message.text= 0; /* If skip set, don't generate a text */ + if (!(message.lang_short_name= my_strdup(default_language, MYF(MY_WME))) || - !(message.text= my_strdup("", MYF(MY_WME)))) + (!skip && !(message.text= my_strdup("", MYF(MY_WME))))) return(0); /* Can't fail as msg is preallocated */ @@ -1071,10 +1136,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__ ((unused)), switch (optid) { case 'V': print_version(); + my_end(0); exit(0); break; case '?': usage(); + my_end(0); exit(0); break; case '#': diff --git a/include/my_sys.h b/include/my_sys.h index 36530eb94e9..7b7158573b4 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -689,7 +689,7 @@ extern void my_osmaperr(unsigned long last_error); #endif extern void init_glob_errs(void); -extern const char** get_global_errmsgs(void); +extern const char** get_global_errmsgs(int nr); extern void wait_for_free_space(const char *filename, int errors); extern FILE *my_fopen(const char *FileName,int Flags,myf MyFlags); extern FILE *my_fdopen(File Filedes,const char *name, int Flags,myf MyFlags); @@ -714,9 +714,9 @@ extern void my_printf_error(uint my_err, const char *format, ATTRIBUTE_FORMAT(printf, 2, 4); extern void my_printv_error(uint error, const char *format, myf MyFlags, va_list ap); -extern int my_error_register(const char** (*get_errmsgs) (void), +extern int my_error_register(const char** (*get_errmsgs) (int nr), uint first, uint last); -extern const char **my_error_unregister(uint first, uint last); +extern my_bool my_error_unregister(uint first, uint last); extern void my_message(uint my_err, const char *str,myf MyFlags); extern void my_message_stderr(uint my_err, const char *str, myf MyFlags); extern my_bool my_init(void); diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h index cfa4b13a7ef..b3c71c65488 100644 --- a/include/mysql/plugin.h +++ b/include/mysql/plugin.h @@ -194,8 +194,10 @@ struct st_mysql_show_var { enum enum_mysql_show_type type; }; +struct system_status_var; + #define SHOW_VAR_FUNC_BUFF_SIZE (256 * sizeof(void*)) -typedef int (*mysql_show_var_func)(MYSQL_THD, struct st_mysql_show_var*, void *, enum enum_var_type); +typedef int (*mysql_show_var_func)(MYSQL_THD, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); /* diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp index 8fc935262e2..aaf41c74a54 100644 --- a/include/mysql/plugin_audit.h.pp +++ 
b/include/mysql/plugin_audit.h.pp @@ -281,7 +281,8 @@ struct st_mysql_show_var { void *value; enum enum_mysql_show_type type; }; -typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, enum enum_var_type); +struct system_status_var; +typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); struct st_mysql_sys_var; struct st_mysql_value; typedef int (*mysql_var_check_func)(void* thd, diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp index 046f92b5ab8..10cd10bf9c8 100644 --- a/include/mysql/plugin_auth.h.pp +++ b/include/mysql/plugin_auth.h.pp @@ -281,7 +281,8 @@ struct st_mysql_show_var { void *value; enum enum_mysql_show_type type; }; -typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, enum enum_var_type); +struct system_status_var; +typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); struct st_mysql_sys_var; struct st_mysql_value; typedef int (*mysql_var_check_func)(void* thd, diff --git a/include/mysql/plugin_encryption.h.pp b/include/mysql/plugin_encryption.h.pp index 850dbf05a58..46d3c3d5a55 100644 --- a/include/mysql/plugin_encryption.h.pp +++ b/include/mysql/plugin_encryption.h.pp @@ -281,7 +281,8 @@ struct st_mysql_show_var { void *value; enum enum_mysql_show_type type; }; -typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, enum enum_var_type); +struct system_status_var; +typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); struct st_mysql_sys_var; struct st_mysql_value; typedef int (*mysql_var_check_func)(void* thd, diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp index ee1056a36d7..17de800875e 100644 --- a/include/mysql/plugin_ftparser.h.pp +++ b/include/mysql/plugin_ftparser.h.pp @@ -281,7 +281,8 @@ struct st_mysql_show_var { void *value; enum enum_mysql_show_type type; }; -typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, enum enum_var_type); +struct system_status_var; +typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); struct st_mysql_sys_var; struct st_mysql_value; typedef int (*mysql_var_check_func)(void* thd, diff --git a/include/mysql/plugin_password_validation.h.pp b/include/mysql/plugin_password_validation.h.pp index 0f14ac1eb53..1abdbd30f57 100644 --- a/include/mysql/plugin_password_validation.h.pp +++ b/include/mysql/plugin_password_validation.h.pp @@ -281,7 +281,8 @@ struct st_mysql_show_var { void *value; enum enum_mysql_show_type type; }; -typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, enum enum_var_type); +struct system_status_var; +typedef int (*mysql_show_var_func)(void*, struct st_mysql_show_var*, void *, struct system_status_var *status_var, enum enum_var_type); struct st_mysql_sys_var; struct st_mysql_value; typedef int (*mysql_var_check_func)(void* thd, diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c index e30cdc9762b..fc5a6a07e11 100644 --- a/libmysql/errmsg.c +++ b/libmysql/errmsg.c @@ -90,7 +90,7 @@ const char *client_errors[]= "" }; -const char** get_client_errmsgs(void) +const char** get_client_errmsgs(int nr __attribute__((unused))) { return client_errors; } diff --git a/libmysql/get_password.c b/libmysql/get_password.c index 
e704aec8337..a113306ed57 100644 --- a/libmysql/get_password.c +++ b/libmysql/get_password.c @@ -46,7 +46,7 @@ #endif #endif #ifdef alpha_linux_port -#include /* QQ; Fix this in configure */ +#include #include #endif #else diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 2081e7558d5..ad16584ec0e 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -109,6 +109,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc ../sql/table_cache.cc ../sql/mf_iocache_encr.cc ../sql/item_inetfunc.cc ../sql/wsrep_dummy.cc ../sql/encryption.cc + ../sql/item_windowfunc.cc ../sql/sql_window.cc ../sql/sql_cte.cc ${GEN_SOURCES} ${MYSYS_LIBWRAP_SOURCE} diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 476981023fd..a067856b287 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -517,6 +517,9 @@ int init_embedded_server(int argc, char **argv, char **groups) if (my_thread_init()) return 1; + if (init_early_variables()) + return 1; + if (argc) { argcp= &argc; diff --git a/mysql-test/include/crash_mysqld.inc b/mysql-test/include/crash_mysqld.inc new file mode 100644 index 00000000000..4190d24d801 --- /dev/null +++ b/mysql-test/include/crash_mysqld.inc @@ -0,0 +1,18 @@ +# Crash mysqld hard and wait until it's restarted + +--source include/have_debug_sync.inc +--source include/not_embedded.inc + +# Write file to make mysql-test-run.pl expect crash and restart +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + +# Setup the mysqld to crash at shutdown +SET debug_dbug="d,crash_shutdown"; +--error 2013 +shutdown; + +# Turn on reconnect +--enable_reconnect + +# Call script that will poll the server waiting for it to be back online again +--source include/wait_until_connected_again.inc diff --git a/mysql-test/include/galera_connect.inc b/mysql-test/include/galera_connect.inc index 9d458c5e063..a40b03bf421 100644 --- a/mysql-test/include/galera_connect.inc +++ b/mysql-test/include/galera_connect.inc @@ -37,12 +37,15 @@ if (!$_galera_port) if ($galera_debug) { +--disable_query_log --echo connect($galera_connection_name,127.0.0.1,root,,test,$_galera_port,) +--enable_query_log } # Temporal solution to avoid concurrent IST MDEV-7178 --sleep 1 # Open a connection +--disable_query_log --connect($galera_connection_name,127.0.0.1,root,,test,$_galera_port,) - +--enable_query_log diff --git a/mysql-test/r/analyze_format_json.result b/mysql-test/r/analyze_format_json.result index 4f8876a8414..fadbe705f99 100644 --- a/mysql-test/r/analyze_format_json.result +++ b/mysql-test/r/analyze_format_json.result @@ -492,8 +492,9 @@ ANALYZE "select_id": 1, "r_loops": 1, "volatile parameter": "REPLACED", - "having_condition": "(TOP > a)", + "having_condition": "(TOP > t2.a)", "filesort": { + "sort_key": "t2.a", "r_loops": 1, "volatile parameter": "REPLACED", "r_used_priority_queue": false, @@ -523,6 +524,7 @@ ANALYZE "r_loops": 1, "volatile parameter": "REPLACED", "filesort": { + "sort_key": "t2.a", "r_loops": 1, "volatile parameter": "REPLACED", "r_used_priority_queue": false, @@ -563,6 +565,7 @@ ANALYZE "r_loops": 1, "volatile parameter": "REPLACED", "filesort": { + "sort_key": "t2.a", "r_loops": 1, "volatile parameter": "REPLACED", "r_used_priority_queue": false, @@ -684,18 +687,20 @@ ANALYZE "r_loops": 1, "volatile parameter": "REPLACED", "filesort": { + "sort_key": "group_concat(t3.f3 separator ',')", "r_loops": 1, "volatile parameter": "REPLACED", "r_used_priority_queue": false, "r_output_rows": 0, "volatile parameter": "REPLACED", - "filesort": { - 
"r_loops": 1, - "volatile parameter": "REPLACED", - "r_used_priority_queue": false, - "r_output_rows": 0, - "volatile parameter": "REPLACED", - "temporary_table": { + "temporary_table": { + "filesort": { + "sort_key": "(subquery#2)", + "r_loops": 1, + "volatile parameter": "REPLACED", + "r_used_priority_queue": false, + "r_output_rows": 0, + "volatile parameter": "REPLACED", "temporary_table": { "table": { "table_name": "t2", diff --git a/mysql-test/r/analyze_stmt_orderby.result b/mysql-test/r/analyze_stmt_orderby.result index be1f01a2a52..37f0005148e 100644 --- a/mysql-test/r/analyze_stmt_orderby.result +++ b/mysql-test/r/analyze_stmt_orderby.result @@ -172,8 +172,8 @@ EXPLAIN "query_block": { "select_id": 1, "filesort": { + "sort_key": "t2.b", "temporary_table": { - "function": "buffer", "table": { "table_name": "t0", "access_type": "ALL", @@ -205,6 +205,7 @@ ANALYZE "r_loops": 1, "r_total_time_ms": "REPLACED", "filesort": { + "sort_key": "t2.b", "r_loops": 1, "r_total_time_ms": "REPLACED", "r_limit": 4, @@ -257,6 +258,7 @@ EXPLAIN "select_id": 1, "read_sorted_file": { "filesort": { + "sort_key": "t0.a", "table": { "table_name": "t0", "access_type": "ALL", @@ -290,6 +292,7 @@ ANALYZE "read_sorted_file": { "r_rows": 10, "filesort": { + "sort_key": "t0.a", "r_loops": 1, "r_total_time_ms": "REPLACED", "r_used_priority_queue": false, @@ -346,6 +349,7 @@ ANALYZE "r_loops": 1, "r_total_time_ms": "REPLACED", "filesort": { + "sort_key": "t2.c", "r_loops": 1, "r_total_time_ms": "REPLACED", "r_used_priority_queue": false, @@ -455,18 +459,20 @@ ANALYZE "r_loops": 1, "r_total_time_ms": "REPLACED", "filesort": { + "sort_key": "count(distinct t5.b)", "r_loops": 1, "r_total_time_ms": "REPLACED", "r_limit": 1, "r_used_priority_queue": true, "r_output_rows": 2, - "filesort": { - "r_loops": 1, - "r_total_time_ms": "REPLACED", - "r_used_priority_queue": false, - "r_output_rows": 6, - "r_buffer_size": "REPLACED", - "temporary_table": { + "temporary_table": { + "filesort": { + "sort_key": "t5.a", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "r_used_priority_queue": false, + "r_output_rows": 6, + "r_buffer_size": "REPLACED", "temporary_table": { "table": { "table_name": "t6", @@ -511,26 +517,31 @@ EXPLAIN "query_block": { "select_id": 1, "filesort": { + "sort_key": "count(distinct t5.b)", "temporary_table": { - "function": "buffer", - "table": { - "table_name": "t6", - "access_type": "ALL", - "rows": 5, - "filtered": 100, - "attached_condition": "((t6.b > 0) and (t6.a <= 5))" - }, - "block-nl-join": { - "table": { - "table_name": "t5", - "access_type": "ALL", - "rows": 7, - "filtered": 100 - }, - "buffer_type": "flat", - "buffer_size": "256Kb", - "join_type": "BNL", - "attached_condition": "(t5.a = t6.a)" + "filesort": { + "sort_key": "t5.a", + "temporary_table": { + "table": { + "table_name": "t6", + "access_type": "ALL", + "rows": 5, + "filtered": 100, + "attached_condition": "((t6.b > 0) and (t6.a <= 5))" + }, + "block-nl-join": { + "table": { + "table_name": "t5", + "access_type": "ALL", + "rows": 7, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(t5.a = t6.a)" + } + } } } } diff --git a/mysql-test/r/connect2.result b/mysql-test/r/connect2.result index b68f7ae7c43..5430626a533 100644 --- a/mysql-test/r/connect2.result +++ b/mysql-test/r/connect2.result @@ -1,6 +1,7 @@ call mtr.add_suppression("Allocation failed"); SET @old_debug= @@session.debug; set @old_thread_cache_size=@@global.thread_cache_size; +set 
@@global.thread_cache_size=0; connect con1,localhost,root,,test,,; select 1; 1 diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index df641156e61..a9c13f3f10b 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -746,3 +746,16 @@ with t(f1,f1) as (select * from t1 where b >= 'c') select t1.b from t2,t1 where t1.a = t2.c; ERROR 42S21: Duplicate column name 'f1' drop table t1,t2; +# +# Bug mdev-9937: View used in the specification of with table +# refers to the base table with the same name +# +create table t1 (a int); +insert into t1 values (20), (30), (10); +create view v1 as select * from t1 where a > 10; +with t1 as (select * from v1) select * from t1; +a +20 +30 +drop view v1; +drop table t1; diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result new file mode 100644 index 00000000000..7408bc56e63 --- /dev/null +++ b/mysql-test/r/cte_recursive.result @@ -0,0 +1,289 @@ +create table t1 (a int, b varchar(32)); +insert into t1 values +(4,'aaaa' ), (7,'bb'), (1,'ccc'), (4,'dd'); +insert into t1 values +(3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); +with recursive +a1(a,b) as +(select * from t1 where t1.a>3 +union +select * from b1 where b1.a >3 +union +select * from c1 where c1.a>3), +b1(a,b) as +(select * from a1 where a1.b > 'ccc' +union +select * from c1 where c1.b > 'ddd'), +c1(a,b) as +(select * from a1 where a1.a<6 and a1.b< 'zz' +union +select * from b1 where b1.b > 'auu') +select * from c1; +ERROR HY000: No anchors for recursive WITH element 'b1' +drop table t1; +create table folks(id int, name char(32), dob date, father int, mother int); +insert into folks values +(100, 'Vasya', '2000-01-01', 20, 30), +(20, 'Dad', '1970-02-02', 10, 9), +(30, 'Mom', '1975-03-03', 8, 7), +(10, 'Grandpa Bill', '1940-04-05', null, null), +(9, 'Grandma Ann', '1941-10-15', null, null), +(25, 'Uncle Jim', '1968-11-18', 8, 7), +(98, 'Sister Amy', '2001-06-20', 20, 30), +(8, 'Grandma Sally', '1943-08-23', 5, 6), +(6, 'Grandgrandma Martha', '1923-05-17', null, null), +(67, 'Cousin Eddie', '1992-02-28', 25, 27), +(27, 'Auntie Melinda', '1971-03-29', null, null); +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.id, p.name, p.dob, p.father, p.mother +from folks as p, ancestors AS a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select p.* +from folks as p, ancestors AS a +where p.id = a.father or p.id = a.mother +union +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Cousin Eddie' + union +select p.* +from folks as p, ancestors as a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +67 Cousin Eddie 1992-02-28 25 27 +25 Uncle Jim 1968-11-18 8 7 +27 Auntie Melinda 1971-03-29 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 
Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' or name='Sister Amy' + union +select p.* +from folks as p, ancestors as a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +prev_gen +as +( +select folks.* +from folks, prev_gen +where folks.id=prev_gen.father or folks.id=prev_gen.mother +union +select * +from folks +where name='Vasya' +), +ancestors +as +( +select * +from folks +where name='Vasya' + union +select * +from ancestors +union +select * +from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; +name dob +Vasya 2000-01-01 +Dad 1970-02-02 +Mom 1975-03-03 +Grandpa Bill 1940-04-05 +Grandma Ann 1941-10-15 +Grandma Sally 1943-08-23 +Grandgrandma Martha 1923-05-17 +with recursive +descendants +as +( +select * +from folks +where name = 'Grandpa Bill' + union +select folks.* +from folks, descendants as d +where d.id=folks.father or d.id=folks.mother +) +select * from descendants; +id name dob father mother +10 Grandpa Bill 1940-04-05 NULL NULL +20 Dad 1970-02-02 10 9 +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +with recursive +descendants +as +( +select * +from folks +where name = 'Grandma Sally' + union +select folks.* +from folks, descendants as d +where d.id=folks.father or d.id=folks.mother +) +select * from descendants; +id name dob father mother +8 Grandma Sally 1943-08-23 5 6 +30 Mom 1975-03-03 8 7 +25 Uncle Jim 1968-11-18 8 7 +100 Vasya 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +67 Cousin Eddie 1992-02-28 25 27 +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father OR p.id = a.mother +) +select * +from ancestors t1, ancestors t2 +where exists (select * from ancestors a +where a.father=t1.id AND a.mother=t2.id); +id name dob father mother id name dob father mother +20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL +with +ancestor_couples(husband, h_dob, wife, w_dob) +as +( +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father OR p.id = a.mother +) +select t1.name, t1.dob, t2.name, t2.dob +from ancestors t1, ancestors t2 +where exists (select * from ancestors a +where a.father=t1.id AND a.mother=t2.id) +) +select * from ancestor_couples; +husband h_dob wife w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.* +from folks as p, ancestors AS a +where p.id = a.father +union +select p.* +from folks as p, ancestors AS a +where p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +9 Grandma Ann 1941-10-15 NULL NULL +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) 
+as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +union +select h.*, w.* +from folks v, folks h, folks w +where v.name = 'Vasya' and +(v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +h_name h_dob w_name w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +drop table folks; diff --git a/mysql-test/r/ctype_eucjpms.result b/mysql-test/r/ctype_eucjpms.result index f9cb4f1eecc..8d4d8f6d5f9 100644 --- a/mysql-test/r/ctype_eucjpms.result +++ b/mysql-test/r/ctype_eucjpms.result @@ -33913,3 +33913,24 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# End of 10.2 tests +# +# +# MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +# +CREATE TABLE t1 (a TEXT CHARACTER SET eucjpms); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.ujis.txt' INTO TABLE t1 CHARACTER SET eucjpms IGNORE 4 LINES; +SELECT HEX(a) FROM t1; +HEX(a) +3F +78787831 +3F3F +78787832 +8FA1A1 +78787833 +3F3F +DROP TABLE t1; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/ctype_gbk_export_import.result b/mysql-test/r/ctype_gbk_export_import.result new file mode 100644 index 00000000000..d7d5aa98f02 --- /dev/null +++ b/mysql-test/r/ctype_gbk_export_import.result @@ -0,0 +1,3903 @@ +DROP DATABASE IF EXISTS gbk; +CREATE DATABASE gbk DEFAULT CHARACTER SET gbk; +USE gbk; +CREATE TABLE t1 ( +id INT NOT NULL, +a1 TEXT NOT NULL, +a2 TEXT CHARACTER SET utf8 NOT NULL, +b1 BLOB NOT NULL, +eol TEXT NOT NULL); +CREATE PROCEDURE populate() +BEGIN +TRUNCATE TABLE t1; +INSERT INTO t1 SET id=1, a1=0xEE5C, a2=_gbk 0xEE5C, b1=0xEE5C, eol='$'; +INSERT INTO t1 SET id=2, a1=0xEE5C5C, a2=_gbk 0xEE5C5C, b1=0xEE5C5C, eol='$'; +END| +CREATE FUNCTION cmt(id INT, field_name TEXT, field_value BLOB) +RETURNS TEXT CHARACTER SET utf8 +BEGIN +DECLARE comment TEXT CHARACTER SET utf8; +DECLARE expected_value_01 BLOB; +DECLARE expected_value_02 BLOB; +SET comment= CASE field_name WHEN 'a1' THEN 'TEXT-GBK' WHEN 'a2' THEN 'TEXT-UTF8' WHEN 'b1' THEN 'BLOB' ELSE '' END; +SET expected_value_01= CASE field_name WHEN 'a1' THEN 0xEE5C WHEN 'a2' THEN 0xE9A0AB WHEN 'b1' THEN 0xEE5C ELSE '' END; +SET expected_value_02= CASE field_name WHEN 'a1' THEN 0xEE5C5C WHEN 'a2' THEN 0xE9A0AB5C WHEN 'b1' THEN 0xEE5C5C ELSE '' END; +RETURN IF(CASE id +WHEN 1 THEN expected_value_01 +WHEN 2 THEN expected_value_02 +ELSE '' + END <> field_value, +CONCAT('BAD-', comment), ''); +END| +CREATE FUNCTION display_file(file BLOB) RETURNS TEXT CHARACTER SET utf8 +BEGIN +SET file=REPLACE(file, 0x09, '----'); +SET file=REPLACE(file, 0x0A, '++++'); +RETURN REPLACE(REPLACE(HEX(file), '2D2D2D2D','-'), '2B2B2B2B','|'); +END| +CREATE VIEW v1 AS +SELECT +id, +CONCAT(RPAD(HEX(a1),50,' '), cmt(id, 'a1', a1)) AS a1, +CONCAT(RPAD(HEX(a2),50,' '), cmt(id, 'a2', a2)) AS a2, +CONCAT(RPAD(HEX(b1),50,' '), cmt(id, 'b1', b1)) AS b1, +CONCAT(RPAD(HEX(eol),50,' '), IF(eol<>'$','BAD-EOL','')) AS eol, +'---' AS `---` +FROM t1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL, + `a1` text NOT NULL, + `a2` text CHARACTER SET utf8 NOT NULL, + `b1` blob NOT NULL, + `eol` text NOT NULL +) ENGINE=MyISAM DEFAULT CHARSET=gbk +# +# Dump using SELECT INTO OUTFILE +# +Start of 
{zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB 
+b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto }{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=auto +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET 
gbk}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=gbk 
CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=auto +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET gbk +file 
31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: 
--default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto 
CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of 
{zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=auto CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=auto CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk 
}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET binary +file 
31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk }{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=auto +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET 
gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 } 
+Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=auto +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk 
CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: 
--default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET 
binary}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET 
binary}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=gbk CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-5CEE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-5CEE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER 
SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: 
--default-character-set=utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 }{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=auto +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C 
BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 } + + +Start of 
{zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET gbk +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET gbk}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=auto +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER 
SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of 
{zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET utf8 +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET utf8}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 
2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 } +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: 
--default-character-set=utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +OUTFILE: --default-character-set=utf8 CHARACTER SET binary +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8 CHARACTER SET binary}{--default-character-set=utf8 CHARACTER SET binary} + + +# +# Dump using mysqldump -Tdir +# +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto } +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=auto +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk 
+mysqldump: --default-character-set=binary +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary } +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary } + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=binary CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=binary CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=binary CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=binary CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk } +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=gbk +file 
31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 } +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9978B3F BAD-TEXT-UTF8 +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C5C +a2 E9978BE78E9509E9A0AB5C0924 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 E9A0AB +b1 EE5C0924 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 
3F5C5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 EE5C5C0924 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=binary +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-E9A0AB-EE5C5C-24|32-EE5C5C5C-E9A0AB5C5C-EE5C5C5C5C-24| +id 1 +a1 3F09E9A03F BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09E9A0AB5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 24 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=binary}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto } +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=auto +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary } +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary } + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=binary CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 
E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=binary CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=binary CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=binary CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk } +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 } +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=utf8 +file 
31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 EE5C +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F5C BAD-TEXT-GBK +a2 3F5C BAD-TEXT-UTF8 +b1 EE5C +eol 24 +--- --- +id 2 +a1 3F5C5C BAD-TEXT-GBK +a2 3F5C5C BAD-TEXT-UTF8 +b1 EE5C5C +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=gbk +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-EE5C-EE5C-EE5C-24|32-EE5C5C5C-EE5C5C5C-EE5C5C5C-24| +id 1 +a1 3F093F093F0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +id 2 +a1 EE5C09EE5C09EE5C0924 BAD-TEXT-GBK +a2 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=gbk}{--default-character-set=utf8 CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto } +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=auto +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto } + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET utf8} + + +Start of 
{zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=auto CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=auto CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary } +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary } + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=binary CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=binary CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=binary CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=binary CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk } +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk } + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET gbk +file 
31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=gbk CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=gbk CHARACTER SET binary} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 } +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 } + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET gbk} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET gbk +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9978B3F BAD-TEXT-UTF8 +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C09E9A0AB5C093F5C5C BAD-TEXT-GBK +a2 24 BAD-TEXT-UTF8 +b1 BAD-BLOB +eol BAD-EOL +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET gbk} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET utf8} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET utf8 +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 EE5C +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 EE5C5C +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET utf8} + + +Start of {zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET binary} +Locale: zh_CN.gbk +mysqldump: --default-character-set=utf8 +INFILE: --default-character-set=utf8 CHARACTER SET binary +file 31-E9A0AB-E9A0AB-3F5C5C-24|32-E9A0AB5C5C-E9A0AB5C5C-3F5C5C5C5C-24| +id 1 +a1 E9A03F BAD-TEXT-GBK +a2 E9A0AB +b1 3F5C BAD-BLOB +eol 24 +--- --- +id 2 +a1 E9A0AB5C BAD-TEXT-GBK +a2 E9A0AB5C +b1 3F5C5C BAD-BLOB +eol 24 +--- --- +End of 
{zh_CN.gbk}{--default-character-set=utf8}{--default-character-set=utf8 CHARACTER SET binary} + + +DROP DATABASE gbk; +USE test; diff --git a/mysql-test/r/ctype_ujis.result b/mysql-test/r/ctype_ujis.result index 61541ec7678..5eb9a3e1db5 100644 --- a/mysql-test/r/ctype_ujis.result +++ b/mysql-test/r/ctype_ujis.result @@ -26218,3 +26218,24 @@ DROP TABLE t1; # # End of 10.1 tests # +# +# End of 10.2 tests +# +# +# MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +# +CREATE TABLE t1 (a TEXT CHARACTER SET ujis); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.ujis.txt' INTO TABLE t1 CHARACTER SET ujis IGNORE 4 LINES; +SELECT HEX(a) FROM t1; +HEX(a) +3F +78787831 +3F3F +78787832 +8FA1A1 +78787833 +3F3F +DROP TABLE t1; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index f52e08a676f..48a6eb02526 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -10426,5 +10426,38 @@ b c DROP TABLE t1; # +# MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +# +CREATE TABLE t1 (a TEXT CHARACTER SET utf8); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.utf8mb4.txt' INTO TABLE t1 CHARACTER SET utf8 IGNORE 4 LINES; +Warnings: +Warning 1366 Incorrect string value: '\xD0' for column 'a' at row 1 +Warning 1366 Incorrect string value: '\xE1\x80' for column 'a' at row 3 +Warning 1366 Incorrect string value: '\xF0\x9F\x98' for column 'a' at row 5 +Warning 1366 Incorrect string value: '\xF0\x9F\x98\x8E' for column 'a' at row 7 +Warning 1366 Incorrect string value: '\xF0\x9F\x98' for column 'a' at row 8 +SELECT HEX(a) FROM t1; +HEX(a) +3F +78787831 +3F3F +78787832 +3F3F3F +78787833 +3F3F3F3F +3F3F3F +DROP TABLE t1; +# +# MDEV-9874 LOAD XML INFILE does not handle well broken multi-byte characters +# +CREATE TABLE t1 (a TEXT CHARACTER SET utf8); +LOAD XML INFILE '../../std_data/loaddata/mdev9874.xml' INTO TABLE t1 CHARACTER SET utf8 ROWS IDENTIFIED BY ''; +Warnings: +Warning 1366 Incorrect string value: '\xD0' for column 'a' at row 1 +SELECT HEX(a) FROM t1; +HEX(a) +613F +DROP TABLE t1; +# # End of 10.2 tests # diff --git a/mysql-test/r/ctype_utf8mb4.result b/mysql-test/r/ctype_utf8mb4.result index 10d77ae1502..558aba9c466 100644 --- a/mysql-test/r/ctype_utf8mb4.result +++ b/mysql-test/r/ctype_utf8mb4.result @@ -3398,3 +3398,30 @@ DROP FUNCTION f1; # # End of 10.1 tests # +# +# End of 10.2 tests +# +# +# MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +# +CREATE TABLE t1 (a TEXT CHARACTER SET utf8mb4); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.utf8mb4.txt' INTO TABLE t1 CHARACTER SET utf8mb4 IGNORE 4 LINES; +Warnings: +Warning 1366 Incorrect string value: '\xD0' for column 'a' at row 1 +Warning 1366 Incorrect string value: '\xE1\x80' for column 'a' at row 3 +Warning 1366 Incorrect string value: '\xF0\x9F\x98' for column 'a' at row 5 +Warning 1366 Incorrect string value: '\xF0\x9F\x98' for column 'a' at row 8 +SELECT HEX(a) FROM t1; +HEX(a) +3F +78787831 +3F3F +78787832 +3F3F3F +78787833 +F09F988E +3F3F3F +DROP TABLE t1; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index 80b99115055..d54fa40f2da 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -476,3 +476,37 @@ connection con1; disconnect con1; connection default; drop tables tm, t1, t2; +# +# MDEV-9621 INSERT DELAYED fails on insert for tables with many columns +# +CREATE TABLE t1 ( +a int,b 
int,c int,d int,e int,f int,g int,h int,i int,j int,k int,l int,m int,n int,o int,p int,q int,r int,s int,t int,u int,v int,x int,y int,z int +) ENGINE=MyISAM; +INSERT DELAYED INTO t1 (a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,x,y,z) +values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +INSERT DELAYED INTO t1 (a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,x,y,z) +values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +drop table t1; +# +# INSERT DELAYED hangs if table was crashed +# +create table t1 (a int, b int) engine=myisam; +insert into t1 values (1,1); +SET debug_dbug="d,crash_shutdown"; +shutdown; +ERROR HY000: Lost connection to MySQL server during query +call mtr.add_suppression(" marked as crashed and should be repaired"); +call mtr.add_suppression("Checking table"); +insert delayed into t1 values (2,2); +Warnings: +Error 145 Table './test/t1' is marked as crashed and should be repaired +Error 1194 Table 't1' is marked as crashed and should be repaired +Error 1034 1 client is using or hasn't closed the table properly +insert delayed into t1 values (3,3); +flush tables t1; +select * from t1; +a b +1 1 +2 2 +3 3 +drop table t1; diff --git a/mysql-test/r/derived_opt.result b/mysql-test/r/derived_opt.result index 04a76c2cbc8..6e4ea1b5d36 100644 --- a/mysql-test/r/derived_opt.result +++ b/mysql-test/r/derived_opt.result @@ -231,8 +231,8 @@ CREATE VIEW v1 AS SELECT a, MIN(b) AS b FROM t2 GROUP BY a; EXPLAIN SELECT * FROM v1, t1 WHERE v1.b=t1.a ORDER BY v1.a; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 1 Using filesort -1 PRIMARY ref key0 key0 5 const 1 Using where +1 PRIMARY t1 system NULL NULL NULL NULL 1 +1 PRIMARY ref key0 key0 5 const 1 Using where; Using filesort 2 DERIVED t2 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort SELECT * FROM v1, t1 WHERE v1.b=t1.a ORDER BY v1.a; a b a diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result index d6e5a69e217..e2a7c462efd 100644 --- a/mysql-test/r/distinct.result +++ b/mysql-test/r/distinct.result @@ -175,7 +175,7 @@ explain SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using temporary 1 SIMPLE t3 ref a a 5 test.t1.b 2 Using index -1 SIMPLE t2 index a a 4 NULL 5 Using where; Using index; Distinct; Using join buffer (flat, BNL join) +1 SIMPLE t2 index a a 4 NULL 5 Using where; Using index; Using join buffer (flat, BNL join) SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a; a 1 @@ -302,11 +302,11 @@ WHERE AND ((t1.id=j_lj_t3.id AND t3_lj.id IS NULL) OR (t1.id=t3.id AND t3.idx=2)); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index id id 4 NULL 2 Using index; Using temporary -1 SIMPLE t2 index id id 8 NULL 1 Using index; Distinct; Using join buffer (flat, BNL join) -1 SIMPLE t3 index id id 8 NULL 1 Using index; Distinct; Using join buffer (flat, BNL join) -1 SIMPLE j_lj_t2 index id id 4 NULL 2 Using where; Using index; Distinct; Using join buffer (flat, BNL join) -1 SIMPLE t2_lj ref id id 4 test.j_lj_t2.id 1 Using where; Using index; Distinct -1 SIMPLE j_lj_t3 index id id 4 NULL 2 Using where; Using index; Distinct; Using join buffer (flat, BNL join) +1 SIMPLE t2 index id id 8 NULL 1 Using index; Using join buffer (flat, BNL join) +1 SIMPLE t3 index id id 8 NULL 1 Using index; Using join buffer (flat, BNL join) +1 SIMPLE j_lj_t2 index id id 4 NULL 2 
Using where; Using index; Using join buffer (flat, BNL join) +1 SIMPLE t2_lj ref id id 4 test.j_lj_t2.id 1 Using where; Using index +1 SIMPLE j_lj_t3 index id id 4 NULL 2 Using where; Using index; Using join buffer (flat, BNL join) 1 SIMPLE t3_lj ref id id 4 test.j_lj_t3.id 1 Using where; Using index; Distinct SELECT DISTINCT t1.id @@ -518,7 +518,7 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1_1 ALL NULL NULL NULL NULL 3 Using temporary -1 SIMPLE t1_2 index NULL PRIMARY 4 NULL 3 Using index; Distinct; Using join buffer (flat, BNL join) +1 SIMPLE t1_2 index NULL PRIMARY 4 NULL 3 Using index; Using join buffer (flat, BNL join) EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2 WHERE t1_1.a = t1_2.a; id select_type table type possible_keys key key_len ref rows Extra @@ -916,8 +916,8 @@ SELECT STRAIGHT_JOIN DISTINCT t1.id FROM t1, v1, t2 WHERE v1.id = t2.i AND t1.i1 = v1.i1 AND t2.i != 3; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 96 100.00 Using where; Using temporary -1 PRIMARY ref key0 key0 5 test.t1.i1 9 100.00 Using where; Distinct -1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Distinct; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 5 test.t1.i1 9 100.00 Using where +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (flat, BNL join) 2 DERIVED t1 ALL NULL NULL NULL NULL 96 100.00 Warnings: Note 1003 select straight_join distinct `test`.`t1`.`id` AS `id` from `test`.`t1` join `test`.`v1` join `test`.`t2` where ((`test`.`t2`.`i` = `v1`.`id`) and (`v1`.`i1` = `test`.`t1`.`i1`) and (`v1`.`id` <> 3)) diff --git a/mysql-test/r/explain_json.result b/mysql-test/r/explain_json.result index 46d586ac29d..af5d1b800aa 100644 --- a/mysql-test/r/explain_json.result +++ b/mysql-test/r/explain_json.result @@ -486,8 +486,8 @@ EXPLAIN "query_block": { "select_id": 2, "filesort": { + "sort_key": "t1.a", "temporary_table": { - "function": "buffer", "table": { "table_name": "t1", "access_type": "ALL", @@ -530,8 +530,8 @@ EXPLAIN "query_block": { "select_id": 2, "filesort": { + "sort_key": "t1.a", "temporary_table": { - "function": "buffer", "table": { "table_name": "t1", "access_type": "ALL", @@ -576,7 +576,6 @@ EXPLAIN "query_block": { "select_id": 2, "temporary_table": { - "function": "buffer", "table": { "table_name": "t1", "access_type": "ALL", @@ -1132,8 +1131,8 @@ EXPLAIN "select_id": 1, "having_condition": "(TOP > t2.a)", "filesort": { + "sort_key": "t2.a", "temporary_table": { - "function": "buffer", "table": { "table_name": "t2", "access_type": "ALL", @@ -1151,8 +1150,8 @@ EXPLAIN "query_block": { "select_id": 1, "filesort": { + "sort_key": "t2.a", "temporary_table": { - "function": "buffer", "table": { "table_name": "t2", "access_type": "ALL", @@ -1181,8 +1180,8 @@ EXPLAIN "query_block": { "select_id": 1, "filesort": { + "sort_key": "t2.a", "temporary_table": { - "function": "buffer", "table": { "table_name": "t2", "access_type": "ALL", @@ -1380,7 +1379,6 @@ EXPLAIN "query_block": { "select_id": 1, "temporary_table": { - "function": "buffer", "table": { "table_name": "t1", "access_type": "ALL", diff --git a/mysql-test/r/func_analyse.result b/mysql-test/r/func_analyse.result index 2c300559a32..bc8ec445e73 100644 --- a/mysql-test/r/func_analyse.result +++ b/mysql-test/r/func_analyse.result @@ -19,7 +19,7 @@ 
test.t1.empty_string 0 0 4 0 0.0000 NULL CHAR(0) NOT NULL test.t1.bool N Y 1 1 0 0 1.0000 NULL ENUM('N','Y') NOT NULL test.t1.d 2002-03-03 2002-03-05 10 10 0 0 10.0000 NULL ENUM('2002-03-03','2002-03-04','2002-03-05') NOT NULL create table t2 select * from t1 procedure analyse(); -ERROR HY000: Incorrect usage of PROCEDURE and non-SELECT +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'procedure analyse()' at line 1 drop table t1; EXPLAIN SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(); ERROR HY000: Incorrect usage of PROCEDURE and subquery @@ -120,7 +120,7 @@ CREATE TABLE t1(a INT); INSERT INTO t1 VALUES (1),(2); # should not crash CREATE TABLE t2 SELECT 1 FROM t1, t1 t3 GROUP BY t3.a PROCEDURE ANALYSE(); -ERROR HY000: Incorrect usage of PROCEDURE and non-SELECT +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()' at line 1 DROP TABLE t1; End of 5.0 tests # @@ -149,3 +149,25 @@ Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_ test.t2.f2 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL DROP TABLE t1, t2; End of 5.1 tests +# +# Start of 10.2 tests +# +(SELECT 1 FROM DUAL PROCEDURE ANALYSE()); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +1 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL +((SELECT 1 FROM DUAL PROCEDURE ANALYSE())); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +1 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL +SELECT * FROM t1 UNION SELECT * FROM t1 PROCEDURE analyse(); +ERROR HY000: Incorrect usage of PROCEDURE and subquery +# +# MDEV-10030 sql_yacc.yy: Split table_expression and remove PROCEDURE from create_select, select_paren_derived, select_derived2, query_specification +# +SELECT * FROM (SELECT * FROM t1 PROCEDURE ANALYSE()); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE())' at line 1 +SELECT * FROM t1 NATURAL JOIN (SELECT * FROM t2 PROCEDURE ANALYSE()); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE())' at line 1 +SELECT (SELECT 1 FROM t1 PROCEDURE ANALYSE()) FROM t2; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()) FROM t2' at line 1 +SELECT ((SELECT 1 FROM t1 PROCEDURE ANALYSE())) FROM t2; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE())) FROM t2' at line 1 diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 422e7bd25c8..74247ae1b8e 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -1519,7 +1519,7 @@ SELECT MAX(pk) as max, i FROM t1 ORDER BY max; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using temporary +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 # Only 11 is correct for collumn i in this result SELECT MAX(pk) as max, i diff --git a/mysql-test/r/group_by.result 
b/mysql-test/r/group_by.result index 07773960e5a..f27ae67adff 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -1346,12 +1346,43 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR JOIN (PRIMARY,i2); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 144 +# +# For this explain, the query plan is weird: if we are using +# the primary key for reasons other than doing grouping, can't +# GROUP BY code take advantage of this? Well, currently it doesnt: EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR GROUP BY (PRIMARY,i2) GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL PRIMARY 4 NULL 144 Using index +1 SIMPLE t1 index NULL PRIMARY 4 NULL 144 Using index; Using filesort +# Here's a proof it is really doing sorting: +flush status; +SELECT a FROM t1 IGNORE INDEX FOR GROUP BY (PRIMARY,i2) GROUP BY a; +show status like 'Sort_%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 144 +Sort_scan 1 +# Proof ends. +# +# For this explain, the query plan is weird: if we are using +# the primary key for reasons other than doing sorting, can't +# ORDER BY code take advantage of this? Well, currently it doesnt: EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL PRIMARY 4 NULL 144 Using index +1 SIMPLE t1 index NULL PRIMARY 4 NULL 144 Using index; Using filesort +# Here's a proof it is really doing sorting: +flush status; +SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; +show status like 'Sort_%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 144 +Sort_scan 1 +# Proof ends. 
+# SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; a 1 @@ -2678,3 +2709,17 @@ NULL 100098 100099 drop table t0,t1,t2; +# +# MDEV-9602 crash in st_key::actual_rec_per_key when group by constant +# +create table t1 (a date not null,unique (a)) engine=innodb; +Warnings: +Warning 1286 Unknown storage engine 'innodb' +Warning 1266 Using storage engine MyISAM for table 't1' +select distinct a from t1 group by 'a'; +a +insert into t1 values("2001-02-02"),("2001-02-03"); +select distinct a from t1 group by 'a'; +a +2001-02-02 +drop table t1; diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result index eda67460205..627edd60141 100644 --- a/mysql-test/r/having.result +++ b/mysql-test/r/having.result @@ -470,10 +470,9 @@ WHERE table2.f1 = 2 GROUP BY table1.f1, table2.f2 HAVING (table2.f2 = 8 AND table1.f1 >= 6); id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE table2 const PRIMARY PRIMARY 4 const 1 100.00 Using filesort -1 SIMPLE table1 ALL NULL NULL NULL NULL 4 100.00 Using where +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables Warnings: -Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where (`test`.`table1`.`f3` = 9) group by `test`.`table1`.`f1`,7 having ((7 = 8) and (`test`.`table1`.`f1` >= 6)) +Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where (`test`.`table1`.`f3` = 9) group by `test`.`table1`.`f1`,7 having 0 EXPLAIN EXTENDED SELECT table1.f1, table2.f2 FROM t1 AS table1 @@ -482,10 +481,9 @@ WHERE table2.f1 = 2 GROUP BY table1.f1, table2.f2 HAVING (table2.f2 = 8); id select_type table type possible_keys key key_len ref rows filtered Extra -1 SIMPLE table2 const PRIMARY PRIMARY 4 const 1 100.00 Using filesort -1 SIMPLE table1 ALL NULL NULL NULL NULL 4 100.00 Using where +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables Warnings: -Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where (`test`.`table1`.`f3` = 9) group by `test`.`table1`.`f1`,7 having (7 = 8) +Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where (`test`.`table1`.`f3` = 9) group by `test`.`table1`.`f1`,7 having 0 DROP TABLE t1; # # Bug#52336 Segfault / crash in 5.1 copy_fields (param=0x9872980) at sql_select.cc:15355 diff --git a/mysql-test/r/join_cache.result b/mysql-test/r/join_cache.result index d816b1acf92..c15f96fc2e5 100644 --- a/mysql-test/r/join_cache.result +++ b/mysql-test/r/join_cache.result @@ -5412,9 +5412,9 @@ WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND t2.a BETWEEN 4 and 5 ORDER BY t2.b; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 1 Using filesort +1 PRIMARY t1 system NULL NULL NULL NULL 1 1 PRIMARY t3 system NULL NULL NULL NULL 1 -1 PRIMARY t2 range a,c a 5 NULL 1 Using index condition; Using where +1 PRIMARY t2 range a,c a 5 NULL 1 Using index condition; Using where; Using filesort 1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary SELECT * FROM t1,t2 WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index 820d66b9264..ca544f40aa8 100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ 
-1289,8 +1289,8 @@ SELECT t1.a, COUNT( t2.b ), SUM( t2.b ), MAX( t2.b ) FROM t1 JOIN t2 USING( a ) GROUP BY t1.a WITH ROLLUP; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system NULL NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using where +1 SIMPLE t1 system NULL NULL NULL NULL 1 +1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using where; Using filesort SELECT t1.a, COUNT( t2.b ), SUM( t2.b ), MAX( t2.b ) FROM t1 JOIN t2 USING( a ) GROUP BY t1.a WITH ROLLUP; @@ -1429,8 +1429,8 @@ EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index +1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 +1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; @@ -1846,8 +1846,8 @@ EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index +1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 +1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result index 4412f8059dd..3616deaee9c 100644 --- a/mysql-test/r/join_outer_jcl6.result +++ b/mysql-test/r/join_outer_jcl6.result @@ -1300,8 +1300,8 @@ SELECT t1.a, COUNT( t2.b ), SUM( t2.b ), MAX( t2.b ) FROM t1 JOIN t2 USING( a ) GROUP BY t1.a WITH ROLLUP; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system NULL NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using where +1 SIMPLE t1 system NULL NULL NULL NULL 1 +1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using where; Using filesort SELECT t1.a, COUNT( t2.b ), SUM( t2.b ), MAX( t2.b ) FROM t1 JOIN t2 USING( a ) GROUP BY t1.a WITH ROLLUP; @@ -1440,8 +1440,8 @@ EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index +1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 +1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index; Using filesort SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; @@ -1857,8 +1857,8 @@ EXPLAIN SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 Using filesort -1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using index +1 SIMPLE t1 system PRIMARY NULL NULL NULL 1 +1 SIMPLE t2 ref PRIMARY PRIMARY 4 const 1 Using where; Using 
index; Using filesort SELECT * FROM t1 LEFT JOIN t2 ON t2.f1 = t1.f1 WHERE t1.f1 = 4 AND t2.f1 IS NOT NULL AND t2.f2 IS NOT NULL GROUP BY t2.f1, t2.f2; diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result index 176a93c7a46..064fa5a18a7 100644 --- a/mysql-test/r/limit.result +++ b/mysql-test/r/limit.result @@ -80,13 +80,13 @@ create table t1 (a int); insert into t1 values (1),(2),(3),(4),(5),(6),(7); explain select count(*) c FROM t1 WHERE a > 0 ORDER BY c LIMIT 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where; Using temporary +1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where select count(*) c FROM t1 WHERE a > 0 ORDER BY c LIMIT 3; c 7 explain select sum(a) c FROM t1 WHERE a > 0 ORDER BY c LIMIT 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where; Using temporary +1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where select sum(a) c FROM t1 WHERE a > 0 ORDER BY c LIMIT 3; c 28 diff --git a/mysql-test/r/limit_rows_examined.result b/mysql-test/r/limit_rows_examined.result index 130d17ae270..318039db068 100644 --- a/mysql-test/r/limit_rows_examined.result +++ b/mysql-test/r/limit_rows_examined.result @@ -471,9 +471,11 @@ id select_type table type possible_keys key key_len ref rows Extra select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 0; c1 sum(c2) Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 2 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. +Warning 1931 Query execution was interrupted. The query examined at least 1 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 1; -ERROR HY000: Sort aborted: +c1 sum(c2) +Warnings: +Warning 1931 Query execution was interrupted. The query examined at least 3 rows, which exceeds LIMIT ROWS EXAMINED (1). The query result may be incomplete. select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 20; c1 sum(c2) aa 3 @@ -496,9 +498,11 @@ id select_type table type possible_keys key key_len ref rows Extra select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 0; c1 sum(c2) Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 2 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. +Warning 1931 Query execution was interrupted. The query examined at least 1 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 1; -ERROR HY000: Sort aborted: +c1 sum(c2) +Warnings: +Warning 1931 Query execution was interrupted. The query examined at least 3 rows, which exceeds LIMIT ROWS EXAMINED (1). The query result may be incomplete. select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 20; c1 sum(c2) aa 3 @@ -627,7 +631,7 @@ CREATE TABLE t4 (a int); INSERT INTO t4 values (1), (2); INSERT INTO t4 SELECT a + 2 FROM t4 LIMIT ROWS EXAMINED 0; Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 2 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. +Warning 1931 Query execution was interrupted. The query examined at least 1 rows, which exceeds LIMIT ROWS EXAMINED (0). The query result may be incomplete. 
select * from t4; a 1 @@ -666,7 +670,7 @@ MDEV-115 SET @@optimizer_switch='in_to_exists=on,outer_join_with_cache=on'; CREATE TABLE t1 ( a VARCHAR(3) ) ENGINE=MyISAM; -INSERT INTO t1 VALUES ('USA'); +INSERT INTO t1 VALUES ('USA'),('CAN'); CREATE TABLE t2 ( b INT ); INSERT INTO t2 VALUES (3899),(3914),(3888); CREATE TABLE t3 ( c VARCHAR(33), d INT ); @@ -676,8 +680,8 @@ SELECT DISTINCT a AS field1 FROM t1, t2 WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d) HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 system NULL NULL NULL NULL 1 Using temporary -1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Distinct +1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using temporary +1 PRIMARY t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join) 2 SUBQUERY t3 ALL NULL NULL NULL NULL 3 2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join) SELECT DISTINCT a AS field1 FROM t1, t2 @@ -685,24 +689,27 @@ WHERE EXISTS (SELECT c FROM t3 LEFT JOIN t2 ON b = d) HAVING field1 > 'aaa' LIMIT ROWS EXAMINED 20; field1 Warnings: -Warning 1931 Query execution was interrupted. The query examined at least 23 rows, which exceeds LIMIT ROWS EXAMINED (20). The query result may be incomplete. +Warning 1931 Query execution was interrupted. The query examined at least 21 rows, which exceeds LIMIT ROWS EXAMINED (20). The query result may be incomplete. EXPLAIN SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 14; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system NULL NULL NULL NULL 1 Using temporary -1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Distinct +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join) SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 14; a +USA Warnings: Warning 1931 Query execution was interrupted. The query examined at least 15 rows, which exceeds LIMIT ROWS EXAMINED (14). The query result may be incomplete. SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 15; a USA +CAN Warnings: Warning 1931 Query execution was interrupted. The query examined at least 16 rows, which exceeds LIMIT ROWS EXAMINED (15). The query result may be incomplete. SELECT DISTINCT a FROM t1, t2 HAVING a > ' ' LIMIT ROWS EXAMINED 16; a USA +CAN Warnings: Warning 1931 Query execution was interrupted. The query examined at least 17 rows, which exceeds LIMIT ROWS EXAMINED (16). The query result may be incomplete. 
drop table t1,t2,t3; diff --git a/mysql-test/r/mrr_derived_crash_4610.result b/mysql-test/r/mrr_derived_crash_4610.result index 8dcdfda9276..3e38a0d4218 100644 --- a/mysql-test/r/mrr_derived_crash_4610.result +++ b/mysql-test/r/mrr_derived_crash_4610.result @@ -7,8 +7,8 @@ explain select 1 from (select f2, f3, val, count(id) from t4 join t2 left join t3 on 0) top join t1 on f1 = f3 where f3 = 'aaaa' order by val; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 const PRIMARY PRIMARY 12 const 1 Using index; Using filesort -1 PRIMARY ref key0 key0 13 const 0 Using where +1 PRIMARY t1 const PRIMARY PRIMARY 12 const 1 Using index +1 PRIMARY ref key0 key0 13 const 0 Using where; Using filesort 2 DERIVED t4 ALL NULL NULL NULL NULL 1 2 DERIVED t2 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join) 2 DERIVED t3 ALL NULL NULL NULL NULL 1 Using where; Using join buffer (incremental, BNL join) diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index 67a63d7a08a..1eef64be640 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -523,11 +523,11 @@ a explain select sql_big_result distinct t1.a from t1,t2 order by t2.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 1 Using temporary -1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index; Distinct +1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index explain select distinct t1.a from t1,t2 order by t2.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 1 Using temporary -1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index; Distinct +1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index drop table t1,t2; create table t1 ( c1 varchar(32), @@ -603,6 +603,10 @@ test.t2 3442722830 test.t3 NULL Warnings: Error 1146 Table 'test.t3' doesn't exist +alter table t1 add d int default 30, add e bigint default 300000, add f decimal(30) default 442; +checksum table t1; +Table Checksum +test.t1 2924214226 drop table t1,t2; create table t1 (a int, key (a)); show keys from t1; diff --git a/mysql-test/r/order_by_optimizer.result b/mysql-test/r/order_by_optimizer.result new file mode 100644 index 00000000000..8f128552a86 --- /dev/null +++ b/mysql-test/r/order_by_optimizer.result @@ -0,0 +1,30 @@ +drop table if exists t0,t1,t2,t3; +# +# MDEV-7885: EXPLAIN shows wrong info for ORDER BY query +# +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; +create table t2 (key1 int, col1 int, key(key1)); +insert into t2 select a,a from t0; +insert into t2 select 15,15 from t1; +alter table t2 add key2 int, add key(key2); +# This must show "Using filesort": +explain +select * from t2 ignore index for order by (key1) where col1<0 order by key1 limit 10; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 1010 Using where; Using filesort +drop table t0, t1, t2; +# +# MDEV-8857: [Upstream too] EXPLAIN incorrectly shows Distinct for tables using join buffer +# +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, filler char(200), key(a)); +insert into t1 select A.a + B.a* 10, 'AAAAAAAAAAAAAAAAAAAA' from t0 A, t0 B where B.a in (0,1); +explain select distinct A.a from t0 A, t1 B where A.a+B.a> 0; +id select_type table type possible_keys key key_len ref rows 
Extra +1 SIMPLE A ALL NULL NULL NULL NULL 10 Using temporary +1 SIMPLE B index NULL a 5 NULL 20 Using where; Using index; Using join buffer (flat, BNL join) +drop table t0, t1; diff --git a/mysql-test/r/parser.result b/mysql-test/r/parser.result index 01cc9d79aaf..18a8e13815c 100644 --- a/mysql-test/r/parser.result +++ b/mysql-test/r/parser.result @@ -686,7 +686,7 @@ FOR UPDATE) a; SELECT 1 FROM (SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 PROCEDURE ANALYSE() FOR UPDATE) a; -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE() FOR UPDATE) a' at line 3 SELECT 1 FROM t1 WHERE EXISTS(SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 FOR UPDATE); @@ -694,7 +694,7 @@ FOR UPDATE); SELECT 1 FROM t1 WHERE EXISTS(SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 PROCEDURE ANALYSE() FOR UPDATE); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE() FOR UPDATE)' at line 3 SELECT 1 FROM t1 UNION SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 diff --git a/mysql-test/r/select_found.result b/mysql-test/r/select_found.result index 7b38515cf70..8462e19fda8 100644 --- a/mysql-test/r/select_found.result +++ b/mysql-test/r/select_found.result @@ -83,20 +83,20 @@ UNIQUE KEY e_n (email,name) ); EXPLAIN SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 system PRIMARY,kid NULL NULL NULL 0 const row not found -1 SIMPLE t2 index NULL e_n 104 NULL 10 +1 SIMPLE t1 system PRIMARY,kid NULL NULL NULL 0 const row not found; Using temporary +1 SIMPLE t2 ALL NULL NULL NULL NULL 200 SELECT SQL_CALC_FOUND_ROWS DISTINCT email FROM t2 LEFT JOIN t1 ON kid = t2.id WHERE t1.id IS NULL LIMIT 10; email email1 +email2 +email3 +email4 +email5 +email6 +email7 +email8 +email9 email10 -email100 -email101 -email102 -email103 -email104 -email105 -email106 -email107 SELECT FOUND_ROWS(); FOUND_ROWS() 200 diff --git a/mysql-test/r/show_explain.result b/mysql-test/r/show_explain.result index 5a885766f7b..0819ae5ba37 100644 --- a/mysql-test/r/show_explain.result +++ b/mysql-test/r/show_explain.result @@ -1162,7 +1162,7 @@ SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, fie connection default; show explain for $thr2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 system NULL NULL NULL NULL 1 Using filesort +1 SIMPLE t2 system NULL NULL NULL NULL 1 1 SIMPLE t1 range b b 6 NULL 107 Using where; Using index 1 SIMPLE t3 ref PRIMARY PRIMARY 5 test.t1.b 1 Using index Warnings: diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index b306965034b..1b1198958b8 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -79,7 +79,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE 
ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/subselect4.result b/mysql-test/r/subselect4.result index 87645d187f2..7202a6238b0 100644 --- a/mysql-test/r/subselect4.result +++ b/mysql-test/r/subselect4.result @@ -19,7 +19,7 @@ SELECT 1 FROM t1 WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE 1 = (SELECT MIN(t2.b) FROM t3)) ORDER BY count(*); id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t1 index NULL a 5 NULL 2 Using where; Using index; Using temporary +1 PRIMARY t1 index NULL a 5 NULL 2 Using where; Using index 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where 3 DEPENDENT SUBQUERY t3 system NULL NULL NULL NULL 0 const row not found # should not crash the next statement @@ -2003,8 +2003,8 @@ FROM t2 JOIN t3 ON t3.f4 = t2.f4 WHERE t3.f1 = 8 GROUP BY 1, 2; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY t3 system NULL NULL NULL NULL 1 Using filesort -1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where +1 PRIMARY t3 system NULL NULL NULL NULL 1 +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where; Using filesort 2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE 3 MATERIALIZED NULL NULL NULL NULL NULL NULL NULL no matching row in const table PREPARE st1 FROM " diff --git a/mysql-test/r/subselect_no_exists_to_in.result b/mysql-test/r/subselect_no_exists_to_in.result index 244f6057a2f..39d1f64410e 100644 --- a/mysql-test/r/subselect_no_exists_to_in.result +++ b/mysql-test/r/subselect_no_exists_to_in.result @@ -83,7 +83,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result index e095ed16ff9..211d3206a7f 100644 --- a/mysql-test/r/subselect_no_mat.result +++ b/mysql-test/r/subselect_no_mat.result @@ -86,7 +86,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result index f4714526613..6ae9f8aa066 100644 --- a/mysql-test/r/subselect_no_opts.result +++ b/mysql-test/r/subselect_no_opts.result @@ -82,7 +82,7 @@ 
SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result index 5ead5de4cf9..9a3fcd1de3b 100644 --- a/mysql-test/r/subselect_no_scache.result +++ b/mysql-test/r/subselect_no_scache.result @@ -85,7 +85,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result index c57c46b76ff..10cf05649e7 100644 --- a/mysql-test/r/subselect_no_semijoin.result +++ b/mysql-test/r/subselect_no_semijoin.result @@ -82,7 +82,7 @@ SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); 1 1 select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -ERROR HY000: Incorrect usage of PROCEDURE and subquery +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(1))' at line 1 SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1))' at line 1 SELECT (SELECT 1) as a FROM (SELECT 1) b WHERE (SELECT a) IS NULL; diff --git a/mysql-test/r/win.result b/mysql-test/r/win.result new file mode 100644 index 00000000000..a6b43788ffe --- /dev/null +++ b/mysql-test/r/win.result @@ -0,0 +1,1961 @@ +drop table if exists t1,t2; +drop view if exists v1; +# ######################################################################## +# # Parser tests +# ######################################################################## +# +# Check what happens when one attempts to use window function without OVER clause +create table t1 (a int, b int); +insert into t1 values (1,1),(2,2); +select row_number() from t1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'from t1' at line 1 +select rank() from t1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'from t1' at line 1 +# Attempt to use window function in the WHERE clause +select * from t1 where 1=rank() over (order by a); +ERROR HY000: 
Window function is allowed only in SELECT list and ORDER BY clause +select * from t1 where 1>row_number() over (partition by b order by a); +ERROR HY000: Window function is allowed only in SELECT list and ORDER BY clause +drop table t1; +# ######################################################################## +# # Functionality tests +# ######################################################################## +# +# Check if ROW_NUMBER() works in basic cases +create table t1(a int, b int, x char(32)); +insert into t1 values (2, 10, 'xx'); +insert into t1 values (2, 10, 'zz'); +insert into t1 values (2, 20, 'yy'); +insert into t1 values (3, 10, 'xxx'); +insert into t1 values (3, 20, 'vvv'); +select a, row_number() over (partition by a order by b) from t1; +a row_number() over (partition by a order by b) +2 1 +2 2 +2 3 +3 1 +3 2 +select a, b, x, row_number() over (partition by a order by x) from t1; +a b x row_number() over (partition by a order by x) +2 10 xx 1 +2 10 zz 3 +2 20 yy 2 +3 10 xxx 2 +3 20 vvv 1 +drop table t1; +create table t1 (pk int primary key, a int, b int); +insert into t1 values +(1, 10, 22), +(2, 11, 21), +(3, 12, 20), +(4, 13, 19), +(5, 14, 18); +select +pk, a, b, +row_number() over (order by a), +row_number() over (order by b) +from t1; +pk a b row_number() over (order by a) row_number() over (order by b) +1 10 22 1 5 +2 11 21 2 4 +3 12 20 3 3 +4 13 19 4 2 +5 14 18 5 1 +drop table t1; +# +# Try RANK() function +# +create table t2 ( +pk int primary key, +a int +); +insert into t2 values +( 1 , 0), +( 2 , 0), +( 3 , 1), +( 4 , 1), +( 8 , 2), +( 5 , 2), +( 6 , 2), +( 7 , 2), +( 9 , 4), +(10 , 4); +select pk, a, rank() over (order by a) from t2; +pk a rank() over (order by a) +1 0 1 +2 0 1 +3 1 3 +4 1 3 +8 2 5 +5 2 5 +6 2 5 +7 2 5 +9 4 9 +10 4 9 +select pk, a, rank() over (order by a desc) from t2; +pk a rank() over (order by a desc) +1 0 9 +2 0 9 +3 1 7 +4 1 7 +8 2 3 +5 2 3 +6 2 3 +7 2 3 +9 4 1 +10 4 1 +drop table t2; +# +# Try Aggregates as window functions. With frames. 
+# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; +pk c +1 1 +2 1 +3 1 +4 1 +5 2 +6 2 +7 2 +8 2 +9 2 +10 2 +select +pk, c, +count(*) over (partition by c order by pk +rows between 2 preceding and 2 following) as CNT +from t1; +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over (partition by c order by pk +rows between 1 preceding and 2 following) as CNT +from t1; +pk c CNT +1 1 3 +2 1 4 +3 1 3 +4 1 2 +5 2 3 +6 2 4 +7 2 4 +8 2 4 +9 2 3 +10 2 2 +select +pk, c, +count(*) over (partition by c order by pk +rows between 2 preceding and current row) as CNT +from t1; +pk c CNT +1 1 1 +2 1 2 +3 1 3 +4 1 3 +5 2 1 +6 2 2 +7 2 3 +8 2 3 +9 2 3 +10 2 3 +select +pk,c, +count(*) over (partition by c order by pk rows +between 1 following and 2 following) as CNT +from t1; +pk c CNT +1 1 2 +2 1 2 +3 1 1 +4 1 0 +5 2 2 +6 2 2 +7 2 2 +8 2 2 +9 2 1 +10 2 0 +select +pk,c, +count(*) over (partition by c order by pk rows +between 2 preceding and 1 preceding) as CNT +from t1; +pk c CNT +1 1 0 +2 1 1 +3 1 2 +4 1 2 +5 2 0 +6 2 1 +7 2 2 +8 2 2 +9 2 2 +10 2 2 +select +pk, c, +count(*) over (partition by c order by pk +rows between current row and 1 following) as CNT +from t1; +pk c CNT +1 1 2 +2 1 2 +3 1 2 +4 1 1 +5 2 2 +6 2 2 +7 2 2 +8 2 2 +9 2 2 +10 2 1 +# Check ORDER BY DESC +select +pk, c, +count(*) over (partition by c order by pk desc +rows between 2 preceding and 2 following) as CNT +from t1; +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +drop table t0,t1; +# +# Resolution of window names +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; +pk c +1 1 +2 1 +3 1 +4 1 +5 2 +6 2 +7 2 +8 2 +9 2 +10 2 +select +pk, c, +count(*) over w1 as CNT +from t1 +window w1 as (partition by c order by pk +rows between 2 preceding and 2 following); +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over (w1 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c order by pk); +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over (w1 order by pk rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c); +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 order by pk); +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over w3 as CNT +from t1 +window +w1 as (partition by c), +w2 as (w1 order by pk), +w3 as (w2 rows between 2 preceding and 2 following); +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +select +pk, c, +count(*) over w as CNT +from t1 +window w1 as (partition by c order by pk +rows between 2 preceding and 2 following); +ERROR HY000: Window specification with name 'w' is not defined +select +pk, c, +count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w1 as (order by pk); +ERROR HY000: Multiple window 
specifications with the same name 'w1' +select +pk, c, +count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w partition by c order by pk); +ERROR HY000: Window specification with name 'w' is not defined +select +pk, c, +count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 partition by c order by pk); +ERROR HY000: Window specification referencing another one 'w1' cannot contain partition list +select +pk, c, +count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c order by pk), w2 as (w1 order by pk); +ERROR HY000: Referenced window specification 'w1' already contains order list +select +pk, c, +count(*) over w3 as CNT +from t1 +window +w1 as (partition by c), +w2 as (w1 order by pk rows between 3 preceding and 2 following), +w3 as (w2 rows between 2 preceding and 2 following); +ERROR HY000: Referenced window specification 'w2' cannot contain window frame +select +pk, c, +count(*) over w1 as CNT +from t1 +window w1 as (partition by c order by pk +rows between unbounded following and 2 following); +ERROR HY000: Unacceptable combination of window frame bound specifications +select +pk, c, +count(*) over (w1 rows between 2 preceding and unbounded preceding) as CNT +from t1 +window w1 as (partition by c order by pk); +ERROR HY000: Unacceptable combination of window frame bound specifications +select +pk, c, +count(*) over (w1 order by pk rows between current row and 2 preceding) as CNT +from t1 +window w1 as (partition by c); +ERROR HY000: Unacceptable combination of window frame bound specifications +select +pk, c, +count(*) over (w2 rows between 2 following and current row) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 order by pk); +ERROR HY000: Unacceptable combination of window frame bound specifications +select +pk, c +from t1 where rank() over w1 > 2 +window w1 as (partition by c order by pk); +ERROR HY000: Window function is allowed only in SELECT list and ORDER BY clause +select +c, max(pk) as m +from t1 +group by c + rank() over w1 +window w1 as (order by m); +ERROR HY000: Window function is allowed only in SELECT list and ORDER BY clause +select +c, max(pk) as m, rank() over w1 as r +from t1 +group by c+r +window w1 as (order by m); +ERROR HY000: Window function is allowed only in SELECT list and ORDER BY clause +select +c, max(pk) as m, rank() over w1 as r +from t1 +group by c having c+r > 3 +window w1 as (order by m); +ERROR HY000: Window function is allowed only in SELECT list and ORDER BY clause +select +c, max(pk) as m, rank() over w1 as r, +rank() over (partition by r+1 order by m) +from t1 +group by c +window w1 as (order by m); +ERROR HY000: Window function is not allowed in window specification +select +c, max(pk) as m, rank() over w1 as r, +rank() over (partition by m order by r) +from t1 +group by c +window w1 as (order by m); +ERROR HY000: Window function is not allowed in window specification +select +c, max(pk) as m, rank() over w1 as r, dense_rank() over w2 as dr +from t1 +group by c +window w1 as (order by m), w2 as (partition by r order by m); +ERROR HY000: Window function is not allowed in window specification +select +pk, c, +row_number() over (partition by c order by pk +range between unbounded preceding and current row) as r +from t1; +ERROR HY000: Window frame is not allowed with 'row_number' +select +pk, c, +rank() over w1 as r +from t1 +window w1 as (partition by c order 
by pk +rows between 2 preceding and 2 following); +ERROR HY000: Window frame is not allowed with 'rank' +select +pk, c, +dense_rank() over (partition by c order by pk +rows between 1 preceding and 1 following) as r +from t1; +ERROR HY000: Window frame is not allowed with 'dense_rank' +select +pk, c, +rank() over w1 as r +from t1 +window w1 as (partition by c); +ERROR HY000: No order list in window specification for 'rank' +select +pk, c, +dense_rank() over (partition by c) as r +from t1; +ERROR HY000: No order list in window specification for 'dense_rank' +drop table t0,t1; +# +# MDEV-9634: Window function produces incorrect value +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (part_id int, pk int, a int); +insert into t2 select +if(a<5, 0, 1), a, if(a<5, NULL, 1) from t0; +select * from t2; +part_id pk a +0 0 NULL +0 1 NULL +0 2 NULL +0 3 NULL +0 4 NULL +1 5 1 +1 6 1 +1 7 1 +1 8 1 +1 9 1 +select +part_id, pk, a, +count(a) over (partition by part_id order by pk +rows between 1 preceding and 1 following) as CNT +from t2; +part_id pk a CNT +0 0 NULL 0 +0 1 NULL 0 +0 2 NULL 0 +0 3 NULL 0 +0 4 NULL 0 +1 5 1 2 +1 6 1 3 +1 7 1 3 +1 8 1 3 +1 9 1 2 +drop table t0, t2; +# +# RANGE-type bounds +# +create table t3 ( +pk int, +val int +); +insert into t3 values +(0, 1), +(1, 1), +(2, 1), +(3, 2), +(4, 2), +(5, 2), +(6, 2); +select +pk, +val, +count(val) over (order by val +range between current row and +current row) +as CNT +from t3; +pk val CNT +0 1 3 +1 1 3 +2 1 3 +3 2 4 +4 2 4 +5 2 4 +6 2 4 +insert into t3 values +(7, 3), +(8, 3); +select +pk, +val, +count(val) over (order by val +range between current row and +current row) +as CNT +from t3; +pk val CNT +0 1 3 +1 1 3 +2 1 3 +3 2 4 +4 2 4 +5 2 4 +6 2 4 +7 3 2 +8 3 2 +drop table t3; +# Now, check with PARTITION BY +create table t4 ( +part_id int, +pk int, +val int +); +insert into t4 values +(1234, 100, 1), +(1234, 101, 1), +(1234, 102, 1), +(1234, 103, 2), +(1234, 104, 2), +(1234, 105, 2), +(1234, 106, 2), +(1234, 107, 3), +(1234, 108, 3), +(5678, 200, 1), +(5678, 201, 1), +(5678, 202, 1), +(5678, 203, 2), +(5678, 204, 2), +(5678, 205, 2), +(5678, 206, 2), +(5678, 207, 3), +(5678, 208, 3); +select +part_id, +pk, +val, +count(val) over (partition by part_id +order by val +range between current row and +current row) +as CNT +from t4; +part_id pk val CNT +1234 100 1 3 +1234 101 1 3 +1234 102 1 3 +1234 103 2 4 +1234 104 2 4 +1234 105 2 4 +1234 106 2 4 +1234 107 3 2 +1234 108 3 2 +5678 200 1 3 +5678 201 1 3 +5678 202 1 3 +5678 203 2 4 +5678 204 2 4 +5678 205 2 4 +5678 206 2 4 +5678 207 3 2 +5678 208 3 2 +# +# Try RANGE UNBOUNDED PRECEDING | FOLLOWING +# +select +part_id, +pk, +val, +count(val) over (partition by part_id +order by val +range between unbounded preceding and +current row) +as CNT +from t4; +part_id pk val CNT +1234 100 1 3 +1234 101 1 3 +1234 102 1 3 +1234 103 2 7 +1234 104 2 7 +1234 105 2 7 +1234 106 2 7 +1234 107 3 9 +1234 108 3 9 +5678 200 1 3 +5678 201 1 3 +5678 202 1 3 +5678 203 2 7 +5678 204 2 7 +5678 205 2 7 +5678 206 2 7 +5678 207 3 9 +5678 208 3 9 +select +part_id, +pk, +val, +count(val) over (partition by part_id +order by val +range between current row and +unbounded following) +as CNT +from t4; +part_id pk val CNT +1234 100 1 9 +1234 101 1 9 +1234 102 1 9 +1234 103 2 6 +1234 104 2 6 +1234 105 2 6 +1234 106 2 6 +1234 107 3 2 +1234 108 3 2 +5678 200 1 9 +5678 201 1 9 +5678 202 1 9 +5678 203 2 6 +5678 204 2 6 +5678 205 2 6 +5678 206 2 6 +5678 207 3 2 +5678 208 3 2 
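A note on the RANGE-type results above, with a small standalone sketch; the table name t_peer below is hypothetical and not part of this patch. With a RANGE frame the bound CURRENT ROW spans the whole peer group, i.e. every row whose ORDER BY value equals the current row's, which is why the running counts jump in steps of the peer-group size (3, 7, 9 within each part_id) rather than one row at a time.

create table t_peer (grp int, v int);
insert into t_peer values (1,10),(1,10),(1,20);
-- RANGE ... CURRENT ROW includes all peers of the current ORDER BY value,
-- so both v=10 rows see a 2-row frame and the v=20 row sees a 1-row frame.
select grp, v,
count(*) over (partition by grp order by v
range between current row and current row) as peers
from t_peer;
drop table t_peer;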
+select +part_id, +pk, +val, +count(val) over (partition by part_id +order by val +range between unbounded preceding and +unbounded following) +as CNT +from t4; +part_id pk val CNT +1234 100 1 9 +1234 101 1 9 +1234 102 1 9 +1234 103 2 9 +1234 104 2 9 +1234 105 2 9 +1234 106 2 9 +1234 107 3 9 +1234 108 3 9 +5678 200 1 9 +5678 201 1 9 +5678 202 1 9 +5678 203 2 9 +5678 204 2 9 +5678 205 2 9 +5678 206 2 9 +5678 207 3 9 +5678 208 3 9 +drop table t4; +# +# MDEV-9695: Wrong window frame when using RANGE BETWEEN N FOLLOWING AND PRECEDING +# +create table t1 (pk int, a int, b int); +insert into t1 values +( 1 , 0, 1), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 8), +( 5 , 2, 32), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_or +from t1; +pk a b bit_or +1 0 1 3 +2 0 2 3 +3 1 4 12 +4 1 8 12 +5 2 32 96 +6 2 64 224 +7 2 128 208 +8 2 16 144 +# Extra ROWS n PRECEDING tests +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) as bit_or +from t1; +pk a b bit_or +1 0 1 0 +2 0 2 1 +3 1 4 0 +4 1 8 4 +5 2 32 0 +6 2 64 32 +7 2 128 64 +8 2 16 128 +drop table t1; +create table t2 ( +pk int, +a int, +b int +); +insert into t2 values +( 1, 0, 1), +( 2, 0, 2), +( 3, 0, 4), +( 4, 0, 8), +( 5, 1, 16), +( 6, 1, 32), +( 7, 1, 64), +( 8, 1, 128), +( 9, 2, 256), +(10, 2, 512), +(11, 2, 1024), +(12, 2, 2048); +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) as bit_or +from t2; +pk a b bit_or +1 0 1 0 +2 0 2 1 +3 0 4 2 +4 0 8 4 +5 1 16 0 +6 1 32 16 +7 1 64 32 +8 1 128 64 +9 2 256 0 +10 2 512 256 +11 2 1024 512 +12 2 2048 1024 +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 2 PRECEDING AND 2 PRECEDING) as bit_or +from t2; +pk a b bit_or +1 0 1 0 +2 0 2 0 +3 0 4 1 +4 0 8 2 +5 1 16 0 +6 1 32 0 +7 1 64 16 +8 1 128 32 +9 2 256 0 +10 2 512 0 +11 2 1024 256 +12 2 2048 512 +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) as bit_or +from t2; +pk a b bit_or +1 0 1 0 +2 0 2 1 +3 0 4 3 +4 0 8 6 +5 1 16 0 +6 1 32 16 +7 1 64 48 +8 1 128 96 +9 2 256 0 +10 2 512 256 +11 2 1024 768 +12 2 2048 1536 +# Check CURRENT ROW +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN CURRENT ROW AND CURRENT ROW) as bit_or +from t2; +pk a b bit_or +1 0 1 1 +2 0 2 2 +3 0 4 4 +4 0 8 8 +5 1 16 16 +6 1 32 32 +7 1 64 64 +8 1 128 128 +9 2 256 256 +10 2 512 512 +11 2 1024 1024 +12 2 2048 2048 +drop table t2; +# +# Try RANGE PRECEDING|FOLLWING n +# +create table t1 ( +part_id int, +pk int, +a int +); +insert into t1 values +(10, 1, 1), +(10, 2, 2), +(10, 3, 4), +(10, 4, 8), +(10, 5,26), +(10, 6,27), +(10, 7,40), +(10, 8,71), +(10, 9,72); +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 10 FOLLOWING) as cnt +from t1; +pk a cnt +1 1 4 +2 2 4 +3 4 4 +4 8 4 +5 26 6 +6 27 6 +7 40 7 +8 71 9 +9 72 9 +select +pk, a, +count(a) over (ORDER BY a DESC +RANGE BETWEEN UNBOUNDED PRECEDING +AND 10 FOLLOWING) as cnt +from t1; +pk a cnt +1 1 9 +2 2 9 +3 4 9 +4 8 9 +5 26 5 +6 27 5 +7 40 3 +8 71 2 +9 72 2 +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 1 FOLLOWING) as cnt +from t1; +pk a cnt +1 1 2 +2 2 2 +3 4 3 +4 8 4 +5 26 6 +6 27 6 +7 40 7 +8 71 9 +9 72 9 +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 10 PRECEDING) as cnt +from t1; +pk a cnt +1 1 0 +2 2 0 +3 4 0 +4 8 
0 +5 26 4 +6 27 4 +7 40 6 +8 71 7 +9 72 7 +select +pk, a, +count(a) over (ORDER BY a DESC +RANGE BETWEEN UNBOUNDED PRECEDING +AND 10 PRECEDING) as cnt +from t1; +pk a cnt +1 1 5 +2 2 5 +3 4 5 +4 8 5 +5 26 3 +6 27 3 +7 40 2 +8 71 0 +9 72 0 +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 1 PRECEDING) as cnt +from t1; +pk a cnt +1 1 0 +2 2 1 +3 4 2 +4 8 3 +5 26 4 +6 27 5 +7 40 6 +8 71 7 +9 72 8 +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN 1 PRECEDING +AND CURRENT ROW) as cnt +from t1; +pk a cnt +1 1 1 +2 2 2 +3 4 1 +4 8 1 +5 26 1 +6 27 2 +7 40 1 +8 71 1 +9 72 2 +select +pk, a, +count(a) over (ORDER BY a DESC +RANGE BETWEEN 1 PRECEDING +AND CURRENT ROW) as cnt +from t1; +pk a cnt +1 1 2 +2 2 1 +3 4 1 +4 8 1 +5 26 2 +6 27 1 +7 40 1 +8 71 2 +9 72 1 +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN 1 FOLLOWING +AND 3 FOLLOWING) as cnt +from t1; +pk a cnt +1 1 2 +2 2 1 +3 4 0 +4 8 0 +5 26 1 +6 27 0 +7 40 0 +8 71 1 +9 72 0 +# Try CURRENT ROW with[out] DESC +select +pk, a, +count(a) over (ORDER BY a +RANGE BETWEEN CURRENT ROW +AND 1 FOLLOWING) as cnt +from t1; +pk a cnt +1 1 2 +2 2 1 +3 4 1 +4 8 1 +5 26 2 +6 27 1 +7 40 1 +8 71 2 +9 72 1 +select +pk, a, +count(a) over (order by a desc +range between current row +and 1 following) as cnt +from t1; +pk a cnt +1 1 1 +2 2 2 +3 4 1 +4 8 1 +5 26 1 +6 27 2 +7 40 1 +8 71 1 +9 72 2 +insert into t1 select 22, pk, a from t1; +select +part_id, pk, a, +count(a) over (PARTITION BY part_id +ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 10 FOLLOWING) as cnt +from t1; +part_id pk a cnt +10 1 1 4 +10 2 2 4 +10 3 4 4 +10 4 8 4 +10 5 26 6 +10 6 27 6 +10 7 40 7 +10 8 71 9 +10 9 72 9 +22 1 1 4 +22 2 2 4 +22 3 4 4 +22 4 8 4 +22 5 26 6 +22 6 27 6 +22 7 40 7 +22 8 71 9 +22 9 72 9 +select +pk, a, +count(a) over (PARTITION BY part_id +ORDER BY a +RANGE BETWEEN UNBOUNDED PRECEDING +AND 1 PRECEDING) as cnt +from t1; +pk a cnt +1 1 0 +2 2 1 +3 4 2 +4 8 3 +5 26 4 +6 27 5 +7 40 6 +8 71 7 +9 72 8 +1 1 0 +2 2 1 +3 4 2 +4 8 3 +5 26 4 +6 27 5 +7 40 6 +8 71 7 +9 72 8 +drop table t1; +# Try a RANGE frame over non-integer datatype: +create table t1 ( +col1 int, +a decimal(5,3) +); +insert into t1 values (1, 0.45); +insert into t1 values (1, 0.5); +insert into t1 values (1, 0.55); +insert into t1 values (1, 1.21); +insert into t1 values (1, 1.22); +insert into t1 values (1, 3.33); +select +a, +count(col1) over (order by a +range between 0.1 preceding +and 0.1 following) +from t1; +a count(col1) over (order by a +range between 0.1 preceding +and 0.1 following) +0.450 3 +0.500 3 +0.550 3 +1.210 2 +1.220 2 +3.330 1 +drop table t1; +# +# RANGE-type frames and NULL values +# +create table t1 ( +pk int, +a int, +b int +); +insert into t1 values (1, NULL,1); +insert into t1 values (2, NULL,1); +insert into t1 values (3, NULL,1); +insert into t1 values (4, 10 ,1); +insert into t1 values (5, 11 ,1); +insert into t1 values (6, 12 ,1); +insert into t1 values (7, 13 ,1); +insert into t1 values (8, 14 ,1); +select +pk, a, +count(b) over (order by a +range between 2 preceding +and 2 following) as CNT +from t1; +pk a CNT +1 NULL 3 +2 NULL 3 +3 NULL 3 +4 10 3 +5 11 4 +6 12 5 +7 13 4 +8 14 3 +drop table t1; +# +# Try ranges that have bound1 > bound2. 
The standard actually allows them +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; +pk c +1 1 +2 1 +3 1 +4 1 +5 2 +6 2 +7 2 +8 2 +9 2 +10 2 +select +pk, c, +count(*) over (partition by c +order by pk +rows between 1 preceding +and 2 preceding) +as cnt +from t1; +pk c cnt +1 1 0 +2 1 0 +3 1 0 +4 1 0 +5 2 0 +6 2 0 +7 2 0 +8 2 0 +9 2 0 +10 2 0 +select +pk, c, +count(*) over (partition by c +order by pk +range between 1 preceding +and 2 preceding) +as cnt +from t1; +pk c cnt +1 1 0 +2 1 0 +3 1 0 +4 1 0 +5 2 0 +6 2 0 +7 2 0 +8 2 0 +9 2 0 +10 2 0 +drop table t0, t1; +# +# Error checking for frame bounds +# +create table t1 (a int, b int, c varchar(32)); +insert into t1 values (1,1,'foo'); +insert into t1 values (2,2,'bar'); +select +count(*) over (order by a,b +range between unbounded preceding and current row) +from t1; +ERROR HY000: RANGE-type frame requires ORDER BY clause with single sort key +select +count(*) over (order by c +range between unbounded preceding and current row) +from t1; +ERROR HY000: Numeric datatype is required for RANGE-type frame +select +count(*) over (order by a +range between 'abcd' preceding and current row) +from t1; +ERROR HY000: Numeric datatype is required for RANGE-type frame +select +count(*) over (order by a +range between current row and 'foo' following) +from t1; +ERROR HY000: Numeric datatype is required for RANGE-type frame +# Try range frame with invalid bounds +select +count(*) over (order by a +rows between 0.5 preceding and current row) +from t1; +ERROR HY000: Integer is required for ROWS-type frame +select +count(*) over (order by a +rows between current row and 3.14 following) +from t1; +ERROR HY000: Integer is required for ROWS-type frame +# +# EXCLUDE clause is parsed but not supported +# +select +count(*) over (order by a +rows between 1 preceding and 1 following +exclude current row) +from t1; +ERROR HY000: Frame exclusion is not supported yet +select +count(*) over (order by a +range between 1 preceding and 1 following +exclude ties) +from t1; +ERROR HY000: Frame exclusion is not supported yet +select +count(*) over (order by a +range between 1 preceding and 1 following +exclude group) +from t1; +ERROR HY000: Frame exclusion is not supported yet +select +count(*) over (order by a +rows between 1 preceding and 1 following +exclude no others) +from t1; +count(*) over (order by a +rows between 1 preceding and 1 following +exclude no others) +2 +2 +drop table t1; +# +# Window function in grouping query +# +create table t1 ( +username varchar(32), +amount int +); +insert into t1 values +('user1',1), +('user1',5), +('user1',3), +('user2',10), +('user2',20), +('user2',30); +select +username, +sum(amount) as s, +rank() over (order by s desc) +from t1 +group by username; +username s rank() over (order by s desc) +user1 9 2 +user2 60 1 +drop table t1; +# +# mdev-9719: Window function in prepared statement +# +create table t1(a int, b int, x char(32)); +insert into t1 values (2, 10, 'xx'); +insert into t1 values (2, 10, 'zz'); +insert into t1 values (2, 20, 'yy'); +insert into t1 values (3, 10, 'xxx'); +insert into t1 values (3, 20, 'vvv'); +prepare stmt from 'select a, row_number() over (partition by a order by b) from t1'; +execute stmt; +a row_number() over (partition by a order by b) +2 1 +2 2 +2 3 +3 1 +3 2 +drop table t1; +# +# mdev-9754: Window name resolution in prepared 
statement +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; +pk c +1 1 +2 1 +3 1 +4 1 +5 2 +6 2 +7 2 +8 2 +9 2 +10 2 +prepare stmt from +'select + pk, c, + count(*) over w1 as CNT +from t1 +window w1 as (partition by c order by pk + rows between 2 preceding and 2 following)'; +execute stmt; +pk c CNT +1 1 3 +2 1 4 +3 1 4 +4 1 3 +5 2 3 +6 2 4 +7 2 5 +8 2 5 +9 2 4 +10 2 3 +drop table t0,t1; +# +# EXPLAIN FORMAT=JSON support for window functions +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +explain format=json select rank() over (order by a) from t0; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t0.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t0", + "access_type": "ALL", + "rows": 10, + "filtered": 100 + } + } + } + } +} +create table t1 (a int, b int, c int); +insert into t1 select a,a,a from t0; +explain format=json +select +a, +rank() over (order by sum(b)) +from t1 +group by a; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "t1.a", + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "sum(t1.b)" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 10, + "filtered": 100 + } + } + } + } + } +} +explain format=json +select +a, +rank() over (order by sum(b)) +from t1 +group by a +order by null; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "sum(t1.b)" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 10, + "filtered": 100 + } + } + } + } +} +# +# Check how window function works together with GROUP BY and HAVING +# +select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7); +b MX rank() over (order by b) +3 3 1 +5 5 2 +7 7 3 +explain format=json +select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "having_condition": "(MX in (3,5,7))", + "filesort": { + "sort_key": "t1.b", + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.b" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 10, + "filtered": 100 + } + } + } + } + } +} +drop table t1; +drop table t0; +# +# Building ordering index for window functions +# +create table t1 ( +pk int primary key, +a int, +b int, +c int +); +insert into t1 values +(101 , 0, 10, 1), +(102 , 0, 10, 2), +(103 , 1, 10, 3), +(104 , 1, 10, 4), +(108 , 2, 10, 5), +(105 , 2, 20, 6), +(106 , 2, 20, 7), +(107 , 2, 20, 8), +(109 , 4, 20, 9), +(110 , 4, 20, 10), +(111 , 5, NULL, 11), +(112 , 5, 1, 12), +(113 , 5, NULL, 13), +(114 , 5, NULL, 14), +(115 , 5, NULL, 15), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2), +(123 , 6, 1, 20), +(124 , 6, 1, -10), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); +select sum(b) over (partition by a order by b,pk +rows between unbounded preceding and current row) as c1, +avg(b) over (w1 rows between 1 preceding and 1 following) as c2, +sum(c) over (w2 rows between 1 preceding and 1 
following) as c5, +avg(b) over (w1 rows between 5 preceding and 5 following) as c3, +sum(b) over (w1 rows between 1 preceding and 1 following) as c4 +from t1 +window w1 as (partition by a order by b,pk), +w2 as (partition by b order by c,pk); +c1 c2 c5 c3 c4 +1 1.0000 42 1.0000 1 +1 1.0000 NULL 1.0000 2 +10 1.0000 NULL 1.0000 3 +10 10.0000 3 10.0000 20 +10 10.0000 9 10.0000 20 +10 15.0000 9 17.5000 30 +11 1.0000 NULL 1.0000 3 +12 1.0000 -10 1.0000 2 +2 1.0000 24 1.0000 3 +20 10.0000 12 10.0000 20 +20 10.0000 6 10.0000 20 +20 20.0000 27 20.0000 40 +3 1.0000 -7 1.0000 3 +30 16.6667 13 17.5000 50 +4 1.0000 NULL 1.0000 3 +40 20.0000 19 20.0000 40 +5 1.0000 NULL 1.0000 3 +50 20.0000 21 17.5000 60 +6 1.0000 NULL 1.0000 3 +7 1.0000 13 1.0000 3 +70 20.0000 24 17.5000 40 +8 1.0000 32 1.0000 3 +9 1.0000 -9 1.0000 3 +NULL 1.0000 29 1.0000 1 +NULL NULL 24 1.0000 NULL +NULL NULL 38 1.0000 NULL +NULL NULL 42 1.0000 NULL +drop table t1; +# +# MDEV-9848: Window functions: reuse sorting and/or scanning +# +create table t1 (a int, b int, c int); +insert into t1 values +(1,3,1), +(2,2,1), +(3,1,1); +# Check using counters +flush status; +select +rank() over (partition by c order by a), +rank() over (partition by c order by b) +from t1; +rank() over (partition by c order by a) rank() over (partition by c order by b) +1 3 +2 2 +3 1 +show status like '%sort%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 6 +Sort_scan 2 +flush status; +select +rank() over (partition by c order by a), +rank() over (partition by c order by a) +from t1; +rank() over (partition by c order by a) rank() over (partition by c order by a) +1 1 +2 2 +3 3 +show status like '%sort%'; +Variable_name Value +Sort_merge_passes 0 +Sort_priority_queue_sorts 0 +Sort_range 0 +Sort_rows 3 +Sort_scan 1 +explain format=json +select +rank() over (partition by c order by a), +rank() over (partition by c order by a) +from t1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.c, t1.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 3, + "filtered": 100 + } + } + } + } +} +explain format=json +select +rank() over (order by a), +row_number() over (order by a) +from t1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 3, + "filtered": 100 + } + } + } + } +} +explain format=json +select +rank() over (partition by c order by a), +count(*) over (partition by c) +from t1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.c, t1.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 3, + "filtered": 100 + } + } + } + } +} +explain format=json +select +count(*) over (partition by c), +rank() over (partition by c order by a) +from t1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.c, t1.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 3, + "filtered": 100 + } + } + } + } +} +drop table t1; +# +# MDEV-9847: Window functions: crash with big_tables=1 +# +create table t1(a int); +insert into t1 values 
(0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +set @tmp=@@big_tables; +set big_tables=1; +select rank() over (order by a) from t1; +rank() over (order by a) +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +set big_tables=@tmp; +drop table t1; +# +# Check if "ORDER BY window_func" works +# +create table t1 (s1 int, s2 char(5)); +insert into t1 values (1,'a'); +insert into t1 values (null,null); +insert into t1 values (1,null); +insert into t1 values (null,'a'); +insert into t1 values (2,'b'); +insert into t1 values (-1,''); +explain format=json +select *, row_number() over (order by s1, s2) as X from t1 order by X desc; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "filesort": { + "sort_key": "X", + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.s1, t1.s2" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 6, + "filtered": 100 + } + } + } + } + } +} +select *, row_number() over (order by s1, s2) as X from t1 order by X desc; +s1 s2 X +2 b 6 +1 a 5 +1 NULL 4 +-1 3 +NULL a 2 +NULL NULL 1 +drop table t1; +# +# Try window functions that are not directly present in the select list +# +create table t1 (a int, b int); +insert into t1 values +(1,3), +(2,2), +(3,1); +select +rank() over (order by a) - +rank() over (order by b) +from +t1; +rank() over (order by a) - +rank() over (order by b) +0 +0 +0 +drop table t1; +# +# MDEV-9894: Assertion `0' failed in Window_func_runner::setup +# return ER_NOT_SUPPORTED_YET for aggregates that are not yet supported +# as window functions. +# +create table t1 (i int); +insert into t1 values (1),(2); +SELECT MAX(i) OVER (PARTITION BY (i)) FROM t1; +ERROR 42000: This version of MariaDB doesn't yet support 'This aggregate as window function' +drop table t1; +# +# Check the 0 in ROWS 0 PRECEDING +# +create table t1 ( +part_id int, +pk int, +a int +); +insert into t1 values (1, 1, 1); +insert into t1 values (1, 2, 2); +insert into t1 values (1, 3, 4); +insert into t1 values (1, 4, 8); +select +pk, a, +sum(a) over (order by pk rows between 0 preceding and current row) +from t1; +pk a sum(a) over (order by pk rows between 0 preceding and current row) +1 1 1 +2 2 2 +3 4 4 +4 8 8 +select +pk, a, +sum(a) over (order by pk rows between 1 preceding and 0 preceding) +from t1; +pk a sum(a) over (order by pk rows between 1 preceding and 0 preceding) +1 1 1 +2 2 3 +3 4 6 +4 8 12 +insert into t1 values (200, 1, 1); +insert into t1 values (200, 2, 2); +insert into t1 values (200, 3, 4); +insert into t1 values (200, 4, 8); +select +part_id, pk, a, +sum(a) over (partition by part_id order by pk rows between 0 preceding and current row) +from t1; +part_id pk a sum(a) over (partition by part_id order by pk rows between 0 preceding and current row) +1 1 1 1 +1 2 2 2 +1 3 4 4 +1 4 8 8 +200 1 1 1 +200 2 2 2 +200 3 4 4 +200 4 8 8 +select +part_id, pk, a, +sum(a) over (partition by part_id order by pk rows between 1 preceding and 0 preceding) +from t1; +part_id pk a sum(a) over (partition by part_id order by pk rows between 1 preceding and 0 preceding) +1 1 1 1 +1 2 2 3 +1 3 4 6 +1 4 8 12 +200 1 1 1 +200 2 2 3 +200 3 4 6 +200 4 8 12 +drop table t1; +# +# MDEV-9780, The "DISTINCT must not bet converted into GROUP BY when +# window functions are present" part +# +create table t1 (part_id int, a int); +insert into t1 values +(100, 1), +(100, 2), +(100, 2), +(100, 3), +(2000, 1), +(2000, 2), +(2000, 3), +(2000, 3), +(2000, 3); +select rank() over (partition by part_id order by a) from t1; +rank() over (partition by part_id 
order by a) +1 +2 +2 +4 +1 +2 +3 +3 +3 +select distinct rank() over (partition by part_id order by a) from t1; +rank() over (partition by part_id order by a) +1 +2 +4 +3 +explain format=json +select distinct rank() over (partition by part_id order by a) from t1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "duplicate_removal": { + "window_functions_computation": { + "sorts": { + "filesort": { + "sort_key": "t1.part_id, t1.a" + } + }, + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + } + } + } + } + } +} +drop table t1; +# +# MDEV-9893: Window functions with different ORDER BY lists, +# one of these lists containing an expression +# +create table t1 (s1 int, s2 char(5)); +insert into t1 values (1,'a'); +insert into t1 values (null,null); +insert into t1 values (3,null); +insert into t1 values (4,'a'); +insert into t1 values (2,'b'); +insert into t1 values (-1,''); +select +*, +ROW_NUMBER() OVER (order by s1), +CUME_DIST() OVER (order by -s1) +from t1; +s1 s2 ROW_NUMBER() OVER (order by s1) CUME_DIST() OVER (order by -s1) +1 a 3 0.8333333333 +NULL NULL 1 0.1666666667 +3 NULL 5 0.5000000000 +4 a 6 0.3333333333 +2 b 4 0.6666666667 +-1 2 1.0000000000 +drop table t1; +# +# MDEV-9925: Wrong result with aggregate function as a window function +# +create table t1 (i int); +insert into t1 values (1),(2); +select i, sum(i) over (partition by i) from t1; +i sum(i) over (partition by i) +1 1 +2 2 +drop table t1; +# +# MDEV-9922: Assertion `!join->only_const_tables() && fsort' failed in int create_sort_index +# +create view v1 as select 1 as i; +select rank() over (order by i) from v1; +rank() over (order by i) +1 +drop view v1; diff --git a/mysql-test/r/win_avg.result b/mysql-test/r/win_avg.result new file mode 100644 index 00000000000..7e539d933d8 --- /dev/null +++ b/mysql-test/r/win_avg.result @@ -0,0 +1,95 @@ +create table t1 ( +pk int primary key, +a int, +b int, +c real +); +insert into t1 values +(101 , 0, 10, 1.1), +(102 , 0, 10, 2.1), +(103 , 1, 10, 3.1), +(104 , 1, 10, 4.1), +(108 , 2, 10, 5.1), +(105 , 2, 20, 6.1), +(106 , 2, 20, 7.1), +(107 , 2, 20, 8.15), +(109 , 4, 20, 9.15), +(110 , 4, 20, 10.15), +(111 , 5, NULL, 11.15), +(112 , 5, 1, 12.25), +(113 , 5, NULL, 13.35), +(114 , 5, NULL, 14.50), +(115 , 5, NULL, 15.65), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1.1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2.2), +(123 , 6, 1, 20.1), +(124 , 6, 1, -10.4), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); +select pk, a, b, avg(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; +pk a b avg(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +101 0 10 10.0000 +102 0 10 10.0000 +103 1 10 10.0000 +104 1 10 10.0000 +105 2 20 20.0000 +106 2 20 20.0000 +107 2 20 16.6667 +108 2 10 15.0000 +109 4 20 20.0000 +110 4 20 20.0000 +111 5 NULL 1.0000 +112 5 1 1.0000 +113 5 NULL 1.0000 +114 5 NULL NULL +115 5 NULL NULL +116 6 1 1.0000 +117 6 1 1.0000 +118 6 1 1.0000 +119 6 1 1.0000 +120 6 1 1.0000 +121 6 1 1.0000 +122 6 1 1.0000 +123 6 1 1.0000 +124 6 1 1.0000 +125 6 1 1.0000 +126 6 1 1.0000 +127 6 1 1.0000 +select pk, a, c, avg(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; +pk a c avg(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +101 0 1.1 1.6 +102 0 2.1 1.6 +103 1 3.1 3.5999999999999996 +104 1 4.1 3.5999999999999996 +105 2 6.1 6.6 +106 2 7.1 
7.116666666666667 +107 2 8.15 6.783333333333334 +108 2 5.1 6.625000000000001 +109 4 9.15 9.65 +110 4 10.15 9.65 +111 5 11.15 11.7 +112 5 12.25 12.25 +113 5 13.35 13.366666666666667 +114 5 14.5 14.5 +115 5 15.65 15.075 +116 6 NULL 10 +117 6 10 5.55 +118 6 1.1 5.55 +119 6 NULL 1.0999999999999996 +120 6 NULL NULL +121 6 NULL 2.1999999999999997 +122 6 2.2 11.15 +123 6 20.1 3.966666666666667 +124 6 -10.4 4.85 +125 6 NULL -10.400000000000002 +126 6 NULL NULL +127 6 NULL NULL +drop table t1; diff --git a/mysql-test/r/win_bit.result b/mysql-test/r/win_bit.result new file mode 100644 index 00000000000..ac7625beb90 --- /dev/null +++ b/mysql-test/r/win_bit.result @@ -0,0 +1,117 @@ +create table t1 ( +pk int primary key, +a int, +b int +); +create table t2 ( +pk int primary key, +a int, +b int +); +insert into t1 values +( 1 , 0, 1), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 8), +( 5 , 2, 32), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); +insert into t2 values +( 1 , 0, 2), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 4), +( 5 , 2, 16), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); +# Test bit functions on only one partition. +select pk, a, b, +bit_or(b) over (order by pk) as bit_or, +bit_and(b) over (order by pk) as bit_and, +bit_xor(b) over (order by pk) as bit_xor +from t1; +pk a b bit_or bit_and bit_xor +1 0 1 1 1 1 +2 0 2 3 0 3 +3 1 4 7 0 7 +4 1 8 15 0 15 +5 2 32 47 0 47 +6 2 64 111 0 111 +7 2 128 239 0 239 +8 2 16 255 0 255 +select pk, a, b, +bit_or(b) over (order by pk) as bit_or, +bit_and(b) over (order by pk) as bit_and, +bit_xor(b) over (order by pk) as bit_xor +from t2; +pk a b bit_or bit_and bit_xor +1 0 2 2 2 2 +2 0 2 2 2 0 +3 1 4 6 0 4 +4 1 4 6 0 0 +5 2 16 22 0 16 +6 2 64 86 0 80 +7 2 128 214 0 208 +8 2 16 214 0 192 +# Test multiple partitions with bit functions. +select pk, a, b, +bit_or(b) over (partition by a order by pk) as bit_or, +bit_and(b) over (partition by a order by pk) as bit_and, +bit_xor(b) over (partition by a order by pk) as bit_xor +from t1; +pk a b bit_or bit_and bit_xor +1 0 1 1 1 1 +2 0 2 3 0 3 +3 1 4 4 4 4 +4 1 8 12 0 12 +5 2 32 32 32 32 +6 2 64 96 0 96 +7 2 128 224 0 224 +8 2 16 240 0 240 +select pk, a, b, +bit_or(b) over (partition by a order by pk) as bit_or, +bit_and(b) over (partition by a order by pk) as bit_and, +bit_xor(b) over (partition by a order by pk) as bit_xor +from t2; +pk a b bit_or bit_and bit_xor +1 0 2 2 2 2 +2 0 2 2 2 0 +3 1 4 4 4 4 +4 1 4 4 4 0 +5 2 16 16 16 16 +6 2 64 80 0 80 +7 2 128 208 0 208 +8 2 16 208 0 192 +# Test remove function for bit functions using a sliding window. 
+select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_or, +bit_and(b) over (partition by a order by pk) as bit_and, +bit_xor(b) over (partition by a order by pk) as bit_xor +from t1; +pk a b bit_or bit_and bit_xor +1 0 1 3 1 1 +2 0 2 3 0 3 +3 1 4 12 4 4 +4 1 8 12 0 12 +5 2 32 96 32 32 +6 2 64 224 0 96 +7 2 128 208 0 224 +8 2 16 144 0 240 +select pk, a, b, +bit_or(b) over (partition by a order by pk) as bit_or, +bit_and(b) over (partition by a order by pk) as bit_and, +bit_xor(b) over (partition by a order by pk) as bit_xor +from t2; +pk a b bit_or bit_and bit_xor +1 0 2 2 2 2 +2 0 2 2 2 0 +3 1 4 4 4 4 +4 1 4 4 4 0 +5 2 16 16 16 16 +6 2 64 80 0 80 +7 2 128 208 0 208 +8 2 16 208 0 192 +drop table t1; +drop table t2; diff --git a/mysql-test/r/win_ntile.result b/mysql-test/r/win_ntile.result new file mode 100644 index 00000000000..41cb1a594bf --- /dev/null +++ b/mysql-test/r/win_ntile.result @@ -0,0 +1,435 @@ +create table t1 ( +pk int primary key, +a int, +b int +); +insert into t1 values +(11 , 0, 10), +(12 , 0, 10), +(13 , 1, 10), +(14 , 1, 10), +(18 , 2, 10), +(15 , 2, 20), +(16 , 2, 20), +(17 , 2, 20), +(19 , 4, 20), +(20 , 4, 20); +select pk, a, b, ntile(-1) over (order by a) +from t1; +ERROR HY000: Argument of NTILE must be greater than 0 +select pk, a, b, +ntile(0) over (order by a) +from t1; +ERROR HY000: Argument of NTILE must be greater than 0 +select pk, a, b, +ntile(1) over (order by pk) +from t1; +pk a b ntile(1) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 1 +15 2 20 1 +16 2 20 1 +17 2 20 1 +18 2 10 1 +19 4 20 1 +20 4 20 1 +select pk, a, b, +ntile(2) over (order by pk) +from t1; +pk a b ntile(2) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 1 +15 2 20 1 +16 2 20 2 +17 2 20 2 +18 2 10 2 +19 4 20 2 +20 4 20 2 +select pk, a, b, +ntile(3) over (order by pk) +from t1; +pk a b ntile(3) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 1 +15 2 20 2 +16 2 20 2 +17 2 20 2 +18 2 10 3 +19 4 20 3 +20 4 20 3 +select pk, a, b, +ntile(4) over (order by pk) +from t1; +pk a b ntile(4) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 2 +15 2 20 2 +16 2 20 2 +17 2 20 3 +18 2 10 3 +19 4 20 4 +20 4 20 4 +select pk, a, b, +ntile(5) over (order by pk) +from t1; +pk a b ntile(5) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 2 +15 2 20 3 +16 2 20 3 +17 2 20 4 +18 2 10 4 +19 4 20 5 +20 4 20 5 +select pk, a, b, +ntile(6) over (order by pk) +from t1; +pk a b ntile(6) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 2 +15 2 20 3 +16 2 20 3 +17 2 20 4 +18 2 10 4 +19 4 20 5 +20 4 20 6 +select pk, a, b, +ntile(7) over (order by pk) +from t1; +pk a b ntile(7) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 2 +15 2 20 3 +16 2 20 3 +17 2 20 4 +18 2 10 5 +19 4 20 6 +20 4 20 7 +select pk, a, b, +ntile(8) over (order by pk) +from t1; +pk a b ntile(8) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 2 +15 2 20 3 +16 2 20 4 +17 2 20 5 +18 2 10 6 +19 4 20 7 +20 4 20 8 +select pk, a, b, +ntile(9) over (order by pk) +from t1; +pk a b ntile(9) over (order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 3 +15 2 20 4 +16 2 20 5 +17 2 20 6 +18 2 10 7 +19 4 20 8 +20 4 20 9 +select pk, a, b, +ntile(10) over (order by pk) +from t1; +pk a b ntile(10) over (order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +15 2 20 5 +16 2 20 6 +17 2 20 7 +18 2 10 8 +19 4 20 9 +20 4 20 10 +select pk, a, b, +ntile(11) over (order by pk) +from t1; +pk a b ntile(11) over (order by 
pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +15 2 20 5 +16 2 20 6 +17 2 20 7 +18 2 10 8 +19 4 20 9 +20 4 20 10 +select pk, a, b, +ntile(20) over (order by pk) +from t1; +pk a b ntile(20) over (order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +15 2 20 5 +16 2 20 6 +17 2 20 7 +18 2 10 8 +19 4 20 9 +20 4 20 10 +select pk, a, b, +ntile(1) over (partition by b order by pk) +from t1; +pk a b ntile(1) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 1 +18 2 10 1 +15 2 20 1 +16 2 20 1 +17 2 20 1 +19 4 20 1 +20 4 20 1 +select pk, a, b, +ntile(2) over (partition by b order by pk) +from t1; +pk a b ntile(2) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 2 +18 2 10 2 +15 2 20 1 +16 2 20 1 +17 2 20 1 +19 4 20 2 +20 4 20 2 +select pk, a, b, +ntile(3) over (partition by b order by pk) +from t1; +pk a b ntile(3) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 2 +18 2 10 3 +15 2 20 1 +16 2 20 1 +17 2 20 2 +19 4 20 2 +20 4 20 3 +select pk, a, b, +ntile(4) over (partition by b order by pk) +from t1; +pk a b ntile(4) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 3 +18 2 10 4 +15 2 20 1 +16 2 20 1 +17 2 20 2 +19 4 20 3 +20 4 20 4 +select pk, a, b, +ntile(5) over (partition by b order by pk) +from t1; +pk a b ntile(5) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(6) over (partition by b order by pk) +from t1; +pk a b ntile(6) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(7) over (partition by b order by pk) +from t1; +pk a b ntile(7) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(8) over (partition by b order by pk) +from t1; +pk a b ntile(8) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(9) over (partition by b order by pk) +from t1; +pk a b ntile(9) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(10) over (partition by b order by pk) +from t1; +pk a b ntile(10) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(11) over (partition by b order by pk) +from t1; +pk a b ntile(11) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(20) over (partition by b order by pk) +from t1; +pk a b ntile(20) over (partition by b order by pk) +11 0 10 1 +12 0 10 2 +13 1 10 3 +14 1 10 4 +18 2 10 5 +15 2 20 1 +16 2 20 2 +17 2 20 3 +19 4 20 4 +20 4 20 5 +select pk, a, b, +ntile(1 + 3) over (partition by b order by pk) +from t1; +pk a b ntile(1 + 3) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 3 +18 2 10 4 +15 2 20 1 +16 2 20 1 +17 2 20 2 +19 4 20 3 +20 4 20 4 +select pk, a, b, +ntile((select 4)) over (partition by b order by pk) +from t1; +pk a b ntile((select 4)) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 2 +14 1 10 3 +18 2 
10 4 +15 2 20 1 +16 2 20 1 +17 2 20 2 +19 4 20 3 +20 4 20 4 +select t1.a from t1 where pk = 11; +a +0 +select pk, a, b, +ntile((select a from t1 where pk=11)) over (partition by b order by pk) +from t1; +ERROR HY000: Argument of NTILE must be greater than 0 +select t1.a from t1 where pk = 13; +a +1 +select pk, a, b, +ntile((select a from t1 where pk=13)) over (partition by b order by pk) +from t1; +pk a b ntile((select a from t1 where pk=13)) over (partition by b order by pk) +11 0 10 1 +12 0 10 1 +13 1 10 1 +14 1 10 1 +18 2 10 1 +15 2 20 1 +16 2 20 1 +17 2 20 1 +19 4 20 1 +20 4 20 1 +explain +select pk, a, b, +ntile((select a from t1 where pk=13)) over (partition by b order by pk) +from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 10 Using temporary +2 SUBQUERY t1 const PRIMARY PRIMARY 4 const 1 +select a from t1; +a +0 +0 +1 +1 +2 +2 +2 +2 +4 +4 +select pk, a, b, +ntile((select a from t1)) over (partition by b order by pk) +from t1; +ERROR 21000: Subquery returns more than 1 row +drop table t1; diff --git a/mysql-test/r/win_orderby.result b/mysql-test/r/win_orderby.result new file mode 100644 index 00000000000..bf4a40a4db3 --- /dev/null +++ b/mysql-test/r/win_orderby.result @@ -0,0 +1,26 @@ +drop table if exists t0,t1; +create table t0(a int primary key); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1( +pk int, +a int, +key(pk) +); +insert into t1 +select +A.a + B.a* 10 + C.a * 100, +1 +from t0 A, t0 B, t0 C; +select +pk, +count(a) over (order by pk rows between 2 preceding and 2 following) +from t1 +where pk between 1 and 30 +order by pk desc +limit 4; +pk count(a) over (order by pk rows between 2 preceding and 2 following) +30 3 +29 4 +28 5 +27 5 +drop table t0,t1; diff --git a/mysql-test/r/win_percent_cume.result b/mysql-test/r/win_percent_cume.result new file mode 100644 index 00000000000..d38c95c9ea7 --- /dev/null +++ b/mysql-test/r/win_percent_cume.result @@ -0,0 +1,62 @@ +create table t1 ( +pk int primary key, +a int, +b int +); +insert into t1 values +( 1 , 0, 10), +( 2 , 0, 10), +( 3 , 1, 10), +( 4 , 1, 10), +( 8 , 2, 10), +( 5 , 2, 20), +( 6 , 2, 20), +( 7 , 2, 20), +( 9 , 4, 20), +(10 , 4, 20); +select pk, a, b, +percent_rank() over (order by a), +cume_dist() over (order by a) +from t1; +pk a b percent_rank() over (order by a) cume_dist() over (order by a) +1 0 10 0.0000000000 0.2000000000 +2 0 10 0.0000000000 0.2000000000 +3 1 10 0.2222222222 0.4000000000 +4 1 10 0.2222222222 0.4000000000 +8 2 10 0.4444444444 0.8000000000 +5 2 20 0.4444444444 0.8000000000 +6 2 20 0.4444444444 0.8000000000 +7 2 20 0.4444444444 0.8000000000 +9 4 20 0.8888888889 1.0000000000 +10 4 20 0.8888888889 1.0000000000 +select pk, a, b, +percent_rank() over (order by pk), +cume_dist() over (order by pk) +from t1 order by pk; +pk a b percent_rank() over (order by pk) cume_dist() over (order by pk) +1 0 10 0.0000000000 0.1000000000 +2 0 10 0.1111111111 0.2000000000 +3 1 10 0.2222222222 0.3000000000 +4 1 10 0.3333333333 0.4000000000 +5 2 20 0.4444444444 0.5000000000 +6 2 20 0.5555555556 0.6000000000 +7 2 20 0.6666666667 0.7000000000 +8 2 10 0.7777777778 0.8000000000 +9 4 20 0.8888888889 0.9000000000 +10 4 20 1.0000000000 1.0000000000 +select pk, a, b, +percent_rank() over (partition by a order by a), +cume_dist() over (partition by a order by a) +from t1; +pk a b percent_rank() over (partition by a order by a) cume_dist() over (partition by a order by a) +1 0 10 0.0000000000 1.0000000000 +2 0 10 0.0000000000 
1.0000000000 +3 1 10 0.0000000000 1.0000000000 +4 1 10 0.0000000000 1.0000000000 +8 2 10 0.0000000000 1.0000000000 +5 2 20 0.0000000000 1.0000000000 +6 2 20 0.0000000000 1.0000000000 +7 2 20 0.0000000000 1.0000000000 +9 4 20 0.0000000000 1.0000000000 +10 4 20 0.0000000000 1.0000000000 +drop table t1; diff --git a/mysql-test/r/win_rank.result b/mysql-test/r/win_rank.result new file mode 100644 index 00000000000..725683d3869 --- /dev/null +++ b/mysql-test/r/win_rank.result @@ -0,0 +1,104 @@ +# +# Try DENSE_RANK() function +# +create table t1 ( +pk int primary key, +a int, +b int +); +insert into t1 values +( 1 , 0, 10), +( 2 , 0, 10), +( 3 , 1, 10), +( 4 , 1, 10), +( 8 , 2, 10), +( 5 , 2, 20), +( 6 , 2, 20), +( 7 , 2, 20), +( 9 , 4, 20), +(10 , 4, 20); +select pk, a, b, rank() over (order by a) as rank, +dense_rank() over (order by a) as dense_rank +from t1; +pk a b rank dense_rank +1 0 10 1 1 +2 0 10 1 1 +3 1 10 3 2 +4 1 10 3 2 +8 2 10 5 3 +5 2 20 5 3 +6 2 20 5 3 +7 2 20 5 3 +9 4 20 9 4 +10 4 20 9 4 +select pk, a, b, rank() over (partition by b order by a) as rank, +dense_rank() over (partition by b order by a) as dense_rank +from t1; +pk a b rank dense_rank +1 0 10 1 1 +2 0 10 1 1 +3 1 10 3 2 +4 1 10 3 2 +8 2 10 5 3 +5 2 20 1 1 +6 2 20 1 1 +7 2 20 1 1 +9 4 20 4 2 +10 4 20 4 2 +drop table t1; +# +# Test with null values in the table. +# +create table t2 (s1 int, s2 char(5)); +insert into t2 values (1,'a'); +insert into t2 values (null,null); +insert into t2 values (1,null); +insert into t2 values (null,'a'); +insert into t2 values (null,'c'); +insert into t2 values (2,'b'); +insert into t2 values (-1,''); +select *, rank() over (order by s1) as rank, +dense_rank() over (order by s1) as dense_rank +from t2; +s1 s2 rank dense_rank +1 a 5 3 +NULL NULL 1 1 +1 NULL 5 3 +NULL a 1 1 +NULL c 1 1 +2 b 7 4 +-1 4 2 +select *, rank() over (partition by s2 order by s1) as rank, +dense_rank() over (partition by s2 order by s1) as dense_rank +from t2; +s1 s2 rank dense_rank +1 a 2 2 +NULL NULL 1 1 +1 NULL 2 2 +NULL a 1 1 +NULL c 1 1 +2 b 1 1 +-1 1 1 +select *, rank() over (order by s2) as rank, +dense_rank() over (order by s2) as dense_rank +from t2; +s1 s2 rank dense_rank +1 a 4 3 +NULL NULL 1 1 +1 NULL 1 1 +NULL a 4 3 +NULL c 7 5 +2 b 6 4 +-1 3 2 +select *, rank() over (partition by s1 order by s2) as rank, +dense_rank() over (partition by s1 order by s2) as dense_rank +from t2; +s1 s2 rank dense_rank +1 a 2 2 +NULL NULL 1 1 +1 NULL 1 1 +NULL a 2 2 +NULL c 3 3 +2 b 1 1 +-1 1 1 +drop table t2; diff --git a/mysql-test/r/win_sum.result b/mysql-test/r/win_sum.result new file mode 100644 index 00000000000..66a48fe8293 --- /dev/null +++ b/mysql-test/r/win_sum.result @@ -0,0 +1,95 @@ +create table t1 ( +pk int primary key, +a int, +b int, +c real +); +insert into t1 values +(101 , 0, 10, 1.1), +(102 , 0, 10, 2.1), +(103 , 1, 10, 3.1), +(104 , 1, 10, 4.1), +(108 , 2, 10, 5.1), +(105 , 2, 20, 6.1), +(106 , 2, 20, 7.1), +(107 , 2, 20, 8.15), +(109 , 4, 20, 9.15), +(110 , 4, 20, 10.15), +(111 , 5, NULL, 11.15), +(112 , 5, 1, 12.25), +(113 , 5, NULL, 13.35), +(114 , 5, NULL, 14.50), +(115 , 5, NULL, 15.65), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1.1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2.2), +(123 , 6, 1, 20.1), +(124 , 6, 1, -10.4), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); +select pk, a, b, sum(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; +pk a b sum(b) over (partition by a order by pk ROWS 
BETWEEN 1 PRECEDING AND 1 FOLLOWING) +101 0 10 20 +102 0 10 20 +103 1 10 20 +104 1 10 20 +105 2 20 40 +106 2 20 60 +107 2 20 50 +108 2 10 30 +109 4 20 40 +110 4 20 40 +111 5 NULL 1 +112 5 1 1 +113 5 NULL 1 +114 5 NULL NULL +115 5 NULL NULL +116 6 1 2 +117 6 1 3 +118 6 1 3 +119 6 1 3 +120 6 1 3 +121 6 1 3 +122 6 1 3 +123 6 1 3 +124 6 1 3 +125 6 1 3 +126 6 1 3 +127 6 1 2 +select pk, a, c, sum(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; +pk a c sum(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +101 0 1.1 3.2 +102 0 2.1 3.2 +103 1 3.1 7.199999999999999 +104 1 4.1 7.199999999999999 +105 2 6.1 13.2 +106 2 7.1 21.35 +107 2 8.15 20.35 +108 2 5.1 13.250000000000002 +109 4 9.15 19.3 +110 4 10.15 19.3 +111 5 11.15 23.4 +112 5 12.25 36.75 +113 5 13.35 40.1 +114 5 14.5 43.5 +115 5 15.65 30.15 +116 6 NULL 10 +117 6 10 11.1 +118 6 1.1 11.1 +119 6 NULL 1.0999999999999996 +120 6 NULL NULL +121 6 NULL 2.1999999999999997 +122 6 2.2 22.3 +123 6 20.1 11.9 +124 6 -10.4 9.7 +125 6 NULL -10.400000000000002 +126 6 NULL NULL +127 6 NULL NULL +drop table t1; diff --git a/mysql-test/std_data/bad2_master.info b/mysql-test/std_data/bad2_master.info new file mode 100644 index 00000000000..61722562748 --- /dev/null +++ b/mysql-test/std_data/bad2_master.info @@ -0,0 +1,35 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 +=0 diff --git a/mysql-test/std_data/bad3_master.info b/mysql-test/std_data/bad3_master.info new file mode 100644 index 00000000000..6e632cd9a49 --- /dev/null +++ b/mysql-test/std_data/bad3_master.info @@ -0,0 +1,37 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 + + +0 diff --git a/mysql-test/std_data/bad4_master.info b/mysql-test/std_data/bad4_master.info new file mode 100644 index 00000000000..87572efc8a4 --- /dev/null +++ b/mysql-test/std_data/bad4_master.info @@ -0,0 +1,35 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 +d=1 diff --git a/mysql-test/std_data/bad5_master.info b/mysql-test/std_data/bad5_master.info new file mode 100644 index 00000000000..4ea8113250b --- /dev/null +++ b/mysql-test/std_data/bad5_master.info @@ -0,0 +1,35 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 +using_gtid diff --git a/mysql-test/std_data/bad6_master.info b/mysql-test/std_data/bad6_master.info new file mode 100644 index 00000000000..0f48f4871f0 --- /dev/null +++ b/mysql-test/std_data/bad6_master.info @@ -0,0 +1,36 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 +END_MARKER +do_domain_ids=20 Hulubulu!!?! 
diff --git a/mysql-test/std_data/bad_master.info b/mysql-test/std_data/bad_master.info new file mode 100644 index 00000000000..1541fdf2c61 --- /dev/null +++ b/mysql-test/std_data/bad_master.info @@ -0,0 +1,35 @@ +33 +mysql-bin.000001 +4 +127.0.0.1 +root + +3310 +60 +0 + + + + + +0 +1800.000 + +0 + +0 + + + + + + + + + + + + + +using_gtid=1 + diff --git a/mysql-test/std_data/loaddata/mdev9823.ujis.txt b/mysql-test/std_data/loaddata/mdev9823.ujis.txt new file mode 100644 index 00000000000..5468c999585 --- /dev/null +++ b/mysql-test/std_data/loaddata/mdev9823.ujis.txt @@ -0,0 +1,11 @@ +# This file has incomplete UJIS sequences {8F}, {8FA1}, +# has a valid UJIS sequence {8FA1A1}, +# and has no NL at the end: +# {8F} \n xxx1 {8FA1} \n xxx2 {8FA1A1} \n xxx3 \n {8FA1} EOF + +xxx1 +¡ +xxx2 +¡¡ +xxx3 +¡ \ No newline at end of file diff --git a/mysql-test/std_data/loaddata/mdev9823.utf8mb4.txt b/mysql-test/std_data/loaddata/mdev9823.utf8mb4.txt new file mode 100644 index 00000000000..87739567de1 --- /dev/null +++ b/mysql-test/std_data/loaddata/mdev9823.utf8mb4.txt @@ -0,0 +1,12 @@ +# This file has incomplete utf8mb4 sequences {D0}, {E180}, {F09F98}, +# has a valid utf8mb4 sequence {F09F988E} +# and has no NL at the end: +# {D0} \n xxx1 {E180} xxx2 \n {F09F98} \n xxx3 {F09F988E} {F09F98} EOF +Ð +xxx1 +ဠ+xxx2 +😠+xxx3 +😎 +😠\ No newline at end of file diff --git a/mysql-test/std_data/loaddata/mdev9874.xml b/mysql-test/std_data/loaddata/mdev9874.xml new file mode 100644 index 00000000000..513a0dff2be --- /dev/null +++ b/mysql-test/std_data/loaddata/mdev9874.xml @@ -0,0 +1 @@ +
\ No newline at end of file diff --git a/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc b/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc index 5f87d23dcc1..cbd2c1c817a 100644 --- a/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc +++ b/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc @@ -6,6 +6,7 @@ --connection node_1 # We need a user with a password to perform SST, otherwise we hit LP #1378253 +CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; --let $wsrep_sst_auth_orig = `SELECT @@wsrep_sst_auth` diff --git a/mysql-test/suite/galera/r/binlog_checksum.result b/mysql-test/suite/galera/r/binlog_checksum.result index 5c1981fc17f..e86f3892ac7 100644 --- a/mysql-test/suite/galera/r/binlog_checksum.result +++ b/mysql-test/suite/galera/r/binlog_checksum.result @@ -1,7 +1,9 @@ # On node_1 +connection node_1; SET @binlog_checksum_saved= @@GLOBAL.BINLOG_CHECKSUM; SET @@GLOBAL.BINLOG_CHECKSUM=CRC32; # On node_2 +connection node_2; SET @binlog_checksum_saved= @@GLOBAL.BINLOG_CHECKSUM; SET @@GLOBAL.BINLOG_CHECKSUM=CRC32; USE test; @@ -23,6 +25,7 @@ c1 5 # On node_2 +connection node_2; SELECT * FROM test.t1; c1 1 @@ -31,6 +34,10 @@ c1 4 5 DROP TABLE t1; +connection node_1; SET @@GLOBAL.BINLOG_CHECKSUM = @binlog_checksum_saved; +connection node_2; SET @@GLOBAL.BINLOG_CHECKSUM = @binlog_checksum_saved; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/create.result b/mysql-test/suite/galera/r/create.result index d8a2db99a68..20bc9a3958e 100644 --- a/mysql-test/suite/galera/r/create.result +++ b/mysql-test/suite/galera/r/create.result @@ -25,10 +25,12 @@ SET @@GLOBAL.wsrep_forced_binlog_format=@wsrep_forced_binlog_format_saved; # # MDEV-7673: CREATE TABLE SELECT fails on Galera cluster # +connection node_1; CREATE TABLE t1 (i INT) ENGINE=INNODB DEFAULT CHARSET=utf8 SELECT 1 as i; SELECT * FROM t1; i 1 +connection node_2; SELECT * FROM t1; i 1 @@ -37,6 +39,7 @@ DROP TABLE t1; # MDEV-8166 : Adding index on new table from select crashes Galera # cluster # +connection node_1; CREATE TABLE t1(i int(11) NOT NULL DEFAULT '0') ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO t1(i) VALUES (1), (2), (3); CREATE TABLE t2 (i INT) SELECT i FROM t1; @@ -46,6 +49,7 @@ i 1 2 3 +connection node_2; SELECT * FROM t2; i 1 diff --git a/mysql-test/suite/galera/r/enforce_storage_engine.result b/mysql-test/suite/galera/r/enforce_storage_engine.result index a3513fc2789..746aa22bf20 100644 --- a/mysql-test/suite/galera/r/enforce_storage_engine.result +++ b/mysql-test/suite/galera/r/enforce_storage_engine.result @@ -2,11 +2,13 @@ # MDEV-8831 : enforce_storage_engine doesn't block table creation on # other nodes (galera cluster) # +connection node_1; SET @@enforce_storage_engine=INNODB; CREATE TABLE t1(i INT) ENGINE=INNODB; CREATE TABLE t2(i INT) ENGINE=MYISAM; ERROR 42000: Unknown storage engine 'MyISAM' INSERT INTO t1 VALUES(1); +connection node_2; SHOW TABLES; Tables_in_test t1 @@ -14,6 +16,7 @@ SELECT COUNT(*)=1 FROM t1; COUNT(*)=1 1 CREATE TABLE t2(i INT) ENGINE=MYISAM; +connection node_1; SHOW TABLES; Tables_in_test t1 diff --git a/mysql-test/suite/galera/r/fk.result b/mysql-test/suite/galera/r/fk.result index d6a3a25b01a..ab8e1c8f680 100644 --- a/mysql-test/suite/galera/r/fk.result +++ b/mysql-test/suite/galera/r/fk.result @@ -1,6 +1,7 @@ USE test; # On node_1 +connection node_1; CREATE TABLE networks ( `tenant_id` varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL, `id` varchar(36) COLLATE utf8_unicode_ci NOT NULL, @@ -60,9 
+61,11 @@ INSERT INTO ipallocations VALUES ('f37aa3fe-ab99-4d0f-a566-6cd3169d7516','10.25. select * from ports where ports.id = 'f37aa3fe-ab99-4d0f-a566-6cd3169d7516'; tenant_id id name network_id mac_address admin_state_up status device_id device_owner f37aa3fe-ab99-4d0f-a566-6cd3169d7516 f37aa3fe-ab99-4d0f-a566-6cd3169d7516 fa:16:3e:e3:cc:bb 1 DOWN f37aa3fe-ab99-4d0f-a566-6cd3169d7516 network:router_gateway +connection node_2; select * from ports where ports.id = 'f37aa3fe-ab99-4d0f-a566-6cd3169d7516'; tenant_id id name network_id mac_address admin_state_up status device_id device_owner f37aa3fe-ab99-4d0f-a566-6cd3169d7516 f37aa3fe-ab99-4d0f-a566-6cd3169d7516 fa:16:3e:e3:cc:bb 1 DOWN f37aa3fe-ab99-4d0f-a566-6cd3169d7516 network:router_gateway +connection node_1; DELETE FROM ports WHERE ports.id = 'f37aa3fe-ab99-4d0f-a566-6cd3169d7516'; select * from networks; tenant_id id name status admin_state_up shared @@ -78,6 +81,7 @@ select * from ports; tenant_id id name network_id mac_address admin_state_up status device_id device_owner # On node_2 +connection node_2; select * from networks; tenant_id id name status admin_state_up shared f37aa3fe-ab99-4d0f-a566-6cd3169d7516 f37aa3fe-ab99-4d0f-a566-6cd3169d7516 MyNet ACTIVE 0 0 @@ -90,7 +94,10 @@ select * from ipallocations; port_id ip_address subnet_id network_id select * from ports; tenant_id id name network_id mac_address admin_state_up status device_id device_owner +connection node_1; drop table ipallocations; drop table subnets; drop table ports; drop table networks; +disconnect node_2; +disconnect node_1; diff --git a/mysql-test/suite/galera/r/galera_account_management.result b/mysql-test/suite/galera/r/galera_account_management.result index 9b3ae9ba46e..7fb472a5c2d 100644 --- a/mysql-test/suite/galera/r/galera_account_management.result +++ b/mysql-test/suite/galera/r/galera_account_management.result @@ -1,39 +1,55 @@ +connection node_1; CREATE USER user1, user2 IDENTIFIED BY 'password'; +connection node_2; SELECT COUNT(*) = 2 FROM mysql.user WHERE user IN ('user1', 'user2'); COUNT(*) = 2 1 +connection node_1; RENAME USER user2 TO user3; +connection node_2; SELECT COUNT(*) = 0 FROM mysql.user WHERE user = 'user2'; COUNT(*) = 0 1 SELECT COUNT(*) = 1 FROM mysql.user WHERE user = 'user3'; COUNT(*) = 1 1 +connection node_1; SET PASSWORD FOR user3 = PASSWORD('foo'); +connection node_1; SELECT password != '' FROM mysql.user WHERE user = 'user3'; password != '' 1 +connection node_1; DROP USER user1, user3; +connection node_2; SELECT COUNT(*) = 0 FROM mysql.user WHERE user IN ('user1', 'user2'); COUNT(*) = 0 1 +connection node_1; GRANT ALL ON *.* TO user4 IDENTIFIED BY 'password'; +connection node_2; SELECT COUNT(*) = 1 FROM mysql.user WHERE user = 'user4'; COUNT(*) = 1 1 SELECT Select_priv = 'Y' FROM mysql.user WHERE user = 'user4'; Select_priv = 'Y' 1 +connection node_1; CREATE USER user5; GRANT PROXY ON user4 TO user5; +connection node_2; SELECT COUNT(*) = 1 FROM mysql.proxies_priv WHERE user = 'user5'; COUNT(*) = 1 1 +connection node_1; REVOKE ALL PRIVILEGES ON *.* FROM user4; +connection node_2; SELECT Select_priv = 'N' FROM mysql.user WHERE user = 'user4'; Select_priv = 'N' 1 +connection node_1; REVOKE PROXY ON user4 FROM user5; +connection node_2; SELECT COUNT(*) = 0 FROM mysql.proxies_priv WHERE user = 'user5'; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_alter_engine_innodb.result b/mysql-test/suite/galera/r/galera_alter_engine_innodb.result index 2b30ac5814d..ff6ab792c0e 100644 --- 
a/mysql-test/suite/galera/r/galera_alter_engine_innodb.result +++ b/mysql-test/suite/galera/r/galera_alter_engine_innodb.result @@ -1,6 +1,7 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); ALTER TABLE t1 ENGINE=InnoDB; +connection node_2; SELECT ENGINE = 'InnoDB' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; ENGINE = 'InnoDB' 1 diff --git a/mysql-test/suite/galera/r/galera_alter_engine_myisam.result b/mysql-test/suite/galera/r/galera_alter_engine_myisam.result index 280cb58208c..389383858ac 100644 --- a/mysql-test/suite/galera/r/galera_alter_engine_myisam.result +++ b/mysql-test/suite/galera/r/galera_alter_engine_myisam.result @@ -2,10 +2,12 @@ SET GLOBAL wsrep_replicate_myisam = TRUE; CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM; INSERT INTO t1 VALUES (1); ALTER TABLE t1 ENGINE=InnoDB; +connection node_2; SELECT ENGINE = 'InnoDB' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; ENGINE = 'InnoDB' 1 SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_alter_table_force.result b/mysql-test/suite/galera/r/galera_alter_table_force.result index 401ab46d868..d0a2f81b631 100644 --- a/mysql-test/suite/galera/r/galera_alter_table_force.result +++ b/mysql-test/suite/galera/r/galera_alter_table_force.result @@ -1,6 +1,7 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); ALTER TABLE t1 FORCE; +connection node_2; SELECT ENGINE = 'InnoDB' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; ENGINE = 'InnoDB' 1 diff --git a/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result b/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result index cecbfc1f3cf..18dcc55d5d4 100644 --- a/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result +++ b/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result @@ -1,12 +1,17 @@ +connection node_1; SET SESSION wsrep_sync_wait = 0; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; FLUSH TABLE t1 WITH READ LOCK; +connection node_2; INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (2); +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; SET SESSION wsrep_sync_wait = 0; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_1; UNLOCK TABLES; SET SESSION wsrep_sync_wait = 7; SELECT COUNT(*) = 2 FROM t1; diff --git a/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result b/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result index ee967356c85..1cf88dbf5d3 100644 --- a/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result +++ b/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result @@ -1,10 +1,13 @@ +connection node_1; SET SESSION wsrep_sync_wait = 0; SET SESSION lock_wait_timeout = 60; SET SESSION innodb_lock_wait_timeout=60; SET SESSION wait_timeout=60; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; FLUSH TABLE t1 WITH READ LOCK; +connection node_2; ALTER TABLE t1 ADD COLUMN f2 INTEGER; +connection node_1; SELECT 1 FROM DUAL; 1 1 diff --git a/mysql-test/suite/galera/r/galera_as_master.result b/mysql-test/suite/galera/r/galera_as_master.result index d87a744000c..92a1a0e7cb3 100644 --- a/mysql-test/suite/galera/r/galera_as_master.result +++ b/mysql-test/suite/galera/r/galera_as_master.result @@ -1,4 +1,6 @@ +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; START SLAVE; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES(1); # Disable binary logging for current session @@ -10,6 
+12,7 @@ CREATE TABLE test.t3 AS SELECT * from t1; SET SQL_LOG_BIN=ON; INSERT INTO t1 VALUES(3); CREATE TABLE test.t4 AS SELECT * from t1; +connection node_2; SELECT * FROM t1; f1 1 @@ -27,6 +30,7 @@ f1 1 2 3 +connection node_3; SHOW TABLES; Tables_in_test t1 @@ -41,8 +45,10 @@ f1 2 3 # Cleanup +connection node_1; DROP TABLE t1, t4; SET SQL_LOG_BIN=OFF; DROP TABLE t2, t3; +connection node_3; STOP SLAVE; RESET SLAVE ALL; diff --git a/mysql-test/suite/galera/r/galera_as_master_large.result b/mysql-test/suite/galera/r/galera_as_master_large.result index 4d5533899cf..dad74211af9 100644 --- a/mysql-test/suite/galera/r/galera_as_master_large.result +++ b/mysql-test/suite/galera/r/galera_as_master_large.result @@ -2,7 +2,9 @@ # MDEV-9044 : Getting binlog corruption on my Galera cluster (10.1.8) # making it impossible to async slave. # +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; START SLAVE; +connection node_1; SELECT @@GLOBAL.BINLOG_CACHE_SIZE; @@GLOBAL.BINLOG_CACHE_SIZE 8192 @@ -12,12 +14,14 @@ START TRANSACTION; INSERT INTO t1 VALUES(1, REPEAT('-', 10000)); COMMIT; INSERT INTO t2 VALUES(1); +connection node_2; SELECT c1, LENGTH(c2) FROM t1; c1 LENGTH(c2) 1 10000 SELECT * FROM t2; c1 1 +connection node_3; SELECT c1, LENGTH(c2) FROM t1; c1 LENGTH(c2) 1 10000 @@ -25,6 +29,8 @@ SELECT * FROM t2; c1 1 # Cleanup +connection node_1; DROP TABLE t1, t2; +connection node_3; STOP SLAVE; RESET SLAVE ALL; diff --git a/mysql-test/suite/galera/r/galera_as_slave.result b/mysql-test/suite/galera/r/galera_as_slave.result index 2d7d689aa36..9ccb5106234 100644 --- a/mysql-test/suite/galera/r/galera_as_slave.result +++ b/mysql-test/suite/galera/r/galera_as_slave.result @@ -1,15 +1,24 @@ +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2; START SLAVE; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES(1); +connection node_2; INSERT INTO t1 VALUES (2); +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 INSERT INTO t1 VALUES (3); +connection node_2; SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 1 +connection node_1; DROP TABLE t1; +connection node_2; STOP SLAVE; RESET SLAVE ALL; +connection node_1; RESET MASTER; diff --git a/mysql-test/suite/galera/r/galera_as_slave_gtid.result b/mysql-test/suite/galera/r/galera_as_slave_gtid.result index fbac7b1b6b5..0ef9d208bf4 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_gtid.result +++ b/mysql-test/suite/galera/r/galera_as_slave_gtid.result @@ -1,16 +1,24 @@ +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2; START SLAVE; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES(1); SELECT LENGTH(@@global.gtid_binlog_state) > 1; LENGTH(@@global.gtid_binlog_state) > 1 1 +connection node_2; gtid_binlog_state_equal 1 +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 gtid_binlog_state_equal 1 +connection node_1; DROP TABLE t1; +connection node_3; +connection node_2; STOP SLAVE; RESET SLAVE ALL; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result index 8c07d87eec3..210492937b0 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result @@ -1,7 +1,10 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET 
AUTOCOMMIT=OFF; FLUSH TABLES t1 FOR EXPORT; +connection node_1; INSERT INTO t1 VALUES (2); +connection node_2; SET SESSION wsrep_sync_wait = 0; UNLOCK TABLES; COMMIT; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result b/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result index e3819172510..ae1ca6d2157 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result @@ -1,7 +1,10 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET AUTOCOMMIT=OFF; FLUSH TABLES WITH READ LOCK;; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; UNLOCK TABLES; wsrep_local_aborts_increment 1 diff --git a/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result b/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result index 2e44a773b23..ce3ac6ab2cb 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result @@ -1,11 +1,15 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2a; SELECT GET_LOCK("foo", 1000); GET_LOCK("foo", 1000) 1 +connection node_2; SET AUTOCOMMIT=OFF; INSERT INTO t1 VALUES (1); SELECT GET_LOCK("foo", 1000);; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction wsrep_local_aborts_increment 1 diff --git a/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result b/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result index 7510e48ee83..81b5816ddbe 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result @@ -1,7 +1,10 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET AUTOCOMMIT=OFF; LOCK TABLE t1 WRITE; +connection node_1; INSERT INTO t1 VALUES (2); +connection node_2; UNLOCK TABLES; COMMIT; SELECT COUNT(*) = 1 FROM t1; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_sleep.result b/mysql-test/suite/galera/r/galera_bf_abort_sleep.result index 8e85a5feda2..f069198dff6 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_sleep.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_sleep.result @@ -1,8 +1,11 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET AUTOCOMMIT=OFF; INSERT INTO t1 VALUES (1); SELECT SLEEP(1000);; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction wsrep_local_aborts_increment 1 diff --git a/mysql-test/suite/galera/r/galera_binlog_cache_size.result b/mysql-test/suite/galera/r/galera_binlog_cache_size.result index 9726cf2a440..6aac74ab5f0 100644 --- a/mysql-test/suite/galera/r/galera_binlog_cache_size.result +++ b/mysql-test/suite/galera/r/galera_binlog_cache_size.result @@ -3,6 +3,7 @@ CREATE TABLE ten (f1 INTEGER); INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); SET GLOBAL binlog_cache_size=4096; SET GLOBAL max_binlog_cache_size=4096; +connection node_1a; SET AUTOCOMMIT=ON; START TRANSACTION; INSERT INTO t1 SELECT REPEAT('a', 767) FROM ten; diff --git a/mysql-test/suite/galera/r/galera_binlog_checksum.result b/mysql-test/suite/galera/r/galera_binlog_checksum.result index a6ab62350b1..b0ea2293119 100644 --- a/mysql-test/suite/galera/r/galera_binlog_checksum.result +++ b/mysql-test/suite/galera/r/galera_binlog_checksum.result @@ -1,9 +1,12 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) 
ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; UPDATE t1 SET f1 = 2 WHERE f1 = 1; +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result b/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result index 984a943fcbe..7b88af5d5af 100644 --- a/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result +++ b/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result @@ -1,5 +1,6 @@ CREATE TABLE t1 (f1 VARCHAR(1000)); INSERT INTO t1 VALUES (REPEAT('x', 1000)); +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = REPEAT('x', 1000); COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_concurrent_ctas.result b/mysql-test/suite/galera/r/galera_concurrent_ctas.result index 8b0a4c07ac2..8a3ac1ae0d3 100644 --- a/mysql-test/suite/galera/r/galera_concurrent_ctas.result +++ b/mysql-test/suite/galera/r/galera_concurrent_ctas.result @@ -1 +1,3 @@ +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_create_function.result b/mysql-test/suite/galera/r/galera_create_function.result index 8e4a823d00f..576ea44cce5 100644 --- a/mysql-test/suite/galera/r/galera_create_function.result +++ b/mysql-test/suite/galera/r/galera_create_function.result @@ -1,3 +1,4 @@ +connection node_1; CREATE USER 'user1'; CREATE DEFINER = 'user1' @@ -18,12 +19,14 @@ DETERMINISTIC NO SQL SQL SECURITY INVOKER RETURN 123; +connection node_1; SHOW CREATE FUNCTION f1; Function sql_mode Create Function character_set_client collation_connection Database Collation f1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`user1`@`%` FUNCTION `f1`(param INTEGER) RETURNS varchar(200) CHARSET latin1 MODIFIES SQL DATA COMMENT 'f1_comment' RETURN 'abc' latin1 latin1_swedish_ci latin1_swedish_ci +connection node_2; SELECT 1 FROM DUAL; 1 1 @@ -33,6 +36,7 @@ f1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`user1`@`%` FUNCTIO MODIFIES SQL DATA COMMENT 'f1_comment' RETURN 'abc' latin1 latin1_swedish_ci latin1_swedish_ci +connection node_1; SHOW CREATE FUNCTION f2; Function sql_mode Create Function character_set_client collation_connection Database Collation f2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` FUNCTION `f2`(param VARCHAR(100)) RETURNS int(11) @@ -40,6 +44,7 @@ f2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` DETERMINISTIC SQL SECURITY INVOKER RETURN 123 latin1 latin1_swedish_ci latin1_swedish_ci +connection node_2; SHOW CREATE FUNCTION f2; Function sql_mode Create Function character_set_client collation_connection Database Collation f2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` FUNCTION `f2`(param VARCHAR(100)) RETURNS int(11) @@ -53,6 +58,7 @@ f1(1) = 'abc' SELECT f2('abc') = 123; f2('abc') = 123 1 +connection node_1; DROP FUNCTION f1; DROP FUNCTION f2; DROP USER 'user1'; diff --git a/mysql-test/suite/galera/r/galera_create_procedure.result b/mysql-test/suite/galera/r/galera_create_procedure.result index 6191ef48ee5..90f29ffa617 100644 --- a/mysql-test/suite/galera/r/galera_create_procedure.result +++ b/mysql-test/suite/galera/r/galera_create_procedure.result @@ -1,3 +1,4 @@ +connection node_1; CREATE USER 'user1'; CREATE TABLE t1 (f1 INTEGER); CREATE @@ -16,12 +17,14 @@ PROCEDURE p2 (param VARCHAR(100)) DETERMINISTIC NO SQL SQL SECURITY INVOKER BEGIN END ; +connection 
node_1; SHOW CREATE PROCEDURE p1; Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation p1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`user1`@`%` PROCEDURE `p1`(IN param1 INTEGER, OUT param2 INTEGER, INOUT param3 INTEGER) MODIFIES SQL DATA COMMENT 'p1_comment' INSERT INTO t1 VALUES (1) latin1 latin1_swedish_ci latin1_swedish_ci +connection node_2; SELECT 1 FROM DUAL; 1 1 @@ -31,6 +34,7 @@ p1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`user1`@`%` PROCEDU MODIFIES SQL DATA COMMENT 'p1_comment' INSERT INTO t1 VALUES (1) latin1 latin1_swedish_ci latin1_swedish_ci +connection node_1; SHOW CREATE PROCEDURE p2; Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation p2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` PROCEDURE `p2`(param VARCHAR(100)) @@ -38,6 +42,7 @@ p2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` DETERMINISTIC SQL SECURITY INVOKER BEGIN END latin1 latin1_swedish_ci latin1_swedish_ci +connection node_2; SHOW CREATE PROCEDURE p2; Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation p2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` PROCEDURE `p2`(param VARCHAR(100)) @@ -47,6 +52,7 @@ p2 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` BEGIN END latin1 latin1_swedish_ci latin1_swedish_ci CALL p1(@a, @b, @c); CALL p2('abc'); +connection node_1; DROP PROCEDURE p1; DROP PROCEDURE p2; DROP USER 'user1'; diff --git a/mysql-test/suite/galera/r/galera_create_table_like.result b/mysql-test/suite/galera/r/galera_create_table_like.result index b335101fa62..131ac311bca 100644 --- a/mysql-test/suite/galera/r/galera_create_table_like.result +++ b/mysql-test/suite/galera/r/galera_create_table_like.result @@ -11,6 +11,7 @@ CREATE TABLE real_table3 LIKE schema1.myisam_table; CREATE TEMPORARY TABLE temp_table1 LIKE schema1.real_table; CREATE TEMPORARY TABLE temp_table2 LIKE schema1.temp_table; CREATE TEMPORARY TABLE temp_table3 LIKE schema1.myisam_table; +connection node_2; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'real_table' AND TABLE_SCHEMA = 'schema1'; COUNT(*) = 1 1 @@ -38,6 +39,7 @@ COUNT(*) = 0 SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'temp_table3' AND TABLE_SCHEMA = 'schema2'; COUNT(*) = 0 1 +connection node_1; DROP TABLE schema1.real_table; DROP TABLE schema1.myisam_table; DROP TABLE schema2.real_table1; diff --git a/mysql-test/suite/galera/r/galera_create_trigger.result b/mysql-test/suite/galera/r/galera_create_trigger.result index 7e656081871..d07a007543e 100644 --- a/mysql-test/suite/galera/r/galera_create_trigger.result +++ b/mysql-test/suite/galera/r/galera_create_trigger.result @@ -7,6 +7,7 @@ CREATE DEFINER=root@localhost TRIGGER definer_root BEFORE INSERT ON definer_root CREATE DEFINER=user1 TRIGGER definer_user BEFORE INSERT ON definer_user FOR EACH ROW SET NEW.trigger_user = CURRENT_USER(); CREATE DEFINER=current_user TRIGGER definer_current_user BEFORE INSERT ON definer_current_user FOR EACH ROW SET NEW.trigger_user = CURRENT_USER(); CREATE TRIGGER definer_default BEFORE INSERT ON definer_default FOR EACH ROW SET NEW.trigger_user = CURRENT_USER(); +connection node_2; INSERT INTO definer_root (f1) VALUES (1); SELECT DEFINER = 'root@localhost' FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME = 'definer_root'; DEFINER = 'root@localhost' @@ -35,6 
+36,7 @@ DEFINER = 'root@localhost' SELECT trigger_user = 'root@localhost' FROM definer_default; trigger_user = 'root@localhost' 1 +connection node_1; DROP TABLE definer_current_user; DROP TABLE definer_user; DROP TABLE definer_root; diff --git a/mysql-test/suite/galera/r/galera_delete_limit.result b/mysql-test/suite/galera/r/galera_delete_limit.result index 72bee18eab6..f6fb2e56346 100644 --- a/mysql-test/suite/galera/r/galera_delete_limit.result +++ b/mysql-test/suite/galera/r/galera_delete_limit.result @@ -1,8 +1,11 @@ +connection node_1; CREATE TABLE ten (f1 INTEGER) Engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; INSERT INTO t1 SELECT f1 FROM ten ORDER BY RAND(); +connection node_2; DELETE FROM t1 ORDER BY RAND() LIMIT 5; +connection node_1; sum_matches 1 max_matches @@ -10,7 +13,9 @@ max_matches DROP TABLE t1; CREATE TABLE t2 (f1 INTEGER) Engine=InnoDB; INSERT INTO t2 SELECT f1 FROM ten ORDER BY RAND(); +connection node_2; DELETE FROM t2 ORDER BY RAND() LIMIT 5; +connection node_1; sum_matches 1 max_matches diff --git a/mysql-test/suite/galera/r/galera_drop_multi.result b/mysql-test/suite/galera/r/galera_drop_multi.result index d82ae3bec1a..7793ef93b90 100644 --- a/mysql-test/suite/galera/r/galera_drop_multi.result +++ b/mysql-test/suite/galera/r/galera_drop_multi.result @@ -8,6 +8,7 @@ START TRANSACTION; DROP TABLE t1, t2, t3, t4; INSERT INTO t5 VALUES (1); COMMIT; +connection node_2; SHOW CREATE TABLE t1; ERROR 42S02: Table 'test.t1' doesn't exist SHOW CREATE TABLE t2; @@ -17,4 +18,5 @@ ERROR 42S02: Table 'test.t3' doesn't exist SHOW CREATE TABLE t4; ERROR 42S02: Table 'test.t4' doesn't exist CALL mtr.add_suppression("Slave SQL: Error 'Unknown table 'test.t2,test.t4'' on query\. Default database: 'test'\. 
Query: 'DROP TABLE t1, t2, t3, t4', Error_code: 1051"); +connection node_1; DROP TABLE t5; diff --git a/mysql-test/suite/galera/r/galera_enum.result b/mysql-test/suite/galera/r/galera_enum.result index e853c5c9943..e8ccb12b990 100644 --- a/mysql-test/suite/galera/r/galera_enum.result +++ b/mysql-test/suite/galera/r/galera_enum.result @@ -1,9 +1,11 @@ +connection node_1; CREATE TABLE t1 (f1 ENUM('', 'one', 'two'), KEY (f1)) ENGINE=InnoDB; INSERT INTO t1 VALUES (''); INSERT INTO t1 VALUES ('one'), ('two'); INSERT INTO t1 VALUES (0), (1), (2); Warnings: Warning 1265 Data truncated for column 'f1' at row 1 +connection node_2; SELECT COUNT(*) = 6 FROM t1; COUNT(*) = 6 1 @@ -14,23 +16,30 @@ SELECT COUNT(*) = 2 FROM t1 where f1 = 'one'; COUNT(*) = 2 1 DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 ENUM('', 'one', 'two', 'three', 'four') PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (''), ('one'), ('two'); +connection node_2; SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 1 SELECT COUNT(*) = 1 FROM t1 WHERE f1 = ''; COUNT(*) = 1 1 +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'three' where f1 = ''; +connection node_2; SET AUTOCOMMIt=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'four' where f1 = ''; +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection node_1; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 'three'; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_events.result b/mysql-test/suite/galera/r/galera_events.result index 09d8406d5d0..f01627aba70 100644 --- a/mysql-test/suite/galera/r/galera_events.result +++ b/mysql-test/suite/galera/r/galera_events.result @@ -1,18 +1,27 @@ +connection node_1; CREATE EVENT event1 ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT 1; +connection node_2; SELECT DEFINER= 'root@localhost', ORIGINATOR = 1, STATUS = 'SLAVESIDE_DISABLED', EVENT_TYPE = 'ONE TIME', ON_COMPLETION = 'NOT PRESERVE' FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_NAME = 'event1'; DEFINER= 'root@localhost' ORIGINATOR = 1 STATUS = 'SLAVESIDE_DISABLED' EVENT_TYPE = 'ONE TIME' ON_COMPLETION = 'NOT PRESERVE' 1 1 1 1 1 +connection node_1; ALTER EVENT event1 DISABLE; +connection node_2; SELECT DEFINER= 'root@localhost', ORIGINATOR = 1, STATUS = 'SLAVESIDE_DISABLED', EVENT_TYPE = 'ONE TIME', ON_COMPLETION = 'NOT PRESERVE' FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_NAME = 'event1'; DEFINER= 'root@localhost' ORIGINATOR = 1 STATUS = 'SLAVESIDE_DISABLED' EVENT_TYPE = 'ONE TIME' ON_COMPLETION = 'NOT PRESERVE' 1 1 1 1 1 +connection node_2; SET GLOBAL event_scheduler = ON; CREATE EVENT event2 ON SCHEDULE AT CURRENT_TIMESTAMP ON COMPLETION NOT PRESERVE DO SELECT 1; +connection node_1; SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_NAME = 'event2'; COUNT(*) = 0 1 +connection node_1; DROP EVENT event1; +connection node_2; SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.EVENTS WHERE EVENT_NAME = 'event1'; COUNT(*) = 0 1 +connection node_2; SET GLOBAL event_scheduler = OFF;; diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result index 89f4301a0b4..73375ae55c5 100644 --- a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result +++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result @@ -18,7 +18,9 @@ ON DELETE CASCADE INSERT INTO grandparent VALUES (1),(2); INSERT INTO parent VALUES (1,1), (2,2); INSERT INTO child VALUES (1,1), (2,2); +connection node_2; DELETE FROM 
grandparent WHERE id = 1; +connection node_1; SELECT COUNT(*) = 0 FROM parent WHERE grandparent_id = 1; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_update.result b/mysql-test/suite/galera/r/galera_fk_cascade_update.result index 2ab2ad31a13..5fe8b532473 100644 --- a/mysql-test/suite/galera/r/galera_fk_cascade_update.result +++ b/mysql-test/suite/galera/r/galera_fk_cascade_update.result @@ -18,7 +18,9 @@ ON UPDATE CASCADE INSERT INTO grandparent VALUES (1),(2); INSERT INTO parent VALUES (1,1), (2,2); INSERT INTO child VALUES (1,1), (2,2); +connection node_2; UPDATE grandparent SET id = 3 WHERE id = 1; +connection node_1; SELECT COUNT(*) = 1 FROM parent WHERE grandparent_id = 3; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_fk_conflict.result b/mysql-test/suite/galera/r/galera_fk_conflict.result index ae6c4823a54..d9919c0348b 100644 --- a/mysql-test/suite/galera/r/galera_fk_conflict.result +++ b/mysql-test/suite/galera/r/galera_fk_conflict.result @@ -10,13 +10,17 @@ REFERENCES parent(id) ) ENGINE=InnoDB; INSERT INTO parent VALUES (1), (2); INSERT INTO child VALUES (1,1); +connection node_1; SET AUTOCOMMIT = OFF; START TRANSACTION; DELETE FROM parent WHERE id = 2; +connection node_2; SET AUTOCOMMIT = OFF; START TRANSACTION; INSERT INTO child VALUES (2, 2); +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE child; diff --git a/mysql-test/suite/galera/r/galera_fk_mismatch.result b/mysql-test/suite/galera/r/galera_fk_mismatch.result index 07cdb1b09a2..bdc60c9e099 100644 --- a/mysql-test/suite/galera/r/galera_fk_mismatch.result +++ b/mysql-test/suite/galera/r/galera_fk_mismatch.result @@ -13,11 +13,14 @@ ON DELETE CASCADE ) ENGINE=InnoDB; INSERT INTO parent VALUES (1, 2); INSERT INTO child VALUES (1, 1); +connection node_2; UPDATE parent SET id1 = 3 WHERE id1 = 1; +connection node_1; SELECT COUNT(*) = 1 FROM child WHERE parent_id1 = 3; COUNT(*) = 1 1 DELETE FROM parent WHERE id1 = 3; +connection node_2; SELECT COUNT(*) = 0 FROM child WHERE parent_id1 = 3; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_fk_multicolumn.result b/mysql-test/suite/galera/r/galera_fk_multicolumn.result index a86b87a83ef..f5b6aa23692 100644 --- a/mysql-test/suite/galera/r/galera_fk_multicolumn.result +++ b/mysql-test/suite/galera/r/galera_fk_multicolumn.result @@ -17,7 +17,9 @@ ON UPDATE CASCADE INSERT INTO t0 VALUES (0, 0); INSERT INTO t1 VALUES (0); INSERT INTO t2 VALUES (0); +connection node_2; UPDATE t0 SET f1 = 1, f2 = 2; +connection node_1; SELECT f1 = 1 FROM t1 WHERE f1 = 1; f1 = 1 1 diff --git a/mysql-test/suite/galera/r/galera_fk_multitable.result b/mysql-test/suite/galera/r/galera_fk_multitable.result index e77128d3b04..04ff7adc3e9 100644 --- a/mysql-test/suite/galera/r/galera_fk_multitable.result +++ b/mysql-test/suite/galera/r/galera_fk_multitable.result @@ -11,7 +11,9 @@ ON DELETE CASCADE INSERT INTO t0 VALUES (0), (1); INSERT INTO t1 VALUES (0, 0); INSERT INTO t1 VALUES (1, 0); +connection node_2; DELETE t0.*, t1.* FROM t0, t1 WHERE t0.f0 = 0 AND t1.f1 = 0; +connection node_1; SELECT COUNT(*) = 1 FROM t0; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_fk_no_pk.result b/mysql-test/suite/galera/r/galera_fk_no_pk.result index e4f92863d92..e7b5f0b2b64 100644 --- a/mysql-test/suite/galera/r/galera_fk_no_pk.result +++ b/mysql-test/suite/galera/r/galera_fk_no_pk.result @@ -12,11 +12,14 @@ ON DELETE CASCADE ) ENGINE=InnoDB; INSERT INTO parent VALUES 
(1), (1), (2), (2); INSERT INTO child VALUES (1,1), (2,2), (1,1), (2,2); +connection node_2; DELETE FROM parent WHERE id = 1; SELECT COUNT(*) = 0 FROM child WHERE id = 1; COUNT(*) = 0 1 +connection node_1; UPDATE parent SET id = 3 WHERE id = 2; +connection node_2; SELECT COUNT(*) = 0 FROM child WHERE parent_id = 1; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_fk_selfreferential.result b/mysql-test/suite/galera/r/galera_fk_selfreferential.result index 25c37046e88..3b4dbf2a8e9 100644 --- a/mysql-test/suite/galera/r/galera_fk_selfreferential.result +++ b/mysql-test/suite/galera/r/galera_fk_selfreferential.result @@ -6,7 +6,9 @@ REFERENCES t1(f1) ON DELETE CASCADE ) ENGINE=InnoDB; INSERT INTO t1 VALUES (1, 1), (2, 1); +connection node_2; DELETE FROM t1 WHERE f1 = 1; +connection node_1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_fk_setnull.result b/mysql-test/suite/galera/r/galera_fk_setnull.result index f7fb9d04040..d4f20fe60a3 100644 --- a/mysql-test/suite/galera/r/galera_fk_setnull.result +++ b/mysql-test/suite/galera/r/galera_fk_setnull.result @@ -12,10 +12,12 @@ ON DELETE SET NULL ) ENGINE=InnoDB; INSERT INTO parent VALUES (1),(2); INSERT INTO child VALUES (1,1),(2,2); +connection node_2; DELETE FROM parent WHERE id = 1; SELECT parent_id IS NULL FROM child WHERE id = 1; parent_id IS NULL 1 +connection node_1; SELECT parent_id IS NULL FROM child WHERE id = 1; parent_id IS NULL 1 @@ -23,8 +25,10 @@ UPDATE parent SET id = 3 WHERE id = 2; SELECT parent_id IS NULL FROM child WHERE id = 2; parent_id IS NULL 1 +connection node_2; SELECT parent_id IS NULL FROM child WHERE id = 2; parent_id IS NULL 1 +connection node_1; DROP TABLE child; DROP TABLE parent; diff --git a/mysql-test/suite/galera/r/galera_forced_binlog_format.result b/mysql-test/suite/galera/r/galera_forced_binlog_format.result index 92e78685b58..01f738f6109 100644 --- a/mysql-test/suite/galera/r/galera_forced_binlog_format.result +++ b/mysql-test/suite/galera/r/galera_forced_binlog_format.result @@ -1,3 +1,4 @@ +connection node_1; RESET MASTER; SET SESSION binlog_format = 'STATEMENT'; Warnings: @@ -40,4 +41,6 @@ GRANT ALL PRIVILEGES ON `testdb_9401`.`t1` TO 'dummy'@'localhost' REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost; DROP USER dummy@localhost; DROP DATABASE testdb_9401; +disconnect node_2; +disconnect node_1; # End of tests diff --git a/mysql-test/suite/galera/r/galera_ftwrl.result b/mysql-test/suite/galera/r/galera_ftwrl.result index c216b52650b..0565781c051 100644 --- a/mysql-test/suite/galera/r/galera_ftwrl.result +++ b/mysql-test/suite/galera/r/galera_ftwrl.result @@ -1,7 +1,10 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET GLOBAL wsrep_provider_options = "repl.causal_read_timeout=PT1S"; FLUSH TABLES WITH READ LOCK; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; SHOW TABLES; ERROR HY000: Lock wait timeout exceeded; try restarting transaction SELECT * FROM t1; diff --git a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result index ad60ead4b8a..a1ba8672eae 100644 --- a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result +++ b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result @@ -1,16 +1,22 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1'; FLUSH TABLES WITH READ LOCK; +connection node_1; INSERT INTO t1 
VALUES (2); INSERT INTO t1 VALUES (3); INSERT INTO t1 VALUES (4); INSERT INTO t1 VALUES (5); +connection node_1a; +connection node_2; UNLOCK TABLES; +connection node_1; INSERT INTO t1 VALUES (6); +connection node_2; SELECT COUNT(*) = 6 FROM t1; COUNT(*) = 6 1 diff --git a/mysql-test/suite/galera/r/galera_gtid.result b/mysql-test/suite/galera/r/galera_gtid.result index 546c29cb49a..acc5eae9876 100644 --- a/mysql-test/suite/galera/r/galera_gtid.result +++ b/mysql-test/suite/galera/r/galera_gtid.result @@ -1,9 +1,11 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY); INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 UPDATE t1 SET f1 = 2; +connection node_1; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_insert_ignore.result b/mysql-test/suite/galera/r/galera_insert_ignore.result index b53b5795416..d21fb2b02d0 100644 --- a/mysql-test/suite/galera/r/galera_insert_ignore.result +++ b/mysql-test/suite/galera/r/galera_insert_ignore.result @@ -1,5 +1,7 @@ SET GLOBAL wsrep_sync_wait = 7; +connection node_2; SET GLOBAL wsrep_sync_wait = 7; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); INSERT IGNORE INTO t1 VALUES (1), (2); @@ -9,10 +11,12 @@ SELECT * FROM t1; f1 1 2 +connection node_2; SELECT * FROM t1; f1 1 2 +connection node_2; CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t2 VALUES (0), (2), (3); INSERT IGNORE INTO t1 SELECT f1 FROM t2; @@ -24,14 +28,17 @@ f1 1 2 3 +connection node_1; SELECT * FROM t1; f1 0 1 2 3 +connection node_2; CREATE TABLE t3 (f1 INTEGER UNIQUE) Engine=InnoDB; INSERT INTO t3 VALUES (NULL); +connection node_1; INSERT IGNORE INTO t3 VALUES (1), (NULL), (2); SELECT * FROM t3; f1 @@ -39,6 +46,7 @@ NULL NULL 1 2 +connection node_2; SELECT * FROM t3; f1 NULL @@ -46,6 +54,7 @@ NULL 1 2 SET GLOBAL wsrep_sync_wait = (SELECT @@wsrep_sync_wait); +connection node_1; DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/mysql-test/suite/galera/r/galera_insert_multi.result b/mysql-test/suite/galera/r/galera_insert_multi.result index 33717781f2c..38bb5c26d20 100644 --- a/mysql-test/suite/galera/r/galera_insert_multi.result +++ b/mysql-test/suite/galera/r/galera_insert_multi.result @@ -1,57 +1,76 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1),(2); +connection node_2; INSERT INTO t1 VALUES (3),(4); +connection node_1; SELECT COUNT(*) = 4 FROM t1; COUNT(*) = 4 1 +connection node_2; SELECT COUNT(*) = 4 FROM t1; COUNT(*) = 4 1 DROP TABLE t1; +connection node_2; CREATE TABLE t1 (f1 INTEGER, KEY (f1)) ENGINE=InnoDB; INSERT INTO t1 VALUES (1),(1); +connection node_1; INSERT INTO t1 VALUES (2),(2); +connection node_2; SELECT COUNT(*) = 4 FROM t1; COUNT(*) = 4 1 +connection node_1; SELECT COUNT(*) = 4 FROM t1; COUNT(*) = 4 1 DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (1); ERROR 23000: Duplicate entry '1' for key 'PRIMARY' SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; SET AUTOCOMMIT = OFF; START TRANSACTION; INSERT INTO t1 VALUES (1), (2); +connection node_2; SET AUTOCOMMIT = OFF; START TRANSACTION; INSERT INTO t1 VALUES (2), (1); +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying 
to get lock; try restarting transaction ROLLBACK; INSERT INTO t1 VALUES (1), (2); ERROR 23000: Duplicate entry '1' for key 'PRIMARY' DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; START TRANSACTION; INSERT INTO t1 VALUES (1), (2); +connection node_2; START TRANSACTION; INSERT INTO t1 VALUES (2), (1); +connection node_1; ROLLBACK; +connection node_2; COMMIT; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 +connection node_1; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 diff --git a/mysql-test/suite/galera/r/galera_ist_mysqldump.result b/mysql-test/suite/galera/r/galera_ist_mysqldump.result index 9a5b4e8a76f..788d60051b5 100644 --- a/mysql-test/suite/galera/r/galera_ist_mysqldump.result +++ b/mysql-test/suite/galera/r/galera_ist_mysqldump.result @@ -1,4 +1,5 @@ Setting SST method to mysqldump ... +CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; SET GLOBAL wsrep_sst_method = 'mysqldump'; diff --git a/mysql-test/suite/galera/r/galera_kill_applier.result b/mysql-test/suite/galera/r/galera_kill_applier.result index fe4911639ed..6fa632c242f 100644 --- a/mysql-test/suite/galera/r/galera_kill_applier.result +++ b/mysql-test/suite/galera/r/galera_kill_applier.result @@ -1,3 +1,4 @@ +connection node_1; Got one of the listed errors Got one of the listed errors Got one of the listed errors diff --git a/mysql-test/suite/galera/r/galera_kill_ddl.result b/mysql-test/suite/galera/r/galera_kill_ddl.result index 8dd36497dfb..c5b3e31b80e 100644 --- a/mysql-test/suite/galera/r/galera_kill_ddl.result +++ b/mysql-test/suite/galera/r/galera_kill_ddl.result @@ -1,11 +1,17 @@ +connection node_1; SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true'; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +connection node_2; Killing server ... +connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; +connection node_2; +connection node_2a; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='t1'; COUNT(*) = 2 1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +connection node_1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_kill_smallchanges.result b/mysql-test/suite/galera/r/galera_kill_smallchanges.result index 8409740a035..bcd7d6fd15d 100644 --- a/mysql-test/suite/galera/r/galera_kill_smallchanges.result +++ b/mysql-test/suite/galera/r/galera_kill_smallchanges.result @@ -1,11 +1,17 @@ +connection node_1; SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true'; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +connection node_2; Killing server ... 
+connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; +connection node_2a; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +connection node_1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_lock_table.result b/mysql-test/suite/galera/r/galera_lock_table.result index 16e9037a4de..c15f61d298e 100644 --- a/mysql-test/suite/galera/r/galera_lock_table.result +++ b/mysql-test/suite/galera/r/galera_lock_table.result @@ -1,8 +1,11 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; LOCK TABLE t1 READ; +connection node_1; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); +connection node_2a; SET SESSION wsrep_sync_wait=0; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 @@ -10,6 +13,7 @@ COUNT(*) = 0 SELECT COUNT(*) = 0 FROM t2; COUNT(*) = 0 1 +connection node_2; UNLOCK TABLES; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 diff --git a/mysql-test/suite/galera/r/galera_log_bin.result b/mysql-test/suite/galera/r/galera_log_bin.result index 4772f347375..576a72eb55f 100644 --- a/mysql-test/suite/galera/r/galera_log_bin.result +++ b/mysql-test/suite/galera/r/galera_log_bin.result @@ -3,12 +3,14 @@ INSERT INTO t1 VALUES (1); CREATE TABLE t2 (id INT) ENGINE=InnoDB; INSERT INTO t2 VALUES (1); INSERT INTO t2 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SELECT COUNT(*) = 2 FROM t2; COUNT(*) = 2 1 +connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; FLUSH LOGS; SHOW BINLOG EVENTS IN 'mysqld-bin.000002' LIMIT 4,18; @@ -31,6 +33,7 @@ mysqld-bin.000002 # Write_rows_v1 # # table_id: # flags: STMT_END_F mysqld-bin.000002 # Xid # # COMMIT /* xid=# */ mysqld-bin.000002 # Gtid # # GTID 0-1-6 mysqld-bin.000002 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER +connection node_2; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 @@ -56,4 +59,5 @@ mysqld-bin.000003 # Gtid # # GTID 0-1-6 mysqld-bin.000003 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER DROP TABLE t1; DROP TABLE t2; +connection node_1; RESET MASTER; diff --git a/mysql-test/suite/galera/r/galera_many_indexes.result b/mysql-test/suite/galera/r/galera_many_indexes.result index ab6eec550a1..59d0194687f 100644 --- a/mysql-test/suite/galera/r/galera_many_indexes.result +++ b/mysql-test/suite/galera/r/galera_many_indexes.result @@ -63,6 +63,7 @@ CREATE UNIQUE INDEX i3 ON t1(f1); CREATE UNIQUE INDEX i2 ON t1(f1); CREATE UNIQUE INDEX i1 ON t1(f1); INSERT INTO t1 VALUES (REPEAT('a', 767)); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -91,6 +92,7 @@ INSERT INTO t1 VALUES (REPEAT('b', 767)); ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK +connection node_1; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 @@ -98,6 +100,7 @@ ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK DELETE FROM t1 WHERE f1 = REPEAT('b', 767); +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; SELECT COUNT(*) = 1 FROM t1; @@ -108,16 +111,23 @@ ROLLBACK; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_2; START TRANSACTION; SET AUTOCOMMIT=OFF; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; START TRANSACTION; +connection node_2; START TRANSACTION; +connection node_1; UPDATE t1 SET f1 = REPEAT('e', 767); +connection node_2; UPDATE t1 SET f1 = REPEAT('f', 767); +connection node_1; 
COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_mdl_race.result b/mysql-test/suite/galera/r/galera_mdl_race.result index 535f20de7f1..0c83ead931e 100644 --- a/mysql-test/suite/galera/r/galera_mdl_race.result +++ b/mysql-test/suite/galera/r/galera_mdl_race.result @@ -2,24 +2,32 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); INSERT INTO t1 VALUES (1, 'a'); INSERT INTO t1 VALUES (2, 'a'); +connection node_1; SET AUTOCOMMIT=ON; START TRANSACTION; UPDATE t1 SET f2 = 'b' WHERE f1 = 1; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; LOCK TABLE t2 WRITE; +connection node_1; SET GLOBAL DEBUG = "d,sync.wsrep_before_mdl_wait"; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SELECT * FROM t2;; +connection node_1a; SET GLOBAL DEBUG = "d,sync.wsrep_after_BF_victim_lock"; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead +connection node_2; UPDATE t1 SET f2 = 'c' WHERE f1 = 1; +connection node_1a; SET GLOBAL DEBUG = ""; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait"; SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock"; UNLOCK TABLES; +connection node_1; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; COUNT(*) = 1 @@ -27,6 +35,7 @@ COUNT(*) = 1 SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; COUNT(*) = 1 1 +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a'; COUNT(*) = 1 1 @@ -35,4 +44,5 @@ COUNT(*) = 1 1 DROP TABLE t1; DROP TABLE t2; +connection node_1a; SET DEBUG_SYNC = "RESET"; diff --git a/mysql-test/suite/galera/r/galera_multi_database.result b/mysql-test/suite/galera/r/galera_multi_database.result index a04eb484caf..f6242de663b 100644 --- a/mysql-test/suite/galera/r/galera_multi_database.result +++ b/mysql-test/suite/galera/r/galera_multi_database.result @@ -2,13 +2,17 @@ CREATE DATABASE d1; CREATE TABLE d1.t1(f1 INTEGER) ENGINE=InnoDB; CREATE DATABASE d2; CREATE TABLE d2.t1(f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO d1.t1 VALUES (1); +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO d2.t1 VALUES (1); +connection node_1; COMMIT; +connection node_2; COMMIT; SELECT COUNT(*) = 1 FROM d1.t1; COUNT(*) = 1 @@ -16,6 +20,7 @@ COUNT(*) = 1 SELECT COUNT(*) = 1 FROM d2.t1; COUNT(*) = 1 1 +connection node_1; SELECT COUNT(*) = 1 FROM d1.t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_myisam_autocommit.result b/mysql-test/suite/galera/r/galera_myisam_autocommit.result index 3f8d93bae76..e9578a261e6 100644 --- a/mysql-test/suite/galera/r/galera_myisam_autocommit.result +++ b/mysql-test/suite/galera/r/galera_myisam_autocommit.result @@ -14,6 +14,7 @@ DELETE FROM t1 WHERE f1 = 9; DELETE FROM t2 WHERE f1 = 9; TRUNCATE TABLE t1; TRUNCATE TABLE t1; +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_myisam_transactions.result b/mysql-test/suite/galera/r/galera_myisam_transactions.result index 284f92b414c..25796c309d1 100644 --- 
a/mysql-test/suite/galera/r/galera_myisam_transactions.result +++ b/mysql-test/suite/galera/r/galera_myisam_transactions.result @@ -7,6 +7,7 @@ START TRANSACTION; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); COMMIT; +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -16,12 +17,14 @@ COUNT(*) = 0 SELECT COUNT(*) = 0 FROM t2; COUNT(*) = 0 1 +connection node_1; START TRANSACTION; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); ROLLBACK; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_nopk_bit.result b/mysql-test/suite/galera/r/galera_nopk_bit.result index 5723dac42fd..7662c15c672 100644 --- a/mysql-test/suite/galera/r/galera_nopk_bit.result +++ b/mysql-test/suite/galera/r/galera_nopk_bit.result @@ -1,5 +1,6 @@ CREATE TABLE t1 (f1 BIT) ENGINE=InnoDB; INSERT INTO t1 VALUES (NULL),(0),(b'1'); +connection node_2; SELECT f1 IS NULL, f1 = b'1' FROM t1; f1 IS NULL f1 = b'1' 1 NULL @@ -8,19 +9,24 @@ f1 IS NULL f1 = b'1' DELETE FROM t1 WHERE f1 = b'1'; UPDATE t1 SET f1 = b'1' WHERE f1 IS NULL; UPDATE t1 SET f1 = 1 WHERE f1 = b'0'; +connection node_1; SELECT f1 IS NULL, f1 = b'1' FROM t1; f1 IS NULL f1 = b'1' 0 1 0 1 +connection node_1; CREATE TABLE t2 (f1 BIT) ENGINE=InnoDB; INSERT INTO t2 VALUES (NULL); SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 0 WHERE f1 IS NULL; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 1 WHERE f1 IS NULL; +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_nopk_blob.result b/mysql-test/suite/galera/r/galera_nopk_blob.result index 7491b715ed2..a7570c2cfee 100644 --- a/mysql-test/suite/galera/r/galera_nopk_blob.result +++ b/mysql-test/suite/galera/r/galera_nopk_blob.result @@ -1,26 +1,32 @@ CREATE TABLE t1 (f1 BLOB) ENGINE=InnoDB; INSERT INTO t1 VALUES (NULL),('abc'); +connection node_2; SELECT f1 FROM t1; f1 NULL abc DELETE FROM t1 WHERE f1 IS NULL; UPDATE t1 SET f1 = 'xyz' WHERE f1 = 'abc'; +connection node_1; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SELECT f1 = 'abc' FROM t1; f1 = 'abc' 0 +connection node_1; CREATE TABLE t2 (f1 BLOB) ENGINE=InnoDB; INSERT INTO t2 VALUES (NULL); SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 'abc' WHERE f1 IS NULL; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 'xyz' WHERE f1 IS NULL; +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_nopk_large_varchar.result b/mysql-test/suite/galera/r/galera_nopk_large_varchar.result index abca81e15b0..08cffaa2bd1 100644 --- a/mysql-test/suite/galera/r/galera_nopk_large_varchar.result +++ b/mysql-test/suite/galera/r/galera_nopk_large_varchar.result @@ -1,11 +1,13 @@ CREATE TABLE t1 (f1 VARCHAR(8000)) ENGINE=InnoDB; INSERT INTO t1 VALUES (NULL),(CONCAT(REPEAT('x', 7999), 'a')); +connection node_2; SELECT LENGTH(f1) FROM t1; LENGTH(f1) NULL 8000 DELETE FROM t1 WHERE f1 IS NULL; UPDATE t1 SET f1 = CONCAT(REPEAT('x', 7999), 'b') WHERE f1 = CONCAT(REPEAT('x', 7999), 'a'); +connection node_1; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -15,15 +17,19 @@ LENGTH(f1) = 8000 SELECT f1 = CONCAT(REPEAT('x', 7999), 'b') FROM t1; f1 = CONCAT(REPEAT('x', 7999), 'b') 1 
+connection node_1; CREATE TABLE t2 (f1 BLOB) ENGINE=InnoDB; INSERT INTO t2 VALUES (CONCAT(REPEAT('x', 7999), 'a')); SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 'abc' WHERE f1 = CONCAT(REPEAT('x', 7999), 'a'); +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t2 SET f1 = 'xyz' WHERE f1 = CONCAT(REPEAT('x', 7999), 'a'); +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_nopk_unicode.result b/mysql-test/suite/galera/r/galera_nopk_unicode.result index 68d049a2146..882a91a29b5 100644 --- a/mysql-test/suite/galera/r/galera_nopk_unicode.result +++ b/mysql-test/suite/galera/r/galera_nopk_unicode.result @@ -3,16 +3,21 @@ f1 VARCHAR(255), KEY (f1) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO t1 VALUES ('текст'); +connection node_2; SELECT f1 = 'текст' FROM t1; f1 = 'текст' 1 +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'текст2'; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'текст3'; +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SELECT f1 = 'текст2' FROM t1; diff --git a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result index db4528ac22d..bf4b056a6e4 100644 --- a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result +++ b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result @@ -1,9 +1,12 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET GLOBAL wsrep_slave_threads = 2; LOCK TABLE t1 READ; +connection node_1; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); +connection node_2a; SET SESSION wsrep_sync_wait=0; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%applied write set%'; COUNT(*) = 1 @@ -17,7 +20,9 @@ COUNT(*) = 0 SELECT COUNT(*) = 0 FROM t2; COUNT(*) = 0 1 +connection node_2; UNLOCK TABLES; +connection node_2a; SET SESSION wsrep_sync_wait = 7;; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 diff --git a/mysql-test/suite/galera/r/galera_pc_ignore_sb.result b/mysql-test/suite/galera/r/galera_pc_ignore_sb.result index 5fcccfe2d59..e02ec0a3179 100644 --- a/mysql-test/suite/galera/r/galera_pc_ignore_sb.result +++ b/mysql-test/suite/galera/r/galera_pc_ignore_sb.result @@ -1,5 +1,10 @@ +connection node_1; +connection node_2; +connection node_1; SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true'; +connection node_2; Killing server ... 
+connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); DROP TABLE t1; @@ -10,3 +15,4 @@ SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABL VARIABLE_VALUE = 'ON' 1 SET GLOBAL wsrep_cluster_address = ''; +connection node_2; diff --git a/mysql-test/suite/galera/r/galera_pk_bigint_signed.result b/mysql-test/suite/galera/r/galera_pk_bigint_signed.result index a3075994657..27bb3c50733 100644 --- a/mysql-test/suite/galera/r/galera_pk_bigint_signed.result +++ b/mysql-test/suite/galera/r/galera_pk_bigint_signed.result @@ -3,23 +3,29 @@ INSERT INTO t1 VALUES (-9223372036854775808, 'min'), (9223372036854775807, 'max') ; +connection node_2; SELECT * FROM t1; f1 f2 -9223372036854775808 min 9223372036854775807 max UPDATE t1 SET f2 = CONCAT(f2, '_'); +connection node_1; SELECT * FROM t1; f1 f2 -9223372036854775808 min_ 9223372036854775807 max_ +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f2 = 'foo' WHERE f1 = -9223372036854775808; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f2 = 'bar' WHERE f1 = -9223372036854775808; +connection node_1; COMMIT; SET AUTOCOMMIT=ON; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SET AUTOCOMMIT=ON; diff --git a/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result b/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result index 441926e949c..f586fe2f589 100644 --- a/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result +++ b/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result @@ -2,21 +2,27 @@ CREATE TABLE t1 (f1 BIGINT UNSIGNED PRIMARY KEY, f2 VARCHAR(5)) ENGINE=InnoDB; INSERT INTO t1 VALUES (18446744073709551615, 'max') ; +connection node_2; SELECT f1 = 18446744073709551615 FROM t1; f1 = 18446744073709551615 1 UPDATE t1 SET f2 = CONCAT(f2, '_'); +connection node_1; SELECT f1 = 18446744073709551615 FROM t1; f1 = 18446744073709551615 1 +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f2 = 'foo' WHERE f1 = 18446744073709551615; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f2 = 'bar' WHERE f1 = 18446744073709551615; +connection node_1; COMMIT; SET AUTOCOMMIT=ON; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SET AUTOCOMMIT=ON; diff --git a/mysql-test/suite/galera/r/galera_prepared_statement.result b/mysql-test/suite/galera/r/galera_prepared_statement.result index de5ac9c760a..d32d412ff46 100644 --- a/mysql-test/suite/galera/r/galera_prepared_statement.result +++ b/mysql-test/suite/galera/r/galera_prepared_statement.result @@ -11,6 +11,7 @@ EXECUTE st1; EXECUTE st2; EXECUTE st3; EXECUTE st4; +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -25,8 +26,10 @@ COUNT(*) = 1 1 ALTER TABLE t1 ADD COLUMN f2 INTEGER; ALTER TABLE t1 DROP COLUMN f1; +connection node_1; EXECUTE st1; ERROR 22007: Incorrect integer value: 'abc' for column 'f2' at row 1 +connection node_1; DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/mysql-test/suite/galera/r/galera_query_cache.result b/mysql-test/suite/galera/r/galera_query_cache.result index 502d8a58e9c..e64c9438646 100644 --- a/mysql-test/suite/galera/r/galera_query_cache.result +++ b/mysql-test/suite/galera/r/galera_query_cache.result @@ -1,5 +1,6 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; RESET QUERY CACHE; FLUSH STATUS; SELECT COUNT(*) FROM t1; @@ -14,7 
+15,9 @@ COUNT(*) SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'Qcache_hits'; VARIABLE_VALUE = 1 1 +connection node_1; INSERT INTO t1 VALUES (2); +connection node_2; FLUSH STATUS; SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'Qcache_queries_in_cache'; VARIABLE_VALUE = 0 @@ -34,7 +37,9 @@ COUNT(*) SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'Qcache_hits'; VARIABLE_VALUE = 1 1 +connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; +connection node_2; FLUSH STATUS; SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'Qcache_queries_in_cache'; VARIABLE_VALUE = 0 diff --git a/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result b/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result index 856abbb43be..2a37d62698a 100644 --- a/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result +++ b/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result @@ -1,5 +1,6 @@ CREATE TABLE t1 (id INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SET GLOBAL wsrep_provider_options = "repl.causal_read_timeout=PT1S"; SET GLOBAL DEBUG = "d,sync.wsrep_apply_cb"; Warnings: @@ -8,7 +9,9 @@ SET SESSION wsrep_sync_wait = 7; SELECT MAX(id) FROM t1; MAX(id) 1 +connection node_1; INSERT INTO t1 VALUES (2); +connection node_2; SELECT MAX(id) FROM t1; ERROR HY000: Lock wait timeout exceeded; try restarting transaction SET GLOBAL DEBUG = ""; @@ -21,14 +24,18 @@ SET GLOBAL DEBUG = "d,sync.wsrep_apply_cb"; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SET DEBUG_SYNC = "RESET"; +connection node_1; INSERT INTO t1 VALUES (3); +connection node_2; SELECT MAX(id) FROM t1; ERROR HY000: Lock wait timeout exceeded; try restarting transaction SET GLOBAL DEBUG = ""; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +connection node_1; INSERT INTO t1 VALUES (4); +connection node_2; SET SESSION wsrep_sync_wait = 7; SELECT MAX(id) FROM t1; MAX(id) @@ -44,13 +51,18 @@ VARIABLE_VALUE = 1 SET GLOBAL DEBUG = "d,sync.wsrep_apply_cb"; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead +connection node_1; INSERT INTO t1 VALUES (5); +connection node_2; SET SESSION wsrep_sync_wait = 7; SELECT MAX(id) FROM t1 ; +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2a; SET GLOBAL DEBUG = ""; Warnings: Warning 1287 '@@debug' is deprecated and will be removed in a future release. 
Please use '@@debug_dbug' instead SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +connection node_2; MAX(id) 5 SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'Qcache_hits'; diff --git a/mysql-test/suite/galera/r/galera_read_only.result b/mysql-test/suite/galera/r/galera_read_only.result index 82736c5f4ba..4c2523f8691 100644 --- a/mysql-test/suite/galera/r/galera_read_only.result +++ b/mysql-test/suite/galera/r/galera_read_only.result @@ -1,15 +1,21 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; SET GLOBAL read_only=TRUE; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 CREATE USER foo@localhost; # Open connection to node 2 using 'foo' user. +connect foo_node_2,127.0.0.1,foo,,test,$port_2,; # Connect with foo_node_2 +connection foo_node_2; INSERT INTO t1 VALUES (2); ERROR HY000: The MariaDB server is running with the --read-only option so it cannot execute this statement +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result b/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result index 4acf0143f8b..2470f59c497 100644 --- a/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result +++ b/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result @@ -1,13 +1,16 @@ +connection node_1; SET GLOBAL wsrep_provider_options = 'repl.key_format=FLAT16'; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (123); CREATE TABLE t2 (f1 VARCHAR(256)) ENGINE=InnoDB; INSERT INTO t2 VALUES (REPEAT('a', 256)); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 UPDATE t1 SET f1 = 234; UPDATE t2 SET f1 = REPEAT('b', 256); +connection node_1; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 234; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result index 6e245b44462..00980181824 100644 --- a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result +++ b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result @@ -1,3 +1,4 @@ +connection node_1; CREATE TABLE t1 (f1 VARCHAR(512)) ENGINE=InnoDB; SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=512'; INSERT INTO t1 VALUES (REPEAT('a', 512)); diff --git a/mysql-test/suite/galera/r/galera_restart_nochanges.result b/mysql-test/suite/galera/r/galera_restart_nochanges.result index accace97826..380a4812da1 100644 --- a/mysql-test/suite/galera/r/galera_restart_nochanges.result +++ b/mysql-test/suite/galera/r/galera_restart_nochanges.result @@ -1,5 +1,10 @@ +connection node_1; +connection node_2; +connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; +connection node_2a; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_roles.result b/mysql-test/suite/galera/r/galera_roles.result index c0cdbc0e338..1617f0d7b51 100644 --- a/mysql-test/suite/galera/r/galera_roles.result +++ b/mysql-test/suite/galera/r/galera_roles.result @@ -3,6 +3,7 @@ # # On node_1 +connection node_1; CREATE DATABASE test1; CREATE TABLE test1.t1 (a int, b int); CREATE TABLE test1.t2 (a int, b int); @@ -18,8 +19,11 @@ GRANT EXECUTE ON PROCEDURE test1.pr1 TO role1; GRANT SELECT ON test1.t1 TO role1; GRANT SELECT (a) ON test1.t2 TO role1; # Open connections to the 2 nodes using 'foo' user. 
+connect foo_node_1,127.0.0.1,foo,,test,$port_1,; +connect foo_node_2,127.0.0.1,foo,,test,$port_2,; # Connect with foo_node_1 +connection foo_node_1; SHOW GRANTS; Grants for foo@localhost GRANT role1 TO 'foo'@'localhost' @@ -55,6 +59,7 @@ pr1 pr1 # Connect with foo_node_2 +connection foo_node_2; SHOW GRANTS; Grants for foo@localhost GRANT role1 TO 'foo'@'localhost' @@ -93,13 +98,16 @@ pr1 # # # Connect with node_1 +connection node_1; REVOKE EXECUTE ON PROCEDURE test1.pr1 FROM role1; # Connect with foo_node_1 +connection foo_node_1; CALL test1.pr1(); ERROR 42000: execute command denied to user 'foo'@'localhost' for routine 'test1.pr1' # Connect with foo_node_2 +connection foo_node_2; CALL test1.pr1(); ERROR 42000: execute command denied to user 'foo'@'localhost' for routine 'test1.pr1' # @@ -107,9 +115,11 @@ ERROR 42000: execute command denied to user 'foo'@'localhost' for routine 'test1 # # Connect with node_1 +connection node_1; DROP ROLE role1; # Connect with foo_node_1 +connection foo_node_1; FLUSH TABLES; SELECT * FROM mysql.roles_mapping; ERROR 42000: SELECT command denied to user 'foo'@'localhost' for table 'roles_mapping' @@ -131,6 +141,7 @@ CURRENT_ROLE() role1 # Connect with foo_node_2 +connection foo_node_2; FLUSH TABLES; SELECT * FROM mysql.roles_mapping; ERROR 42000: SELECT command denied to user 'foo'@'localhost' for table 'roles_mapping' @@ -150,7 +161,11 @@ NULL SELECT CURRENT_ROLE(); CURRENT_ROLE() role1 +disconnect foo_node_2; # Connect with node_1 +connection node_1; DROP USER foo@localhost; DROP DATABASE test1; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_rsu_error.result b/mysql-test/suite/galera/r/galera_rsu_error.result index bfe41390d1d..5c16e34b492 100644 --- a/mysql-test/suite/galera/r/galera_rsu_error.result +++ b/mysql-test/suite/galera/r/galera_rsu_error.result @@ -1,5 +1,6 @@ CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; INSERT INTO t1 VALUES (1), (1); +connection node_2; SET SESSION wsrep_OSU_method = "RSU"; ALTER TABLE t1 ADD PRIMARY KEY (f1); ERROR 23000: Duplicate entry '1' for key 'PRIMARY' @@ -8,6 +9,7 @@ SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE WHERE TABLE_NAME = COUNT(*) = 0 1 INSERT INTO t1 VALUES (1); +connection node_1; SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE WHERE TABLE_NAME = 't1'; COUNT(*) = 0 1 @@ -15,6 +17,7 @@ SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 1 INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(3) = 4 FROM t1; COUNT(3) = 4 1 diff --git a/mysql-test/suite/galera/r/galera_rsu_simple.result b/mysql-test/suite/galera/r/galera_rsu_simple.result index 4c2780a2933..d0ddcfb4d64 100644 --- a/mysql-test/suite/galera/r/galera_rsu_simple.result +++ b/mysql-test/suite/galera/r/galera_rsu_simple.result @@ -1,17 +1,21 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +connection node_2; SET SESSION wsrep_OSU_method = "RSU"; ALTER TABLE t1 ADD COLUMN f2 INTEGER; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 +connection node_1; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 1 1 INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 INSERT INTO t1 (f1) VALUES (2); +connection node_1; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result index 
62e327ffdee..2b2b80ee91c 100644 --- a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result +++ b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result @@ -1,10 +1,14 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; SET GLOBAL wsrep_desync=1; SET wsrep_OSU_method=RSU; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; SET GLOBAL wsrep_desync=0; SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -17,6 +21,7 @@ wsrep_desync OFF SET wsrep_OSU_method=TOI; DROP TABLE t1; SET DEBUG_SYNC= 'RESET'; +connection node_1; CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; SET GLOBAL wsrep_desync=0; Warnings: @@ -24,10 +29,12 @@ Warning 1231 'wsrep_desync' is already OFF. SET wsrep_OSU_method=RSU; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +connection node_1a; SET GLOBAL wsrep_desync=1; ERROR HY000: Operation 'desync' failed for SET GLOBAL wsrep_desync=1 SET GLOBAL wsrep_desync=0; SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -39,4 +46,5 @@ DROP TABLE t1; SET DEBUG_SYNC= 'RESET'; CALL mtr.add_suppression("Protocol violation"); CALL mtr.add_suppression("desync failed"); +connection node_2; CALL mtr.add_suppression("Protocol violation"); diff --git a/mysql-test/suite/galera/r/galera_sbr.result b/mysql-test/suite/galera/r/galera_sbr.result index 0bf6cc7c9d3..61a58c9cb89 100644 --- a/mysql-test/suite/galera/r/galera_sbr.result +++ b/mysql-test/suite/galera/r/galera_sbr.result @@ -1,3 +1,4 @@ +connection node_1; SET SESSION binlog_format = 'STATEMENT'; Warnings: Warning 1105 MariaDB Galera does not support binlog format: STATEMENT @@ -7,8 +8,10 @@ SET SESSION binlog_format = 'MIXED'; Warnings: Warning 1105 MariaDB Galera does not support binlog format: MIXED INSERT INTO t1 VALUES (2); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 DROP TABLE t1; +connection node_1; SET GLOBAL binlog_format = 'ROW'; diff --git a/mysql-test/suite/galera/r/galera_sbr_binlog.result b/mysql-test/suite/galera/r/galera_sbr_binlog.result index 0bf6cc7c9d3..61a58c9cb89 100644 --- a/mysql-test/suite/galera/r/galera_sbr_binlog.result +++ b/mysql-test/suite/galera/r/galera_sbr_binlog.result @@ -1,3 +1,4 @@ +connection node_1; SET SESSION binlog_format = 'STATEMENT'; Warnings: Warning 1105 MariaDB Galera does not support binlog format: STATEMENT @@ -7,8 +8,10 @@ SET SESSION binlog_format = 'MIXED'; Warnings: Warning 1105 MariaDB Galera does not support binlog format: MIXED INSERT INTO t1 VALUES (2); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 DROP TABLE t1; +connection node_1; SET GLOBAL binlog_format = 'ROW'; diff --git a/mysql-test/suite/galera/r/galera_serializable.result b/mysql-test/suite/galera/r/galera_serializable.result index 90fe628e505..e81c3911b7d 100644 --- a/mysql-test/suite/galera/r/galera_serializable.result +++ b/mysql-test/suite/galera/r/galera_serializable.result @@ -1,27 +1,37 @@ +connection node_1; CREATE TABLE t1 (id INT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; START TRANSACTION; SELECT * FROM t1; id f2 +connection node_2; INSERT INTO t1 VALUES (1,1); +connection node_1; SELECT * FROM t1; ERROR 40001: Deadlock found when trying to get lock; 
try restarting transaction ROLLBACK; DELETE FROM t1; +connection node_1; INSERT INTO t1 VALUES (1,1); START TRANSACTION; SELECT * FROM t1; id f2 1 1 +connection node_2; UPDATE t1 SET f2 = 2; +connection node_1; UPDATE t1 SET f2 = 3; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction ROLLBACK; DELETE FROM t1; +connection node_1; START TRANSACTION; +connection node_1; INSERT INTO t1 VALUES (1,1); +connection node_2; INSERT INTO t1 VALUES (1,2); +connection node_1; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_server.result b/mysql-test/suite/galera/r/galera_server.result index ef81bf376b0..cc08b826e82 100644 --- a/mysql-test/suite/galera/r/galera_server.result +++ b/mysql-test/suite/galera/r/galera_server.result @@ -1,20 +1,26 @@ +connection node_1; # On node_1 CREATE SERVER s1 FOREIGN DATA WRAPPER mysql OPTIONS (HOST 'foo'); +connection node_2; # On node_2 SELECT * FROM mysql.servers; Server_name Host Db Username Password Port Socket Wrapper Owner s1 foo 3306 mysql ALTER SERVER s1 OPTIONS (HOST 'bar'); +connection node_1; # On node_1 SELECT * FROM mysql.servers; Server_name Host Db Username Password Port Socket Wrapper Owner s1 bar 3306 mysql DROP SERVER s1; +connection node_2; # On node_2 SELECT COUNT(*)=0 FROM mysql.servers; COUNT(*)=0 1 +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_split_brain.result b/mysql-test/suite/galera/r/galera_split_brain.result index 615615040fe..9c5952cfa28 100644 --- a/mysql-test/suite/galera/r/galera_split_brain.result +++ b/mysql-test/suite/galera/r/galera_split_brain.result @@ -1,5 +1,11 @@ call mtr.add_suppression("WSREP: TO isolation failed for: "); +connection node_1; +connection node_1; +connection node_2; +connection node_2; Killing server ... 
+connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SET GLOBAL wsrep_cluster_address = ''; +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; diff --git a/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result b/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result index c15a24e481f..14407c917a1 100644 --- a/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result +++ b/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result @@ -3,10 +3,12 @@ SET SESSION sql_log_bin = 0; INSERT INTO t1 VALUES (1); SET SESSION sql_log_bin = 1; INSERT INTO t1 VALUES (2); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1; COUNT(*) = 1 1 +connection node_1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_ssl.result b/mysql-test/suite/galera/r/galera_ssl.result index 569c3c607d5..022d06319b8 100644 --- a/mysql-test/suite/galera/r/galera_ssl.result +++ b/mysql-test/suite/galera/r/galera_ssl.result @@ -6,6 +6,7 @@ VARIABLE_VALUE = 2 1 CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; VARIABLE_VALUE = 'Synced' 1 diff --git a/mysql-test/suite/galera/r/galera_ssl_compression.result b/mysql-test/suite/galera/r/galera_ssl_compression.result index f25b614d139..333d646376c 100644 --- a/mysql-test/suite/galera/r/galera_ssl_compression.result +++ b/mysql-test/suite/galera/r/galera_ssl_compression.result @@ -4,19 +4,23 @@ VARIABLE_VALUE = 'Synced' SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +connection node_2; SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; VARIABLE_VALUE = 'Synced' 1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +connection node_1; SET GLOBAL wsrep_provider_options = "socket.ssl_compression=No"; ERROR HY000: Incorrect arguments to SET CREATE TABLE t1 (f1 VARCHAR(333) PRIMARY KEY, f2 BLOB) Engine=InnoDB; INSERT INTO t1 VALUES (REPEAT('a', 333), REPEAT('b', 65535)); +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = REPEAT('a', 333) AND f2 = REPEAT('b', 65535); COUNT(*) = 1 1 +connection node_1; DROP TABLE t1; CALL mtr.add_suppression("Unknown parameter 'socket\.ssl_compression'"); CALL mtr.add_suppression("Set options returned 7"); diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump.result b/mysql-test/suite/galera/r/galera_sst_mysqldump.result index e35c4055f45..5c0d9a45d41 100644 --- a/mysql-test/suite/galera/r/galera_sst_mysqldump.result +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump.result @@ -1,4 +1,5 @@ Setting SST method to mysqldump ... +CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; SET GLOBAL wsrep_sst_method = 'mysqldump'; diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result index 7d30b356aa9..227e1c15444 100644 --- a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result @@ -1,4 +1,5 @@ Setting SST method to mysqldump ... 
+CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; SET GLOBAL wsrep_sst_method = 'mysqldump'; diff --git a/mysql-test/suite/galera/r/galera_status_cluster.result b/mysql-test/suite/galera/r/galera_status_cluster.result index d7cf671cb10..ad92a51b775 100644 --- a/mysql-test/suite/galera/r/galera_status_cluster.result +++ b/mysql-test/suite/galera/r/galera_status_cluster.result @@ -1,9 +1,11 @@ +connection node_1; SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE = 'Primary' 1 +connection node_2; SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 diff --git a/mysql-test/suite/galera/r/galera_status_local_index.result b/mysql-test/suite/galera/r/galera_status_local_index.result index 4e886ac8921..8c36b60cc5f 100644 --- a/mysql-test/suite/galera/r/galera_status_local_index.result +++ b/mysql-test/suite/galera/r/galera_status_local_index.result @@ -1,6 +1,9 @@ +connection node_1; CREATE TABLE wsrep_local_indexes (wsrep_local_index INTEGER); INSERT INTO wsrep_local_indexes VALUES ((SELECT variable_value FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_local_index')); +connection node_2; INSERT INTO wsrep_local_indexes VALUES ((SELECT variable_value FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_local_index')); +connection node_1; SELECT COUNT(*) = 2 FROM wsrep_local_indexes; COUNT(*) = 2 1 diff --git a/mysql-test/suite/galera/r/galera_suspend_slave.result b/mysql-test/suite/galera/r/galera_suspend_slave.result index 02904812dd5..380ecb395c3 100644 --- a/mysql-test/suite/galera/r/galera_suspend_slave.result +++ b/mysql-test/suite/galera/r/galera_suspend_slave.result @@ -1,9 +1,16 @@ +connection node_1; +connection node_2; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_2; +disconnect node_2; +connection node_1; Suspending node_2 ... INSERT INTO t1 VALUES (1); Got one of the listed errors Resuming node_2 ... 
INSERT INTO t1 VALUES (1); +connection node_2a; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_sync_wait_show.result b/mysql-test/suite/galera/r/galera_sync_wait_show.result index 2f030c26bb8..4a73a573041 100644 --- a/mysql-test/suite/galera/r/galera_sync_wait_show.result +++ b/mysql-test/suite/galera/r/galera_sync_wait_show.result @@ -1,38 +1,53 @@ +connection node_2; SET SESSION wsrep_sync_wait = 1; +connection node_1; CREATE DATABASE db1; +connection node_2; SHOW CREATE DATABASE db1; Database Create Database db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET latin1 */ DROP DATABASE db1; +connection node_1; CREATE PROCEDURE p1 () SELECT 1 FROM DUAL; +connection node_2; SHOW CREATE PROCEDURE p1; Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation p1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` PROCEDURE `p1`() SELECT 1 FROM DUAL latin1 latin1_swedish_ci latin1_swedish_ci DROP PROCEDURE p1; +connection node_1; CREATE PROCEDURE p1 () SELECT 1 FROM DUAL; +connection node_2; SHOW PROCEDURE CODE p1; Pos Instruction 0 stmt 0 "SELECT 1 FROM DUAL" DROP PROCEDURE p1; +connection node_1; CREATE FUNCTION f1 () RETURNS INTEGER RETURN 123; +connection node_2; SHOW CREATE FUNCTION f1; Function sql_mode Create Function character_set_client collation_connection Database Collation f1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` FUNCTION `f1`() RETURNS int(11) RETURN 123 latin1 latin1_swedish_ci latin1_swedish_ci DROP FUNCTION f1; +connection node_1; CREATE FUNCTION f1 () RETURNS INTEGER RETURN 123; +connection node_2; SHOW FUNCTION CODE f1; Pos Instruction 0 freturn 3 123 DROP FUNCTION f1; +connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET NEW.f1 = 'a'; +connection node_2; SHOW CREATE TRIGGER tr1; Trigger sql_mode SQL Original Statement character_set_client collation_connection Database Collation tr1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`root`@`localhost` TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET NEW.f1 = 'a' latin1 latin1_swedish_ci latin1_swedish_ci DROP TABLE t1; +connection node_1; CREATE EVENT event1 ON SCHEDULE AT '2038-01-01 23:59:59' DO SELECT 1; +connection node_2; SHOW CREATE EVENT event1; Event sql_mode time_zone Create Event character_set_client collation_connection Database Collation event1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION SYSTEM CREATE DEFINER=`root`@`localhost` EVENT `event1` ON SCHEDULE AT '2038-01-01 23:59:59' ON COMPLETION NOT PRESERVE DISABLE ON SLAVE DO SELECT 1 latin1 latin1_swedish_ci latin1_swedish_ci diff --git a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result index f91415323ec..8a86dfd11e2 100644 --- a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result +++ b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result @@ -1,40 +1,54 @@ +connection node_1; CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; INSERT INTO t1 (f2) SELECT 1 FROM ten; +connection node_2; INSERT INTO t1 (f2) SELECT 1 FROM ten; ALTER TABLE t1 AUTO_INCREMENT = 1000; INSERT INTO t1 (f2) SELECT 1 FROM ten; +connection node_1; INSERT INTO t1 (f2) SELECT 1 FROM ten; SELECT MIN(f1) >= 1000, COUNT(*) = 20, 
COUNT(DISTINCT f1) = 20 FROM t1 WHERE f1 >= 1000; MIN(f1) >= 1000 COUNT(*) = 20 COUNT(DISTINCT f1) = 20 1 1 1 +connection node_2; SELECT MIN(f1) >= 1000, COUNT(*) = 20, COUNT(DISTINCT f1) = 20 FROM t1 WHERE f1 >= 1000; MIN(f1) >= 1000 COUNT(*) = 20 COUNT(DISTINCT f1) = 20 1 1 1 +connection node_1; ALTER TABLE t1 AUTO_INCREMENT = 5; INSERT INTO t1 (f2) SELECT 1 FROM ten; +connection node_2; INSERT INTO t1 (f2) SELECT 1 FROM ten; SELECT MIN(f1) >= 1000, COUNT(*) = 40, COUNT(DISTINCT f1) = 40 FROM t1 WHERE f1 >= 1000; MIN(f1) >= 1000 COUNT(*) = 40 COUNT(DISTINCT f1) = 40 1 1 1 +connection node_1; SELECT MIN(f1) >= 1000, COUNT(*) = 40, COUNT(DISTINCT f1) = 40 FROM t1 WHERE f1 >= 1000; MIN(f1) >= 1000 COUNT(*) = 40 COUNT(DISTINCT f1) = 40 1 1 1 DROP TABLE t1; +connection node_1; SET GLOBAL wsrep_auto_increment_control = OFF; SET GLOBAL auto_increment_increment = 1; SET GLOBAL auto_increment_offset = 1; +connection node_2; SET GLOBAL wsrep_auto_increment_control = OFF; SET GLOBAL auto_increment_increment = 1; SET GLOBAL auto_increment_offset = 1; +connection node_1a; CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; +connection node_2a; ALTER TABLE t1 AUTO_INCREMENT=100; +connection node_1a; INSERT INTO t1 (f2) SELECT 1 FROM ten; +connection node_2a; INSERT INTO t1 (f2) SELECT 1 FROM ten; SELECT MIN(f1) = 100, MAX(f1) = 119, COUNT(f1) = 20, COUNT(DISTINCT f1) = 20 FROM t1; MIN(f1) = 100 MAX(f1) = 119 COUNT(f1) = 20 COUNT(DISTINCT f1) = 20 1 1 1 1 +connection node_1a; SELECT MIN(f1) = 100, MAX(f1) = 119, COUNT(f1) = 20, COUNT(DISTINCT f1) = 20 FROM t1; MIN(f1) = 100 MAX(f1) = 119 COUNT(f1) = 20 COUNT(DISTINCT f1) = 20 1 1 1 1 diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_locking.result b/mysql-test/suite/galera/r/galera_toi_ddl_locking.result index f0a51db14d3..68743c024a0 100644 --- a/mysql-test/suite/galera/r/galera_toi_ddl_locking.result +++ b/mysql-test/suite/galera/r/galera_toi_ddl_locking.result @@ -1,7 +1,9 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB; +connection node_1; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +connection node_1a; SET SESSION wsrep_sync_wait = 0; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 @@ -15,6 +17,7 @@ SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t2 VALUES (1); COMMIT;; +connection node_1b; SET SESSION wsrep_sync_wait = 0; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'Commit'; COUNT(*) = 1 @@ -23,19 +26,24 @@ SELECT COUNT(*) = 0 FROM t2; COUNT(*) = 0 1 SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1a; +connection node_1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 SELECT COUNT(*) = 1 FROM t2; COUNT(*) = 1 1 +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 SELECT COUNT(*) = 1 FROM t2; COUNT(*) = 1 1 +connection node_1; SET DEBUG_SYNC= 'RESET'; +connection node_1b; SET DEBUG_SYNC= 'RESET'; DROP TABLE t1; DROP TABLE t2; diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result b/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result index 41e693c2b19..3844fa97d82 100644 --- a/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result +++ b/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result @@ -1,6 +1,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 INTEGER); +connection node_2; ALTER TABLE t1 ADD COLUMN f3 INTEGER; INSERT INTO t1 (f1, f2) VALUES (DEFAULT, 123);; +connection node_1; CREATE UNIQUE INDEX i1 ON 
t1(f2);; +connection node_2; INSERT INTO t1 (f1, f2) VALUES (DEFAULT, 234); SELECT COUNT(*) = 3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 3 @@ -11,6 +14,7 @@ COUNT(*) = 2 SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 +connection node_1; SELECT COUNT(*) = 3 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 3 1 diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result b/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result index 9dfa433d49f..722bb9d9e12 100644 --- a/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result +++ b/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result @@ -1,9 +1,12 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; ALTER TABLE t1 ADD COLUMN f2 INTEGER; INSERT INTO t1 VALUES (2, 3); +connection node_1; ALTER TABLE t1 DROP COLUMN f2; INSERT INTO t1 VALUES (4); +connection node_2; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -18,6 +21,7 @@ f1 1 2 4 +connection node_1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/suite/galera/r/galera_toi_ftwrl.result b/mysql-test/suite/galera/r/galera_toi_ftwrl.result index 594717c96ff..0f13e95b689 100644 --- a/mysql-test/suite/galera/r/galera_toi_ftwrl.result +++ b/mysql-test/suite/galera/r/galera_toi_ftwrl.result @@ -1,6 +1,9 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; FLUSH TABLES WITH READ LOCK; +connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; +connection node_2; UNLOCK TABLES; SHOW CREATE TABLE t1; Table Create Table diff --git a/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result b/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result index eac50e8853c..c13b689f82b 100644 --- a/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result +++ b/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result @@ -1,15 +1,20 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES (2); +connection node_2a; ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=EXCLUSIVE; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection node_1; INSERT INTO t1 VALUES (2, 2); SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 +connection node_2; INSERT INTO t1 VALUES (3, 3); SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 diff --git a/mysql-test/suite/galera/r/galera_toi_lock_shared.result b/mysql-test/suite/galera/r/galera_toi_lock_shared.result index 36c38860688..950c4d83c70 100644 --- a/mysql-test/suite/galera/r/galera_toi_lock_shared.result +++ b/mysql-test/suite/galera/r/galera_toi_lock_shared.result @@ -1,10 +1,13 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=SHARED; +connection node_1; INSERT INTO t1 VALUES (2, 2); SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 +connection node_2; INSERT INTO t1 VALUES (3, 3); SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 diff --git a/mysql-test/suite/galera/r/galera_truncate.result b/mysql-test/suite/galera/r/galera_truncate.result index eeeb6721d12..4f3d72dbca7 100644 --- a/mysql-test/suite/galera/r/galera_truncate.result +++ b/mysql-test/suite/galera/r/galera_truncate.result @@ -1,24 +1,31 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM 
t1; COUNT(*) = 0 1 +connection node_1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_2; CREATE TABLE t2 (f1 VARCHAR(255)) Engine=InnoDB; INSERT INTO t2 VALUES ('abc'); +connection node_1; TRUNCATE TABLE t2; +connection node_2; SELECT COUNT(*) = 0 FROM t2; COUNT(*) = 0 1 +connection node_1; CREATE TABLE t3 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB; INSERT INTO t3 VALUES (DEFAULT),(DEFAULT),(DEFAULT),(DEFAULT),(DEFAULT); CREATE TABLE t4 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB AUTO_INCREMENT=1234; INSERT INTO t4 VALUES (DEFAULT),(DEFAULT),(DEFAULT),(DEFAULT),(DEFAULT); TRUNCATE TABLE t3; TRUNCATE TABLE t4; +connection node_2; SELECT AUTO_INCREMENT = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN ('t3', 't4'); AUTO_INCREMENT = 1 1 diff --git a/mysql-test/suite/galera/r/galera_truncate_temporary.result b/mysql-test/suite/galera/r/galera_truncate_temporary.result index 0bdc4e3632a..183ebd9d24a 100644 --- a/mysql-test/suite/galera/r/galera_truncate_temporary.result +++ b/mysql-test/suite/galera/r/galera_truncate_temporary.result @@ -4,8 +4,10 @@ TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_2; SELECT * FROM t1; ERROR 42S02: Table 'test.t1' doesn't exist +connection node_1; DROP TABLE t1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); @@ -21,12 +23,14 @@ TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SELECT f1 = 1 FROM t1; f1 = 1 1 +connection node_1; DROP TABLE t1; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 @@ -38,18 +42,23 @@ TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_1; DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (2); +connection node_2; TRUNCATE TABLE t1; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_1; SELECT f1 = 2 FROM t1; f1 = 2 1 diff --git a/mysql-test/suite/galera/r/galera_unicode_identifiers.result b/mysql-test/suite/galera/r/galera_unicode_identifiers.result index 8bea105c77e..146cc052152 100644 --- a/mysql-test/suite/galera/r/galera_unicode_identifiers.result +++ b/mysql-test/suite/galera/r/galera_unicode_identifiers.result @@ -1,5 +1,7 @@ SET GLOBAL wsrep_sync_wait = 7; +connection node_2; SET GLOBAL wsrep_sync_wait = 7; +connection node_1; CREATE DATABASE `database with space`; USE `database with space`; CREATE TABLE `table with space` ( @@ -24,6 +26,7 @@ CREATE TABLE `втора таблица` ( KEY `първи индекÑ` (`първа колона`) ); INSERT INTO `втора таблица` VALUES (1, 1); +connection node_2; USE `database with space`; SELECT `second column with space` FROM `table with space`; second column with space @@ -37,6 +40,7 @@ SELECT `втора колона` FROM `втора таблица`; втора колона 1 SET GLOBAL wsrep_sync_wait = (SELECT @@wsrep_sync_wait); +connection node_1; DROP TABLE `database with space`.`table with space`; DROP TABLE `база`.`таблица`; DROP TABLE `втора база`.`втора таблица`; diff --git a/mysql-test/suite/galera/r/galera_unicode_pk.result b/mysql-test/suite/galera/r/galera_unicode_pk.result index d59615b2542..2711e44b7cb 100644 --- a/mysql-test/suite/galera/r/galera_unicode_pk.result +++ b/mysql-test/suite/galera/r/galera_unicode_pk.result @@ -2,16 +2,21 @@ CREATE TABLE t1 ( f1 VARCHAR(255) PRIMARY KEY ) ENGINE=InnoDB DEFAULT CHARSET=utf8; INSERT INTO t1 VALUES 
('текÑÑ‚'); +connection node_2; SELECT f1 = 'текÑÑ‚' FROM t1; f1 = 'текÑÑ‚' 1 +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'текÑÑ‚2'; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; UPDATE t1 SET f1 = 'текÑÑ‚3'; +connection node_1; COMMIT; +connection node_2; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction SELECT f1 = 'текÑÑ‚2' FROM t1; @@ -20,11 +25,15 @@ f1 = 'текÑÑ‚2' SELECT f1 = 'текÑÑ‚2' FROM t1 WHERE f1 = 'текÑÑ‚2'; f1 = 'текÑÑ‚2' 1 +connection node_2; START TRANSACTION; INSERT INTO t1 VALUES ('текÑÑ‚4'); +connection node_1; START TRANSACTION; INSERT INTO t1 VALUES ('текÑÑ‚4'); +connection node_2; COMMIT; +connection node_1; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction COMMIT; diff --git a/mysql-test/suite/galera/r/galera_update_limit.result b/mysql-test/suite/galera/r/galera_update_limit.result index c26eb1c29f6..20a94e6f504 100644 --- a/mysql-test/suite/galera/r/galera_update_limit.result +++ b/mysql-test/suite/galera/r/galera_update_limit.result @@ -1,8 +1,11 @@ +connection node_1; CREATE TABLE ten (f1 INTEGER) Engine=InnoDB; INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; INSERT INTO t1 SELECT f1 FROM ten ORDER BY RAND(); +connection node_2; UPDATE IGNORE t1 SET f1 = FLOOR(1 + (RAND() * 10)) ORDER BY RAND() LIMIT 5; +connection node_1; sum_matches 1 max_matches @@ -10,7 +13,9 @@ max_matches DROP TABLE t1; CREATE TABLE t2 (f1 INTEGER) Engine=InnoDB; INSERT INTO t2 SELECT f1 FROM ten ORDER BY RAND(); +connection node_2; UPDATE IGNORE t2 SET f1 = FLOOR(1 + (RAND() * 10)) ORDER BY RAND() LIMIT 5; +connection node_1; sum_matches 1 DROP TABLE t2; diff --git a/mysql-test/suite/galera/r/galera_v1_row_events.result b/mysql-test/suite/galera/r/galera_v1_row_events.result index a6ab62350b1..b0ea2293119 100644 --- a/mysql-test/suite/galera/r/galera_v1_row_events.result +++ b/mysql-test/suite/galera/r/galera_v1_row_events.result @@ -1,9 +1,12 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; UPDATE t1 SET f1 = 2 WHERE f1 = 1; +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_var_OSU_method.result b/mysql-test/suite/galera/r/galera_var_OSU_method.result index 9a07873e1b9..18e8bd2271a 100644 --- a/mysql-test/suite/galera/r/galera_var_OSU_method.result +++ b/mysql-test/suite/galera/r/galera_var_OSU_method.result @@ -1,16 +1,24 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +connection node_1; SET SESSION wsrep_OSU_method = "RSU"; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; SET GLOBAL wsrep_OSU_method = "TOI"; SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 +connection node_2; SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 1 1 +connection node_1; SET SESSION wsrep_OSU_method = "TOI"; +connection node_1; SET DEBUG_SYNC= 'RESET'; +connection node_1a; SET DEBUG_SYNC= 'RESET'; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_OSU_method2.result b/mysql-test/suite/galera/r/galera_var_OSU_method2.result index 
08f2e6aa0d8..0e3751645a8 100644 --- a/mysql-test/suite/galera/r/galera_var_OSU_method2.result +++ b/mysql-test/suite/galera/r/galera_var_OSU_method2.result @@ -1,19 +1,26 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +connection node_1; SET SESSION wsrep_OSU_method = "TOI"; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; SET SESSION wsrep_sync_wait = 0; SET GLOBAL wsrep_OSU_method = "RSU"; SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 INSERT INTO t1 VALUES (1,2); +connection node_2; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; COUNT(*) = 2 1 INSERT INTO t1 VALUES (3,4); +connection node_1; SET GLOBAL wsrep_OSU_method = "TOI"; DROP TABLE t1; SET DEBUG_SYNC= 'RESET'; +connection node_1a; SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result b/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result index 92b69fbbaa7..15bbe7c508a 100644 --- a/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result +++ b/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result @@ -1,9 +1,12 @@ +connection node_1; SET GLOBAL wsrep_auto_increment_control = OFF; SET GLOBAL auto_increment_increment = 1; SET GLOBAL auto_increment_offset = 1; +connection node_2; SET GLOBAL wsrep_auto_increment_control = OFF; SET GLOBAL auto_increment_increment = 1; SET GLOBAL auto_increment_offset = 1; +connection node_1a; SELECT @@auto_increment_increment = 1; @@auto_increment_increment = 1 1 @@ -18,6 +21,7 @@ t1 CREATE TABLE `t1` ( `node` varchar(10) DEFAULT NULL, PRIMARY KEY (`f1`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 +connection node_2a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -25,6 +29,7 @@ t1 CREATE TABLE `t1` ( `node` varchar(10) DEFAULT NULL, PRIMARY KEY (`f1`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 +connection node_1a; SELECT @@auto_increment_increment = 1; @@auto_increment_increment = 1 1 @@ -37,6 +42,7 @@ INSERT INTO t1 (node) VALUES ('node1'); SELECT f1 FROM t1; f1 1 +connection node_2a; SELECT @@auto_increment_increment = 1; @@auto_increment_increment = 1 1 @@ -49,12 +55,16 @@ INSERT INTO t1 (node) VALUES ('node2'); SELECT f1 FROM t1; f1 1 +connection node_1a; COMMIT; +connection node_2a; COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection node_1a; SELECT * FROM t1; f1 node 1 node1 +connection node_2a; SELECT * FROM t1; f1 node 1 node1 diff --git a/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result b/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result index e6f37792569..b13302d3ecd 100644 --- a/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result +++ b/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result @@ -1,11 +1,14 @@ SET GLOBAL wsrep_certify_nonPK = OFF; +connection node_2; SET GLOBAL wsrep_certify_nonPK = OFF; +connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB /* Table has no primary key */; CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1), (2); Got one of the listed errors INSERT INTO t2 VALUES (1), (2); UPDATE t2 SET f1 = 3 WHERE f1 = 1; +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 @@ -15,7 +18,9 @@ COUNT(*) = 2 SELECT COUNT(*) = 1 FROM t2 WHERE f1 = 3; COUNT(*) = 1 1 
+connection node_1; SET GLOBAL wsrep_certify_nonPK = 1; +connection node_2; SET GLOBAL wsrep_certify_nonPK = 1; DROP TABLE t1; DROP TABLE t2; diff --git a/mysql-test/suite/galera/r/galera_var_cluster_address.result b/mysql-test/suite/galera/r/galera_var_cluster_address.result index f8bd869f8fe..09971c08580 100644 --- a/mysql-test/suite/galera/r/galera_var_cluster_address.result +++ b/mysql-test/suite/galera/r/galera_var_cluster_address.result @@ -1,3 +1,6 @@ +connection node_1; +connection node_2; +connection node_1; SET GLOBAL wsrep_cluster_address = 'foo://'; SET SESSION wsrep_sync_wait=0; SELECT * FROM INFORMATION_SCHEMA.GLOBAL_STATUS; @@ -14,19 +17,24 @@ wsrep_local_state 0 SHOW STATUS LIKE 'wsrep_local_state_comment'; Variable_name Value wsrep_local_state_comment Initialized +connection node_2; SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 1 1 SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE = 'Primary' 1 +connection node_1; +connection node_2; SET GLOBAL wsrep_cluster_address = @@wsrep_cluster_address; +connection node_1; SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE = 'Primary' 1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +connection node_1; SET GLOBAL wsrep_cluster_address = 'gcomm://192.0.2.1'; SELECT * FROM INFORMATION_SCHEMA.GLOBAL_STATUS; ERROR 08S01: WSREP has not yet prepared node for application use @@ -42,7 +50,10 @@ wsrep_local_state 0 SHOW STATUS LIKE 'wsrep_local_state_comment'; Variable_name Value wsrep_local_state_comment Initialized +connection node_1; +connection node_2; SET GLOBAL wsrep_cluster_address = @@wsrep_cluster_address; +connection node_1; SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE = 'Primary' 1 diff --git a/mysql-test/suite/galera/r/galera_var_desync_on.result b/mysql-test/suite/galera/r/galera_var_desync_on.result index 0b5f34688b7..383e077f775 100644 --- a/mysql-test/suite/galera/r/galera_var_desync_on.result +++ b/mysql-test/suite/galera/r/galera_var_desync_on.result @@ -1,8 +1,10 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1'; SET GLOBAL wsrep_desync = TRUE; FLUSH TABLES WITH READ LOCK; +connection node_1; INSERT INTO t1 VALUES (2); INSERT INTO t1 VALUES (3); INSERT INTO t1 VALUES (4); @@ -12,6 +14,7 @@ INSERT INTO t1 VALUES (7); INSERT INTO t1 VALUES (8); INSERT INTO t1 VALUES (9); INSERT INTO t1 VALUES (10); +connection node_2; SET SESSION wsrep_sync_wait = 0; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 @@ -22,7 +25,9 @@ SET SESSION wsrep_sync_wait = 1; SELECT COUNT(*) = 10 FROM t1; COUNT(*) = 10 1 +connection node_1; INSERT INTO t1 VALUES (11); +connection node_2; SELECT COUNT(*) = 11 FROM t1; COUNT(*) = 11 1 diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result index 6d703c8cf95..6b3a3ec0eb5 100644 --- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result +++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result @@ -1,3 +1,4 @@ +connection node_2; CREATE TABLE t1(i INT) ENGINE=INNODB; INSERT INTO t1 VALUES(1); SELECT * FROM t1; @@ -18,8 +19,11 @@ SET 
@@session.wsrep_dirty_reads=ON; SELECT * FROM t1; i 1 +connection node_1; SELECT * FROM t1; i 1 DROP TABLE t1; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_var_fkchecks.result b/mysql-test/suite/galera/r/galera_var_fkchecks.result index 342212a5241..8b1b913a584 100644 --- a/mysql-test/suite/galera/r/galera_var_fkchecks.result +++ b/mysql-test/suite/galera/r/galera_var_fkchecks.result @@ -12,6 +12,7 @@ INSERT INTO parent VALUES (1); INSERT INTO child VALUES (1,1); SET SESSION foreign_key_checks = 0; INSERT INTO child VALUES (2,2); +connection node_2; SELECT COUNT(*) = 1 FROM child WHERE id = 2; COUNT(*) = 1 1 @@ -19,6 +20,7 @@ INSERT INTO child VALUES (3,3); ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test`.`child`, CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `parent` (`id`)) SET SESSION foreign_key_checks = 0; DELETE FROM parent; +connection node_1; SELECT COUNT(*) = 0 FROM parent; COUNT(*) = 0 1 diff --git a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result index 8e84236d5bf..1a8733e2e1a 100644 --- a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result +++ b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result @@ -1,4 +1,5 @@ # On node_1 +connection node_1; list of GTID variables : gtid_domain_id 1 gtid_binlog_pos @@ -8,6 +9,7 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_2 +connection node_2; list of GTID variables : gtid_domain_id 2 gtid_binlog_pos @@ -17,6 +19,7 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_1 +connection node_1; CREATE TABLE t1(i INT) ENGINE=INNODB; CREATE TABLE t2(i INT) ENGINE=MEMORY; INSERT INTO t1 VALUES(1); @@ -34,6 +37,7 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_2 +connection node_2; SELECT * FROM t1; i 1 @@ -46,6 +50,7 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_1 +connection node_1; INSERT INTO t2 VALUES(1); SELECT * FROM t2; i @@ -59,6 +64,7 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_2 +connection node_2; SELECT * FROM t2; i list of GTID variables : @@ -70,5 +76,8 @@ gtid_slave_pos wsrep_gtid_domain_id 9999 wsrep_gtid_mode 1 # On node_1 +connection node_1; DROP TABLE t1, t2; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result b/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result index 912e45a14b1..5377a0af1b6 100644 --- a/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result +++ b/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result @@ -1,11 +1,16 @@ +connection node_1a; SET SESSION wsrep_sync_wait = 0; +connection node_1; CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; SET GLOBAL innodb_disallow_writes=ON; INSERT INTO t1 VALUES (1);; +connection node_1a; SET GLOBAL innodb_disallow_writes=OFF; +connection node_1; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_var_log_bin.result b/mysql-test/suite/galera/r/galera_var_log_bin.result index a6ab62350b1..b0ea2293119 100644 --- a/mysql-test/suite/galera/r/galera_var_log_bin.result +++ b/mysql-test/suite/galera/r/galera_var_log_bin.result @@ -1,9 +1,12 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 
1 FROM t1; COUNT(*) = 1 1 +connection node_1; UPDATE t1 SET f1 = 2 WHERE f1 = 1; +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_size.result b/mysql-test/suite/galera/r/galera_var_max_ws_size.result index d0bf7deafa5..71859ef82e0 100644 --- a/mysql-test/suite/galera/r/galera_var_max_ws_size.result +++ b/mysql-test/suite/galera/r/galera_var_max_ws_size.result @@ -1,3 +1,4 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(1024)) Engine=InnoDB; SET GLOBAL wsrep_max_ws_size = 1024; INSERT INTO t1 VALUES (DEFAULT, REPEAT('X', 1024)); diff --git a/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result b/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result index f2a951c26b0..202633a020e 100644 --- a/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result +++ b/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result @@ -1,12 +1,17 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB; SET GLOBAL wsrep_mysql_replication_bundle = 2; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 0 +connection node_1; INSERT INTO t1 VALUES (2); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 +connection node_1; SET GLOBAL wsrep_mysql_replication_bundle = 0; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_node_address.result b/mysql-test/suite/galera/r/galera_var_node_address.result index fa88f4b3128..fe3b5613903 100644 --- a/mysql-test/suite/galera/r/galera_var_node_address.result +++ b/mysql-test/suite/galera/r/galera_var_node_address.result @@ -1,9 +1,14 @@ SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 4 1 +connection node_1; CREATE TABLE t1 (f1 INTEGER); +connection node_2; INSERT INTO t1 VALUES (1); +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; +connection node_3; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result b/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result index c8b79071d10..382466b1387 100644 --- a/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result +++ b/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result @@ -1,8 +1,10 @@ SET GLOBAL wsrep_replicate_myisam = FALSE; CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=MyISAM; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_1; SET GLOBAL wsrep_replicate_myisam = 0; DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result b/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result index 73a0576048b..bf5a09f6a77 100644 --- a/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result +++ b/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result @@ -1,17 +1,24 @@ +connection node_1; +connection node_1; SET GLOBAL wsrep_replicate_myisam = TRUE; +connection node_2; SET GLOBAL wsrep_replicate_myisam = TRUE; +connection node_1; CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=MyISAM; INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (2), (3); INSERT INTO t1 SELECT 4 FROM DUAL UNION ALL SELECT 5 FROM DUAL; +connection node_2; SELECT COUNT(*) = 5 FROM t1; COUNT(*) = 5 1 DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(100)) 
ENGINE=MyISAM; INSERT INTO t1 VALUES (1, 'abc'),(2,'abc'), (3, 'xxx'); REPLACE INTO t1 VALUES (1, 'klm'), (2,'xyz'); REPLACE INTO t1 SELECT 3, 'yyy' FROM DUAL; +connection node_2; SELECT COUNT(*) = 3 FROM t1; COUNT(*) = 3 1 @@ -24,19 +31,26 @@ COUNT(*) = 1 SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 3 AND f2 = 'yyy'; COUNT(*) = 1 1 +connection node_1; UPDATE t1 SET f2 = 'zzz' WHERE f2 = 'yyy'; +connection node_2; SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'zzz'; COUNT(*) = 1 1 +connection node_1; DELETE FROM t1 WHERE f2 = 'zzz'; +connection node_2; SELECT COUNT(*) = 0 FROM t1 WHERE f2 = 'zzz'; COUNT(*) = 0 1 +connection node_1; TRUNCATE TABLE t1; +connection node_2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 DROP TABLE t1; +connection node_1; CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM; CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; @@ -44,18 +58,21 @@ START TRANSACTION; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); COMMIT; +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SELECT COUNT(*) = 1 FROM t2; COUNT(*) = 1 1 +connection node_1; START TRANSACTION; INSERT INTO t1 VALUES (2); INSERT INTO t2 VALUES (2); ROLLBACK; Warnings: Warning 1196 Some non-transactional changed tables couldn't be rolled back +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 @@ -64,15 +81,20 @@ COUNT(*) = 1 1 DROP TABLE t1; DROP TABLE t2; +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=MyISAM; CREATE TABLE t2 (f2 INTEGER PRIMARY KEY) ENGINE=InnoDB; START TRANSACTION; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); +connection node_2; INSERT INTO t1 VALUES (1); ERROR 23000: Duplicate entry '1' for key 'PRIMARY' +connection node_1; COMMIT; DROP TABLE t1; DROP TABLE t2; +connection node_1; SET GLOBAL wsrep_replicate_myisam = 0; +connection node_2; SET GLOBAL wsrep_replicate_myisam = 0; diff --git a/mysql-test/suite/galera/r/galera_var_slave_threads.result b/mysql-test/suite/galera/r/galera_var_slave_threads.result index 603dfaeacc7..facc083544c 100644 --- a/mysql-test/suite/galera/r/galera_var_slave_threads.result +++ b/mysql-test/suite/galera/r/galera_var_slave_threads.result @@ -1,5 +1,7 @@ +connection node_1; CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB; CREATE TABLE t2 (f1 INT AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB; +connection node_2; SET GLOBAL wsrep_slave_threads = 0; Warnings: Warning 1292 Truncated incorrect wsrep_slave_threads value: '0' @@ -17,7 +19,9 @@ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system use COUNT(*) = 1 1 SET GLOBAL wsrep_slave_threads = 64; +connection node_1; INSERT INTO t1 VALUES (1); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -28,6 +32,7 @@ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system use COUNT(*) = 1 1 SET GLOBAL wsrep_slave_threads = 1; +connection node_1; INSERT INTO t2 VALUES (DEFAULT); INSERT INTO t2 VALUES (DEFAULT); INSERT INTO t2 VALUES (DEFAULT); @@ -92,6 +97,7 @@ INSERT INTO t2 VALUES (DEFAULT); INSERT INTO t2 VALUES (DEFAULT); INSERT INTO t2 VALUES (DEFAULT); INSERT INTO t2 VALUES (DEFAULT); +connection node_2; SELECT COUNT(*) = 64 FROM t2; COUNT(*) = 64 1 @@ -107,7 +113,9 @@ DROP TABLE t2; # # lp:1372840 - Changing wsrep_slave_threads causes future connections to hang # +connection node_1; CREATE TABLE t1 (i INT AUTO_INCREMENT PRIMARY KEY) ENGINE=INNODB; +connection node_2; SET GLOBAL wsrep_slave_threads = 4; SET GLOBAL wsrep_slave_threads = 1; DROP TABLE t1; diff --git 
a/mysql-test/suite/galera/r/galera_var_sync_wait.result b/mysql-test/suite/galera/r/galera_var_sync_wait.result index f6136a4ddc4..40a6290374f 100644 --- a/mysql-test/suite/galera/r/galera_var_sync_wait.result +++ b/mysql-test/suite/galera/r/galera_var_sync_wait.result @@ -1,4 +1,6 @@ +connection node_1; CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB; +connection node_2; SET GLOBAL wsrep_sync_wait = 1; SHOW TABLES LIKE '%t1'; Tables_in_test (%t1) @@ -6,15 +8,20 @@ t1 SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +connection node_1; CREATE TABLE t2 (f1 INT PRIMARY KEY) Engine=InnoDB; +connection node_2; SET GLOBAL wsrep_sync_wait = 4; INSERT INTO t2 VALUES (1); +connection node_1; CREATE TABLE t3 (f1 INT PRIMARY KEY) Engine=InnoDB; INSERT INTO t3 VALUES (1); +connection node_2; SET GLOBAL wsrep_sync_wait = 2; UPDATE t3 SET f1 = 2; affected rows: 1 info: Rows matched: 1 Changed: 1 Warnings: 0 +connection node_2; SET GLOBAL wsrep_sync_wait = 7; DROP TABLE t1; DROP TABLE t2; diff --git a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result index 8b1c4ebf83b..36340f505ff 100644 --- a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result +++ b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result @@ -2,11 +2,14 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); SET SESSION wsrep_on = FALSE; INSERT INTO t1 VALUES (2); +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 +connection node_1; SET GLOBAL wsrep_on = TRUE; INSERT INTO t1 VALUES (3); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 diff --git a/mysql-test/suite/galera/r/galera_wan.result b/mysql-test/suite/galera/r/galera_wan.result index 6be32b291e5..73100636a0d 100644 --- a/mysql-test/suite/galera/r/galera_wan.result +++ b/mysql-test/suite/galera/r/galera_wan.result @@ -1,9 +1,14 @@ SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 4 1 +connection node_1; CREATE TABLE t1 (f1 INTEGER); +connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; +connection node_3; INSERT INTO t1 VALUES (1); CALL mtr.add_suppression("There are no nodes in the same segment that will ever be able to become donors, yet there is a suitable donor outside"); +connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4; +connection node_4; SELECT VARIABLE_VALUE LIKE '%gmcast.segment = 3%' FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options'; VARIABLE_VALUE LIKE '%gmcast.segment = 3%' 1 diff --git a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result index e3f2fa4046f..ca388496794 100644 --- a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result +++ b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result @@ -16,6 +16,7 @@ VARIABLE_VALUE = 4 SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; VARIABLE_VALUE = 'Synced' 1 +connection node_2; SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE = 'Primary' 1 diff --git a/mysql-test/suite/galera/r/galera_zero_length_column.result b/mysql-test/suite/galera/r/galera_zero_length_column.result index 2e6119bd1ba..572d94d6756 100644 --- a/mysql-test/suite/galera/r/galera_zero_length_column.result +++ b/mysql-test/suite/galera/r/galera_zero_length_column.result @@ -1,9 
+1,11 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY , f2 VARCHAR(0)) ENGINE=InnoDB; CREATE TABLE t2 (f1 VARCHAR(0)) ENGINE=InnoDB; INSERT INTO t1 VALUES (1, NULL); INSERT INTO t1 VALUES (2, ''); INSERT INTO t2 VALUES (NULL); INSERT INTO t2 VALUES (''); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 @@ -25,6 +27,7 @@ f1 = '' UPDATE t1 SET f2 = '' WHERE f1 = 1; UPDATE t1 SET f2 = NULL WHERE f1 = 2; UPDATE t2 SET f1 = '' WHERE f1 IS NULL; +connection node_1; SELECT f2 = '' FROM t1 WHERE f1 = 1; f2 = '' 1 diff --git a/mysql-test/suite/galera/r/grant.result b/mysql-test/suite/galera/r/grant.result index 8d257e7e8e2..a2ca72ee8ec 100644 --- a/mysql-test/suite/galera/r/grant.result +++ b/mysql-test/suite/galera/r/grant.result @@ -3,6 +3,7 @@ # # On node_1 +connection node_1; GRANT SELECT ON *.* TO 'user_6266'@'localhost' IDENTIFIED BY 'pass'; # Now, try changing password for 'user_6266'. This command should also @@ -10,8 +11,11 @@ GRANT SELECT ON *.* TO 'user_6266'@'localhost' IDENTIFIED BY 'pass'; SET PASSWORD FOR 'user_6266'@'localhost' = PASSWORD('newpass'); # On node_2 +connection node_2; SELECT user FROM mysql.user WHERE user='user_6266'; user user_6266 DROP USER 'user_6266'@'localhost'; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/lp1276424.result b/mysql-test/suite/galera/r/lp1276424.result index 5f09ec9ea8b..363758e0d66 100644 --- a/mysql-test/suite/galera/r/lp1276424.result +++ b/mysql-test/suite/galera/r/lp1276424.result @@ -1,6 +1,7 @@ CREATE TABLE t1 (f1 INT DEFAULT NULL, UNIQUE KEY i1 (f1)) ENGINE=InnoDB; INSERT INTO t1 VALUES (NULL); INSERT INTO t1 VALUES (NULL); +connection node_2; SELECT COUNT(*) = 2 FROM t1; COUNT(*) = 2 1 diff --git a/mysql-test/suite/galera/r/lp1438990.result b/mysql-test/suite/galera/r/lp1438990.result index b53bc186953..d48d2435faa 100644 --- a/mysql-test/suite/galera/r/lp1438990.result +++ b/mysql-test/suite/galera/r/lp1438990.result @@ -1,3 +1,4 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); CREATE TABLE t2 (f1 INTEGER PRIMARY KEY); CREATE TABLE t3 (f1 INTEGER PRIMARY KEY); diff --git a/mysql-test/suite/galera/r/mdev_9290.result b/mysql-test/suite/galera/r/mdev_9290.result index cb2f0813333..276ab9e8ecb 100644 --- a/mysql-test/suite/galera/r/mdev_9290.result +++ b/mysql-test/suite/galera/r/mdev_9290.result @@ -2,13 +2,18 @@ # MDEV-9290 : InnoDB: Assertion failure in file trx0sys.cc line 353 # InnoDB: Failing assertion: xid_seqno > trx_sys_cur_xid_seqno # +connection node_1; CREATE TABLE t1 (i INT) ENGINE=InnoDB; +connection node_2; START TRANSACTION; INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (2); COMMIT; +connection node_1; SELECT * FROM t1; i 1 2 DROP TABLE t1; +disconnect node_2; +disconnect node_1; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#110.result b/mysql-test/suite/galera/r/mysql-wsrep#110.result index 551c3666fb4..6d4031d71cd 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#110.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#110.result @@ -1,3 +1,4 @@ +connection node_1; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); CREATE TABLE t2 (f1 INTEGER PRIMARY KEY); CREATE TABLE t3 (f1 INTEGER PRIMARY KEY); @@ -23,6 +24,7 @@ COUNT(*) = 1 SELECT COUNT(*) = 1 FROM t3; COUNT(*) = 1 1 +connection node_2; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 @@ -32,6 +34,7 @@ COUNT(*) = 1 SELECT COUNT(*) = 1 FROM t3; COUNT(*) = 1 1 +connection node_1; DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result 
b/mysql-test/suite/galera/r/mysql-wsrep#198.result index 25ba2cb615c..afc2a07fb30 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#198.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result @@ -1,16 +1,26 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB; +connection node_2; LOCK TABLE t2 WRITE; +connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2a; OPTIMIZE TABLE t1,t2;; +connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2; +connection node_2b; REPAIR TABLE t1,t2;; +connection node_2; SET SESSION wsrep_sync_wait = 0; +connection node_1; INSERT INTO t2 VALUES (1); +connection node_2; UNLOCK TABLES; +connection node_2a; Table Op Msg_type Msg_text test.t1 optimize note Table does not support optimize, doing recreate + analyze instead test.t1 optimize status OK test.t2 optimize note Table does not support optimize, doing recreate + analyze instead test.t2 optimize status OK +connection node_2b; Table Op Msg_type Msg_text test.t1 repair note The storage engine for the table doesn't support repair test.t2 repair note The storage engine for the table doesn't support repair diff --git a/mysql-test/suite/galera/r/mysql-wsrep#201.result b/mysql-test/suite/galera/r/mysql-wsrep#201.result index 1c0998e35ac..0f2980eddd2 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#201.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#201.result @@ -1,4 +1,5 @@ CREATE TABLE t1 (id INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB; INSERT INTO t1 VALUES (DEFAULT); +connection node_2; SET GLOBAL query_cache_size=1355776; SET SESSION wsrep_sync_wait = 7; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#237.result b/mysql-test/suite/galera/r/mysql-wsrep#237.result index 3fd9aed1480..19503dd5781 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#237.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#237.result @@ -1,10 +1,18 @@ CREATE TABLE t (f1 INTEGER PRIMARY KEY) Engine=InnoDB; +connection node_1; SET DEBUG_SYNC = 'wsrep_before_replication WAIT_FOR continue'; INSERT INTO t values (1);; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; SET SESSION wsrep_sync_wait = 0; +connection node_2; FLUSH TABLES; +connection node_1a; SELECT SLEEP(1); SLEEP(1) 0 SET DEBUG_SYNC= 'now SIGNAL continue'; +connection node_1; DROP TABLE t; +connection node_1a; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#247.result b/mysql-test/suite/galera/r/mysql-wsrep#247.result index 1b00f511f03..e59c6d1a299 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#247.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#247.result @@ -1,3 +1,4 @@ +connection node_1; SET GLOBAL wsrep_desync=1; SET wsrep_OSU_method=RSU; CREATE TABLE t1 (i int primary key); diff --git a/mysql-test/suite/galera/r/mysql-wsrep#31.result b/mysql-test/suite/galera/r/mysql-wsrep#31.result index a21bb3eccfd..973f11543fa 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#31.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#31.result @@ -1,10 +1,13 @@ +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; INSERT INTO t1 VALUES('test'); CREATE DATABASE db; +connection node_2; Shutting down server 2 ... Recovering server 2 ... Performing --wsrep-recover ... Restarting server ... Using --wsrep-start-position when starting mysqld ... 
+connection node_1; DROP TABLE t1; DROP DATABASE db; diff --git a/mysql-test/suite/galera/r/mysql-wsrep#33.result b/mysql-test/suite/galera/r/mysql-wsrep#33.result index 62af519ad32..fc647a2000d 100644 --- a/mysql-test/suite/galera/r/mysql-wsrep#33.result +++ b/mysql-test/suite/galera/r/mysql-wsrep#33.result @@ -1,4 +1,5 @@ Setting SST method to mysqldump ... +CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; SET GLOBAL wsrep_sst_method = 'mysqldump'; diff --git a/mysql-test/suite/galera/r/partition.result b/mysql-test/suite/galera/r/partition.result index 3907b4f08c9..15f0275a04c 100644 --- a/mysql-test/suite/galera/r/partition.result +++ b/mysql-test/suite/galera/r/partition.result @@ -13,10 +13,12 @@ SELECT * FROM t1; pk i # On node_1 +connection node_1; SELECT * FROM t1; pk i # On node_2 +connection node_2; SELECT * FROM t1; pk i DROP TABLE t1; @@ -26,6 +28,7 @@ DROP TABLE t1; # # On node_1 +connection node_1; CREATE TABLE test.t1 ( i INT UNSIGNED NOT NULL AUTO_INCREMENT, PRIMARY KEY (i) @@ -46,6 +49,7 @@ i 9 # On node_2 +connection node_2; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -70,15 +74,18 @@ i 9 # On node_1 +connection node_1; ALTER TABLE t1 TRUNCATE PARTITION p2; SELECT * FROM test.t1; i # On node_2 +connection node_2; SELECT * FROM test.t1; i # On node_1 +connection node_1; ALTER TABLE t1 DROP PARTITION p2; SHOW CREATE TABLE t1; Table Create Table @@ -91,6 +98,7 @@ t1 CREATE TABLE `t1` ( PARTITION pMax VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */ # On node_2 +connection node_2; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -104,11 +112,13 @@ DROP TABLE t1, p1; # # MDEV-5146: Bulk loads into partitioned table not working # +connection node_1; # Case 1: wsrep_load_data_splitting = ON & LOAD DATA with 20002 # entries. SET GLOBAL wsrep_load_data_splitting = ON; CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2; +connection node_2; SELECT COUNT(*) = 20002 FROM t1; COUNT(*) = 20002 1 @@ -116,9 +126,11 @@ wsrep_last_committed_diff 1 DROP TABLE t1; # Case 2: wsrep_load_data_splitting = ON & LOAD DATA with 101 entries. +connection node_1; SET GLOBAL wsrep_load_data_splitting = ON; CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2; +connection node_2; SELECT COUNT(*) = 101 FROM t1; COUNT(*) = 101 1 @@ -127,14 +139,19 @@ wsrep_last_committed_diff DROP TABLE t1; # Case 3: wsrep_load_data_splitting = OFF & LOAD DATA with 20002 # entries. +connection node_1; SET GLOBAL wsrep_load_data_splitting = OFF; CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2; +connection node_2; SELECT COUNT(*) = 20002 FROM t1; COUNT(*) = 20002 1 wsrep_last_committed_diff 1 DROP TABLE t1; +connection node_1; SET GLOBAL wsrep_load_data_splitting = 1;; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/rename.result b/mysql-test/suite/galera/r/rename.result index 3c81e8b6320..a7ec431657b 100644 --- a/mysql-test/suite/galera/r/rename.result +++ b/mysql-test/suite/galera/r/rename.result @@ -2,6 +2,7 @@ # MDEV-8598 : Failed MySQL DDL commands and Galera replication # # On node 1 +connection node_1; USE test; DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1(i INT) ENGINE=INNODB; @@ -13,6 +14,8 @@ i CREATE USER foo@localhost; GRANT SELECT on test.* TO foo@localhost; # Open connection to the 1st node using 'test_user1' user. 
+connect foo_node_1,localhost,foo,,test,$port_1,; +connection foo_node_1; SELECT * FROM t1; i 1 @@ -20,20 +23,24 @@ i RENAME TABLE t1 TO t2; ERROR 42000: DROP, ALTER command denied to user 'foo'@'localhost' for table 't1' # On node 2 +connection node_2; USE test; SELECT * FROM t1; i 1 # On node_1 +connection node_1; RENAME TABLE t1 TO t2; SHOW TABLES; Tables_in_test t2 # On node 2 +connection node_2; USE test; SELECT * FROM t2; i 1 +connection node_1; DROP USER foo@localhost; DROP TABLE t2; # End of tests diff --git a/mysql-test/suite/galera/r/rpl_row_annotate.result b/mysql-test/suite/galera/r/rpl_row_annotate.result index ff8d49702ac..23de06f015b 100644 --- a/mysql-test/suite/galera/r/rpl_row_annotate.result +++ b/mysql-test/suite/galera/r/rpl_row_annotate.result @@ -1,14 +1,18 @@ # On node_2 +connection node_2; RESET MASTER; # On node_1 +connection node_1; RESET MASTER; CREATE TABLE t1(i INT)ENGINE=INNODB; INSERT INTO t1 VALUES(1); DELETE FROM t1 WHERE i = 1; # On node_2 +connection node_2; INSERT INTO t1 VALUES(2); DELETE FROM t1 WHERE i = 2; # On node_1 +connection node_1; SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM ; Log_name Pos Event_type Server_id End_log_pos Info mysqld-bin.000001 # Gtid_list 1 # [] @@ -36,6 +40,7 @@ mysqld-bin.000001 # Table_map 2 # table_id: # (test.t1) mysqld-bin.000001 # Delete_rows_v1 2 # table_id: # flags: STMT_END_F mysqld-bin.000001 # Xid 2 # COMMIT /* xid= */ # On node_2 +connection node_2; SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM ; Log_name Pos Event_type Server_id End_log_pos Info mysqld-bin.000001 # Gtid_list 2 # [] @@ -63,4 +68,6 @@ mysqld-bin.000001 # Table_map 2 # table_id: # (test.t1) mysqld-bin.000001 # Delete_rows_v1 2 # table_id: # flags: STMT_END_F mysqld-bin.000001 # Xid 2 # COMMIT /* xid= */ DROP TABLE t1; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/sql_log_bin.result b/mysql-test/suite/galera/r/sql_log_bin.result index 237725ec9a7..8b208ff82d5 100644 --- a/mysql-test/suite/galera/r/sql_log_bin.result +++ b/mysql-test/suite/galera/r/sql_log_bin.result @@ -1,5 +1,6 @@ # On node_1 +connection node_1; USE test; CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=INNODB; INSERT INTO t1 VALUES (1); @@ -31,6 +32,7 @@ c1 2 # On node_2 +connection node_2; USE test; SELECT * FROM t1; c1 @@ -49,4 +51,6 @@ c1 1 2 DROP TABLE t1, t2, t3, t4; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/unique_key.result b/mysql-test/suite/galera/r/unique_key.result index ffb4f01c1f8..9f1fc858389 100644 --- a/mysql-test/suite/galera/r/unique_key.result +++ b/mysql-test/suite/galera/r/unique_key.result @@ -5,6 +5,7 @@ USE test; # On node_1 +connection node_1; CREATE TABLE t1(c1 INT DEFAULT NULL, UNIQUE KEY c1(c1)) ENGINE=INNODB; INSERT INTO t1 VALUES (NULL); INSERT INTO t1 VALUES (NULL); @@ -14,12 +15,14 @@ NULL NULL # On node_2 +connection node_2; SELECT * FROM test.t1; c1 NULL NULL # On node_1 +connection node_1; INSERT INTO t1 VALUES (1); UPDATE t1 SET c1=NULL WHERE c1=1; SELECT * FROM test.t1; @@ -29,6 +32,7 @@ NULL NULL # On node_2 +connection node_2; SELECT * FROM test.t1; c1 NULL @@ -36,12 +40,16 @@ NULL NULL # On node_1 +connection node_1; DELETE FROM t1 WHERE c1<=>NULL; SELECT * FROM test.t1; c1 # On node_2 +connection node_2; SELECT * FROM test.t1; c1 DROP TABLE t1; +disconnect node_2; +disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/view.result b/mysql-test/suite/galera/r/view.result index 06d7bf072e8..f8da811f9cc 100644 --- 
a/mysql-test/suite/galera/r/view.result +++ b/mysql-test/suite/galera/r/view.result @@ -8,6 +8,7 @@ DROP VIEW v1; # MDEV-8464 : ALTER VIEW not replicated in some cases # # On node_1 +connection node_1; USE test; CREATE TABLE t1(i INT) ENGINE=INNODB; CREATE DEFINER=CURRENT_USER VIEW v1 AS SELECT * FROM t1; @@ -15,6 +16,7 @@ CREATE ALGORITHM=MERGE VIEW v2 AS SELECT * FROM t1; CREATE ALGORITHM=TEMPTABLE VIEW v3 AS SELECT * FROM t1; CREATE ALGORITHM=UNDEFINED DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM t1; # On node_2 +connection node_2; USE test; SHOW CREATE VIEW v1; View Create View character_set_client collation_connection @@ -29,11 +31,13 @@ SHOW CREATE VIEW v4; View Create View character_set_client collation_connection v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS select `t1`.`i` AS `i` from `t1` latin1 latin1_swedish_ci # On node_1 +connection node_1; ALTER ALGORITHM=MERGE VIEW v1 AS SELECT * FROM t1; ALTER ALGORITHM=UNDEFINED VIEW v2 AS SELECT * FROM t1; ALTER DEFINER=CURRENT_USER VIEW v3 AS SELECT * FROM t1; ALTER ALGORITHM=TEMPTABLE DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM t1; # On node_2 +connection node_2; SHOW CREATE VIEW v1; View Create View character_set_client collation_connection v1 CREATE ALGORITHM=MERGE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`i` AS `i` from `t1` latin1 latin1_swedish_ci diff --git a/mysql-test/suite/galera/t/mysql-wsrep#237.test b/mysql-test/suite/galera/t/mysql-wsrep#237.test index 7a65cb52ae9..f2dd6bce711 100644 --- a/mysql-test/suite/galera/t/mysql-wsrep#237.test +++ b/mysql-test/suite/galera/t/mysql-wsrep#237.test @@ -29,3 +29,5 @@ SET DEBUG_SYNC= 'now SIGNAL continue'; --reap DROP TABLE t; +--connection node_1a +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result index 1b80f5ef695..ea4e7896066 100644 --- a/mysql-test/suite/maria/maria.result +++ b/mysql-test/suite/maria/maria.result @@ -554,11 +554,11 @@ a explain select sql_big_result distinct t1.a from t1,t2 order by t2.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 1 Using temporary -1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index; Distinct +1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index explain select distinct t1.a from t1,t2 order by t2.a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 1 Using temporary -1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index; Distinct +1 SIMPLE t2 index NULL PRIMARY 4 NULL 2 Using index drop table t1,t2; create table t1 ( c1 varchar(32), diff --git a/mysql-test/suite/rpl/r/rpl_begin_commit_rollback.result b/mysql-test/suite/rpl/r/rpl_begin_commit_rollback.result index 5e16befa87d..889db65d7ef 100644 --- a/mysql-test/suite/rpl/r/rpl_begin_commit_rollback.result +++ b/mysql-test/suite/rpl/r/rpl_begin_commit_rollback.result @@ -116,7 +116,6 @@ include/wait_for_slave_sql_to_start.inc # SAVEPOINT and ROLLBACK TO have the same problem in BUG#43263 # This was reported by BUG#50407 connection master; -SET SESSION AUTOCOMMIT=0 BEGIN; INSERT INTO db1.t1 VALUES(20); # @@ -164,7 +163,6 @@ master-bin.000001 # Query # # use `db1`; INSERT INTO db1.t2 VALUES("after rollba master-bin.000001 # Query # # use `db1`; INSERT INTO db1.t1 VALUES(50) master-bin.000001 # Xid # # COMMIT /* XID */ connection slave; -[on slave] # # Verify INSERT statements in savepoints are executed, for MyISAM table # is not effected by 
ROLLBACK TO diff --git a/mysql-test/suite/rpl/r/rpl_upgrade_master_info.result b/mysql-test/suite/rpl/r/rpl_upgrade_master_info.result new file mode 100644 index 00000000000..3e737267fbd --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_upgrade_master_info.result @@ -0,0 +1,102 @@ +include/master-slave.inc +[connection master] +*** MDEV-9383: Server fails to read master.info after upgrade 10.0 -> 10.1 *** +connection slave; +include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=CURRENT_POS; +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1; +a +1 +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +INSERT INTO t1 VALUES (2); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +INSERT INTO t1 VALUES (3); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +INSERT INTO t1 VALUES (4); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +INSERT INTO t1 VALUES (5); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +5 +include/stop_slave.inc +include/rpl_stop_server.inc [server_number=2] +include/rpl_start_server.inc [server_number=2] +connection master; +INSERT INTO t1 VALUES (6); +include/save_master_gtid.inc +connection slave; +CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; +a +1 +2 +3 +4 +5 +6 +connection master; +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_begin_commit_rollback.test b/mysql-test/suite/rpl/t/rpl_begin_commit_rollback.test index 347e88412c1..f0ed5dc0123 100644 --- a/mysql-test/suite/rpl/t/rpl_begin_commit_rollback.test +++ b/mysql-test/suite/rpl/t/rpl_begin_commit_rollback.test @@ -129,7 +129,6 @@ source include/wait_for_slave_sql_to_start.inc; --echo # SAVEPOINT and ROLLBACK TO have the same problem in BUG#43263 --echo # This was reported by BUG#50407 connection master; -echo SET SESSION AUTOCOMMIT=0; let $binlog_start=query_get_value(SHOW MASTER STATUS, Position, 1); BEGIN; @@ -157,7 +156,6 @@ COMMIT; source 
include/show_binlog_events.inc; sync_slave_with_master; ---echo [on slave] --echo # --echo # Verify INSERT statements in savepoints are executed, for MyISAM table --echo # is not effected by ROLLBACK TO diff --git a/mysql-test/suite/rpl/t/rpl_upgrade_master_info.test b/mysql-test/suite/rpl/t/rpl_upgrade_master_info.test new file mode 100644 index 00000000000..e81e7c0d714 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_upgrade_master_info.test @@ -0,0 +1,163 @@ +--source include/master-slave.inc + +--echo *** MDEV-9383: Server fails to read master.info after upgrade 10.0 -> 10.1 *** + +--connection slave +--source include/stop_slave.inc +CHANGE MASTER TO master_use_gtid=CURRENT_POS; +--let $datadir= `SELECT @@datadir` + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +CREATE TABLE t1 (a INT PRIMARY KEY); +INSERT INTO t1 VALUES (1); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1; + +--source include/stop_slave.inc + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad2_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +INSERT INTO t1 VALUES (2); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; + +--source include/stop_slave.inc + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad3_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +INSERT INTO t1 VALUES (3); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; + +--source include/stop_slave.inc + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad4_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +INSERT INTO t1 VALUES (4); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. 
+--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; + +--source include/stop_slave.inc + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad5_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +INSERT INTO t1 VALUES (5); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; + +--source include/stop_slave.inc + +--let $rpl_server_number= 2 +--source include/rpl_stop_server.inc + +--remove_file $datadir/master.info +--copy_file $MYSQL_TEST_DIR/std_data/bad6_master.info $datadir/master.info + +--let $rpl_server_number= 2 +--source include/rpl_start_server.inc + +--source include/wait_until_connected_again.inc + +--connection master +INSERT INTO t1 VALUES (6); +--source include/save_master_gtid.inc + +--connection slave +# Fix the port after we replaced master.info. +--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1 +eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1; +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t1 ORDER BY a; + + +# Cleanup +--connection master +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/mysql-test/t/connect2.test b/mysql-test/t/connect2.test index b4614a65a91..9d2a438aa0a 100644 --- a/mysql-test/t/connect2.test +++ b/mysql-test/t/connect2.test @@ -7,7 +7,7 @@ call mtr.add_suppression("Allocation failed"); SET @old_debug= @@session.debug; set @old_thread_cache_size=@@global.thread_cache_size; - +set @@global.thread_cache_size=0; # Test connections to the connect(con1,localhost,root,,test,,); @@ -32,7 +32,6 @@ select 1; disconnect con1; # Test connections to the extra port. 
- connect(con1,localhost,root,,test,$MASTER_EXTRA_PORT,); select 1; disconnect con1; diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 5a6e07e0c0c..e3164f53887 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -434,3 +434,17 @@ with t(f1,f1) as (select * from t1 where b >= 'c') select t1.b from t2,t1 where t1.a = t2.c; drop table t1,t2; + +--echo # +--echo # Bug mdev-9937: View used in the specification of with table +--echo # refers to the base table with the same name +--echo # + +create table t1 (a int); +insert into t1 values (20), (30), (10); +create view v1 as select * from t1 where a > 10; + +with t1 as (select * from v1) select * from t1; + +drop view v1; +drop table t1; diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test new file mode 100644 index 00000000000..34eee6d3bf2 --- /dev/null +++ b/mysql-test/t/cte_recursive.test @@ -0,0 +1,241 @@ +create table t1 (a int, b varchar(32)); +insert into t1 values +(4,'aaaa' ), (7,'bb'), (1,'ccc'), (4,'dd'); +insert into t1 values +(3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); + +--ERROR 1984 +with recursive +a1(a,b) as +(select * from t1 where t1.a>3 +union +select * from b1 where b1.a >3 +union +select * from c1 where c1.a>3), +b1(a,b) as +(select * from a1 where a1.b > 'ccc' +union +select * from c1 where c1.b > 'ddd'), +c1(a,b) as +(select * from a1 where a1.a<6 and a1.b< 'zz' +union +select * from b1 where b1.b > 'auu') +select * from c1; + +drop table t1; + +create table folks(id int, name char(32), dob date, father int, mother int); + +insert into folks values +(100, 'Vasya', '2000-01-01', 20, 30), +(20, 'Dad', '1970-02-02', 10, 9), +(30, 'Mom', '1975-03-03', 8, 7), +(10, 'Grandpa Bill', '1940-04-05', null, null), +(9, 'Grandma Ann', '1941-10-15', null, null), +(25, 'Uncle Jim', '1968-11-18', 8, 7), +(98, 'Sister Amy', '2001-06-20', 20, 30), +(8, 'Grandma Sally', '1943-08-23', 5, 6), +(6, 'Grandgrandma Martha', '1923-05-17', null, null), +(67, 'Cousin Eddie', '1992-02-28', 25, 27), +(27, 'Auntie Melinda', '1971-03-29', null, null); + + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +ancestors +as +( + select p.* + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother + union + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Cousin Eddie' + union + select p.* + from folks as p, ancestors as a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' or name='Sister Amy' + union + select p.* + from folks as p, ancestors as a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +with recursive +prev_gen +as +( + select folks.* + from folks, prev_gen + where folks.id=prev_gen.father or folks.id=prev_gen.mother + union + select * + from folks + where name='Vasya' +), +ancestors +as +( + select * + from folks + where name='Vasya' + union + select * + from ancestors + union + select * + from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; + + +with recursive +descendants +as +( + select * + from folks + where name = 'Grandpa 
Bill' + union + select folks.* + from folks, descendants as d + where d.id=folks.father or d.id=folks.mother +) +select * from descendants; + +with recursive +descendants +as +( + select * + from folks + where name = 'Grandma Sally' + union + select folks.* + from folks, descendants as d + where d.id=folks.father or d.id=folks.mother +) +select * from descendants; + + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father OR p.id = a.mother +) +select * + from ancestors t1, ancestors t2 + where exists (select * from ancestors a + where a.father=t1.id AND a.mother=t2.id); + +with +ancestor_couples(husband, h_dob, wife, w_dob) +as +( +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father OR p.id = a.mother +) +select t1.name, t1.dob, t2.name, t2.dob + from ancestors t1, ancestors t2 + where exists (select * from ancestors a + where a.father=t1.id AND a.mother=t2.id) +) +select * from ancestor_couples; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.* + from folks as p, ancestors AS a + where p.id = a.father + union + select p.* + from folks as p, ancestors AS a + where p.id = a.mother +) +select * from ancestors; + +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id + union + select h.*, w.* + from folks v, folks h, folks w + where v.name = 'Vasya' and + (v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select h_id, h_name, h_dob, h_father, h_mother + from ancestor_couples + union + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + +drop table folks; + diff --git a/mysql-test/t/ctype_eucjpms.test b/mysql-test/t/ctype_eucjpms.test index d533e38b2a2..b5bd92d1d07 100644 --- a/mysql-test/t/ctype_eucjpms.test +++ b/mysql-test/t/ctype_eucjpms.test @@ -566,3 +566,19 @@ DROP TABLE t1; --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # End of 10.2 tests +--echo # + +--echo # +--echo # MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +--echo # +CREATE TABLE t1 (a TEXT CHARACTER SET eucjpms); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.ujis.txt' INTO TABLE t1 CHARACTER SET eucjpms IGNORE 4 LINES; +SELECT HEX(a) FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/mysql-test/t/ctype_gbk_export_import.test b/mysql-test/t/ctype_gbk_export_import.test new file mode 100644 index 00000000000..02d18fe60d4 --- /dev/null +++ b/mysql-test/t/ctype_gbk_export_import.test @@ -0,0 +1,161 @@ +--source include/not_windows.inc +--source include/have_case_sensitive_file_system.inc +--source include/have_gbk.inc + +# +# Check if we're running on a POSIX-locale machine +# + +--disable_query_log +--exec locale -a > $MYSQLTEST_VARDIR/tmp/locale_a_gbk.output 2>/dev/null || true +SET @file=REPLACE(LOAD_FILE('../../tmp/locale_a_gbk.output'), '-', ''); +# Note, file content must be case sensitive. 
See mysql_locale_posix.test +--remove_file $MYSQLTEST_VARDIR/tmp/locale_a_gbk.output +if (`SELECT (IFNULL(@file,'') NOT LIKE '%\nzh_CN.gbk\n%')`) +{ + Skip Need POSIX locale zh_CN.gbk; +} +--enable_query_log + + +--disable_warnings +DROP DATABASE IF EXISTS gbk; +--enable_warnings + +CREATE DATABASE gbk DEFAULT CHARACTER SET gbk; +USE gbk; + +CREATE TABLE t1 ( + id INT NOT NULL, + a1 TEXT NOT NULL, + a2 TEXT CHARACTER SET utf8 NOT NULL, + b1 BLOB NOT NULL, + eol TEXT NOT NULL); + +DELIMITER |; +CREATE PROCEDURE populate() +BEGIN + TRUNCATE TABLE t1; + INSERT INTO t1 SET id=1, a1=0xEE5C, a2=_gbk 0xEE5C, b1=0xEE5C, eol='$'; + INSERT INTO t1 SET id=2, a1=0xEE5C5C, a2=_gbk 0xEE5C5C, b1=0xEE5C5C, eol='$'; +END| + +CREATE FUNCTION cmt(id INT, field_name TEXT, field_value BLOB) + RETURNS TEXT CHARACTER SET utf8 +BEGIN + DECLARE comment TEXT CHARACTER SET utf8; + DECLARE expected_value_01 BLOB; + DECLARE expected_value_02 BLOB; + SET comment= CASE field_name WHEN 'a1' THEN 'TEXT-GBK' WHEN 'a2' THEN 'TEXT-UTF8' WHEN 'b1' THEN 'BLOB' ELSE '' END; + SET expected_value_01= CASE field_name WHEN 'a1' THEN 0xEE5C WHEN 'a2' THEN 0xE9A0AB WHEN 'b1' THEN 0xEE5C ELSE '' END; + SET expected_value_02= CASE field_name WHEN 'a1' THEN 0xEE5C5C WHEN 'a2' THEN 0xE9A0AB5C WHEN 'b1' THEN 0xEE5C5C ELSE '' END; + RETURN IF(CASE id + WHEN 1 THEN expected_value_01 + WHEN 2 THEN expected_value_02 + ELSE '' + END <> field_value, + CONCAT('BAD-', comment), ''); +END| + +CREATE FUNCTION display_file(file BLOB) RETURNS TEXT CHARACTER SET utf8 +BEGIN + SET file=REPLACE(file, 0x09, '----'); + SET file=REPLACE(file, 0x0A, '++++'); + RETURN REPLACE(REPLACE(HEX(file), '2D2D2D2D','-'), '2B2B2B2B','|'); +END| + +DELIMITER ;| + +CREATE VIEW v1 AS +SELECT + id, + CONCAT(RPAD(HEX(a1),50,' '), cmt(id, 'a1', a1)) AS a1, + CONCAT(RPAD(HEX(a2),50,' '), cmt(id, 'a2', a2)) AS a2, + CONCAT(RPAD(HEX(b1),50,' '), cmt(id, 'b1', b1)) AS b1, + CONCAT(RPAD(HEX(eol),50,' '), IF(eol<>'$','BAD-EOL','')) AS eol, + '---' AS `---` +FROM t1; +SHOW CREATE TABLE t1; + +--echo # +--echo # Dump using SELECT INTO OUTFILE +--echo # + +--perl +my $dir= $ENV{'MYSQL_TMP_DIR'}; +open (my $FILE, '>', "$dir/tmpgbk.inc") or die "open(): $!"; +for $LOCALE ("zh_CN.gbk") { +for $DUMP_OPTIONS ("--default-character-set=auto", "--default-character-set=gbk","--default-character-set=utf8") { +for $DUMP_CHARSET_CLAUSE ("", "CHARACTER SET gbk", "CHARACTER SET utf8", "CHARACTER SET binary") { +for $RESTORE_OPTIONS ("--default-character-set=auto", "--default-character-set=gbk","--default-character-set=utf8") { +for $RESTORE_CHARSET_CLAUSE ("", "CHARACTER SET gbk", "CHARACTER SET utf8", "CHARACTER SET binary") { +print $FILE <', "$dir/tmpgbk.inc") or die "open(): $!"; +for $LOCALE ("zh_CN.gbk") { +for $DUMP_OPTIONS ("--default-character-set=binary","--default-character-set=gbk","--default-character-set=utf8") { +for $RESTORE_OPTIONS ("--default-character-set=auto","--default-character-set=binary","--default-character-set=gbk","--default-character-set=utf8") { +for $RESTORE_CHARSET_CLAUSE ("", "CHARACTER SET gbk", "CHARACTER SET utf8", "CHARACTER SET binary") { +print $FILE <'; +SELECT HEX(a) FROM t1; +DROP TABLE t1; + --echo # --echo # End of 10.2 tests --echo # diff --git a/mysql-test/t/ctype_utf8mb4.test b/mysql-test/t/ctype_utf8mb4.test index 2fe9b5e6544..74e39a80e5b 100644 --- a/mysql-test/t/ctype_utf8mb4.test +++ b/mysql-test/t/ctype_utf8mb4.test @@ -1919,3 +1919,20 @@ DROP FUNCTION f1; --echo # --echo # End of 10.1 tests --echo # + + +--echo # +--echo # End of 10.2 tests 
+--echo # + +--echo # +--echo # MDEV-9842 LOAD DATA INFILE does not work well with a TEXT column when using sjis +--echo # +CREATE TABLE t1 (a TEXT CHARACTER SET utf8mb4); +LOAD DATA INFILE '../../std_data/loaddata/mdev9823.utf8mb4.txt' INTO TABLE t1 CHARACTER SET utf8mb4 IGNORE 4 LINES; +SELECT HEX(a) FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index 85f28de128b..dea16c84a51 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -1,5 +1,12 @@ # delayed works differently in embedded server --source include/not_embedded.inc +# Don't test this under valgrind, memory leaks will occur +--source include/not_valgrind.inc +# Avoid CrashReporter popup on Mac +--source include/not_crashrep.inc +# Binary must be compiled with debug for crash to occur +--source include/have_debug_sync.inc + # # test of DELAYED insert and timestamps # (Can't be tested with purify :( ) @@ -601,3 +608,37 @@ disconnect con1; --source include/wait_until_disconnected.inc connection default; drop tables tm, t1, t2; + +--echo # +--echo # MDEV-9621 INSERT DELAYED fails on insert for tables with many columns +--echo # + +CREATE TABLE t1 ( + a int,b int,c int,d int,e int,f int,g int,h int,i int,j int,k int,l int,m int,n int,o int,p int,q int,r int,s int,t int,u int,v int,x int,y int,z int +) ENGINE=MyISAM; + +INSERT DELAYED INTO t1 (a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,x,y,z) +values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +INSERT DELAYED INTO t1 (a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,x,y,z) +values (1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1); +drop table t1; + +--echo # +--echo # INSERT DELAYED hangs if table was crashed +--echo # + +create table t1 (a int, b int) engine=myisam; +insert into t1 values (1,1); + +# Will come back with t1 crashed. 
+--source include/crash_mysqld.inc + +call mtr.add_suppression(" marked as crashed and should be repaired"); +call mtr.add_suppression("Checking table"); + +--replace_result '\\' '/' +insert delayed into t1 values (2,2); +insert delayed into t1 values (3,3); +flush tables t1; +select * from t1; +drop table t1; diff --git a/mysql-test/t/func_analyse.test b/mysql-test/t/func_analyse.test index c77967a0cc9..6c30c0ca630 100644 --- a/mysql-test/t/func_analyse.test +++ b/mysql-test/t/func_analyse.test @@ -11,7 +11,7 @@ insert into t1 values (1,2,"","Y","2002-03-03"), (3,4,"","N","2002-03-04"), (5,6 select count(*) from t1 procedure analyse(); select * from t1 procedure analyse(); select * from t1 procedure analyse(2); ---error ER_WRONG_USAGE +--error ER_PARSE_ERROR create table t2 select * from t1 procedure analyse(); drop table t1; @@ -127,7 +127,7 @@ CREATE TABLE t1(a INT); INSERT INTO t1 VALUES (1),(2); --echo # should not crash ---error ER_WRONG_USAGE +--error ER_PARSE_ERROR CREATE TABLE t2 SELECT 1 FROM t1, t1 t3 GROUP BY t3.a PROCEDURE ANALYSE(); DROP TABLE t1; @@ -157,3 +157,27 @@ SELECT * FROM t2 LIMIT 1 PROCEDURE ANALYSE(); DROP TABLE t1, t2; --echo End of 5.1 tests + +--echo # +--echo # Start of 10.2 tests +--echo # +(SELECT 1 FROM DUAL PROCEDURE ANALYSE()); +((SELECT 1 FROM DUAL PROCEDURE ANALYSE())); + +# TODO: +--error ER_WRONG_USAGE +SELECT * FROM t1 UNION SELECT * FROM t1 PROCEDURE analyse(); + +--echo # +--echo # MDEV-10030 sql_yacc.yy: Split table_expression and remove PROCEDURE from create_select, select_paren_derived, select_derived2, query_specification +--echo # + +--error ER_PARSE_ERROR +SELECT * FROM (SELECT * FROM t1 PROCEDURE ANALYSE()); +--ERROR ER_PARSE_ERROR +SELECT * FROM t1 NATURAL JOIN (SELECT * FROM t2 PROCEDURE ANALYSE()); + +--error ER_PARSE_ERROR +SELECT (SELECT 1 FROM t1 PROCEDURE ANALYSE()) FROM t2; +--error ER_PARSE_ERROR +SELECT ((SELECT 1 FROM t1 PROCEDURE ANALYSE())) FROM t2; diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index fe9308cd20a..f0007186ab2 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -996,8 +996,33 @@ EXPLAIN SELECT a FROM t1 WHERE a < 2 ORDER BY a; EXPLAIN SELECT a FROM t1 WHERE a < 2 GROUP BY a; EXPLAIN SELECT a FROM t1 IGNORE INDEX (PRIMARY,i2); EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR JOIN (PRIMARY,i2); + +--echo # +--echo # For this explain, the query plan is weird: if we are using +--echo # the primary key for reasons other than doing grouping, can't +--echo # GROUP BY code take advantage of this? Well, currently it doesnt: EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR GROUP BY (PRIMARY,i2) GROUP BY a; +--echo # Here's a proof it is really doing sorting: +flush status; +--disable_result_log +SELECT a FROM t1 IGNORE INDEX FOR GROUP BY (PRIMARY,i2) GROUP BY a; +--enable_result_log +show status like 'Sort_%'; +--echo # Proof ends. +--echo # + +--echo # For this explain, the query plan is weird: if we are using +--echo # the primary key for reasons other than doing sorting, can't +--echo # ORDER BY code take advantage of this? Well, currently it doesnt: EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; +--echo # Here's a proof it is really doing sorting: +flush status; +--disable_result_log +SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; +--enable_result_log +show status like 'Sort_%'; +--echo # Proof ends. 
+--echo # SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY,i2) ORDER BY a; EXPLAIN SELECT a FROM t1 IGNORE INDEX FOR ORDER BY (PRIMARY) IGNORE INDEX FOR GROUP BY (i2) GROUP BY a; @@ -1793,3 +1818,13 @@ from t1 group by t1.b; drop table t0,t1,t2; + +--echo # +--echo # MDEV-9602 crash in st_key::actual_rec_per_key when group by constant +--echo # + +create table t1 (a date not null,unique (a)) engine=innodb; +select distinct a from t1 group by 'a'; +insert into t1 values("2001-02-02"),("2001-02-03"); +select distinct a from t1 group by 'a'; +drop table t1; diff --git a/mysql-test/t/limit_rows_examined.test b/mysql-test/t/limit_rows_examined.test index 45ee483c7aa..382530234be 100644 --- a/mysql-test/t/limit_rows_examined.test +++ b/mysql-test/t/limit_rows_examined.test @@ -305,7 +305,6 @@ select c1, sum(c2) from t3 group by c1; explain select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 0; select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 0; ---error 1028 select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 1; select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 20; select c1, sum(c2) from t3 group by c1 LIMIT ROWS EXAMINED 21; @@ -321,7 +320,6 @@ insert into t3i values explain select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 0; select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 0; ---error 1028 select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 1; select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 20; select c1, sum(c2) from t3i group by c1 LIMIT ROWS EXAMINED 21; @@ -432,7 +430,7 @@ drop table t1,t2,t1i,t2i; SET @@optimizer_switch='in_to_exists=on,outer_join_with_cache=on'; CREATE TABLE t1 ( a VARCHAR(3) ) ENGINE=MyISAM; -INSERT INTO t1 VALUES ('USA'); +INSERT INTO t1 VALUES ('USA'),('CAN'); CREATE TABLE t2 ( b INT ); INSERT INTO t2 VALUES (3899),(3914),(3888); diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index 9ac49a9063d..a454fa25ac4 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -552,6 +552,8 @@ insert t2 select * from t1; checksum table t1, t2, t3 quick; checksum table t1, t2, t3; checksum table t1, t2, t3 extended; +alter table t1 add d int default 30, add e bigint default 300000, add f decimal(30) default 442; +checksum table t1; #show table status; drop table t1,t2; diff --git a/mysql-test/t/order_by_optimizer.test b/mysql-test/t/order_by_optimizer.test new file mode 100644 index 00000000000..a4c134afec9 --- /dev/null +++ b/mysql-test/t/order_by_optimizer.test @@ -0,0 +1,34 @@ +--disable_warnings +drop table if exists t0,t1,t2,t3; + +--enable_warnings +--echo # +--echo # MDEV-7885: EXPLAIN shows wrong info for ORDER BY query +--echo # +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1(a int); +insert into t1 select A.a + B.a* 10 + C.a * 100 from t0 A, t0 B, t0 C; + +create table t2 (key1 int, col1 int, key(key1)); +insert into t2 select a,a from t0; +insert into t2 select 15,15 from t1; +alter table t2 add key2 int, add key(key2); +--echo # This must show "Using filesort": +explain +select * from t2 ignore index for order by (key1) where col1<0 order by key1 limit 10; + +drop table t0, t1, t2; + +--echo # +--echo # MDEV-8857: [Upstream too] EXPLAIN incorrectly shows Distinct for tables using join buffer +--echo # +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (a int, filler char(200), key(a)); +insert into t1 select A.a + B.a* 10, 'AAAAAAAAAAAAAAAAAAAA' 
from t0 A, t0 B where B.a in (0,1); +explain select distinct A.a from t0 A, t1 B where A.a+B.a> 0; + +drop table t0, t1; + diff --git a/mysql-test/t/parser.test b/mysql-test/t/parser.test index 0a19b03a4eb..86cc3c47c37 100644 --- a/mysql-test/t/parser.test +++ b/mysql-test/t/parser.test @@ -806,7 +806,7 @@ SELECT 1 FROM (SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 FOR UPDATE) a; ---error ER_WRONG_USAGE +--error ER_PARSE_ERROR SELECT 1 FROM (SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 PROCEDURE ANALYSE() FOR UPDATE) a; @@ -815,7 +815,7 @@ SELECT 1 FROM t1 WHERE EXISTS(SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 FOR UPDATE); ---error ER_WRONG_USAGE +--error ER_PARSE_ERROR SELECT 1 FROM t1 WHERE EXISTS(SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 PROCEDURE ANALYSE() FOR UPDATE); diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 7a7c01e78bd..5313a4b8a39 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -51,7 +51,7 @@ SELECT * FROM (SELECT 1 as id) b WHERE id IN (SELECT * FROM (SELECT 1 as id) c O SELECT * FROM (SELECT 1) a WHERE 1 IN (SELECT 1,1); SELECT 1 IN (SELECT 1); SELECT 1 FROM (SELECT 1 as a) b WHERE 1 IN (SELECT (SELECT a)); --- error ER_WRONG_USAGE +-- error ER_PARSE_ERROR select (SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE(1)); -- error ER_PARSE_ERROR SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE((SELECT 1)); diff --git a/mysql-test/t/win.test b/mysql-test/t/win.test new file mode 100644 index 00000000000..09ddf41b4f0 --- /dev/null +++ b/mysql-test/t/win.test @@ -0,0 +1,1203 @@ +# +# Window Functions Tests +# + +--disable_warnings +drop table if exists t1,t2; +drop view if exists v1; +--enable_warnings + +--echo # ######################################################################## +--echo # # Parser tests +--echo # ######################################################################## +--echo # +--echo # Check what happens when one attempts to use window function without OVER clause +create table t1 (a int, b int); +insert into t1 values (1,1),(2,2); + +--error ER_PARSE_ERROR +select row_number() from t1; +--error ER_PARSE_ERROR +select rank() from t1; + +--echo # Attempt to use window function in the WHERE clause +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select * from t1 where 1=rank() over (order by a); +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select * from t1 where 1>row_number() over (partition by b order by a); +drop table t1; + +--echo # ######################################################################## +--echo # # Functionality tests +--echo # ######################################################################## +--echo # +--echo # Check if ROW_NUMBER() works in basic cases +create table t1(a int, b int, x char(32)); +insert into t1 values (2, 10, 'xx'); +insert into t1 values (2, 10, 'zz'); +insert into t1 values (2, 20, 'yy'); +insert into t1 values (3, 10, 'xxx'); +insert into t1 values (3, 20, 'vvv'); + +--sorted_result +select a, row_number() over (partition by a order by b) from t1; + +select a, b, x, row_number() over (partition by a order by x) from t1; + +drop table t1; + +create table t1 (pk int primary key, a int, b int); +insert into t1 values + (1, 10, 22), + (2, 11, 21), + (3, 12, 20), + (4, 13, 19), + (5, 14, 18); + +select + pk, a, b, + row_number() over (order by a), + row_number() over (order by b) +from t1; + +drop table t1; + +--echo # +--echo # Try RANK() function +--echo # +create table t2 ( + pk int primary key, + a 
int +); + +insert into t2 values +( 1 , 0), +( 2 , 0), +( 3 , 1), +( 4 , 1), +( 8 , 2), +( 5 , 2), +( 6 , 2), +( 7 , 2), +( 9 , 4), +(10 , 4); + +select pk, a, rank() over (order by a) from t2; +select pk, a, rank() over (order by a desc) from t2; + +drop table t2; + +--echo # +--echo # Try Aggregates as window functions. With frames. +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; + +select + pk, c, + count(*) over (partition by c order by pk + rows between 2 preceding and 2 following) as CNT +from t1; + +select + pk, c, + count(*) over (partition by c order by pk + rows between 1 preceding and 2 following) as CNT +from t1; + +select + pk, c, + count(*) over (partition by c order by pk + rows between 2 preceding and current row) as CNT +from t1; + +select + pk,c, + count(*) over (partition by c order by pk rows + between 1 following and 2 following) as CNT +from t1; + +select + pk,c, + count(*) over (partition by c order by pk rows + between 2 preceding and 1 preceding) as CNT +from t1; + +select + pk, c, + count(*) over (partition by c order by pk + rows between current row and 1 following) as CNT +from t1; + +--echo # Check ORDER BY DESC +select + pk, c, + count(*) over (partition by c order by pk desc + rows between 2 preceding and 2 following) as CNT +from t1; + +drop table t0,t1; + +--echo # +--echo # Resolution of window names +--echo # + +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; + +select + pk, c, + count(*) over w1 as CNT +from t1 +window w1 as (partition by c order by pk + rows between 2 preceding and 2 following); + +select + pk, c, + count(*) over (w1 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c order by pk); + +select + pk, c, + count(*) over (w1 order by pk rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c); + +select + pk, c, + count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 order by pk); + +select + pk, c, + count(*) over w3 as CNT +from t1 +window + w1 as (partition by c), + w2 as (w1 order by pk), + w3 as (w2 rows between 2 preceding and 2 following); + +--error ER_WRONG_WINDOW_SPEC_NAME +select + pk, c, + count(*) over w as CNT +from t1 +window w1 as (partition by c order by pk + rows between 2 preceding and 2 following); + +--error ER_DUP_WINDOW_NAME +select + pk, c, + count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w1 as (order by pk); + +--error ER_WRONG_WINDOW_SPEC_NAME +select + pk, c, + count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w partition by c order by pk); + +--error ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC +select + pk, c, + count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 partition by c order by pk); + +--error ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC +select + pk, c, + count(*) over (w2 rows between 2 preceding and 2 following) as CNT +from t1 +window w1 as (partition by c order by pk), w2 as (w1 order by pk); + +--error 
ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC +select + pk, c, + count(*) over w3 as CNT +from t1 +window + w1 as (partition by c), + w2 as (w1 order by pk rows between 3 preceding and 2 following), + w3 as (w2 rows between 2 preceding and 2 following); + +--error ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS +select + pk, c, + count(*) over w1 as CNT +from t1 +window w1 as (partition by c order by pk + rows between unbounded following and 2 following); + +--error ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS +select + pk, c, + count(*) over (w1 rows between 2 preceding and unbounded preceding) as CNT +from t1 +window w1 as (partition by c order by pk); + +--error ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS +select + pk, c, + count(*) over (w1 order by pk rows between current row and 2 preceding) as CNT +from t1 +window w1 as (partition by c); + +--error ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS +select + pk, c, + count(*) over (w2 rows between 2 following and current row) as CNT +from t1 +window w1 as (partition by c), w2 as (w1 order by pk); + +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select + pk, c +from t1 where rank() over w1 > 2 +window w1 as (partition by c order by pk); + +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select + c, max(pk) as m +from t1 + group by c + rank() over w1 +window w1 as (order by m); + +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select + c, max(pk) as m, rank() over w1 as r +from t1 + group by c+r +window w1 as (order by m); + +--error ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION +select + c, max(pk) as m, rank() over w1 as r +from t1 + group by c having c+r > 3 +window w1 as (order by m); + +--error ER_WINDOW_FUNCTION_IN_WINDOW_SPEC +select + c, max(pk) as m, rank() over w1 as r, + rank() over (partition by r+1 order by m) +from t1 + group by c +window w1 as (order by m); + +--error ER_WINDOW_FUNCTION_IN_WINDOW_SPEC +select + c, max(pk) as m, rank() over w1 as r, + rank() over (partition by m order by r) +from t1 + group by c +window w1 as (order by m); + +--error ER_WINDOW_FUNCTION_IN_WINDOW_SPEC +select + c, max(pk) as m, rank() over w1 as r, dense_rank() over w2 as dr +from t1 + group by c +window w1 as (order by m), w2 as (partition by r order by m); + +--error ER_NOT_ALLOWED_WINDOW_FRAME +select + pk, c, + row_number() over (partition by c order by pk + range between unbounded preceding and current row) as r +from t1; + +--error ER_NOT_ALLOWED_WINDOW_FRAME +select + pk, c, + rank() over w1 as r +from t1 +window w1 as (partition by c order by pk + rows between 2 preceding and 2 following); + +--error ER_NOT_ALLOWED_WINDOW_FRAME +select + pk, c, + dense_rank() over (partition by c order by pk + rows between 1 preceding and 1 following) as r +from t1; + +--error ER_NO_ORDER_LIST_IN_WINDOW_SPEC +select + pk, c, + rank() over w1 as r +from t1 +window w1 as (partition by c); + +--error ER_NO_ORDER_LIST_IN_WINDOW_SPEC +select + pk, c, + dense_rank() over (partition by c) as r +from t1; + +drop table t0,t1; + +--echo # +--echo # MDEV-9634: Window function produces incorrect value +--echo # + +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t2 (part_id int, pk int, a int); +insert into t2 select + if(a<5, 0, 1), a, if(a<5, NULL, 1) from t0; +select * from t2; + +select + part_id, pk, a, + count(a) over (partition by part_id order by pk + rows between 1 preceding and 1 following) as CNT +from t2; + +drop table t0, t2; + +--echo # +--echo # RANGE-type bounds +--echo # + +create table t3 ( + pk int, 
+ val int +); + +insert into t3 values +(0, 1), +(1, 1), +(2, 1), +(3, 2), +(4, 2), +(5, 2), +(6, 2); + +select + pk, + val, + count(val) over (order by val + range between current row and + current row) + as CNT +from t3; + +insert into t3 values +(7, 3), +(8, 3); + +select + pk, + val, + count(val) over (order by val + range between current row and + current row) + as CNT +from t3; + +drop table t3; + +--echo # Now, check with PARTITION BY +create table t4 ( + part_id int, + pk int, + val int +); + +insert into t4 values +(1234, 100, 1), +(1234, 101, 1), +(1234, 102, 1), +(1234, 103, 2), +(1234, 104, 2), +(1234, 105, 2), +(1234, 106, 2), +(1234, 107, 3), +(1234, 108, 3), + +(5678, 200, 1), +(5678, 201, 1), +(5678, 202, 1), +(5678, 203, 2), +(5678, 204, 2), +(5678, 205, 2), +(5678, 206, 2), +(5678, 207, 3), +(5678, 208, 3); + +select + part_id, + pk, + val, + count(val) over (partition by part_id + order by val + range between current row and + current row) + as CNT +from t4; + +--echo # +--echo # Try RANGE UNBOUNDED PRECEDING | FOLLOWING +--echo # +select + part_id, + pk, + val, + count(val) over (partition by part_id + order by val + range between unbounded preceding and + current row) + as CNT +from t4; + +select + part_id, + pk, + val, + count(val) over (partition by part_id + order by val + range between current row and + unbounded following) + as CNT +from t4; + +select + part_id, + pk, + val, + count(val) over (partition by part_id + order by val + range between unbounded preceding and + unbounded following) + as CNT +from t4; + +drop table t4; + +--echo # +--echo # MDEV-9695: Wrong window frame when using RANGE BETWEEN N FOLLOWING AND PRECEDING +--echo # +create table t1 (pk int, a int, b int); +insert into t1 values +( 1 , 0, 1), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 8), +( 5 , 2, 32), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); + +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_or +from t1; + +--echo # Extra ROWS n PRECEDING tests +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) as bit_or +from t1; +drop table t1; + + +create table t2 ( + pk int, + a int, + b int +); + +insert into t2 values +( 1, 0, 1), +( 2, 0, 2), +( 3, 0, 4), +( 4, 0, 8), +( 5, 1, 16), +( 6, 1, 32), +( 7, 1, 64), +( 8, 1, 128), +( 9, 2, 256), +(10, 2, 512), +(11, 2, 1024), +(12, 2, 2048); + +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) as bit_or +from t2; + +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 2 PRECEDING AND 2 PRECEDING) as bit_or +from t2; + +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING) as bit_or +from t2; + +--echo # Check CURRENT ROW + +select pk, a, b, +bit_or(b) over (partition by a order by pk ROWS BETWEEN CURRENT ROW AND CURRENT ROW) as bit_or +from t2; + +drop table t2; + +--echo # +--echo # Try RANGE PRECEDING|FOLLWING n +--echo # +create table t1 ( + part_id int, + pk int, + a int +); + +insert into t1 values +(10, 1, 1), +(10, 2, 2), +(10, 3, 4), +(10, 4, 8), +(10, 5,26), +(10, 6,27), +(10, 7,40), +(10, 8,71), +(10, 9,72); + +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN UNBOUNDED PRECEDING + AND 10 FOLLOWING) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a DESC + RANGE BETWEEN UNBOUNDED PRECEDING + AND 10 FOLLOWING) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN 
UNBOUNDED PRECEDING + AND 1 FOLLOWING) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN UNBOUNDED PRECEDING + AND 10 PRECEDING) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a DESC + RANGE BETWEEN UNBOUNDED PRECEDING + AND 10 PRECEDING) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN UNBOUNDED PRECEDING + AND 1 PRECEDING) as cnt +from t1; + +# Try bottom bound +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN 1 PRECEDING + AND CURRENT ROW) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a DESC + RANGE BETWEEN 1 PRECEDING + AND CURRENT ROW) as cnt +from t1; + +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN 1 FOLLOWING + AND 3 FOLLOWING) as cnt +from t1; + +--echo # Try CURRENT ROW with[out] DESC +select + pk, a, + count(a) over (ORDER BY a + RANGE BETWEEN CURRENT ROW + AND 1 FOLLOWING) as cnt +from t1; + +select + pk, a, + count(a) over (order by a desc + range between current row + and 1 following) as cnt +from t1; + + +# Try with partitions +insert into t1 select 22, pk, a from t1; +select + part_id, pk, a, + count(a) over (PARTITION BY part_id + ORDER BY a + RANGE BETWEEN UNBOUNDED PRECEDING + AND 10 FOLLOWING) as cnt +from t1; + +select + pk, a, + count(a) over (PARTITION BY part_id + ORDER BY a + RANGE BETWEEN UNBOUNDED PRECEDING + AND 1 PRECEDING) as cnt +from t1; + +drop table t1; + +--echo # Try a RANGE frame over non-integer datatype: + +create table t1 ( + col1 int, + a decimal(5,3) +); + +insert into t1 values (1, 0.45); +insert into t1 values (1, 0.5); +insert into t1 values (1, 0.55); +insert into t1 values (1, 1.21); +insert into t1 values (1, 1.22); +insert into t1 values (1, 3.33); + +select + a, + count(col1) over (order by a + range between 0.1 preceding + and 0.1 following) +from t1; + +drop table t1; + +--echo # +--echo # RANGE-type frames and NULL values +--echo # +create table t1 ( + pk int, + a int, + b int +); + +insert into t1 values (1, NULL,1); +insert into t1 values (2, NULL,1); +insert into t1 values (3, NULL,1); +insert into t1 values (4, 10 ,1); +insert into t1 values (5, 11 ,1); +insert into t1 values (6, 12 ,1); +insert into t1 values (7, 13 ,1); +insert into t1 values (8, 14 ,1); + + +select + pk, a, + count(b) over (order by a + range between 2 preceding + and 2 following) as CNT +from t1; +drop table t1; + +--echo # +--echo # Try ranges that have bound1 > bound2. 
The standard actually allows them +--echo # + +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; + +select + pk, c, + count(*) over (partition by c + order by pk + rows between 1 preceding + and 2 preceding) + as cnt +from t1; + + +select + pk, c, + count(*) over (partition by c + order by pk + range between 1 preceding + and 2 preceding) + as cnt +from t1; +drop table t0, t1; + +--echo # +--echo # Error checking for frame bounds +--echo # + +create table t1 (a int, b int, c varchar(32)); +insert into t1 values (1,1,'foo'); +insert into t1 values (2,2,'bar'); +--error ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY +select + count(*) over (order by a,b + range between unbounded preceding and current row) +from t1; + +--error ER_WRONG_TYPE_FOR_RANGE_FRAME +select + count(*) over (order by c + range between unbounded preceding and current row) +from t1; + +--error ER_WRONG_TYPE_FOR_RANGE_FRAME +select + count(*) over (order by a + range between 'abcd' preceding and current row) +from t1; + +--error ER_WRONG_TYPE_FOR_RANGE_FRAME +select + count(*) over (order by a + range between current row and 'foo' following) +from t1; + +--echo # Try range frame with invalid bounds +--error ER_WRONG_TYPE_FOR_ROWS_FRAME +select + count(*) over (order by a + rows between 0.5 preceding and current row) +from t1; + +--error ER_WRONG_TYPE_FOR_ROWS_FRAME +select + count(*) over (order by a + rows between current row and 3.14 following) +from t1; + +--echo # +--echo # EXCLUDE clause is parsed but not supported +--echo # + +--error ER_FRAME_EXCLUSION_NOT_SUPPORTED +select + count(*) over (order by a + rows between 1 preceding and 1 following + exclude current row) +from t1; + +--error ER_FRAME_EXCLUSION_NOT_SUPPORTED +select + count(*) over (order by a + range between 1 preceding and 1 following + exclude ties) +from t1; + +--error ER_FRAME_EXCLUSION_NOT_SUPPORTED +select + count(*) over (order by a + range between 1 preceding and 1 following + exclude group) +from t1; + +# EXCLUDE NO OTHERS means 'don't exclude anything' +select + count(*) over (order by a + rows between 1 preceding and 1 following + exclude no others) +from t1; + +drop table t1; + +--echo # +--echo # Window function in grouping query +--echo # + +create table t1 ( + username varchar(32), + amount int +); + +insert into t1 values +('user1',1), +('user1',5), +('user1',3), +('user2',10), +('user2',20), +('user2',30); + +select + username, + sum(amount) as s, + rank() over (order by s desc) +from t1 +group by username; + +drop table t1; + +--echo # +--echo # mdev-9719: Window function in prepared statement +--echo # + +create table t1(a int, b int, x char(32)); +insert into t1 values (2, 10, 'xx'); +insert into t1 values (2, 10, 'zz'); +insert into t1 values (2, 20, 'yy'); +insert into t1 values (3, 10, 'xxx'); +insert into t1 values (3, 20, 'vvv'); + +prepare stmt from 'select a, row_number() over (partition by a order by b) from t1'; +--sorted_result +execute stmt; + +drop table t1; + +--echo # +--echo # mdev-9754: Window name resolution in prepared statement +--echo # + +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 (pk int, c int); +insert into t1 select a+1,1 from t0; +update t1 set c=2 where pk not in (1,2,3,4); +select * from t1; + +prepare stmt from +'select + pk, c, + count(*) over w1 as CNT +from t1 +window w1 as 
(partition by c order by pk + rows between 2 preceding and 2 following)'; +execute stmt; + +drop table t0,t1; + +--echo # +--echo # EXPLAIN FORMAT=JSON support for window functions +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +explain format=json select rank() over (order by a) from t0; + +create table t1 (a int, b int, c int); +insert into t1 select a,a,a from t0; + +explain format=json +select + a, + rank() over (order by sum(b)) +from t1 +group by a; + +explain format=json +select + a, + rank() over (order by sum(b)) +from t1 +group by a +order by null; + +--echo # +--echo # Check how window function works together with GROUP BY and HAVING +--echo # + +select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7); +explain format=json +select b,max(a) as MX, rank() over (order by b) from t1 group by b having MX in (3,5,7); + +drop table t1; +drop table t0; + +--echo # +--echo # Building ordering index for window functions +--echo # + +create table t1 ( + pk int primary key, + a int, + b int, + c int +); + +insert into t1 values +(101 , 0, 10, 1), +(102 , 0, 10, 2), +(103 , 1, 10, 3), +(104 , 1, 10, 4), +(108 , 2, 10, 5), +(105 , 2, 20, 6), +(106 , 2, 20, 7), +(107 , 2, 20, 8), +(109 , 4, 20, 9), +(110 , 4, 20, 10), +(111 , 5, NULL, 11), +(112 , 5, 1, 12), +(113 , 5, NULL, 13), +(114 , 5, NULL, 14), +(115 , 5, NULL, 15), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2), +(123 , 6, 1, 20), +(124 , 6, 1, -10), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); + +--sorted_result +select sum(b) over (partition by a order by b,pk + rows between unbounded preceding and current row) as c1, + avg(b) over (w1 rows between 1 preceding and 1 following) as c2, + sum(c) over (w2 rows between 1 preceding and 1 following) as c5, + avg(b) over (w1 rows between 5 preceding and 5 following) as c3, + sum(b) over (w1 rows between 1 preceding and 1 following) as c4 +from t1 +window w1 as (partition by a order by b,pk), + w2 as (partition by b order by c,pk); + +drop table t1; + + +--echo # +--echo # MDEV-9848: Window functions: reuse sorting and/or scanning +--echo # + +create table t1 (a int, b int, c int); +insert into t1 values +(1,3,1), +(2,2,1), +(3,1,1); + +--echo # Check using counters +flush status; +select + rank() over (partition by c order by a), + rank() over (partition by c order by b) +from t1; +show status like '%sort%'; + +flush status; +select + rank() over (partition by c order by a), + rank() over (partition by c order by a) +from t1; +show status like '%sort%'; + +# Check using EXPLAIN FORMAT=JSON +explain format=json +select + rank() over (partition by c order by a), + rank() over (partition by c order by a) +from t1; + +explain format=json +select + rank() over (order by a), + row_number() over (order by a) +from t1; + +explain format=json +select + rank() over (partition by c order by a), + count(*) over (partition by c) +from t1; + +explain format=json +select + count(*) over (partition by c), + rank() over (partition by c order by a) +from t1; + +drop table t1; + + +--echo # +--echo # MDEV-9847: Window functions: crash with big_tables=1 +--echo # +create table t1(a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +set @tmp=@@big_tables; +set big_tables=1; +select rank() over (order by a) from t1; +set big_tables=@tmp; +drop table t1; + +--echo # +--echo # Check if "ORDER BY 
window_func" works +--echo # + +create table t1 (s1 int, s2 char(5)); +insert into t1 values (1,'a'); +insert into t1 values (null,null); +insert into t1 values (1,null); +insert into t1 values (null,'a'); +insert into t1 values (2,'b'); +insert into t1 values (-1,''); + +explain format=json +select *, row_number() over (order by s1, s2) as X from t1 order by X desc; +select *, row_number() over (order by s1, s2) as X from t1 order by X desc; +drop table t1; + +--echo # +--echo # Try window functions that are not directly present in the select list +--echo # +create table t1 (a int, b int); +insert into t1 values + (1,3), + (2,2), + (3,1); + +select + rank() over (order by a) - + rank() over (order by b) +from + t1; + +drop table t1; + +--echo # +--echo # MDEV-9894: Assertion `0' failed in Window_func_runner::setup +--echo # return ER_NOT_SUPPORTED_YET for aggregates that are not yet supported +--echo # as window functions. +--echo # +create table t1 (i int); +insert into t1 values (1),(2); +--error ER_NOT_SUPPORTED_YET +SELECT MAX(i) OVER (PARTITION BY (i)) FROM t1; +drop table t1; + +--echo # +--echo # Check the 0 in ROWS 0 PRECEDING +--echo # + +create table t1 ( + part_id int, + pk int, + a int +); + +insert into t1 values (1, 1, 1); +insert into t1 values (1, 2, 2); +insert into t1 values (1, 3, 4); +insert into t1 values (1, 4, 8); + +select + pk, a, + sum(a) over (order by pk rows between 0 preceding and current row) +from t1; + +select + pk, a, + sum(a) over (order by pk rows between 1 preceding and 0 preceding) +from t1; + +insert into t1 values (200, 1, 1); +insert into t1 values (200, 2, 2); +insert into t1 values (200, 3, 4); +insert into t1 values (200, 4, 8); +select + part_id, pk, a, + sum(a) over (partition by part_id order by pk rows between 0 preceding and current row) +from t1; + +select + part_id, pk, a, + sum(a) over (partition by part_id order by pk rows between 1 preceding and 0 preceding) +from t1; + +drop table t1; +--echo # +--echo # MDEV-9780, The "DISTINCT must not bet converted into GROUP BY when +--echo # window functions are present" part +--echo # + +create table t1 (part_id int, a int); +insert into t1 values +(100, 1), +(100, 2), +(100, 2), +(100, 3), +(2000, 1), +(2000, 2), +(2000, 3), +(2000, 3), +(2000, 3); + +select rank() over (partition by part_id order by a) from t1; +select distinct rank() over (partition by part_id order by a) from t1; +explain format=json +select distinct rank() over (partition by part_id order by a) from t1; + +drop table t1; + +--echo # +--echo # MDEV-9893: Window functions with different ORDER BY lists, +--echo # one of these lists containing an expression +--echo # + +create table t1 (s1 int, s2 char(5)); +insert into t1 values (1,'a'); +insert into t1 values (null,null); +insert into t1 values (3,null); +insert into t1 values (4,'a'); +insert into t1 values (2,'b'); +insert into t1 values (-1,''); + +select + *, + ROW_NUMBER() OVER (order by s1), + CUME_DIST() OVER (order by -s1) +from t1; + +drop table t1; + + +--echo # +--echo # MDEV-9925: Wrong result with aggregate function as a window function +--echo # +create table t1 (i int); +insert into t1 values (1),(2); +select i, sum(i) over (partition by i) from t1; +drop table t1; + +--echo # +--echo # MDEV-9922: Assertion `!join->only_const_tables() && fsort' failed in int create_sort_index +--echo # +create view v1 as select 1 as i; +select rank() over (order by i) from v1; +drop view v1; + diff --git a/mysql-test/t/win_avg.test b/mysql-test/t/win_avg.test new file mode 
100644 index 00000000000..23a3652d943 --- /dev/null +++ b/mysql-test/t/win_avg.test @@ -0,0 +1,47 @@ +create table t1 ( + pk int primary key, + a int, + b int, + c real +); + + +insert into t1 values +(101 , 0, 10, 1.1), +(102 , 0, 10, 2.1), +(103 , 1, 10, 3.1), +(104 , 1, 10, 4.1), +(108 , 2, 10, 5.1), +(105 , 2, 20, 6.1), +(106 , 2, 20, 7.1), +(107 , 2, 20, 8.15), +(109 , 4, 20, 9.15), +(110 , 4, 20, 10.15), +(111 , 5, NULL, 11.15), +(112 , 5, 1, 12.25), +(113 , 5, NULL, 13.35), +(114 , 5, NULL, 14.50), +(115 , 5, NULL, 15.65), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1.1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2.2), +(123 , 6, 1, 20.1), +(124 , 6, 1, -10.4), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); + + +--sorted_result +select pk, a, b, avg(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; + +--sorted_result +select pk, a, c, avg(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; + +drop table t1; diff --git a/mysql-test/t/win_bit.test b/mysql-test/t/win_bit.test new file mode 100644 index 00000000000..f077d0d67a0 --- /dev/null +++ b/mysql-test/t/win_bit.test @@ -0,0 +1,89 @@ +create table t1 ( + pk int primary key, + a int, + b int +); + +create table t2 ( + pk int primary key, + a int, + b int +); + + + +insert into t1 values +( 1 , 0, 1), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 8), +( 5 , 2, 32), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); + +insert into t2 values +( 1 , 0, 2), +( 2 , 0, 2), +( 3 , 1, 4), +( 4 , 1, 4), +( 5 , 2, 16), +( 6 , 2, 64), +( 7 , 2, 128), +( 8 , 2, 16); + + + +--echo # Test bit functions on only one partition. +select pk, a, b, + bit_or(b) over (order by pk) as bit_or, + bit_and(b) over (order by pk) as bit_and, + bit_xor(b) over (order by pk) as bit_xor +from t1; + +select pk, a, b, + bit_or(b) over (order by pk) as bit_or, + bit_and(b) over (order by pk) as bit_and, + bit_xor(b) over (order by pk) as bit_xor +from t2; + +--echo # Test multiple partitions with bit functions. +select pk, a, b, + bit_or(b) over (partition by a order by pk) as bit_or, + bit_and(b) over (partition by a order by pk) as bit_and, + bit_xor(b) over (partition by a order by pk) as bit_xor +from t1; + +select pk, a, b, + bit_or(b) over (partition by a order by pk) as bit_or, + bit_and(b) over (partition by a order by pk) as bit_and, + bit_xor(b) over (partition by a order by pk) as bit_xor +from t2; + +--echo # Test remove function for bit functions using a sliding window. 
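+# The queries below apply a sliding frame only to BIT_OR; as a minimal sketch
+# (assuming the same t1 data as above), the same frame can also be applied to
+# BIT_AND and BIT_XOR so that their frame-removal path is exercised too:
+select pk, a, b,
+  bit_or(b)  over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_or,
+  bit_and(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_and,
+  bit_xor(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_xor
+from t1;
+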
+select pk, a, b, + bit_or(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as bit_or, + bit_and(b) over (partition by a order by pk) as bit_and, + bit_xor(b) over (partition by a order by pk) as bit_xor +from t1; + +select pk, a, b, + bit_or(b) over (partition by a order by pk) as bit_or, + bit_and(b) over (partition by a order by pk) as bit_and, + bit_xor(b) over (partition by a order by pk) as bit_xor +from t2; + + + + + + + +#select pk, a, b, bit_or(b) over (order by a) as count from t1 order by a, pk; +#select pk, a, b, bit_and(b) over (order by a ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as count from t1 order by a, pk; +#select pk, a, b, bit_xor(b) over (order by a, pk ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as count from t2 order by pk; +#select pk, a, b, bit_or(b) over (order by a, pk ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as count from t2 order by pk; +#select pk, a, b, bit_and(b) over (order by a, pk ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) as count from t3 order by pk; + +drop table t1; +drop table t2; diff --git a/mysql-test/t/win_ntile.test b/mysql-test/t/win_ntile.test new file mode 100644 index 00000000000..6f12e1f4005 --- /dev/null +++ b/mysql-test/t/win_ntile.test @@ -0,0 +1,171 @@ +create table t1 ( + pk int primary key, + a int, + b int +); + + +insert into t1 values +(11 , 0, 10), +(12 , 0, 10), +(13 , 1, 10), +(14 , 1, 10), +(18 , 2, 10), +(15 , 2, 20), +(16 , 2, 20), +(17 , 2, 20), +(19 , 4, 20), +(20 , 4, 20); + +# TODO Try invalid queries too. + +--error ER_INVALID_NTILE_ARGUMENT +select pk, a, b, ntile(-1) over (order by a) +from t1; + +--error ER_INVALID_NTILE_ARGUMENT +select pk, a, b, + ntile(0) over (order by a) +from t1; + +--sorted_result +select pk, a, b, + ntile(1) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(2) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(3) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(4) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(5) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(6) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(7) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(8) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(9) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(10) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(11) over (order by pk) +from t1; + +--sorted_result +select pk, a, b, + ntile(20) over (order by pk) +from t1; + + +select pk, a, b, + ntile(1) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(2) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(3) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(4) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(5) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(6) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(7) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(8) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(9) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(10) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(11) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile(20) over (partition by b order 
by pk) +from t1; + +select pk, a, b, + ntile(1 + 3) over (partition by b order by pk) +from t1; + +select pk, a, b, + ntile((select 4)) over (partition by b order by pk) +from t1; + +select t1.a from t1 where pk = 11; +--error ER_INVALID_NTILE_ARGUMENT +select pk, a, b, + ntile((select a from t1 where pk=11)) over (partition by b order by pk) +from t1; + +select t1.a from t1 where pk = 13; +select pk, a, b, + ntile((select a from t1 where pk=13)) over (partition by b order by pk) +from t1; + +explain +select pk, a, b, + ntile((select a from t1 where pk=13)) over (partition by b order by pk) +from t1; + +select a from t1; +--error ER_SUBQUERY_NO_1_ROW +select pk, a, b, + ntile((select a from t1)) over (partition by b order by pk) +from t1; + + +drop table t1; diff --git a/mysql-test/t/win_orderby.test b/mysql-test/t/win_orderby.test new file mode 100644 index 00000000000..0d42c606486 --- /dev/null +++ b/mysql-test/t/win_orderby.test @@ -0,0 +1,32 @@ +# +# Tests for window functions and ORDER BY +# + +--disable_warnings +drop table if exists t0,t1; +--enable_warnings + +create table t0(a int primary key); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1( + pk int, + a int, + key(pk) +); + +insert into t1 +select + A.a + B.a* 10 + C.a * 100, + 1 +from t0 A, t0 B, t0 C; + +select + pk, + count(a) over (order by pk rows between 2 preceding and 2 following) +from t1 +where pk between 1 and 30 +order by pk desc +limit 4; + +drop table t0,t1; diff --git a/mysql-test/t/win_percent_cume.test b/mysql-test/t/win_percent_cume.test new file mode 100644 index 00000000000..b851185cb32 --- /dev/null +++ b/mysql-test/t/win_percent_cume.test @@ -0,0 +1,36 @@ +create table t1 ( + pk int primary key, + a int, + b int +); + + +insert into t1 values +( 1 , 0, 10), +( 2 , 0, 10), +( 3 , 1, 10), +( 4 , 1, 10), +( 8 , 2, 10), +( 5 , 2, 20), +( 6 , 2, 20), +( 7 , 2, 20), +( 9 , 4, 20), +(10 , 4, 20); + +select pk, a, b, + percent_rank() over (order by a), + cume_dist() over (order by a) +from t1; + +select pk, a, b, + percent_rank() over (order by pk), + cume_dist() over (order by pk) +from t1 order by pk; + +select pk, a, b, + percent_rank() over (partition by a order by a), + cume_dist() over (partition by a order by a) +from t1; + +drop table t1; + diff --git a/mysql-test/t/win_rank.test b/mysql-test/t/win_rank.test new file mode 100644 index 00000000000..eda1f458205 --- /dev/null +++ b/mysql-test/t/win_rank.test @@ -0,0 +1,58 @@ +--echo # +--echo # Try DENSE_RANK() function +--echo # + +create table t1 ( + pk int primary key, + a int, + b int +); + +insert into t1 values +( 1 , 0, 10), +( 2 , 0, 10), +( 3 , 1, 10), +( 4 , 1, 10), +( 8 , 2, 10), +( 5 , 2, 20), +( 6 , 2, 20), +( 7 , 2, 20), +( 9 , 4, 20), +(10 , 4, 20); + +select pk, a, b, rank() over (order by a) as rank, + dense_rank() over (order by a) as dense_rank +from t1; +select pk, a, b, rank() over (partition by b order by a) as rank, + dense_rank() over (partition by b order by a) as dense_rank +from t1; + +drop table t1; + +--echo # +--echo # Test with null values in the table. 
+--echo # + +create table t2 (s1 int, s2 char(5)); +insert into t2 values (1,'a'); +insert into t2 values (null,null); +insert into t2 values (1,null); +insert into t2 values (null,'a'); +insert into t2 values (null,'c'); +insert into t2 values (2,'b'); +insert into t2 values (-1,''); + +select *, rank() over (order by s1) as rank, + dense_rank() over (order by s1) as dense_rank +from t2; +select *, rank() over (partition by s2 order by s1) as rank, + dense_rank() over (partition by s2 order by s1) as dense_rank +from t2; +select *, rank() over (order by s2) as rank, + dense_rank() over (order by s2) as dense_rank +from t2; +select *, rank() over (partition by s1 order by s2) as rank, + dense_rank() over (partition by s1 order by s2) as dense_rank +from t2; + +drop table t2; diff --git a/mysql-test/t/win_sum.test b/mysql-test/t/win_sum.test new file mode 100644 index 00000000000..aa4965bfd5a --- /dev/null +++ b/mysql-test/t/win_sum.test @@ -0,0 +1,47 @@ +create table t1 ( + pk int primary key, + a int, + b int, + c real +); + + +insert into t1 values +(101 , 0, 10, 1.1), +(102 , 0, 10, 2.1), +(103 , 1, 10, 3.1), +(104 , 1, 10, 4.1), +(108 , 2, 10, 5.1), +(105 , 2, 20, 6.1), +(106 , 2, 20, 7.1), +(107 , 2, 20, 8.15), +(109 , 4, 20, 9.15), +(110 , 4, 20, 10.15), +(111 , 5, NULL, 11.15), +(112 , 5, 1, 12.25), +(113 , 5, NULL, 13.35), +(114 , 5, NULL, 14.50), +(115 , 5, NULL, 15.65), +(116 , 6, 1, NULL), +(117 , 6, 1, 10), +(118 , 6, 1, 1.1), +(119 , 6, 1, NULL), +(120 , 6, 1, NULL), +(121 , 6, 1, NULL), +(122 , 6, 1, 2.2), +(123 , 6, 1, 20.1), +(124 , 6, 1, -10.4), +(125 , 6, 1, NULL), +(126 , 6, 1, NULL), +(127 , 6, 1, NULL); + + +--sorted_result +select pk, a, b, sum(b) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; + +--sorted_result +select pk, a, c, sum(c) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) +from t1; + +drop table t1; diff --git a/mysys/errors.c b/mysys/errors.c index 11e7d04e79e..e3fbfd4d8e6 100644 --- a/mysys/errors.c +++ b/mysys/errors.c @@ -119,7 +119,7 @@ void wait_for_free_space(const char *filename, int errors) (void) sleep(MY_WAIT_FOR_USER_TO_FIX_PANIC); } -const char **get_global_errmsgs() +const char **get_global_errmsgs(int nr __attribute__((unused))) { return globerrs; } diff --git a/mysys/my_default.c b/mysys/my_default.c index 7f41551f779..0c4fce0d2c7 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -565,7 +565,7 @@ int my_load_defaults(const char *conf_file, const char **groups, for (; *groups ; groups++) group.count++; - if (my_init_dynamic_array(&args, sizeof(char*),*argc, 32, MYF(0))) + if (my_init_dynamic_array(&args, sizeof(char*), 128, 64, MYF(0))) goto err; ctx.alloc= &alloc; diff --git a/mysys/my_error.c b/mysys/my_error.c index 5d16091e0be..c0698b19a20 100644 --- a/mysys/my_error.c +++ b/mysys/my_error.c @@ -49,7 +49,7 @@ static struct my_err_head { struct my_err_head *meh_next; /* chain link */ - const char** (*get_errmsgs)(); /* returns error message format */ + const char** (*get_errmsgs)(int nr); /* returns error message format */ uint meh_first; /* error number matching array slot 0 */ uint meh_last; /* error number matching last slot */ } my_errmsgs_globerrs= @@ -86,7 +86,7 @@ const char *my_get_err_msg(uint nr) we return NULL. */ if (!(format= (meh_p && (nr >= meh_p->meh_first)) ? 
- meh_p->get_errmsgs()[nr - meh_p->meh_first] : NULL) || + meh_p->get_errmsgs(nr)[nr - meh_p->meh_first] : NULL) || !*format) return NULL; @@ -217,7 +217,8 @@ void my_message(uint error, const char *str, register myf MyFlags) @retval != 0 Error */ -int my_error_register(const char** (*get_errmsgs) (), uint first, uint last) +int my_error_register(const char** (*get_errmsgs)(int error), uint first, + uint last) { struct my_err_head *meh_p; struct my_err_head **search_meh_pp; @@ -273,11 +274,10 @@ int my_error_register(const char** (*get_errmsgs) (), uint first, uint last) @retval non-NULL OK, returns address of error messages pointers array. */ -const char **my_error_unregister(uint first, uint last) +my_bool my_error_unregister(uint first, uint last) { struct my_err_head *meh_p; struct my_err_head **search_meh_pp; - const char **errmsgs; /* Search for the registration in the list. */ for (search_meh_pp= &my_errmsgs_list; @@ -289,17 +289,15 @@ const char **my_error_unregister(uint first, uint last) break; } if (! *search_meh_pp) - return NULL; - + return TRUE; + /* Remove header from the chain. */ meh_p= *search_meh_pp; *search_meh_pp= meh_p->meh_next; - /* Save the return value and free the header. */ - errmsgs= meh_p->get_errmsgs(); my_free(meh_p); - return errmsgs; + return FALSE; } diff --git a/plugin/file_key_management/file_key_management_plugin.cc b/plugin/file_key_management/file_key_management_plugin.cc index e0afbd68cc9..77344bc57ee 100644 --- a/plugin/file_key_management/file_key_management_plugin.cc +++ b/plugin/file_key_management/file_key_management_plugin.cc @@ -169,6 +169,12 @@ static int file_key_management_plugin_init(void *p) return parser.parse(&keys); } +static int file_key_management_plugin_deinit(void *p) +{ + keys.free_memory(); + return 0; +} + /* Plugin library descriptor */ @@ -181,7 +187,7 @@ maria_declare_plugin(file_key_management) "File-based key management plugin", PLUGIN_LICENSE_GPL, file_key_management_plugin_init, - NULL, + file_key_management_plugin_deinit, 0x0100 /* 1.0 */, NULL, /* status variables */ settings, diff --git a/plugin/wsrep_info/plugin.cc b/plugin/wsrep_info/plugin.cc index b8aaee5a132..f9843cb6f39 100644 --- a/plugin/wsrep_info/plugin.cc +++ b/plugin/wsrep_info/plugin.cc @@ -120,10 +120,10 @@ static int wsrep_memb_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) if (check_global_access(thd, SUPER_ACL, true)) return rc; - wsrep_config_state.lock(); + wsrep_config_state->lock(); Dynamic_array *memb_arr= - wsrep_config_state.get_member_info(); + wsrep_config_state->get_member_info(); TABLE *table= tables->table; @@ -151,7 +151,7 @@ static int wsrep_memb_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) } end: - wsrep_config_state.unlock(); + wsrep_config_state->unlock(); return rc; } @@ -175,10 +175,10 @@ static int wsrep_status_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) if (check_global_access(thd, SUPER_ACL, true)) return rc; - wsrep_config_state.lock(); + wsrep_config_state->lock(); - wsrep_view_info_t view= wsrep_config_state.get_view_info(); - wsrep_member_status_t status= wsrep_config_state.get_status(); + wsrep_view_info_t view= wsrep_config_state->get_view_info(); + wsrep_member_status_t status= wsrep_config_state->get_status(); TABLE *table= tables->table; @@ -210,7 +210,7 @@ static int wsrep_status_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) if (schema_table_store_record(thd, table)) rc= 1; - wsrep_config_state.unlock(); + wsrep_config_state->unlock(); return rc; } diff --git a/scripts/mysql_install_db.sh 
b/scripts/mysql_install_db.sh index 9dcd23a8392..773b5ee99a9 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -29,6 +29,7 @@ args="" defaults="" mysqld_opt="" user="" +silent_startup="--silent-startup" force=0 in_rpm=0 @@ -124,7 +125,7 @@ parse_arguments() # where a chown of datadir won't help) user=`parse_arg "$arg"` ;; --skip-name-resolve) ip_only=1 ;; - --verbose) verbose=1 ;; # Obsolete + --verbose) verbose=1 ; silent_startup="" ;; --rpm) in_rpm=1 ;; --help) usage ;; --no-defaults|--defaults-file=*|--defaults-extra-file=*) @@ -418,7 +419,7 @@ fi mysqld_bootstrap="${MYSQLD_BOOTSTRAP-$mysqld}" mysqld_install_cmd_line() { - "$mysqld_bootstrap" $defaults "$mysqld_opt" --bootstrap \ + "$mysqld_bootstrap" $defaults "$mysqld_opt" --bootstrap $silent_startup\ "--basedir=$basedir" "--datadir=$ldata" --log-warnings=0 --enforce-storage-engine="" \ $args --max_allowed_packet=8M \ --net_buffer_length=16K diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql index b878a2e636e..d9d01707b30 100644 --- a/scripts/mysql_system_tables.sql +++ b/scripts/mysql_system_tables.sql @@ -237,3 +237,9 @@ EXECUTE stmt; DROP PREPARE stmt; set storage_engine=@orig_storage_engine; + +-- +-- Drop some tables not used anymore in MariaDB +--- + +drop table if exists mysql.ndb_binlog_index; diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 6136c39fc9c..062e59bee5b 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -123,7 +123,7 @@ SET (SQL_SOURCE sql_profile.cc event_parse_data.cc sql_alter.cc sql_signal.cc rpl_handler.cc mdl.cc sql_admin.cc transaction.cc sys_vars.cc sql_truncate.cc datadict.cc - sql_reload.cc sql_cmd.h item_inetfunc.cc + sql_reload.cc sql_cmd.h item_inetfunc.cc # added in MariaDB: sql_explain.h sql_explain.cc @@ -138,6 +138,7 @@ SET (SQL_SOURCE my_json_writer.cc my_json_writer.h rpl_gtid.cc rpl_parallel.cc sql_type.cc sql_type.h + item_windowfunc.cc sql_window.cc sql_cte.cc sql_cte.h ${WSREP_SOURCES} table_cache.cc encryption.cc diff --git a/sql/derror.cc b/sql/derror.cc index bc4b89493aa..5a1bee23f4a 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -30,16 +30,19 @@ #include "derror.h" // read_texts #include "sql_class.h" // THD +uint errors_per_range[MAX_ERROR_RANGES+1]; + static bool check_error_mesg(const char *file_name, const char **errmsg); static void init_myfunc_errs(void); C_MODE_START -static const char **get_server_errmsgs() +static const char **get_server_errmsgs(int nr) { + int section= (nr-ER_ERROR_FIRST) / ERRORS_PER_RANGE; if (!current_thd) - return DEFAULT_ERRMSGS; - return CURRENT_THD_ERRMSGS; + return DEFAULT_ERRMSGS[section]; + return CURRENT_THD_ERRMSGS[section]; } C_MODE_END @@ -60,61 +63,88 @@ C_MODE_END TRUE Error */ +static const char ***original_error_messages; + bool init_errmessage(void) { - const char **errmsgs, **ptr, **org_errmsgs; + const char **errmsgs; bool error= FALSE; DBUG_ENTER("init_errmessage"); - /* - Get a pointer to the old error messages pointer array. - read_texts() tries to free it. - */ - org_errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); + free_error_messages(); + my_free(original_error_messages); + original_error_messages= 0; + + error_message_charset_info= system_charset_info; /* Read messages from file. 
*/ if (read_texts(ERRMSG_FILE, my_default_lc_messages->errmsgs->language, - &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) && - !errmsgs) + &original_error_messages)) { - my_free(errmsgs); - - if (org_errmsgs) - { - /* Use old error messages */ - errmsgs= org_errmsgs; - } - else + /* + No error messages. Create a temporary empty error message so + that we don't get a crash if some code wrongly tries to access + a non existing error message. + */ + if (!(original_error_messages= (const char***) + my_malloc(MAX_ERROR_RANGES * sizeof(char**) + + (ERRORS_PER_RANGE * sizeof(char*)), + MYF(0)))) + DBUG_RETURN(TRUE); + errmsgs= (const char**) (original_error_messages + MAX_ERROR_RANGES); + + for (uint i=0 ; i < MAX_ERROR_RANGES ; i++) { - /* - No error messages. Create a temporary empty error message so - that we don't get a crash if some code wrongly tries to access - a non existing error message. - */ - if (!(errmsgs= (const char**) my_malloc((ER_ERROR_LAST-ER_ERROR_FIRST+1)* - sizeof(char*), MYF(0)))) - DBUG_RETURN(TRUE); - for (ptr= errmsgs; ptr < errmsgs + ER_ERROR_LAST - ER_ERROR_FIRST; ptr++) - *ptr= ""; - error= TRUE; + original_error_messages[i]= errmsgs; + errors_per_range[i]= ERRORS_PER_RANGE; } + errors_per_range[2]= 0; // MYSYS error messages + + for (const char **ptr= errmsgs; + ptr < errmsgs + ERRORS_PER_RANGE ; + ptr++) + *ptr= ""; + + error= TRUE; } - else - my_free(org_errmsgs); // Free old language /* Register messages for use with my_error(). */ - if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST)) + for (uint i=0 ; i < MAX_ERROR_RANGES ; i++) { - my_free(errmsgs); - DBUG_RETURN(TRUE); + if (errors_per_range[i]) + { + if (my_error_register(get_server_errmsgs, (i+1)*ERRORS_PER_RANGE, + (i+1)*ERRORS_PER_RANGE + + errors_per_range[i]-1)) + { + my_free(original_error_messages); + original_error_messages= 0; + DBUG_RETURN(TRUE); + } + } } - - DEFAULT_ERRMSGS= errmsgs; /* Init global variable */ + DEFAULT_ERRMSGS= original_error_messages; init_myfunc_errs(); /* Init myfunc messages */ DBUG_RETURN(error); } +void free_error_messages() +{ + /* We don't need to free errmsg as it's done in cleanup_errmsg */ + for (uint i= 0 ; i < MAX_ERROR_RANGES ; i++) + { + if (errors_per_range[i]) + { + my_error_unregister((i+1)*ERRORS_PER_RANGE, + (i+1)*ERRORS_PER_RANGE + + errors_per_range[i]-1); + errors_per_range[i]= 0; + } + } +} + + /** Check the error messages array contains all relevant error messages */ @@ -125,11 +155,17 @@ static bool check_error_mesg(const char *file_name, const char **errmsg) The last MySQL error message can't be an empty string; If it is, it means that the error file doesn't contain all MySQL messages and is probably from an older version of MySQL / MariaDB. + We also check that each section has enough error messages. 
*/ - if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0) + if (errmsg[ER_LAST_MYSQL_ERROR_MESSAGE -1 - ER_ERROR_FIRST][0] == 0 || + (errors_per_range[0] < ER_ERROR_LAST_SECTION_2 - ER_ERROR_FIRST + 1) || + errors_per_range[1] != 0 || + (errors_per_range[2] < ER_ERROR_LAST_SECTION_4 - + ER_ERROR_FIRST_SECTION_4 +1) || + (errors_per_range[3] < ER_ERROR_LAST - ER_ERROR_FIRST_SECTION_5 + 1)) { sql_print_error("Error message file '%s' is probably from and older " - "version of MariaDB / MYSQL as it doesn't contain all " + "version of MariaDB as it doesn't contain all " "error messages", file_name); return 1; } @@ -137,27 +173,28 @@ static bool check_error_mesg(const char *file_name, const char **errmsg) } -/** - Read text from packed textfile in language-directory. +struct st_msg_file +{ + uint sections; + uint max_error; + uint errors; + size_t text_length; +}; - If we can't read messagefile then it's panic- we can't continue. +/** + Open file for packed textfile in language-directory. */ -bool read_texts(const char *file_name, const char *language, - const char ***point, uint error_messages) +static File open_error_msg_file(const char *file_name, const char *language, + uint error_messages, struct st_msg_file *ret) { - register uint i; - uint count,funktpos; - size_t offset, length; + int error_pos= 0; File file; char name[FN_REFLEN]; char lang_path[FN_REFLEN]; - uchar *UNINIT_VAR(buff); - uchar head[32],*pos; - DBUG_ENTER("read_texts"); + uchar head[32]; + DBUG_ENTER("open_error_msg_file"); - *point= 0; - funktpos=0; convert_dirname(lang_path, language, NullS); (void) my_load_path(lang_path, lang_path, lc_messages_dir); if ((file= mysql_file_open(key_file_ERRMSG, @@ -168,69 +205,121 @@ bool read_texts(const char *file_name, const char *language, /* Trying pre-5.4 sematics of the --language parameter. It included the language-specific part, e.g.: - --language=/path/to/english/ */ if ((file= mysql_file_open(key_file_ERRMSG, - fn_format(name, file_name, lc_messages_dir, "", 4), + fn_format(name, file_name, lc_messages_dir, "", + 4), O_RDONLY | O_SHARE | O_BINARY, MYF(0))) < 0) goto err; sql_print_warning("An old style --language or -lc-message-dir value with language specific part detected: %s", lc_messages_dir); sql_print_warning("Use --lc-messages-dir without language specific part instead."); } - - funktpos=1; + error_pos=1; if (mysql_file_read(file, (uchar*) head, 32, MYF(MY_NABP))) goto err; - funktpos=2; + error_pos=2; if (head[0] != (uchar) 254 || head[1] != (uchar) 254 || - head[2] != 2 || head[3] != 3) + head[2] != 2 || head[3] != 4) goto err; /* purecov: inspected */ - error_message_charset_info= system_charset_info; - length=uint4korr(head+6); count=uint2korr(head+10); + ret->text_length= uint4korr(head+6); + ret->max_error= uint2korr(head+10); + ret->errors= uint2korr(head+12); + ret->sections= uint2korr(head+14); - if (count < error_messages) + if (ret->max_error < error_messages || ret->sections != MAX_ERROR_RANGES) { sql_print_error("\ Error message file '%s' had only %d error messages, but it should contain at least %d error messages.\nCheck that the above file is the right version for this program!", - name,count,error_messages); + name,ret->errors,error_messages); (void) mysql_file_close(file, MYF(MY_WME)); - DBUG_RETURN(1); + DBUG_RETURN(FERR); } + DBUG_RETURN(file); - if (!(*point= (const char**) - my_malloc((size_t) (MY_MAX(length,count*2)+count*sizeof(char*)),MYF(0)))) - { - funktpos=3; /* purecov: inspected */ +err: + sql_print_error((error_pos == 2) ? 
+ "Incompatible header in messagefile '%s'. Probably from " + "another version of MariaDB" : + ((error_pos == 1) ? "Can't read from messagefile '%s'" : + "Can't find messagefile '%s'"), name); + if (file != FERR) + (void) mysql_file_close(file, MYF(MY_WME)); + DBUG_RETURN(FERR); +} + + +/* + Define the number of normal and extra error messages in the errmsg.sys + file +*/ + +static const uint error_messages= ER_ERROR_LAST - ER_ERROR_FIRST+1; + +/** + Read text from packed textfile in language-directory. +*/ + +bool read_texts(const char *file_name, const char *language, + const char ****data) +{ + uint i, range_size; + const char **point; + size_t offset; + File file; + uchar *buff, *pos; + struct st_msg_file msg_file; + DBUG_ENTER("read_texts"); + + if ((file= open_error_msg_file(file_name, language, error_messages, + &msg_file)) == FERR) + DBUG_RETURN(1); + + if (!(*data= (const char***) + my_malloc((size_t) ((MAX_ERROR_RANGES+1) * sizeof(char**) + + MY_MAX(msg_file.text_length, msg_file.errors * 2)+ + msg_file.errors * sizeof(char*)), + MYF(MY_WME)))) goto err; /* purecov: inspected */ - } - buff= (uchar*) (*point + count); - if (mysql_file_read(file, buff, (size_t) count*2, MYF(MY_NABP))) + point= (const char**) ((*data) + MAX_ERROR_RANGES); + buff= (uchar*) (point + msg_file.errors); + + if (mysql_file_read(file, buff, + (size_t) (msg_file.errors + msg_file.sections) * 2, + MYF(MY_NABP | MY_WME))) goto err; - for (i=0, offset=0, pos= buff ; i< count ; i++) + + pos= buff; + /* read in sections */ + for (i= 0, offset= 0; i < msg_file.sections ; i++) { - (*point)[i]= (char*) buff+offset; - offset+= uint2korr(pos); + (*data)[i]= point + offset; + errors_per_range[i]= range_size= uint2korr(pos); + offset+= range_size; + pos+= 2; + } + + /* Calculate pointers to text data */ + for (i=0, offset=0 ; i < msg_file.errors ; i++) + { + point[i]= (char*) buff+offset; + offset+=uint2korr(pos); pos+=2; } - if (mysql_file_read(file, buff, length, MYF(MY_NABP))) + + /* Read error message texts */ + if (mysql_file_read(file, buff, msg_file.text_length, MYF(MY_NABP | MY_WME))) goto err; - (void) mysql_file_close(file, MYF(0)); + (void) mysql_file_close(file, MYF(MY_WME)); - i= check_error_mesg(file_name, *point); - DBUG_RETURN(i); + DBUG_RETURN(check_error_mesg(file_name, point)); err: - sql_print_error((funktpos == 3) ? "Not enough memory for messagefile '%s'" : - (funktpos == 2) ? "Incompatible header in messagefile '%s'. Probably from another version of MariaDB" : - ((funktpos == 1) ? "Can't read from messagefile '%s'" : - "Can't find messagefile '%s'"), name); - if (file != FERR) - (void) mysql_file_close(file, MYF(MY_WME)); + (void) mysql_file_close(file, MYF(0)); DBUG_RETURN(1); } /* read_texts */ diff --git a/sql/derror.h b/sql/derror.h index b2f6331e048..9f2aee71c7e 100644 --- a/sql/derror.h +++ b/sql/derror.h @@ -19,7 +19,8 @@ #include "my_global.h" /* uint */ bool init_errmessage(void); +void free_error_messages(); bool read_texts(const char *file_name, const char *language, - const char ***point, uint error_messages); + const char ****data); #endif /* DERROR_INCLUDED */ diff --git a/sql/field.cc b/sql/field.cc index cf5be38398f..a5d2d759edc 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1224,7 +1224,8 @@ bool Field::test_if_equality_guarantees_uniqueness(const Item *item) const for temporal columns, so the query: WHERE temporal_column='string' cannot return multiple distinct temporal values. - QQ: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items. 
+ + TODO: perhaps we could allow INT/DECIMAL/DOUBLE types for temporal items. */ return result_type() == item->result_type(); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 54a79421d2e..82a5c90d0bb 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -74,7 +74,6 @@ static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info, TABLE *table, ha_rows records, ulong memory_available); - void Sort_param::init_for_filesort(uint sortlen, TABLE *table, ulong max_length_for_sort_data, ha_rows maxrows, bool sort_positions) @@ -124,28 +123,21 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table, @param thd Current thread @param table Table to sort - @param sortorder How to sort the table - @param s_length Number of elements in sortorder - @param select Condition to apply to the rows - @param max_rows Return only this many rows - @param sort_positions Set to TRUE if we want to force sorting by - position - (Needed by UPDATE/INSERT or ALTER TABLE or - when rowids are required by executor) + @param filesort How to sort the table + @param[out] found_rows Store the number of found rows here. + This is the number of found rows after + applying WHERE condition. @note - If we sort by position (like if sort_positions is 1) filesort() will - call table->prepare_for_position(). + If we sort by position (like if filesort->sort_positions==true) + filesort() will call table->prepare_for_position(). @retval 0 Error # SORT_INFO */ -SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, - uint s_length, - SQL_SELECT *select, ha_rows max_rows, - bool sort_positions, - Filesort_tracker* tracker) +SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, + Filesort_tracker* tracker) { int error; size_t memory_available= thd->variables.sortbuff_size; @@ -156,9 +148,16 @@ SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, Sort_param param; bool multi_byte_charset; Bounded_queue pq; + SQL_SELECT *const select= filesort->select; + ha_rows max_rows= filesort->limit; + uint s_length= 0; DBUG_ENTER("filesort"); - DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length);); + + if (!(s_length= filesort->make_sortorder(thd))) + DBUG_RETURN(NULL); /* purecov: inspected */ + + DBUG_EXECUTE("info",TEST_filesort(filesort->sortorder,s_length);); #ifdef SKIP_DBUG_IN_FILESORT DBUG_PUSH(""); /* No DBUG here */ #endif @@ -194,11 +193,11 @@ SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, error= 1; sort->found_rows= HA_POS_ERROR; - param.init_for_filesort(sortlength(thd, sortorder, s_length, + param.init_for_filesort(sortlength(thd, filesort->sortorder, s_length, &multi_byte_charset), table, thd->variables.max_length_for_sort_data, - max_rows, sort_positions); + max_rows, filesort->sort_positions); sort->addon_buf= param.addon_buf; sort->addon_field= param.addon_field; @@ -275,7 +274,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, goto err; param.sort_form= table; - param.end=(param.local_sortorder=sortorder)+s_length; + param.end=(param.local_sortorder=filesort->sortorder)+s_length; num_rows= find_all_keys(thd, ¶m, select, sort, &buffpek_pointers, @@ -429,6 +428,55 @@ SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, } /* filesort */ +void Filesort::cleanup() +{ + if (select && own_select) + { + select->cleanup(); + select= NULL; + } +} + + +uint Filesort::make_sortorder(THD *thd) +{ + uint count; + SORT_FIELD *sort,*pos; + ORDER *ord; + DBUG_ENTER("make_sortorder"); + + + count=0; + for (ord = order; ord; ord= 
ord->next) + count++; + if (!sortorder) + sortorder= (SORT_FIELD*) thd->alloc(sizeof(SORT_FIELD) * (count + 1)); + pos= sort= sortorder; + + if (!pos) + DBUG_RETURN(0); + + for (ord= order; ord; ord= ord->next, pos++) + { + Item *item= ord->item[0]->real_item(); + pos->field= 0; pos->item= 0; + if (item->type() == Item::FIELD_ITEM) + pos->field= ((Item_field*) item)->field; + else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) + pos->field= ((Item_sum*) item)->get_tmp_table_field(); + else if (item->type() == Item::COPY_STR_ITEM) + { // Blob patch + pos->item= ((Item_copy*) item)->get_item(); + } + else + pos->item= *ord->item; + pos->reverse= (ord->direction == ORDER::ORDER_DESC); + DBUG_ASSERT(pos->field != NULL || pos->item != NULL); + } + DBUG_RETURN(count); + } + + /** Read 'count' number of buffer pointers into memory. */ static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count, diff --git a/sql/filesort.h b/sql/filesort.h index 454c745b5c0..18029a10c14 100644 --- a/sql/filesort.h +++ b/sql/filesort.h @@ -17,13 +17,67 @@ #define FILESORT_INCLUDED #include "my_base.h" /* ha_rows */ +#include "sql_list.h" /* Sql_alloc */ #include "filesort_utils.h" class SQL_SELECT; class THD; struct TABLE; -struct SORT_FIELD; class Filesort_tracker; +struct SORT_FIELD; +typedef struct st_order ORDER; + + +/** + Sorting related info. + To be extended by another WL to include complete filesort implementation. +*/ +class Filesort: public Sql_alloc +{ +public: + /** List of expressions to order the table by */ + ORDER *order; + /** Number of records to return */ + ha_rows limit; + /** ORDER BY list with some precalculated info for filesort */ + SORT_FIELD *sortorder; + /** select to use for getting records */ + SQL_SELECT *select; + /** TRUE <=> free select on destruction */ + bool own_select; + /** true means we are using Priority Queue for order by with limit. */ + bool using_pq; + + /* + TRUE means sort operation must produce table rowids. + FALSE means that it halso has an option of producing {sort_key, + addon_fields} pairs. + */ + bool sort_positions; + + Filesort_tracker *tracker; + + Filesort(ORDER *order_arg, ha_rows limit_arg, bool sort_positions_arg, + SQL_SELECT *select_arg): + order(order_arg), + limit(limit_arg), + sortorder(NULL), + select(select_arg), + own_select(false), + using_pq(false), + sort_positions(sort_positions_arg) + { + DBUG_ASSERT(order); + }; + + ~Filesort() { cleanup(); } + /* Prepare ORDER BY list for sorting. 
*/ + uint make_sortorder(THD *thd); + +private: + void cleanup(); +}; + class SORT_INFO { @@ -97,19 +151,12 @@ public: size_t sort_buffer_size() const { return filesort_buffer.sort_buffer_size(); } - friend SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, - uint s_length, - SQL_SELECT *select, ha_rows max_rows, - bool sort_positions, - Filesort_tracker* tracker); + friend SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, + Filesort_tracker* tracker); }; - -SORT_INFO *filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, - uint s_length, - SQL_SELECT *select, ha_rows max_rows, - bool sort_positions, - Filesort_tracker* tracker); +SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, + Filesort_tracker* tracker); void change_double_for_sort(double nr,uchar *to); diff --git a/sql/handler.cc b/sql/handler.cc index 748a51f5c59..2186d389056 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -295,7 +295,7 @@ handler *get_ha_partition(partition_info *part_info) static const char **handler_errmsgs; C_MODE_START -static const char **get_handler_errmsgs() +static const char **get_handler_errmsgs(int nr) { return handler_errmsgs; } @@ -386,12 +386,10 @@ int ha_init_errors(void) */ static int ha_finish_errors(void) { - const char **errmsgs; - /* Allocate a pointer array for the error message strings. */ - if (! (errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST))) - return 1; - my_free(errmsgs); + my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST); + my_free(handler_errmsgs); + handler_errmsgs= 0; return 0; } @@ -3079,6 +3077,7 @@ int handler::update_auto_increment() if (unlikely(nr == ULONGLONG_MAX)) DBUG_RETURN(HA_ERR_AUTOINC_ERANGE); + DBUG_ASSERT(nr != 0); DBUG_PRINT("info",("auto_increment: %llu nb_reserved_values: %llu", nr, append ? nb_reserved_values : 0)); diff --git a/sql/item.cc b/sql/item.cc index 7cdb2d2e7e4..65fb00d4757 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -455,7 +455,7 @@ Item::Item(THD *thd): { DBUG_ASSERT(thd); marker= 0; - maybe_null=null_value=with_sum_func=with_field=0; + maybe_null=null_value=with_sum_func=with_window_func=with_field=0; in_rollup= 0; with_subselect= 0; /* Initially this item is not attached to any JOIN_TAB. */ @@ -500,6 +500,7 @@ Item::Item(THD *thd, Item *item): in_rollup(item->in_rollup), null_value(item->null_value), with_sum_func(item->with_sum_func), + with_window_func(item->with_window_func), with_field(item->with_field), fixed(item->fixed), is_autogenerated_name(item->is_autogenerated_name), @@ -1749,7 +1750,7 @@ public: thd->fatal_error() may be called if we are out of memory */ -void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, +void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, Item **ref, uint split_flags) { @@ -1760,6 +1761,14 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, ((Item_sum *) this)->ref_by) return; } + else if (type() == WINDOW_FUNC_ITEM) + { + /* + Skip the else part, window functions are very special functions: + they need to have their own fields in the temp. table, but they + need to be proceessed differently than regular aggregate functions + */ + } else { /* Not a SUM() function */ @@ -1800,7 +1809,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Exception is Item_direct_view_ref which we need to convert to Item_ref to allow fields from view being stored in tmp table. 
*/ - Item_aggregate_ref *item_ref; + Item_ref *item_ref; uint el= fields.elements; /* If this is an item_ref, get the original item @@ -1810,13 +1819,24 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Item *real_itm= real_item(); ref_pointer_array[el]= real_itm; - if (!(item_ref= (new (thd->mem_root) - Item_aggregate_ref(thd, - &thd->lex->current_select->context, - ref_pointer_array + el, 0, name)))) - return; // fatal_error is set + if (type() == WINDOW_FUNC_ITEM) + { + if (!(item_ref= (new (thd->mem_root) + Item_direct_ref(thd, + &thd->lex->current_select->context, + &ref_pointer_array[el], 0, name)))) + return; // fatal_error is set + } + else + { + if (!(item_ref= (new (thd->mem_root) + Item_aggregate_ref(thd, + &thd->lex->current_select->context, + &ref_pointer_array[el], 0, name)))) + return; // fatal_error is set + } if (type() == SUM_FUNC_ITEM) - item_ref->depended_from= ((Item_sum *) this)->depended_from(); + item_ref->depended_from= ((Item_sum *) this)->depended_from(); fields.push_front(real_itm); thd->change_item_tree(ref, item_ref); } @@ -3755,16 +3775,18 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) str_value.charset()); collation.set(str_value.charset(), DERIVATION_COERCIBLE); decimals= 0; - + item_type= Item::STRING_ITEM; break; } case REAL_RESULT: set_double(arg->val_real()); + item_type= Item::REAL_ITEM; break; case INT_RESULT: set_int(arg->val_int(), arg->max_length); + item_type= Item::INT_ITEM; break; case DECIMAL_RESULT: @@ -3776,6 +3798,7 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) return TRUE; set_decimal(dv); + item_type= Item::DECIMAL_ITEM; break; } @@ -3785,11 +3808,11 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it) DBUG_ASSERT(TRUE); // Abort in debug mode. set_null(); // Set to NULL in release mode. + item_type= Item::NULL_ITEM; return FALSE; } set_handler_by_result_type(arg->result_type()); - item_type= arg->type(); return FALSE; } @@ -4489,7 +4512,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) return NULL; } DBUG_ASSERT((*select_ref)->fixed); - return (select->ref_pointer_array + counter); + return &select->ref_pointer_array[counter]; } if (group_by_ref) return group_by_ref; @@ -6516,15 +6539,14 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg) type() != Item::TRIGGER_FIELD_ITEM) { List *all_fields= &select->join->all_fields; - Item **ref_pointer_array= select->ref_pointer_array; - DBUG_ASSERT(all_fields->elements <= select->ref_pointer_array_size); + Ref_ptr_array &ref_pointer_array= select->ref_pointer_array; int el= all_fields->elements; Item_ref *ref; ref_pointer_array[el]= (Item*)this; all_fields->push_front((Item*)this, thd->mem_root); ref= new (thd->mem_root) - Item_ref(thd, &select->context, ref_pointer_array + el, + Item_ref(thd, &select->context, &ref_pointer_array[el], table_name, field_name); return ref; } @@ -6928,6 +6950,7 @@ void Item_ref::set_properties() split_sum_func() doesn't try to change the reference. 
*/ with_sum_func= (*ref)->with_sum_func; + with_window_func= (*ref)->with_window_func; with_field= (*ref)->with_field; fixed= 1; if (alias_name_used) diff --git a/sql/item.h b/sql/item.h index e42442aa301..674ff6e99dc 100644 --- a/sql/item.h +++ b/sql/item.h @@ -65,6 +65,8 @@ class RANGE_OPT_PARAM; class SEL_TREE; +typedef Bounds_checked_array Ref_ptr_array; + static inline uint32 char_to_byte_length_safe(uint32 char_length_arg, uint32 mbmaxlen_arg) { @@ -626,7 +628,8 @@ public: static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) {} - enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, + enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM, + WINDOW_FUNC_ITEM, STRING_ITEM, INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM, COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM, PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, @@ -692,6 +695,7 @@ public: of a query with ROLLUP */ bool null_value; /* if item is null */ bool with_sum_func; /* True if item contains a sum func */ + bool with_window_func; /* True if item contains a window func */ /** True if any item except Item_sum contains a field. Set during parsing. */ @@ -1180,7 +1184,7 @@ public: void print_item_w_name(String *, enum_query_type query_type); void print_value(String *); virtual void update_used_tables() {} - virtual COND *build_equal_items(THD *thd, COND_EQUAL *inherited, + virtual COND *build_equal_items(THD *thd, COND_EQUAL *inheited, bool link_item_fields, COND_EQUAL **cond_equal_ref) { @@ -1216,10 +1220,11 @@ public: { return false; } - virtual void split_sum_func(THD *thd, Item **ref_pointer_array, + virtual void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) {} /* Called for items that really have to be split */ - void split_sum_func2(THD *thd, Item **ref_pointer_array, List &fields, + void split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, Item **ref, uint flags); virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); bool get_time(MYSQL_TIME *ltime) @@ -4768,17 +4773,10 @@ public: - cmp() method that compares the saved value with the current value of the source item, and if they were not equal saves item's value into the saved value. -*/ -/* - Cached_item_XXX objects are not exactly caches. They do the following: - - Each Cached_item_XXX object has - - its source item - - saved value of the source item - - cmp() method that compares the saved value with the current value of the - source item, and if they were not equal saves item's value into the saved - value. + TODO: add here: + - a way to save the new value w/o comparison + - a way to do less/equal/greater comparison */ class Cached_item :public Sql_alloc @@ -4786,48 +4784,75 @@ class Cached_item :public Sql_alloc public: bool null_value; Cached_item() :null_value(0) {} + /* + Compare the cached value with the source value. If not equal, copy + the source value to the cache. 
+ @return + true - Not equal + false - Equal + */ virtual bool cmp(void)=0; + + /* Compare the cached value with the source value, without copying */ + virtual int cmp_read_only()=0; + virtual ~Cached_item(); /*line -e1509 */ }; -class Cached_item_str :public Cached_item +class Cached_item_item : public Cached_item { +protected: Item *item; + + Cached_item_item(Item *arg) : item(arg) {} +public: + void fetch_value_from(Item *new_item) + { + Item *save= item; + item= new_item; + cmp(); + item= save; + } +}; + +class Cached_item_str :public Cached_item_item +{ uint32 value_max_length; String value,tmp_value; public: Cached_item_str(THD *thd, Item *arg); bool cmp(void); + int cmp_read_only(); ~Cached_item_str(); // Deallocate String:s }; -class Cached_item_real :public Cached_item +class Cached_item_real :public Cached_item_item { - Item *item; double value; public: - Cached_item_real(Item *item_par) :item(item_par),value(0.0) {} + Cached_item_real(Item *item_par) :Cached_item_item(item_par),value(0.0) {} bool cmp(void); + int cmp_read_only(); }; -class Cached_item_int :public Cached_item +class Cached_item_int :public Cached_item_item { - Item *item; longlong value; public: - Cached_item_int(Item *item_par) :item(item_par),value(0) {} + Cached_item_int(Item *item_par) :Cached_item_item(item_par),value(0) {} bool cmp(void); + int cmp_read_only(); }; -class Cached_item_decimal :public Cached_item +class Cached_item_decimal :public Cached_item_item { - Item *item; my_decimal value; public: Cached_item_decimal(Item *item_par); bool cmp(void); + int cmp_read_only(); }; class Cached_item_field :public Cached_item @@ -4844,6 +4869,7 @@ public: buff= (uchar*) thd_calloc(thd, length= field->pack_length()); } bool cmp(void); + int cmp_read_only(); }; class Item_default_value : public Item_field @@ -5129,7 +5155,7 @@ public: return (this->*processor)(arg); } virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs); - void split_sum_func2_example(THD *thd, Item **ref_pointer_array, + void split_sum_func2_example(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) { example->split_sum_func2(thd, ref_pointer_array, fields, &example, flags); diff --git a/sql/item_buff.cc b/sql/item_buff.cc index 62c2f76dc2e..488eb52fb77 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -71,7 +71,7 @@ Cached_item::~Cached_item() {} */ Cached_item_str::Cached_item_str(THD *thd, Item *arg) - :item(arg), + :Cached_item_item(arg), value_max_length(MY_MIN(arg->max_length, thd->variables.max_sort_length)), value(value_max_length) {} @@ -98,6 +98,25 @@ bool Cached_item_str::cmp(void) return tmp; } + +int Cached_item_str::cmp_read_only() +{ + String *res= item->val_str(&tmp_value); + + if (null_value) + { + if (item->null_value) + return 0; + else + return -1; + } + if (item->null_value) + return 1; + + return sortcmp(&value, res, item->collation.collation); +} + + Cached_item_str::~Cached_item_str() { item=0; // Safety @@ -115,6 +134,23 @@ bool Cached_item_real::cmp(void) return FALSE; } + +int Cached_item_real::cmp_read_only() +{ + double nr= item->val_real(); + if (null_value) + { + if (item->null_value) + return 0; + else + return -1; + } + if (item->null_value) + return 1; + return (nr == value)? 0 : ((nr < value)? 
1: -1); +} + + bool Cached_item_int::cmp(void) { longlong nr=item->val_int(); @@ -128,6 +164,22 @@ bool Cached_item_int::cmp(void) } +int Cached_item_int::cmp_read_only() +{ + longlong nr= item->val_int(); + if (null_value) + { + if (item->null_value) + return 0; + else + return -1; + } + if (item->null_value) + return 1; + return (nr == value)? 0 : ((nr < value)? 1: -1); +} + + bool Cached_item_field::cmp(void) { bool tmp= FALSE; // Value is identical @@ -148,8 +200,24 @@ bool Cached_item_field::cmp(void) } +int Cached_item_field::cmp_read_only() +{ + if (null_value) + { + if (field->is_null()) + return 0; + else + return -1; + } + if (field->is_null()) + return 1; + + return field->cmp(buff); +} + + Cached_item_decimal::Cached_item_decimal(Item *it) - :item(it) + :Cached_item_item(it) { my_decimal_set_zero(&value); } @@ -174,3 +242,20 @@ bool Cached_item_decimal::cmp() return FALSE; } + +int Cached_item_decimal::cmp_read_only() +{ + my_decimal tmp; + my_decimal *ptmp= item->val_decimal(&tmp); + if (null_value) + { + if (item->null_value) + return 0; + else + return -1; + } + if (item->null_value) + return 1; + return my_decimal_cmp(&value, ptmp); +} + diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 579cdc10057..335228c37fa 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2482,7 +2482,7 @@ bool Item_func_if::date_op(MYSQL_TIME *ltime, uint fuzzydate) } -void Item_func_nullif::split_sum_func(THD *thd, Item **ref_pointer_array, +void Item_func_nullif::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) { if (m_cache) @@ -4791,7 +4791,7 @@ void Item_cond::traverse_cond(Cond_traverser traverser, that have or refer (HAVING) to a SUM expression. */ -void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, +void Item_cond::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) { List_iterator li(list); diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 2d197a86d9b..5789186dbe8 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1030,8 +1030,8 @@ public: uint decimal_precision() const { return args[2]->decimal_precision(); } const char *func_name() const { return "nullif"; } void print(String *str, enum_query_type query_type); - void split_sum_func(THD *thd, Item **ref_pointer_array, List &fields, - uint flags); + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags); void update_used_tables(); table_map not_null_tables() const { return 0; } bool is_null(); @@ -2055,8 +2055,8 @@ public: SARGABLE_PARAM **sargables); SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); virtual void print(String *str, enum_query_type query_type); - void split_sum_func(THD *thd, Item **ref_pointer_array, List &fields, - uint flags); + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags); friend int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, COND **conds); void top_level_item() { abort_on_null=1; } @@ -2083,7 +2083,7 @@ template class LI, class T> class Item_equal_iterator; All equality predicates of the form field1=field2 contained in a conjunction are substituted for a sequence of items of this class. An item of this class Item_equal(f1,f2,...fk) represents a - multiple equality f1=f2=...=fk. 
+ multiple equality f1=f2=...=fk.l If a conjunction contains predicates f1=f2 and f2=f3, a new item of this class is created Item_equal(f1,f2,f3) representing the multiple diff --git a/sql/item_func.cc b/sql/item_func.cc index 50b6f4a6b68..57bd004cf88 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -132,6 +132,7 @@ void Item_func::sync_with_sum_func_and_with_field(List &list) while ((item= li++)) { with_sum_func|= item->with_sum_func; + with_window_func|= item->with_window_func; with_field|= item->with_field; } } @@ -226,6 +227,7 @@ Item_func::fix_fields(THD *thd, Item **ref) maybe_null=1; with_sum_func= with_sum_func || item->with_sum_func; + with_window_func= with_window_func || item->with_window_func; with_field= with_field || item->with_field; used_tables_and_const_cache_join(item); with_subselect|= item->has_subquery(); @@ -431,7 +433,7 @@ void Item_args::propagate_equal_fields(THD *thd, See comments in Item_cond::split_sum_func() */ -void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, +void Item_func::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) { Item **arg, **arg_end; @@ -4904,9 +4906,16 @@ Item_func_set_user_var::update_hash(void *ptr, uint length, If we set a variable explicitely to NULL then keep the old result type of the variable */ - if ((null_value= args[0]->null_value) && null_item) + if (args[0]->type() == Item::FIELD_ITEM) + { + /* args[0]->null_value may be outdated */ + null_value= ((Item_field*)args[0])->field->is_null(); + } + else + null_value= args[0]->null_value; + if (null_value && null_item) res_type= m_var_entry->type; // Don't change type of item - if (::update_hash(m_var_entry, (null_value= args[0]->null_value), + if (::update_hash(m_var_entry, null_value, ptr, length, res_type, cs, unsigned_arg)) { null_value= 1; diff --git a/sql/item_func.h b/sql/item_func.h index 2ce199b3565..5c21535adbe 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -151,8 +151,8 @@ public: sync_with_sum_func_and_with_field(list); list.empty(); // Fields are used } - void split_sum_func(THD *thd, Item **ref_pointer_array, List &fields, - uint flags); + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags); virtual void print(String *str, enum_query_type query_type); void print_op(String *str, enum_query_type query_type); void print_args(String *str, uint from, enum_query_type query_type); diff --git a/sql/item_row.cc b/sql/item_row.cc index b1575b81087..56d73f7b759 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -100,7 +100,7 @@ void Item_row::cleanup() } -void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array, +void Item_row::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, uint flags) { Item **arg, **arg_end; diff --git a/sql/item_row.h b/sql/item_row.h index 153a6f085b3..ddb6f0835f2 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -85,8 +85,8 @@ public: bool fix_fields(THD *thd, Item **ref); void fix_after_pullout(st_select_lex *new_parent, Item **ref); void cleanup(); - void split_sum_func(THD *thd, Item **ref_pointer_array, List &fields, - uint flags); + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags); table_map used_tables() const { return used_tables_cache; }; bool const_item() const { return const_item_cache; }; enum Item_result result_type() const { return ROW_RESULT; } diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 8745baa8c69..94e7bc98618 100644 --- a/sql/item_subselect.cc 
+++ b/sql/item_subselect.cc @@ -739,7 +739,7 @@ int Item_in_subselect::optimize(double *out_rows, double *cost) } /* Now with grouping */ - if (join->group_list) + if (join->group_list_for_estimates) { DBUG_PRINT("info",("Materialized join has grouping, trying to estimate it")); double output_rows= get_post_group_estimate(join, *out_rows); @@ -1896,7 +1896,8 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join) (ALL && (> || =>)) || (ANY && (< || =<)) for ALL condition is inverted */ - item= new (thd->mem_root) Item_sum_max(thd, *select_lex->ref_pointer_array); + item= new (thd->mem_root) Item_sum_max(thd, + select_lex->ref_pointer_array[0]); } else { @@ -1904,11 +1905,12 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join) (ALL && (< || =<)) || (ANY && (> || =>)) for ALL condition is inverted */ - item= new (thd->mem_root) Item_sum_min(thd, *select_lex->ref_pointer_array); + item= new (thd->mem_root) Item_sum_min(thd, + select_lex->ref_pointer_array[0]); } if (upper_item) upper_item->set_sum_test(item); - thd->change_item_tree(select_lex->ref_pointer_array, item); + thd->change_item_tree(&select_lex->ref_pointer_array[0], item); { List_iterator it(select_lex->item_list); it++; @@ -2054,8 +2056,8 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, thd, &select_lex->context, this, - select_lex-> - ref_pointer_array, + &select_lex-> + ref_pointer_array[0], (char *)"", this->full_name())); if (!abort_on_null && left_expr->maybe_null) @@ -2130,7 +2132,7 @@ Item_in_subselect::create_single_in_to_exists_cond(JOIN *join, new (thd->mem_root) Item_ref_null_helper(thd, &select_lex->context, this, - select_lex->ref_pointer_array, + &select_lex->ref_pointer_array[0], (char *)"", (char *)"")); if (!abort_on_null && left_expr->maybe_null) @@ -2317,7 +2319,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, (char *)in_left_expr_name), new (thd->mem_root) Item_ref(thd, &select_lex->context, - select_lex->ref_pointer_array + i, + &select_lex->ref_pointer_array[i], (char *)"", (char *)"")); Item *item_isnull= @@ -2325,7 +2327,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item_func_isnull(thd, new (thd->mem_root) Item_ref(thd, &select_lex->context, - select_lex->ref_pointer_array+i, + &select_lex->ref_pointer_array[i], (char *)"", (char *)"")); Item *col_item= new (thd->mem_root) @@ -2343,8 +2345,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item_is_not_null_test(thd, this, new (thd->mem_root) Item_ref(thd, &select_lex->context, - select_lex-> - ref_pointer_array + i, + &select_lex-> + ref_pointer_array[i], (char *)"", (char *)"")); if (!abort_on_null && left_expr->element_index(i)->maybe_null) @@ -2382,8 +2384,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, (char *)in_left_expr_name), new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, - select_lex-> - ref_pointer_array+i, + &select_lex-> + ref_pointer_array[i], (char *)"", (char *)"")); if (!abort_on_null && select_lex->ref_pointer_array[i]->maybe_null) @@ -2393,7 +2395,7 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item_is_not_null_test(thd, this, new (thd->mem_root) Item_ref(thd, &select_lex->context, - select_lex->ref_pointer_array + i, + &select_lex->ref_pointer_array[i], (char *)"", (char *)"")); @@ -2402,8 +2404,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join, Item_func_isnull(thd, new (thd->mem_root) Item_direct_ref(thd, &select_lex->context, - select_lex-> - ref_pointer_array+i, + 
&select_lex-> + ref_pointer_array[i], (char *)"", (char *)"")); item= new (thd->mem_root) Item_cond_or(thd, item, item_isnull); @@ -3533,8 +3535,7 @@ int subselect_single_select_engine::prepare() prepared= 1; SELECT_LEX *save_select= thd->lex->current_select; thd->lex->current_select= select_lex; - if (join->prepare(&select_lex->ref_pointer_array, - select_lex->table_list.first, + if (join->prepare(select_lex->table_list.first, select_lex->with_wild, select_lex->where, select_lex->order_list.elements + @@ -3683,14 +3684,6 @@ int subselect_single_select_engine::exec() */ select_lex->uncacheable|= UNCACHEABLE_EXPLAIN; select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN; - /* - Force join->join_tmp creation, because this subquery will be replaced - by a simple select from the materialization temp table by optimize() - called by EXPLAIN and we need to preserve the initial query structure - so we can display it. - */ - if (join->need_tmp && join->init_save_join_tab()) - DBUG_RETURN(1); /* purecov: inspected */ } } if (item->engine_changed(this)) @@ -5231,7 +5224,7 @@ double get_post_group_estimate(JOIN* join, double join_op_rows) table_map tables_in_group_list= table_map(0); /* Find out which tables are used in GROUP BY list */ - for (ORDER *order= join->group_list; order; order= order->next) + for (ORDER *order= join->group_list_for_estimates; order; order= order->next) { Item *item= order->item[0]; table_map item_used_tables= item->used_tables(); diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 0c85cf53e18..f774ee5a561 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1243,7 +1243,8 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table) Item_sum_sum::Item_sum_sum(THD *thd, Item_sum_sum *item) :Item_sum_num(thd, item), Type_handler_hybrid_field_type(item), - curr_dec_buff(item->curr_dec_buff) + curr_dec_buff(item->curr_dec_buff), + count(item->count) { /* TODO: check if the following assignments are really needed */ if (Item_sum_sum::result_type() == DECIMAL_RESULT) @@ -1265,6 +1266,7 @@ void Item_sum_sum::clear() { DBUG_ENTER("Item_sum_sum::clear"); null_value=1; + count= 0; if (Item_sum_sum::result_type() == DECIMAL_RESULT) { curr_dec_buff= 0; @@ -1318,25 +1320,57 @@ void Item_sum_sum::fix_length_and_dec() bool Item_sum_sum::add() { DBUG_ENTER("Item_sum_sum::add"); + add_helper(false); + DBUG_RETURN(0); +} + +void Item_sum_sum::add_helper(bool perform_removal) +{ + DBUG_ENTER("Item_sum_sum::add_helper"); + if (Item_sum_sum::result_type() == DECIMAL_RESULT) { my_decimal value; const my_decimal *val= aggr->arg_val_decimal(&value); if (!aggr->arg_is_null(true)) { - my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff^1), - val, dec_buffs + curr_dec_buff); + if (perform_removal) + { + DBUG_ASSERT(count > 0); + my_decimal_sub(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff ^ 1), + dec_buffs + curr_dec_buff, val); + count--; + } + else + { + count++; + my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff ^ 1), + val, dec_buffs + curr_dec_buff); + } curr_dec_buff^= 1; - null_value= 0; + null_value= (count > 0) ? 0 : 1; } } else { - sum+= aggr->arg_val_real(); + if (perform_removal) + sum-= aggr->arg_val_real(); + else + sum+= aggr->arg_val_real(); if (!aggr->arg_is_null(true)) - null_value= 0; + { + if (perform_removal) + { + DBUG_ASSERT(count > 0); + count--; + } + else + count++; + + null_value= (count > 0) ? 
0 : 1; + } } - DBUG_RETURN(0); + DBUG_VOID_RETURN; } @@ -1386,6 +1420,13 @@ my_decimal *Item_sum_sum::val_decimal(my_decimal *val) return val_decimal_from_real(val); } +void Item_sum_sum::remove() +{ + DBUG_ENTER("Item_sum_sum::remove"); + add_helper(true); + DBUG_VOID_RETURN; +} + /** Aggregate a distinct row from the distinct hash table. @@ -1531,6 +1572,19 @@ bool Item_sum_count::add() return 0; } + +/* + Remove a row. This is used by window functions. +*/ + +void Item_sum_count::remove() +{ + DBUG_ASSERT(aggr->Aggrtype() == Aggregator::SIMPLE_AGGREGATOR); + if (aggr->arg_is_null(false)) + return; + count--; +} + longlong Item_sum_count::val_int() { DBUG_ASSERT(fixed == 1); @@ -1626,6 +1680,16 @@ bool Item_sum_avg::add() return FALSE; } +void Item_sum_avg::remove() +{ + Item_sum_sum::remove(); + if (!aggr->arg_is_null(true)) + { + DBUG_ASSERT(count > 0); + count--; + } +} + double Item_sum_avg::val_real() { DBUG_ASSERT(fixed == 1); @@ -2086,6 +2150,8 @@ longlong Item_sum_bit::val_int() void Item_sum_bit::clear() { bits= reset_bits; + if (as_window_function) + clear_as_window(); } Item *Item_sum_or::copy_or_same(THD* thd) @@ -2093,15 +2159,79 @@ Item *Item_sum_or::copy_or_same(THD* thd) return new (thd->mem_root) Item_sum_or(thd, this); } +bool Item_sum_bit::clear_as_window() +{ + memset(bit_counters, 0, sizeof(bit_counters)); + num_values_added= 0; + set_bits_from_counters(); + return 0; +} + +bool Item_sum_bit::remove_as_window(ulonglong value) +{ + DBUG_ASSERT(as_window_function); + for (int i= 0; i < NUM_BIT_COUNTERS; i++) + { + if (!bit_counters[i]) + { + // Don't attempt to remove values that were never added. + DBUG_ASSERT((value & (1 << i)) == 0); + continue; + } + bit_counters[i]-= (value & (1 << i)) ? 1 : 0; + } + DBUG_ASSERT(num_values_added > 0); + // Prevent overflow; + num_values_added = std::min(num_values_added, num_values_added - 1); + set_bits_from_counters(); + return 0; +} + +bool Item_sum_bit::add_as_window(ulonglong value) +{ + DBUG_ASSERT(as_window_function); + for (int i= 0; i < NUM_BIT_COUNTERS; i++) + { + bit_counters[i]+= (value & (1 << i)) ? 1 : 0; + } + // Prevent overflow; + num_values_added = std::max(num_values_added, num_values_added + 1); + set_bits_from_counters(); + return 0; +} + +void Item_sum_or::set_bits_from_counters() +{ + ulonglong value= 0; + for (int i= 0; i < NUM_BIT_COUNTERS; i++) + { + value|= bit_counters[i] > 0 ? (1 << i) : 0; + } + bits= value | reset_bits; +} bool Item_sum_or::add() { ulonglong value= (ulonglong) args[0]->val_int(); if (!args[0]->null_value) + { + if (as_window_function) + return add_as_window(value); bits|=value; + } return 0; } +void Item_sum_xor::set_bits_from_counters() +{ + ulonglong value= 0; + for (int i= 0; i < NUM_BIT_COUNTERS; i++) + { + value|= (bit_counters[i] % 2) ? (1 << i) : 0; + } + bits= value ^ reset_bits; +} + Item *Item_sum_xor::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_xor(thd, this); @@ -2112,10 +2242,31 @@ bool Item_sum_xor::add() { ulonglong value= (ulonglong) args[0]->val_int(); if (!args[0]->null_value) + { + if (as_window_function) + return add_as_window(value); bits^=value; + } return 0; } +void Item_sum_and::set_bits_from_counters() +{ + ulonglong value= 0; + if (!num_values_added) + { + bits= reset_bits; + return; + } + + for (int i= 0; i < NUM_BIT_COUNTERS; i++) + { + // We've only added values of 1 for this bit. 
+ if (bit_counters[i] == num_values_added) + value|= (1 << i); + } + bits= value & reset_bits; +} Item *Item_sum_and::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_and(thd, this); @@ -2126,7 +2277,11 @@ bool Item_sum_and::add() { ulonglong value= (ulonglong) args[0]->val_int(); if (!args[0]->null_value) + { + if (as_window_function) + return add_as_window(value); bits&=value; + } return 0; } @@ -2314,6 +2469,10 @@ void Item_sum_bit::reset_field() void Item_sum_bit::update_field() { + // We never call update_field when computing the function as a window + // function. Setting bits to a random value invalidates the bits counters and + // the result of the bit function becomes erroneous. + DBUG_ASSERT(!as_window_function); uchar *res=result_field->ptr; bits= uint8korr(res); add(); @@ -2896,7 +3055,7 @@ int group_concat_key_cmp_with_order(void* arg, const void* key1, field->table->s->null_bytes); int res= field->cmp((uchar*)key1 + offset, (uchar*)key2 + offset); if (res) - return (*order_item)->asc ? res : -res; + return ((*order_item)->direction == ORDER::ORDER_ASC) ? res : -res; } /* We can't return 0 because in that case the tree class would remove this @@ -3372,8 +3531,8 @@ bool Item_func_group_concat::setup(THD *thd) if (!ref_pointer_array) DBUG_RETURN(TRUE); memcpy(ref_pointer_array, args, arg_count * sizeof(Item*)); - if (setup_order(thd, ref_pointer_array, context->table_list, list, - all_fields, *order)) + if (setup_order(thd, Ref_ptr_array(ref_pointer_array, n_elems), + context->table_list, list, all_fields, *order)) DBUG_RETURN(TRUE); } @@ -3507,9 +3666,9 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type) if (i) str->append(','); orig_args[i + arg_count_field]->print(str, query_type); - if (order[i]->asc) + if (order[i]->direction == ORDER::ORDER_ASC) str->append(STRING_WITH_LEN(" ASC")); - else + else str->append(STRING_WITH_LEN(" DESC")); } } diff --git a/sql/item_sum.h b/sql/item_sum.h index 811e9d5c59c..e766e69a1c5 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -109,6 +109,7 @@ public: class st_select_lex; +class Window_spec; /** Class Item_sum is the base class used for special expressions that SQL calls @@ -347,7 +348,9 @@ public: enum Sumfunctype { COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC, AVG_DISTINCT_FUNC, MIN_FUNC, MAX_FUNC, STD_FUNC, - VARIANCE_FUNC, SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC + VARIANCE_FUNC, SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC, + ROW_NUMBER_FUNC, RANK_FUNC, DENSE_RANK_FUNC, PERCENT_RANK_FUNC, + CUME_DIST_FUNC, NTILE_FUNC }; Item **ref_by; /* pointer to a ref to the object used to register it */ @@ -540,12 +543,16 @@ public: virtual void clear()= 0; virtual bool add()= 0; virtual bool setup(THD *thd) { return false; } + + virtual void remove() { DBUG_ASSERT(0); } virtual void cleanup(); bool check_vcol_func_processor(uchar *int_arg) { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + + virtual void setup_window_func(THD *thd, Window_spec *window_spec) {} }; @@ -710,6 +717,7 @@ public: class Item_sum_int :public Item_sum_num { public: + Item_sum_int(THD *thd): Item_sum_num(thd) {} Item_sum_int(THD *thd, Item *item_par): Item_sum_num(thd, item_par) {} Item_sum_int(THD *thd, List &list): Item_sum_num(thd, list) {} Item_sum_int(THD *thd, Item_sum_int *item) :Item_sum_num(thd, item) {} @@ -724,7 +732,7 @@ public: class Item_sum_sum :public Item_sum_num, - public Type_handler_hybrid_field_type + public Type_handler_hybrid_field_type { protected: 
double sum; @@ -763,6 +771,11 @@ public: return has_with_distinct() ? "sum(distinct " : "sum("; } Item *copy_or_same(THD* thd); + void remove(); + +private: + void add_helper(bool perform_removal); + ulonglong count; }; @@ -775,6 +788,7 @@ class Item_sum_count :public Item_sum_int void clear(); bool add(); void cleanup(); + void remove(); public: Item_sum_count(THD *thd, Item *item_par): @@ -821,6 +835,8 @@ class Item_sum_count :public Item_sum_int class Item_sum_avg :public Item_sum_sum { public: + // TODO-cvicentiu given that Item_sum_sum now uses a counter of its own, in + // order to implement remove(), it is possible to remove this member. ulonglong count; uint prec_increment; uint f_precision, f_scale, dec_bin_size; @@ -839,6 +855,7 @@ public: } void clear(); bool add(); + void remove(); double val_real(); // In SPs we might force the "wrong" type with select into a declare variable longlong val_int() { return val_int_from_real(); } @@ -1019,14 +1036,18 @@ public: class Item_sum_bit :public Item_sum_int { -protected: - ulonglong reset_bits,bits; - public: Item_sum_bit(THD *thd, Item *item_par, ulonglong reset_arg): - Item_sum_int(thd, item_par), reset_bits(reset_arg), bits(reset_arg) {} + Item_sum_int(thd, item_par), reset_bits(reset_arg), bits(reset_arg), + as_window_function(FALSE), num_values_added(0) {} Item_sum_bit(THD *thd, Item_sum_bit *item): - Item_sum_int(thd, item), reset_bits(item->reset_bits), bits(item->bits) {} + Item_sum_int(thd, item), reset_bits(item->reset_bits), bits(item->bits), + as_window_function(item->as_window_function), + num_values_added(item->num_values_added) + { + if (as_window_function) + memcpy(bit_counters, item->bit_counters, sizeof(bit_counters)); + } enum Sumfunctype sum_func () const {return SUM_BIT_FUNC;} void clear(); longlong val_int(); @@ -1037,8 +1058,42 @@ public: void cleanup() { bits= reset_bits; + if (as_window_function) + clear_as_window(); Item_sum_int::cleanup(); } + void setup_window_func(THD *thd __attribute__((unused)), + Window_spec *window_spec __attribute__((unused))) + { + as_window_function= TRUE; + clear_as_window(); + } + void remove() + { + if (as_window_function) + { + remove_as_window(args[0]->val_int()); + return; + } + // Unless we're counting bits, we can not remove anything. + DBUG_ASSERT(0); + } + +protected: + static const int NUM_BIT_COUNTERS= 64; + ulonglong reset_bits,bits; + /* + Marks whether the function is to be computed as a window function. + */ + bool as_window_function; + // When used as an aggregate window function, we need to store + // this additional information. 
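The two members declared below (num_values_added and bit_counters) are what make removal possible when BIT_AND/BIT_OR/BIT_XOR are evaluated over a moving window frame: a plain accumulator cannot "un-OR" a value, but a per-bit counter can, which is what add_as_window(), remove_as_window() and the set_bits_from_counters() implementations earlier in item_sum.cc rely on (the same reason Item_sum_sum::add_helper() keeps a row count). A minimal standalone sketch of that counter technique, with hypothetical names that are not the server's API:

#include <cassert>
#include <cstdint>

/*
  Illustration only: one counter per bit over the rows currently inside
  the window frame (the role of bit_counters / num_values_added).
*/
struct Bit_counters_sketch
{
  static const int NBITS= 64;
  uint64_t cnt[NBITS];
  uint64_t n_rows;

  Bit_counters_sketch() : n_rows(0)
  {
    for (int i= 0; i < NBITS; i++)
      cnt[i]= 0;
  }
  void add(uint64_t v)                    // a row enters the frame
  {
    for (int i= 0; i < NBITS; i++)
      cnt[i]+= (v >> i) & 1;
    n_rows++;
  }
  void remove(uint64_t v)                 // a row leaves the frame
  {
    for (int i= 0; i < NBITS; i++)
      cnt[i]-= (v >> i) & 1;
    n_rows--;
  }
  uint64_t bit_or() const                 // bit set in at least one row
  {
    uint64_t r= 0;
    for (int i= 0; i < NBITS; i++)
      if (cnt[i])
        r|= 1ULL << i;
    return r;
  }
  uint64_t bit_and() const                // bit set in every row
  {
    if (!n_rows)
      return ~0ULL;                       // empty frame: neutral element
    uint64_t r= 0;
    for (int i= 0; i < NBITS; i++)
      if (cnt[i] == n_rows)
        r|= 1ULL << i;
    return r;
  }
  uint64_t bit_xor() const                // bit set in an odd number of rows
  {
    uint64_t r= 0;
    for (int i= 0; i < NBITS; i++)
      if (cnt[i] & 1)
        r|= 1ULL << i;
    return r;
  }
};

int main()
{
  Bit_counters_sketch c;
  c.add(6);                               // 110
  c.add(5);                               // 101
  assert(c.bit_or() == 7 && c.bit_and() == 4 && c.bit_xor() == 3);
  c.remove(6);                            // the frame slides past the first row
  assert(c.bit_or() == 5 && c.bit_and() == 5 && c.bit_xor() == 5);
  return 0;
}

Keeping counters avoids re-scanning the whole frame whenever a row is removed; the cost is one fixed 64-element array per item, which is exactly what the members below store.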
+ ulonglong num_values_added; + ulonglong bit_counters[NUM_BIT_COUNTERS]; + bool add_as_window(ulonglong value); + bool remove_as_window(ulonglong value); + bool clear_as_window(); + virtual void set_bits_from_counters()= 0; }; @@ -1050,28 +1105,37 @@ public: bool add(); const char *func_name() const { return "bit_or("; } Item *copy_or_same(THD* thd); + +private: + void set_bits_from_counters(); }; class Item_sum_and :public Item_sum_bit { - public: +public: Item_sum_and(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, ULONGLONG_MAX) {} Item_sum_and(THD *thd, Item_sum_and *item) :Item_sum_bit(thd, item) {} bool add(); const char *func_name() const { return "bit_and("; } Item *copy_or_same(THD* thd); + +private: + void set_bits_from_counters(); }; class Item_sum_xor :public Item_sum_bit { - public: +public: Item_sum_xor(THD *thd, Item *item_par): Item_sum_bit(thd, item_par, 0) {} Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {} bool add(); const char *func_name() const { return "bit_xor("; } Item *copy_or_same(THD* thd); + +private: + void set_bits_from_counters(); }; diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc new file mode 100644 index 00000000000..d157d545dad --- /dev/null +++ b/sql/item_windowfunc.cc @@ -0,0 +1,242 @@ +#include "item_windowfunc.h" +#include "my_dbug.h" +#include "my_global.h" +#include "sql_select.h" // test if group changed + + +bool +Item_window_func::resolve_window_name(THD *thd) +{ + if (window_spec) + { + /* The window name has been already resolved */ + return false; + } + DBUG_ASSERT(window_name != NULL && window_spec == NULL); + char *ref_name= window_name->str; + + /* !TODO: Add the code to resolve ref_name in outer queries */ + /* + First look for the deinition of the window with 'window_name' + in the current select + */ + List curr_window_specs= + List (thd->lex->current_select->window_specs); + List_iterator_fast it(curr_window_specs); + Window_spec *win_spec; + while((win_spec= it++)) + { + char *win_spec_name= win_spec->name(); + if (win_spec_name && + my_strcasecmp(system_charset_info, ref_name, win_spec_name) == 0) + { + window_spec= win_spec; + break; + } + } + + if (!window_spec) + { + my_error(ER_WRONG_WINDOW_SPEC_NAME, MYF(0), ref_name); + return true; + } + + return false; +} + + +void +Item_window_func::update_used_tables() +{ + used_tables_cache= 0; + window_func()->update_used_tables(); + used_tables_cache|= window_func()->used_tables(); + for (ORDER *ord= window_spec->partition_list->first; ord; ord=ord->next) + { + Item *item= *ord->item; + item->update_used_tables(); + used_tables_cache|= item->used_tables(); + } + for (ORDER *ord= window_spec->order_list->first; ord; ord=ord->next) + { + Item *item= *ord->item; + item->update_used_tables(); + used_tables_cache|= item->used_tables(); + } +} + + +bool +Item_window_func::fix_fields(THD *thd, Item **ref) +{ + DBUG_ASSERT(fixed == 0); + + enum_parsing_place place= thd->lex->current_select->parsing_place; + + if (!(place == SELECT_LIST || place == IN_ORDER_BY)) + { + my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); + return true; + } + + if (window_name && resolve_window_name(thd)) + return true; + + if (window_spec->window_frame && is_frame_prohibited()) + { + my_error(ER_NOT_ALLOWED_WINDOW_FRAME, MYF(0), window_func()->func_name()); + return true; + } + + if (window_spec->order_list->elements == 0 && is_order_list_mandatory()) + { + my_error(ER_NO_ORDER_LIST_IN_WINDOW_SPEC, MYF(0), window_func()->func_name()); + return true; + } + /* + TODO: 
why the last parameter is 'ref' in this call? What if window_func + decides to substitute itself for something else and does *ref=.... ? + This will substitute *this (an Item_window_func object) with Item_sum + object. Is this the intent? + */ + if (window_func()->fix_fields(thd, ref)) + return true; + + const_item_cache= false; + with_window_func= true; + with_sum_func= false; + + fix_length_and_dec(); + + max_length= window_func()->max_length; + maybe_null= window_func()->maybe_null; + + fixed= 1; + set_phase_to_initial(); + return false; +} + + +/* + @detail + Window function evaluates its arguments when it is scanning the temporary + table in partition/order-by order. That is, arguments should be read from + the temporary table, not from the original base columns. + + In order for this to work, we need to call "split_sum_func" for each + argument. The effect of the call is: + 1. the argument is added into ref_pointer_array. This will cause the + argument to be saved in the temp.table + 2. argument item is replaced with an Item_ref object. this object refers + the argument through the ref_pointer_array. + + then, change_to_use_tmp_fields() will replace ref_pointer_array with an + array that points to the temp.table fields. + This way, when window_func attempts to evaluate its arguments, it will use + Item_ref objects which will read data from the temp.table. + + Note: Before window functions, aggregate functions never needed to do such + transformations on their arguments. This is because grouping operation + does not need to read from the temp.table. + (Q: what happens when we first sort and then do grouping in a + group-after-group mode? dont group by items read from temp.table, then?) +*/ + +void Item_window_func::split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags) +{ + for (uint i=0; i < window_func()->argument_count(); i++) + { + Item **p_item= &window_func()->arguments()[i]; + (*p_item)->split_sum_func2(thd, ref_pointer_array, fields, p_item, flags); + } +} + + +/* + This must be called before advance_window() can be called. + + @detail + If we attempt to do it in fix_fields(), partition_fields will refer + to the original window function arguments. + We need it to refer to temp.table columns. +*/ + +void Item_window_func::setup_partition_border_check(THD *thd) +{ + partition_tracker.init(thd, window_spec->partition_list); + window_func()->setup_window_func(thd, window_spec); +} + + +void Item_sum_rank::setup_window_func(THD *thd, Window_spec *window_spec) +{ + /* TODO: move this into Item_window_func? 
*/ + peer_tracker.init(thd, window_spec->order_list); + clear(); +} + +void Item_sum_dense_rank::setup_window_func(THD *thd, Window_spec *window_spec) +{ + /* TODO: consider moving this && Item_sum_rank's implementation */ + peer_tracker.init(thd, window_spec->order_list); + clear(); +} + +bool Item_sum_dense_rank::add() +{ + if (peer_tracker.check_if_next_group() || first_add) + { + first_add= false; + dense_rank++; + } + + return false; +} + + +bool Item_sum_rank::add() +{ + row_number++; + if (peer_tracker.check_if_next_group()) + { + /* Row value changed */ + cur_rank= row_number; + } + return false; +} + +bool Item_window_func::check_if_partition_changed() +{ + return partition_tracker.check_if_next_group(); +} + +void Item_window_func::advance_window() +{ + if (check_if_partition_changed()) + { + /* Next partition */ + window_func()->clear(); + } + window_func()->add(); +} + +bool Item_sum_percent_rank::add() +{ + row_number++; + if (peer_tracker.check_if_next_group()) + { + /* Row value changed. */ + cur_rank= row_number; + } + return false; +} + +void Item_sum_percent_rank::setup_window_func(THD *thd, Window_spec *window_spec) +{ + /* TODO: move this into Item_window_func? */ + peer_tracker.init(thd, window_spec->order_list); + clear(); +} + + diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h new file mode 100644 index 00000000000..40f48cc7dc5 --- /dev/null +++ b/sql/item_windowfunc.h @@ -0,0 +1,757 @@ +#ifndef ITEM_WINDOWFUNC_INCLUDED +#define ITEM_WINDOWFUNC_INCLUDED + +#include "my_global.h" +#include "item.h" + +class Window_spec; + + +int test_if_group_changed(List &list); + +/* A wrapper around test_if_group_changed */ +class Group_bound_tracker +{ + List group_fields; + /* + During the first check_if_next_group, the list of cached_items is not + initialized. The compare function will return that the items match if + the field's value is the same as the Cached_item's default value (0). + This flag makes sure that we always return true during the first check. + + XXX This is better to be implemented within test_if_group_changed, but + since it is used in other parts of the codebase, we keep it here for now. + */ + bool first_check; +public: + void init(THD *thd, SQL_I_List *list) + { + for (ORDER *curr = list->first; curr; curr=curr->next) + { + Cached_item *tmp= new_Cached_item(thd, curr->item[0], TRUE); + group_fields.push_back(tmp); + } + first_check= true; + } + + void cleanup() + { + group_fields.empty(); + } + + /* + Check if the current row is in a different group than the previous row + this function was called for. + XXX: Side-effect: The new row's group becomes the current row's group. + + Returns true if there is a change between the current_group and the cached + value, or if it is the first check after a call to init. + */ + bool check_if_next_group() + { + if (test_if_group_changed(group_fields) > -1 || first_check) + { + first_check= false; + return true; + } + return false; + } + + /* + Check if the current row is in a different group than the previous row + check_if_next_group was called for. + + Compares the groups without the additional side effect of updating the + current cached values. + */ + int compare_with_cache() + { + List_iterator li(group_fields); + Cached_item *ptr; + int res; + while ((ptr= li++)) + { + if ((res= ptr->cmp_read_only())) + return res; + } + return 0; + } +}; + +/* + ROW_NUMBER() OVER (...) 
+ + @detail + - This is a Window function (not just an aggregate) + - It can be computed by doing one pass over select output, provided + the output is sorted according to the window definition. +*/ + +class Item_sum_row_number: public Item_sum_int +{ + longlong count; + +public: + void clear() + { + count= 0; + } + bool add() + { + count++; + return false; + } + void update_field() {} + + Item_sum_row_number(THD *thd) + : Item_sum_int(thd), count(0) {} + + enum Sumfunctype sum_func() const + { + return ROW_NUMBER_FUNC; + } + + longlong val_int() + { + return count; + } + const char*func_name() const + { + return "row_number"; + } + +}; + + +/* + RANK() OVER (...) Windowing function + + @detail + - This is a Window function (not just an aggregate) + - It can be computed by doing one pass over select output, provided + the output is sorted according to the window definition. + + The function is defined as: + + "The rank of row R is defined as 1 (one) plus the number of rows that + precede R and are not peers of R" + + "This implies that if two or more rows are not distinct with respect to + the window ordering, then there will be one or more" +*/ + +class Item_sum_rank: public Item_sum_int +{ +protected: + longlong row_number; // just ROW_NUMBER() + longlong cur_rank; // current value + + Group_bound_tracker peer_tracker; +public: + void clear() + { + /* This is called on partition start */ + cur_rank= 1; + row_number= 0; + } + + bool add(); + + longlong val_int() + { + return cur_rank; + } + + void update_field() {} + /* + void reset_field(); + TODO: ^^ what does this do ? It is not called ever? + */ + +public: + Item_sum_rank(THD *thd) + : Item_sum_int(thd) {} + + enum Sumfunctype sum_func () const + { + return RANK_FUNC; + } + + const char*func_name() const + { + return "rank"; + } + + void setup_window_func(THD *thd, Window_spec *window_spec); + void cleanup() + { + peer_tracker.cleanup(); + Item_sum_int::cleanup(); + } +}; + + +/* + DENSE_RANK() OVER (...) Windowing function + + @detail + - This is a Window function (not just an aggregate) + - It can be computed by doing one pass over select output, provided + the output is sorted according to the window definition. + + The function is defined as: + + "If DENSE_RANK is specified, then the rank of row R is defined as the + number of rows preceding and including R that are distinct with respect + to the window ordering" + + "This implies that there are no gaps in the sequential rank numbering of + rows in each window partition." +*/ + + +class Item_sum_dense_rank: public Item_sum_int +{ + longlong dense_rank; + bool first_add; + Group_bound_tracker peer_tracker; + public: + /* + XXX(cvicentiu) This class could potentially be implemented in the rank + class, with a switch for the DENSE case. + */ + void clear() + { + dense_rank= 0; + first_add= true; + } + bool add(); + void update_field() {} + longlong val_int() + { + return dense_rank; + } + + Item_sum_dense_rank(THD *thd) + : Item_sum_int(thd), dense_rank(0), first_add(true) {} + enum Sumfunctype sum_func () const + { + return DENSE_RANK_FUNC; + } + + const char*func_name() const + { + return "dense_rank"; + } + + void setup_window_func(THD *thd, Window_spec *window_spec); + + void cleanup() + { + peer_tracker.cleanup(); + Item_sum_int::cleanup(); + } +}; + +/* + A base window function (aggregate) that also holds a counter for the number + of rows. 
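Item_sum_rank and Item_sum_dense_rank above need only a single pass over a partition that is already sorted by the window's ORDER BY list; the peer_tracker (a Group_bound_tracker) tells them when the current row stops being a peer of the previous one. A minimal standalone sketch of that logic, using a plain array in place of the sorted temporary table (illustrative names only):

#include <cstdio>

int main()
{
  /* One partition, already sorted by the window ORDER BY expression. */
  int vals[]= { 10, 20, 20, 30 };
  const int n= sizeof(vals) / sizeof(vals[0]);

  long long row_number= 0, rank= 0, dense_rank= 0;
  for (int i= 0; i < n; i++)
  {
    /* The role played by Group_bound_tracker::check_if_next_group() */
    bool peer_changed= (i == 0 || vals[i] != vals[i - 1]);
    row_number++;
    if (peer_changed)
    {
      rank= row_number;          /* RANK: 1 + number of preceding non-peers */
      dense_rank++;              /* DENSE_RANK: no gaps between peer groups */
    }
    printf("%d: ROW_NUMBER=%lld RANK=%lld DENSE_RANK=%lld\n",
           vals[i], row_number, rank, dense_rank);
  }
  /* Prints 1/1/1, 2/2/2, 3/2/2, 4/4/3 for the four rows. */
  return 0;
}

The two peer rows (20, 20) share RANK and DENSE_RANK, and RANK then jumps to 4 while DENSE_RANK continues with 3, matching the standard definitions quoted in the class comments above.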
+*/ +class Item_sum_window_with_row_count : public Item_sum_num +{ + public: + Item_sum_window_with_row_count(THD *thd) : Item_sum_num(thd), + partition_row_count_(0) {} + + Item_sum_window_with_row_count(THD *thd, Item *arg) : + Item_sum_num(thd, arg), partition_row_count_(0) {}; + + void set_row_count(ulonglong count) { partition_row_count_ = count; } + + protected: + longlong get_row_count() { return partition_row_count_; } + private: + ulonglong partition_row_count_; +}; + +/* + @detail + "The relative rank of a row R is defined as (RK-1)/(NR-1), where RK is + defined to be the RANK of R and NR is defined to be the number of rows in + the window partition of R." + + Computation of this function requires two passes: + - First pass to find #rows in the partition + This is held within the row_count context. + - Second pass to compute rank of current row and the value of the function +*/ +class Item_sum_percent_rank: public Item_sum_window_with_row_count +{ + public: + Item_sum_percent_rank(THD *thd) + : Item_sum_window_with_row_count(thd), cur_rank(1) {} + + longlong val_int() + { + /* + Percent rank is a real value so calling the integer value should never + happen. It makes no sense as it gets truncated to either 0 or 1. + */ + DBUG_ASSERT(0); + return 0; + } + + double val_real() + { + /* + We can not get the real value without knowing the number of rows + in the partition. Don't divide by 0. + */ + ulonglong partition_rows = get_row_count(); + null_value= partition_rows > 0 ? false : true; + + return partition_rows > 1 ? + static_cast(cur_rank - 1) / (partition_rows - 1) : 0; + } + + enum Sumfunctype sum_func () const + { + return PERCENT_RANK_FUNC; + } + + const char*func_name() const + { + return "percent_rank"; + } + + void update_field() {} + + void clear() + { + cur_rank= 1; + row_number= 0; + } + bool add(); + enum Item_result result_type () const { return REAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + + void fix_length_and_dec() + { + decimals = 10; // TODO-cvicentiu find out how many decimals the standard + // requires. + } + + void setup_window_func(THD *thd, Window_spec *window_spec); + + private: + longlong cur_rank; // Current rank of the current row. + longlong row_number; // Value if this were ROW_NUMBER() function. + + Group_bound_tracker peer_tracker; + + void cleanup() + { + peer_tracker.cleanup(); + Item_sum_num::cleanup(); + } +}; + + + + +/* + @detail + "The relative rank of a row R is defined as NP/NR, where + - NP is defined to be the number of rows preceding or peer with R in the + window ordering of the window partition of R + - NR is defined to be the number of rows in the window partition of R. + + Just like with Item_sum_percent_rank, computation of this function requires + two passes. 
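As a worked illustration of the two formulas (sample numbers chosen here, not taken from the patch): for the ordered partition (10, 20, 20, 30) the first pass finds NR=4; the second pass then produces PERCENT_RANK = (RK-1)/(NR-1) = 0, 1/3, 1/3, 1 (with RK = 1, 2, 2, 4) and CUME_DIST = NP/NR = 0.25, 0.75, 0.75, 1.0 (with NP = 1, 3, 3, 4, a row counting as its own peer), the two peer rows sharing their values. This is why both items derive from Item_sum_window_with_row_count and need the partition row count to be supplied before the second pass.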
+*/ + +class Item_sum_cume_dist: public Item_sum_window_with_row_count +{ + public: + Item_sum_cume_dist(THD *thd) : Item_sum_window_with_row_count(thd), + current_row_count_(0) {} + + double val_real() + { + if (get_row_count() == 0) + { + null_value= true; + return 0; + } + ulonglong partition_row_count= get_row_count(); + null_value= false; + return static_cast(current_row_count_) / partition_row_count; + } + + bool add() + { + current_row_count_++; + return false; + } + + enum Sumfunctype sum_func() const + { + return CUME_DIST_FUNC; + } + + void clear() + { + current_row_count_= 0; + set_row_count(0); + } + + const char*func_name() const + { + return "cume_dist"; + } + + void update_field() {} + enum Item_result result_type () const { return REAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + + void fix_length_and_dec() + { + decimals = 10; // TODO-cvicentiu find out how many decimals the standard + // requires. + } + + private: + ulonglong current_row_count_; +}; + +class Item_sum_ntile : public Item_sum_window_with_row_count +{ + public: + Item_sum_ntile(THD* thd, Item* num_quantiles_expr) : + Item_sum_window_with_row_count(thd, num_quantiles_expr), + current_row_count_(0) {}; + + double val_real() + { + return val_int(); + } + + longlong val_int() + { + if (get_row_count() == 0) + { + null_value= true; + return 0; + } + + longlong num_quantiles= get_num_quantiles(); + + if (num_quantiles <= 0) { + my_error(ER_INVALID_NTILE_ARGUMENT, MYF(0)); + return true; + } + + null_value= false; + ulonglong quantile_size = get_row_count() / num_quantiles; + ulonglong extra_rows = get_row_count() - quantile_size * num_quantiles; + + if (current_row_count_ <= extra_rows * (quantile_size + 1)) + return (current_row_count_ - 1) / (quantile_size + 1) + 1; + + return (current_row_count_ - 1 - extra_rows) / quantile_size + 1; + } + + bool add() + { + current_row_count_++; + return false; + } + + enum Sumfunctype sum_func() const + { + return NTILE_FUNC; + } + + void clear() + { + current_row_count_= 0; + set_row_count(0); + } + + const char*func_name() const + { + return "ntile"; + } + + void update_field() {} + + enum Item_result result_type () const { return INT_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } + + private: + longlong get_num_quantiles() { return args[0]->val_int(); } + ulong current_row_count_; +}; + + +class Item_window_func : public Item_func_or_sum +{ + /* Window function parameters as we've got them from the parser */ +public: + LEX_STRING *window_name; +public: + Window_spec *window_spec; + + /* + This stores the data about the partition we're currently in. + advance_window() uses this to tell when we've left one partition and + entered another + */ + Group_bound_tracker partition_tracker; +public: + Item_window_func(THD *thd, Item_sum *win_func, LEX_STRING *win_name) + : Item_func_or_sum(thd, (Item *) win_func), + window_name(win_name), window_spec(NULL), + force_return_blank(true), + read_value_from_result_field(false) {} + + Item_window_func(THD *thd, Item_sum *win_func, Window_spec *win_spec) + : Item_func_or_sum(thd, (Item *) win_func), + window_name(NULL), window_spec(win_spec), + force_return_blank(true), + read_value_from_result_field(false) {} + + Item_sum *window_func() const { return (Item_sum *) args[0]; } + + void update_used_tables(); + + /* + This is used by filesort to mark the columns it needs to read (because they + participate in the sort criteria and/or row retrieval. 
Window functions can + only be used in sort criteria). + + Sorting by window function value is only done after the window functions + have been computed. In that case, window function will need to read its + temp.table field. In order to allow that, mark that field in the read_set. + */ + bool register_field_in_read_map(uchar *arg) + { + TABLE *table= (TABLE*) arg; + if (result_field && (result_field->table == table || !table)) + { + bitmap_set_bit(result_field->table->read_set, result_field->field_index); + } + return 0; + } + + bool is_frame_prohibited() const + { + switch (window_func()->sum_func()) { + case Item_sum::ROW_NUMBER_FUNC: + case Item_sum::RANK_FUNC: + case Item_sum::DENSE_RANK_FUNC: + case Item_sum::PERCENT_RANK_FUNC: + case Item_sum::CUME_DIST_FUNC: + case Item_sum::NTILE_FUNC: + return true; + default: + return false; + } + } + + bool requires_partition_size() const + { + switch (window_func()->sum_func()) { + case Item_sum::PERCENT_RANK_FUNC: + case Item_sum::CUME_DIST_FUNC: + case Item_sum::NTILE_FUNC: + return true; + default: + return false; + } + } + + bool requires_peer_size() const + { + switch (window_func()->sum_func()) { + case Item_sum::CUME_DIST_FUNC: + return true; + default: + return false; + } + } + + bool is_order_list_mandatory() const + { + switch (window_func()->sum_func()) { + case Item_sum::RANK_FUNC: + case Item_sum::DENSE_RANK_FUNC: + case Item_sum::PERCENT_RANK_FUNC: + case Item_sum::CUME_DIST_FUNC: + return true; + default: + return false; + } + } + + /* + Computation functions. + TODO: consoder merging these with class Group_bound_tracker. + */ + void setup_partition_border_check(THD *thd); + + void advance_window(); + bool check_if_partition_changed(); + + enum_field_types field_type() const + { + return ((Item_sum *) args[0])->field_type(); + } + enum Item::Type type() const { return Item::WINDOW_FUNC_ITEM; } + +private: + /* + Window functions are very special functions, so val_() methods have + special meaning for them: + + - Phase#1, "Initial" we run the join and put its result into temporary + table. For window functions, we write the default value (NULL?) as + a placeholder. + + - Phase#2: "Computation": executor does the scan in {PARTITION, ORDER BY} + order of this window function. It calls appropriate methods to inform + the window function about rows entering/leaving the window. + It calls window_func()->val_int() so that current window function value + can be saved and stored in the temp.table. + + - Phase#3: "Retrieval" the temporary table is read and passed to query + output. However, Item_window_func still remains in the select list, + so item_windowfunc->val_int() will be called. + During Phase#3, read_value_from_result_field= true. 
+ */ + bool force_return_blank; + bool read_value_from_result_field; + +public: + void set_phase_to_initial() + { + force_return_blank= true; + read_value_from_result_field= false; + } + void set_phase_to_computation() + { + force_return_blank= false; + read_value_from_result_field= false; + } + void set_phase_to_retrieval() + { + force_return_blank= false; + read_value_from_result_field= true; + } + + double val_real() + { + double res; + if (force_return_blank) + { + res= 0.0; + null_value= false; + } + else if (read_value_from_result_field) + { + res= result_field->val_real(); + null_value= result_field->is_null(); + } + else + { + res= window_func()->val_real(); + null_value= window_func()->null_value; + } + return res; + } + + longlong val_int() + { + longlong res; + if (force_return_blank) + { + res= 0; + null_value= false; + } + else if (read_value_from_result_field) + { + res= result_field->val_int(); + null_value= result_field->is_null(); + } + else + { + res= window_func()->val_int(); + null_value= window_func()->null_value; + } + return res; + } + + String* val_str(String* str) + { + String *res; + if (force_return_blank) + { + null_value= false; + str->length(0); + res= str; + } + else if (read_value_from_result_field) + { + if ((null_value= result_field->is_null())) + res= NULL; + else + res= result_field->val_str(str); + } + else + { + res= window_func()->val_str(str); + null_value= window_func()->null_value; + } + return res; + } + + my_decimal* val_decimal(my_decimal* dec) + { + my_decimal *res; + if (force_return_blank) + { + my_decimal_set_zero(dec); + null_value= false; + res= dec; + } + else if (read_value_from_result_field) + { + if ((null_value= result_field->is_null())) + res= NULL; + else + res= result_field->val_decimal(dec); + } + else + { + res= window_func()->val_decimal(dec); + null_value= window_func()->null_value; + } + return res; + } + + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags); + void fix_length_and_dec() + { + decimals = window_func()->decimals; + } + + const char* func_name() const { return "WF"; } + + bool fix_fields(THD *thd, Item **ref); + + bool resolve_window_name(THD *thd); + +}; + +#endif /* ITEM_WINDOWFUNC_INCLUDED */ diff --git a/sql/lex.h b/sql/lex.h index da5fa2de137..f7a183e1862 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -221,6 +221,7 @@ static SYMBOL symbols[] = { { "EVERY", SYM(EVERY_SYM)}, { "EXAMINED", SYM(EXAMINED_SYM)}, { "EXCHANGE", SYM(EXCHANGE_SYM)}, + { "EXCLUDE", SYM(EXCLUDE_SYM)}, { "EXECUTE", SYM(EXECUTE_SYM)}, { "EXISTS", SYM(EXISTS)}, { "EXIT", SYM(EXIT_SYM)}, @@ -241,6 +242,7 @@ static SYMBOL symbols[] = { { "FLOAT4", SYM(FLOAT_SYM)}, { "FLOAT8", SYM(DOUBLE_SYM)}, { "FLUSH", SYM(FLUSH_SYM)}, + { "FOLLOWING", SYM(FOLLOWING_SYM)}, { "FOR", SYM(FOR_SYM)}, { "FORCE", SYM(FORCE_SYM)}, { "FOREIGN", SYM(FOREIGN)}, @@ -425,9 +427,11 @@ static SYMBOL symbols[] = { { "OPTIONALLY", SYM(OPTIONALLY)}, { "OR", SYM(OR_SYM)}, { "ORDER", SYM(ORDER_SYM)}, + { "OTHERS", SYM(OTHERS_SYM)}, { "OUT", SYM(OUT_SYM)}, { "OUTER", SYM(OUTER)}, { "OUTFILE", SYM(OUTFILE)}, + { "OVER", SYM(OVER_SYM)}, { "OWNER", SYM(OWNER_SYM)}, { "PACK_KEYS", SYM(PACK_KEYS_SYM)}, { "PAGE", SYM(PAGE_SYM)}, @@ -446,6 +450,7 @@ static SYMBOL symbols[] = { { "POINT", SYM(POINT_SYM)}, { "POLYGON", SYM(POLYGON)}, { "PORT", SYM(PORT_SYM)}, + { "PRECEDING", SYM(PRECEDING_SYM)}, { "PRECISION", SYM(PRECISION)}, { "PREPARE", SYM(PREPARE_SYM)}, { "PRESERVE", SYM(PRESERVE_SYM)}, @@ -602,6 +607,7 @@ static SYMBOL symbols[] = { { "TEXT", 
SYM(TEXT_SYM)}, { "THAN", SYM(THAN_SYM)}, { "THEN", SYM(THEN_SYM)}, + { "TIES", SYM(TIES_SYM)}, { "TIME", SYM(TIME_SYM)}, { "TIMESTAMP", SYM(TIMESTAMP)}, { "TIMESTAMPADD", SYM(TIMESTAMP_ADD)}, @@ -619,6 +625,7 @@ static SYMBOL symbols[] = { { "TRUNCATE", SYM(TRUNCATE_SYM)}, { "TYPE", SYM(TYPE_SYM)}, { "TYPES", SYM(TYPES_SYM)}, + { "UNBOUNDED", SYM(UNBOUNDED_SYM)}, { "UNCOMMITTED", SYM(UNCOMMITTED_SYM)}, { "UNDEFINED", SYM(UNDEFINED_SYM)}, { "UNDO_BUFFER_SIZE", SYM(UNDO_BUFFER_SIZE_SYM)}, @@ -660,6 +667,7 @@ static SYMBOL symbols[] = { { "WHEN", SYM(WHEN_SYM)}, { "WHERE", SYM(WHERE)}, { "WHILE", SYM(WHILE_SYM)}, + { "WINDOW", SYM(WINDOW_SYM)}, { "WITH", SYM(WITH)}, { "WORK", SYM(WORK_SYM)}, { "WRAPPER", SYM(WRAPPER_SYM)}, @@ -682,17 +690,23 @@ static SYMBOL sql_functions[] = { { "BIT_XOR", SYM(BIT_XOR)}, { "CAST", SYM(CAST_SYM)}, { "COUNT", SYM(COUNT_SYM)}, + { "CUME_DIST", SYM(CUME_DIST_SYM)}, { "CURDATE", SYM(CURDATE)}, { "CURTIME", SYM(CURTIME)}, { "DATE_ADD", SYM(DATE_ADD_INTERVAL)}, { "DATE_SUB", SYM(DATE_SUB_INTERVAL)}, + { "DENSE_RANK", SYM(DENSE_RANK_SYM)}, { "EXTRACT", SYM(EXTRACT_SYM)}, { "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)}, { "MAX", SYM(MAX_SYM)}, { "MID", SYM(SUBSTRING)}, /* unireg function */ { "MIN", SYM(MIN_SYM)}, { "NOW", SYM(NOW_SYM)}, + { "NTILE", SYM(NTILE_SYM)}, { "POSITION", SYM(POSITION_SYM)}, + { "PERCENT_RANK", SYM(PERCENT_RANK_SYM)}, + { "RANK", SYM(RANK_SYM)}, + { "ROW_NUMBER", SYM(ROW_NUMBER_SYM)}, { "SESSION_USER", SYM(USER)}, { "STD", SYM(STD_SYM)}, { "STDDEV", SYM(STD_SYM)}, diff --git a/sql/log.cc b/sql/log.cc index dc8c08bfd36..f04b1432229 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -139,6 +139,12 @@ static bool start_binlog_background_thread(); static rpl_binlog_state rpl_global_gtid_binlog_state; +void setup_log_handling() +{ + rpl_global_gtid_binlog_state.init(); +} + + /** purge logs, master and slave sides both, related error code convertor. diff --git a/sql/log.h b/sql/log.h index 9bf80d6e603..e556ef91399 100644 --- a/sql/log.h +++ b/sql/log.h @@ -26,6 +26,7 @@ class Relay_log_info; class Format_description_log_event; +void setup_log_handling(); bool trans_has_updated_trans_table(const THD* thd); bool stmt_has_updated_trans_table(const THD *thd); bool use_trans_cache(const THD* thd, bool is_transactional); diff --git a/sql/log_event.cc b/sql/log_event.cc index b56a9e2aee3..3715f0cc4d4 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -4523,7 +4523,7 @@ compare_errors: "Error on master: message (format)='%s' error code=%d ; " "Error on slave: actual message='%s', error code=%d. " "Default database: '%s'. Query: '%s'", - ER_SAFE_THD(thd, expected_error), + ER_THD(thd, expected_error), expected_error, actual_error ? 
thd->get_stmt_da()->message() : "no error", actual_error, diff --git a/sql/mysqld.cc b/sql/mysqld.cc index e7d7f90d44e..845d114bc7a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -367,7 +367,7 @@ static my_bool opt_short_log_format= 0, opt_silent_startup= 0; uint kill_cached_threads; static uint wake_thread; ulong max_used_connections; -static volatile ulong cached_thread_count= 0; +volatile ulong cached_thread_count= 0; static char *mysqld_user, *mysqld_chroot; static char *default_character_set_name; static char *character_set_filesystem_name; @@ -1987,8 +1987,6 @@ static void __cdecl kill_server(int sig_ptr) if (wsrep_inited == 1) wsrep_deinit(true); - wsrep_thr_deinit(); - if (sig != MYSQL_KILL_SIGNAL && sig != 0) unireg_abort(1); /* purecov: inspected */ @@ -2144,6 +2142,9 @@ static void mysqld_exit(int exit_code) set_malloc_size_cb(NULL); cleanup_tls(); DBUG_LEAVE; + if (opt_endinfo && global_status_var.global_memory_used) + fprintf(stderr, "Warning: Memory not freed: %ld\n", + (long) global_status_var.global_memory_used); sd_notify(0, "STATUS=MariaDB server is down"); exit(exit_code); /* purecov: inspected */ } @@ -2213,11 +2214,12 @@ void clean_up(bool print_message) free_global_client_stats(); free_global_table_stats(); free_global_index_stats(); - delete_dynamic(&all_options); + delete_dynamic(&all_options); // This should be empty free_all_rpl_filters(); #ifdef HAVE_REPLICATION end_slave_list(); #endif + wsrep_thr_deinit(); my_uuid_end(); delete binlog_filter; delete global_rpl_filter; @@ -2234,13 +2236,12 @@ void clean_up(bool print_message) if (print_message && my_default_lc_messages && server_start_time) sql_print_information(ER_DEFAULT(ER_SHUTDOWN_COMPLETE),my_progname); - cleanup_errmsgs(); MYSQL_CALLBACK(thread_scheduler, end, ()); thread_scheduler= 0; mysql_library_end(); finish_client_errs(); - (void) my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); // finish server errs - DBUG_PRINT("quit", ("Error messages freed")); + cleanup_errmsgs(); + free_error_messages(); /* Tell main we are ready */ logger.cleanup_end(); sys_var_end(); @@ -4088,40 +4089,45 @@ extern "C" { static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) { THD *thd= current_thd; - /* If thread specific memory */ - if (likely(is_thread_specific)) + + if (likely(is_thread_specific)) /* If thread specific memory */ { - if (mysqld_server_initialized || thd) - { - /* - THD may not be set if we are called from my_net_init() before THD - thread has started. - However, this should never happen, so better to assert and - fix this. 
- */ - DBUG_ASSERT(thd); - if (thd) - { - DBUG_PRINT("info", ("memory_used: %lld size: %lld", - (longlong) thd->status_var.local_memory_used, - size)); - thd->status_var.local_memory_used+= size; - DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 || - !debug_assert_on_not_freed_memory); - } - } + /* + When thread specfic is set, both mysqld_server_initialized and thd + must be set + */ + DBUG_ASSERT(mysqld_server_initialized && thd); + + DBUG_PRINT("info", ("thd memory_used: %lld size: %lld", + (longlong) thd->status_var.local_memory_used, + size)); + thd->status_var.local_memory_used+= size; + DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 || + !debug_assert_on_not_freed_memory); } else if (likely(thd)) + { + DBUG_PRINT("info", ("global thd memory_used: %lld size: %lld", + (longlong) thd->status_var.global_memory_used, + size)); thd->status_var.global_memory_used+= size; + } else { - // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1) - int64 volatile * volatile ptr=&global_status_var.global_memory_used; - my_atomic_add64_explicit(ptr, size, MY_MEMORY_ORDER_RELAXED); + update_global_memory_status(size); +#ifndef EMBEDDED_LIBRARY + /* + Check if we have missed some mallocs. THis can't be done for embedded + server as the main code may have done calls to malloc before starting + the embedded library. + */ + DBUG_ASSERT(global_status_var.global_memory_used >= 0); +#endif } } } + /** Create a replication file name or base for file names. @@ -4158,6 +4164,22 @@ rpl_make_log_name(const char *opt, DBUG_RETURN(NULL); } +/* We have to setup my_malloc_size_cb_func early to catch all mallocs */ + +static int init_early_variables() +{ + if (pthread_key_create(&THR_THD, NULL)) + { + fprintf(stderr, "Fatal error: Can't create thread-keys\n"); + return 1; + } + set_current_thd(0); + set_malloc_size_cb(my_malloc_size_cb_func); + global_status_var.global_memory_used= 0; + return 0; +} + + static int init_common_variables() { umask(((~my_umask) & 0666)); @@ -4169,15 +4191,6 @@ static int init_common_variables() connection_errors_peer_addr= 0; my_decimal_set_zero(&decimal_zero); // set decimal_zero constant; - if (pthread_key_create(&THR_THD, NULL)) - { - sql_print_error("Can't create thread-keys"); - return 1; - } - - set_current_thd(0); - set_malloc_size_cb(my_malloc_size_cb_func); - init_libstrings(); tzset(); // Set tzname @@ -4714,6 +4727,7 @@ static int init_thread_environment() mysql_mutex_init(key_LOCK_global_system_variables, &LOCK_global_system_variables, MY_MUTEX_INIT_FAST); mysql_mutex_record_order(&LOCK_active_mi, &LOCK_global_system_variables); + mysql_mutex_record_order(&LOCK_status, &LOCK_thread_count); mysql_rwlock_init(key_rwlock_LOCK_system_variables_hash, &LOCK_system_variables_hash); mysql_mutex_init(key_LOCK_prepared_stmt_count, @@ -5020,6 +5034,8 @@ static int init_server_components() /* Setup logs */ + setup_log_handling(); + /* Enable old-fashioned error log, except when the user has requested help information. Since the implementation of plugin server @@ -5187,7 +5203,12 @@ static int init_server_components() variables even when a wsrep provider is not loaded. 
*/ + /* It's now safe to use thread specific memory */ + mysqld_server_initialized= 1; + +#ifndef EMBEDDED_LIBRARY wsrep_thr_init(); +#endif if (WSREP_ON && !wsrep_recovery && !opt_abort) /* WSREP BEFORE SE */ { @@ -5637,6 +5658,9 @@ int mysqld_main(int argc, char **argv) sf_leaking_memory= 1; // no safemalloc memory leak reports if we exit early mysqld_server_started= mysqld_server_initialized= 0; + if (init_early_variables()) + exit(1); + #ifdef HAVE_NPTL ld_assume_kernel_is_set= (getenv("LD_ASSUME_KERNEL") != 0); #endif @@ -5939,9 +5963,6 @@ int mysqld_main(int argc, char **argv) if (Events::init((THD*) 0, opt_noacl || opt_bootstrap)) unireg_abort(1); - /* It's now safe to use thread specific memory */ - mysqld_server_initialized= 1; - if (WSREP_ON) { if (opt_bootstrap) @@ -8310,15 +8331,16 @@ static int show_default_keycache(THD *thd, SHOW_VAR *var, char *buff, static int show_memory_used(THD *thd, SHOW_VAR *var, char *buff, + struct system_status_var *status_var, enum enum_var_type scope) { var->type= SHOW_LONGLONG; var->value= buff; if (scope == OPT_GLOBAL) - *(longlong*) buff= (global_status_var.local_memory_used + - global_status_var.global_memory_used); + *(longlong*) buff= (status_var->global_memory_used + + status_var->local_memory_used); else - *(longlong*) buff= thd->status_var.local_memory_used; + *(longlong*) buff= status_var->local_memory_used; return 0; } @@ -8747,7 +8769,9 @@ static int mysql_init_variables(void) prepared_stmt_count= 0; mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS; bzero((uchar*) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); - bzero((char *) &global_status_var, sizeof(global_status_var)); + /* Clear all except global_memory_used */ + bzero((char*) &global_status_var, offsetof(STATUS_VAR, + last_cleared_system_status_var)); opt_large_pages= 0; opt_super_large_pages= 0; #if defined(ENABLED_DEBUG_SYNC) @@ -9428,7 +9452,8 @@ static int get_options(int *argc_ptr, char ***argv_ptr) /* prepare all_options array */ my_init_dynamic_array(&all_options, sizeof(my_option), - array_elements(my_long_options), + array_elements(my_long_options) + + sys_var_elements(), array_elements(my_long_options)/4, MYF(0)); add_many_options(&all_options, my_long_options, array_elements(my_long_options)); sys_var_add_options(&all_options, 0); @@ -9992,6 +10017,7 @@ void refresh_status(THD *thd) /* Reset thread's status variables */ thd->set_status_var_init(); + thd->status_var.global_memory_used= 0; bzero((uchar*) &thd->org_status_var, sizeof(thd->org_status_var)); thd->start_bytes_received= 0; diff --git a/sql/mysqld.h b/sql/mysqld.h index ef4a0d6a47a..e538cbd724e 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -203,6 +203,7 @@ extern LEX_CSTRING reason_slave_blocked; extern ulong slave_trans_retries; extern uint slave_net_timeout; extern int max_user_connections; +extern volatile ulong cached_thread_count; extern ulong what_to_log,flush_time; extern ulong max_prepared_stmt_count, prepared_stmt_count; extern ulong open_files_limit; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7169a3eda81..a69709bbf03 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1533,7 +1533,7 @@ end: if (!head->no_keyread) { doing_key_read= 1; - head->enable_keyread(); + head->set_keyread(true); } head->prepare_for_position(); @@ -10643,7 +10643,7 @@ int read_keys_and_merge_scans(THD *thd, if (!head->key_read) { enabled_keyread= 1; - head->enable_keyread(); + head->set_keyread(true); } head->prepare_for_position(); @@ -10736,7 +10736,7 @@ int read_keys_and_merge_scans(THD *thd, 
index merge currently doesn't support "using index" at all */ if (enabled_keyread) - head->disable_keyread(); + head->set_keyread(false); if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, &unique->sort, 1 , 1, TRUE)) result= 1; @@ -10744,7 +10744,7 @@ int read_keys_and_merge_scans(THD *thd, err: if (enabled_keyread) - head->disable_keyread(); + head->set_keyread(false); DBUG_RETURN(1); } @@ -12071,9 +12071,6 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) DBUG_RETURN(NULL); /* Cannot execute with correlated conditions. */ /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/ - if (join->make_sum_func_list(join->all_fields, join->fields_list, 1)) - DBUG_RETURN(NULL); - List_iterator select_items_it(join->fields_list); is_agg_distinct = is_indexed_agg_distinct(join, &agg_distinct_flds); @@ -13108,7 +13105,17 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, num_blocks= (ha_rows)(table_records / keys_per_block) + 1; /* Compute the number of keys in a group. */ - keys_per_group= (ha_rows) index_info->actual_rec_per_key(group_key_parts - 1); + if (!group_key_parts) + { + /* Summary over the whole table */ + keys_per_group= table_records; + } + else + { + keys_per_group= (ha_rows) index_info->actual_rec_per_key(group_key_parts - + 1); + } + if (keys_per_group == 0) /* If there is no statistics try to guess */ /* each group contains 10% of all records */ keys_per_group= (table_records / 10) + 1; @@ -13456,7 +13463,7 @@ QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT() { DBUG_ASSERT(file == head->file); if (doing_key_read) - head->disable_keyread(); + head->set_keyread(false); /* There may be a code path when the same table was first accessed by index, then the index is closed, and the table is scanned (order by + loose scan). @@ -13649,7 +13656,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void) if (!head->key_read) { doing_key_read= 1; - head->enable_keyread(); /* We need only the key attributes */ + head->set_keyread(true); /* We need only the key attributes */ } if ((result= file->ha_index_init(index,1))) { diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 79b28a5994e..55c6c075f48 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -445,7 +445,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred); static bool convert_subq_to_jtbm(JOIN *parent_join, Item_in_subselect *subq_pred, bool *remove); static TABLE_LIST *alloc_join_nest(THD *thd); -static uint get_tmp_table_rec_length(Item **p_list, uint elements); +static uint get_tmp_table_rec_length(Ref_ptr_array p_list, uint elements); static double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size); static double get_tmp_table_write_cost(THD *thd, double row_count, @@ -1342,8 +1342,8 @@ static bool replace_where_subcondition(JOIN *join, Item **expr, static int subq_sj_candidate_cmp(Item_in_subselect* el1, Item_in_subselect* el2, void *arg) { - return (el1->sj_convert_priority > el2->sj_convert_priority) ? 1 : - ( (el1->sj_convert_priority == el2->sj_convert_priority)? 0 : -1); + return (el1->sj_convert_priority > el2->sj_convert_priority) ? -1 : + ( (el1->sj_convert_priority == el2->sj_convert_priority)? 
0 : 1); } @@ -2236,13 +2236,9 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map) JOIN_TAB *tab= join->best_positions[i].table; join->map2table[tab->table->tablenr]= tab; } - //List_iterator it(right_expr_list); - Item **ref_array= subq_select->ref_pointer_array; - Item **ref_array_end= ref_array + subq_select->item_list.elements; table_map map= 0; - //while ((item= it++)) - for (;ref_array < ref_array_end; ref_array++) - map |= (*ref_array)->used_tables(); + for (uint i=0; i < subq_select->item_list.elements; i++) + map|= subq_select->ref_pointer_array[i]->used_tables(); map= map & ~PSEUDO_TABLE_BITS; Table_map_iterator tm_it(map); int tableno; @@ -2305,15 +2301,14 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map) Length of the temptable record, in bytes */ -static uint get_tmp_table_rec_length(Item **p_items, uint elements) +static uint get_tmp_table_rec_length(Ref_ptr_array p_items, uint elements) { uint len= 0; Item *item; //List_iterator it(items); - Item **p_item; - for (p_item= p_items; p_item < p_items + elements ; p_item++) + for (uint i= 0; i < elements ; i++) { - item = *p_item; + item = p_items[i]; switch (item->result_type()) { case REAL_RESULT: len += sizeof(double); @@ -3562,13 +3557,10 @@ bool setup_sj_materialization_part1(JOIN_TAB *sjm_tab) */ sjm->sjm_table_param.init(); sjm->sjm_table_param.bit_fields_as_long= TRUE; - //List_iterator it(item_list); SELECT_LEX *subq_select= emb_sj_nest->sj_subq_pred->unit->first_select(); - Item **p_item= subq_select->ref_pointer_array; - Item **p_end= p_item + subq_select->item_list.elements; - //while((right_expr= it++)) - for(;p_item != p_end; p_item++) - sjm->sjm_table_cols.push_back(*p_item, thd->mem_root); + Ref_ptr_array p_items= subq_select->ref_pointer_array; + for (uint i= 0; i < subq_select->item_list.elements; i++) + sjm->sjm_table_cols.push_back(p_items[i], thd->mem_root); sjm->sjm_table_param.field_count= subq_select->item_list.elements; sjm->sjm_table_param.force_not_null_cols= TRUE; @@ -3724,13 +3716,13 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab) */ sjm->copy_field= new Copy_field[sjm->sjm_table_cols.elements]; //it.rewind(); - Item **p_item= emb_sj_nest->sj_subq_pred->unit->first_select()->ref_pointer_array; + Ref_ptr_array p_items= emb_sj_nest->sj_subq_pred->unit->first_select()->ref_pointer_array; for (uint i=0; i < sjm->sjm_table_cols.elements; i++) { bool dummy; Item_equal *item_eq; //Item *item= (it++)->real_item(); - Item *item= (*(p_item++))->real_item(); + Item *item= p_items[i]->real_item(); DBUG_ASSERT(item->type() == Item::FIELD_ITEM); Field *copy_to= ((Item_field*)item)->field; /* @@ -5602,7 +5594,7 @@ bool JOIN::choose_subquery_plan(table_map join_tables) */ /* C.1 Compute the cost of the materialization strategy. */ //uint rowlen= get_tmp_table_rec_length(unit->first_select()->item_list); - uint rowlen= get_tmp_table_rec_length(ref_pointer_array, + uint rowlen= get_tmp_table_rec_length(ref_ptrs, select_lex->item_list.elements); /* The cost of writing one row into the temporary table. 
*/ double write_cost= get_tmp_table_write_cost(thd, inner_record_count_1, diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index f36887eb137..ad3f5aed112 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -406,7 +406,7 @@ int opt_sum_query(THD *thd, if (!error && reckey_in_range(is_max, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; - table->disable_keyread(); + table->set_keyread(false); table->file->ha_index_end(); if (error) { @@ -968,7 +968,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, converted (for example to upper case) */ if (field->part_of_key.is_set(idx)) - table->enable_keyread(); + table->set_keyread(true); DBUG_RETURN(TRUE); } } diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 912ef4a7df7..6835594ca07 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -1105,7 +1105,7 @@ int compare_field_values(Dep_value_field *a, Dep_value_field *b, void *unused) uint b_ratio= b->field->table->tablenr*MAX_FIELDS + b->field->field_index; - return (a_ratio < b_ratio)? -1 : ((a_ratio == b_ratio)? 0 : 1); + return (a_ratio < b_ratio)? 1 : ((a_ratio == b_ratio)? 0 : -1); } diff --git a/sql/records.cc b/sql/records.cc index 3995bea6569..e7a4ab836c0 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -39,7 +39,7 @@ int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); static int rr_unpack_from_tempfile(READ_RECORD *info); static int rr_unpack_from_buffer(READ_RECORD *info); -static int rr_from_pointers(READ_RECORD *info); +int rr_from_pointers(READ_RECORD *info); static int rr_from_cache(READ_RECORD *info); static int init_rr_cache(THD *thd, READ_RECORD *info); static int rr_cmp(uchar *a,uchar *b); @@ -316,7 +316,7 @@ void end_read_record(READ_RECORD *info) } if (info->table) { - if (info->table->created) + if (info->table->is_created()) (void) info->table->file->extra(HA_EXTRA_NO_CACHE); if (info->read_record != rr_quick) // otherwise quick_range does it (void) info->table->file->ha_index_or_rnd_end(); @@ -535,7 +535,7 @@ static int rr_unpack_from_tempfile(READ_RECORD *info) return 0; } -static int rr_from_pointers(READ_RECORD *info) +int rr_from_pointers(READ_RECORD *info) { int tmp; uchar *cache_pos; diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index f54ef2b0081..5a94e1c5b54 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -1127,7 +1127,7 @@ rpl_slave_state::is_empty() } -rpl_binlog_state::rpl_binlog_state() +void rpl_binlog_state::init() { my_hash_init(&hash, &my_charset_bin, 32, offsetof(element, domain_id), sizeof(uint32), NULL, my_free, HASH_UNIQUE); @@ -1137,7 +1137,6 @@ rpl_binlog_state::rpl_binlog_state() initialized= 1; } - void rpl_binlog_state::reset_nolock() { diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h index ece6effbef6..f638a084e38 100644 --- a/sql/rpl_gtid.h +++ b/sql/rpl_gtid.h @@ -231,9 +231,10 @@ struct rpl_binlog_state /* Auxiliary buffer to sort gtid list. */ DYNAMIC_ARRAY gtid_sort_array; - rpl_binlog_state(); + rpl_binlog_state() :initialized(0) {} ~rpl_binlog_state(); + void init(); void reset_nolock(); void reset(); void free(); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index df721342d1d..02dbac46eb5 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -205,43 +205,56 @@ void init_master_log_pos(Master_info* mi) /** Parses the IO_CACHE for "key=" and returns the "key". + If no '=' found, returns the whole line (for END_MARKER). 
@param key [OUT] Key buffer @param max_size [IN] Maximum buffer size @param f [IN] IO_CACHE file + @param found_equal [OUT] Set true if a '=' was found. @retval 0 Either "key=" or '\n' found @retval 1 EOF */ -static int read_mi_key_from_file(char *key, int max_size, IO_CACHE *f) +static int +read_mi_key_from_file(char *key, int max_size, IO_CACHE *f, bool *found_equal) { int i= 0, c; - char *last_p; DBUG_ENTER("read_key_from_file"); - while (((c= my_b_get(f)) != '\n') && (c != my_b_EOF)) + *found_equal= false; + if (max_size <= 0) + DBUG_RETURN(1); + for (;;) { - last_p= key + i; - - if (i < max_size) + if (i >= max_size-1) { - if (c == '=') - { - /* We found '=', replace it by 0 and return. */ - *last_p= 0; - DBUG_RETURN(0); - } - else - *last_p= c; + key[i] = '\0'; + DBUG_RETURN(0); + } + c= my_b_get(f); + if (c == my_b_EOF) + { + DBUG_RETURN(1); + } + else if (c == '\n') + { + key[i]= '\0'; + DBUG_RETURN(0); + } + else if (c == '=') + { + key[i]= '\0'; + *found_equal= true; + DBUG_RETURN(0); + } + else + { + key[i]= c; + ++i; } - ++i; } - - if (c == my_b_EOF) - DBUG_RETURN(1); - - DBUG_RETURN(0); + /* NotReached */ } enum { @@ -539,6 +552,10 @@ file '%s')", fname); if (lines >= LINE_FOR_LAST_MYSQL_FUTURE) { uint i; + bool got_eq; + bool seen_using_gtid= false; + bool seen_do_domain_ids=false, seen_ignore_domain_ids=false; + /* Skip lines used by / reserved for MySQL >= 5.6. */ for (i= LINE_FOR_FIRST_MYSQL_5_6; i <= LINE_FOR_LAST_MYSQL_FUTURE; ++i) { @@ -551,11 +568,12 @@ file '%s')", fname); for "key=" and returns the "key" if found. The "value" can then the parsed on case by case basis. The "unknown" lines would be ignored to facilitate downgrades. + 10.0 does not have the END_MARKER before any left-overs at the end + of the file. So ignore any but the first occurrence of a key. 
*/ - while (!read_mi_key_from_file(buf, sizeof(buf), &mi->file)) + while (!read_mi_key_from_file(buf, sizeof(buf), &mi->file, &got_eq)) { - /* using_gtid */ - if (!strncmp(buf, STRING_WITH_LEN("using_gtid"))) + if (got_eq && !seen_using_gtid && !strcmp(buf, "using_gtid")) { int val; if (!init_intvar_from_file(&val, &mi->file, 0)) @@ -566,15 +584,13 @@ file '%s')", fname); mi->using_gtid= Master_info::USE_GTID_SLAVE_POS; else mi->using_gtid= Master_info::USE_GTID_NO; - continue; + seen_using_gtid= true; } else { sql_print_error("Failed to initialize master info using_gtid"); goto errwithmsg; } } - - /* DO_DOMAIN_IDS */ - if (!strncmp(buf, STRING_WITH_LEN("do_domain_ids"))) + else if (got_eq && !seen_do_domain_ids && !strcmp(buf, "do_domain_ids")) { if (mi->domain_id_filter.init_ids(&mi->file, Domain_id_filter::DO_DOMAIN_IDS)) @@ -582,11 +598,10 @@ file '%s')", fname); sql_print_error("Failed to initialize master info do_domain_ids"); goto errwithmsg; } - continue; + seen_do_domain_ids= true; } - - /* IGNORE_DOMAIN_IDS */ - if (!strncmp(buf, STRING_WITH_LEN("ignore_domain_ids"))) + else if (got_eq && !seen_ignore_domain_ids && + !strcmp(buf, "ignore_domain_ids")) { if (mi->domain_id_filter.init_ids(&mi->file, Domain_id_filter::IGNORE_DOMAIN_IDS)) @@ -595,9 +610,9 @@ file '%s')", fname); "ignore_domain_ids"); goto errwithmsg; } - continue; + seen_ignore_domain_ids= true; } - else if (!strncmp(buf, STRING_WITH_LEN("END_MARKER"))) + else if (!got_eq && !strcmp(buf, "END_MARKER")) { /* Guard agaist extra left-overs at the end of file, in case a later diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index acdedd6e0a0..df036d0e23f 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -987,7 +987,6 @@ handle_rpl_parallel_thread(void *arg) thd->client_capabilities = CLIENT_LOCAL_FILES; thd->net.reading_or_writing= 0; thd_proc_info(thd, "Waiting for work from main SQL threads"); - thd->set_time(); thd->variables.lock_wait_timeout= LONG_TIMEOUT; thd->system_thread_info.rpl_sql_info= &sql_info; /* diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 2dd6f7d7afc..f9d0910a2a7 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -70,6 +70,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery) DBUG_ENTER("Relay_log_info::Relay_log_info"); relay_log.is_relay_log= TRUE; + relay_log_state.init(); #ifdef HAVE_PSI_INTERFACE relay_log.set_psi_keys(key_RELAYLOG_LOCK_index, key_RELAYLOG_update_cond, diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h index e896c189a8e..3eaee90d0f6 100644 --- a/sql/rpl_rli.h +++ b/sql/rpl_rli.h @@ -347,10 +347,11 @@ public: rpl_parallel parallel; /* - The relay_log_state keeps track of the current binlog state of the execution - of the relay log. This is used to know where to resume current GTID position - if the slave thread is stopped and restarted. - It is only accessed from the SQL thread, so it does not need any locking. + The relay_log_state keeps track of the current binlog state of the + execution of the relay log. This is used to know where to resume + current GTID position if the slave thread is stopped and + restarted. It is only accessed from the SQL thread, so it does + not need any locking. */ rpl_binlog_state relay_log_state; /* diff --git a/sql/set_var.cc b/sql/set_var.cc index b5430c56865..5392a0065ac 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -64,7 +64,7 @@ int sys_var_init() /* Must be already initialized. 
*/ DBUG_ASSERT(system_charset_info != NULL); - if (my_hash_init(&system_variable_hash, system_charset_info, 100, 0, + if (my_hash_init(&system_variable_hash, system_charset_info, 700, 0, 0, (my_hash_get_key) get_sys_var_length, 0, HASH_UNIQUE)) goto error; @@ -78,6 +78,11 @@ error: DBUG_RETURN(1); } +uint sys_var_elements() +{ + return system_variable_hash.records; +} + int sys_var_add_options(DYNAMIC_ARRAY *long_options, int parse_flags) { uint saved_elements= long_options->elements; diff --git a/sql/set_var.h b/sql/set_var.h index cf86ecf18fa..060a4e1a57c 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -414,6 +414,7 @@ extern sys_var *Sys_autocommit_ptr; CHARSET_INFO *get_old_charset_by_name(const char *old_name); int sys_var_init(); +uint sys_var_elements(); int sys_var_add_options(DYNAMIC_ARRAY *long_options, int parse_flags); void sys_var_end(void); diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 0d168e937a9..376c1eb9d0d 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7154,3 +7154,50 @@ ER_WRONG_ORDER_IN_WITH_CLAUSE eng "The definition of the table '%s' refers to the table '%s' defined later in a non-recursive WITH clause" ER_RECURSIVE_QUERY_IN_WITH_CLAUSE eng "Recursive queries in WITH clause are not supported yet" + +# +# Internal errors, not used +# +skip-to-error-number 2000 + +# MySQL 5.7 error numbers starts here +skip-to-error-number 3000 + +ER_MYSQL_57_TEST + eng "5.7 test" + +# MariaDB extra error numbers starts from 4000 +skip-to-error-number 4000 + +ER_WRONG_WINDOW_SPEC_NAME + eng "Window specification with name '%s' is not defined" +ER_DUP_WINDOW_NAME + eng "Multiple window specifications with the same name '%s'" +ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC + eng "Window specification referencing another one '%s' cannot contain partition list" +ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC + eng "Referenced window specification '%s' already contains order list" +ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC + eng "Referenced window specification '%s' cannot contain window frame" +ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS + eng "Unacceptable combination of window frame bound specifications" +ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION + eng "Window function is allowed only in SELECT list and ORDER BY clause" +ER_WINDOW_FUNCTION_IN_WINDOW_SPEC + eng "Window function is not allowed in window specification" +ER_NOT_ALLOWED_WINDOW_FRAME + eng "Window frame is not allowed with '%s'" +ER_NO_ORDER_LIST_IN_WINDOW_SPEC + eng "No order list in window specification for '%s'" +ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY + eng "RANGE-type frame requires ORDER BY clause with single sort key" +ER_WRONG_TYPE_FOR_ROWS_FRAME + eng "Integer is required for ROWS-type frame" +ER_WRONG_TYPE_FOR_RANGE_FRAME + eng "Numeric datatype is required for RANGE-type frame" +ER_FRAME_EXCLUSION_NOT_SUPPORTED + eng "Frame exclusion is not supported yet" +ER_WINDOW_FUNCTION_DONT_HAVE_FRAME + eng "This window function may not have a window frame" +ER_INVALID_NTILE_ARGUMENT + eng "Argument of NTILE must be greater than 0" diff --git a/sql/slave.cc b/sql/slave.cc index 93506bc2ccd..30a0018f490 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4955,7 +4955,6 @@ err_during_init: delete serial_rgi; mysql_mutex_unlock(&LOCK_thread_count); - THD_CHECK_SENTRY(thd); delete thd; thread_safe_decrement32(&service_thread_count); signal_thd_deleted(); diff --git a/sql/sql_analyze_stmt.cc b/sql/sql_analyze_stmt.cc index d11c93229b0..58f72d6b8de 100644 --- a/sql/sql_analyze_stmt.cc +++ 
b/sql/sql_analyze_stmt.cc @@ -69,75 +69,3 @@ void Filesort_tracker::print_json_members(Json_writer *writer) } } - -/* - Report that we are doing a filesort. - @return - Tracker object to be used with filesort -*/ - -Filesort_tracker *Sort_and_group_tracker::report_sorting(THD *thd) -{ - DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS); - - if (total_actions) - { - /* This is not the first execution. Check */ - if (qep_actions[cur_action] != EXPL_ACTION_FILESORT) - { - varied_executions= true; - cur_action++; - if (!dummy_fsort_tracker) - dummy_fsort_tracker= new (thd->mem_root) Filesort_tracker(is_analyze); - return dummy_fsort_tracker; - } - return qep_actions_data[cur_action++].filesort_tracker; - } - - Filesort_tracker *fs_tracker= new(thd->mem_root)Filesort_tracker(is_analyze); - qep_actions_data[cur_action].filesort_tracker= fs_tracker; - qep_actions[cur_action++]= EXPL_ACTION_FILESORT; - - return fs_tracker; -} - - -void Sort_and_group_tracker::report_tmp_table(TABLE *tbl) -{ - DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS); - if (total_actions) - { - /* This is not the first execution. Check if the steps match. */ - // todo: should also check that tmp.table kinds are the same. - if (qep_actions[cur_action] != EXPL_ACTION_TEMPTABLE) - varied_executions= true; - } - - if (!varied_executions) - { - qep_actions[cur_action]= EXPL_ACTION_TEMPTABLE; - // qep_actions_data[cur_action]= .... - } - - cur_action++; -} - - -void Sort_and_group_tracker::report_duplicate_removal() -{ - DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS); - if (total_actions) - { - /* This is not the first execution. Check if the steps match. */ - if (qep_actions[cur_action] != EXPL_ACTION_REMOVE_DUPS) - varied_executions= true; - } - - if (!varied_executions) - { - qep_actions[cur_action]= EXPL_ACTION_REMOVE_DUPS; - } - - cur_action++; -} - diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h index d7634bdfb85..2a08a842dfc 100644 --- a/sql/sql_analyze_stmt.h +++ b/sql/sql_analyze_stmt.h @@ -284,174 +284,3 @@ private: ulonglong sort_buffer_size; }; - -typedef enum -{ - EXPL_NO_TMP_TABLE=0, - EXPL_TMP_TABLE_BUFFER, - EXPL_TMP_TABLE_GROUP, - EXPL_TMP_TABLE_DISTINCT -} enum_tmp_table_use; - - -typedef enum -{ - EXPL_ACTION_EOF, /* not-an-action */ - EXPL_ACTION_FILESORT, - EXPL_ACTION_TEMPTABLE, - EXPL_ACTION_REMOVE_DUPS, -} enum_qep_action; - - -/* - This is to track how a JOIN object has resolved ORDER/GROUP BY/DISTINCT - - We are not tied to the query plan at all, because query plan does not have - sufficient information. *A lot* of decisions about ordering/grouping are - made at very late stages (in JOIN::exec, JOIN::init_execution, in - create_sort_index and even in create_tmp_table). - - The idea is that operations that happen during select execution will report - themselves. We have these operations: - - Sorting with filesort() - - Duplicate row removal (the one done by remove_duplicates()). - - Use of temporary table to buffer the result. - - There is also "Selection" operation, done by do_select(). It reads rows, - there are several distinct cases: - 1. doing the join operation on the base tables - 2. reading the temporary table - 3. reading the filesort output - it would be nice to build execution graph, e.g. - - Select(JOIN op) -> temp.table -> filesort -> Select(filesort result) - - the problem is that there is no way to tell what a do_select() call will do. - - Our solution is not to have explicit selection operations. 
We make these - assumptions about the query plan: - - Select(JOIN op) is the first operation in the query plan - - Unless the first recorded operation is filesort(). filesort() is unable - read result of a select, so when we find it first, the query plan is: - - filesort(first join table) -> Select(JOIN op) -> ... - - the other popular query plan is: - - Select (JOIN op) -> temp.table -> filesort() -> ... - -///TODO: handle repeated execution with subselects! -*/ - -class Sort_and_group_tracker : public Sql_alloc -{ - enum { MAX_QEP_ACTIONS = 5 }; - - /* Query actions in the order they were made. */ - enum_qep_action qep_actions[MAX_QEP_ACTIONS]; - - /* Number for the next action */ - int cur_action; - - /* - Non-zero means there was already an execution which had - #total_actions actions - */ - int total_actions; - - int get_n_actions() - { - return total_actions? total_actions: cur_action; - } - - /* - TRUE<=>there were executions which took different sort/buffer/de-duplicate - routes. The counter values are not meaningful. - */ - bool varied_executions; - - /* Details about query actions */ - union - { - Filesort_tracker *filesort_tracker; - enum_tmp_table_use tmp_table; - } - qep_actions_data[MAX_QEP_ACTIONS]; - - Filesort_tracker *dummy_fsort_tracker; - bool is_analyze; -public: - Sort_and_group_tracker(bool is_analyze_arg) : - cur_action(0), total_actions(0), varied_executions(false), - dummy_fsort_tracker(NULL), - is_analyze(is_analyze_arg) - {} - - /*************** Reporting interface ***************/ - /* Report that join execution is started */ - void report_join_start() - { - if (!total_actions && cur_action != 0) - { - /* This is a second execution */ - total_actions= cur_action; - } - cur_action= 0; - } - - /* - Report that a temporary table is created. The next step is to write to the - this tmp. table - */ - void report_tmp_table(TABLE *tbl); - - /* - Report that we are doing a filesort. - @return - Tracker object to be used with filesort - */ - Filesort_tracker *report_sorting(THD *thd); - - /* - Report that remove_duplicates() is invoked [on a temp. table]. - We don't collect any statistics on this operation, yet. - */ - void report_duplicate_removal(); - - friend class Iterator; - /*************** Statistics retrieval interface ***************/ - bool had_varied_executions() { return varied_executions; } - - class Iterator - { - Sort_and_group_tracker *owner; - int idx; - public: - Iterator(Sort_and_group_tracker *owner_arg) : - owner(owner_arg), idx(owner_arg->get_n_actions() - 1) - {} - - enum_qep_action get_next(Filesort_tracker **tracker/*, - enum_tmp_table_use *tmp_table_use*/) - { - /* Walk back through the array... 
*/ - if (idx < 0) - return EXPL_ACTION_EOF; - switch (owner->qep_actions[idx]) - { - case EXPL_ACTION_FILESORT: - *tracker= owner->qep_actions_data[idx].filesort_tracker; - break; - case EXPL_ACTION_TEMPTABLE: - //*tmp_table_use= tmp_table_kind[tmp_table_idx++]; - break; - default: - break; - } - return owner->qep_actions[idx--]; - } - - bool is_last_element() { return idx == -1; } - }; -}; - diff --git a/sql/sql_array.h b/sql/sql_array.h index 159951e26a6..bbaa653b177 100644 --- a/sql/sql_array.h +++ b/sql/sql_array.h @@ -85,6 +85,15 @@ public: Element_type *array() const { return m_array; } + bool operator==(const Bounds_checked_array&rhs) const + { + return m_array == rhs.m_array && m_size == rhs.m_size; + } + bool operator!=(const Bounds_checked_array&rhs) const + { + return m_array != rhs.m_array || m_size != rhs.m_size; + } + private: Element_type *m_array; size_t m_size; @@ -230,6 +239,11 @@ public: delete_dynamic(&array); } + void free_memory() + { + delete_dynamic(&array); + } + typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2); void sort(CMP_FUNC cmp_func) diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 08509a0e2bf..18e7ee950e6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2551,8 +2551,13 @@ retry_share: (void) ot_ctx->request_backoff_action(Open_table_context::OT_DISCOVER, table_list); else if (share->crashed) - (void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR, - table_list); + { + if (!(flags & MYSQL_OPEN_IGNORE_REPAIR)) + (void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR, + table_list); + else + table_list->crashed= 1; /* Mark that table was crashed */ + } goto err_lock; } if (open_table_entry_fini(thd, share, table)) @@ -6824,6 +6829,8 @@ find_field_in_tables(THD *thd, Item_ident *item, or as a field name without alias, or as a field hidden by alias, or ignoring alias) + limit How many items in the list to check + (if limit==0 then all items are to be checked) RETURN VALUES 0 Item is not found or item is not unique, @@ -6841,9 +6848,10 @@ Item **not_found_item= (Item**) 0x1; Item ** find_item_in_list(Item *find, List &items, uint *counter, find_item_error_report_type report_error, - enum_resolution_type *resolution) + enum_resolution_type *resolution, uint limit) { List_iterator li(items); + uint n_items= limit == 0 ? 
items.elements : limit; Item **found=0, **found_unaliased= 0, *item; const char *db_name=0; const char *field_name=0; @@ -6867,8 +6875,9 @@ find_item_in_list(Item *find, List &items, uint *counter, db_name= ((Item_ident*) find)->db_name; } - for (uint i= 0; (item=li++); i++) + for (uint i= 0; i < n_items; i++) { + item= li++; if (field_name && (item->real_item()->type() == Item::FIELD_ITEM || ((item->type() == Item::REF_ITEM) && @@ -7749,11 +7758,13 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, List *sum_func_list, uint wild_num) { + if (!wild_num) + return(0); + Item *item; List_iterator it(fields); Query_arena *arena, backup; DBUG_ENTER("setup_wild"); - DBUG_ASSERT(wild_num != 0); /* Don't use arena if we are not in prepared statements or stored procedures @@ -7832,7 +7843,7 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, ** Check that all given fields exists and fill struct with current data ****************************************************************************/ -bool setup_fields(THD *thd, Item **ref_pointer_array, +bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &fields, enum_mark_columns mark_used_columns, List *sum_func_list, bool allow_sum_func) { @@ -7842,7 +7853,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, List_iterator it(fields); bool save_is_item_list_lookup; DBUG_ENTER("setup_fields"); - DBUG_PRINT("enter", ("ref_pointer_array: %p", ref_pointer_array)); + DBUG_PRINT("enter", ("ref_pointer_array: %p", ref_pointer_array.array())); thd->mark_used_columns= mark_used_columns; DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); @@ -7864,8 +7875,11 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, TODO: remove it when (if) we made one list for allfields and ref_pointer_array */ - if (ref_pointer_array) - bzero(ref_pointer_array, sizeof(Item *) * fields.elements); + if (!ref_pointer_array.is_null()) + { + DBUG_ASSERT(ref_pointer_array.size() >= fields.elements); + memset(ref_pointer_array.array(), 0, sizeof(Item *) * fields.elements); + } /* We call set_entry() there (before fix_fields() of the whole list of field @@ -7883,7 +7897,7 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, while ((var= li++)) var->set_entry(thd, FALSE); - Item **ref= ref_pointer_array; + Ref_ptr_array ref= ref_pointer_array; thd->lex->current_select->cur_pos_in_select_list= 0; while ((item= it++)) { @@ -7896,12 +7910,20 @@ bool setup_fields(THD *thd, Item **ref_pointer_array, DBUG_PRINT("info", ("thd->mark_used_columns: %d", thd->mark_used_columns)); DBUG_RETURN(TRUE); /* purecov: inspected */ } - if (ref) - *(ref++)= item; - if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && - sum_func_list) + if (!ref.is_null()) + { + ref[0]= item; + ref.pop_front(); + } + /* + split_sum_func() must be called for Window Function items, see + Item_window_func::split_sum_func. + */ + if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && + sum_func_list) || item->with_window_func) item->split_sum_func(thd, ref_pointer_array, *sum_func_list, SPLIT_SUM_SELECT); + thd->lex->current_select->select_list_tables|= item->used_tables(); thd->lex->used_tables|= item->used_tables(); thd->lex->current_select->cur_pos_in_select_list++; } @@ -8320,7 +8342,10 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, views and natural joins this update is performed inside the loop below. 
*/ if (table) + { thd->lex->used_tables|= table->map; + thd->lex->current_select->select_list_tables|= table->map; + } /* Initialize a generic field iterator for the current table reference. @@ -8412,6 +8437,8 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, if (field_table) { thd->lex->used_tables|= field_table->map; + thd->lex->current_select->select_list_tables|= + field_table->map; field_table->covering_keys.intersect(field->part_of_key); field_table->merge_keys.merge(field->part_of_key); field_table->used_fields++; diff --git a/sql/sql_base.h b/sql/sql_base.h index b6e135b6feb..d6bd0e2ace7 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -107,6 +107,10 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, table flush, wait on thr_lock.c locks) while opening and locking table. */ #define MYSQL_OPEN_IGNORE_KILLED 0x8000 +/** + Don't try to auto-repair table +*/ +#define MYSQL_OPEN_IGNORE_REPAIR 0x10000 /** Please refer to the internals manual. */ #define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\ @@ -168,7 +172,7 @@ void make_leaves_list(THD *thd, List &list, TABLE_LIST *tables, bool full_table_list, TABLE_LIST *boundary); int setup_wild(THD *thd, TABLE_LIST *tables, List &fields, List *sum_func_list, uint wild_num); -bool setup_fields(THD *thd, Item** ref_pointer_array, +bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &item, enum_mark_columns mark_used_columns, List *sum_func_list, bool allow_sum_func); void unfix_fields(List &items); @@ -197,7 +201,7 @@ Field * find_field_in_table_sef(TABLE *table, const char *name); Item ** find_item_in_list(Item *item, List &items, uint *counter, find_item_error_report_type report_error, - enum_resolution_type *resolution); + enum_resolution_type *resolution, uint limit= 0); bool setup_tables(THD *thd, Name_resolution_context *context, List *from_clause, TABLE_LIST *tables, List &leaves, bool select_insert, @@ -382,7 +386,7 @@ inline TABLE_LIST *find_table_in_local_list(TABLE_LIST *table, } -inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array, +inline bool setup_fields_with_no_wrap(THD *thd, Ref_ptr_array ref_pointer_array, List &item, enum_mark_columns mark_used_columns, List *sum_func_list, diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 91dd8ad7325..42b80d9b143 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -354,7 +354,7 @@ const uchar *query_state_map; #include "emb_qcache.h" #endif -#if !defined(EXTRA_DBUG) && !defined(DBUG_OFF) +#if defined(EXTRA_DEBUG) && !defined(DBUG_OFF) #define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock 0x%lx",(ulong)(M))); \ if (!mysql_rwlock_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")); \ else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index e3b70566597..b1217cb1f9f 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -908,7 +908,8 @@ THD::THD(bool is_wsrep_applier) */ THD *old_THR_THD= current_thd; set_current_thd(this); - status_var.local_memory_used= status_var.global_memory_used= 0; + status_var.local_memory_used= sizeof(THD); + status_var.global_memory_used= 0; main_da.init(); /* @@ -1641,6 +1642,8 @@ THD::~THD() that memory allocation counting is done correctly */ set_current_thd(this); + if (!status_in_global) + add_status_to_global(); /* Ensure that no one is using THD */ mysql_mutex_lock(&LOCK_thd_data); @@ -1703,6 +1706,7 @@ THD::~THD() if (xid_hash_pins) lf_hash_put_pins(xid_hash_pins); /* Ensure everything is freed */ 
+ status_var.local_memory_used-= sizeof(THD); if (status_var.local_memory_used != 0) { DBUG_PRINT("error", ("memory_used: %lld", status_var.local_memory_used)); @@ -1710,7 +1714,7 @@ THD::~THD() DBUG_ASSERT(status_var.local_memory_used == 0 || !debug_assert_on_not_freed_memory); } - + update_global_memory_status(status_var.global_memory_used); set_current_thd(orig_thd == this ? 0 : orig_thd); DBUG_VOID_RETURN; } @@ -1748,12 +1752,17 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var) to_var->binlog_bytes_written+= from_var->binlog_bytes_written; to_var->cpu_time+= from_var->cpu_time; to_var->busy_time+= from_var->busy_time; - to_var->local_memory_used+= from_var->local_memory_used; /* Update global_memory_used. We have to do this with atomic_add as the global value can change outside of LOCK_status. */ + if (to_var == &global_status_var) + { + DBUG_PRINT("info", ("global memory_used: %lld size: %lld", + (longlong) global_status_var.global_memory_used, + (longlong) from_var->global_memory_used)); + } // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1) int64 volatile * volatile ptr= &to_var->global_memory_used; my_atomic_add64_explicit(ptr, from_var->global_memory_used, diff --git a/sql/sql_class.h b/sql/sql_class.h index be652e6dd01..e0792a4059f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -658,7 +658,7 @@ typedef struct system_variables /* Error messages */ MY_LOCALE *lc_messages; - const char **errmsgs; /* lc_messages->errmsg->errmsgs */ + const char ***errmsgs; /* lc_messages->errmsg->errmsgs */ /* Locale Support */ MY_LOCALE *lc_time_names; @@ -824,6 +824,20 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var, STATUS_VAR *dec_var); +/* + Update global_memory_used. We have to do this with atomic_add as the + global value can change outside of LOCK_status. +*/ +inline void update_global_memory_status(int64 size) +{ + DBUG_PRINT("info", ("global memory_used: %lld size: %lld", + (longlong) global_status_var.global_memory_used, + size)); + // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1) + int64 volatile * volatile ptr= &global_status_var.global_memory_used; + my_atomic_add64_explicit(ptr, size, MY_MEMORY_ORDER_RELAXED); +} + /** Get collation by name, send error to client on failure. @param name Collation name @@ -3846,9 +3860,11 @@ public: void add_status_to_global() { + DBUG_ASSERT(status_in_global == 0); mysql_mutex_lock(&LOCK_status); add_to_status(&global_status_var, &status_var); /* Mark that this THD status has already been added in global status */ + status_var.global_memory_used= 0; status_in_global= 1; mysql_mutex_unlock(&LOCK_status); } @@ -4527,16 +4543,9 @@ inline uint tmp_table_max_key_parts() { return MI_MAX_KEY_SEG; } class TMP_TABLE_PARAM :public Sql_alloc { -private: - /* Prevent use of these (not safe because of lists and copy_field) */ - TMP_TABLE_PARAM(const TMP_TABLE_PARAM &); - void operator=(TMP_TABLE_PARAM &); - public: List copy_funcs; - List save_copy_funcs; Copy_field *copy_field, *copy_field_end; - Copy_field *save_copy_field, *save_copy_field_end; uchar *group_buff; Item **items_to_copy; /* Fields in tmp table */ TMP_ENGINE_COLUMNDEF *recinfo, *start_recinfo; @@ -4571,7 +4580,13 @@ public: uint hidden_field_count; uint group_parts,group_length,group_null_parts; uint quick_group; - bool using_indirect_summary_function; + /** + Enabled when we have atleast one outer_sum_func. Needed when used + along with distinct. 
+ + @see create_tmp_table + */ + bool using_outer_summary_function; CHARSET_INFO *table_charset; bool schema_table; /* TRUE if the temp table is created for subquery materialization. */ @@ -4601,9 +4616,10 @@ public: TMP_TABLE_PARAM() :copy_field(0), group_parts(0), group_length(0), group_null_parts(0), - schema_table(0), materialized_subquery(0), force_not_null_cols(0), - precomputed_group_by(0), - force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0) + using_outer_summary_function(0), + schema_table(0), materialized_subquery(0), force_not_null_cols(0), + precomputed_group_by(0), + force_copy_fields(0), bit_fields_as_long(0), skip_create_table(0) {} ~TMP_TABLE_PARAM() { @@ -4615,8 +4631,8 @@ public: if (copy_field) /* Fix for Intel compiler */ { delete [] copy_field; - save_copy_field= copy_field= NULL; - save_copy_field_end= copy_field_end= NULL; + copy_field= NULL; + copy_field_end= NULL; } } }; diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index ea114bf40a5..66564bd5e94 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -1192,7 +1192,6 @@ void prepare_new_connection_state(THD* thd) */ thd->proc_info= 0; thd->set_command(COM_SLEEP); - thd->set_time(); thd->init_for_queries(); if (opt_init_connect.length && !(sctx->master_access & SUPER_ACL)) @@ -1234,7 +1233,6 @@ void prepare_new_connection_state(THD* thd) } thd->proc_info=0; - thd->set_time(); thd->init_for_queries(); } } diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 1203a4ce0c8..77f0bcf04ba 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -512,7 +512,10 @@ With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table) { With_clause *with_clause=sl->get_with_clause(); if (with_clause && (found= with_clause->find_table_def(table))) - return found; + return found; + /* Do not look for the table's definition beyond the scope of the view */ + if (sl->master_unit()->is_view) + break; } return found; } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 42e7f6c3569..95aee805c7f 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -263,7 +263,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); DBUG_RETURN(TRUE); } - if (!(table= table_list->table) || !table->created) + if (!(table= table_list->table) || !table->is_created()) { my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), table_list->view_db.str, table_list->view_name.str); @@ -490,27 +490,31 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (query_plan.using_filesort) { - uint length= 0; - SORT_FIELD *sortorder; { + Filesort fsort(order, HA_POS_ERROR, true, select); DBUG_ASSERT(query_plan.index == MAX_KEY); + Filesort_tracker *fs_tracker= thd->lex->explain->get_upd_del_plan()->filesort_tracker; - if (!(sortorder= make_unireg_sortorder(thd, order, &length, NULL)) || - !(file_sort= filesort(thd, table, sortorder, length, - select, HA_POS_ERROR, - true, - fs_tracker))) + if (!(file_sort= filesort(thd, table, &fsort, fs_tracker))) goto got_error; + thd->inc_examined_row_count(file_sort->examined_rows); /* Filesort has already found and selected the rows we want to delete, so we don't need the where clause */ delete select; - free_underlaid_joins(thd, select_lex); + + /* + If we are not in DELETE ... RETURNING, we can free subqueries. (in + DELETE ... 
RETURNING we can't, because the RETURNING part may have + a subquery in it) + */ + if (!with_select) + free_underlaid_joins(thd, select_lex); select= 0; } } @@ -737,7 +741,7 @@ got_error: wild_num - number of wildcards used in optional SELECT clause field_list - list of items in optional SELECT clause conds - conditions - +l RETURN VALUE FALSE OK TRUE error @@ -758,7 +762,8 @@ got_error: DELETE_ACL, SELECT_ACL, TRUE)) DBUG_RETURN(TRUE); if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num)) || - setup_fields(thd, NULL, field_list, MARK_COLUMNS_READ, NULL, 0) || + setup_fields(thd, Ref_ptr_array(), + field_list, MARK_COLUMNS_READ, NULL, 0) || setup_conds(thd, table_list, select_lex->leaf_tables, conds) || setup_ftfuncs(select_lex)) DBUG_RETURN(TRUE); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 1ef83b3bf1f..79e57cded81 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -477,7 +477,7 @@ exit_merge: unconditional_materialization: derived->change_refs_to_fields(); derived->set_materialized_derived(); - if (!derived->table || !derived->table->created) + if (!derived->table || !derived->table->is_created()) res= mysql_derived_create(thd, lex, derived); if (!res) res= mysql_derived_fill(thd, lex, derived); @@ -859,7 +859,7 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived) TABLE *table= derived->table; SELECT_LEX_UNIT *unit= derived->get_unit(); - if (table->created) + if (table->is_created()) DBUG_RETURN(FALSE); select_union *result= (select_union*)unit->result; if (table->s->db_type() == TMP_ENGINE_HTON) @@ -912,7 +912,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) if (unit->executed && !unit->uncacheable && !unit->describe) DBUG_RETURN(FALSE); /*check that table creation passed without problems. 
*/ - DBUG_ASSERT(derived->table && derived->table->created); + DBUG_ASSERT(derived->table && derived->table->is_created()); SELECT_LEX *first_select= unit->first_select(); select_union *derived_result= derived->derived_result; SELECT_LEX *save_current_select= lex->current_select; @@ -928,7 +928,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) first_select->options&= ~OPTION_FOUND_ROWS; lex->current_select= first_select; - res= mysql_select(thd, &first_select->ref_pointer_array, + res= mysql_select(thd, first_select->table_list.first, first_select->with_wild, first_select->item_list, first_select->where, diff --git a/sql/sql_do.cc b/sql/sql_do.cc index 9e58031f6a4..6d86ece6a9f 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -29,7 +29,7 @@ bool mysql_do(THD *thd, List &values) List_iterator li(values); Item *value; DBUG_ENTER("mysql_do"); - if (setup_fields(thd, 0, values, MARK_COLUMNS_NONE, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_NONE, 0, 0)) DBUG_RETURN(TRUE); while ((value = li++)) (void) value->is_null(); diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index dae5127cbf8..1f8b4f2dcb1 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -30,6 +30,7 @@ const char * STR_IMPOSSIBLE_WHERE= "Impossible WHERE"; const char * STR_NO_ROWS_AFTER_PRUNING= "No matching rows after partition pruning"; static void write_item(Json_writer *writer, Item *item); +static void append_item_to_str(String *out, Item *item); Explain_query::Explain_query(THD *thd_arg, MEM_ROOT *root) : mem_root(root), upd_del_plan(NULL), insert_plan(NULL), @@ -697,13 +698,6 @@ bool Explain_node::print_explain_json_cache(Json_writer *writer, } -void Explain_select::replace_table(uint idx, Explain_table_access *new_tab) -{ - delete join_tabs[idx]; - join_tabs[idx]= new_tab; -} - - Explain_basic_join::~Explain_basic_join() { if (join_tabs) @@ -754,35 +748,23 @@ int Explain_select::print_explain(Explain_query *query, } else { - bool using_tmp; - bool using_fs; + bool using_tmp= false; + bool using_fs= false; - if (is_analyze) + for (Explain_aggr_node *node= aggr_tree; node; node= node->child) { - /* - Get the data about "Using temporary; Using filesort" from execution - tracking system. 
- */ - using_tmp= false; - using_fs= false; - Sort_and_group_tracker::Iterator iter(&ops_tracker); - enum_qep_action action; - Filesort_tracker *dummy; - - while ((action= iter.get_next(&dummy)) != EXPL_ACTION_EOF) + switch (node->get_type()) { - if (action == EXPL_ACTION_FILESORT) - using_fs= true; - else if (action == EXPL_ACTION_TEMPTABLE) + case AGGR_OP_TEMP_TABLE: using_tmp= true; + break; + case AGGR_OP_FILESORT: + using_fs= true; + break; + default: + break; } } - else - { - /* Use imprecise "estimates" we got with the query plan */ - using_tmp= using_temporary; - using_fs= using_filesort; - } for (uint i=0; i< n_join_tabs; i++) { @@ -882,88 +864,40 @@ void Explain_select::print_explain_json(Explain_query *query, } } - Filesort_tracker *first_table_sort= NULL; - bool first_table_sort_used= false; int started_objects= 0; + + Explain_aggr_node *node= aggr_tree; - if (is_analyze) + for (; node; node= node->child) { - /* ANALYZE has collected this part of query plan independently */ - if (ops_tracker.had_varied_executions()) + switch (node->get_type()) { - writer->add_member("varied-sort-and-tmp").start_object(); - started_objects++; - } - else - { - Sort_and_group_tracker::Iterator iter(&ops_tracker); - enum_qep_action action; - Filesort_tracker *fs_tracker; - - while ((action= iter.get_next(&fs_tracker)) != EXPL_ACTION_EOF) + case AGGR_OP_TEMP_TABLE: + writer->add_member("temporary_table").start_object(); + break; + case AGGR_OP_FILESORT: { - if (action == EXPL_ACTION_FILESORT) - { - if (iter.is_last_element()) - { - first_table_sort= fs_tracker; - break; - } - writer->add_member("filesort").start_object(); - started_objects++; - fs_tracker->print_json_members(writer); - } - else if (action == EXPL_ACTION_TEMPTABLE) - { - writer->add_member("temporary_table").start_object(); - started_objects++; - /* - if (tmp == EXPL_TMP_TABLE_BUFFER) - func= "buffer"; - else if (tmp == EXPL_TMP_TABLE_GROUP) - func= "group-by"; - else - func= "distinct"; - writer->add_member("function").add_str(func); - */ - } - else if (action == EXPL_ACTION_REMOVE_DUPS) - { - writer->add_member("duplicate_removal").start_object(); - started_objects++; - } - else - DBUG_ASSERT(0); + writer->add_member("filesort").start_object(); + ((Explain_aggr_filesort*)node)->print_json_members(writer, is_analyze); + break; } - } - - if (first_table_sort) - first_table_sort_used= true; - } - else - { - /* This is just EXPLAIN. Try to produce something meaningful */ - if (using_temporary) - { - started_objects= 1; - if (using_filesort) + case AGGR_OP_REMOVE_DUPLICATES: + writer->add_member("duplicate_removal").start_object(); + break; + case AGGR_OP_WINDOW_FUNCS: { - started_objects++; - writer->add_member("filesort").start_object(); + //TODO: make print_json_members virtual? 
+ writer->add_member("window_functions_computation").start_object(); + ((Explain_aggr_window_funcs*)node)->print_json_members(writer, is_analyze); + break; } - writer->add_member("temporary_table").start_object(); - writer->add_member("function").add_str("buffer"); - } - else - { - if (using_filesort) - first_table_sort_used= true; + default: + DBUG_ASSERT(0); } + started_objects++; } - Explain_basic_join::print_explain_json_interns(query, writer, is_analyze, - first_table_sort, - first_table_sort_used); + Explain_basic_join::print_explain_json_interns(query, writer, is_analyze); for (;started_objects; started_objects--) writer->end_object(); @@ -976,6 +910,64 @@ void Explain_select::print_explain_json(Explain_query *query, } +Explain_aggr_filesort::Explain_aggr_filesort(MEM_ROOT *mem_root, + bool is_analyze, + Filesort *filesort) + : tracker(is_analyze) +{ + child= NULL; + for (ORDER *ord= filesort->order; ord; ord= ord->next) + { + sort_items.push_back(ord->item[0], mem_root); + } + filesort->tracker= &tracker; +} + + +void Explain_aggr_filesort::print_json_members(Json_writer *writer, + bool is_analyze) +{ + char item_buf[256]; + String str(item_buf, sizeof(item_buf), &my_charset_bin); + str.length(0); + + List_iterator_fast it(sort_items); + Item *item; + bool first= true; + while ((item= it++)) + { + if (first) + first= false; + else + { + str.append(", "); + } + append_item_to_str(&str, item); + } + + writer->add_member("sort_key").add_str(str.c_ptr_safe()); + + if (is_analyze) + tracker.print_json_members(writer); +} + + +void Explain_aggr_window_funcs::print_json_members(Json_writer *writer, + bool is_analyze) +{ + Explain_aggr_filesort *srt; + List_iterator it(sorts); + writer->add_member("sorts").start_object(); + while ((srt= it++)) + { + writer->add_member("filesort").start_object(); + srt->print_json_members(writer, is_analyze); + writer->end_object(); // filesort + } + writer->end_object(); // sorts +} + + void Explain_basic_join::print_explain_json(Explain_query *query, Json_writer *writer, bool is_analyze) @@ -983,7 +975,7 @@ void Explain_basic_join::print_explain_json(Explain_query *query, writer->add_member("query_block").start_object(); writer->add_member("select_id").add_ll(select_id); - print_explain_json_interns(query, writer, is_analyze, NULL, false); + print_explain_json_interns(query, writer, is_analyze); writer->end_object(); } @@ -992,9 +984,7 @@ void Explain_basic_join::print_explain_json(Explain_query *query, void Explain_basic_join:: print_explain_json_interns(Explain_query *query, Json_writer *writer, - bool is_analyze, - Filesort_tracker *first_table_sort, - bool first_table_sort_used) + bool is_analyze) { Json_writer_nesting_guard guard(writer); for (uint i=0; i< n_join_tabs; i++) @@ -1002,12 +992,7 @@ print_explain_json_interns(Explain_query *query, if (join_tabs[i]->start_dups_weedout) writer->add_member("duplicates_removal").start_object(); - join_tabs[i]->print_explain_json(query, writer, is_analyze, - first_table_sort, - first_table_sort_used); - - first_table_sort= NULL; - first_table_sort_used= false; + join_tabs[i]->print_explain_json(query, writer, is_analyze); if (join_tabs[i]->end_dups_weedout) writer->end_object(); @@ -1299,7 +1284,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai extra_buf.append(STRING_WITH_LEN("Using temporary")); } - if (using_filesort) + if (using_filesort || this->pre_join_sort) { if (first) first= false; @@ -1359,6 +1344,15 @@ static void write_item(Json_writer *writer, Item *item) 
writer->add_str(str.c_ptr_safe()); } +static void append_item_to_str(String *out, Item *item) +{ + THD *thd= current_thd; + ulonglong save_option_bits= thd->variables.option_bits; + thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE; + + item->print(out, QT_EXPLAIN); + thd->variables.option_bits= save_option_bits; +} void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_tag tag) { @@ -1486,25 +1480,14 @@ void add_json_keyset(Json_writer *writer, const char *elem_name, print_json_array(writer, elem_name, *keyset); } -/* - @param fs_tracker Normally NULL. When not NULL, it means that the join tab - used filesort to pre-sort the data. Then, sorted data - was read and the rest of the join was executed. - - @note - EXPLAIN command will check whether fs_tracker is present, but it can't use - any value from fs_tracker (these are only valid for ANALYZE). -*/ void Explain_table_access::print_explain_json(Explain_query *query, Json_writer *writer, - bool is_analyze, - Filesort_tracker *fs_tracker, - bool first_table_sort_used) + bool is_analyze) { Json_writer_nesting_guard guard(writer); - if (first_table_sort_used) + if (pre_join_sort) { /* filesort was invoked on this join tab before doing the join with the rest */ writer->add_member("read_sorted_file").start_object(); @@ -1531,8 +1514,7 @@ void Explain_table_access::print_explain_json(Explain_query *query, } } writer->add_member("filesort").start_object(); - if (is_analyze) - fs_tracker->print_json_members(writer); + pre_join_sort->print_json_members(writer, is_analyze); } if (bka_type.is_using_jbuf()) @@ -1610,11 +1592,11 @@ void Explain_table_access::print_explain_json(Explain_query *query, if (is_analyze) { writer->add_member("r_rows"); - if (fs_tracker) + if (pre_join_sort) { /* Get r_rows value from filesort */ - if (fs_tracker->get_r_loops()) - writer->add_double(fs_tracker->get_avg_examined_rows()); + if (pre_join_sort->tracker.get_r_loops()) + writer->add_double(pre_join_sort->tracker.get_avg_examined_rows()); else writer->add_null(); } @@ -1641,11 +1623,11 @@ void Explain_table_access::print_explain_json(Explain_query *query, if (is_analyze) { writer->add_member("r_filtered"); - if (fs_tracker) + if (pre_join_sort) { /* Get r_filtered value from filesort */ - if (fs_tracker->get_r_loops()) - writer->add_double(fs_tracker->get_r_filtered()); + if (pre_join_sort->tracker.get_r_loops()) + writer->add_double(pre_join_sort->tracker.get_r_filtered()); else writer->add_null(); } @@ -1723,7 +1705,7 @@ void Explain_table_access::print_explain_json(Explain_query *query, writer->end_object(); } - if (first_table_sort_used) + if (pre_join_sort) { writer->end_object(); // filesort writer->end_object(); // read_sorted_file diff --git a/sql/sql_explain.h b/sql/sql_explain.h index 844773c4a47..abdb1bb978b 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -176,9 +176,7 @@ public: bool is_analyze); void print_explain_json_interns(Explain_query *query, Json_writer *writer, - bool is_analyze, - Filesort_tracker *first_table_sort, - bool first_table_sort_used); + bool is_analyze); /* A flat array of Explain structs for tables. */ Explain_table_access** join_tabs; @@ -186,6 +184,7 @@ public: }; +class Explain_aggr_node; /* EXPLAIN structure for a SELECT. 
@@ -212,15 +211,9 @@ public:
     having(NULL), having_value(Item::COND_UNDEF),
     using_temporary(false), using_filesort(false),
     time_tracker(is_analyze),
-    ops_tracker(is_analyze)
+    aggr_tree(NULL)
   {}
 
-  /*
-    This is used to save the results of "late" test_if_skip_sort_order() calls
-    that are made from JOIN::exec
-  */
-  void replace_table(uint idx, Explain_table_access *new_tab);
-
 public:
   const char *select_type;
 
@@ -244,9 +237,13 @@ public:
 
   /* ANALYZE members */
   Time_and_counter_tracker time_tracker;
-
-  Sort_and_group_tracker ops_tracker;
+  /*
+    Part of query plan describing sorting, temp.table usage, and duplicate
+    removal
+  */
+  Explain_aggr_node* aggr_tree;
+
   int print_explain(Explain_query *query, select_result_sink *output,
                     uint8 explain_flags, bool is_analyze);
   void print_explain_json(Explain_query *query, Json_writer *writer,
@@ -260,6 +257,64 @@ private:
   Table_access_tracker using_temporary_read_tracker;
 };
 
+/////////////////////////////////////////////////////////////////////////////
+// EXPLAIN structures for ORDER/GROUP operations.
+/////////////////////////////////////////////////////////////////////////////
+typedef enum
+{
+  AGGR_OP_TEMP_TABLE,
+  AGGR_OP_FILESORT,
+  //AGGR_OP_READ_SORTED_FILE, // need this?
+  AGGR_OP_REMOVE_DUPLICATES,
+  AGGR_OP_WINDOW_FUNCS
+  //AGGR_OP_JOIN // Need this?
+} enum_explain_aggr_node_type;
+
+
+class Explain_aggr_node : public Sql_alloc
+{
+public:
+  virtual enum_explain_aggr_node_type get_type()= 0;
+  virtual ~Explain_aggr_node() {}
+  Explain_aggr_node *child;
+};
+
+class Explain_aggr_filesort : public Explain_aggr_node
+{
+  List<Item> sort_items;
+public:
+  enum_explain_aggr_node_type get_type() { return AGGR_OP_FILESORT; }
+  Filesort_tracker tracker;
+
+  Explain_aggr_filesort(MEM_ROOT *mem_root, bool is_analyze,
+                        Filesort *filesort);
+
+  void print_json_members(Json_writer *writer, bool is_analyze);
+};
+
+class Explain_aggr_tmp_table : public Explain_aggr_node
+{
+public:
+  enum_explain_aggr_node_type get_type() { return AGGR_OP_TEMP_TABLE; }
+};
+
+class Explain_aggr_remove_dups : public Explain_aggr_node
+{
+public:
+  enum_explain_aggr_node_type get_type() { return AGGR_OP_REMOVE_DUPLICATES; }
+};
+
+class Explain_aggr_window_funcs : public Explain_aggr_node
+{
+  List<Explain_aggr_filesort> sorts;
+public:
+  enum_explain_aggr_node_type get_type() { return AGGR_OP_WINDOW_FUNCS; }
+
+  void print_json_members(Json_writer *writer, bool is_analyze);
+  friend class Window_funcs_computation;
+};
+
+/////////////////////////////////////////////////////////////////////////////
 
 /*
   Explain structure for a UNION.
@@ -617,7 +672,8 @@ public:
     where_cond(NULL),
     cache_cond(NULL),
     pushed_index_cond(NULL),
-    sjm_nest(NULL)
+    sjm_nest(NULL),
+    pre_join_sort(NULL)
   {}
   ~Explain_table_access() { delete sjm_nest; }
 
@@ -710,6 +766,12 @@ public:
   Item *pushed_index_cond;
 
   Explain_basic_join *sjm_nest;
+
+  /*
+    This describes a possible filesort() call that is done before doing the
+    join operation.
+ */ + Explain_aggr_filesort *pre_join_sort; /* ANALYZE members */ @@ -723,9 +785,7 @@ public: uint select_id, const char *select_type, bool using_temporary, bool using_filesort); void print_explain_json(Explain_query *query, Json_writer *writer, - bool is_analyze, - Filesort_tracker *fs_tracker, - bool first_table_sort_used); + bool is_analyze); private: void append_tag_name(String *str, enum explain_extra_tag tag); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 9817b882bdd..65af14b62f6 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -258,7 +258,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, if (table_list->is_view()) unfix_fields(fields); - res= setup_fields(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0); + res= setup_fields(thd, Ref_ptr_array(), fields, MARK_COLUMNS_WRITE, 0, 0); /* Restore the current context. */ ctx_state.restore_state(context, table_list); @@ -346,7 +346,8 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list, } /* Check the fields we are going to modify */ - if (setup_fields(thd, 0, update_fields, MARK_COLUMNS_WRITE, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), + update_fields, MARK_COLUMNS_WRITE, 0, 0)) return -1; if (insert_table_list->is_view() && @@ -771,7 +772,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto abort; } - if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), *values, MARK_COLUMNS_READ, 0, 0)) goto abort; switch_to_nullable_trigger_fields(*values, table); } @@ -1466,7 +1467,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, table_list->next_local= 0; context->resolve_in_table_list_only(table_list); - res= (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0) || + res= (setup_fields(thd, Ref_ptr_array(), + *values, MARK_COLUMNS_READ, 0, 0) || check_insert_fields(thd, context->table_list, fields, *values, !insert_into_view, 0, &map)); @@ -1482,7 +1484,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, } if (!res) - res= setup_fields(thd, 0, update_values, MARK_COLUMNS_READ, 0, 0); + res= setup_fields(thd, Ref_ptr_array(), + update_values, MARK_COLUMNS_READ, 0, 0); if (!res && duplic == DUP_UPDATE) { @@ -2014,6 +2017,7 @@ public: mysql_cond_t cond, cond_client; volatile uint tables_in_use,stacked_inserts; volatile bool status; + bool retry; /** When the handler thread starts, it clones a metadata lock ticket which protects against GRL and ticket for the table to be inserted. @@ -2038,7 +2042,7 @@ public: Delayed_insert(SELECT_LEX *current_select) :locks_in_memory(0), table(0),tables_in_use(0),stacked_inserts(0), - status(0), handler_thread_initialized(FALSE), group_count(0) + status(0), retry(0), handler_thread_initialized(FALSE), group_count(0) { DBUG_ENTER("Delayed_insert constructor"); thd.security_ctx->user=(char*) delayed_user; @@ -2297,7 +2301,7 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request, } if (di->thd.killed) { - if (di->thd.is_error()) + if (di->thd.is_error() && ! di->retry) { /* Copy the error message. Note that we don't treat fatal @@ -2523,7 +2527,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) copy->vcol_set= copy->def_vcol_set; } copy->tmp_set.bitmap= 0; // To catch errors - bzero((char*) bitmap, share->column_bitmap_size + (share->vfields ? 3 : 2)); + bzero((char*) bitmap, share->column_bitmap_size * (share->vfields ? 
3 : 2)); copy->read_set= ©->def_read_set; copy->write_set= ©->def_write_set; @@ -2532,7 +2536,6 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) /* Got fatal error */ error: tables_in_use--; - status=1; mysql_cond_signal(&cond); // Inform thread about abort DBUG_RETURN(0); } @@ -2774,13 +2777,20 @@ bool Delayed_insert::open_and_lock_table() /* Use special prelocking strategy to get ER_DELAYED_NOT_SUPPORTED error for tables with engines which don't support delayed inserts. + + We can't do auto-repair in insert delayed thread, as it would hang + when trying to an exclusive MDL_LOCK on the table during repair + as the connection thread has a SHARED_WRITE lock. */ if (!(table= open_n_lock_single_table(&thd, &table_list, TL_WRITE_DELAYED, - MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK, + MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK | + MYSQL_OPEN_IGNORE_REPAIR, &prelocking_strategy))) { - thd.fatal_error(); // Abort waiting inserts + /* If table was crashed, then upper level should retry open+repair */ + retry= table_list.crashed; + thd.fatal_error(); // Abort waiting inserts return TRUE; } @@ -3440,7 +3450,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) */ lex->current_select= &lex->select_lex; - res= (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0) || + res= (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, 0) || check_insert_fields(thd, table_list, *fields, values, !insert_into_view, 1, &map)); @@ -3493,7 +3503,7 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) table_list->next_name_resolution_table= ctx_state.get_first_name_resolution_table(); - res= res || setup_fields(thd, 0, *info.update_values, + res= res || setup_fields(thd, Ref_ptr_array(), *info.update_values, MARK_COLUMNS_READ, 0, 0); if (!res) { @@ -3622,7 +3632,7 @@ void select_insert::cleanup() select_insert::~select_insert() { DBUG_ENTER("~select_insert"); - if (table && table->created) + if (table && table->is_created()) { table->next_number_field=0; table->auto_increment_field_not_null= FALSE; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 65257c9b2ce..de345b4dd1c 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -748,6 +748,14 @@ void lex_start(THD *thd) lex->stmt_var_list.empty(); lex->proc_list.elements=0; + lex->save_group_list.empty(); + lex->save_order_list.empty(); + lex->win_ref= NULL; + lex->win_frame= NULL; + lex->frame_top_bound= NULL; + lex->frame_bottom_bound= NULL; + lex->win_spec= NULL; + lex->is_lex_started= TRUE; DBUG_VOID_RETURN; } @@ -2070,6 +2078,7 @@ void st_select_lex_unit::init_query() found_rows_for_union= 0; insert_table_with_stored_vcol= 0; derived= 0; + is_view= false; with_clause= 0; with_element= 0; columns_are_renamed= false; @@ -2103,8 +2112,7 @@ void st_select_lex::init_query() parent_lex->push_context(&context, parent_lex->thd->mem_root); cond_count= between_count= with_wild= 0; max_equal_elems= 0; - ref_pointer_array= 0; - ref_pointer_array_size= 0; + ref_pointer_array.reset(); select_n_where_fields= 0; select_n_reserved= 0; select_n_having_items= 0; @@ -2122,8 +2130,11 @@ void st_select_lex::init_query() prep_leaf_list_state= UNINIT; have_merged_subqueries= FALSE; bzero((char*) expr_cache_may_be_used, sizeof(expr_cache_may_be_used)); + select_list_tables= 0; m_non_agg_field_used= false; m_agg_func_used= false; + window_specs.empty(); + window_funcs.empty(); } void st_select_lex::init_select() @@ -2650,7 +2661,7 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) select_n_having_items + select_n_where_fields + order_group_num) * 5; 
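
The "* 5" in the sizing above is easy to miss in this form: the flat Item* buffer is sized for five equal windows over the same item list, and the JOIN later picks one of them with ref_ptr_array_slice() (slices 0 and 2 are used further down in this patch). A hypothetical helper showing just the arithmetic, under the assumption that the slices are equally sized; it is not the server's actual implementation:

    // slice_no selects one of five equally sized windows inside the flat
    // allocation: the base fields list plus the copies that are switched in
    // when fields are moved into temporary tables.
    static Ref_ptr_array ref_ptr_array_slice_sketch(Item **buf, size_t n_elems,
                                                    size_t slice_no)
    {
      size_t slice_len= n_elems / 5;     // mirrors the "* 5" in the sizing above
      return Ref_ptr_array(buf + slice_no * slice_len, slice_len);
    }
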
- if (ref_pointer_array != NULL) + if (!ref_pointer_array.is_null()) { /* We need to take 'n_sum_items' into account when allocating the array, @@ -2659,17 +2670,24 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) In the usual case we can reuse the array from the prepare phase. If we need a bigger array, we must allocate a new one. */ - if (ref_pointer_array_size >= n_elems) - { - DBUG_PRINT("info", ("reusing old ref_array")); + if (ref_pointer_array.size() == n_elems) return false; - } - } - ref_pointer_array= static_cast(arena->alloc(sizeof(Item*) * n_elems)); - if (ref_pointer_array != NULL) - ref_pointer_array_size= n_elems; - return ref_pointer_array == NULL; + /* + We need to take 'n_sum_items' into account when allocating the array, + and this may actually increase during the optimization phase due to + MIN/MAX rewrite in Item_in_subselect::single_value_transformer. + In the usual case we can reuse the array from the prepare phase. + If we need a bigger array, we must allocate a new one. + */ + if (ref_pointer_array.size() == n_elems) + return false; + } + Item **array= static_cast(arena->alloc(sizeof(Item*) * n_elems)); + if (array != NULL) + ref_pointer_array= Ref_ptr_array(array, n_elems); + + return array == NULL; } @@ -2734,8 +2752,8 @@ void st_select_lex::print_order(String *str, else (*order->item)->print(str, query_type); } - if (!order->asc) - str->append(STRING_WITH_LEN(" desc")); + if (order->direction == ORDER::ORDER_DESC) + str->append(STRING_WITH_LEN(" desc")); if (order->next) str->append(','); } @@ -4177,9 +4195,11 @@ void SELECT_LEX::update_used_tables() Item *item; List_iterator_fast it(join->fields_list); + select_list_tables= 0; while ((item= it++)) { item->update_used_tables(); + select_list_tables|= item->used_tables(); } Item_outer_ref *ref; List_iterator_fast ref_it(inner_refs_list); @@ -4229,6 +4249,8 @@ void st_select_lex::update_correlated_cache() if (join->conds) is_correlated|= MY_TEST(join->conds->used_tables() & OUTER_REF_TABLE_BIT); + is_correlated|= join->having_is_correlated; + if (join->having) is_correlated|= MY_TEST(join->having->used_tables() & OUTER_REF_TABLE_BIT); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index c64ed6b8d5c..10247bd33a2 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -28,6 +28,7 @@ #include "mem_root_array.h" #include "sql_cmd.h" #include "sql_alter.h" // Alter_info +#include "sql_window.h" /* YACC and LEX Definitions */ @@ -47,6 +48,7 @@ class sys_var; class Item_func_match; class File_parser; class Key_part_spec; +class Item_window_func; struct sql_digest_state; class With_clause; @@ -643,6 +645,7 @@ public: derived tables/views handling. */ TABLE_LIST *derived; + bool is_view; /* With clause attached to this unit (if any) */ With_clause *with_clause; /* With element where this unit is used as the specification (if any) */ @@ -721,6 +724,7 @@ public: }; typedef class st_select_lex_unit SELECT_LEX_UNIT; +typedef Bounds_checked_array Ref_ptr_array; /* SELECT_LEX - store information of parsed SELECT statment @@ -799,9 +803,9 @@ public: SQL_I_List order_list; /* ORDER clause */ SQL_I_List gorder_list; Item *select_limit, *offset_limit; /* LIMIT clause parameters */ - // Arrays of pointers to top elements of all_fields list - Item **ref_pointer_array; - size_t ref_pointer_array_size; // Number of elements in array. 
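
Throughout this patch the raw Item** pointer and its separate ref_pointer_array_size counter are folded into a single Ref_ptr_array, i.e. a Bounds_checked_array of Item pointers. A minimal standalone sketch of that idiom, using a hypothetical stand-in type rather than the server's template (only is_null(), size(), reset() and checked indexing, the operations the patch relies on):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for Bounds_checked_array<T>: a pointer/size pair
    // that asserts on out-of-range access instead of silently overrunning.
    template <typename T>
    struct bounds_checked_array
    {
      T *m_array;
      size_t m_size;

      bounds_checked_array() : m_array(NULL), m_size(0) {}
      bounds_checked_array(T *array, size_t size) : m_array(array), m_size(size) {}

      bool is_null() const { return m_array == NULL; }
      size_t size() const { return m_size; }
      void reset() { m_array= NULL; m_size= 0; }

      T &operator[](size_t i)
      {
        assert(i < m_size);              // checked indexing is the whole point
        return m_array[i];
      }
    };

    int main()
    {
      int buf[5]= {0};
      bounds_checked_array<int> arr(buf, 5);
      arr[3]= 42;                        // in range
      printf("%d %zu\n", arr[3], arr.size());
      return 0;
    }
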
+ + /// Array of pointers to top elements of all_fields list + Ref_ptr_array ref_pointer_array; /* number of items in select_list and HAVING clause used to get number @@ -898,6 +902,12 @@ public: */ List *prev_join_using; + /** + The set of those tables whose fields are referenced in the select list of + this select level. + */ + table_map select_list_tables; + /* namp of nesting SELECT visibility (for aggregate functions check) */ nesting_map name_visibility_map; @@ -1088,6 +1098,24 @@ public: } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); + List window_specs; + void prepare_add_window_spec(THD *thd); + bool add_window_def(THD *thd, LEX_STRING *win_name, LEX_STRING *win_ref, + SQL_I_List win_partition_list, + SQL_I_List win_order_list, + Window_frame *win_frame); + bool add_window_spec(THD *thd, LEX_STRING *win_ref, + SQL_I_List win_partition_list, + SQL_I_List win_order_list, + Window_frame *win_frame); + List window_funcs; + bool add_window_func(Item_window_func *win_func) + { + return window_funcs.push_back(win_func); + } + + bool have_window_funcs() const { return (window_funcs.elements !=0); } + private: bool m_non_agg_field_used; bool m_agg_func_used; @@ -2756,6 +2784,14 @@ public: } + SQL_I_List save_group_list; + SQL_I_List save_order_list; + LEX_STRING *win_ref; + Window_frame *win_frame; + Window_frame_bound *frame_top_bound; + Window_frame_bound *frame_bottom_bound; + Window_spec *win_spec; + inline void free_set_stmt_mem_root() { DBUG_ASSERT(!is_arena_for_set_stmt()); diff --git a/sql/sql_list.h b/sql/sql_list.h index 113af35bad7..94e97e55aa7 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -451,6 +451,11 @@ public: el= ¤t->next; return current->info; } + /* Get what calling next() would return, without moving the iterator */ + inline void *peek() + { + return (*el)->info; + } inline void *next_fast(void) { list_node *tmp; @@ -503,6 +508,10 @@ public: { return el == &list->last_ref()->next; } + inline bool at_end() + { + return current == &end_of_list; + } friend class error_list_iterator; }; @@ -550,6 +559,7 @@ public: List_iterator() : base_list_iterator() {} inline void init(List &a) { base_list_iterator::init(a); } inline T* operator++(int) { return (T*) base_list_iterator::next(); } + inline T* peek() { return (T*) base_list_iterator::peek(); } inline T *replace(T *a) { return (T*) base_list_iterator::replace(a); } inline T *replace(List &a) { return (T*) base_list_iterator::replace(a); } inline void rewind(void) { base_list_iterator::rewind(); } @@ -607,7 +617,7 @@ inline void bubble_sort(List *list_to_sort, swap= FALSE; while ((item2= it++) && (ref2= it.ref()) != last_ref) { - if (sort_func(item1, item2, arg) < 0) + if (sort_func(item1, item2, arg) > 0) { *ref1= item2; *ref2= item1; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index c70e545675d..a4044dd0d59 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -61,6 +61,39 @@ XML_TAG::XML_TAG(int l, String f, String v) } +/* + Field and line terminators must be interpreted as sequence of unsigned char. + Otherwise, non-ascii terminators will be negative on some platforms, + and positive on others (depending on the implementation of char). +*/ +class Term_string +{ + const uchar *m_ptr; + uint m_length; + int m_initial_byte; +public: + Term_string(const String &str) : + m_ptr(static_cast(static_cast(str.ptr()))), + m_length(str.length()), + m_initial_byte((uchar) (str.length() ? 
str.ptr()[0] : INT_MAX)) + { } + void set(const uchar *str, uint length, int initial_byte) + { + m_ptr= str; + m_length= length; + m_initial_byte= initial_byte; + } + void reset() { set(NULL, 0, INT_MAX); } + const uchar *ptr() const { return m_ptr; } + uint length() const { return m_length; } + int initial_byte() const { return m_initial_byte; } + bool eq(const Term_string &other) const + { + return length() == other.length() && !memcmp(ptr(), other.ptr(), length()); + } +}; + + #define GET (stack_pos != stack ? *--stack_pos : my_b_get(&cache)) #define PUSH(A) *(stack_pos++)=(A) @@ -69,10 +102,10 @@ class READ_INFO { String data; /* Read buffer */ uint fixed_length; /* Length of the fixed length record */ uint max_length; /* Max length of row */ - const uchar *field_term_ptr,*line_term_ptr; - const char *line_start_ptr,*line_start_end; - uint field_term_length,line_term_length,enclosed_length; - int field_term_char,line_term_char,enclosed_char,escape_char; + Term_string m_field_term; /* FIELDS TERMINATED BY 'string' */ + Term_string m_line_term; /* LINES TERMINATED BY 'string' */ + Term_string m_line_start; /* LINES STARTING BY 'string' */ + int enclosed_char,escape_char; int *stack,*stack_pos; bool found_end_of_line,start_of_line,eof; NET *io_net; @@ -86,6 +119,70 @@ class READ_INFO { *to= chr; return false; } + + /** + Read a tail of a multi-byte character. + The first byte of the character is assumed to be already + read from the file and appended to "str". + + @returns true - if EOF happened unexpectedly + @returns false - no EOF happened: found a good multi-byte character, + or a bad byte sequence + + Note: + The return value depends only on EOF: + - read_mbtail() returns "false" is a good character was read, but also + - read_mbtail() returns "false" if an incomplete byte sequence was found + and no EOF happened. + + For example, suppose we have an ujis file with bytes 0x8FA10A, where: + - 0x8FA1 is an incomplete prefix of a 3-byte character + (it should be [8F][A1-FE][A1-FE] to make a full 3-byte character) + - 0x0A is a line demiliter + This file has some broken data, the trailing [A1-FE] is missing. + + In this example it works as follows: + - 0x8F is read from the file and put into "data" before the call + for read_mbtail() + - 0xA1 is read from the file and put into "data" by read_mbtail() + - 0x0A is kept in the read queue, so the next read iteration after + the current read_mbtail() call will normally find it and recognize as + a line delimiter + - the current call for read_mbtail() returns "false", + because no EOF happened + */ + bool read_mbtail(String *str) + { + int chlen; + if ((chlen= my_charlen(read_charset, str->end() - 1, str->end())) == 1) + return false; // Single byte character found + for (uint32 length0= str->length() - 1 ; MY_CS_IS_TOOSMALL(chlen); ) + { + int chr= GET; + if (chr == my_b_EOF) + { + DBUG_PRINT("info", ("read_mbtail: chlen=%d; unexpected EOF", chlen)); + return true; // EOF + } + str->append(chr); + chlen= my_charlen(read_charset, str->ptr() + length0, str->end()); + if (chlen == MY_CS_ILSEQ) + { + /** + It has been an incomplete (but a valid) sequence so far, + but the last byte turned it into a bad byte sequence. + Unget the very last byte. 
+ */ + str->length(str->length() - 1); + PUSH(chr); + DBUG_PRINT("info", ("read_mbtail: ILSEQ")); + return false; // Bad byte sequence + } + } + DBUG_PRINT("info", ("read_mbtail: chlen=%d", chlen)); + return false; // Good multi-byte character + } + public: bool error,line_cuted,found_null,enclosed; uchar *row_start, /* Found row starts here */ @@ -101,7 +198,11 @@ public: int read_fixed_length(void); int next_line(void); char unescape(char chr); - int terminator(const uchar *ptr, uint length); + bool terminator(const uchar *ptr, uint length); + bool terminator(const Term_string &str) + { return terminator(str.ptr(), str.length()); } + bool terminator(int chr, const Term_string &str) + { return str.initial_byte() == chr && terminator(str); } bool find_start_of_fields(); /* load xml */ List taglist; @@ -284,22 +385,25 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, Let us also prepare SET clause, altough it is probably empty in this case. */ - if (setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) || - setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), + set_fields, MARK_COLUMNS_WRITE, 0, 0) || + setup_fields(thd, Ref_ptr_array(), set_values, MARK_COLUMNS_READ, 0, 0)) DBUG_RETURN(TRUE); } else { // Part field list /* TODO: use this conds for 'WITH CHECK OPTIONS' */ - if (setup_fields(thd, 0, fields_vars, MARK_COLUMNS_WRITE, 0, 0) || - setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) || + if (setup_fields(thd, Ref_ptr_array(), + fields_vars, MARK_COLUMNS_WRITE, 0, 0) || + setup_fields(thd, Ref_ptr_array(), + set_fields, MARK_COLUMNS_WRITE, 0, 0) || check_that_all_fields_are_given_values(thd, table, table_list)) DBUG_RETURN(TRUE); /* Add all fields with default functions to table->write_set. */ if (table->default_field) table->mark_default_fields_for_write(); /* Fix the expressions in SET clause */ - if (setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), set_values, MARK_COLUMNS_READ, 0, 0)) DBUG_RETURN(TRUE); } switch_to_nullable_trigger_fields(fields_vars, table); @@ -1348,8 +1452,9 @@ READ_INFO::READ_INFO(THD *thd, File file_par, uint tot_length, CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par), fixed_length(tot_length), escape_char(escape), - found_end_of_line(false), eof(false), + :file(file_par), fixed_length(tot_length), + m_field_term(field_term), m_line_term(line_term), m_line_start(line_start), + escape_char(escape), found_end_of_line(false), eof(false), error(false), line_cuted(false), found_null(false), read_charset(cs) { data.set_thread_specific(); @@ -1358,39 +1463,17 @@ READ_INFO::READ_INFO(THD *thd, File file_par, uint tot_length, CHARSET_INFO *cs, Otherwise, non-ascii terminators will be negative on some platforms, and positive on others (depending on the implementation of char). 
*/ - field_term_ptr= - static_cast(static_cast(field_term.ptr())); - field_term_length= field_term.length(); - line_term_ptr= - static_cast(static_cast(line_term.ptr())); - line_term_length= line_term.length(); level= 0; /* for load xml */ - if (line_start.length() == 0) - { - line_start_ptr=0; - start_of_line= 0; - } - else - { - line_start_ptr= line_start.ptr(); - line_start_end=line_start_ptr+line_start.length(); - start_of_line= 1; - } + start_of_line= line_start.length() != 0; /* If field_terminator == line_terminator, don't use line_terminator */ - if (field_term_length == line_term_length && - !memcmp(field_term_ptr,line_term_ptr,field_term_length)) - { - line_term_length=0; - line_term_ptr= NULL; - } - enclosed_char= (enclosed_length=enclosed_par.length()) ? - (uchar) enclosed_par[0] : INT_MAX; - field_term_char= field_term_length ? field_term_ptr[0] : INT_MAX; - line_term_char= line_term_length ? line_term_ptr[0] : INT_MAX; + if (m_field_term.eq(m_line_term)) + m_line_term.reset(); + enclosed_char= enclosed_par.length() ? (uchar) enclosed_par[0] : INT_MAX; /* Set of a stack for unget if long terminators */ - uint length= MY_MAX(cs->mbmaxlen, MY_MAX(field_term_length, line_term_length)) + 1; + uint length= MY_MAX(cs->mbmaxlen, MY_MAX(m_field_term.length(), + m_line_term.length())) + 1; set_if_bigger(length,line_start.length()); stack= stack_pos= (int*) thd->alloc(sizeof(int) * length); @@ -1432,7 +1515,7 @@ READ_INFO::~READ_INFO() } -inline int READ_INFO::terminator(const uchar *ptr,uint length) +inline bool READ_INFO::terminator(const uchar *ptr, uint length) { int chr=0; // Keep gcc happy uint i; @@ -1444,11 +1527,11 @@ inline int READ_INFO::terminator(const uchar *ptr,uint length) } } if (i == length) - return 1; + return true; PUSH(chr); while (i-- > 1) PUSH(*--ptr); - return 0; + return false; } @@ -1516,12 +1599,12 @@ int READ_INFO::read_field() chr= escape_char; } #ifdef ALLOW_LINESEPARATOR_IN_STRINGS - if (chr == line_term_char) + if (chr == m_line_term.initial_byte()) #else - if (chr == line_term_char && found_enclosed_char == INT_MAX) + if (chr == m_line_term.initial_byte() && found_enclosed_char == INT_MAX) #endif { - if (terminator(line_term_ptr,line_term_length)) + if (terminator(m_line_term)) { // Maybe unexpected linefeed enclosed=0; found_end_of_line=1; @@ -1538,9 +1621,7 @@ int READ_INFO::read_field() continue; } // End of enclosed field if followed by field_term or line_term - if (chr == my_b_EOF || - (chr == line_term_char && terminator(line_term_ptr, - line_term_length))) + if (chr == my_b_EOF || terminator(chr, m_line_term)) { /* Maybe unexpected linefeed */ enclosed=1; @@ -1549,8 +1630,7 @@ int READ_INFO::read_field() row_end= (uchar *) data.end(); return 0; } - if (chr == field_term_char && - terminator(field_term_ptr,field_term_length)) + if (terminator(chr, m_field_term)) { enclosed=1; row_start= (uchar *) data.ptr() + 1; @@ -1565,9 +1645,10 @@ int READ_INFO::read_field() /* copy the found term character to 'to' */ chr= found_enclosed_char; } - else if (chr == field_term_char && found_enclosed_char == INT_MAX) + else if (chr == m_field_term.initial_byte() && + found_enclosed_char == INT_MAX) { - if (terminator(field_term_ptr,field_term_length)) + if (terminator(m_field_term)) { enclosed=0; row_start= (uchar *) data.ptr(); @@ -1575,38 +1656,9 @@ int READ_INFO::read_field() return 0; } } -#ifdef USE_MB - if (my_mbcharlen(read_charset, chr) > 1) - { - uint32 length0= data.length(); - int ml= my_mbcharlen(read_charset, chr); - data.append(chr); - - for (int i= 
1; i < ml; i++) - { - chr= GET; - if (chr == my_b_EOF) - { - /* - Need to back up the bytes already ready from illformed - multi-byte char - */ - data.length(length0); - goto found_eof; - } - data.append(chr); - } - if (my_ismbchar(read_charset, - (const char *) data.ptr() + length0, - (const char *) data.end())) - continue; - for (int i= 0; i < ml; i++) - PUSH(data.end()[-1 - i]); - data.length(length0); - chr= GET; - } -#endif data.append(chr); + if (use_mb(read_charset) && read_mbtail(&data)) + goto found_eof; } /* ** We come here if buffer is too small. Enlarge it and continue @@ -1665,13 +1717,10 @@ int READ_INFO::read_fixed_length() data.append((uchar) unescape((char) chr)); continue; } - if (chr == line_term_char) - { - if (terminator(line_term_ptr,line_term_length)) - { // Maybe unexpected linefeed - found_end_of_line=1; - break; - } + if (terminator(chr, m_line_term)) + { // Maybe unexpected linefeed + found_end_of_line= true; + break; } data.append(chr); } @@ -1690,14 +1739,14 @@ found_eof: int READ_INFO::next_line() { line_cuted=0; - start_of_line= line_start_ptr != 0; + start_of_line= m_line_start.length() != 0; if (found_end_of_line || eof) { found_end_of_line=0; return eof; } found_end_of_line=0; - if (!line_term_length) + if (!m_line_term.length()) return 0; // No lines for (;;) { @@ -1725,10 +1774,11 @@ int READ_INFO::next_line() or a broken byte sequence was found. Check if the sequence is a prefix of the "LINES TERMINATED BY" string. */ - if ((uchar) buf[0] == line_term_char && i <= line_term_length && - !memcmp(buf, line_term_ptr, i)) + if ((uchar) buf[0] == m_line_term.initial_byte() && + i <= m_line_term.length() && + !memcmp(buf, m_line_term.ptr(), i)) { - if (line_term_length == i) + if (m_line_term.length() == i) { /* We found a "LINES TERMINATED BY" string that consists @@ -1742,10 +1792,11 @@ int READ_INFO::next_line() that still needs to be checked is (line_term_length - i). Note, READ_INFO::terminator() assumes that the leftmost byte of the argument is already scanned from the file and is checked to - be a known prefix (e.g. against line_term_char). + be a known prefix (e.g. against line_term.initial_char()). So we need to pass one extra byte. 
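
The READ_INFO rework above replaces the old ptr/length/char triples with Term_string objects and funnels every match through terminator(chr, term), which rejects on a cheap first-byte comparison before doing the full memcmp (the real method also ungets bytes via PUSH() on a partial mismatch, which this sketch omits). A standalone illustration with stand-in types, not the server's IO_CACHE-based reader:

    #include <climits>
    #include <cstdio>
    #include <cstring>

    // Stand-in for the patch's Term_string: the terminator is kept as unsigned
    // char so the first-byte test behaves the same on platforms where plain
    // char is signed.
    struct term_string
    {
      const unsigned char *ptr;
      unsigned length;
      int initial_byte;                  // INT_MAX when the terminator is empty

      term_string(const unsigned char *p, unsigned len)
        : ptr(p), length(len), initial_byte(len ? p[0] : INT_MAX) {}
    };

    // Same shape as terminator(int chr, const Term_string &str): bail out on
    // the first byte without touching memory, otherwise compare in full.
    static bool starts_with_term(const unsigned char *pos, unsigned avail,
                                 int chr, const term_string &t)
    {
      if (chr != t.initial_byte || avail < t.length)
        return false;
      return memcmp(pos, t.ptr, t.length) == 0;
    }

    int main()
    {
      static const unsigned char crlf[]= { '\r', '\n' };
      term_string line_term(crlf, 2);
      const unsigned char buf[]= { 'a', 'b', 'c', '\r', '\n', 'd', 'e', 'f' };
      printf("%d\n", starts_with_term(buf + 3, 5, buf[3], line_term));  // 1
      printf("%d\n", starts_with_term(buf, 8, buf[0], line_term));      // 0
      return 0;
    }
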
*/ - if (terminator(line_term_ptr + i - 1, line_term_length - i + 1)) + if (terminator(m_line_term.ptr() + i - 1, + m_line_term.length() - i + 1)) return 0; } /* @@ -1768,7 +1819,7 @@ int READ_INFO::next_line() return 1; continue; } - if (buf[0] == line_term_char && terminator(line_term_ptr,line_term_length)) + if (terminator(buf[0], m_line_term)) return 0; line_cuted= true; } @@ -1777,30 +1828,12 @@ int READ_INFO::next_line() bool READ_INFO::find_start_of_fields() { - int chr; - try_again: - do - { - if ((chr=GET) == my_b_EOF) - { - found_end_of_line=eof=1; - return 1; - } - } while ((char) chr != line_start_ptr[0]); - for (const char *ptr=line_start_ptr+1 ; ptr != line_start_end ; ptr++) + for (int chr= GET ; chr != my_b_EOF ; chr= GET) { - chr=GET; // Eof will be checked later - if ((char) chr != *ptr) - { // Can't be line_start - PUSH(chr); - while (--ptr != line_start_ptr) - { // Restart with next char - PUSH( *ptr); - } - goto try_again; - } + if (terminator(chr, m_line_start)) + return false; } - return 0; + return (found_end_of_line= eof= true); } @@ -1881,26 +1914,8 @@ int READ_INFO::read_value(int delim, String *val) int chr; String tmp; - for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF;) + for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF; chr= GET) { -#ifdef USE_MB - if (my_mbcharlen(read_charset, chr) > 1) - { - DBUG_PRINT("read_xml",("multi byte")); - int i, ml= my_mbcharlen(read_charset, chr); - for (i= 1; i < ml; i++) - { - val->append(chr); - /* - Don't use my_tospace() in the middle of a multi-byte character - TODO: check that the multi-byte sequence is valid. - */ - chr= GET; - if (chr == my_b_EOF) - return chr; - } - } -#endif if(chr == '&') { tmp.length(0); @@ -1920,8 +1935,11 @@ int READ_INFO::read_value(int delim, String *val) } } else + { val->append(chr); - chr= GET; + if (use_mb(read_charset) && read_mbtail(val)) + return my_b_EOF; + } } return my_tospace(chr); } @@ -1990,11 +2008,11 @@ int READ_INFO::read_xml(THD *thd) } // row tag should be in ROWS IDENTIFIED BY '' - stored in line_term - if((tag.length() == line_term_length -2) && - (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0)) + if((tag.length() == m_line_term.length() - 2) && + (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0)) { DBUG_PRINT("read_xml", ("start-of-row: %i %s %s", - level,tag.c_ptr_safe(), line_term_ptr)); + level,tag.c_ptr_safe(), m_line_term.ptr())); } if(chr == ' ' || chr == '>') @@ -2061,8 +2079,8 @@ int READ_INFO::read_xml(THD *thd) chr= my_tospace(GET); } - if((tag.length() == line_term_length -2) && - (memcmp(tag.ptr(), line_term_ptr + 1, tag.length()) == 0)) + if((tag.length() == m_line_term.length() - 2) && + (memcmp(tag.ptr(), m_line_term.ptr() + 1, tag.length()) == 0)) { DBUG_PRINT("read_xml", ("found end-of-row %i %s", level, tag.c_ptr_safe())); diff --git a/sql/sql_locale.h b/sql/sql_locale.h index 8357a9ecba4..e231393eec6 100644 --- a/sql/sql_locale.h +++ b/sql/sql_locale.h @@ -19,7 +19,7 @@ typedef struct my_locale_errmsgs { const char *language; - const char **errmsgs; + const char ***errmsgs; } MY_LOCALE_ERRMSGS; #include "my_global.h" /* uint */ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7c50e4ed680..a6bb89f05df 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1122,7 +1122,6 @@ static enum enum_server_command fetch_command(THD *thd, char *packet) { enum enum_server_command command= (enum enum_server_command) (uchar) packet[0]; - NET *net= &thd->net; DBUG_ENTER("fetch_command"); if (command >= COM_END || @@ 
-1130,7 +1129,7 @@ static enum enum_server_command fetch_command(THD *thd, char *packet) command= COM_END; // Wrong command DBUG_PRINT("info",("Command on %s = %d (%s)", - vio_description(net->vio), command, + vio_description(thd->net.vio), command, command_name[command].str)); DBUG_RETURN(command); } @@ -4521,7 +4520,7 @@ end_with_restore_list: lex->table_count); if (result) { - res= mysql_select(thd, &select_lex->ref_pointer_array, + res= mysql_select(thd, select_lex->get_table_list(), select_lex->with_wild, select_lex->item_list, @@ -5309,6 +5308,7 @@ end_with_restore_list: } case SQLCOM_SHUTDOWN: #ifndef EMBEDDED_LIBRARY + DBUG_EXECUTE_IF("crash_shutdown", DBUG_SUICIDE();); if (check_global_access(thd,SHUTDOWN_ACL)) goto error; kill_mysql(thd); @@ -7789,7 +7789,6 @@ add_proc_to_list(THD* thd, Item *item) item_ptr = (Item**) (order+1); *item_ptr= item; order->item=item_ptr; - order->free_me=0; thd->lex->proc_list.link_in_list(order, &order->next); return 0; } @@ -7807,8 +7806,7 @@ bool add_to_list(THD *thd, SQL_I_List &list, Item *item,bool asc) DBUG_RETURN(1); order->item_ptr= item; order->item= &order->item_ptr; - order->asc = asc; - order->free_me=0; + order->direction= (asc ? ORDER::ORDER_ASC : ORDER::ORDER_DESC); order->used=0; order->counter_used= 0; order->fast_field_copier_setup= 0; @@ -8225,6 +8223,65 @@ TABLE_LIST *st_select_lex::convert_right_join() DBUG_RETURN(tab1); } + +void st_select_lex::prepare_add_window_spec(THD *thd) +{ + LEX *lex= thd->lex; + lex->save_group_list= group_list; + lex->save_order_list= order_list; + lex->win_ref= NULL; + lex->win_frame= NULL; + lex->frame_top_bound= NULL; + lex->frame_bottom_bound= NULL; + group_list.empty(); + order_list.empty(); +} + +bool st_select_lex::add_window_def(THD *thd, + LEX_STRING *win_name, + LEX_STRING *win_ref, + SQL_I_List win_partition_list, + SQL_I_List win_order_list, + Window_frame *win_frame) +{ + SQL_I_List *win_part_list_ptr= + new (thd->mem_root) SQL_I_List (win_partition_list); + SQL_I_List *win_order_list_ptr= + new (thd->mem_root) SQL_I_List (win_order_list); + if (!(win_part_list_ptr && win_order_list_ptr)) + return true; + Window_def *win_def= new (thd->mem_root) Window_def(win_name, + win_ref, + win_part_list_ptr, + win_order_list_ptr, + win_frame); + group_list= thd->lex->save_group_list; + order_list= thd->lex->save_order_list; + return (win_def == NULL || window_specs.push_back(win_def)); +} + +bool st_select_lex::add_window_spec(THD *thd, + LEX_STRING *win_ref, + SQL_I_List win_partition_list, + SQL_I_List win_order_list, + Window_frame *win_frame) +{ + SQL_I_List *win_part_list_ptr= + new (thd->mem_root) SQL_I_List (win_partition_list); + SQL_I_List *win_order_list_ptr= + new (thd->mem_root) SQL_I_List (win_order_list); + if (!(win_part_list_ptr && win_order_list_ptr)) + return true; + Window_spec *win_spec= new (thd->mem_root) Window_spec(win_ref, + win_part_list_ptr, + win_order_list_ptr, + win_frame); + group_list= thd->lex->save_group_list; + order_list= thd->lex->save_order_list; + thd->lex->win_spec= win_spec; + return (win_spec == NULL || window_specs.push_back(win_spec)); +} + /** Set lock for all tables in current select level. 
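
The prepare_add_window_spec() / add_window_spec() pair above suggests that the grammar parses a window's PARTITION BY and ORDER BY through the select's ordinary group_list / order_list rules: the select's own lists are stashed in the LEX first and restored once the window clause has been captured. A minimal standalone sketch of that save/parse/restore pattern; the types here are hypothetical stand-ins, only the pattern itself is taken from the patch:

    #include <cstdio>
    #include <string>
    #include <vector>

    typedef std::vector<std::string> order_list_t;  // stand-in for SQL_I_List<ORDER>

    struct select_ctx  { order_list_t group_list, order_list; };
    struct lex_ctx     { order_list_t save_group_list, save_order_list; };
    struct window_spec { order_list_t partition_list, order_list; };

    // Before the window clause: stash the select's lists and start empty ones,
    // so the PARTITION BY / ORDER BY rules fill fresh lists.
    static void prepare_add_window_spec(select_ctx *sel, lex_ctx *lex)
    {
      lex->save_group_list= sel->group_list;
      lex->save_order_list= sel->order_list;
      sel->group_list.clear();
      sel->order_list.clear();
    }

    // After the window clause: keep the freshly parsed lists in the window
    // spec and give the select its own GROUP BY / ORDER BY back.
    static void add_window_spec(select_ctx *sel, lex_ctx *lex,
                                order_list_t win_partition_list,
                                order_list_t win_order_list,
                                window_spec *spec)
    {
      spec->partition_list= win_partition_list;
      spec->order_list= win_order_list;
      sel->group_list= lex->save_group_list;
      sel->order_list= lex->save_order_list;
    }

    int main()
    {
      select_ctx sel;
      lex_ctx lex;
      sel.order_list.push_back("c_outer");     // the select's own ORDER BY

      prepare_add_window_spec(&sel, &lex);
      sel.group_list.push_back("dept");        // window PARTITION BY dept
      sel.order_list.push_back("salary");      // window ORDER BY salary

      window_spec w;
      add_window_spec(&sel, &lex, sel.group_list, sel.order_list, &w);
      printf("%s / %s / %s\n", w.partition_list[0].c_str(),
             w.order_list[0].c_str(), sel.order_list[0].c_str());
      // prints: dept / salary / c_outer
      return 0;
    }
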
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index dbe19674cf2..f540c268923 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -1544,22 +1544,26 @@ int plugin_init(int *argc, char **argv, int flags) init_alloc_root(&plugin_vars_mem_root, 4096, 4096, MYF(0)); init_alloc_root(&tmp_root, 4096, 4096, MYF(0)); - if (my_hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0, + if (my_hash_init(&bookmark_hash, &my_charset_bin, 32, 0, 0, get_bookmark_hash_key, NULL, HASH_UNIQUE)) goto err; mysql_mutex_init(key_LOCK_plugin, &LOCK_plugin, MY_MUTEX_INIT_FAST); + /* + The 80 is from 2016-04-27 when we had 71 default plugins + Big enough to avoid many mallocs even in future + */ if (my_init_dynamic_array(&plugin_dl_array, sizeof(struct st_plugin_dl *), 16, 16, MYF(0)) || my_init_dynamic_array(&plugin_array, - sizeof(struct st_plugin_int *), 16, 16, MYF(0))) + sizeof(struct st_plugin_int *), 80, 32, MYF(0))) goto err; for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++) { - if (my_hash_init(&plugin_hash[i], system_charset_info, 16, 0, 0, + if (my_hash_init(&plugin_hash[i], system_charset_info, 32, 0, 0, get_plugin_hash_key, NULL, HASH_UNIQUE)) goto err; } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 8e5ab71288d..2d6a7302afc 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1316,7 +1316,7 @@ static bool mysql_test_insert(Prepared_statement *stmt, my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto error; } - if (setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), *values, MARK_COLUMNS_NONE, 0, 0)) goto error; } } @@ -1406,7 +1406,8 @@ static int mysql_test_update(Prepared_statement *stmt, table_list->register_want_access(want_privilege); #endif thd->lex->select_lex.no_wrap_view_item= TRUE; - res= setup_fields(thd, 0, select->item_list, MARK_COLUMNS_READ, 0, 0); + res= setup_fields(thd, Ref_ptr_array(), + select->item_list, MARK_COLUMNS_READ, 0, 0); thd->lex->select_lex.no_wrap_view_item= FALSE; if (res) goto error; @@ -1417,7 +1418,8 @@ static int mysql_test_update(Prepared_statement *stmt, (SELECT_ACL & ~table_list->table->grant.privilege); table_list->register_want_access(SELECT_ACL); #endif - if (setup_fields(thd, 0, stmt->lex->value_list, MARK_COLUMNS_NONE, 0, 0) || + if (setup_fields(thd, Ref_ptr_array(), + stmt->lex->value_list, MARK_COLUMNS_NONE, 0, 0) || check_unique_table(thd, table_list)) goto error; /* TODO: here we should send types of placeholders to the client. 
*/ @@ -1463,7 +1465,7 @@ static bool mysql_test_delete(Prepared_statement *stmt, my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); goto error; } - if (!table_list->table || !table_list->table->created) + if (!table_list->table || !table_list->table->is_created()) { my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), table_list->view_db.str, table_list->view_name.str); @@ -1589,7 +1591,8 @@ static bool mysql_test_do_fields(Prepared_statement *stmt, if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL, DT_PREPARE | DT_CREATE)) DBUG_RETURN(TRUE); - DBUG_RETURN(setup_fields(thd, 0, *values, MARK_COLUMNS_NONE, 0, 0)); + DBUG_RETURN(setup_fields(thd, Ref_ptr_array(), + *values, MARK_COLUMNS_NONE, 0, 0)); } diff --git a/sql/sql_priv.h b/sql/sql_priv.h index cc56daacf3e..b15a80a889a 100644 --- a/sql/sql_priv.h +++ b/sql/sql_priv.h @@ -344,6 +344,7 @@ enum enum_parsing_place IN_WHERE, IN_ON, IN_GROUP_BY, + IN_ORDER_BY, PARSING_PLACE_SIZE /* always should be the last */ }; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index c9e2b3a586d..36f0cd84cbf 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -182,6 +182,7 @@ struct binlog_send_info { { error_text[0] = 0; bzero(&error_gtid, sizeof(error_gtid)); + until_binlog_state.init(); } }; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 3134f8eb007..6c4d2e1fc9c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -54,6 +54,7 @@ #include "sql_derived.h" #include "sql_statistics.h" #include "sql_cte.h" +#include "sql_window.h" #include "debug_sync.h" // DEBUG_SYNC #include @@ -117,7 +118,6 @@ static int join_tab_cmp_straight(const void *dummy, const void* ptr1, const void static int join_tab_cmp_embedded_first(const void *emb, const void* ptr1, const void *ptr2); C_MODE_END static uint cache_record_length(JOIN *join,uint index); -bool get_best_combination(JOIN *join); static store_key *get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, KEY_PART_INFO *key_part, uchar *key_buff, @@ -164,8 +164,11 @@ static COND *optimize_cond(JOIN *join, COND *conds, COND_EQUAL **cond_equal, int flags= 0); bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); -static int do_select(JOIN *join,List *fields,TABLE *tmp_table, - Procedure *proc); +static int do_select(JOIN *join, Procedure *procedure); +static bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, + MARIA_COLUMNDEF *start_recinfo, + MARIA_COLUMNDEF **recinfo, + ulonglong options); static enum_nested_loop_state evaluate_join_record(JOIN *, JOIN_TAB *, int); static enum_nested_loop_state @@ -179,7 +182,6 @@ end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); static enum_nested_loop_state end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); -static int test_if_group_changed(List &list); static int join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos); static int join_read_system(JOIN_TAB *tab); static int join_read_const(JOIN_TAB *tab); @@ -232,11 +234,7 @@ static bool list_contains_unique_index(TABLE *table, bool (*find_func) (Field *, void *), void *data); static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); -static int create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit, - bool is_order_by); -static int remove_duplicates(JOIN *join,TABLE *entry,List &fields, - Item *having); +int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); static int 
remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, @@ -245,7 +243,7 @@ static int remove_dup_with_hash_index(THD *thd,TABLE *table, static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref); static bool setup_new_fields(THD *thd, List &fields, List &all_fields, ORDER *new_order); -static ORDER *create_distinct_group(THD *thd, Item **ref_pointer_array, +static ORDER *create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, ORDER *order, List &fields, List &all_fields, bool *all_order_by_fields_used); @@ -256,12 +254,12 @@ static void calc_group_buffer(JOIN *join,ORDER *group); static bool make_group_fields(JOIN *main_join, JOIN *curr_join); static bool alloc_group_fields(JOIN *join,ORDER *group); // Create list for using with tempory table -static bool change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, +static bool change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &new_list1, List &new_list2, uint elements, List &items); // Create list for using with tempory table -static bool change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, +static bool change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &new_list1, List &new_list2, uint elements, List &items); @@ -288,7 +286,7 @@ static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab, uint n_top_tabs_count, JOIN_TAB *tab); static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s, table_map rem_tables); - +void set_postjoin_aggr_write_func(JOIN_TAB *tab); #ifndef DBUG_OFF /* @@ -364,7 +362,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, every PS/SP execution new, we will not need reset this flag if setup_tables_done_option changed for next rexecution */ - res= mysql_select(thd, &select_lex->ref_pointer_array, + res= mysql_select(thd, select_lex->table_list.first, select_lex->with_wild, select_lex->item_list, select_lex->where, @@ -455,7 +453,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, bool fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, - Item **ref_pointer_array) + Ref_ptr_array ref_pointer_array) { Item_outer_ref *ref; @@ -484,10 +482,9 @@ fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, existing one. The change will lead to less operations for copying fields, smaller temporary tables and less data passed through filesort. */ - if (ref_pointer_array && !ref->found_in_select_list) + if (!ref_pointer_array.is_null() && !ref->found_in_select_list) { int el= all_fields.elements; - DBUG_ASSERT(all_fields.elements <= select->ref_pointer_array_size); ref_pointer_array[el]= item; /* Add the field item to the select list of the current select. */ all_fields.push_front(item, thd->mem_root); @@ -495,7 +492,7 @@ fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, If it's needed reset each Item_ref item that refers this field with a new reference taken from ref_pointer_array. */ - item_ref= ref_pointer_array + el; + item_ref= &ref_pointer_array[el]; } if (ref->in_sum_func) @@ -533,6 +530,7 @@ fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, if (!ref->fixed && ref->fix_fields(thd, 0)) return TRUE; thd->lex->used_tables|= item->used_tables(); + thd->lex->current_select->select_list_tables|= item->used_tables(); } return false; } @@ -615,22 +613,26 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex) /** Function to setup clauses without sum functions. 
*/ -inline int setup_without_group(THD *thd, Item **ref_pointer_array, - TABLE_LIST *tables, - List &leaves, - List &fields, - List &all_fields, - COND **conds, - ORDER *order, - ORDER *group, - bool *hidden_group_fields, - uint *reserved) +static inline int +setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array, + TABLE_LIST *tables, + List &leaves, + List &fields, + List &all_fields, + COND **conds, + ORDER *order, + ORDER *group, + List &win_specs, + List &win_funcs, + bool *hidden_group_fields, + uint *reserved) { int res; + enum_parsing_place save_place; st_select_lex *const select= thd->lex->current_select; nesting_map save_allow_sum_func= thd->lex->allow_sum_func; /* - Need to save the value, so we can turn off only any new non_agg_field_used + Need to stave the value, so we can turn off only any new non_agg_field_used additions coming from the WHERE */ const bool saved_non_agg_field_used= select->non_agg_field_used(); @@ -650,11 +652,21 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, select->set_non_agg_field_used(saved_non_agg_field_used); thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; + + save_place= thd->lex->current_select->parsing_place; + thd->lex->current_select->parsing_place= IN_ORDER_BY; res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, order); - thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); + thd->lex->current_select->parsing_place= save_place; + thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level); + save_place= thd->lex->current_select->parsing_place; + thd->lex->current_select->parsing_place= IN_GROUP_BY; res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields, group, hidden_group_fields); + thd->lex->current_select->parsing_place= save_place; + thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level; + res= res || setup_windows(thd, ref_pointer_array, tables, fields, all_fields, + win_specs, win_funcs); thd->lex->allow_sum_func= save_allow_sum_func; DBUG_RETURN(res); } @@ -678,8 +690,7 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, 0 on success */ int -JOIN::prepare(Item ***rref_pointer_array, - TABLE_LIST *tables_init, +JOIN::prepare(TABLE_LIST *tables_init, uint wild_num, COND *conds_init, uint og_num, ORDER *order_init, bool skip_order_by, ORDER *group_init, Item *having_init, @@ -783,24 +794,33 @@ JOIN::prepare(Item ***rref_pointer_array, tbl->table->maybe_null= 1; } - if ((wild_num && setup_wild(thd, tables_list, fields_list, &all_fields, - wild_num)) || - select_lex->setup_ref_array(thd, og_num) || - setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ, - &all_fields, 1) || - setup_without_group(thd, (*rref_pointer_array), tables_list, - select_lex->leaf_tables, fields_list, - all_fields, &conds, order, group_list, - &hidden_group_fields, &select_lex->select_n_reserved)) - DBUG_RETURN(-1); /* purecov: inspected */ + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num)) + DBUG_RETURN(-1); + if (select_lex->setup_ref_array(thd, og_num)) + DBUG_RETURN(-1); - ref_pointer_array= *rref_pointer_array; + ref_ptrs= ref_ptr_array_slice(0); + + enum_parsing_place save_place= thd->lex->current_select->parsing_place; + thd->lex->current_select->parsing_place= SELECT_LIST; + if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ, + &all_fields, 1)) + DBUG_RETURN(-1); + thd->lex->current_select->parsing_place= save_place; + if (setup_without_group(thd, ref_ptrs, tables_list, + 
select_lex->leaf_tables, fields_list, + all_fields, &conds, order, group_list, + select_lex->window_specs, + select_lex->window_funcs, + &hidden_group_fields, + &select_lex->select_n_reserved)) + DBUG_RETURN(-1); /* Resolve the ORDER BY that was skipped, then remove it. */ if (skip_order_by && select_lex != select_lex->master_unit()->global_parameters()) { - if (setup_order(thd, (*rref_pointer_array), tables_list, fields_list, + if (setup_order(thd, ref_ptrs, tables_list, fields_list, all_fields, select_lex->order_list.first)) DBUG_RETURN(-1); select_lex->order_list.empty(); @@ -828,6 +848,12 @@ JOIN::prepare(Item ***rref_pointer_array, if (having_fix_rc || thd->is_error()) DBUG_RETURN(-1); /* purecov: inspected */ thd->lex->allow_sum_func= save_allow_sum_func; + + if (having->with_window_func) + { + my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); + DBUG_RETURN(-1); + } } With_clause *with_clause=select_lex->get_with_clause(); @@ -867,14 +893,14 @@ JOIN::prepare(Item ***rref_pointer_array, real_order= TRUE; if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) - item->split_sum_func(thd, ref_pointer_array, all_fields, 0); + item->split_sum_func(thd, ref_ptrs, all_fields, 0); } if (!real_order) order= NULL; } if (having && having->with_sum_func) - having->split_sum_func2(thd, ref_pointer_array, all_fields, + having->split_sum_func2(thd, ref_ptrs, all_fields, &having, SPLIT_SUM_SKIP_REGISTERED); if (select_lex->inner_sum_func_list) { @@ -883,13 +909,13 @@ JOIN::prepare(Item ***rref_pointer_array, do { item_sum= item_sum->next; - item_sum->split_sum_func2(thd, ref_pointer_array, + item_sum->split_sum_func2(thd, ref_ptrs, all_fields, item_sum->ref_by, 0); } while (item_sum != end); } if (select_lex->inner_refs_list.elements && - fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array)) + fix_inner_refs(thd, all_fields, select_lex, ref_ptrs)) DBUG_RETURN(-1); if (group_list) @@ -907,10 +933,9 @@ JOIN::prepare(Item ***rref_pointer_array, { Item_field *field= new (thd->mem_root) Item_field(thd, *(Item_field**)ord->item); int el= all_fields.elements; - DBUG_ASSERT(all_fields.elements <= select_lex->ref_pointer_array_size); - ref_pointer_array[el]= field; + ref_ptrs[el]= field; all_fields.push_front(field, thd->mem_root); - ord->item= ref_pointer_array + el; + ord->item= &ref_ptrs[el]; } } } @@ -963,6 +988,12 @@ JOIN::prepare(Item ***rref_pointer_array, } if (thd->lex->derived_tables) { + /* + Queries with derived tables and PROCEDURE are not allowed. + Many of such queries are disallowed grammatically, but there + are still some complex cases: + SELECT 1 FROM (SELECT 1) a PROCEDURE ANALYSE() + */ my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", thd->lex->derived_tables & DERIVED_VIEW ? 
"view" : "subquery"); @@ -970,6 +1001,7 @@ JOIN::prepare(Item ***rref_pointer_array, } if (thd->lex->sql_command != SQLCOM_SELECT) { + // EXPLAIN SELECT * FROM t1 PROCEDURE ANALYSE() my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "non-SELECT"); goto err; } @@ -1006,11 +1038,14 @@ bool JOIN::prepare_stage2() /* Init join struct */ count_field_types(select_lex, &tmp_table_param, all_fields, 0); - ref_pointer_array_size= all_fields.elements*sizeof(Item*); this->group= group_list != 0; if (tmp_table_param.sum_func_count && !group_list) + { implicit_grouping= TRUE; + // Result will contain zero or one row - ordering is meaningless + order= NULL; + } #ifdef RESTRICTED_GROUP if (implicit_grouping) @@ -1056,6 +1091,24 @@ int JOIN::optimize() need_tmp, !skip_sort_order && !no_order && (order || group_list), select_distinct); + uint select_nr= select_lex->select_number; + JOIN_TAB *curr_tab= join_tab + top_join_tab_count; + for (uint i= 0; i < aggr_tables; i++, curr_tab++) + { + if (select_nr == INT_MAX) + { + /* this is a fake_select_lex of a union */ + select_nr= select_lex->master_unit()->first_select()->select_number; + curr_tab->tracker= thd->lex->explain->get_union(select_nr)-> + get_tmptable_read_tracker(); + } + else + { + curr_tab->tracker= thd->lex->explain->get_select(select_nr)-> + get_using_temporary_read_tracker(); + } + } + } return res; } @@ -1344,6 +1397,8 @@ JOIN::optimize_inner() { DBUG_PRINT("info",("No tables")); error= 0; + if (make_aggr_tables_info()) + DBUG_RETURN(1); goto setup_subq_exit; } error= -1; // Error is sent to client @@ -1355,7 +1410,6 @@ JOIN::optimize_inner() calling make_join_statistics() as this may call get_best_group_min_max() which needs a simplfied group_list. */ - simple_group= 1; if (group_list && table_count == 1) { group_list= remove_const(this, group_list, conds, @@ -1612,7 +1666,8 @@ JOIN::optimize_inner() (!join_tab[const_tables].select || !join_tab[const_tables].select->quick || join_tab[const_tables].select->quick->get_type() != - QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) && + !select_lex->have_window_funcs()) { if (group && rollup.state == ROLLUP::STATE_NONE && list_contains_unique_index(join_tab[const_tables].table, @@ -1662,11 +1717,13 @@ JOIN::optimize_inner() } if (group || tmp_table_param.sum_func_count) { - if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE) + if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE + && !select_lex->have_window_funcs()) select_distinct=0; } else if (select_distinct && table_count - const_tables == 1 && - rollup.state == ROLLUP::STATE_NONE) + rollup.state == ROLLUP::STATE_NONE && + !select_lex->have_window_funcs()) { /* We are only using one table. 
In this case we change DISTINCT to a @@ -1688,16 +1745,20 @@ JOIN::optimize_inner() tab= &join_tab[const_tables]; if (order) { - skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1, - &tab->table->keys_in_use_for_order_by); + skip_sort_order= + test_if_skip_sort_order(tab, order, select_limit, + true, // no_changes + &tab->table->keys_in_use_for_order_by); } if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array, order, fields_list, all_fields, &all_order_fields_used))) { - bool skip_group= (skip_sort_order && - test_if_skip_sort_order(tab, group_list, select_limit, 1, - &tab->table->keys_in_use_for_group_by) != 0); + const bool skip_group= + skip_sort_order && + test_if_skip_sort_order(tab, group_list, select_limit, + true, // no_changes + &tab->table->keys_in_use_for_group_by); count_field_types(select_lex, &tmp_table_param, all_fields, 0); if ((skip_group && all_order_fields_used) || select_limit == HA_POS_ERROR || @@ -1726,6 +1787,7 @@ JOIN::optimize_inner() else if (thd->is_fatal_error) // End of memory DBUG_RETURN(1); } + simple_group= rollup.state == ROLLUP::STATE_NONE; if (group) { /* @@ -1749,6 +1811,7 @@ JOIN::optimize_inner() group_optimized_away= 1; } } + calc_group_buffer(this, group_list); send_group_parts= tmp_table_param.group_parts; /* Save org parts */ if (procedure && procedure->group) @@ -1790,6 +1853,11 @@ JOIN::optimize_inner() } need_tmp= test_if_need_tmp_table(); + //TODO this could probably go in test_if_need_tmp_table. + if (this->select_lex->window_specs.elements > 0) { + need_tmp= TRUE; + simple_order= FALSE; + } /* If the hint FORCE INDEX FOR ORDER BY/GROUP BY is used for the table @@ -1813,6 +1881,31 @@ JOIN::optimize_inner() if (!(select_options & SELECT_DESCRIBE)) init_ftfuncs(thd, select_lex, MY_TEST(order)); + /* + It's necessary to check const part of HAVING cond as + there is a chance that some cond parts may become + const items after make_join_statisctics(for example + when Item is a reference to cost table field from + outer join). + This check is performed only for those conditions + which do not use aggregate functions. In such case + temporary table may not be used and const condition + elements may be lost during further having + condition transformation in JOIN::exec. + */ + if (having && const_table_map && !having->with_sum_func) + { + having->update_used_tables(); + having= having->remove_eq_conds(thd, &select_lex->having_value, true); + if (select_lex->having_value == Item::COND_FALSE) + { + having= new (thd->mem_root) Item_int(thd, (longlong) 0,1); + zero_result_cause= "Impossible HAVING noticed after reading const tables"; + error= 0; + DBUG_RETURN(0); + } + } + if (optimize_unflattened_subqueries()) DBUG_RETURN(1); @@ -1839,8 +1932,28 @@ JOIN::optimize_inner() DBUG_EXECUTE("info",TEST_join(this);); - if (const_tables != table_count) + if (!only_const_tables()) { + JOIN_TAB *tab= &join_tab[const_tables]; + + if (order) + { + /* + Force using of tmp table if sorting by a SP or UDF function due to + their expensive and probably non-deterministic nature. + */ + for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next) + { + Item *item= *tmp_order->item; + if (item->is_expensive()) + { + /* Force tmp table without sort */ + need_tmp=1; simple_order=simple_group=0; + break; + } + } + } + /* Because filesort always does a full table scan or a quick range scan we must add the removed reference to the select for the table. 
@@ -1848,72 +1961,197 @@ JOIN::optimize_inner() as in other cases the join is done before the sort. */ if ((order || group_list) && - join_tab[const_tables].type != JT_ALL && - join_tab[const_tables].type != JT_FT && - join_tab[const_tables].type != JT_REF_OR_NULL && + tab->type != JT_ALL && + tab->type != JT_FT && + tab->type != JT_REF_OR_NULL && ((order && simple_order) || (group_list && simple_group))) { - if (add_ref_to_table_cond(thd,&join_tab[const_tables])) { + if (add_ref_to_table_cond(thd,tab)) { DBUG_RETURN(1); } } /* - Calculate a possible 'limit' of table rows for 'GROUP BY': 'need_tmp' - implies that there will be more postprocessing so the specified - 'limit' should not be enforced yet in the call to - 'test_if_skip_sort_order'. + Investigate whether we may use an ordered index as part of either + DISTINCT, GROUP BY or ORDER BY execution. An ordered index may be + used for only the first of any of these terms to be executed. This + is reflected in the order which we check for test_if_skip_sort_order() + below. However we do not check for DISTINCT here, as it would have + been transformed to a GROUP BY at this stage if it is a candidate for + ordered index optimization. + If a decision was made to use an ordered index, the availability + of such an access path is stored in 'ordered_index_usage' for later + use by 'execute' or 'explain' */ - const ha_rows limit = need_tmp ? HA_POS_ERROR : unit->select_limit_cnt; + DBUG_ASSERT(ordered_index_usage == ordered_index_void); - if (!(select_options & SELECT_BIG_RESULT) && - ((group_list && - (!simple_group || - !test_if_skip_sort_order(&join_tab[const_tables], group_list, - limit, 0, - &join_tab[const_tables].table-> - keys_in_use_for_group_by))) || - select_distinct) && - tmp_table_param.quick_group && !procedure) - { - need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort - } - if (order) + if (group_list) // GROUP BY honoured first + // (DISTINCT was rewritten to GROUP BY if skippable) { /* - Do we need a temporary table due to the ORDER BY not being equal to - the GROUP BY? The call to test_if_skip_sort_order above tests for the - GROUP BY clause only and hence is not valid in this case. So the - estimated number of rows to be read from the first table is not valid. - We clear it here so that it doesn't show up in EXPLAIN. - */ - if (need_tmp && (select_options & SELECT_DESCRIBE) != 0) - join_tab[const_tables].limit= 0; - /* - Force using of tmp table if sorting by a SP or UDF function due to - their expensive and probably non-deterministic nature. + When there is SQL_BIG_RESULT do not sort using index for GROUP BY, + and thus force sorting on disk unless a group min-max optimization + is going to be used as it is applied now only for one table queries + with covering indexes. */ - for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next) - { - Item *item= *tmp_order->item; - if (item->is_expensive()) + if (!(select_options & SELECT_BIG_RESULT) || + (tab->select && + tab->select->quick && + tab->select->quick->get_type() == + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) + { + if (simple_group && // GROUP BY is possibly skippable + !select_distinct) // .. if not preceded by a DISTINCT { - /* Force tmp table without sort */ - need_tmp=1; simple_order=simple_group=0; - break; + /* + Calculate a possible 'limit' of table rows for 'GROUP BY': + A specified 'LIMIT' is relative to the final resultset. 
+ 'need_tmp' implies that there will be more postprocessing + so the specified 'limit' should not be enforced yet. + */ + const ha_rows limit = need_tmp ? HA_POS_ERROR : select_limit; + if (test_if_skip_sort_order(tab, group_list, limit, false, + &tab->table->keys_in_use_for_group_by)) + { + ordered_index_usage= ordered_index_group_by; + } + } + + /* + If we are going to use semi-join LooseScan, it will depend + on the selected index scan to be used. If index is not used + for the GROUP BY, we risk that sorting is put on the LooseScan + table. In order to avoid this, force use of temporary table. + TODO: Explain the quick_group part of the test below. + */ + if ((ordered_index_usage != ordered_index_group_by) && + ((tmp_table_param.quick_group && !procedure) || + (tab->emb_sj_nest && + best_positions[const_tables].sj_strategy == SJ_OPT_LOOSE_SCAN))) + { + need_tmp=1; + simple_order= simple_group= false; // Force tmp table without sort } } } - } + else if (order && // ORDER BY wo/ preceeding GROUP BY + (simple_order || skip_sort_order)) // which is possibly skippable + { + if (test_if_skip_sort_order(tab, order, select_limit, false, + &tab->table->keys_in_use_for_order_by)) + { + ordered_index_usage= ordered_index_order_by; + } + } + } + + if (having) + having_is_correlated= MY_TEST(having->used_tables() & OUTER_REF_TABLE_BIT); + tmp_having= having; if ((select_lex->options & OPTION_SCHEMA_TABLE)) optimize_schema_tables_reads(this); + /* + The loose index scan access method guarantees that all grouping or + duplicate row elimination (for distinct) is already performed + during data retrieval, and that all MIN/MAX functions are already + computed for each group. Thus all MIN/MAX functions should be + treated as regular functions, and there is no need to perform + grouping in the main execution loop. + Notice that currently loose index scan is applicable only for + single table queries, thus it is sufficient to test only the first + join_tab element of the plan for its access method. + */ + if (join_tab->is_using_loose_index_scan()) + { + tmp_table_param.precomputed_group_by= TRUE; + if (join_tab->is_using_agg_loose_index_scan()) + { + need_distinct= FALSE; + tmp_table_param.precomputed_group_by= FALSE; + } + } + + if (make_aggr_tables_info()) + DBUG_RETURN(1); + + error= 0; + + if (select_options & SELECT_DESCRIBE) + goto derived_exit; + + DBUG_RETURN(0); + +setup_subq_exit: + /* Choose an execution strategy for this JOIN. */ + if (!tables_list || !table_count) + choose_tableless_subquery_plan(); + /* + Even with zero matching rows, subqueries in the HAVING clause may + need to be evaluated if there are aggregate functions in the query. + */ + if (optimize_unflattened_subqueries()) + DBUG_RETURN(1); + error= 0; + +derived_exit: + + select_lex->mark_const_derived(zero_result_cause); + DBUG_RETURN(0); +} + + +/** + Set info for aggregation tables + + @details + This function finalizes execution plan by taking following actions: + .) aggregation temporary tables are created, but not instantiated + (this is done during execution). + JOIN_TABs for aggregation tables are set appropriately + (see JOIN::create_postjoin_aggr_table). + .) prepare fields lists (fields, all_fields, ref_pointer_array slices) for + each required stage of execution. These fields lists are set for + working tables' tabs and for the tab of last table in the join. + .) info for sorting/grouping/dups removal is prepared and saved in + appropriate tabs. 
Here is an example: + + @returns + false - Ok + true - Error +*/ + +bool JOIN::make_aggr_tables_info() +{ + List *curr_all_fields= &all_fields; + List *curr_fields_list= &fields_list; + JOIN_TAB *curr_tab= join_tab + const_tables; + TABLE *exec_tmp_table= NULL; + bool distinct= false; + bool keep_row_order= false; + DBUG_ENTER("JOIN::make_aggr_tables_info"); + + const bool has_group_by= this->group; + + sort_and_group_aggr_tab= NULL; + + + /* + Setup last table to provide fields and all_fields lists to the next + node in the plan. + */ + if (join_tab) + { + join_tab[top_join_tab_count - 1].fields= &fields_list; + join_tab[top_join_tab_count - 1].all_fields= &all_fields; + } + /* All optimization is done. Check if we can use the storage engines group by handler to evaluate the group by */ - - if ((tmp_table_param.sum_func_count || group_list) && !procedure) + if (tables_list && (tmp_table_param.sum_func_count || group_list) && + !procedure) { /* At the moment we only support push down for queries where @@ -1933,24 +2171,39 @@ JOIN::optimize_inner() Query query= {&all_fields, select_distinct, tables_list, conds, group_list, order ? order : group_list, having}; group_by_handler *gbh= ht->create_group_by(thd, &query); + if (gbh) { pushdown_query= new (thd->mem_root) Pushdown_query(select_lex, gbh); - /* We must store rows in the tmp table if we need to do an ORDER BY or DISTINCT and the storage handler can't handle it. */ need_tmp= query.order_by || query.group_by || query.distinct; - tmp_table_param.hidden_field_count= (all_fields.elements - - fields_list.elements); - if (!(exec_tmp_table1= - create_tmp_table(thd, &tmp_table_param, all_fields, 0, - query.distinct, 1, - select_options, HA_POS_ERROR, "", - !need_tmp, query.order_by || query.group_by))) + distinct= query.distinct; + keep_row_order= query.order_by || query.group_by; + + order= query.order_by; + + aggr_tables++; + curr_tab= join_tab + top_join_tab_count; + bzero(curr_tab, sizeof(JOIN_TAB)); + curr_tab->ref.key= -1; + curr_tab->join= this; + + curr_tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param); + TABLE* table= create_tmp_table(thd, curr_tab->tmp_table_param, + all_fields, + NULL, query.distinct, + TRUE, select_options, HA_POS_ERROR, + "", !need_tmp, + query.order_by || query.group_by); + if (!table) DBUG_RETURN(1); + curr_tab->aggr= new (thd->mem_root) AGGR_OP(curr_tab); + curr_tab->aggr->set_write_func(::end_send); + curr_tab->table= table; /* Setup reference fields, used by summary functions and group by fields, to point to the temporary table. @@ -1959,17 +2212,18 @@ JOIN::optimize_inner() set_items_ref_array(items1). */ init_items_ref_array(); - items1= items0 + all_fields.elements; + items1= ref_ptr_array_slice(2); + //items1= items0 + all_fields.elements; if (change_to_use_tmp_fields(thd, items1, tmp_fields_list1, tmp_all_fields1, fields_list.elements, all_fields)) DBUG_RETURN(1); /* Give storage engine access to temporary table */ - gbh->table= exec_tmp_table1; - + gbh->table= table; pushdown_query->store_data_in_temp_table= need_tmp; pushdown_query->having= having; + /* Group by and having is calculated by the group_by handler. 
Reset the group by and having @@ -1986,21 +2240,19 @@ JOIN::optimize_inner() tmp_table_param.field_count+= tmp_table_param.sum_func_count; tmp_table_param.sum_func_count= 0; - /* Remember information about the original join */ - original_join_tab= join_tab; - original_table_count= table_count; + fields= curr_fields_list; - /* Set up one join tab to get sorting to work */ - const_tables= 0; - table_count= 1; - join_tab= (JOIN_TAB*) thd->calloc(sizeof(JOIN_TAB)); - join_tab[0].table= exec_tmp_table1; + //todo: new: + curr_tab->ref_array= &items1; + curr_tab->all_fields= &tmp_all_fields1; + curr_tab->fields= &tmp_fields_list1; DBUG_RETURN(thd->is_fatal_error); } } } + /* The loose index scan access method guarantees that all grouping or duplicate row elimination (for distinct) is already performed @@ -2012,193 +2264,583 @@ JOIN::optimize_inner() single table queries, thus it is sufficient to test only the first join_tab element of the plan for its access method. */ - if (join_tab->is_using_loose_index_scan()) + if (join_tab && join_tab->is_using_loose_index_scan()) + tmp_table_param.precomputed_group_by= + !join_tab->is_using_agg_loose_index_scan(); + + group_list_for_estimates= group_list; + /* Create a tmp table if distinct or if the sort is too complicated */ + if (need_tmp) { - tmp_table_param.precomputed_group_by= TRUE; - if (join_tab->is_using_agg_loose_index_scan()) + aggr_tables++; + curr_tab= join_tab + top_join_tab_count; + bzero(curr_tab, sizeof(JOIN_TAB)); + curr_tab->ref.key= -1; + if (only_const_tables()) + first_select= sub_select_postjoin_aggr; + + /* + Create temporary table on first execution of this join. + (Will be reused if this is a subquery that is executed several times.) + */ + init_items_ref_array(); + + ORDER *tmp_group= (ORDER *) 0; + if (!simple_group && !procedure && !(test_flags & TEST_NO_KEY_GROUP)) + tmp_group= group_list; + + tmp_table_param.hidden_field_count= + all_fields.elements - fields_list.elements; + + distinct= select_distinct && !group_list && + !select_lex->have_window_funcs(); + keep_row_order= false; + if (create_postjoin_aggr_table(curr_tab, + &all_fields, tmp_group, + group_list && simple_group, + distinct, keep_row_order)) + DBUG_RETURN(true); + exec_tmp_table= curr_tab->table; + + if (exec_tmp_table->distinct) + optimize_distinct(); + + /* + We don't have to store rows in temp table that doesn't match HAVING if: + - we are sorting the table and writing complete group rows to the + temp table. + - We are using DISTINCT without resolving the distinct as a GROUP BY + on all columns. + + If having is not handled here, it will be checked before the row + is sent to the client. 
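A small sketch of the case described above, with an assumed table t1: when grouped rows are materialized, the HAVING condition can be attached to the temporary table and checked while rows are written, rather than when they are sent to the client.

  CREATE TABLE t1 (a INT, b INT);
  SELECT a, COUNT(*) AS c FROM t1 GROUP BY a HAVING c > 1 ORDER BY c;
  -- groups failing HAVING are dropped while filling the aggregation temp table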
+ */ + if (having && + (sort_and_group || (exec_tmp_table->distinct && !group_list))) { - need_distinct= FALSE; - tmp_table_param.precomputed_group_by= FALSE; + // Attach HAVING to tmp table's condition + curr_tab->having= having; + having= NULL; // Already done } - } - error= 0; + /* Change sum_fields reference to calculated fields in tmp_table */ + items1= ref_ptr_array_slice(2); + if (sort_and_group || curr_tab->table->group || + tmp_table_param.precomputed_group_by) + { + if (change_to_use_tmp_fields(thd, items1, + tmp_fields_list1, tmp_all_fields1, + fields_list.elements, all_fields)) + DBUG_RETURN(true); + } + else + { + if (change_refs_to_tmp_fields(thd, items1, + tmp_fields_list1, tmp_all_fields1, + fields_list.elements, all_fields)) + DBUG_RETURN(true); + } + curr_all_fields= &tmp_all_fields1; + curr_fields_list= &tmp_fields_list1; + // Need to set them now for correct group_fields setup, reset at the end. + set_items_ref_array(items1); + curr_tab->ref_array= &items1; + curr_tab->all_fields= &tmp_all_fields1; + curr_tab->fields= &tmp_fields_list1; + set_postjoin_aggr_write_func(curr_tab); - tmp_having= having; - if (select_options & SELECT_DESCRIBE) - goto derived_exit; - having= 0; + tmp_table_param.func_count= 0; + tmp_table_param.field_count+= tmp_table_param.func_count; + if (sort_and_group || curr_tab->table->group) + { + tmp_table_param.field_count+= tmp_table_param.sum_func_count; + tmp_table_param.sum_func_count= 0; + } - DBUG_RETURN(0); + if (exec_tmp_table->group) + { // Already grouped + if (!order && !no_order && !skip_sort_order) + order= group_list; /* order by group */ + group_list= NULL; + } -setup_subq_exit: - /* Choose an execution strategy for this JOIN. */ - if (!tables_list || !table_count) - choose_tableless_subquery_plan(); - /* - Even with zero matching rows, subqueries in the HAVING clause may - need to be evaluated if there are aggregate functions in the query. - */ - if (optimize_unflattened_subqueries()) - DBUG_RETURN(1); - error= 0; + /* + If we have different sort & group then we must sort the data by group + and copy it to another tmp table + This code is also used if we are using distinct something + we haven't been able to store in the temporary table yet + like SEC_TO_TIME(SUM(...)). + */ + if ((group_list && + (!test_if_subpart(group_list, order) || select_distinct)) || + (select_distinct && tmp_table_param.using_outer_summary_function)) + { /* Must copy to another table */ + DBUG_PRINT("info",("Creating group table")); + + calc_group_buffer(this, group_list); + count_field_types(select_lex, &tmp_table_param, tmp_all_fields1, + select_distinct && !group_list); + tmp_table_param.hidden_field_count= + tmp_all_fields1.elements - tmp_fields_list1.elements; + + curr_tab++; + aggr_tables++; + bzero(curr_tab, sizeof(JOIN_TAB)); + curr_tab->ref.key= -1; -derived_exit: + /* group data to new table */ + /* + If the access method is loose index scan then all MIN/MAX + functions are precomputed, and should be treated as regular + functions. See extended comment above. 
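To make the "Creating group table" path above concrete (table assumed): when the ORDER BY does not match the GROUP BY, or DISTINCT is applied to a function of an aggregate such as SEC_TO_TIME(SUM(...)), the grouped result is copied to a second temporary table and sorted there.

  CREATE TABLE t3 (a INT, b INT);
  SELECT a, SUM(b) FROM t3 GROUP BY a ORDER BY SUM(b) DESC;  -- ORDER BY differs from GROUP BY
  SELECT DISTINCT SEC_TO_TIME(SUM(b)) FROM t3 GROUP BY a;    -- DISTINCT on a function of an aggregate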
+ */ + if (join_tab->is_using_loose_index_scan()) + tmp_table_param.precomputed_group_by= TRUE; - select_lex->mark_const_derived(zero_result_cause); - DBUG_RETURN(0); -} + tmp_table_param.hidden_field_count= + curr_all_fields->elements - curr_fields_list->elements; + ORDER *dummy= NULL; //TODO can use table->group here also + if (create_postjoin_aggr_table(curr_tab, + curr_all_fields, dummy, true, + distinct, keep_row_order)) + DBUG_RETURN(true); -/** - Create and initialize objects neeed for the execution of a query plan. - Evaluate constant expressions not evaluated during optimization. -*/ + if (group_list) + { + if (!only_const_tables()) // No need to sort a single row + { + if (add_sorting_to_table(curr_tab - 1, group_list)) + DBUG_RETURN(true); + } -int JOIN::init_execution() -{ - DBUG_ENTER("JOIN::init_execution"); + if (make_group_fields(this, this)) + DBUG_RETURN(true); + } - DBUG_ASSERT(optimized); - DBUG_ASSERT(!(select_options & SELECT_DESCRIBE)); - initialized= true; + // Setup sum funcs only when necessary, otherwise we might break info + // for the first table + if (group_list || tmp_table_param.sum_func_count) + { + if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true)) + DBUG_RETURN(true); + if (prepare_sum_aggregators(sum_funcs, + !join_tab->is_using_agg_loose_index_scan())) + DBUG_RETURN(true); + group_list= NULL; + if (setup_sum_funcs(thd, sum_funcs)) + DBUG_RETURN(true); + } + // No sum funcs anymore + DBUG_ASSERT(items2.is_null()); - /* - Enable LIMIT ROWS EXAMINED during query execution if: - (1) This JOIN is the outermost query (not a subquery or derived table) - This ensures that the limit is enabled when actual execution begins, - and not if a subquery is evaluated during optimization of the outer - query. - (2) This JOIN is not the result of a UNION. In this case do not apply the - limit in order to produce the partial query result stored in the - UNION temp table. - */ - if (!select_lex->outer_select() && // (1) - select_lex != select_lex->master_unit()->fake_select_lex) // (2) - thd->lex->set_limit_rows_examined(); + items2= ref_ptr_array_slice(3); + if (change_to_use_tmp_fields(thd, items2, + tmp_fields_list2, tmp_all_fields2, + fields_list.elements, tmp_all_fields1)) + DBUG_RETURN(true); - /* Create a tmp table if distinct or if the sort is too complicated */ - if (need_tmp && !exec_tmp_table1) - { - DBUG_PRINT("info",("Creating tmp table")); - THD_STAGE_INFO(thd, stage_creating_tmp_table); + curr_fields_list= &tmp_fields_list2; + curr_all_fields= &tmp_all_fields2; + set_items_ref_array(items2); + curr_tab->ref_array= &items2; + curr_tab->all_fields= &tmp_all_fields2; + curr_tab->fields= &tmp_fields_list2; + set_postjoin_aggr_write_func(curr_tab); - init_items_ref_array(); + tmp_table_param.field_count+= tmp_table_param.sum_func_count; + tmp_table_param.sum_func_count= 0; + } + if (curr_tab->table->distinct) + select_distinct= false; /* Each row is unique */ - tmp_table_param.hidden_field_count= (all_fields.elements - - fields_list.elements); - ORDER *tmp_group= ((!simple_group && !procedure && - !(test_flags & TEST_NO_KEY_GROUP)) ? group_list : - (ORDER*) 0); - /* - Pushing LIMIT to the temporary table creation is not applicable - when there is ORDER BY or GROUP BY or there is no GROUP BY, but - there are aggregate functions, because in all these cases we need - all result rows. - */ - ha_rows tmp_rows_limit= ((order == 0 || skip_sort_order) && - !tmp_group && - !thd->lex->current_select->with_sum_func) ? 
- select_limit : HA_POS_ERROR; - - if (!(exec_tmp_table1= - create_tmp_table(thd, &tmp_table_param, all_fields, - tmp_group, group_list ? 0 : select_distinct, - group_list && simple_group, - select_options, tmp_rows_limit, ""))) - DBUG_RETURN(1); - explain->ops_tracker.report_tmp_table(exec_tmp_table1); - /* - We don't have to store rows in temp table that doesn't match HAVING if: - - we are sorting the table and writing complete group rows to the - temp table. - - We are using DISTINCT without resolving the distinct as a GROUP BY - on all columns. - - If having is not handled here, it will be checked before the row - is sent to the client. - */ - if (tmp_having && - (sort_and_group || (exec_tmp_table1->distinct && !group_list))) - having= tmp_having; - - /* if group or order on first table, sort first */ - if (group_list && simple_group) - { - DBUG_PRINT("info",("Sorting for group")); - THD_STAGE_INFO(thd, stage_sorting_for_group); - if (create_sort_index(thd, this, group_list, - HA_POS_ERROR, HA_POS_ERROR, FALSE) || - alloc_group_fields(this, group_list) || - make_sum_func_list(all_fields, fields_list, 1) || - prepare_sum_aggregators(sum_funcs, need_distinct) || - setup_sum_funcs(thd, sum_funcs)) + if (select_distinct && !group_list) + { + if (having) { - DBUG_RETURN(1); + curr_tab->having= having; + having->update_used_tables(); } - group_list=0; + curr_tab->distinct= true; + having= NULL; + select_distinct= false; + } + /* Clean tmp_table_param for the next tmp table. */ + tmp_table_param.field_count= tmp_table_param.sum_func_count= + tmp_table_param.func_count= 0; + + tmp_table_param.copy_field= tmp_table_param.copy_field_end=0; + first_record= sort_and_group=0; + + if (!group_optimized_away) + { + group= false; } else { - if (make_sum_func_list(all_fields, fields_list, 0) || - prepare_sum_aggregators(sum_funcs, need_distinct) || - setup_sum_funcs(thd, sum_funcs)) + /* + If grouping has been optimized away, a temporary table is + normally not needed unless we're explicitly requested to create + one (e.g. due to a SQL_BUFFER_RESULT hint or INSERT ... SELECT). + + In this case (grouping was optimized away), temp_table was + created without a grouping expression and JOIN::exec() will not + perform the necessary grouping (by the use of end_send_group() + or end_write_group()) if JOIN::group is set to false. 
+ */ + // the temporary table was explicitly requested + DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT)); + // the temporary table does not have a grouping expression + DBUG_ASSERT(!curr_tab->table->group); + } + calc_group_buffer(this, group_list); + count_field_types(select_lex, &tmp_table_param, *curr_all_fields, false); + } + + if (group || implicit_grouping || tmp_table_param.sum_func_count) + { + if (make_group_fields(this, this)) + DBUG_RETURN(true); + + DBUG_ASSERT(items3.is_null()); + + if (items0.is_null()) + init_items_ref_array(); + items3= ref_ptr_array_slice(4); + setup_copy_fields(thd, &tmp_table_param, + items3, tmp_fields_list3, tmp_all_fields3, + curr_fields_list->elements, *curr_all_fields); + + curr_fields_list= &tmp_fields_list3; + curr_all_fields= &tmp_all_fields3; + set_items_ref_array(items3); + if (join_tab) + { + JOIN_TAB *last_tab= join_tab + top_join_tab_count + aggr_tables - 1; + // Set grouped fields on the last table + last_tab->ref_array= &items3; + last_tab->all_fields= &tmp_all_fields3; + last_tab->fields= &tmp_fields_list3; + } + if (make_sum_func_list(*curr_all_fields, *curr_fields_list, true, true)) + DBUG_RETURN(true); + if (prepare_sum_aggregators(sum_funcs, + !join_tab || + !join_tab-> is_using_agg_loose_index_scan())) + DBUG_RETURN(true); + if (setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error) + DBUG_RETURN(true); + } + if (group_list || order) + { + DBUG_PRINT("info",("Sorting for send_result_set_metadata")); + THD_STAGE_INFO(thd, stage_sorting_result); + /* If we have already done the group, add HAVING to sorted table */ + if (tmp_having && !group_list && !sort_and_group) + { + // Some tables may have been const + tmp_having->update_used_tables(); + table_map used_tables= (const_table_map | curr_tab->table->map); + + Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables, + (table_map) 0, false, + false, false); + if (sort_table_cond) { - DBUG_RETURN(1); + if (!curr_tab->select) + if (!(curr_tab->select= new SQL_SELECT)) + DBUG_RETURN(true); + if (!curr_tab->select->cond) + curr_tab->select->cond= sort_table_cond; + else + { + if (!(curr_tab->select->cond= + new (thd->mem_root) Item_cond_and(thd, curr_tab->select->cond, + sort_table_cond))) + DBUG_RETURN(true); + } + if (curr_tab->pre_idx_push_select_cond) + { + if (sort_table_cond->type() == Item::COND_ITEM) + sort_table_cond= sort_table_cond->copy_andor_structure(thd); + if (!(curr_tab->pre_idx_push_select_cond= + new (thd->mem_root) Item_cond_and(thd, + curr_tab->pre_idx_push_select_cond, + sort_table_cond))) + DBUG_RETURN(true); + } + if (curr_tab->select->cond && !curr_tab->select->cond->fixed) + curr_tab->select->cond->fix_fields(thd, 0); + if (curr_tab->pre_idx_push_select_cond && + !curr_tab->pre_idx_push_select_cond->fixed) + curr_tab->pre_idx_push_select_cond->fix_fields(thd, 0); + curr_tab->select->pre_idx_push_select_cond= + curr_tab->pre_idx_push_select_cond; + curr_tab->set_select_cond(curr_tab->select->cond, __LINE__); + curr_tab->select_cond->top_level_item(); + DBUG_EXECUTE("where",print_where(curr_tab->select->cond, + "select and having", + QT_ORDINARY);); + + having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0, + ~used_tables, false, false, false); + DBUG_EXECUTE("where", + print_where(having, "having after sort", QT_ORDINARY);); } + } - if (!group_list && ! 
exec_tmp_table1->distinct && order && simple_order) + if (group) + select_limit= HA_POS_ERROR; + else if (!need_tmp) + { + /* + We can abort sorting after thd->select_limit rows if there are no + filter conditions for any tables after the sorted one. + Filter conditions come in several forms: + 1. as a condition item attached to the join_tab, or + 2. as a keyuse attached to the join_tab (ref access). + */ + for (uint i= const_tables + 1; i < top_join_tab_count; i++) { - DBUG_PRINT("info",("Sorting for order")); - THD_STAGE_INFO(thd, stage_sorting_for_order); - if (create_sort_index(thd, this, order, - HA_POS_ERROR, HA_POS_ERROR, TRUE)) + JOIN_TAB *const tab= join_tab + i; + if (tab->select_cond || // 1 + (tab->keyuse && !tab->first_inner)) // 2 { - DBUG_RETURN(1); + /* We have to sort all rows */ + select_limit= HA_POS_ERROR; + break; } - order=0; } } - /* - Optimize distinct when used on some of the tables - SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b - In this case we can stop scanning t2 when we have found one t1.a + Here we add sorting stage for ORDER BY/GROUP BY clause, if the + optimiser chose FILESORT to be faster than INDEX SCAN or there is + no suitable index present. + OPTION_FOUND_ROWS supersedes LIMIT and is taken into account. */ + DBUG_PRINT("info",("Sorting for order by/group by")); + ORDER *order_arg= group_list ? group_list : order; + if (join_tab && + ordered_index_usage != + (group_list ? ordered_index_group_by : ordered_index_order_by) && + curr_tab->type != JT_CONST && + curr_tab->type != JT_EQ_REF) // Don't sort 1 row + { + // Sort either first non-const table or the last tmp table + JOIN_TAB *sort_tab= curr_tab; + + if (add_sorting_to_table(sort_tab, order_arg)) + DBUG_RETURN(true); + /* + filesort_limit: Return only this many rows from filesort(). + We can use select_limit_cnt only if we have no group_by and 1 table. + This allows us to use Bounded_queue for queries like: + "select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;" + m_select_limit == HA_POS_ERROR (we need a full table scan) + unit->select_limit_cnt == 1 (we only need one row in the result set) + */ + sort_tab->filesort->limit= + (has_group_by || (join_tab + table_count > curr_tab + 1)) ? + select_limit : unit->select_limit_cnt; + } + if (!only_const_tables() && + !join_tab[const_tables].filesort && + !(select_options & SELECT_DESCRIBE)) + { + /* + If no IO cache exists for the first table then we are using an + INDEX SCAN and no filesort. Thus we should not remove the sorted + attribute on the INDEX SCAN. + */ + skip_sort_order= true; + } + } + + /* + Window functions computation step should be attached to the last join_tab + that's doing aggregation. + The last join_tab reads the data from the temp. table. It also may do + - sorting + - duplicate value removal + Both of these operations are done after window function computation step. 
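A sketch of a query that exercises the window-function step described above (table assumed; requires a server version with window functions): the window function is computed at the last aggregation join_tab, after grouping, and any final ordering or duplicate removal happens afterwards.

  CREATE TABLE t4 (a INT, b INT);
  SELECT a, SUM(b) AS s, RANK() OVER (ORDER BY SUM(b)) AS r
  FROM t4 GROUP BY a ORDER BY r;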
+ */ + curr_tab= join_tab + top_join_tab_count + aggr_tables - 1; + if (select_lex->window_funcs.elements) + { + curr_tab->window_funcs_step= new Window_funcs_computation; + if (curr_tab->window_funcs_step->setup(thd, &select_lex->window_funcs, + curr_tab)) + DBUG_RETURN(true); + } + + fields= curr_fields_list; + // Reset before execution + set_items_ref_array(items0); + if (join_tab) + join_tab[top_join_tab_count + aggr_tables - 1].next_select= + setup_end_select_func(this, NULL); + group= has_group_by; + + DBUG_RETURN(false); +} + + + +bool +JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List *table_fields, + ORDER *table_group, + bool save_sum_fields, + bool distinct, + bool keep_row_order) +{ + DBUG_ENTER("JOIN::create_postjoin_aggr_table"); + THD_STAGE_INFO(thd, stage_creating_tmp_table); + + /* + Pushing LIMIT to the post-join temporary table creation is not applicable + when there is ORDER BY or GROUP BY or there is no GROUP BY, but + there are aggregate functions, because in all these cases we need + all result rows. + */ + ha_rows table_rows_limit= ((order == NULL || skip_sort_order) && + !table_group && + !select_lex->with_sum_func) ? + select_limit : HA_POS_ERROR; + + tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param); + tab->tmp_table_param->skip_create_table= true; + TABLE* table= create_tmp_table(thd, tab->tmp_table_param, *table_fields, + table_group, distinct, + save_sum_fields, select_options, table_rows_limit, + "", true, keep_row_order); + if (!table) + DBUG_RETURN(true); + tmp_table_param.using_outer_summary_function= + tab->tmp_table_param->using_outer_summary_function; + tab->join= this; + DBUG_ASSERT(tab > tab->join->join_tab); + (tab - 1)->next_select= sub_select_postjoin_aggr; + tab->aggr= new (thd->mem_root) AGGR_OP(tab); + if (!tab->aggr) + goto err; + tab->table= table; + table->reginfo.join_tab= tab; + + /* if group or order on first table, sort first */ + if (group_list && simple_group) + { + DBUG_PRINT("info",("Sorting for group")); + THD_STAGE_INFO(thd, stage_sorting_for_group); + + if (ordered_index_usage != ordered_index_group_by && + (join_tab + const_tables)->type != JT_CONST && // Don't sort 1 row + add_sorting_to_table(join_tab + const_tables, group_list)) + goto err; + + if (alloc_group_fields(this, group_list)) + goto err; + if (make_sum_func_list(all_fields, fields_list, true)) + goto err; + if (prepare_sum_aggregators(sum_funcs, + !join_tab->is_using_agg_loose_index_scan())) + goto err; + if (setup_sum_funcs(thd, sum_funcs)) + goto err; + group_list= NULL; + } + else + { + if (make_sum_func_list(all_fields, fields_list, false)) + goto err; + if (prepare_sum_aggregators(sum_funcs, + !join_tab->is_using_agg_loose_index_scan())) + goto err; + if (setup_sum_funcs(thd, sum_funcs)) + goto err; - if (exec_tmp_table1->distinct) + if (!group_list && !table->distinct && order && simple_order) { - table_map used_tables= select_list_used_tables; - JOIN_TAB *last_join_tab= join_tab + top_join_tab_count - 1; - do - { - if (used_tables & last_join_tab->table->map || - last_join_tab->use_join_cache) - break; - last_join_tab->shortcut_for_distinct= true; - } while (last_join_tab-- != join_tab); - /* Optimize "select distinct b from t1 order by key_part_1 limit #" */ - if (order && skip_sort_order) - { - /* Should always succeed */ - if (test_if_skip_sort_order(&join_tab[const_tables], - order, unit->select_limit_cnt, 0, - &join_tab[const_tables].table-> - keys_in_use_for_order_by)) - order=0; - join_tab[const_tables].update_explain_data(const_tables); - } 
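The DISTINCT shortcut handled above can be shown with an assumed two-table join, following the shape named in the removed comment: once a distinct t1.a value has been produced, the inner table does not need to be scanned further for that row.

  CREATE TABLE t1 (a INT, b INT);
  CREATE TABLE t2 (b INT);
  SELECT DISTINCT t1.a FROM t1, t2 WHERE t1.b = t2.b;
  -- t2 is abandoned early (shortcut_for_distinct) as soon as one match is found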
+ DBUG_PRINT("info",("Sorting for order")); + THD_STAGE_INFO(thd, stage_sorting_for_order); + + if (ordered_index_usage != ordered_index_order_by && + add_sorting_to_table(join_tab + const_tables, order)) + goto err; + order= NULL; } + } + + DBUG_RETURN(false); + +err: + if (table != NULL) + free_tmp_table(thd, table); + DBUG_RETURN(true); +} - /* If this join belongs to an uncacheable query save the original join */ - if (select_lex->uncacheable && init_save_join_tab()) - DBUG_RETURN(-1); /* purecov: inspected */ + +void +JOIN::optimize_distinct() +{ + for (JOIN_TAB *last_join_tab= join_tab + top_join_tab_count - 1; ;) + { + if (select_lex->select_list_tables & last_join_tab->table->map || + last_join_tab->use_join_cache) + break; + last_join_tab->shortcut_for_distinct= true; + if (last_join_tab == join_tab) + break; + --last_join_tab; } - DBUG_RETURN(0); + /* Optimize "select distinct b from t1 order by key_part_1 limit #" */ + if (order && skip_sort_order) + { + /* Should already have been optimized away */ + DBUG_ASSERT(ordered_index_usage == ordered_index_order_by); + if (ordered_index_usage == ordered_index_order_by) + { + order= NULL; + } + } +} + + +/** + @brief Add Filesort object to the given table to sort if with filesort + + @param tab the JOIN_TAB object to attach created Filesort object to + @param order List of expressions to sort the table by + + @note This function moves tab->select, if any, to filesort->select + + @return false on success, true on OOM +*/ + +bool +JOIN::add_sorting_to_table(JOIN_TAB *tab, ORDER *order) +{ + tab->filesort= + new (thd->mem_root) Filesort(order, HA_POS_ERROR, tab->keep_current_rowid, + tab->select); + if (!tab->filesort) + return true; + /* + Select was moved to filesort->select to force join_init_read_record to use + sorted result instead of reading table through select. + */ + if (tab->select) + { + tab->select= NULL; + tab->set_select_cond(NULL, __LINE__); + } + tab->read_first_record= join_init_read_record; + return false; } + + /** Setup expression caches for subqueries that need them @@ -2291,17 +2933,6 @@ bool JOIN::setup_subquery_caches() } -/** - Restore values in temporary join. -*/ -void JOIN::restore_tmp() -{ - DBUG_PRINT("info", ("restore_tmp this %p tmp_join %p", this, tmp_join)); - DBUG_ASSERT(tmp_join != this); - memcpy(tmp_join, this, (size_t) sizeof(JOIN)); -} - - /* Shrink join buffers used for preceding tables to reduce the occupied space @@ -2366,25 +2997,29 @@ JOIN::reinit() unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ? 
select_lex->offset_limit->val_uint() : 0); - first_record= 0; + first_record= false; + group_sent= false; cleaned= false; - if (exec_tmp_table1) + if (aggr_tables) { - exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE); - exec_tmp_table1->file->ha_delete_all_rows(); - } - if (exec_tmp_table2) - { - exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE); - exec_tmp_table2->file->ha_delete_all_rows(); + JOIN_TAB *curr_tab= join_tab + top_join_tab_count; + JOIN_TAB *end_tab= curr_tab + aggr_tables; + for ( ; curr_tab < end_tab; curr_tab++) + { + TABLE *tmp_table= curr_tab->table; + if (!tmp_table->is_created()) + continue; + tmp_table->file->extra(HA_EXTRA_RESET_STATE); + tmp_table->file->ha_delete_all_rows(); + } } clear_sj_tmp_tables(this); - if (items0) + if (current_ref_ptrs != items0) + { set_items_ref_array(items0); - - if (join_tab_save) - memcpy(join_tab, join_tab_save, sizeof(JOIN_TAB) * table_count); + set_group_rpa= false; + } /* need to reset ref access state (see join_read_key) */ if (join_tab) @@ -2397,9 +3032,6 @@ JOIN::reinit() } } - if (tmp_join) - restore_tmp(); - /* Reset of sum functions */ if (sum_funcs) { @@ -2424,38 +3056,40 @@ JOIN::reinit() DBUG_RETURN(0); } + /** - @brief Save the original join layout - - @details Saves the original join layout so it can be reused in - re-execution and for EXPLAIN. - - @return Operation status - @retval 0 success. - @retval 1 error occurred. + Prepare join result. + + @details Prepare join result prior to join execution or describing. + Instantiate derived tables and get schema tables result if necessary. + + @return + TRUE An error during derived or schema tables instantiation. + FALSE Ok */ -bool -JOIN::init_save_join_tab() +bool JOIN::prepare_result(List **columns_list) { - if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN)))) - return 1; /* purecov: inspected */ - error= 0; // Ensure that tmp_join.error= 0 - restore_tmp(); - return 0; -} + DBUG_ENTER("JOIN::prepare_result"); + error= 0; + /* Create result tables for materialized views. */ + if (!zero_result_cause && + select_lex->handle_derived(thd->lex, DT_CREATE)) + goto err; -bool -JOIN::save_join_tab() -{ - if (!join_tab_save && select_lex->master_unit()->uncacheable) - { - if (!(join_tab_save= (JOIN_TAB*)thd->memdup((uchar*) join_tab, - sizeof(JOIN_TAB) * table_count))) - return 1; - } - return 0; + if (result->prepare2()) + goto err; + + if ((select_lex->options & OPTION_SCHEMA_TABLE) && + get_schema_tables_result(this, PROCESSED_BY_JOIN_EXEC)) + goto err; + + DBUG_RETURN(FALSE); + +err: + error= 1; + DBUG_RETURN(TRUE); } @@ -2496,6 +3130,14 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite, Explain_union *eu= output->get_union(nr); explain= &eu->fake_select_lex_explain; join_tab[0].tracker= eu->get_fake_select_lex_tracker(); + for (uint i=0 ; i < top_join_tab_count + aggr_tables; i++) + { + if (join_tab[i].filesort) + { + join_tab[i].filesort->tracker= + new Filesort_tracker(thd->lex->analyze_stmt); + } + } } } @@ -2509,7 +3151,6 @@ void JOIN::exec() dbug_serve_apcs(thd, 1); ); ANALYZE_START_TRACKING(&explain->time_tracker); - explain->ops_tracker.report_join_start(); exec_inner(); ANALYZE_STOP_TRACKING(&explain->time_tracker); @@ -2522,29 +3163,26 @@ void JOIN::exec() } -/** - Exec select. - - @todo - Note, that create_sort_index calls test_if_skip_sort_order and may - finally replace sorting with index scan if there is a LIMIT clause in - the query. It's never shown in EXPLAIN! - - @todo - When can we have here thd->net.report_error not zero? 
-*/ - void JOIN::exec_inner() { List *columns_list= &fields_list; - int tmp_error; - - DBUG_ENTER("JOIN::exec"); - - const bool has_group_by= this->group; + DBUG_ENTER("JOIN::exec_inner"); THD_STAGE_INFO(thd, stage_executing); - error= 0; + + /* + Enable LIMIT ROWS EXAMINED during query execution if: + (1) This JOIN is the outermost query (not a subquery or derived table) + This ensures that the limit is enabled when actual execution begins, and + not if a subquery is evaluated during optimization of the outer query. + (2) This JOIN is not the result of a UNION. In this case do not apply the + limit in order to produce the partial query result stored in the + UNION temp table. + */ + if (!select_lex->outer_select() && // (1) + select_lex != select_lex->master_unit()->fake_select_lex) // (2) + thd->lex->set_limit_rows_examined(); + if (procedure) { procedure_fields_list= fields_list; @@ -2565,13 +3203,16 @@ void JOIN::exec_inner() if (select_options & SELECT_DESCRIBE) select_describe(this, FALSE, FALSE, FALSE, (zero_result_cause?zero_result_cause:"No tables used")); + else { if (result->send_result_set_metadata(*columns_list, - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) { DBUG_VOID_RETURN; } + /* We have to test for 'conds' here as the WHERE may not be constant even if we don't have any tables for prepared statements or if @@ -2638,7 +3279,7 @@ void JOIN::exec_inner() having ? having : tmp_having, all_fields); DBUG_VOID_RETURN; } - + /* Evaluate all constant expressions with subqueries in the ORDER/GROUP clauses to make sure that all subqueries return a @@ -2667,42 +3308,6 @@ void JOIN::exec_inner() if (select_options & SELECT_DESCRIBE) { - /* - Check if we managed to optimize ORDER BY away and don't use temporary - table to resolve ORDER BY: in that case, we only may need to do - filesort for GROUP BY. - */ - if (!order && !no_order && (!skip_sort_order || !need_tmp)) - { - /* - Reset 'order' to 'group_list' and reinit variables describing - 'order' - */ - order= group_list; - simple_order= simple_group; - skip_sort_order= 0; - } - if (order && join_tab) - { - bool made_call= false; - SQL_SELECT *tmp_select= join_tab[const_tables].select; - if ((order != group_list || - !(select_options & SELECT_BIG_RESULT) || - (tmp_select && tmp_select->quick && - tmp_select->quick->get_type() == - QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) && - (const_tables == table_count || - ((simple_order || skip_sort_order) && - (made_call=true) && - test_if_skip_sort_order(&join_tab[const_tables], order, - select_limit, 0, - &join_tab[const_tables].table-> - keys_in_use_for_query)))) - order=0; - if (made_call) - join_tab[const_tables].update_explain_data(const_tables); - } - having= tmp_having; select_describe(this, need_tmp, order != 0 && !skip_sort_order, select_distinct, @@ -2715,537 +3320,31 @@ void JOIN::exec_inner() select_lex->mark_const_derived(zero_result_cause); } - if (!initialized && init_execution()) - DBUG_VOID_RETURN; - - JOIN *curr_join= this; - List *curr_all_fields= &all_fields; - List *curr_fields_list= &fields_list; - TABLE *curr_tmp_table= 0; - /* - curr_join->join_free() will call JOIN::cleanup(full=TRUE). It will not - be safe to call update_used_tables() after that. - */ - if (curr_join->tmp_having) - curr_join->tmp_having->update_used_tables(); - /* Initialize examined rows here because the values from all join parts must be accumulated in examined_row_count. Hence every join iteration must count from zero. 
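For the LIMIT ROWS EXAMINED handling enabled near the top of exec_inner() above, a sketch of the syntax involved (table assumed); the limit is armed only for the outermost SELECT, and the per-iteration examined-row counts accumulated here feed the same accounting.

  CREATE TABLE t5 (a INT, b INT);
  SELECT * FROM t5 WHERE b > 5 LIMIT 10 ROWS EXAMINED 1000;
  -- execution stops once about 1000 rows have been examined across the whole join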
*/ - curr_join->join_examined_rows= 0; - - curr_join->do_select_call_count= 0; - - /* Create a tmp table if distinct or if the sort is too complicated */ - if (need_tmp) - { - if (tmp_join) - { - /* - We are in a non cacheable sub query. Get the saved join structure - after optimization. - (curr_join may have been modified during last exection and we need - to reset it) - */ - curr_join= tmp_join; - } - curr_tmp_table= exec_tmp_table1; - - /* Copy data to the temporary table */ - THD_STAGE_INFO(thd, stage_copying_to_tmp_table); - DBUG_PRINT("info", ("%s", thd->proc_info)); - if (!curr_join->sort_and_group && - curr_join->const_tables != curr_join->table_count) - { - JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables; - first_tab->sorted= MY_TEST(first_tab->loosescan_match_tab); - } - - Procedure *save_proc= curr_join->procedure; - tmp_error= do_select(curr_join, (List *) 0, curr_tmp_table, 0); - curr_join->procedure= save_proc; - if (tmp_error) - { - error= tmp_error; - DBUG_VOID_RETURN; - } - curr_tmp_table->file->info(HA_STATUS_VARIABLE); - - if (curr_join->having) - curr_join->having= curr_join->tmp_having= 0; // Allready done - - /* Change sum_fields reference to calculated fields in tmp_table */ -#ifdef HAVE_valgrind - if (curr_join != this) -#endif - curr_join->all_fields= *curr_all_fields; - if (!items1) - { - items1= items0 + all_fields.elements; - if (sort_and_group || curr_tmp_table->group || - tmp_table_param.precomputed_group_by) - { - if (change_to_use_tmp_fields(thd, items1, - tmp_fields_list1, tmp_all_fields1, - fields_list.elements, all_fields)) - DBUG_VOID_RETURN; - } - else - { - if (change_refs_to_tmp_fields(thd, items1, - tmp_fields_list1, tmp_all_fields1, - fields_list.elements, all_fields)) - DBUG_VOID_RETURN; - } -#ifdef HAVE_valgrind - if (curr_join != this) -#endif - { - curr_join->tmp_all_fields1= tmp_all_fields1; - curr_join->tmp_fields_list1= tmp_fields_list1; - } - curr_join->items1= items1; - } - curr_all_fields= &tmp_all_fields1; - curr_fields_list= &tmp_fields_list1; - curr_join->set_items_ref_array(items1); - - if (sort_and_group || curr_tmp_table->group) - { - curr_join->tmp_table_param.field_count+= - curr_join->tmp_table_param.sum_func_count+ - curr_join->tmp_table_param.func_count; - curr_join->tmp_table_param.sum_func_count= - curr_join->tmp_table_param.func_count= 0; - } - else - { - curr_join->tmp_table_param.field_count+= - curr_join->tmp_table_param.func_count; - curr_join->tmp_table_param.func_count= 0; - } - - // procedure can't be used inside subselect => we do nothing special for it - if (procedure) - procedure->update_refs(); - - if (curr_tmp_table->group) - { // Already grouped - if (!curr_join->order && !curr_join->no_order && !skip_sort_order) - curr_join->order= curr_join->group_list; /* order by group */ - curr_join->group_list= 0; - } - - /* - If we have different sort & group then we must sort the data by group - and copy it to another tmp table - This code is also used if we are using distinct something - we haven't been able to store in the temporary table yet - like SEC_TO_TIME(SUM(...)). - */ - - if ((curr_join->group_list && (!test_if_subpart(curr_join->group_list, - curr_join->order) || - curr_join->select_distinct)) || - (curr_join->select_distinct && - curr_join->tmp_table_param.using_indirect_summary_function)) - { /* Must copy to another table */ - DBUG_PRINT("info",("Creating group table")); - - /* Free first data from old join */ - - /* - psergey-todo: this is the place of pre-mature JOIN::free call. 
- */ - curr_join->join_free(); - if (curr_join->make_simple_join(this, curr_tmp_table)) - DBUG_VOID_RETURN; - calc_group_buffer(curr_join, group_list); - count_field_types(select_lex, &curr_join->tmp_table_param, - curr_join->tmp_all_fields1, - curr_join->select_distinct && !curr_join->group_list); - curr_join->tmp_table_param.hidden_field_count= - (curr_join->tmp_all_fields1.elements- - curr_join->tmp_fields_list1.elements); - - if (exec_tmp_table2) - curr_tmp_table= exec_tmp_table2; - else - { - /* group data to new table */ - - /* - If the access method is loose index scan then all MIN/MAX - functions are precomputed, and should be treated as regular - functions. See extended comment in JOIN::exec. - */ - if (curr_join->join_tab->is_using_loose_index_scan()) - curr_join->tmp_table_param.precomputed_group_by= TRUE; - - if (!(curr_tmp_table= - exec_tmp_table2= create_tmp_table(thd, - &curr_join->tmp_table_param, - *curr_all_fields, - (ORDER*) 0, - curr_join->select_distinct && - !curr_join->group_list, - 1, curr_join->select_options, - HA_POS_ERROR, ""))) - DBUG_VOID_RETURN; - curr_join->exec_tmp_table2= exec_tmp_table2; - explain->ops_tracker.report_tmp_table(exec_tmp_table2); - } - if (curr_join->group_list) - { - if (curr_join->join_tab == join_tab && save_join_tab()) - { - DBUG_VOID_RETURN; - } - DBUG_PRINT("info",("Sorting for index")); - THD_STAGE_INFO(thd, stage_creating_sort_index); - if (create_sort_index(thd, curr_join, curr_join->group_list, - HA_POS_ERROR, HA_POS_ERROR, FALSE) || - make_group_fields(this, curr_join)) - { - DBUG_VOID_RETURN; - } - sortorder= curr_join->sortorder; - } - - THD_STAGE_INFO(thd, stage_copying_to_group_table); - DBUG_PRINT("info", ("%s", thd->proc_info)); - if (curr_join != this) - { - if (sum_funcs2) - { - curr_join->sum_funcs= sum_funcs2; - curr_join->sum_funcs_end= sum_funcs_end2; - } - else - { - curr_join->alloc_func_list(); - sum_funcs2= curr_join->sum_funcs; - sum_funcs_end2= curr_join->sum_funcs_end; - } - } - if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1, TRUE) || - prepare_sum_aggregators(curr_join->sum_funcs, - !curr_join->join_tab->is_using_agg_loose_index_scan())) - DBUG_VOID_RETURN; - curr_join->group_list= 0; - if (!curr_join->sort_and_group && - curr_join->const_tables != curr_join->table_count) - { - JOIN_TAB *first_tab= curr_join->join_tab + curr_join->const_tables; - first_tab->sorted= MY_TEST(first_tab->loosescan_match_tab); - } - tmp_error= -1; - if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || - (tmp_error= do_select(curr_join, (List *) 0, curr_tmp_table, - 0))) - { - error= tmp_error; - DBUG_VOID_RETURN; - } - end_read_record(&curr_join->join_tab->read_record); - curr_join->const_tables= curr_join->table_count; // Mark free for cleanup() - curr_join->join_tab[0].table= 0; // Table is freed - - // No sum funcs anymore - if (!items2) - { - items2= items1 + all_fields.elements; - if (change_to_use_tmp_fields(thd, items2, - tmp_fields_list2, tmp_all_fields2, - fields_list.elements, tmp_all_fields1)) - DBUG_VOID_RETURN; -#ifdef HAVE_valgrind - /* - Some GCCs use memcpy() for struct assignment, even for x=x. 
- GCC bug 19410: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410 - */ - if (curr_join != this) -#endif - { - curr_join->tmp_fields_list2= tmp_fields_list2; - curr_join->tmp_all_fields2= tmp_all_fields2; - } - } - curr_fields_list= &curr_join->tmp_fields_list2; - curr_all_fields= &curr_join->tmp_all_fields2; - curr_join->set_items_ref_array(items2); - curr_join->tmp_table_param.field_count+= - curr_join->tmp_table_param.sum_func_count; - curr_join->tmp_table_param.sum_func_count= 0; - } - if (curr_tmp_table->distinct) - curr_join->select_distinct=0; /* Each row is unique */ - - curr_join->join_free(); /* Free quick selects */ + join_examined_rows= 0; - if (curr_join->select_distinct && ! curr_join->group_list) - { - THD_STAGE_INFO(thd, stage_removing_duplicates); - if (remove_duplicates(curr_join, curr_tmp_table, - *curr_fields_list, curr_join->tmp_having)) - DBUG_VOID_RETURN; - curr_join->tmp_having=0; - curr_join->select_distinct=0; - } - curr_tmp_table->reginfo.lock_type= TL_UNLOCK; - // psergey-todo: here is one place where we switch to - if (curr_join->make_simple_join(this, curr_tmp_table)) - DBUG_VOID_RETURN; - calc_group_buffer(curr_join, curr_join->group_list); - count_field_types(select_lex, &curr_join->tmp_table_param, - *curr_all_fields, 0); - - } - if (procedure) - count_field_types(select_lex, &curr_join->tmp_table_param, - *curr_all_fields, 0); - - if (curr_join->group || curr_join->implicit_grouping || - curr_join->tmp_table_param.sum_func_count || - (procedure && (procedure->flags & PROC_GROUP))) - { - if (make_group_fields(this, curr_join)) - { - DBUG_VOID_RETURN; - } - if (!items3) - { - if (!items0) - init_items_ref_array(); - items3= ref_pointer_array + (all_fields.elements*4); - setup_copy_fields(thd, &curr_join->tmp_table_param, - items3, tmp_fields_list3, tmp_all_fields3, - curr_fields_list->elements, *curr_all_fields); - tmp_table_param.save_copy_funcs= curr_join->tmp_table_param.copy_funcs; - tmp_table_param.save_copy_field= curr_join->tmp_table_param.copy_field; - tmp_table_param.save_copy_field_end= - curr_join->tmp_table_param.copy_field_end; -#ifdef HAVE_valgrind - if (curr_join != this) -#endif - { - curr_join->tmp_all_fields3= tmp_all_fields3; - curr_join->tmp_fields_list3= tmp_fields_list3; - } - } - else - { - curr_join->tmp_table_param.copy_funcs= tmp_table_param.save_copy_funcs; - curr_join->tmp_table_param.copy_field= tmp_table_param.save_copy_field; - curr_join->tmp_table_param.copy_field_end= - tmp_table_param.save_copy_field_end; - } - curr_fields_list= &tmp_fields_list3; - curr_all_fields= &tmp_all_fields3; - curr_join->set_items_ref_array(items3); - - if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1, TRUE) || - prepare_sum_aggregators(curr_join->sum_funcs, - !curr_join->join_tab || - !curr_join->join_tab-> - is_using_agg_loose_index_scan()) || - setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || - thd->is_fatal_error) - DBUG_VOID_RETURN; - } - if (curr_join->group_list || curr_join->order) - { - DBUG_PRINT("info",("Sorting for send_result_set_metadata")); - THD_STAGE_INFO(thd, stage_sorting_result); - /* If we have already done the group, add HAVING to sorted table */ - if (curr_join->tmp_having && ! curr_join->group_list && - ! 
curr_join->sort_and_group) - { - JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables]; - table_map used_tables= (curr_join->const_table_map | - curr_table->table->map); - curr_join->tmp_having->update_used_tables(); - - Item* sort_table_cond= make_cond_for_table(thd, curr_join->tmp_having, - used_tables, - (table_map)0, -1, - FALSE, FALSE); - if (sort_table_cond) - { - if (!curr_table->select) - if (!(curr_table->select= new SQL_SELECT)) - DBUG_VOID_RETURN; - if (!curr_table->select->cond) - curr_table->select->cond= sort_table_cond; - else - { - if (!(curr_table->select->cond= - new (thd->mem_root) Item_cond_and(thd, curr_table->select->cond, - sort_table_cond))) - DBUG_VOID_RETURN; - } - if (curr_table->pre_idx_push_select_cond) - { - if (sort_table_cond->type() == Item::COND_ITEM) - sort_table_cond= sort_table_cond->copy_andor_structure(thd); - if (!(curr_table->pre_idx_push_select_cond= - new (thd->mem_root) Item_cond_and(thd, curr_table->pre_idx_push_select_cond, - sort_table_cond))) - DBUG_VOID_RETURN; - } - if (curr_table->select->cond && !curr_table->select->cond->fixed) - curr_table->select->cond->fix_fields(thd, 0); - if (curr_table->pre_idx_push_select_cond && - !curr_table->pre_idx_push_select_cond->fixed) - curr_table->pre_idx_push_select_cond->fix_fields(thd, 0); - - curr_table->select->pre_idx_push_select_cond= - curr_table->pre_idx_push_select_cond; - curr_table->set_select_cond(curr_table->select->cond, __LINE__); - curr_table->select_cond->top_level_item(); - DBUG_EXECUTE("where",print_where(curr_table->select->cond, - "select and having", - QT_ORDINARY);); - curr_join->tmp_having= make_cond_for_table(thd, curr_join->tmp_having, - ~ (table_map) 0, - ~used_tables, -1, - FALSE, FALSE); - DBUG_EXECUTE("where",print_where(curr_join->tmp_having, - "having after sort", - QT_ORDINARY);); - } - } - { - if (group) - curr_join->select_limit= HA_POS_ERROR; - else - { - /* - We can abort sorting after thd->select_limit rows if we there is no - WHERE clause for any tables after the sorted one. - */ - JOIN_TAB *curr_table= &curr_join->join_tab[curr_join->const_tables+1]; - JOIN_TAB *end_table= &curr_join->join_tab[curr_join->top_join_tab_count]; - for (; curr_table < end_table ; curr_table++) - { - /* - table->keyuse is set in the case there was an original WHERE clause - on the table that was optimized away. - */ - if (curr_table->select_cond || - (curr_table->keyuse && !curr_table->first_inner)) - { - /* We have to sort all rows */ - curr_join->select_limit= HA_POS_ERROR; - break; - } - } - } - if (curr_join->join_tab == join_tab && save_join_tab()) - { - DBUG_VOID_RETURN; - } - /* - Here we sort rows for ORDER BY/GROUP BY clause, if the optimiser - chose FILESORT to be faster than INDEX SCAN or there is no - suitable index present. - Note, that create_sort_index calls test_if_skip_sort_order and may - finally replace sorting with index scan if there is a LIMIT clause in - the query. XXX: it's never shown in EXPLAIN! - OPTION_FOUND_ROWS supersedes LIMIT and is taken into account. - */ - DBUG_PRINT("info",("Sorting for order by/group by")); - ORDER *order_arg= - curr_join->group_list ? curr_join->group_list : curr_join->order; - /* - filesort_limit: Return only this many rows from filesort(). - We can use select_limit_cnt only if we have no group_by and 1 table. 
- This allows us to use Bounded_queue for queries like: - "select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;" - select_limit == HA_POS_ERROR (we need a full table scan) - unit->select_limit_cnt == 1 (we only need one row in the result set) - */ - const ha_rows filesort_limit_arg= - (has_group_by || curr_join->table_count > 1) - ? curr_join->select_limit : unit->select_limit_cnt; - const ha_rows select_limit_arg= - select_options & OPTION_FOUND_ROWS - ? HA_POS_ERROR : unit->select_limit_cnt; - curr_join->filesort_found_rows= filesort_limit_arg != HA_POS_ERROR; - - DBUG_PRINT("info", ("has_group_by %d " - "curr_join->table_count %d " - "curr_join->m_select_limit %d " - "unit->select_limit_cnt %d", - has_group_by, - curr_join->table_count, - (int) curr_join->select_limit, - (int) unit->select_limit_cnt)); - if (create_sort_index(thd, - curr_join, - order_arg, - filesort_limit_arg, - select_limit_arg, - curr_join->group_list ? FALSE : TRUE)) - DBUG_VOID_RETURN; - sortorder= curr_join->sortorder; - if (curr_join->const_tables != curr_join->table_count && - !curr_join->join_tab[curr_join->const_tables].filesort) - { - /* - If no filesort for the first table then we are using an - INDEX SCAN. Thus we should not remove the sorted attribute - on the INDEX SCAN. - */ - skip_sort_order= 1; - } - } - } /* XXX: When can we have here thd->is_error() not zero? */ if (thd->is_error()) { error= thd->is_error(); DBUG_VOID_RETURN; } - curr_join->having= curr_join->tmp_having; - curr_join->fields= curr_fields_list; - curr_join->procedure= procedure; THD_STAGE_INFO(thd, stage_sending_data); DBUG_PRINT("info", ("%s", thd->proc_info)); - result->send_result_set_metadata((procedure ? curr_join->procedure_fields_list : - *curr_fields_list), - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); - error= do_select(curr_join, curr_fields_list, NULL, procedure); - if (curr_join->order && curr_join->sortorder && - curr_join->filesort_found_rows) - { - /* Use info provided by filesort. */ - DBUG_ASSERT(curr_join->table_count > curr_join->const_tables); - JOIN_TAB *tab= curr_join->join_tab + curr_join->const_tables; - thd->limit_found_rows= tab->records; - } - + result->send_result_set_metadata( + procedure ? procedure_fields_list : *fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); + error= do_select(this, procedure); /* Accumulate the counts from all join iterations of all join parts. */ - thd->inc_examined_row_count(curr_join->join_examined_rows); + thd->inc_examined_row_count(join_examined_rows); DBUG_PRINT("counts", ("thd->examined_row_count: %lu", (ulong) thd->get_examined_row_count())); - /* - With EXPLAIN EXTENDED we have to restore original ref_array - for a derived table which is always materialized. - We also need to do this when we have temp table(s). - Otherwise we would not be able to print the query correctly. 
- */ - if (items0 && (thd->lex->describe & DESCRIBE_EXTENDED) && - (select_lex->linkage == DERIVED_TABLE_TYPE || - exec_tmp_table1 || exec_tmp_table2)) - set_items_ref_array(items0); - DBUG_VOID_RETURN; } @@ -3263,41 +3362,34 @@ JOIN::destroy() DBUG_ENTER("JOIN::destroy"); select_lex->join= 0; - if (tmp_join) + cond_equal= 0; + having_equal= 0; + + cleanup(1); + + if (join_tab) { - if (join_tab != tmp_join->join_tab) + DBUG_ASSERT(table_count+aggr_tables > 0); + for (JOIN_TAB *tab= first_linear_tab(this, WITH_BUSH_ROOTS, + WITH_CONST_TABLES); + tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { - JOIN_TAB *tab; - for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); - tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) + if (tab->aggr) { - tab->cleanup(); + free_tmp_table(thd, tab->table); + delete tab->tmp_table_param; + tab->tmp_table_param= NULL; + tab->aggr= NULL; } + tab->table= NULL; } - tmp_join->tmp_join= 0; - /* - We need to clean up tmp_table_param for reusable JOINs (having non-zero - and different from self tmp_join) because it's not being cleaned up - anywhere else (as we need to keep the join is reusable). - */ - tmp_table_param.cleanup(); - tmp_join->tmp_table_param.copy_field= 0; - DBUG_RETURN(tmp_join->destroy()); } - cond_equal= 0; - having_equal= 0; - cleanup(1); - /* Cleanup items referencing temporary table columns */ + /* Cleanup items referencing temporary table columns */ cleanup_item_list(tmp_all_fields1); cleanup_item_list(tmp_all_fields3); - if (exec_tmp_table1) - free_tmp_table(thd, exec_tmp_table1); - if (exec_tmp_table2) - free_tmp_table(thd, exec_tmp_table2); - delete select; destroy_sj_tmp_tables(this); - delete_dynamic(&keyuse); + delete_dynamic(&keyuse); delete procedure; DBUG_RETURN(error); } @@ -3362,7 +3454,7 @@ void JOIN::cleanup_item_list(List &items) const */ bool -mysql_select(THD *thd, Item ***rref_pointer_array, +mysql_select(THD *thd, TABLE_LIST *tables, uint wild_num, List &fields, COND *conds, uint og_num, ORDER *order, ORDER *group, Item *having, ORDER *proc_param, ulonglong select_options, @@ -3397,7 +3489,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array, } else { - if ((err= join->prepare(rref_pointer_array, tables, wild_num, + if ((err= join->prepare( tables, wild_num, conds, og_num, order, false, group, having, proc_param, select_lex, unit))) { @@ -3421,7 +3513,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array, DBUG_RETURN(TRUE); THD_STAGE_INFO(thd, stage_init); thd->lex->used_tables=0; - if ((err= join->prepare(rref_pointer_array, tables, wild_num, + if ((err= join->prepare(tables, wild_num, conds, og_num, order, false, group, having, proc_param, select_lex, unit))) { @@ -4224,7 +4316,7 @@ make_join_statistics(JOIN *join, List &tables_list, DEBUG_SYNC(join->thd, "inside_make_join_statistics"); /* Generate an execution plan from the found optimal join order. 
*/ - DBUG_RETURN(join->thd->check_killed() || get_best_combination(join)); + DBUG_RETURN(join->thd->check_killed() || join->get_best_combination()); error: /* @@ -4516,9 +4608,9 @@ add_key_field(JOIN *join, ((join->is_allowed_hash_join_access() && field->hash_join_is_possible() && !(field->table->pos_in_table_list->is_materialized_derived() && - field->table->created)) || + field->table->is_created())) || (field->table->pos_in_table_list->is_materialized_derived() && - !field->table->created && !(field->flags & BLOB_FLAG)))) + !field->table->is_created() && !(field->flags & BLOB_FLAG)))) { optimize= KEY_OPTIMIZE_EQ; } @@ -5725,7 +5817,8 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab) item->walk(&Item::collect_item_field_processor, 0, (uchar*) &indexed_fields); } - else if (is_indexed_agg_distinct(join, &indexed_fields)) + else if (join->tmp_table_param.sum_func_count && + is_indexed_agg_distinct(join, &indexed_fields)) { join->sort_and_group= 1; } @@ -7312,13 +7405,13 @@ double JOIN::get_examined_rows() { double examined_rows; double prev_fanout= 1; - JOIN_TAB *tab= first_breadth_first_optimization_tab(); + JOIN_TAB *tab= first_breadth_first_tab(); JOIN_TAB *prev_tab= tab; examined_rows= tab->get_examined_rows(); - while ((tab= next_breadth_first_tab(first_breadth_first_optimization_tab(), - top_table_access_tabs_count, tab))) + while ((tab= next_breadth_first_tab(first_breadth_first_tab(), + top_join_tab_count, tab))) { prev_fanout *= prev_tab->records_read; examined_rows+= tab->get_examined_rows() * prev_fanout; @@ -8215,6 +8308,7 @@ prev_record_reads(POSITION *positions, uint idx, table_map found_ref) static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab, uint n_top_tabs_count, JOIN_TAB *tab) { + n_top_tabs_count += tab->join->aggr_tables; if (!tab->bush_root_tab) { /* We're at top level. Get the next top-level tab */ @@ -8266,7 +8360,7 @@ static JOIN_TAB *next_breadth_first_tab(JOIN_TAB *first_top_tab, JOIN_TAB *first_explain_order_tab(JOIN* join) { JOIN_TAB* tab; - tab= join->table_access_tabs; + tab= join->join_tab; return (tab->bush_children) ? 
tab->bush_children->start : tab; } @@ -8280,7 +8374,7 @@ JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab) /* Move to next tab in the array we're traversing */ tab++; - if (tab == join->table_access_tabs + join->top_join_tab_count) + if (tab == join->join_tab + join->top_join_tab_count) return NULL; /* Outside SJM nest and reached EOF */ if (tab->bush_children) @@ -8306,7 +8400,7 @@ JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls JOIN_TAB *next_top_level_tab(JOIN *join, JOIN_TAB *tab) { - tab= next_breadth_first_tab(join->first_breadth_first_execution_tab(), + tab= next_breadth_first_tab(join->first_breadth_first_tab(), join->top_join_tab_count, tab); if (tab && tab->bush_root_tab) tab= NULL; @@ -8384,7 +8478,7 @@ JOIN_TAB *next_linear_tab(JOIN* join, JOIN_TAB* tab, } /* If no more JOIN_TAB's on the top level */ - if (++tab == join->join_tab + join->top_join_tab_count) + if (++tab == join->join_tab + join->top_join_tab_count + join->aggr_tables) return NULL; if (include_bush_roots == WITHOUT_BUSH_ROOTS && tab->bush_children) @@ -8478,37 +8572,58 @@ static Item * const null_ptr= NULL; TRUE Out of memory */ -bool -get_best_combination(JOIN *join) +bool JOIN::get_best_combination() { uint tablenr; table_map used_tables; - JOIN_TAB *join_tab,*j; + JOIN_TAB *j; KEYUSE *keyuse; - uint table_count; - THD *thd=join->thd; DBUG_ENTER("get_best_combination"); - table_count=join->table_count; - if (!(join->join_tab=join_tab= - (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*table_count))) + /* + Additional plan nodes for postjoin tmp tables: + 1? + // For GROUP BY + 1? + // For DISTINCT + 1? + // For aggregation functions aggregated in outer query + // when used with distinct + 1? + // For ORDER BY + 1? // buffer result + Up to 2 tmp tables are actually used, but it's hard to tell exact number + at this stage. + */ + uint aggr_tables= (group_list ? 1 : 0) + + (select_distinct ? + (tmp_table_param. using_outer_summary_function ? 2 : 1) : 0) + + (order ? 1 : 0) + + (select_options & (SELECT_BIG_RESULT | OPTION_BUFFER_RESULT) ? 1 : 0) ; + + if (aggr_tables == 0) + aggr_tables= 1; /* For group by pushdown */ + + if (select_lex->window_specs.elements) + aggr_tables++; + + if (aggr_tables > 2) + aggr_tables= 2; + if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)* + (top_join_tab_count + aggr_tables)))) DBUG_RETURN(TRUE); - join->full_join=0; - join->hash_join= FALSE; + full_join=0; + hash_join= FALSE; used_tables= OUTER_REF_TABLE_BIT; // Outer row is already read - fix_semijoin_strategies_for_picked_join_order(join); - + fix_semijoin_strategies_for_picked_join_order(this); + JOIN_TAB_RANGE *root_range; if (!(root_range= new (thd->mem_root) JOIN_TAB_RANGE)) DBUG_RETURN(TRUE); - root_range->start= join->join_tab; + root_range->start= join_tab; /* root_range->end will be set later */ - join->join_tab_ranges.empty(); + join_tab_ranges.empty(); - if (join->join_tab_ranges.push_back(root_range, thd->mem_root)) + if (join_tab_ranges.push_back(root_range, thd->mem_root)) DBUG_RETURN(TRUE); JOIN_TAB *sjm_nest_end= NULL; @@ -8517,7 +8632,7 @@ get_best_combination(JOIN *join) for (j=join_tab, tablenr=0 ; tablenr < table_count ; tablenr++,j++) { TABLE *form; - POSITION *cur_pos= &join->best_positions[tablenr]; + POSITION *cur_pos= &best_positions[tablenr]; if (cur_pos->sj_strategy == SJ_OPT_MATERIALIZE || cur_pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN) { @@ -8528,7 +8643,7 @@ get_best_combination(JOIN *join) in the temptable. 
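For illustration, the aggr_tables upper bound computed in the JOIN::get_best_combination() hunk above is easier to follow in isolation. A minimal standalone sketch, where hypothetical boolean flags stand in for the JOIN members (group_list, select_distinct, order, window_specs, and so on) that the real code inspects:

    #include <cassert>
    #include <cstdio>

    /*
      Rough model of the estimate: count one candidate tmp table per
      post-join step (GROUP BY, DISTINCT, ORDER BY, buffered result),
      never return less than 1, add one for window functions, and cap
      the result at 2 because at most two tmp tables are materialized.
    */
    static unsigned estimate_aggr_tables(bool has_group_by,
                                         bool has_distinct,
                                         bool uses_outer_summary_func,
                                         bool has_order_by,
                                         bool buffers_result,
                                         bool has_window_funcs)
    {
      unsigned n= (has_group_by ? 1 : 0) +
                  (has_distinct ? (uses_outer_summary_func ? 2 : 1) : 0) +
                  (has_order_by ? 1 : 0) +
                  (buffers_result ? 1 : 0);
      if (n == 0)
        n= 1;                    // keep one slot for group-by pushdown
      if (has_window_funcs)
        n++;
      return n > 2 ? 2 : n;      // only up to 2 tmp tables are really used
    }

    int main()
    {
      // GROUP BY plus ORDER BY: two candidate steps, capped at 2.
      assert(estimate_aggr_tables(true, false, false, true, false, false) == 2);
      // Plain SELECT: still reserve one slot.
      assert(estimate_aggr_tables(false, false, false, false, false, false) == 1);
      printf("ok\n");
      return 0;
    }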
*/ bzero(j, sizeof(JOIN_TAB)); - j->join= join; + j->join= this; j->table= NULL; //temporary way to tell SJM tables from others. j->ref.key = -1; j->on_expr_ref= (Item**) &null_ptr; @@ -8544,12 +8659,12 @@ get_best_combination(JOIN *join) j->cond_selectivity= 1.0; JOIN_TAB *jt; JOIN_TAB_RANGE *jt_range; - if (!(jt= (JOIN_TAB*)join->thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) || + if (!(jt= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*sjm->tables)) || !(jt_range= new JOIN_TAB_RANGE)) DBUG_RETURN(TRUE); jt_range->start= jt; jt_range->end= jt + sjm->tables; - join->join_tab_ranges.push_back(jt_range, join->thd->mem_root); + join_tab_ranges.push_back(jt_range, thd->mem_root); j->bush_children= jt_range; sjm_nest_end= jt + sjm->tables; sjm_nest_root= j; @@ -8557,11 +8672,11 @@ get_best_combination(JOIN *join) j= jt; } - *j= *join->best_positions[tablenr].table; + *j= *best_positions[tablenr].table; j->bush_root_tab= sjm_nest_root; - form=join->table[tablenr]=j->table; + form= table[tablenr]= j->table; used_tables|= form->map; form->reginfo.join_tab=j; if (!*j->on_expr_ref) @@ -8577,36 +8692,36 @@ get_best_combination(JOIN *join) if (j->type == JT_SYSTEM) goto loop_end; - if ( !(keyuse= join->best_positions[tablenr].key)) + if ( !(keyuse= best_positions[tablenr].key)) { j->type=JT_ALL; - if (join->best_positions[tablenr].use_join_buffer && - tablenr != join->const_tables) - join->full_join= 1; + if (best_positions[tablenr].use_join_buffer && + tablenr != const_tables) + full_join= 1; } - /*if (join->best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN) + /*if (best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN) { DBUG_ASSERT(!keyuse || keyuse->key == - join->best_positions[tablenr].loosescan_picker.loosescan_key); - j->index= join->best_positions[tablenr].loosescan_picker.loosescan_key; + best_positions[tablenr].loosescan_picker.loosescan_key); + j->index= best_positions[tablenr].loosescan_picker.loosescan_key; }*/ - if (keyuse && create_ref_for_key(join, j, keyuse, TRUE, used_tables)) + if (keyuse && create_ref_for_key(this, j, keyuse, TRUE, used_tables)) DBUG_RETURN(TRUE); // Something went wrong if ((j->type == JT_REF || j->type == JT_EQ_REF) && is_hash_join_key_no(j->ref.key)) - join->hash_join= TRUE; + hash_join= TRUE; loop_end: /* Save records_read in JOIN_TAB so that select_describe()/etc don't have to access join->best_positions[]. 
*/ - j->records_read= join->best_positions[tablenr].records_read; - j->cond_selectivity= join->best_positions[tablenr].cond_selectivity; - join->map2table[j->table->tablenr]= j; + j->records_read= best_positions[tablenr].records_read; + j->cond_selectivity= best_positions[tablenr].cond_selectivity; + map2table[j->table->tablenr]= j; /* If we've reached the end of sjm nest, switch back to main sequence */ if (j + 1 == sjm_nest_end) @@ -8619,16 +8734,10 @@ get_best_combination(JOIN *join) } root_range->end= j; - join->top_join_tab_count= join->join_tab_ranges.head()->end - - join->join_tab_ranges.head()->start; - /* - Save pointers to select join tabs for SHOW EXPLAIN - */ - join->table_access_tabs= join->join_tab; - join->top_table_access_tabs_count= join->top_join_tab_count; + top_join_tab_count= join_tab_ranges.head()->end - + join_tab_ranges.head()->start; - - update_depend_map(join); + update_depend_map(this); DBUG_RETURN(0); } @@ -9019,121 +9128,12 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, ((Item_field*) keyuse->val->real_item())->field, keyuse->val->real_item()->full_name()); - return new store_key_item(thd, - key_part->field, - key_buff + maybe_null, - maybe_null ? key_buff : 0, - key_part->length, - keyuse->val, FALSE); -} - -/** - @details Initialize a JOIN as a query execution plan - that accesses a single table via a table scan. - - @param parent contains JOIN_TAB and TABLE object buffers for this join - @param tmp_table temporary table - - @retval FALSE success - @retval TRUE error occurred -*/ -bool -JOIN::make_simple_join(JOIN *parent, TABLE *temp_table) -{ - DBUG_ENTER("JOIN::make_simple_join"); - - /* - Reuse TABLE * and JOIN_TAB if already allocated by a previous call - to this function through JOIN::exec (may happen for sub-queries). - - psergey-todo: here, save the pointer for original join_tabs. - */ - if (!(join_tab= parent->join_tab_reexec)) - { - if (!(join_tab= parent->join_tab_reexec= - (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)))) - DBUG_RETURN(TRUE); /* purecov: inspected */ - } - else - { - /* Free memory used by previous allocations */ - delete join_tab->filesort; - } - - table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table; - table_count= top_join_tab_count= 1; - - const_tables= 0; - const_table_map= 0; - eliminated_tables= 0; - tmp_table_param.field_count= tmp_table_param.sum_func_count= - tmp_table_param.func_count= 0; - /* - We need to destruct the copy_field (allocated in create_tmp_table()) - before setting it to 0 if the join is not "reusable". - */ - if (!tmp_join || tmp_join != this) - tmp_table_param.cleanup(); - tmp_table_param.copy_field= tmp_table_param.copy_field_end=0; - first_record= sort_and_group=0; - send_records= (ha_rows) 0; - - if (group_optimized_away && !tmp_table_param.precomputed_group_by) - { - /* - If grouping has been optimized away, a temporary table is - normally not needed unless we're explicitly requested to create - one (e.g. due to a SQL_BUFFER_RESULT hint or INSERT ... SELECT). - - In this case (grouping was optimized away), temp_table was - created without a grouping expression and JOIN::exec() will not - perform the necessary grouping (by the use of end_send_group() - or end_write_group()) if JOIN::group is set to false. - - There is one exception: if the loose index scan access method is - used to read into the temporary table, grouping and aggregate - functions are handled. 
- */ - // the temporary table was explicitly requested - DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT)); - // the temporary table does not have a grouping expression - DBUG_ASSERT(!temp_table->group); - } - else - group= false; - - row_limit= unit->select_limit_cnt; - do_send_rows= row_limit ? 1 : 0; - - bzero(join_tab, sizeof(JOIN_TAB)); - join_tab->table=temp_table; - join_tab->set_select_cond(NULL, __LINE__); - join_tab->type= JT_ALL; /* Map through all records */ - join_tab->keys.init(); - join_tab->keys.set_all(); /* test everything in quick */ - join_tab->ref.key = -1; - join_tab->shortcut_for_distinct= false; - join_tab->read_first_record= join_init_read_record; - join_tab->join= this; - join_tab->ref.key_parts= 0; - - uint select_nr= select_lex->select_number; - if (select_nr == INT_MAX) - { - /* this is a fake_select_lex of a union */ - select_nr= select_lex->master_unit()->first_select()->select_number; - join_tab->tracker= thd->lex->explain->get_union(select_nr)-> - get_tmptable_read_tracker(); - } - else - { - join_tab->tracker= thd->lex->explain->get_select(select_nr)-> - get_using_temporary_read_tracker(); - } - bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record)); - temp_table->status=0; - temp_table->null_row=0; - DBUG_RETURN(FALSE); + return new store_key_item(thd, + key_part->field, + key_buff + maybe_null, + maybe_null ? key_buff : 0, + key_part->length, + keyuse->val, FALSE); } @@ -9551,6 +9551,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) tab= next_depth_first_tab(join, tab), i++) { bool is_hj; + /* first_inner is the X in queries like: SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X @@ -11181,7 +11182,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) join_read_system :join_read_const; if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) - table->enable_keyread(); + table->set_keyread(true); else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered()) push_index_cond(tab, tab->ref.key); break; @@ -11190,7 +11191,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) /* fall through */ if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) - table->enable_keyread(); + table->set_keyread(true); else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered()) push_index_cond(tab, tab->ref.key); break; @@ -11205,7 +11206,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) tab->quick=0; if (table->covering_keys.is_set(tab->ref.key) && !table->no_keyread) - table->enable_keyread(); + table->set_keyread(true); else if ((!jcl || jcl > 4) && !tab->ref.is_access_triggered()) push_index_cond(tab, tab->ref.key); break; @@ -11268,7 +11269,7 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after) if (tab->select && tab->select->quick && tab->select->quick->index != MAX_KEY && //not index_merge table->covering_keys.is_set(tab->select->quick->index)) - table->enable_keyread(); + table->set_keyread(true); else if (!table->covering_keys.is_clear_all() && !(tab->select && tab->select->quick)) { // Only read index tree @@ -11430,17 +11431,27 @@ void JOIN_TAB::cleanup() select= 0; delete quick; quick= 0; - delete filesort; - filesort= 0; if (cache) { cache->free(); cache= 0; } limit= 0; + // Free select that was created for filesort outside of create_sort_index + if (filesort && filesort->select && !filesort->own_select) + delete filesort->select; + delete filesort; + filesort= NULL; + /* Skip non-existing derived tables/views 
result tables */ + if (table && + (table->s->tmp_table != INTERNAL_TMP_TABLE || table->is_created())) + { + table->set_keyread(FALSE); + table->file->ha_index_or_rnd_end(); + } if (table) { - table->disable_keyread(); + table->set_keyread(false); table->file->ha_index_or_rnd_end(); preread_init_done= FALSE; if (table->pos_in_table_list && @@ -11490,7 +11501,7 @@ void JOIN_TAB::cleanup() double JOIN_TAB::scan_time() { double res; - if (table->created) + if (table->is_created()) { if (table->is_filled_at_execution()) { @@ -11529,9 +11540,10 @@ double JOIN_TAB::scan_time() ha_rows JOIN_TAB::get_examined_rows() { double examined_rows; + SQL_SELECT *sel= filesort? filesort->select : this->select; - if (select && select->quick && use_quick != 2) - examined_rows= select->quick->records; + if (sel && sel->quick && use_quick != 2) + examined_rows= sel->quick->records; else if (type == JT_NEXT || type == JT_ALL || type == JT_HASH || type ==JT_HASH_NEXT) { @@ -11818,35 +11830,12 @@ void JOIN::cleanup(bool full) table_count= original_table_count; } - if (table) + if (join_tab) { JOIN_TAB *tab; - /* - Only a sorted table may be cached. This sorted table is always the - first non const table in join->table - */ - if (table_count > const_tables) // Test for not-const tables - { - JOIN_TAB *first_tab= first_top_level_tab(this, WITHOUT_CONST_TABLES); - if (first_tab->table) - { - delete first_tab->filesort; - first_tab->filesort= 0; - } - } + if (full) { - JOIN_TAB *sort_tab= first_linear_tab(this, WITH_BUSH_ROOTS, - WITHOUT_CONST_TABLES); - if (pre_sort_join_tab) - { - if (sort_tab && sort_tab->select == pre_sort_join_tab->select) - { - pre_sort_join_tab->select= NULL; - } - else - clean_pre_sort_join_tab(); - } /* Call cleanup() on join tabs used by the join optimization (join->join_tab may now be pointing to result of make_simple_join @@ -11858,49 +11847,64 @@ void JOIN::cleanup(bool full) */ if (table_count) { - for (tab= first_breadth_first_optimization_tab(); tab; - tab= next_breadth_first_tab(first_breadth_first_optimization_tab(), - top_table_access_tabs_count, tab)) - tab->cleanup(); - - /* We've walked optimization tabs, do execution ones too. */ - if (first_breadth_first_execution_tab() != - first_breadth_first_optimization_tab()) + for (tab= first_breadth_first_tab(); tab; + tab= next_breadth_first_tab(first_breadth_first_tab(), + top_join_tab_count, tab)) { - for (tab= first_breadth_first_execution_tab(); tab; - tab= next_breadth_first_tab(first_breadth_first_execution_tab(), - top_join_tab_count, tab)) - tab->cleanup(); + tab->cleanup(); + delete tab->filesort_result; + tab->filesort_result= NULL; } } cleaned= true; + //psergey2: added (Q: why not in the above loop?) 
+ { + JOIN_TAB *curr_tab= join_tab + top_join_tab_count; + for (uint i= 0; i < aggr_tables; i++, curr_tab++) + { + if (curr_tab->aggr) + { + free_tmp_table(thd, curr_tab->table); + delete curr_tab->tmp_table_param; + curr_tab->tmp_table_param= NULL; + curr_tab->aggr= NULL; + + delete curr_tab->filesort_result; + curr_tab->filesort_result= NULL; + } + } + aggr_tables= 0; // psergey3 + } } else { for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITH_CONST_TABLES); tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS)) { - if (tab->table) + if (!tab->table) + continue; + DBUG_PRINT("info", ("close index: %s.%s alias: %s", + tab->table->s->db.str, + tab->table->s->table_name.str, + tab->table->alias.c_ptr())); + if (tab->table->is_created()) { - DBUG_PRINT("info", ("close index: %s.%s alias: %s", - tab->table->s->db.str, - tab->table->s->table_name.str, - tab->table->alias.c_ptr())); tab->table->file->ha_index_or_rnd_end(); + if (tab->aggr) + { + int tmp= 0; + if ((tmp= tab->table->file->extra(HA_EXTRA_NO_CACHE))) + tab->table->file->print_error(tmp, MYF(0)); + } } + delete tab->filesort_result; + tab->filesort_result= NULL; } } } if (full) { cleanup_empty_jtbm_semi_joins(this, join_list); - /* - Ensure that the following delete_elements() would not be called - twice for the same list. - */ - if (tmp_join && tmp_join != this && - tmp_join->group_fields == this->group_fields) - tmp_join->group_fields.empty(); // Run Cached_item DTORs! group_fields.delete_elements(); @@ -11916,14 +11920,6 @@ void JOIN::cleanup(bool full) pointer to tmp_table_param.copy_field from tmp_join, because it qill be removed in tmp_table_param.cleanup(). */ - if (tmp_join && - tmp_join != this && - tmp_join->tmp_table_param.copy_field == - tmp_table_param.copy_field) - { - tmp_join->tmp_table_param.copy_field= - tmp_join->tmp_table_param.save_copy_field= 0; - } tmp_table_param.cleanup(); delete pushdown_query; @@ -11945,6 +11941,12 @@ void JOIN::cleanup(bool full) } } } + /* Restore ref array to original state */ + if (current_ref_ptrs != items0) + { + set_items_ref_array(items0); + set_group_rpa= false; + } DBUG_VOID_RETURN; } @@ -12120,8 +12122,8 @@ static ORDER * remove_const(JOIN *join,ORDER *first_order, COND *cond, bool change_list, bool *simple_order) { - *simple_order= 1; - if (join->table_count == join->const_tables) + *simple_order= join->rollup.state == ROLLUP::STATE_NONE; + if (join->only_const_tables()) return change_list ? 
0 : first_order; // No need to sort ORDER *order,**prev_ptr, *tmp_order; @@ -13282,16 +13284,16 @@ static int compare_fields_by_table_order(Item *field1, Item_field *f1= (Item_field *) (field1->real_item()); Item_field *f2= (Item_field *) (field2->real_item()); if (field1->const_item() || f1->const_item()) - return 1; - if (field2->const_item() || f2->const_item()) return -1; - if (f2->used_tables() & OUTER_REF_TABLE_BIT) - { + if (field2->const_item() || f2->const_item()) + return 1; + if (f1->used_tables() & OUTER_REF_TABLE_BIT) + { outer_ref= 1; cmp= -1; } - if (f1->used_tables() & OUTER_REF_TABLE_BIT) - { + if (f2->used_tables() & OUTER_REF_TABLE_BIT) + { outer_ref= 1; cmp++; } @@ -13315,10 +13317,12 @@ static int compare_fields_by_table_order(Item *field1, tab2= tab2->bush_root_tab; } - cmp= tab2 - tab1; + cmp= tab1 - tab2; if (!cmp) { + /* Fields f1, f2 belong to the same table */ + JOIN_TAB *tab= idx[f1->field->table->tablenr]; uint keyno= MAX_KEY; if (tab->ref.key_parts) @@ -13327,31 +13331,38 @@ static int compare_fields_by_table_order(Item *field1, keyno = tab->select->quick->index; if (keyno != MAX_KEY) { - if (f2->field->part_of_key.is_set(keyno)) - cmp= -1; if (f1->field->part_of_key.is_set(keyno)) + cmp= -1; + if (f2->field->part_of_key.is_set(keyno)) cmp++; + /* + Here: + if both f1, f2 are components of the key tab->ref.key then cmp==0, + if only f1 is a component of the key then cmp==-1 (f1 is better), + if only f2 is a component of the key then cmp==1, (f2 is better), + if none of f1,f1 is component of the key cmp==0. + */ if (!cmp) { KEY *key_info= tab->table->key_info + keyno; for (uint i= 0; i < key_info->user_defined_key_parts; i++) { Field *fld= key_info->key_part[i].field; - if (fld->eq(f2->field)) + if (fld->eq(f1->field)) { - cmp= -1; + cmp= -1; // f1 is better break; } - if (fld->eq(f1->field)) + if (fld->eq(f2->field)) { - cmp= 1; + cmp= 1; // f2 is better break; } } } } - else - cmp= f2->field->field_index-f1->field->field_index; + if (!cmp) + cmp= f1->field->field_index-f2->field->field_index; } return cmp < 0 ? -1 : (cmp ? 1 : 0); } @@ -15973,6 +15984,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, case Item::NULL_ITEM: case Item::VARBIN_ITEM: case Item::CACHE_ITEM: + case Item::WINDOW_FUNC_ITEM: // psergey-winfunc: case Item::EXPR_CACHE_ITEM: if (make_copy_field) { @@ -16249,7 +16261,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, reclength= string_total_length= 0; blob_count= string_count= null_count= hidden_null_count= group_null_items= 0; - param->using_indirect_summary_function=0; + param->using_outer_summary_function= 0; List_iterator_fast li(fields); Item *item; @@ -16271,7 +16283,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, function. We need to know this if someone is going to use DISTINCT on the result. 
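The corrected preference order in the compare_fields_by_table_order() hunk above (a component of the chosen index beats a non-component, then the earlier key part wins, then the lower field index) can be modelled with a tiny standalone comparator. This is only a sketch: the FieldInfo struct and its key_part_pos member are invented stand-ins for what the real code reads from Field, KEY and JOIN_TAB objects.

    #include <cassert>

    struct FieldInfo
    {
      bool part_of_chosen_key;  // belongs to the index used for the ref access
      int  key_part_pos;        // position inside that index, -1 if not a component
      int  field_index;         // position of the column in the table
    };

    /* Returns <0 if f1 is the better substitution candidate, >0 if f2 is. */
    static int prefer_field(const FieldInfo &f1, const FieldInfo &f2)
    {
      int cmp= 0;
      if (f1.part_of_chosen_key) cmp-= 1;
      if (f2.part_of_chosen_key) cmp+= 1;
      if (!cmp && f1.part_of_chosen_key && f2.part_of_chosen_key)
        cmp= f1.key_part_pos - f2.key_part_pos;   // earlier key part wins
      if (!cmp)
        cmp= f1.field_index - f2.field_index;     // stable fallback
      return cmp < 0 ? -1 : (cmp ? 1 : 0);
    }

    int main()
    {
      FieldInfo keyed= {true, 0, 5}, plain= {false, -1, 1};
      assert(prefer_field(keyed, plain) == -1);   // key component is preferred
      assert(prefer_field(plain, keyed) == 1);
      return 0;
    }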
*/ - param->using_indirect_summary_function=1; + param->using_outer_summary_function=1; continue; } } @@ -16885,13 +16897,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, if (!do_not_open) { - if (share->db_type() == TMP_ENGINE_HTON) - { - if (create_internal_tmp_table(table, param->keyinfo, param->start_recinfo, - ¶m->recinfo, select_options)) - goto err; - } - if (open_tmp_table(table)) + if (instantiate_tmp_table(table, param->keyinfo, param->start_recinfo, + ¶m->recinfo, select_options)) goto err; } @@ -17032,9 +17039,9 @@ bool open_tmp_table(TABLE *table) } table->db_stat= HA_OPEN_KEYFILE+HA_OPEN_RNDFILE; (void) table->file->extra(HA_EXTRA_QUICK); /* Faster */ - if (!table->created) + if (!table->is_created()) { - table->created= TRUE; + table->set_created(); table->in_use->inc_status_created_tmp_tables(); } @@ -17240,7 +17247,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, table->in_use->inc_status_created_tmp_tables(); table->in_use->query_plan_flags|= QPLAN_TMP_DISK; share->db_record_offset= 1; - table->created= TRUE; + table->set_created(); DBUG_RETURN(0); err: DBUG_RETURN(1); @@ -17558,7 +17565,7 @@ free_tmp_table(THD *thd, TABLE *entry) save_proc_info=thd->proc_info; THD_STAGE_INFO(thd, stage_removing_tmp_table); - if (entry->file && entry->created) + if (entry->file && entry->is_created()) { entry->file->ha_index_or_rnd_end(); if (entry->db_stat) @@ -17586,81 +17593,101 @@ free_tmp_table(THD *thd, TABLE *entry) /** - @details - Rows produced by a join sweep may end up in a temporary table or be sent - to a client. Setup the function of the nested loop join algorithm which - handles final fully constructed and matched records. + @brief + Set write_func of AGGR_OP object - @param join join to setup the function for. + @param join_tab JOIN_TAB of the corresponding tmp table - @return - end_select function to use. This function can't fail. + @details + Function sets up write_func according to how AGGR_OP object that + is attached to the given join_tab will be used in the query. */ -Next_select_func setup_end_select_func(JOIN *join) +void set_postjoin_aggr_write_func(JOIN_TAB *tab) { - TABLE *table= join->tmp_table; - TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param; - Next_select_func end_select; + JOIN *join= tab->join; + TABLE *table= tab->table; + AGGR_OP *aggr= tab->aggr; + TMP_TABLE_PARAM *tmp_tbl= tab->tmp_table_param; - /* Set up select_end */ - if (table) + DBUG_ASSERT(table && aggr); + + if (table->group && tmp_tbl->sum_func_count && + !tmp_tbl->precomputed_group_by) { - if (table->group && tmp_tbl->sum_func_count && - !tmp_tbl->precomputed_group_by) - { - if (table->s->keys) - { - DBUG_PRINT("info",("Using end_update")); - end_select=end_update; - } - else - { - DBUG_PRINT("info",("Using end_unique_update")); - end_select=end_unique_update; - } - } - else if (join->sort_and_group && !tmp_tbl->precomputed_group_by) + /* + Note for MyISAM tmp tables: if uniques is true keys won't be + created. + */ + if (table->s->keys && !table->s->uniques) { - DBUG_PRINT("info",("Using end_write_group")); - end_select=end_write_group; + DBUG_PRINT("info",("Using end_update")); + aggr->set_write_func(end_update); } else { - DBUG_PRINT("info",("Using end_write")); - end_select=end_write; - if (tmp_tbl->precomputed_group_by) - { - /* - A preceding call to create_tmp_table in the case when loose - index scan is used guarantees that - TMP_TABLE_PARAM::items_to_copy has enough space for the group - by functions. 
It is OK here to use memcpy since we copy - Item_sum pointers into an array of Item pointers. - */ - memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count, - join->sum_funcs, - sizeof(Item*)*tmp_tbl->sum_func_count); - tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0; - } + DBUG_PRINT("info",("Using end_unique_update")); + aggr->set_write_func(end_unique_update); } } + else if (join->sort_and_group && !tmp_tbl->precomputed_group_by && + !join->sort_and_group_aggr_tab) + { + DBUG_PRINT("info",("Using end_write_group")); + aggr->set_write_func(end_write_group); + join->sort_and_group_aggr_tab= tab; + } else { - /* - Choose method for presenting result to user. Use end_send_group - if the query requires grouping (has a GROUP BY clause and/or one or - more aggregate functions). Use end_send if the query should not - be grouped. - */ - if ((join->sort_and_group || - (join->procedure && join->procedure->flags & PROC_GROUP)) && - !tmp_tbl->precomputed_group_by) - end_select= end_send_group; - else - end_select= end_send; + DBUG_PRINT("info",("Using end_write")); + aggr->set_write_func(end_write); + if (tmp_tbl->precomputed_group_by) + { + /* + A preceding call to create_tmp_table in the case when loose + index scan is used guarantees that + TMP_TABLE_PARAM::items_to_copy has enough space for the group + by functions. It is OK here to use memcpy since we copy + Item_sum pointers into an array of Item pointers. + */ + memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count, + join->sum_funcs, + sizeof(Item*)*tmp_tbl->sum_func_count); + tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0; + } + } +} + + +/** + @details + Rows produced by a join sweep may end up in a temporary table or be sent + to a client. Set the function of the nested loop join algorithm which + handles final fully constructed and matched records. + + @param join join to setup the function for. + + @return + end_select function to use. This function can't fail. +*/ + +Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab) +{ + TMP_TABLE_PARAM *tmp_tbl= tab ? tab->tmp_table_param : &join->tmp_table_param; + + /* + Choose method for presenting result to user. Use end_send_group + if the query requires grouping (has a GROUP BY clause and/or one or + more aggregate functions). Use end_send if the query should not + be grouped. 
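The dispatch performed by set_postjoin_aggr_write_func() above can be summarized as a pure function over a handful of flags. The parameter names below are invented for the sketch; in the real code they correspond to state read from TABLE, TMP_TABLE_PARAM and JOIN.

    #include <cassert>

    enum Write_func { WRITE_UPDATE, WRITE_UNIQUE_UPDATE, WRITE_GROUP, WRITE_PLAIN };

    /* Which row-writing routine an aggregation tmp table should use. */
    static Write_func choose_write_func(bool tmp_table_has_group_key,
                                        bool has_sum_funcs,
                                        bool precomputed_group_by,
                                        bool tmp_table_has_usable_keys,
                                        bool sort_and_group,
                                        bool group_writer_already_chosen)
    {
      if (tmp_table_has_group_key && has_sum_funcs && !precomputed_group_by)
        return tmp_table_has_usable_keys ? WRITE_UPDATE        // end_update
                                         : WRITE_UNIQUE_UPDATE;// end_unique_update
      if (sort_and_group && !precomputed_group_by && !group_writer_already_chosen)
        return WRITE_GROUP;                                    // end_write_group
      return WRITE_PLAIN;                                      // end_write
    }

    int main()
    {
      // GROUP BY materialized through a keyed tmp table: rows are updated in place.
      assert(choose_write_func(true, true, false, true, false, false) == WRITE_UPDATE);
      // Ordered input with on-the-fly grouping: one writer emits a row per group.
      assert(choose_write_func(false, false, false, false, true, false) == WRITE_GROUP);
      return 0;
    }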
+ */ + if (join->sort_and_group && !tmp_tbl->precomputed_group_by) + { + DBUG_PRINT("info",("Using end_send_group")); + return end_send_group; } - return end_select; + DBUG_PRINT("info",("Using end_send")); + return end_send; } @@ -17676,19 +17703,13 @@ Next_select_func setup_end_select_func(JOIN *join) */ static int -do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) +do_select(JOIN *join, Procedure *procedure) { int rc= 0; enum_nested_loop_state error= NESTED_LOOP_OK; - JOIN_TAB *UNINIT_VAR(join_tab); DBUG_ENTER("do_select"); - - join->procedure=procedure; - join->tmp_table= table; /* Save for easy recursion */ - join->fields= fields; - join->do_select_call_count++; - if (join->pushdown_query && join->do_select_call_count == 1) + if (join->pushdown_query) { /* Select fields are in the temporary table */ join->fields= &join->tmp_fields_list1; @@ -17696,34 +17717,33 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) join->set_items_ref_array(join->items1); /* The storage engine will take care of the group by query result */ int res= join->pushdown_query->execute(join); - DBUG_RETURN(res); - } - if (table) - { - (void) table->file->extra(HA_EXTRA_WRITE_CACHE); - empty_record(table); - if (table->group && join->tmp_table_param.sum_func_count && - table->s->keys && !table->file->inited) + if (res) + DBUG_RETURN(res); + + if (join->pushdown_query->store_data_in_temp_table) { - rc= table->file->ha_index_init(0, 0); - if (rc) - { - table->file->print_error(rc, MYF(0)); - DBUG_RETURN(-1); - } + JOIN_TAB *last_tab= join->join_tab + join->table_count; + last_tab->next_select= end_send; + + enum_nested_loop_state state= last_tab->aggr->end_send(); + if (state >= NESTED_LOOP_OK) + state= sub_select(join, last_tab, true); + + if (state < NESTED_LOOP_OK) + res= 1; + + if (join->result->send_eof()) + res= 1; } + DBUG_RETURN(res); } - /* Set up select_end */ - Next_select_func end_select= setup_end_select_func(join); - if (join->table_count) - { - join->join_tab[join->top_join_tab_count - 1].next_select= end_select; - join_tab=join->join_tab+join->const_tables; - } + + join->procedure= procedure; join->send_records=0; - if (join->table_count == join->const_tables) + if (join->only_const_tables() && !join->need_tmp) { + Next_select_func end_select= setup_end_select_func(join, NULL); /* HAVING will be checked after processing aggregate functions, But WHERE should checked here (we alredy have read tables). @@ -17735,8 +17755,9 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) DBUG_ASSERT(join->outer_ref_cond == NULL); if (!join->pseudo_bits_cond || join->pseudo_bits_cond->val_int()) { + // HAVING will be checked by end_select error= (*end_select)(join, 0, 0); - if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT) + if (error >= NESTED_LOOP_OK) error= (*end_select)(join, 0, 1); /* @@ -17752,7 +17773,7 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) if (!join->having || join->having->val_int()) { List *columns_list= (procedure ? 
&join->procedure_fields_list : - fields); + join->fields); rc= join->result->send_data(*columns_list) > 0; } } @@ -17766,8 +17787,6 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) } else { - DBUG_ASSERT(join->table_count); - DBUG_EXECUTE_IF("show_explain_probe_do_select", if (dbug_user_var_equals_int(join->thd, "show_explain_probe_select_id", @@ -17775,15 +17794,13 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) dbug_serve_apcs(join->thd, 1); ); + JOIN_TAB *join_tab= join->join_tab + join->const_tables; if (join->outer_ref_cond && !join->outer_ref_cond->val_int()) error= NESTED_LOOP_NO_MORE_ROWS; else - error= sub_select(join,join_tab,0); - if ((error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) && - join->thd->killed != ABORT_QUERY) - error= sub_select(join,join_tab,1); - if (error == NESTED_LOOP_QUERY_LIMIT) - error= NESTED_LOOP_OK; /* select_limit used */ + error= join->first_select(join,join_tab,0); + if (error >= NESTED_LOOP_OK && join->thd->killed != ABORT_QUERY) + error= join->first_select(join,join_tab,1); } join->thd->limit_found_rows= join->send_records; @@ -17791,23 +17808,37 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) if (error == NESTED_LOOP_NO_MORE_ROWS || join->thd->killed == ABORT_QUERY) error= NESTED_LOOP_OK; - if (table) + /* + For "order by with limit", we cannot rely on send_records, but need + to use the rowcount read originally into the join_tab applying the + filesort. There cannot be any post-filtering conditions, nor any + following join_tabs in this case, so this rowcount properly represents + the correct number of qualifying rows. + */ + if (join->order) { - int tmp, new_errno= 0; - if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE))) + // Save # of found records prior to cleanup + JOIN_TAB *sort_tab; + JOIN_TAB *join_tab= join->join_tab; + uint const_tables= join->const_tables; + + // Take record count from first non constant table or from last tmp table + if (join->aggr_tables > 0) + sort_tab= join_tab + join->top_join_tab_count + join->aggr_tables - 1; + else { - DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); - new_errno= tmp; + DBUG_ASSERT(!join->only_const_tables()); + sort_tab= join_tab + const_tables; } - if ((tmp=table->file->ha_index_or_rnd_end())) + if (sort_tab->filesort && + join->select_options & OPTION_FOUND_ROWS && + sort_tab->filesort->sortorder && + sort_tab->filesort->limit != HA_POS_ERROR) { - DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); - new_errno= tmp; + join->thd->limit_found_rows= sort_tab->records; } - if (new_errno) - table->file->print_error(new_errno,MYF(0)); } - else + { /* The following will unlock all cursors if the command wasn't an @@ -17821,11 +17852,8 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) Sic: this branch works even if rc != 0, e.g. when send_data above returns an error. */ - if (!table) // If sending data to client - { - if (join->result->send_eof()) - rc= 1; // Don't send error - } + if (join->result->send_eof()) + rc= 1; // Don't send error DBUG_PRINT("info",("%ld records output", (long) join->send_records)); } else @@ -17836,7 +17864,8 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) DBUG_PRINT("error",("Error: do_select() failed")); } #endif - DBUG_RETURN(join->thd->is_error() ? -1 : rc); + rc= join->thd->is_error() ? 
-1 : rc; + DBUG_RETURN(rc); } @@ -17853,6 +17882,106 @@ int rr_sequential_and_unpack(READ_RECORD *info) } +/** + @brief + Instantiates temporary table + + @param table Table object that describes the table to be + instantiated + @param keyinfo Description of the index (there is always one index) + @param start_recinfo Column descriptions + @param recinfo INOUT End of column descriptions + @param options Option bits + + @details + Creates tmp table and opens it. + + @return + FALSE - OK + TRUE - Error +*/ + +static +bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, + MARIA_COLUMNDEF *start_recinfo, + MARIA_COLUMNDEF **recinfo, + ulonglong options) +{ + if (table->s->db_type() == TMP_ENGINE_HTON) + { + if (create_internal_tmp_table(table, keyinfo, start_recinfo, recinfo, + options)) + return TRUE; + // Make empty record so random data is not written to disk + empty_record(table); + } + if (open_tmp_table(table)) + return TRUE; + + return FALSE; +} + + +/** + @brief + Accumulate rows of the result of an aggregation operation in a tmp table + + @param join pointer to the structure providing all context info for the query + @param join_tab the JOIN_TAB object to which the operation is attached + @param end_records TRUE <=> all records were accumulated, send them further + + @details + This function accumulates records of the aggreagation operation for + the node join_tab from the execution plan in a tmp table. To add a new + record the function calls join_tab->aggr->put_records. + When there is no more records to save, in this + case the end_of_records argument == true, function tells the operation to + send records further by calling aggr->send_records(). + When all records are sent this function passes 'end_of_records' signal + further by calling sub_select() with end_of_records argument set to + true. After that aggr->end_send() is called to tell the operation that + it could end internal buffer scan. + + @note + This function is not expected to be called when dynamic range scan is + used to scan join_tab because range scans aren't used for tmp tables. + + @return + return one of enum_nested_loop_state. +*/ + +enum_nested_loop_state +sub_select_postjoin_aggr(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) +{ + enum_nested_loop_state rc; + AGGR_OP *aggr= join_tab->aggr; + + /* This function cannot be called if join_tab has no associated aggregation */ + DBUG_ASSERT(aggr != NULL); + + DBUG_ENTER("sub_select_aggr_tab"); + + if (join->thd->killed) + { + /* The user has aborted the execution of the query */ + join->thd->send_kill_message(); + DBUG_RETURN(NESTED_LOOP_KILLED); + } + + if (end_of_records) + { + rc= aggr->end_send(); + if (rc >= NESTED_LOOP_OK) + rc= sub_select(join, join_tab, end_of_records); + DBUG_RETURN(rc); + } + + rc= aggr->put_record(); + + DBUG_RETURN(rc); +} + + /* Fill the join buffer with partial records, retrieve all full matches for them @@ -17906,7 +18035,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) if (end_of_records) { rc= cache->join_records(FALSE); - if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS) + if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS || + rc == NESTED_LOOP_QUERY_LIMIT) rc= sub_select(join, join_tab, end_of_records); DBUG_RETURN(rc); } @@ -17933,7 +18063,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) without it. If it's not the case remove it. 
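The control flow described for sub_select_postjoin_aggr() above (buffer rows while the join runs, flush and forward the end-of-records signal when it arrives, bail out if the query was killed) is easier to see on a toy aggregation node. The types below are simplified stand-ins, not the real AGGR_OP/JOIN_TAB interfaces.

    #include <cstdio>
    #include <vector>

    enum nl_state { NL_OK, NL_ERROR, NL_KILLED };

    struct Toy_aggr_node
    {
      std::vector<int> buffered_rows;

      nl_state put_record(int row)          // called for every joined row
      {
        buffered_rows.push_back(row);
        return NL_OK;
      }
      nl_state end_send()                   // flush on end-of-records
      {
        for (int r : buffered_rows)
          printf("send aggregated row %d\n", r);
        return NL_OK;
      }
    };

    static nl_state toy_sub_select_aggr(Toy_aggr_node *node, bool end_of_records,
                                        int row, bool killed)
    {
      if (killed)
        return NL_KILLED;                   // user aborted the query
      if (end_of_records)
      {
        nl_state rc= node->end_send();      // flush the buffered result
        /* The real code then calls sub_select(join, join_tab, true) to pass
           the end-of-records signal to the next node; omitted here. */
        return rc;
      }
      return node->put_record(row);         // just accumulate the row
    }

    int main()
    {
      Toy_aggr_node node;
      toy_sub_select_aggr(&node, false, 1, false);
      toy_sub_select_aggr(&node, false, 2, false);
      toy_sub_select_aggr(&node, true, 0, false);
      return 0;
    }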
*/ rc= cache->join_records(TRUE); - if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS) + if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS || + rc == NESTED_LOOP_QUERY_LIMIT) rc= sub_select(join, join_tab, end_of_records); DBUG_RETURN(rc); } @@ -18020,7 +18151,7 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) is the same as the value of the predicate, otherwise it's just returns true. To carry out a return to a nested loop level of join table t the pointer - to t is remembered in the field 'return_tab' of the join structure. + to t is remembered in the field 'return_rtab' of the join structure. Consider the following query: @code SELECT * FROM t1, @@ -18083,7 +18214,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) int error; enum_nested_loop_state rc= NESTED_LOOP_OK; READ_RECORD *info= &join_tab->read_record; - + + for (SJ_TMP_TABLE *flush_dups_table= join_tab->flush_weedout_table; flush_dups_table; flush_dups_table= flush_dups_table->next_flush_table) @@ -18174,7 +18306,6 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) DBUG_RETURN(rc); } - /** @brief Process one row of the nested loop join. @@ -18236,6 +18367,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, condition is true => a match is found. */ join_tab->tracker->r_rows_after_where++; + bool found= 1; while (join_tab->first_unmatched && found) { @@ -18577,11 +18709,11 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos) !table->no_keyread && (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY) { - table->enable_keyread(); + table->set_keyread(true); tab->index= tab->ref.key; } error=join_read_const(tab); - table->disable_keyread(); + table->set_keyread(false); if (error) { tab->info= ET_UNIQUE_ROW_NOT_FOUND; @@ -19028,12 +19160,26 @@ bool test_if_use_dynamic_range_scan(JOIN_TAB *join_tab) int join_init_read_record(JOIN_TAB *tab) { - if (tab->select && tab->select->quick && tab->select->quick->reset()) + int error; + /* + Note: the query plan tree for the below operations is constructed in + save_agg_explain_data. + */ + if (tab->distinct && tab->remove_duplicates()) // Remove duplicates. + return 1; + if (tab->filesort && tab->sort_table()) // Sort table. + return 1; + + if (tab->select && tab->select->quick && (error= tab->select->quick->reset())) + { + /* Ensures error status is propageted back to client */ + report_error(tab->table, error); return 1; + } if (!tab->preread_init_done && tab->preread_init()) return 1; if (init_read_record(&tab->read_record, tab->join->thd, tab->table, - tab->select, tab->filesort, 1,1, FALSE)) + tab->select, tab->filesort_result, 1,1, FALSE)) return 1; return (*tab->read_record.read_record)(&tab->read_record); } @@ -19051,7 +19197,7 @@ join_read_record_no_init(JOIN_TAB *tab) save_copy_end= tab->read_record.copy_field_end; init_read_record(&tab->read_record, tab->join->thd, tab->table, - tab->select, tab->filesort, 1, 1, FALSE); + tab->select, tab->filesort_result, 1, 1, FALSE); tab->read_record.copy_field= save_copy; tab->read_record.copy_field_end= save_copy_end; @@ -19060,6 +19206,25 @@ join_read_record_no_init(JOIN_TAB *tab) return (*tab->read_record.read_record)(&tab->read_record); } + +/* + Helper function for sorting table with filesort. +*/ + +bool +JOIN_TAB::sort_table() +{ + int rc; + DBUG_PRINT("info",("Sorting for index")); + THD_STAGE_INFO(join->thd, stage_creating_sort_index); + DBUG_ASSERT(join->ordered_index_usage != (filesort->order == join->order ? 
+ JOIN::ordered_index_order_by : + JOIN::ordered_index_group_by)); + rc= create_sort_index(join->thd, join, this, NULL); + return (rc != 0); +} + + static int join_read_first(JOIN_TAB *tab) { @@ -19069,7 +19234,7 @@ join_read_first(JOIN_TAB *tab) if (table->covering_keys.is_set(tab->index) && !table->no_keyread && !table->key_read) - table->enable_keyread(); + table->set_keyread(true); tab->table->status=0; tab->read_record.read_record=join_read_next; tab->read_record.table=table; @@ -19109,7 +19274,7 @@ join_read_last(JOIN_TAB *tab) if (table->covering_keys.is_set(tab->index) && !table->no_keyread && !table->key_read) - table->enable_keyread(); + table->set_keyread(true); tab->table->status=0; tab->read_record.read_record=join_read_prev; tab->read_record.table=table; @@ -19233,16 +19398,21 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { DBUG_ENTER("end_send"); + /* + When all tables are const this function is called with jointab == NULL. + This function shouldn't be called for the first join_tab as it needs + to get fields from previous tab. + */ + DBUG_ASSERT(join_tab == NULL || join_tab != join->join_tab); + //TODO pass fields via argument + List *fields= join_tab ? (join_tab-1)->fields : join->fields; + if (!end_of_records) { +#if 0 +#endif if (join->table_count && - (join->join_tab->is_using_loose_index_scan() || - /* - When order by used a loose scan as its input, the quick select may - be attached to pre_sort_join_tab. - */ - (join->pre_sort_join_tab && - join->pre_sort_join_tab->is_using_loose_index_scan()))) + join->join_tab->is_using_loose_index_scan()) { /* Copy non-aggregated fields when loose index scan is used. */ copy_fields(&join->tmp_table_param); @@ -19259,7 +19429,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int error; /* result < 0 if row was not accepted and should not be counted */ - if ((error= join->result->send_data(*join->fields))) + if ((error= join->result->send_data(*fields))) DBUG_RETURN(error < 0 ? NESTED_LOOP_OK : NESTED_LOOP_ERROR); } @@ -19268,13 +19438,15 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), !join->do_send_rows) { /* - If filesort is used for sorting, stop after select_limit_cnt+1 - records are read. Because of optimization in some cases it can - provide only select_limit_cnt+1 records. + If we have used Priority Queue for optimizing order by with limit, + then stop here, there are no more records to consume. + When this optimization is used, end_send is called on the next + join_tab. 
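The priority-queue cut-off that end_send() checks once the row limit is reached (see the hunk below referring to (join_tab - 1)->filesort->using_pq) can be flattened into a small predicate. The Prev_tab_info struct and field names are invented for this sketch.

    #include <cassert>

    struct Prev_tab_info
    {
      bool exists;            // join_tab > join->join_tab
      bool did_filesort;      // (join_tab - 1)->filesort != NULL
      bool filesort_used_pq;  // the sort was done through a priority queue
    };

    /* If ORDER BY ... LIMIT was satisfied by a priority-queue filesort in the
       previous join_tab, that sort already kept only the needed rows, so once
       the limit is hit there is nothing more to read and the loop can stop. */
    static bool stop_after_limit(bool has_order_by, bool wants_found_rows,
                                 const Prev_tab_info &prev)
    {
      return has_order_by && wants_found_rows &&
             prev.exists && prev.did_filesort && prev.filesort_used_pq;
    }

    int main()
    {
      Prev_tab_info pq_sort= {true, true, true};
      Prev_tab_info plain=   {true, false, false};
      assert(stop_after_limit(true, true, pq_sort));
      assert(!stop_after_limit(true, true, plain));
      return 0;
    }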
*/ - if (join->order && join->sortorder && - join->filesort_found_rows && - join->select_options & OPTION_FOUND_ROWS) + if (join->order && + join->select_options & OPTION_FOUND_ROWS && + join_tab > join->join_tab && + (join_tab - 1)->filesort && (join_tab - 1)->filesort->using_pq) { DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT")); DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); @@ -19286,7 +19458,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->select_options & OPTION_FOUND_ROWS) { JOIN_TAB *jt=join->join_tab; - if ((join->table_count == 1) && !join->tmp_table && !join->sort_and_group + if ((join->table_count == 1) && !join->sort_and_group && !join->send_group_parts && !join->having && !jt->select_cond && !(jt->select && jt->select->quick) && (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) && @@ -19295,10 +19467,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), /* Join over all rows in table; Return number of found rows */ TABLE *table=jt->table; - join->select_options ^= OPTION_FOUND_ROWS; - if (jt->filesort) // If filesort was used + if (jt->filesort_result) // If filesort was used { - join->send_records= jt->filesort->found_rows; + join->send_records= jt->filesort_result->found_rows; } else { @@ -19349,13 +19520,21 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int idx= -1; enum_nested_loop_state ok_code= NESTED_LOOP_OK; + List *fields= join_tab ? (join_tab-1)->fields : join->fields; DBUG_ENTER("end_send_group"); + if (!join->items3.is_null() && !join->set_group_rpa) + { + join->set_group_rpa= true; + join->set_items_ref_array(join->items3); + } + if (!join->first_record || end_of_records || (idx=test_if_group_changed(join->group_fields)) >= 0) { - if (join->first_record || - (end_of_records && !join->group && !join->group_optimized_away)) + if (!join->group_sent && + (join->first_record || + (end_of_records && !join->group && !join->group_optimized_away))) { if (join->procedure) join->procedure->end_group(); @@ -19369,7 +19548,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), else { if (join->do_send_rows) - error=join->procedure->send_row(*join->fields) ? 1 : 0; + error=join->procedure->send_row(*fields) ? 
1 : 0; join->send_records++; } if (end_of_records && join->procedure->end_of_records()) @@ -19381,11 +19560,8 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { List_iterator_fast it(*join->fields); Item *item; - DBUG_PRINT("info", ("no matching rows")); - - /* No matching rows for group function */ - join->clear(); - join->no_rows_in_result_called= 1; + /* No matching rows for group function */ + join->clear(); while ((item= it++)) item->no_rows_in_result(); @@ -19396,7 +19572,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->do_send_rows) { - error= join->result->send_data(*join->fields); + error=join->result->send_data(*fields); if (error < 0) { /* Duplicate row, don't count */ @@ -19405,6 +19581,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } } join->send_records++; + join->group_sent= true; } if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) { @@ -19456,6 +19633,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); if (join->procedure) join->procedure->add(); + join->group_sent= false; DBUG_RETURN(ok_code); } } @@ -19472,16 +19650,16 @@ static enum_nested_loop_state end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { - TABLE *table=join->tmp_table; + TABLE *const table= join_tab->table; DBUG_ENTER("end_write"); if (!end_of_records) { - copy_fields(&join->tmp_table_param); - if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd)) + copy_fields(join_tab->tmp_table_param); + if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ - if (!join->having || join->having->val_int()) + if (!join_tab->having || join_tab->having->val_int()) { int error; join->found_records++; @@ -19491,15 +19669,16 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), goto end; bool is_duplicate; if (create_internal_tmp_table_from_heap(join->thd, table, - join->tmp_table_param.start_recinfo, - &join->tmp_table_param.recinfo, + join_tab->tmp_table_param->start_recinfo, + &join_tab->tmp_table_param->recinfo, error, 1, &is_duplicate)) DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error if (is_duplicate) goto end; table->s->uniques=0; // To ensure rows are the same } - if (++join->send_records >= join->tmp_table_param.end_write_records && + if (++join_tab->send_records >= + join_tab->tmp_table_param->end_write_records && join->do_send_rows) { if (!(join->select_options & OPTION_FOUND_ROWS)) @@ -19534,7 +19713,7 @@ static enum_nested_loop_state end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { - TABLE *table=join->tmp_table; + TABLE *const table= join_tab->table; ORDER *group; int error; DBUG_ENTER("end_update"); @@ -19543,7 +19722,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_OK); join->found_records++; - copy_fields(&join->tmp_table_param); // Groups are copied twice. + copy_fields(join_tab->tmp_table_param); // Groups are copied twice. 
/* Make a key of group index */ for (group=table->group ; group ; group=group->next) { @@ -19563,7 +19742,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), group->buff[-1]= (char) group->field->is_null(); } if (!table->file->ha_index_read_map(table->record[1], - join->tmp_table_param.group_buff, + join_tab->tmp_table_param->group_buff, HA_WHOLE_KEY, HA_READ_KEY_EXACT)) { /* Update old record */ @@ -19579,13 +19758,13 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } init_tmptable_sum_functions(join->sum_funcs); - if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd)) + if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if ((error= table->file->ha_write_tmp_row(table->record[0]))) { if (create_internal_tmp_table_from_heap(join->thd, table, - join->tmp_table_param.start_recinfo, - &join->tmp_table_param.recinfo, + join_tab->tmp_table_param->start_recinfo, + &join_tab->tmp_table_param->recinfo, error, 0, NULL)) DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error /* Change method to update rows */ @@ -19595,9 +19774,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); } - join->join_tab[join->top_join_tab_count-1].next_select=end_unique_update; + join_tab->aggr->set_write_func(end_unique_update); } - join->send_records++; + join_tab->send_records++; end: if (join->thd->check_killed()) { @@ -19614,7 +19793,7 @@ static enum_nested_loop_state end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { - TABLE *table=join->tmp_table; + TABLE *table= join_tab->table; int error; DBUG_ENTER("end_unique_update"); @@ -19622,12 +19801,12 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_OK); init_tmptable_sum_functions(join->sum_funcs); - copy_fields(&join->tmp_table_param); // Groups are copied twice. - if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd)) + copy_fields(join_tab->tmp_table_param); // Groups are copied twice. 
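The end_update()/end_unique_update() hunks above implement GROUP BY by keeping one row per group key in a keyed temporary table and updating the aggregate columns in place. A toy model of that strategy, with std::unordered_map standing in for the heap tmp table and a single SUM() column:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    struct GroupByTable
    {
      std::unordered_map<std::string, long long> sum_per_group;

      void put(const std::string &group_key, long long value)
      {
        auto it= sum_per_group.find(group_key);
        if (it != sum_per_group.end())
          it->second+= value;                       // "update old record"
        else
          sum_per_group.emplace(group_key, value);  // "write new row"
      }
    };

    int main()
    {
      GroupByTable t;
      t.put("a", 10);
      t.put("a", 5);
      t.put("b", 1);
      assert(t.sum_per_group["a"] == 15 && t.sum_per_group["b"] == 1);
      return 0;
    }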
+ if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (!(error= table->file->ha_write_tmp_row(table->record[0]))) - join->send_records++; // New group + join_tab->send_records++; // New group else { if ((int) table->file->get_dup_key(error) < 0) @@ -19673,7 +19852,7 @@ enum_nested_loop_state end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { - TABLE *table=join->tmp_table; + TABLE *table= join_tab->table; int idx= -1; DBUG_ENTER("end_write_group"); @@ -19687,21 +19866,21 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), int send_group_parts= join->send_group_parts; if (idx < send_group_parts) { - if (!join->first_record) - { - /* No matching rows for group function */ - join->clear(); - } + if (!join->first_record) + { + /* No matching rows for group function */ + join->clear(); + } copy_sum_funcs(join->sum_funcs, join->sum_funcs_end[send_group_parts]); - if (!join->having || join->having->val_int()) + if (!join_tab->having || join_tab->having->val_int()) { int error= table->file->ha_write_tmp_row(table->record[0]); if (error && create_internal_tmp_table_from_heap(join->thd, table, - join->tmp_table_param.start_recinfo, - &join->tmp_table_param.recinfo, - error, 0, NULL)) + join_tab->tmp_table_param->start_recinfo, + &join_tab->tmp_table_param->recinfo, + error, 0, NULL)) DBUG_RETURN(NESTED_LOOP_ERROR); } if (join->rollup.state != ROLLUP::STATE_NONE) @@ -19722,8 +19901,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } if (idx < (int) join->send_group_parts) { - copy_fields(&join->tmp_table_param); - if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd)) + copy_fields(join_tab->tmp_table_param); + if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd)) DBUG_RETURN(NESTED_LOOP_ERROR); if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) DBUG_RETURN(NESTED_LOOP_ERROR); @@ -20281,9 +20460,11 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, if (key_part->field != field || !field->part_of_sortkey.is_set(idx)) DBUG_RETURN(0); + const ORDER::enum_order keypart_order= + (key_part->key_part_flag & HA_REVERSE_SORT) ? + ORDER::ORDER_DESC : ORDER::ORDER_ASC; /* set flag to 1 if we can use read-next on key, else to -1 */ - flag= ((order->asc == !(key_part->key_part_flag & HA_REVERSE_SORT)) ? - 1 : -1); + flag= (order->direction == keypart_order) ? 1 : -1; if (reverse && flag != reverse) DBUG_RETURN(0); reverse=flag; // Remember if reverse @@ -20856,11 +21037,11 @@ check_reverse_order: and best_key doesn't, then revert the decision. 
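The direction check introduced in the test_if_order_by_key() hunk below (match ORDER BY direction against HA_REVERSE_SORT on each key part, and reject mixed directions) amounts to the following standalone sketch; the Dir enum and pair-based input are simplifications of the real ORDER and KEY_PART_INFO structures.

    #include <cassert>
    #include <utility>
    #include <vector>

    enum Dir { ASC, DESC };

    /* Every ORDER BY element must relate to its key part in the same way.
       All matching: scan the index forward (+1). All opposite: scan it
       backwards (-1). Any mix: the index cannot deliver the order (0). */
    static int usable_scan_direction(const std::vector<std::pair<Dir, Dir>> &parts)
    {
      int reverse= 0;
      for (auto &p : parts)
      {
        int flag= (p.first == p.second) ? 1 : -1;  // ORDER dir vs key part dir
        if (reverse && flag != reverse)
          return 0;                                // mixed directions: give up
        reverse= flag;
      }
      return reverse;
    }

    int main()
    {
      assert(usable_scan_direction({{ASC, ASC}, {DESC, DESC}}) == 1);   // forward
      assert(usable_scan_direction({{DESC, ASC}, {ASC, DESC}}) == -1);  // backward
      assert(usable_scan_direction({{ASC, ASC}, {ASC, DESC}}) == 0);    // unusable
      return 0;
    }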
*/ if (!table->covering_keys.is_set(best_key)) - table->disable_keyread(); + table->set_keyread(false); else { if (!table->key_read) - table->enable_keyread(); + table->set_keyread(true); } if (!quick_created) @@ -20891,7 +21072,7 @@ check_reverse_order: tab->ref.key_parts= 0; if (select_limit < table->stat_records()) tab->limit= select_limit; - table->disable_keyread(); + table->set_keyread(false); } } else if (tab->type != JT_ALL || tab->select->quick) @@ -21015,14 +21196,9 @@ use_filesort: create_sort_index() thd Thread handler join Join with table to sort - order How table should be sorted - filesort_limit Max number of rows that needs to be sorted - select_limit Max number of rows in final output - Used to decide if we should use index or not - is_order_by true if we are sorting on ORDER BY, false if GROUP BY - Used to decide if we should use index or not - - + join_tab What table to sort + fsort Filesort object. NULL means "use tab->filesort". + IMPLEMENTATION - If there is an index that can be used, the first non-const join_tab in 'join' is modified to use this index. @@ -21036,150 +21212,72 @@ use_filesort: 1 No records */ -static int -create_sort_index(THD *thd, JOIN *join, ORDER *order, - ha_rows filesort_limit, ha_rows select_limit, - bool is_order_by) +int +create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort) { - uint length; TABLE *table; SQL_SELECT *select; - JOIN_TAB *tab; bool quick_created= FALSE; SORT_INFO *file_sort= 0; DBUG_ENTER("create_sort_index"); - if (join->table_count == join->const_tables) - DBUG_RETURN(0); // One row, no need to sort - tab= join->join_tab + join->const_tables; - table= tab->table; - select= tab->select; - - JOIN_TAB *save_pre_sort_join_tab= NULL; - if (join->pre_sort_join_tab) - { - /* - we've already been in this function, and stashed away the - original access method in join->pre_sort_join_tab, restore it - now. - */ - - /* First, restore state of the handler */ - if (join->pre_sort_index != MAX_KEY) - { - if (table->file->ha_index_or_rnd_end()) - goto err; - if (join->pre_sort_idx_pushed_cond) - { - table->file->idx_cond_push(join->pre_sort_index, - join->pre_sort_idx_pushed_cond); - } - } - else - { - if (table->file->ha_index_or_rnd_end() || - table->file->ha_rnd_init(TRUE)) - goto err; - } - - /* Second, restore access method parameters */ - tab->records= join->pre_sort_join_tab->records; - tab->select= join->pre_sort_join_tab->select; - tab->select_cond= join->pre_sort_join_tab->select_cond; - tab->type= join->pre_sort_join_tab->type; - tab->read_first_record= join->pre_sort_join_tab->read_first_record; - - save_pre_sort_join_tab= join->pre_sort_join_tab; - join->pre_sort_join_tab= NULL; - } - else - { - /* - Save index #, save index condition. Do it right now, because MRR may - */ - if (table->file->inited == handler::INDEX) - { - join->pre_sort_index= table->file->active_index; - join->pre_sort_idx_pushed_cond= table->file->pushed_idx_cond; - // no need to save key_read - } - else - join->pre_sort_index= MAX_KEY; - } + if (fsort == NULL) + fsort= tab->filesort; + table= tab->table; + select= fsort->select; + /* Currently ORDER BY ... LIMIT is not supported in subqueries. */ DBUG_ASSERT(join->group_list || !join->is_in_subquery()); - /* - When there is SQL_BIG_RESULT do not sort using index for GROUP BY, - and thus force sorting on disk unless a group min-max optimization - is going to be used as it is applied now only for one table queries - with covering indexes. 
- The expections is if we are already using the index for GROUP BY - (in which case sort would be free) or ORDER and GROUP BY are different. - */ - if ((order != join->group_list || - !(join->select_options & SELECT_BIG_RESULT) || - (select && select->quick && - select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) && - test_if_skip_sort_order(tab,order,select_limit,0, - is_order_by ? &table->keys_in_use_for_order_by : - &table->keys_in_use_for_group_by)) - { - tab->update_explain_data(join->const_tables); - DBUG_RETURN(0); - } - tab->update_explain_data(join->const_tables); - - /* - Calculate length of join->order as this may be longer than 'order', - which may come from 'group by'. This is needed as join->sortorder is - used both for grouping and ordering. - */ - length= 0; - for (ORDER *ord= join->order; ord; ord= ord->next) - length++; - - if (!(join->sortorder= - make_unireg_sortorder(thd, order, &length, join->sortorder))) - goto err; /* purecov: inspected */ - table->status=0; // May be wrong if quick_select if (!tab->preread_init_done && tab->preread_init()) goto err; // If table has a range, move it to select - if (select && !select->quick && tab->ref.key >= 0) + if (select && tab->ref.key >= 0) { - if (tab->quick) + if (!select->quick) { - select->quick=tab->quick; - tab->quick=0; + if (tab->quick) + { + select->quick= tab->quick; + tab->quick= NULL; /* We can only use 'Only index' if quick key is same as ref_key and in index_merge 'Only index' cannot be used */ if (((uint) tab->ref.key != select->quick->index)) - table->disable_keyread(); + table->set_keyread(FALSE); + } + else + { + /* + We have a ref on a const; Change this to a range that filesort + can use. + For impossible ranges (like when doing a lookup on NULL on a NOT NULL + field, quick will contain an empty record set. + */ + if (!(select->quick= (tab->type == JT_FT ? + get_ft_select(thd, table, tab->ref.key) : + get_quick_select_for_ref(thd, table, &tab->ref, + tab->found_records)))) + goto err; + quick_created= TRUE; + } + fsort->own_select= true; } else { - /* - We have a ref on a const; Change this to a range that filesort - can use. - For impossible ranges (like when doing a lookup on NULL on a NOT NULL - field, quick will contain an empty record set. - */ - if (!(select->quick= (tab->type == JT_FT ? - get_ft_select(thd, table, tab->ref.key) : - get_quick_select_for_ref(thd, table, &tab->ref, - tab->found_records)))) - goto err; - quick_created= TRUE; + DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF); + // Update ref value + if ((cp_buffer_from_ref(thd, table, &tab->ref) && thd->is_fatal_error)) + goto err; // out of memory } } + /* Fill schema tables with data before filesort if it's necessary */ if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX)) @@ -21187,11 +21285,9 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, if (table->s->tmp_table) table->file->info(HA_STATUS_VARIABLE); // Get record count - file_sort= filesort(thd, table, join->sortorder, length, - select, filesort_limit, 0, - join->explain->ops_tracker.report_sorting(thd)); - DBUG_ASSERT(tab->filesort == 0); - tab->filesort= file_sort; + file_sort= filesort(thd, table, fsort, fsort->tracker); + DBUG_ASSERT(tab->filesort_result == 0); + tab->filesort_result= file_sort; tab->records= 0; if (file_sort) { @@ -21205,42 +21301,18 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, /* This will delete the quick select. 
*/ select->cleanup(); } - - if (!join->pre_sort_join_tab) - { - if (save_pre_sort_join_tab) - join->pre_sort_join_tab= save_pre_sort_join_tab; - else if (!(join->pre_sort_join_tab= (JOIN_TAB*)thd->alloc(sizeof(JOIN_TAB)))) - goto err; - } - - *(join->pre_sort_join_tab)= *tab; - - tab->select=NULL; - tab->set_select_cond(NULL, __LINE__); - tab->type=JT_ALL; // Read with normal read_record - tab->read_first_record= join_init_read_record; - tab->table->file->ha_index_or_rnd_end(); + + table->set_keyread(FALSE); // Restore if we used indexes + if (tab->type == JT_FT) + table->file->ft_end(); + else + table->file->ha_index_or_rnd_end(); DBUG_RETURN(file_sort == 0); err: DBUG_RETURN(-1); } -void JOIN::clean_pre_sort_join_tab() -{ - //TABLE *table= pre_sort_join_tab->table; - /* - Note: we can come here for fake_select_lex object. That object will have - the table already deleted by st_select_lex_unit::cleanup(). - We rely on that fake_select_lex didn't have quick select. - */ - if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick) - { - pre_sort_join_tab->select->cleanup(); - } -} - /** Compare fields from table->record[0] and table->record[1], @@ -21304,22 +21376,28 @@ static void free_blobs(Field **ptr) Rows that do not satisfy 'having' condition are also removed. */ -static int -remove_duplicates(JOIN *join, TABLE *table, List &fields, Item *having) +bool +JOIN_TAB::remove_duplicates() + { - int error; + bool error; ulong keylength= 0; uint field_count; + List *fields= (this-1)->fields; THD *thd= join->thd; DBUG_ENTER("remove_duplicates"); - join->explain->ops_tracker.report_duplicate_removal(); + + DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE); + THD_STAGE_INFO(join->thd, stage_removing_duplicates); + + //join->explain->ops_tracker.report_duplicate_removal(); table->reginfo.lock_type=TL_WRITE; /* Calculate how many saved fields there is in list */ field_count=0; - List_iterator it(fields); + List_iterator it(*fields); Item *item; while ((item=it++)) { @@ -21330,7 +21408,7 @@ remove_duplicates(JOIN *join, TABLE *table, List &fields, Item *having) if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) { // only const items with no OPTION_FOUND_ROWS join->unit->select_limit_cnt= 1; // Only send first row - DBUG_RETURN(0); + DBUG_RETURN(false); } Field **first_field=table->field+table->s->fields - field_count; @@ -21558,67 +21636,9 @@ err: } -SORT_FIELD *make_unireg_sortorder(THD *thd, ORDER *order, uint *length, - SORT_FIELD *sortorder) -{ - uint count; - SORT_FIELD *sort,*pos; - DBUG_ENTER("make_unireg_sortorder"); - - count=0; - for (ORDER *tmp = order; tmp; tmp=tmp->next) - count++; - if (!sortorder) - sortorder= (SORT_FIELD*) thd->alloc(sizeof(SORT_FIELD) * - (MY_MAX(count, *length) + 1)); - pos= sort= sortorder; - - if (!pos) - DBUG_RETURN(0); - - for (;order;order=order->next,pos++) - { - Item *const item= order->item[0], *const real_item= item->real_item(); - pos->field= 0; pos->item= 0; - if (real_item->type() == Item::FIELD_ITEM) - { - // Could be a field, or Item_direct_view_ref wrapping a field - DBUG_ASSERT(item->type() == Item::FIELD_ITEM || - (item->type() == Item::REF_ITEM && - static_cast(item)->ref_type() == - Item_ref::VIEW_REF)); - pos->field= static_cast(real_item)->field; - } - else if (real_item->type() == Item::SUM_FUNC_ITEM && - !real_item->const_item()) - { - // Aggregate, or Item_aggregate_ref - DBUG_ASSERT(item->type() == Item::SUM_FUNC_ITEM || - (item->type() == Item::REF_ITEM && - 
static_cast(item)->ref_type() == - Item_ref::AGGREGATE_REF)); - pos->field= item->get_tmp_table_field(); - } - else if (real_item->type() == Item::COPY_STR_ITEM) - { // Blob patch - pos->item= static_cast(real_item)->get_item(); - } - else - pos->item= item; - pos->reverse=! order->asc; - DBUG_ASSERT(pos->field != NULL || pos->item != NULL); - } - *length=count; - DBUG_RETURN(sort); -} - - /* eq_ref: Create the lookup key and check if it is the same as saved key - - - SYNOPSIS cmp_buffer_with_ref() tab Join tab of the accessed table @@ -21715,6 +21735,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) @param[in,out] all_fields All select, group and order by fields @param[in] is_group_field True if order is a GROUP field, false if ORDER by field + @param[in] search_in_all_fields If true then search in all_fields @retval FALSE if OK @@ -21723,9 +21744,9 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) */ static bool -find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, +find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, ORDER *order, List &fields, List &all_fields, - bool is_group_field) + bool is_group_field, bool search_in_all_fields) { Item *order_item= *order->item; /* The item from the GROUP/ORDER caluse. */ Item::Type order_item_type; @@ -21751,7 +21772,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, order_item->full_name(), thd->where); return TRUE; } - thd->change_item_tree((Item**)&order->item, (Item*)(ref_pointer_array + count - 1)); + thd->change_item_tree((Item **)&order->item, (Item *)&ref_pointer_array[count - 1]); order->in_field_list= 1; order->counter= count; order->counter_used= 1; @@ -21811,7 +21832,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, 'shadowed' a table field with the same name, the table field will be chosen over the derived field. */ - order->item= ref_pointer_array + counter; + order->item= &ref_pointer_array[counter]; order->in_field_list=1; return FALSE; } @@ -21830,6 +21851,18 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, thd->where); } } + else if (search_in_all_fields) + { + Item **found_item= find_item_in_list(order_item, all_fields, &counter, + REPORT_EXCEPT_NOT_FOUND, &resolution, + all_fields.elements - fields.elements); + if (found_item != not_found_item) + { + order->item= &ref_pointer_array[all_fields.elements-1-counter]; + order->in_field_list= 0; + return FALSE; + } + } order->in_field_list=0; /* @@ -21850,8 +21883,6 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, return TRUE; /* Wrong field. */ uint el= all_fields.elements; - DBUG_ASSERT(all_fields.elements <= - thd->lex->current_select->ref_pointer_array_size); /* Add new field to field list. */ all_fields.push_front(order_item, thd->mem_root); ref_pointer_array[el]= order_item; @@ -21866,7 +21897,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, if (order_item->type() == Item::SUM_FUNC_ITEM) ((Item_sum *)order_item)->ref_by= all_fields.head_ref(); - order->item= ref_pointer_array + el; + order->item= &ref_pointer_array[el]; return FALSE; } @@ -21878,15 +21909,22 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, the field list. 
*/ -int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, - List &fields, List &all_fields, ORDER *order) -{ +int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, + List &fields, List &all_fields, ORDER *order, + bool search_in_all_fields) +{ + enum_parsing_place parsing_place= thd->lex->current_select->parsing_place; thd->where="order clause"; for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields, FALSE)) + all_fields, FALSE, search_in_all_fields)) + return 1; + if ((*order->item)->with_window_func && parsing_place != IN_ORDER_BY) + { + my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0)); return 1; + } } return 0; } @@ -21895,18 +21933,19 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, /** Intitialize the GROUP BY list. - @param thd Thread handler - @param ref_pointer_array We store references to all fields that was + @param thd Thread handler + @param ref_pointer_array We store references to all fields that was not in 'fields' here. - @param fields All fields in the select part. Any item in + @param fields All fields in the select part. Any item in 'order' that is part of these list is replaced by a pointer to this fields. - @param all_fields Total list of all unique fields used by the + @param all_fields Total list of all unique fields used by the select. All items in 'order' that was not part of fields will be added first to this list. - @param order The fields we should do GROUP BY on. - @param hidden_group_fields Pointer to flag that is set to 1 if we added + @param order The fields we should do GROUP/PARTITION BY on + @param hidden_group_fields Pointer to flag that is set to 1 if we added any fields to all_fields. 
+ @param search_in_all_fields If true then search in all_fields @todo change ER_WRONG_FIELD_WITH_GROUP to more detailed @@ -21919,10 +21958,11 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, */ int -setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, +setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, List &fields, List &all_fields, ORDER *order, - bool *hidden_group_fields) + bool *hidden_group_fields, bool search_in_all_fields) { + enum_parsing_place parsing_place= thd->lex->current_select->parsing_place; *hidden_group_fields=0; ORDER *ord; @@ -21932,22 +21972,26 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, uint org_fields=all_fields.elements; thd->where="group statement"; - enum_parsing_place save_place= thd->lex->current_select->parsing_place; - thd->lex->current_select->parsing_place= IN_GROUP_BY; for (ord= order; ord; ord= ord->next) { if (find_order_in_list(thd, ref_pointer_array, tables, ord, fields, - all_fields, TRUE)) + all_fields, TRUE, search_in_all_fields)) return 1; (*ord->item)->marker= UNDEF_POS; /* Mark found */ - if ((*ord->item)->with_sum_func) + if ((*ord->item)->with_sum_func && parsing_place == IN_GROUP_BY) { my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name()); return 1; } + if ((*ord->item)->with_window_func) + { + if (parsing_place == IN_GROUP_BY) + my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0)); + else + my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0)); + return 1; + } } - thd->lex->current_select->parsing_place= save_place; - if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) { /* @@ -22054,14 +22098,16 @@ setup_new_fields(THD *thd, List &fields, */ ORDER * -create_distinct_group(THD *thd, Item **ref_pointer_array, +create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array, ORDER *order_list, List &fields, List &all_fields, bool *all_order_by_fields_used) { List_iterator li(fields); - Item *item, **orig_ref_pointer_array= ref_pointer_array; + Item *item; + Ref_ptr_array orig_ref_pointer_array= ref_pointer_array; ORDER *order,*group,**prev; + uint idx= 0; *all_order_by_fields_used= 1; while ((item=li++)) @@ -22108,16 +22154,14 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, Because HEAP tables can't index BIT fields we need to use an additional hidden field for grouping because later it will be converted to a LONG field. Original field will remain of the - BIT type and will be returned to a client. + BIT type and will be returned [el]client. 
*/ Item_field *new_item= new (thd->mem_root) Item_field(thd, (Item_field*)item); int el= all_fields.elements; - DBUG_ASSERT(all_fields.elements <= - thd->lex->current_select->ref_pointer_array_size); orig_ref_pointer_array[el]= new_item; all_fields.push_front(new_item, thd->mem_root); - ord->item= orig_ref_pointer_array + el; - } + ord->item=&orig_ref_pointer_array[el]; + } else { /* @@ -22125,14 +22169,14 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, simple indexing of ref_pointer_array (order in the array and in the list are same) */ - ord->item= ref_pointer_array; + ord->item= &ref_pointer_array[idx]; } - ord->asc=1; + ord->direction= ORDER::ORDER_ASC; *prev=ord; prev= &ord->next; } next_item: - ref_pointer_array++; + idx++; } *prev=0; return group; @@ -22204,7 +22248,7 @@ test_if_subpart(ORDER *a,ORDER *b) for (; a && b; a=a->next,b=b->next) { if ((*a->item)->eq(*b->item,1)) - a->asc=b->asc; + a->direction=b->direction; else return 0; } @@ -22382,9 +22426,9 @@ make_group_fields(JOIN *main_join, JOIN *curr_join) /** - Get a list of buffers for saveing last group. + Get a list of buffers for saving last group. - Groups are saved in reverse order for easyer check loop. + Groups are saved in reverse order for easier check loop. */ static bool @@ -22435,8 +22479,13 @@ int test_if_item_cache_changed(List &list) } +/* + @return + -1 - Group not changed + value>=0 - Number of the component where the group changed +*/ -static int +int test_if_group_changed(List &list) { DBUG_ENTER("test_if_group_changed"); @@ -22485,7 +22534,7 @@ test_if_group_changed(List &list) bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, - Item **ref_pointer_array, + Ref_ptr_array ref_pointer_array, List &res_selected_fields, List &res_all_fields, uint elements, List &all_fields) { @@ -22714,7 +22763,8 @@ bool JOIN::alloc_func_list() 1 error */ -bool JOIN::make_sum_func_list(List &field_list, List &send_result_set_metadata, +bool JOIN::make_sum_func_list(List &field_list, + List &send_result_set_metadata, bool before_group_by, bool recompute) { List_iterator_fast it(field_list); @@ -22769,7 +22819,7 @@ bool JOIN::make_sum_func_list(List &field_list, List &send_result_se */ static bool -change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, +change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &res_selected_fields, List &res_all_fields, uint elements, List &all_fields) @@ -22806,14 +22856,6 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, Item_field *new_field= new (thd->mem_root) Item_temptable_field(thd, field); if (!suv || !new_field) DBUG_RETURN(true); // Fatal error - /* - We are replacing the argument of Item_func_set_user_var after - its value has been read. The argument's null_value should be - set by now, so we must set it explicitly for the replacement - argument since the null_value may be read without any - preceeding call to val_*(). 
- */ - new_field->update_null_value(); List list; list.push_back(new_field, thd->mem_root); suv->set_arguments(thd, list); @@ -22887,7 +22929,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, */ static bool -change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, +change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array, List &res_selected_fields, List &res_all_fields, uint elements, List &all_fields) @@ -23240,17 +23282,23 @@ bool JOIN::rollup_init() */ tmp_table_param.group_parts= send_group_parts; - if (!(rollup.null_items= (Item_null_result**) thd->alloc((sizeof(Item*) + - sizeof(Item**) + - sizeof(List) + - ref_pointer_array_size) - * send_group_parts ))) - return 1; - - rollup.fields= (List*) (rollup.null_items + send_group_parts); - rollup.ref_pointer_arrays= (Item***) (rollup.fields + send_group_parts); + Item_null_result **null_items= + static_cast(thd->alloc(sizeof(Item*)*send_group_parts)); + + rollup.null_items= Item_null_array(null_items, send_group_parts); + rollup.ref_pointer_arrays= + static_cast + (thd->alloc((sizeof(Ref_ptr_array) + + all_fields.elements * sizeof(Item*)) * send_group_parts)); + rollup.fields= + static_cast*>(thd->alloc(sizeof(List) * send_group_parts)); + + if (!null_items || !rollup.ref_pointer_arrays || !rollup.fields) + return true; + ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts); + /* Prepare space for field list for the different levels These will be filled up in rollup_make_fields() @@ -23260,7 +23308,7 @@ bool JOIN::rollup_init() rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd); List *rollup_fields= &rollup.fields[i]; rollup_fields->empty(); - rollup.ref_pointer_arrays[i]= ref_array; + rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements); ref_array+= all_fields.elements; } for (i= 0 ; i < send_group_parts; i++) @@ -23407,11 +23455,12 @@ bool JOIN::rollup_make_fields(List &fields_arg, List &sel_fields, bool real_fields= 0; Item *item; List_iterator new_it(rollup.fields[pos]); - Item **ref_array_start= rollup.ref_pointer_arrays[pos]; + Ref_ptr_array ref_array_start= rollup.ref_pointer_arrays[pos]; ORDER *start_group; /* Point to first hidden field */ - Item **ref_array= ref_array_start + fields_arg.elements-1; + uint ref_array_ix= fields_arg.elements-1; + /* Remember where the sum functions ends for the previous level */ sum_funcs_end[pos+1]= *func; @@ -23428,7 +23477,7 @@ bool JOIN::rollup_make_fields(List &fields_arg, List &sel_fields, if (item == first_field) { real_fields= 1; // End of hidden fields - ref_array= ref_array_start; + ref_array_ix= 0; } if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item() && @@ -23472,15 +23521,15 @@ bool JOIN::rollup_make_fields(List &fields_arg, List &sel_fields, } } } - *ref_array= item; + ref_array_start[ref_array_ix]= item; if (real_fields) { (void) new_it++; // Point to next item new_it.replace(item); // Replace previous - ref_array++; + ref_array_ix++; } else - ref_array--; + ref_array_ix--; } } sum_funcs_end[0]= *func; // Point to last function @@ -23513,9 +23562,7 @@ int JOIN::rollup_send_data(uint idx) { int res= 0; /* Get reference pointers to sum functions in place */ - memcpy((char*) ref_pointer_array, - (char*) rollup.ref_pointer_arrays[i], - ref_pointer_array_size); + copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]); if ((!having || having->val_int())) { if (send_records < unit->select_limit_cnt && do_send_rows && @@ -23526,7 +23573,7 @@ int JOIN::rollup_send_data(uint idx) } } /* Restore 
ref_pointer_array */ - set_items_ref_array(current_ref_pointer_array); + set_items_ref_array(current_ref_ptrs); return 0; } @@ -23556,9 +23603,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg) for (i= send_group_parts ; i-- > idx ; ) { /* Get reference pointers to sum functions in place */ - memcpy((char*) ref_pointer_array, - (char*) rollup.ref_pointer_arrays[i], - ref_pointer_array_size); + copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]); if ((!having || having->val_int())) { int write_error; @@ -23581,7 +23626,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg) } } /* Restore ref_pointer_array */ - set_items_ref_array(current_ref_pointer_array); + set_items_ref_array(current_ref_ptrs); return 0; } @@ -23704,33 +23749,9 @@ int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table, } -/* - TODO: this function is only applicable for the first non-const optimization - join tab. -*/ - -void JOIN_TAB::update_explain_data(uint idx) -{ - if (this == join->first_breadth_first_optimization_tab() + join->const_tables && - join->select_lex->select_number != INT_MAX && - join->select_lex->select_number != UINT_MAX) - { - Explain_table_access *eta= new (join->thd->mem_root) - Explain_table_access(join->thd->mem_root); - save_explain_data(eta, join->const_table_map, join->select_distinct, - join->first_breadth_first_optimization_tab()); - - Explain_select *sel= join->thd->lex->explain-> - get_select(join->select_lex->select_number); - idx -= my_count_bits(join->eliminated_tables); - sel->replace_table(idx, eta); - } -} - - void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tables, - bool distinct, JOIN_TAB *first_top_tab) + bool distinct_arg, JOIN_TAB *first_top_tab) { int quick_type; CHARSET_INFO *cs= system_charset_info; @@ -23746,6 +23767,21 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, explain_plan= eta; eta->key.clear(); eta->quick_info= NULL; + + SQL_SELECT *tab_select; + /* + We assume that if this table does pre-sorting, then it doesn't do filtering + with SQL_SELECT. + */ + DBUG_ASSERT(!(select && filesort)); + tab_select= (filesort)? filesort->select : select; + + if (filesort) + { + eta->pre_join_sort= new Explain_aggr_filesort(thd->mem_root, + thd->lex->analyze_stmt, + filesort); + } tracker= &eta->tracker; jbuf_tracker= &eta->jbuf_tracker; @@ -23823,9 +23859,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, /* "type" column */ enum join_type tab_type= type; if ((type == JT_ALL || type == JT_HASH) && - select && select->quick && use_quick != 2) + tab_select && tab_select->quick && use_quick != 2) { - cur_quick= select->quick; + cur_quick= tab_select->quick; quick_type= cur_quick->get_type(); if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) || (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) || @@ -23860,9 +23896,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, In STRAIGHT_JOIN queries, there can be join tabs with JT_CONST type that still have quick selects. 
*/ - if (select && select->quick && tab_type != JT_CONST) + if (tab_select && tab_select->quick && tab_type != JT_CONST) { - eta->quick_info= select->quick->get_explain(thd->mem_root); + eta->quick_info= tab_select->quick->get_explain(thd->mem_root); } if (key_info) /* 'index' or 'ref' access */ @@ -23986,7 +24022,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, uint keyno= MAX_KEY; if (ref.key_parts) keyno= ref.key; - else if (select && cur_quick) + else if (tab_select && cur_quick) keyno = cur_quick->index; if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno && @@ -24008,7 +24044,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, { eta->push_extra(ET_USING); } - if (select) + if (tab_select) { if (use_quick == 2) { @@ -24018,7 +24054,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, eta->range_checked_fer-> append_possible_keys_stat(thd->mem_root, table, keys); } - else if (select->cond || + else if (tab_select->cond || (cache_select && cache_select->cond)) { const COND *pushed_cond= table->file->pushed_cond; @@ -24031,7 +24067,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, } else { - eta->where_cond= select->cond; + eta->where_cond= tab_select->cond; eta->cache_cond= cache_select? cache_select->cond : NULL; eta->push_extra(ET_USING_WHERE); } @@ -24063,7 +24099,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) { QUICK_GROUP_MIN_MAX_SELECT *qgs= - (QUICK_GROUP_MIN_MAX_SELECT *) select->quick; + (QUICK_GROUP_MIN_MAX_SELECT *) tab_select->quick; eta->push_extra(ET_USING_INDEX_FOR_GROUP_BY); eta->loose_scan_is_scanning= qgs->loose_scan_is_scanning(); } @@ -24075,14 +24111,15 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, if (quick_type == QUICK_SELECT_I::QS_TYPE_RANGE) { - explain_append_mrr_info((QUICK_RANGE_SELECT*)(select->quick), + explain_append_mrr_info((QUICK_RANGE_SELECT*)(tab_select->quick), &eta->mrr_type); if (eta->mrr_type.length() > 0) eta->push_extra(ET_USING_MRR); } - if (distinct & test_all_bits(prefix_tables, join->select_list_used_tables)) + if (shortcut_for_distinct) eta->push_extra(ET_DISTINCT); + if (loosescan_match_tab) { eta->push_extra(ET_LOOSESCAN); @@ -24149,6 +24186,60 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, } +/* + Walk through join->aggr_tables and save aggregation/grouping query plan into + an Explain_select object +*/ + +void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel) +{ + JOIN_TAB *join_tab=join->join_tab + join->top_join_tab_count; + Explain_aggr_node *prev_node; + Explain_aggr_node *node= xpl_sel->aggr_tree; + bool is_analyze= join->thd->lex->analyze_stmt; + THD *thd= join->thd; + + for (uint i= 0; i < join->aggr_tables; i++, join_tab++) + { + // Each aggregate means a temp.table + prev_node= node; + node= new Explain_aggr_tmp_table; + node->child= prev_node; + + if (join_tab->window_funcs_step) + { + Explain_aggr_node *new_node= + join_tab->window_funcs_step->save_explain_plan(thd->mem_root, + is_analyze); + if (new_node) + { + prev_node=node; + node= new_node; + node->child= prev_node; + } + } + + /* The below matches execution in join_init_read_record() */ + if (join_tab->distinct) + { + prev_node= node; + node= new Explain_aggr_remove_dups; + node->child= prev_node; + } + + if (join_tab->filesort) + { + Explain_aggr_filesort *eaf = + new Explain_aggr_filesort(thd->mem_root, is_analyze, join_tab->filesort); + prev_node= node; + node= eaf; + node->child= 
prev_node; + } + } + xpl_sel->aggr_tree= node; +} + + /* Save Query Plan Footprint @@ -24156,8 +24247,9 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, Currently, this function may be called multiple times */ -int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, - bool need_order, bool distinct, +int JOIN::save_explain_data_intern(Explain_query *output, + bool need_tmp_table_arg, + bool need_order_arg, bool distinct_arg, const char *message) { JOIN *join= this; /* Legacy: this code used to be a non-member function */ @@ -24186,7 +24278,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, explain->select_id= join->select_lex->select_number; explain->select_type= join->select_lex->type; explain->using_temporary= need_tmp; - explain->using_filesort= need_order; + explain->using_filesort= need_order_arg; /* Setting explain->message means that all other members are invalid */ explain->message= message; @@ -24203,7 +24295,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, explain->select_id= select_lex->select_number; explain->select_type= select_lex->type; explain->using_temporary= need_tmp; - explain->using_filesort= need_order; + explain->using_filesort= need_order_arg; explain->message= "Storage engine handles GROUP BY"; if (select_lex->master_unit()->derived) @@ -24223,12 +24315,8 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, xpl_sel->select_type= join->select_lex->type; if (select_lex->master_unit()->derived) xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED; - - if (need_tmp_table) - xpl_sel->using_temporary= true; - - if (need_order) - xpl_sel->using_filesort= true; + + save_agg_explain_data(this, xpl_sel); xpl_sel->exec_const_cond= exec_const_cond; xpl_sel->outer_ref_cond= outer_ref_cond; @@ -24238,7 +24326,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, xpl_sel->having= having; xpl_sel->having_value= having_value; - JOIN_TAB* const first_top_tab= join->first_breadth_first_optimization_tab(); + JOIN_TAB* const first_top_tab= join->first_breadth_first_tab(); JOIN_TAB* prev_bush_root_tab= NULL; Explain_basic_join *cur_parent= xpl_sel; @@ -24257,13 +24345,6 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, } - if (join->table_access_tabs == join->join_tab && - tab == (first_top_tab + join->const_tables) && pre_sort_join_tab) - { - saved_join_tab= tab; - tab= pre_sort_join_tab; - } - Explain_table_access *eta= (new (output->mem_root) Explain_table_access(output->mem_root)); @@ -24294,7 +24375,7 @@ int JOIN::save_explain_data_intern(Explain_query *output, bool need_tmp_table, prev_bush_root_tab= tab->bush_root_tab; cur_parent->add_table(eta, output); - tab->save_explain_data(eta, used_tables, distinct, first_top_tab); + tab->save_explain_data(eta, used_tables, distinct_arg, first_top_tab); if (saved_join_tab) tab= saved_join_tab; @@ -24354,14 +24435,6 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, DBUG_ENTER("select_describe"); /* Update the QPF with latest values of using_temporary, using_filesort */ - Explain_select *explain_sel; - uint select_nr= join->select_lex->select_number; - if ((explain_sel= thd->lex->explain->get_select(select_nr))) - { - explain_sel->using_temporary= need_tmp_table; - explain_sel->using_filesort= need_order; - } - for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit(); unit; unit= unit->next_unit()) @@ 
-24425,18 +24498,17 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) { thd->lex->current_select= first; unit->set_limit(unit->global_parameters()); - res= mysql_select(thd, &first->ref_pointer_array, - first->table_list.first, - first->with_wild, first->item_list, - first->where, - first->order_list.elements + - first->group_list.elements, - first->order_list.first, - first->group_list.first, - first->having, - thd->lex->proc_list.first, - first->options | thd->variables.option_bits | SELECT_DESCRIBE, - result, unit, first); + res= mysql_select(thd, + first->table_list.first, + first->with_wild, first->item_list, + first->where, + first->order_list.elements + first->group_list.elements, + first->order_list.first, + first->group_list.first, + first->having, + thd->lex->proc_list.first, + first->options | thd->variables.option_bits | SELECT_DESCRIBE, + result, unit, first); } DBUG_RETURN(res || thd->is_error()); } @@ -24480,6 +24552,7 @@ static void print_table_array(THD *thd, str->append(STRING_WITH_LEN(" semi join ")); else str->append(STRING_WITH_LEN(" join ")); + curr->print(thd, eliminated_tables, str, query_type); if (curr->on_expr) { @@ -25808,6 +25881,153 @@ err: DBUG_RETURN(0); } +/**************************************************************************** + AGGR_OP implementation +****************************************************************************/ + +/** + @brief Instantiate tmp table for aggregation and start index scan if needed + @todo Tmp table always would be created, even for empty result. Extend + executor to avoid tmp table creation when no rows were written + into tmp table. + @return + true error + false ok +*/ + +bool +AGGR_OP::prepare_tmp_table() +{ + TABLE *table= join_tab->table; + JOIN *join= join_tab->join; + int rc= 0; + + if (!join_tab->table->is_created()) + { + if (instantiate_tmp_table(table, join_tab->tmp_table_param->keyinfo, + join_tab->tmp_table_param->start_recinfo, + &join_tab->tmp_table_param->recinfo, + join->select_options)) + return true; + (void) table->file->extra(HA_EXTRA_WRITE_CACHE); + empty_record(table); + } + /* If it wasn't already, start index scan for grouping using table index. */ + if (!table->file->inited && table->group && + join_tab->tmp_table_param->sum_func_count && table->s->keys) + rc= table->file->ha_index_init(0, 0); + else + { + /* Start index scan in scanning mode */ + rc= table->file->ha_rnd_init(true); + } + if (rc) + { + table->file->print_error(rc, MYF(0)); + return true; + } + return false; +} + + +/** + @brief Prepare table if necessary and call write_func to save record + + @param end_of_records the end_of_record signal to pass to the writer + + @return return one of enum_nested_loop_state. +*/ + +enum_nested_loop_state +AGGR_OP::put_record(bool end_of_records) +{ + // Lasy tmp table creation/initialization + if (!join_tab->table->file->inited) + prepare_tmp_table(); + enum_nested_loop_state rc= (*write_func)(join_tab->join, join_tab, + end_of_records); + return rc; +} + + +/** + @brief Finish rnd/index scan after accumulating records, switch ref_array, + and send accumulated records further. + @return return one of enum_nested_loop_state. 
+*/ + +enum_nested_loop_state +AGGR_OP::end_send() +{ + enum_nested_loop_state rc= NESTED_LOOP_OK; + TABLE *table= join_tab->table; + JOIN *join= join_tab->join; + + // All records were stored, send them further + int tmp, new_errno= 0; + + if ((rc= put_record(true)) < NESTED_LOOP_OK) + return rc; + + if ((tmp= table->file->extra(HA_EXTRA_NO_CACHE))) + { + DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); + new_errno= tmp; + } + if ((tmp= table->file->ha_index_or_rnd_end())) + { + DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); + new_errno= tmp; + } + if (new_errno) + { + table->file->print_error(new_errno,MYF(0)); + return NESTED_LOOP_ERROR; + } + + // Update ref array + join_tab->join->set_items_ref_array(*join_tab->ref_array); + if (join_tab->window_funcs_step) + { + if (join_tab->window_funcs_step->exec(join)) + return NESTED_LOOP_ERROR; + } + + table->reginfo.lock_type= TL_UNLOCK; + + bool in_first_read= true; + while (rc == NESTED_LOOP_OK) + { + int error; + if (in_first_read) + { + in_first_read= false; + error= join_init_read_record(join_tab); + } + else + error= join_tab->read_record.read_record(&join_tab->read_record); + + if (error > 0 || (join->thd->is_error())) // Fatal error + rc= NESTED_LOOP_ERROR; + else if (error < 0) + break; + else if (join->thd->killed) // Aborted by user + { + join->thd->send_kill_message(); + rc= NESTED_LOOP_KILLED; + } + else + rc= evaluate_join_record(join, join_tab, 0); + } + + // Finish rnd scn after sending records + if (join_tab->table->file->inited) + join_tab->table->file->ha_rnd_end(); + + return rc; +} + + /** @} (end of group Query_Optimizer) */ diff --git a/sql/sql_select.h b/sql/sql_select.h index 86c5ef87d89..c143d58c2e8 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -34,6 +34,7 @@ #include "opt_range.h" /* SQL_SELECT, QUICK_SELECT_I */ #include "filesort.h" +typedef struct st_join_table JOIN_TAB; /* Values in optimize */ #define KEY_OPTIMIZE_EXISTS 1 #define KEY_OPTIMIZE_REF_OR_NULL 2 @@ -184,7 +185,7 @@ enum sj_strategy_enum typedef enum_nested_loop_state (*Next_select_func)(JOIN *, struct st_join_table *, bool); -Next_select_func setup_end_select_func(JOIN *join); +Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab); int rr_sequential(READ_RECORD *info); int rr_sequential_and_unpack(READ_RECORD *info); @@ -198,9 +199,11 @@ int rr_sequential_and_unpack(READ_RECORD *info); class JOIN_CACHE; class SJ_TMP_TABLE; class JOIN_TAB_RANGE; +class AGGR_OP; +class Filesort; typedef struct st_join_table { - st_join_table() {} /* Remove gcc warning */ + st_join_table() {} TABLE *table; TABLE_LIST *tab_list; KEYUSE *keyuse; /**< pointer to first used key */ @@ -238,7 +241,6 @@ typedef struct st_join_table { For join tabs that are inside an SJM bush: root of the bush */ st_join_table *bush_root_tab; - SORT_INFO *filesort; /* TRUE <=> This join_tab is inside an SJM bush and is the last leaf tab here */ bool last_leaf_in_bush; @@ -262,6 +264,7 @@ typedef struct st_join_table { */ uint packed_info; + // READ_RECORD::Setup_func materialize_table; READ_RECORD::Setup_func read_first_record; Next_select_func next_select; READ_RECORD read_record; @@ -348,6 +351,7 @@ typedef struct st_join_table { */ Item *cache_idx_cond; SQL_SELECT *cache_select; + AGGR_OP *aggr; JOIN *join; /* Embedding SJ-nest (may be not the direct parent), or NULL if none. 
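/*
  Illustrative sketch only (editor's analogue, not MariaDB code): the
  accumulate-then-replay pattern that AGGR_OP::put_record()/end_send()
  above implement with a real tmp table and handler calls is shown here
  with a plain std::vector standing in for the temporary table.
  put_record() lazily initializes the buffer on first use, the way
  AGGR_OP instantiates its tmp table on demand, and end_send() finishes
  accumulation and replays every stored row to a consumer callback.
  All names below are invented for the sketch.
*/
#include <functional>
#include <vector>

struct Row { int a; };

class AggrBuffer
{
public:
  // Store one row; the buffer is set up lazily on the first call,
  // mirroring AGGR_OP::put_record() creating the tmp table on demand.
  void put_record(const Row &r)
  {
    if (!inited)
      prepare();                       // lazy "tmp table" creation
    rows.push_back(r);
  }

  // End of input: replay all accumulated rows to 'consumer', the analogue
  // of switching the ref array and re-reading the tmp table in end_send().
  // Returns true on error (consumer returns false), false on success.
  bool end_send(const std::function<bool(const Row&)> &consumer)
  {
    for (const Row &r : rows)
      if (!consumer(r))
        return true;                   // propagate consumer failure
    rows.clear();                      // rough "ha_rnd_end()" equivalent
    return false;
  }

private:
  void prepare() { rows.reserve(16); inited= true; }
  std::vector<Row> rows;
  bool inited= false;
};
/*
  Usage note: a caller would feed put_record() once per joined row and then
  call end_send() with a lambda that sends the row to the next stage; the
  two-phase shape is the point, not the container choice.
*/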
@@ -414,6 +418,46 @@ typedef struct st_join_table { /* NestedOuterJoins: Bitmap of nested joins this table is part of */ nested_join_map embedding_map; + /* Tmp table info */ + TMP_TABLE_PARAM *tmp_table_param; + + /* Sorting related info */ + Filesort *filesort; + SORT_INFO *filesort_result; + + /* + Non-NULL value means this join_tab must do window function computation + before reading. + */ + Window_funcs_computation* window_funcs_step; + + /** + List of topmost expressions in the select list. The *next* JOIN TAB + in the plan should use it to obtain correct values. Same applicable to + all_fields. These lists are needed because after tmp tables functions + will be turned to fields. These variables are pointing to + tmp_fields_list[123]. Valid only for tmp tables and the last non-tmp + table in the query plan. + @see JOIN::make_tmp_tables_info() + */ + List *fields; + /** List of all expressions in the select list */ + List *all_fields; + /* + Pointer to the ref array slice which to switch to before sending + records. Valid only for tmp tables. + */ + Ref_ptr_array *ref_array; + + /** Number of records saved in tmp table */ + ha_rows send_records; + + /** HAVING condition for checking prior saving a record into tmp table*/ + Item *having; + + /** TRUE <=> remove duplicates on this table. */ + bool distinct; + /* Semi-join strategy to be used for this join table. This is a copy of POSITION::sj_strategy field. This field is set up by the @@ -428,9 +472,9 @@ typedef struct st_join_table { void cleanup(); inline bool is_using_loose_index_scan() { - return (select && select->quick && - (select->quick->get_type() == - QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)); + const SQL_SELECT *sel= filesort ? filesort->select : select; + return (sel && sel->quick && + (sel->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)); } bool is_using_agg_loose_index_scan () { @@ -565,16 +609,22 @@ typedef struct st_join_table { void save_explain_data(Explain_table_access *eta, table_map prefix_tables, bool distinct, struct st_join_table *first_top_tab); - void update_explain_data(uint idx); + bool use_order() const; ///< Use ordering provided by chosen index? + bool sort_table(); + bool remove_duplicates(); + } JOIN_TAB; #include "sql_join_cache.h" -enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool - end_of_records); -enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool - end_of_records); +enum_nested_loop_state +sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +enum_nested_loop_state +sub_select(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +enum_nested_loop_state +sub_select_postjoin_aggr(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); + enum_nested_loop_state end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records); @@ -869,12 +919,14 @@ typedef struct st_position Sj_materialization_picker sjmat_picker; } POSITION; +typedef Bounds_checked_array Item_null_array; + typedef struct st_rollup { enum State { STATE_NONE, STATE_INITED, STATE_READY }; State state; - Item_null_result **null_items; - Item ***ref_pointer_arrays; + Item_null_array null_items; + Ref_ptr_array *ref_pointer_arrays; List *fields; } ROLLUP; @@ -888,6 +940,56 @@ public: class Pushdown_query; +/** + @brief + Class to perform postjoin aggregation operations + + @details + The result records are obtained on the put_record() call. 
+ The aggrgation process is determined by the write_func, it could be: + end_write Simply store all records in tmp table. + end_write_group Perform grouping using join->group_fields, + records are expected to be sorted. + end_update Perform grouping using the key generated on tmp + table. Input records aren't expected to be sorted. + Tmp table uses the heap engine + end_update_unique Same as above, but the engine is myisam. + + Lazy table initialization is used - the table will be instantiated and + rnd/index scan started on the first put_record() call. + +*/ + +class AGGR_OP :public Sql_alloc +{ +public: + JOIN_TAB *join_tab; + + AGGR_OP(JOIN_TAB *tab) : join_tab(tab), write_func(NULL) + {}; + + enum_nested_loop_state put_record() { return put_record(false); }; + /* + Send the result of operation further (to a next operation/client) + This function is called after all records were put into tmp table. + + @return return one of enum_nested_loop_state values. + */ + enum_nested_loop_state end_send(); + /** write_func setter */ + void set_write_func(Next_select_func new_write_func) + { + write_func= new_write_func; + } + +private: + /** Write function that would be used for saving records in tmp table. */ + Next_select_func write_func; + enum_nested_loop_state put_record(bool end_of_records); + bool prepare_tmp_table(); +}; + + class JOIN :public Sql_alloc { private: @@ -956,33 +1058,11 @@ protected: public: JOIN_TAB *join_tab, **best_ref; - - /* - Saved join_tab for pre_sorting. create_sort_index() will save here.. - */ - JOIN_TAB *pre_sort_join_tab; - uint pre_sort_index; - Item *pre_sort_idx_pushed_cond; - void clean_pre_sort_join_tab(); /* List of fields that aren't under an aggregate function */ List non_agg_fields; - /* - For "Using temporary+Using filesort" queries, JOIN::join_tab can point to - either: - 1. array of join tabs describing how to run the select, or - 2. array of single join tab describing read from the temporary table. - - SHOW EXPLAIN code needs to read/show #1. This is why two next members are - there for saving it. - */ - JOIN_TAB *table_access_tabs; - uint top_table_access_tabs_count; - JOIN_TAB **map2table; ///< mapping between table indexes and JOIN_TABs - JOIN_TAB *join_tab_save; ///< saved join_tab for subquery reexecution - List join_tab_ranges; /* @@ -1013,13 +1093,8 @@ public: We keep it here so that it is saved/restored with JOIN::restore_tmp. */ uint top_join_tab_count; + uint aggr_tables; ///< Number of post-join tmp tables uint send_group_parts; - /* - This counts how many times do_select() was invoked for this JOIN. - It's used to restrict Pushdown_query::execute() only to the first - do_select() invocation. - */ - uint do_select_call_count; /* True if the query has GROUP BY. (that is, if group_by != NULL. 
when DISTINCT is converted into GROUP BY, it @@ -1125,6 +1200,7 @@ public: */ table_map complex_firstmatch_tables; + Next_select_func first_select; /* The cost of best complete join plan found so far during optimization, after optimization phase - cost of picked join order (not taking into @@ -1140,9 +1216,6 @@ public: double join_record_count; List *fields; List group_fields, group_fields_cache; - TABLE *tmp_table; - /// used to store 2 possible tmp table of SELECT - TABLE *exec_tmp_table1, *exec_tmp_table2; THD *thd; Item_sum **sum_funcs, ***sum_funcs_end; /** second copy of sumfuncs (for queries with 2 temporary tables */ @@ -1151,6 +1224,8 @@ public: Item *having; Item *tmp_having; ///< To store having when processed temporary table Item *having_history; ///< Store having for explain + ORDER *group_list_for_estimates; + bool having_is_correlated; ulonglong select_options; /* Bitmap of allowed types of the join caches that @@ -1189,26 +1264,6 @@ public: */ bool filesort_found_rows; - /** - Copy of this JOIN to be used with temporary tables. - - tmp_join is used when the JOIN needs to be "reusable" (e.g. in a - subquery that gets re-executed several times) and we know will use - temporary tables for materialization. The materialization to a - temporary table overwrites the JOIN structure to point to the - temporary table after the materialization is done. This is where - tmp_join is used : it's a copy of the JOIN before the - materialization and is used in restoring before re-execution by - overwriting the current JOIN structure with the saved copy. - Because of this we should pay extra care of not freeing up helper - structures that are referenced by the original contents of the - JOIN. We can check for this by making sure the "current" join is - not the temporary copy, e.g. !tmp_join || tmp_join != join - - We should free these sub-structures at JOIN::destroy() if the - "current" join has a copy is not that copy. - */ - JOIN *tmp_join; ROLLUP rollup; ///< Used with rollup bool mixed_implicit_grouping; @@ -1230,6 +1285,19 @@ public: GROUP/ORDER BY. */ bool simple_order, simple_group; + + /* + ordered_index_usage is set if an ordered index access + should be used instead of a filesort when computing + ORDER/GROUP BY. + */ + enum + { + ordered_index_void, // No ordered index avail. + ordered_index_group_by, // Use index for GROUP BY + ordered_index_order_by // Use index for ORDER BY + } ordered_index_usage; + /** Is set only in case if we have a GROUP BY clause and no ORDER BY after constant elimination of 'order'. @@ -1282,10 +1350,19 @@ public: List exec_const_order_group_cond; SQL_SELECT *select; ///ref_pointer_array contains five "slices" of the same length: + |========|========|========|========|========| + ref_ptrs items0 items1 items2 items3 + */ + Ref_ptr_array ref_ptrs; + // Copy of the initial slice above, to be used with different lists + Ref_ptr_array items0, items1, items2, items3; + // Used by rollup, to restore ref_ptrs after overwriting it. + Ref_ptr_array current_ref_ptrs; + const char *zero_result_cause; ///< not 0 if exec must return zero result bool union_part; ///< this subselect is part of union @@ -1312,20 +1389,12 @@ public: /* SJM nests that are executed with SJ-Materialization strategy */ List sjm_info_list; - /* - storage for caching buffers allocated during query execution. 
- These buffers allocations need to be cached as the thread memory pool is - cleared only at the end of the execution of the whole query and not caching - allocations that occur in repetition at execution time will result in - excessive memory usage. - Note: make_simple_join always creates an execution plan that accesses - a single table, thus it is sufficient to have a one-element array for - table_reexec. - */ - SORT_FIELD *sortorder; // make_unireg_sortorder() - TABLE *table_reexec[1]; // make_simple_join() - JOIN_TAB *join_tab_reexec; // make_simple_join() - /* end of allocation caching storage */ + /** TRUE <=> ref_pointer_array is set to items3. */ + bool set_group_rpa; + /** Exec time only: TRUE <=> current group has been sent */ + bool group_sent; + + JOIN_TAB *sort_and_group_aggr_tab; JOIN(THD *thd_arg, List &fields_arg, ulonglong select_options_arg, select_result *result_arg) @@ -1337,12 +1406,13 @@ public: void init(THD *thd_arg, List &fields_arg, ulonglong select_options_arg, select_result *result_arg) { - join_tab= join_tab_save= 0; + join_tab= 0; table= 0; table_count= 0; top_join_tab_count= 0; const_tables= 0; const_table_map= 0; + aggr_tables= 0; eliminated_tables= 0; join_list= 0; implicit_grouping= FALSE; @@ -1352,25 +1422,21 @@ public: send_records= 0; found_records= 0; fetch_limit= HA_POS_ERROR; - join_examined_rows= 0; - exec_tmp_table1= 0; - exec_tmp_table2= 0; - sortorder= 0; - table_reexec[0]= 0; - join_tab_reexec= 0; thd= thd_arg; sum_funcs= sum_funcs2= 0; procedure= 0; having= tmp_having= having_history= 0; + having_is_correlated= false; + group_list_for_estimates= 0; select_options= select_options_arg; result= result_arg; lock= thd_arg->lock; select_lex= 0; //for safety - tmp_join= 0; select_distinct= MY_TEST(select_options & SELECT_DISTINCT); no_order= 0; simple_order= 0; simple_group= 0; + ordered_index_usage= ordered_index_void; need_distinct= 0; skip_sort_order= 0; need_tmp= 0; @@ -1378,8 +1444,11 @@ public: error= 0; select= 0; return_tab= 0; - ref_pointer_array= items0= items1= items2= items3= 0; - ref_pointer_array_size= 0; + ref_ptrs.reset(); + items0.reset(); + items1.reset(); + items2.reset(); + items3.reset(); zero_result_cause= 0; optimized= 0; have_query_plan= QEP_NOT_PRESENT_YET; @@ -1393,8 +1462,6 @@ public: positions= best_positions= 0; pushdown_query= 0; original_join_tab= 0; - do_select_call_count= 0; - explain= NULL; all_fields= fields_arg; @@ -1407,22 +1474,21 @@ public: rollup.state= ROLLUP::STATE_NONE; no_const_tables= FALSE; + first_select= sub_select; + set_group_rpa= false; + group_sent= 0; + outer_ref_cond= pseudo_bits_cond= NULL; in_to_exists_where= NULL; in_to_exists_having= NULL; - pre_sort_join_tab= NULL; emb_sjm_nest= NULL; sjm_lookup_tables= 0; - - /* - The following is needed because JOIN::cleanup(true) may be called for - joins for which JOIN::optimize was aborted with an error before a proper - query plan was produced - */ - table_access_tabs= NULL; } - int prepare(Item ***rref_pointer_array, TABLE_LIST *tables, uint wind_num, + /* True if the plan guarantees that it will be returned zero or one row */ + bool only_const_tables() { return const_tables == table_count; } + + int prepare(TABLE_LIST *tables, uint wind_num, COND *conds, uint og_num, ORDER *order, bool skip_order_by, ORDER *group, Item *having, ORDER *proc_param, SELECT_LEX *select, SELECT_LEX_UNIT *unit); @@ -1432,7 +1498,9 @@ public: int reinit(); int init_execution(); void exec(); + void exec_inner(); + bool prepare_result(List **columns_list); int destroy(); void 
restore_tmp(); bool alloc_func_list(); @@ -1442,16 +1510,42 @@ public: bool make_sum_func_list(List &all_fields, List &send_fields, bool before_group_by, bool recompute= FALSE); - inline void set_items_ref_array(Item **ptr) + /// Initialzes a slice, see comments for ref_ptrs above. + Ref_ptr_array ref_ptr_array_slice(size_t slice_num) { - memcpy((char*) ref_pointer_array, (char*) ptr, ref_pointer_array_size); - current_ref_pointer_array= ptr; + size_t slice_sz= select_lex->ref_pointer_array.size() / 5U; + DBUG_ASSERT(select_lex->ref_pointer_array.size() % 5 == 0); + DBUG_ASSERT(slice_num < 5U); + return Ref_ptr_array(&select_lex->ref_pointer_array[slice_num * slice_sz], + slice_sz); } - inline void init_items_ref_array() + + /** + Overwrites one slice with the contents of another slice. + In the normal case, dst and src have the same size(). + However: the rollup slices may have smaller size than slice_sz. + */ + void copy_ref_ptr_array(Ref_ptr_array dst_arr, Ref_ptr_array src_arr) + { + DBUG_ASSERT(dst_arr.size() >= src_arr.size()); + void *dest= dst_arr.array(); + const void *src= src_arr.array(); + memcpy(dest, src, src_arr.size() * src_arr.element_size()); + } + + /// Overwrites 'ref_ptrs' and remembers the the source as 'current'. + void set_items_ref_array(Ref_ptr_array src_arr) { - items0= ref_pointer_array + all_fields.elements; - memcpy(items0, ref_pointer_array, ref_pointer_array_size); - current_ref_pointer_array= items0; + copy_ref_ptr_array(ref_ptrs, src_arr); + current_ref_ptrs= src_arr; + } + + /// Initializes 'items0' and remembers that it is 'current'. + void init_items_ref_array() + { + items0= ref_ptr_array_slice(1); + copy_ref_ptr_array(items0, ref_ptrs); + current_ref_ptrs= items0; } bool rollup_init(); @@ -1460,18 +1554,10 @@ public: Item_sum ***func); int rollup_send_data(uint idx); int rollup_write_data(uint idx, TABLE *table); - /** - Release memory and, if possible, the open tables held by this execution - plan (and nested plans). It's used to release some tables before - the end of execution in order to increase concurrency and reduce - memory consumption. - */ void join_free(); /** Cleanup this JOIN, possibly for reuse */ void cleanup(bool full); void clear(); - bool save_join_tab(); - bool init_save_join_tab(); bool send_row_on_empty_set() { return (do_send_rows && implicit_grouping && !group_optimized_away && @@ -1490,6 +1576,8 @@ public: return (table_map(1) << table_count) - 1; } void drop_unused_derived_keys(); + bool get_best_combination(); + bool add_sorting_to_table(JOIN_TAB *tab, ORDER *order); inline void eval_select_list_used_tables(); /* Return the table for which an index scan can be used to satisfy @@ -1551,16 +1639,44 @@ public: int save_explain_data_intern(Explain_query *output, bool need_tmp_table, bool need_order, bool distinct, const char *message); - JOIN_TAB *first_breadth_first_optimization_tab() { return table_access_tabs; } - JOIN_TAB *first_breadth_first_execution_tab() { return join_tab; } + JOIN_TAB *first_breadth_first_tab() { return join_tab; } private: + /** + Create a temporary table to be used for processing DISTINCT/ORDER + BY/GROUP BY. + + @note Will modify JOIN object wrt sort/group attributes + + @param tab the JOIN_TAB object to attach created table to + @param tmp_table_fields List of items that will be used to define + column types of the table. + @param tmp_table_group Group key to use for temporary table, NULL if none. 
+ @param save_sum_fields If true, do not replace Item_sum items in + @c tmp_fields list with Item_field items referring + to fields in temporary table. + + @returns false on success, true on failure + */ + bool create_postjoin_aggr_table(JOIN_TAB *tab, List *tmp_table_fields, + ORDER *tmp_table_group, + bool save_sum_fields, + bool distinct, + bool keep_row_ordermake); + /** + Optimize distinct when used on a subset of the tables. + + E.g.,: SELECT DISTINCT t1.a FROM t1,t2 WHERE t1.b=t2.b + In this case we can stop scanning t2 when we have found one t1.a + */ + void optimize_distinct(); + /** TRUE if the query contains an aggregate function but has no GROUP BY clause. */ bool implicit_grouping; - bool make_simple_join(JOIN *join, TABLE *tmp_table); void cleanup_item_list(List &items) const; + bool make_aggr_tables_info(); }; enum enum_with_bush_roots { WITH_BUSH_ROOTS, WITHOUT_BUSH_ROOTS}; @@ -1585,7 +1701,7 @@ extern const char *join_type_str[]; void count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param, List &fields, bool reset_with_sum_func); bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, - Item **ref_pointer_array, + Ref_ptr_array ref_pointer_array, List &new_list1, List &new_list2, uint elements, List &fields); void copy_fields(TMP_TABLE_PARAM *param); @@ -1826,19 +1942,20 @@ int safe_index_read(JOIN_TAB *tab); int get_quick_record(SQL_SELECT *select); SORT_FIELD * make_unireg_sortorder(THD *thd, ORDER *order, uint *length, SORT_FIELD *sortorder); -int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, - List &fields, List &all_fields, ORDER *order); -int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, +int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, + List &fields, List &all_fields, ORDER *order, + bool search_in_all_fields= true); +int setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, List &fields, List &all_fields, ORDER *order, - bool *hidden_group_fields); + bool *hidden_group_fields, bool search_in_all_fields= true); bool fix_inner_refs(THD *thd, List &all_fields, SELECT_LEX *select, - Item **ref_pointer_array); + Ref_ptr_array ref_pointer_array); int join_read_key2(THD *thd, struct st_join_table *tab, TABLE *table, struct st_table_ref *table_ref); bool handle_select(THD *thd, LEX *lex, select_result *result, ulong setup_tables_done_option); -bool mysql_select(THD *thd, Item ***rref_pointer_array, +bool mysql_select(THD *thd, TABLE_LIST *tables, uint wild_num, List &list, COND *conds, uint og_num, ORDER *order, ORDER *group, Item *having, ORDER *proc_param, ulonglong select_type, @@ -2154,4 +2271,7 @@ public: int execute(JOIN *join); }; +bool test_if_order_compatible(SQL_I_List &a, SQL_I_List &b); +int test_if_group_changed(List &list); +int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); #endif /* SQL_SELECT_INCLUDED */ diff --git a/sql/sql_show.cc b/sql/sql_show.cc index aa2b47fa4b7..f41fb394b47 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2960,8 +2960,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) thread in this thread. However it's better that we notice it eventually than hide it. 
*/ - table->field[12]->store((longlong) (tmp->status_var.local_memory_used + - sizeof(THD)), + table->field[12]->store((longlong) tmp->status_var.local_memory_used, FALSE); table->field[12]->set_notnull(); table->field[13]->store((longlong) tmp->get_examined_row_count(), TRUE); @@ -3044,7 +3043,7 @@ int add_status_vars(SHOW_VAR *list) if (status_vars_inited) mysql_mutex_lock(&LOCK_show_status); if (!all_status_vars.buffer && // array is not allocated yet - do it now - my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20, MYF(0))) + my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 250, 50, MYF(0))) { res= 1; goto err; @@ -3257,7 +3256,8 @@ static bool show_status_array(THD *thd, const char *wild, */ for (var=variables; var->type == SHOW_FUNC || var->type == SHOW_SIMPLE_FUNC; var= &tmp) - ((mysql_show_var_func)(var->value))(thd, &tmp, buff, scope); + ((mysql_show_var_func)(var->value))(thd, &tmp, buff, + status_var, scope); SHOW_TYPE show_type=var->type; if (show_type == SHOW_ARRAY) @@ -3389,10 +3389,14 @@ end: DBUG_RETURN(res); } -/* collect status for all running threads */ +/* + collect status for all running threads + Return number of threads used +*/ -void calc_sum_of_all_status(STATUS_VAR *to) +uint calc_sum_of_all_status(STATUS_VAR *to) { + uint count= 0; DBUG_ENTER("calc_sum_of_all_status"); /* Ensure that thread id not killed during loop */ @@ -3403,16 +3407,21 @@ void calc_sum_of_all_status(STATUS_VAR *to) /* Get global values as base */ *to= global_status_var; + to->local_memory_used= 0; /* Add to this status from existing threads */ while ((tmp= it++)) { + count++; if (!tmp->status_in_global) + { add_to_status(to, &tmp->status_var); + to->local_memory_used+= tmp->status_var.local_memory_used; + } } mysql_mutex_unlock(&LOCK_thread_count); - DBUG_VOID_RETURN; + DBUG_RETURN(count); } diff --git a/sql/sql_show.h b/sql/sql_show.h index 9dae78e7f0e..dbae2a42b39 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -102,7 +102,7 @@ bool mysqld_show_authors(THD *thd); bool mysqld_show_contributors(THD *thd); bool mysqld_show_privileges(THD *thd); char *make_backup_log_name(char *buff, const char *name, const char* log_ext); -void calc_sum_of_all_status(STATUS_VAR *to); +uint calc_sum_of_all_status(STATUS_VAR *to); void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user, const LEX_STRING *definer_host); int add_status_vars(SHOW_VAR *list); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index dad51139af3..758757ea7dd 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -9351,7 +9351,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, int error= 1; Copy_field *copy= NULL, *copy_end; ha_rows found_count= 0, delete_count= 0; - SORT_FIELD *sortorder; SORT_INFO *file_sort= 0; READ_RECORD info; TABLE_LIST tables; @@ -9441,7 +9440,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, } else { - uint length= 0; bzero((char *) &tables, sizeof(tables)); tables.table= from; tables.alias= tables.table_name= from->s->table_name.str; @@ -9449,14 +9447,14 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, THD_STAGE_INFO(thd, stage_sorting); Filesort_tracker dummy_tracker(false); + Filesort fsort(order, HA_POS_ERROR, true, NULL); + if (thd->lex->select_lex.setup_ref_array(thd, order_num) || setup_order(thd, thd->lex->select_lex.ref_pointer_array, - &tables, fields, all_fields, order) || - !(sortorder= make_unireg_sortorder(thd, order, &length, NULL)) || - !(file_sort= filesort(thd, from, sortorder, length, - NULL, HA_POS_ERROR, 
- true, - &dummy_tracker))) + &tables, fields, all_fields, order)) + goto err; + + if (!(file_sort= filesort(thd, from, &fsort, &dummy_tracker))) goto err; } thd_progress_next_stage(thd); @@ -9670,6 +9668,18 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool table_copy) } +static void flush_checksum(ha_checksum *row_crc, uchar **checksum_start, + size_t *checksum_length) +{ + if (*checksum_start) + { + *row_crc= my_checksum(*row_crc, *checksum_start, *checksum_length); + *checksum_start= NULL; + *checksum_length= 0; + } +} + + bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { @@ -9746,23 +9756,23 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, if (!(check_opt->flags & T_EXTEND) && (((t->file->ha_table_flags() & HA_HAS_OLD_CHECKSUM) && thd->variables.old_mode) || ((t->file->ha_table_flags() & HA_HAS_NEW_CHECKSUM) && !thd->variables.old_mode))) - protocol->store((ulonglong)t->file->checksum()); + protocol->store((ulonglong)t->file->checksum()); else if (check_opt->flags & T_QUICK) - protocol->store_null(); + protocol->store_null(); else { - /* calculating table's checksum */ - ha_checksum crc= 0; + /* calculating table's checksum */ + ha_checksum crc= 0; uchar null_mask=256 - (1 << t->s->last_null_bit_pos); t->use_all_columns(); - if (t->file->ha_rnd_init(1)) - protocol->store_null(); - else - { - for (;;) - { + if (t->file->ha_rnd_init(1)) + protocol->store_null(); + else + { + for (;;) + { if (thd->killed) { /* @@ -9773,7 +9783,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, thd->protocol->remove_last_row(); goto err; } - ha_checksum row_crc= 0; + ha_checksum row_crc= 0; int error= t->file->ha_rnd_next(t->record[0]); if (unlikely(error)) { @@ -9781,22 +9791,27 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, continue; break; } - if (t->s->null_bytes) + if (t->s->null_bytes) { /* fix undefined null bits */ t->record[0][t->s->null_bytes-1] |= null_mask; if (!(t->s->db_create_options & HA_OPTION_PACK_RECORD)) t->record[0][0] |= 1; - row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes); + row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes); } - for (uint i= 0; i < t->s->fields; i++ ) - { - Field *f= t->field[i]; + uchar *checksum_start= NULL; + size_t checksum_length= 0; + for (uint i= 0; i < t->s->fields; i++ ) + { + Field *f= t->field[i]; if (! thd->variables.old_mode && f->is_real_null(0)) + { + flush_checksum(&row_crc, &checksum_start, &checksum_length); continue; + } /* BLOB and VARCHAR have pointers in their field, we must convert to string; GEOMETRY is implemented on top of BLOB. 
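For context (my reading of the new flush_checksum() helper above): runs of adjacent fixed-size, non-NULL fields are now folded into a single my_checksum() call, and the pending range is flushed whenever a NULL value or a pointer-based type (BLOB, VARCHAR, GEOMETRY, BIT) is reached. A hypothetical statement exercising this path — table name, columns and data are made up for illustration:

CREATE TABLE t1 (a INT NOT NULL, b CHAR(8) NOT NULL, c VARCHAR(32), d BLOB);
INSERT INTO t1 VALUES (1, 'x', 'y', NULL), (2, 'z', NULL, 'blob data');
-- EXTENDED forces the row-by-row computation shown above; a and b can be
-- checksummed as one contiguous range, while c and d still go field by field.
CHECKSUM TABLE t1 EXTENDED;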
@@ -9808,6 +9823,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_BIT: { + flush_checksum(&row_crc, &checksum_start, &checksum_length); String tmp; f->val_str(&tmp); row_crc= my_checksum(row_crc, (uchar*) tmp.ptr(), @@ -9815,16 +9831,20 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, break; } default: - row_crc= my_checksum(row_crc, f->ptr, f->pack_length()); + if (!checksum_start) + checksum_start= f->ptr; + DBUG_ASSERT(checksum_start + checksum_length == f->ptr); + checksum_length+= f->pack_length(); break; - } - } + } + } + flush_checksum(&row_crc, &checksum_start, &checksum_length); - crc+= row_crc; - } - protocol->store((ulonglong)crc); + crc+= row_crc; + } + protocol->store((ulonglong)crc); t->file->ha_rnd_end(); - } + } } trans_rollback_stmt(thd); close_thread_tables(thd); diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 642cf208908..50d51dcc8cc 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -559,16 +559,19 @@ C_MODE_END void mysql_print_status() { char current_dir[FN_REFLEN]; + char llbuff[10][22]; STATUS_VAR tmp; + uint count; - calc_sum_of_all_status(&tmp); + count= calc_sum_of_all_status(&tmp); printf("\nStatus information:\n\n"); (void) my_getwd(current_dir, sizeof(current_dir),MYF(0)); printf("Current dir: %s\n", current_dir); - printf("Running threads: %d Stack size: %ld\n", thread_count, + printf("Running threads: %d Cached threads: %lu Stack size: %ld\n", + count, cached_thread_count, (long) my_thread_stack_size); +#ifdef EXTRA_DEBUG thr_print_locks(); // Write some debug info -#ifndef DBUG_OFF print_cached_tables(); #endif /* Print key cache status */ @@ -614,28 +617,33 @@ Next alarm time: %lu\n", #ifdef HAVE_MALLINFO struct mallinfo info= mallinfo(); printf("\nMemory status:\n\ -Non-mmapped space allocated from system: %d\n\ -Number of free chunks: %d\n\ -Number of fastbin blocks: %d\n\ -Number of mmapped regions: %d\n\ -Space in mmapped regions: %d\n\ -Maximum total allocated space: %d\n\ -Space available in freed fastbin blocks: %d\n\ -Total allocated space: %d\n\ -Total free space: %d\n\ -Top-most, releasable space: %d\n\ -Estimated memory (with thread stack): %ld\n", - (int) info.arena , - (int) info.ordblks, - (int) info.smblks, - (int) info.hblks, - (int) info.hblkhd, - (int) info.usmblks, - (int) info.fsmblks, - (int) info.uordblks, - (int) info.fordblks, - (int) info.keepcost, - (long) (thread_count * my_thread_stack_size + info.hblkhd + info.arena)); +Non-mmapped space allocated from system: %s\n\ +Number of free chunks: %lu\n\ +Number of fastbin blocks: %lu\n\ +Number of mmapped regions: %lu\n\ +Space in mmapped regions: %s\n\ +Maximum total allocated space: %s\n\ +Space available in freed fastbin blocks: %s\n\ +Total allocated space: %s\n\ +Total free space: %s\n\ +Top-most, releasable space: %s\n\ +Estimated memory (with thread stack): %s\n\ +Global memory allocated by server: %s\n\ +Memory allocated by threads: %s\n", + llstr(info.arena, llbuff[0]), + (ulong) info.ordblks, + (ulong) info.smblks, + (ulong) info.hblks, + llstr(info.hblkhd, llbuff[1]), + llstr(info.usmblks, llbuff[2]), + llstr(info.fsmblks, llbuff[3]), + llstr(info.uordblks, llbuff[4]), + llstr(info.fordblks, llbuff[5]), + llstr(info.keepcost, llbuff[6]), + llstr((count + cached_thread_count)* my_thread_stack_size + info.hblkhd + info.arena, llbuff[7]), + llstr(tmp.global_memory_used, llbuff[8]), + llstr(tmp.local_memory_used, llbuff[9])); + #endif #ifdef HAVE_EVENT_SCHEDULER diff --git a/sql/sql_trigger.cc 
b/sql/sql_trigger.cc index 7a61279fc9c..63093620805 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -1581,7 +1581,6 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, DBUG_RETURN(0); err_with_lex_cleanup: - // QQ: anything else ? lex_end(&lex); thd->lex= old_lex; thd->spcont= save_spcont; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 5685c90850a..87b836f40d9 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -436,8 +436,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, can_skip_order_by= is_union_select && !(sl->braces && sl->explicit_limit); - saved_error= join->prepare(&sl->ref_pointer_array, - sl->table_list.first, + saved_error= join->prepare(sl->table_list.first, sl->with_wild, sl->where, (can_skip_order_by ? 0 : @@ -646,8 +645,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, fake_select_lex->n_child_sum_items+= global_parameters()->n_sum_items; saved_error= fake_select_lex->join-> - prepare(&fake_select_lex->ref_pointer_array, - fake_select_lex->table_list.first, + prepare(fake_select_lex->table_list.first, 0, 0, global_parameters()->order_list.elements, // og_num global_parameters()->order_list.first, // order @@ -702,7 +700,7 @@ bool st_select_lex_unit::optimize() { item->assigned(0); // We will reinit & rexecute unit item->reset(); - if (table->created) + if (table->is_created()) { table->file->ha_delete_all_rows(); table->file->info(HA_STATUS_VARIABLE); @@ -946,13 +944,13 @@ bool st_select_lex_unit::exec() Don't add more sum_items if we have already done JOIN::prepare for this (with a different join object) */ - if (!fake_select_lex->ref_pointer_array) + if (fake_select_lex->ref_pointer_array.is_null()) fake_select_lex->n_child_sum_items+= global_parameters()->n_sum_items; if (!was_executed) save_union_explain_part2(thd->lex->explain); - saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array, + saved_error= mysql_select(thd, &result_table_list, 0, item_list, NULL, global_parameters()->order_list.elements, @@ -975,7 +973,7 @@ bool st_select_lex_unit::exec() to reset them back, we re-do all of the actions (yes it is ugly): */ // psergey-todo: is the above really necessary anymore?? 
join->init(thd, item_list, fake_select_lex->options, result); - saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array, + saved_error= mysql_select(thd, &result_table_list, 0, item_list, NULL, global_parameters()->order_list.elements, @@ -1022,27 +1020,11 @@ bool st_select_lex_unit::cleanup() } cleaned= 1; - if (union_result) - { - delete union_result; - union_result=0; // Safety - if (table) - free_tmp_table(thd, table); - table= 0; // Safety - } - for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) error|= sl->cleanup(); if (fake_select_lex) { - JOIN *join; - if ((join= fake_select_lex->join)) - { - join->tables_list= 0; - join->table_count= 0; - join->top_join_tab_count= 0; - } error|= fake_select_lex->cleanup(); /* There are two cases when we should clean order items: @@ -1064,6 +1046,15 @@ bool st_select_lex_unit::cleanup() } } + if (union_result) + { + delete union_result; + union_result=0; // Safety + if (table) + free_tmp_table(thd, table); + table= 0; // Safety + } + DBUG_RETURN(error); } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 61c16a905fe..6c60350844e 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -341,7 +341,8 @@ int mysql_update(THD *thd, if (table_list->is_view()) unfix_fields(fields); - if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0)) + if (setup_fields_with_no_wrap(thd, Ref_ptr_array(), + fields, MARK_COLUMNS_WRITE, 0, 0)) DBUG_RETURN(1); /* purecov: inspected */ if (table_list->view && check_fields(thd, fields)) { @@ -360,7 +361,7 @@ int mysql_update(THD *thd, table_list->grant.want_privilege= table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); #endif - if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0)) + if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, 0)) { free_underlaid_joins(thd, select_lex); DBUG_RETURN(1); /* purecov: inspected */ @@ -557,17 +558,12 @@ int mysql_update(THD *thd, to update NOTE: filesort will call table->prepare_for_position() */ - uint length= 0; - SORT_FIELD *sortorder; + Filesort fsort(order, limit, true, select); Filesort_tracker *fs_tracker= thd->lex->explain->get_upd_del_plan()->filesort_tracker; - if (!(sortorder=make_unireg_sortorder(thd, order, &length, NULL)) || - !(file_sort= filesort(thd, table, sortorder, length, - select, limit, - true, - fs_tracker))) + if (!(file_sort= filesort(thd, table, &fsort, fs_tracker))) goto err; thd->inc_examined_row_count(file_sort->examined_rows); @@ -696,7 +692,7 @@ int mysql_update(THD *thd, if (error >= 0) goto err; } - table->disable_keyread(); + table->set_keyread(false); table->column_bitmaps_set(save_read_set, save_write_set); } @@ -1050,7 +1046,7 @@ err: delete select; delete file_sort; free_underlaid_joins(thd, select_lex); - table->disable_keyread(); + table->set_keyread(false); thd->abort_on_warning= 0; DBUG_RETURN(1); @@ -1424,7 +1420,8 @@ int mysql_multi_update_prepare(THD *thd) if (lex->select_lex.handle_derived(thd->lex, DT_MERGE)) DBUG_RETURN(TRUE); - if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0)) + if (setup_fields_with_no_wrap(thd, Ref_ptr_array(), + *fields, MARK_COLUMNS_WRITE, 0, 0)) DBUG_RETURN(TRUE); for (tl= table_list; tl ; tl= tl->next_local) @@ -1611,7 +1608,7 @@ bool mysql_multi_update(THD *thd, thd->abort_on_warning= thd->is_strict_mode(); List total_list; - res= mysql_select(thd, &select_lex->ref_pointer_array, + res= mysql_select(thd, table_list, select_lex->with_wild, total_list, conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) 
NULL, @@ -1707,7 +1704,8 @@ int multi_update::prepare(List ¬_used_values, reference tables */ - int error= setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0); + int error= setup_fields(thd, Ref_ptr_array(), + *values, MARK_COLUMNS_READ, 0, 0); ti.rewind(); while ((table_ref= ti++)) @@ -2034,7 +2032,7 @@ loop_end: /* Make an unique key over the first field to avoid duplicated updates */ bzero((char*) &group, sizeof(group)); - group.asc= 1; + group.direction= ORDER::ORDER_ASC; group.item= (Item**) temp_fields.head_ref(); tmp_param->quick_group=1; diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 41fd5b78f04..b66f678adfc 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1612,6 +1612,8 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, sl->context.error_processor_data= (void *)table; } + table->select_lex->master_unit()->is_view= true; + /* check MERGE algorithm ability - algorithm is not explicit TEMPORARY TABLE diff --git a/sql/sql_window.cc b/sql/sql_window.cc new file mode 100644 index 00000000000..e3e42fc4f75 --- /dev/null +++ b/sql/sql_window.cc @@ -0,0 +1,2129 @@ +#include "sql_select.h" +#include "sql_list.h" +#include "item_windowfunc.h" +#include "filesort.h" +#include "sql_base.h" +#include "sql_window.h" + + +bool +Window_spec::check_window_names(List_iterator_fast &it) +{ + if (window_names_are_checked) + return false; + char *name= this->name(); + char *ref_name= window_reference(); + it.rewind(); + Window_spec *win_spec; + while((win_spec= it++) && win_spec != this) + { + char *win_spec_name= win_spec->name(); + if (!win_spec_name) + break; + if (name && my_strcasecmp(system_charset_info, name, win_spec_name) == 0) + { + my_error(ER_DUP_WINDOW_NAME, MYF(0), name); + return true; + } + if (ref_name && + my_strcasecmp(system_charset_info, ref_name, win_spec_name) == 0) + { + if (partition_list->elements) + { + my_error(ER_PARTITION_LIST_IN_REFERENCING_WINDOW_SPEC, MYF(0), + ref_name); + return true; + } + if (win_spec->order_list->elements && order_list->elements) + { + my_error(ER_ORDER_LIST_IN_REFERENCING_WINDOW_SPEC, MYF(0), ref_name); + return true; + } + if (win_spec->window_frame) + { + my_error(ER_WINDOW_FRAME_IN_REFERENCED_WINDOW_SPEC, MYF(0), ref_name); + return true; + } + referenced_win_spec= win_spec; + if (partition_list->elements == 0) + partition_list= win_spec->partition_list; + if (order_list->elements == 0) + order_list= win_spec->order_list; + } + } + if (ref_name && !referenced_win_spec) + { + my_error(ER_WRONG_WINDOW_SPEC_NAME, MYF(0), ref_name); + return true; + } + window_names_are_checked= true; + return false; +} + +bool +Window_frame::check_frame_bounds() +{ + if ((top_bound->is_unbounded() && + top_bound->precedence_type == Window_frame_bound::FOLLOWING) || + (bottom_bound->is_unbounded() && + bottom_bound->precedence_type == Window_frame_bound::PRECEDING) || + (top_bound->precedence_type == Window_frame_bound::CURRENT && + bottom_bound->precedence_type == Window_frame_bound::PRECEDING) || + (bottom_bound->precedence_type == Window_frame_bound::CURRENT && + top_bound->precedence_type == Window_frame_bound::FOLLOWING)) + { + my_error(ER_BAD_COMBINATION_OF_WINDOW_FRAME_BOUND_SPECS, MYF(0)); + return true; + } + + return false; +} + + +/* + Setup window functions in a select +*/ + +int +setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, + List &fields, List &all_fields, + List &win_specs, List &win_funcs) +{ + Window_spec *win_spec; + DBUG_ENTER("setup_windows"); + List_iterator it(win_specs); + + 
/* + Move all unnamed specifications after the named ones. + We could have avoided it if we had built two separate lists for + named and unnamed specifications. + */ + Query_arena *arena, backup; + arena= thd->activate_stmt_arena_if_needed(&backup); + uint i = 0; + uint elems= win_specs.elements; + while ((win_spec= it++) && i++ < elems) + { + if (win_spec->name() == NULL) + { + it.remove(); + win_specs.push_back(win_spec); + } + } + if (arena) + thd->restore_active_arena(arena, &backup); + + it.rewind(); + + List_iterator_fast itp(win_specs); + + while ((win_spec= it++)) + { + bool hidden_group_fields; + if (win_spec->check_window_names(itp) || + setup_group(thd, ref_pointer_array, tables, fields, all_fields, + win_spec->partition_list->first, &hidden_group_fields, + true) || + setup_order(thd, ref_pointer_array, tables, fields, all_fields, + win_spec->order_list->first, true) || + (win_spec->window_frame && + win_spec->window_frame->check_frame_bounds())) + { + DBUG_RETURN(1); + } + + if (win_spec->window_frame && + win_spec->window_frame->exclusion != Window_frame::EXCL_NONE) + { + my_error(ER_FRAME_EXCLUSION_NOT_SUPPORTED, MYF(0)); + DBUG_RETURN(1); + } + /* + For "win_func() OVER (ORDER BY order_list RANGE BETWEEN ...)", + - ORDER BY order_list must not be ommitted + - the list must have a single element. + */ + if (win_spec->window_frame && + win_spec->window_frame->units == Window_frame::UNITS_RANGE) + { + if (win_spec->order_list->elements != 1) + { + my_error(ER_RANGE_FRAME_NEEDS_SIMPLE_ORDERBY, MYF(0)); + DBUG_RETURN(1); + } + + /* + "The declared type of SK shall be numeric, datetime, or interval" + we don't support datetime or interval, yet. + */ + Item_result rtype= win_spec->order_list->first->item[0]->result_type(); + if (rtype != REAL_RESULT && rtype != INT_RESULT && + rtype != DECIMAL_RESULT) + { + my_error(ER_WRONG_TYPE_FOR_RANGE_FRAME, MYF(0)); + DBUG_RETURN(1); + } + + /* + "The declared type of UVS shall be numeric if the declared type of SK + is numeric; otherwise, it shall be an interval type that may be added + to or subtracted from the declared type of SK" + */ + Window_frame_bound *bounds[]= {win_spec->window_frame->top_bound, + win_spec->window_frame->bottom_bound, + NULL}; + for (Window_frame_bound **pbound= &bounds[0]; *pbound; pbound++) + { + if (!(*pbound)->is_unbounded() && + ((*pbound)->precedence_type == Window_frame_bound::FOLLOWING || + (*pbound)->precedence_type == Window_frame_bound::PRECEDING)) + { + Item_result rtype= (*pbound)->offset->result_type(); + if (rtype != REAL_RESULT && rtype != INT_RESULT && + rtype != DECIMAL_RESULT) + { + my_error(ER_WRONG_TYPE_FOR_RANGE_FRAME, MYF(0)); + DBUG_RETURN(1); + } + } + } + } + + /* "ROWS PRECEDING|FOLLOWING $n" must have a numeric $n */ + if (win_spec->window_frame && + win_spec->window_frame->units == Window_frame::UNITS_ROWS) + { + Window_frame_bound *bounds[]= {win_spec->window_frame->top_bound, + win_spec->window_frame->bottom_bound, + NULL}; + for (Window_frame_bound **pbound= &bounds[0]; *pbound; pbound++) + { + if (!(*pbound)->is_unbounded() && + ((*pbound)->precedence_type == Window_frame_bound::FOLLOWING || + (*pbound)->precedence_type == Window_frame_bound::PRECEDING)) + { + Item *offset= (*pbound)->offset; + if (offset->result_type() != INT_RESULT) + { + my_error(ER_WRONG_TYPE_FOR_ROWS_FRAME, MYF(0)); + DBUG_RETURN(1); + } + } + } + } + } + + List_iterator_fast li(win_funcs); + Item_window_func *win_func_item; + while ((win_func_item= li++)) + { + win_func_item->update_used_tables(); + } + + 
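A rough illustration of the checks above (schema is hypothetical): a RANGE frame needs exactly one numeric ORDER BY expression and a ROWS frame needs integer offsets, so the first statement should be accepted while the commented variants would be rejected with the new error codes:

CREATE TABLE sales (amount INT, ts DATETIME);
SELECT SUM(amount) OVER (ORDER BY amount
                         RANGE BETWEEN 10 PRECEDING AND CURRENT ROW)
FROM sales;
-- ORDER BY ts with a RANGE frame  -> ER_WRONG_TYPE_FOR_RANGE_FRAME (datetime not supported yet)
-- ROWS 1.5 PRECEDING              -> ER_WRONG_TYPE_FOR_ROWS_FRAME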
DBUG_RETURN(0); +} + +///////////////////////////////////////////////////////////////////////////// +// Sorting window functions to minimize the number of table scans +// performed during the computation of these functions +///////////////////////////////////////////////////////////////////////////// + +#define CMP_LT -2 // Less than +#define CMP_LT_C -1 // Less than and compatible +#define CMP_EQ 0 // Equal to +#define CMP_GT_C 1 // Greater than and compatible +#define CMP_GT 2 // Greater then + +static +int compare_order_elements(ORDER *ord1, ORDER *ord2) +{ + if (*ord1->item == *ord2->item) + return CMP_EQ; + Item *item1= (*ord1->item)->real_item(); + Item *item2= (*ord2->item)->real_item(); + DBUG_ASSERT(item1->type() == Item::FIELD_ITEM && + item2->type() == Item::FIELD_ITEM); + int cmp= ((Item_field *) item1)->field - ((Item_field *) item2)->field; + if (cmp == 0) + { + if (ord1->direction == ord2->direction) + return CMP_EQ; + return ord1->direction > ord2->direction ? CMP_GT : CMP_LT; + } + else + return cmp > 0 ? CMP_GT : CMP_LT; +} + +static +int compare_order_lists(SQL_I_List *part_list1, + SQL_I_List *part_list2) +{ + if (part_list1 == part_list2) + return CMP_EQ; + ORDER *elem1= part_list1->first; + ORDER *elem2= part_list2->first; + for ( ; elem1 && elem2; elem1= elem1->next, elem2= elem2->next) + { + int cmp; + if ((cmp= compare_order_elements(elem1, elem2))) + return cmp; + } + if (elem1) + return CMP_GT_C; + if (elem2) + return CMP_LT_C; + return CMP_EQ; +} + + +static +int compare_window_frame_bounds(Window_frame_bound *win_frame_bound1, + Window_frame_bound *win_frame_bound2, + bool is_bottom_bound) +{ + int res; + if (win_frame_bound1->precedence_type != win_frame_bound2->precedence_type) + { + res= win_frame_bound1->precedence_type > win_frame_bound2->precedence_type ? + CMP_GT : CMP_LT; + if (is_bottom_bound) + res= -res; + return res; + } + + if (win_frame_bound1->is_unbounded() && win_frame_bound2->is_unbounded()) + return CMP_EQ; + + if (!win_frame_bound1->is_unbounded() && !win_frame_bound2->is_unbounded()) + { + if (win_frame_bound1->offset->eq(win_frame_bound2->offset, true)) + return CMP_EQ; + else + { + res= strcmp(win_frame_bound1->offset->name, + win_frame_bound2->offset->name); + res= res > 0 ? CMP_GT : CMP_LT; + if (is_bottom_bound) + res= -res; + return res; + } + } + + /* + Here we have: + win_frame_bound1->is_unbounded() != win_frame_bound1->is_unbounded() + */ + return is_bottom_bound != win_frame_bound1->is_unbounded() ? CMP_LT : CMP_GT; +} + + +static +int compare_window_frames(Window_frame *win_frame1, + Window_frame *win_frame2) +{ + int cmp; + + if (win_frame1 == win_frame2) + return CMP_EQ; + + if (!win_frame1) + return CMP_LT; + + if (!win_frame2) + return CMP_GT; + + if (win_frame1->units != win_frame2->units) + return win_frame1->units > win_frame2->units ? CMP_GT : CMP_LT; + + cmp= compare_window_frame_bounds(win_frame1->top_bound, + win_frame2->top_bound, + false); + if (cmp) + return cmp; + + cmp= compare_window_frame_bounds(win_frame1->bottom_bound, + win_frame2->bottom_bound, + true); + if (cmp) + return cmp; + + if (win_frame1->exclusion != win_frame2->exclusion) + return win_frame1->exclusion > win_frame2->exclusion ? 
CMP_GT_C : CMP_LT_C; + + return CMP_EQ; +} + +static +int compare_window_spec_joined_lists(Window_spec *win_spec1, + Window_spec *win_spec2) +{ + win_spec1->join_partition_and_order_lists(); + win_spec2->join_partition_and_order_lists(); + int cmp= compare_order_lists(win_spec1->partition_list, + win_spec2->partition_list); + win_spec1->disjoin_partition_and_order_lists(); + win_spec2->disjoin_partition_and_order_lists(); + return cmp; +} + + +static +int compare_window_funcs_by_window_specs(Item_window_func *win_func1, + Item_window_func *win_func2, + void *arg) +{ + int cmp; + Window_spec *win_spec1= win_func1->window_spec; + Window_spec *win_spec2= win_func2->window_spec; + if (win_spec1 == win_spec2) + return CMP_EQ; + cmp= compare_order_lists(win_spec1->partition_list, + win_spec2->partition_list); + if (cmp == CMP_EQ) + { + /* + Partition lists contain the same elements. + Let's use only one of the lists. + */ + if (!win_spec1->name() && win_spec2->name()) + win_spec1->partition_list= win_spec2->partition_list; + else + win_spec2->partition_list= win_spec1->partition_list; + + cmp= compare_order_lists(win_spec1->order_list, + win_spec2->order_list); + + if (cmp != CMP_EQ) + return cmp; + + /* + Order lists contain the same elements. + Let's use only one of the lists. + */ + if (!win_spec1->name() && win_spec2->name()) + win_spec1->order_list= win_spec2->order_list; + else + win_spec2->order_list= win_spec1->order_list; + + cmp= compare_window_frames(win_spec1->window_frame, + win_spec2->window_frame); + + if (cmp != CMP_EQ) + return cmp; + + /* Window frames are equal. Let's use only one of them. */ + if (!win_spec1->name() && win_spec2->name()) + win_spec1->window_frame= win_spec2->window_frame; + else + win_spec2->window_frame= win_spec1->window_frame; + + return CMP_EQ; + } + + if (cmp == CMP_GT || cmp == CMP_LT) + return cmp; + + /* one of the partitions lists is the proper beginning of the another */ + cmp= compare_window_spec_joined_lists(win_spec1, win_spec2); + + if (CMP_LT_C <= cmp && cmp <= CMP_GT_C) + cmp= win_spec1->partition_list->elements < + win_spec2->partition_list->elements ? CMP_GT_C : CMP_LT_C; + + return cmp; +} + + +#define SORTORDER_CHANGE_FLAG 1 +#define PARTITION_CHANGE_FLAG 2 +#define FRAME_CHANGE_FLAG 4 + +typedef int (*Item_window_func_cmp)(Item_window_func *f1, + Item_window_func *f2, + void *arg); +/* + @brief + Sort window functions so that those that can be computed together are + adjacent. + + @detail + Sort window functions by their + - required sorting order, + - partition list, + - window frame compatibility. + + The changes between the groups are marked by setting item_window_func->marker. 
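A hypothetical query showing the effect of this ordering: the first two window functions below use identical partition and order lists, so after sorting they end up adjacent and can share one sorting step, while the third is incompatible and gets marked as the start of a new group (table and columns are invented):

CREATE TABLE emp (dept INT, salary INT, hire_date DATE);
SELECT RANK()       OVER (PARTITION BY dept ORDER BY salary),
       SUM(salary)  OVER (PARTITION BY dept ORDER BY salary),
       ROW_NUMBER() OVER (ORDER BY hire_date)
FROM emp;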
+*/ + +static +void order_window_funcs_by_window_specs(List *win_func_list) +{ + if (win_func_list->elements == 0) + return; + + bubble_sort(win_func_list, + compare_window_funcs_by_window_specs, + NULL); + + List_iterator_fast it(*win_func_list); + Item_window_func *prev= it++; + prev->marker= SORTORDER_CHANGE_FLAG | + PARTITION_CHANGE_FLAG | + FRAME_CHANGE_FLAG; + Item_window_func *curr; + while ((curr= it++)) + { + Window_spec *win_spec_prev= prev->window_spec; + Window_spec *win_spec_curr= curr->window_spec; + curr->marker= 0; + if (!(win_spec_prev->partition_list == win_spec_curr->partition_list && + win_spec_prev->order_list == win_spec_curr->order_list)) + { + int cmp; + if (win_spec_prev->partition_list == win_spec_curr->partition_list) + cmp= compare_order_lists(win_spec_prev->order_list, + win_spec_curr->order_list); + else + cmp= compare_window_spec_joined_lists(win_spec_prev, win_spec_curr); + if (!(CMP_LT_C <= cmp && cmp <= CMP_GT_C)) + { + curr->marker= SORTORDER_CHANGE_FLAG | + PARTITION_CHANGE_FLAG | + FRAME_CHANGE_FLAG; + } + else if (win_spec_prev->partition_list != win_spec_curr->partition_list) + { + curr->marker|= PARTITION_CHANGE_FLAG | FRAME_CHANGE_FLAG; + } + } + else if (win_spec_prev->window_frame != win_spec_curr->window_frame) + curr->marker|= FRAME_CHANGE_FLAG; + + prev= curr; + } +} + + +///////////////////////////////////////////////////////////////////////////// + + +/* + Do a pass over sorted table and compute window function values. + + This function is for handling window functions that can be computed on the + fly. Examples are RANK() and ROW_NUMBER(). +*/ +bool compute_window_func_values(Item_window_func *item_win, + TABLE *tbl, READ_RECORD *info) +{ + int err; + while (!(err=info->read_record(info))) + { + store_record(tbl,record[1]); + + /* + This will cause window function to compute its value for the + current row : + */ + item_win->advance_window(); + + /* + Put the new value into temptable's field + TODO: Should this use item_win->update_field() call? + Regular aggegate function implementations seem to implement it. + */ + item_win->save_in_field(item_win->result_field, true); + err= tbl->file->ha_update_row(tbl->record[1], tbl->record[0]); + if (err && err != HA_ERR_RECORD_IS_THE_SAME) + return true; + } + return false; +} + +///////////////////////////////////////////////////////////////////////////// +// Window Frames support +///////////////////////////////////////////////////////////////////////////// + +// note: make rr_from_pointers static again when not need it here anymore +int rr_from_pointers(READ_RECORD *info); + +/* + A temporary way to clone READ_RECORD structures until Monty provides the real + one. +*/ +bool clone_read_record(const READ_RECORD *src, READ_RECORD *dst) +{ + //DBUG_ASSERT(src->table->sort.record_pointers); + DBUG_ASSERT(src->read_record == rr_from_pointers); + memcpy(dst, src, sizeof(READ_RECORD)); + return false; +} + +///////////////////////////////////////////////////////////////////////////// + + +/* + A cursor over a sequence of rowids. One can + - Move to next rowid + - jump to given number in the sequence + - Know the number of the current rowid (i.e. 
how many rowids have been read) +*/ + +class Rowid_seq_cursor +{ + uchar *cache_start; + uchar *cache_pos; + uchar *cache_end; + uint ref_length; + +public: + virtual ~Rowid_seq_cursor() {} + + void init(READ_RECORD *info) + { + cache_start= info->cache_pos; + cache_pos= info->cache_pos; + cache_end= info->cache_end; + ref_length= info->ref_length; + } + + virtual int get_next() + { + /* Allow multiple get_next() calls in EOF state*/ + if (cache_pos == cache_end) + return -1; + cache_pos+= ref_length; + return 0; + } + + ha_rows get_rownum() + { + return (cache_pos - cache_start) / ref_length; + } + + // will be called by ROWS n FOLLOWING to catch up. + void move_to(ha_rows row_number) + { + cache_pos= cache_start + row_number * ref_length; + } +protected: + bool at_eof() { return (cache_pos == cache_end); } + + uchar *get_last_rowid() + { + if (cache_pos == cache_start) + return NULL; + else + return cache_pos - ref_length; + } + + uchar *get_curr_rowid() { return cache_pos; } +}; + + +/* + Cursor which reads from rowid sequence and also retrieves table rows. +*/ + +class Table_read_cursor : public Rowid_seq_cursor +{ + /* + Note: we don't own *read_record, somebody else is using it. + We only look at the constant part of it, e.g. table, record buffer, etc. + */ + READ_RECORD *read_record; +public: + virtual ~Table_read_cursor() {} + + void init(READ_RECORD *info) + { + Rowid_seq_cursor::init(info); + read_record= info; + } + + virtual int get_next() + { + if (at_eof()) + return -1; + + uchar* curr_rowid= get_curr_rowid(); + int res= Rowid_seq_cursor::get_next(); + if (!res) + { + res= read_record->table->file->ha_rnd_pos(read_record->record, + curr_rowid); + } + return res; + } + + bool restore_last_row() + { + uchar *p; + if ((p= get_last_rowid())) + { + int rc= read_record->table->file->ha_rnd_pos(read_record->record, p); + if (!rc) + return true; // restored ok + } + return false; // didn't restore + } + + // todo: should move_to() also read row here? +}; + + +/* + A cursor which only moves within a partition. The scan stops at the partition + end, and it needs an explicit command to move to the next partition. +*/ + +class Partition_read_cursor +{ + Table_read_cursor tbl_cursor; + Group_bound_tracker bound_tracker; + bool end_of_partition; +public: + void init(THD *thd, READ_RECORD *info, SQL_I_List *partition_list) + { + tbl_cursor.init(info); + bound_tracker.init(thd, partition_list); + end_of_partition= false; + } + + /* + Informs the cursor that we need to move into the next partition. + The next partition is provided in two ways: + - in table->record[0].. + - rownum parameter has the row number. + */ + void on_next_partition(int rownum) + { + /* Remember the sort key value from the new partition */ + bound_tracker.check_if_next_group(); + end_of_partition= false; + } + + /* + Moves to a new row. The row is assumed to be within the current partition + */ + void move_to(int rownum) { tbl_cursor.move_to(rownum); } + + /* + This returns -1 when end of partition was reached. + */ + int get_next() + { + int res; + if (end_of_partition) + return -1; + if ((res= tbl_cursor.get_next())) + return res; + + if (bound_tracker.compare_with_cache()) + { + end_of_partition= true; + return -1; + } + return 0; + } + + bool restore_last_row() + { + return tbl_cursor.restore_last_row(); + } +}; + +///////////////////////////////////////////////////////////////////////////// + + +/* + Window frame bound cursor. Abstract interface. 
+ + @detail + The cursor moves within the partition that the current row is in. + It may be ahead or behind the current row. + + The cursor also assumes that the current row moves forward through the + partition and will move to the next adjacent partition after this one. + + List of all cursor classes: + Frame_cursor + Frame_range_n_top + Frame_range_n_bottom + + Frame_range_current_row_top + Frame_range_current_row_bottom + + Frame_n_rows_preceding + Frame_n_rows_following + + Frame_rows_current_row_top = Frame_n_rows_preceding(0) + Frame_rows_current_row_bottom + + // These handle both RANGE and ROWS-type bounds + Frame_unbounded_preceding + Frame_unbounded_following + + // This is not used as a frame bound, it counts rows in the partition: + Frame_unbounded_following_set_count : public Frame_unbounded_following + + @todo + - if we want to allocate this on the MEM_ROOT we should make sure + it is not re-allocated for every subquery execution. +*/ + +class Frame_cursor : public Sql_alloc +{ +public: + virtual void init(THD *thd, READ_RECORD *info, + SQL_I_List *partition_list, + SQL_I_List *order_list) + {} + + /* + Current row has moved to the next partition and is positioned on the first + row there. Position the frame bound accordingly. + + @param first - TRUE means this is the first partition + @param item - Put or remove rows from there. + + @detail + - if first==false, the caller guarantees that tbl->record[0] points at the + first row in the new partition. + - if first==true, we are just starting in the first partition and no such + guarantee is provided. + + - The callee may move tbl->file and tbl->record[0] to point to some other + row. + */ + virtual void pre_next_partition(longlong rownum, Item_sum* item){}; + virtual void next_partition(longlong rownum, Item_sum* item)=0; + + /* + The current row has moved one row forward. + Move this frame bound accordingly, and update the value of aggregate + function as necessary. + */ + virtual void pre_next_row(Item_sum* item){}; + virtual void next_row(Item_sum* item)=0; + + virtual ~Frame_cursor(){} +}; + +////////////////////////////////////////////////////////////////////////////// +// RANGE-type frames +////////////////////////////////////////////////////////////////////////////// + +/* + Frame_range_n_top handles the top end of RANGE-type frame. + + That is, it handles: + RANGE BETWEEN n PRECEDING AND ... + RANGE BETWEEN n FOLLOWING AND ... + + Top of the frame doesn't need to check for partition end, since bottom will + reach it before. 
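For orientation, a sketch of the kind of frame this bound serves (table is hypothetical): with a RANGE frame the bound is defined by sort-key values, not row positions, so for the row with amount = 50 the frame holds every row whose amount lies in [40, 50], however many rows that is:

CREATE TABLE payments (amount INT);
SELECT amount,
       SUM(amount) OVER (ORDER BY amount
                         RANGE BETWEEN 10 PRECEDING AND CURRENT ROW) AS s
FROM payments;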
+*/ + +class Frame_range_n_top : public Frame_cursor +{ + Table_read_cursor cursor; + + Cached_item_item *range_expr; + + Item *n_val; + Item *item_add; + + const bool is_preceding; + /* + 1 when order_list uses ASC ordering + -1 when order_list uses DESC ordering + */ + int order_direction; +public: + Frame_range_n_top(bool is_preceding_arg, Item *n_val_arg) : + n_val(n_val_arg), item_add(NULL), is_preceding(is_preceding_arg) + {} + + void init(THD *thd, READ_RECORD *info, + SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(info); + + DBUG_ASSERT(order_list->elements == 1); + Item *src_expr= order_list->first->item[0]; + if (order_list->first->direction == ORDER::ORDER_ASC) + order_direction= 1; + else + order_direction= -1; + + range_expr= (Cached_item_item*) new_Cached_item(thd, src_expr, FALSE); + + bool use_minus= is_preceding; + if (order_direction == -1) + use_minus= !use_minus; + + if (use_minus) + item_add= new (thd->mem_root) Item_func_minus(thd, src_expr, n_val); + else + item_add= new (thd->mem_root) Item_func_plus(thd, src_expr, n_val); + + item_add->fix_fields(thd, &item_add); + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + // Save the value of FUNC(current_row) + range_expr->fetch_value_from(item_add); + } + + void next_partition(longlong rownum, Item_sum* item) + { + cursor.move_to(rownum); + walk_till_non_peer(item); + } + + void pre_next_row(Item_sum* item) + { + range_expr->fetch_value_from(item_add); + } + + void next_row(Item_sum* item) + { + /* + Ok, our cursor is at the first row R where + (prev_row + n) >= R + We need to check about the current row. + */ + if (cursor.restore_last_row()) + { + if (order_direction * range_expr->cmp_read_only() <= 0) + return; + item->remove(); + } + walk_till_non_peer(item); + } + +private: + void walk_till_non_peer(Item_sum* item) + { + while (!cursor.get_next()) + { + if (order_direction * range_expr->cmp_read_only() <= 0) + break; + item->remove(); + } + } +}; + + +/* + Frame_range_n_bottom handles bottom end of RANGE-type frame. + + That is, it handles frame bounds in form: + RANGE BETWEEN ... AND n PRECEDING + RANGE BETWEEN ... 
AND n FOLLOWING + + Bottom end moves first so it needs to check for partition end + (todo: unless it's PRECEDING and in that case it doesnt) + (todo: factor out common parts with Frame_range_n_top into + a common ancestor) +*/ + +class Frame_range_n_bottom: public Frame_cursor +{ + Partition_read_cursor cursor; + + Cached_item_item *range_expr; + + Item *n_val; + Item *item_add; + + const bool is_preceding; + + bool end_of_partition; + + /* + 1 when order_list uses ASC ordering + -1 when order_list uses DESC ordering + */ + int order_direction; +public: + Frame_range_n_bottom(bool is_preceding_arg, Item *n_val_arg) : + n_val(n_val_arg), item_add(NULL), is_preceding(is_preceding_arg) + {} + + void init(THD *thd, READ_RECORD *info, + SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(thd, info, partition_list); + + DBUG_ASSERT(order_list->elements == 1); + Item *src_expr= order_list->first->item[0]; + + if (order_list->first->direction == ORDER::ORDER_ASC) + order_direction= 1; + else + order_direction= -1; + + range_expr= (Cached_item_item*) new_Cached_item(thd, src_expr, FALSE); + + bool use_minus= is_preceding; + if (order_direction == -1) + use_minus= !use_minus; + + if (use_minus) + item_add= new (thd->mem_root) Item_func_minus(thd, src_expr, n_val); + else + item_add= new (thd->mem_root) Item_func_plus(thd, src_expr, n_val); + + item_add->fix_fields(thd, &item_add); + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + // Save the value of FUNC(current_row) + range_expr->fetch_value_from(item_add); + + cursor.on_next_partition(rownum); + end_of_partition= false; + } + + void next_partition(longlong rownum, Item_sum* item) + { + cursor.move_to(rownum); + walk_till_non_peer(item); + } + + void pre_next_row(Item_sum* item) + { + if (end_of_partition) + return; + range_expr->fetch_value_from(item_add); + } + + void next_row(Item_sum* item) + { + if (end_of_partition) + return; + /* + Ok, our cursor is at the first row R where + (prev_row + n) >= R + We need to check about the current row. + */ + if (cursor.restore_last_row()) + { + if (order_direction * range_expr->cmp_read_only() < 0) + return; + item->add(); + } + walk_till_non_peer(item); + } + +private: + void walk_till_non_peer(Item_sum* item) + { + int res; + while (!(res= cursor.get_next())) + { + if (order_direction * range_expr->cmp_read_only() < 0) + break; + item->add(); + } + if (res) + end_of_partition= true; + } +}; + + +/* + RANGE BETWEEN ... AND CURRENT ROW, bottom frame bound for CURRENT ROW + ... + | peer1 + | peer2 <----- current_row + | peer3 + +-peer4 <----- the cursor points here. peer4 itself is included. + nonpeer1 + nonpeer2 + + This bound moves in front of the current_row. It should be a the first row + that is still a peer of the current row. 
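Peers are rows with equal ORDER BY values, and because this bottom bound runs ahead to the last peer, an aggregate with a RANGE-type frame sees all peers of the current row at once. A small made-up example:

CREATE TABLE t (x INT);
INSERT INTO t VALUES (1), (2), (2), (3);
SELECT x, SUM(x) OVER (ORDER BY x) AS running FROM t;
-- running is 1, 5, 5, 8: the two rows with x = 2 are peers,
-- so they share the same frame and the same sum.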
+*/ + +class Frame_range_current_row_bottom: public Frame_cursor +{ + Partition_read_cursor cursor; + + Group_bound_tracker peer_tracker; + + bool dont_move; +public: + void init(THD *thd, READ_RECORD *info, + SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(thd, info, partition_list); + peer_tracker.init(thd, order_list); + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + // Save the value of the current_row + peer_tracker.check_if_next_group(); + cursor.on_next_partition(rownum); + if (rownum != 0) + { + // Add the current row now because our cursor has already seen it + item->add(); + } + } + + void next_partition(longlong rownum, Item_sum* item) + { + walk_till_non_peer(item); + } + + void pre_next_row(Item_sum* item) + { + dont_move= !peer_tracker.check_if_next_group(); + if (!dont_move) + item->add(); + } + + void next_row(Item_sum* item) + { + // Check if our cursor is pointing at a peer of the current row. + // If not, move forward until that becomes true + if (dont_move) + { + /* + Our current is not a peer of the current row. + No need to move the bound. + */ + return; + } + walk_till_non_peer(item); + } + +private: + void walk_till_non_peer(Item_sum* item) + { + /* + Walk forward until we've met first row that's not a peer of the current + row + */ + while (!cursor.get_next()) + { + if (peer_tracker.compare_with_cache()) + break; + item->add(); + } + } +}; + + +/* + RANGE BETWEEN CURRENT ROW AND .... Top CURRENT ROW, RANGE-type frame bound + + nonpeer1 + nonpeer2 + +-peer1 <----- the cursor points here. peer1 itself is included. + | peer2 + | peer3 <----- current_row + | peer4 + ... + + It moves behind the current_row. It is located right after the first peer of + the current_row. +*/ + +class Frame_range_current_row_top : public Frame_cursor +{ + Group_bound_tracker bound_tracker; + + Table_read_cursor cursor; + Group_bound_tracker peer_tracker; + + bool move; +public: + void init(THD *thd, READ_RECORD *info, + SQL_I_List *partition_list, + SQL_I_List *order_list) + { + bound_tracker.init(thd, partition_list); + + cursor.init(info); + peer_tracker.init(thd, order_list); + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + // Fetch the value from the first row + peer_tracker.check_if_next_group(); + cursor.move_to(rownum+1); + } + + void next_partition(longlong rownum, Item_sum* item) {} + + void pre_next_row(Item_sum* item) + { + // Check if the new current_row is a peer of the row that our cursor is + // pointing to. + move= peer_tracker.check_if_next_group(); + } + + void next_row(Item_sum* item) + { + if (move) + { + /* + Our cursor is pointing at the first row that was a peer of the previous + current row. Or, it was the first row in the partition. + */ + if (cursor.restore_last_row()) + { + // todo: need the following check ? + if (!peer_tracker.compare_with_cache()) + return; + item->remove(); + } + + do + { + if (cursor.get_next()) + return; + if (!peer_tracker.compare_with_cache()) + return; + item->remove(); + } + while (1); + } + } +}; + + +///////////////////////////////////////////////////////////////////////////// +// UNBOUNDED frame bounds (shared between RANGE and ROWS) +///////////////////////////////////////////////////////////////////////////// + +/* + UNBOUNDED PRECEDING frame bound +*/ +class Frame_unbounded_preceding : public Frame_cursor +{ +public: + void next_partition(longlong rownum, Item_sum* item) + { + /* + UNBOUNDED PRECEDING frame end just stays on the first row. 
+ We are top of the frame, so we don't need to update the sum function. + */ + } + + void next_row(Item_sum* item) + { + /* Do nothing, UNBOUNDED PRECEDING frame end doesn't move. */ + } +}; + + +/* + UNBOUNDED FOLLOWING frame bound +*/ + +class Frame_unbounded_following : public Frame_cursor +{ +protected: + Partition_read_cursor cursor; + +public: + void init(THD *thd, READ_RECORD *info, SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(thd, info, partition_list); + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + cursor.on_next_partition(rownum); + } + + void next_partition(longlong rownum, Item_sum* item) + { + if (!rownum) + { + /* Read the first row */ + if (cursor.get_next()) + return; + } + item->add(); + + /* Walk to the end of the partition, updating the SUM function */ + while (!cursor.get_next()) + { + item->add(); + } + } + + void next_row(Item_sum* item) + { + /* Do nothing, UNBOUNDED FOLLOWING frame end doesn't move */ + } +}; + + +class Frame_unbounded_following_set_count : public Frame_unbounded_following +{ +public: + // pre_next_partition is inherited + + void next_partition(longlong rownum, Item_sum* item) + { + ulonglong num_rows_in_partition= 0; + if (!rownum) + { + /* Read the first row */ + if (cursor.get_next()) + return; + } + num_rows_in_partition++; + + /* Walk to the end of the partition, find how many rows there are. */ + while (!cursor.get_next()) + { + num_rows_in_partition++; + } + + Item_sum_window_with_row_count* item_with_row_count = + static_cast(item); + item_with_row_count->set_row_count(num_rows_in_partition); + } +}; + +///////////////////////////////////////////////////////////////////////////// +// ROWS-type frame bounds +///////////////////////////////////////////////////////////////////////////// +/* + ROWS $n PRECEDING frame bound + +*/ +class Frame_n_rows_preceding : public Frame_cursor +{ + /* Whether this is top of the frame or bottom */ + const bool is_top_bound; + const ha_rows n_rows; + + /* Number of rows that we need to skip before our cursor starts moving */ + ha_rows n_rows_to_skip; + + Table_read_cursor cursor; +public: + Frame_n_rows_preceding(bool is_top_bound_arg, ha_rows n_rows_arg) : + is_top_bound(is_top_bound_arg), n_rows(n_rows_arg) + {} + + void init(THD *thd, READ_RECORD *info, SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(info); + } + + void next_partition(longlong rownum, Item_sum* item) + { + /* + Position our cursor to point at the first row in the new partition + (for rownum=0, it is already there, otherwise, it lags behind) + */ + if (rownum != 0) + cursor.move_to(rownum); + + /* + Suppose the bound is ROWS 2 PRECEDING, and current row is row#n: + ... + n-3 + n-2 --- bound row + n-1 + n --- current_row + ... + The bound should point at row #(n-2). Bounds are inclusive, so + - bottom bound should add row #(n-2) into the window function + - top bound should remove row (#n-3) from the window function. + */ + n_rows_to_skip= n_rows + (is_top_bound? 1:0) - 1; + + /* Bottom bound "ROWS 0 PRECEDING" is a special case: */ + if (n_rows_to_skip == ha_rows(-1)) + { + cursor.get_next(); + item->add(); + n_rows_to_skip= 0; + } + } + + void next_row(Item_sum* item) + { + if (n_rows_to_skip) + { + n_rows_to_skip--; + return; + } + + if (cursor.get_next()) + return; // this is not expected to happen. + + if (is_top_bound) // this is frame start endpoint + item->remove(); + else + item->add(); + } +}; + + +/* + ROWS ... CURRENT ROW, Bottom bound. 
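In contrast with the RANGE bounds earlier, the ROWS-type bounds above count physical rows: for a three-row moving sum (reusing the hypothetical table t from the previous example) the bottom bound sits on the current row while the top bound trails two rows behind, which is what the n_rows_to_skip bookkeeping above arranges:

SELECT x,
       SUM(x) OVER (ORDER BY x
                    ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) AS moving_sum
FROM t;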
+ + This case is moved to separate class because here we don't need to maintain + our own cursor, or check for partition bound. +*/ + +class Frame_rows_current_row_bottom : public Frame_cursor +{ +public: + void pre_next_partition(longlong rownum, Item_sum* item) + { + item->add(); + } + void next_partition(longlong rownum, Item_sum* item) {} + void pre_next_row(Item_sum* item) + { + /* Temp table's current row is current_row. Add it to the window func */ + item->add(); + } + void next_row(Item_sum* item) {}; +}; + + +/* + ROWS-type CURRENT ROW, top bound. + + This serves for processing "ROWS BETWEEN CURRENT ROW AND ..." frames. + + n-1 + n --+ --- current_row, and top frame bound + n+1 | + ... | + + when the current_row moves to row #n, this frame bound should remove the + row #(n-1) from the window function. + + In other words, we need what "ROWS PRECEDING 0" provides. +*/ +class Frame_rows_current_row_top: public Frame_n_rows_preceding + +{ +public: + Frame_rows_current_row_top() : + Frame_n_rows_preceding(true /*top*/, 0 /* n_rows */) + {} +}; + + +/* + ROWS $n FOLLOWING frame bound. +*/ + +class Frame_n_rows_following : public Frame_cursor +{ + /* Whether this is top of the frame or bottom */ + const bool is_top_bound; + const ha_rows n_rows; + + Partition_read_cursor cursor; + bool at_partition_end; +public: + Frame_n_rows_following(bool is_top_bound_arg, ha_rows n_rows_arg) : + is_top_bound(is_top_bound_arg), n_rows(n_rows_arg) + { + DBUG_ASSERT(n_rows > 0); + } + + void init(THD *thd, READ_RECORD *info, SQL_I_List *partition_list, + SQL_I_List *order_list) + { + cursor.init(thd, info, partition_list); + at_partition_end= false; + } + + void pre_next_partition(longlong rownum, Item_sum* item) + { + at_partition_end= false; + + cursor.on_next_partition(rownum); + + if (rownum != 0) + { + // This is only needed for "FOLLOWING 1". It is one row behind + cursor.move_to(rownum+1); + + // Current row points at the first row in the partition + if (is_top_bound) // this is frame top endpoint + item->remove(); + else + item->add(); + } + } + + /* Move our cursor to be n_rows ahead. */ + void next_partition(longlong rownum, Item_sum* item) + { + longlong i_end= n_rows + ((rownum==0)?1:0)- is_top_bound; + for (longlong i= 0; i < i_end; i++) + { + if (next_row_intern(item)) + break; + } + } + + void next_row(Item_sum* item) + { + if (at_partition_end) + return; + next_row_intern(item); + } + +private: + bool next_row_intern(Item_sum *item) + { + if (!cursor.get_next()) + { + if (is_top_bound) // this is frame start endpoint + item->remove(); + else + item->add(); + } + else + at_partition_end= true; + return at_partition_end; + } +}; + + +/* + Get a Frame_cursor for a frame bound. This is a "factory function". +*/ +Frame_cursor *get_frame_cursor(Window_frame *frame, bool is_top_bound) +{ + if (!frame) + { + /* + The docs say this about the lack of frame clause: + + Let WD be a window structure descriptor. + ... + If WD has no window framing clause, then + Case: + i) If the window ordering clause of WD is not present, then WF is the + window partition of R. + ii) Otherwise, WF consists of all rows of the partition of R that + precede R or are peers of R in the window ordering of the window + partition defined by the window ordering clause. + + For case #ii, the frame bounds essentially are "RANGE BETWEEN UNBOUNDED + PRECEDING AND CURRENT ROW". + For the case #i, without ordering clause all rows are considered peers, + so again the same frame bounds can be used. 
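Put differently, per the standard text quoted above, when an ORDER BY is present and no frame clause is written, the two queries below should be equivalent (tbl and its columns are hypothetical):

CREATE TABLE tbl (x INT, d INT);
SELECT SUM(x) OVER (ORDER BY d) FROM tbl;
SELECT SUM(x) OVER (ORDER BY d
                    RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
FROM tbl;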
+ */ + if (is_top_bound) + return new Frame_unbounded_preceding; + else + return new Frame_range_current_row_bottom; + } + + Window_frame_bound *bound= is_top_bound? frame->top_bound : + frame->bottom_bound; + + if (bound->precedence_type == Window_frame_bound::PRECEDING || + bound->precedence_type == Window_frame_bound::FOLLOWING) + { + bool is_preceding= (bound->precedence_type == + Window_frame_bound::PRECEDING); + + if (bound->offset == NULL) /* this is UNBOUNDED */ + { + /* The following serve both RANGE and ROWS: */ + if (is_preceding) + return new Frame_unbounded_preceding; + else + return new Frame_unbounded_following; + } + + if (frame->units == Window_frame::UNITS_ROWS) + { + longlong n_rows= bound->offset->val_int(); + /* These should be handled in the parser */ + DBUG_ASSERT(!bound->offset->null_value); + DBUG_ASSERT(n_rows >= 0); + if (is_preceding) + return new Frame_n_rows_preceding(is_top_bound, n_rows); + else + return new Frame_n_rows_following(is_top_bound, n_rows); + } + else + { + if (is_top_bound) + return new Frame_range_n_top(is_preceding, bound->offset); + else + return new Frame_range_n_bottom(is_preceding, bound->offset); + } + } + + if (bound->precedence_type == Window_frame_bound::CURRENT) + { + if (frame->units == Window_frame::UNITS_ROWS) + { + if (is_top_bound) + return new Frame_rows_current_row_top; + else + return new Frame_rows_current_row_bottom; + } + else + { + if (is_top_bound) + return new Frame_range_current_row_top; + else + return new Frame_range_current_row_bottom; + } + } + return NULL; +} + +void add_extra_frame_cursors(List *cursors, + const Item_sum *window_func) +{ + switch (window_func->sum_func()) + { + case Item_sum::CUME_DIST_FUNC: + cursors->push_back(new Frame_unbounded_preceding); + cursors->push_back(new Frame_range_current_row_bottom); + break; + default: + cursors->push_back(new Frame_unbounded_preceding); + cursors->push_back(new Frame_rows_current_row_bottom); + } +} + +void get_window_func_required_cursors( + List *result, const Item_window_func* item_win) +{ + if (item_win->requires_partition_size()) + result->push_back(new Frame_unbounded_following_set_count); + + /* + If it is not a regular window function that follows frame specifications, + specific cursors are required. + */ + if (item_win->is_frame_prohibited()) + { + add_extra_frame_cursors(result, item_win->window_func()); + return; + } + + /* A regular window function follows the frame specification. */ + result->push_back(get_frame_cursor(item_win->window_spec->window_frame, + false)); + result->push_back(get_frame_cursor(item_win->window_spec->window_frame, + true)); +} + +/* + Streamed window function computation with window frames. + + We make a single pass over the ordered temp.table, but we're using three + cursors: + - current row - the row that we're computing window func value for) + - start_bound - the start of the frame + - bottom_bound - the end of the frame + + All three cursors move together. + + @todo + Provided bounds have their 'cursors'... is it better to re-clone their + cursors or re-position them onto the current row? + + @detail + ROWS BETWEEN 3 PRECEDING -- frame start + AND 3 FOLLOWING -- frame end + + /------ frame end (aka BOTTOM) + Dataset start | + --------====*=======[*]========*========-------->> dataset end + | \ + | +-------- current row + | + \-------- frame start ("TOP") + + - frame_end moves forward and adds rows into the aggregate function. + - frame_start follows behind and removes rows from the aggregate function. 
+ - current_row is the row where the value of aggregate function is stored. + + @TODO: Only the first cursor needs to check for run-out-of-partition + condition (Others can catch up by counting rows?) + +*/ + +bool compute_window_func_with_frames(Item_window_func *item_win, + TABLE *tbl, READ_RECORD *info) +{ + THD *thd= current_thd; + int err= 0; + + Item_sum *sum_func= item_win->window_func(); + /* This algorithm doesn't support DISTINCT aggregator */ + sum_func->set_aggregator(Aggregator::SIMPLE_AGGREGATOR); + + List cursors; + get_window_func_required_cursors(&cursors, item_win); + + List_iterator_fast it(cursors); + Frame_cursor *c; + while((c= it++)) + { + c->init(thd, info, item_win->window_spec->partition_list, + item_win->window_spec->order_list); + } + + bool is_error= false; + longlong rownum= 0; + uchar *rowid_buf= (uchar*) my_malloc(tbl->file->ref_length, MYF(0)); + + while (true) + { + /* Move the current_row */ + if ((err=info->read_record(info))) + { + break; /* End of file */ + } + bool partition_changed= item_win->check_if_partition_changed(); + + tbl->file->position(tbl->record[0]); + memcpy(rowid_buf, tbl->file->ref, tbl->file->ref_length); + + if (partition_changed || (rownum == 0)) + { + sum_func->clear(); + /* + pre_XXX functions assume that tbl->record[0] contains current_row, and + they may not change it. + */ + it.rewind(); + while ((c= it++)) + c->pre_next_partition(rownum, sum_func); + /* + We move bottom_bound first, because we want rows to be added into the + aggregate before top_bound attempts to remove them. + */ + it.rewind(); + while ((c= it++)) + c->next_partition(rownum, sum_func); + } + else + { + /* Again, both pre_XXX function can find current_row in tbl->record[0] */ + it.rewind(); + while ((c= it++)) + c->pre_next_row(sum_func); + + /* These make no assumptions about tbl->record[0] and may change it */ + it.rewind(); + while ((c= it++)) + c->next_row(sum_func); + } + rownum++; + + /* + Frame cursors may have made tbl->record[0] to point to some record other + than current_row. This applies to tbl->file's internal state, too. + Fix this by reading the current row again. + */ + tbl->file->ha_rnd_pos(tbl->record[0], rowid_buf); + store_record(tbl,record[1]); + item_win->save_in_field(item_win->result_field, true); + err= tbl->file->ha_update_row(tbl->record[1], tbl->record[0]); + if (err && err != HA_ERR_RECORD_IS_THE_SAME) + { + is_error= true; + break; + } + } + + my_free(rowid_buf); + cursors.delete_elements(); + return is_error? true: false; +} + + +/* Make a list that is a concation of two lists of ORDER elements */ + +static ORDER* concat_order_lists(MEM_ROOT *mem_root, ORDER *list1, ORDER *list2) +{ + if (!list1) + { + list1= list2; + list2= NULL; + } + + ORDER *res= NULL; // first element in the new list + ORDER *prev= NULL; // last element in the new list + ORDER *cur_list= list1; // this goes through list1, list2 + while (cur_list) + { + for (ORDER *cur= cur_list; cur; cur= cur->next) + { + ORDER *copy= (ORDER*)alloc_root(mem_root, sizeof(ORDER)); + memcpy(copy, cur, sizeof(ORDER)); + if (prev) + prev->next= copy; + prev= copy; + if (!res) + res= copy; + } + + cur_list= (cur_list == list1)? 
list2: NULL; + } + + if (prev) + prev->next= NULL; + + return res; +} + + +bool Window_func_runner::setup(THD *thd) +{ + win_func->setup_partition_border_check(thd); + + Item_sum::Sumfunctype type= win_func->window_func()->sum_func(); + switch (type) + { + case Item_sum::ROW_NUMBER_FUNC: + case Item_sum::RANK_FUNC: + case Item_sum::DENSE_RANK_FUNC: + { + /* + One-pass window function computation, walk through the rows and + assign values. + */ + compute_func= compute_window_func_values; + break; + } + case Item_sum::COUNT_FUNC: + case Item_sum::SUM_BIT_FUNC: + case Item_sum::SUM_FUNC: + case Item_sum::AVG_FUNC: + case Item_sum::PERCENT_RANK_FUNC: + case Item_sum::CUME_DIST_FUNC: + case Item_sum::NTILE_FUNC: + { + /* + Frame-aware window function computation. It does one pass, but + uses three cursors -frame_start, current_row, and frame_end. + */ + compute_func= compute_window_func_with_frames; + break; + } + default: + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "This aggregate as window function"); + return true; + } + + return false; +} + + +/* + Compute the value of window function for all rows. +*/ +bool Window_func_runner::exec(TABLE *tbl, SORT_INFO *filesort_result) +{ + THD *thd= current_thd; + win_func->set_phase_to_computation(); + + /* Go through the sorted array and compute the window function */ + READ_RECORD info; + + if (init_read_record(&info, thd, tbl, NULL/*select*/, filesort_result, + 0, 1, FALSE)) + return true; + + bool is_error= compute_func(win_func, tbl, &info); + + win_func->set_phase_to_retrieval(); + + end_read_record(&info); + + return is_error; +} + + +bool Window_funcs_sort::exec(JOIN *join) +{ + THD *thd= join->thd; + JOIN_TAB *join_tab= &join->join_tab[join->top_join_tab_count]; + + if (create_sort_index(thd, join, join_tab, filesort)) + return true; + + TABLE *tbl= join_tab->table; + SORT_INFO *filesort_result= join_tab->filesort_result; + + bool is_error= false; + List_iterator it(runners); + Window_func_runner *runner; + + while ((runner= it++)) + { + if ((is_error= runner->exec(tbl, filesort_result))) + break; + } + + delete join_tab->filesort_result; + join_tab->filesort_result= NULL; + return is_error; +} + + +bool Window_funcs_sort::setup(THD *thd, SQL_SELECT *sel, + List_iterator &it) +{ + Item_window_func *win_func= it.peek(); + Item_window_func *prev_win_func; + + do + { + Window_func_runner *runner; + if (!(runner= new Window_func_runner(win_func)) || + runner->setup(thd)) + { + return true; + } + runners.push_back(runner); + it++; + prev_win_func= win_func; + } while ((win_func= it.peek()) && !(win_func->marker & SORTORDER_CHANGE_FLAG)); + + /* + The sort criteria must be taken from the last win_func in the group of + adjacent win_funcs that do not have SORTORDER_CHANGE_FLAG. + */ + Window_spec *spec = prev_win_func->window_spec; + + ORDER* sort_order= concat_order_lists(thd->mem_root, + spec->partition_list->first, + spec->order_list->first); + filesort= new (thd->mem_root) Filesort(sort_order, HA_POS_ERROR, true, NULL); + + /* Apply the same condition that the subsequent sort has. 
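As a hypothetical illustration of the concat_order_lists() call above (names a, b, c, x and t2 are made up): for a window function written as OVER (PARTITION BY a, b ORDER BY c DESC), the Filesort built here appears to order the temporary table by a, b, c DESC, so partition boundaries and the within-partition order both come out of a single sort:

CREATE TABLE t2 (a INT, b INT, c INT, x INT);
-- one filesort on (a, b, c DESC) feeds the frame cursors for this group
SELECT AVG(x) OVER (PARTITION BY a, b ORDER BY c DESC) FROM t2;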
*/ + filesort->select= sel; + + return false; +} + + +bool Window_funcs_computation::setup(THD *thd, + List *window_funcs, + JOIN_TAB *tab) +{ + order_window_funcs_by_window_specs(window_funcs); + + SQL_SELECT *sel= NULL; + if (tab->filesort && tab->filesort->select) + { + sel= tab->filesort->select; + DBUG_ASSERT(!sel->quick); + } + + Window_funcs_sort *srt; + List_iterator iter(*window_funcs); + while (iter.peek()) + { + if (!(srt= new Window_funcs_sort()) || + srt->setup(thd, sel, iter)) + { + return true; + } + win_func_sorts.push_back(srt, thd->mem_root); + } + return false; +} + + +bool Window_funcs_computation::exec(JOIN *join) +{ + List_iterator it(win_func_sorts); + Window_funcs_sort *srt; + /* Execute each sort */ + while ((srt = it++)) + { + if (srt->exec(join)) + return true; + } + return false; +} + + +void Window_funcs_computation::cleanup() +{ + List_iterator it(win_func_sorts); + Window_funcs_sort *srt; + while ((srt = it++)) + { + srt->cleanup(); + delete srt; + } +} + + +Explain_aggr_window_funcs* +Window_funcs_computation::save_explain_plan(MEM_ROOT *mem_root, + bool is_analyze) +{ + Explain_aggr_window_funcs *xpl= new Explain_aggr_window_funcs; + List_iterator it(win_func_sorts); + Window_funcs_sort *srt; + while ((srt = it++)) + { + Explain_aggr_filesort *eaf= + new Explain_aggr_filesort(mem_root, is_analyze, srt->filesort); + xpl->sorts.push_back(eaf, mem_root); + } + return xpl; +} + +///////////////////////////////////////////////////////////////////////////// +// Unneeded comments (will be removed when we develop a replacement for +// the feature that was attempted here +///////////////////////////////////////////////////////////////////////////// + /* + TODO Get this code to set can_compute_window_function during preparation, + not during execution. + + The reason for this is the following: + Our single scan optimization for window functions without tmp table, + is valid, if and only if, we only need to perform one sorting operation, + via filesort. The cases where we need to perform one sorting operation only: + + * A select with only one window function. + * A select with multiple window functions, but they must have their + partition and order by clauses compatible. This means that one ordering + is acceptable for both window functions. + + For example: + partition by a, b, c; order by d, e results in sorting by a b c d e. + partition by a; order by d results in sorting by a d. + + This kind of sorting is compatible. The less specific partition does + not care for the order of b and c columns so it is valid if we sort + by those in case of equality over a. + + partition by a, b; order by d, e results in sorting by a b d e + partition by a; order by e results in sorting by a e + + This sorting is incompatible due to the order by clause. The partition by + clause is compatible, (partition by a) is a prefix for (partition by a, b) + However, order by e is not a prefix for order by d, e, thus it is not + compatible. + + The rule for having compatible sorting is thus: + Each partition order must contain the other window functions partitions + prefixes, or be a prefix itself. This must hold true for all partitions. + Analog for the order by clause. + */ +#if 0 + List window_functions; + SQL_I_List largest_partition; + SQL_I_List largest_order_by; + bool can_compute_window_live = !need_tmp; + // Construct the window_functions item list and check if they can be + // computed using only one sorting. 
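+  // Note: the code that is actually used (above, outside this #if 0
+  // block) takes a simpler route: Window_funcs_sort::setup() groups
+  // adjacent window functions whose sort order does not change
+  // (SORTORDER_CHANGE_FLAG), and each group shares one filesort whose
+  // key is the PARTITION BY columns followed by the ORDER BY columns
+  // (see concat_order_lists()).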
+ // + // TODO: Perhaps group functions into compatible sorting bins + // to minimize the number of sorting passes required to compute all of them. + while ((item= it++)) + { + if (item->type() == Item::WINDOW_FUNC_ITEM) + { + Item_window_func *item_win = (Item_window_func *) item; + window_functions.push_back(item_win); + if (!can_compute_window_live) + continue; // No point checking since we have to perform multiple sorts. + Window_spec *spec = item_win->window_spec; + // Having an empty partition list on one window function and a + // not empty list on a separate window function causes the sorting + // to be incompatible. + // + // Example: + // over (partition by a, order by x) && over (order by x). + // + // The first function requires an ordering by a first and then by x, + // while the seond function requires an ordering by x first. + // The same restriction is not required for the order by clause. + if (largest_partition.elements && !spec->partition_list.elements) + { + can_compute_window_live= FALSE; + continue; + } + can_compute_window_live= test_if_order_compatible(largest_partition, + spec->partition_list); + if (!can_compute_window_live) + continue; + + can_compute_window_live= test_if_order_compatible(largest_order_by, + spec->order_list); + if (!can_compute_window_live) + continue; + + if (largest_partition.elements < spec->partition_list.elements) + largest_partition = spec->partition_list; + if (largest_order_by.elements < spec->order_list.elements) + largest_order_by = spec->order_list; + } + } + if (can_compute_window_live && window_functions.elements && table_count == 1) + { + ha_rows examined_rows = 0; + ha_rows found_rows = 0; + ha_rows filesort_retval; + SORT_FIELD *s_order= (SORT_FIELD *) my_malloc(sizeof(SORT_FIELD) * + (largest_partition.elements + largest_order_by.elements) + 1, + MYF(MY_WME | MY_ZEROFILL | MY_THREAD_SPECIFIC)); + + size_t pos= 0; + for (ORDER* curr = largest_partition.first; curr; curr=curr->next, pos++) + s_order[pos].item = *curr->item; + + for (ORDER* curr = largest_order_by.first; curr; curr=curr->next, pos++) + s_order[pos].item = *curr->item; + + table[0]->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + MYF(MY_WME | MY_ZEROFILL| + MY_THREAD_SPECIFIC)); + + + filesort_retval= filesort(thd, table[0], s_order, + (largest_partition.elements + largest_order_by.elements), + this->select, HA_POS_ERROR, FALSE, + &examined_rows, &found_rows, + this->explain->ops_tracker.report_sorting(thd)); + table[0]->sort.found_records= filesort_retval; + + join_tab->read_first_record = join_init_read_record; + join_tab->records= found_rows; + + my_free(s_order); + } + else +#endif + + diff --git a/sql/sql_window.h b/sql/sql_window.h new file mode 100644 index 00000000000..54e39d827fe --- /dev/null +++ b/sql/sql_window.h @@ -0,0 +1,230 @@ + +#ifndef SQL_WINDOW_INCLUDED +#define SQL_WINDOW_INCLUDED + +#include "my_global.h" +#include "item.h" +#include "filesort.h" +#include "records.h" + +class Item_window_func; + +/* + Window functions module. + + Each instance of window function has its own element in SELECT_LEX::window_specs. +*/ + + +class Window_frame_bound : public Sql_alloc +{ + +public: + + enum Bound_precedence_type + { + PRECEDING, + CURRENT, // Used for CURRENT ROW window frame bounds + FOLLOWING + }; + + Bound_precedence_type precedence_type; + + + /* + For UNBOUNDED PRECEDING / UNBOUNDED FOLLOWING window frame bounds + precedence type is seto to PRECEDING / FOLLOWING and + offset is set to NULL. 
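+    For bounds of the form <literal> PRECEDING / <literal> FOLLOWING
+    (e.g. "2 PRECEDING") the offset points to the literal item, so
+    is_unbounded() returns false for them.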
+ The offset is not meaningful with precedence type CURRENT + */ + Item *offset; + + Window_frame_bound(Bound_precedence_type prec_type, + Item *offset_val) + : precedence_type(prec_type), offset(offset_val) {} + + bool is_unbounded() { return offset == NULL; } + +}; + + +class Window_frame : public Sql_alloc +{ + +public: + + enum Frame_units + { + UNITS_ROWS, + UNITS_RANGE + }; + + enum Frame_exclusion + { + EXCL_NONE, + EXCL_CURRENT_ROW, + EXCL_GROUP, + EXCL_TIES + }; + + Frame_units units; + + Window_frame_bound *top_bound; + + Window_frame_bound *bottom_bound; + + Frame_exclusion exclusion; + + Window_frame(Frame_units win_frame_units, + Window_frame_bound *win_frame_top_bound, + Window_frame_bound *win_frame_bottom_bound, + Frame_exclusion win_frame_exclusion) + : units(win_frame_units), top_bound(win_frame_top_bound), + bottom_bound(win_frame_bottom_bound), exclusion(win_frame_exclusion) {} + + bool check_frame_bounds(); + +}; + +class Window_spec : public Sql_alloc +{ + bool window_names_are_checked; + public: + virtual ~Window_spec() {} + + LEX_STRING *window_ref; + + SQL_I_List *partition_list; + + SQL_I_List *order_list; + + Window_frame *window_frame; + + Window_spec *referenced_win_spec; + + Window_spec(LEX_STRING *win_ref, + SQL_I_List *part_list, + SQL_I_List *ord_list, + Window_frame *win_frame) + : window_names_are_checked(false), window_ref(win_ref), + partition_list(part_list), order_list(ord_list), + window_frame(win_frame), referenced_win_spec(NULL) {} + + virtual char *name() { return NULL; } + + bool check_window_names(List_iterator_fast &it); + + char *window_reference() { return window_ref ? window_ref->str : NULL; } + + void join_partition_and_order_lists() + { + *(partition_list->next)= order_list->first; + } + + void disjoin_partition_and_order_lists() + { + *(partition_list->next)= NULL; + } +}; + +class Window_def : public Window_spec +{ + public: + + LEX_STRING *window_name; + + Window_def(LEX_STRING *win_name, + LEX_STRING *win_ref, + SQL_I_List *part_list, + SQL_I_List *ord_list, + Window_frame *win_frame) + : Window_spec(win_ref, part_list, ord_list, win_frame), + window_name(win_name) {} + + char *name() { return window_name->str; } + +}; + +int setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables, + List &fields, List &all_fields, + List &win_specs, List &win_funcs); + + +////////////////////////////////////////////////////////////////////////////// +// Classes that make window functions computation a part of SELECT's query plan +////////////////////////////////////////////////////////////////////////////// + +typedef bool (*window_compute_func_t)(Item_window_func *item_win, + TABLE *tbl, READ_RECORD *info); + +/* + This handles computation of one window function. + + Currently, we make a spearate filesort() call for each window function. +*/ + +class Window_func_runner : public Sql_alloc +{ + Item_window_func *win_func; + + /* The function to use for computation*/ + window_compute_func_t compute_func; + +public: + Window_func_runner(Item_window_func *win_func_arg) : + win_func(win_func_arg) + {} + + // Set things up. Create filesort structures, etc + bool setup(THD *thd); + + // This sorts and runs the window function. + bool exec(TABLE *tbl, SORT_INFO *filesort_result); +}; + + +/* + Represents a group of window functions that require the same sorting of + rows and so share the filesort() call. 
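+
+  For example, RANK() OVER (PARTITION BY a ORDER BY b) and
+  SUM(x) OVER (PARTITION BY a ORDER BY b) can be placed in the same
+  Window_funcs_sort and computed after a single filesort on (a, b).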
+ +*/ + +class Window_funcs_sort : public Sql_alloc +{ + List runners; + + /* Window functions can be computed over this sorting */ + Filesort *filesort; +public: + bool setup(THD *thd, SQL_SELECT *sel, List_iterator &it); + bool exec(JOIN *join); + void cleanup() { delete filesort; } + + friend class Window_funcs_computation; +}; + + +struct st_join_table; +class Explain_aggr_window_funcs; +/* + This is a "window function computation phase": a single object of this class + takes care of computing all window functions in a SELECT. + + - JOIN optimizer is exected to call setup() during query optimization. + - JOIN::exec() should call exec() once it has collected join output in a + temporary table. +*/ + +class Window_funcs_computation : public Sql_alloc +{ + List win_func_sorts; +public: + bool setup(THD *thd, List *window_funcs, st_join_table *tab); + bool exec(JOIN *join); + + Explain_aggr_window_funcs *save_explain_plan(MEM_ROOT *mem_root, bool is_analyze); + void cleanup(); +}; + + +#endif /* SQL_WINDOW_INCLUDED */ diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1870b3f719f..04c1ba7e99a 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -55,6 +55,8 @@ #include "sql_signal.h" #include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics #include "sql_cte.h" +#include "sql_window.h" +#include "item_windowfunc.h" #include "event_parse_data.h" #include "create_options.h" #include @@ -994,6 +996,8 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin) handlerton *db_type; st_select_lex *select_lex; struct p_elem_val *p_elem_value; + class Window_frame *window_frame; + class Window_frame_bound *window_frame_bound; udf_func *udf; /* enums */ @@ -1019,6 +1023,9 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin) enum sp_variable::enum_mode spvar_mode; enum thr_lock_type lock_type; enum enum_mysql_timestamp_type date_time_type; + enum Window_frame_bound::Bound_precedence_type bound_precedence_type; + enum Window_frame::Frame_units frame_units; + enum Window_frame::Frame_exclusion frame_exclusion; DDL_options_st object_ddl_options; } @@ -1030,10 +1037,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %parse-param { THD *thd } %lex-param { THD *thd } /* - Currently there are 121 shift/reduce conflicts. + Currently there are 123 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 121 +%expect 123 /* Comments for TOKENS. 
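The grammar added below (window_func_expr, simple_window_func, opt_window_clause
and the window_frame_* rules) is intended to accept statements of roughly the
following shape. This is only a syntax sketch with illustrative table, column
and window names; not every variant is necessarily executable at this stage:

    SELECT a, b,
           ROW_NUMBER() OVER (PARTITION BY a ORDER BY b)   AS rn,
           NTILE(4)     OVER w                             AS nt,
           SUM(b)       OVER (w ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
                              EXCLUDE NO OTHERS)           AS s
    FROM t1
    WINDOW w AS (PARTITION BY a ORDER BY b);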
@@ -1156,6 +1163,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token CREATE /* SQL-2003-R */ %token CROSS /* SQL-2003-R */ %token CUBE_SYM /* SQL-2003-R */ +%token CUME_DIST_SYM %token CURDATE /* MYSQL-FUNC */ %token CURRENT_SYM /* SQL-2003-R */ %token CURRENT_USER /* SQL-2003-R */ @@ -1186,6 +1194,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token DELAYED_SYM %token DELAY_KEY_WRITE_SYM %token DELETE_SYM /* SQL-2003-R */ +%token DENSE_RANK_SYM %token DESC /* SQL-2003-N */ %token DESCRIBE /* SQL-2003-R */ %token DES_KEY_FILE @@ -1227,6 +1236,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token EVERY_SYM /* SQL-2003-N */ %token EXCHANGE_SYM %token EXAMINED_SYM +%token EXCLUDE_SYM %token EXECUTE_SYM /* SQL-2003-R */ %token EXISTS /* SQL-2003-R */ %token EXIT_SYM @@ -1245,6 +1255,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token FLOAT_NUM %token FLOAT_SYM /* SQL-2003-R */ %token FLUSH_SYM +%token FOLLOWING_SYM %token FORCE_SYM %token FOREIGN /* SQL-2003-R */ %token FOR_SYM /* SQL-2003-R */ @@ -1421,6 +1432,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token NO_SYM /* SQL-2003-R */ %token NO_WAIT_SYM %token NO_WRITE_TO_BINLOG +%token NTILE_SYM %token NULL_SYM /* SQL-2003-R */ %token NUM %token NUMBER_SYM /* SQL-2003-N */ @@ -1441,9 +1453,11 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ORDER_SYM /* SQL-2003-R */ %token OR_OR_SYM /* OPERATOR */ %token OR_SYM /* SQL-2003-R */ +%token OTHERS_SYM %token OUTER %token OUTFILE %token OUT_SYM /* SQL-2003-R */ +%token OVER_SYM %token OWNER_SYM %token PACK_KEYS_SYM %token PAGE_SYM @@ -1456,6 +1470,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token PARTITIONS_SYM %token PARTITIONING_SYM %token PASSWORD_SYM +%token PERCENT_RANK_SYM %token PERSISTENT_SYM %token PHASE_SYM %token PLUGINS_SYM @@ -1464,6 +1479,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token POLYGON %token PORT_SYM %token POSITION_SYM /* SQL-2003-N */ +%token PRECEDING_SYM %token PRECISION /* SQL-2003-R */ %token PREPARE_SYM /* SQL-2003-R */ %token PRESERVE_SYM @@ -1481,6 +1497,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token QUERY_SYM %token QUICK %token RANGE_SYM /* SQL-2003-R */ +%token RANK_SYM %token READS_SYM /* SQL-2003-R */ %token READ_ONLY_SYM %token READ_SYM /* SQL-2003-N */ @@ -1531,6 +1548,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ROW_FORMAT_SYM %token ROW_SYM /* SQL-2003-R */ %token ROW_COUNT_SYM /* SQL-2003-N */ +%token ROW_NUMBER_SYM %token RTREE_SYM %token SAVEPOINT_SYM /* SQL-2003-R */ %token SCHEDULE_SYM @@ -1621,6 +1639,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token TEXT_SYM %token THAN_SYM %token THEN_SYM /* SQL-2003-R */ +%token TIES_SYM %token TIMESTAMP /* SQL-2003-R */ %token TIMESTAMP_ADD %token TIMESTAMP_DIFF @@ -1641,6 +1660,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token TYPE_SYM /* SQL-2003-N */ %token UDF_RETURNS_SYM %token ULONGLONG_NUM +%token UNBOUNDED_SYM %token UNCOMMITTED_SYM /* SQL-2003-N */ %token UNDEFINED_SYM %token UNDERSCORE_CHARSET @@ -1682,6 +1702,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token WEIGHT_STRING_SYM %token WHEN_SYM /* SQL-2003-R */ %token WHERE /* SQL-2003-R */ +%token WINDOW_SYM %token WHILE_SYM %token WITH /* SQL-2003-R */ %token WITH_CUBE_SYM /* INTERNAL */ @@ -1821,6 +1842,9 @@ bool 
my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); simple_ident_nospvar simple_ident_q field_or_var limit_option part_func_expr + window_func_expr + window_func + simple_window_func function_call_keyword function_call_nonkeyword function_call_generic @@ -2010,6 +2034,15 @@ END_OF_INPUT %type condition_information_item_name; %type condition_information; +%type opt_window_clause window_def_list window_def window_spec +%type window_name +%type opt_window_ref opt_window_frame_clause +%type window_frame_units; +%type window_frame_extent; +%type opt_window_frame_exclusion; +%type window_frame_start window_frame_bound; + + %type '-' '+' '*' '/' '%' '(' ')' ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM @@ -4952,7 +4985,7 @@ opt_create_partitioning: /* This part of the parser is about handling of the partition information. - It's first version was written by Mikael Ronström with lots of answers to + It's first version was written by Mikael Ronstrm with lots of answers to questions provided by Antony Curtis. The partition grammar can be called from three places. @@ -5662,7 +5695,19 @@ create_select: { Select->parsing_place= NO_MATTER; } - table_expression + /* + TODO: + The following sequence repeats a few times: + opt_table_expression + opt_order_clause + opt_limit_clause + opt_select_lock_type + Perhaps they can be grouped into a dedicated rule. + */ + opt_table_expression + opt_order_clause + opt_limit_clause + opt_select_lock_type { /* The following work only with the local list, the global list @@ -7415,7 +7460,7 @@ alter_commands: | remove_partitioning | partitioning /* - This part was added for release 5.1 by Mikael Ronström. + This part was added for release 5.1 by Mikael Ronstrm. From here we insert a number of commands to manage the partitions of a partitioned table such as adding partitions, dropping partitions, reorganising partitions in various manners. In future releases the list @@ -8502,7 +8547,10 @@ select_paren_derived: Lex->current_select->set_braces(true); } SELECT_SYM select_part2_derived - table_expression + opt_table_expression + opt_order_clause + opt_limit_clause + opt_select_lock_type { if (setup_select_in_parentheses(Lex)) MYSQL_YYABORT; @@ -8533,23 +8581,20 @@ select_part2: | select_options_and_item_list into opt_select_lock_type | select_options_and_item_list opt_into - from_clause - opt_where_clause - opt_group_clause - opt_having_clause + table_expression opt_order_clause opt_limit_clause opt_procedure_clause opt_into opt_select_lock_type { - if ($2 && $10) + if ($2 && $7) { /* double "INTO" clause */ my_error(ER_WRONG_USAGE, MYF(0), "INTO", "INTO"); MYSQL_YYABORT; } - if ($9 && ($2 || $10)) + if ($6 && ($2 || $7)) { /* "INTO" with "PROCEDURE ANALYSE" */ my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "INTO"); @@ -8572,24 +8617,25 @@ select_options_and_item_list: } ; + +/** + , as in the SQL standard. 
+*/ table_expression: - opt_from_clause + from_clause opt_where_clause opt_group_clause opt_having_clause - opt_order_clause - opt_limit_clause - opt_procedure_clause - opt_select_lock_type + opt_window_clause ; -from_clause: - FROM table_reference_list +opt_table_expression: + /* Empty */ + | table_expression ; -opt_from_clause: - /* empty */ - | from_clause +from_clause: + FROM table_reference_list ; table_reference_list: @@ -9301,6 +9347,7 @@ simple_expr: | param_marker { $$= $1; } | variable | sum_expr + | window_func_expr | simple_expr OR_OR_SYM simple_expr { $$= new (thd->mem_root) Item_func_concat(thd, $1, $3); @@ -10436,6 +10483,92 @@ sum_expr: } ; +window_func_expr: + window_func OVER_SYM window_name + { + $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, $3); + if ($$ == NULL) + MYSQL_YYABORT; + if (Select->add_window_func((Item_window_func *) $$)) + MYSQL_YYABORT; + } + | + window_func OVER_SYM window_spec + { + LEX *lex= Lex; + if (Select->add_window_spec(thd, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame)) + MYSQL_YYABORT; + $$= new (thd->mem_root) Item_window_func(thd, (Item_sum *) $1, + thd->lex->win_spec); + if ($$ == NULL) + MYSQL_YYABORT; + if (Select->add_window_func((Item_window_func *) $$)) + MYSQL_YYABORT; + } + ; + +window_func: + simple_window_func + | + sum_expr + ; + +simple_window_func: + ROW_NUMBER_SYM '(' ')' + { + $$= new (thd->mem_root) Item_sum_row_number(thd); + if ($$ == NULL) + MYSQL_YYABORT; + } + | + RANK_SYM '(' ')' + { + $$= new (thd->mem_root) Item_sum_rank(thd); + if ($$ == NULL) + MYSQL_YYABORT; + } + | + DENSE_RANK_SYM '(' ')' + { + $$= new (thd->mem_root) Item_sum_dense_rank(thd); + if ($$ == NULL) + MYSQL_YYABORT; + } + | + PERCENT_RANK_SYM '(' ')' + { + $$= new (thd->mem_root) Item_sum_percent_rank(thd); + if ($$ == NULL) + MYSQL_YYABORT; + } + | + CUME_DIST_SYM '(' ')' + { + $$= new (thd->mem_root) Item_sum_cume_dist(thd); + if ($$ == NULL) + MYSQL_YYABORT; + } + | + NTILE_SYM '(' expr ')' + { + $$= new (thd->mem_root) Item_sum_ntile(thd, $3); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + +window_name: + ident + { + $$= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING)); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + variable: '@' { @@ -11086,7 +11219,10 @@ select_derived2: { Select->parsing_place= NO_MATTER; } - table_expression + opt_table_expression + opt_order_clause + opt_limit_clause + opt_select_lock_type ; get_select_lex: @@ -11370,6 +11506,155 @@ olap_opt: } ; +/* + optional window clause in select +*/ + +opt_window_clause: + /* empty */ + {} + | WINDOW_SYM + window_def_list + {} + ; + +window_def_list: + window_def_list ',' window_def + | window_def + ; + +window_def: + window_name AS window_spec + { + LEX *lex= Lex; + if (Select->add_window_def(thd, $1, lex->win_ref, + Select->group_list, + Select->order_list, + lex->win_frame )) + MYSQL_YYABORT; + } + ; + +window_spec: + '(' + { Select->prepare_add_window_spec(thd); } + opt_window_ref opt_window_partition_clause + opt_window_order_clause opt_window_frame_clause + ')' + ; + +opt_window_ref: + /* empty */ {} + | ident + { + thd->lex->win_ref= (LEX_STRING *) thd->memdup(&$1, sizeof(LEX_STRING)); + if (thd->lex->win_ref == NULL) + MYSQL_YYABORT; + } + +opt_window_partition_clause: + /* empty */ { } + | PARTITION_SYM BY group_list + ; + +opt_window_order_clause: + /* empty */ { } + | ORDER_SYM BY order_list + ; + +opt_window_frame_clause: + /* empty */ {} + | window_frame_units window_frame_extent opt_window_frame_exclusion + { + LEX *lex= Lex; + 
lex->win_frame= + new (thd->mem_root) Window_frame($1, + lex->frame_top_bound, + lex->frame_bottom_bound, + $3); + if (lex->win_frame == NULL) + MYSQL_YYABORT; + } + ; + +window_frame_units: + ROWS_SYM { $$= Window_frame::UNITS_ROWS; } + | RANGE_SYM { $$= Window_frame::UNITS_RANGE; } + ; + +window_frame_extent: + window_frame_start + { + LEX *lex= Lex; + lex->frame_top_bound= $1; + lex->frame_bottom_bound= + new (thd->mem_root) + Window_frame_bound(Window_frame_bound::CURRENT, NULL); + if (lex->frame_bottom_bound == NULL) + MYSQL_YYABORT; + } + | BETWEEN_SYM window_frame_bound AND_SYM window_frame_bound + { + LEX *lex= Lex; + lex->frame_top_bound= $2; + lex->frame_bottom_bound= $4; + } + ; + +window_frame_start: + UNBOUNDED_SYM PRECEDING_SYM + { + $$= new (thd->mem_root) + Window_frame_bound(Window_frame_bound::PRECEDING, NULL); + if ($$ == NULL) + MYSQL_YYABORT; + } + | CURRENT_SYM ROW_SYM + { + $$= new (thd->mem_root) + Window_frame_bound(Window_frame_bound::CURRENT, NULL); + if ($$ == NULL) + MYSQL_YYABORT; + } + | literal PRECEDING_SYM + { + $$= new (thd->mem_root) + Window_frame_bound(Window_frame_bound::PRECEDING, $1); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + +window_frame_bound: + window_frame_start { $$= $1; } + | UNBOUNDED_SYM FOLLOWING_SYM + { + $$= new (thd->mem_root) + Window_frame_bound(Window_frame_bound::FOLLOWING, NULL); + if ($$ == NULL) + MYSQL_YYABORT; + } + | literal FOLLOWING_SYM + { + $$= new (thd->mem_root) + Window_frame_bound(Window_frame_bound::FOLLOWING, $1); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + +opt_window_frame_exclusion: + /* empty */ { $$= Window_frame::EXCL_NONE; } + | EXCLUDE_SYM CURRENT_SYM ROW_SYM + { $$= Window_frame::EXCL_CURRENT_ROW; } + | EXCLUDE_SYM GROUP_SYM + { $$= Window_frame::EXCL_GROUP; } + | EXCLUDE_SYM TIES_SYM + { $$= Window_frame::EXCL_TIES; } + | EXCLUDE_SYM NO_SYM OTHERS_SYM + { $$= Window_frame::EXCL_NONE; } + ; + /* Order by statement in ALTER TABLE */ @@ -11675,9 +11960,11 @@ opt_procedure_clause: if (&lex->select_lex != lex->current_select) { + // SELECT * FROM t1 UNION SELECT * FROM t2 PROCEDURE ANALYSE(); my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"); MYSQL_YYABORT; } + lex->proc_list.elements=0; lex->proc_list.first=0; lex->proc_list.next= &lex->proc_list.first; @@ -16088,7 +16375,10 @@ union_option: query_specification: SELECT_SYM select_init2_derived - table_expression + opt_table_expression + opt_order_clause + opt_limit_clause + opt_select_lock_type { $$= Lex->current_select->master_unit()->first_select(); } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 9d871703bfe..4bf202813f3 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4629,8 +4629,7 @@ static bool check_locale(sys_var *self, THD *thd, set_var *var) mysql_mutex_lock(&LOCK_error_messages); res= (!locale->errmsgs->errmsgs && read_texts(ERRMSG_FILE, locale->errmsgs->language, - &locale->errmsgs->errmsgs, - ER_ERROR_LAST - ER_ERROR_FIRST + 1)); + &locale->errmsgs->errmsgs)); mysql_mutex_unlock(&LOCK_error_messages); if (res) { diff --git a/sql/table.cc b/sql/table.cc index 07e2876f5ba..dc1730b5b6f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -5748,7 +5748,7 @@ void TABLE::mark_columns_used_by_index(uint index) MY_BITMAP *bitmap= &tmp_set; DBUG_ENTER("TABLE::mark_columns_used_by_index"); - enable_keyread(); + set_keyread(true); bitmap_clear_all(bitmap); mark_columns_used_by_index_no_reset(index, bitmap); column_bitmaps_set(bitmap, bitmap); @@ -5769,7 +5769,7 @@ void TABLE::add_read_columns_used_by_index(uint index) MY_BITMAP *bitmap= 
&tmp_set; DBUG_ENTER("TABLE::add_read_columns_used_by_index"); - enable_keyread(); + set_keyread(true); bitmap_copy(bitmap, read_set); mark_columns_used_by_index_no_reset(index, bitmap); column_bitmaps_set(bitmap, write_set); @@ -5792,7 +5792,7 @@ void TABLE::restore_column_maps_after_mark_index() { DBUG_ENTER("TABLE::restore_column_maps_after_mark_index"); - disable_keyread(); + set_keyread(false); default_column_bitmaps(); file->column_bitmaps_signal(); DBUG_VOID_RETURN; diff --git a/sql/table.h b/sql/table.h index 1c461d96097..a105df31e93 100644 --- a/sql/table.h +++ b/sql/table.h @@ -213,8 +213,13 @@ typedef struct st_order { Field *fast_field_copier_setup; int counter; /* position in SELECT list, correct only if counter_used is true*/ - bool asc; /* true if ascending */ - bool free_me; /* true if item isn't shared */ + enum enum_order { + ORDER_NOT_RELEVANT, + ORDER_ASC, + ORDER_DESC + }; + + enum_order direction; /* Requested direction of ordering */ bool in_field_list; /* true if in select field list */ bool counter_used; /* parameter was counter of columns */ Field *field; /* If tmp-table group */ @@ -1239,7 +1244,9 @@ public: bool alias_name_used; /* true if table_name is alias */ bool get_fields_in_item_tree; /* Signal to fix_field */ bool m_needs_reopen; +private: bool created; /* For tmp tables. TRUE <=> tmp table was actually created.*/ +public: #ifdef HAVE_REPLICATION /* used in RBR Triggers */ bool master_had_triggers; @@ -1351,30 +1358,46 @@ public: map= map_arg; tablenr= tablenr_arg; } - inline void enable_keyread() + + void set_keyread(bool flag) { - DBUG_ENTER("enable_keyread"); - DBUG_ASSERT(key_read == 0); - key_read= 1; - file->extra(HA_EXTRA_KEYREAD); - DBUG_VOID_RETURN; + DBUG_ASSERT(file); + if (flag && !key_read) + { + key_read= 1; + if (is_created()) + file->extra(HA_EXTRA_KEYREAD); + } + else if (!flag && key_read) + { + key_read= 0; + if (is_created()) + file->extra(HA_EXTRA_NO_KEYREAD); + } } + + /// Return true if table is instantiated, and false otherwise. + bool is_created() const { return created; } + + /** + Set the table as "created", and enable flags in storage engine + that could not be enabled without an instantiated table. + */ + void set_created() + { + if (created) + return; + if (key_read) + file->extra(HA_EXTRA_KEYREAD); + created= true; + } + /* Returns TRUE if the table is filled at execution phase (and so, the optimizer must not do anything that depends on the contents of the table, like range analysis or constant table detection) */ bool is_filled_at_execution(); - inline void disable_keyread() - { - DBUG_ENTER("disable_keyread"); - if (key_read) - { - key_read= 0; - file->extra(HA_EXTRA_NO_KEYREAD); - } - DBUG_VOID_RETURN; - } bool update_const_key_parts(COND *conds); @@ -1941,6 +1964,7 @@ struct TABLE_LIST bool updating; /* for replicate-do/ignore table */ bool force_index; /* prefer index over table scan */ bool ignore_leaves; /* preload only non-leaf nodes */ + bool crashed; /* Table was found crashed */ table_map dep_tables; /* tables the table depends on */ table_map on_expr_dep_tables; /* tables on expression depends on */ struct st_nested_join *nested_join; /* if the element is a nested join */ @@ -2049,6 +2073,11 @@ struct TABLE_LIST /* TRUE <=> this table is a const one and was optimized away. */ bool optimized_away; + /** + TRUE <=> already materialized. Valid only for materialized derived + tables/views. + */ + bool materialized; /* I_S: Flags to open_table (e.g. 
OPEN_TABLE_ONLY or OPEN_VIEW_ONLY) */ uint i_s_requested_object; diff --git a/sql/unireg.h b/sql/unireg.h index 10751b6ec93..251597c1884 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -43,15 +43,16 @@ #define PLUGINDIR "lib/plugin" #endif -#define CURRENT_THD_ERRMSGS current_thd->variables.errmsgs -#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs - -#define ER(X) CURRENT_THD_ERRMSGS[(X) - ER_ERROR_FIRST] -#define ER_DEFAULT(X) DEFAULT_ERRMSGS[(X) - ER_ERROR_FIRST] -#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code") -#define ER_SAFE_THD(T,X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER_THD(T,X) : "Invalid error code") -#define ER_THD(thd,X) ((thd)->variables.errmsgs[(X) - ER_ERROR_FIRST]) -#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, X) : ER_DEFAULT(X)) +#define MAX_ERROR_RANGES 4 /* 1000-2000, 2000-3000, 3000-4000, 4000-5000 */ +#define ERRORS_PER_RANGE 1000 + +#define DEFAULT_ERRMSGS my_default_lc_messages->errmsgs->errmsgs +#define CURRENT_THD_ERRMSGS (current_thd)->variables.errmsgs + +#define ER_DEFAULT(X) DEFAULT_ERRMSGS[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X)% ERRORS_PER_RANGE] +#define ER_THD(thd,X) ((thd)->variables.errmsgs[((X)-ER_ERROR_FIRST) / ERRORS_PER_RANGE][(X) % ERRORS_PER_RANGE]) +#define ER(X) ER_THD(current_thd, (X)) +#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, (X)) : ER_DEFAULT(X)) #define ME_INFO (ME_HOLDTANG+ME_OLDWIN+ME_NOREFRESH) #define ME_ERROR (ME_BELL+ME_OLDWIN+ME_NOREFRESH) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index f84ebe4dbb9..3cdf95e2bf2 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -202,7 +202,7 @@ wsrep_uuid_t local_uuid = WSREP_UUID_UNDEFINED; wsrep_seqno_t local_seqno = WSREP_SEQNO_UNDEFINED; long wsrep_protocol_version = 3; -wsp::Config_state wsrep_config_state; +wsp::Config_state *wsrep_config_state; // Boolean denoting if server is in initial startup phase. 
This is needed // to make sure that main thread waiting in wsrep_sst_wait() is signaled @@ -281,7 +281,7 @@ wsrep_view_handler_cb (void* app_ctx, *sst_req = NULL; *sst_req_len = 0; - wsrep_member_status_t memb_status= wsrep_config_state.get_status(); + wsrep_member_status_t memb_status= wsrep_config_state->get_status(); if (memcmp(&cluster_uuid, &view->state_id.uuid, sizeof(wsrep_uuid_t))) { @@ -442,7 +442,7 @@ wsrep_view_handler_cb (void* app_ctx, out: if (view->status == WSREP_VIEW_PRIMARY) wsrep_startup= FALSE; - wsrep_config_state.set(memb_status, view); + wsrep_config_state->set(memb_status, view); return WSREP_CB_SUCCESS; } @@ -484,7 +484,7 @@ static void wsrep_synced_cb(void* app_ctx) signal_main= true; } - wsrep_config_state.set(WSREP_MEMBER_SYNCED); + wsrep_config_state->set(WSREP_MEMBER_SYNCED); mysql_mutex_unlock (&LOCK_wsrep_ready); if (signal_main) @@ -753,6 +753,8 @@ done: /* Initialize wsrep thread LOCKs and CONDs */ void wsrep_thr_init() { + DBUG_ENTER("wsrep_thr_init"); + wsrep_config_state = new wsp::Config_state; #ifdef HAVE_PSI_INTERFACE mysql_mutex_register("sql", wsrep_mutexes, array_elements(wsrep_mutexes)); mysql_cond_register("sql", wsrep_conds, array_elements(wsrep_conds)); @@ -772,6 +774,7 @@ void wsrep_thr_init() mysql_mutex_init(key_LOCK_wsrep_slave_threads, &LOCK_wsrep_slave_threads, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_LOCK_wsrep_desync, &LOCK_wsrep_desync, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_LOCK_wsrep_config_state, &LOCK_wsrep_config_state, MY_MUTEX_INIT_FAST); + DBUG_VOID_RETURN; } @@ -819,6 +822,8 @@ void wsrep_deinit(bool free_options) /* Destroy wsrep thread LOCKs and CONDs */ void wsrep_thr_deinit() { + if (!wsrep_config_state) + return; // Never initialized mysql_mutex_destroy(&LOCK_wsrep_ready); mysql_cond_destroy(&COND_wsrep_ready); mysql_mutex_destroy(&LOCK_wsrep_sst); @@ -832,6 +837,8 @@ void wsrep_thr_deinit() mysql_mutex_destroy(&LOCK_wsrep_slave_threads); mysql_mutex_destroy(&LOCK_wsrep_desync); mysql_mutex_destroy(&LOCK_wsrep_config_state); + delete wsrep_config_state; + wsrep_config_state= 0; // Safety } void wsrep_recover() diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 6d04527cbcb..562bc7effb4 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -1256,7 +1256,7 @@ wsrep_cb_status_t wsrep_sst_donate_cb (void* app_ctx, void* recv_ctx, /* This will be reset when sync callback is called. * Should we set wsrep_ready to FALSE here too? */ - wsrep_config_state.set(WSREP_MEMBER_DONOR); + wsrep_config_state->set(WSREP_MEMBER_DONOR); const char* method = (char*)msg; size_t method_len = strlen (method); diff --git a/sql/wsrep_utils.h b/sql/wsrep_utils.h index ed699eabec9..54235cf5273 100644 --- a/sql/wsrep_utils.h +++ b/sql/wsrep_utils.h @@ -233,7 +233,7 @@ private: } /* namespace wsp */ -extern wsp::Config_state wsrep_config_state; +extern wsp::Config_state *wsrep_config_state; namespace wsp { /* a class to manage env vars array */ diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc index 573357b54ee..0dff05567d7 100644 --- a/storage/innobase/dict/dict0boot.cc +++ b/storage/innobase/dict/dict0boot.cc @@ -458,12 +458,22 @@ dict_boot(void) if (err == DB_SUCCESS) { if (srv_read_only_mode && !ibuf_is_empty()) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Change buffer must be empty when --innodb-read-only " - "is set!"); + if (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Change buffer must be empty when --innodb-read-only " + "is set! 
" + "You can try to recover the database with innodb_force_recovery=5"); + + err = DB_ERROR; + } else { + ib_logf(IB_LOG_LEVEL_WARN, + "Change buffer not empty when --innodb-read-only " + "is set! but srv_force_recovery = %d, ignoring.", + srv_force_recovery); + } + } - err = DB_ERROR; - } else { + if (err == DB_SUCCESS) { /* Load definitions of other indexes on system tables */ dict_load_sys_table(dict_sys->sys_tables); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index d918b21d779..22abf592adc 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -737,11 +737,9 @@ fil_node_open_file( } } - if (size_bytes >= FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) { + if (size_bytes >= (1024*1024)) { /* Truncate the size to whole extent size. */ - size_bytes = ut_2pow_round(size_bytes, - FSP_EXTENT_SIZE * - UNIV_PAGE_SIZE); + size_bytes = ut_2pow_round(size_bytes, (1024*1024)); } if (!fsp_flags_is_compressed(flags)) { diff --git a/storage/innobase/include/log0crypt.h b/storage/innobase/include/log0crypt.h index 7e737853465..6b164e90d6e 100644 --- a/storage/innobase/include/log0crypt.h +++ b/storage/innobase/include/log0crypt.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2015, Google Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. All Rights Reserved. +Copyright (C) 2014, 2016, MariaDB Corporation. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -117,4 +117,12 @@ log_crypt_print_error( /*==================*/ log_crypt_err_t err_info); /*!< out: error info */ +/*********************************************************************//** +Print checkpoint no from log block and all encryption keys from +checkpoints if they are present. Used for problem analysis. */ +void +log_crypt_print_checkpoint_keys( +/*============================*/ + const byte* log_block); + #endif // log0crypt.h diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h index e2635248b03..176f132704a 100644 --- a/storage/innobase/include/ut0ut.h +++ b/storage/innobase/include/ut0ut.h @@ -88,15 +88,32 @@ private: the YieldProcessor macro defined in WinNT.h. It is a CPU architecture- independent way by using YieldProcessor. */ # define UT_RELAX_CPU() YieldProcessor() -# elif defined(HAVE_ATOMIC_BUILTINS) +# elif defined(__powerpc__) +#include # define UT_RELAX_CPU() do { \ - volatile lint volatile_var; \ - os_compare_and_swap_lint(&volatile_var, 0, 1); \ + volatile lint volatile_var = __ppc_get_timebase(); \ } while (0) # else # define UT_RELAX_CPU() ((void)0) /* avoid warning for an empty statement */ # endif +#if defined (__GNUC__) +# define UT_COMPILER_BARRIER() __asm__ __volatile__ ("":::"memory") +#elif defined (_MSC_VER) +# define UT_COMPILER_BARRIER() _ReadWriteBarrier() +#else +# define UT_COMPILER_BARRIER() +#endif + +# if defined(HAVE_HMT_PRIORITY_INSTRUCTION) +#include +# define UT_LOW_PRIORITY_CPU() __ppc_set_ppr_low() +# define UT_RESUME_PRIORITY_CPU() __ppc_set_ppr_med() +# else +# define UT_LOW_PRIORITY_CPU() ((void)0) +# define UT_RESUME_PRIORITY_CPU() ((void)0) +# endif + /*********************************************************************//** Delays execution for at most max_wait_us microseconds or returns earlier if cond becomes true. @@ -342,7 +359,7 @@ Runs an idle loop on CPU. 
The argument gives the desired delay in microseconds on 100 MHz Pentium + Visual C++. @return dummy value */ UNIV_INTERN -ulint +void ut_delay( /*=====*/ ulint delay); /*!< in: delay in microseconds on 100 MHz Pentium */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 165db2d33ed..bc7ec1d221b 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -6817,7 +6817,7 @@ lock_clust_rec_modify_check_and_lock( lock_rec_convert_impl_to_expl(block, rec, index, offsets); lock_mutex_enter(); - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); ut_ad(lock_table_has(trx, index->table, LOCK_IX)); @@ -6877,7 +6877,7 @@ lock_sec_rec_modify_check_and_lock( index record, and this would not have been possible if another active transaction had modified this secondary index record. */ - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); lock_mutex_enter(); ut_ad(lock_table_has(trx, index->table, LOCK_IX)); @@ -6977,7 +6977,7 @@ lock_sec_rec_read_check_and_lock( lock_rec_convert_impl_to_expl(block, rec, index, offsets); } - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); lock_mutex_enter(); ut_ad(mode != LOCK_X @@ -7051,7 +7051,7 @@ lock_clust_rec_read_check_and_lock( } lock_mutex_enter(); - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); ut_ad(mode != LOCK_X || lock_table_has(trx, index->table, LOCK_IX)); diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 852148899e9..db2e84d7e45 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -127,11 +127,34 @@ static const crypt_info_t* get_crypt_info( /*===========*/ - const byte* log_block) { + const byte* log_block) +{ ib_uint64_t checkpoint_no = log_block_get_checkpoint_no(log_block); return get_crypt_info(checkpoint_no); } +/*********************************************************************//** +Print checkpoint no from log block and all encryption keys from +checkpoints if they are present. Used for problem analysis. */ +void +log_crypt_print_checkpoint_keys( +/*============================*/ + const byte* log_block) +{ + ib_uint64_t checkpoint_no = log_block_get_checkpoint_no(log_block); + + if (crypt_info.size()) { + fprintf(stderr, "InnoDB: redo log checkpoint: %lu [ chk key ]: ", checkpoint_no); + for (size_t i = 0; i < crypt_info.size(); i++) { + struct crypt_info_t* it = &crypt_info[i]; + fprintf(stderr, "[ %lu %u ] ", + it->checkpoint_no, + it->key_version); + } + fprintf(stderr, "\n"); + } +} + /*********************************************************************//** Call AES CTR to encrypt/decrypt log blocks. */ static @@ -278,12 +301,22 @@ Add crypt info to set if it is not already present @return true if successfull, false if not- */ static bool -add_crypt_info(crypt_info_t* info) +add_crypt_info( +/*===========*/ + crypt_info_t* info, /*!< in: crypt info */ + bool checkpoint_read)/*!< in: do we read checkpoint */ { + const crypt_info_t* found=NULL; /* so that no one is searching array while we modify it */ ut_ad(mutex_own(&(log_sys->mutex))); - if (get_crypt_info(info->checkpoint_no) != NULL) { + found = get_crypt_info(info->checkpoint_no); + + /* If one crypt info is found then we add a new one only if we + are reading checkpoint from the log. New checkpoints will always + use the first created crypt info. 
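+	In other words: when reading a checkpoint (checkpoint_read == true)
+	an entry is added unless one with the same checkpoint_no is already
+	present; otherwise (checkpoint_read == false) nothing is added if an
+	existing crypt info entry was found above.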
*/ + if (found != NULL && + ( found->checkpoint_no == info->checkpoint_no || !checkpoint_read)) { // already present... return true; } @@ -356,7 +389,7 @@ log_crypt_set_ver_and_key( } - add_crypt_info(&info); + add_crypt_info(&info, false); } /******************************************************** @@ -514,7 +547,7 @@ log_crypt_read_checkpoint_buf( memcpy(info.crypt_msg, buf + 8, MY_AES_BLOCK_SIZE); memcpy(info.crypt_nonce, buf + 24, MY_AES_BLOCK_SIZE); - if (!add_crypt_info(&info)) { + if (!add_crypt_info(&info, true)) { return false; } buf += LOG_CRYPT_ENTRY_SIZE; diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 2304f4885c2..d574cd55397 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2714,6 +2714,8 @@ recv_scan_log_recs( /* Garbage or an incompletely written log block */ + /* Print checkpoint encryption keys if present */ + log_crypt_print_checkpoint_keys(log_block); finished = TRUE; if (maybe_encrypted) { diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index a5970c1dc3f..bde40220db3 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -45,9 +45,6 @@ Created 5/11/1994 Heikki Tuuri # include #endif /* UNIV_HOTBACKUP */ -/** A constant to prevent the compiler from optimizing ut_delay() away. */ -UNIV_INTERN ibool ut_always_false = FALSE; - #ifdef __WIN__ /*****************************************************************//** NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix @@ -397,25 +394,21 @@ Runs an idle loop on CPU. The argument gives the desired delay in microseconds on 100 MHz Pentium + Visual C++. @return dummy value */ UNIV_INTERN -ulint +void ut_delay( /*=====*/ ulint delay) /*!< in: delay in microseconds on 100 MHz Pentium */ { - ulint i, j; + ulint i; - j = 0; + UT_LOW_PRIORITY_CPU(); for (i = 0; i < delay * 50; i++) { - j += i; UT_RELAX_CPU(); + UT_COMPILER_BARRIER(); } - if (ut_always_false) { - ut_always_false = (ibool) j; - } - - return(j); + UT_RESUME_PRIORITY_CPU(); } #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 947a019827c..75e029a7c9f 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -143,7 +143,7 @@ static mysql_mutex_t *mrn_LOCK_open; # define MRN_NEED_M_LOCK_TYPE_CHECK_FOR_WRAPPER_EXTERNAL_LOCK #endif -#if MYSQL_VERSION_ID >= 50603 && !defined(MRN_MARIADB_P) +#if MYSQL_VERSION_ID >= 50603 || defined(MRN_MARIADB_P) # define MRN_ORDER_IS_ASC(order) ((order)->direction == ORDER::ORDER_ASC) #else # define MRN_ORDER_IS_ASC(order) ((order)->asc) diff --git a/storage/mroonga/mrn_table.cpp b/storage/mroonga/mrn_table.cpp index 144dbe0c530..96f24ff2e00 100644 --- a/storage/mroonga/mrn_table.cpp +++ b/storage/mroonga/mrn_table.cpp @@ -998,7 +998,7 @@ int mrn_free_share(MRN_SHARE *share) TABLE_SHARE *mrn_get_table_share(TABLE_LIST *table_list, int *error) { - uint key_length; + uint key_length __attribute__((unused)); TABLE_SHARE *share; THD *thd = current_thd; MRN_DBUG_ENTER_FUNCTION(); diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 6215a5584d0..a41a943cd04 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -8134,7 +8134,7 @@ int spider_mysql_handler::append_key_order_for_direct_order_limit_with_alias( DBUG_PRINT("info",("spider error=%d", error_num)); DBUG_RETURN(error_num); } - if (order->asc) + if (order->direction == ORDER::ORDER_ASC) { if 
(str->reserve(SPIDER_SQL_COMMA_LEN)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h index e263cabb0d1..5b387924b1e 100644 --- a/storage/tokudb/ha_tokudb.h +++ b/storage/tokudb/ha_tokudb.h @@ -602,6 +602,12 @@ public: // ICP introduced in MariaDB 5.5 Item* idx_cond_push(uint keyno, class Item* idx_cond); +#ifdef MARIADB_BASE_VERSION + void cancel_pushed_idx_cond() + { + invalidate_icp(); + } +#endif #if TOKU_INCLUDE_ALTER_56 public: diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_all.py b/storage/tokudb/mysql-test/tokudb/t/change_column_all.py index 04e7d143696..75cffb88dff 100644 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_all.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_all.py @@ -168,6 +168,7 @@ def header(): print "# generated from change_column_all.py" print "# test random column change on wide tables" print "source include/have_tokudb.inc;" + print "--source include/big_test.inc" print "--disable_warnings" print "DROP TABLE IF EXISTS t, ti;" print "--enable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result index 2975d7d3116..12fec571d87 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result @@ -110,7 +110,7 @@ a b c d e 5 1 10 NULL NULL show status like '%Handler_read_prev%'; Variable_name Value -Handler_read_prev 41 +Handler_read_prev 800 flush status; show status like '%Handler_read_prev%'; Variable_name Value @@ -142,7 +142,7 @@ a b c d e 20 1 10 NULL NULL show status like '%Handler_read_prev%'; Variable_name Value -Handler_read_prev 21 +Handler_read_prev 400 flush status; show status like '%Handler_read_next%'; Variable_name Value diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result index 82cbcbc311e..5b860845490 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result @@ -47,6 +47,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with abs(col1) ------------------------------------------------------------------------- +begin; insert into t1 values (5 ); insert into t1 values (13 ); insert into t2 values (5 ); @@ -55,6 +56,7 @@ insert into t2 values (17 ); insert into t3 values (5 ); insert into t3 values (13 ); insert into t3 values (17 ); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t6; @@ -217,12 +219,14 @@ colint col1 50 56 51 34 55 123 +begin; update t1 set col1=15 where col1=5 ; update t2 set col1=15 where col1=5 ; update t3 set col1=15 where col1=5 ; update t4 set col1=15 where col1=5 ; update t5 set col1=15 where col1=5 ; update t6 set col1=15 where col1=5 ; +commit; select * from t1 order by col1; col1 13 @@ -877,12 +881,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with abs(col1) 
------------------------------------------------------------------------- +begin; delete from t1 where col1=13 ; delete from t2 where col1=13 ; delete from t3 where col1=13 ; delete from t4 where col1=13 ; delete from t5 where col1=13 ; delete from t6 where col1=13 ; +commit; select * from t1 order by col1; col1 15 @@ -986,12 +992,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t1 values (13 ); insert into t2 values (13 ); insert into t3 values (13 ); insert into t4 values (60,13 ); insert into t5 values (60,13 ); insert into t6 values (60,13 ); +commit; select * from t1 order by col1; col1 13 @@ -1274,12 +1282,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with abs(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1=13 ; delete from t22 where col1=13 ; delete from t33 where col1=13 ; delete from t44 where col1=13 ; delete from t55 where col1=13 ; delete from t66 where col1=13 ; +commit; select * from t11 order by col1; col1 15 @@ -1383,12 +1393,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t11 values (13 ); insert into t22 values (13 ); insert into t33 values (13 ); insert into t44 values (60,13 ); insert into t55 values (60,13 ); insert into t66 values (60,13 ); +commit; select * from t11 order by col1; col1 13 @@ -1732,6 +1744,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with mod(col1,10) ------------------------------------------------------------------------- +begin; insert into t1 values (5); insert into t1 values (19); insert into t2 values (5); @@ -1740,6 +1753,7 @@ insert into t2 values (17); insert into t3 values (5); insert into t3 values (19); insert into t3 values (17); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t6; @@ -1902,12 +1916,14 @@ colint col1 50 56 51 34 55 123 +begin; update t1 set col1=15 where col1=5; update t2 set col1=15 where col1=5; update t3 set col1=15 where col1=5; update t4 set col1=15 where col1=5; update t5 set col1=15 where col1=5; update t6 set col1=15 where col1=5; +commit; select * from t1 order by col1; col1 15 @@ -2562,12 +2578,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with mod(col1,10) ------------------------------------------------------------------------- +begin; delete from t1 where col1=19; delete from t2 where col1=19; delete from t3 where col1=19; delete from t4 where col1=19; delete from t5 where col1=19; delete from t6 where col1=19; +commit; select * from t1 order by col1; col1 15 @@ -2673,12 +2691,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t1 values (19); insert into t2 values (19); insert into t3 values (19); insert into t4 values (60,19); insert into t5 values (60,19); insert into t6 values (60,19); +commit; select * from t1 order by col1; col1 15 @@ -2970,12 +2990,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with mod(col1,10) ------------------------------------------------------------------------- +begin; delete from t11 
where col1=19; delete from t22 where col1=19; delete from t33 where col1=19; delete from t44 where col1=19; delete from t55 where col1=19; delete from t66 where col1=19; +commit; select * from t11 order by col1; col1 15 @@ -3081,12 +3103,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t11 values (19); insert into t22 values (19); insert into t33 values (19); insert into t44 values (60,19); insert into t55 values (60,19); insert into t66 values (60,19); +commit; select * from t11 order by col1; col1 15 @@ -3439,6 +3463,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with day(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-02-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-02-03'); @@ -3447,6 +3472,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-02-03'); insert into t3 values ('2006-01-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -3486,12 +3512,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-02-03'; update t2 set col1='2006-02-05' where col1='2006-02-03'; update t3 set col1='2006-02-05' where col1='2006-02-03'; update t4 set col1='2006-02-05' where col1='2006-02-03'; update t5 set col1='2006-02-05' where col1='2006-02-03'; update t6 set col1='2006-02-05' where col1='2006-02-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -3695,12 +3723,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with day(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -3722,12 +3752,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -3789,12 +3821,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with day(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -3816,12 +3850,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 
values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -3944,6 +3980,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-02-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-02-03'); @@ -3952,6 +3989,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-02-03'); insert into t3 values ('2006-01-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -3991,12 +4029,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-02-03'; update t2 set col1='2006-02-05' where col1='2006-02-03'; update t3 set col1='2006-02-05' where col1='2006-02-03'; update t4 set col1='2006-02-05' where col1='2006-02-03'; update t5 set col1='2006-02-05' where col1='2006-02-03'; update t6 set col1='2006-02-05' where col1='2006-02-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -4200,12 +4240,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4227,12 +4269,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -4294,12 +4338,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4321,12 +4367,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -4449,6 +4497,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with 
dayofweek(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); @@ -4457,6 +4506,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-02-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -4496,12 +4546,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4705,12 +4757,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofweek(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-02-17'; delete from t2 where col1='2006-02-17'; delete from t3 where col1='2006-02-17'; delete from t4 where col1='2006-02-17'; delete from t5 where col1='2006-02-17'; delete from t6 where col1='2006-02-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4734,12 +4788,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-02-17'); insert into t2 values ('2006-02-17'); insert into t3 values ('2006-02-17'); insert into t4 values (60,'2006-02-17'); insert into t5 values (60,'2006-02-17'); insert into t6 values (60,'2006-02-17'); +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4805,12 +4861,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofweek(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-02-17'; delete from t22 where col1='2006-02-17'; delete from t33 where col1='2006-02-17'; delete from t44 where col1='2006-02-17'; delete from t55 where col1='2006-02-17'; delete from t66 where col1='2006-02-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4834,12 +4892,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-02-17'); insert into t22 values ('2006-02-17'); insert into t33 values ('2006-02-17'); insert into t44 values (60,'2006-02-17'); insert into t55 values (60,'2006-02-17'); insert into t66 values (60,'2006-02-17'); +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4966,6 +5026,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-03'); @@ -4974,6 +5035,7 @@ insert into t2 values ('2006-02-25'); insert into t3 values ('2006-01-03'); insert into t3 values 
('2006-01-17'); insert into t3 values ('2006-02-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -5013,12 +5075,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -5222,12 +5286,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5249,12 +5315,14 @@ colint col1 1 2006-02-03 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -5317,12 +5385,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5344,12 +5414,14 @@ colint col1 1 2006-02-03 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -5473,6 +5545,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with extract(month from col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); @@ -5481,6 +5554,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-02-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 
'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -5520,12 +5594,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5729,12 +5805,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-02-17'; delete from t2 where col1='2006-02-17'; delete from t3 where col1='2006-02-17'; delete from t4 where col1='2006-02-17'; delete from t5 where col1='2006-02-17'; delete from t6 where col1='2006-02-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5758,12 +5836,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-02-17'); insert into t2 values ('2006-02-17'); insert into t3 values ('2006-02-17'); insert into t4 values (60,'2006-02-17'); insert into t5 values (60,'2006-02-17'); insert into t6 values (60,'2006-02-17'); +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5824,12 +5904,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-02-17'; delete from t22 where col1='2006-02-17'; delete from t33 where col1='2006-02-17'; delete from t44 where col1='2006-02-17'; delete from t55 where col1='2006-02-17'; delete from t66 where col1='2006-02-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5853,12 +5935,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-02-17'); insert into t22 values ('2006-02-17'); insert into t33 values ('2006-02-17'); insert into t44 values (60,'2006-02-17'); insert into t55 values (60,'2006-02-17'); insert into t66 values (60,'2006-02-17'); +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5980,6 +6064,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with hour(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09'); insert into t1 values ('14:30'); insert into t2 values ('09:09'); @@ -5988,6 +6073,7 @@ insert into t2 values ('21:59'); insert into t3 values ('09:09'); insert into t3 values ('14:30'); insert into t3 values ('21:59'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -6027,12 +6113,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:30' where col1='09:09'; update t2 set col1='10:30' where col1='09:09'; update t3 set col1='10:30' where col1='09:09'; update t4 set col1='10:30' where col1='09:09'; 
update t5 set col1='10:30' where col1='09:09'; update t6 set col1='10:30' where col1='09:09'; +commit; select * from t1 order by col1; col1 10:30:00 @@ -6236,12 +6324,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30'; delete from t2 where col1='14:30'; delete from t3 where col1='14:30'; delete from t4 where col1='14:30'; delete from t5 where col1='14:30'; delete from t6 where col1='14:30'; +commit; select * from t1 order by col1; col1 10:30:00 @@ -6265,12 +6355,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30'); insert into t2 values ('14:30'); insert into t3 values ('14:30'); insert into t4 values (60,'14:30'); insert into t5 values (60,'14:30'); insert into t6 values (60,'14:30'); +commit; select * from t1 order by col1; col1 10:30:00 @@ -6334,12 +6426,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30'; delete from t22 where col1='14:30'; delete from t33 where col1='14:30'; delete from t44 where col1='14:30'; delete from t55 where col1='14:30'; delete from t66 where col1='14:30'; +commit; select * from t11 order by col1; col1 10:30:00 @@ -6363,12 +6457,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t11 values ('14:30'); insert into t22 values ('14:30'); insert into t33 values ('14:30'); insert into t44 values (60,'14:30'); insert into t55 values (60,'14:30'); insert into t66 values (60,'14:30'); +commit; select * from t11 order by col1; col1 10:30:00 @@ -6445,7 +6541,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- microsecond(col1) in partition with coltype time +--- microsecond(col1) in partition with coltype time(6) ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6456,11 +6552,11 @@ drop table if exists t6 ; ------------------------------------------------------------------------- --- Create tables with microsecond(col1) ------------------------------------------------------------------------- -create table t1 (col1 time) engine='TOKUDB' +create table t1 (col1 time(6)) engine='TOKUDB' partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 time) engine='TOKUDB' +create table t2 (col1 time(6)) engine='TOKUDB' partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), @@ -6469,14 +6565,14 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 time) engine='TOKUDB' +create table t3 (col1 time(6)) engine='TOKUDB' partition by hash(microsecond(col1)); -create table t4 (colint int, col1 time) engine='TOKUDB' +create table t4 (colint int, col1 time(6)) engine='TOKUDB' partition by range(colint) subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less 
than maxvalue); -create table t5 (colint int, col1 time) engine='TOKUDB' +create table t5 (colint int, col1 time(6)) engine='TOKUDB' partition by list(colint) subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), @@ -6486,13 +6582,14 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 time) engine='TOKUDB' +create table t6 (colint int, col1 time(6)) engine='TOKUDB' partition by range(colint) (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with microsecond(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:15.000002'); insert into t1 values ('04:30:01.000018'); insert into t2 values ('09:09:15.000002'); @@ -6501,83 +6598,86 @@ insert into t2 values ('00:59:22.000024'); insert into t3 values ('09:09:15.000002'); insert into t3 values ('04:30:01.000018'); insert into t3 values ('00:59:22.000024'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; select microsecond(col1) from t1 order by col1; microsecond(col1) -0 -0 +18 +2 select * from t1 order by col1; col1 -04:30:01 -09:09:15 +04:30:01.000018 +09:09:15.000002 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -09:09:15 +00:59:22.000024 +04:30:01.000018 +09:09:15.000002 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -09:09:15 +00:59:22.000024 +04:30:01.000018 +09:09:15.000002 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 +begin; update t1 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t2 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t3 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t4 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t5 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t6 set col1='05:30:34.000037' where col1='09:09:15.000002'; +commit; select * from t1 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 
select * from t6 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 ------------------------------------------------------------------------- --- Alter tables with microsecond(col1) ------------------------------------------------------------------------- @@ -6629,36 +6729,36 @@ partition by range(colint) partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t55 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 --------------------------- ---- some alter table begin --------------------------- @@ -6667,16 +6767,16 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 alter table t55 partition by list(colint) subpartition by hash(microsecond(col1)) subpartitions 5 @@ -6691,7 +6791,7 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL + `col1` time(6) DEFAULT NULL ) ENGINE=TokuDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (microsecond(col1)) @@ -6704,116 +6804,120 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = TokuDB) */ select * from t55 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition s1 into (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition s1 into (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 
05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='04:30:01.000018'; delete from t2 where col1='04:30:01.000018'; delete from t3 where col1='04:30:01.000018'; delete from t4 where col1='04:30:01.000018'; delete from t5 where col1='04:30:01.000018'; delete from t6 where col1='04:30:01.000018'; +commit; select * from t1 order by col1; col1 -05:30:34 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +begin; insert into t1 values ('04:30:01.000018'); insert into t2 values ('04:30:01.000018'); insert into t3 values ('04:30:01.000018'); insert into t4 values (60,'04:30:01.000018'); insert into t5 values (60,'04:30:01.000018'); insert into t6 values (60,'04:30:01.000018'); +commit; select * from t1 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t5 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t6 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6821,90 +6925,99 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t5 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t6 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='04:30:01.000018'; delete from t22 where col1='04:30:01.000018'; delete from t33 where col1='04:30:01.000018'; delete from t44 where col1='04:30:01.000018'; delete from t55 where col1='04:30:01.000018'; delete from t66 where col1='04:30:01.000018'; +commit; select * from t11 order by col1; col1 -05:30:34 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 
-05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 select * from t55 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +begin; insert into t11 values ('04:30:01.000018'); insert into t22 values ('04:30:01.000018'); insert into t33 values ('04:30:01.000018'); insert into t44 values (60,'04:30:01.000018'); insert into t55 values (60,'04:30:01.000018'); insert into t66 values (60,'04:30:01.000018'); +commit; select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t55 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t66 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6912,22 +7025,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t55 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t66 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 ------------------------- ---- some alter table end ------------------------- @@ -6992,6 +7110,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with minute(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:15'); insert into t1 values ('14:30:45'); insert into t2 values ('09:09:15'); @@ -7000,6 +7119,7 @@ insert into t2 values ('21:59:22'); insert into t3 values ('09:09:15'); insert into t3 values ('14:30:45'); insert into t3 values ('21:59:22'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -7039,12 +7159,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:24:23' where col1='09:09:15'; update t2 set col1='10:24:23' where col1='09:09:15'; update t3 set col1='10:24:23' where col1='09:09:15'; update t4 set col1='10:24:23' where col1='09:09:15'; update t5 set col1='10:24:23' where col1='09:09:15'; update t6 
set col1='10:24:23' where col1='09:09:15'; +commit; select * from t1 order by col1; col1 10:24:23 @@ -7248,12 +7370,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30:45'; delete from t2 where col1='14:30:45'; delete from t3 where col1='14:30:45'; delete from t4 where col1='14:30:45'; delete from t5 where col1='14:30:45'; delete from t6 where col1='14:30:45'; +commit; select * from t1 order by col1; col1 10:24:23 @@ -7277,12 +7401,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30:45'); insert into t2 values ('14:30:45'); insert into t3 values ('14:30:45'); insert into t4 values (60,'14:30:45'); insert into t5 values (60,'14:30:45'); insert into t6 values (60,'14:30:45'); +commit; select * from t1 order by col1; col1 10:24:23 @@ -7349,12 +7475,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30:45'; delete from t22 where col1='14:30:45'; delete from t33 where col1='14:30:45'; delete from t44 where col1='14:30:45'; delete from t55 where col1='14:30:45'; delete from t66 where col1='14:30:45'; +commit; select * from t11 order by col1; col1 10:24:23 @@ -7378,12 +7506,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t11 values ('14:30:45'); insert into t22 values ('14:30:45'); insert into t33 values ('14:30:45'); insert into t44 values (60,'14:30:45'); insert into t55 values (60,'14:30:45'); insert into t66 values (60,'14:30:45'); +commit; select * from t11 order by col1; col1 10:24:23 @@ -7511,6 +7641,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with second(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:09'); insert into t1 values ('14:30:20'); insert into t2 values ('09:09:09'); @@ -7519,6 +7650,7 @@ insert into t2 values ('21:59:22'); insert into t3 values ('09:09:09'); insert into t3 values ('14:30:20'); insert into t3 values ('21:59:22'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -7558,12 +7690,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:22:33' where col1='09:09:09'; update t2 set col1='10:22:33' where col1='09:09:09'; update t3 set col1='10:22:33' where col1='09:09:09'; update t4 set col1='10:22:33' where col1='09:09:09'; update t5 set col1='10:22:33' where col1='09:09:09'; update t6 set col1='10:22:33' where col1='09:09:09'; +commit; select * from t1 order by col1; col1 10:22:33 @@ -7767,12 +7901,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30:20'; delete from t2 where col1='14:30:20'; delete from t3 where 
col1='14:30:20'; delete from t4 where col1='14:30:20'; delete from t5 where col1='14:30:20'; delete from t6 where col1='14:30:20'; +commit; select * from t1 order by col1; col1 10:22:33 @@ -7796,12 +7932,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30:20'); insert into t2 values ('14:30:20'); insert into t3 values ('14:30:20'); insert into t4 values (60,'14:30:20'); insert into t5 values (60,'14:30:20'); insert into t6 values (60,'14:30:20'); +commit; select * from t1 order by col1; col1 10:22:33 @@ -7868,12 +8006,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30:20'; delete from t22 where col1='14:30:20'; delete from t33 where col1='14:30:20'; delete from t44 where col1='14:30:20'; delete from t55 where col1='14:30:20'; delete from t66 where col1='14:30:20'; +commit; select * from t11 order by col1; col1 10:22:33 @@ -7897,12 +8037,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t11 values ('14:30:20'); insert into t22 values ('14:30:20'); insert into t33 values ('14:30:20'); insert into t44 values (60,'14:30:20'); insert into t55 values (60,'14:30:20'); insert into t66 values (60,'14:30:20'); +commit; select * from t11 order by col1; col1 10:22:33 @@ -8030,6 +8172,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with month(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-12-17'); insert into t2 values ('2006-01-03'); @@ -8038,6 +8181,7 @@ insert into t2 values ('2006-05-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-12-17'); insert into t3 values ('2006-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -8077,12 +8221,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-11-06' where col1='2006-01-03'; update t2 set col1='2006-11-06' where col1='2006-01-03'; update t3 set col1='2006-11-06' where col1='2006-01-03'; update t4 set col1='2006-11-06' where col1='2006-01-03'; update t5 set col1='2006-11-06' where col1='2006-01-03'; update t6 set col1='2006-11-06' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8286,12 +8432,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-12-17'; delete from t2 where col1='2006-12-17'; delete from t3 where col1='2006-12-17'; delete from t4 where col1='2006-12-17'; delete from t5 where col1='2006-12-17'; delete from t6 where col1='2006-12-17'; +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8315,12 +8463,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-12-17'); insert into t2 values ('2006-12-17'); insert into t3 values ('2006-12-17'); insert into t4 
values (60,'2006-12-17'); insert into t5 values (60,'2006-12-17'); insert into t6 values (60,'2006-12-17'); +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8384,12 +8534,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-12-17'; delete from t22 where col1='2006-12-17'; delete from t33 where col1='2006-12-17'; delete from t44 where col1='2006-12-17'; delete from t55 where col1='2006-12-17'; delete from t66 where col1='2006-12-17'; +commit; select * from t11 order by col1; col1 2006-11-06 @@ -8413,12 +8565,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-12-17'); insert into t22 values ('2006-12-17'); insert into t33 values ('2006-12-17'); insert into t44 values (60,'2006-12-17'); insert into t55 values (60,'2006-12-17'); insert into t66 values (60,'2006-12-17'); +commit; select * from t11 order by col1; col1 2006-11-06 @@ -8543,6 +8697,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with quarter(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-12-17'); insert into t2 values ('2006-01-03'); @@ -8551,6 +8706,7 @@ insert into t2 values ('2006-09-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-12-17'); insert into t3 values ('2006-09-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -8590,12 +8746,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-07-30' where col1='2006-01-03'; update t2 set col1='2006-07-30' where col1='2006-01-03'; update t3 set col1='2006-07-30' where col1='2006-01-03'; update t4 set col1='2006-07-30' where col1='2006-01-03'; update t5 set col1='2006-07-30' where col1='2006-01-03'; update t6 set col1='2006-07-30' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8799,12 +8957,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-12-17'; delete from t2 where col1='2006-12-17'; delete from t3 where col1='2006-12-17'; delete from t4 where col1='2006-12-17'; delete from t5 where col1='2006-12-17'; delete from t6 where col1='2006-12-17'; +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8828,12 +8988,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-12-17'); insert into t2 values ('2006-12-17'); insert into t3 values ('2006-12-17'); insert into t4 values (60,'2006-12-17'); insert into t5 values (60,'2006-12-17'); insert into t6 values (60,'2006-12-17'); +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8896,12 +9058,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with quarter(col1) 
------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-12-17'; delete from t22 where col1='2006-12-17'; delete from t33 where col1='2006-12-17'; delete from t44 where col1='2006-12-17'; delete from t55 where col1='2006-12-17'; delete from t66 where col1='2006-12-17'; +commit; select * from t11 order by col1; col1 2006-07-30 @@ -8925,12 +9089,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-12-17'); insert into t22 values ('2006-12-17'); insert into t33 values ('2006-12-17'); insert into t44 values (60,'2006-12-17'); insert into t55 values (60,'2006-12-17'); insert into t66 values (60,'2006-12-17'); +commit; select * from t11 order by col1; col1 2006-07-30 @@ -9006,523 +9172,6 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -create table t1 (col1 time) engine='TOKUDB' -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 time) engine='TOKUDB' -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 time) engine='TOKUDB' -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -create table t4 (colint int, col1 time) engine='TOKUDB' -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='TOKUDB' -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 time) engine='TOKUDB' -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); -insert into t2 
values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; -time_to_sec(col1)-(time_to_sec(col1)-20) -20 -20 -select * from t1 order by col1; -col1 -09:09:15 -14:30:45 -select * from t2 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:33:11' where col1='09:09:15'; -update t2 set col1='10:33:11' where col1='09:09:15'; -update t3 set col1='10:33:11' where col1='09:09:15'; -update t4 set col1='10:33:11' where col1='09:09:15'; -update t5 set col1='10:33:11' where col1='09:09:15'; -update t6 set col1='10:33:11' where col1='09:09:15'; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='TOKUDB' as select * from t1; -create table t22 engine='TOKUDB' as select * from t2; -create table t33 engine='TOKUDB' as select * from t3; -create table t44 engine='TOKUDB' as select * from t4; -create table t55 engine='TOKUDB' as select * from t5; -create table t66 engine='TOKUDB' as select * from t6; -alter table t11 -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -alter table t44 -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) 
subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = TokuDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = TokuDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = TokuDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = TokuDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = TokuDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = TokuDB) */ -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); 
-select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; -select * from t1 order by col1; -col1 -10:33:11 -select * from t2 order by col1; -col1 -10:33:11 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values ('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -60 14:30:45 -select * from t5 order by colint; -colint col1 -60 14:30:45 -select * from t6 order by colint; -colint col1 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; -select * from t11 order by col1; -col1 -10:33:11 -select * from t22 order by col1; -col1 -10:33:11 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 
-1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -60 14:30:45 -select * from t55 order by colint; -colint col1 -60 14:30:45 -select * from t66 order by colint; -colint col1 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- --- weekday(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; @@ -9571,6 +9220,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with weekday(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-12-03'); insert into t1 values ('2006-11-17'); insert into t2 values ('2006-12-03'); @@ -9579,6 +9229,7 @@ insert into t2 values ('2006-05-25'); insert into t3 values ('2006-12-03'); insert into t3 values ('2006-11-17'); insert into t3 values ('2006-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -9618,12 +9269,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-06' where col1='2006-12-03'; update t2 set col1='2006-02-06' where col1='2006-12-03'; update t3 set col1='2006-02-06' where col1='2006-12-03'; update t4 set col1='2006-02-06' where col1='2006-12-03'; update t5 set col1='2006-02-06' where col1='2006-12-03'; update t6 set col1='2006-02-06' where col1='2006-12-03'; +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9827,12 +9480,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with weekday(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-11-17'; delete from t2 where col1='2006-11-17'; delete from t3 where col1='2006-11-17'; delete from t4 where col1='2006-11-17'; delete from t5 where col1='2006-11-17'; delete from t6 where col1='2006-11-17'; +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9856,12 +9511,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-11-17'); insert into t2 values ('2006-11-17'); insert into t3 values 
('2006-11-17'); insert into t4 values (60,'2006-11-17'); insert into t5 values (60,'2006-11-17'); insert into t6 values (60,'2006-11-17'); +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9923,12 +9580,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with weekday(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-11-17'; delete from t22 where col1='2006-11-17'; delete from t33 where col1='2006-11-17'; delete from t44 where col1='2006-11-17'; delete from t55 where col1='2006-11-17'; delete from t66 where col1='2006-11-17'; +commit; select * from t11 order by col1; col1 2006-02-06 @@ -9952,12 +9611,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-11-17'); insert into t22 values ('2006-11-17'); insert into t33 values ('2006-11-17'); insert into t44 values (60,'2006-11-17'); insert into t55 values (60,'2006-11-17'); insert into t66 values (60,'2006-11-17'); +commit; select * from t11 order by col1; col1 2006-02-06 @@ -10080,6 +9741,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; insert into t1 values ('1996-01-03'); insert into t1 values ('2000-02-17'); insert into t2 values ('1996-01-03'); @@ -10088,6 +9750,7 @@ insert into t2 values ('2004-05-25'); insert into t3 values ('1996-01-03'); insert into t3 values ('2000-02-17'); insert into t3 values ('2004-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -10127,12 +9790,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2002-02-15' where col1='1996-01-03'; update t2 set col1='2002-02-15' where col1='1996-01-03'; update t3 set col1='2002-02-15' where col1='1996-01-03'; update t4 set col1='2002-02-15' where col1='1996-01-03'; update t5 set col1='2002-02-15' where col1='1996-01-03'; update t6 set col1='2002-02-15' where col1='1996-01-03'; +commit; select * from t1 order by col1; col1 2000-02-17 @@ -10336,12 +10001,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; delete from t1 where col1='2000-02-17'; delete from t2 where col1='2000-02-17'; delete from t3 where col1='2000-02-17'; delete from t4 where col1='2000-02-17'; delete from t5 where col1='2000-02-17'; delete from t6 where col1='2000-02-17'; +commit; select * from t1 order by col1; col1 2002-02-15 @@ -10365,12 +10032,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2000-02-17'); insert into t2 values ('2000-02-17'); insert into t3 values ('2000-02-17'); insert into t4 values (60,'2000-02-17'); insert into t5 values (60,'2000-02-17'); insert into t6 values (60,'2000-02-17'); +commit; select * from t1 order by col1; col1 2000-02-17 @@ -10434,12 +10103,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and 
partitions of tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; delete from t11 where col1='2000-02-17'; delete from t22 where col1='2000-02-17'; delete from t33 where col1='2000-02-17'; delete from t44 where col1='2000-02-17'; delete from t55 where col1='2000-02-17'; delete from t66 where col1='2000-02-17'; +commit; select * from t11 order by col1; col1 2002-02-15 @@ -10463,12 +10134,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2000-02-17'); insert into t22 values ('2000-02-17'); insert into t33 values ('2000-02-17'); insert into t44 values (60,'2000-02-17'); insert into t55 values (60,'2000-02-17'); insert into t66 values (60,'2000-02-17'); +commit; select * from t11 order by col1; col1 2000-02-17 @@ -10593,6 +10266,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-08-17'); insert into t2 values ('2006-01-03'); @@ -10601,6 +10275,7 @@ insert into t2 values ('2006-03-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-08-17'); insert into t3 values ('2006-03-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -10640,12 +10315,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-11-15' where col1='2006-01-03'; update t2 set col1='2006-11-15' where col1='2006-01-03'; update t3 set col1='2006-11-15' where col1='2006-01-03'; update t4 set col1='2006-11-15' where col1='2006-01-03'; update t5 set col1='2006-11-15' where col1='2006-01-03'; update t6 set col1='2006-11-15' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-08-17 @@ -10849,12 +10526,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-08-17'; delete from t2 where col1='2006-08-17'; delete from t3 where col1='2006-08-17'; delete from t4 where col1='2006-08-17'; delete from t5 where col1='2006-08-17'; delete from t6 where col1='2006-08-17'; +commit; select * from t1 order by col1; col1 2006-11-15 @@ -10878,12 +10557,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-08-17'); insert into t2 values ('2006-08-17'); insert into t3 values ('2006-08-17'); insert into t4 values (60,'2006-08-17'); insert into t5 values (60,'2006-08-17'); insert into t6 values (60,'2006-08-17'); +commit; select * from t1 order by col1; col1 2006-08-17 @@ -10950,12 +10631,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-08-17'; delete from t22 where col1='2006-08-17'; delete from t33 where col1='2006-08-17'; delete from t44 where col1='2006-08-17'; delete from t55 
where col1='2006-08-17'; delete from t66 where col1='2006-08-17'; +commit; select * from t11 order by col1; col1 2006-11-15 @@ -10979,12 +10662,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-08-17'); insert into t22 values ('2006-08-17'); insert into t33 values ('2006-08-17'); insert into t44 values (60,'2006-08-17'); insert into t55 values (60,'2006-08-17'); insert into t66 values (60,'2006-08-17'); +commit; select * from t11 order by col1; col1 2006-08-17 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result index bd4239fd26d..3fb51c67d00 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 1.2.1 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -540,8 +540,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1032,8 +1032,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1539,8 +1539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2040,8 +2040,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2539,8 +2539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3049,8 +3049,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3561,8 +3561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, 
f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4062,8 +4062,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4554,8 +4554,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5046,8 +5046,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5553,8 +5553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6054,8 +6054,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6553,8 +6553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7063,8 +7063,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7575,8 +7575,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8077,8 +8077,8 @@ DROP TABLE t1; # 1.2.2 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8097,8 +8097,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8585,8 +8585,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8605,8 +8605,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9093,8 +9093,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 
0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9121,8 +9121,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9616,8 +9616,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9642,8 +9642,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10133,8 +10133,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10157,8 +10157,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10648,8 +10648,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10676,8 +10676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11174,8 +11174,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11202,8 +11202,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11702,8 +11702,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11726,8 +11726,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12219,8 +12219,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 
INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12239,8 +12239,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12727,8 +12727,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12747,8 +12747,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13235,8 +13235,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13263,8 +13263,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13758,8 +13758,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13784,8 +13784,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14275,8 +14275,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14299,8 +14299,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14790,8 +14790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14818,8 +14818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15316,8 +15316,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP 
TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15344,8 +15344,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15844,8 +15844,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15868,8 +15868,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16362,8 +16362,8 @@ DROP TABLE t1; # 1.2.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16855,8 +16855,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17348,8 +17348,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17856,8 +17856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18358,8 +18358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18858,8 +18858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19369,8 +19369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19882,8 +19882,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20384,8 +20384,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20877,8 +20877,8 @@ TRUNCATE t1; # End 
usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21370,8 +21370,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21878,8 +21878,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22380,8 +22380,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22880,8 +22880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23391,8 +23391,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23904,8 +23904,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24406,8 +24406,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24899,8 +24899,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25392,8 +25392,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25900,8 +25900,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26402,8 +26402,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26902,8 +26902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27413,8 +27413,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 
0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27926,8 +27926,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result index a59d3daeec7..2cc7b4298fc 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; #------------------------------------------------------------------------ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62,8 +62,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77,8 +77,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -100,8 +100,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -121,8 +121,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -140,8 +140,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -163,8 +163,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, 
f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -186,8 +186,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -206,8 +206,8 @@ ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -221,8 +221,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -236,8 +236,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -259,8 +259,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -280,8 +280,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -299,8 +299,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -322,8 +322,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -345,8 +345,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -366,8 +366,8 @@ DROP TABLE t1; # 1.1.3 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -858,8 +858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 
CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1350,8 +1350,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1857,8 +1857,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2358,8 +2358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2859,8 +2859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3369,8 +3369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3881,8 +3881,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4382,8 +4382,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4874,8 +4874,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5366,8 +5366,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5873,8 +5873,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6374,8 +6374,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6875,8 +6875,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7385,8 +7385,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7897,8 +7897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE 
TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8399,8 +8399,8 @@ DROP TABLE t1; # 1.1.4 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8419,8 +8419,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8907,8 +8907,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8927,8 +8927,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9415,8 +9415,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9443,8 +9443,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9938,8 +9938,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9964,8 +9964,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10455,8 +10455,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10479,8 +10479,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10972,8 +10972,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11000,8 +11000,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` 
int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11498,8 +11498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11526,8 +11526,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12026,8 +12026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12050,8 +12050,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12543,8 +12543,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12563,8 +12563,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13051,8 +13051,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13071,8 +13071,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13559,8 +13559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13587,8 +13587,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14082,8 +14082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14108,8 +14108,8 @@ create_command SHOW CREATE TABLE t1; Table 
Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14599,8 +14599,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14623,8 +14623,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15116,8 +15116,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15144,8 +15144,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15642,8 +15642,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15670,8 +15670,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16170,8 +16170,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16194,8 +16194,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result index 9202e5f3a97..4aded14f336 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 
INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; # 2.1.5 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -487,8 +487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -927,8 +927,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1382,8 +1382,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1831,8 +1831,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2280,8 +2280,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2738,8 +2738,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3198,8 +3198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3647,8 +3647,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4087,8 +4087,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4527,8 +4527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4982,8 +4982,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5431,8 +5431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5880,8 
+5880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6338,8 +6338,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6798,8 +6798,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7247,8 +7247,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7267,8 +7267,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7703,8 +7703,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7723,8 +7723,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8159,8 +8159,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8187,8 +8187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8630,8 +8630,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8656,8 +8656,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9095,8 +9095,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9119,8 +9119,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - 
`f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9560,8 +9560,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9588,8 +9588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10034,8 +10034,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10062,8 +10062,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10510,8 +10510,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10534,8 +10534,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10979,8 +10979,8 @@ DROP TABLE t1; # 2.2.1 DROP PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11419,8 +11419,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11859,8 +11859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12314,8 +12314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12763,8 +12763,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13210,8 +13210,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, 
f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13668,8 +13668,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14128,8 +14128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14577,8 +14577,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15017,8 +15017,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15457,8 +15457,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15912,8 +15912,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16361,8 +16361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16808,8 +16808,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17266,8 +17266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17726,8 +17726,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18176,8 +18176,8 @@ DROP TABLE t1; # 2.2.2 DROP UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18196,8 +18196,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18632,8 +18632,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) @@ -18652,8 +18652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19088,8 +19088,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19116,8 +19116,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19559,8 +19559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19585,8 +19585,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20024,8 +20024,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20048,8 +20048,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20487,8 +20487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20515,8 +20515,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20961,8 +20961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20989,8 +20989,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21437,8 +21437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, 
+f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21461,8 +21461,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21902,8 +21902,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21922,8 +21922,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22358,8 +22358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22378,8 +22378,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22814,8 +22814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22842,8 +22842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23285,8 +23285,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23311,8 +23311,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23750,8 +23750,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23774,8 +23774,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24213,8 +24213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, 
-f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24241,8 +24241,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24687,8 +24687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24715,8 +24715,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25163,8 +25163,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25187,8 +25187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25629,8 +25629,8 @@ DROP TABLE t1; # 2.2.3 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26069,8 +26069,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26509,8 +26509,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26964,8 +26964,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27413,8 +27413,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27860,8 +27860,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28318,8 +28318,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -28778,8 +28778,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29227,8 +29227,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29667,8 +29667,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30107,8 +30107,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30562,8 +30562,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31011,8 +31011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31458,8 +31458,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31916,8 +31916,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32376,8 +32376,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32825,8 +32825,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32845,8 +32845,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33281,8 +33281,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33301,8 +33301,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -33737,8 +33737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33765,8 +33765,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34208,8 +34208,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34234,8 +34234,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34673,8 +34673,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34697,8 +34697,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35136,8 +35136,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35164,8 +35164,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35610,8 +35610,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35638,8 +35638,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36086,8 +36086,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36110,8 +36110,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result index 8c7bc5ef296..6838b33d89d 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 1.1.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -504,8 +504,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -524,7 +524,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -960,8 +960,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -988,7 +988,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1431,8 +1431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1457,7 +1457,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1920,7 +1920,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` 
int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2361,8 +2361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2389,7 +2389,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2835,8 +2835,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2863,7 +2863,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3311,8 +3311,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3335,7 +3335,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.1.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3798,7 +3798,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4237,6 +4237,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4269,8 +4271,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4290,7 +4292,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4729,6 +4731,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4761,8 +4765,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4790,7 +4794,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5236,6 +5240,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5268,8 +5274,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5295,7 +5301,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5737,6 +5743,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5769,8 +5777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5794,7 +5802,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6238,6 +6246,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6270,8 +6280,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6299,7 +6309,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6748,6 +6758,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6780,8 +6792,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( 
-f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6809,7 +6821,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7260,6 +7272,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7292,8 +7306,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7317,7 +7331,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7760,6 +7774,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7793,8 +7809,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7814,7 +7830,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8253,6 +8269,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8285,8 +8303,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8306,7 +8324,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8745,6 +8763,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8777,8 +8797,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 
0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8806,7 +8826,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9252,6 +9272,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9284,8 +9306,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9311,7 +9333,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9753,6 +9775,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9785,8 +9809,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9810,7 +9834,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10254,6 +10278,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10286,8 +10312,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10315,7 +10341,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10764,6 +10790,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10796,8 +10824,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -10825,7 +10853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11276,6 +11304,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11308,8 +11338,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11333,7 +11363,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11776,6 +11806,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11810,8 +11842,8 @@ DROP TABLE t1; # 1.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11830,7 +11862,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12318,8 +12350,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12338,7 +12370,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12826,8 +12858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12854,7 +12886,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13349,8 +13381,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13375,7 +13407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) 
DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13866,8 +13898,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13890,7 +13922,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14383,8 +14415,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14411,7 +14443,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14909,8 +14941,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14937,7 +14969,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15437,8 +15469,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15461,7 +15493,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15954,8 +15986,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15974,7 +16006,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16462,8 +16494,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16482,7 +16514,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16970,8 +17002,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16998,7 +17030,7 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17493,8 +17525,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17519,7 +17551,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18010,8 +18042,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18034,7 +18066,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18527,8 +18559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18555,7 +18587,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19053,8 +19085,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19081,7 +19113,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19581,8 +19613,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19605,7 +19637,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result index 92cfa5d59b9..f6e98be3c98 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -44,8 +44,8 @@ SET @@session.sql_mode= ''; # 1.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -500,8 +500,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -956,8 +956,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1427,8 +1427,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1892,8 +1892,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2355,8 +2355,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2829,8 +2829,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3305,8 +3305,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3771,8 +3771,8 @@ DROP TABLE t1; # 1.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3791,8 +3791,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4231,6 +4231,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4263,8 +4266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 
INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4283,8 +4286,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4723,6 +4726,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4755,8 +4761,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4783,8 +4789,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5230,6 +5236,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5262,8 +5271,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5288,8 +5297,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5731,6 +5740,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5763,8 +5775,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5787,8 +5799,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6230,6 +6242,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template 
WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6262,8 +6277,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6290,8 +6305,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6740,6 +6755,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6772,8 +6790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6800,8 +6818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7252,6 +7270,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7284,8 +7305,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7308,8 +7329,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7752,6 +7773,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7785,8 +7809,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7805,8 +7829,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - 
`f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8245,6 +8269,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8277,8 +8304,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8297,8 +8324,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8737,6 +8764,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8769,8 +8799,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8797,8 +8827,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9244,6 +9274,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9276,8 +9309,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9302,8 +9335,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9745,6 +9778,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 
success: 1 @@ -9777,8 +9813,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9801,8 +9837,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10244,6 +10280,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10276,8 +10315,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10304,8 +10343,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10754,6 +10793,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10786,8 +10828,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10814,8 +10856,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11266,6 +11308,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11298,8 +11343,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11322,8 +11367,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11766,6 +11811,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11800,8 +11848,8 @@ DROP TABLE t1; # 1.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12308,8 +12356,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12816,8 +12864,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13339,8 +13387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13856,8 +13904,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14371,8 +14419,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14897,8 +14945,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15425,8 +15473,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15942,8 +15990,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16450,8 +16498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16958,8 +17006,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17481,8 +17529,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -17998,8 +18046,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18513,8 +18561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19039,8 +19087,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19567,8 +19615,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result index 1ae379ccc07..883c9ec3453 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 2.1.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -526,7 +526,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -964,8 +964,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -992,7 +992,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', 
`f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1437,8 +1437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1463,7 +1463,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1902,8 +1902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1926,7 +1926,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2369,8 +2369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2397,7 +2397,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2843,8 +2843,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2871,7 +2871,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3321,8 +3321,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3345,7 +3345,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3788,8 +3788,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3809,7 +3809,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4250,6 +4250,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a 
default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4282,8 +4284,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4303,7 +4305,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4744,6 +4746,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4776,8 +4780,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4805,7 +4809,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5253,6 +5257,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5285,8 +5291,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5312,7 +5318,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5754,6 +5760,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5786,8 +5794,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5811,7 +5819,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6257,6 +6265,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check 
trigger-12 success: 1 @@ -6289,8 +6299,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6318,7 +6328,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6767,6 +6777,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6799,8 +6811,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6828,7 +6840,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7281,6 +7293,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7313,8 +7327,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7338,7 +7352,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7783,6 +7797,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7816,8 +7832,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7837,7 +7853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8278,6 +8294,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8310,8 +8328,8 @@ TRUNCATE t1; # End 
usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8331,7 +8349,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8772,6 +8790,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8804,8 +8824,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8833,7 +8853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9281,6 +9301,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9313,8 +9335,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9340,7 +9362,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9782,6 +9804,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9814,8 +9838,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9839,7 +9863,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10285,6 +10309,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10317,8 +10343,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) 
DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10346,7 +10372,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10795,6 +10821,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10827,8 +10855,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10856,7 +10884,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11309,6 +11337,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11341,8 +11371,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11366,7 +11396,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11811,6 +11841,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11845,8 +11877,8 @@ DROP TABLE t1; # 2.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11865,7 +11897,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12355,8 +12387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12375,7 +12407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) 
DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12865,8 +12897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12893,7 +12925,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13390,8 +13422,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13416,7 +13448,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13907,8 +13939,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13931,7 +13963,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14426,8 +14458,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14454,7 +14486,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14952,8 +14984,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14980,7 +15012,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15482,8 +15514,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15506,7 +15538,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16001,8 +16033,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16021,7 +16053,7 
@@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16511,8 +16543,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16531,7 +16563,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17021,8 +17053,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17049,7 +17081,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17546,8 +17578,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17572,7 +17604,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18063,8 +18095,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18087,7 +18119,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18582,8 +18614,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18610,7 +18642,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19108,8 +19140,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19136,7 +19168,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19638,8 +19670,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE 
t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19662,7 +19694,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result index 9ed9866a42f..993025c9fb2 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -43,8 +43,8 @@ SET @@session.sql_mode= ''; # 2.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -502,8 +502,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -961,8 +961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1435,8 +1435,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1905,8 +1905,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2373,8 +2373,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2852,8 +2852,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3331,8 +3331,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3800,8 
+3800,8 @@ DROP TABLE t1; # 2.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3820,8 +3820,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4263,6 +4263,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4295,8 +4298,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4315,8 +4318,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4758,6 +4761,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4790,8 +4796,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4818,8 +4824,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5268,6 +5274,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5300,8 +5309,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5326,8 +5335,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL, @@ -5774,6 +5783,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5806,8 +5818,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5830,8 +5842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6278,6 +6290,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6310,8 +6325,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6338,8 +6353,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6793,6 +6808,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6825,8 +6843,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6853,8 +6871,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7308,6 +7326,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7340,8 +7361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, 
+f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7364,8 +7385,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7811,6 +7832,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7844,8 +7868,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7864,8 +7888,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8307,6 +8331,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8339,8 +8366,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8359,8 +8386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8802,6 +8829,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8834,8 +8864,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8862,8 +8892,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9312,6 +9342,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND 
@max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9344,8 +9377,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9370,8 +9403,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9818,6 +9851,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9850,8 +9886,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9874,8 +9910,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10322,6 +10358,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10354,8 +10393,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10382,8 +10421,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10837,6 +10876,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10869,8 +10911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10897,8 +10939,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11352,6 +11394,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11384,8 +11429,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11408,8 +11453,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11855,6 +11900,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11889,8 +11937,8 @@ DROP TABLE t1; # 2.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12400,8 +12448,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12911,8 +12959,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13437,8 +13485,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13959,8 +14007,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14479,8 +14527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15010,8 +15058,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15541,8 +15589,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16061,8 +16109,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16572,8 +16620,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17083,8 +17131,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17609,8 +17657,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18131,8 +18179,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18651,8 +18699,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19182,8 +19230,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19713,8 +19761,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result index 644c4815a36..40d167b57ea 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; # 1.1 ALTER ... 
ANALYZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69,8 +69,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -528,8 +528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -968,8 +968,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -998,8 +998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1441,8 +1441,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1469,8 +1469,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1908,8 +1908,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1934,8 +1934,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2375,8 +2375,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2405,8 +2405,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2851,8 +2851,8 
@@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2881,8 +2881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3329,8 +3329,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3355,8 +3355,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3797,8 +3797,8 @@ DROP TABLE t1; # 1.2 ALTER ... ANALYZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3819,8 +3819,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4256,8 +4256,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4278,8 +4278,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4718,8 +4718,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4748,8 +4748,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5191,8 +5191,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5219,8 +5219,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL @@ -5658,8 +5658,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5684,8 +5684,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6125,8 +6125,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6155,8 +6155,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6601,8 +6601,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6631,8 +6631,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7079,8 +7079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7105,8 +7105,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7547,8 +7547,8 @@ DROP TABLE t1; # 1.3 ALTER ... 
ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7569,8 +7569,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8006,8 +8006,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8028,8 +8028,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8468,8 +8468,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8498,8 +8498,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8941,8 +8941,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8969,8 +8969,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9408,8 +9408,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9434,8 +9434,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9875,8 +9875,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9905,8 +9905,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -10351,8 +10351,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10381,8 +10381,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10829,8 +10829,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10855,8 +10855,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11297,8 +11297,8 @@ DROP TABLE t1; # 1.4 ALTER ... ANALYZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11319,8 +11319,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11756,8 +11756,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11778,8 +11778,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12218,8 +12218,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12248,8 +12248,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12691,8 +12691,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12719,8 +12719,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) 
DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13158,8 +13158,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13184,8 +13184,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13625,8 +13625,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13655,8 +13655,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14101,8 +14101,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14131,8 +14131,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14579,8 +14579,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14605,8 +14605,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15047,8 +15047,8 @@ DROP TABLE t1; # 1.5 ALTER ... 
ANALYZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15069,8 +15069,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15506,8 +15506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15528,8 +15528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15968,8 +15968,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15998,8 +15998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16441,8 +16441,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16469,8 +16469,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16908,8 +16908,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16934,8 +16934,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17375,8 +17375,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17405,8 +17405,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -17851,8 +17851,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17881,8 +17881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18329,8 +18329,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18355,8 +18355,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18800,8 +18800,8 @@ DROP TABLE t1; # 2.1 ALTER ... CHECK PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18822,8 +18822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19259,8 +19259,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19281,8 +19281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19721,8 +19721,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19751,8 +19751,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20194,8 +20194,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20222,8 +20222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20661,8 +20661,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20687,8 +20687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21128,8 +21128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21158,8 +21158,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21604,8 +21604,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21634,8 +21634,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22082,8 +22082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22108,8 +22108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22550,8 +22550,8 @@ DROP TABLE t1; # 2.2 ALTER ... 
CHECK PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22572,8 +22572,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23009,8 +23009,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23031,8 +23031,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23471,8 +23471,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23501,8 +23501,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23944,8 +23944,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23972,8 +23972,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24411,8 +24411,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24437,8 +24437,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24878,8 +24878,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24908,8 +24908,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -25354,8 +25354,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25384,8 +25384,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25832,8 +25832,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25858,8 +25858,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26300,8 +26300,8 @@ DROP TABLE t1; # 2.3 ALTER ... CHECK PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26322,8 +26322,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26759,8 +26759,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26781,8 +26781,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27221,8 +27221,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27251,8 +27251,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27694,8 +27694,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27722,8 +27722,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28161,8 +28161,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28187,8 +28187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28628,8 +28628,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28658,8 +28658,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29104,8 +29104,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29134,8 +29134,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29582,8 +29582,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29608,8 +29608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30050,8 +30050,8 @@ DROP TABLE t1; # 2.4 ALTER ... 
CHECK PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30072,8 +30072,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30509,8 +30509,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30531,8 +30531,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30971,8 +30971,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31001,8 +31001,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31444,8 +31444,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31472,8 +31472,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31911,8 +31911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31937,8 +31937,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32378,8 +32378,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32408,8 +32408,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -32854,8 +32854,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32884,8 +32884,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33332,8 +33332,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33358,8 +33358,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33800,8 +33800,8 @@ DROP TABLE t1; # 2.5 ALTER ... CHECK PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33822,8 +33822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34259,8 +34259,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34281,8 +34281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34721,8 +34721,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34751,8 +34751,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35194,8 +35194,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35222,8 +35222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35661,8 +35661,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35687,8 +35687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36128,8 +36128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36158,8 +36158,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36604,8 +36604,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36634,8 +36634,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37082,8 +37082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37108,8 +37108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37553,8 +37553,8 @@ DROP TABLE t1; # 3.1 ALTER ... 
OPTIMIZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37576,8 +37576,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38013,8 +38013,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38036,8 +38036,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38476,8 +38476,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38507,8 +38507,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38950,8 +38950,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38979,8 +38979,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39418,8 +39418,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39445,8 +39445,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39886,8 +39886,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39917,8 +39917,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -40363,8 +40363,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40394,8 +40394,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40842,8 +40842,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40869,8 +40869,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41311,8 +41311,8 @@ DROP TABLE t1; # 3.2 ALTER ... OPTIMIZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41334,8 +41334,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41771,8 +41771,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41794,8 +41794,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42234,8 +42234,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42265,8 +42265,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42708,8 +42708,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42737,8 +42737,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43176,8 +43176,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -43203,8 +43203,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43644,8 +43644,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -43675,8 +43675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44121,8 +44121,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44152,8 +44152,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44600,8 +44600,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44627,8 +44627,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45069,8 +45069,8 @@ DROP TABLE t1; # 3.3 ALTER ... 
OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45091,8 +45091,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45528,8 +45528,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45550,8 +45550,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45990,8 +45990,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46020,8 +46020,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46463,8 +46463,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46491,8 +46491,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46930,8 +46930,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46956,8 +46956,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47397,8 +47397,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47427,8 +47427,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47873,8 +47873,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47903,8 +47903,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48351,8 +48351,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48377,8 +48377,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48819,8 +48819,8 @@ DROP TABLE t1; # 3.4 ALTER ... OPTIMIZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48841,8 +48841,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49278,8 +49278,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49300,8 +49300,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49740,8 +49740,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49770,8 +49770,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50213,8 +50213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50241,8 +50241,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + 
`f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50680,8 +50680,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50706,8 +50706,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51147,8 +51147,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51177,8 +51177,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51623,8 +51623,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51653,8 +51653,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52101,8 +52101,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52127,8 +52127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52569,8 +52569,8 @@ DROP TABLE t1; # 3.5 ALTER ... 
OPTIMIZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52592,8 +52592,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53029,8 +53029,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53052,8 +53052,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53492,8 +53492,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53523,8 +53523,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53966,8 +53966,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53995,8 +53995,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54434,8 +54434,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54461,8 +54461,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54902,8 +54902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54933,8 +54933,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -55379,8 +55379,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55410,8 +55410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55858,8 +55858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55885,8 +55885,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56330,8 +56330,8 @@ DROP TABLE t1; # 4.1 ALTER ... REBUILD PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56350,8 +56350,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56787,8 +56787,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56807,8 +56807,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57247,8 +57247,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57275,8 +57275,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57718,8 +57718,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57744,8 +57744,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58183,8 +58183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58207,8 +58207,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58648,8 +58648,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58676,8 +58676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59122,8 +59122,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59150,8 +59150,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59598,8 +59598,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59622,8 +59622,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60064,8 +60064,8 @@ DROP TABLE t1; # 4.2 ALTER ... 
REBUILD PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60084,8 +60084,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60521,8 +60521,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60541,8 +60541,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60981,8 +60981,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61009,8 +61009,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61452,8 +61452,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61478,8 +61478,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61917,8 +61917,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61941,8 +61941,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62382,8 +62382,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62410,8 +62410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -62856,8 +62856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62884,8 +62884,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63332,8 +63332,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63356,8 +63356,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63798,8 +63798,8 @@ DROP TABLE t1; # 4.3 ALTER ... REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63813,8 +63813,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63828,8 +63828,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63851,8 +63851,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63872,8 +63872,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63891,8 +63891,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63914,8 +63914,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63937,8 +63937,8 @@ ALTER TABLE t1 REBUILD PARTITION 
part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63958,8 +63958,8 @@ DROP TABLE t1; # 4.4 ALTER ... REBUILD PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63973,8 +63973,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63988,8 +63988,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64011,8 +64011,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64032,8 +64032,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64051,8 +64051,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64074,8 +64074,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64097,8 +64097,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64118,8 +64118,8 @@ DROP TABLE t1; # 4.5 ALTER ... 
REBUILD PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64138,8 +64138,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64575,8 +64575,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64595,8 +64595,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65035,8 +65035,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65063,8 +65063,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65506,8 +65506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65532,8 +65532,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65971,8 +65971,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65995,8 +65995,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66436,8 +66436,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66464,8 +66464,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -66910,8 +66910,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66938,8 +66938,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67386,8 +67386,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67410,8 +67410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67855,8 +67855,8 @@ DROP TABLE t1; # 5.1 ALTER ... REPAIR PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67877,8 +67877,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68314,8 +68314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68336,8 +68336,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68776,8 +68776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68806,8 +68806,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69249,8 +69249,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69277,8 +69277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69716,8 +69716,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69742,8 +69742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70183,8 +70183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70213,8 +70213,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70659,8 +70659,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70689,8 +70689,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71137,8 +71137,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71163,8 +71163,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71605,8 +71605,8 @@ DROP TABLE t1; # 5.2 ALTER ... 
REPAIR PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71627,8 +71627,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72064,8 +72064,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72086,8 +72086,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72526,8 +72526,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72556,8 +72556,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72999,8 +72999,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73027,8 +73027,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73466,8 +73466,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73492,8 +73492,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73933,8 +73933,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73963,8 +73963,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -74409,8 +74409,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74439,8 +74439,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74887,8 +74887,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74913,8 +74913,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75355,8 +75355,8 @@ DROP TABLE t1; # 5.3 ALTER ... REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75377,8 +75377,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75814,8 +75814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75836,8 +75836,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76276,8 +76276,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76306,8 +76306,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76749,8 +76749,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76777,8 +76777,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77216,8 +77216,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77242,8 +77242,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77683,8 +77683,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77713,8 +77713,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78159,8 +78159,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78189,8 +78189,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78637,8 +78637,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78663,8 +78663,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79105,8 +79105,8 @@ DROP TABLE t1; # 5.4 ALTER ... 
REPAIR PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79127,8 +79127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79564,8 +79564,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79586,8 +79586,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80026,8 +80026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80056,8 +80056,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80499,8 +80499,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80527,8 +80527,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80966,8 +80966,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80992,8 +80992,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81433,8 +81433,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81463,8 +81463,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL @@ -81909,8 +81909,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81939,8 +81939,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82387,8 +82387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82413,8 +82413,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82855,8 +82855,8 @@ DROP TABLE t1; # 5.5 ALTER ... REPAIR PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82877,8 +82877,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83314,8 +83314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83336,8 +83336,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83776,8 +83776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83806,8 +83806,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84249,8 +84249,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84277,8 +84277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84716,8 +84716,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84742,8 +84742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85183,8 +85183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85213,8 +85213,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85659,8 +85659,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85689,8 +85689,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86137,8 +86137,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86163,8 +86163,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86608,8 +86608,8 @@ DROP TABLE t1; # 6.1 ALTER ... 
REMOVE PARTITIONING; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86628,8 +86628,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87062,8 +87062,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87082,8 +87082,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87516,8 +87516,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87544,8 +87544,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87978,8 +87978,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88004,8 +88004,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88438,8 +88438,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88462,8 +88462,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88896,8 +88896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88924,8 +88924,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -89358,8 +89358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -89386,8 +89386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -89820,8 +89820,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -89844,8 +89844,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result index a13a53bd5f9..8182dce5625 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -52,8 +52,8 @@ SET @@session.sql_mode= ''; # 1.1 The partitioning function contains one column. 
DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67,8 +67,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -507,8 +507,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -522,8 +522,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -962,8 +962,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -985,8 +985,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1432,8 +1432,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1453,8 +1453,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1915,8 +1915,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2360,8 +2360,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2383,8 +2383,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2833,8 +2833,8 @@ TRUNCATE t1; # End 
usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2860,8 +2860,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3312,8 +3312,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3331,8 +3331,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.2 The partitioning function contains two columns. DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3792,8 +3792,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4232,8 +4232,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4247,8 +4247,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4687,8 +4687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4710,8 +4710,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5157,8 +5157,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5178,8 +5178,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -5621,8 +5621,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5640,8 +5640,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6083,8 +6083,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6106,8 +6106,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6556,8 +6556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6579,8 +6579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7031,8 +7031,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7050,8 +7050,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7500,8 +7500,8 @@ DROP TABLE t1; # 2.5 PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7992,8 +7992,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8484,8 +8484,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8991,8 +8991,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9492,8 +9492,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9993,8 +9993,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10503,8 +10503,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11019,8 +11019,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11520,8 +11520,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12012,8 +12012,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12504,8 +12504,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13011,8 +13011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13512,8 +13512,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14013,8 +14013,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14523,8 +14523,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15039,8 +15039,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15540,8 +15540,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15555,8 +15555,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16048,8 +16048,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16063,8 +16063,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16556,8 +16556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16579,8 +16579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17079,8 +17079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17100,8 +17100,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17596,8 +17596,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17615,8 +17615,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18113,8 +18113,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18136,8 +18136,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18639,8 +18639,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18666,8 +18666,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - 
`f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19171,8 +19171,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19190,8 +19190,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19693,8 +19693,8 @@ DROP TABLE t1; # 3.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20185,8 +20185,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20677,8 +20677,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21184,8 +21184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21685,8 +21685,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22184,8 +22184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22694,8 +22694,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23206,8 +23206,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23707,8 +23707,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24199,8 +24199,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24691,8 +24691,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE 
t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25198,8 +25198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25699,8 +25699,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26198,8 +26198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26708,8 +26708,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27220,8 +27220,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27721,8 +27721,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27736,8 +27736,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28229,8 +28229,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28244,8 +28244,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28737,8 +28737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28760,8 +28760,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29260,8 +29260,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ 
-29281,8 +29281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29777,8 +29777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29796,8 +29796,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30292,8 +30292,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30315,8 +30315,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30818,8 +30818,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30841,8 +30841,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -31346,8 +31346,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31365,8 +31365,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result index 1a89df3eb1e..9155661d6d9 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result @@ -10,8 +10,9 @@ call mtr.add_suppression("TokuDB: Warning: MySQL is trying to drop table "); # after timed out COALESCE PARTITION # Extended crash recovery testing of fast_alter_partition_table. call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); +flush tables; # Crash testing ADD PARTITION -SET SESSION debug="+d,crash_add_partition_1"; +SET SESSION debug_dbug="+d,crash_add_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -69,8 +70,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_1"; -SET SESSION debug="+d,crash_add_partition_2"; +SET SESSION debug_dbug="-d,crash_add_partition_1"; +SET SESSION debug_dbug="+d,crash_add_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -130,8 +131,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_2"; -SET SESSION debug="+d,crash_add_partition_3"; +SET SESSION debug_dbug="-d,crash_add_partition_2"; +SET SESSION debug_dbug="+d,crash_add_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -191,8 +192,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_3"; -SET SESSION debug="+d,crash_add_partition_4"; +SET SESSION debug_dbug="-d,crash_add_partition_3"; +SET SESSION debug_dbug="+d,crash_add_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -252,8 +253,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_4"; -SET SESSION debug="+d,crash_add_partition_5"; +SET SESSION debug_dbug="-d,crash_add_partition_4"; +SET SESSION debug_dbug="+d,crash_add_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -313,8 +314,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_5"; -SET SESSION debug="+d,crash_add_partition_6"; +SET SESSION debug_dbug="-d,crash_add_partition_5"; +SET SESSION debug_dbug="+d,crash_add_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -374,8 +375,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_6"; -SET SESSION debug="+d,crash_add_partition_7"; +SET SESSION debug_dbug="-d,crash_add_partition_6"; +SET SESSION debug_dbug="+d,crash_add_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -435,8 +436,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_7"; -SET SESSION debug="+d,crash_add_partition_8"; +SET SESSION debug_dbug="-d,crash_add_partition_7"; +SET SESSION debug_dbug="+d,crash_add_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -497,8 +498,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_8"; -SET SESSION debug="+d,crash_add_partition_9"; +SET SESSION debug_dbug="-d,crash_add_partition_8"; +SET SESSION debug_dbug="+d,crash_add_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -557,8 +558,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_9"; -SET SESSION debug="+d,crash_add_partition_10"; +SET SESSION debug_dbug="-d,crash_add_partition_9"; +SET SESSION 
debug_dbug="+d,crash_add_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -617,9 +618,9 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_10"; +SET SESSION debug_dbug="-d,crash_add_partition_10"; # Error recovery testing ADD PARTITION -SET SESSION debug="+d,fail_add_partition_1"; +SET SESSION debug_dbug="+d,fail_add_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -731,8 +732,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_1"; -SET SESSION debug="+d,fail_add_partition_2"; +SET SESSION debug_dbug="-d,fail_add_partition_1"; +SET SESSION debug_dbug="+d,fail_add_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -844,8 +845,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_2"; -SET SESSION debug="+d,fail_add_partition_3"; +SET SESSION debug_dbug="-d,fail_add_partition_2"; +SET SESSION debug_dbug="+d,fail_add_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -957,8 +958,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_3"; -SET SESSION debug="+d,fail_add_partition_4"; +SET SESSION debug_dbug="-d,fail_add_partition_3"; +SET SESSION debug_dbug="+d,fail_add_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1070,8 +1071,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_4"; -SET SESSION debug="+d,fail_add_partition_5"; +SET SESSION debug_dbug="-d,fail_add_partition_4"; +SET SESSION debug_dbug="+d,fail_add_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1183,8 +1184,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_5"; -SET SESSION debug="+d,fail_add_partition_6"; +SET SESSION debug_dbug="-d,fail_add_partition_5"; +SET SESSION debug_dbug="+d,fail_add_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1296,8 +1297,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_6"; -SET SESSION debug="+d,fail_add_partition_7"; +SET SESSION debug_dbug="-d,fail_add_partition_6"; +SET SESSION debug_dbug="+d,fail_add_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1409,8 +1410,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_7"; -SET SESSION debug="+d,fail_add_partition_8"; +SET SESSION debug_dbug="-d,fail_add_partition_7"; +SET SESSION debug_dbug="+d,fail_add_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1524,8 +1525,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_8"; -SET SESSION debug="+d,fail_add_partition_9"; +SET SESSION debug_dbug="-d,fail_add_partition_8"; +SET SESSION debug_dbug="+d,fail_add_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1639,8 +1640,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_9"; -SET SESSION 
debug="+d,fail_add_partition_10"; +SET SESSION debug_dbug="-d,fail_add_partition_9"; +SET SESSION debug_dbug="+d,fail_add_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1754,9 +1755,9 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_10"; +SET SESSION debug_dbug="-d,fail_add_partition_10"; # Test DROP PARTITION -SET SESSION debug="+d,crash_drop_partition_1"; +SET SESSION debug_dbug="+d,crash_drop_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1813,8 +1814,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_1"; -SET SESSION debug="+d,crash_drop_partition_2"; +SET SESSION debug_dbug="-d,crash_drop_partition_1"; +SET SESSION debug_dbug="+d,crash_drop_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1873,8 +1874,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_2"; -SET SESSION debug="+d,crash_drop_partition_3"; +SET SESSION debug_dbug="-d,crash_drop_partition_2"; +SET SESSION debug_dbug="+d,crash_drop_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1933,8 +1934,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_3"; -SET SESSION debug="+d,crash_drop_partition_4"; +SET SESSION debug_dbug="-d,crash_drop_partition_3"; +SET SESSION debug_dbug="+d,crash_drop_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1988,8 +1989,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_4"; -SET SESSION debug="+d,crash_drop_partition_5"; +SET SESSION debug_dbug="-d,crash_drop_partition_4"; +SET SESSION debug_dbug="+d,crash_drop_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2043,8 +2044,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_5"; -SET SESSION debug="+d,crash_drop_partition_6"; +SET SESSION debug_dbug="-d,crash_drop_partition_5"; +SET SESSION debug_dbug="+d,crash_drop_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2098,8 +2099,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_6"; -SET SESSION debug="+d,crash_drop_partition_7"; +SET SESSION debug_dbug="-d,crash_drop_partition_6"; +SET SESSION debug_dbug="+d,crash_drop_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2151,8 +2152,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_7"; -SET SESSION debug="+d,crash_drop_partition_8"; +SET SESSION debug_dbug="-d,crash_drop_partition_7"; +SET SESSION debug_dbug="+d,crash_drop_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2204,8 +2205,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_8"; -SET SESSION debug="+d,crash_drop_partition_9"; +SET SESSION debug_dbug="-d,crash_drop_partition_8"; +SET SESSION 
debug_dbug="+d,crash_drop_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2257,9 +2258,9 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_9"; +SET SESSION debug_dbug="-d,crash_drop_partition_9"; # Error recovery DROP PARTITION -SET SESSION debug="+d,fail_drop_partition_1"; +SET SESSION debug_dbug="+d,fail_drop_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2369,8 +2370,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_1"; -SET SESSION debug="+d,fail_drop_partition_2"; +SET SESSION debug_dbug="-d,fail_drop_partition_1"; +SET SESSION debug_dbug="+d,fail_drop_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2480,8 +2481,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_2"; -SET SESSION debug="+d,fail_drop_partition_3"; +SET SESSION debug_dbug="-d,fail_drop_partition_2"; +SET SESSION debug_dbug="+d,fail_drop_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2591,8 +2592,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_3"; -SET SESSION debug="+d,fail_drop_partition_4"; +SET SESSION debug_dbug="-d,fail_drop_partition_3"; +SET SESSION debug_dbug="+d,fail_drop_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2692,8 +2693,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_4"; -SET SESSION debug="+d,fail_drop_partition_5"; +SET SESSION debug_dbug="-d,fail_drop_partition_4"; +SET SESSION debug_dbug="+d,fail_drop_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2793,8 +2794,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_5"; -SET SESSION debug="+d,fail_drop_partition_6"; +SET SESSION debug_dbug="-d,fail_drop_partition_5"; +SET SESSION debug_dbug="+d,fail_drop_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2894,8 +2895,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_6"; -SET SESSION debug="+d,fail_drop_partition_7"; +SET SESSION debug_dbug="-d,fail_drop_partition_6"; +SET SESSION debug_dbug="+d,fail_drop_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2995,8 +2996,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_7"; -SET SESSION debug="+d,fail_drop_partition_8"; +SET SESSION debug_dbug="-d,fail_drop_partition_7"; +SET SESSION debug_dbug="+d,fail_drop_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3096,8 +3097,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_8"; -SET SESSION debug="+d,fail_drop_partition_9"; +SET SESSION debug_dbug="-d,fail_drop_partition_8"; +SET SESSION debug_dbug="+d,fail_drop_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3197,10 +3198,10 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION 
debug="-d,fail_drop_partition_9"; +SET SESSION debug_dbug="-d,fail_drop_partition_9"; # Test change partition (REORGANIZE/REBUILD/COALESCE # or ADD HASH PARTITION). -SET SESSION debug="+d,crash_change_partition_1"; +SET SESSION debug_dbug="+d,crash_change_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3259,8 +3260,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_1"; -SET SESSION debug="+d,crash_change_partition_2"; +SET SESSION debug_dbug="-d,crash_change_partition_1"; +SET SESSION debug_dbug="+d,crash_change_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3321,8 +3322,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_2"; -SET SESSION debug="+d,crash_change_partition_3"; +SET SESSION debug_dbug="-d,crash_change_partition_2"; +SET SESSION debug_dbug="+d,crash_change_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3383,8 +3384,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_3"; -SET SESSION debug="+d,crash_change_partition_4"; +SET SESSION debug_dbug="-d,crash_change_partition_3"; +SET SESSION debug_dbug="+d,crash_change_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3445,8 +3446,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_4"; -SET SESSION debug="+d,crash_change_partition_5"; +SET SESSION debug_dbug="-d,crash_change_partition_4"; +SET SESSION debug_dbug="+d,crash_change_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3507,8 +3508,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_5"; -SET SESSION debug="+d,crash_change_partition_6"; +SET SESSION debug_dbug="-d,crash_change_partition_5"; +SET SESSION debug_dbug="+d,crash_change_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3569,8 +3570,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_6"; -SET SESSION debug="+d,crash_change_partition_7"; +SET SESSION debug_dbug="-d,crash_change_partition_6"; +SET SESSION debug_dbug="+d,crash_change_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3632,8 +3633,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_7"; -SET SESSION debug="+d,crash_change_partition_8"; +SET SESSION debug_dbug="-d,crash_change_partition_7"; +SET SESSION debug_dbug="+d,crash_change_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3695,8 +3696,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_8"; -SET SESSION debug="+d,crash_change_partition_9"; +SET SESSION debug_dbug="-d,crash_change_partition_8"; +SET SESSION debug_dbug="+d,crash_change_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3756,8 +3757,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; 
-SET SESSION debug="-d,crash_change_partition_9"; -SET SESSION debug="+d,crash_change_partition_10"; +SET SESSION debug_dbug="-d,crash_change_partition_9"; +SET SESSION debug_dbug="+d,crash_change_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3817,8 +3818,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_10"; -SET SESSION debug="+d,crash_change_partition_11"; +SET SESSION debug_dbug="-d,crash_change_partition_10"; +SET SESSION debug_dbug="+d,crash_change_partition_11"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3878,8 +3879,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_11"; -SET SESSION debug="+d,crash_change_partition_12"; +SET SESSION debug_dbug="-d,crash_change_partition_11"; +SET SESSION debug_dbug="+d,crash_change_partition_12"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3939,10 +3940,10 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_12"; +SET SESSION debug_dbug="-d,crash_change_partition_12"; # Error recovery change partition (REORGANIZE/REBUILD/COALESCE # or ADD HASH PARTITION). -SET SESSION debug="+d,fail_change_partition_1"; +SET SESSION debug_dbug="+d,fail_change_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4056,8 +4057,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_1"; -SET SESSION debug="+d,fail_change_partition_2"; +SET SESSION debug_dbug="-d,fail_change_partition_1"; +SET SESSION debug_dbug="+d,fail_change_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4171,8 +4172,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_2"; -SET SESSION debug="+d,fail_change_partition_3"; +SET SESSION debug_dbug="-d,fail_change_partition_2"; +SET SESSION debug_dbug="+d,fail_change_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4286,8 +4287,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_3"; -SET SESSION debug="+d,fail_change_partition_4"; +SET SESSION debug_dbug="-d,fail_change_partition_3"; +SET SESSION debug_dbug="+d,fail_change_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4401,8 +4402,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_4"; -SET SESSION debug="+d,fail_change_partition_5"; +SET SESSION debug_dbug="-d,fail_change_partition_4"; +SET SESSION debug_dbug="+d,fail_change_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4516,8 +4517,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_5"; -SET SESSION debug="+d,fail_change_partition_6"; +SET SESSION debug_dbug="-d,fail_change_partition_5"; +SET SESSION debug_dbug="+d,fail_change_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4631,8 +4632,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_6"; -SET SESSION 
debug="+d,fail_change_partition_7"; +SET SESSION debug_dbug="-d,fail_change_partition_6"; +SET SESSION debug_dbug="+d,fail_change_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4748,8 +4749,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_7"; -SET SESSION debug="+d,fail_change_partition_8"; +SET SESSION debug_dbug="-d,fail_change_partition_7"; +SET SESSION debug_dbug="+d,fail_change_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4865,8 +4866,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_8"; -SET SESSION debug="+d,fail_change_partition_9"; +SET SESSION debug_dbug="-d,fail_change_partition_8"; +SET SESSION debug_dbug="+d,fail_change_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4982,8 +4983,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_9"; -SET SESSION debug="+d,fail_change_partition_10"; +SET SESSION debug_dbug="-d,fail_change_partition_9"; +SET SESSION debug_dbug="+d,fail_change_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5099,8 +5100,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_10"; -SET SESSION debug="+d,fail_change_partition_11"; +SET SESSION debug_dbug="-d,fail_change_partition_10"; +SET SESSION debug_dbug="+d,fail_change_partition_11"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5216,8 +5217,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_11"; -SET SESSION debug="+d,fail_change_partition_12"; +SET SESSION debug_dbug="-d,fail_change_partition_11"; +SET SESSION debug_dbug="+d,fail_change_partition_12"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5333,14 +5334,14 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_12"; +SET SESSION debug_dbug="-d,fail_change_partition_12"; # # WL#4445: EXCHANGE PARTITION WITH TABLE # Verify ddl_log and TokuDB in case of crashing. call mtr.add_suppression("TokuDB: Warning: allocated tablespace .*, old maximum was "); call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); call mtr.add_suppression("table .* does not exist in the TokuDB internal"); -SET SESSION debug="+d,exchange_partition_abort_1"; +SET SESSION debug_dbug="+d,exchange_partition_abort_1"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5435,8 +5436,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_1"; -SET SESSION debug="+d,exchange_partition_abort_2"; +SET SESSION debug_dbug="-d,exchange_partition_abort_1"; +SET SESSION debug_dbug="+d,exchange_partition_abort_2"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5531,8 +5532,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_2"; -SET SESSION debug="+d,exchange_partition_abort_3"; +SET SESSION debug_dbug="-d,exchange_partition_abort_2"; +SET SESSION debug_dbug="+d,exchange_partition_abort_3"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5627,8 +5628,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_3"; -SET SESSION debug="+d,exchange_partition_abort_4"; +SET SESSION debug_dbug="-d,exchange_partition_abort_3"; +SET SESSION debug_dbug="+d,exchange_partition_abort_4"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5723,8 +5724,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_4"; -SET SESSION debug="+d,exchange_partition_abort_5"; +SET SESSION debug_dbug="-d,exchange_partition_abort_4"; +SET SESSION debug_dbug="+d,exchange_partition_abort_5"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5819,8 +5820,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_5"; -SET SESSION debug="+d,exchange_partition_abort_6"; +SET SESSION debug_dbug="-d,exchange_partition_abort_5"; +SET SESSION debug_dbug="+d,exchange_partition_abort_6"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5915,8 +5916,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_6"; -SET SESSION debug="+d,exchange_partition_abort_7"; +SET SESSION debug_dbug="-d,exchange_partition_abort_6"; +SET SESSION debug_dbug="+d,exchange_partition_abort_7"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, 
"Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6011,8 +6012,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_7"; -SET SESSION debug="+d,exchange_partition_abort_8"; +SET SESSION debug_dbug="-d,exchange_partition_abort_7"; +SET SESSION debug_dbug="+d,exchange_partition_abort_8"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6107,8 +6108,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_8"; -SET SESSION debug="+d,exchange_partition_abort_9"; +SET SESSION debug_dbug="-d,exchange_partition_abort_8"; +SET SESSION debug_dbug="+d,exchange_partition_abort_9"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6203,8 +6204,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_9"; -SET SESSION debug="+d,exchange_partition_fail_1"; +SET SESSION debug_dbug="-d,exchange_partition_abort_9"; +SET SESSION debug_dbug="+d,exchange_partition_fail_1"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6295,8 +6296,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_1"; -SET SESSION debug="+d,exchange_partition_fail_2"; +SET SESSION debug_dbug="-d,exchange_partition_fail_1"; +SET SESSION debug_dbug="+d,exchange_partition_fail_2"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6387,8 +6388,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_2"; -SET SESSION debug="+d,exchange_partition_fail_3"; +SET SESSION debug_dbug="-d,exchange_partition_fail_2"; +SET SESSION debug_dbug="+d,exchange_partition_fail_3"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6437,7 +6438,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6479,8 +6480,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_3"; -SET SESSION debug="+d,exchange_partition_fail_4"; +SET SESSION debug_dbug="-d,exchange_partition_fail_3"; +SET SESSION debug_dbug="+d,exchange_partition_fail_4"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, 
"Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6571,8 +6572,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_4"; -SET SESSION debug="+d,exchange_partition_fail_5"; +SET SESSION debug_dbug="-d,exchange_partition_fail_4"; +SET SESSION debug_dbug="+d,exchange_partition_fail_5"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6621,7 +6622,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6663,8 +6664,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_5"; -SET SESSION debug="+d,exchange_partition_fail_6"; +SET SESSION debug_dbug="-d,exchange_partition_fail_5"; +SET SESSION debug_dbug="+d,exchange_partition_fail_6"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6755,8 +6756,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_6"; -SET SESSION debug="+d,exchange_partition_fail_7"; +SET SESSION debug_dbug="-d,exchange_partition_fail_6"; +SET SESSION debug_dbug="+d,exchange_partition_fail_7"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6805,7 +6806,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6847,8 +6848,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_7"; -SET SESSION debug="+d,exchange_partition_fail_8"; +SET SESSION debug_dbug="-d,exchange_partition_fail_7"; +SET SESSION debug_dbug="+d,exchange_partition_fail_8"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6939,8 +6940,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_8"; -SET SESSION debug="+d,exchange_partition_fail_9"; +SET SESSION debug_dbug="-d,exchange_partition_fail_8"; +SET SESSION debug_dbug="+d,exchange_partition_fail_9"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE 
t2; @@ -7031,4 +7032,4 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_9"; +SET SESSION debug_dbug="-d,exchange_partition_fail_9"; diff --git a/storage/xtradb/dict/dict0boot.cc b/storage/xtradb/dict/dict0boot.cc index 0a21264e23d..138d3131e09 100644 --- a/storage/xtradb/dict/dict0boot.cc +++ b/storage/xtradb/dict/dict0boot.cc @@ -464,12 +464,22 @@ dict_boot(void) if (err == DB_SUCCESS) { if (srv_read_only_mode && !ibuf_is_empty()) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Change buffer must be empty when --innodb-read-only " - "is set!"); + if (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Change buffer must be empty when --innodb-read-only " + "is set!" + "You can try to recover the database with innodb_force_recovery=5"); + + err = DB_ERROR; + } else { + ib_logf(IB_LOG_LEVEL_WARN, + "Change buffer not empty when --innodb-read-only " + "is set! but srv_force_recovery = %lu, ignoring.", + srv_force_recovery); + } + } - err = DB_ERROR; - } else { + if (err == DB_SUCCESS) { /* Load definitions of other indexes on system tables */ dict_load_sys_table(dict_sys->sys_tables); diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index b60a0e9ddaf..2da234ad094 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -739,11 +739,9 @@ fil_node_open_file( } } - if (size_bytes >= FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) { + if (size_bytes >= (1024*1024)) { /* Truncate the size to whole extent size. */ - size_bytes = ut_2pow_round(size_bytes, - FSP_EXTENT_SIZE * - UNIV_PAGE_SIZE); + size_bytes = ut_2pow_round(size_bytes, (1024*1024)); } if (!fsp_flags_is_compressed(flags)) { @@ -5683,7 +5681,7 @@ fil_space_get_node( /* Found! */ break; } else { - *block_offset -= node->size; + (*block_offset) -= node->size; node = UT_LIST_GET_NEXT(chain, node); } } diff --git a/storage/xtradb/include/log0crypt.h b/storage/xtradb/include/log0crypt.h index 7e737853465..6b164e90d6e 100644 --- a/storage/xtradb/include/log0crypt.h +++ b/storage/xtradb/include/log0crypt.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2015, Google Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. All Rights Reserved. +Copyright (C) 2014, 2016, MariaDB Corporation. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -117,4 +117,12 @@ log_crypt_print_error( /*==================*/ log_crypt_err_t err_info); /*!< out: error info */ +/*********************************************************************//** +Print checkpoint no from log block and all encryption keys from +checkpoints if they are present. Used for problem analysis. 
*/ +void +log_crypt_print_checkpoint_keys( +/*============================*/ + const byte* log_block); + #endif // log0crypt.h diff --git a/storage/xtradb/include/ut0ut.h b/storage/xtradb/include/ut0ut.h index 9228c25d8be..980ac337002 100644 --- a/storage/xtradb/include/ut0ut.h +++ b/storage/xtradb/include/ut0ut.h @@ -80,20 +80,37 @@ private: # elif defined(HAVE_FAKE_PAUSE_INSTRUCTION) # define UT_RELAX_CPU() __asm__ __volatile__ ("rep; nop") -# elif defined(HAVE_ATOMIC_BUILTINS) -# define UT_RELAX_CPU() do { \ - volatile lint volatile_var; \ - os_compare_and_swap_lint(&volatile_var, 0, 1); \ - } while (0) # elif defined(HAVE_WINDOWS_ATOMICS) /* In the Win32 API, the x86 PAUSE instruction is executed by calling the YieldProcessor macro defined in WinNT.h. It is a CPU architecture- independent way by using YieldProcessor. */ # define UT_RELAX_CPU() YieldProcessor() +# elif defined(__powerpc__) +#include +# define UT_RELAX_CPU() do { \ + volatile lint volatile_var = __ppc_get_timebase(); \ + } while (0) # else # define UT_RELAX_CPU() ((void)0) /* avoid warning for an empty statement */ # endif +#if defined (__GNUC__) +# define UT_COMPILER_BARRIER() __asm__ __volatile__ ("":::"memory") +#elif defined (_MSC_VER) +# define UT_COMPILER_BARRIER() _ReadWriteBarrier() +#else +# define UT_COMPILER_BARRIER() +#endif + +# if defined(HAVE_HMT_PRIORITY_INSTRUCTION) +#include +# define UT_LOW_PRIORITY_CPU() __ppc_set_ppr_low() +# define UT_RESUME_PRIORITY_CPU() __ppc_set_ppr_med() +# else +# define UT_LOW_PRIORITY_CPU() ((void)0) +# define UT_RESUME_PRIORITY_CPU() ((void)0) +# endif + /*********************************************************************//** Delays execution for at most max_wait_us microseconds or returns earlier if cond becomes true. @@ -334,7 +351,7 @@ Runs an idle loop on CPU. The argument gives the desired delay in microseconds on 100 MHz Pentium + Visual C++. @return dummy value */ UNIV_INTERN -ulint +void ut_delay( /*=====*/ ulint delay); /*!< in: delay in microseconds on 100 MHz Pentium */ diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index c3044daafad..6d69f441019 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -6890,7 +6890,7 @@ lock_clust_rec_modify_check_and_lock( lock_rec_convert_impl_to_expl(block, rec, index, offsets); lock_mutex_enter(); - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); ut_ad(lock_table_has(trx, index->table, LOCK_IX)); @@ -6954,7 +6954,7 @@ lock_sec_rec_modify_check_and_lock( index record, and this would not have been possible if another active transaction had modified this secondary index record. 
*/ - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); lock_mutex_enter(); ut_ad(lock_table_has(trx, index->table, LOCK_IX)); @@ -7063,7 +7063,7 @@ lock_sec_rec_read_check_and_lock( lock_rec_convert_impl_to_expl(block, rec, index, offsets); } - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); lock_mutex_enter(); ut_ad(mode != LOCK_X @@ -7146,7 +7146,7 @@ lock_clust_rec_read_check_and_lock( } lock_mutex_enter(); - trx_t* trx = thr_get_trx(thr); + trx_t* trx __attribute__((unused))= thr_get_trx(thr); ut_ad(mode != LOCK_X || lock_table_has(trx, index->table, LOCK_IX)); diff --git a/storage/xtradb/log/log0crypt.cc b/storage/xtradb/log/log0crypt.cc index 852148899e9..db2e84d7e45 100644 --- a/storage/xtradb/log/log0crypt.cc +++ b/storage/xtradb/log/log0crypt.cc @@ -127,11 +127,34 @@ static const crypt_info_t* get_crypt_info( /*===========*/ - const byte* log_block) { + const byte* log_block) +{ ib_uint64_t checkpoint_no = log_block_get_checkpoint_no(log_block); return get_crypt_info(checkpoint_no); } +/*********************************************************************//** +Print checkpoint no from log block and all encryption keys from +checkpoints if they are present. Used for problem analysis. */ +void +log_crypt_print_checkpoint_keys( +/*============================*/ + const byte* log_block) +{ + ib_uint64_t checkpoint_no = log_block_get_checkpoint_no(log_block); + + if (crypt_info.size()) { + fprintf(stderr, "InnoDB: redo log checkpoint: %lu [ chk key ]: ", checkpoint_no); + for (size_t i = 0; i < crypt_info.size(); i++) { + struct crypt_info_t* it = &crypt_info[i]; + fprintf(stderr, "[ %lu %u ] ", + it->checkpoint_no, + it->key_version); + } + fprintf(stderr, "\n"); + } +} + /*********************************************************************//** Call AES CTR to encrypt/decrypt log blocks. */ static @@ -278,12 +301,22 @@ Add crypt info to set if it is not already present @return true if successfull, false if not- */ static bool -add_crypt_info(crypt_info_t* info) +add_crypt_info( +/*===========*/ + crypt_info_t* info, /*!< in: crypt info */ + bool checkpoint_read)/*!< in: do we read checkpoint */ { + const crypt_info_t* found=NULL; /* so that no one is searching array while we modify it */ ut_ad(mutex_own(&(log_sys->mutex))); - if (get_crypt_info(info->checkpoint_no) != NULL) { + found = get_crypt_info(info->checkpoint_no); + + /* If one crypt info is found then we add a new one only if we + are reading checkpoint from the log. New checkpoints will always + use the first created crypt info. */ + if (found != NULL && + ( found->checkpoint_no == info->checkpoint_no || !checkpoint_read)) { // already present... 
return true; } @@ -356,7 +389,7 @@ log_crypt_set_ver_and_key( } - add_crypt_info(&info); + add_crypt_info(&info, false); } /******************************************************** @@ -514,7 +547,7 @@ log_crypt_read_checkpoint_buf( memcpy(info.crypt_msg, buf + 8, MY_AES_BLOCK_SIZE); memcpy(info.crypt_nonce, buf + 24, MY_AES_BLOCK_SIZE); - if (!add_crypt_info(&info)) { + if (!add_crypt_info(&info, true)) { return false; } buf += LOG_CRYPT_ENTRY_SIZE; diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index f98adbbca08..23ca8b1381f 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -2786,6 +2786,8 @@ recv_scan_log_recs( /* Garbage or an incompletely written log block */ + /* Print checkpoint encryption keys if present */ + log_crypt_print_checkpoint_keys(log_block); finished = TRUE; if (maybe_encrypted) { diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index b2b3e256211..2bb094e115d 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1561,7 +1561,7 @@ os_file_set_nocache_if_needed(os_file_t file, const char* name, } if (srv_unix_file_flush_method == SRV_UNIX_ALL_O_DIRECT - || (type == OS_LOG_FILE + || (type == OS_DATA_FILE && (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT || (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)))) { os_file_set_nocache(file, name, mode_str); diff --git a/storage/xtradb/ut/ut0ut.cc b/storage/xtradb/ut/ut0ut.cc index 4eade1fe26e..acedb56879a 100644 --- a/storage/xtradb/ut/ut0ut.cc +++ b/storage/xtradb/ut/ut0ut.cc @@ -46,9 +46,6 @@ Created 5/11/1994 Heikki Tuuri # include #endif /* UNIV_HOTBACKUP */ -/** A constant to prevent the compiler from optimizing ut_delay() away. */ -UNIV_INTERN ibool ut_always_false = FALSE; - #ifdef __WIN__ /*****************************************************************//** NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix @@ -398,25 +395,21 @@ Runs an idle loop on CPU. The argument gives the desired delay in microseconds on 100 MHz Pentium + Visual C++. 
@return dummy value */ UNIV_INTERN -ulint +void ut_delay( /*=====*/ ulint delay) /*!< in: delay in microseconds on 100 MHz Pentium */ { - ulint i, j; + ulint i; - j = 0; + UT_LOW_PRIORITY_CPU(); for (i = 0; i < delay * 50; i++) { - j += i; UT_RELAX_CPU(); + UT_COMPILER_BARRIER(); } - if (ut_always_false) { - ut_always_false = (ibool) j; - } - - return(j); + UT_RESUME_PRIORITY_CPU(); } #endif /* !UNIV_HOTBACKUP */ diff --git a/strings/conf_to_src.c b/strings/conf_to_src.c index 28d2fd1515e..5b9793f388d 100644 --- a/strings/conf_to_src.c +++ b/strings/conf_to_src.c @@ -256,7 +256,7 @@ void dispcset(FILE *f,CHARSET_INFO *cs) fprintf(f," 255, /* max_sort_char */\n"); fprintf(f," ' ', /* pad_char */\n"); fprintf(f," 0, /* escape_with_backslash_is_dangerous */\n"); - + fprintf(f," 1, /* levels_for_order */\n"); fprintf(f," &my_charset_8bit_handler,\n"); if (cs->state & MY_CS_BINSORT) fprintf(f," &my_collation_8bit_bin_handler,\n"); @@ -270,9 +270,9 @@ static void fprint_copyright(FILE *file) { fprintf(file, -"/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems Inc.\n" +"/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.\n" " Copyright 2010-2011 Monty Program Ab\n" -" Copyright (c) 2003, 2011, Oracle and/or its affiliates\n" +" Copyright (c) 2000, 2011, Oracle and/or its affiliates.\n" "\n" " This program is free software; you can redistribute it and/or modify\n" " it under the terms of the GNU General Public License as published by\n" @@ -333,7 +333,7 @@ main(int argc, char **argv __attribute__((unused))) fprintf(f, " ./conf_to_src ../sql/share/charsets/ > FILE\n"); fprintf(f, "*/\n\n"); fprint_copyright(f); - fprintf(f,"#include \n"); + fprintf(f,"#include \"strings_def.h\"\n"); fprintf(f,"#include \n\n"); diff --git a/strings/ctype-eucjpms.c b/strings/ctype-eucjpms.c index 52494b7dfb3..469d3a5be6c 100644 --- a/strings/ctype-eucjpms.c +++ b/strings/ctype-eucjpms.c @@ -199,6 +199,7 @@ static const uchar sort_order_eucjpms[]= #define IS_MB2_KATA(x,y) (iseucjpms_ss2(x) && iskata(y)) #define IS_MB2_CHAR(x,y) (IS_MB2_KATA(x,y) || IS_MB2_JIS(x,y)) #define IS_MB3_CHAR(x,y,z) (iseucjpms_ss3(x) && IS_MB2_JIS(y,z)) +#define IS_MB_PREFIX2(x,y) (iseucjpms_ss3(x) && iseucjpms(y)) #define DEFINE_ASIAN_ROUTINES #include "ctype-mb.ic" diff --git a/strings/ctype-mb.ic b/strings/ctype-mb.ic index 6fc4d6e3db4..2df9c9d5e49 100644 --- a/strings/ctype-mb.ic +++ b/strings/ctype-mb.ic @@ -75,7 +75,13 @@ MY_FUNCTION_NAME(charlen)(CHARSET_INFO *cs __attribute__((unused)), #ifdef IS_MB3_CHAR if (b + 3 > e) + { +#ifdef IS_MB_PREFIX2 + if (!IS_MB_PREFIX2(b[0], b[1])) + return MY_CS_ILSEQ; +#endif return MY_CS_TOOSMALLN(3); + } if (IS_MB3_CHAR(b[0], b[1], b[2])) return 3; /* Three-byte character */ #endif diff --git a/strings/ctype-ujis.c b/strings/ctype-ujis.c index 67e68901573..b24fdb3075f 100644 --- a/strings/ctype-ujis.c +++ b/strings/ctype-ujis.c @@ -198,6 +198,7 @@ static const uchar sort_order_ujis[]= #define IS_MB2_KATA(x,y) (isujis_ss2(x) && iskata(y)) #define IS_MB2_CHAR(x, y) (IS_MB2_KATA(x,y) || IS_MB2_JIS(x,y)) #define IS_MB3_CHAR(x, y, z) (isujis_ss3(x) && IS_MB2_JIS(y,z)) +#define IS_MB_PREFIX2(x,y) (isujis_ss3(x) && isujis(y)) #define DEFINE_ASIAN_ROUTINES #include "ctype-mb.ic" diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 7cc59f6aca5..7af1249f596 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -16227,7 +16227,6 @@ static void test_change_user() const char *db= "mysqltest_user_test_database"; int rc; MYSQL* conn; - 
DBUG_ENTER("test_change_user"); myheader("test_change_user"); @@ -16240,6 +16239,9 @@ static void test_change_user() rc= mysql_query(mysql, buff); myquery(rc); + rc= mysql_query(mysql, "SET SQL_MODE=''"); + myquery(rc); + sprintf(buff, "grant select on %s.* to %s@'%%' identified by '%s'", db, @@ -17481,7 +17483,6 @@ static void test_wl4166_2() mysql_stmt_close(stmt); rc= mysql_query(mysql, "drop table t1"); myquery(rc); - } diff --git a/unittest/sql/explain_filename-t.cc b/unittest/sql/explain_filename-t.cc index 69ce51c0446..a737ebec608 100644 --- a/unittest/sql/explain_filename-t.cc +++ b/unittest/sql/explain_filename-t.cc @@ -26,7 +26,8 @@ char to[BUFLEN]; char from[BUFLEN]; -const char *error_messages[1000]; +static const char *error_messages_txt[1000]; +static const char **error_messages[1]= { error_messages_txt }; int setup() { @@ -34,12 +35,12 @@ int setup() my_default_lc_messages = &my_locale_en_US; /* Populate the necessary error messages */ - error_messages[ER_DATABASE_NAME - ER_ERROR_FIRST] = "Database"; - error_messages[ER_TABLE_NAME - ER_ERROR_FIRST] = "Table"; - error_messages[ER_PARTITION_NAME - ER_ERROR_FIRST] = "Partition"; - error_messages[ER_SUBPARTITION_NAME - ER_ERROR_FIRST] = "Subpartition"; - error_messages[ER_TEMPORARY_NAME - ER_ERROR_FIRST] = "Temporary"; - error_messages[ER_RENAMED_NAME - ER_ERROR_FIRST] = "Renamed"; + error_messages[0][ER_DATABASE_NAME - ER_ERROR_FIRST] = "Database"; + error_messages[0][ER_TABLE_NAME - ER_ERROR_FIRST] = "Table"; + error_messages[0][ER_PARTITION_NAME - ER_ERROR_FIRST] = "Partition"; + error_messages[0][ER_SUBPARTITION_NAME - ER_ERROR_FIRST] = "Subpartition"; + error_messages[0][ER_TEMPORARY_NAME - ER_ERROR_FIRST] = "Temporary"; + error_messages[0][ER_RENAMED_NAME - ER_ERROR_FIRST] = "Renamed"; my_default_lc_messages->errmsgs->errmsgs = error_messages; diff --git a/win/packaging/extra.wxs.in b/win/packaging/extra.wxs.in index 3425a76427b..978a5c9c90d 100644 --- a/win/packaging/extra.wxs.in +++ b/win/packaging/extra.wxs.in @@ -542,17 +542,6 @@ Value="utf8" /> - - - 600)]]> - - - - - - -- cgit v1.2.1 From f516b966e131177244941319c553e138fbf9f6ff Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Mon, 9 May 2016 23:39:10 +0300 Subject: Main patch for mdev-9864 --- sql/share/errmsg-utf8.txt | 4 + sql/sql_class.h | 21 ++++ sql/sql_cte.cc | 304 ++++++++++++++++++++++++++++++++++++++++++---- sql/sql_cte.h | 139 ++++++++++++++++++--- sql/sql_derived.cc | 79 +++++++++--- sql/sql_lex.cc | 1 + sql/sql_lex.h | 13 ++ sql/sql_parse.cc | 2 +- sql/sql_prepare.cc | 2 +- sql/sql_select.cc | 8 +- sql/sql_union.cc | 253 ++++++++++++++++++++++++++++++++++++-- sql/sql_yacc.yy | 17 ++- sql/table.cc | 79 +++++++++++- sql/table.h | 7 ++ 14 files changed, 856 insertions(+), 73 deletions(-) diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 376c1eb9d0d..f2a5666f1b1 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7154,6 +7154,10 @@ ER_WRONG_ORDER_IN_WITH_CLAUSE eng "The definition of the table '%s' refers to the table '%s' defined later in a non-recursive WITH clause" ER_RECURSIVE_QUERY_IN_WITH_CLAUSE eng "Recursive queries in WITH clause are not supported yet" +ER_RECURSIVE_WITHOUT_ANCHORS + eng "No anchors for recursive WITH element '%s'" +ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED + eng "Reference to recursive WITH table '%s' in materiazed derived" # # Internal errors, not used diff --git a/sql/sql_class.h b/sql/sql_class.h index e0792a4059f..0100a9807f5 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h 
@@ -4196,6 +4196,7 @@ protected: /* Something used only by the parser: */ public: select_result(THD *thd_arg): select_result_sink(thd_arg) {} + void set_unit(SELECT_LEX_UNIT *unit_arg) { unit= unit_arg; } virtual ~select_result() {}; /** Change wrapped select_result. @@ -4637,6 +4638,7 @@ public: } }; + class select_union :public select_result_interceptor { public: @@ -4674,6 +4676,25 @@ public: }; +class select_union_recursive :public select_union +{ + public: + TABLE *incr_table; + List
rec_tables; + + select_union_recursive(THD *thd_arg): + select_union(thd_arg), incr_table(0) {}; + + int send_data(List &items); + bool create_result_table(THD *thd, List *column_types, + bool is_distinct, ulonglong options, + const char *alias, + bool bit_fields_as_long, + bool create_table, + bool keep_row_order= FALSE); + void cleanup(); +}; + /** UNION result that is passed directly to the receiving select_result without filling a temporary table. diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 77f0bcf04ba..04d53495400 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -21,14 +21,17 @@ true on failure */ -bool check_dependencies_in_with_clauses(With_clause *with_clauses_list) +bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list) { for (With_clause *with_clause= with_clauses_list; with_clause; with_clause= with_clause->next_with_clause) { - if (with_clause->check_dependencies()) + if (with_clause->check_dependencies(thd)) return true; + if (with_clause->check_anchors()) + return true; + with_clause->move_anchors_ahead(); } return false; } @@ -57,7 +60,7 @@ bool check_dependencies_in_with_clauses(With_clause *with_clauses_list) false otherwise */ -bool With_clause::check_dependencies() +bool With_clause::check_dependencies(THD *thd) { if (dependencies_are_checked) return false; @@ -84,9 +87,14 @@ bool With_clause::check_dependencies() return true; } } - with_elem->check_dependencies_in_unit(with_elem->spec); + if (with_elem->check_dependencies_in_spec(thd)) + return true; } /* Build the transitive closure of the direct dependencies found above */ + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + with_elem->derived_dep_map= with_elem->base_dep_map; for (With_element *with_elem= first_elem; with_elem != NULL; with_elem= with_elem->next_elem) @@ -94,8 +102,8 @@ bool With_clause::check_dependencies() table_map with_elem_map= with_elem->get_elem_map(); for (With_element *elem= first_elem; elem != NULL; elem= elem->next_elem) { - if (elem->dependency_map & with_elem_map) - elem->dependency_map |= with_elem->dependency_map; + if (elem->derived_dep_map & with_elem_map) + elem->derived_dep_map |= with_elem->derived_dep_map; } } @@ -107,7 +115,7 @@ bool With_clause::check_dependencies() with_elem != NULL; with_elem= with_elem->next_elem) { - if (with_elem->dependency_map & with_elem->get_elem_map()) + if (with_elem->derived_dep_map & with_elem->get_elem_map()) with_elem->is_recursive= true; } for (With_element *with_elem= first_elem; @@ -115,10 +123,12 @@ bool With_clause::check_dependencies() with_elem= with_elem->next_elem) { if (with_elem->is_recursive) - { + { +#if 0 my_error(ER_RECURSIVE_QUERY_IN_WITH_CLAUSE, MYF(0), with_elem->query_name->str); return true; +#endif } } @@ -152,7 +162,39 @@ bool With_clause::check_dependencies() } -/** +bool With_element::check_dependencies_in_spec(THD *thd) +{ + for (st_select_lex *sl= spec->first_select(); sl; sl= sl->next_select()) + { + check_dependencies_in_select(sl, sl->with_dep); + base_dep_map|= sl->with_dep; + } + return false; +} + + +void With_element::check_dependencies_in_select(st_select_lex *sl, table_map &dep_map) +{ + for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local) + { + tbl->with_internal_reference_map= 0; + if (!tbl->with) + tbl->with= owner->find_table_def(tbl); + if (!tbl->with && tbl->select_lex) + tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl); + if (tbl->with && tbl->with->owner== this->owner) + { + dep_map|= 
tbl->with->get_elem_map(); + tbl->with_internal_reference_map= get_elem_map(); + } + } + st_select_lex_unit *inner_unit= sl->first_inner_unit(); + for (; inner_unit; inner_unit= inner_unit->next_unit()) + check_dependencies_in_unit(inner_unit, dep_map); +} + + + /** @brief Check dependencies on the sibling with tables used in the given unit @@ -166,24 +208,102 @@ bool With_clause::check_dependencies() dependency_map of this element. */ -void With_element::check_dependencies_in_unit(st_select_lex_unit *unit) +void With_element::check_dependencies_in_unit(st_select_lex_unit *unit, + table_map &dep_map) { st_select_lex *sl= unit->first_select(); for (; sl; sl= sl->next_select()) { - for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local) + check_dependencies_in_select(sl, dep_map); + } +} + + +bool With_clause::check_anchors() +{ + /* Find mutually recursive with elements */ + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + if (!with_elem->is_recursive) + continue; + + table_map with_elem_dep= with_elem->derived_dep_map; + table_map with_elem_map= with_elem->get_elem_map(); + for (With_element *elem= with_elem; + elem != NULL; + elem= elem->next_elem) { - if (!tbl->with) - tbl->with= owner->find_table_def(tbl); - if (!tbl->with && tbl->select_lex) - tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl); - if (tbl->with && tbl->with->owner== this->owner) - set_dependency_on(tbl->with); + if (!elem->is_recursive) + continue; + + if (elem == with_elem || + ((elem->derived_dep_map & with_elem_map) && + (with_elem_dep & elem->get_elem_map()))) + { + with_elem->mutually_recursive|= elem->get_elem_map(); + elem->mutually_recursive|= with_elem_map; + } } - st_select_lex_unit *inner_unit= sl->first_inner_unit(); - for (; inner_unit; inner_unit= inner_unit->next_unit()) - check_dependencies_in_unit(inner_unit); + + for (st_select_lex *sl= with_elem->spec->first_select(); + sl; + sl= sl->next_select()) + { + if (!(with_elem->mutually_recursive & sl->with_dep)) + { + with_elem->with_anchor= true; + break; + } + } + } + + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + if (!with_elem->is_recursive || with_elem->with_anchor) + continue; + + table_map anchored= 0; + for (With_element *elem= with_elem; + elem != NULL; + elem= elem->next_elem) + { + if (elem->mutually_recursive && elem->with_anchor) + anchored |= elem->get_elem_map(); + } + table_map non_anchored= with_elem->mutually_recursive & ~anchored; + with_elem->work_dep_map= non_anchored & with_elem->base_dep_map; } + + /*Building transitive clousure on work_dep_map*/ + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + table_map with_elem_map= with_elem->get_elem_map(); + for (With_element *elem= first_elem; elem != NULL; elem= elem->next_elem) + { + if (elem->work_dep_map & with_elem_map) + elem->work_dep_map|= with_elem->work_dep_map; + } + } + + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + if (with_elem->work_dep_map & with_elem->get_elem_map()) + { + my_error(ER_RECURSIVE_WITHOUT_ANCHORS, MYF(0), + with_elem->query_name->str); + return true; + } + } + + return false; } @@ -438,8 +558,8 @@ With_element::rename_columns_of_derived_unit(THD *thd, item->is_autogenerated_name= false; } } - - make_valid_column_names(thd, select->item_list); + else + make_valid_column_names(thd, select->item_list); 
unit->columns_are_renamed= true; @@ -486,6 +606,47 @@ bool With_element::prepare_unreferenced(THD *thd) } + +void With_clause::move_anchors_ahead() +{ + for (With_element *with_elem= first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + if (with_elem->is_recursive) + with_elem->move_anchors_ahead(); + } +} + + +void With_element::move_anchors_ahead() +{ + st_select_lex *next_sl; + st_select_lex *new_pos= spec->first_select(); + st_select_lex *last_sl; + new_pos->linkage= UNION_TYPE; + for (st_select_lex *sl= new_pos; sl; sl= next_sl) + { + next_sl= sl->next_select(); + if (is_anchor(sl)) + { + sl->move_node(new_pos); + new_pos= sl->next_select(); + } + last_sl= sl; + } + if (spec->union_distinct) + spec->union_distinct= last_sl; + first_recursive= new_pos; +} + + +bool With_element::is_anchor(st_select_lex *sel) +{ + return !(mutually_recursive & sel->with_dep); +} + + /** @brief Search for the definition of the given table referred in this select node @@ -540,7 +701,7 @@ With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table) bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem) { with= with_elem; - if (!with_elem->is_referenced()) + if (!with_elem->is_referenced() || with_elem->is_recursive) derived= with_elem->spec; else { @@ -553,6 +714,102 @@ bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem) } +bool TABLE_LIST::is_recursive_with_table() +{ + return with && with->is_recursive; +} + + +bool TABLE_LIST::is_with_table_recursive_reference() +{ + return (with_internal_reference_map && + (with->mutually_recursive & with_internal_reference_map)); +} + + + +bool st_select_lex::check_unrestricted_recursive() +{ + With_element *with_elem= get_with_element(); + if (!with_elem) + return false; + table_map unrestricted= 0; + table_map encountered= 0; + if (with_elem->check_unrestricted_recursive(this, + unrestricted, + encountered)) + return true; + with_elem->owner->unrestricted|= unrestricted; + return false; +} + + +bool With_element::check_unrestricted_recursive(st_select_lex *sel, + table_map &unrestricted, + table_map &encountered) +{ + List_iterator ti(sel->leaf_tables); + TABLE_LIST *tbl; + while ((tbl= ti++)) + { + if (tbl->get_unit() && !tbl->is_with_table()) + { + st_select_lex_unit *unit= tbl->get_unit(); + if (tbl->is_materialized_derived()) + { + table_map dep_map; + check_dependencies_in_unit(unit, dep_map); + if (dep_map & get_elem_map()) + { + my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED, + MYF(0), query_name->str); + return true; + } + } + if (check_unrestricted_recursive(unit->first_select(), + unrestricted, + encountered)) + return true; + if (!(tbl->is_recursive_with_table() && unit->with_element->owner == owner)) + continue; + With_element *with_elem= unit->with_element; + if (encountered & with_elem->get_elem_map()) + unrestricted|= with_elem->mutually_recursive; + else + encountered|= with_elem->get_elem_map(); + } + } + for (With_element *with_elem= sel->get_with_element()->owner->first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + if (!with_elem->is_recursive && (unrestricted & with_elem->get_elem_map())) + continue; + if (encountered & with_elem->get_elem_map()) + { + uint cnt= 0; + table_map mutually_recursive= with_elem->mutually_recursive; + for (table_map map= mutually_recursive >> with_elem->number; + map != 0; + map>>= 1) + { + if (map & 1) + { + if (cnt) + { + unrestricted|= with_elem->mutually_recursive; + break; + } + else + cnt++; + } + } + } + } + return 
false; +} + + /** @brief Print this with clause @@ -602,3 +859,4 @@ void With_element::print(String *str, enum_query_type query_type) str->append(')'); } + diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 0cbc9247af9..b559be93de5 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -4,6 +4,7 @@ #include "sql_lex.h" class With_clause; +class select_union; /** @class With_clause @@ -21,13 +22,22 @@ private: With_clause *owner; // with clause this object belongs to With_element *next_elem; // next element in the with clause uint number; // number of the element in the with clause (starting from 0) + table_map elem_map; // The map where with only one 1 set in this->number /* - The map dependency_map has 1 in the i-th position if the query that - specifies this element contains a reference to the element number i + The map base_dep_map has 1 in the i-th position if the query that + specifies this with element contains a reference to the with element number i in the query FROM list. + (In this case this with element depends directly on the i-th with element.) */ - table_map elem_map; // The map where with only one 1 set in this->number - table_map dependency_map; + table_map base_dep_map; + /* + The map derived_dep_map has 1 in i-th position if this with element depends + directly or indirectly from the i-th with element. + */ + table_map derived_dep_map; + table_map work_dep_map; // dependency map used for work + /* Dependency map of with elements mutually recursive with this with element */ + table_map mutually_recursive; /* Total number of references to this element in the FROM lists of the queries that are in the scope of the element (including @@ -43,6 +53,8 @@ private: /* Return the map where 1 is set only in the position for this element */ table_map get_elem_map() { return 1 << number; } + TABLE *table; + public: /* The name of the table introduced by this with elememt. 
The name @@ -64,20 +76,40 @@ public: */ bool is_recursive; + bool with_anchor; + + st_select_lex *first_recursive; + + uint level; + + select_union *partial_result; + select_union *final_result; + select_union_recursive *rec_result; + TABLE *result_table; + With_element(LEX_STRING *name, List list, st_select_lex_unit *unit) - : next_elem(NULL), dependency_map(0), references(0), + : next_elem(NULL), base_dep_map(0), derived_dep_map(0), + work_dep_map(0), mutually_recursive(0), + references(0), table(NULL), query_name(name), column_list(list), spec(unit), - is_recursive(false) {} - - void check_dependencies_in_unit(st_select_lex_unit *unit); - + is_recursive(false), with_anchor(false), + partial_result(NULL), final_result(NULL), + rec_result(NULL), result_table(NULL) + { reset();} + + bool check_dependencies_in_spec(THD *thd); + + void check_dependencies_in_select(st_select_lex *sl, table_map &dep_map); + + void check_dependencies_in_unit(st_select_lex_unit *unit, table_map &dep_map); + void set_dependency_on(With_element *with_elem) - { dependency_map|= with_elem->get_elem_map(); } + { base_dep_map|= with_elem->get_elem_map(); } bool check_dependency_on(With_element *with_elem) - { return dependency_map & with_elem->get_elem_map(); } + { return base_dep_map & with_elem->get_elem_map(); } bool set_unparsed_spec(THD *thd, char *spec_start, char *spec_end); @@ -91,9 +123,42 @@ public: bool prepare_unreferenced(THD *thd); - void print(String *str, enum_query_type query_type); + bool check_unrestricted_recursive(st_select_lex *sel, + table_map &unrestricted, + table_map &encountered); + + void print(String *str, enum_query_type query_type); + + void set_table(TABLE *tab) { table= tab; } + + TABLE *get_table() { return table; } + + bool is_anchor(st_select_lex *sel); + + void move_anchors_ahead(); + + bool is_unrestricted(); + + bool is_with_prepared_anchor(); + + void mark_as_with_prepared_anchor(); + + bool is_cleaned(); + + void mark_as_cleaned(); + + void reset() + { + level= 0; + } + + void set_result_table(TABLE *tab) { result_table= tab; } friend class With_clause; + friend + bool st_select_lex::check_unrestricted_recursive(); + friend + bool TABLE_LIST::is_with_table_recursive_reference(); }; @@ -126,6 +191,10 @@ private: /* Set to true if dependencies between with elements have been checked */ bool dependencies_are_checked; + table_map unrestricted; + table_map with_prepared_anchor; + table_map cleaned; + public: /* If true the specifier RECURSIVE is present in the with clause */ bool with_recursive; @@ -133,7 +202,8 @@ public: With_clause(bool recursive_fl, With_clause *emb_with_clause) : owner(NULL), first_elem(NULL), elements(0), embedding_with_clause(emb_with_clause), next_with_clause(NULL), - dependencies_are_checked(false), + dependencies_are_checked(false), + unrestricted(0), with_prepared_anchor(0), cleaned(0), with_recursive(recursive_fl) { last_next= &first_elem; } @@ -159,7 +229,11 @@ public: With_clause *pop() { return embedding_with_clause; } - bool check_dependencies(); + bool check_dependencies(THD *thd); + + bool check_anchors(); + + void move_anchors_ahead(); With_element *find_table_def(TABLE_LIST *table); @@ -169,10 +243,45 @@ public: void print(String *str, enum_query_type query_type); + friend class With_element; + + friend + bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list); friend - bool check_dependencies_in_with_clauses(With_clause *with_clauses_list); + bool st_select_lex::check_unrestricted_recursive(); }; +inline +bool 
With_element::is_unrestricted() +{ + return owner->unrestricted & get_elem_map(); +} + +inline + +bool With_element::is_with_prepared_anchor() +{ + return owner->with_prepared_anchor & get_elem_map(); +} + +inline +void With_element::mark_as_with_prepared_anchor() +{ + owner->with_prepared_anchor|= mutually_recursive; +} + + +inline +bool With_element::is_cleaned() +{ + return owner->cleaned & get_elem_map(); +} + +inline +void With_element::mark_as_cleaned() +{ + owner->cleaned|= get_elem_map(); +} #endif /* SQL_CTE_INCLUDED */ diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 79e57cded81..63302c1c6db 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -30,6 +30,7 @@ #include "sql_base.h" #include "sql_view.h" // check_duplicate_names #include "sql_acl.h" // SELECT_ACL +#include "sql_class.h" #include "sql_cte.h" typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived); @@ -627,6 +628,7 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived) true Error */ + bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) { SELECT_LEX_UNIT *unit= derived->get_unit(); @@ -634,6 +636,34 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) bool res= FALSE; DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit)); + SELECT_LEX *first_select= unit->first_select(); + + if (unit->prepared && derived->is_recursive_with_table() && + !derived->table) + { + if (!(derived->derived_result= new (thd->mem_root) select_union(thd))) + DBUG_RETURN(TRUE); // out of memory + thd->create_tmp_table_for_derived= TRUE; + if (!derived->table) + res= derived->derived_result->create_result_table( + thd, &unit->types, FALSE, + (first_select->options | + thd->variables.option_bits | + TMP_TABLE_ALL_COLUMNS), + derived->alias, FALSE, TRUE); + thd->create_tmp_table_for_derived= FALSE; + + if (!res && !derived->table) + { + derived->derived_result->set_unit(unit); + derived->table= derived->derived_result->table; + if (derived->is_with_table_recursive_reference()) + unit->with_element->rec_result->rec_tables.push_back(derived->table); + } + DBUG_ASSERT(derived->table || res); + goto exit; + } + // Skip already prepared views/DT if (!unit || unit->prepared || (derived->merged_for_insert && @@ -642,16 +672,16 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) thd->lex->sql_command == SQLCOM_DELETE_MULTI)))) DBUG_RETURN(FALSE); - SELECT_LEX *first_select= unit->first_select(); - /* prevent name resolving out of derived table */ for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select()) { sl->context.outer_context= 0; - // Prepare underlying views/DT first. - if ((res= sl->handle_derived(lex, DT_PREPARE))) - goto exit; - + if (!derived->is_with_table_recursive_reference()) + { + // Prepare underlying views/DT first. + if ((res= sl->handle_derived(lex, DT_PREPARE))) + goto exit; + } if (derived->outer_join && sl->first_cond_optimization) { /* Mark that table is part of OUTER JOIN and fields may be NULL */ @@ -697,19 +727,21 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT is last SELECT of UNION). 
*/ thd->create_tmp_table_for_derived= TRUE; - if (derived->derived_result->create_result_table(thd, &unit->types, FALSE, - (first_select->options | - thd->variables.option_bits | - TMP_TABLE_ALL_COLUMNS), - derived->alias, - FALSE, FALSE)) + if (!(derived->table) && + derived->derived_result->create_result_table(thd, &unit->types, FALSE, + (first_select->options | + thd->variables.option_bits | + TMP_TABLE_ALL_COLUMNS), + derived->alias, + FALSE, FALSE, FALSE)) { thd->create_tmp_table_for_derived= FALSE; goto exit; } thd->create_tmp_table_for_derived= FALSE; - derived->table= derived->derived_result->table; + if (!derived->table) + derived->table= derived->derived_result->table; DBUG_ASSERT(derived->table); if (derived->is_derived() && derived->is_merged_derived()) first_select->mark_as_belong_to_derived(derived); @@ -756,8 +788,11 @@ exit: } #endif /* Add new temporary table to list of open derived tables */ - table->next= thd->derived_tables; - thd->derived_tables= table; + if (!derived->is_with_table_recursive_reference()) + { + table->next= thd->derived_tables; + thd->derived_tables= table; + } /* If table is used by a left join, mark that any column may be null */ if (derived->outer_join) @@ -909,6 +944,14 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT_LEX_UNIT *unit= derived->get_unit(); bool res= FALSE; + if (derived->is_recursive_with_table() && unit->executed) + { + TABLE *src= unit->with_element->rec_result->table; + TABLE *dest= derived->table; + res= src->insert_all_rows_into(thd, dest, true); + DBUG_RETURN(res); + } + if (unit->executed && !unit->uncacheable && !unit->describe) DBUG_RETURN(FALSE); /*check that table creation passed without problems. */ @@ -919,6 +962,8 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) if (unit->is_union()) { // execute union without clean up + if (derived->is_recursive_with_table()) + unit->with_element->set_result_table(derived->table); res= unit->exec(); } else @@ -948,7 +993,9 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) res= TRUE; unit->executed= TRUE; } - if (res || !lex->describe) + if (res || + (!lex->describe && + !(unit->with_element && unit->with_element->is_recursive))) unit->cleanup(); lex->current_select= save_current_select; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index de345b4dd1c..42058319fc9 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -2172,6 +2172,7 @@ void st_select_lex::init_select() m_non_agg_field_used= false; m_agg_func_used= false; name_visibility_map= 0; + with_dep= 0; join= 0; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 10247bd33a2..09463635b94 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -546,6 +546,14 @@ public: LEX_STRING *option= 0); virtual void set_lock_for_tables(thr_lock_type lock_type) {} void set_slave(st_select_lex_node *slave_arg) { slave= slave_arg; } + void move_node(st_select_lex_node *where_to_move) + { + if (where_to_move == this) + return; + *prev= next; + *where_to_move->prev= this; + next= where_to_move; + } st_select_lex_node *insert_chain_before(st_select_lex_node **ptr_pos_to_insert, st_select_lex_node *end_chain_node); friend class st_select_lex_unit; @@ -695,6 +703,7 @@ public: bool prepare(THD *thd, select_result *result, ulong additional_options); bool optimize(); bool exec(); + bool exec_recursive(); bool cleanup(); inline void unclean() { cleaned= 0; } void reinit_exec_mechanism(); @@ -911,6 +920,8 @@ public: /* namp of nesting SELECT visibility (for aggregate functions check) */ nesting_map 
name_visibility_map; + table_map with_dep; + void init_query(); void init_select(); st_select_lex_unit* master_unit() { return (st_select_lex_unit*) master; } @@ -1097,6 +1108,8 @@ public: return master_unit()->with_element; } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); + bool check_unrestricted_recursive(); + List window_specs; void prepare_add_window_spec(THD *thd); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index a6bb89f05df..ecf27bd1239 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6215,7 +6215,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) new (thd->mem_root) Item_int(thd, (ulonglong) thd->variables.select_limit); } - if (check_dependencies_in_with_clauses(lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(thd, lex->with_clauses_list)) return 1; if (!(res= open_and_lock_tables(thd, all_tables, TRUE, 0))) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 2d6a7302afc..453ca936a88 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1509,7 +1509,7 @@ static int mysql_test_select(Prepared_statement *stmt, lex->select_lex.context.resolve_in_select_list= TRUE; ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL; - if (check_dependencies_in_with_clauses(lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(thd,lex->with_clauses_list)) goto error; if (tables) { diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 6c4d2e1fc9c..0a961b4a53a 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3669,6 +3669,7 @@ make_join_statistics(JOIN *join, List &tables_list, s->checked_keys.init(); s->needed_reg.init(); table_vector[i]=s->table=table=tables->table; + s->tab_list= tables; table->pos_in_table_list= tables; error= tables->fetch_number_of_rows(); set_statistics_for_table(join->thd, table); @@ -11423,6 +11424,11 @@ bool error_if_full_join(JOIN *join) void JOIN_TAB::cleanup() { DBUG_ENTER("JOIN_TAB::cleanup"); + + if (tab_list && tab_list->is_with_table_recursive_reference() && + tab_list->with->is_cleaned()) + DBUG_VOID_RETURN; + DBUG_PRINT("enter", ("tab: %p table %s.%s", this, (table ? table->s->db.str : "?"), @@ -11592,7 +11598,7 @@ bool JOIN_TAB::preread_init() } /* Materialize derived table/view. 
*/ - if (!derived->get_unit()->executed && + if ((!derived->get_unit()->executed || derived->is_recursive_with_table()) && mysql_handle_single_derived(join->thd->lex, derived, DT_CREATE | DT_FILL)) return TRUE; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 87b836f40d9..ac582c115d8 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -28,6 +28,8 @@ #include "sql_cursor.h" #include "sql_base.h" // fill_record #include "filesort.h" // filesort_free_buffers +#include "sql_view.h" +#include "sql_cte.h" bool mysql_union(THD *thd, LEX *lex, select_result *result, SELECT_LEX_UNIT *unit, ulong setup_tables_done_option) @@ -98,6 +100,26 @@ int select_union::send_data(List &values) return 0; } +int select_union_recursive::send_data(List &values) +{ + int rc= select_union::send_data(values); + + if (!write_err) + { + int err; + if ((err= incr_table->file->ha_write_tmp_row(table->record[0]))) + { + bool is_duplicate; + rc= create_internal_tmp_table_from_heap(thd, incr_table, + tmp_table_param.start_recinfo, + &tmp_table_param.recinfo, + err, 1, &is_duplicate); + } + } + + return rc; +} + bool select_union::send_eof() { @@ -171,6 +193,61 @@ select_union::create_result_table(THD *thd_arg, List *column_types, return FALSE; } +bool +select_union_recursive::create_result_table(THD *thd_arg, + List *column_types, + bool is_union_distinct, + ulonglong options, + const char *alias, + bool bit_fields_as_long, + bool create_table, + bool keep_row_order) +{ + if (select_union::create_result_table(thd_arg, column_types, + is_union_distinct, options, + alias, bit_fields_as_long, + create_table, keep_row_order)) + return true; + + if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, + (ORDER*) 0, false, 1, + options, HA_POS_ERROR, alias, + !create_table, keep_row_order))) + return true; + + incr_table->keys_in_use_for_query.clear_all(); + for (uint i=0; i < table->s->fields; i++) + incr_table->field[i]->flags &= ~PART_KEY_FLAG; + + if (create_table) + { + incr_table->file->extra(HA_EXTRA_WRITE_CACHE); + incr_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + } + + TABLE *rec_table= 0; + if (! (rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, + (ORDER*) 0, false, 1, + options, HA_POS_ERROR, alias, + !create_table, keep_row_order))) + return true; + + rec_table->keys_in_use_for_query.clear_all(); + for (uint i=0; i < table->s->fields; i++) + rec_table->field[i]->flags &= ~PART_KEY_FLAG; + + if (create_table) + { + rec_table->file->extra(HA_EXTRA_WRITE_CACHE); + rec_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + } + + if (rec_tables.push_back(rec_table)) + return true; + + return false; +} + /** Reset and empty the temporary table that stores the materialized query @@ -187,6 +264,29 @@ void select_union::cleanup() } +void select_union_recursive::cleanup() +{ + select_union::cleanup(); + free_tmp_table(thd, table); + + incr_table->file->extra(HA_EXTRA_RESET_STATE); + incr_table->file->ha_delete_all_rows(); + //free_io_cache(incr_table); + //filesort_free_buffers(incr_table,0); + free_tmp_table(thd, incr_table); + + List_iterator
it(rec_tables); + TABLE *tab; + while ((tab= it++)) + { + tab->file->extra(HA_EXTRA_RESET_STATE); + tab->file->ha_delete_all_rows(); + //free_io_cache(tab); + //filesort_free_buffers(tab,0); + free_tmp_table(thd, tab); + } +} + /** Replace the current result with new_result and prepare it. @@ -332,11 +432,14 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg, } + + bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, ulong additional_options) { SELECT_LEX *lex_select_save= thd_arg->lex->current_select; SELECT_LEX *sl, *first_sl= first_select(); + bool is_recursive= with_element && with_element->is_recursive; select_result *tmp_result; bool is_union_select; bool instantiate_tmp_table= false; @@ -404,8 +507,15 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, } else { - if (!(tmp_result= union_result= - new (thd_arg->mem_root) select_union(thd_arg))) + if (!is_recursive) + union_result= new (thd_arg->mem_root) select_union(thd_arg); + else + { + with_element->rec_result= + new (thd_arg->mem_root) select_union_recursive(thd_arg); + union_result= with_element->rec_result; + } + if (!(tmp_result= union_result)) goto err; /* purecov: inspected */ instantiate_tmp_table= true; } @@ -414,7 +524,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, tmp_result= sel_result; sl->context.resolve_in_select_list= TRUE; - + for (;sl; sl= sl->next_select()) { bool can_skip_order_by; @@ -477,6 +587,13 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, types= first_sl->item_list; else if (sl == first_sl) { + if (is_recursive) + { + if (derived->with->rename_columns_of_derived_unit(thd, this)) + goto err; + if (check_duplicate_names(thd, sl->item_list, 0)) + goto err; + } types.empty(); List_iterator_fast it(sl->item_list); Item *item_tmp; @@ -489,6 +606,23 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (thd_arg->is_fatal_error) goto err; // out of memory + + if (is_recursive) + { + + ulonglong create_options; + create_options= (first_sl->options | thd_arg->variables.option_bits | + TMP_TABLE_ALL_COLUMNS); + if (union_result->create_result_table(thd, &types, + MY_TEST(union_distinct), + create_options, "", false, + instantiate_tmp_table, false)) + goto err; + if (!derived->table) + derived->table= derived->derived_result->table= + with_element->rec_result->rec_tables.head(); + with_element->mark_as_with_prepared_anchor(); + } } else { @@ -507,6 +641,10 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, DBUG_RETURN(TRUE); } } + if (with_element && !with_element->is_anchor(sl)) + { + sl->uncacheable|= UNCACHEABLE_UNITED; + } } /* @@ -580,9 +718,11 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (global_parameters()->ftfunc_list->elements) create_options= create_options | TMP_TABLE_FORCE_MYISAM; - if (union_result->create_result_table(thd, &types, MY_TEST(union_distinct), - create_options, "", false, - instantiate_tmp_table)) + + if (!is_recursive && + union_result->create_result_table(thd, &types, MY_TEST(union_distinct), + create_options, "", false, + instantiate_tmp_table, false)) goto err; if (fake_select_lex && !fake_select_lex->first_cond_optimization) { @@ -784,6 +924,12 @@ bool st_select_lex_unit::exec() if (saved_error) DBUG_RETURN(saved_error); + if (with_element && with_element->is_recursive && !describe) + { + saved_error= exec_recursive(); + DBUG_RETURN(saved_error); + } + if (uncacheable || !item || 
!item->assigned() || describe) { if (!fake_select_lex) @@ -1009,6 +1155,89 @@ err: } + +bool st_select_lex_unit::exec_recursive() +{ + st_select_lex *lex_select_save= thd->lex->current_select; + st_select_lex *first_recursive_sel= with_element->first_recursive; + TABLE *incr_table= with_element->rec_result->incr_table; + TABLE *result_table= with_element->result_table; + ha_rows last_union_records= 0; + ha_rows examined_rows= 0; + bool unrestricted= with_element->is_unrestricted(); + bool is_stabilized= false; + DBUG_ENTER("st_select_lex_unit::exec_recursive"); + bool with_anchor= with_element->with_anchor; + st_select_lex *first_sl= first_select(); + st_select_lex *barrier= with_anchor ? first_recursive_sel : NULL; + List_iterator_fast
li(with_element->rec_result->rec_tables); + TABLE *rec_table; + + do + { + if ((saved_error= incr_table->file->ha_delete_all_rows())) + goto err; + + for (st_select_lex *sl= first_sl ; sl != barrier; sl= sl->next_select()) + { + thd->lex->current_select= sl; + sl->join->exec(); + saved_error= sl->join->error; + if (!saved_error) + { + examined_rows+= thd->get_examined_row_count(); + thd->set_examined_row_count(0); + if (union_result->flush()) + { + thd->lex->current_select= lex_select_save; + DBUG_RETURN(1); + } + } + if (saved_error) + { + thd->lex->current_select= lex_select_save; + goto err; + } + } + + if (with_element->level == 0) + { + first_sl= first_recursive_sel; + barrier= NULL; + } + + table->file->info(HA_STATUS_VARIABLE); + if (table->file->stats.records == last_union_records) + { + is_stabilized= true; + } + else + { + last_union_records= table->file->stats.records; + with_element->level++; + } + li.rewind(); + while ((rec_table= li++)) + { + if ((saved_error= incr_table->insert_all_rows_into(thd, rec_table, + !unrestricted))) + goto err; + } + } while (!is_stabilized); + + if ((saved_error= table->insert_all_rows_into(thd, + result_table, + true))) + goto err; + + thd->lex->current_select= lex_select_save; +err: + thd->lex->set_limit_rows_examined(); + DBUG_RETURN(saved_error); + +} + + bool st_select_lex_unit::cleanup() { int error= 0; @@ -1023,6 +1252,13 @@ bool st_select_lex_unit::cleanup() for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) error|= sl->cleanup(); + if (union_result && with_element && with_element->is_recursive) + { + ((select_union_recursive *) union_result)->cleanup(); + delete union_result; + union_result= 0; + } + if (fake_select_lex) { error|= fake_select_lex->cleanup(); @@ -1046,7 +1282,10 @@ bool st_select_lex_unit::cleanup() } } - if (union_result) + if (with_element && with_element->is_recursive) + with_element->mark_as_cleaned(); + + if (union_result && !(with_element->is_recursive)) { delete union_result; union_result=0; // Safety diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 04c1ba7e99a..0d83efcae04 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -978,6 +978,7 @@ bool LEX::set_bincmp(CHARSET_INFO *cs, bool bin) List *item_list; List *stmt_info_list; List *string_list; + List *lex_str_list; Statement_information_item *stmt_info_item; String *string; TABLE_LIST *table_list; @@ -2053,6 +2054,8 @@ END_OF_INPUT %type query_name +%type opt_with_column_list + %% @@ -14077,13 +14080,18 @@ with_list: with_list_element: query_name opt_with_column_list + { + $2= new List (Lex->with_column_list); + if ($2 == NULL) + MYSQL_YYABORT; + Lex->with_column_list.empty(); + } AS '(' remember_name subselect remember_end ')' { - With_element *elem= new With_element($1, Lex->with_column_list, $6->master_unit()); + With_element *elem= new With_element($1, *$2, $7->master_unit()); if (elem == NULL || Lex->curr_with_clause->add_with_element(elem)) MYSQL_YYABORT; - Lex->with_column_list.empty(); - if (elem->set_unparsed_spec(thd, $5+1, $7)) + if (elem->set_unparsed_spec(thd, $6+1, $8)) MYSQL_YYABORT; } ; @@ -14091,8 +14099,9 @@ with_list_element: opt_with_column_list: /* empty */ - {} + { $$= NULL; } | '(' with_column_list ')' + { $$= NULL; } ; diff --git a/sql/table.cc b/sql/table.cc index dc1730b5b6f..6109c16fb37 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -41,6 +41,7 @@ #include "mdl.h" // MDL_wait_for_graph_visitor #include "sql_view.h" #include "rpl_filter.h" +#include "sql_cte.h" /* INFORMATION_SCHEMA name */ LEX_STRING 
INFORMATION_SCHEMA_NAME= {C_STRING_WITH_LEN("information_schema")}; @@ -7089,7 +7090,7 @@ bool TABLE::validate_default_values_of_unset_fields(THD *thd) const /* We're here if: - validate_value_in_record_with_warn() failed and - strict mode converted WARN to ERROR + strict mo validate_default_values_of_unset_fieldsde converted WARN to ERROR - or the connection was killed, or closed unexpectedly */ DBUG_RETURN(true); @@ -7100,6 +7101,59 @@ bool TABLE::validate_default_values_of_unset_fields(THD *thd) const } +bool TABLE::insert_all_rows_into(THD *thd, TABLE *dest, bool with_cleanup) +{ + int write_err= 0; + + DBUG_ENTER("TABLE::insert_all_rows_into"); + + if (with_cleanup) + { + if ((write_err= dest->file->ha_delete_all_rows())) + goto err; + } + + if (file->indexes_are_disabled()) + dest->file->ha_disable_indexes(HA_KEY_SWITCH_ALL); + file->ha_index_or_rnd_end(); + + if (file->ha_rnd_init_with_error(1)) + DBUG_RETURN(1); + + if (dest->no_rows) + dest->file->extra(HA_EXTRA_NO_ROWS); + else + { + /* update table->file->stats.records */ + file->info(HA_STATUS_VARIABLE); + dest->file->ha_start_bulk_insert(file->stats.records); + } + + while (!file->ha_rnd_next(dest->record[1])) + { + write_err= dest->file->ha_write_tmp_row(dest->record[1]); + if (write_err) + goto err; + if (thd->check_killed()) + { + thd->send_kill_message(); + goto err_killed; + } + } + if (!dest->no_rows && dest->file->ha_end_bulk_insert()) + goto err; + DBUG_RETURN(0); + +err: + DBUG_PRINT("error",("Got error: %d",write_err)); + file->print_error(write_err, MYF(0)); +err_killed: + (void) file->ha_rnd_end(); + DBUG_RETURN(1); +} + + + /* @brief Reset const_table flag @@ -7140,20 +7194,34 @@ void TABLE_LIST::reset_const_table() bool TABLE_LIST::handle_derived(LEX *lex, uint phases) { - SELECT_LEX_UNIT *unit; + SELECT_LEX_UNIT *unit= get_unit(); DBUG_ENTER("handle_derived"); DBUG_PRINT("enter", ("phases: 0x%x", phases)); - if ((unit= get_unit())) + + if (is_with_table_recursive_reference()) + { + if (!(with->with_anchor || with->is_with_prepared_anchor())) + { + for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) + if (sl->handle_derived(lex, phases)) + DBUG_RETURN(TRUE); + } + else if (mysql_handle_single_derived(lex, this, phases)) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); + } + + if (unit) { for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) if (sl->handle_derived(lex, phases)) DBUG_RETURN(TRUE); - DBUG_RETURN(mysql_handle_single_derived(lex, this, phases)); + if (mysql_handle_single_derived(lex, this, phases)) + DBUG_RETURN(TRUE); } DBUG_RETURN(FALSE); } - /** @brief Return unit of this derived table/view @@ -7430,6 +7498,7 @@ bool TABLE_LIST::is_with_table() return derived && derived->with_element; } + uint TABLE_SHARE::actual_n_key_parts(THD *thd) { return use_ext_keys && diff --git a/sql/table.h b/sql/table.h index a105df31e93..122b036cae5 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1416,6 +1416,8 @@ public: inline Field **field_to_fill(); bool validate_default_values_of_unset_fields(THD *thd) const; + + bool insert_all_rows_into(THD *thd, TABLE *dest, bool with_cleanup); }; @@ -1856,6 +1858,8 @@ struct TABLE_LIST */ st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ With_element *with; /* With element of with_table */ + table_map with_internal_reference_map; + bool block_handle_derived; ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ st_select_lex *schema_select_lex; /* @@ -2227,6 +2231,9 @@ struct TABLE_LIST return (derived_type & DTYPE_TABLE); } 
bool is_with_table(); + bool is_recursive_with_table(); + bool is_with_table_recursive_reference(); + inline void set_view() { derived_type= DTYPE_VIEW; -- cgit v1.2.1 From d0e973a3b0fc4384b8aeebfa6509a653b07b7eac Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Tue, 10 May 2016 22:32:02 +0300 Subject: Fixed merge problems to allow mysql-test suite 'main' to pass --- mysql-test/r/cte_nonrecursive.result | 17 ++++++++++------- mysql-test/t/cte_nonrecursive.test | 17 ++++++++++------- mysql-test/t/cte_recursive.test | 2 +- sql/share/errmsg-utf8.txt | 3 --- sql/sql_derived.cc | 5 ++++- sql/sql_union.cc | 2 +- 6 files changed, 26 insertions(+), 20 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index a9c13f3f10b..2cc291dcfbc 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -691,24 +691,27 @@ with recursive t as (select * from s where a>2), s as (select a from t1,r where t1.a>r.c), r as (select c from t,t2 where t.a=t2.c) select * from r where r.c<7; -ERROR HY000: Recursive queries in WITH clause are not supported yet -with t as (select * from s where a>2), +ERROR HY000: No anchors for recursive WITH element 't' +with recursive +t as (select * from s where a>2), s as (select a from t1,r where t1.a>r.c), r as (select c from t,t2 where t.a=t2.c) select * from r where r.c<7; -ERROR HY000: Recursive queries in WITH clause are not supported yet -with t as (select * from t1 +ERROR HY000: No anchors for recursive WITH element 't' +with recursive +t as (select * from t1 where a in (select c from s where b<='ccc') and b>'b'), s as (select * from t1,t2 where t1.a=t2.c and t1.c in (select a from t where a<5)) select * from s where s.b>'aaa'; -ERROR HY000: Recursive queries in WITH clause are not supported yet -with t as (select * from t1 where b>'aaa' and b <='d') +ERROR HY000: No anchors for recursive WITH element 't' +with recursive +t as (select * from t1 where b>'aaa' and b <='d') select t.b from t,t2 where t.a=t2.c and t2.c in (with s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') select * from s); -ERROR HY000: Recursive queries in WITH clause are not supported yet +ERROR HY000: No anchors for recursive WITH element 's' #erroneous definition of unreferenced with table t with t as (select count(*) from t1 where d>='f' group by a) select t1.b from t2,t1 where t1.a = t2.c; diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index e3164f53887..978faaf0a4d 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -376,27 +376,30 @@ with recursive s as (select a from t1 where b>='d') select * from t,s where t.a=s.a; ---ERROR ER_RECURSIVE_QUERY_IN_WITH_CLAUSE +--ERROR ER_RECURSIVE_WITHOUT_ANCHORS with recursive t as (select * from s where a>2), s as (select a from t1,r where t1.a>r.c), r as (select c from t,t2 where t.a=t2.c) select * from r where r.c<7; ---ERROR ER_RECURSIVE_QUERY_IN_WITH_CLAUSE -with t as (select * from s where a>2), +--ERROR ER_RECURSIVE_WITHOUT_ANCHORS +with recursive + t as (select * from s where a>2), s as (select a from t1,r where t1.a>r.c), r as (select c from t,t2 where t.a=t2.c) select * from r where r.c<7; ---ERROR ER_RECURSIVE_QUERY_IN_WITH_CLAUSE -with t as (select * from t1 +--ERROR ER_RECURSIVE_WITHOUT_ANCHORS +with recursive + t as (select * from t1 where a in (select c from s where b<='ccc') and b>'b'), s as (select * from t1,t2 where t1.a=t2.c and t1.c in (select a from t where a<5)) select * 
from s where s.b>'aaa'; ---ERROR ER_RECURSIVE_QUERY_IN_WITH_CLAUSE -with t as (select * from t1 where b>'aaa' and b <='d') +--ERROR ER_RECURSIVE_WITHOUT_ANCHORS +with recursive + t as (select * from t1 where b>'aaa' and b <='d') select t.b from t,t2 where t.a=t2.c and t2.c in (with s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 34eee6d3bf2..a5ad1d66a51 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -4,7 +4,7 @@ insert into t1 values insert into t1 values (3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); ---ERROR 1984 +--ERROR ER_RECURSIVE_WITHOUT_ANCHORS with recursive a1(a,b) as (select * from t1 where t1.a>3 diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index f2a5666f1b1..38b05ca9dce 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7152,13 +7152,10 @@ ER_DUP_QUERY_NAME eng "Duplicate query name in WITH clause" ER_WRONG_ORDER_IN_WITH_CLAUSE eng "The definition of the table '%s' refers to the table '%s' defined later in a non-recursive WITH clause" -ER_RECURSIVE_QUERY_IN_WITH_CLAUSE - eng "Recursive queries in WITH clause are not supported yet" ER_RECURSIVE_WITHOUT_ANCHORS eng "No anchors for recursive WITH element '%s'" ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED eng "Reference to recursive WITH table '%s' in materiazed derived" - # # Internal errors, not used # diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 63302c1c6db..95a7ee91435 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -636,6 +636,9 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) bool res= FALSE; DBUG_PRINT("enter", ("unit 0x%lx", (ulong) unit)); + if (!unit) + DBUG_RETURN(FALSE); + SELECT_LEX *first_select= unit->first_select(); if (unit->prepared && derived->is_recursive_with_table() && @@ -665,7 +668,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) } // Skip already prepared views/DT - if (!unit || unit->prepared || + if (unit->prepared || (derived->merged_for_insert && !(derived->is_multitable() && (thd->lex->sql_command == SQLCOM_UPDATE_MULTI || diff --git a/sql/sql_union.cc b/sql/sql_union.cc index ac582c115d8..7345c6f224e 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1285,7 +1285,7 @@ bool st_select_lex_unit::cleanup() if (with_element && with_element->is_recursive) with_element->mark_as_cleaned(); - if (union_result && !(with_element->is_recursive)) + if (union_result && !(with_element &&with_element->is_recursive)) { delete union_result; union_result=0; // Safety -- cgit v1.2.1 From d9b332bd2009cc520534bb9413e2f50c717237aa Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Thu, 12 May 2016 23:23:12 +0300 Subject: Made prepared statement, explain and views working with recursuve CTE. 
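A minimal sketch of the newly supported usage, using only the folks table and the ancestors CTE already present in cte_recursive.test: the same recursive definition can now be run through a prepared statement, and, as the tests below show, the identical body also works inside CREATE VIEW and under EXPLAIN EXTENDED.

# prepare once, execute twice: the recursive CTE must give the same
# ancestor set on every execution of the statement
prepare stmt1 from "
with recursive ancestors as
(
  select * from folks where name = 'Vasya' and dob = '2000-01-01'
  union
  select p.id, p.name, p.dob, p.father, p.mother
  from folks as p, ancestors as a
  where p.id = a.father or p.id = a.mother
)
select * from ancestors;
";
execute stmt1;
execute stmt1;
deallocate prepare stmt1;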
--- mysql-test/r/cte_nonrecursive.result | 8 ++-- mysql-test/r/cte_recursive.result | 83 ++++++++++++++++++++++++++++++++++++ mysql-test/t/cte_recursive.test | 61 ++++++++++++++++++++++++++ sql/sql_cte.cc | 20 +++++++-- sql/sql_cte.h | 23 ++++++---- sql/sql_select.cc | 4 +- sql/sql_union.cc | 10 +++-- sql/sql_view.cc | 10 +++++ 8 files changed, 198 insertions(+), 21 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 2cc291dcfbc..d81c7c9ed4c 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -534,7 +534,7 @@ with t as (select a from t1 where b >= 'c') select * from t2,t where t2.c=t.a; show create view v1; View Create View character_set_client collation_connection -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS WITH t AS (select `t1`.`a` AS `a` from `t1` where (`t1`.`b` >= 'c'))select `t2`.`c` AS `c`,`t`.`a` AS `a` from (`t2` join `t`) where (`t2`.`c` = `t`.`a`) latin1 latin1_swedish_ci +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with t as (select `t1`.`a` AS `a` from `t1` where (`t1`.`b` >= 'c'))select `t2`.`c` AS `c`,`t`.`a` AS `a` from (`t2` join `t`) where (`t2`.`c` = `t`.`a`) latin1 latin1_swedish_ci select * from v1; c a 4 4 @@ -552,7 +552,7 @@ with t as (select a, count(*) from t1 where b >= 'c' group by a) select * from t2,t where t2.c=t.a; show create view v2; View Create View character_set_client collation_connection -v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS WITH t AS (select `t1`.`a` AS `a`,count(0) AS `count(*)` from `t1` where (`t1`.`b` >= 'c') group by `t1`.`a`)select `t2`.`c` AS `c`,`t`.`a` AS `a`,`t`.`count(*)` AS `count(*)` from (`t2` join `t`) where (`t2`.`c` = `t`.`a`) latin1 latin1_swedish_ci +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with t as (select `t1`.`a` AS `a`,count(0) AS `count(*)` from `t1` where (`t1`.`b` >= 'c') group by `t1`.`a`)select `t2`.`c` AS `c`,`t`.`a` AS `a`,`t`.`count(*)` AS `count(*)` from (`t2` join `t`) where (`t2`.`c` = `t`.`a`) latin1 latin1_swedish_ci select * from v2; c a count(*) 4 4 2 @@ -571,7 +571,7 @@ with t(c) as (select a from t1 where b >= 'c') select * from t r1 where r1.c=4; show create view v3; View Create View character_set_client collation_connection -v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS WITH t AS (select `t1`.`a` AS `c` from `t1` where (`t1`.`b` >= 'c'))select `r1`.`c` AS `c` from `t` `r1` where (`r1`.`c` = 4) latin1 latin1_swedish_ci +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS with t as (select `t1`.`a` AS `c` from `t1` where (`t1`.`b` >= 'c'))select `r1`.`c` AS `c` from `t` `r1` where (`r1`.`c` = 4) latin1 latin1_swedish_ci select * from v3; c 4 @@ -583,7 +583,7 @@ with t(c) as (select a from t1 where b >= 'c') select * from t r1, t r2 where r1.c=r2.c and r2.c=4; show create view v4; View Create View character_set_client collation_connection -v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS WITH t AS (select `t1`.`a` AS `c` from `t1` where (`t1`.`b` >= 'c'))select `r1`.`c` AS `c`,`r2`.`c` AS `d` from (`t` `r1` join `t` `r2`) where ((`r1`.`c` = `r2`.`c`) and (`r2`.`c` = 4)) latin1 latin1_swedish_ci +v4 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS with t as 
(select `t1`.`a` AS `c` from `t1` where (`t1`.`b` >= 'c'))select `r1`.`c` AS `c`,`r2`.`c` AS `d` from (`t` `r1` join `t` `r2`) where ((`r1`.`c` = `r2`.`c`) and (`r2`.`c` = 4)) latin1 latin1_swedish_ci select * from v4; c d 4 4 diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 7408bc56e63..77b391d6629 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -286,4 +286,87 @@ from ancestor_couples; h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +prepare stmt1 from " +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; +"; +execute stmt1; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +execute stmt1; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +deallocate prepare stmt1; +create view v1 as +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.id, p.name, p.dob, p.father, p.mother +from folks as p, ancestors AS a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +show create view v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where ((`folks`.`name` = 'Vasya') and (`folks`.`dob` = '2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `a`) where ((`p`.`id` = `a`.`father`) or (`p`.`id` = `a`.`mother`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci +select * from v1; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +drop view v1; +explain extended +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' and dob = '2000-01-01' + union +select p.id, p.name, p.dob, p.father, p.mother +from folks as p, ancestors AS a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 132 100.00 +2 SUBQUERY folks ALL NULL NULL NULL NULL 11 100.00 Using where +3 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 11 100.00 +3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 11 100.00 Using where; Using join buffer (flat, BNL join) +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1003 with recursive 
ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where ((`test`.`folks`.`name` = 'Vasya') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` drop table folks; diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index a5ad1d66a51..47eae971c6d 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -237,5 +237,66 @@ as select h_name, h_dob, w_name, w_dob from ancestor_couples; + +prepare stmt1 from " +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; +"; + +execute stmt1; +execute stmt1; + +deallocate prepare stmt1; + + +create view v1 as +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + +show create view v1; + +select * from v1; + +drop view v1; + + +explain extended +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' and dob = '2000-01-01' + union + select p.id, p.name, p.dob, p.father, p.mother + from folks as p, ancestors AS a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; + + drop table folks; diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 04d53495400..7e60a8d1892 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -731,7 +731,7 @@ bool TABLE_LIST::is_with_table_recursive_reference() bool st_select_lex::check_unrestricted_recursive() { With_element *with_elem= get_with_element(); - if (!with_elem) + if (!with_elem ||!with_elem->is_recursive) return false; table_map unrestricted= 0; table_map encountered= 0; @@ -806,6 +806,18 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, } } } + ti.rewind(); + while ((tbl= ti++)) + { + for (TABLE_LIST *tab= tbl; tab; tab= tab->embedding) + { + if (tab->outer_join & (JOIN_TYPE_LEFT | JOIN_TYPE_RIGHT)) + { + unrestricted|= get_elem_map(); + break; + } + } + } return false; } @@ -824,9 +836,9 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, void With_clause::print(String *str, enum_query_type query_type) { - str->append(STRING_WITH_LEN("WITH ")); + str->append(STRING_WITH_LEN("with ")); if (with_recursive) - str->append(STRING_WITH_LEN("RECURSIVE ")); + str->append(STRING_WITH_LEN("recursive ")); for (With_element *with_elem= first_elem; with_elem != NULL; with_elem= with_elem->next_elem) @@ -853,7 +865,7 @@ void With_clause::print(String *str, enum_query_type query_type) void With_element::print(String *str, enum_query_type query_type) { str->append(query_name); - str->append(STRING_WITH_LEN(" AS ")); + str->append(STRING_WITH_LEN(" as ")); str->append('('); spec->print(str, query_type); str->append(')'); diff --git a/sql/sql_cte.h 
b/sql/sql_cte.h index b559be93de5..0312fcd0643 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -82,9 +82,8 @@ public: uint level; - select_union *partial_result; - select_union *final_result; select_union_recursive *rec_result; + TABLE *result_table; With_element(LEX_STRING *name, @@ -95,9 +94,8 @@ public: references(0), table(NULL), query_name(name), column_list(list), spec(unit), is_recursive(false), with_anchor(false), - partial_result(NULL), final_result(NULL), - rec_result(NULL), result_table(NULL) - { reset();} + level(0), rec_result(NULL), result_table(NULL) + {} bool check_dependencies_in_spec(THD *thd); @@ -147,10 +145,7 @@ public: void mark_as_cleaned(); - void reset() - { - level= 0; - } + void reset_for_exec(); void set_result_table(TABLE *tab) { result_table= tab; } @@ -284,4 +279,14 @@ void With_element::mark_as_cleaned() owner->cleaned|= get_elem_map(); } + +inline +void With_element::reset_for_exec() +{ + level= 0; + owner->with_prepared_anchor&= ~mutually_recursive; + owner->cleaned&= ~get_elem_map(); +} + + #endif /* SQL_CTE_INCLUDED */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0a961b4a53a..71b672b6131 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -816,6 +816,8 @@ JOIN::prepare(TABLE_LIST *tables_init, &hidden_group_fields, &select_lex->select_n_reserved)) DBUG_RETURN(-1); + if (select_lex->check_unrestricted_recursive()) + DBUG_RETURN(-1); /* Resolve the ORDER BY that was skipped, then remove it. */ if (skip_order_by && select_lex != select_lex->master_unit()->global_parameters()) @@ -24491,7 +24493,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) if (unit->is_union()) { - if (unit->union_needs_tmp_table()) + if (unit->union_needs_tmp_table() && unit->fake_select_lex) { unit->fake_select_lex->select_number= FAKE_SELECT_LEX_ID; // just for initialization unit->fake_select_lex->type= "UNION RESULT"; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 7345c6f224e..902620aaac1 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -514,6 +514,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, with_element->rec_result= new (thd_arg->mem_root) select_union_recursive(thd_arg); union_result= with_element->rec_result; + fake_select_lex= NULL; } if (!(tmp_result= union_result)) goto err; /* purecov: inspected */ @@ -615,7 +616,8 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, TMP_TABLE_ALL_COLUMNS); if (union_result->create_result_table(thd, &types, MY_TEST(union_distinct), - create_options, "", false, + create_options, derived->alias, + false, instantiate_tmp_table, false)) goto err; if (!derived->table) @@ -932,7 +934,7 @@ bool st_select_lex_unit::exec() if (uncacheable || !item || !item->assigned() || describe) { - if (!fake_select_lex) + if (!fake_select_lex && !(with_element && with_element->is_recursive)) union_result->cleanup(); for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select()) { @@ -973,7 +975,7 @@ bool st_select_lex_unit::exec() { records_at_start= table->file->stats.records; sl->join->exec(); - if (sl == union_distinct) + if (sl == union_distinct && !(with_element && with_element->is_recursive)) { // This is UNION DISTINCT, so there should be a fake_select_lex DBUG_ASSERT(fake_select_lex != NULL); @@ -1315,6 +1317,8 @@ void st_select_lex_unit::reinit_exec_mechanism() */ field->fixed= 0; } + if (with_element && with_element->is_recursive) + with_element->reset_for_exec(); } #endif } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 
b66f678adfc..4fd4fb8dd01 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -35,6 +35,7 @@ #include "sp_cache.h" #include "datadict.h" // dd_frm_is_view() #include "sql_derived.h" +#include "sql_cte.h" // check_dependencies_in_with_clauses() #define MD5_BUFF_LENGTH 33 @@ -429,6 +430,12 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, lex->link_first_table_back(view, link_to_local); view->open_type= OT_BASE_ONLY; + if (check_dependencies_in_with_clauses(thd, lex->with_clauses_list)) + { + res= TRUE; + goto err; + } + if (open_temporary_tables(thd, lex->query_tables) || open_and_lock_tables(thd, lex->query_tables, TRUE, 0)) { @@ -1383,6 +1390,9 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, TABLE_LIST *tbl; Security_context *security_ctx= 0; + if (check_dependencies_in_with_clauses(thd, thd->lex->with_clauses_list)) + goto err; + /* Check rights to run commands (ANALYZE SELECT, EXPLAIN SELECT & SHOW CREATE) which show underlying tables. -- cgit v1.2.1 From df7ecf64f5b9c6fb4b7789a414306de89b58bec7 Mon Sep 17 00:00:00 2001 From: Sujatha Sivakumar Date: Fri, 13 May 2016 16:42:45 +0530 Subject: Bug#23251517: SEMISYNC REPLICATION HANGING Revert following bug fix: Bug#20685029: SLAVE IO THREAD SHOULD STOP WHEN DISK IS FULL Bug#21753696: MAKE SHOW SLAVE STATUS NON BLOCKING IF IO THREAD WAITS FOR DISK SPACE This fix results in a deadlock between slave IO thread and SQL thread. --- mysql-test/include/assert_grep.inc | 154 --------------------- mysql-test/include/rpl_init.inc | 31 +---- mysql-test/include/rpl_reconnect.inc | 33 ++--- mysql-test/include/start_slave_sql.inc | 39 ------ .../rpl/r/rpl_io_thd_wait_for_disk_space.result | 15 -- .../rpl/t/rpl_io_thd_wait_for_disk_space.test | 71 ---------- mysys/errors.c | 14 +- mysys/my_write.c | 8 +- sql/log.cc | 75 +--------- sql/log.h | 5 +- sql/slave.cc | 38 ++--- sql/slave.h | 2 +- sql/sql_reload.cc | 2 +- 13 files changed, 41 insertions(+), 446 deletions(-) delete mode 100644 mysql-test/include/assert_grep.inc delete mode 100644 mysql-test/include/start_slave_sql.inc delete mode 100644 mysql-test/suite/rpl/r/rpl_io_thd_wait_for_disk_space.result delete mode 100644 mysql-test/suite/rpl/t/rpl_io_thd_wait_for_disk_space.test diff --git a/mysql-test/include/assert_grep.inc b/mysql-test/include/assert_grep.inc deleted file mode 100644 index a980a6d73b1..00000000000 --- a/mysql-test/include/assert_grep.inc +++ /dev/null @@ -1,154 +0,0 @@ -# ==== Purpose ==== -# -# Grep a file for a pattern, produce a single string out of the -# matching lines, and assert that the string matches a given regular -# expression. -# -# ==== Usage ==== -# -# --let $assert_text= TEXT -# --let $assert_file= FILE -# --let $assert_select= REGEX -# [--let $assert_match= REGEX | --let $assert_count= NUMBER] -# [--let $assert_only_after= REGEX] -# --source include/assert_grep.inc -# -# Parameters: -# -# $assert_text -# Text that describes what is being checked. This text is written to -# the query log so it should not contain non-deterministic elements. -# -# $assert_file -# File to search. -# -# $assert_select -# All lines matching this text will be checked. -# -# $assert_match -# The script will find all lines that match $assert_select, -# concatenate them to a long string, and assert that it matches -# $assert_match. -# -# $assert_count -# Instead of asserting that the selected lines match -# $assert_match, assert that there were exactly $assert_count -# matching lines. 
-# -# $assert_only_after -# Reset all the lines matched and the counter when finding this pattern. -# It is useful for searching things in the mysqld.err log file just -# after the last server restart for example (discarding the log content -# of previous server executions). - - -if (!$assert_text) -{ - --die !!!ERROR IN TEST: you must set $assert_text -} -if (!$assert_file) -{ - --die !!!ERROR IN TEST: you must set $assert_file -} -if (!$assert_select) -{ - --die !!!ERROR IN TEST: you must set $assert_select -} -if ($assert_match == '') -{ - if ($assert_count == '') - { - --die !!!ERROR IN TEST: you must set either $assert_match or $assert_count - } -} -if ($assert_match != '') -{ - if ($assert_count != '') - { - --echo assert_text='$assert_text' assert_count='$assert_count' - --die !!!ERROR IN TEST: you must set only one of $assert_match or $assert_count - } -} - - ---let $include_filename= assert_grep.inc [$assert_text] ---source include/begin_include_file.inc - - ---let _AG_ASSERT_TEXT= $assert_text ---let _AG_ASSERT_FILE= $assert_file ---let _AG_ASSERT_SELECT= $assert_select ---let _AG_ASSERT_MATCH= $assert_match ---let _AG_ASSERT_COUNT= $assert_count ---let _AG_OUT= `SELECT CONCAT('$MYSQLTEST_VARDIR/tmp/_ag_', UUID())` ---let _AG_ASSERT_ONLY_AFTER= $assert_only_after - - ---perl - use strict; - use warnings; - my $file= $ENV{'_AG_ASSERT_FILE'}; - my $assert_select= $ENV{'_AG_ASSERT_SELECT'}; - my $assert_match= $ENV{'_AG_ASSERT_MATCH'}; - my $assert_count= $ENV{'_AG_ASSERT_COUNT'}; - my $assert_only_after= $ENV{'_AG_ASSERT_ONLY_AFTER'}; - my $out= $ENV{'_AG_OUT'}; - - my $result= ''; - my $count= 0; - open(FILE, "$file") or die("Error $? opening $file: $!\n"); - while () { - my $line = $_; - if ($assert_only_after && $line =~ /$assert_only_after/) { - $result = ""; - $count = 0; - } - if ($line =~ /$assert_select/) { - if ($assert_count ne '') { - $count++; - } - else { - $result .= $line; - } - } - } - close(FILE) or die("Error $? closing $file: $!"); - open OUT, "> $out" or die("Error $? opening $out: $!"); - if ($assert_count ne '' && ($count != $assert_count)) { - print OUT ($count) or die("Error $? writing $out: $!"); - } - elsif ($assert_count eq '' && $result !~ /$assert_match/) { - print OUT ($result) or die("Error $? writing $out: $!"); - } - else { - print OUT ("assert_grep.inc ok"); - } - close OUT or die("Error $? closing $out: $!"); -EOF - - ---let $_ag_outcome= `SELECT LOAD_FILE('$_AG_OUT')` -if ($_ag_outcome != 'assert_grep.inc ok') -{ - --source include/show_rpl_debug_info.inc - --echo include/assert_grep.inc failed! - --echo assert_text: '$assert_text' - --echo assert_file: '$assert_file' - --echo assert_select: '$assert_select' - --echo assert_match: '$assert_match' - --echo assert_count: '$assert_count' - --echo assert_only_after: '$assert_only_after' - if ($assert_match != '') - { - --echo matching lines: '$_ag_outcome' - } - if ($assert_count != '') - { - --echo number of matching lines: $_ag_outcome - } - --die assert_grep.inc failed. 
-} - - ---let $include_filename= include/assert_grep.inc [$assert_text] ---source include/end_include_file.inc diff --git a/mysql-test/include/rpl_init.inc b/mysql-test/include/rpl_init.inc index 820bc8e9016..2abfd901b03 100644 --- a/mysql-test/include/rpl_init.inc +++ b/mysql-test/include/rpl_init.inc @@ -43,7 +43,6 @@ # # [--let $rpl_server_count= 7] # --let $rpl_topology= 1->2->3->1->4, 2->5, 6->7 -# [--let $rpl_extra_connections_per_server= 1] # [--let $rpl_check_server_ids= 1] # [--let $rpl_skip_change_master= 1] # [--let $rpl_skip_start_slave= 1] @@ -66,12 +65,6 @@ # want to specify the empty topology (no server replicates at # all), you have to set $rpl_topology=none. # -# $rpl_extra_connections_per_server -# By default, this script creates connections server_N and -# server_N_1. If you can set this variable to a number, the -# script creates: -# server_N, server_N_1, ..., server_N_$rpl_extra_connections_per_server -# # $rpl_check_server_ids # If $rpl_check_server_ids is set, this script checks that the # @@server_id of all servers are different. This is normally @@ -146,17 +139,8 @@ if (!$SERVER_MYPORT_4) # Check that $rpl_server_count is set if (!$rpl_server_count) { - --let $rpl_server_count= `SELECT REPLACE('$rpl_topology', '->', ',')` - if (`SELECT LOCATE(',', '$rpl_server_count')`) - { - --let $rpl_server_count= `SELECT GREATEST($rpl_server_count)` - } -} - ---let $_rpl_extra_connections_per_server= $rpl_extra_connections_per_server -if ($_rpl_extra_connections_per_server == '') -{ - --let $_rpl_extra_connections_per_server= 1 + --let $_compute_rpl_server_count= `SELECT REPLACE('$rpl_topology', '->', ',')` + --let $rpl_server_count= `SELECT GREATEST($_compute_rpl_server_count)` } @@ -175,20 +159,15 @@ if (!$rpl_debug) # Create two connections to each server; reset master/slave, select # database, set autoinc variables. --let $_rpl_server= $rpl_server_count ---let $underscore= _ +--let $_rpl_one= _1 while ($_rpl_server) { # Connect. --let $rpl_server_number= $_rpl_server --let $rpl_connection_name= server_$_rpl_server --source include/rpl_connect.inc - --let $_rpl_connection_number= 1 - while ($_rpl_connection_number <= $_rpl_extra_connections_per_server) - { - --let $rpl_connection_name= server_$_rpl_server$underscore$_rpl_connection_number - --source include/rpl_connect.inc - --inc $_rpl_connection_number - } + --let $rpl_connection_name= server_$_rpl_server$_rpl_one + --source include/rpl_connect.inc # Configure server. --let $rpl_connection_name= server_$_rpl_server diff --git a/mysql-test/include/rpl_reconnect.inc b/mysql-test/include/rpl_reconnect.inc index 673f382bac0..cdbbd0a1bf1 100644 --- a/mysql-test/include/rpl_reconnect.inc +++ b/mysql-test/include/rpl_reconnect.inc @@ -12,7 +12,6 @@ # ==== Usage ==== # # --let $rpl_server_number= N -# [--let $rpl_extra_connections_per_server= 1] # [--let $rpl_debug= 1] # --source include/rpl_reconnect.inc # @@ -22,7 +21,7 @@ # master server, 2 the slave server, 3 the 3rd server, and so on. # Cf. 
include/rpl_init.inc # -# $rpl_extra_connections_per_server, $rpl_debug +# $rpl_debug # See include/rpl_init.inc --let $include_filename= rpl_reconnect.inc @@ -33,11 +32,6 @@ if (!$rpl_server_number) --die ERROR IN TEST: you must set $rpl_server_number before you source rpl_connect.inc } -if ($_rpl_extra_connections_per_server == '') -{ - --let $_rpl_extra_connections_per_server= 1 -} - if ($rpl_debug) { @@ -78,14 +72,10 @@ if (!$_rpl_server_number) --source include/rpl_connection.inc --enable_reconnect ---let $_rpl_connection_number= 1 -while ($_rpl_connection_number <= $_rpl_extra_connections_per_server) -{ - --let $rpl_connection_name= server_$rpl_server_number$underscore$_rpl_connection_number - --source include/rpl_connection.inc - --enable_reconnect - --inc $_rpl_connection_number -} +--let $_rpl_one= _1 +--let $rpl_connection_name= server_$rpl_server_number$_rpl_one +--source include/rpl_connection.inc +--enable_reconnect if ($rpl_debug) { @@ -132,15 +122,10 @@ if (!$_rpl_server_number) --source include/wait_until_connected_again.inc --disable_reconnect ---let $_rpl_connection_number= 1 -while ($_rpl_connection_number <= $_rpl_extra_connections_per_server) -{ - --let $rpl_connection_name= server_$rpl_server_number$underscore$_rpl_connection_number - --source include/rpl_connection.inc - --source include/wait_until_connected_again.inc - --disable_reconnect - --inc $_rpl_connection_number -} +--let $rpl_connection_name= server_$rpl_server_number$_rpl_one +--source include/rpl_connection.inc +--source include/wait_until_connected_again.inc +--disable_reconnect --let $include_filename= rpl_reconnect.inc diff --git a/mysql-test/include/start_slave_sql.inc b/mysql-test/include/start_slave_sql.inc deleted file mode 100644 index 9cb66a2eb40..00000000000 --- a/mysql-test/include/start_slave_sql.inc +++ /dev/null @@ -1,39 +0,0 @@ -# ==== Purpose ==== -# -# Issues START SLAVE SQL_THREAD on the current connection. Then waits -# until the SQL thread has started, or until a timeout is reached. -# -# Please use this instead of 'START SLAVE SQL_THREAD', to reduce the -# risk of races in test cases. 
-# -# -# ==== Usage ==== -# -# [--let $slave_timeout= NUMBER] -# [--let $rpl_debug= 1] -# --source include/start_slave_sql.inc -# -# Parameters: -# $slave_timeout -# See include/wait_for_slave_param.inc -# -# $rpl_debug -# See include/rpl_init.inc - - ---let $include_filename= start_slave_sql.inc ---source include/begin_include_file.inc - - -if (!$rpl_debug) -{ - --disable_query_log -} - - -START SLAVE SQL_THREAD; ---source include/wait_for_slave_sql_to_start.inc - - ---let $include_filename= start_slave_sql.inc ---source include/end_include_file.inc diff --git a/mysql-test/suite/rpl/r/rpl_io_thd_wait_for_disk_space.result b/mysql-test/suite/rpl/r/rpl_io_thd_wait_for_disk_space.result deleted file mode 100644 index b11ad4f53bd..00000000000 --- a/mysql-test/suite/rpl/r/rpl_io_thd_wait_for_disk_space.result +++ /dev/null @@ -1,15 +0,0 @@ -include/master-slave.inc -[connection master] -CREATE TABLE t1(a INT); -INSERT INTO t1 VALUES(1); -CALL mtr.add_suppression("Disk is full writing"); -CALL mtr.add_suppression("Retry in 60 secs"); -include/stop_slave_sql.inc -SET @@GLOBAL.DEBUG= 'd,simulate_io_thd_wait_for_disk_space'; -INSERT INTO t1 VALUES(2); -SET DEBUG_SYNC='now WAIT_FOR parked'; -SET @@GLOBAL.DEBUG= '$debug_saved'; -include/assert_grep.inc [Found the disk full error message on the slave] -include/start_slave_sql.inc -DROP TABLE t1; -include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_io_thd_wait_for_disk_space.test b/mysql-test/suite/rpl/t/rpl_io_thd_wait_for_disk_space.test deleted file mode 100644 index 6076be60ebc..00000000000 --- a/mysql-test/suite/rpl/t/rpl_io_thd_wait_for_disk_space.test +++ /dev/null @@ -1,71 +0,0 @@ -# ==== Purpose ==== -# -# Check that the execution of SHOW SLAVE STATUS command is not blocked when IO -# thread is blocked waiting for disk space. -# -# ==== Implementation ==== -# -# Simulate a scenario where IO thread is waiting for disk space while writing -# into the relay log. Execute SHOW SLAVE STATUS command after IO thread is -# blocked waiting for space. The command should not be blocked. -# -# ==== References ==== -# -# Bug#21753696: MAKE SHOW SLAVE STATUS NON BLOCKING IF IO THREAD WAITS FOR -# DISK SPACE -# Bug#20685029: SLAVE IO THREAD SHOULD STOP WHEN DISK IS FULL -# -############################################################################### ---source include/have_debug.inc -# Inorder to grep a specific error pattern in error log a fresh error log -# needs to be generated. 
---source include/force_restart.inc ---source include/master-slave.inc - -# Generate events to be replicated to the slave -CREATE TABLE t1(a INT); -INSERT INTO t1 VALUES(1); ---sync_slave_with_master - -# Those errors will only happen in the slave -CALL mtr.add_suppression("Disk is full writing"); -CALL mtr.add_suppression("Retry in 60 secs"); - -# Stop the SQL thread to avoid writing on disk ---source include/stop_slave_sql.inc - -# Set the debug option that will simulate disk full ---let $debug_saved= `SELECT @@GLOBAL.DEBUG` -SET @@GLOBAL.DEBUG= 'd,simulate_io_thd_wait_for_disk_space'; - -# Generate events to be replicated to the slave ---connection master -INSERT INTO t1 VALUES(2); - ---connection slave1 -SET DEBUG_SYNC='now WAIT_FOR parked'; - -# Get the relay log file name using SHOW SLAVE STATUS ---let $relay_log_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) - ---connection slave -# Restore the debug options to "simulate" freed space on disk -SET @@GLOBAL.DEBUG= '$debug_saved'; - -# There should be a message in the error log of the slave stating -# that it was waiting for space to write on the relay log. ---let $assert_file=$MYSQLTEST_VARDIR/log/mysqld.2.err -# Grep only after the message that the I/O thread has started ---let $assert_only_after= Slave I/O .* connected to master .*replication started in log .* at position ---let $assert_count= 1 ---let $assert_select=Disk is full writing .*$relay_log_file.* ---let $assert_text= Found the disk full error message on the slave ---source include/assert_grep.inc - -# Start the SQL thread to let the slave to sync and finish gracefully ---source include/start_slave_sql.inc - -# Cleanup ---connection master -DROP TABLE t1; ---source include/rpl_end.inc diff --git a/mysys/errors.c b/mysys/errors.c index b6064460535..a6e2e300a1f 100644 --- a/mysys/errors.c +++ b/mysys/errors.c @@ -15,7 +15,7 @@ #include "mysys_priv.h" #include "mysys_err.h" -#include "m_string.h" + #ifndef SHARED_LIBRARY const char *globerrs[GLOBERRS]= @@ -109,7 +109,6 @@ void init_glob_errs() */ void wait_for_free_space(const char *filename, int errors) { - size_t time_to_sleep= MY_WAIT_FOR_USER_TO_FIX_PANIC; if (!(errors % MY_WAIT_GIVE_USER_A_MESSAGE)) { my_printf_warning(EE(EE_DISK_FULL), @@ -120,15 +119,10 @@ void wait_for_free_space(const char *filename, int errors) } DBUG_EXECUTE_IF("simulate_no_free_space_error", { - time_to_sleep= 1; - }); - DBUG_EXECUTE_IF("simulate_io_thd_wait_for_disk_space", - { - time_to_sleep= 1; + (void) sleep(1); + return; }); - - (void) sleep(time_to_sleep); - DEBUG_SYNC_C("disk_full_reached"); + (void) sleep(MY_WAIT_FOR_USER_TO_FIX_PANIC); } const char **get_global_errmsgs() diff --git a/mysys/my_write.c b/mysys/my_write.c index 2e68a4dcff3..f092420756e 100644 --- a/mysys/my_write.c +++ b/mysys/my_write.c @@ -24,7 +24,6 @@ size_t my_write(File Filedes, const uchar *Buffer, size_t Count, myf MyFlags) { size_t writtenbytes, written; uint errors; - size_t ToWriteCount; DBUG_ENTER("my_write"); DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %d", Filedes, Buffer, (ulong) Count, MyFlags)); @@ -38,14 +37,11 @@ size_t my_write(File Filedes, const uchar *Buffer, size_t Count, myf MyFlags) { DBUG_SET("+d,simulate_file_write_error");}); for (;;) { - ToWriteCount= Count; - DBUG_EXECUTE_IF("simulate_io_thd_wait_for_disk_space", { ToWriteCount= 1; }); #ifdef _WIN32 - writtenbytes= my_win_write(Filedes, Buffer, ToWriteCount); + writtenbytes= my_win_write(Filedes, Buffer, Count); #else - writtenbytes= write(Filedes, Buffer, 
ToWriteCount); + writtenbytes= write(Filedes, Buffer, Count); #endif - DBUG_EXECUTE_IF("simulate_io_thd_wait_for_disk_space", { errno= ENOSPC; }); DBUG_EXECUTE_IF("simulate_file_write_error", { errno= ENOSPC; diff --git a/sql/log.cc b/sql/log.cc index e0ba93b0959..50d7762af6d 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -37,7 +37,6 @@ #include "log_event.h" // Query_log_event #include "rpl_filter.h" #include "rpl_rli.h" -#include "rpl_mi.h" #include "sql_audit.h" #include "sql_show.h" @@ -4378,22 +4377,13 @@ end: } -#ifdef HAVE_REPLICATION -bool MYSQL_BIN_LOG::append(Log_event* ev, Master_info *mi) +bool MYSQL_BIN_LOG::append(Log_event* ev) { bool error = 0; - mysql_mutex_assert_owner(&mi->data_lock); mysql_mutex_lock(&LOCK_log); DBUG_ENTER("MYSQL_BIN_LOG::append"); DBUG_ASSERT(log_file.type == SEQ_READ_APPEND); - /* - Release data_lock by holding LOCK_log, while writing into the relay log. - If slave IO thread waits here for free space, we don't want - SHOW SLAVE STATUS to hang on mi->data_lock. Note LOCK_log mutex is - sufficient to block SQL thread when IO thread is updating relay log here. - */ - mysql_mutex_unlock(&mi->data_lock); /* Log_event::write() is smart enough to use my_b_write() or my_b_append() depending on the kind of cache we have. @@ -4408,58 +4398,24 @@ bool MYSQL_BIN_LOG::append(Log_event* ev, Master_info *mi) if (flush_and_sync(0)) goto err; if ((uint) my_b_append_tell(&log_file) > max_size) - { - /* - If rotation is required we must acquire data_lock to protect - description_event from clients executing FLUSH LOGS in parallel. - In order do that we must release the existing LOCK_log so that we - get it once again in proper locking order to avoid dead locks. - i.e data_lock , LOCK_log. - */ - mysql_mutex_unlock(&LOCK_log); - mysql_mutex_lock(&mi->data_lock); - mysql_mutex_lock(&LOCK_log); error= new_file_without_locking(); - /* - After rotation release data_lock, we need the LOCK_log till we signal - the updation. - */ - mysql_mutex_unlock(&mi->data_lock); - } err: - signal_update(); // Safe as we don't call close mysql_mutex_unlock(&LOCK_log); - mysql_mutex_lock(&mi->data_lock); + signal_update(); // Safe as we don't call close DBUG_RETURN(error); } -bool MYSQL_BIN_LOG::appendv(Master_info* mi, const char* buf, uint len,...) +bool MYSQL_BIN_LOG::appendv(const char* buf, uint len,...) { bool error= 0; DBUG_ENTER("MYSQL_BIN_LOG::appendv"); va_list(args); va_start(args,len); - mysql_mutex_assert_owner(&mi->data_lock); - mysql_mutex_lock(&LOCK_log); DBUG_ASSERT(log_file.type == SEQ_READ_APPEND); - /* - Release data_lock by holding LOCK_log, while writing into the relay log. - If slave IO thread waits here for free space, we don't want - SHOW SLAVE STATUS to hang on mi->data_lock. Note LOCK_log mutex is - sufficient to block SQL thread when IO thread is updating relay log here. - */ - mysql_mutex_unlock(&mi->data_lock); - DBUG_EXECUTE_IF("simulate_io_thd_wait_for_disk_space", - { - const char act[]= "disk_full_reached SIGNAL parked"; - DBUG_ASSERT(opt_debug_sync_timeout > 0); - DBUG_ASSERT(!debug_sync_set_action(current_thd, - STRING_WITH_LEN(act))); - };); - + mysql_mutex_assert_owner(&LOCK_log); do { if (my_b_append(&log_file,(uchar*) buf,len)) @@ -4472,34 +4428,13 @@ bool MYSQL_BIN_LOG::appendv(Master_info* mi, const char* buf, uint len,...) 
DBUG_PRINT("info",("max_size: %lu",max_size)); if (flush_and_sync(0)) goto err; - if ((uint) my_b_append_tell(&log_file) > - DBUG_EVALUATE_IF("rotate_slave_debug_group", 500, max_size)) - { - /* - If rotation is required we must acquire data_lock to protect - description_event from clients executing FLUSH LOGS in parallel. - In order do that we must release the existing LOCK_log so that we - get it once again in proper locking order to avoid dead locks. - i.e data_lock , LOCK_log. - */ - mysql_mutex_unlock(&LOCK_log); - mysql_mutex_lock(&mi->data_lock); - mysql_mutex_lock(&LOCK_log); + if ((uint) my_b_append_tell(&log_file) > max_size) error= new_file_without_locking(); - /* - After rotation release data_lock, we need the LOCK_log till we signal - the updation. - */ - mysql_mutex_unlock(&mi->data_lock); - } err: if (!error) signal_update(); - mysql_mutex_unlock(&LOCK_log); - mysql_mutex_lock(&mi->data_lock); DBUG_RETURN(error); } -#endif bool MYSQL_BIN_LOG::flush_and_sync(bool *synced) { diff --git a/sql/log.h b/sql/log.h index dd09cb41026..b5e751386a6 100644 --- a/sql/log.h +++ b/sql/log.h @@ -20,7 +20,6 @@ #include "handler.h" /* my_xid */ class Relay_log_info; -class Master_info; class Format_description_log_event; @@ -455,8 +454,8 @@ public: v stands for vector invoked as appendv(buf1,len1,buf2,len2,...,bufn,lenn,0) */ - bool appendv(Master_info* mi, const char* buf,uint len,...); - bool append(Log_event* ev, Master_info* mi); + bool appendv(const char* buf,uint len,...); + bool append(Log_event* ev); void make_log_name(char* buf, const char* log_ident); bool is_active(const char* log_file_name); diff --git a/sql/slave.cc b/sql/slave.cc index 31037c453d3..acf68e231f3 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1660,7 +1660,7 @@ Waiting for the slave SQL thread to free enough relay log space"); #endif if (rli->sql_force_rotate_relay) { - rotate_relay_log(rli->mi, true/*need_data_lock=true*/); + rotate_relay_log(rli->mi); rli->sql_force_rotate_relay= false; } @@ -1705,7 +1705,7 @@ static void write_ignored_events_info_to_relay_log(THD *thd, Master_info *mi) if (likely((bool)ev)) { ev->server_id= 0; // don't be ignored by slave SQL thread - if (unlikely(rli->relay_log.append(ev, mi))) + if (unlikely(rli->relay_log.append(ev))) mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), "failed to write a Rotate event" @@ -3605,7 +3605,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) break; Execute_load_log_event xev(thd,0,0); xev.log_pos = cev->log_pos; - if (unlikely(mi->rli.relay_log.append(&xev, mi))) + if (unlikely(mi->rli.relay_log.append(&xev))) { mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), @@ -3619,7 +3619,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) { cev->block = net->read_pos; cev->block_len = num_bytes; - if (unlikely(mi->rli.relay_log.append(cev, mi))) + if (unlikely(mi->rli.relay_log.append(cev))) { mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), @@ -3634,7 +3634,7 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev) aev.block = net->read_pos; aev.block_len = num_bytes; aev.log_pos = cev->log_pos; - if (unlikely(mi->rli.relay_log.append(&aev, mi))) + if (unlikely(mi->rli.relay_log.append(&aev))) { mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, ER(ER_SLAVE_RELAY_LOG_WRITE_FAILURE), @@ -3713,7 +3713,7 @@ static int 
process_io_rotate(Master_info *mi, Rotate_log_event *rev) Rotate the relay log makes binlog format detection easier (at next slave start or mysqlbinlog) */ - DBUG_RETURN(rotate_relay_log(mi, false/*need_data_lock=false*/)); + DBUG_RETURN(rotate_relay_log(mi) /* will take the right mutexes */); } /* @@ -3819,7 +3819,7 @@ static int queue_binlog_ver_1_event(Master_info *mi, const char *buf, Log_event::Log_event(const char* buf...) in log_event.cc). */ ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */ - if (unlikely(rli->relay_log.append(ev, mi))) + if (unlikely(rli->relay_log.append(ev))) { delete ev; mysql_mutex_unlock(&mi->data_lock); @@ -3875,7 +3875,7 @@ static int queue_binlog_ver_3_event(Master_info *mi, const char *buf, inc_pos= event_len; break; } - if (unlikely(rli->relay_log.append(ev, mi))) + if (unlikely(rli->relay_log.append(ev))) { delete ev; mysql_mutex_unlock(&mi->data_lock); @@ -4083,6 +4083,7 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) direct master (an unsupported, useless setup!). */ + mysql_mutex_lock(log_lock); s_id= uint4korr(buf + SERVER_ID_OFFSET); if ((s_id == ::server_id && !mi->rli.replicate_same_server_id) || /* @@ -4115,7 +4116,6 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) IGNORE_SERVER_IDS it increments mi->master_log_pos as well as rli->group_relay_log_pos. */ - mysql_mutex_lock(log_lock); if (!(s_id == ::server_id && !mi->rli.replicate_same_server_id) || (buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT && buf[EVENT_TYPE_OFFSET] != ROTATE_EVENT && @@ -4127,14 +4127,13 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) rli->ign_master_log_pos_end= mi->master_log_pos; } rli->relay_log.signal_update(); // the slave SQL thread needs to re-check - mysql_mutex_unlock(log_lock); DBUG_PRINT("info", ("master_log_pos: %lu, event originating from %u server, ignored", (ulong) mi->master_log_pos, uint4korr(buf + SERVER_ID_OFFSET))); } else { /* write the event to the relay log */ - if (likely(!(rli->relay_log.appendv(mi, buf,event_len,0)))) + if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); @@ -4144,10 +4143,9 @@ static int queue_event(Master_info* mi,const char* buf, ulong event_len) { error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE; } - mysql_mutex_lock(log_lock); rli->ign_master_log_name_end[0]= 0; // last event is not ignored - mysql_mutex_unlock(log_lock); } + mysql_mutex_unlock(log_lock); skip_relay_logging: @@ -5007,21 +5005,11 @@ err: locks; here we don't, so this function is mainly taking locks). Returns nothing as we cannot catch any error (MYSQL_BIN_LOG::new_file() is void). - - @param mi Master_info for the IO thread. - @param need_data_lock If true, mi->data_lock will be acquired otherwise, - mi->data_lock must be held by the caller. 
*/ -int rotate_relay_log(Master_info* mi, bool need_data_lock) +int rotate_relay_log(Master_info* mi) { DBUG_ENTER("rotate_relay_log"); - if (need_data_lock) - mysql_mutex_lock(&mi->data_lock); - else - { - mysql_mutex_assert_owner(&mi->data_lock); - } Relay_log_info* rli= &mi->rli; int error= 0; @@ -5056,8 +5044,6 @@ int rotate_relay_log(Master_info* mi, bool need_data_lock) */ rli->relay_log.harvest_bytes_written(&rli->log_space_total); end: - if (need_data_lock) - mysql_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); } diff --git a/sql/slave.h b/sql/slave.h index 0cf8adb0315..7bf136694cc 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -205,7 +205,7 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset, const char** errmsg); void set_slave_thread_options(THD* thd); void set_slave_thread_default_charset(THD *thd, Relay_log_info const *rli); -int rotate_relay_log(Master_info* mi, bool need_data_lock); +int rotate_relay_log(Master_info* mi); int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli); pthread_handler_t handle_slave_io(void *arg); diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index f24f31b6399..b29cc9a9433 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -157,7 +157,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long options, { #ifdef HAVE_REPLICATION mysql_mutex_lock(&LOCK_active_mi); - if (rotate_relay_log(active_mi, true/*need_data_lock=true*/)) + if (rotate_relay_log(active_mi)) *write_to_binlog= -1; mysql_mutex_unlock(&LOCK_active_mi); #endif -- cgit v1.2.1 From 3b47632bfc74a548c2f0a057f39e99a8a761a57a Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Sat, 14 May 2016 23:33:50 +0300 Subject: Fixed a bug that caused crashes for SHOW CREATE VIEW when was recursive. Added a test case to check the fix. 
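Roughly the scenario exercised by the new test, assuming the folks table from cte_recursive.test: a view whose body is a recursive CTE with two recursive members is created, and SHOW CREATE VIEW is expected to print the stored "with recursive" definition instead of crashing.

create view v2 as
with recursive ancestors as
(
  select * from folks where name = 'Vasya'
  union
  select p.* from folks as p, ancestors as fa where p.id = fa.father
  union
  select p.* from folks as p, ancestors as ma where p.id = ma.mother
)
select * from ancestors;

# must print the view definition with its leading "with recursive" clause
show create view v2;
select * from v2;
drop view v2;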
--- mysql-test/r/cte_recursive.result | 42 +++++++++++++++++++++++++++++++++------ mysql-test/t/cte_recursive.test | 34 +++++++++++++++++++++++++------ sql/sql_cte.cc | 8 ++++++-- sql/sql_union.cc | 4 ++-- 4 files changed, 72 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 77b391d6629..aa9ed3fc28c 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -239,15 +239,15 @@ as ( select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Vasya' union select p.* -from folks as p, ancestors AS a -where p.id = a.father +from folks as p, ancestors as fa +where p.id = fa.father union select p.* -from folks as p, ancestors AS a -where p.id = a.mother +from folks as p, ancestors as ma +where p.id = ma.mother ) select * from ancestors; id name dob father mother @@ -346,7 +346,37 @@ id name dob father mother 9 Grandma Ann 1941-10-15 NULL NULL 8 Grandma Sally 1943-08-23 5 6 6 Grandgrandma Martha 1923-05-17 NULL NULL -drop view v1; +create view v2 as +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' + union +select p.* +from folks as p, ancestors as fa +where p.id = fa.father +union +select p.* +from folks as p, ancestors as ma +where p.id = ma.mother +) +select * from ancestors; +show create view v2; +View Create View character_set_client collation_connection +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where (`folks`.`name` = 'Vasya') union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `fa`) where (`p`.`id` = `fa`.`father`) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `ma`) where (`p`.`id` = `ma`.`mother`))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci +select * from v2; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +9 Grandma Ann 1941-10-15 NULL NULL +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandma Sally 1943-08-23 5 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +drop view v1,v2; explain extended with recursive ancestors diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 47eae971c6d..4982bf78adb 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -199,15 +199,15 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Vasya' union select p.* - from folks as p, ancestors AS a - where p.id = a.father + from folks as p, ancestors as fa + where p.id = fa.father union select p.* - from folks as p, ancestors AS a - where p.id = a.mother + from folks as p, ancestors as ma + where p.id = ma.mother ) select * from ancestors; @@ -279,8 +279,30 @@ show create view v1; select * from v1; -drop view v1; +create view v2 as +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' + union + select p.* + from folks as p, ancestors as fa + where p.id = fa.father + union + select p.* + from folks as p, ancestors as ma 
+ where p.id = ma.mother +) +select * from ancestors; + +show create view v2; + +select * from v2; +drop view v1,v2; explain extended with recursive diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 7e60a8d1892..ffc54f50af1 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -330,8 +330,10 @@ With_element *With_clause::find_table_def(TABLE_LIST *table) with_elem != NULL; with_elem= with_elem->next_elem) { - if (my_strcasecmp(system_charset_info, with_elem->query_name->str, table->table_name) == 0) + if (my_strcasecmp(system_charset_info, with_elem->query_name->str, + table->table_name) == 0) { + table->set_derived(); return with_elem; } } @@ -740,6 +742,8 @@ bool st_select_lex::check_unrestricted_recursive() encountered)) return true; with_elem->owner->unrestricted|= unrestricted; + if (with_sum_func) + with_elem->owner->unrestricted|= with_elem->mutually_recursive; return false; } @@ -813,7 +817,7 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, { if (tab->outer_join & (JOIN_TYPE_LEFT | JOIN_TYPE_RIGHT)) { - unrestricted|= get_elem_map(); + unrestricted|= mutually_recursive; break; } } diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 902620aaac1..9308dc2a841 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -205,13 +205,13 @@ select_union_recursive::create_result_table(THD *thd_arg, { if (select_union::create_result_table(thd_arg, column_types, is_union_distinct, options, - alias, bit_fields_as_long, + "", bit_fields_as_long, create_table, keep_row_order)) return true; if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, (ORDER*) 0, false, 1, - options, HA_POS_ERROR, alias, + options, HA_POS_ERROR, "", !create_table, keep_row_order))) return true; -- cgit v1.2.1 From cb2974156823977fd2c700c64ff0867183b3f744 Mon Sep 17 00:00:00 2001 From: Shishir Jaiswal Date: Mon, 16 May 2016 13:46:49 +0530 Subject: Bug#21977380 - POSSIBLE BUFFER OVERFLOW ISSUES DESCRIPTION =========== Buffer overflow is reported in a lot of code sections spanning across server, client programs, Regex libraries etc. If not handled appropriately, they can cause abnormal behaviour. ANALYSIS ======== The reported casea are the ones which are likely to result in SEGFAULT, MEMORY LEAK etc. FIX === - sprintf() has been replaced by my_snprintf() to avoid buffer overflow. - my_free() is done after checking if the pointer isn't NULL already and setting it to NULL thereafter at few places. - Buffer is ensured to be large enough to hold the data. - 'unsigned int' (aka 'uint') is replaced with 'size_t' to avoid wraparound. - Memory is freed (if not done so) after its alloced and used. 
- Inserted assert() for size check in InnoDb memcached code (from 5.6 onwards) - Other minor changes --- client/mysqlcheck.c | 42 ++++++++++++++++++++++++++---------------- client/mysqldump.c | 49 +++++++++++++++++++++++++++---------------------- client/mysqlshow.c | 36 +++++++++++++++++++----------------- extra/yassl/src/log.cpp | 4 ++-- regex/split.c | 4 ++++ 5 files changed, 78 insertions(+), 57 deletions(-) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index a564e871281..55b941e7f1a 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -213,13 +213,13 @@ static int process_selected_tables(char *db, char **table_names, int tables); static int process_all_tables_in_db(char *database); static int process_one_db(char *database); static int use_db(char *database); -static int handle_request_for_tables(char *tables, uint length); +static int handle_request_for_tables(char *tables, size_t length); static int dbConnect(char *host, char *user,char *passwd); static void dbDisconnect(char *host); static void DBerror(MYSQL *mysql, const char *when); static void safe_exit(int error); static void print_result(); -static uint fixed_name_length(const char *name); +static size_t fixed_name_length(const char *name); static char *fix_table_name(char *dest, char *src); int what_to_do = 0; @@ -486,7 +486,7 @@ static int process_selected_tables(char *db, char **table_names, int tables) *end++= ','; } *--end = 0; - handle_request_for_tables(table_names_comma_sep + 1, (uint) (tot_length - 1)); + handle_request_for_tables(table_names_comma_sep + 1, tot_length - 1); my_free(table_names_comma_sep); } else @@ -496,10 +496,10 @@ static int process_selected_tables(char *db, char **table_names, int tables) } /* process_selected_tables */ -static uint fixed_name_length(const char *name) +static size_t fixed_name_length(const char *name) { const char *p; - uint extra_length= 2; /* count the first/last backticks */ + size_t extra_length= 2; /* count the first/last backticks */ for (p= name; *p; p++) { @@ -508,7 +508,7 @@ static uint fixed_name_length(const char *name) else if (*p == '.') extra_length+= 2; } - return (uint) ((p - name) + extra_length); + return (size_t) ((p - name) + extra_length); } @@ -564,7 +564,7 @@ static int process_all_tables_in_db(char *database) */ char *tables, *end; - uint tot_length = 0; + size_t tot_length = 0; while ((row = mysql_fetch_row(res))) tot_length+= fixed_name_length(row[0]) + 2; @@ -622,7 +622,9 @@ static int fix_table_storage_name(const char *name) int rc= 0; if (strncmp(name, "#mysql50#", 9)) return 1; - sprintf(qbuf, "RENAME TABLE `%s` TO `%s`", name, name + 9); + my_snprintf(qbuf, sizeof(qbuf), "RENAME TABLE `%s` TO `%s`", + name, name + 9); + rc= run_query(qbuf); if (verbose) printf("%-50s %s\n", name, rc ? "FAILED" : "OK"); @@ -635,7 +637,8 @@ static int fix_database_storage_name(const char *name) int rc= 0; if (strncmp(name, "#mysql50#", 9)) return 1; - sprintf(qbuf, "ALTER DATABASE `%s` UPGRADE DATA DIRECTORY NAME", name); + my_snprintf(qbuf, sizeof(qbuf), "ALTER DATABASE `%s` UPGRADE DATA DIRECTORY " + "NAME", name); rc= run_query(qbuf); if (verbose) printf("%-50s %s\n", name, rc ? 
"FAILED" : "OK"); @@ -653,7 +656,7 @@ static int rebuild_table(char *name) ptr= strmov(query, "ALTER TABLE "); ptr= fix_table_name(ptr, name); ptr= strxmov(ptr, " FORCE", NullS); - if (mysql_real_query(sock, query, (uint)(ptr - query))) + if (mysql_real_query(sock, query, (ulong)(ptr - query))) { fprintf(stderr, "Failed to %s\n", query); fprintf(stderr, "Error: %s\n", mysql_error(sock)); @@ -702,10 +705,10 @@ static int disable_binlog() return run_query(stmt); } -static int handle_request_for_tables(char *tables, uint length) +static int handle_request_for_tables(char *tables, size_t length) { char *query, *end, options[100], message[100]; - uint query_length= 0; + size_t query_length= 0, query_size= sizeof(char)*(length+110); const char *op = 0; options[0] = 0; @@ -736,10 +739,14 @@ static int handle_request_for_tables(char *tables, uint length) return fix_table_storage_name(tables); } - if (!(query =(char *) my_malloc((sizeof(char)*(length+110)), MYF(MY_WME)))) + if (!(query =(char *) my_malloc(query_size, MYF(MY_WME)))) + { return 1; + } if (opt_all_in_1) { + DBUG_ASSERT(strlen(op)+strlen(tables)+strlen(options)+8+1 <= query_size); + /* No backticks here as we added them before */ query_length= sprintf(query, "%s TABLE %s %s", op, tables, options); } @@ -750,7 +757,7 @@ static int handle_request_for_tables(char *tables, uint length) ptr= strmov(strmov(query, op), " TABLE "); ptr= fix_table_name(ptr, tables); ptr= strxmov(ptr, " ", options, NullS); - query_length= (uint) (ptr - query); + query_length= (size_t) (ptr - query); } if (mysql_real_query(sock, query, query_length)) { @@ -834,7 +841,10 @@ static void print_result() prev_alter[0]= 0; } else - strcpy(prev_alter, alter_txt); + { + strncpy(prev_alter, alter_txt, MAX_ALTER_STR_SIZE-1); + prev_alter[MAX_ALTER_STR_SIZE-1]= 0; + } } } } @@ -978,7 +988,7 @@ int main(int argc, char **argv) process_databases(argv); if (opt_auto_repair) { - uint i; + size_t i; if (!opt_silent && (tables4repair.elements || tables4rebuild.elements)) puts("\nRepairing tables"); diff --git a/client/mysqldump.c b/client/mysqldump.c index 6c4fec313c5..00265def489 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -86,7 +86,7 @@ static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); -static ulong find_set(TYPELIB *lib, const char *x, uint length, +static ulong find_set(TYPELIB *lib, const char *x, size_t length, char **err_pos, uint *err_len); static char *alloc_query_str(ulong size); @@ -852,7 +852,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), opt_set_charset= 0; opt_compatible_mode_str= argument; opt_compatible_mode= find_set(&compatible_mode_typelib, - argument, (uint) strlen(argument), + argument, strlen(argument), &err_ptr, &err_len); if (err_len) { @@ -862,7 +862,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } #if !defined(DBUG_OFF) { - uint size_for_sql_mode= 0; + size_t size_for_sql_mode= 0; const char **ptr; for (ptr= compatible_mode_names; *ptr; ptr++) size_for_sql_mode+= strlen(*ptr); @@ -1138,8 +1138,8 @@ static int fetch_db_collation(const char *db_name, break; } - strncpy(db_cl_name, db_cl_row[0], db_cl_size); - db_cl_name[db_cl_size - 1]= 0; /* just in case. 
*/ + strncpy(db_cl_name, db_cl_row[0], db_cl_size-1); + db_cl_name[db_cl_size - 1]= 0; } while (FALSE); @@ -1150,7 +1150,7 @@ static int fetch_db_collation(const char *db_name, static char *my_case_str(const char *str, - uint str_len, + size_t str_len, const char *token, uint token_len) { @@ -1366,7 +1366,7 @@ static int switch_character_set_results(MYSQL *mysql, const char *cs_name) */ static char *cover_definer_clause(const char *stmt_str, - uint stmt_length, + size_t stmt_length, const char *definer_version_str, uint definer_version_length, const char *stmt_version_str, @@ -1548,14 +1548,14 @@ static void dbDisconnect(char *host) } /* dbDisconnect */ -static void unescape(FILE *file,char *pos,uint length) +static void unescape(FILE *file,char *pos, size_t length) { char *tmp; DBUG_ENTER("unescape"); if (!(tmp=(char*) my_malloc(length*2+1, MYF(MY_WME)))) die(EX_MYSQLERR, "Couldn't allocate memory"); - mysql_real_escape_string(&mysql_connection, tmp, pos, length); + mysql_real_escape_string(&mysql_connection, tmp, pos, (ulong)length); fputc('\'', file); fputs(tmp, file); fputc('\'', file); @@ -1669,7 +1669,7 @@ static char *quote_for_like(const char *name, char *buff) Quote '<' '>' '&' '\"' chars and print a string to the xml_file. */ -static void print_quoted_xml(FILE *xml_file, const char *str, ulong len, +static void print_quoted_xml(FILE *xml_file, const char *str, size_t len, my_bool is_attribute_name) { const char *end; @@ -1928,7 +1928,7 @@ static void print_xml_row(FILE *xml_file, const char *row_name, squeezed to a single hyphen. */ -static void print_xml_comment(FILE *xml_file, ulong len, +static void print_xml_comment(FILE *xml_file, size_t len, const char *comment_string) { const char* end; @@ -2045,7 +2045,7 @@ static uint dump_events_for_db(char *db) DBUG_ENTER("dump_events_for_db"); DBUG_PRINT("enter", ("db: '%s'", db)); - mysql_real_escape_string(mysql, db_name_buff, db, strlen(db)); + mysql_real_escape_string(mysql, db_name_buff, db, (ulong)strlen(db)); /* nice comments */ print_comment(sql_file, 0, @@ -2164,6 +2164,11 @@ static uint dump_events_for_db(char *db) (const char *) (query_str != NULL ? query_str : row[3]), (const char *) delimiter); + if(query_str) + { + my_free(query_str); + query_str= NULL; + } restore_time_zone(sql_file, delimiter); restore_sql_mode(sql_file, delimiter); @@ -2257,7 +2262,7 @@ static uint dump_routines_for_db(char *db) DBUG_ENTER("dump_routines_for_db"); DBUG_PRINT("enter", ("db: '%s'", db)); - mysql_real_escape_string(mysql, db_name_buff, db, strlen(db)); + mysql_real_escape_string(mysql, db_name_buff, db, (ulong)strlen(db)); /* nice comments */ print_comment(sql_file, 0, @@ -2311,9 +2316,9 @@ static uint dump_routines_for_db(char *db) if the user has EXECUTE privilege he see routine names, but NOT the routine body of other routines that are not the creator of! */ - DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d", + DBUG_PRINT("info",("length of body for %s row[2] '%s' is %zu", routine_name, row[2] ? row[2] : "(null)", - row[2] ? (int) strlen(row[2]) : 0)); + row[2] ? 
strlen(row[2]) : 0)); if (row[2] == NULL) { print_comment(sql_file, 1, "\n-- insufficient privileges to %s\n", @@ -3873,7 +3878,7 @@ static int dump_tablespaces_for_tables(char *db, char **table_names, int tables) int i; char name_buff[NAME_LEN*2+3]; - mysql_real_escape_string(mysql, name_buff, db, strlen(db)); + mysql_real_escape_string(mysql, name_buff, db, (ulong)strlen(db)); init_dynamic_string_checked(&where, " AND TABLESPACE_NAME IN (" "SELECT DISTINCT TABLESPACE_NAME FROM" @@ -3886,7 +3891,7 @@ static int dump_tablespaces_for_tables(char *db, char **table_names, int tables) for (i=0 ; imax_length) length=field->max_length; @@ -500,7 +501,8 @@ static int list_tables(MYSQL *mysql,const char *db,const char *table) { const char *header; - uint head_length, counter = 0; + size_t head_length; + uint counter = 0; char query[NAME_LEN + 100], rows[NAME_LEN], fields[16]; MYSQL_FIELD *field; MYSQL_RES *result; @@ -537,7 +539,7 @@ list_tables(MYSQL *mysql,const char *db,const char *table) putchar('\n'); header="Tables"; - head_length=(uint) strlen(header); + head_length= strlen(header); field=mysql_fetch_field(result); if (head_length < field->max_length) head_length=field->max_length; @@ -766,10 +768,10 @@ list_fields(MYSQL *mysql,const char *db,const char *table, *****************************************************************************/ static void -print_header(const char *header,uint head_length,...) +print_header(const char *header,size_t head_length,...) { va_list args; - uint length,i,str_length,pre_space; + size_t length,i,str_length,pre_space; const char *field; va_start(args,head_length); @@ -792,10 +794,10 @@ print_header(const char *header,uint head_length,...) putchar('|'); for (;;) { - str_length=(uint) strlen(field); + str_length= strlen(field); if (str_length > length) str_length=length+1; - pre_space=(uint) (((int) length-(int) str_length)/2)+1; + pre_space= ((length- str_length)/2)+1; for (i=0 ; i < pre_space ; i++) putchar(' '); for (i = 0 ; i < str_length ; i++) @@ -829,11 +831,11 @@ print_header(const char *header,uint head_length,...) static void -print_row(const char *header,uint head_length,...) +print_row(const char *header,size_t head_length,...) { va_list args; const char *field; - uint i,length,field_length; + size_t i,length,field_length; va_start(args,head_length); field=header; length=head_length; @@ -842,7 +844,7 @@ print_row(const char *header,uint head_length,...) putchar('|'); putchar(' '); fputs(field,stdout); - field_length=(uint) strlen(field); + field_length= strlen(field); for (i=field_length ; i <= length ; i++) putchar(' '); if (!(field=va_arg(args,char *))) @@ -856,10 +858,10 @@ print_row(const char *header,uint head_length,...) static void -print_trailer(uint head_length,...) +print_trailer(size_t head_length,...) { va_list args; - uint length,i; + size_t length,i; va_start(args,head_length); length=head_length; @@ -902,7 +904,7 @@ static void print_res_top(MYSQL_RES *result) mysql_field_seek(result,0); while((field = mysql_fetch_field(result))) { - if ((length=(uint) strlen(field->name)) > field->max_length) + if ((length= strlen(field->name)) > field->max_length) field->max_length=length; else length=field->max_length; diff --git a/extra/yassl/src/log.cpp b/extra/yassl/src/log.cpp index 13c68295747..2f112ac35f9 100644 --- a/extra/yassl/src/log.cpp +++ b/extra/yassl/src/log.cpp @@ -1,6 +1,5 @@ /* - Copyright (C) 2000-2007 MySQL AB - Use is subject to license terms + Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -61,6 +60,7 @@ namespace yaSSL { time_t clicks = time(0); char timeStr[32]; + memset(timeStr, 0, sizeof(timeStr)); // get rid of newline strncpy(timeStr, ctime(&clicks), sizeof(timeStr)); unsigned int len = strlen(timeStr); diff --git a/regex/split.c b/regex/split.c index bd2a53c01e3..a3a11f793ed 100644 --- a/regex/split.c +++ b/regex/split.c @@ -163,6 +163,10 @@ char *argv[]; } else if (argc > 3) for (n = atoi(argv[3]); n > 0; n--) { + if(sizeof(buf)-1 < strlen(argv[1])) + { + exit(EXIT_FAILURE); + } (void) strcpy(buf, argv[1]); (void) split(buf, fields, MNF, argv[2]); } -- cgit v1.2.1 From 90b9c957ba6380a717aaef6285b3f1498f4a29dc Mon Sep 17 00:00:00 2001 From: Karthik Kamath Date: Wed, 18 May 2016 11:07:29 +0530 Subject: BUG#21142859: FUNCTION UPDATING A VIEW FAILS TO FIND TABLE THAT ACTUALLY EXISTS ANALYSIS: ========= Stored functions updating a view where the view table has a trigger defined that updates another table, fails reporting an error that the table doesn't exist. If there is a trigger defined on a table, a variable 'trg_event_map' will be set to a non-zero value after the parsed tree creation. This indicates what triggers we need to pre-load for the TABLE_LIST when opening an associated table. During the prelocking phase, the variable 'trg_event_map' will not be set for the view table. This value will be set after the processing of triggers defined on the table. During the processing of sub-statements, 'locked_tables_mode' will be set to 'LTM_PRELOCKED' which denotes that further locking of tables/functions cannot be done. This results in the other table not being locked and thus further processing results in an error getting reported. FIX: ==== During the prelocking of view, the value of 'trg_event_map' of the view is copied to 'trg_event_map' of the next table in the TABLE_LIST. This results in the locking of tables associated with the trigger as well. --- mysql-test/r/sp-prelocking.result | 20 ++++++++++++++++++++ mysql-test/t/sp-prelocking.test | 26 ++++++++++++++++++++++++++ sql/sql_base.cc | 11 ++++++++++- 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/sp-prelocking.result b/mysql-test/r/sp-prelocking.result index 186b2c05d34..ac48459b0f2 100644 --- a/mysql-test/r/sp-prelocking.result +++ b/mysql-test/r/sp-prelocking.result @@ -320,3 +320,23 @@ c2 DROP TRIGGER t1_ai; DROP TABLE t1, t2; End of 5.0 tests +# +# Bug#21142859: FUNCTION UPDATING A VIEW FAILS TO FIND TABLE THAT ACTUALLY EXISTS +# +CREATE TABLE t1 SELECT 1 AS fld1, 'A' AS fld2; +CREATE TABLE t2 (fld3 INT, fld4 CHAR(1)); +CREATE VIEW v1 AS SELECT * FROM t1; +CREATE TRIGGER t1_au AFTER UPDATE ON t1 +FOR EACH ROW INSERT INTO t2 VALUES (new.fld1, new.fld2); +CREATE FUNCTION f1() RETURNS INT +BEGIN +UPDATE v1 SET fld2='B' WHERE fld1=1; +RETURN row_count(); +END ! +# Without the patch, an error was getting reported. 
+SELECT f1(); +f1() +1 +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1,t2; diff --git a/mysql-test/t/sp-prelocking.test b/mysql-test/t/sp-prelocking.test index 966c59a5789..c1378d59196 100644 --- a/mysql-test/t/sp-prelocking.test +++ b/mysql-test/t/sp-prelocking.test @@ -388,3 +388,29 @@ DROP TABLE t1, t2; --echo End of 5.0 tests +--echo # +--echo # Bug#21142859: FUNCTION UPDATING A VIEW FAILS TO FIND TABLE THAT ACTUALLY EXISTS +--echo # + +CREATE TABLE t1 SELECT 1 AS fld1, 'A' AS fld2; +CREATE TABLE t2 (fld3 INT, fld4 CHAR(1)); + +CREATE VIEW v1 AS SELECT * FROM t1; + +CREATE TRIGGER t1_au AFTER UPDATE ON t1 +FOR EACH ROW INSERT INTO t2 VALUES (new.fld1, new.fld2); + +DELIMITER !; +CREATE FUNCTION f1() RETURNS INT +BEGIN + UPDATE v1 SET fld2='B' WHERE fld1=1; + RETURN row_count(); +END ! +DELIMITER ;! + +--echo # Without the patch, an error was getting reported. +SELECT f1(); + +DROP FUNCTION f1; +DROP VIEW v1; +DROP TABLE t1,t2; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index f559974c86b..27dcbee7b8f 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -5211,6 +5211,15 @@ handle_view(THD *thd, Query_tables_list *prelocking_ctx, &table_list->view->sroutines_list, table_list->top_table()); } + + /* + If a trigger was defined on one of the associated tables then assign the + 'trg_event_map' value of the view to the next table in table_list. When a + Stored function is invoked, all the associated tables including the tables + associated with the trigger are prelocked. + */ + if (table_list->trg_event_map && table_list->next_global) + table_list->next_global->trg_event_map= table_list->trg_event_map; return FALSE; } -- cgit v1.2.1 From 8281068f72385fb559906ca14f29bd3871b8830d Mon Sep 17 00:00:00 2001 From: Balasubramanian Kandasamy Date: Wed, 18 May 2016 17:23:16 +0530 Subject: BUG#21879694 - /VAR/LOG/MYSQLD.LOG HAS INCORRECT PERMISSIONS AFTER INSTALLING SERVER FROM REPO Description: This issue doesn't effect any default installation of repo rpms if user uses init scripts that are shipped as part of package but will have trouble if user tries to createdb or start server manually. After installing mysql-server from repository(yum,zypper) /var/log/mysqld.log is created with logged in user and group permissions instead of with mysql user and group permissions,due to which while creating database or starting server, it is failing Fix: Updated the user and group permissions of the /var/log/mysqld.log and /var/log/mysql/mysqld.log (for sles) files to mysql. --- packaging/rpm-oel/mysql.spec.in | 3 ++- packaging/rpm-sles/mysql.spec.in | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in index 8a020b05ae6..8f92f5b84f3 100644 --- a/packaging/rpm-oel/mysql.spec.in +++ b/packaging/rpm-oel/mysql.spec.in @@ -1,4 +1,4 @@ -# Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. 
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -629,6 +629,7 @@ rm -r $(readlink var) var datadir=$(/usr/bin/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p' | tail -n 1) /bin/chmod 0755 "$datadir" /bin/touch /var/log/mysqld.log +/bin/chown mysql:mysql /var/log/mysqld.log >/dev/null 2>&1 || : %if 0%{?systemd} %systemd_post mysqld.service /sbin/service mysqld enable >/dev/null 2>&1 || : diff --git a/packaging/rpm-sles/mysql.spec.in b/packaging/rpm-sles/mysql.spec.in index 91c708a583d..47c40b00e23 100644 --- a/packaging/rpm-sles/mysql.spec.in +++ b/packaging/rpm-sles/mysql.spec.in @@ -1,4 +1,4 @@ -# Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -492,6 +492,7 @@ rm -r $(readlink var) var datadir=$(/usr/bin/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p' | tail -n 1) /bin/chmod 0755 "$datadir" /bin/touch /var/log/mysql/mysqld.log +/bin/chown mysql:mysql /var/log/mysql/mysqld.log >/dev/null 2>&1 || : %if 0%{?systemd} %systemd_post mysqld.service /sbin/service mysqld enable >/dev/null 2>&1 || : -- cgit v1.2.1 From 46a2e4139830d176c31be1c53e533167ea4f95b9 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Thu, 19 May 2016 22:07:53 +0300 Subject: Fixed many problems in the code of With_element::check_unrestricted_recursive(). Added the check whether there are set functions in the specifications of recursive CTE. Added the check whether there are recursive references in subqueries. Introduced boolean system variable 'standards_compliant_cte'. By default it's set to 'on'. When it's set to 'off' non-standard compliant CTE can be executed. 
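The new variable can be switched off either for the whole session or, as the tests below do, for a single statement via SET STATEMENT. A rough usage sketch follows; the edges table and the reach CTE are made up for illustration, while standards_compliant_cte, the SET STATEMENT ... FOR syntax and the ER_NOT_STANDARDS_COMPLIANT_RECURSIVE error come from this patch:

create table edges (a int, b int);
insert into edges values (1,2), (2,3), (3,4);
-- the recursive part refers to reach from a subquery, which the default
-- standards_compliant_cte=1 rejects with
-- "Restrictions imposed on recursive definitions are violated for table 'reach'"
set statement standards_compliant_cte=0 for
with recursive reach(n) as
(
  select 1
  union
  select e.b from edges e, reach
  where e.a = reach.n and e.b not in (select n from reach)
)
select * from reach;
-- session-wide alternative:
-- set standards_compliant_cte=0;
drop table edges;

Note that the accompanying tests also set optimizer_switch='materialization=off,subquery_cache=off' when running this subquery pattern, so behaviour under the default optimizer settings is not guaranteed by this sketch.
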
--- mysql-test/r/cte_recursive.result | 347 ++++++++++++++++++++++++++++++++++++-- mysql-test/r/mysqld--help.result | 4 + mysql-test/t/cte_recursive.test | 267 ++++++++++++++++++++++++++++- sql/share/errmsg-utf8.txt | 2 + sql/sql_class.h | 1 + sql/sql_cte.cc | 53 +++--- sql/sql_cte.h | 12 +- sql/sql_derived.cc | 2 +- sql/sql_lex.cc | 3 + sql/sql_lex.h | 2 +- sql/sql_select.cc | 8 +- sql/sql_union.cc | 20 ++- sql/sys_vars.cc | 6 + 13 files changed, 675 insertions(+), 52 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index aa9ed3fc28c..2b9c8f721ef 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -30,7 +30,8 @@ insert into folks values (9, 'Grandma Ann', '1941-10-15', null, null), (25, 'Uncle Jim', '1968-11-18', 8, 7), (98, 'Sister Amy', '2001-06-20', 20, 30), -(8, 'Grandma Sally', '1943-08-23', 5, 6), +(7, 'Grandma Sally', '1943-08-23', null, 6), +(8, 'Grandpa Ben', '1940-10-21', null, null), (6, 'Grandgrandma Martha', '1923-05-17', null, null), (67, 'Cousin Eddie', '1992-02-28', 25, 27), (27, 'Auntie Melinda', '1971-03-29', null, null); @@ -53,7 +54,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive ancestors @@ -74,7 +76,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive ancestors @@ -93,7 +96,8 @@ id name dob father mother 67 Cousin Eddie 1992-02-28 25 27 25 Uncle Jim 1968-11-18 8 7 27 Auntie Melinda 1971-03-29 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive ancestors @@ -115,7 +119,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive prev_gen @@ -150,6 +155,7 @@ Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandma Sally 1943-08-23 +Grandpa Ben 1940-10-21 Grandgrandma Martha 1923-05-17 with recursive descendants @@ -183,7 +189,7 @@ where d.id=folks.father or d.id=folks.mother ) select * from descendants; id name dob father mother -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 30 Mom 1975-03-03 8 7 25 Uncle Jim 1968-11-18 8 7 100 Vasya 2000-01-01 20 30 @@ -208,6 +214,7 @@ where a.father=t1.id AND a.mother=t2.id); id name dob father mother id name dob father mother 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 with ancestor_couples(husband, h_dob, wife, w_dob) as @@ -233,6 +240,7 @@ select * from ancestor_couples; husband h_dob wife w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 with recursive ancestors as @@ -256,7 +264,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 9 Grandma Ann 1941-10-15 NULL NULL 10 Grandpa Bill 1940-04-05 
NULL NULL -8 Grandma Sally 1943-08-23 5 6 +8 Grandpa Ben 1940-10-21 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, @@ -286,6 +295,7 @@ from ancestor_couples; h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 prepare stmt1 from " with recursive ancestors @@ -308,7 +318,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL execute stmt1; id name dob father mother @@ -317,7 +328,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL deallocate prepare stmt1; create view v1 as @@ -344,7 +356,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL create view v2 as with recursive @@ -374,7 +387,8 @@ id name dob father mother 30 Mom 1975-03-03 8 7 9 Grandma Ann 1941-10-15 NULL NULL 10 Grandpa Bill 1940-04-05 NULL NULL -8 Grandma Sally 1943-08-23 5 6 +8 Grandpa Ben 1940-10-21 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL drop view v1,v2; explain extended @@ -392,11 +406,314 @@ where p.id = a.father or p.id = a.mother ) select * from ancestors; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY ALL NULL NULL NULL NULL 132 100.00 -2 SUBQUERY folks ALL NULL NULL NULL NULL 11 100.00 Using where -3 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 11 100.00 -3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 11 100.00 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 156 100.00 +2 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where +3 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 +3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1003 with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where ((`test`.`folks`.`name` = 'Vasya') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` +with recursive +ancestor_ids (id) +as +( +select father from folks where name = 'Vasya' + union +select mother from folks where name = 'Vasya' + union +select father from folks, ancestor_ids a where folks.id = a.id +union +select mother from folks, ancestor_ids a where folks.id = a.id +), +ancestors 
+as +( +select p.* from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +id name dob father mother +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Vasya' + union all +select p.* +from folks as p, ancestors as fa +where p.id = fa.father +union all +select p.* +from folks as p, ancestors as ma +where p.id = ma.mother +) +select * from ancestors; +id name dob father mother +100 Vasya 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +9 Grandma Ann 1941-10-15 NULL NULL +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Vasya' and father is not null +union all +select mother, 1 from folks where name = 'Vasya' and mother is not null +union all +select father, fa.generation+1 from folks, ancestor_ids fa +where folks.id = fa.id and (father not in (select id from ancestor_ids)) +union all +select mother, ma.generation+1 from folks, ancestor_ids ma +where folks.id = ma.id and (mother not in (select id from ancestor_ids)) +) +select generation, name from ancestor_ids a, folks +where a.id = folks.id; +ERROR HY000: Restrictions imposed on recursive definitions are violated for table 'ancestor_ids' +set standards_compliant_cte=0; +set optimizer_switch='materialization=off,subquery_cache=off'; +with recursive +ancestor_ids (id, generation) +as +( +select father from folks where name = 'Vasya' and father is not null +union all +select mother from folks where name = 'Vasya' and mother is not null +union all +select father, fa.generation+1 from folks, ancestor_ids fa +where folks.id = fa.id and (father not in (select id from ancestor_ids)) +union all +select mother, ma.generation+1 from folks, ancestor_ids ma +where folks.id = ma.id and (mother not in (select id from ancestor_ids)) +) +select generation, name from ancestor_ids a, folks +where a.id = folks.id; +ERROR HY000: WITH column list and SELECT field list have different column counts +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Vasya' and father is not null +union all +select mother, 1 from folks where name = 'Vasya' and mother is not null +union all +select father, fa.generation+1 from folks, ancestor_ids fa +where folks.id = fa.id and father is not null and +(father not in (select id from ancestor_ids)) +union all +select mother, ma.generation+1 from folks, ancestor_ids ma +where folks.id = ma.id and mother is not null and +(mother not in (select id from ancestor_ids)) +) +select generation, name from ancestor_ids a, folks +where a.id = folks.id; +generation name +1 Dad +1 Mom +2 Grandpa Bill +2 Grandpa Ben +2 Grandma Ann +2 Grandma Sally +3 Grandgrandma Martha +set optimizer_switch=default; +set standards_compliant_cte=1; +with recursive +coupled_ancestor_ids (id) +as +( +select father from folks where name = 'Vasya' and father is not null +union +select mother from folks where name = 'Vasya' and mother is not null +union +select n.father +from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n +where folks.father = fa.id and folks.mother = ma.id and 
+(fa.id = n.id or ma.id = n.id) and +n.father is not null and n.mother is not null +union +select n.mother +from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n +where folks.father = fa.id and folks.mother = ma.id and +(fa.id = n.id or ma.id = n.id) and +n.father is not null and n.mother is not null +) +select p.* from coupled_ancestor_ids a, folks p +where a.id = p.id; +ERROR HY000: Restrictions imposed on recursive definitions are violated for table 'coupled_ancestor_ids' +set statement standards_compliant_cte=0 for +with recursive +coupled_ancestor_ids (id) +as +( +select father from folks where name = 'Vasya' and father is not null +union +select mother from folks where name = 'Vasya' and mother is not null +union +select n.father +from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n +where folks.father = fa.id and folks.mother = ma.id and +(fa.id = n.id or ma.id = n.id) and +n.father is not null and n.mother is not null +union +select n.mother +from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n +where folks.father = fa.id and folks.mother = ma.id and +(fa.id = n.id or ma.id = n.id) and +n.father is not null and n.mother is not null +) +select p.* from coupled_ancestor_ids a, folks p +where a.id = p.id; +id name dob father mother +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +with recursive +ancestor_ids (id) +as +( +select father from folks where name = 'Vasya' + union +select mother from folks where name = 'Vasya' + union +select father from folks left join ancestor_ids a on folks.id = a.id +union +select mother from folks left join ancestor_ids a on folks.id = a.id +), +ancestors +as +( +select p.* from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +ERROR HY000: Restrictions imposed on recursive definitions are violated for table 'ancestor_ids' +set statement standards_compliant_cte=0 for +with recursive +ancestor_ids (id) +as +( +select father from folks where name = 'Vasya' + union +select mother from folks where name = 'Vasya' + union +select father from folks left join ancestor_ids a on folks.id = a.id +union +select mother from folks left join ancestor_ids a on folks.id = a.id +), +ancestors +as +( +select p.* from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +id name dob father mother +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL +25 Uncle Jim 1968-11-18 8 7 +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL +27 Auntie Melinda 1971-03-29 NULL NULL +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Vasya' + union +select mother, 1 from folks where name = 'Vasya' + union +select father, a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +union +select mother, a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +), +ancestors +as +( +select generation, name from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +generation name +1 Dad +1 Mom +2 Grandpa Bill +2 Grandpa Ben +2 Grandma Ann +2 Grandma Sally +3 Grandgrandma Martha +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Vasya' + union +select mother, 1 from folks where name = 'Vasya' 
+ union +select max(father), max(a.generation)+1 from folks, ancestor_ids a +where folks.id = a.id +group by a.generation +union +select max(mother), max(a.generation)+1 from folks, ancestor_ids a +where folks.id = a.id +group by a.generation +), +ancestors +as +( +select generation, name from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +ERROR HY000: Restrictions imposed on recursive definitions are violated for table 'ancestor_ids' +set statement standards_compliant_cte=0 for +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Vasya' + union +select mother, 1 from folks where name = 'Vasya' + union +select max(father), a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +group by a.generation +union +select max(mother), a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +group by a.generation +), +ancestors +as +( +select generation, name from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +generation name +1 Dad +1 Mom +2 Grandpa Bill +2 Grandma Ann drop table folks; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index a35693eb93e..7a8b9dc3df4 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -1033,6 +1033,9 @@ The following options may be given as the first argument: NO_ENGINE_SUBSTITUTION, PAD_CHAR_TO_FULL_LENGTH --stack-trace Print a symbolic stack trace on failure (Defaults to on; use --skip-stack-trace to disable.) + --standards-compliant-cte + Allow only standards compiant CTE + (Defaults to on; use --skip-standards-compliant-cte to disable.) --stored-program-cache=# The soft upper limit for number of cached stored routines for one connection. 
@@ -1414,6 +1417,7 @@ slow-query-log FALSE sort-buffer-size 2097152 sql-mode NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION stack-trace TRUE +standards-compliant-cte TRUE stored-program-cache 256 strict-password-validation TRUE symbolic-links FALSE diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 4982bf78adb..da4de155186 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -34,7 +34,8 @@ insert into folks values (9, 'Grandma Ann', '1941-10-15', null, null), (25, 'Uncle Jim', '1968-11-18', 8, 7), (98, 'Sister Amy', '2001-06-20', 20, 30), -(8, 'Grandma Sally', '1943-08-23', 5, 6), +(7, 'Grandma Sally', '1943-08-23', null, 6), +(8, 'Grandpa Ben', '1940-10-21', null, null), (6, 'Grandgrandma Martha', '1923-05-17', null, null), (67, 'Cousin Eddie', '1992-02-28', 25, 27), (27, 'Auntie Melinda', '1971-03-29', null, null); @@ -320,5 +321,269 @@ as select * from ancestors; +with recursive +ancestor_ids (id) +as +( + select father from folks where name = 'Vasya' + union + select mother from folks where name = 'Vasya' + union + select father from folks, ancestor_ids a where folks.id = a.id + union + select mother from folks, ancestor_ids a where folks.id = a.id +), +ancestors +as +( + select p.* from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Vasya' + union all + select p.* + from folks as p, ancestors as fa + where p.id = fa.father + union all + select p.* + from folks as p, ancestors as ma + where p.id = ma.mother +) +select * from ancestors; + + + + +--ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Vasya' and father is not null + union all + select mother, 1 from folks where name = 'Vasya' and mother is not null + union all + select father, fa.generation+1 from folks, ancestor_ids fa + where folks.id = fa.id and (father not in (select id from ancestor_ids)) + union all + select mother, ma.generation+1 from folks, ancestor_ids ma + where folks.id = ma.id and (mother not in (select id from ancestor_ids)) +) +select generation, name from ancestor_ids a, folks + where a.id = folks.id; + +set standards_compliant_cte=0; +set optimizer_switch='materialization=off,subquery_cache=off'; + +--ERROR ER_WITH_COL_WRONG_LIST +with recursive +ancestor_ids (id, generation) +as +( + select father from folks where name = 'Vasya' and father is not null + union all + select mother from folks where name = 'Vasya' and mother is not null + union all + select father, fa.generation+1 from folks, ancestor_ids fa + where folks.id = fa.id and (father not in (select id from ancestor_ids)) + union all + select mother, ma.generation+1 from folks, ancestor_ids ma + where folks.id = ma.id and (mother not in (select id from ancestor_ids)) +) +select generation, name from ancestor_ids a, folks + where a.id = folks.id; + +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Vasya' and father is not null + union all + select mother, 1 from folks where name = 'Vasya' and mother is not null + union all + select father, fa.generation+1 from folks, ancestor_ids fa + where folks.id = fa.id and father is not null and + (father not in (select id from ancestor_ids)) + union all + select mother, ma.generation+1 from folks, ancestor_ids ma + where folks.id = ma.id and mother is not null and + (mother not in (select id from 
ancestor_ids)) +) +select generation, name from ancestor_ids a, folks + where a.id = folks.id; + +set optimizer_switch=default; +set standards_compliant_cte=1; + +--ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE +with recursive +coupled_ancestor_ids (id) +as +( + select father from folks where name = 'Vasya' and father is not null + union + select mother from folks where name = 'Vasya' and mother is not null + union + select n.father + from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n + where folks.father = fa.id and folks.mother = ma.id and + (fa.id = n.id or ma.id = n.id) and + n.father is not null and n.mother is not null + union + select n.mother + from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n + where folks.father = fa.id and folks.mother = ma.id and + (fa.id = n.id or ma.id = n.id) and + n.father is not null and n.mother is not null +) +select p.* from coupled_ancestor_ids a, folks p + where a.id = p.id; + +set statement standards_compliant_cte=0 for +with recursive +coupled_ancestor_ids (id) +as +( + select father from folks where name = 'Vasya' and father is not null + union + select mother from folks where name = 'Vasya' and mother is not null + union + select n.father + from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n + where folks.father = fa.id and folks.mother = ma.id and + (fa.id = n.id or ma.id = n.id) and + n.father is not null and n.mother is not null + union + select n.mother + from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n + where folks.father = fa.id and folks.mother = ma.id and + (fa.id = n.id or ma.id = n.id) and + n.father is not null and n.mother is not null +) +select p.* from coupled_ancestor_ids a, folks p + where a.id = p.id; + +--ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE +with recursive +ancestor_ids (id) +as +( + select father from folks where name = 'Vasya' + union + select mother from folks where name = 'Vasya' + union + select father from folks left join ancestor_ids a on folks.id = a.id + union + select mother from folks left join ancestor_ids a on folks.id = a.id +), +ancestors +as +( + select p.* from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + +set statement standards_compliant_cte=0 for +with recursive +ancestor_ids (id) +as +( + select father from folks where name = 'Vasya' + union + select mother from folks where name = 'Vasya' + union + select father from folks left join ancestor_ids a on folks.id = a.id + union + select mother from folks left join ancestor_ids a on folks.id = a.id +), +ancestors +as +( + select p.* from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Vasya' + union + select mother, 1 from folks where name = 'Vasya' + union + select father, a.generation+1 from folks, ancestor_ids a + where folks.id = a.id + union + select mother, a.generation+1 from folks, ancestor_ids a + where folks.id = a.id +), +ancestors +as +( + select generation, name from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + +--ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Vasya' + union + select mother, 1 from folks where name = 'Vasya' + union + select max(father), max(a.generation)+1 from folks, ancestor_ids a + where folks.id = a.id + group by a.generation + union + select max(mother), 
max(a.generation)+1 from folks, ancestor_ids a + where folks.id = a.id + group by a.generation +), +ancestors +as +( + select generation, name from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + +set statement standards_compliant_cte=0 for +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Vasya' + union + select mother, 1 from folks where name = 'Vasya' + union + select max(father), a.generation+1 from folks, ancestor_ids a + where folks.id = a.id + group by a.generation + union + select max(mother), a.generation+1 from folks, ancestor_ids a + where folks.id = a.id + group by a.generation +), +ancestors +as +( + select generation, name from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + drop table folks; diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 38b05ca9dce..00228ee1062 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7156,6 +7156,8 @@ ER_RECURSIVE_WITHOUT_ANCHORS eng "No anchors for recursive WITH element '%s'" ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED eng "Reference to recursive WITH table '%s' in materiazed derived" +ER_NOT_STANDARDS_COMPLIANT_RECURSIVE + eng "Restrictions imposed on recursive definitions are violated for table '%s'" # # Internal errors, not used # diff --git a/sql/sql_class.h b/sql/sql_class.h index 0100a9807f5..7e995c04b33 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -628,6 +628,7 @@ typedef struct system_variables my_bool old_alter_table; my_bool old_passwords; my_bool big_tables; + my_bool only_standards_compliant_cte; my_bool query_cache_strip_comments; my_bool sql_log_slow; my_bool sql_log_bin; diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index ffc54f50af1..77d2c7d24d3 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -173,8 +173,10 @@ bool With_element::check_dependencies_in_spec(THD *thd) } -void With_element::check_dependencies_in_select(st_select_lex *sl, table_map &dep_map) +void With_element::check_dependencies_in_select(st_select_lex *sl, + table_map &dep_map) { + bool is_sq_select= sl->master_unit()->item != NULL; for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local) { tbl->with_internal_reference_map= 0; @@ -186,6 +188,8 @@ void With_element::check_dependencies_in_select(st_select_lex *sl, table_map &de { dep_map|= tbl->with->get_elem_map(); tbl->with_internal_reference_map= get_elem_map(); + if (is_sq_select) + sq_dep_map|= tbl->with->get_elem_map(); } } st_select_lex_unit *inner_unit= sl->first_inner_unit(); @@ -730,7 +734,7 @@ bool TABLE_LIST::is_with_table_recursive_reference() -bool st_select_lex::check_unrestricted_recursive() +bool st_select_lex::check_unrestricted_recursive(bool only_standards_compliant) { With_element *with_elem= get_with_element(); if (!with_elem ||!with_elem->is_recursive) @@ -742,8 +746,16 @@ bool st_select_lex::check_unrestricted_recursive() encountered)) return true; with_elem->owner->unrestricted|= unrestricted; - if (with_sum_func) + if (with_sum_func || + (with_elem->sq_dep_map & with_elem->mutually_recursive)) with_elem->owner->unrestricted|= with_elem->mutually_recursive; + if (only_standards_compliant && with_elem->is_unrestricted()) + { + my_error(ER_NOT_STANDARDS_COMPLIANT_RECURSIVE, + MYF(0), with_elem->query_name->str); + return true; + } + return false; } @@ -756,24 +768,27 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, TABLE_LIST *tbl; while ((tbl= ti++)) { - if (tbl->get_unit() && 
!tbl->is_with_table()) - { - st_select_lex_unit *unit= tbl->get_unit(); - if (tbl->is_materialized_derived()) + st_select_lex_unit *unit= tbl->get_unit(); + if (unit) + { + if(!tbl->is_with_table()) { - table_map dep_map; - check_dependencies_in_unit(unit, dep_map); - if (dep_map & get_elem_map()) + if (tbl->is_materialized_derived()) { - my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED, - MYF(0), query_name->str); - return true; + table_map dep_map; + check_dependencies_in_unit(unit, dep_map); + if (dep_map & get_elem_map()) + { + my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED, + MYF(0), query_name->str); + return true; + } } + if (check_unrestricted_recursive(unit->first_select(), + unrestricted, + encountered)) + return true; } - if (check_unrestricted_recursive(unit->first_select(), - unrestricted, - encountered)) - return true; if (!(tbl->is_recursive_with_table() && unit->with_element->owner == owner)) continue; With_element *with_elem= unit->with_element; @@ -792,8 +807,8 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, if (encountered & with_elem->get_elem_map()) { uint cnt= 0; - table_map mutually_recursive= with_elem->mutually_recursive; - for (table_map map= mutually_recursive >> with_elem->number; + table_map encountered_mr= encountered & with_elem->mutually_recursive; + for (table_map map= encountered_mr >> with_elem->number; map != 0; map>>= 1) { diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 0312fcd0643..1c32f16258c 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -35,6 +35,7 @@ private: directly or indirectly from the i-th with element. */ table_map derived_dep_map; + table_map sq_dep_map; table_map work_dep_map; // dependency map used for work /* Dependency map of with elements mutually recursive with this with element */ table_map mutually_recursive; @@ -90,7 +91,7 @@ public: List list, st_select_lex_unit *unit) : next_elem(NULL), base_dep_map(0), derived_dep_map(0), - work_dep_map(0), mutually_recursive(0), + sq_dep_map(0), work_dep_map(0), mutually_recursive(0), references(0), table(NULL), query_name(name), column_list(list), spec(unit), is_recursive(false), with_anchor(false), @@ -151,7 +152,8 @@ public: friend class With_clause; friend - bool st_select_lex::check_unrestricted_recursive(); + bool + st_select_lex::check_unrestricted_recursive(bool only_standard_compliant); friend bool TABLE_LIST::is_with_table_recursive_reference(); }; @@ -241,9 +243,11 @@ public: friend class With_element; friend - bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list); + bool + check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list); friend - bool st_select_lex::check_unrestricted_recursive(); + bool + st_select_lex::check_unrestricted_recursive(bool only_standard_compliant); }; diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 95a7ee91435..0db94421aa5 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -771,7 +771,7 @@ exit: */ if (res) { - if (derived->table) + if (derived->table && !derived->is_with_table_recursive_reference()) free_tmp_table(thd, derived->table); delete derived->derived_result; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 42058319fc9..27340bbec89 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -3189,6 +3189,8 @@ void st_select_lex_unit::set_limit(st_select_lex *sl) bool st_select_lex_unit::union_needs_tmp_table() { + if (with_element && with_element->is_recursive) + return true; return union_distinct != NULL || global_parameters()->order_list.elements != 0 
|| thd->lex->sql_command == SQLCOM_INSERT_SELECT || @@ -4236,6 +4238,7 @@ void st_select_lex::update_correlated_cache() while ((tl= ti++)) { + // is_correlated|= tl->is_with_table_recursive_reference(); if (tl->on_expr) is_correlated|= MY_TEST(tl->on_expr->used_tables() & OUTER_REF_TABLE_BIT); for (TABLE_LIST *embedding= tl->embedding ; embedding ; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 09463635b94..b17e19276da 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1108,7 +1108,7 @@ public: return master_unit()->with_element; } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); - bool check_unrestricted_recursive(); + bool check_unrestricted_recursive(bool only_standards_compliant); List window_specs; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 71b672b6131..6792d7f5e2c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -816,8 +816,6 @@ JOIN::prepare(TABLE_LIST *tables_init, &hidden_group_fields, &select_lex->select_n_reserved)) DBUG_RETURN(-1); - if (select_lex->check_unrestricted_recursive()) - DBUG_RETURN(-1); /* Resolve the ORDER BY that was skipped, then remove it. */ if (skip_order_by && select_lex != select_lex->master_unit()->global_parameters()) @@ -861,6 +859,12 @@ JOIN::prepare(TABLE_LIST *tables_init, With_clause *with_clause=select_lex->get_with_clause(); if (with_clause && with_clause->prepare_unreferenced_elements(thd)) DBUG_RETURN(1); + + With_element *with_elem= select_lex->get_with_element(); + if (with_elem && + select_lex->check_unrestricted_recursive( + thd->variables.only_standards_compliant_cte)) + DBUG_RETURN(-1); int res= check_and_do_in_subquery_rewrites(this); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 9308dc2a841..c43fdf30a64 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -266,14 +266,18 @@ void select_union::cleanup() void select_union_recursive::cleanup() { - select_union::cleanup(); - free_tmp_table(thd, table); + if (table) + { + select_union::cleanup(); + free_tmp_table(thd, table); + } - incr_table->file->extra(HA_EXTRA_RESET_STATE); - incr_table->file->ha_delete_all_rows(); - //free_io_cache(incr_table); - //filesort_free_buffers(incr_table,0); - free_tmp_table(thd, incr_table); + if (incr_table) + { + incr_table->file->extra(HA_EXTRA_RESET_STATE); + incr_table->file->ha_delete_all_rows(); + free_tmp_table(thd, incr_table); + } List_iterator
it(rec_tables); TABLE *tab; @@ -281,8 +285,6 @@ void select_union_recursive::cleanup() { tab->file->extra(HA_EXTRA_RESET_STATE); tab->file->ha_delete_all_rows(); - //free_io_cache(tab); - //filesort_free_buffers(tab,0); free_tmp_table(thd, tab); } } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 4bf202813f3..c921fffc004 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3156,6 +3156,12 @@ static Sys_var_charptr Sys_ssl_crlpath( READ_ONLY GLOBAL_VAR(opt_ssl_crlpath), SSL_OPT(OPT_SSL_CRLPATH), IN_FS_CHARSET, DEFAULT(0)); +static Sys_var_mybool Sys_standards_compliant_cte( + "standards_compliant_cte", + "Allow only standards compiant CTE", + SESSION_VAR(only_standards_compliant_cte), CMD_LINE(OPT_ARG), + DEFAULT(TRUE)); + // why ENUM and not BOOL ? static const char *updatable_views_with_limit_names[]= {"NO", "YES", 0}; -- cgit v1.2.1 From 0f7fe2a7437e69d1973d4354c2cddd7beeca05b9 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Thu, 19 May 2016 23:17:19 +0300 Subject: Changes in test files --- mysql-test/r/cte_recursive.result | 104 +++++++++++++++++++------------------- mysql-test/t/cte_recursive.test | 74 +++++++++++++-------------- 2 files changed, 89 insertions(+), 89 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 2b9c8f721ef..e4107a1eeb4 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -23,7 +23,7 @@ ERROR HY000: No anchors for recursive WITH element 'b1' drop table t1; create table folks(id int, name char(32), dob date, father int, mother int); insert into folks values -(100, 'Vasya', '2000-01-01', 20, 30), +(100, 'Me', '2000-01-01', 20, 30), (20, 'Dad', '1970-02-02', 10, 9), (30, 'Mom', '1975-03-03', 8, 7), (10, 'Grandpa Bill', '1940-04-05', null, null), @@ -41,7 +41,7 @@ as ( select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -49,7 +49,7 @@ where p.id = a.father or p.id = a.mother ) select * from ancestors; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL @@ -67,11 +67,11 @@ where p.id = a.father or p.id = a.mother union select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Me' and dob = '2000-01-01' ) select * from ancestors; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL @@ -105,7 +105,7 @@ as ( select * from folks -where name = 'Vasya' or name='Sister Amy' +where name = 'Me' or name='Sister Amy' union select p.* from folks as p, ancestors as a @@ -113,7 +113,7 @@ where p.id = a.father or p.id = a.mother ) select * from ancestors; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 98 Sister Amy 2001-06-20 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 @@ -132,14 +132,14 @@ where folks.id=prev_gen.father or folks.id=prev_gen.mother union select * from folks -where name='Vasya' +where name='Me' ), ancestors as ( select * from folks -where name='Vasya' +where name='Me' union select * from ancestors @@ -149,7 +149,7 @@ from prev_gen ) select ancestors.name, ancestors.dob from ancestors; name dob -Vasya 2000-01-01 +Me 2000-01-01 Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 @@ -173,7 +173,7 @@ select * from descendants; id name dob father mother 10 
Grandpa Bill 1940-04-05 NULL NULL 20 Dad 1970-02-02 10 9 -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 98 Sister Amy 2001-06-20 20 30 with recursive descendants @@ -192,7 +192,7 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 30 Mom 1975-03-03 8 7 25 Uncle Jim 1968-11-18 8 7 -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 98 Sister Amy 2001-06-20 20 30 67 Cousin Eddie 1992-02-28 25 27 with recursive @@ -201,7 +201,7 @@ as ( select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Me' and dob = '2000-01-01' union select p.* from folks as p, ancestors AS a @@ -225,7 +225,7 @@ as ( select * from folks -where name = 'Vasya' +where name = 'Me' union select p.* from folks as p, ancestors AS a @@ -247,7 +247,7 @@ as ( select * from folks -where name = 'Vasya' +where name = 'Me' union select p.* from folks as p, ancestors as fa @@ -259,7 +259,7 @@ where p.id = ma.mother ) select * from ancestors; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 9 Grandma Ann 1941-10-15 NULL NULL @@ -278,7 +278,7 @@ where a.father = h.id AND a.mother = w.id union select h.*, w.* from folks v, folks h, folks w -where v.name = 'Vasya' and +where v.name = 'Me' and (v.father = h.id AND v.mother= w.id) ), coupled_ancestors (id, name, dob, father, mother) @@ -303,7 +303,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -313,7 +313,7 @@ select * from ancestors; "; execute stmt1; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL @@ -323,7 +323,7 @@ id name dob father mother 6 Grandgrandma Martha 1923-05-17 NULL NULL execute stmt1; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL @@ -339,7 +339,7 @@ as ( select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -348,10 +348,10 @@ where p.id = a.father or p.id = a.mother select * from ancestors; show create view v1; View Create View character_set_client collation_connection -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where ((`folks`.`name` = 'Vasya') and (`folks`.`dob` = '2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `a`) where ((`p`.`id` = `a`.`father`) or (`p`.`id` = `a`.`mother`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where ((`folks`.`name` = 'Me') and (`folks`.`dob` = '2000-01-01')) 
union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `a`) where ((`p`.`id` = `a`.`father`) or (`p`.`id` = `a`.`mother`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci select * from v1; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL @@ -366,7 +366,7 @@ as ( select * from folks -where name = 'Vasya' +where name = 'Me' union select p.* from folks as p, ancestors as fa @@ -379,10 +379,10 @@ where p.id = ma.mother select * from ancestors; show create view v2; View Create View character_set_client collation_connection -v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where (`folks`.`name` = 'Vasya') union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `fa`) where (`p`.`id` = `fa`.`father`) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `ma`) where (`p`.`id` = `ma`.`mother`))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS with recursive ancestors as (select `folks`.`id` AS `id`,`folks`.`name` AS `name`,`folks`.`dob` AS `dob`,`folks`.`father` AS `father`,`folks`.`mother` AS `mother` from `folks` where (`folks`.`name` = 'Me') union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `fa`) where (`p`.`id` = `fa`.`father`) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from (`folks` `p` join `ancestors` `ma`) where (`p`.`id` = `ma`.`mother`))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` latin1 latin1_swedish_ci select * from v2; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 9 Grandma Ann 1941-10-15 NULL NULL @@ -398,7 +398,7 @@ as ( select * from folks -where name = 'Vasya' and dob = '2000-01-01' +where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -412,14 +412,14 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: -Note 1003 with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where 
((`test`.`folks`.`name` = 'Vasya') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` +Note 1003 with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where ((`test`.`folks`.`name` = 'Me') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` with recursive ancestor_ids (id) as ( -select father from folks where name = 'Vasya' +select father from folks where name = 'Me' union -select mother from folks where name = 'Vasya' +select mother from folks where name = 'Me' union select father from folks, ancestor_ids a where folks.id = a.id union @@ -446,7 +446,7 @@ as ( select * from folks -where name = 'Vasya' +where name = 'Me' union all select p.* from folks as p, ancestors as fa @@ -458,7 +458,7 @@ where p.id = ma.mother ) select * from ancestors; id name dob father mother -100 Vasya 2000-01-01 20 30 +100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 9 Grandma Ann 1941-10-15 NULL NULL @@ -471,9 +471,9 @@ with recursive ancestor_ids (id, generation) as ( -select father, 1 from folks where name = 'Vasya' and father is not null +select father, 1 from folks where name = 'Me' and father is not null union all -select mother, 1 from folks where name = 'Vasya' and mother is not null +select mother, 1 from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id and (father not in (select id from ancestor_ids)) @@ -490,9 +490,9 @@ with recursive ancestor_ids (id, generation) as ( -select father from folks where name = 'Vasya' and father is not null +select father from folks where name = 'Me' and father is not null union all -select mother from folks where name = 'Vasya' and mother is not null +select mother from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id and (father not in (select id from ancestor_ids)) @@ -507,9 +507,9 @@ with recursive ancestor_ids (id, generation) as ( -select father, 1 from folks where name = 'Vasya' and father is not null +select father, 1 from folks where name = 'Me' and father is not null union all -select mother, 1 from folks where name = 'Vasya' and mother is not null +select mother, 1 from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id and father is not null and @@ -535,9 +535,9 @@ with recursive coupled_ancestor_ids (id) as ( -select father from folks where name = 'Vasya' and father is not null +select father from folks where name = 'Me' and father is not 
null union -select mother from folks where name = 'Vasya' and mother is not null +select mother from folks where name = 'Me' and mother is not null union select n.father from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n @@ -559,9 +559,9 @@ with recursive coupled_ancestor_ids (id) as ( -select father from folks where name = 'Vasya' and father is not null +select father from folks where name = 'Me' and father is not null union -select mother from folks where name = 'Vasya' and mother is not null +select mother from folks where name = 'Me' and mother is not null union select n.father from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n @@ -588,9 +588,9 @@ with recursive ancestor_ids (id) as ( -select father from folks where name = 'Vasya' +select father from folks where name = 'Me' union -select mother from folks where name = 'Vasya' +select mother from folks where name = 'Me' union select father from folks left join ancestor_ids a on folks.id = a.id union @@ -609,9 +609,9 @@ with recursive ancestor_ids (id) as ( -select father from folks where name = 'Vasya' +select father from folks where name = 'Me' union -select mother from folks where name = 'Vasya' +select mother from folks where name = 'Me' union select father from folks left join ancestor_ids a on folks.id = a.id union @@ -638,9 +638,9 @@ with recursive ancestor_ids (id, generation) as ( -select father, 1 from folks where name = 'Vasya' +select father, 1 from folks where name = 'Me' union -select mother, 1 from folks where name = 'Vasya' +select mother, 1 from folks where name = 'Me' union select father, a.generation+1 from folks, ancestor_ids a where folks.id = a.id @@ -667,9 +667,9 @@ with recursive ancestor_ids (id, generation) as ( -select father, 1 from folks where name = 'Vasya' +select father, 1 from folks where name = 'Me' union -select mother, 1 from folks where name = 'Vasya' +select mother, 1 from folks where name = 'Me' union select max(father), max(a.generation)+1 from folks, ancestor_ids a where folks.id = a.id @@ -692,9 +692,9 @@ with recursive ancestor_ids (id, generation) as ( -select father, 1 from folks where name = 'Vasya' +select father, 1 from folks where name = 'Me' union -select mother, 1 from folks where name = 'Vasya' +select mother, 1 from folks where name = 'Me' union select max(father), a.generation+1 from folks, ancestor_ids a where folks.id = a.id diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index da4de155186..52ba4fb60e4 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -27,7 +27,7 @@ drop table t1; create table folks(id int, name char(32), dob date, father int, mother int); insert into folks values -(100, 'Vasya', '2000-01-01', 20, 30), +(100, 'Me', '2000-01-01', 20, 30), (20, 'Dad', '1970-02-02', 10, 9), (30, 'Mom', '1975-03-03', 8, 7), (10, 'Grandpa Bill', '1940-04-05', null, null), @@ -47,7 +47,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -65,7 +65,7 @@ as union select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' ) select * from ancestors; @@ -89,7 +89,7 @@ as ( select * from folks - where name = 'Vasya' or name='Sister Amy' + where name = 'Me' or name='Sister Amy' union select p.* from folks as p, ancestors as a @@ -107,14 +107,14 @@ as union select * from folks - where 
name='Vasya' + where name='Me' ), ancestors as ( select * from folks - where name='Vasya' + where name='Me' union select * from ancestors @@ -160,7 +160,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.* from folks as p, ancestors AS a @@ -181,7 +181,7 @@ as ( select * from folks - where name = 'Vasya' + where name = 'Me' union select p.* from folks as p, ancestors AS a @@ -200,7 +200,7 @@ as ( select * from folks - where name = 'Vasya' + where name = 'Me' union select p.* from folks as p, ancestors as fa @@ -223,7 +223,7 @@ as union select h.*, w.* from folks v, folks h, folks w - where v.name = 'Vasya' and + where v.name = 'Me' and (v.father = h.id AND v.mother= w.id) ), coupled_ancestors (id, name, dob, father, mother) @@ -246,7 +246,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -268,7 +268,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -287,7 +287,7 @@ as ( select * from folks - where name = 'Vasya' + where name = 'Me' union select p.* from folks as p, ancestors as fa @@ -312,7 +312,7 @@ as ( select * from folks - where name = 'Vasya' and dob = '2000-01-01' + where name = 'Me' and dob = '2000-01-01' union select p.id, p.name, p.dob, p.father, p.mother from folks as p, ancestors AS a @@ -325,9 +325,9 @@ with recursive ancestor_ids (id) as ( - select father from folks where name = 'Vasya' + select father from folks where name = 'Me' union - select mother from folks where name = 'Vasya' + select mother from folks where name = 'Me' union select father from folks, ancestor_ids a where folks.id = a.id union @@ -347,7 +347,7 @@ as ( select * from folks - where name = 'Vasya' + where name = 'Me' union all select p.* from folks as p, ancestors as fa @@ -367,9 +367,9 @@ with recursive ancestor_ids (id, generation) as ( - select father, 1 from folks where name = 'Vasya' and father is not null + select father, 1 from folks where name = 'Me' and father is not null union all - select mother, 1 from folks where name = 'Vasya' and mother is not null + select mother, 1 from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id and (father not in (select id from ancestor_ids)) @@ -388,9 +388,9 @@ with recursive ancestor_ids (id, generation) as ( - select father from folks where name = 'Vasya' and father is not null + select father from folks where name = 'Me' and father is not null union all - select mother from folks where name = 'Vasya' and mother is not null + select mother from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id and (father not in (select id from ancestor_ids)) @@ -405,9 +405,9 @@ with recursive ancestor_ids (id, generation) as ( - select father, 1 from folks where name = 'Vasya' and father is not null + select father, 1 from folks where name = 'Me' and father is not null union all - select mother, 1 from folks where name = 'Vasya' and mother is not null + select mother, 1 from folks where name = 'Me' and mother is not null union all select father, fa.generation+1 from folks, ancestor_ids fa where folks.id = fa.id 
and father is not null and @@ -428,9 +428,9 @@ with recursive coupled_ancestor_ids (id) as ( - select father from folks where name = 'Vasya' and father is not null + select father from folks where name = 'Me' and father is not null union - select mother from folks where name = 'Vasya' and mother is not null + select mother from folks where name = 'Me' and mother is not null union select n.father from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n @@ -452,9 +452,9 @@ with recursive coupled_ancestor_ids (id) as ( - select father from folks where name = 'Vasya' and father is not null + select father from folks where name = 'Me' and father is not null union - select mother from folks where name = 'Vasya' and mother is not null + select mother from folks where name = 'Me' and mother is not null union select n.father from folks, coupled_ancestor_ids fa, coupled_ancestor_ids ma, folks n @@ -476,9 +476,9 @@ with recursive ancestor_ids (id) as ( - select father from folks where name = 'Vasya' + select father from folks where name = 'Me' union - select mother from folks where name = 'Vasya' + select mother from folks where name = 'Me' union select father from folks left join ancestor_ids a on folks.id = a.id union @@ -497,9 +497,9 @@ with recursive ancestor_ids (id) as ( - select father from folks where name = 'Vasya' + select father from folks where name = 'Me' union - select mother from folks where name = 'Vasya' + select mother from folks where name = 'Me' union select father from folks left join ancestor_ids a on folks.id = a.id union @@ -517,9 +517,9 @@ with recursive ancestor_ids (id, generation) as ( - select father, 1 from folks where name = 'Vasya' + select father, 1 from folks where name = 'Me' union - select mother, 1 from folks where name = 'Vasya' + select mother, 1 from folks where name = 'Me' union select father, a.generation+1 from folks, ancestor_ids a where folks.id = a.id @@ -540,9 +540,9 @@ with recursive ancestor_ids (id, generation) as ( - select father, 1 from folks where name = 'Vasya' + select father, 1 from folks where name = 'Me' union - select mother, 1 from folks where name = 'Vasya' + select mother, 1 from folks where name = 'Me' union select max(father), max(a.generation)+1 from folks, ancestor_ids a where folks.id = a.id @@ -565,9 +565,9 @@ with recursive ancestor_ids (id, generation) as ( - select father, 1 from folks where name = 'Vasya' + select father, 1 from folks where name = 'Me' union - select mother, 1 from folks where name = 'Vasya' + select mother, 1 from folks where name = 'Me' union select max(father), a.generation+1 from folks, ancestor_ids a where folks.id = a.id -- cgit v1.2.1 From 4de9d9c261a6f2a32e98920bbc530c473b41de07 Mon Sep 17 00:00:00 2001 From: Terje Rosten Date: Fri, 20 May 2016 11:33:18 +0200 Subject: BUG#20693338 CONFLICTS WHILE INSTALLING PACKAGES WHEN LIBMYSQLCLIENT-DEVEL INSTALLED Remove mysql_config from client package to avoid conflict (file shipped in devel package any way). 
--- packaging/rpm-sles/mysql.spec.in | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/rpm-sles/mysql.spec.in b/packaging/rpm-sles/mysql.spec.in index 47c40b00e23..38201428fda 100644 --- a/packaging/rpm-sles/mysql.spec.in +++ b/packaging/rpm-sles/mysql.spec.in @@ -683,7 +683,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow %attr(755, root, root) %{_bindir}/mysqlslap -%attr(755, root, root) %{_bindir}/mysql_config %attr(644, root, root) %{_mandir}/man1/msql2mysql.1* %attr(644, root, root) %{_mandir}/man1/mysql.1* -- cgit v1.2.1 From 115f08284df1dac6a29cbca49dc7534b4a4f23f7 Mon Sep 17 00:00:00 2001 From: Sreeharsha Ramanavarapu Date: Tue, 24 May 2016 07:44:21 +0530 Subject: Bug #23279858: MYSQLD GOT SIGNAL 11 ON SIMPLE SELECT NAME_CONST QUERY ISSUE: ------ Using NAME_CONST with a non-constant negated expression as value can result in incorrect behavior. SOLUTION: --------- The problem can be avoided by checking whether the argument is a constant value. The fix is a backport of Bug#12735545. --- mysql-test/r/func_misc.result | 7 +++++++ mysql-test/t/func_misc.test | 10 ++++++++++ sql/item.cc | 9 +++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index c9552d9e39f..de46f070065 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -403,3 +403,10 @@ DROP TABLE t1; # # End of tests # +SELECT NAME_CONST('a', -(1 OR 2)) OR 1; +ERROR HY000: Incorrect arguments to NAME_CONST +SELECT NAME_CONST('a', -(1 AND 2)) OR 1; +ERROR HY000: Incorrect arguments to NAME_CONST +SELECT NAME_CONST('a', -(1)) OR 1; +NAME_CONST('a', -(1)) OR 1 +1 diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index 9257314013d..c13b506ad6f 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -544,3 +544,13 @@ DROP TABLE t1; --echo # --echo # End of tests --echo # + +# +# Bug#12735545 - PARSER STACK OVERFLOW WITH NAME_CONST +# CONTAINING OR EXPRESSION +# +--error ER_WRONG_ARGUMENTS +SELECT NAME_CONST('a', -(1 OR 2)) OR 1; +--error ER_WRONG_ARGUMENTS +SELECT NAME_CONST('a', -(1 AND 2)) OR 1; +SELECT NAME_CONST('a', -(1)) OR 1; diff --git a/sql/item.cc b/sql/item.cc index f4917448dda..1541314ec97 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1358,6 +1358,11 @@ bool Item_name_const::is_null() Item_name_const::Item_name_const(Item *name_arg, Item *val): value_item(val), name_item(name_arg) { + /* + The value argument to NAME_CONST can only be a literal constant. Some extra + tests are needed to support a collation specificer and to handle negative + values. + */ if (!(valid_args= name_item->basic_const_item() && (value_item->basic_const_item() || ((value_item->type() == FUNC_ITEM) && @@ -1365,8 +1370,8 @@ Item_name_const::Item_name_const(Item *name_arg, Item *val): Item_func::COLLATE_FUNC) || ((((Item_func *) value_item)->functype() == Item_func::NEG_FUNC) && - (((Item_func *) value_item)->key_item()->type() != - FUNC_ITEM))))))) + (((Item_func *) + value_item)->key_item()->basic_const_item()))))))) my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST"); Item::maybe_null= TRUE; } -- cgit v1.2.1 From b4f1f42062d108230b62ad49fedd93ee6e38e168 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Tue, 24 May 2016 21:29:52 +0300 Subject: Fixed the problem of wrong identification of WITH tables defined in WITH clauses without RECURSIVE. Added test cases to check the fix. 
Fixed the problem of wrong types of recursive tables when the type of anchor part does not coincide with the type of recursive part. Prevented usage of marerialization and subquery cache for subqueries with recursive references. Introduced system variables 'max_recursion_level'. Added a test case to test usage of this variable. --- mysql-test/r/cte_nonrecursive.result | 5 +- mysql-test/r/cte_recursive.result | 141 +++++++++++++++++++++++++++- mysql-test/r/mysqld--help.result | 4 + mysql-test/t/cte_nonrecursive.test | 7 +- mysql-test/t/cte_recursive.test | 124 ++++++++++++++++++++++++- sql/item_subselect.cc | 8 +- sql/item_subselect.h | 4 +- sql/opt_subselect.cc | 4 +- sql/share/errmsg-utf8.txt | 2 - sql/sql_class.h | 1 + sql/sql_cte.cc | 172 ++++++++++++++++++++++------------- sql/sql_cte.h | 59 ++++++++---- sql/sql_lex.h | 12 +-- sql/sql_select.cc | 1 + sql/sql_union.cc | 82 +++++++++-------- sql/sys_vars.cc | 6 ++ 16 files changed, 491 insertions(+), 141 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index d81c7c9ed4c..3899ee9aebf 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -675,7 +675,7 @@ ERROR HY000: Duplicate query name in WITH clause with t as (select a from s where a<5), s as (select a from t1 where b>='d') select * from t,s where t.a=s.a; -ERROR HY000: The definition of the table 't' refers to the table 's' defined later in a non-recursive WITH clause +ERROR 42S02: Table 'test.s' doesn't exist with recursive t as (select a from s where a<5), s as (select a from t1 where b>='d') @@ -709,7 +709,8 @@ with recursive t as (select * from t1 where b>'aaa' and b <='d') select t.b from t,t2 where t.a=t2.c and -t2.c in (with s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') +t2.c in (with recursive +s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') select * from s); ERROR HY000: No anchors for recursive WITH element 's' #erroneous definition of unreferenced with table t diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index e4107a1eeb4..c2a820f6e34 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -21,6 +21,116 @@ select * from b1 where b1.b > 'auu') select * from c1; ERROR HY000: No anchors for recursive WITH element 'b1' drop table t1; +# WITH RECURSIVE vs just WITH +create table t1 (a int); +insert into t1 values +(0), (1), (2), (3), (4); +create table t2 (a int); +insert into t2 values +(1), (2), (3), (4), (5); +# just WITH : s refers to t defined after s +with +s(a) as (select t.a + 10 from t), +t(a) as (select t1.a from t1) +select * from s; +ERROR 42S02: Table 'test.t' doesn't exist +# WITH RECURSIVE: s refers to t defined after s +with recursive +s(a) as (select t.a + 10 from t), +t(a) as (select t1.a from t1) +select * from s; +a +10 +11 +12 +13 +14 +# just WITH : defined t1 is non-recursive and uses base tables t1,t2 +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; +a +3 +1 +2 +4 +5 +explain +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 30 +2 SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +3 UNION t1 ALL NULL NULL NULL NULL 5 +3 UNION t2 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join) +NULL UNION RESULT ALL NULL NULL NULL 
NULL NULL +#WITH RECURSIVE : defined t1 is recursive and uses only base table t2 +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; +a +3 +4 +5 +explain +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 30 +2 SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where +3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 5 +3 UNCACHEABLE UNION t2 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join) +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +# just WITH : types of t1 columns are determined by all parts of union +create view v1 as +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a+1 from t1,t2 where t1.a=t2.a +) +select * from t1; +show columns from v1; +Field Type Null Key Default Extra +a bigint(20) YES NULL +# WITH RECURSIVE : types of t1 columns are determined by anchor parts +create view v2 as +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a+1 from t1,t2 where t1.a=t2.a +) +select * from t1; +show columns from v2; +Field Type Null Key Default Extra +a int(11) YES NULL +drop view v1,v2; +drop table t1,t2; create table folks(id int, name char(32), dob date, father int, mother int); insert into folks values (100, 'Me', '2000-01-01', 20, 30), @@ -485,7 +595,6 @@ select generation, name from ancestor_ids a, folks where a.id = folks.id; ERROR HY000: Restrictions imposed on recursive definitions are violated for table 'ancestor_ids' set standards_compliant_cte=0; -set optimizer_switch='materialization=off,subquery_cache=off'; with recursive ancestor_ids (id, generation) as @@ -529,7 +638,6 @@ generation name 2 Grandma Ann 2 Grandma Sally 3 Grandgrandma Martha -set optimizer_switch=default; set standards_compliant_cte=1; with recursive coupled_ancestor_ids (id) @@ -716,4 +824,33 @@ generation name 1 Mom 2 Grandpa Bill 2 Grandma Ann +set statement max_recursion_level=2 for +with recursive +ancestor_ids (id, generation) +as +( +select father, 1 from folks where name = 'Me' + union +select mother, 1 from folks where name = 'Me' + union +select father, a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +union +select mother, a.generation+1 from folks, ancestor_ids a +where folks.id = a.id +), +ancestors +as +( +select generation, name from folks as p, ancestor_ids as a +where p.id = a.id +) +select * from ancestors; +generation name +1 Dad +1 Mom +2 Grandpa Bill +2 Grandpa Ben +2 Grandma Ann +2 Grandma Sally drop table folks; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 7a8b9dc3df4..9d986c54a9d 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -450,6 +450,9 @@ The following options may be given as the first argument: max_allowed_packet instead. --max-prepared-stmt-count=# Maximum number of prepared statements in the server + --max-recursion-level[=#] + Maximum number of iterations when executing recursive + queries --max-relay-log-size=# relay log will be rotated automatically when the size exceeds this value. 
If 0 at startup, it's set to @@ -1270,6 +1273,7 @@ max-join-size 18446744073709551615 max-length-for-sort-data 1024 max-long-data-size 4194304 max-prepared-stmt-count 16382 +max-recursion-level 18446744073709551615 max-relay-log-size 1073741824 max-seeks-for-key 18446744073709551615 max-sort-length 1024 diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 978faaf0a4d..aa14db97cd1 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -366,7 +366,7 @@ with t as (select * from t2 where c>3), t as (select a from t1 where a>2) select * from t,t1 where t1.a=t.c; ---ERROR ER_WRONG_ORDER_IN_WITH_CLAUSE +--ERROR ER_NO_SUCH_TABLE with t as (select a from s where a<5), s as (select a from t1 where b>='d') select * from t,s where t.a=s.a; @@ -402,8 +402,9 @@ with recursive t as (select * from t1 where b>'aaa' and b <='d') select t.b from t,t2 where t.a=t2.c and - t2.c in (with s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') - select * from s); + t2.c in (with recursive + s as (select t1.a from s,t1 where t1.a=s.a and t1.b<'c') + select * from s); --echo #erroneous definition of unreferenced with table t --ERROR ER_BAD_FIELD_ERROR with t as (select count(*) from t1 where d>='f' group by a) diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 52ba4fb60e4..6fef24be34f 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -24,6 +24,105 @@ select * from c1; drop table t1; + +--echo # WITH RECURSIVE vs just WITH + +create table t1 (a int); +insert into t1 values + (0), (1), (2), (3), (4); +create table t2 (a int); +insert into t2 values + (1), (2), (3), (4), (5); + + +--echo # just WITH : s refers to t defined after s +--ERROR ER_NO_SUCH_TABLE +with + s(a) as (select t.a + 10 from t), + t(a) as (select t1.a from t1) +select * from s; + +--echo # WITH RECURSIVE: s refers to t defined after s +with recursive + s(a) as (select t.a + 10 from t), + t(a) as (select t1.a from t1) +select * from s; + +--echo # just WITH : defined t1 is non-recursive and uses base tables t1,t2 +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; + +explain +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; + + +--echo #WITH RECURSIVE : defined t1 is recursive and uses only base table t2 +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; + +explain +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a from t1,t2 where t1.a+1=t2.a +) +select * from t1; + +--echo # just WITH : types of t1 columns are determined by all parts of union + +create view v1 as +with +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a+1 from t1,t2 where t1.a=t2.a +) +select * from t1; + +show columns from v1; + + +--echo # WITH RECURSIVE : types of t1 columns are determined by anchor parts + +create view v2 as +with recursive +t1 as +( +select a from t2 where t2.a=3 +union +select t2.a+1 from t1,t2 where t1.a=t2.a +) +select * from t1; + +show columns from v2; + +drop view v1,v2; + +drop table t1,t2; + + create table folks(id int, name char(32), dob date, father int, mother int); insert into folks values @@ -381,7 +480,6 @@ select generation, name from ancestor_ids a, folks where a.id = folks.id; set standards_compliant_cte=0; -set 
optimizer_switch='materialization=off,subquery_cache=off'; --ERROR ER_WITH_COL_WRONG_LIST with recursive @@ -420,7 +518,6 @@ as select generation, name from ancestor_ids a, folks where a.id = folks.id; -set optimizer_switch=default; set standards_compliant_cte=1; --ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE @@ -585,5 +682,28 @@ as ) select * from ancestors; +set statement max_recursion_level=2 for +with recursive +ancestor_ids (id, generation) +as +( + select father, 1 from folks where name = 'Me' + union + select mother, 1 from folks where name = 'Me' + union + select father, a.generation+1 from folks, ancestor_ids a + where folks.id = a.id + union + select mother, a.generation+1 from folks, ancestor_ids a + where folks.id = a.id +), +ancestors +as +( + select generation, name from folks as p, ancestor_ids as a + where p.id = a.id +) +select * from ancestors; + drop table folks; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 94e7bc98618..7d458282825 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -54,7 +54,7 @@ Item_subselect::Item_subselect(THD *thd_arg): have_to_be_excluded(0), inside_first_fix_fields(0), done_first_fix_fields(FALSE), expr_cache(0), forced_const(FALSE), substitution(0), engine(0), eliminated(FALSE), - changed(0), is_correlated(FALSE) + changed(0), is_correlated(FALSE), with_recursive_reference(0) { DBUG_ENTER("Item_subselect::Item_subselect"); DBUG_PRINT("enter", ("this: 0x%lx", (ulong) this)); @@ -771,7 +771,8 @@ bool Item_subselect::expr_cache_is_needed(THD *thd) engine->cols() == 1 && optimizer_flag(thd, OPTIMIZER_SWITCH_SUBQUERY_CACHE) && !(engine->uncacheable() & (UNCACHEABLE_RAND | - UNCACHEABLE_SIDEEFFECT))); + UNCACHEABLE_SIDEEFFECT)) && + !with_recursive_reference); } @@ -810,7 +811,8 @@ bool Item_in_subselect::expr_cache_is_needed(THD *thd) { return (optimizer_flag(thd, OPTIMIZER_SWITCH_SUBQUERY_CACHE) && !(engine->uncacheable() & (UNCACHEABLE_RAND | - UNCACHEABLE_SIDEEFFECT))); + UNCACHEABLE_SIDEEFFECT)) && + !with_recursive_reference); } diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 58b5a948048..c1e68247220 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -126,7 +126,9 @@ public: bool changed; /* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */ - bool is_correlated; + bool is_correlated; + + bool with_recursive_reference; enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS}; diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 55c6c075f48..afb439040de 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -512,6 +512,7 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs, (Subquery is correlated to the immediate outer query && Subquery !contains {GROUP BY, ORDER BY [LIMIT], aggregate functions}) && subquery predicate is not under "NOT IN")) + 5. Subquery does not contain recursive references A note about prepared statements: we want the if-branch to be taken on PREPARE and each EXECUTE. 
The rewrites are only done once, but we need @@ -528,7 +529,8 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs, OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE) || //3 optimizer_flag(thd, OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN)) && //3 - !in_subs->is_correlated) //4 + !in_subs->is_correlated && //4 + !in_subs->with_recursive_reference) //5 { return TRUE; } diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 00228ee1062..4bacee0d9f3 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7150,8 +7150,6 @@ ER_WITH_COL_WRONG_LIST eng "WITH column list and SELECT field list have different column counts" ER_DUP_QUERY_NAME eng "Duplicate query name in WITH clause" -ER_WRONG_ORDER_IN_WITH_CLAUSE - eng "The definition of the table '%s' refers to the table '%s' defined later in a non-recursive WITH clause" ER_RECURSIVE_WITHOUT_ANCHORS eng "No anchors for recursive WITH element '%s'" ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED diff --git a/sql/sql_class.h b/sql/sql_class.h index 7e995c04b33..04ca37295bb 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -558,6 +558,7 @@ typedef struct system_variables ulong max_allowed_packet; ulong max_error_count; ulong max_length_for_sort_data; + ulong max_recursion_level; ulong max_sort_length; ulong max_tmp_tables; ulong max_insert_delayed_threads; diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 77d2c7d24d3..04a4fcb8a2b 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -118,83 +118,78 @@ bool With_clause::check_dependencies(THD *thd) if (with_elem->derived_dep_map & with_elem->get_elem_map()) with_elem->is_recursive= true; } - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) - { - if (with_elem->is_recursive) - { -#if 0 - my_error(ER_RECURSIVE_QUERY_IN_WITH_CLAUSE, MYF(0), - with_elem->query_name->str); - return true; -#endif - } - } - - if (!with_recursive) - { - /* - For each with table T defined in this with clause check whether - it is used in any definition that follows the definition of T. 
- */ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) - { - With_element *checked_elem= with_elem->next_elem; - for (uint i = with_elem->number+1; - i < elements; - i++, checked_elem= checked_elem->next_elem) - { - if (with_elem->check_dependency_on(checked_elem)) - { - my_error(ER_WRONG_ORDER_IN_WITH_CLAUSE, MYF(0), - with_elem->query_name->str, checked_elem->query_name->str); - return true; - } - } - } - } dependencies_are_checked= true; return false; } +struct st_unit_ctxt_elem +{ + st_unit_ctxt_elem *prev; + st_select_lex_unit *unit; +}; + bool With_element::check_dependencies_in_spec(THD *thd) { for (st_select_lex *sl= spec->first_select(); sl; sl= sl->next_select()) { - check_dependencies_in_select(sl, sl->with_dep); + st_unit_ctxt_elem ctxt0= {NULL, owner->owner}; + st_unit_ctxt_elem ctxt1= {&ctxt0, spec}; + check_dependencies_in_select(sl, &ctxt1, false, &sl->with_dep); base_dep_map|= sl->with_dep; } return false; } +With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl, + st_unit_ctxt_elem *ctxt) +{ + With_element *barrier= NULL; + for (st_unit_ctxt_elem *unit_ctxt_elem= ctxt; + unit_ctxt_elem; + unit_ctxt_elem= unit_ctxt_elem->prev) + { + st_select_lex_unit *unit= unit_ctxt_elem->unit; + With_clause *with_clause= unit->with_clause; + if (with_clause && + (tbl->with= with_clause->find_table_def(tbl, barrier))) + return tbl->with; + barrier= NULL; + if (unit->with_element && !unit->with_element->get_owner()->with_recursive) + barrier= unit->with_element; + } + return NULL; +} + + void With_element::check_dependencies_in_select(st_select_lex *sl, - table_map &dep_map) + st_unit_ctxt_elem *ctxt, + bool in_subq, + table_map *dep_map) { - bool is_sq_select= sl->master_unit()->item != NULL; + With_clause *with_clause= sl->get_with_clause(); for (TABLE_LIST *tbl= sl->table_list.first; tbl; tbl= tbl->next_local) { + if (tbl->derived || tbl->nested_join) + continue; tbl->with_internal_reference_map= 0; + if (with_clause && !tbl->with) + tbl->with= with_clause->find_table_def(tbl, NULL); if (!tbl->with) - tbl->with= owner->find_table_def(tbl); - if (!tbl->with && tbl->select_lex) - tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl); + tbl->with= find_table_def_in_with_clauses(tbl, ctxt); if (tbl->with && tbl->with->owner== this->owner) { - dep_map|= tbl->with->get_elem_map(); + *dep_map|= tbl->with->get_elem_map(); tbl->with_internal_reference_map= get_elem_map(); - if (is_sq_select) + if (in_subq) sq_dep_map|= tbl->with->get_elem_map(); } } st_select_lex_unit *inner_unit= sl->first_inner_unit(); for (; inner_unit; inner_unit= inner_unit->next_unit()) - check_dependencies_in_unit(inner_unit, dep_map); + check_dependencies_in_unit(inner_unit, ctxt, in_subq, dep_map); } @@ -213,12 +208,32 @@ void With_element::check_dependencies_in_select(st_select_lex *sl, */ void With_element::check_dependencies_in_unit(st_select_lex_unit *unit, - table_map &dep_map) + st_unit_ctxt_elem *ctxt, + bool in_subq, + table_map *dep_map) { + if (unit->with_clause) + check_dependencies_in_with_clause(unit->with_clause, ctxt, in_subq, dep_map); + in_subq |= unit->item != NULL; + st_unit_ctxt_elem unit_ctxt_elem= {ctxt, unit}; st_select_lex *sl= unit->first_select(); for (; sl; sl= sl->next_select()) { - check_dependencies_in_select(sl, dep_map); + check_dependencies_in_select(sl, &unit_ctxt_elem, in_subq, dep_map); + } +} + +void +With_element::check_dependencies_in_with_clause(With_clause *with_clause, + st_unit_ctxt_elem *ctxt, + bool in_subq, 
+ table_map *dep_map) +{ + for (With_element *with_elem= with_clause->first_elem; + with_elem != NULL; + with_elem= with_elem->next_elem) + { + check_dependencies_in_unit(with_elem->spec, ctxt, in_subq, dep_map); } } @@ -328,10 +343,11 @@ bool With_clause::check_anchors() NULL - otherwise */ -With_element *With_clause::find_table_def(TABLE_LIST *table) +With_element *With_clause::find_table_def(TABLE_LIST *table, + With_element *barrier) { for (With_element *with_elem= first_elem; - with_elem != NULL; + with_elem != barrier; with_elem= with_elem->next_elem) { if (my_strcasecmp(system_charset_info, with_elem->query_name->str, @@ -672,17 +688,27 @@ bool With_element::is_anchor(st_select_lex *sel) With_element *st_select_lex::find_table_def_in_with_clauses(TABLE_LIST *table) { + st_select_lex_unit *master_unit= NULL; With_element *found= NULL; for (st_select_lex *sl= this; sl; - sl= sl->master_unit()->outer_select()) + sl= master_unit->outer_select()) { + With_element *with_elem= sl->get_with_element(); + /* + If sl->master_unit() is the spec of a with element then the search for + a definition was already done by With_element::check_dependencies_in_spec + and it was unsuccesful. + */ + if (with_elem) + break; With_clause *with_clause=sl->get_with_clause(); - if (with_clause && (found= with_clause->find_table_def(table))) - return found; + if (with_clause && (found= with_clause->find_table_def(table,NULL))) + break; + master_unit= sl->master_unit(); /* Do not look for the table's definition beyond the scope of the view */ - if (sl->master_unit()->is_view) - break; + if (master_unit->is_view) + break; } return found; } @@ -729,7 +755,7 @@ bool TABLE_LIST::is_recursive_with_table() bool TABLE_LIST::is_with_table_recursive_reference() { return (with_internal_reference_map && - (with->mutually_recursive & with_internal_reference_map)); + (with->get_mutually_recursive() & with_internal_reference_map)); } @@ -745,10 +771,11 @@ bool st_select_lex::check_unrestricted_recursive(bool only_standards_compliant) unrestricted, encountered)) return true; - with_elem->owner->unrestricted|= unrestricted; + with_elem->get_owner()->add_unrestricted(unrestricted); if (with_sum_func || - (with_elem->sq_dep_map & with_elem->mutually_recursive)) - with_elem->owner->unrestricted|= with_elem->mutually_recursive; + (with_elem->contains_sq_with_recursive_reference())) + with_elem->get_owner()->add_unrestricted( + with_elem->get_mutually_recursive()); if (only_standards_compliant && with_elem->is_unrestricted()) { my_error(ER_NOT_STANDARDS_COMPLIANT_RECURSIVE, @@ -776,7 +803,7 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, if (tbl->is_materialized_derived()) { table_map dep_map; - check_dependencies_in_unit(unit, dep_map); + check_dependencies_in_unit(unit, NULL, false, &dep_map); if (dep_map & get_elem_map()) { my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED, @@ -797,7 +824,7 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, else encountered|= with_elem->get_elem_map(); } - } + } for (With_element *with_elem= sel->get_with_element()->owner->first_elem; with_elem != NULL; with_elem= with_elem->next_elem) @@ -841,6 +868,27 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, } +void st_select_lex::check_subqueries_with_recursive_references() +{ + st_select_lex_unit *sl_master= master_unit(); + List_iterator ti(leaf_tables); + TABLE_LIST *tbl; + while ((tbl= ti++)) + { + if (!(tbl->is_with_table_recursive_reference() && sl_master->item)) + continue; + for 
(st_select_lex *sl= this; sl; sl= sl_master->outer_select()) + { + sl_master= sl->master_unit(); + if (!sl_master->item) + continue; + Item_subselect *subq= (Item_subselect *) sl_master->item; + subq->with_recursive_reference= true; + } + } +} + + /** @brief Print this with clause diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 1c32f16258c..23eea8463e6 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -3,8 +3,8 @@ #include "sql_list.h" #include "sql_lex.h" -class With_clause; class select_union; +struct st_unit_ctxt_elem; /** @class With_clause @@ -100,10 +100,19 @@ public: bool check_dependencies_in_spec(THD *thd); - void check_dependencies_in_select(st_select_lex *sl, table_map &dep_map); + void check_dependencies_in_select(st_select_lex *sl, st_unit_ctxt_elem *ctxt, + bool in_subq, table_map *dep_map); - void check_dependencies_in_unit(st_select_lex_unit *unit, table_map &dep_map); - + void check_dependencies_in_unit(st_select_lex_unit *unit, + st_unit_ctxt_elem *ctxt, + bool in_subq, + table_map *dep_map); + + void check_dependencies_in_with_clause(With_clause *with_clause, + st_unit_ctxt_elem *ctxt, + bool in_subq, + table_map *dep_map); + void set_dependency_on(With_element *with_elem) { base_dep_map|= with_elem->get_elem_map(); } @@ -126,7 +135,14 @@ public: table_map &unrestricted, table_map &encountered); - void print(String *str, enum_query_type query_type); + void print(String *str, enum_query_type query_type); + + With_clause *get_owner() { return owner; } + + bool contains_sq_with_recursive_reference() + { return sq_dep_map & mutually_recursive; } + + table_map get_mutually_recursive() { return mutually_recursive; } void set_table(TABLE *tab) { table= tab; } @@ -151,11 +167,6 @@ public: void set_result_table(TABLE *tab) { result_table= tab; } friend class With_clause; - friend - bool - st_select_lex::check_unrestricted_recursive(bool only_standard_compliant); - friend - bool TABLE_LIST::is_with_table_recursive_reference(); }; @@ -209,8 +220,7 @@ public: { elem->owner= this; elem->number= elements; - owner= elem->spec; - owner->with_element= elem; + elem->spec->with_element= elem; *last_next= elem; last_next= &elem->next_elem; elements++; @@ -224,6 +234,8 @@ public: last_next= &this->next_with_clause; } + void set_owner(st_select_lex_unit *unit) { owner= unit; } + With_clause *pop() { return embedding_with_clause; } bool check_dependencies(THD *thd); @@ -232,12 +244,14 @@ public: void move_anchors_ahead(); - With_element *find_table_def(TABLE_LIST *table); + With_element *find_table_def(TABLE_LIST *table, With_element *barrier); With_element *find_table_def_in_with_clauses(TABLE_LIST *table); bool prepare_unreferenced_elements(THD *thd); + void add_unrestricted(table_map map) { unrestricted|= map; } + void print(String *str, enum_query_type query_type); friend class With_element; @@ -245,10 +259,6 @@ public: friend bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list); - friend - bool - st_select_lex::check_unrestricted_recursive(bool only_standard_compliant); - }; inline @@ -292,5 +302,20 @@ void With_element::reset_for_exec() owner->cleaned&= ~get_elem_map(); } +inline +void st_select_lex_unit::set_with_clause(With_clause *with_cl) +{ + with_clause= with_cl; + if (with_clause) + with_clause->set_owner(this); +} + +inline +void st_select_lex::set_with_clause(With_clause *with_clause) +{ + master_unit()->with_clause= with_clause; + if (with_clause) + with_clause->set_owner(master_unit()); +} #endif /* SQL_CTE_INCLUDED */ diff --git a/sql/sql_lex.h 
b/sql/sql_lex.h index b17e19276da..762d6718dcb 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -30,6 +30,7 @@ #include "sql_alter.h" // Alter_info #include "sql_window.h" + /* YACC and LEX Definitions */ /* These may not be declared yet */ @@ -690,7 +691,7 @@ public: { return reinterpret_cast(slave); } - void set_with_clause(With_clause *with_cl) { with_clause= with_cl; } + void set_with_clause(With_clause *with_cl); st_select_lex_unit* next_unit() { return reinterpret_cast(next); @@ -1095,10 +1096,7 @@ public: void set_non_agg_field_used(bool val) { m_non_agg_field_used= val; } void set_agg_func_used(bool val) { m_agg_func_used= val; } - void set_with_clause(With_clause *with_clause) - { - master_unit()->with_clause= with_clause; - } + void set_with_clause(With_clause *with_clause); With_clause *get_with_clause() { return master_unit()->with_clause; @@ -1109,8 +1107,8 @@ public: } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); bool check_unrestricted_recursive(bool only_standards_compliant); - - + void check_subqueries_with_recursive_references(); + List window_specs; void prepare_add_window_spec(THD *thd); bool add_window_def(THD *thd, LEX_STRING *win_name, LEX_STRING *win_ref, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 6792d7f5e2c..84dd2e4b676 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -865,6 +865,7 @@ JOIN::prepare(TABLE_LIST *tables_init, select_lex->check_unrestricted_recursive( thd->variables.only_standards_compliant_cte)) DBUG_RETURN(-1); + select_lex->check_subqueries_with_recursive_references(); int res= check_and_do_in_subquery_rewrites(this); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index c43fdf30a64..d19bbaf103c 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -442,6 +442,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, SELECT_LEX *lex_select_save= thd_arg->lex->current_select; SELECT_LEX *sl, *first_sl= first_select(); bool is_recursive= with_element && with_element->is_recursive; + bool is_rec_result_table_created= false; select_result *tmp_result; bool is_union_select; bool instantiate_tmp_table= false; @@ -609,24 +610,6 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (thd_arg->is_fatal_error) goto err; // out of memory - - if (is_recursive) - { - - ulonglong create_options; - create_options= (first_sl->options | thd_arg->variables.option_bits | - TMP_TABLE_ALL_COLUMNS); - if (union_result->create_result_table(thd, &types, - MY_TEST(union_distinct), - create_options, derived->alias, - false, - instantiate_tmp_table, false)) - goto err; - if (!derived->table) - derived->table= derived->derived_result->table= - with_element->rec_result->rec_tables.head(); - with_element->mark_as_with_prepared_anchor(); - } } else { @@ -636,19 +619,42 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, ER_THD(thd, ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0)); goto err; } - List_iterator_fast it(sl->item_list); - List_iterator_fast tp(types); - Item *type, *item_tmp; - while ((type= tp++, item_tmp= it++)) + if (!is_rec_result_table_created) { - if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) - DBUG_RETURN(TRUE); + List_iterator_fast it(sl->item_list); + List_iterator_fast tp(types); + Item *type, *item_tmp; + while ((type= tp++, item_tmp= it++)) + { + if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) + DBUG_RETURN(TRUE); + } } } - if (with_element && !with_element->is_anchor(sl)) + if (is_recursive) { - 
sl->uncacheable|= UNCACHEABLE_UNITED; - } + if (!with_element->is_anchor(sl)) + sl->uncacheable|= UNCACHEABLE_UNITED; + if(!is_rec_result_table_created && + (!sl->next_select() || + sl->next_select() == with_element->first_recursive)) + { + ulonglong create_options; + create_options= (first_sl->options | thd_arg->variables.option_bits | + TMP_TABLE_ALL_COLUMNS); + if (union_result->create_result_table(thd, &types, + MY_TEST(union_distinct), + create_options, derived->alias, + false, + instantiate_tmp_table, false)) + goto err; + if (!derived->table) + derived->table= derived->derived_result->table= + with_element->rec_result->rec_tables.head(); + with_element->mark_as_with_prepared_anchor(); + is_rec_result_table_created= true; + } + } } /* @@ -1166,17 +1172,18 @@ bool st_select_lex_unit::exec_recursive() st_select_lex *first_recursive_sel= with_element->first_recursive; TABLE *incr_table= with_element->rec_result->incr_table; TABLE *result_table= with_element->result_table; - ha_rows last_union_records= 0; ha_rows examined_rows= 0; bool unrestricted= with_element->is_unrestricted(); - bool is_stabilized= false; - DBUG_ENTER("st_select_lex_unit::exec_recursive"); + bool no_more_iterations= false; bool with_anchor= with_element->with_anchor; st_select_lex *first_sl= first_select(); st_select_lex *barrier= with_anchor ? first_recursive_sel : NULL; + uint max_level= thd->variables.max_recursion_level; List_iterator_fast
li(with_element->rec_result->rec_tables); TABLE *rec_table; + DBUG_ENTER("st_select_lex_unit::exec_recursive"); + do { if ((saved_error= incr_table->file->ha_delete_all_rows())) @@ -1210,16 +1217,13 @@ bool st_select_lex_unit::exec_recursive() barrier= NULL; } - table->file->info(HA_STATUS_VARIABLE); - if (table->file->stats.records == last_union_records) - { - is_stabilized= true; - } + incr_table->file->info(HA_STATUS_VARIABLE); + if (incr_table->file->stats.records == 0 || + with_element->level + 1 == max_level) + no_more_iterations= true; else - { - last_union_records= table->file->stats.records; with_element->level++; - } + li.rewind(); while ((rec_table= li++)) { @@ -1227,7 +1231,7 @@ bool st_select_lex_unit::exec_recursive() !unrestricted))) goto err; } - } while (!is_stabilized); + } while (!no_more_iterations); if ((saved_error= table->insert_all_rows_into(thd, result_table, diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c921fffc004..f63549ba3d7 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2145,6 +2145,12 @@ static Sys_var_ulong Sys_max_prepared_stmt_count( VALID_RANGE(0, 1024*1024), DEFAULT(16382), BLOCK_SIZE(1), &PLock_prepared_stmt_count); +static Sys_var_ulong Sys_max_recursion_level( + "max_recursion_level", + "Maximum number of iterations when executing recursive queries", + SESSION_VAR(max_recursion_level), CMD_LINE(OPT_ARG), + VALID_RANGE(0, UINT_MAX), DEFAULT(UINT_MAX), BLOCK_SIZE(1)); + static Sys_var_ulong Sys_max_sort_length( "max_sort_length", "The number of bytes to use when sorting BLOB or TEXT values (only " -- cgit v1.2.1 From 5dc6a77b409291140e072470673589982a6623a2 Mon Sep 17 00:00:00 2001 From: Arun Kuruvila Date: Mon, 30 May 2016 15:20:08 +0530 Subject: Bug#23035296: MAIN.MYSQLDUMP FAILS BECUASE OF UNEXPECTED ERROR MESSAGE Description:- Mtr test, "main.mysqldump" is failing with an assert when "mysqlimport" client utility is executed with the option "--use_threads". Analysis:- "mysqlimport" uses the option, "--use_threads", to spawn worker threads to complete its job in parallel. But currently the main thread is not waiting for the worker threads to complete its cleanup, rather just wait for the worker threads to say its done doing its job. So the cleanup is done in a race between the worker threads and the main thread. This lead to an assertion failure. Fix:- "my_thread_join()" is introduced in the main thread to join all the worker threads it have spawned. This will let the main thread to wait for all the worker threads to complete its cleanup before calling "my_end()". 
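The fix boils down to two things: create the workers joinable instead of detached, and join every thread that was actually spawned before tearing down shared state. The following stand-alone C++ sketch shows only that pattern; it is not the mysqlimport code itself, and the names (worker_thread, table_count, workers) are placeholders chosen for the illustration.

  #include <pthread.h>
  #include <cstdio>
  #include <cstdlib>

  /* Hypothetical per-table job; stands in for mysqlimport's worker routine. */
  static void *worker_thread(void *arg)
  {
    fprintf(stdout, "importing %s\n", (const char *) arg);
    return NULL;
  }

  int main(int argc, char **argv)
  {
    int table_count= argc - 1;
    pthread_t *workers= (pthread_t *) malloc(table_count * sizeof(pthread_t));
    int spawned= 0;

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    /* Joinable, not detached: the main thread must be able to wait on them. */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

    for (int i= 0; i < table_count; i++)
    {
      if (pthread_create(&workers[spawned], &attr, worker_thread,
                         argv[i + 1]) != 0)
      {
        fprintf(stderr, "could not create thread\n");
        continue;                      /* skip this table, keep going */
      }
      spawned++;
    }

    /* Join only the threads that were actually created, and only then
       release shared resources; this ordering is what the fix enforces
       before my_end() runs in the real client. */
    for (int i= 0; i < spawned; i++)
      pthread_join(workers[i], NULL);

    free(workers);
    pthread_attr_destroy(&attr);
    return 0;
  }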
--- client/mysqlimport.c | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 416159abd81..3e8f694d96d 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -592,7 +592,7 @@ error: pthread_cond_signal(&count_threshhold); pthread_mutex_unlock(&counter_mutex); mysql_thread_end(); - + pthread_exit(0); return 0; } @@ -615,15 +615,30 @@ int main(int argc, char **argv) if (opt_use_threads && !lock_tables) { - pthread_t mainthread; /* Thread descriptor */ - pthread_attr_t attr; /* Thread attributes */ + char **save_argv; + uint worker_thread_count= 0, table_count= 0, i= 0; + pthread_t *worker_threads; /* Thread descriptor */ + pthread_attr_t attr; /* Thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, - PTHREAD_CREATE_DETACHED); + PTHREAD_CREATE_JOINABLE); pthread_mutex_init(&counter_mutex, NULL); pthread_cond_init(&count_threshhold, NULL); + /* Count the number of tables. This number denotes the total number + of threads spawn. + */ + save_argv= argv; + for (table_count= 0; *argv != NULL; argv++) + table_count++; + argv= save_argv; + + if (!(worker_threads= (pthread_t*) my_malloc(table_count * + sizeof(*worker_threads), + MYF(0)))) + return -2; + for (counter= 0; *argv != NULL; argv++) /* Loop through tables */ { pthread_mutex_lock(&counter_mutex); @@ -638,15 +653,16 @@ int main(int argc, char **argv) counter++; pthread_mutex_unlock(&counter_mutex); /* now create the thread */ - if (pthread_create(&mainthread, &attr, worker_thread, - (void *)*argv) != 0) + if (pthread_create(&worker_threads[worker_thread_count], &attr, + worker_thread, (void *)*argv) != 0) { pthread_mutex_lock(&counter_mutex); counter--; pthread_mutex_unlock(&counter_mutex); - fprintf(stderr,"%s: Could not create thread\n", - my_progname); + fprintf(stderr,"%s: Could not create thread\n", my_progname); + continue; } + worker_thread_count++; } /* @@ -664,6 +680,14 @@ int main(int argc, char **argv) pthread_mutex_destroy(&counter_mutex); pthread_cond_destroy(&count_threshhold); pthread_attr_destroy(&attr); + + for(i= 0; i < worker_thread_count; i++) + { + if (pthread_join(worker_threads[i], NULL)) + fprintf(stderr,"%s: Could not join worker thread.\n", my_progname); + } + + my_free(worker_threads); } else { -- cgit v1.2.1 From 96d90250c66d9159522582a541c87e3c9d8b8d08 Mon Sep 17 00:00:00 2001 From: Arun Kuruvila Date: Thu, 2 Jun 2016 15:02:46 +0530 Subject: Bug#23035296: MAIN.MYSQLDUMP FAILS BECUASE OF UNEXPECTED ERROR MESSAGE Post push patch to fix test case failure. --- client/mysqlimport.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 3e8f694d96d..81eb5a37fde 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -30,6 +30,7 @@ /* Global Thread counter */ uint counter; +pthread_mutex_t init_mutex; pthread_mutex_t counter_mutex; pthread_cond_t count_threshhold; @@ -417,8 +418,13 @@ static MYSQL *db_connect(char *host, char *database, MYSQL *mysql; if (verbose) fprintf(stdout, "Connecting to %s\n", host ? 
host : "localhost"); + pthread_mutex_lock(&init_mutex); if (!(mysql= mysql_init(NULL))) + { + pthread_mutex_unlock(&init_mutex); return 0; + } + pthread_mutex_unlock(&init_mutex); if (opt_compress) mysql_options(mysql,MYSQL_OPT_COMPRESS,NullS); if (opt_local_file) @@ -623,6 +629,7 @@ int main(int argc, char **argv) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); + pthread_mutex_init(&init_mutex, NULL); pthread_mutex_init(&counter_mutex, NULL); pthread_cond_init(&count_threshhold, NULL); @@ -677,6 +684,7 @@ int main(int argc, char **argv) pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } pthread_mutex_unlock(&counter_mutex); + pthread_mutex_destroy(&init_mutex); pthread_mutex_destroy(&counter_mutex); pthread_cond_destroy(&count_threshhold); pthread_attr_destroy(&attr); -- cgit v1.2.1 From df0d8efaf25a69990cf422d55011c1c0eebdec51 Mon Sep 17 00:00:00 2001 From: Arun Kuruvila Date: Fri, 3 Jun 2016 12:50:23 +0530 Subject: Bug#23035296: MAIN.MYSQLDUMP FAILS BECUASE OF UNEXPECTED ERROR MESSAGE Post push patch to fix test case failure. --- client/mysqlimport.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 81eb5a37fde..5841c0b855a 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -418,13 +418,19 @@ static MYSQL *db_connect(char *host, char *database, MYSQL *mysql; if (verbose) fprintf(stdout, "Connecting to %s\n", host ? host : "localhost"); - pthread_mutex_lock(&init_mutex); - if (!(mysql= mysql_init(NULL))) + if (opt_use_threads && !lock_tables) { + pthread_mutex_lock(&init_mutex); + if (!(mysql= mysql_init(NULL))) + { + pthread_mutex_unlock(&init_mutex); + return 0; + } pthread_mutex_unlock(&init_mutex); - return 0; } - pthread_mutex_unlock(&init_mutex); + else + if (!(mysql= mysql_init(NULL))) + return 0; if (opt_compress) mysql_options(mysql,MYSQL_OPT_COMPRESS,NullS); if (opt_local_file) -- cgit v1.2.1 From 0a6e6d705b0bb4fb92ee1e91d21d6661d6473297 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 6 Jun 2016 10:01:16 -0700 Subject: Fixed numerous problems for mutually recursive CTE. Actually mutually recursive CTE were not functional. Now the code for mutually recursive CTE looks like functional, but still needs re-writing. Added many new test cases for mutually recursive CTE. 
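"Mutually recursive" here means two or more CTEs whose specifications reference each other, as in the ancestor_couple_ids / coupled_ancestors pairs exercised by the new tests below. The sql_cte.cc changes later in this series track that relationship with per-element dependency bitmaps (get_elem_map, mutually_recursive). The following minimal C++ sketch uses invented types, not the server's With_element, to show the idea of marking every member of a dependency cycle with the same map; the real code additionally skips non-recursive elements and links the members into a ring.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef uint64_t table_map;            /* same idea as the server's table_map */

  struct Element
  {
    int number;                          /* position in the WITH clause */
    table_map dep_map;                   /* elements this one refers to */
    table_map mutually_recursive;        /* filled in below             */
    table_map elem_map() const { return table_map(1) << number; }
  };

  /* Two elements are mutually recursive when each one's specification
     depends on the other; every element is also paired with itself. */
  static void mark_mutual_recursion(std::vector<Element> &elems)
  {
    for (Element &a : elems)
      for (Element &b : elems)
        if (&a == &b ||
            ((a.dep_map & b.elem_map()) && (b.dep_map & a.elem_map())))
          a.mutually_recursive|= b.elem_map();
  }

  int main()
  {
    /* Elements 0 and 1 reference each other, element 2 stands alone. */
    std::vector<Element> elems= { {0, 0x2, 0}, {1, 0x1, 0}, {2, 0x0, 0} };
    mark_mutual_recursion(elems);
    for (const Element &e : elems)
      printf("element %d: mutually_recursive=0x%llx\n",
             e.number, (unsigned long long) e.mutually_recursive);
    return 0;
  }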
--- mysql-test/r/cte_recursive.result | 253 +++++++++++++++++++++++++++++++++++++- mysql-test/t/cte_recursive.test | 215 ++++++++++++++++++++++++++++++++ sql/sql_cte.h | 123 +++++++++++++++++- sql/sql_derived.cc | 13 +- sql/sql_select.cc | 13 +- sql/sql_union.cc | 64 +++++++--- sql/table.cc | 3 +- sql/table.h | 1 + 8 files changed, 653 insertions(+), 32 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index c2a820f6e34..2d8fac6269d 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -372,9 +372,9 @@ id name dob father mother 100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 -9 Grandma Ann 1941-10-15 NULL NULL 10 Grandpa Bill 1940-04-05 NULL NULL 8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive @@ -406,6 +406,254 @@ h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) +as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +union +select h.*, w.* +from folks v, folks h, folks w +where v.name = 'Me' and +(v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union all +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +h_name h_dob w_name w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) +as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union all +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +h_name h_dob w_name w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select * +from ancestor_couple_ids; +h_id w_id +20 30 +10 9 +8 7 +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select h.name, h.dob, 
w.name, w.dob +from ancestor_couple_ids c, folks h, folks w +where c.h_id = h.id and c.w_id= w.id; +name dob name dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob +from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w +where c.h_id = h.id and c.w_id= w.id; +name dob name dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +explain extended +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob +from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w +where c.h_id = h.id and c.w_id= w.id; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY ALL NULL NULL NULL NULL 36 100.00 +1 PRIMARY ALL NULL NULL NULL NULL 468 100.00 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 468 100.00 Using where; Using join buffer (incremental, BNL join) +3 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where +4 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 +4 UNCACHEABLE UNION ALL NULL NULL NULL NULL 36 100.00 Using where; Using join buffer (flat, BNL join) +5 UNCACHEABLE UNION ALL NULL NULL NULL NULL 2 100.00 +5 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +2 UNCACHEABLE SUBQUERY ALL NULL NULL NULL NULL 36 100.00 Using where +Warnings: +Note 1003 with recursive ancestor_couple_ids as (select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where ((`a`.`father` is not null) and (`a`.`mother` is not null)))coupled_ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where (`test`.`folks`.`name` = 'Me') union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where (`fa`.`h_id` = `test`.`p`.`id`) union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where (`test`.`p`.`id` = `ma`.`w_id`)), select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` where ((`h`.`id` 
= `c`.`h_id`) and (`w`.`id` = `c`.`w_id`)) +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select * +from ancestor_couple_ids; +h_id w_id +20 30 +10 9 +8 7 +NULL NULL +NULL NULL +NULL NULL +NULL 6 +NULL NULL +with recursive +ancestor_couple_ids(h_id, w_id) +as +( +select a.father, a.mother +from coupled_ancestors a +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select * +from folks +where name = 'Me' + union all +select p.* +from folks p, ancestor_couple_ids fa +where p.id = fa.h_id +union all +select p.* +from folks p, ancestor_couple_ids ma +where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob +from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w +where c.h_id = h.id and c.w_id= w.id; +name dob name dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 prepare stmt1 from " with recursive ancestors @@ -495,9 +743,9 @@ id name dob father mother 100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 -9 Grandma Ann 1941-10-15 NULL NULL 10 Grandpa Bill 1940-04-05 NULL NULL 8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL drop view v1,v2; @@ -571,7 +819,6 @@ id name dob father mother 100 Me 2000-01-01 20 30 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 -9 Grandma Ann 1941-10-15 NULL NULL 10 Grandpa Bill 1940-04-05 NULL NULL 8 Grandpa Ben 1940-10-21 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 6fef24be34f..911c381d46a 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -338,6 +338,221 @@ select h_name, h_dob, w_name, w_dob from ancestor_couples; +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id + union + select h.*, w.* + from folks v, folks h, folks w + where v.name = 'Me' and + (v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select h_id, h_name, h_dob, h_father, h_mother + from ancestor_couples + union all + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + + +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select h_id, h_name, h_dob, h_father, h_mother + from ancestor_couples + union all + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + + +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a + where a.father is not null and a.mother is not null +), 
+coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select * + from ancestor_couple_ids; + +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a + where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob + from ancestor_couple_ids c, folks h, folks w + where c.h_id = h.id and c.w_id= w.id; + + +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a + where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob + from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w + where c.h_id = h.id and c.w_id= w.id; + +explain extended +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a + where a.father is not null and a.mother is not null +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob + from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w + where c.h_id = h.id and c.w_id= w.id; + +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select * + from ancestor_couple_ids; + + +with recursive +ancestor_couple_ids(h_id, w_id) +as +( + select a.father, a.mother + from coupled_ancestors a +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select * + from folks + where name = 'Me' + union all + select p.* + from folks p, ancestor_couple_ids fa + where p.id = fa.h_id + union all + select p.* + from folks p, ancestor_couple_ids ma + where p.id = ma.w_id +) +select h.name, h.dob, w.name, w.dob + from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w + where c.h_id = h.id and c.w_id= w.id; + + prepare stmt1 from " with recursive ancestors diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 23eea8463e6..ed16b82d352 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -87,6 +87,7 @@ public: TABLE *result_table; + With_element(LEX_STRING *name, List list, st_select_lex_unit *unit) @@ -164,6 +165,30 @@ public: void reset_for_exec(); + bool no_driving_recursive_is_set(); + + void set_as_driving_recursive(); + + bool is_driving_recursive(); + + void 
cleanup_driving_recursive(); + + void cleanup_incr_ready(); + + void set_as_incr_ready(); + + bool is_incr_ready(); + + bool all_incr_are_ready(); + + void cleanup_stabilized(); + + void set_as_stabilized(); + + bool is_stabilized(); + + bool all_are_stabilized(); + void set_result_table(TABLE *tab) { result_table= tab; } friend class With_clause; @@ -202,6 +227,9 @@ private: table_map unrestricted; table_map with_prepared_anchor; table_map cleaned; + table_map driving_recursive; + table_map incr_ready; + table_map stabilized; public: /* If true the specifier RECURSIVE is present in the with clause */ @@ -211,7 +239,8 @@ public: : owner(NULL), first_elem(NULL), elements(0), embedding_with_clause(emb_with_clause), next_with_clause(NULL), dependencies_are_checked(false), - unrestricted(0), with_prepared_anchor(0), cleaned(0), + unrestricted(0), with_prepared_anchor(0), cleaned(0), + driving_recursive(0), incr_ready(0), stabilized(0), with_recursive(recursive_fl) { last_next= &first_elem; } @@ -287,6 +316,7 @@ bool With_element::is_cleaned() return owner->cleaned & get_elem_map(); } + inline void With_element::mark_as_cleaned() { @@ -299,9 +329,97 @@ void With_element::reset_for_exec() { level= 0; owner->with_prepared_anchor&= ~mutually_recursive; - owner->cleaned&= ~get_elem_map(); + owner->cleaned&= ~get_elem_map(); + owner->driving_recursive&= ~get_elem_map(); + cleanup_incr_ready(); + cleanup_stabilized(); +} + + +inline +bool With_element::no_driving_recursive_is_set() +{ + return !(owner->driving_recursive & mutually_recursive); +} + + +inline +void With_element::set_as_driving_recursive() +{ + owner->driving_recursive|= get_elem_map(); +} + + +inline +bool With_element::is_driving_recursive() +{ + return owner->driving_recursive & get_elem_map(); +} + + +inline +void With_element::cleanup_driving_recursive() +{ + owner->driving_recursive&= ~mutually_recursive; +} + + +inline +void With_element::cleanup_incr_ready() +{ + owner->incr_ready&= ~mutually_recursive; +} + + +inline +void With_element::set_as_incr_ready() +{ + owner->incr_ready|= get_elem_map(); +} + + +inline +bool With_element::is_incr_ready() +{ + return owner->incr_ready & get_elem_map(); +} + + +inline +bool With_element::all_incr_are_ready() +{ + return (owner->incr_ready & mutually_recursive) == mutually_recursive; +} + + +inline +void With_element::cleanup_stabilized() +{ + owner->stabilized&= ~mutually_recursive; +} + + +inline +void With_element::set_as_stabilized() +{ + owner->stabilized|= get_elem_map(); } + +inline +bool With_element::is_stabilized() +{ + return owner->stabilized & get_elem_map(); +} + + +inline +bool With_element::all_are_stabilized() +{ + return (owner->stabilized & mutually_recursive) == mutually_recursive; +} + + inline void st_select_lex_unit::set_with_clause(With_clause *with_cl) { @@ -310,6 +428,7 @@ void st_select_lex_unit::set_with_clause(With_clause *with_cl) with_clause->set_owner(this); } + inline void st_select_lex::set_with_clause(With_clause *with_clause) { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 0db94421aa5..f84cdd939fe 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -661,7 +661,10 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) derived->derived_result->set_unit(unit); derived->table= derived->derived_result->table; if (derived->is_with_table_recursive_reference()) + { unit->with_element->rec_result->rec_tables.push_back(derived->table); + derived->table->is_rec_table= true; + } } DBUG_ASSERT(derived->table || res); goto exit; 
@@ -945,9 +948,10 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) { DBUG_ENTER("mysql_derived_fill"); SELECT_LEX_UNIT *unit= derived->get_unit(); + bool derived_is_recursive= derived->is_recursive_with_table(); bool res= FALSE; - if (derived->is_recursive_with_table() && unit->executed) + if (derived_is_recursive && derived->with->all_are_stabilized()) { TABLE *src= unit->with_element->rec_result->table; TABLE *dest= derived->table; @@ -955,17 +959,18 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_RETURN(res); } - if (unit->executed && !unit->uncacheable && !unit->describe) + if (unit->executed && !unit->uncacheable && !unit->describe && + !derived_is_recursive) DBUG_RETURN(FALSE); /*check that table creation passed without problems. */ DBUG_ASSERT(derived->table && derived->table->is_created()); SELECT_LEX *first_select= unit->first_select(); select_union *derived_result= derived->derived_result; SELECT_LEX *save_current_select= lex->current_select; - if (unit->is_union()) + if (unit->is_union() || derived_is_recursive) { // execute union without clean up - if (derived->is_recursive_with_table()) + if (derived_is_recursive) unit->with_element->set_result_table(derived->table); res= unit->exec(); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 84dd2e4b676..25a509472dc 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -11605,7 +11605,12 @@ bool JOIN_TAB::preread_init() } /* Materialize derived table/view. */ - if ((!derived->get_unit()->executed || derived->is_recursive_with_table()) && + if ((!derived->get_unit()->executed || + (derived->is_recursive_with_table() && + (!derived->is_with_table_recursive_reference() || + (!derived->with->is_driving_recursive() && + !derived->with->is_incr_ready()) && + !derived->with->all_are_stabilized()))) && mysql_handle_single_derived(join->thd->lex, derived, DT_CREATE | DT_FILL)) return TRUE; @@ -18236,7 +18241,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) flush_dups_table->sj_weedout_delete_rows(); } - if (!join_tab->preread_init_done && join_tab->preread_init()) + if ((!join_tab->preread_init_done || join_tab->table->is_rec_table) && + join_tab->preread_init()) DBUG_RETURN(NESTED_LOOP_ERROR); join->return_tab= join_tab; @@ -19189,7 +19195,8 @@ int join_init_read_record(JOIN_TAB *tab) report_error(tab->table, error); return 1; } - if (!tab->preread_init_done && tab->preread_init()) + if ((!tab->preread_init_done || tab->table->is_rec_table) && + tab->preread_init()) return 1; if (init_read_record(&tab->read_record, tab->join->thd, tab->table, tab->select, tab->filesort_result, 1,1, FALSE)) diff --git a/sql/sql_union.cc b/sql/sql_union.cc index d19bbaf103c..384cca70574 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -244,6 +244,7 @@ select_union_recursive::create_result_table(THD *thd_arg, if (rec_tables.push_back(rec_table)) return true; + rec_table->is_rec_table= true; return false; } @@ -494,7 +495,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, /* Global option */ - if (is_union_select) + if (is_union_select || is_recursive) { if (is_union() && !union_needs_tmp_table()) { @@ -530,7 +531,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, sl->context.resolve_in_select_list= TRUE; for (;sl; sl= sl->next_select()) - { + { bool can_skip_order_by; sl->options|= SELECT_NO_UNLOCK; JOIN *join= new JOIN(thd_arg, sl->item_list, @@ -587,7 +588,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result 
*sel_result, Use items list of underlaid select for derived tables to preserve information about fields lengths and exact types */ - if (!is_union_select) + if (!is_union_select && !is_recursive) types= first_sl->item_list; else if (sl == first_sl) { @@ -917,8 +918,9 @@ bool st_select_lex_unit::exec() bool first_execution= !executed; DBUG_ENTER("st_select_lex_unit::exec"); bool was_executed= executed; + bool is_recursive= with_element && with_element->is_recursive; - if (executed && !uncacheable && !describe) + if (executed && !uncacheable && !describe && !is_recursive) DBUG_RETURN(FALSE); executed= 1; if (!(uncacheable & ~UNCACHEABLE_EXPLAIN) && item) @@ -934,7 +936,7 @@ bool st_select_lex_unit::exec() if (saved_error) DBUG_RETURN(saved_error); - if (with_element && with_element->is_recursive && !describe) + if (is_recursive && !describe) { saved_error= exec_recursive(); DBUG_RETURN(saved_error); @@ -1174,10 +1176,7 @@ bool st_select_lex_unit::exec_recursive() TABLE *result_table= with_element->result_table; ha_rows examined_rows= 0; bool unrestricted= with_element->is_unrestricted(); - bool no_more_iterations= false; bool with_anchor= with_element->with_anchor; - st_select_lex *first_sl= first_select(); - st_select_lex *barrier= with_anchor ? first_recursive_sel : NULL; uint max_level= thd->variables.max_recursion_level; List_iterator_fast
li(with_element->rec_result->rec_tables); TABLE *rec_table; @@ -1186,9 +1185,31 @@ bool st_select_lex_unit::exec_recursive() do { + st_select_lex *first_sl; + st_select_lex *barrier; if ((saved_error= incr_table->file->ha_delete_all_rows())) goto err; + if (with_element->no_driving_recursive_is_set()) + with_element->set_as_driving_recursive(); + + if (with_element->level == 0) + { + first_sl= first_select(); + if (with_anchor) + barrier= first_recursive_sel; + else + barrier= NULL; + } + else + { + first_sl= first_recursive_sel; + barrier= NULL; + } + + if (with_element->all_incr_are_ready()) + with_element->cleanup_incr_ready(); + for (st_select_lex *sl= first_sl ; sl != barrier; sl= sl->next_select()) { thd->lex->current_select= sl; @@ -1211,16 +1232,12 @@ bool st_select_lex_unit::exec_recursive() } } - if (with_element->level == 0) - { - first_sl= first_recursive_sel; - barrier= NULL; - } + with_element->set_as_incr_ready(); incr_table->file->info(HA_STATUS_VARIABLE); if (incr_table->file->stats.records == 0 || with_element->level + 1 == max_level) - no_more_iterations= true; + with_element->set_as_stabilized(); else with_element->level++; @@ -1231,12 +1248,21 @@ bool st_select_lex_unit::exec_recursive() !unrestricted))) goto err; } - } while (!no_more_iterations); + + if (!with_element->is_driving_recursive()) + break; + + } while (!with_element->all_are_stabilized()); - if ((saved_error= table->insert_all_rows_into(thd, - result_table, - true))) - goto err; + if (with_element->is_driving_recursive()) + { + TABLE *table= with_element->rec_result->table; + if ((saved_error= table->insert_all_rows_into(thd, + result_table, + true))) + goto err; + with_element->cleanup_driving_recursive(); + } thd->lex->current_select= lex_select_save; err: diff --git a/sql/table.cc b/sql/table.cc index 6109c16fb37..e11ea59bcf8 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -7328,7 +7328,8 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) if (!is_materialized_derived() && first_select->is_mergeable() && optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && !(thd->lex->sql_command == SQLCOM_UPDATE_MULTI || - thd->lex->sql_command == SQLCOM_DELETE_MULTI)) + thd->lex->sql_command == SQLCOM_DELETE_MULTI) && + !is_recursive_with_table()) set_merged_derived(); else set_materialized_derived(); diff --git a/sql/table.h b/sql/table.h index 122b036cae5..a8d01a64599 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1244,6 +1244,7 @@ public: bool alias_name_used; /* true if table_name is alias */ bool get_fields_in_item_tree; /* Signal to fix_field */ bool m_needs_reopen; + bool is_rec_table; private: bool created; /* For tmp tables. TRUE <=> tmp table was actually created.*/ public: -- cgit v1.2.1 From 096286c95f4c85b16c65a71779abd49c98c3ac28 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 7 Jun 2016 11:06:54 -0700 Subject: The method With_element::reset_for_exec was not called in non-debug builds. 
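The defect is a common one: a call the server always needs (With_element::reset_for_exec()) sat inside a block that is compiled only in debug builds, so release builds silently skipped it; the hunk below moves the call outside the conditional compilation. A minimal illustration of the pattern, with made-up function names rather than the server's, is:

  #include <cstdio>

  static void reset_state()  { puts("state reset"); }
  static void verify_state() { puts("debug-only consistency check"); }

  void reinit()
  {
  #ifndef DBUG_OFF
    /* Debug-only diagnostics belong here ... */
    verify_state();
    /* ... but placing reset_state() inside this block means builds
       compiled with -DDBUG_OFF never execute it. */
  #endif
    /* Correct placement: required work happens in every build. */
    reset_state();
  }

  int main() { reinit(); return 0; }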
--- .../sys_vars/r/sysvars_server_notembedded.result | 28 ++++++++++++++++++++++ sql/sql_union.cc | 4 ++-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 16205792031..d534669ac58 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -2165,6 +2165,20 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME MAX_RECURSION_LEVEL +SESSION_VALUE 4294967295 +GLOBAL_VALUE 4294967295 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 4294967295 +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Maximum number of iterations when executing recursive queries +NUMERIC_MIN_VALUE 0 +NUMERIC_MAX_VALUE 4294967295 +NUMERIC_BLOCK_SIZE 1 +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME MAX_RELAY_LOG_SIZE SESSION_VALUE 1073741824 GLOBAL_VALUE 1073741824 @@ -4419,6 +4433,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME STANDARDS_COMPLIANT_CTE +SESSION_VALUE ON +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Allow only standards compiant CTE +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME STORAGE_ENGINE SESSION_VALUE MyISAM GLOBAL_VALUE MyISAM diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 384cca70574..14c66f6546c 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1349,10 +1349,10 @@ void st_select_lex_unit::reinit_exec_mechanism() */ field->fixed= 0; } - if (with_element && with_element->is_recursive) - with_element->reset_for_exec(); } #endif + if (with_element && with_element->is_recursive) + with_element->reset_for_exec(); } -- cgit v1.2.1 From 0eec187153e8266b99f4ae66dba810a9b39e206d Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 7 Jun 2016 15:01:34 -0700 Subject: A commit to force buildbot working. --- sql/sql_cte.h | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/sql_cte.h b/sql/sql_cte.h index ed16b82d352..744f50eb7ef 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -81,6 +81,7 @@ public: st_select_lex *first_recursive; + /* The number of the last performed iteration for recursive table */ uint level; select_union_recursive *rec_result; -- cgit v1.2.1 From 957aefdc8f5523a1d45775f5ce3de74c03f5ed98 Mon Sep 17 00:00:00 2001 From: Shishir Jaiswal Date: Fri, 17 Jun 2016 10:11:33 +0530 Subject: Bug#23498283 - BUFFER OVERFLOW DESCRIPTION =========== Buffer overflow is reported in Regex library. This can be triggered when the data corresponding to argv[1] is >= 512 bytes resutling in abnormal behaviour. ANALYSIS ======== Its a straight forward case of SEGFAULT where the target buffer is smaller than the source string to be copied. A simple pre-copy validation should do. FIX === A check is added before doing strcpy() to ensure that the target buffer is big enough to hold the to-be copied data. If the check fails, the program aborts. 
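The patch below guards a fixed-size stack buffer before strcpy(). A stand-alone C++ sketch of the same pre-copy validation follows; the buffer name and size are illustrative, not necessarily those used in regex/split.c.

  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  int main(int argc, char **argv)
  {
    char buf[512];

    if (argc < 2)
      return EXIT_FAILURE;

    /* Refuse to copy anything that cannot fit, including the terminating
       NUL byte; without this check strcpy() writes past the end of buf. */
    if (sizeof(buf) - 1 < strlen(argv[1]))
    {
      fprintf(stderr, "input longer than %zu bytes\n", sizeof(buf) - 1);
      exit(EXIT_FAILURE);
    }

    strcpy(buf, argv[1]);
    printf("copied: %s\n", buf);
    return 0;
  }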
--- regex/split.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/regex/split.c b/regex/split.c index a3a11f793ed..abae74eba9c 100644 --- a/regex/split.c +++ b/regex/split.c @@ -159,6 +159,10 @@ char *argv[]; if (argc > 4) for (n = atoi(argv[3]); n > 0; n--) { + if(sizeof(buf)-1 < strlen(argv[1])) + { + exit(EXIT_FAILURE); + } (void) strcpy(buf, argv[1]); } else if (argc > 3) -- cgit v1.2.1 From 4a3f1c1f104cbfeb6d31ee02788589151b131eca Mon Sep 17 00:00:00 2001 From: Terje Rosten Date: Thu, 19 May 2016 15:58:35 +0200 Subject: BUG#17903583 MYSQL-COMMUNITY-SERVER SHOULD NOT DEPEND ON MYSQL-COMMUNITY-CLIENT (#70985) Fix is a backport of BUG#18518216/72230 to MySQL 5.5 and 5.6. Will also resolve: BUG#23605713/81384 LIBMYSQLCLIENT.SO.18 MISSING FROM MYSQL 5.7 as mysql-community-libs-5.5 or mysql-community-libs-5.6 can installed on EL6 system with libmysqlclient.16 (from MySQL 5.1) libmysqlclient.20 (from MySQL 5.7) by doing: $ rpm --oldpackage -ivh mysql-community-libs-5.5.50-2.el6.x86_64.rpm Providing a way to have several versions of libmysqlclient installed on the same system. and help: BUG#23088014/80981 LIBS-COMPAT RPMS SHOULD BE INDEPENDENT OF ALL OTHER SUBPACKAGES due to less strict coupling between -libs-compat and -common package. --- packaging/rpm-oel/mysql.spec.in | 52 +++++++++++++++++++++------------------- packaging/rpm-sles/mysql.spec.in | 42 ++++++++++++++++---------------- 2 files changed, 49 insertions(+), 45 deletions(-) diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in index 8f92f5b84f3..29957d98ed0 100644 --- a/packaging/rpm-oel/mysql.spec.in +++ b/packaging/rpm-oel/mysql.spec.in @@ -81,6 +81,8 @@ %global license_type GPLv2 %endif +%global min 5.5.8 + Name: mysql-%{product_suffix} Summary: A very fast and reliable SQL database server Group: Applications/Databases @@ -156,11 +158,11 @@ Requires: net-tools Provides: MySQL-server-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-server-advanced < %{version}-%{release} Obsoletes: mysql-community-server < %{version}-%{release} -Requires: mysql-commercial-client%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-client%{?_isa} >= %{min} Requires: mysql-commercial-common%{?_isa} = %{version}-%{release} %else Provides: MySQL-server%{?_isa} = %{version}-%{release} -Requires: mysql-community-client%{?_isa} = %{version}-%{release} +Requires: mysql-community-client%{?_isa} >= %{min} Requires: mysql-community-common%{?_isa} = %{version}-%{release} %endif Obsoletes: MySQL-server < %{version}-%{release} @@ -209,10 +211,10 @@ Group: Applications/Databases Provides: MySQL-client-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-client-advanced < %{version}-%{release} Obsoletes: mysql-community-client < %{version}-%{release} -Requires: mysql-commercial-libs%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-libs%{?_isa} >= %{min} %else Provides: MySQL-client%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} +Requires: mysql-community-libs%{?_isa} >= %{min} %endif Obsoletes: MySQL-client < %{version}-%{release} Obsoletes: mariadb @@ -234,7 +236,7 @@ Obsoletes: mysql-community-common < %{version}-%{release} %endif Provides: mysql-common = %{version}-%{release} Provides: mysql-common%{?_isa} = %{version}-%{release} -%{?el5:Requires: mysql%{?_isa} = %{version}-%{release}} +%{?el5:Requires: mysql%{?_isa} >= %{min}} %description common This packages contains common files needed by MySQL client library, @@ -248,10 +250,10 @@ Group: 
Applications/Databases Provides: MySQL-test-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-test-advanced < %{version}-%{release} Obsoletes: mysql-community-test < %{version}-%{release} -Requires: mysql-commercial-server%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-server%{?_isa} >= %{min} %else Provides: MySQL-test%{?_isa} = %{version}-%{release} -Requires: mysql-community-server%{?_isa} = %{version}-%{release} +Requires: mysql-community-server%{?_isa} >= %{min} %endif Obsoletes: MySQL-test < %{version}-%{release} Obsoletes: mysql-test < %{version}-%{release} @@ -270,9 +272,9 @@ Summary: MySQL benchmark suite Group: Applications/Databases %if 0%{?commercial} Obsoletes: mysql-community-bench < %{version}-%{release} -Requires: mysql-commercial-server%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-server%{?_isa} >= %{min} %else -Requires: mysql-community-server%{?_isa} = %{version}-%{release} +Requires: mysql-community-server%{?_isa} >= %{min} %endif Obsoletes: mariadb-bench Obsoletes: community-mysql-bench < %{version}-%{release} @@ -291,10 +293,10 @@ Group: Applications/Databases Provides: MySQL-devel-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-devel-advanced < %{version}-%{release} Obsoletes: mysql-community-devel < %{version}-%{release} -Requires: mysql-commercial-libs%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-libs%{?_isa} >= %{min} %else Provides: MySQL-devel%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} +Requires: mysql-community-libs%{?_isa} >= %{min} %endif Obsoletes: MySQL-devel < %{version}-%{release} Obsoletes: mysql-devel < %{version}-%{release} @@ -314,10 +316,10 @@ Group: Applications/Databases Provides: MySQL-shared-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-shared-advanced < %{version}-%{release} Obsoletes: mysql-community-libs < %{version}-%{release} -Requires: mysql-commercial-common%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-common%{?_isa} >= %{min} %else Provides: MySQL-shared%{?_isa} = %{version}-%{release} -Requires: mysql-community-common%{?_isa} = %{version}-%{release} +Requires: mysql-community-common%{?_isa} >= %{min} %endif Obsoletes: MySQL-shared < %{version}-%{release} Obsoletes: mysql-libs < %{version}-%{release} @@ -341,10 +343,10 @@ Provides: mysql-libs-compat%{?_isa} = %{version}-%{release} Provides: MySQL-shared-compat-advanced%{?_isa} = %{version}-%{release} Obsoletes: MySQL-shared-compat-advanced < %{version}-%{release} Obsoletes: mysql-community-libs-compat < %{version}-%{release} -Requires: mysql-commercial-libs%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-libs%{?_isa} >= %{min} %else Provides: MySQL-shared-compat%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} +Requires: mysql-community-libs%{?_isa} >= %{min} %endif Obsoletes: MySQL-shared-compat < %{version}-%{release} %if 0%{?rhel} > 5 @@ -391,11 +393,11 @@ Summary: Development header files and libraries for MySQL as an embeddabl Group: Applications/Databases %if 0%{?commercial} Obsoletes: mysql-community-embedded-devel < %{version}-%{release} -Requires: mysql-commercial-devel%{?_isa} = %{version}-%{release} -Requires: mysql-commercial-embedded%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-devel%{?_isa} >= %{min} +Requires: mysql-commercial-embedded%{?_isa} >= %{min} %else -Requires: mysql-community-devel%{?_isa} = %{version}-%{release} -Requires: 
mysql-community-embedded%{?_isa} = %{version}-%{release} +Requires: mysql-community-devel%{?_isa} >= %{min} +Requires: mysql-community-embedded%{?_isa} >= %{min} %endif Obsoletes: mariadb-embedded-devel Obsoletes: mysql-embedded-devel < %{version}-%{release} @@ -411,13 +413,13 @@ the embedded version of the MySQL server. Summary: Convenience package for easy upgrades of MySQL package set Group: Applications/Databases %if 0%{?commercial} -Requires: mysql-commercial-client%{?_isa} = %{version}-%{release} -Requires: mysql-commercial-libs%{?_isa} = %{version}-%{release} -Requires: mysql-commercial-libs-compat%{?_isa} = %{version}-%{release} +Requires: mysql-commercial-client%{?_isa} >= %{min} +Requires: mysql-commercial-libs%{?_isa} >= %{min} +Requires: mysql-commercial-libs-compat%{?_isa} >= %{min} %else -Requires: mysql-community-client%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs-compat%{?_isa} = %{version}-%{release} +Requires: mysql-community-client%{?_isa} >= %{min} +Requires: mysql-community-libs%{?_isa} >= %{min} +Requires: mysql-community-libs-compat%{?_isa} >= %{min} %endif %description -n mysql diff --git a/packaging/rpm-sles/mysql.spec.in b/packaging/rpm-sles/mysql.spec.in index 38201428fda..a11dfff7b70 100644 --- a/packaging/rpm-sles/mysql.spec.in +++ b/packaging/rpm-sles/mysql.spec.in @@ -57,6 +57,8 @@ %global sles11 1 %endif +%global min 5.5.8 + Name: mysql-%{product_suffix} Summary: A very fast and reliable SQL database server Group: Applications/Databases @@ -125,12 +127,12 @@ Requires: perl-base Provides: MySQL-server-advanced = %{version}-%{release} Obsoletes: MySQL-server-advanced < %{version}-%{release} Obsoletes: mysql-community-server < %{version}-%{release} -Requires: mysql-commercial-client = %{version}-%{release} -Requires: mysql-commercial-common = %{version}-%{release} +Requires: mysql-commercial-client >= %{min} +Requires: mysql-commercial-common >= %{min} %else Provides: MySQL-server = %{version}-%{release} -Requires: mysql-community-client = %{version}-%{release} -Requires: mysql-community-common = %{version}-%{release} +Requires: mysql-community-client >= %{min} +Requires: mysql-community-common >= %{min} %endif Obsoletes: MySQL-server < %{version}-%{release} Obsoletes: mysql < %{version}-%{release} @@ -180,10 +182,10 @@ Group: Applications/Databases Provides: MySQL-client-advanced = %{version}-%{release} Obsoletes: MySQL-client-advanced < %{version}-%{release} Obsoletes: mysql-community-client < %{version}-%{release} -Requires: mysql-commercial-libs = %{version}-%{release} +Requires: mysql-commercial-libs >= %{min} %else Provides: MySQL-client = %{version}-%{release} -Requires: mysql-community-libs = %{version}-%{release} +Requires: mysql-community-libs >= %{min} %endif Obsoletes: MySQL-client < %{version}-%{release} Provides: mysql-client = %{version}-%{release} @@ -215,10 +217,10 @@ Group: Applications/Databases Provides: MySQL-test-advanced = %{version}-%{release} Obsoletes: MySQL-test-advanced < %{version}-%{release} Obsoletes: mysql-community-test < %{version}-%{release} -Requires: mysql-commercial-server = %{version}-%{release} +Requires: mysql-commercial-server >= %{min} %else Provides: MySQL-test = %{version}-%{release} -Requires: mysql-community-server = %{version}-%{release} +Requires: mysql-community-server >= %{min} %endif Obsoletes: MySQL-test < %{version}-%{release} Obsoletes: mysql-test < %{version}-%{release} @@ -236,9 +238,9 @@ Summary: MySQL benchmark suite 
Group: Applications/Databases %if 0%{?commercial} Obsoletes: mysql-community-bench < %{version}-%{release} -Requires: mysql-commercial-server = %{version}-%{release} +Requires: mysql-commercial-server >= %{min} %else -Requires: mysql-community-server = %{version}-%{release} +Requires: mysql-community-server >= %{min} %endif Obsoletes: mariadb-bench Obsoletes: community-mysql-bench < %{version}-%{release} @@ -257,10 +259,10 @@ Group: Applications/Databases Provides: MySQL-devel-advanced = %{version}-%{release} Obsoletes: MySQL-devel-advanced < %{version}-%{release} Obsoletes: mysql-community-devel < %{version}-%{release} -Requires: mysql-commercial-libs = %{version}-%{release} +Requires: mysql-commercial-libs >= %{min} %else Provides: MySQL-devel = %{version}-%{release} -Requires: mysql-community-libs = %{version}-%{release} +Requires: mysql-community-libs >= %{min} %endif Obsoletes: MySQL-devel < %{version}-%{release} Obsoletes: mysql-devel < %{version}-%{release} @@ -281,10 +283,10 @@ Group: Applications/Databases Provides: MySQL-shared-advanced = %{version}-%{release} Obsoletes: MySQL-shared-advanced < %{version}-%{release} Obsoletes: mysql-community-libs < %{version}-%{release} -Requires: mysql-commercial-common = %{version}-%{release} +Requires: mysql-commercial-common >= %{min} %else Provides: MySQL-shared = %{version}-%{release} -Requires: mysql-community-common = %{version}-%{release} +Requires: mysql-community-common >= %{min} %endif Obsoletes: MySQL-shared < %{version}-%{release} Obsoletes: mysql-libs < %{version}-%{release} @@ -307,10 +309,10 @@ Group: Applications/Databases Provides: MySQL-embedded-advanced = %{version}-%{release} Obsoletes: MySQL-embedded-advanced < %{version}-%{release} Obsoletes: mysql-community-embedded < %{version}-%{release} -Requires: mysql-commercial-common = %{version}-%{release} +Requires: mysql-commercial-common >= %{min} %else Provides: MySQL-embedded = %{version}-%{release} -Requires: mysql-community-common = %{version}-%{release} +Requires: mysql-community-common >= %{min} %endif Obsoletes: mariadb-embedded Obsoletes: MySQL-embedded < %{version}-%{release} @@ -334,11 +336,11 @@ Summary: Development header files and libraries for MySQL as an embeddabl Group: Applications/Databases %if 0%{?commercial} Obsoletes: mysql-community-embedded-devel < %{version}-%{release} -Requires: mysql-commercial-devel = %{version}-%{release} -Requires: mysql-commercial-embedded = %{version}-%{release} +Requires: mysql-commercial-devel >= %{min} +Requires: mysql-commercial-embedded >= %{min} %else -Requires: mysql-community-devel = %{version}-%{release} -Requires: mysql-community-embedded = %{version}-%{release} +Requires: mysql-community-devel >= %{min} +Requires: mysql-community-embedded >= %{min} %endif Obsoletes: mariadb-embedded-devel Obsoletes: mysql-embedded-devel < %{version}-%{release} -- cgit v1.2.1 From 9f7288e2e0179db478d20c74f57b5c7d6c95f793 Mon Sep 17 00:00:00 2001 From: Thayumanavar S Date: Mon, 20 Jun 2016 11:35:43 +0530 Subject: BUG#23080148 - BACKPORT BUG 14653594 AND BUG 20683959 TO MYSQL-5.5 The bug asks for a backport of bug#1463594 and bug#20682959. This is required because of the fact that if replication is enabled, master transaction can commit whereas slave can't commit due to not exact 'enviroment'. This manifestation is seen in bug#22024200. 
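The functional core of the backported change, visible in the sql/sql_load.cc hunks below, is that the input reader now treats a byte for which my_mbcharlen() returns 0, i.e. a byte that cannot start a character in the target charset, as a hard ER_INVALID_CHARACTER_STRING error instead of silently mishandling the row. A simplified, self-contained C++ sketch of that validation, using a stand-in length function rather than the real charset API:

  #include <cstdio>
  #include <string>

  /* Stand-in for my_mbcharlen(cs, byte): number of bytes in a character
     starting with 'lead' in a UTF-8-like encoding, or 0 when the byte
     cannot start a character at all. */
  static unsigned seq_length(unsigned char lead)
  {
    if (lead < 0x80) return 1;
    if ((lead & 0xE0) == 0xC0) return 2;
    if ((lead & 0xF0) == 0xE0) return 3;
    if ((lead & 0xF8) == 0xF0) return 4;
    return 0;                      /* continuation or invalid lead byte */
  }

  /* Reports an error and returns false instead of silently dropping data,
     mirroring the behaviour added by the fix. */
  static bool scan_field(const std::string &raw)
  {
    for (size_t i= 0; i < raw.size(); )
    {
      unsigned len= seq_length((unsigned char) raw[i]);
      if (len == 0 || i + len > raw.size())
      {
        fprintf(stderr, "invalid character string at byte %zu\n", i);
        return false;
      }
      i+= len;
    }
    return true;
  }

  int main()
  {
    scan_field("plain ascii");           /* accepted                          */
    scan_field("\xFF bad lead byte");    /* rejected: 0xFF cannot start a char */
    return 0;
  }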
--- mysql-test/r/loaddata.result | 26 +++++++++- mysql-test/std_data/bug20683959loaddata.txt | 1 + mysql-test/t/loaddata.test | 25 +++++++++- sql/sql_load.cc | 77 ++++++++++++++++++----------- 4 files changed, 99 insertions(+), 30 deletions(-) create mode 100644 mysql-test/std_data/bug20683959loaddata.txt diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index 2d67d24bedd..2f2a3579eec 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -507,7 +507,7 @@ DROP TABLE t1; # Bug#11765139 58069: LOAD DATA INFILE: VALGRIND REPORTS INVALID MEMORY READS AND WRITES WITH U # CREATE TABLE t1(f1 INT); -SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; +SELECT 0xE1C330 INTO OUTFILE 't1.dat'; LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; DROP TABLE t1; # @@ -532,3 +532,27 @@ FIELDS TERMINATED BY 't' LINES TERMINATED BY ''; Got one of the listed errors SET @@sql_mode= @old_mode; DROP TABLE t1; + +# +# Bug#23080148 - Backport of Bug#20683959. +# Bug#20683959 LOAD DATA INFILE IGNORES A SPECIFIC ROW SILENTLY +# UNDER DB CHARSET IS UTF8. +# +CREATE DATABASE d1 CHARSET latin1; +USE d1; +CREATE TABLE t1 (val TEXT); +LOAD DATA INFILE '../../std_data/bug20683959loaddata.txt' INTO TABLE t1; +SELECT COUNT(*) FROM t1; +COUNT(*) +1 +SELECT HEX(val) FROM t1; +HEX(val) +C38322525420406E696F757A656368756E3A20E98198E2889AF58081AEE7B99DE4B88AE383A3E7B99DE69690F58087B3E7B9A7EFBDA8E7B99DEFBDB3E7B99DE78999E880B3E7B8BAEFBDAAE7B9A7E89699E296A1E7B8BAE4BBA3EFBD8CE7B8BAEFBDA9E7B8B2E2889AE38184E7B99DEFBDB3E7B99DE4B88AE383A3E7B99DE69690F58087B3E7B9A7EFBDA8E7B99DEFBDB3E7B99DE5B3A8EFBD84E8ABA0EFBDA8E89C89F580948EE599AAE7B8BAEFBDAAE7B8BAE9A198EFBDA9EFBDB1E7B9A7E581B5E289A0E7B8BAEFBDBEE7B9A7E9A194EFBDA9E882B4EFBDA5EFBDB5E980A7F5808B96E28693E99EABE38287E58F99E7B8BAE58AB1E28691E7B8BAF5808B9AE7828AE98095EFBDB1E7B8BAEFBDAFE7B8B2E288ABE6A89FE89EB3E6BA98F58081ADE88EA0EFBDBAE98095E6BA98F58081AEE89D93EFBDBAE8AD9BEFBDACE980A7F5808B96E28693E7B8BAF580918EE288AAE7B8BAE4B88AEFBC9EE7B8BAE4B99DE28691E7B8BAF5808B96EFBCA0E88DB3E6A68AEFBDB9EFBDB3E981B2E5B3A8E296A1E7B8BAE7A4BCE7828AE88DB3E6A68AEFBDB0EFBDBDE7B8BAA0E7B8BAE88B93EFBDBEE5B899EFBC9E +CREATE DATABASE d2 CHARSET utf8; +USE d2; +CREATE TABLE t1 (val TEXT); +LOAD DATA INFILE '../../std_data/bug20683959loaddata.txt' INTO TABLE t1; +ERROR HY000: Invalid utf8 character string: 'Ã"RT @niouzechun: \9058\221A' +DROP TABLE d1.t1, d2.t1; +DROP DATABASE d1; +DROP DATABASE d2; diff --git a/mysql-test/std_data/bug20683959loaddata.txt b/mysql-test/std_data/bug20683959loaddata.txt new file mode 100644 index 00000000000..1878cc78879 --- /dev/null +++ b/mysql-test/std_data/bug20683959loaddata.txt @@ -0,0 +1 @@ +Ã"RT @niouzechun: é˜âˆšõ€®ç¹ä¸Šãƒ£ç¹æ–õ€‡³ç¹§ï½¨ç¹ï½³ç¹ç‰™è€³ç¸ºï½ªç¹§è–™â–¡ç¸ºä»£ï½Œç¸ºï½©ç¸²âˆšã„ç¹ï½³ç¹ä¸Šãƒ£ç¹æ–õ€‡³ç¹§ï½¨ç¹ï½³ç¹å³¨ï½„諠ィ蜉õ€”Žå™ªç¸ºï½ªç¸ºé¡˜ï½©ï½±ç¹§åµâ‰ ç¸ºï½¾ç¹§é¡”ゥ肴・オ逧õ€‹–↓鞫ょå™ç¸ºåŠ±â†‘縺õ€‹šç‚Šé€•ï½±ç¸ºï½¯ç¸²âˆ«æ¨Ÿèž³æº˜õ€­èŽ ï½ºé€•æº˜õ€®è“コ譛ャ逧õ€‹–↓縺õ€‘Žâˆªç¸ºä¸Šï¼žç¸ºä¹â†‘縺õ€‹–ï¼ è³æ¦Šï½¹ï½³é²å³¨â–¡ç¸ºç¤¼ç‚Šè³æ¦Šï½°ï½½ç¸º ç¸ºè‹“セ帙> diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index aa7be52484e..9a664b84843 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -610,7 +610,7 @@ disconnect con1; --echo # CREATE TABLE t1(f1 INT); -EVAL SELECT 0xE1BB30 INTO OUTFILE 't1.dat'; +EVAL SELECT 0xE1C330 INTO OUTFILE 't1.dat'; --disable_warnings LOAD DATA INFILE 't1.dat' IGNORE INTO TABLE t1 CHARACTER SET utf8; --enable_warnings @@ -656,3 +656,26 @@ SET @@sql_mode= @old_mode; 
--remove_file $MYSQLTEST_VARDIR/mysql DROP TABLE t1; +--echo +--echo # +--echo # Bug#23080148 - Backport of Bug#20683959. +--echo # Bug#20683959 LOAD DATA INFILE IGNORES A SPECIFIC ROW SILENTLY +--echo # UNDER DB CHARSET IS UTF8. +--echo # + +CREATE DATABASE d1 CHARSET latin1; +USE d1; +CREATE TABLE t1 (val TEXT); +LOAD DATA INFILE '../../std_data/bug20683959loaddata.txt' INTO TABLE t1; +SELECT COUNT(*) FROM t1; +SELECT HEX(val) FROM t1; + +CREATE DATABASE d2 CHARSET utf8; +USE d2; +CREATE TABLE t1 (val TEXT); +--error ER_INVALID_CHARACTER_STRING +LOAD DATA INFILE '../../std_data/bug20683959loaddata.txt' INTO TABLE t1; + +DROP TABLE d1.t1, d2.t1; +DROP DATABASE d1; +DROP DATABASE d2; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index c084e5e3839..a46967a24a8 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1363,8 +1363,8 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); - if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(0)))) - error=1; /* purecov: inspected */ + if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(MY_WME)))) + error= true; /* purecov: inspected */ else { end_of_buff=buffer+buff_length; @@ -1556,37 +1556,50 @@ int READ_INFO::read_field() } } #ifdef USE_MB - if (my_mbcharlen(read_charset, chr) > 1 && - to + my_mbcharlen(read_charset, chr) <= end_of_buff) - { - uchar* p= to; - int ml, i; - *to++ = chr; - - ml= my_mbcharlen(read_charset, chr); + uint ml= my_mbcharlen(read_charset, chr); + if (ml == 0) + { + *to= '\0'; + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), + read_charset->csname, buffer); + error= true; + return 1; + } - for (i= 1; i < ml; i++) + if (ml > 1 && + to + ml <= end_of_buff) { - chr= GET; - if (chr == my_b_EOF) + uchar* p= to; + *to++ = chr; + + for (uint i= 1; i < ml; i++) { - /* - Need to back up the bytes already ready from illformed - multi-byte char - */ - to-= i; - goto found_eof; + chr= GET; + if (chr == my_b_EOF) + { + /* + Need to back up the bytes already ready from illformed + multi-byte char + */ + to-= i; + goto found_eof; + } + *to++ = chr; } - *to++ = chr; - } - if (my_ismbchar(read_charset, + if (my_ismbchar(read_charset, (const char *)p, (const char *)to)) - continue; - for (i= 0; i < ml; i++) - PUSH(*--to); - chr= GET; - } + continue; + for (uint i= 0; i < ml; i++) + PUSH(*--to); + chr= GET; + } + else if (ml > 1) + { + // Buffer is too small, exit while loop, and reallocate. + PUSH(chr); + break; + } #endif *to++ = (uchar) chr; } @@ -1830,7 +1843,15 @@ int READ_INFO::read_value(int delim, String *val) for (chr= GET; my_tospace(chr) != delim && chr != my_b_EOF;) { #ifdef USE_MB - if (my_mbcharlen(read_charset, chr) > 1) + uint ml= my_mbcharlen(read_charset, chr); + if (ml == 0) + { + chr= my_b_EOF; + val->length(0); + return chr; + } + + if (ml > 1) { DBUG_PRINT("read_xml",("multi byte")); int i, ml= my_mbcharlen(read_charset, chr); -- cgit v1.2.1 From 9606525666eeead285f211bd0e603b50b4f09189 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Sat, 25 Jun 2016 21:38:40 -0700 Subject: Simplified the code that fills recursive tables. 
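The simplification centres on the new TABLE_LIST::fill_recursive() shown below: run one recursion step at a time until every mutually recursive element reports itself stabilized, then copy the accumulated rows into the referencing table in a single pass. The driving loop reduces to roughly the following hedged sketch; the types are invented stand-ins, and the real code threads THD and error handling through every call.

  #include <cstdio>

  /* Minimal stand-in for the unit/element pair used by fill_recursive(). */
  struct Recursion
  {
    int level;
    int max_level;
    bool stabilized() const { return level >= max_level; }

    /* One iteration: compute the rows added at this level.  Returns true
       on error, like st_select_lex_unit::exec_recursive(). */
    bool exec_recursive()
    {
      printf("executing recursive step at level %d\n", level);
      level++;
      return false;
    }
  };

  /* Shape of the new fill logic: iterate until stabilization, then a single
     final copy of the accumulated result (elided here). */
  static bool fill_recursive(Recursion &r)
  {
    bool rc= false;
    while (!r.stabilized() && !rc)
      rc= r.exec_recursive();
    /* ... on success the real code copies rec_result->table into the
       derived table with insert_all_rows_into(). */
    return rc;
  }

  int main()
  {
    Recursion r= {0, 3};
    return fill_recursive(r) ? 1 : 0;
  }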
--- sql/sql_cte.cc | 36 ++++++++----- sql/sql_cte.h | 91 +++++--------------------------- sql/sql_derived.cc | 47 +++++++++++------ sql/sql_lex.h | 2 +- sql/sql_select.cc | 14 ++--- sql/sql_union.cc | 148 +++++++++++++++++++++++------------------------------ sql/table.h | 2 +- 7 files changed, 135 insertions(+), 205 deletions(-) diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 04a4fcb8a2b..2b3a72c1b3a 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -248,22 +248,32 @@ bool With_clause::check_anchors() if (!with_elem->is_recursive) continue; - table_map with_elem_dep= with_elem->derived_dep_map; - table_map with_elem_map= with_elem->get_elem_map(); - for (With_element *elem= with_elem; - elem != NULL; - elem= elem->next_elem) + if (!with_elem->next_mutually_recursive) { - if (!elem->is_recursive) - continue; - - if (elem == with_elem || - ((elem->derived_dep_map & with_elem_map) && - (with_elem_dep & elem->get_elem_map()))) - { + With_element *last_mutually_recursive= with_elem; + table_map with_elem_dep= with_elem->derived_dep_map; + table_map with_elem_map= with_elem->get_elem_map(); + for (With_element *elem= with_elem; + elem != NULL; + elem= elem->next_elem) + { + if (!elem->is_recursive) + continue; + + if (elem == with_elem || + ((elem->derived_dep_map & with_elem_map) && + (with_elem_dep & elem->get_elem_map()))) + { + elem->next_mutually_recursive= with_elem; + last_mutually_recursive->next_mutually_recursive= elem; + last_mutually_recursive= elem; with_elem->mutually_recursive|= elem->get_elem_map(); - elem->mutually_recursive|= with_elem_map; } + } + for (With_element *elem= with_elem->next_mutually_recursive; + elem != with_elem; + elem= elem->next_mutually_recursive) + elem->mutually_recursive= with_elem->mutually_recursive; } for (st_select_lex *sl= with_elem->spec->first_select(); diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 744f50eb7ef..8b81644e838 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -39,6 +39,7 @@ private: table_map work_dep_map; // dependency map used for work /* Dependency map of with elements mutually recursive with this with element */ table_map mutually_recursive; + With_element *next_mutually_recursive; /* Total number of references to this element in the FROM lists of the queries that are in the scope of the element (including @@ -87,17 +88,21 @@ public: select_union_recursive *rec_result; TABLE *result_table; + + TABLE *first_rec_table_to_update; With_element(LEX_STRING *name, List list, st_select_lex_unit *unit) : next_elem(NULL), base_dep_map(0), derived_dep_map(0), - sq_dep_map(0), work_dep_map(0), mutually_recursive(0), + sq_dep_map(0), work_dep_map(0), mutually_recursive(0), + next_mutually_recursive(NULL), references(0), table(NULL), query_name(name), column_list(list), spec(unit), is_recursive(false), with_anchor(false), - level(0), rec_result(NULL), result_table(NULL) + level(0), rec_result(NULL), result_table(NULL), + first_rec_table_to_update(NULL) {} bool check_dependencies_in_spec(THD *thd); @@ -146,6 +151,9 @@ public: table_map get_mutually_recursive() { return mutually_recursive; } + With_element *get_next_mutually_recursive() + { return next_mutually_recursive; } + void set_table(TABLE *tab) { table= tab; } TABLE *get_table() { return table; } @@ -166,22 +174,6 @@ public: void reset_for_exec(); - bool no_driving_recursive_is_set(); - - void set_as_driving_recursive(); - - bool is_driving_recursive(); - - void cleanup_driving_recursive(); - - void cleanup_incr_ready(); - - void set_as_incr_ready(); - - bool is_incr_ready(); - - 
bool all_incr_are_ready(); - void cleanup_stabilized(); void set_as_stabilized(); @@ -228,8 +220,6 @@ private: table_map unrestricted; table_map with_prepared_anchor; table_map cleaned; - table_map driving_recursive; - table_map incr_ready; table_map stabilized; public: @@ -241,7 +231,7 @@ public: embedding_with_clause(emb_with_clause), next_with_clause(NULL), dependencies_are_checked(false), unrestricted(0), with_prepared_anchor(0), cleaned(0), - driving_recursive(0), incr_ready(0), stabilized(0), + stabilized(0), with_recursive(recursive_fl) { last_next= &first_elem; } @@ -331,68 +321,11 @@ void With_element::reset_for_exec() level= 0; owner->with_prepared_anchor&= ~mutually_recursive; owner->cleaned&= ~get_elem_map(); - owner->driving_recursive&= ~get_elem_map(); - cleanup_incr_ready(); + first_rec_table_to_update= NULL; cleanup_stabilized(); } -inline -bool With_element::no_driving_recursive_is_set() -{ - return !(owner->driving_recursive & mutually_recursive); -} - - -inline -void With_element::set_as_driving_recursive() -{ - owner->driving_recursive|= get_elem_map(); -} - - -inline -bool With_element::is_driving_recursive() -{ - return owner->driving_recursive & get_elem_map(); -} - - -inline -void With_element::cleanup_driving_recursive() -{ - owner->driving_recursive&= ~mutually_recursive; -} - - -inline -void With_element::cleanup_incr_ready() -{ - owner->incr_ready&= ~mutually_recursive; -} - - -inline -void With_element::set_as_incr_ready() -{ - owner->incr_ready|= get_elem_map(); -} - - -inline -bool With_element::is_incr_ready() -{ - return owner->incr_ready & get_elem_map(); -} - - -inline -bool With_element::all_incr_are_ready() -{ - return (owner->incr_ready & mutually_recursive) == mutually_recursive; -} - - inline void With_element::cleanup_stabilized() { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index f84cdd939fe..b1f665ad70b 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -663,7 +663,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) if (derived->is_with_table_recursive_reference()) { unit->with_element->rec_result->rec_tables.push_back(derived->table); - derived->table->is_rec_table= true; } } DBUG_ASSERT(derived->table || res); @@ -921,6 +920,28 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived) } +bool TABLE_LIST::fill_recursive(THD *thd) +{ + bool rc= false; + st_select_lex_unit *unit= get_unit(); + if (is_with_table_recursive_reference()) + rc= unit->exec_recursive(false); + else + { + while(!with->all_are_stabilized() && !rc) + { + rc= unit->exec_recursive(true); + } + if (!rc) + { + TABLE *src= with->rec_result->table; + rc =src->insert_all_rows_into(thd, table, true); + } + } + return rc; +} + + /* Execute subquery of a materialized derived table/view and fill the result table. 
@@ -944,6 +965,7 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived) @return TRUE Error */ + bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) { DBUG_ENTER("mysql_derived_fill"); @@ -951,14 +973,6 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) bool derived_is_recursive= derived->is_recursive_with_table(); bool res= FALSE; - if (derived_is_recursive && derived->with->all_are_stabilized()) - { - TABLE *src= unit->with_element->rec_result->table; - TABLE *dest= derived->table; - res= src->insert_all_rows_into(thd, dest, true); - DBUG_RETURN(res); - } - if (unit->executed && !unit->uncacheable && !unit->describe && !derived_is_recursive) DBUG_RETURN(FALSE); @@ -967,11 +981,14 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) SELECT_LEX *first_select= unit->first_select(); select_union *derived_result= derived->derived_result; SELECT_LEX *save_current_select= lex->current_select; - if (unit->is_union() || derived_is_recursive) + + if (derived_is_recursive) + { + res= derived->fill_recursive(thd); + } + else if (unit->is_union()) { // execute union without clean up - if (derived_is_recursive) - unit->with_element->set_result_table(derived->table); res= unit->exec(); } else @@ -995,15 +1012,13 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) derived_result, unit, first_select); } - if (!res) + if (!res && !derived_is_recursive) { if (derived_result->flush()) res= TRUE; unit->executed= TRUE; } - if (res || - (!lex->describe && - !(unit->with_element && unit->with_element->is_recursive))) + if (res || (!lex->describe && !derived_is_recursive)) unit->cleanup(); lex->current_select= save_current_select; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 8cfb2f99fb0..785908d9750 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -704,7 +704,7 @@ public: bool prepare(THD *thd, select_result *result, ulong additional_options); bool optimize(); bool exec(); - bool exec_recursive(); + bool exec_recursive(bool is_driving_recursive); bool cleanup(); inline void unclean() { cleaned= 0; } void reinit_exec_mechanism(); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 25a509472dc..9b537a61c29 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -11606,11 +11606,7 @@ bool JOIN_TAB::preread_init() /* Materialize derived table/view. 
*/ if ((!derived->get_unit()->executed || - (derived->is_recursive_with_table() && - (!derived->is_with_table_recursive_reference() || - (!derived->with->is_driving_recursive() && - !derived->with->is_incr_ready()) && - !derived->with->all_are_stabilized()))) && + derived->is_recursive_with_table()) && mysql_handle_single_derived(join->thd->lex, derived, DT_CREATE | DT_FILL)) return TRUE; @@ -18241,8 +18237,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) flush_dups_table->sj_weedout_delete_rows(); } - if ((!join_tab->preread_init_done || join_tab->table->is_rec_table) && - join_tab->preread_init()) + if (!join_tab->preread_init_done && join_tab->preread_init()) DBUG_RETURN(NESTED_LOOP_ERROR); join->return_tab= join_tab; @@ -19195,8 +19190,7 @@ int join_init_read_record(JOIN_TAB *tab) report_error(tab->table, error); return 1; } - if ((!tab->preread_init_done || tab->table->is_rec_table) && - tab->preread_init()) + if (!tab->preread_init_done && tab->preread_init()) return 1; if (init_read_record(&tab->read_record, tab->join->thd, tab->table, tab->select, tab->filesort_result, 1,1, FALSE)) @@ -19429,8 +19423,6 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!end_of_records) { -#if 0 -#endif if (join->table_count && join->join_tab->is_using_loose_index_scan()) { diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 14c66f6546c..4c32779f347 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -244,7 +244,6 @@ select_union_recursive::create_result_table(THD *thd_arg, if (rec_tables.push_back(rec_table)) return true; - rec_table->is_rec_table= true; return false; } @@ -918,9 +917,8 @@ bool st_select_lex_unit::exec() bool first_execution= !executed; DBUG_ENTER("st_select_lex_unit::exec"); bool was_executed= executed; - bool is_recursive= with_element && with_element->is_recursive; - if (executed && !uncacheable && !describe && !is_recursive) + if (executed && !uncacheable && !describe) DBUG_RETURN(FALSE); executed= 1; if (!(uncacheable & ~UNCACHEABLE_EXPLAIN) && item) @@ -936,12 +934,6 @@ bool st_select_lex_unit::exec() if (saved_error) DBUG_RETURN(saved_error); - if (is_recursive && !describe) - { - saved_error= exec_recursive(); - DBUG_RETURN(saved_error); - } - if (uncacheable || !item || !item->assigned() || describe) { if (!fake_select_lex && !(with_element && with_element->is_recursive)) @@ -1168,107 +1160,95 @@ err: -bool st_select_lex_unit::exec_recursive() +bool st_select_lex_unit::exec_recursive(bool is_driving_recursive) { st_select_lex *lex_select_save= thd->lex->current_select; - st_select_lex *first_recursive_sel= with_element->first_recursive; + st_select_lex *start= with_element->first_recursive; TABLE *incr_table= with_element->rec_result->incr_table; - TABLE *result_table= with_element->result_table; - ha_rows examined_rows= 0; - bool unrestricted= with_element->is_unrestricted(); - bool with_anchor= with_element->with_anchor; - uint max_level= thd->variables.max_recursion_level; + st_select_lex *end= NULL; + bool is_unrestricted= with_element->is_unrestricted(); List_iterator_fast
li(with_element->rec_result->rec_tables); + ha_rows examined_rows= 0; + bool was_executed= executed; TABLE *rec_table; DBUG_ENTER("st_select_lex_unit::exec_recursive"); - do - { - st_select_lex *first_sl; - st_select_lex *barrier; - if ((saved_error= incr_table->file->ha_delete_all_rows())) - goto err; + executed= 1; + create_explain_query_if_not_exists(thd->lex, thd->mem_root); + if (!was_executed) + save_union_explain(thd->lex->explain); - if (with_element->no_driving_recursive_is_set()) - with_element->set_as_driving_recursive(); + if ((saved_error= incr_table->file->ha_delete_all_rows())) + goto err; - if (with_element->level == 0) - { - first_sl= first_select(); - if (with_anchor) - barrier= first_recursive_sel; - else - barrier= NULL; - } - else + if (is_driving_recursive) + { + With_element *with_elem= with_element; + while ((with_elem= with_elem->get_next_mutually_recursive()) != + with_element) { - first_sl= first_recursive_sel; - barrier= NULL; + rec_table= with_elem->first_rec_table_to_update; + if (rec_table) + rec_table->reginfo.join_tab->preread_init_done= false; } + } - if (with_element->all_incr_are_ready()) - with_element->cleanup_incr_ready(); + if (with_element->level == 0) + { + start= first_select(); + if (with_element->with_anchor) + end= with_element->first_recursive; + } - for (st_select_lex *sl= first_sl ; sl != barrier; sl= sl->next_select()) + for (st_select_lex *sl= start ; sl != end; sl= sl->next_select()) + { + thd->lex->current_select= sl; + sl->join->exec(); + saved_error= sl->join->error; + if (!saved_error) { - thd->lex->current_select= sl; - sl->join->exec(); - saved_error= sl->join->error; - if (!saved_error) - { - examined_rows+= thd->get_examined_row_count(); - thd->set_examined_row_count(0); - if (union_result->flush()) - { - thd->lex->current_select= lex_select_save; - DBUG_RETURN(1); - } - } - if (saved_error) - { - thd->lex->current_select= lex_select_save; - goto err; - } + examined_rows+= thd->get_examined_row_count(); + thd->set_examined_row_count(0); + if (union_result->flush()) + { + thd->lex->current_select= lex_select_save; + DBUG_RETURN(1); + } } - - with_element->set_as_incr_ready(); - - incr_table->file->info(HA_STATUS_VARIABLE); - if (incr_table->file->stats.records == 0 || - with_element->level + 1 == max_level) - with_element->set_as_stabilized(); - else - with_element->level++; - - li.rewind(); - while ((rec_table= li++)) + if (saved_error) { - if ((saved_error= incr_table->insert_all_rows_into(thd, rec_table, - !unrestricted))) - goto err; + thd->lex->current_select= lex_select_save; + goto err; + } - - if (!with_element->is_driving_recursive()) - break; + } - } while (!with_element->all_are_stabilized()); + thd->inc_examined_row_count(examined_rows); - if (with_element->is_driving_recursive()) + incr_table->file->info(HA_STATUS_VARIABLE); + if (incr_table->file->stats.records == 0) + with_element->set_as_stabilized(); + else + with_element->level++; + + while ((rec_table= li++)) { - TABLE *table= with_element->rec_result->table; - if ((saved_error= table->insert_all_rows_into(thd, - result_table, - true))) - goto err; - with_element->cleanup_driving_recursive(); + saved_error= + incr_table->insert_all_rows_into(thd, rec_table, !is_unrestricted); + if (!with_element->first_rec_table_to_update) + with_element->first_rec_table_to_update= rec_table; + if (with_element->level == 1) + rec_table->reginfo.join_tab->preread_init_done= true; } + + if (with_element->level == thd->variables.max_recursion_level) + 
with_element->set_as_stabilized(); thd->lex->current_select= lex_select_save; err: thd->lex->set_limit_rows_examined(); - DBUG_RETURN(saved_error); - + DBUG_RETURN(saved_error); } diff --git a/sql/table.h b/sql/table.h index a8d01a64599..143bf17f4d4 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1244,7 +1244,6 @@ public: bool alias_name_used; /* true if table_name is alias */ bool get_fields_in_item_tree; /* Signal to fix_field */ bool m_needs_reopen; - bool is_rec_table; private: bool created; /* For tmp tables. TRUE <=> tmp table was actually created.*/ public: @@ -2234,6 +2233,7 @@ struct TABLE_LIST bool is_with_table(); bool is_recursive_with_table(); bool is_with_table_recursive_reference(); + bool fill_recursive(THD *thd); inline void set_view() { -- cgit v1.2.1 From 3fd214c8be7c2340ebe06f4c887c67f5c928e5f0 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 29 Jun 2016 16:50:53 -0400 Subject: MDEV-9423: cannot add new node to the cluser: Binlog.. .. file '/var/log/mysql/mariadb-bin.000001' not found in binlog index, needed for recovery. Aborting. In Galera cluster, while preparing for rsync/xtrabackup based SST, the donor node takes an FTWRL followed by (REFRESH_ENGINE_LOG in rsync based state transfer and) REFRESH_BINARY_LOG. The latter rotates the binary log and logs Binlog_checkpoint_log_event corresponding to the penultimate binary log file into the new file. The checkpoint event for the current file is later logged synchronously by binlog_background_thread. Now, since in rsync/xtrabackup based snapshot state transfer methods, only the last binary log file is transferred to the joiner node; the file could get transferred even before the checkpoint event for the same file gets written to it. As a result, the joiner node would fail to start complaining about the missing binlog file needed for recovery. In order to fix this, a mechanism has been put in place to make REFRESH_BINARY_LOG operation wait for Binlog_checkpoint_log_event to be logged for the current binary log file if the node is part of a Galera cluster. As further safety, during rsync based state transfer the donor node now acquires and owns LOCK_log for the duration of file transfer during SST. --- sql/log.cc | 29 ++++++++++++++++++++++++++--- sql/log.h | 1 + sql/sql_reload.cc | 6 ++++++ sql/wsrep_sst.cc | 20 ++++++++++++++++++++ 4 files changed, 53 insertions(+), 3 deletions(-) diff --git a/sql/log.cc b/sql/log.cc index bf8695e06da..7efec982de7 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3690,7 +3690,10 @@ bool MYSQL_BIN_LOG::open(const char *log_name, new_xid_list_entry->binlog_id= current_binlog_id; /* Remove any initial entries with no pending XIDs. 
*/ while ((b= binlog_xid_count_list.head()) && b->xid_count == 0) + { my_free(binlog_xid_count_list.get()); + } + mysql_cond_broadcast(&COND_xid_list); binlog_xid_count_list.push_back(new_xid_list_entry); mysql_mutex_unlock(&LOCK_xid_list); @@ -4227,6 +4230,7 @@ err: DBUG_ASSERT(b->xid_count == 0); my_free(binlog_xid_count_list.get()); } + mysql_cond_broadcast(&COND_xid_list); reset_master_pending--; mysql_mutex_unlock(&LOCK_xid_list); } @@ -4237,6 +4241,26 @@ err: } +void MYSQL_BIN_LOG::wait_for_last_checkpoint_event() +{ + mysql_mutex_lock(&LOCK_xid_list); + for (;;) + { + if (binlog_xid_count_list.is_last(binlog_xid_count_list.head())) + break; + mysql_cond_wait(&COND_xid_list, &LOCK_xid_list); + } + mysql_mutex_unlock(&LOCK_xid_list); + + /* + LOCK_xid_list and LOCK_log are chained, so the LOCK_log will only be + obtained after mark_xid_done() has written the last checkpoint event. + */ + mysql_mutex_lock(&LOCK_log); + mysql_mutex_unlock(&LOCK_log); +} + + /** Delete relay log files prior to rli->group_relay_log_name (i.e. all logs which are not involved in a non-finished group @@ -9394,7 +9418,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) */ if (unlikely(reset_master_pending)) { - mysql_cond_signal(&COND_xid_list); + mysql_cond_broadcast(&COND_xid_list); mysql_mutex_unlock(&LOCK_xid_list); DBUG_VOID_RETURN; } @@ -9432,8 +9456,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) mysql_mutex_lock(&LOCK_log); mysql_mutex_lock(&LOCK_xid_list); --mark_xid_done_waiting; - if (unlikely(reset_master_pending)) - mysql_cond_signal(&COND_xid_list); + mysql_cond_broadcast(&COND_xid_list); /* We need to reload current_binlog_id due to release/re-take of lock. */ current= current_binlog_id; diff --git a/sql/log.h b/sql/log.h index cdb2b9ce4b7..bf076fae31d 100644 --- a/sql/log.h +++ b/sql/log.h @@ -788,6 +788,7 @@ public: bool reset_logs(THD* thd, bool create_new_log, rpl_gtid *init_state, uint32 init_state_len, ulong next_log_number); + void wait_for_last_checkpoint_event(); void close(uint exiting); void clear_inuse_flag_when_closing(File file); diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index b99ddf45333..e361ed8b6e6 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -155,6 +155,12 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, { if (mysql_bin_log.rotate_and_purge(true)) *write_to_binlog= -1; + + if (WSREP_ON) + { + /* Wait for last binlog checkpoint event to be logged. */ + mysql_bin_log.wait_for_last_checkpoint_event(); + } } } if (options & REFRESH_RELAY_LOG) diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index c2a1ab58660..be10d2762a0 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -1120,6 +1120,16 @@ wait_signal: if (!err) { sst_disallow_writes (thd.ptr, true); + /* + Lets also keep statements that modify binary logs (like RESET LOGS, + RESET MASTER) from proceeding until the files have been transferred + to the joiner node. 
+ */ + if (mysql_bin_log.is_open()) + { + mysql_mutex_lock(mysql_bin_log.get_log_lock()); + } + locked= true; goto wait_signal; } @@ -1128,6 +1138,11 @@ wait_signal: { if (locked) { + if (mysql_bin_log.is_open()) + { + mysql_mutex_assert_owner(mysql_bin_log.get_log_lock()); + mysql_mutex_unlock(mysql_bin_log.get_log_lock()); + } sst_disallow_writes (thd.ptr, false); thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr); locked= false; @@ -1160,6 +1175,11 @@ wait_signal: if (locked) // don't forget to unlock server before return { + if (mysql_bin_log.is_open()) + { + mysql_mutex_assert_owner(mysql_bin_log.get_log_lock()); + mysql_mutex_unlock(mysql_bin_log.get_log_lock()); + } sst_disallow_writes (thd.ptr, false); thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr); } -- cgit v1.2.1 From 22c37c1fcf39cae7387248616d072f272e851cd3 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 29 Jun 2016 15:20:24 -0700 Subject: Adjusted test results. --- mysql-test/r/mysqld--help,win.rdiff | 6 ++--- mysql-test/suite/funcs_1/r/myisam_views-big.result | 2 +- .../sys_vars/r/sysvars_server_embedded.result | 28 ++++++++++++++++++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/mysqld--help,win.rdiff b/mysql-test/r/mysqld--help,win.rdiff index a3bf5c55ee8..4de251832e3 100644 --- a/mysql-test/r/mysqld--help,win.rdiff +++ b/mysql-test/r/mysqld--help,win.rdiff @@ -42,9 +42,9 @@ --stack-trace Print a symbolic stack trace on failure (Defaults to on; use --skip-stack-trace to disable.) + --standalone Dummy option to start as a standalone program (NT). - --stored-program-cache=# - The soft upper limit for number of cached stored routines - for one connection. + --standards-compliant-cte + Allow only standards compiant CTE + (Defaults to on; use --skip-standards-compliant-cte to disable.) @@ -1070,25 +1078,11 @@ --thread-cache-size=# How many threads we should keep in a cache for reuse. 
diff --git a/mysql-test/suite/funcs_1/r/myisam_views-big.result b/mysql-test/suite/funcs_1/r/myisam_views-big.result index 0e0bede7257..49f3c8fb133 100644 --- a/mysql-test/suite/funcs_1/r/myisam_views-big.result +++ b/mysql-test/suite/funcs_1/r/myisam_views-big.result @@ -4000,7 +4000,7 @@ DROP VIEW IF EXISTS v2 ; CREATE TABLE t1 (f1 BIGINT) ; SET @x=0; CREATE or REPLACE VIEW v1 AS Select 1 INTO @x; -ERROR HY000: View's SELECT contains a 'INTO' clause +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INTO @x' at line 1 Select @x; @x 0 diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 6dca5206946..69237f42208 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -1983,6 +1983,20 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME MAX_RECURSION_LEVEL +SESSION_VALUE 4294967295 +GLOBAL_VALUE 4294967295 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 4294967295 +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Maximum number of iterations when executing recursive queries +NUMERIC_MIN_VALUE 0 +NUMERIC_MAX_VALUE 4294967295 +NUMERIC_BLOCK_SIZE 1 +ENUM_VALUE_LIST NULL +READ_ONLY NO ++COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME MAX_SEEKS_FOR_KEY SESSION_VALUE 4294967295 GLOBAL_VALUE 4294967295 @@ -3733,6 +3747,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT NULL +VARIABLE_NAME STANDARDS_COMPLIANT_CTE +SESSION_VALUE ON +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Allow only standards compiant CTE +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME STORAGE_ENGINE SESSION_VALUE MyISAM GLOBAL_VALUE MyISAM -- cgit v1.2.1 From a7814d44fc50ecb270bf9816de7b019a71405e46 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 30 Jun 2016 12:59:52 +0400 Subject: MDEV-10311 - funcs_1.processlist_priv_no_prot fails sporadically State column of SHOW PROCESSLIST can have NULL values for being initialized threads (between new connection was acknowledged and waiting for network data). Fixed test case to handle such cases by waiting for State to become empty string. 
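
Illustrative only: the NULL State described above can be observed directly, which is why the test now also requires an empty string instead of relying on COMMAND alone. The query below is a sketch of that idea, not part of the patch:

-- Sketch only: shows why "state=''" filters out still-initializing threads.
SELECT ID, USER, COMMAND, STATE,
       STATE IS NULL AS still_initializing,
       STATE <=> ''  AS idle_and_settled
FROM INFORMATION_SCHEMA.PROCESSLIST;
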
--- mysql-test/suite/funcs_1/datadict/processlist_priv.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc index b863b98d98a..38b9a3e309e 100644 --- a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc +++ b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc @@ -153,7 +153,7 @@ connection default; let $wait_timeout= 10; let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST -WHERE DB = 'information_schema' AND COMMAND = 'Sleep' AND USER = 'ddicttestuser1'; +WHERE DB = 'information_schema' AND COMMAND = 'Sleep' AND USER = 'ddicttestuser1' AND state=''; --source include/wait_condition.inc --replace_result ENGINE=MyISAM "" ENGINE=Aria "" " PAGE_CHECKSUM=1" "" " PAGE_CHECKSUM=0" "" eval SHOW CREATE TABLE $table; -- cgit v1.2.1 From 10880d67b95d29dc1a764f2ee5c2010dc89659df Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Thu, 30 Jun 2016 10:24:54 -0400 Subject: Postfix: memory leak in XtraDB --- storage/xtradb/srv/srv0srv.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 73fc93f9597..4377801f117 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -1209,13 +1209,14 @@ srv_free(void) os_event_free(srv_buf_dump_event); os_event_free(srv_checkpoint_completed_event); os_event_free(srv_redo_log_tracked_event); -#ifdef WITH_INNODB_DISALLOW_WRITES - os_event_free(srv_allow_writes_event); -#endif /* WITH_INNODB_DISALLOW_WRITES */ mutex_free(&srv_sys->mutex); mutex_free(&srv_sys->tasks_mutex); } +#ifdef WITH_INNODB_DISALLOW_WRITES + os_event_free(srv_allow_writes_event); +#endif /* WITH_INNODB_DISALLOW_WRITES */ + #ifndef HAVE_ATOMIC_BUILTINS mutex_free(&server_mutex); #endif -- cgit v1.2.1 From 8c6a9aa30f9e74388aaf923ac8e3b19ca0f86188 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 30 Jun 2016 15:13:12 -0700 Subject: Added a proper check for acceptable mutually recursive CTE. 
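
Terminology note for the new error messages in the patch below: the "anchor" of a recursive WITH element is its non-recursive branch, the part that seeds the first iteration. For contrast with the rejected mutually recursive definitions added to the test, here is a hedged, self-contained sketch (names are made up) of a well-formed recursive CTE with an anchor:

-- Sketch only: a recursive CTE whose anchor makes the recursion acceptable.
WITH RECURSIVE cnt(n) AS
(
  SELECT 1                           -- anchor: seeds the first iteration
  UNION
  SELECT n + 1 FROM cnt WHERE n < 5  -- recursive branch
)
SELECT n FROM cnt;
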
--- mysql-test/r/cte_recursive.result | 17 ++++- .../sys_vars/r/sysvars_server_embedded.result | 2 +- mysql-test/t/cte_recursive.test | 19 +++++- sql/share/errmsg-utf8.txt | 2 + sql/sql_cte.cc | 73 ++++++++++++---------- 5 files changed, 77 insertions(+), 36 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 2d8fac6269d..22faade0b9f 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -4,6 +4,21 @@ insert into t1 values insert into t1 values (3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); with recursive +t as +( +select * from t1 where t1.b >= 'c' + union +select * from r +), +r as +( +select * from t +union +select t1.* from t1,r where r.a+1 = t1.a +) +select * from r; +ERROR HY000: Unacceptable mutual recursion with anchored table 't' +with recursive a1(a,b) as (select * from t1 where t1.a>3 union @@ -19,7 +34,7 @@ c1(a,b) as union select * from b1 where b1.b > 'auu') select * from c1; -ERROR HY000: No anchors for recursive WITH element 'b1' +ERROR HY000: Unacceptable mutual recursion with anchored table 'a1' drop table t1; # WITH RECURSIVE vs just WITH create table t1 (a int); diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 69237f42208..9fd1e249e64 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -1996,7 +1996,7 @@ NUMERIC_MAX_VALUE 4294967295 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO -+COMMAND_LINE_ARGUMENT OPTIONAL +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME MAX_SEEKS_FOR_KEY SESSION_VALUE 4294967295 GLOBAL_VALUE 4294967295 diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 911c381d46a..8f85c7b0480 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -4,7 +4,24 @@ insert into t1 values insert into t1 values (3,'eee'), (7,'bb'), (1,'fff'), (4,'ggg'); ---ERROR ER_RECURSIVE_WITHOUT_ANCHORS +--ERROR ER_UNACCEPTABLE_MUTUAL_RECURSION +with recursive +t as +( + select * from t1 where t1.b >= 'c' + union + select * from r +), +r as +( + select * from t + union + select t1.* from t1,r where r.a+1 = t1.a +) +select * from r; + + +--ERROR ER_UNACCEPTABLE_MUTUAL_RECURSION with recursive a1(a,b) as (select * from t1 where t1.a>3 diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index f38182d10c0..6c921789eca 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7154,6 +7154,8 @@ ER_DUP_QUERY_NAME eng "Duplicate query name in WITH clause" ER_RECURSIVE_WITHOUT_ANCHORS eng "No anchors for recursive WITH element '%s'" +ER_UNACCEPTABLE_MUTUAL_RECURSION + eng "Unacceptable mutual recursion with anchored table '%s'" ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED eng "Reference to recursive WITH table '%s' in materiazed derived" ER_NOT_STANDARDS_COMPLIANT_RECURSIVE diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 2b3a72c1b3a..3c663d7d260 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -292,43 +292,50 @@ bool With_clause::check_anchors() with_elem != NULL; with_elem= with_elem->next_elem) { - if (!with_elem->is_recursive || with_elem->with_anchor) + if (!with_elem->is_recursive) continue; - - table_map anchored= 0; - for (With_element *elem= with_elem; - elem != NULL; - elem= elem->next_elem) - { - if (elem->mutually_recursive && elem->with_anchor) - anchored |= elem->get_elem_map(); - } - table_map non_anchored= 
with_elem->mutually_recursive & ~anchored; - with_elem->work_dep_map= non_anchored & with_elem->base_dep_map; - } - - /*Building transitive clousure on work_dep_map*/ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) - { - table_map with_elem_map= with_elem->get_elem_map(); - for (With_element *elem= first_elem; elem != NULL; elem= elem->next_elem) + + if (!with_elem->with_anchor) { - if (elem->work_dep_map & with_elem_map) - elem->work_dep_map|= with_elem->work_dep_map; + With_element *elem= with_elem; + while ((elem= elem->get_next_mutually_recursive()) != with_elem) + { + if (elem->with_anchor) + break; + } + if (elem == with_elem) + { + my_error(ER_RECURSIVE_WITHOUT_ANCHORS, MYF(0), + with_elem->query_name->str); + return true; + } } - } - - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) - { - if (with_elem->work_dep_map & with_elem->get_elem_map()) + else { - my_error(ER_RECURSIVE_WITHOUT_ANCHORS, MYF(0), - with_elem->query_name->str); - return true; + With_element *elem= with_elem; + while ((elem= elem->get_next_mutually_recursive()) != with_elem) + elem->work_dep_map= elem->base_dep_map & elem->mutually_recursive; + elem= with_elem; + while ((elem= elem->get_next_mutually_recursive()) != with_elem) + { + table_map elem_map= elem->get_elem_map(); + With_element *el= with_elem; + while ((el= el->get_next_mutually_recursive()) != with_elem) + { + if (el->work_dep_map & elem_map) + el->work_dep_map|= elem->work_dep_map; + } + } + elem= with_elem; + while ((elem= elem->get_next_mutually_recursive()) != with_elem) + { + if (elem->work_dep_map & elem->get_elem_map()) + { + my_error(ER_UNACCEPTABLE_MUTUAL_RECURSION, MYF(0), + with_elem->query_name->str); + return true; + } + } } } -- cgit v1.2.1 From 37b08eff90e6af3a63a7cad43ec2c79ce75d7116 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 1 Jul 2016 17:10:46 +1000 Subject: Cross Compile HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE change to compile check HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE only needed a compile check rather than a RUN check so after changing to a compile check there is one less variable to manually set while cross compiling. --- configure.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.cmake b/configure.cmake index 5b8bc3688c7..896226de954 100644 --- a/configure.cmake +++ b/configure.cmake @@ -1107,7 +1107,7 @@ CHECK_STRUCT_HAS_MEMBER("struct timespec" tv_sec "time.h" STRUCT_TIMESPEC_HAS_TV CHECK_STRUCT_HAS_MEMBER("struct timespec" tv_nsec "time.h" STRUCT_TIMESPEC_HAS_TV_NSEC) IF(NOT MSVC) - CHECK_C_SOURCE_RUNS( + CHECK_C_SOURCE_COMPILES( " #define _GNU_SOURCE #include -- cgit v1.2.1 From ccdd63388a789f407d502848d12c618593a5ded1 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Fri, 1 Jul 2016 11:30:38 -0400 Subject: bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 9032212ff31..ac291a97eb1 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=1 -MYSQL_VERSION_PATCH=15 +MYSQL_VERSION_PATCH=16 -- cgit v1.2.1 From f832b47833bd03fbb9e972508097f3f88c0ba184 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Sun, 3 Jul 2016 10:41:16 +0400 Subject: Removing the "thd" argument from Item::create_field_for_create_select(). "thd" is available through the "table" argument, as table->in_use. Backporting (partially) from 10.2. 
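
Context for the refactoring below: create_field_for_create_select() decides which column type a CREATE TABLE ... SELECT assigns to each selected expression, and the commit only changes how THD is obtained, so behaviour should be unchanged. A small illustration of the affected statement shape, with invented table and column names:

-- Sketch only: invented names, showing the CREATE ... SELECT code path.
CREATE TABLE src (i INT, d DECIMAL(10,2), s VARCHAR(10));
CREATE TABLE dst AS
SELECT i + 1         AS i_plus_one,  -- integer expression -> integer column
       d * 2         AS d_doubled,   -- decimal expression -> decimal column
       CONCAT(s,'!') AS s_bang       -- string expression  -> string column
FROM src;
SHOW CREATE TABLE dst;
DROP TABLE src, dst;
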
--- sql/item.h | 2 +- sql/item_func.h | 4 ++-- sql/sql_insert.cc | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sql/item.h b/sql/item.h index 93da3985d4c..8650eb9382c 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1023,7 +1023,7 @@ public: virtual Field *get_tmp_table_field() { return 0; } /* This is also used to create fields in CREATE ... SELECT: */ virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } - virtual Field *create_field_for_create_select(THD *thd, TABLE *table); + virtual Field *create_field_for_create_select(TABLE *table); virtual Field *create_field_for_schema(THD *thd, TABLE *table); virtual const char *full_name() const { return name ? name : "???"; } const char *field_name_or_null() diff --git a/sql/item_func.h b/sql/item_func.h index 6a1494040c9..a6d0374d491 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -175,7 +175,7 @@ public: friend class udf_handler; Field *tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg); - Field *create_field_for_create_select(THD *thd, TABLE *table) + Field *create_field_for_create_select(TABLE *table) { return result_type() != STRING_RESULT ? tmp_table_field(table) : @@ -1762,7 +1762,7 @@ public: bool update(); bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec(); - Field *create_field_for_create_select(THD *thd, TABLE *table) + Field *create_field_for_create_select(TABLE *table) { return result_type() != STRING_RESULT ? tmp_table_field(table) : diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index fcf8c143ec4..3efdd2535c2 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3924,10 +3924,10 @@ void select_insert::abort_result_set() { CREATE TABLE (SELECT) ... ***************************************************************************/ -Field *Item::create_field_for_create_select(THD *thd, TABLE *table) +Field *Item::create_field_for_create_select(TABLE *table) { Field *def_field, *tmp_field; - return create_tmp_field(thd, table, this, type(), + return create_tmp_field(table->in_use, table, this, type(), (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0); } @@ -4002,7 +4002,7 @@ static TABLE *create_table_from_items(THD *thd, while ((item=it++)) { - Field *tmp_field= item->create_field_for_create_select(thd, &tmp_table); + Field *tmp_field= item->create_field_for_create_select(&tmp_table); if (!tmp_field) DBUG_RETURN(NULL); -- cgit v1.2.1 From 3ccf8218bc03a9cc598cd2da5c5a98ea2412cc05 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Sun, 3 Jul 2016 11:20:46 +0400 Subject: Partial backporting of 7b50447aa6d051b8d14bb01ef14802cb8ffee223 (MDEV-9407, MDEV-9408) from 10.1 Needed to fix MDEV-10317 easier. --- sql/item.h | 17 +++++--- sql/item_cmpfunc.cc | 5 --- sql/item_cmpfunc.h | 4 +- sql/item_func.cc | 51 +----------------------- sql/item_func.h | 14 ++++--- sql/item_geofunc.cc | 2 +- sql/item_geofunc.h | 2 +- sql/item_sum.h | 3 +- sql/item_timefunc.h | 2 +- sql/sql_insert.cc | 4 +- sql/sql_select.cc | 110 +++++++++++++++++++++++++++------------------------- 11 files changed, 87 insertions(+), 127 deletions(-) diff --git a/sql/item.h b/sql/item.h index 8650eb9382c..1905e0f1216 100644 --- a/sql/item.h +++ b/sql/item.h @@ -657,6 +657,8 @@ protected: SEL_TREE *get_mm_tree_for_const(RANGE_OPT_PARAM *param); + Field *create_tmp_field(bool group, TABLE *table, uint convert_int_length); + public: /* Cache val_str() into the own buffer, e.g. 
to evaluate constant @@ -1021,8 +1023,6 @@ public: int save_str_value_in_field(Field *field, String *result); virtual Field *get_tmp_table_field() { return 0; } - /* This is also used to create fields in CREATE ... SELECT: */ - virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } virtual Field *create_field_for_create_select(TABLE *table); virtual Field *create_field_for_schema(THD *thd, TABLE *table); virtual const char *full_name() const { return name ? name : "???"; } @@ -1630,6 +1630,15 @@ public: // used in row subselects to get value of elements virtual void bring_value() {} + virtual Field *create_tmp_field(bool group, TABLE *table) + { + /* + Values with MY_INT32_NUM_DECIMAL_DIGITS digits may or may not fit into + Field_long : make them Field_longlong. + */ + return create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS - 2); + } + Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length, bool set_blob_packlength); @@ -2224,7 +2233,6 @@ public: {} ~Item_result_field() {} /* Required with gcc 2.95 */ Field *get_tmp_table_field() { return result_field; } - Field *tmp_table_field(TABLE *t_arg) { return result_field; } /* This implementation of used_tables() used by Item_avg_field and Item_variance_field which work when only temporary table left, so theu @@ -3397,8 +3405,6 @@ public: { return val_real_from_date(); } my_decimal *val_decimal(my_decimal *decimal_value) { return val_decimal_from_date(decimal_value); } - Field *tmp_table_field(TABLE *table) - { return tmp_table_field_from_field_type(table, false, false); } int save_in_field(Field *field, bool no_conversions) { return save_date_in_field(field); } }; @@ -3903,7 +3909,6 @@ public: enum_field_types field_type() const { return (*ref)->field_type(); } Field *get_tmp_table_field() { return result_field ? 
result_field : (*ref)->get_tmp_table_field(); } - Field *tmp_table_field(TABLE *t_arg) { return 0; } Item *get_tmp_table_item(THD *thd); table_map used_tables() const; void update_used_tables(); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index f3196c55873..bd1e8b72157 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -2263,11 +2263,6 @@ uint Item_func_case_abbreviation2::decimal_precision2(Item **args) const } -Field *Item_func_ifnull::tmp_table_field(TABLE *table) -{ - return tmp_table_field_from_field_type(table, false, false); -} - double Item_func_ifnull::real_op() { diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 2f66382941d..2e066b895e9 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -951,7 +951,9 @@ public: maybe_null= args[1]->maybe_null; } const char *func_name() const { return "ifnull"; } - Field *tmp_table_field(TABLE *table); + Field *create_field_for_create_select(TABLE *table) + { return tmp_table_field_from_field_type(table, false, false); } + table_map not_null_tables() const { return 0; } uint decimal_precision() const { diff --git a/sql/item_func.cc b/sql/item_func.cc index 6edb276ca20..0d21183cac8 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -509,43 +509,6 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const } -Field *Item_func::tmp_table_field(TABLE *table) -{ - Field *field= NULL; - MEM_ROOT *mem_root= table->in_use->mem_root; - - switch (result_type()) { - case INT_RESULT: - if (max_char_length() > MY_INT32_NUM_DECIMAL_DIGITS) - field= new (mem_root) - Field_longlong(max_char_length(), maybe_null, name, - unsigned_flag); - else - field= new (mem_root) - Field_long(max_char_length(), maybe_null, name, - unsigned_flag); - break; - case REAL_RESULT: - field= new (mem_root) - Field_double(max_char_length(), maybe_null, name, decimals); - break; - case STRING_RESULT: - return make_string_field(table); - case DECIMAL_RESULT: - field= Field_new_decimal::create_from_item(mem_root, this); - break; - case ROW_RESULT: - case TIME_RESULT: - // This case should never be chosen - DBUG_ASSERT(0); - field= 0; - break; - } - if (field) - field->init(table); - return field; -} - /* bool Item_func::is_expensive_processor(uchar *arg) { @@ -2910,10 +2873,10 @@ void Item_func_min_max::fix_length_and_dec() collation.set_numeric(); fix_char_length(float_length(decimals)); /* - Set type to DOUBLE, as Item_func::tmp_table_field() does not + Set type to DOUBLE, as Item_func::create_tmp_field() does not distinguish between DOUBLE and FLOAT and always creates Field_double. Perhaps we should eventually change this to use agg_field_type() here, - and fix Item_func::tmp_table_field() to create Field_float when possible. + and fix Item_func::create_tmp_field() to create Field_float when possible. */ set_handler_by_field_type(MYSQL_TYPE_DOUBLE); break; @@ -6805,16 +6768,6 @@ longlong Item_func_found_rows::val_int() } -Field * -Item_func_sp::tmp_table_field(TABLE *t_arg) -{ - DBUG_ENTER("Item_func_sp::tmp_table_field"); - - DBUG_ASSERT(sp_result_field); - DBUG_RETURN(sp_result_field); -} - - /** @brief Checks if requested access to function can be granted to user. If function isn't found yet, it searches function first. 
diff --git a/sql/item_func.h b/sql/item_func.h index a6d0374d491..47af2a3f898 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -173,12 +173,10 @@ public: } void signal_divide_by_null(); friend class udf_handler; - Field *tmp_table_field() { return result_field; } - Field *tmp_table_field(TABLE *t_arg); Field *create_field_for_create_select(TABLE *table) { return result_type() != STRING_RESULT ? - tmp_table_field(table) : + create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) : tmp_table_field_from_field_type(table, false, false); } Item *get_tmp_table_item(THD *thd); @@ -1765,7 +1763,7 @@ public: Field *create_field_for_create_select(TABLE *table) { return result_type() != STRING_RESULT ? - tmp_table_field(table) : + create_tmp_field(false, table, MY_INT32_NUM_DECIMAL_DIGITS) : tmp_table_field_from_field_type(table, false, true); } table_map used_tables() const @@ -2106,8 +2104,12 @@ public: enum enum_field_types field_type() const; - Field *tmp_table_field(TABLE *t_arg); - + Field *create_field_for_create_select(TABLE *table) + { + return result_type() != STRING_RESULT ? + sp_result_field : + tmp_table_field_from_field_type(table, false, false); + } void make_field(Send_field *tmp_field); Item_result result_type() const; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 76e4a85e865..c856aa985b3 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -40,7 +40,7 @@ #include "opt_range.h" -Field *Item_geometry_func::tmp_table_field(TABLE *t_arg) +Field *Item_geometry_func::create_field_for_create_select(TABLE *t_arg) { Field *result; if ((result= new Field_geom(max_length, maybe_null, name, t_arg->s, diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 6b991a1b643..121e122d939 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -40,7 +40,7 @@ public: Item_geometry_func(THD *thd, List &list): Item_str_func(thd, list) {} void fix_length_and_dec(); enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } - Field *tmp_table_field(TABLE *t_arg); + Field *create_field_for_create_select(TABLE *table); }; class Item_func_geometry_from_text: public Item_geometry_func diff --git a/sql/item_sum.h b/sql/item_sum.h index 11d2f802af7..8568eaae907 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -481,7 +481,7 @@ public: } virtual void make_unique() { force_copy_fields= TRUE; } Item *get_tmp_table_item(THD *thd); - virtual Field *create_tmp_field(bool group, TABLE *table); + Field *create_tmp_field(bool group, TABLE *table); virtual bool collect_outer_ref_processor(uchar *param); bool init_sum_func_check(THD *thd); bool check_sum_func(THD *thd, Item **ref); @@ -1084,7 +1084,6 @@ public: fixed= true; } table_map used_tables() const { return (table_map) 1L; } - Field *tmp_table_field(TABLE *) { DBUG_ASSERT(0); return NULL; } void set_result_field(Field *) { DBUG_ASSERT(0); } void save_in_result_field(bool no_conversions) { DBUG_ASSERT(0); } }; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index bb840987089..2c1a0943699 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -500,7 +500,7 @@ public: bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date) { DBUG_ASSERT(0); return 1; } my_decimal *val_decimal(my_decimal *decimal_value) { return val_decimal_from_date(decimal_value); } - Field *tmp_table_field(TABLE *table) + Field *create_field_for_create_select(TABLE *table) { return tmp_table_field_from_field_type(table, false, false); } int save_in_field(Field *field, bool no_conversions) { return save_date_in_field(field); } diff --git 
a/sql/sql_insert.cc b/sql/sql_insert.cc index 3efdd2535c2..b97bae6b23c 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3927,8 +3927,8 @@ void select_insert::abort_result_set() { Field *Item::create_field_for_create_select(TABLE *table) { Field *def_field, *tmp_field; - return create_tmp_field(table->in_use, table, this, type(), - (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0); + return ::create_tmp_field(table->in_use, table, this, type(), + (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 86a7309315a..387e0403f96 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -15837,6 +15837,60 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field, return new_field; } + +Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length) +{ + Field *UNINIT_VAR(new_field); + MEM_ROOT *mem_root= table->in_use->mem_root; + + switch (cmp_type()) { + case REAL_RESULT: + new_field= new (mem_root) + Field_double(max_length, maybe_null, name, decimals, TRUE); + break; + case INT_RESULT: + /* + Select an integer type with the minimal fit precision. + convert_int_length is sign inclusive, don't consider the sign. + */ + if (max_char_length() > convert_int_length) + new_field= new (mem_root) + Field_longlong(max_char_length(), maybe_null, name, unsigned_flag); + else + new_field= new (mem_root) + Field_long(max_char_length(), maybe_null, name, unsigned_flag); + break; + case TIME_RESULT: + new_field= tmp_table_field_from_field_type(table, true, false); + break; + case STRING_RESULT: + DBUG_ASSERT(collation.collation); + /* + GEOMETRY fields have STRING_RESULT result type. + To preserve type they needed to be handled separately. + */ + if (field_type() == MYSQL_TYPE_GEOMETRY) + new_field= tmp_table_field_from_field_type(table, true, false); + else + new_field= make_string_field(table); + new_field->set_derivation(collation.derivation, collation.repertoire); + break; + case DECIMAL_RESULT: + new_field= Field_new_decimal::create_from_item(mem_root, this); + break; + case ROW_RESULT: + // This case should never be choosen + DBUG_ASSERT(0); + new_field= 0; + break; + } + if (new_field) + new_field->init(table); + return new_field; +} + + + /** Create field for temporary table using type of given item. @@ -15862,58 +15916,9 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field, static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, Item ***copy_func, bool modify_item) { - bool maybe_null= item->maybe_null; Field *UNINIT_VAR(new_field); - MEM_ROOT *mem_root= thd->mem_root; - - /* - To preserve type or DATE/TIME and GEOMETRY fields, - they need to be handled separately. - */ - if (item->cmp_type() == TIME_RESULT || - item->field_type() == MYSQL_TYPE_GEOMETRY) - new_field= item->tmp_table_field_from_field_type(table, true, false); - else - switch (item->result_type()) { - case REAL_RESULT: - new_field= new (mem_root) - Field_double(item->max_length, maybe_null, - item->name, item->decimals, TRUE); - break; - case INT_RESULT: - /* - Select an integer type with the minimal fit precision. - MY_INT32_NUM_DECIMAL_DIGITS is sign inclusive, don't consider the sign. - Values with MY_INT32_NUM_DECIMAL_DIGITS digits may or may not fit into - Field_long : make them Field_longlong. 
- */ - if (item->max_length >= (MY_INT32_NUM_DECIMAL_DIGITS - 1)) - new_field=new (mem_root) - Field_longlong(item->max_length, maybe_null, - item->name, item->unsigned_flag); - else - new_field=new (mem_root) - Field_long(item->max_length, maybe_null, item->name, - item->unsigned_flag); - break; - case STRING_RESULT: - DBUG_ASSERT(item->collation.collation); - new_field= item->make_string_field(table); - new_field->set_derivation(item->collation.derivation, - item->collation.repertoire); - break; - case DECIMAL_RESULT: - new_field= Field_new_decimal::create_from_item(mem_root, item); - break; - case ROW_RESULT: - default: - // This case should never be choosen - DBUG_ASSERT(0); - new_field= 0; - break; - } - if (new_field) - new_field->init(table); + DBUG_ASSERT(thd == table->in_use); + new_field= item->Item::create_tmp_field(false, table); if (copy_func && item->real_item()->is_result_field()) *((*copy_func)++) = item; // Save for copy_funcs @@ -16005,8 +16010,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, switch (type) { case Item::SUM_FUNC_ITEM: { - Item_sum *item_sum=(Item_sum*) item; - result= item_sum->create_tmp_field(group, table); + result= item->create_tmp_field(group, table); if (!result) my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR)); return result; -- cgit v1.2.1 From 1ec91803aca76c999d34d9f17938b976093cb67a Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Sun, 3 Jul 2016 13:52:06 +0400 Subject: MDEV-10317 EXCTACT(MINUTE_MICROSECOND) truncates data --- mysql-test/r/func_time.result | 329 ++++++++++++++++++++++++++++++++++++++++++ mysql-test/t/func_time.test | 74 ++++++++++ sql/item_timefunc.cc | 40 ++--- sql/item_timefunc.h | 49 +++++++ 4 files changed, 472 insertions(+), 20 deletions(-) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index ad51010de30..a6c63e6593a 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -2806,3 +2806,332 @@ Warning 1292 Truncated incorrect time value: '-1441:00:00' # # End of 10.0 tests # +# +# Start of 10.1 tests +# +# +# MDEV-10317 EXCTACT(MINUTE_MICROSECOND) truncates data +# +CREATE TABLE t1 (a DATETIME(6)); +INSERT INTO t1 VALUES ('1999-12-31 23:59:59.999999'); +SELECT +a, +EXTRACT(YEAR FROM a), +EXTRACT(YEAR_MONTH FROM a), +EXTRACT(QUARTER FROM a), +EXTRACT(MONTH FROM a), +EXTRACT(WEEK FROM a), +EXTRACT(DAY FROM a), +EXTRACT(DAY_HOUR FROM a), +EXTRACT(DAY_MINUTE FROM a), +EXTRACT(DAY_SECOND FROM a), +EXTRACT(HOUR FROM a), +EXTRACT(HOUR_MINUTE FROM a), +EXTRACT(HOUR_SECOND FROM a), +EXTRACT(MINUTE FROM a), +EXTRACT(MINUTE_SECOND FROM a), +EXTRACT(SECOND FROM a), +EXTRACT(MICROSECOND FROM a), +EXTRACT(DAY_MICROSECOND FROM a), +EXTRACT(HOUR_MICROSECOND FROM a), +EXTRACT(MINUTE_MICROSECOND FROM a), +EXTRACT(SECOND_MICROSECOND FROM a) +FROM t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 t1 a a 12 26 26 Y 128 6 63 +def EXTRACT(YEAR FROM a) 3 4 4 Y 32896 0 63 +def EXTRACT(YEAR_MONTH FROM a) 3 6 6 Y 32896 0 63 +def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63 +def EXTRACT(MONTH FROM a) 3 2 2 Y 32896 0 63 +def EXTRACT(WEEK FROM a) 3 2 2 Y 32896 0 63 +def EXTRACT(DAY FROM a) 3 2 2 Y 32896 0 63 +def EXTRACT(DAY_HOUR FROM a) 3 5 4 Y 32896 0 63 +def EXTRACT(DAY_MINUTE FROM a) 3 7 6 Y 32896 0 63 +def EXTRACT(DAY_SECOND FROM a) 3 9 8 Y 32896 0 63 +def EXTRACT(HOUR FROM a) 3 3 2 Y 32896 0 63 +def EXTRACT(HOUR_MINUTE FROM a) 3 5 4 Y 32896 0 63 +def EXTRACT(HOUR_SECOND FROM a) 3 7 6 
Y 32896 0 63 +def EXTRACT(MINUTE FROM a) 3 3 2 Y 32896 0 63 +def EXTRACT(MINUTE_SECOND FROM a) 3 5 4 Y 32896 0 63 +def EXTRACT(SECOND FROM a) 3 3 2 Y 32896 0 63 +def EXTRACT(MICROSECOND FROM a) 3 7 6 Y 32896 0 63 +def EXTRACT(DAY_MICROSECOND FROM a) 8 15 14 Y 32896 0 63 +def EXTRACT(HOUR_MICROSECOND FROM a) 8 13 12 Y 32896 0 63 +def EXTRACT(MINUTE_MICROSECOND FROM a) 8 11 10 Y 32896 0 63 +def EXTRACT(SECOND_MICROSECOND FROM a) 3 9 8 Y 32896 0 63 +a 1999-12-31 23:59:59.999999 +EXTRACT(YEAR FROM a) 1999 +EXTRACT(YEAR_MONTH FROM a) 199912 +EXTRACT(QUARTER FROM a) 4 +EXTRACT(MONTH FROM a) 12 +EXTRACT(WEEK FROM a) 52 +EXTRACT(DAY FROM a) 31 +EXTRACT(DAY_HOUR FROM a) 3123 +EXTRACT(DAY_MINUTE FROM a) 312359 +EXTRACT(DAY_SECOND FROM a) 31235959 +EXTRACT(HOUR FROM a) 23 +EXTRACT(HOUR_MINUTE FROM a) 2359 +EXTRACT(HOUR_SECOND FROM a) 235959 +EXTRACT(MINUTE FROM a) 59 +EXTRACT(MINUTE_SECOND FROM a) 5959 +EXTRACT(SECOND FROM a) 59 +EXTRACT(MICROSECOND FROM a) 999999 +EXTRACT(DAY_MICROSECOND FROM a) 31235959999999 +EXTRACT(HOUR_MICROSECOND FROM a) 235959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) 5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) 59999999 +CREATE TABLE t2 AS SELECT +a, +EXTRACT(YEAR FROM a), +EXTRACT(YEAR_MONTH FROM a), +EXTRACT(QUARTER FROM a), +EXTRACT(MONTH FROM a), +EXTRACT(WEEK FROM a), +EXTRACT(DAY FROM a), +EXTRACT(DAY_HOUR FROM a), +EXTRACT(DAY_MINUTE FROM a), +EXTRACT(DAY_SECOND FROM a), +EXTRACT(HOUR FROM a), +EXTRACT(HOUR_MINUTE FROM a), +EXTRACT(HOUR_SECOND FROM a), +EXTRACT(MINUTE FROM a), +EXTRACT(MINUTE_SECOND FROM a), +EXTRACT(SECOND FROM a), +EXTRACT(MICROSECOND FROM a), +EXTRACT(DAY_MICROSECOND FROM a), +EXTRACT(HOUR_MICROSECOND FROM a), +EXTRACT(MINUTE_MICROSECOND FROM a), +EXTRACT(SECOND_MICROSECOND FROM a) +FROM t1; +SELECT * FROM t2; +a 1999-12-31 23:59:59.999999 +EXTRACT(YEAR FROM a) 1999 +EXTRACT(YEAR_MONTH FROM a) 199912 +EXTRACT(QUARTER FROM a) 4 +EXTRACT(MONTH FROM a) 12 +EXTRACT(WEEK FROM a) 52 +EXTRACT(DAY FROM a) 31 +EXTRACT(DAY_HOUR FROM a) 3123 +EXTRACT(DAY_MINUTE FROM a) 312359 +EXTRACT(DAY_SECOND FROM a) 31235959 +EXTRACT(HOUR FROM a) 23 +EXTRACT(HOUR_MINUTE FROM a) 2359 +EXTRACT(HOUR_SECOND FROM a) 235959 +EXTRACT(MINUTE FROM a) 59 +EXTRACT(MINUTE_SECOND FROM a) 5959 +EXTRACT(SECOND FROM a) 59 +EXTRACT(MICROSECOND FROM a) 999999 +EXTRACT(DAY_MICROSECOND FROM a) 31235959999999 +EXTRACT(HOUR_MICROSECOND FROM a) 235959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) 5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) 59999999 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` datetime(6) DEFAULT NULL, + `EXTRACT(YEAR FROM a)` int(4) DEFAULT NULL, + `EXTRACT(YEAR_MONTH FROM a)` int(6) DEFAULT NULL, + `EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL, + `EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL, + `EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL, + `EXTRACT(DAY FROM a)` int(2) DEFAULT NULL, + `EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL, + `EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL, + `EXTRACT(DAY_SECOND FROM a)` int(9) DEFAULT NULL, + `EXTRACT(HOUR FROM a)` int(3) DEFAULT NULL, + `EXTRACT(HOUR_MINUTE FROM a)` int(5) DEFAULT NULL, + `EXTRACT(HOUR_SECOND FROM a)` int(7) DEFAULT NULL, + `EXTRACT(MINUTE FROM a)` int(3) DEFAULT NULL, + `EXTRACT(MINUTE_SECOND FROM a)` int(5) DEFAULT NULL, + `EXTRACT(SECOND FROM a)` int(3) DEFAULT NULL, + `EXTRACT(MICROSECOND FROM a)` int(7) DEFAULT NULL, + `EXTRACT(DAY_MICROSECOND FROM a)` bigint(15) DEFAULT NULL, + `EXTRACT(HOUR_MICROSECOND FROM a)` bigint(13) DEFAULT NULL, + `EXTRACT(MINUTE_MICROSECOND FROM a)` 
bigint(11) DEFAULT NULL, + `EXTRACT(SECOND_MICROSECOND FROM a)` int(9) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1,t2; +CREATE TABLE t1 (a TIME(6)); +INSERT INTO t1 VALUES ('-838:59:59.999999'),('838:59:59.999999'); +SELECT +a, +EXTRACT(YEAR FROM a), +EXTRACT(YEAR_MONTH FROM a), +EXTRACT(QUARTER FROM a), +EXTRACT(MONTH FROM a), +EXTRACT(WEEK FROM a), +EXTRACT(DAY FROM a), +EXTRACT(DAY_HOUR FROM a), +EXTRACT(DAY_MINUTE FROM a), +EXTRACT(DAY_SECOND FROM a), +EXTRACT(HOUR FROM a), +EXTRACT(HOUR_MINUTE FROM a), +EXTRACT(HOUR_SECOND FROM a), +EXTRACT(MINUTE FROM a), +EXTRACT(MINUTE_SECOND FROM a), +EXTRACT(SECOND FROM a), +EXTRACT(MICROSECOND FROM a), +EXTRACT(DAY_MICROSECOND FROM a), +EXTRACT(HOUR_MICROSECOND FROM a), +EXTRACT(MINUTE_MICROSECOND FROM a), +EXTRACT(SECOND_MICROSECOND FROM a) +FROM t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 t1 a a 11 17 17 Y 128 6 63 +def EXTRACT(YEAR FROM a) 3 4 1 Y 32896 0 63 +def EXTRACT(YEAR_MONTH FROM a) 3 6 1 Y 32896 0 63 +def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63 +def EXTRACT(MONTH FROM a) 3 2 1 Y 32896 0 63 +def EXTRACT(WEEK FROM a) 3 2 9 Y 32896 0 63 +def EXTRACT(DAY FROM a) 3 2 2 Y 32896 0 63 +def EXTRACT(DAY_HOUR FROM a) 3 5 5 Y 32896 0 63 +def EXTRACT(DAY_MINUTE FROM a) 3 7 7 Y 32896 0 63 +def EXTRACT(DAY_SECOND FROM a) 3 9 9 Y 32896 0 63 +def EXTRACT(HOUR FROM a) 3 3 3 Y 32896 0 63 +def EXTRACT(HOUR_MINUTE FROM a) 3 5 5 Y 32896 0 63 +def EXTRACT(HOUR_SECOND FROM a) 3 7 7 Y 32896 0 63 +def EXTRACT(MINUTE FROM a) 3 3 3 Y 32896 0 63 +def EXTRACT(MINUTE_SECOND FROM a) 3 5 5 Y 32896 0 63 +def EXTRACT(SECOND FROM a) 3 3 3 Y 32896 0 63 +def EXTRACT(MICROSECOND FROM a) 3 7 7 Y 32896 0 63 +def EXTRACT(DAY_MICROSECOND FROM a) 8 15 15 Y 32896 0 63 +def EXTRACT(HOUR_MICROSECOND FROM a) 8 13 13 Y 32896 0 63 +def EXTRACT(MINUTE_MICROSECOND FROM a) 8 11 11 Y 32896 0 63 +def EXTRACT(SECOND_MICROSECOND FROM a) 3 9 9 Y 32896 0 63 +a -838:59:59.999999 +EXTRACT(YEAR FROM a) 0 +EXTRACT(YEAR_MONTH FROM a) 0 +EXTRACT(QUARTER FROM a) 0 +EXTRACT(MONTH FROM a) 0 +EXTRACT(WEEK FROM a) 613566757 +EXTRACT(DAY FROM a) 34 +EXTRACT(DAY_HOUR FROM a) -3422 +EXTRACT(DAY_MINUTE FROM a) -342259 +EXTRACT(DAY_SECOND FROM a) -34225959 +EXTRACT(HOUR FROM a) -22 +EXTRACT(HOUR_MINUTE FROM a) -2259 +EXTRACT(HOUR_SECOND FROM a) -225959 +EXTRACT(MINUTE FROM a) -59 +EXTRACT(MINUTE_SECOND FROM a) -5959 +EXTRACT(SECOND FROM a) -59 +EXTRACT(MICROSECOND FROM a) -999999 +EXTRACT(DAY_MICROSECOND FROM a) -34225959999999 +EXTRACT(HOUR_MICROSECOND FROM a) -225959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) -5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) -59999999 +a 838:59:59.999999 +EXTRACT(YEAR FROM a) 0 +EXTRACT(YEAR_MONTH FROM a) 0 +EXTRACT(QUARTER FROM a) 0 +EXTRACT(MONTH FROM a) 0 +EXTRACT(WEEK FROM a) 613566757 +EXTRACT(DAY FROM a) 34 +EXTRACT(DAY_HOUR FROM a) 3422 +EXTRACT(DAY_MINUTE FROM a) 342259 +EXTRACT(DAY_SECOND FROM a) 34225959 +EXTRACT(HOUR FROM a) 22 +EXTRACT(HOUR_MINUTE FROM a) 2259 +EXTRACT(HOUR_SECOND FROM a) 225959 +EXTRACT(MINUTE FROM a) 59 +EXTRACT(MINUTE_SECOND FROM a) 5959 +EXTRACT(SECOND FROM a) 59 +EXTRACT(MICROSECOND FROM a) 999999 +EXTRACT(DAY_MICROSECOND FROM a) 34225959999999 +EXTRACT(HOUR_MICROSECOND FROM a) 225959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) 5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) 59999999 +CREATE TABLE t2 AS SELECT +a, +EXTRACT(YEAR FROM a), +EXTRACT(YEAR_MONTH FROM a), +EXTRACT(QUARTER FROM a), +EXTRACT(MONTH FROM a), +EXTRACT(WEEK FROM 
a), +EXTRACT(DAY FROM a), +EXTRACT(DAY_HOUR FROM a), +EXTRACT(DAY_MINUTE FROM a), +EXTRACT(DAY_SECOND FROM a), +EXTRACT(HOUR FROM a), +EXTRACT(HOUR_MINUTE FROM a), +EXTRACT(HOUR_SECOND FROM a), +EXTRACT(MINUTE FROM a), +EXTRACT(MINUTE_SECOND FROM a), +EXTRACT(SECOND FROM a), +EXTRACT(MICROSECOND FROM a), +EXTRACT(DAY_MICROSECOND FROM a), +EXTRACT(HOUR_MICROSECOND FROM a), +EXTRACT(MINUTE_MICROSECOND FROM a), +EXTRACT(SECOND_MICROSECOND FROM a) +FROM t1; +SELECT * FROM t2; +a -838:59:59.999999 +EXTRACT(YEAR FROM a) 0 +EXTRACT(YEAR_MONTH FROM a) 0 +EXTRACT(QUARTER FROM a) 0 +EXTRACT(MONTH FROM a) 0 +EXTRACT(WEEK FROM a) 613566757 +EXTRACT(DAY FROM a) 34 +EXTRACT(DAY_HOUR FROM a) -3422 +EXTRACT(DAY_MINUTE FROM a) -342259 +EXTRACT(DAY_SECOND FROM a) -34225959 +EXTRACT(HOUR FROM a) -22 +EXTRACT(HOUR_MINUTE FROM a) -2259 +EXTRACT(HOUR_SECOND FROM a) -225959 +EXTRACT(MINUTE FROM a) -59 +EXTRACT(MINUTE_SECOND FROM a) -5959 +EXTRACT(SECOND FROM a) -59 +EXTRACT(MICROSECOND FROM a) -999999 +EXTRACT(DAY_MICROSECOND FROM a) -34225959999999 +EXTRACT(HOUR_MICROSECOND FROM a) -225959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) -5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) -59999999 +a 838:59:59.999999 +EXTRACT(YEAR FROM a) 0 +EXTRACT(YEAR_MONTH FROM a) 0 +EXTRACT(QUARTER FROM a) 0 +EXTRACT(MONTH FROM a) 0 +EXTRACT(WEEK FROM a) 613566757 +EXTRACT(DAY FROM a) 34 +EXTRACT(DAY_HOUR FROM a) 3422 +EXTRACT(DAY_MINUTE FROM a) 342259 +EXTRACT(DAY_SECOND FROM a) 34225959 +EXTRACT(HOUR FROM a) 22 +EXTRACT(HOUR_MINUTE FROM a) 2259 +EXTRACT(HOUR_SECOND FROM a) 225959 +EXTRACT(MINUTE FROM a) 59 +EXTRACT(MINUTE_SECOND FROM a) 5959 +EXTRACT(SECOND FROM a) 59 +EXTRACT(MICROSECOND FROM a) 999999 +EXTRACT(DAY_MICROSECOND FROM a) 34225959999999 +EXTRACT(HOUR_MICROSECOND FROM a) 225959999999 +EXTRACT(MINUTE_MICROSECOND FROM a) 5959999999 +EXTRACT(SECOND_MICROSECOND FROM a) 59999999 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` time(6) DEFAULT NULL, + `EXTRACT(YEAR FROM a)` int(4) DEFAULT NULL, + `EXTRACT(YEAR_MONTH FROM a)` int(6) DEFAULT NULL, + `EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL, + `EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL, + `EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL, + `EXTRACT(DAY FROM a)` int(2) DEFAULT NULL, + `EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL, + `EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL, + `EXTRACT(DAY_SECOND FROM a)` int(9) DEFAULT NULL, + `EXTRACT(HOUR FROM a)` int(3) DEFAULT NULL, + `EXTRACT(HOUR_MINUTE FROM a)` int(5) DEFAULT NULL, + `EXTRACT(HOUR_SECOND FROM a)` int(7) DEFAULT NULL, + `EXTRACT(MINUTE FROM a)` int(3) DEFAULT NULL, + `EXTRACT(MINUTE_SECOND FROM a)` int(5) DEFAULT NULL, + `EXTRACT(SECOND FROM a)` int(3) DEFAULT NULL, + `EXTRACT(MICROSECOND FROM a)` int(7) DEFAULT NULL, + `EXTRACT(DAY_MICROSECOND FROM a)` bigint(15) DEFAULT NULL, + `EXTRACT(HOUR_MICROSECOND FROM a)` bigint(13) DEFAULT NULL, + `EXTRACT(MINUTE_MICROSECOND FROM a)` bigint(11) DEFAULT NULL, + `EXTRACT(SECOND_MICROSECOND FROM a)` int(9) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1,t2; +# +# End of 10.1 tests +# diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 18293f31dd2..12b7c92688f 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -1712,3 +1712,77 @@ SELECT --echo # --echo # End of 10.0 tests --echo # + +--echo # +--echo # Start of 10.1 tests +--echo # + +--echo # +--echo # MDEV-10317 EXCTACT(MINUTE_MICROSECOND) truncates data +--echo # + +let $query= +SELECT + a, + EXTRACT(YEAR FROM a), + 
EXTRACT(YEAR_MONTH FROM a), + EXTRACT(QUARTER FROM a), + EXTRACT(MONTH FROM a), + EXTRACT(WEEK FROM a), + EXTRACT(DAY FROM a), + EXTRACT(DAY_HOUR FROM a), + EXTRACT(DAY_MINUTE FROM a), + EXTRACT(DAY_SECOND FROM a), + EXTRACT(HOUR FROM a), + EXTRACT(HOUR_MINUTE FROM a), + EXTRACT(HOUR_SECOND FROM a), + EXTRACT(MINUTE FROM a), + EXTRACT(MINUTE_SECOND FROM a), + EXTRACT(SECOND FROM a), + EXTRACT(MICROSECOND FROM a), + EXTRACT(DAY_MICROSECOND FROM a), + EXTRACT(HOUR_MICROSECOND FROM a), + EXTRACT(MINUTE_MICROSECOND FROM a), + EXTRACT(SECOND_MICROSECOND FROM a) +FROM t1; + + +CREATE TABLE t1 (a DATETIME(6)); +INSERT INTO t1 VALUES ('1999-12-31 23:59:59.999999'); + +--vertical_results +--enable_metadata +--disable_ps_protocol +--eval $query +--enable_ps_protocol +--disable_metadata +--horizontal_results + +--eval CREATE TABLE t2 AS $query +--vertical_results +SELECT * FROM t2; +--horizontal_results +SHOW CREATE TABLE t2; +DROP TABLE t1,t2; + +CREATE TABLE t1 (a TIME(6)); +INSERT INTO t1 VALUES ('-838:59:59.999999'),('838:59:59.999999'); + +--vertical_results +--enable_metadata +--disable_ps_protocol +--eval $query +--enable_ps_protocol +--disable_metadata +--horizontal_results + +--eval CREATE TABLE t2 AS $query +--vertical_results +SELECT * FROM t2; +--horizontal_results +SHOW CREATE TABLE t2; +DROP TABLE t1,t2; + +--echo # +--echo # End of 10.1 tests +--echo # diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 398b618fb73..ff13e707ac4 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2192,26 +2192,26 @@ void Item_extract::fix_length_and_dec() { maybe_null=1; // If wrong date switch (int_type) { - case INTERVAL_YEAR: max_length=4; date_value=1; break; - case INTERVAL_YEAR_MONTH: max_length=6; date_value=1; break; - case INTERVAL_QUARTER: max_length=2; date_value=1; break; - case INTERVAL_MONTH: max_length=2; date_value=1; break; - case INTERVAL_WEEK: max_length=2; date_value=1; break; - case INTERVAL_DAY: max_length=2; date_value=1; break; - case INTERVAL_DAY_HOUR: max_length=9; date_value=0; break; - case INTERVAL_DAY_MINUTE: max_length=11; date_value=0; break; - case INTERVAL_DAY_SECOND: max_length=13; date_value=0; break; - case INTERVAL_HOUR: max_length=2; date_value=0; break; - case INTERVAL_HOUR_MINUTE: max_length=4; date_value=0; break; - case INTERVAL_HOUR_SECOND: max_length=6; date_value=0; break; - case INTERVAL_MINUTE: max_length=2; date_value=0; break; - case INTERVAL_MINUTE_SECOND: max_length=4; date_value=0; break; - case INTERVAL_SECOND: max_length=2; date_value=0; break; - case INTERVAL_MICROSECOND: max_length=2; date_value=0; break; - case INTERVAL_DAY_MICROSECOND: max_length=20; date_value=0; break; - case INTERVAL_HOUR_MICROSECOND: max_length=13; date_value=0; break; - case INTERVAL_MINUTE_MICROSECOND: max_length=11; date_value=0; break; - case INTERVAL_SECOND_MICROSECOND: max_length=9; date_value=0; break; + case INTERVAL_YEAR: set_date_length(4); break; // YYYY + case INTERVAL_YEAR_MONTH: set_date_length(6); break; // YYYYMM + case INTERVAL_QUARTER: set_date_length(2); break; // 1..4 + case INTERVAL_MONTH: set_date_length(2); break; // MM + case INTERVAL_WEEK: set_date_length(2); break; // 0..52 + case INTERVAL_DAY: set_date_length(2); break; // DD + case INTERVAL_DAY_HOUR: set_time_length(4); break; // DDhh + case INTERVAL_DAY_MINUTE: set_time_length(6); break; // DDhhmm + case INTERVAL_DAY_SECOND: set_time_length(8); break; // DDhhmmss + case INTERVAL_HOUR: set_time_length(2); break; // hh + case INTERVAL_HOUR_MINUTE: set_time_length(4); break; 
// hhmm + case INTERVAL_HOUR_SECOND: set_time_length(6); break; // hhmmss + case INTERVAL_MINUTE: set_time_length(2); break; // mm + case INTERVAL_MINUTE_SECOND: set_time_length(4); break; // mmss + case INTERVAL_SECOND: set_time_length(2); break; // ss + case INTERVAL_MICROSECOND: set_time_length(6); break; // ffffff + case INTERVAL_DAY_MICROSECOND: set_time_length(14); break; // DDhhmmssffffff + case INTERVAL_HOUR_MICROSECOND: set_time_length(12); break; // hhmmssffffff + case INTERVAL_MINUTE_MICROSECOND: set_time_length(10); break; // mmssffffff + case INTERVAL_SECOND_MICROSECOND: set_time_length(8); break; // ssffffff case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */ } } diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 2c1a0943699..0e9329d501c 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -835,10 +835,57 @@ public: class Item_extract :public Item_int_func { bool date_value; + void set_date_length(uint32 length) + { + /* + Although DATE components (e.g. YEAR, YEAR_MONTH, QUARTER, MONTH, WEEK) + cannot have a sign, we should probably still add +1, + because all around the code we assume that max_length is sign inclusive. + Another options is to set unsigned_flag to "true". + */ + max_length= length; //QQ: see above + date_value= true; + } + void set_time_length(uint32 length) + { + max_length= length + 1/*sign*/; + date_value= false; + } public: const interval_type int_type; // keep it public Item_extract(THD *thd, interval_type type_arg, Item *a): Item_int_func(thd, a), int_type(type_arg) {} + enum_field_types field_type() const + { + switch (int_type) { + case INTERVAL_YEAR: + case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: + case INTERVAL_MONTH: + case INTERVAL_WEEK: + case INTERVAL_DAY: + case INTERVAL_DAY_HOUR: + case INTERVAL_DAY_MINUTE: + case INTERVAL_DAY_SECOND: + case INTERVAL_HOUR: + case INTERVAL_HOUR_MINUTE: + case INTERVAL_HOUR_SECOND: + case INTERVAL_MINUTE: + case INTERVAL_MINUTE_SECOND: + case INTERVAL_SECOND: + case INTERVAL_MICROSECOND: + case INTERVAL_SECOND_MICROSECOND: + return MYSQL_TYPE_LONG; + case INTERVAL_DAY_MICROSECOND: + case INTERVAL_HOUR_MICROSECOND: + case INTERVAL_MINUTE_MICROSECOND: + return MYSQL_TYPE_LONGLONG; + case INTERVAL_LAST: + break; + } + DBUG_ASSERT(0); + return MYSQL_TYPE_LONGLONG; + } longlong val_int(); enum Functype functype() const { return EXTRACT_FUNC; } const char *func_name() const { return "extract"; } @@ -883,6 +930,8 @@ class Item_extract :public Item_int_func } return true; } + Field *create_field_for_create_select(TABLE *table) + { return tmp_table_field_from_field_type(table, false, false); } }; -- cgit v1.2.1 From 406fe77763c43dd7e151d92364e0296539077d4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Mon, 4 Jul 2016 17:31:14 +0300 Subject: Add more diagnostic to find out the problem on innodb_shutdown_for_mysql in ppc64el on test case innodb_fts.innodb_fts_stopword_charset. 
--- storage/innobase/buf/buf0buf.cc | 14 ++++++++++++-- storage/innobase/fil/fil0fil.cc | 22 ++++++++++++++++++++++ storage/innobase/include/fil0fil.h | 8 ++++++++ storage/xtradb/buf/buf0buf.cc | 14 ++++++++++++-- storage/xtradb/fil/fil0fil.cc | 22 ++++++++++++++++++++++ storage/xtradb/include/fil0fil.h | 8 ++++++++ 6 files changed, 84 insertions(+), 4 deletions(-) diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 5754d66350d..5b1f479168a 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -4408,10 +4408,20 @@ buf_all_freed_instance( const buf_block_t* block = buf_chunk_not_freed(chunk); if (UNIV_LIKELY_NULL(block)) { - fprintf(stderr, - "Page %lu %lu still fixed or dirty\n", + fil_space_t* space = fil_space_get(block->page.space); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page %lu %lu still fixed or dirty.", (ulong) block->page.space, (ulong) block->page.offset); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page oldest_modification %lu fix_count %d io_fix %d.", + block->page.oldest_modification, + block->page.buf_fix_count, + buf_page_get_io_fix(&block->page)); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page space_id %lu name %s.", + (ulong)block->page.space, + (space && space->name) ? space->name : "NULL"); ut_error; } } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 49550b6cdb2..9d471f9dbd3 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1299,6 +1299,28 @@ fil_space_free( return(TRUE); } +/*******************************************************************//** +Returns a pointer to the file_space_t that is in the memory cache +associated with a space id. +@return file_space_t pointer, NULL if space not found */ +fil_space_t* +fil_space_get( +/*==========*/ + ulint id) /*!< in: space id */ +{ + fil_space_t* space; + + ut_ad(fil_system); + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + mutex_exit(&fil_system->mutex); + + return (space); +} + /*******************************************************************//** Returns a pointer to the file_space_t that is in the memory cache associated with a space id. The caller must lock fil_system->mutex. diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 07929886b29..35a4b2496d4 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1210,5 +1210,13 @@ fil_user_tablespace_restore_page( ulint page_no); /* in: page_no to obtain from double write buffer */ +/*******************************************************************//** +Returns a pointer to the file_space_t that is in the memory cache +associated with a space id. 
+@return file_space_t pointer, NULL if space not found */ +fil_space_t* +fil_space_get( +/*==========*/ + ulint id); /*!< in: space id */ #endif /* !UNIV_INNOCHECKSUM */ #endif /* fil0fil_h */ diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 3186cd47753..489c690d9f9 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -4694,10 +4694,20 @@ buf_all_freed_instance( mutex_exit(&buf_pool->LRU_list_mutex); if (UNIV_LIKELY_NULL(block)) { - fprintf(stderr, - "Page %lu %lu still fixed or dirty\n", + fil_space_t* space = fil_space_get(block->page.space); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page %lu %lu still fixed or dirty.", (ulong) block->page.space, (ulong) block->page.offset); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page oldest_modification %lu fix_count %d io_fix %d.", + block->page.oldest_modification, + block->page.buf_fix_count, + buf_page_get_io_fix(&block->page)); + ib_logf(IB_LOG_LEVEL_ERROR, + "Page space_id %lu name %s.", + (ulong)block->page.space, + (space && space->name) ? space->name : "NULL"); ut_error; } } diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 6bc3b57d9ca..40a01c9f055 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -1342,6 +1342,28 @@ fil_space_free( return(TRUE); } +/*******************************************************************//** +Returns a pointer to the file_space_t that is in the memory cache +associated with a space id. +@return file_space_t pointer, NULL if space not found */ +fil_space_t* +fil_space_get( +/*==========*/ + ulint id) /*!< in: space id */ +{ + fil_space_t* space; + + ut_ad(fil_system); + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + mutex_exit(&fil_system->mutex); + + return (space); +} + /*******************************************************************//** Returns a pointer to the file_space_t that is in the memory cache associated with a space id. The caller must lock fil_system->mutex. diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 547a0c621a5..b71d0f0f705 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1210,6 +1210,14 @@ fil_user_tablespace_restore_page( ulint page_no); /* in: page_no to obtain from double write buffer */ +/*******************************************************************//** +Returns a pointer to the file_space_t that is in the memory cache +associated with a space id. +@return file_space_t pointer, NULL if space not found */ +fil_space_t* +fil_space_get( +/*==========*/ + ulint id); /*!< in: space id */ #endif /* !UNIV_INNOCHECKSUM */ /************************************************************************* -- cgit v1.2.1 From d1b25890745a140446e4bdd5fd4f489ce1f76fae Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 5 Jul 2016 15:23:22 +0400 Subject: Removing class Item_func_integer. It's not used since MySQL-5.0. 
--- sql/item_func.cc | 9 --------- sql/item_func.h | 7 ------- 2 files changed, 16 deletions(-) diff --git a/sql/item_func.cc b/sql/item_func.cc index 0d21183cac8..47ad756144a 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2313,15 +2313,6 @@ longlong Item_func_bit_neg::val_int() // Conversion functions -void Item_func_integer::fix_length_and_dec() -{ - max_length=args[0]->max_length - args[0]->decimals+1; - uint tmp=float_length(decimals); - set_if_smaller(max_length,tmp); - decimals=0; -} - - void Item_func_int_val::fix_length_and_dec() { DBUG_ENTER("Item_func_int_val::fix_length_and_dec"); diff --git a/sql/item_func.h b/sql/item_func.h index 47af2a3f898..4a55bd68453 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -949,13 +949,6 @@ public: const char *func_name() const { return "cot"; } }; -class Item_func_integer :public Item_int_func -{ -public: - inline Item_func_integer(THD *thd, Item *a): Item_int_func(thd, a) {} - void fix_length_and_dec(); -}; - class Item_func_int_val :public Item_func_num1 { -- cgit v1.2.1 From 95c286cedf4b9330240a0a91a9fc3e58a17782b9 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Tue, 5 Jul 2016 16:53:03 +0300 Subject: MDEV-10324: Server crash in get_sel_arg_for_keypart or Assertion The crash was caused by this problem: get_best_group_min_max() tries to construct query plans for keys that are not processed by the range optimizer. This wasn't a problem as long as SEL_TREE::keys was an array of MAX_KEY elements. However, now it is a Mem_root_array and only has elements for the used keys, and get_best_group_min_max attempts to address beyond the end of the array. The obvious way to fix the crash was to port (and improve) a part of 96fcfcbd7b5120e8f64fd45985001eca8d36fbfb from mysql-5.7. This makes get_best_group_min_max not to consider indexes that Mem_root_arrays have no element for. After that, I got non-sensical query plans (see MDEV-10325 for details). Fixed that by making get_best_group_min_max to check if the index is in table->keys_in_use_for_group_by bitmap. 
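For illustration only, a tiny stand-alone C sketch (made-up names, not the actual sql/opt_range.cc code) of the indexing pitfall described above: once the per-index array holds entries only for the keys the range optimizer actually processed, it has to be walked by position and mapped back to real key numbers, rather than indexed by the raw key number as when it still had MAX_KEY elements.

/* illustrative sketch -- hypothetical names, not the server sources */
#include <stdio.h>

int main(void)
{
  /* Suppose the table has many indexes, but the range optimizer only
     processed two of them, so the dynamic array has two elements. */
  int real_keynr[]= { 3, 5 };            /* position -> real key number */
  const char *range_tree[]= { "SEL_ARG for key 3", "SEL_ARG for key 5" };
  int used_keys= 2;

  /* Old assumption: range_tree[key_nr] is valid for any key number of the
     table.  With only two elements, that reads past the end of the array. */

  /* Safe pattern, as in the fix: iterate the used positions and map back. */
  for (int idx= 0; idx < used_keys; idx++)
    printf("key %d -> %s\n", real_keynr[idx], range_tree[idx]);

  return 0;
}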
--- mysql-test/r/group_by.result | 34 ++++++++++++++++++++ mysql-test/r/group_by_innodb.result | 35 ++++++++++++++++++++ mysql-test/r/order_by.result | 2 +- mysql-test/t/group_by.test | 36 +++++++++++++++++++++ mysql-test/t/group_by_innodb.test | 30 +++++++++++++++++ sql/opt_range.cc | 64 ++++++------------------------------- 6 files changed, 145 insertions(+), 56 deletions(-) diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index 3ef73bb1943..adf1a3c9ab3 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -2692,3 +2692,37 @@ select distinct a from t1 group by 'a'; a 2001-02-02 drop table t1; +# +# MDEV-10324: Server crash in get_sel_arg_for_keypart or Assertion `n < size()' failed in Mem_root_array +# +CREATE TABLE t1 ( +job_id int(10) unsigned NOT NULL AUTO_INCREMENT, +job_cmd varbinary(60) NOT NULL DEFAULT '', +job_namespace int(11) NOT NULL, +job_title varbinary(255) NOT NULL, +job_params blob NOT NULL, +job_timestamp varbinary(14) DEFAULT NULL, +job_random int(10) unsigned NOT NULL DEFAULT '0', +job_token varbinary(32) NOT NULL DEFAULT '', +job_token_timestamp varbinary(14) DEFAULT NULL, +job_sha1 varbinary(32) NOT NULL DEFAULT '', +job_attempts int(10) unsigned NOT NULL DEFAULT '0', +PRIMARY KEY (job_id), +KEY job_cmd (job_cmd,job_namespace,job_title,job_params(128)), +KEY job_timestamp (job_timestamp), +KEY job_sha1 (job_sha1), +KEY job_cmd_token (job_cmd,job_token,job_random), +KEY job_cmd_token_id (job_cmd,job_token,job_id) +); +INSERT INTO t1 VALUES +(NULL, 'foo', 1, 'foo', 'foo', 'foo', 1, 'foo', 'foo', 'foo', 1), +(NULL, 'bar', 2, 'bar', 'bar', 'bar', 2, 'bar', 'bar', 'bar', 2); +SELECT DISTINCT job_cmd FROM t1 WHERE job_cmd IN ('foobar','null'); +job_cmd +drop table t1; +CREATE TABLE t1 (f1 INT NOT NULL, f2 VARCHAR(3) NOT NULL, KEY(f1), KEY(f2, f1)); +INSERT INTO t1 VALUES (0,'foo'),(1,'bar'); +SELECT 1 IN ( SELECT COUNT( DISTINCT f2 ) FROM t1 WHERE f1 <= 4 ); +1 IN ( SELECT COUNT( DISTINCT f2 ) FROM t1 WHERE f1 <= 4 ) +0 +drop table t1; diff --git a/mysql-test/r/group_by_innodb.result b/mysql-test/r/group_by_innodb.result index 381e0d7493c..bf6b25f31fa 100644 --- a/mysql-test/r/group_by_innodb.result +++ b/mysql-test/r/group_by_innodb.result @@ -123,4 +123,39 @@ id xtext optionen 2 number 22,25 1 select Kabel mit Stecker 5-polig,Kabel ohne Stecker DROP TABLE t1, t2; +# Port of testcase: +# +# Bug#20819199 ASSERTION FAILED IN TEST_IF_SKIP_SORT_ORDER +# +CREATE TABLE t0 ( a INT ); +INSERT INTO t0 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); +CREATE TABLE t1 ( +pk INT NOT NULL AUTO_INCREMENT, +a INT, +b INT, +PRIMARY KEY (pk), +KEY idx1 (a), +KEY idx2 (b, a), +KEY idx3 (a, b) +) ENGINE = InnoDB; +INSERT INTO t1 (a, b) SELECT t01.a, t02.a FROM t0 t01, t0 t02; +ANALYZE TABLE t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +EXPLAIN SELECT DISTINCT a, MAX(b) FROM t1 WHERE a >= 0 GROUP BY a,a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range idx1,idx3 idx3 5 NULL 100 Using where; Using index +SELECT DISTINCT a, MAX(b) FROM t1 WHERE a >= 0 GROUP BY a,a; +a MAX(b) +1 10 +2 10 +3 10 +4 10 +5 10 +6 10 +7 10 +8 10 +9 10 +10 10 +DROP TABLE t0, t1; # End of tests diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 03e7c48951a..d108bce5eb1 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -1163,7 +1163,7 @@ INSERT INTO t1 SELECT a +32, b +32 FROM t1; INSERT INTO t1 SELECT a +64, b +64 FROM t1; EXPLAIN SELECT a FROM t1 
IGNORE INDEX FOR GROUP BY (a, ab) GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range NULL ab 4 NULL 10 Using index for group-by +1 SIMPLE t1 index NULL PRIMARY 4 NULL 128 Using index SELECT a FROM t1 IGNORE INDEX FOR GROUP BY (a, ab) GROUP BY a; a 1 diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index 5d8b79b9fca..4aa5c10ece8 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -1803,3 +1803,39 @@ select distinct a from t1 group by 'a'; insert into t1 values("2001-02-02"),("2001-02-03"); select distinct a from t1 group by 'a'; drop table t1; + +--echo # +--echo # MDEV-10324: Server crash in get_sel_arg_for_keypart or Assertion `n < size()' failed in Mem_root_array +--echo # +CREATE TABLE t1 ( + job_id int(10) unsigned NOT NULL AUTO_INCREMENT, + job_cmd varbinary(60) NOT NULL DEFAULT '', + job_namespace int(11) NOT NULL, + job_title varbinary(255) NOT NULL, + job_params blob NOT NULL, + job_timestamp varbinary(14) DEFAULT NULL, + job_random int(10) unsigned NOT NULL DEFAULT '0', + job_token varbinary(32) NOT NULL DEFAULT '', + job_token_timestamp varbinary(14) DEFAULT NULL, + job_sha1 varbinary(32) NOT NULL DEFAULT '', + job_attempts int(10) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (job_id), + KEY job_cmd (job_cmd,job_namespace,job_title,job_params(128)), + KEY job_timestamp (job_timestamp), + KEY job_sha1 (job_sha1), + KEY job_cmd_token (job_cmd,job_token,job_random), + KEY job_cmd_token_id (job_cmd,job_token,job_id) +); + +INSERT INTO t1 VALUES + (NULL, 'foo', 1, 'foo', 'foo', 'foo', 1, 'foo', 'foo', 'foo', 1), + (NULL, 'bar', 2, 'bar', 'bar', 'bar', 2, 'bar', 'bar', 'bar', 2); + +SELECT DISTINCT job_cmd FROM t1 WHERE job_cmd IN ('foobar','null'); +drop table t1; + +CREATE TABLE t1 (f1 INT NOT NULL, f2 VARCHAR(3) NOT NULL, KEY(f1), KEY(f2, f1)); +INSERT INTO t1 VALUES (0,'foo'),(1,'bar'); +SELECT 1 IN ( SELECT COUNT( DISTINCT f2 ) FROM t1 WHERE f1 <= 4 ); +drop table t1; + diff --git a/mysql-test/t/group_by_innodb.test b/mysql-test/t/group_by_innodb.test index e072a94fada..ed65e0c3e57 100644 --- a/mysql-test/t/group_by_innodb.test +++ b/mysql-test/t/group_by_innodb.test @@ -125,4 +125,34 @@ ORDER BY id DESC; DROP TABLE t1, t2; +--echo # Port of testcase: +--echo # +--echo # Bug#20819199 ASSERTION FAILED IN TEST_IF_SKIP_SORT_ORDER +--echo # + +CREATE TABLE t0 ( a INT ); +INSERT INTO t0 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); + +CREATE TABLE t1 ( + pk INT NOT NULL AUTO_INCREMENT, + a INT, + b INT, + PRIMARY KEY (pk), + KEY idx1 (a), + KEY idx2 (b, a), + KEY idx3 (a, b) +) ENGINE = InnoDB; + +INSERT INTO t1 (a, b) SELECT t01.a, t02.a FROM t0 t01, t0 t02; + +ANALYZE TABLE t1; + +let $query= +SELECT DISTINCT a, MAX(b) FROM t1 WHERE a >= 0 GROUP BY a,a; + +eval EXPLAIN $query; +eval $query; + +DROP TABLE t0, t1; + --echo # End of tests diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 1dc8f73a99d..0d92d434762 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -11902,8 +11902,6 @@ void QUICK_ROR_UNION_SELECT::add_used_key_part_to_set(MY_BITMAP *col_set) *******************************************************************************/ static inline uint get_field_keypart(KEY *index, Field *field); -static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, - PARAM *param, uint *param_idx); static bool get_sel_arg_for_keypart(Field *field, SEL_ARG *index_range_tree, SEL_ARG **cur_range); static bool get_constant_key_infix(KEY *index_info, SEL_ARG 
*index_range_tree, @@ -12180,8 +12178,6 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) (GA1,GA2) are all TRUE. If there is more than one such index, select the first one. Here we set the variables: group_prefix_len and index_info. */ - KEY *cur_index_info= table->key_info; - KEY *cur_index_info_end= cur_index_info + table->s->keys; /* Cost-related variables for the best index so far. */ double best_read_cost= DBL_MAX; ha_rows best_records= 0; @@ -12193,11 +12189,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) uint max_key_part; SEL_ARG *cur_index_tree= NULL; ha_rows cur_quick_prefix_records= 0; - uint cur_param_idx=MAX_KEY; - for (uint cur_index= 0 ; cur_index_info != cur_index_info_end ; - cur_index_info++, cur_index++) + // We go through allowed indexes + for (uint cur_param_idx= 0; cur_param_idx < param->keys ; ++cur_param_idx) { + const uint cur_index= param->real_keynr[cur_param_idx]; + KEY *const cur_index_info= &table->key_info[cur_index]; KEY_PART_INFO *cur_part; KEY_PART_INFO *end_part; /* Last part for loops. */ /* Last index part. */ @@ -12220,7 +12217,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) (was also: "Exclude UNIQUE indexes ..." but this was removed because there are cases Loose Scan over a multi-part index is useful). */ - if (!table->covering_keys.is_set(cur_index)) + if (!table->covering_keys.is_set(cur_index) || + !table->keys_in_use_for_group_by.is_set(cur_index)) continue; /* @@ -12399,9 +12397,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) { if (tree) { - uint dummy; - SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, - &dummy); + SEL_ARG *index_range_tree= tree->keys[cur_param_idx]; if (!get_constant_key_infix(cur_index_info, index_range_tree, first_non_group_part, min_max_arg_part, last_part, thd, cur_key_infix, @@ -12465,9 +12461,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) */ if (tree && min_max_arg_item) { - uint dummy; - SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, - &dummy); + SEL_ARG *index_range_tree= tree->keys[cur_param_idx]; SEL_ARG *cur_range= NULL; if (get_sel_arg_for_keypart(min_max_arg_part->field, index_range_tree, &cur_range) || @@ -12485,9 +12479,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) /* Compute the cost of using this index. */ if (tree) { - /* Find the SEL_ARG sub-tree that corresponds to the chosen index. */ - cur_index_tree= get_index_range_tree(cur_index, tree, param, - &cur_param_idx); + cur_index_tree= tree->keys[cur_param_idx]; /* Check if this range tree can be used for prefix retrieval. */ Cost_estimate dummy_cost; uint mrr_flags= HA_MRR_USE_DEFAULT_IMPL; @@ -13020,44 +13012,6 @@ get_field_keypart(KEY *index, Field *field) } -/* - Find the SEL_ARG sub-tree that corresponds to the chosen index. - - SYNOPSIS - get_index_range_tree() - index [in] The ID of the index being looked for - range_tree[in] Tree of ranges being searched - param [in] PARAM from SQL_SELECT::test_quick_select - param_idx [out] Index in the array PARAM::key that corresponds to 'index' - - DESCRIPTION - - A SEL_TREE contains range trees for all usable indexes. This procedure - finds the SEL_ARG sub-tree for 'index'. The members of a SEL_TREE are - ordered in the same way as the members of PARAM::key, thus we first find - the corresponding index in the array PARAM::key. 
This index is returned - through the variable param_idx, to be used later as argument of - check_quick_select(). - - RETURN - Pointer to the SEL_ARG subtree that corresponds to index. -*/ - -SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, - uint *param_idx) -{ - uint idx= 0; /* Index nr in param->key_parts */ - while (idx < param->keys) - { - if (index == param->real_keynr[idx]) - break; - idx++; - } - *param_idx= idx; - return(range_tree->keys[idx]); -} - - /* Compute the cost of a quick_group_min_max_select for a particular index. -- cgit v1.2.1 From e81455bb1617e574faab93f0846a6339064968b3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 4 May 2015 08:32:05 +0200 Subject: MDEV-7973 bigint fail with gcc 5.0 -LONGLONG_MIN is the undefined behavior in C. longlong2decimal() used to do this: int longlong2decimal(longlong from, decimal_t *to) { if ((to->sign= from < 0)) return ull2dec(-from, to); return ull2dec(from, to); and later in ull2dec() (DIG_BASE is 1000000000): static int ull2dec(ulonglong from, decimal_t *to) { for (intg1=1; from >= DIG_BASE; intg1++, from/=DIG_BASE) {} this breaks in gcc-5 at -O3. Here ull2dec is inlined into longlong2decimal. And gcc-5 believes that 'from' in the inlined ull2dec is always a positive integer (indeed, if it was negative, then -from was used instead). So gcc-5 uses *signed* comparison with DIG_BASE. Fix: make a special case for LONGLONG_MIN, don't negate it --- strings/decimal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/strings/decimal.c b/strings/decimal.c index 8dbe1bd57f4..b0c57d3db0c 100644 --- a/strings/decimal.c +++ b/strings/decimal.c @@ -1025,7 +1025,11 @@ int ulonglong2decimal(ulonglong from, decimal_t *to) int longlong2decimal(longlong from, decimal_t *to) { if ((to->sign= from < 0)) + { + if (from == LONGLONG_MIN) // avoid undefined behavior + return ull2dec((ulonglong)LONGLONG_MIN, to); return ull2dec(-from, to); + } return ull2dec(from, to); } -- cgit v1.2.1 From 7d57772f47e0d69b2e2a7bcd62da59e54f8c8343 Mon Sep 17 00:00:00 2001 From: Balasubramanian Kandasamy Date: Tue, 5 Jul 2016 17:08:37 +0530 Subject: Bug#23736787 - YUM UPDATE FAIL FROM 5.5.51(COMUNITY/COMMERCIAL) TO 5.6.32(COMUNITY/COMMERCIAL) Remove mysql_config from client sub-package (cherry picked from commit 45c4bfa0f3f1c70756591f48710bb3e76ffde9bc) --- packaging/rpm-oel/mysql.spec.in | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in index 29957d98ed0..409c325b675 100644 --- a/packaging/rpm-oel/mysql.spec.in +++ b/packaging/rpm-oel/mysql.spec.in @@ -835,8 +835,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow %attr(755, root, root) %{_bindir}/mysqlslap -%attr(755, root, root) %{_bindir}/mysql_config -%attr(755, root, root) %{_bindir}/mysql_config-%{__isa_bits} %attr(644, root, root) %{_mandir}/man1/msql2mysql.1* %attr(644, root, root) %{_mandir}/man1/mysql.1* @@ -918,6 +916,9 @@ fi %endif %changelog +* Tue Jul 05 2016 Balasubramanian Kandasamy - 5.5.51-1 +- Remove mysql_config from client subpackage + * Tue Sep 29 2015 Balasubramanian Kandasamy - 5.5.47-1 - Added conflicts to mysql-connector-c-shared dependencies -- cgit v1.2.1 From ecb27d2650da546c53bf7ff709cc6bf5bfa7f289 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Mon, 27 Jun 2016 15:01:22 +0400 Subject: MDEV-10010 - Recursive call to mysql_rwlock_rdlock for LOCK_system_variables_hash Avoid recursive LOCK_system_variables_hash 
acquisition in intern_sys_var_ptr() by pre-syncing dynamic session variables. --- sql/sql_plugin.cc | 125 ++++++++++++++++++++++++++++-------------------------- sql/sql_plugin.h | 1 + sql/sql_show.cc | 11 +++++ 3 files changed, 77 insertions(+), 60 deletions(-) diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 0158ab7d206..850d44f83e3 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2955,6 +2955,70 @@ static st_bookmark *register_var(const char *plugin, const char *name, return result; } + +void sync_dynamic_session_variables(THD* thd, bool global_lock) +{ + uint idx; + + thd->variables.dynamic_variables_ptr= (char*) + my_realloc(thd->variables.dynamic_variables_ptr, + global_variables_dynamic_size, + MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); + + if (global_lock) + mysql_mutex_lock(&LOCK_global_system_variables); + + mysql_mutex_assert_owner(&LOCK_global_system_variables); + + memcpy(thd->variables.dynamic_variables_ptr + + thd->variables.dynamic_variables_size, + global_system_variables.dynamic_variables_ptr + + thd->variables.dynamic_variables_size, + global_system_variables.dynamic_variables_size - + thd->variables.dynamic_variables_size); + + /* + now we need to iterate through any newly copied 'defaults' + and if it is a string type with MEMALLOC flag, we need to strdup + */ + for (idx= 0; idx < bookmark_hash.records; idx++) + { + sys_var_pluginvar *pi; + sys_var *var; + st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx); + + if (v->version <= thd->variables.dynamic_variables_version) + continue; /* already in thd->variables */ + + if (!(var= intern_find_sys_var(v->key + 1, v->name_len)) || + !(pi= var->cast_pluginvar()) || + v->key[0] != plugin_var_bookmark_key(pi->plugin_var->flags)) + continue; + + /* Here we do anything special that may be required of the data types */ + + if ((pi->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR && + pi->plugin_var->flags & PLUGIN_VAR_MEMALLOC) + { + int offset= ((thdvar_str_t *)(pi->plugin_var))->offset; + char **pp= (char**) (thd->variables.dynamic_variables_ptr + offset); + if (*pp) + *pp= my_strdup(*pp, MYF(MY_WME|MY_FAE)); + } + } + + if (global_lock) + mysql_mutex_unlock(&LOCK_global_system_variables); + + thd->variables.dynamic_variables_version= + global_system_variables.dynamic_variables_version; + thd->variables.dynamic_variables_head= + global_system_variables.dynamic_variables_head; + thd->variables.dynamic_variables_size= + global_system_variables.dynamic_variables_size; +} + + /* returns a pointer to the memory which holds the thd-local variable or a pointer to the global variable if thd==null. 
@@ -2976,67 +3040,8 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock) if (!thd->variables.dynamic_variables_ptr || (uint)offset > thd->variables.dynamic_variables_head) { - uint idx; - mysql_rwlock_rdlock(&LOCK_system_variables_hash); - - thd->variables.dynamic_variables_ptr= (char*) - my_realloc(thd->variables.dynamic_variables_ptr, - global_variables_dynamic_size, - MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); - - if (global_lock) - mysql_mutex_lock(&LOCK_global_system_variables); - - mysql_mutex_assert_owner(&LOCK_global_system_variables); - - memcpy(thd->variables.dynamic_variables_ptr + - thd->variables.dynamic_variables_size, - global_system_variables.dynamic_variables_ptr + - thd->variables.dynamic_variables_size, - global_system_variables.dynamic_variables_size - - thd->variables.dynamic_variables_size); - - /* - now we need to iterate through any newly copied 'defaults' - and if it is a string type with MEMALLOC flag, we need to strdup - */ - for (idx= 0; idx < bookmark_hash.records; idx++) - { - sys_var_pluginvar *pi; - sys_var *var; - st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx); - - if (v->version <= thd->variables.dynamic_variables_version) - continue; /* already in thd->variables */ - - if (!(var= intern_find_sys_var(v->key + 1, v->name_len)) || - !(pi= var->cast_pluginvar()) || - v->key[0] != plugin_var_bookmark_key(pi->plugin_var->flags)) - continue; - - /* Here we do anything special that may be required of the data types */ - - if ((pi->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR && - pi->plugin_var->flags & PLUGIN_VAR_MEMALLOC) - { - int offset= ((thdvar_str_t *)(pi->plugin_var))->offset; - char **pp= (char**) (thd->variables.dynamic_variables_ptr + offset); - if (*pp) - *pp= my_strdup(*pp, MYF(MY_WME|MY_FAE)); - } - } - - if (global_lock) - mysql_mutex_unlock(&LOCK_global_system_variables); - - thd->variables.dynamic_variables_version= - global_system_variables.dynamic_variables_version; - thd->variables.dynamic_variables_head= - global_system_variables.dynamic_variables_head; - thd->variables.dynamic_variables_size= - global_system_variables.dynamic_variables_size; - + sync_dynamic_session_variables(thd, global_lock); mysql_rwlock_unlock(&LOCK_system_variables_hash); } DBUG_RETURN((uchar*)thd->variables.dynamic_variables_ptr + offset); diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index efa48b22ce8..9483fc8d9b3 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -190,4 +190,5 @@ extern bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func, extern bool plugin_dl_foreach(THD *thd, const LEX_STRING *dl, plugin_foreach_func *func, void *arg); +extern void sync_dynamic_session_variables(THD* thd, bool global_lock); #endif diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 8ec44bad966..ae3874506dd 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -7209,6 +7209,17 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) COND *partial_cond= make_cond_for_info_schema(thd, cond, tables); mysql_rwlock_rdlock(&LOCK_system_variables_hash); + + /* + Avoid recursive LOCK_system_variables_hash acquisition in + intern_sys_var_ptr() by pre-syncing dynamic session variables. 
+ */ + if (scope == OPT_SESSION && + (!thd->variables.dynamic_variables_ptr || + global_system_variables.dynamic_variables_head > + thd->variables.dynamic_variables_head)) + sync_dynamic_session_variables(thd, true); + res= show_status_array(thd, wild, enumerate_sys_vars(thd, sorted_vars, scope), scope, NULL, "", tables->table, upper_case_names, partial_cond); -- cgit v1.2.1 From ae511cbe387fdc16bf48b4fccc889d6d5d256d76 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Tue, 28 Jun 2016 14:53:17 +0400 Subject: MDEV-9363 - Mroonga tests with datetime field fail on Solaris in buildbot On Solaris mktime() adds one extra day to tm_mday field and returns appropriate value for dates 1600-01-01 and earlier. That is 1600-01-01 becomes 1600-01-02. Solaris mktime manual excerpts: ... The tm_year member must be for year 1901 or later. Calendar times before 20:45:52 UTC, December 13, 1901 or after 03:14:07 UTC, January 19, 2038 cannot be represented. Port- able applications should not try to create dates before 00:00:00 UTC, January 1, 1970 or after 00:00:00 UTC, January 1, 2038. ... The mktime() function assumes Gregorian dates. Times before the adoption of the Gregorian calendar will not match his- torial records. ... According to manual Mroonga only supports dates and datetimes after 1900: https://mariadb.com/kb/en/mariadb/about-mroonga/ Technically these tests cover unsupported values and should fail on all platforms. Disable tests until the problem is fixed upstream. --- storage/mroonga/mysql-test/mroonga/include/mroonga/skip_solaris10.inc | 3 +++ .../mroonga/storage/t/column_datetime_64bit_before_unix_epoch.test | 1 + .../storage/t/index_multiple_column_unique_date_64bit_equal.test | 1 + .../storage/t/index_multiple_column_unique_date_order_64bit_asc.test | 1 + .../storage/t/index_multiple_column_unique_date_order_64bit_desc.test | 1 + .../storage/t/index_multiple_column_unique_datetime_index_read.test | 1 + .../storage/t/index_multiple_column_unique_datetime_order_asc.test | 1 + .../storage/t/index_multiple_column_unique_datetime_order_desc.test | 1 + 8 files changed, 10 insertions(+) create mode 100644 storage/mroonga/mysql-test/mroonga/include/mroonga/skip_solaris10.inc diff --git a/storage/mroonga/mysql-test/mroonga/include/mroonga/skip_solaris10.inc b/storage/mroonga/mysql-test/mroonga/include/mroonga/skip_solaris10.inc new file mode 100644 index 00000000000..7cee5c38c53 --- /dev/null +++ b/storage/mroonga/mysql-test/mroonga/include/mroonga/skip_solaris10.inc @@ -0,0 +1,3 @@ +if (`SELECT @@version_compile_os='solaris10'`) { + skip This test is not for Solaris 10; +} diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/column_datetime_64bit_before_unix_epoch.test b/storage/mroonga/mysql-test/mroonga/storage/t/column_datetime_64bit_before_unix_epoch.test index 246500b4498..433e239c301 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/column_datetime_64bit_before_unix_epoch.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/column_datetime_64bit_before_unix_epoch.test @@ -17,6 +17,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_64bit_equal.test b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_64bit_equal.test index e84147d6041..010611ee3ec 100644 --- 
a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_64bit_equal.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_64bit_equal.test @@ -18,6 +18,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_asc.test b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_asc.test index 7a0ec41f07a..ef7066164fd 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_asc.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_asc.test @@ -18,6 +18,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_desc.test b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_desc.test index db434bca061..d34d29a224b 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_desc.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_date_order_64bit_desc.test @@ -18,6 +18,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_index_read.test b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_index_read.test index 2b7ea97133e..66e0a4d6a9f 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_index_read.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_index_read.test @@ -19,6 +19,7 @@ --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_mroonga.inc --disable_warnings diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_asc.test b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_asc.test index c2ebff516d0..c28fcd86a2a 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_asc.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_asc.test @@ -18,6 +18,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc diff --git a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_desc.test 
b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_desc.test index 6a60b00087e..b5b3545cc9d 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_desc.test +++ b/storage/mroonga/mysql-test/mroonga/storage/t/index_multiple_column_unique_datetime_order_desc.test @@ -18,6 +18,7 @@ --source ../../include/mroonga/skip_freebsd.inc --source ../../include/mroonga/skip_osx.inc +--source ../../include/mroonga/skip_solaris10.inc --source ../../include/mroonga/have_64bit.inc --source ../../include/mroonga/have_mroonga.inc -- cgit v1.2.1 From f280a87c6609384fc2b962b27c1f9d3ac4a39150 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 11 Jul 2016 17:03:03 +0000 Subject: MDEV-10318 : Fix crash in embedded, in case prepared statement has parameter placeholders, but does not bind parameters --- libmysqld/lib_sql.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index a313c624078..63424af27f6 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -333,6 +333,12 @@ static int emb_stmt_execute(MYSQL_STMT *stmt) THD *thd; my_bool res; + if (stmt->param_count && !stmt->bind_param_done) + { + set_stmt_error(stmt, CR_PARAMS_NOT_BOUND, unknown_sqlstate, NULL); + DBUG_RETURN(1); + } + int4store(header, stmt->stmt_id); header[4]= (uchar) stmt->flags; thd= (THD*)stmt->mysql->thd; -- cgit v1.2.1 From 97ded96a33abb98190537e10e94c7dadf5bd0a5f Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 11 Jul 2016 17:03:03 +0000 Subject: MDEV-10318 : Fix crash in embedded, in case prepared statement has parameter placeholders, but does not bind parameters --- libmysqld/lib_sql.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 3d6ca5a3810..623569c18de 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -341,6 +341,12 @@ static int emb_stmt_execute(MYSQL_STMT *stmt) THD *thd; my_bool res; + if (stmt->param_count && !stmt->bind_param_done) + { + set_stmt_error(stmt, CR_PARAMS_NOT_BOUND, unknown_sqlstate, NULL); + DBUG_RETURN(1); + } + int4store(header, stmt->stmt_id); header[4]= (uchar) stmt->flags; thd= (THD*)stmt->mysql->thd; -- cgit v1.2.1 From 7d4a7d8c5861e6587176052ea71c30ab12a49084 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Mon, 30 May 2016 22:33:34 +0300 Subject: [MDEV-9127] Crash reporter often fails to show the query that crashed Addreses are not necessarily between heap_start && heap_end. Malloc calls using mmap can place pointers outside these bounds. In this case, we'll warn the user that the query pointer is potentially invalid. However, we'll attempt to print the data anyway after we're done printing everything else. 
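For illustration only, a minimal C client sketch (connection parameters are placeholders) of the scenario this fix guards against: a statement is prepared with a '?' placeholder and executed without mysql_stmt_bind_param(). With the check added below, the server reports CR_PARAMS_NOT_BOUND instead of crashing, matching the `select ?+1` test case added later in this series.

/* sketch.c -- adjust the include path (<mysql.h> vs <mysql/mysql.h>) to your install */
#include <stdio.h>
#include <mysql.h>

int main(void)
{
  MYSQL *mysql;
  MYSQL_STMT *stmt;

  mysql_library_init(0, NULL, NULL);          /* also initialises libmysqld */
  mysql= mysql_init(NULL);
  if (!mysql_real_connect(mysql, NULL, "root", NULL, "test", 0, NULL, 0))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(mysql));
    return 1;
  }

  stmt= mysql_stmt_init(mysql);
  mysql_stmt_prepare(stmt, "SELECT ? + 1", 12);

  /* Deliberately no mysql_stmt_bind_param() call here. */
  if (mysql_stmt_execute(stmt))
    fprintf(stderr, "expected error: %s\n", mysql_stmt_error(stmt));

  mysql_stmt_close(stmt);
  mysql_close(mysql);
  mysql_library_end();
  return 0;
}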
--- include/my_stacktrace.h | 2 +- mysys/stacktrace.c | 30 ++++++++++++++++++++++++++---- sql/signal_handler.cc | 23 ++++++++++++++++++++++- 3 files changed, 49 insertions(+), 6 deletions(-) diff --git a/include/my_stacktrace.h b/include/my_stacktrace.h index fb2525e3a12..fad6e532de9 100644 --- a/include/my_stacktrace.h +++ b/include/my_stacktrace.h @@ -45,7 +45,7 @@ C_MODE_START #if defined(HAVE_STACKTRACE) || defined(HAVE_BACKTRACE) void my_init_stacktrace(); void my_print_stacktrace(uchar* stack_bottom, ulong thread_stack); -void my_safe_print_str(const char* val, int max_len); +int my_safe_print_str(const char* val, int max_len); void my_write_core(int sig); #if BACKTRACE_DEMANGLE char *my_demangle(const char *mangled_name, int *status); diff --git a/mysys/stacktrace.c b/mysys/stacktrace.c index 613911e4495..746b99d6112 100644 --- a/mysys/stacktrace.c +++ b/mysys/stacktrace.c @@ -129,13 +129,32 @@ static int safe_print_str(const char *addr, int max_len) #endif -void my_safe_print_str(const char* val, int max_len) +/* + Attempt to print a char * pointer as a string. + + SYNOPSIS + Prints either until the end of string ('\0'), or max_len characters have + been printed. + + RETURN VALUE + 0 Pointer was within the heap address space. + The string was printed fully, or until the end of the heap address space. + 1 Pointer is outside the heap address space. Printed as invalid. + + NOTE + On some systems, we can have valid pointers outside the heap address space. + This is through the use of mmap inside malloc calls. When this function + returns 1, it does not mean 100% that the pointer is corrupted. +*/ + +int my_safe_print_str(const char* val, int max_len) { char *heap_end; #ifdef __linux__ + // Try and make use of /proc filesystem to safely print memory contents. if (!safe_print_str(val, max_len)) - return; + return 0; #endif heap_end= (char*) sbrk(0); @@ -143,12 +162,14 @@ void my_safe_print_str(const char* val, int max_len) if (!PTR_SANE(val)) { my_safe_printf_stderr("%s", "is an invalid pointer"); - return; + return 1; } for (; max_len && PTR_SANE(val) && *val; --max_len) my_write_stderr((val++), 1); my_safe_printf_stderr("%s", "\n"); + + return 0; } #if defined(HAVE_PRINTSTACK) @@ -728,7 +749,7 @@ void my_write_core(int unused) } -void my_safe_print_str(const char *val, int len) +int my_safe_print_str(const char *val, int len) { __try { @@ -738,6 +759,7 @@ void my_safe_print_str(const char *val, int len) { my_safe_printf_stderr("%s", "is an invalid string pointer"); } + return 0; } #endif /*__WIN__*/ diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index bb1e6321042..fd6f62fa100 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -65,6 +65,12 @@ extern "C" sig_handler handle_fatal_signal(int sig) #ifdef HAVE_STACKTRACE THD *thd; #endif + /* + This flag remembers if the query pointer was found invalid. + We will try and print the query at the end of the signal handler, in case + we're wrong. + */ + bool print_invalid_query_pointer= false; if (segfaulted) { @@ -190,7 +196,12 @@ extern "C" sig_handler handle_fatal_signal(int sig) "Some pointers may be invalid and cause the dump to abort.\n"); my_safe_printf_stderr("Query (%p): ", thd->query()); - my_safe_print_str(thd->query(), MY_MIN(65536U, thd->query_length())); + if (my_safe_print_str(thd->query(), MY_MIN(65536U, thd->query_length()))) + { + // Query was found invalid. We will try to print it at the end. 
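For illustration only, a small Linux-specific C sketch of why a heap-bounds sanity check can reject a perfectly valid pointer: with glibc, allocations above the mmap threshold are normally served by mmap() and therefore lie outside the classic sbrk() heap. The pointer comparison below is purely illustrative.

/* sketch.c -- Linux/glibc behaviour assumed */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
  char *small_buf= malloc(16);            /* usually served from the brk heap */
  char *large_buf= malloc(1024 * 1024);   /* usually served via mmap() */
  char *heap_end= (char*) sbrk(0);        /* current program break */

  printf("heap_end=%p\n", (void*) heap_end);
  printf("small=%p inside_heap=%d\n", (void*) small_buf, small_buf < heap_end);
  printf("large=%p inside_heap=%d\n", (void*) large_buf, large_buf < heap_end);

  free(small_buf);
  free(large_buf);
  return 0;
}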
+ print_invalid_query_pointer= true; + } + my_safe_printf_stderr("\nConnection ID (thread ID): %lu\n", (ulong) thd->thread_id); my_safe_printf_stderr("Status: %s\n\n", kreason); @@ -254,6 +265,16 @@ extern "C" sig_handler handle_fatal_signal(int sig) "\"mlockall\" bugs.\n"); } + if (print_invalid_query_pointer) + { + my_safe_printf_stderr( + "\nWe think the query pointer is invalid, but we will try " + "to print it anyway. \n" + "Query: "); + my_write_stderr(thd->query(), MY_MIN(65536U, thd->query_length())); + my_safe_printf_stderr("\n\n"); + } + #ifdef HAVE_WRITE_CORE if (test_flags & TEST_CORE_ON_SIGNAL) { -- cgit v1.2.1 From 4e19aa386493fcf0613049b47cbb9b151e2d3e8d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 12 Jul 2016 12:13:31 +0200 Subject: MDEV-10318 unset params in --ps --embedded add a test case --- mysql-test/r/ps_1general.result | 2 ++ mysql-test/t/ps_1general.test | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result index 0a3d16cf48e..cc31944fd1f 100644 --- a/mysql-test/r/ps_1general.result +++ b/mysql-test/r/ps_1general.result @@ -788,3 +788,5 @@ execute stmt1; 1 drop prepare stmt1; drop table t1; +select ?+1; +Got one of the listed errors diff --git a/mysql-test/t/ps_1general.test b/mysql-test/t/ps_1general.test index 812b1b5ff94..7b7b87ef851 100644 --- a/mysql-test/t/ps_1general.test +++ b/mysql-test/t/ps_1general.test @@ -936,3 +936,10 @@ drop table t1; # Matthias # End of 4.1 tests + +# +# MDEV-10318 unset params in --ps --embedded +# +--error ER_PARSE_ERROR,2031 +select ?+1; + -- cgit v1.2.1 From 31e763ddc545bda747ede3ee218f67d3a6749cfe Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 11 Jul 2016 21:29:18 +0200 Subject: MDEV-10211 : fix ssl test not to use specific value of ssl_cipher, as it can change between different openssl/yassl version --- mysql-test/r/openssl_1.result | 10 +++++----- mysql-test/r/ssl.result | 11 +++++++---- mysql-test/r/ssl_ca.result | 16 ++++++++-------- mysql-test/r/ssl_compress.result | 12 ++++++------ mysql-test/r/ssl_timeout.result | 6 +++--- mysql-test/t/openssl_1.test | 6 ++---- mysql-test/t/ssl.test | 5 ++--- mysql-test/t/ssl_ca.test | 14 +++++--------- mysql-test/t/ssl_compress.test | 6 ++---- mysql-test/t/ssl_timeout.test | 3 +-- 10 files changed, 41 insertions(+), 48 deletions(-) diff --git a/mysql-test/r/openssl_1.result b/mysql-test/r/openssl_1.result index dd78b1967c4..1d264bd57b1 100644 --- a/mysql-test/r/openssl_1.result +++ b/mysql-test/r/openssl_1.result @@ -56,8 +56,8 @@ mysqltest: Could not open connection 'default': 2026 SSL connection error: Unabl SHOW STATUS LIKE 'Ssl_cipher'; Variable_name Value Ssl_cipher DHE-RSA-AES256-SHA -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +have_ssl +1 End of 5.0 tests DROP TABLE IF EXISTS thread_status; DROP EVENT IF EXISTS event_status; @@ -202,9 +202,9 @@ result is still running; no cipher request crashed the server GRANT SELECT ON test.* TO bug42158@localhost REQUIRE X509; FLUSH PRIVILEGES; -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 DROP USER bug42158@localhost; End of 5.1 tests /*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/; diff --git a/mysql-test/r/ssl.result b/mysql-test/r/ssl.result index 57427a228eb..2b4bb419643 100644 --- a/mysql-test/r/ssl.result +++ b/mysql-test/r/ssl.result @@ -1,6 +1,9 @@ SHOW STATUS LIKE 
'Ssl_cipher'; Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +Ssl_cipher AES128-GCM-SHA256 +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 SHOW STATUS LIKE 'Ssl_server_not_before'; Variable_name Value Ssl_server_not_before Apr 25 14:55:05 2015 GMT @@ -2163,9 +2166,9 @@ Privat (Private Nutzung) Mobilfunk Warnings: Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 select aes_decrypt('MySQL','adf'); aes_decrypt('MySQL','adf') NULL diff --git a/mysql-test/r/ssl_ca.result b/mysql-test/r/ssl_ca.result index ffc5671f85f..83a98902581 100644 --- a/mysql-test/r/ssl_ca.result +++ b/mysql-test/r/ssl_ca.result @@ -4,21 +4,21 @@ # try to connect with wrong '--ssl-ca' path : should fail ERROR 2026 (HY000): SSL connection error: SSL_CTX_set_default_verify_paths failed # try to connect with correct '--ssl-ca' path : should connect -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +have_ssl +1 # # Bug#21920678: SSL-CA DOES NOT ACCEPT ~USER TILDE HOME DIRECTORY # PATH SUBSTITUTION # # try to connect with '--ssl-ca' option using tilde home directoy # path substitution : should connect -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +have_ssl +1 # try to connect with '--ssl-key' option using tilde home directoy # path substitution : should connect -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +have_ssl +1 # try to connect with '--ssl-cert' option using tilde home directoy # path substitution : should connect -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +have_ssl +1 diff --git a/mysql-test/r/ssl_compress.result b/mysql-test/r/ssl_compress.result index 31f484ab58c..d6a65c2b06d 100644 --- a/mysql-test/r/ssl_compress.result +++ b/mysql-test/r/ssl_compress.result @@ -1,6 +1,6 @@ -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 SHOW STATUS LIKE 'Compression'; Variable_name Value Compression ON @@ -2160,9 +2160,9 @@ Privat (Private Nutzung) Mobilfunk Warnings: Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 SHOW STATUS LIKE 'Compression'; Variable_name Value Compression ON diff --git a/mysql-test/r/ssl_timeout.result b/mysql-test/r/ssl_timeout.result index 356e931ba4d..d9ef0f7a16f 100644 --- a/mysql-test/r/ssl_timeout.result +++ b/mysql-test/r/ssl_timeout.result @@ -1,7 +1,7 @@ # connect with read timeout so SLEEP() should timeout # Check ssl turned on -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher DHE-RSA-AES256-SHA +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; +have_ssl +1 SELECT SLEEP(600); ERROR HY000: Lost connection to MySQL server during query diff --git a/mysql-test/t/openssl_1.test b/mysql-test/t/openssl_1.test index 91a8cc57b1b..7063cffbb36 100644 --- a/mysql-test/t/openssl_1.test +++ b/mysql-test/t/openssl_1.test @@ -132,8 +132,7 @@ drop table t1; # verification of servers 
certificate by setting both ca certificate # and ca path to NULL # ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA ---exec $MYSQL --ssl --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem -e "SHOW STATUS LIKE 'ssl_Cipher'" 2>&1 +--exec $MYSQL --ssl --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem -e "SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" 2>&1 --echo End of 5.0 tests # @@ -257,8 +256,7 @@ select 'is still running; no cipher request crashed the server' as result from d GRANT SELECT ON test.* TO bug42158@localhost REQUIRE X509; FLUSH PRIVILEGES; connect(con1,localhost,bug42158,,,,,SSL); ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA -SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; disconnect con1; connection default; DROP USER bug42158@localhost; diff --git a/mysql-test/t/ssl.test b/mysql-test/t/ssl.test index 21733f7e594..21145bf08c3 100644 --- a/mysql-test/t/ssl.test +++ b/mysql-test/t/ssl.test @@ -11,8 +11,8 @@ connect (ssl_con,localhost,root,,,,,SSL); # Check ssl turned on ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # Check ssl expiration SHOW STATUS LIKE 'Ssl_server_not_before'; @@ -22,8 +22,7 @@ SHOW STATUS LIKE 'Ssl_server_not_after'; -- source include/common-tests.inc # Check ssl turned on ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA -SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # # MDEV-7697 Client reports ERROR 2006 (MySQL server has gone away) or ERROR 2013 (Lost connection to MySQL server during query) while executing AES* functions under SSL diff --git a/mysql-test/t/ssl_ca.test b/mysql-test/t/ssl_ca.test index 8e81f44e61c..8d830a75879 100644 --- a/mysql-test/t/ssl_ca.test +++ b/mysql-test/t/ssl_ca.test @@ -7,11 +7,10 @@ --echo # try to connect with wrong '--ssl-ca' path : should fail --error 1 ---exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/wrong-cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SHOW STATUS LIKE 'Ssl_cipher'" 2>&1 +--exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/wrong-cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" 2>&1 --echo # try to connect with correct '--ssl-ca' path : should connect ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA ---exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SHOW STATUS LIKE 'Ssl_cipher'" +--exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SELECT (VARIABLE_VALUE <> 
'') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" --echo # --echo # Bug#21920678: SSL-CA DOES NOT ACCEPT ~USER TILDE HOME DIRECTORY @@ -22,15 +21,12 @@ --echo # try to connect with '--ssl-ca' option using tilde home directoy --echo # path substitution : should connect ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA ---exec $MYSQL --ssl-ca$mysql_test_dir_path/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SHOW STATUS LIKE 'Ssl_cipher'" +--exec $MYSQL --ssl-ca$mysql_test_dir_path/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" --echo # try to connect with '--ssl-key' option using tilde home directoy --echo # path substitution : should connect ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA ---exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key$mysql_test_dir_path/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SHOW STATUS LIKE 'Ssl_cipher'" +--exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key$mysql_test_dir_path/std_data/client-key.pem --ssl-cert=$MYSQL_TEST_DIR/std_data/client-cert.pem test -e "SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" --echo # try to connect with '--ssl-cert' option using tilde home directoy --echo # path substitution : should connect ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA ---exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert$mysql_test_dir_path/std_data/client-cert.pem test -e "SHOW STATUS LIKE 'Ssl_cipher'" +--exec $MYSQL --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem --ssl-key=$MYSQL_TEST_DIR/std_data/client-key.pem --ssl-cert$mysql_test_dir_path/std_data/client-cert.pem test -e "SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'" diff --git a/mysql-test/t/ssl_compress.test b/mysql-test/t/ssl_compress.test index 28f3453c23e..588d4555db8 100644 --- a/mysql-test/t/ssl_compress.test +++ b/mysql-test/t/ssl_compress.test @@ -11,8 +11,7 @@ connect (ssl_compress_con,localhost,root,,,,,SSL COMPRESS); # Check ssl turned on ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA -SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # Check compression turned on SHOW STATUS LIKE 'Compression'; @@ -21,8 +20,7 @@ SHOW STATUS LIKE 'Compression'; -- source include/common-tests.inc # Check ssl turned on ---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA -SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # Check compression turned on SHOW STATUS LIKE 'Compression'; diff --git a/mysql-test/t/ssl_timeout.test b/mysql-test/t/ssl_timeout.test index 806b928aca0..430fe7130de 100644 --- a/mysql-test/t/ssl_timeout.test +++ b/mysql-test/t/ssl_timeout.test @@ -7,8 +7,7 @@ connect (ssl_con,localhost,root,,,,,SSL read_timeout=5); --echo # Check ssl turned on 
---replace_result DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES256-SHA DHE-RSA-CHACHA20-POLY1305 DHE-RSA-AES256-SHA -SHOW STATUS LIKE 'Ssl_cipher'; +SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # --error CR_SERVER_LOST --error 2013 -- cgit v1.2.1 From 3e8ae6ef4f08937332e6ad317f37a9c0c9a717d3 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 12 Jul 2016 12:36:11 +0200 Subject: MDEV-10211 postfix - in ssl.test, remove remaining SHOW STATUS LIKE 'Ssl_cipher' --- mysql-test/r/ssl.result | 3 --- mysql-test/t/ssl.test | 1 - 2 files changed, 4 deletions(-) diff --git a/mysql-test/r/ssl.result b/mysql-test/r/ssl.result index 2b4bb419643..4bae442459a 100644 --- a/mysql-test/r/ssl.result +++ b/mysql-test/r/ssl.result @@ -1,6 +1,3 @@ -SHOW STATUS LIKE 'Ssl_cipher'; -Variable_name Value -Ssl_cipher AES128-GCM-SHA256 SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; have_ssl 1 diff --git a/mysql-test/t/ssl.test b/mysql-test/t/ssl.test index 21145bf08c3..67db668a1c2 100644 --- a/mysql-test/t/ssl.test +++ b/mysql-test/t/ssl.test @@ -11,7 +11,6 @@ connect (ssl_con,localhost,root,,,,,SSL); # Check ssl turned on -SHOW STATUS LIKE 'Ssl_cipher'; SELECT (VARIABLE_VALUE <> '') AS have_ssl FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_cipher'; # Check ssl expiration -- cgit v1.2.1 From 53e7fcca4191c47ca48c378163bcb93ef6e632a5 Mon Sep 17 00:00:00 2001 From: Craig Andrews Date: Tue, 28 Jun 2016 11:23:12 -0400 Subject: MDEV-10298: Systemd hardening Add ProtectSystem=full, NoNewPrivileges=true, PrivateDevices=true, and ProtectHome=true to the systemd units. --- support-files/mariadb.service.in | 10 ++++++++++ support-files/mariadb@.service.in | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 55202c696fb..879c4d90a6c 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -45,6 +45,16 @@ Group=mysql # To allow memlock to be used as non-root user if set in configuration CapabilityBoundingSet=CAP_IPC_LOCK +# Prevent writes to /usr, /boot, and /etc +ProtectSystem=full + +NoNewPrivileges=true + +PrivateDevices=true + +# Prevent accessing /home, /root and /run/user +ProtectHome=true + # Execute pre and post scripts as root, otherwise it does it as User= PermissionsStartOnly=true diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index 18adf0e0eac..b7ac3b808bf 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -52,6 +52,16 @@ Group=mysql # To allow memlock to be used as non-root user if set in configuration CapabilityBoundingSet=CAP_IPC_LOCK +# Prevent writes to /usr, /boot, and /etc +ProtectSystem=full + +NoNewPrivileges=true + +PrivateDevices=true + +# Prevent accessing /home, /root and /run/user +ProtectHome=true + # Execute pre and post scripts as root, otherwise it does it as User= PermissionsStartOnly=true -- cgit v1.2.1 From 0bb5d955423c2a7b29eab02e7bf6194ae003ae75 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 11 Jul 2016 22:01:24 +0300 Subject: MDEV-10325: Queries examines all rows of a tables when it should not The problem was introduced by 1859caf60b725f81f2ac6091eb44cb848a4a439a: MDEV-10175: range optimizer calls records_in_range() for full extended keys Make the range optimizer not call records_in_range() when it would not give any benefit. 
that patch used an incorrect way to check for full extended key. Now fixing the check. --- mysql-test/r/innodb_ext_key.result | 24 ++++++++++++++++++++++++ mysql-test/t/innodb_ext_key.test | 28 ++++++++++++++++++++++++++++ sql/opt_range_mrr.cc | 6 +++--- 3 files changed, 55 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result index cae402a9f12..bd4bdb28fff 100644 --- a/mysql-test/r/innodb_ext_key.result +++ b/mysql-test/r/innodb_ext_key.result @@ -1040,5 +1040,29 @@ a 1 drop table t1, t2; set optimizer_switch=@save_optimizer_switch; +# +# MDEV-10325: Queries examines all rows of a tables when it should not +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +pk int not null, +col1 varchar(32), +filler varchar(100), +key idx1(col1(10)), +primary key (pk) +)engine=innodb; +insert into t1 +select +A.a + 10*B.a + 100*C.a, +concat('1234567890-', 1000+ A.a + 10*B.a + 100*C.a), +repeat('filler-data-', 4) +from +t0 A, t0 B, t0 C; +# The following must use type=ALL (and NOT type=ref, rows=1) +explain select * from t1 where col1='1234567890-a'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL idx1 NULL NULL NULL # Using where +drop table t0,t1; set optimizer_switch=@save_ext_key_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test index 9f3a89ff948..ec774b638e8 100644 --- a/mysql-test/t/innodb_ext_key.test +++ b/mysql-test/t/innodb_ext_key.test @@ -693,5 +693,33 @@ drop table t1, t2; set optimizer_switch=@save_optimizer_switch; +--echo # +--echo # MDEV-10325: Queries examines all rows of a tables when it should not +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + pk int not null, + col1 varchar(32), + filler varchar(100), + key idx1(col1(10)), + primary key (pk) +)engine=innodb; + +insert into t1 +select + A.a + 10*B.a + 100*C.a, + concat('1234567890-', 1000+ A.a + 10*B.a + 100*C.a), + repeat('filler-data-', 4) +from + t0 A, t0 B, t0 C; + +--echo # The following must use type=ALL (and NOT type=ref, rows=1) +--replace_column 9 # +explain select * from t1 where col1='1234567890-a'; + +drop table t0,t1; + set optimizer_switch=@save_ext_key_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc index 729c491a6f1..fbccb7c4e1d 100644 --- a/sql/opt_range_mrr.cc +++ b/sql/opt_range_mrr.cc @@ -278,14 +278,14 @@ walk_up_n_right: (1) - range analysis is used for estimating condition selectivity (2) - This is a unique key, and we have conditions for all its user-defined key parts. - (3) - The table uses extended keys, and we have conditions for - all key parts. + (3) - The table uses extended keys, this key covers all components, + and we have conditions for all key parts. 
*/ if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag && (!key_info || // (1) ((uint)key_tree->part+1 == key_info->user_defined_key_parts && // (2) key_info->flags & HA_NOSAME) || // (2) - (seq->param->table->s->use_ext_keys && // (3) + ((key_info->flags & HA_EXT_NOSAME) && // (3) (uint)key_tree->part+1 == key_info->ext_key_parts) // (3) ) && range->start_key.length == range->end_key.length && -- cgit v1.2.1 From 8a8ba1949bf4bdc1dd6504d88d20cfa3ef2c0794 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 11 Jul 2016 22:22:32 +0300 Subject: MDEV-10360: Extended keys: index properties depend on index order TABLE_SHARE::init_from_binary_frm_image has a rule: if an index has a partially-covered column (like in "KEY(col(N))" ), then dont provide "Extended Keys" feature for this index. The problem was that due to coding error Extended Keys feature was disabled for *ALL* subsequent indexes. Fixed the error. --- mysql-test/r/innodb_ext_key.result | 45 ++++++++++++++++++++++++++++++++ mysql-test/t/innodb_ext_key.test | 53 ++++++++++++++++++++++++++++++++++++++ sql/table.cc | 8 +++--- 3 files changed, 103 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result index bd4bdb28fff..c0b91c88d8a 100644 --- a/mysql-test/r/innodb_ext_key.result +++ b/mysql-test/r/innodb_ext_key.result @@ -1064,5 +1064,50 @@ explain select * from t1 where col1='1234567890-a'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL idx1 NULL NULL NULL # Using where drop table t0,t1; +# +# MDEV-10360: Extended keys: index properties depend on index order +# +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 ( +index_id bigint(20) unsigned NOT NULL, +index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL , +index_object_id int(10) unsigned NOT NULL DEFAULT '0' , +index_date_updated int(10) unsigned DEFAULT NULL , +PRIMARY KEY (index_id), +KEY object (index_class(181),index_object_id), +KEY index_date_updated (index_date_updated) +) engine=innodb; +create table t2 ( +index_id bigint(20) unsigned NOT NULL, +index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL , +index_object_id int(10) unsigned NOT NULL DEFAULT '0' , +index_date_updated int(10) unsigned DEFAULT NULL , +PRIMARY KEY (index_id), +KEY index_date_updated (index_date_updated), +KEY object (index_class(181),index_object_id) +) engine=innodb; +insert into t1 select +@a:=A.a + 10*B.a + 100*C.a, +concat('val-', @a), +123456, +A.a + 10*B.a +from +t0 A, t0 B, t0 C; +insert into t2 select * from t1; +# This must have the same query plan as the query below it: +# type=range, key=index_date_updated, key_len=13 +explain +select * from t1 force index(index_date_updated) +where index_date_updated= 10 and index_id < 800; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range index_date_updated index_date_updated 13 NULL # Using index condition +# This used to work from the start: +explain +select * from t2 force index(index_date_updated) +where index_date_updated= 10 and index_id < 800; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 range index_date_updated index_date_updated 13 NULL # Using index condition +drop table t0,t1,t2; set optimizer_switch=@save_ext_key_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test index ec774b638e8..ebea442d8ca 100644 --- 
a/mysql-test/t/innodb_ext_key.test +++ b/mysql-test/t/innodb_ext_key.test @@ -721,5 +721,58 @@ explain select * from t1 where col1='1234567890-a'; drop table t0,t1; +--echo # +--echo # MDEV-10360: Extended keys: index properties depend on index order +--echo # +create table t0 (a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table t1 ( + index_id bigint(20) unsigned NOT NULL, + index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL , + index_object_id int(10) unsigned NOT NULL DEFAULT '0' , + index_date_updated int(10) unsigned DEFAULT NULL , + + PRIMARY KEY (index_id), + KEY object (index_class(181),index_object_id), + KEY index_date_updated (index_date_updated) +) engine=innodb; + +create table t2 ( + index_id bigint(20) unsigned NOT NULL, + index_class varchar(265) COLLATE latin1_general_ci DEFAULT NULL , + index_object_id int(10) unsigned NOT NULL DEFAULT '0' , + index_date_updated int(10) unsigned DEFAULT NULL , + + PRIMARY KEY (index_id), + KEY index_date_updated (index_date_updated), + KEY object (index_class(181),index_object_id) +) engine=innodb; + +insert into t1 select + @a:=A.a + 10*B.a + 100*C.a, + concat('val-', @a), + 123456, + A.a + 10*B.a +from + t0 A, t0 B, t0 C; + +insert into t2 select * from t1; + +--echo # This must have the same query plan as the query below it: +--echo # type=range, key=index_date_updated, key_len=13 +--replace_column 9 # +explain +select * from t1 force index(index_date_updated) +where index_date_updated= 10 and index_id < 800; + +--echo # This used to work from the start: +--replace_column 9 # +explain +select * from t2 force index(index_date_updated) +where index_date_updated= 10 and index_id < 800; + +drop table t0,t1,t2; + set optimizer_switch=@save_ext_key_optimizer_switch; SET SESSION STORAGE_ENGINE=DEFAULT; diff --git a/sql/table.cc b/sql/table.cc index f7dddd92553..85ffd560992 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1804,6 +1804,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, { KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part + (keyinfo-1)->ext_key_parts; + uint add_keyparts_for_this_key= add_first_key_parts; /* Do not extend the key that contains a component @@ -1815,19 +1816,20 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (share->field[fieldnr-1]->key_length() != keyinfo->key_part[i].length) { - add_first_key_parts= 0; + add_keyparts_for_this_key= 0; break; } } - if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->user_defined_key_parts) + if (add_keyparts_for_this_key < (keyinfo->ext_key_parts - + keyinfo->user_defined_key_parts)) { share->ext_key_parts-= keyinfo->ext_key_parts; key_part_map ext_key_part_map= keyinfo->ext_key_part_map; keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->ext_key_flags= keyinfo->flags; keyinfo->ext_key_part_map= 0; - for (i= 0; i < add_first_key_parts; i++) + for (i= 0; i < add_keyparts_for_this_key; i++) { if (ext_key_part_map & 1< Date: Thu, 23 Jun 2016 14:41:51 +0200 Subject: add a test case vcol.charsets a test case for a broken vcols behavior with different charsets. 
this is fixed in 10.2 --- mysql-test/suite/vcol/r/charsets.result | 31 +++++++++++++++++++++++++++++++ mysql-test/suite/vcol/t/charsets.test | 24 ++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 mysql-test/suite/vcol/r/charsets.result create mode 100644 mysql-test/suite/vcol/t/charsets.test diff --git a/mysql-test/suite/vcol/r/charsets.result b/mysql-test/suite/vcol/r/charsets.result new file mode 100644 index 00000000000..d06edc544d9 --- /dev/null +++ b/mysql-test/suite/vcol/r/charsets.result @@ -0,0 +1,31 @@ +set names utf8; +create table t1 ( +a int, +b varchar(100) as (if(a,collation('й'),hex('ю'))) +) character set koi8r; +insert t1 (a) values (0),(1); +select * from t1; +a b +0 D18E +1 utf8_general_ci +set names latin1; +select * from t1; +a b +0 D18E +1 utf8_general_ci +flush tables; +select * from t1; +a b +0 D18E +1 latin1_swedish_ci +set names koi8r; +select * from t1; +a b +0 D18E +1 latin1_swedish_ci +flush tables; +select * from t1; +a b +0 D18E +1 koi8r_general_ci +drop table t1; diff --git a/mysql-test/suite/vcol/t/charsets.test b/mysql-test/suite/vcol/t/charsets.test new file mode 100644 index 00000000000..32fb9a7741c --- /dev/null +++ b/mysql-test/suite/vcol/t/charsets.test @@ -0,0 +1,24 @@ +# +# This shows a bug in vcol charset handling. +# vcol definition is stored in the connection charset when a table was created +# vcol is parsed in the connection charset when a table was opened +# +# this cannot be fixed without changing frm format, so we only +# fix it in 10.2. +# +set names utf8; +create table t1 ( + a int, + b varchar(100) as (if(a,collation('й'),hex('ю'))) +) character set koi8r; +insert t1 (a) values (0),(1); +select * from t1; +set names latin1; +select * from t1; +flush tables; +select * from t1; +set names koi8r; +select * from t1; +flush tables; +select * from t1; +drop table t1; -- cgit v1.2.1 From 79fc519eed9c053e4d347e085a862b9f856e8c2f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 12 Jul 2016 22:20:20 +0200 Subject: json_udf slowdown don't call strlen() in the loop --- storage/connect/json.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 75bf277b25b..3558c5762bb 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -767,7 +767,7 @@ bool JOUTSTR::Escape(const char *s) { WriteChr('"'); - for (unsigned int i = 0; i < strlen(s); i++) + for (unsigned int i = 0; s[i]; i++) switch (s[i]) { case '"': case '\\': @@ -816,7 +816,7 @@ bool JOUTFILE::Escape(const char *s) // This is temporary fputc('"', Stream); - for (unsigned int i = 0; i < strlen(s); i++) + for (unsigned int i = 0; s[i]; i++) switch (s[i]) { case '"': fputs("\\\"", Stream); break; case '\\': fputs("\\\\", Stream); break; -- cgit v1.2.1 From 865ae5d38edafc58ec474485711846a7cc32414c Mon Sep 17 00:00:00 2001 From: Rik Prohaska Date: Fri, 1 Jul 2016 18:44:28 -0400 Subject: MDEV-10261 fix some tokudb partition test result files since the underlying tests have changed. 
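For readers of the large .result diffs that follow, the underlying test changes fall into two patterns: DML batches in the partition tests are now wrapped in explicit begin/commit transactions, and the microsecond(col1) tests use TIME(6) columns so fractional seconds are preserved. The sketch below condenses both patterns; it is an illustration modeled on the suite's own statements, not part of this patch. The abbreviated DDL (column types, partition bounds) is assumed rather than copied verbatim, and it presumes the TokuDB engine is available, as in the suite.

# Pattern 1: wrap the DML batch in an explicit transaction.
create table t1 (col1 int) engine='TOKUDB'
partition by range(abs(col1))
(partition p0 values less than (15),
 partition p1 values less than maxvalue);
begin;
insert into t1 values (5);
insert into t1 values (13);
commit;

# Pattern 2: declare the column as time(6) so microsecond(col1) sees a
# non-zero value; with plain time the fractional part was dropped on store.
create table t3 (col1 time(6)) engine='TOKUDB'
partition by range(microsecond(col1))
(partition p0 values less than (15),
 partition p1 values less than maxvalue);
insert into t3 values ('09:09:15.000002');
select microsecond(col1) from t3;
# returns 2 with time(6); the old time column made this 0
drop table t1, t3;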
--- .../r/part_supported_sql_func_tokudb.result | 1055 +++++-------- .../r/partition_alter1_1_2_tokudb.result | 296 ++-- .../r/partition_alter1_1_tokudb.result | 264 ++-- .../r/partition_alter1_2_tokudb.result | 456 +++--- .../r/partition_alter2_1_1_tokudb.result | 280 ++-- .../r/partition_alter2_1_2_tokudb.result | 280 ++-- .../r/partition_alter2_2_1_tokudb.result | 280 ++-- .../r/partition_alter2_2_2_tokudb.result | 280 ++-- .../tokudb_parts/r/partition_alter4_tokudb.result | 1608 ++++++++++---------- .../tokudb_parts/r/partition_basic_tokudb.result | 392 ++--- .../tokudb_parts/r/partition_debug_tokudb.result | 327 ++-- 11 files changed, 2682 insertions(+), 2836 deletions(-) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result index 82cbcbc311e..5b860845490 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result @@ -47,6 +47,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with abs(col1) ------------------------------------------------------------------------- +begin; insert into t1 values (5 ); insert into t1 values (13 ); insert into t2 values (5 ); @@ -55,6 +56,7 @@ insert into t2 values (17 ); insert into t3 values (5 ); insert into t3 values (13 ); insert into t3 values (17 ); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t6; @@ -217,12 +219,14 @@ colint col1 50 56 51 34 55 123 +begin; update t1 set col1=15 where col1=5 ; update t2 set col1=15 where col1=5 ; update t3 set col1=15 where col1=5 ; update t4 set col1=15 where col1=5 ; update t5 set col1=15 where col1=5 ; update t6 set col1=15 where col1=5 ; +commit; select * from t1 order by col1; col1 13 @@ -877,12 +881,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with abs(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1=13 ; delete from t2 where col1=13 ; delete from t3 where col1=13 ; delete from t4 where col1=13 ; delete from t5 where col1=13 ; delete from t6 where col1=13 ; +commit; select * from t1 order by col1; col1 15 @@ -986,12 +992,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t1 values (13 ); insert into t2 values (13 ); insert into t3 values (13 ); insert into t4 values (60,13 ); insert into t5 values (60,13 ); insert into t6 values (60,13 ); +commit; select * from t1 order by col1; col1 13 @@ -1274,12 +1282,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with abs(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1=13 ; delete from t22 where col1=13 ; delete from t33 where col1=13 ; delete from t44 where col1=13 ; delete from t55 where col1=13 ; delete from t66 where col1=13 ; +commit; select * from t11 order by col1; col1 15 @@ -1383,12 +1393,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t11 values (13 ); insert into 
t22 values (13 ); insert into t33 values (13 ); insert into t44 values (60,13 ); insert into t55 values (60,13 ); insert into t66 values (60,13 ); +commit; select * from t11 order by col1; col1 13 @@ -1732,6 +1744,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with mod(col1,10) ------------------------------------------------------------------------- +begin; insert into t1 values (5); insert into t1 values (19); insert into t2 values (5); @@ -1740,6 +1753,7 @@ insert into t2 values (17); insert into t3 values (5); insert into t3 values (19); insert into t3 values (17); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_int.inc' into table t6; @@ -1902,12 +1916,14 @@ colint col1 50 56 51 34 55 123 +begin; update t1 set col1=15 where col1=5; update t2 set col1=15 where col1=5; update t3 set col1=15 where col1=5; update t4 set col1=15 where col1=5; update t5 set col1=15 where col1=5; update t6 set col1=15 where col1=5; +commit; select * from t1 order by col1; col1 15 @@ -2562,12 +2578,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with mod(col1,10) ------------------------------------------------------------------------- +begin; delete from t1 where col1=19; delete from t2 where col1=19; delete from t3 where col1=19; delete from t4 where col1=19; delete from t5 where col1=19; delete from t6 where col1=19; +commit; select * from t1 order by col1; col1 15 @@ -2673,12 +2691,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t1 values (19); insert into t2 values (19); insert into t3 values (19); insert into t4 values (60,19); insert into t5 values (60,19); insert into t6 values (60,19); +commit; select * from t1 order by col1; col1 15 @@ -2970,12 +2990,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with mod(col1,10) ------------------------------------------------------------------------- +begin; delete from t11 where col1=19; delete from t22 where col1=19; delete from t33 where col1=19; delete from t44 where col1=19; delete from t55 where col1=19; delete from t66 where col1=19; +commit; select * from t11 order by col1; col1 15 @@ -3081,12 +3103,14 @@ colint col1 50 56 51 34 55 123 +begin; insert into t11 values (19); insert into t22 values (19); insert into t33 values (19); insert into t44 values (60,19); insert into t55 values (60,19); insert into t66 values (60,19); +commit; select * from t11 order by col1; col1 15 @@ -3439,6 +3463,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with day(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-02-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-02-03'); @@ -3447,6 +3472,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-02-03'); insert into t3 values ('2006-01-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 
'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -3486,12 +3512,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-02-03'; update t2 set col1='2006-02-05' where col1='2006-02-03'; update t3 set col1='2006-02-05' where col1='2006-02-03'; update t4 set col1='2006-02-05' where col1='2006-02-03'; update t5 set col1='2006-02-05' where col1='2006-02-03'; update t6 set col1='2006-02-05' where col1='2006-02-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -3695,12 +3723,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with day(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -3722,12 +3752,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -3789,12 +3821,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with day(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -3816,12 +3850,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -3944,6 +3980,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-02-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-02-03'); @@ -3952,6 +3989,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-02-03'); insert into t3 values ('2006-01-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -3991,12 +4029,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-02-03'; update t2 set 
col1='2006-02-05' where col1='2006-02-03'; update t3 set col1='2006-02-05' where col1='2006-02-03'; update t4 set col1='2006-02-05' where col1='2006-02-03'; update t5 set col1='2006-02-05' where col1='2006-02-03'; update t6 set col1='2006-02-05' where col1='2006-02-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -4200,12 +4240,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4227,12 +4269,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -4294,12 +4338,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofmonth(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4321,12 +4367,14 @@ colint col1 1 2006-02-05 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -4449,6 +4497,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with dayofweek(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); @@ -4457,6 +4506,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-02-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -4496,12 +4546,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ 
-4705,12 +4757,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofweek(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-02-17'; delete from t2 where col1='2006-02-17'; delete from t3 where col1='2006-02-17'; delete from t4 where col1='2006-02-17'; delete from t5 where col1='2006-02-17'; delete from t6 where col1='2006-02-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4734,12 +4788,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-02-17'); insert into t2 values ('2006-02-17'); insert into t3 values ('2006-02-17'); insert into t4 values (60,'2006-02-17'); insert into t5 values (60,'2006-02-17'); insert into t6 values (60,'2006-02-17'); +commit; select * from t1 order by col1; col1 2006-02-05 @@ -4805,12 +4861,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofweek(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-02-17'; delete from t22 where col1='2006-02-17'; delete from t33 where col1='2006-02-17'; delete from t44 where col1='2006-02-17'; delete from t55 where col1='2006-02-17'; delete from t66 where col1='2006-02-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4834,12 +4892,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-02-17'); insert into t22 values ('2006-02-17'); insert into t33 values ('2006-02-17'); insert into t44 values (60,'2006-02-17'); insert into t55 values (60,'2006-02-17'); insert into t66 values (60,'2006-02-17'); +commit; select * from t11 order by col1; col1 2006-02-05 @@ -4966,6 +5026,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-03'); @@ -4974,6 +5035,7 @@ insert into t2 values ('2006-02-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-01-17'); insert into t3 values ('2006-02-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -5013,12 +5075,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-01-17 @@ -5222,12 +5286,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-01-17'; delete from t2 where col1='2006-01-17'; 
delete from t3 where col1='2006-01-17'; delete from t4 where col1='2006-01-17'; delete from t5 where col1='2006-01-17'; delete from t6 where col1='2006-01-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5249,12 +5315,14 @@ colint col1 1 2006-02-03 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-01-17'); insert into t2 values ('2006-01-17'); insert into t3 values ('2006-01-17'); insert into t4 values (60,'2006-01-17'); insert into t5 values (60,'2006-01-17'); insert into t6 values (60,'2006-01-17'); +commit; select * from t1 order by col1; col1 2006-01-17 @@ -5317,12 +5385,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with dayofyear(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-01-17'; delete from t22 where col1='2006-01-17'; delete from t33 where col1='2006-01-17'; delete from t44 where col1='2006-01-17'; delete from t55 where col1='2006-01-17'; delete from t66 where col1='2006-01-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5344,12 +5414,14 @@ colint col1 1 2006-02-03 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-01-17'); insert into t22 values ('2006-01-17'); insert into t33 values ('2006-01-17'); insert into t44 values (60,'2006-01-17'); insert into t55 values (60,'2006-01-17'); insert into t66 values (60,'2006-01-17'); +commit; select * from t11 order by col1; col1 2006-01-17 @@ -5473,6 +5545,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with extract(month from col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-02-17'); insert into t2 values ('2006-01-03'); @@ -5481,6 +5554,7 @@ insert into t2 values ('2006-01-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-02-17'); insert into t3 values ('2006-01-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -5520,12 +5594,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-05' where col1='2006-01-03'; update t2 set col1='2006-02-05' where col1='2006-01-03'; update t3 set col1='2006-02-05' where col1='2006-01-03'; update t4 set col1='2006-02-05' where col1='2006-01-03'; update t5 set col1='2006-02-05' where col1='2006-01-03'; update t6 set col1='2006-02-05' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5729,12 +5805,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-02-17'; delete from t2 where col1='2006-02-17'; delete from t3 where col1='2006-02-17'; delete from t4 where col1='2006-02-17'; delete from t5 where col1='2006-02-17'; delete from t6 where col1='2006-02-17'; +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5758,12 +5836,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 
values ('2006-02-17'); insert into t2 values ('2006-02-17'); insert into t3 values ('2006-02-17'); insert into t4 values (60,'2006-02-17'); insert into t5 values (60,'2006-02-17'); insert into t6 values (60,'2006-02-17'); +commit; select * from t1 order by col1; col1 2006-02-05 @@ -5824,12 +5904,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with extract(month from col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-02-17'; delete from t22 where col1='2006-02-17'; delete from t33 where col1='2006-02-17'; delete from t44 where col1='2006-02-17'; delete from t55 where col1='2006-02-17'; delete from t66 where col1='2006-02-17'; +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5853,12 +5935,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-02-17'); insert into t22 values ('2006-02-17'); insert into t33 values ('2006-02-17'); insert into t44 values (60,'2006-02-17'); insert into t55 values (60,'2006-02-17'); insert into t66 values (60,'2006-02-17'); +commit; select * from t11 order by col1; col1 2006-02-05 @@ -5980,6 +6064,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with hour(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09'); insert into t1 values ('14:30'); insert into t2 values ('09:09'); @@ -5988,6 +6073,7 @@ insert into t2 values ('21:59'); insert into t3 values ('09:09'); insert into t3 values ('14:30'); insert into t3 values ('21:59'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -6027,12 +6113,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:30' where col1='09:09'; update t2 set col1='10:30' where col1='09:09'; update t3 set col1='10:30' where col1='09:09'; update t4 set col1='10:30' where col1='09:09'; update t5 set col1='10:30' where col1='09:09'; update t6 set col1='10:30' where col1='09:09'; +commit; select * from t1 order by col1; col1 10:30:00 @@ -6236,12 +6324,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with hour(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30'; delete from t2 where col1='14:30'; delete from t3 where col1='14:30'; delete from t4 where col1='14:30'; delete from t5 where col1='14:30'; delete from t6 where col1='14:30'; +commit; select * from t1 order by col1; col1 10:30:00 @@ -6265,12 +6355,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30'); insert into t2 values ('14:30'); insert into t3 values ('14:30'); insert into t4 values (60,'14:30'); insert into t5 values (60,'14:30'); insert into t6 values (60,'14:30'); +commit; select * from t1 order by col1; col1 10:30:00 @@ -6334,12 +6426,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with hour(col1) 
------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30'; delete from t22 where col1='14:30'; delete from t33 where col1='14:30'; delete from t44 where col1='14:30'; delete from t55 where col1='14:30'; delete from t66 where col1='14:30'; +commit; select * from t11 order by col1; col1 10:30:00 @@ -6363,12 +6457,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t11 values ('14:30'); insert into t22 values ('14:30'); insert into t33 values ('14:30'); insert into t44 values (60,'14:30'); insert into t55 values (60,'14:30'); insert into t66 values (60,'14:30'); +commit; select * from t11 order by col1; col1 10:30:00 @@ -6445,7 +6541,7 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- microsecond(col1) in partition with coltype time +--- microsecond(col1) in partition with coltype time(6) ------------------------------------------------------------------------- drop table if exists t1 ; drop table if exists t2 ; @@ -6456,11 +6552,11 @@ drop table if exists t6 ; ------------------------------------------------------------------------- --- Create tables with microsecond(col1) ------------------------------------------------------------------------- -create table t1 (col1 time) engine='TOKUDB' +create table t1 (col1 time(6)) engine='TOKUDB' partition by range(microsecond(col1)) (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t2 (col1 time) engine='TOKUDB' +create table t2 (col1 time(6)) engine='TOKUDB' partition by list(microsecond(col1)) (partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), partition p1 values in (11,12,13,14,15,16,17,18,19,20), @@ -6469,14 +6565,14 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t3 (col1 time) engine='TOKUDB' +create table t3 (col1 time(6)) engine='TOKUDB' partition by hash(microsecond(col1)); -create table t4 (colint int, col1 time) engine='TOKUDB' +create table t4 (colint int, col1 time(6)) engine='TOKUDB' partition by range(colint) subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values less than (15), partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='TOKUDB' +create table t5 (colint int, col1 time(6)) engine='TOKUDB' partition by list(colint) subpartition by hash(microsecond(col1)) subpartitions 2 (partition p0 values in (1,2,3,4,5,6,7,8,9,10), @@ -6486,13 +6582,14 @@ partition p3 values in (31,32,33,34,35,36,37,38,39,40), partition p4 values in (41,42,43,44,45,46,47,48,49,50), partition p5 values in (51,52,53,54,55,56,57,58,59,60) ); -create table t6 (colint int, col1 time) engine='TOKUDB' +create table t6 (colint int, col1 time(6)) engine='TOKUDB' partition by range(colint) (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with microsecond(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:15.000002'); insert into t1 values ('04:30:01.000018'); insert into t2 values ('09:09:15.000002'); @@ -6501,83 +6598,86 @@ insert into t2 values ('00:59:22.000024'); insert into t3 values ('09:09:15.000002'); insert into t3 values 
('04:30:01.000018'); insert into t3 values ('00:59:22.000024'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; select microsecond(col1) from t1 order by col1; microsecond(col1) -0 -0 +18 +2 select * from t1 order by col1; col1 -04:30:01 -09:09:15 +04:30:01.000018 +09:09:15.000002 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -09:09:15 +00:59:22.000024 +04:30:01.000018 +09:09:15.000002 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -09:09:15 +00:59:22.000024 +04:30:01.000018 +09:09:15.000002 select * from t4 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t6 order by colint; colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 09:09:15.000002 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 +begin; update t1 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t2 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t3 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t4 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t5 set col1='05:30:34.000037' where col1='09:09:15.000002'; update t6 set col1='05:30:34.000037' where col1='09:09:15.000002'; +commit; select * from t1 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t6 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 ------------------------------------------------------------------------- --- Alter tables with microsecond(col1) ------------------------------------------------------------------------- @@ -6629,36 +6729,36 @@ partition by range(colint) partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t55 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 select * from t66 order by colint; colint 
col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 --------------------------- ---- some alter table begin --------------------------- @@ -6667,16 +6767,16 @@ reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 alter table t11 reorganize partition s1 into (partition p0 values less than (15), partition p1 values less than maxvalue); select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 alter table t55 partition by list(colint) subpartition by hash(microsecond(col1)) subpartitions 5 @@ -6691,7 +6791,7 @@ show create table t55; Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL + `col1` time(6) DEFAULT NULL ) ENGINE=TokuDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (microsecond(col1)) @@ -6704,116 +6804,120 @@ SUBPARTITIONS 5 PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = TokuDB) */ select * from t55 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition s1 into (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition p0,p1 into (partition s1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 alter table t66 reorganize partition s1 into (partition p0 values less than (microsecond('10:30:10.000010')), partition p1 values less than maxvalue); select * from t66 order by colint; colint col1 -1 05:30:34 -2 04:30:01 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +2 04:30:01.000018 +3 00:59:22.000024 +4 05:30:34.000037 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='04:30:01.000018'; delete from t2 where col1='04:30:01.000018'; delete from t3 where col1='04:30:01.000018'; delete from t4 where col1='04:30:01.000018'; delete from t5 where col1='04:30:01.000018'; delete from t6 where col1='04:30:01.000018'; +commit; select * from t1 order by col1; col1 -05:30:34 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 select * from t5 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +begin; insert into t1 values ('04:30:01.000018'); insert into t2 
values ('04:30:01.000018'); insert into t3 values ('04:30:01.000018'); insert into t4 values (60,'04:30:01.000018'); insert into t5 values (60,'04:30:01.000018'); insert into t6 values (60,'04:30:01.000018'); +commit; select * from t1 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t5 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t6 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 alter table t1 drop partition p0; alter table t2 drop partition p0; alter table t4 drop partition p0; @@ -6821,90 +6925,99 @@ alter table t5 drop partition p0; alter table t6 drop partition p0; select * from t1 order by col1; col1 +04:30:01.000018 +05:30:34.000037 select * from t2 order by col1; col1 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t3 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t4 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t5 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t6 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with microsecond(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='04:30:01.000018'; delete from t22 where col1='04:30:01.000018'; delete from t33 where col1='04:30:01.000018'; delete from t44 where col1='04:30:01.000018'; delete from t55 where col1='04:30:01.000018'; delete from t66 where col1='04:30:01.000018'; +commit; select * from t11 order by col1; col1 -05:30:34 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -05:30:34 +00:59:22.000024 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 select * from t55 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +begin; insert into t11 values ('04:30:01.000018'); insert into t22 values ('04:30:01.000018'); insert into t33 values ('04:30:01.000018'); insert into t44 values (60,'04:30:01.000018'); insert into t55 values (60,'04:30:01.000018'); insert into t66 values (60,'04:30:01.000018'); +commit; select * from t11 order by col1; col1 -04:30:01 -05:30:34 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 
00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t55 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 select * from t66 order by colint; colint col1 -1 05:30:34 -3 00:59:22 -4 05:30:34 -60 04:30:01 +1 05:30:34.000037 +3 00:59:22.000024 +4 05:30:34.000037 +60 04:30:01.000018 alter table t11 drop partition p0; alter table t22 drop partition p0; alter table t44 drop partition p0; @@ -6912,22 +7025,27 @@ alter table t55 drop partition p0; alter table t66 drop partition p0; select * from t11 order by col1; col1 +04:30:01.000018 +05:30:34.000037 select * from t22 order by col1; col1 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t33 order by col1; col1 -00:59:22 -04:30:01 -05:30:34 +00:59:22.000024 +04:30:01.000018 +05:30:34.000037 select * from t44 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t55 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 select * from t66 order by colint; colint col1 -60 04:30:01 +60 04:30:01.000018 ------------------------- ---- some alter table end ------------------------- @@ -6992,6 +7110,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with minute(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:15'); insert into t1 values ('14:30:45'); insert into t2 values ('09:09:15'); @@ -7000,6 +7119,7 @@ insert into t2 values ('21:59:22'); insert into t3 values ('09:09:15'); insert into t3 values ('14:30:45'); insert into t3 values ('21:59:22'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -7039,12 +7159,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:24:23' where col1='09:09:15'; update t2 set col1='10:24:23' where col1='09:09:15'; update t3 set col1='10:24:23' where col1='09:09:15'; update t4 set col1='10:24:23' where col1='09:09:15'; update t5 set col1='10:24:23' where col1='09:09:15'; update t6 set col1='10:24:23' where col1='09:09:15'; +commit; select * from t1 order by col1; col1 10:24:23 @@ -7248,12 +7370,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with minute(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30:45'; delete from t2 where col1='14:30:45'; delete from t3 where col1='14:30:45'; delete from t4 where col1='14:30:45'; delete from t5 where col1='14:30:45'; delete from t6 where col1='14:30:45'; +commit; select * from t1 order by col1; col1 10:24:23 @@ -7277,12 +7401,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30:45'); insert into t2 values ('14:30:45'); insert into t3 values ('14:30:45'); insert into t4 values (60,'14:30:45'); insert into t5 values (60,'14:30:45'); insert into t6 values (60,'14:30:45'); +commit; select * from t1 order by col1; col1 10:24:23 @@ -7349,12 +7475,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with 
minute(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30:45'; delete from t22 where col1='14:30:45'; delete from t33 where col1='14:30:45'; delete from t44 where col1='14:30:45'; delete from t55 where col1='14:30:45'; delete from t66 where col1='14:30:45'; +commit; select * from t11 order by col1; col1 10:24:23 @@ -7378,12 +7506,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t11 values ('14:30:45'); insert into t22 values ('14:30:45'); insert into t33 values ('14:30:45'); insert into t44 values (60,'14:30:45'); insert into t55 values (60,'14:30:45'); insert into t66 values (60,'14:30:45'); +commit; select * from t11 order by col1; col1 10:24:23 @@ -7511,6 +7641,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with second(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('09:09:09'); insert into t1 values ('14:30:20'); insert into t2 values ('09:09:09'); @@ -7519,6 +7650,7 @@ insert into t2 values ('21:59:22'); insert into t3 values ('09:09:09'); insert into t3 values ('14:30:20'); insert into t3 values ('21:59:22'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; @@ -7558,12 +7690,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; update t1 set col1='10:22:33' where col1='09:09:09'; update t2 set col1='10:22:33' where col1='09:09:09'; update t3 set col1='10:22:33' where col1='09:09:09'; update t4 set col1='10:22:33' where col1='09:09:09'; update t5 set col1='10:22:33' where col1='09:09:09'; update t6 set col1='10:22:33' where col1='09:09:09'; +commit; select * from t1 order by col1; col1 10:22:33 @@ -7767,12 +7901,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='14:30:20'; delete from t2 where col1='14:30:20'; delete from t3 where col1='14:30:20'; delete from t4 where col1='14:30:20'; delete from t5 where col1='14:30:20'; delete from t6 where col1='14:30:20'; +commit; select * from t1 order by col1; col1 10:22:33 @@ -7796,12 +7932,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 +begin; insert into t1 values ('14:30:20'); insert into t2 values ('14:30:20'); insert into t3 values ('14:30:20'); insert into t4 values (60,'14:30:20'); insert into t5 values (60,'14:30:20'); insert into t6 values (60,'14:30:20'); +commit; select * from t1 order by col1; col1 10:22:33 @@ -7868,12 +8006,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with second(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='14:30:20'; delete from t22 where col1='14:30:20'; delete from t33 where col1='14:30:20'; delete from t44 where col1='14:30:20'; delete from t55 where col1='14:30:20'; delete from t66 where col1='14:30:20'; +commit; select * from t11 order by col1; col1 10:22:33 @@ -7897,12 +8037,14 @@ colint col1 2 04:30:01 3 00:59:22 4 05:30:34 
+begin; insert into t11 values ('14:30:20'); insert into t22 values ('14:30:20'); insert into t33 values ('14:30:20'); insert into t44 values (60,'14:30:20'); insert into t55 values (60,'14:30:20'); insert into t66 values (60,'14:30:20'); +commit; select * from t11 order by col1; col1 10:22:33 @@ -8030,6 +8172,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with month(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-12-17'); insert into t2 values ('2006-01-03'); @@ -8038,6 +8181,7 @@ insert into t2 values ('2006-05-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-12-17'); insert into t3 values ('2006-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -8077,12 +8221,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-11-06' where col1='2006-01-03'; update t2 set col1='2006-11-06' where col1='2006-01-03'; update t3 set col1='2006-11-06' where col1='2006-01-03'; update t4 set col1='2006-11-06' where col1='2006-01-03'; update t5 set col1='2006-11-06' where col1='2006-01-03'; update t6 set col1='2006-11-06' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8286,12 +8432,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-12-17'; delete from t2 where col1='2006-12-17'; delete from t3 where col1='2006-12-17'; delete from t4 where col1='2006-12-17'; delete from t5 where col1='2006-12-17'; delete from t6 where col1='2006-12-17'; +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8315,12 +8463,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-12-17'); insert into t2 values ('2006-12-17'); insert into t3 values ('2006-12-17'); insert into t4 values (60,'2006-12-17'); insert into t5 values (60,'2006-12-17'); insert into t6 values (60,'2006-12-17'); +commit; select * from t1 order by col1; col1 2006-11-06 @@ -8384,12 +8534,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with month(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-12-17'; delete from t22 where col1='2006-12-17'; delete from t33 where col1='2006-12-17'; delete from t44 where col1='2006-12-17'; delete from t55 where col1='2006-12-17'; delete from t66 where col1='2006-12-17'; +commit; select * from t11 order by col1; col1 2006-11-06 @@ -8413,12 +8565,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-12-17'); insert into t22 values ('2006-12-17'); insert into t33 values ('2006-12-17'); insert into t44 values (60,'2006-12-17'); insert into t55 values (60,'2006-12-17'); insert into t66 values (60,'2006-12-17'); +commit; select * from t11 order by col1; col1 2006-11-06 @@ -8543,6 +8697,7 @@ partition p1 values less than 
maxvalue); ------------------------------------------------------------------------- --- Access tables with quarter(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-12-17'); insert into t2 values ('2006-01-03'); @@ -8551,6 +8706,7 @@ insert into t2 values ('2006-09-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-12-17'); insert into t3 values ('2006-09-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -8590,12 +8746,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-07-30' where col1='2006-01-03'; update t2 set col1='2006-07-30' where col1='2006-01-03'; update t3 set col1='2006-07-30' where col1='2006-01-03'; update t4 set col1='2006-07-30' where col1='2006-01-03'; update t5 set col1='2006-07-30' where col1='2006-01-03'; update t6 set col1='2006-07-30' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8799,12 +8957,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-12-17'; delete from t2 where col1='2006-12-17'; delete from t3 where col1='2006-12-17'; delete from t4 where col1='2006-12-17'; delete from t5 where col1='2006-12-17'; delete from t6 where col1='2006-12-17'; +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8828,12 +8988,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-12-17'); insert into t2 values ('2006-12-17'); insert into t3 values ('2006-12-17'); insert into t4 values (60,'2006-12-17'); insert into t5 values (60,'2006-12-17'); insert into t6 values (60,'2006-12-17'); +commit; select * from t1 order by col1; col1 2006-07-30 @@ -8896,12 +9058,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with quarter(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-12-17'; delete from t22 where col1='2006-12-17'; delete from t33 where col1='2006-12-17'; delete from t44 where col1='2006-12-17'; delete from t55 where col1='2006-12-17'; delete from t66 where col1='2006-12-17'; +commit; select * from t11 order by col1; col1 2006-07-30 @@ -8925,12 +9089,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-12-17'); insert into t22 values ('2006-12-17'); insert into t33 values ('2006-12-17'); insert into t44 values (60,'2006-12-17'); insert into t55 values (60,'2006-12-17'); insert into t66 values (60,'2006-12-17'); +commit; select * from t11 order by col1; col1 2006-07-30 @@ -9006,523 +9172,6 @@ drop table if exists t44 ; drop table if exists t55 ; drop table if exists t66 ; ------------------------------------------------------------------------- ---- time_to_sec(col1)-(time_to_sec(col1)-20) in partition with coltype time -------------------------------------------------------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop 
table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -------------------------------------------------------------------------- ---- Create tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -create table t1 (col1 time) engine='TOKUDB' -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t2 (col1 time) engine='TOKUDB' -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t3 (col1 time) engine='TOKUDB' -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -create table t4 (colint int, col1 time) engine='TOKUDB' -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -create table t5 (colint int, col1 time) engine='TOKUDB' -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -create table t6 (colint int, col1 time) engine='TOKUDB' -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -------------------------------------------------------------------------- ---- Access tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -insert into t1 values ('09:09:15'); -insert into t1 values ('14:30:45'); -insert into t2 values ('09:09:15'); -insert into t2 values ('14:30:45'); -insert into t2 values ('21:59:22'); -insert into t3 values ('09:09:15'); -insert into t3 values ('14:30:45'); -insert into t3 values ('21:59:22'); -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t4; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t5; -load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_time.inc' into table t6; -select time_to_sec(col1)-(time_to_sec(col1)-20) from t1 order by col1; -time_to_sec(col1)-(time_to_sec(col1)-20) -20 -20 -select * from t1 order by col1; -col1 -09:09:15 -14:30:45 -select * from t2 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -09:09:15 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 09:09:15 -2 04:30:01 -3 00:59:22 -4 05:30:34 -update t1 set col1='10:33:11' where col1='09:09:15'; -update t2 set col1='10:33:11' where col1='09:09:15'; 
-update t3 set col1='10:33:11' where col1='09:09:15'; -update t4 set col1='10:33:11' where col1='09:09:15'; -update t5 set col1='10:33:11' where col1='09:09:15'; -update t6 set col1='10:33:11' where col1='09:09:15'; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Alter tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -create table t11 engine='TOKUDB' as select * from t1; -create table t22 engine='TOKUDB' as select * from t2; -create table t33 engine='TOKUDB' as select * from t3; -create table t44 engine='TOKUDB' as select * from t4; -create table t55 engine='TOKUDB' as select * from t5; -create table t66 engine='TOKUDB' as select * from t6; -alter table t11 -partition by range(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t22 -partition by list(time_to_sec(col1)-(time_to_sec(col1)-20)) -(partition p0 values in (0,1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t33 -partition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)); -alter table t44 -partition by range(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values less than (15), -partition p1 values less than maxvalue); -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 2 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -alter table t66 -partition by range(colint) -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 ---------------------------- ----- some alter table begin ---------------------------- -alter table t11 -reorganize partition p0,p1 into -(partition s1 
values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t11 -reorganize partition s1 into -(partition p0 values less than (15), -partition p1 values less than maxvalue); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -alter table t55 -partition by list(colint) -subpartition by hash(time_to_sec(col1)-(time_to_sec(col1)-20)) subpartitions 5 -(partition p0 values in (1,2,3,4,5,6,7,8,9,10), -partition p1 values in (11,12,13,14,15,16,17,18,19,20), -partition p2 values in (21,22,23,24,25,26,27,28,29,30), -partition p3 values in (31,32,33,34,35,36,37,38,39,40), -partition p4 values in (41,42,43,44,45,46,47,48,49,50), -partition p5 values in (51,52,53,54,55,56,57,58,59,60) -); -show create table t55; -Table Create Table -t55 CREATE TABLE `t55` ( - `colint` int(11) DEFAULT NULL, - `col1` time DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 -/*!50100 PARTITION BY LIST (colint) -SUBPARTITION BY HASH (time_to_sec(col1)-(time_to_sec(col1)-20)) -SUBPARTITIONS 5 -(PARTITION p0 VALUES IN (1,2,3,4,5,6,7,8,9,10) ENGINE = TokuDB, - PARTITION p1 VALUES IN (11,12,13,14,15,16,17,18,19,20) ENGINE = TokuDB, - PARTITION p2 VALUES IN (21,22,23,24,25,26,27,28,29,30) ENGINE = TokuDB, - PARTITION p3 VALUES IN (31,32,33,34,35,36,37,38,39,40) ENGINE = TokuDB, - PARTITION p4 VALUES IN (41,42,43,44,45,46,47,48,49,50) ENGINE = TokuDB, - PARTITION p5 VALUES IN (51,52,53,54,55,56,57,58,59,60) ENGINE = TokuDB) */ -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition p0,p1 into -(partition s1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -alter table t66 -reorganize partition s1 into -(partition p0 values less than (time_to_sec('18:30:14')-(time_to_sec('17:59:59'))), -partition p1 values less than maxvalue); -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t1 where col1='14:30:45'; -delete from t2 where col1='14:30:45'; -delete from t3 where col1='14:30:45'; -delete from t4 where col1='14:30:45'; -delete from t5 where col1='14:30:45'; -delete from t6 where col1='14:30:45'; -select * from t1 order by col1; -col1 -10:33:11 -select * from t2 order by col1; -col1 -10:33:11 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t1 values ('14:30:45'); -insert into t2 values ('14:30:45'); -insert into t3 values ('14:30:45'); -insert into t4 values (60,'14:30:45'); -insert into t5 values (60,'14:30:45'); -insert into t6 values (60,'14:30:45'); -select * 
from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t5 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t6 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t1 drop partition p0; -alter table t2 drop partition p0; -alter table t4 drop partition p0; -alter table t5 drop partition p0; -alter table t6 drop partition p0; -select * from t1 order by col1; -col1 -10:33:11 -14:30:45 -select * from t2 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t3 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t4 order by colint; -colint col1 -60 14:30:45 -select * from t5 order by colint; -colint col1 -60 14:30:45 -select * from t6 order by colint; -colint col1 -------------------------------------------------------------------------- ---- Delete rows and partitions of tables with time_to_sec(col1)-(time_to_sec(col1)-20) -------------------------------------------------------------------------- -delete from t11 where col1='14:30:45'; -delete from t22 where col1='14:30:45'; -delete from t33 where col1='14:30:45'; -delete from t44 where col1='14:30:45'; -delete from t55 where col1='14:30:45'; -delete from t66 where col1='14:30:45'; -select * from t11 order by col1; -col1 -10:33:11 -select * from t22 order by col1; -col1 -10:33:11 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -insert into t11 values ('14:30:45'); -insert into t22 values ('14:30:45'); -insert into t33 values ('14:30:45'); -insert into t44 values (60,'14:30:45'); -insert into t55 values (60,'14:30:45'); -insert into t66 values (60,'14:30:45'); -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t55 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -select * from t66 order by colint; -colint col1 -1 10:33:11 -2 04:30:01 -3 00:59:22 -4 05:30:34 -60 14:30:45 -alter table t11 drop partition p0; -alter table t22 drop partition p0; -alter table t44 drop partition p0; -alter table t55 drop partition p0; -alter table t66 drop partition p0; -select * from t11 order by col1; -col1 -10:33:11 -14:30:45 -select * from t22 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t33 order by col1; -col1 -10:33:11 -14:30:45 -21:59:22 -select * from t44 order by colint; -colint col1 -60 14:30:45 -select * from t55 order by colint; -colint col1 -60 14:30:45 -select * from t66 order by colint; -colint col1 -------------------------- ----- some alter table end -------------------------- -drop table if exists t1 ; -drop table if exists t2 ; -drop table if exists t3 ; -drop table if exists t4 ; -drop table if exists t5 ; -drop table if exists t6 ; -drop table if exists t11 ; -drop table if exists t22 ; -drop table if exists t33 ; -drop 
table if exists t44 ; -drop table if exists t55 ; -drop table if exists t66 ; -------------------------------------------------------------------------- --- weekday(col1) in partition with coltype date ------------------------------------------------------------------------- drop table if exists t1 ; @@ -9571,6 +9220,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with weekday(col1) ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-12-03'); insert into t1 values ('2006-11-17'); insert into t2 values ('2006-12-03'); @@ -9579,6 +9229,7 @@ insert into t2 values ('2006-05-25'); insert into t3 values ('2006-12-03'); insert into t3 values ('2006-11-17'); insert into t3 values ('2006-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -9618,12 +9269,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-02-06' where col1='2006-12-03'; update t2 set col1='2006-02-06' where col1='2006-12-03'; update t3 set col1='2006-02-06' where col1='2006-12-03'; update t4 set col1='2006-02-06' where col1='2006-12-03'; update t5 set col1='2006-02-06' where col1='2006-12-03'; update t6 set col1='2006-02-06' where col1='2006-12-03'; +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9827,12 +9480,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with weekday(col1) ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-11-17'; delete from t2 where col1='2006-11-17'; delete from t3 where col1='2006-11-17'; delete from t4 where col1='2006-11-17'; delete from t5 where col1='2006-11-17'; delete from t6 where col1='2006-11-17'; +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9856,12 +9511,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-11-17'); insert into t2 values ('2006-11-17'); insert into t3 values ('2006-11-17'); insert into t4 values (60,'2006-11-17'); insert into t5 values (60,'2006-11-17'); insert into t6 values (60,'2006-11-17'); +commit; select * from t1 order by col1; col1 2006-02-06 @@ -9923,12 +9580,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with weekday(col1) ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-11-17'; delete from t22 where col1='2006-11-17'; delete from t33 where col1='2006-11-17'; delete from t44 where col1='2006-11-17'; delete from t55 where col1='2006-11-17'; delete from t66 where col1='2006-11-17'; +commit; select * from t11 order by col1; col1 2006-02-06 @@ -9952,12 +9611,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-11-17'); insert into t22 values ('2006-11-17'); insert into t33 values ('2006-11-17'); insert into t44 values (60,'2006-11-17'); insert into t55 values (60,'2006-11-17'); insert into t66 values (60,'2006-11-17'); +commit; select * from t11 order by col1; col1 2006-02-06 @@ -10080,6 +9741,7 @@ partition p1 
values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; insert into t1 values ('1996-01-03'); insert into t1 values ('2000-02-17'); insert into t2 values ('1996-01-03'); @@ -10088,6 +9750,7 @@ insert into t2 values ('2004-05-25'); insert into t3 values ('1996-01-03'); insert into t3 values ('2000-02-17'); insert into t3 values ('2004-05-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -10127,12 +9790,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2002-02-15' where col1='1996-01-03'; update t2 set col1='2002-02-15' where col1='1996-01-03'; update t3 set col1='2002-02-15' where col1='1996-01-03'; update t4 set col1='2002-02-15' where col1='1996-01-03'; update t5 set col1='2002-02-15' where col1='1996-01-03'; update t6 set col1='2002-02-15' where col1='1996-01-03'; +commit; select * from t1 order by col1; col1 2000-02-17 @@ -10336,12 +10001,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; delete from t1 where col1='2000-02-17'; delete from t2 where col1='2000-02-17'; delete from t3 where col1='2000-02-17'; delete from t4 where col1='2000-02-17'; delete from t5 where col1='2000-02-17'; delete from t6 where col1='2000-02-17'; +commit; select * from t1 order by col1; col1 2002-02-15 @@ -10365,12 +10032,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2000-02-17'); insert into t2 values ('2000-02-17'); insert into t3 values ('2000-02-17'); insert into t4 values (60,'2000-02-17'); insert into t5 values (60,'2000-02-17'); insert into t6 values (60,'2000-02-17'); +commit; select * from t1 order by col1; col1 2000-02-17 @@ -10434,12 +10103,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with year(col1)-1990 ------------------------------------------------------------------------- +begin; delete from t11 where col1='2000-02-17'; delete from t22 where col1='2000-02-17'; delete from t33 where col1='2000-02-17'; delete from t44 where col1='2000-02-17'; delete from t55 where col1='2000-02-17'; delete from t66 where col1='2000-02-17'; +commit; select * from t11 order by col1; col1 2002-02-15 @@ -10463,12 +10134,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2000-02-17'); insert into t22 values ('2000-02-17'); insert into t33 values ('2000-02-17'); insert into t44 values (60,'2000-02-17'); insert into t55 values (60,'2000-02-17'); insert into t66 values (60,'2000-02-17'); +commit; select * from t11 order by col1; col1 2000-02-17 @@ -10593,6 +10266,7 @@ partition p1 values less than maxvalue); ------------------------------------------------------------------------- --- Access tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; insert into t1 values ('2006-01-03'); insert into t1 values ('2006-08-17'); insert into t2 
values ('2006-01-03'); @@ -10601,6 +10275,7 @@ insert into t2 values ('2006-03-25'); insert into t3 values ('2006-01-03'); insert into t3 values ('2006-08-17'); insert into t3 values ('2006-03-25'); +commit; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t4; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t5; load data infile 'MYSQLTEST_VARDIR/std_data/parts/part_supported_sql_funcs_int_date.inc' into table t6; @@ -10640,12 +10315,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; update t1 set col1='2006-11-15' where col1='2006-01-03'; update t2 set col1='2006-11-15' where col1='2006-01-03'; update t3 set col1='2006-11-15' where col1='2006-01-03'; update t4 set col1='2006-11-15' where col1='2006-01-03'; update t5 set col1='2006-11-15' where col1='2006-01-03'; update t6 set col1='2006-11-15' where col1='2006-01-03'; +commit; select * from t1 order by col1; col1 2006-08-17 @@ -10849,12 +10526,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; delete from t1 where col1='2006-08-17'; delete from t2 where col1='2006-08-17'; delete from t3 where col1='2006-08-17'; delete from t4 where col1='2006-08-17'; delete from t5 where col1='2006-08-17'; delete from t6 where col1='2006-08-17'; +commit; select * from t1 order by col1; col1 2006-11-15 @@ -10878,12 +10557,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t1 values ('2006-08-17'); insert into t2 values ('2006-08-17'); insert into t3 values ('2006-08-17'); insert into t4 values (60,'2006-08-17'); insert into t5 values (60,'2006-08-17'); insert into t6 values (60,'2006-08-17'); +commit; select * from t1 order by col1; col1 2006-08-17 @@ -10950,12 +10631,14 @@ colint col1 ------------------------------------------------------------------------- --- Delete rows and partitions of tables with yearweek(col1)-200600 ------------------------------------------------------------------------- +begin; delete from t11 where col1='2006-08-17'; delete from t22 where col1='2006-08-17'; delete from t33 where col1='2006-08-17'; delete from t44 where col1='2006-08-17'; delete from t55 where col1='2006-08-17'; delete from t66 where col1='2006-08-17'; +commit; select * from t11 order by col1; col1 2006-11-15 @@ -10979,12 +10662,14 @@ colint col1 2 2006-01-17 3 2006-01-25 4 2006-02-05 +begin; insert into t11 values ('2006-08-17'); insert into t22 values ('2006-08-17'); insert into t33 values ('2006-08-17'); insert into t44 values (60,'2006-08-17'); insert into t55 values (60,'2006-08-17'); insert into t66 values (60,'2006-08-17'); +commit; select * from t11 order by col1; col1 2006-08-17 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result index bd4239fd26d..3fb51c67d00 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 1.2.1 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -540,8 +540,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1032,8 +1032,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1539,8 +1539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2040,8 +2040,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2539,8 +2539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3049,8 +3049,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3561,8 +3561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4062,8 +4062,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4554,8 +4554,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5046,8 +5046,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5553,8 +5553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6054,8 +6054,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6553,8 +6553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7063,8 +7063,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7575,8 +7575,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8077,8 +8077,8 @@ DROP TABLE t1; # 1.2.2 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8097,8 +8097,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8585,8 +8585,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8605,8 +8605,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9093,8 +9093,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9121,8 +9121,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9616,8 +9616,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9642,8 +9642,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10133,8 +10133,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 
INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10157,8 +10157,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10648,8 +10648,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10676,8 +10676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11174,8 +11174,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11202,8 +11202,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11702,8 +11702,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11726,8 +11726,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12219,8 +12219,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12239,8 +12239,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12727,8 +12727,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12747,8 +12747,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13235,8 +13235,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE 
t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13263,8 +13263,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13758,8 +13758,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13784,8 +13784,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14275,8 +14275,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14299,8 +14299,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14790,8 +14790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14818,8 +14818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15316,8 +15316,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15344,8 +15344,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15844,8 +15844,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15868,8 +15868,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16362,8 +16362,8 @@ 
DROP TABLE t1; # 1.2.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16855,8 +16855,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17348,8 +17348,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17856,8 +17856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18358,8 +18358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18858,8 +18858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19369,8 +19369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19882,8 +19882,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20384,8 +20384,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20877,8 +20877,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21370,8 +21370,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21878,8 +21878,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22380,8 +22380,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22880,8 +22880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 
INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -23391,8 +23391,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -23904,8 +23904,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -24406,8 +24406,8 @@ TRUNCATE t1;
 DROP TABLE t1;
 DROP TABLE IF EXISTS t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -24899,8 +24899,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -25392,8 +25392,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -25900,8 +25900,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -26402,8 +26402,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -26902,8 +26902,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -27413,8 +27413,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -27926,8 +27926,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
index a59d3daeec7..2cc7b4298fc 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
@@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4;
 SET @max_int_4 = 2147483647;
 DROP TABLE IF EXISTS t0_template;
 CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) ,
@@ -27,8 +27,8 @@ file_list VARBINARY(10000),
 PRIMARY KEY (state)
 ) ENGINE = MEMORY;
 DROP TABLE IF EXISTS t0_aux;
-CREATE 
TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; #------------------------------------------------------------------------ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62,8 +62,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77,8 +77,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -100,8 +100,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -121,8 +121,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -140,8 +140,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -163,8 +163,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -186,8 +186,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -206,8 +206,8 @@ ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -221,8 +221,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -236,8 +236,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must 
include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -259,8 +259,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -280,8 +280,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -299,8 +299,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -322,8 +322,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -345,8 +345,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -366,8 +366,8 @@ DROP TABLE t1; # 1.1.3 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -858,8 +858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1350,8 +1350,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1857,8 +1857,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2358,8 +2358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2859,8 +2859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3369,8 +3369,8 @@ TRUNCATE t1; # 
End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3881,8 +3881,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4382,8 +4382,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4874,8 +4874,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5366,8 +5366,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5873,8 +5873,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6374,8 +6374,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6875,8 +6875,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7385,8 +7385,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7897,8 +7897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8399,8 +8399,8 @@ DROP TABLE t1; # 1.1.4 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8419,8 +8419,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8907,8 +8907,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8927,8 +8927,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - 
`f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9415,8 +9415,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9443,8 +9443,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9938,8 +9938,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9964,8 +9964,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10455,8 +10455,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10479,8 +10479,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10972,8 +10972,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11000,8 +11000,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11498,8 +11498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11526,8 +11526,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12026,8 +12026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12050,8 +12050,8 @@ create_command SHOW 
CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12543,8 +12543,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12563,8 +12563,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13051,8 +13051,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13071,8 +13071,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13559,8 +13559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13587,8 +13587,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14082,8 +14082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14108,8 +14108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14599,8 +14599,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14623,8 +14623,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15116,8 +15116,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000)
@@ -15144,8 +15144,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL,
@@ -15642,8 +15642,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -15670,8 +15670,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL,
@@ -16170,8 +16170,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -16194,8 +16194,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
index 9202e5f3a97..4aded14f336 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
@@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4;
 SET @max_int_4 = 2147483647;
 DROP TABLE IF EXISTS t0_template;
 CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) ,
@@ -27,8 +27,8 @@ file_list VARBINARY(10000),
 PRIMARY KEY (state)
 ) ENGINE = MEMORY;
 DROP TABLE IF EXISTS t0_aux;
-CREATE TABLE t0_aux ( f_int1 INTEGER,
-f_int2 INTEGER,
+CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) )
@@ -47,8 +47,8 @@ SET @@session.sql_mode= '';
 # 2.1.5 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns
 DROP TABLE IF EXISTS t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -487,8 +487,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -927,8 +927,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -1382,8 +1382,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 
0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1831,8 +1831,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2280,8 +2280,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2738,8 +2738,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3198,8 +3198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3647,8 +3647,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4087,8 +4087,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4527,8 +4527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4982,8 +4982,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5431,8 +5431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5880,8 +5880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6338,8 +6338,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6798,8 +6798,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7247,8 +7247,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7267,8 +7267,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` 
int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7703,8 +7703,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7723,8 +7723,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8159,8 +8159,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8187,8 +8187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8630,8 +8630,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8656,8 +8656,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9095,8 +9095,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9119,8 +9119,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9560,8 +9560,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9588,8 +9588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10034,8 +10034,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10062,8 +10062,8 @@ create_command SHOW CREATE TABLE t1; Table 
Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10510,8 +10510,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10534,8 +10534,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10979,8 +10979,8 @@ DROP TABLE t1; # 2.2.1 DROP PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11419,8 +11419,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11859,8 +11859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12314,8 +12314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12763,8 +12763,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13210,8 +13210,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13668,8 +13668,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14128,8 +14128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14577,8 +14577,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15017,8 +15017,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15457,8 +15457,8 @@ TRUNCATE t1; 
# End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15912,8 +15912,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16361,8 +16361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16808,8 +16808,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17266,8 +17266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17726,8 +17726,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18176,8 +18176,8 @@ DROP TABLE t1; # 2.2.2 DROP UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18196,8 +18196,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18632,8 +18632,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18652,8 +18652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19088,8 +19088,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19116,8 +19116,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19559,8 +19559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 
0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19585,8 +19585,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20024,8 +20024,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20048,8 +20048,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20487,8 +20487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20515,8 +20515,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20961,8 +20961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20989,8 +20989,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21437,8 +21437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21461,8 +21461,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21902,8 +21902,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21922,8 +21922,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22358,8 +22358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, 
-f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22378,8 +22378,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22814,8 +22814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22842,8 +22842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23285,8 +23285,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23311,8 +23311,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23750,8 +23750,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23774,8 +23774,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24213,8 +24213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24241,8 +24241,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24687,8 +24687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24715,8 +24715,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25163,8 +25163,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25187,8 +25187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25629,8 +25629,8 @@ DROP TABLE t1; # 2.2.3 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26069,8 +26069,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26509,8 +26509,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26964,8 +26964,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27413,8 +27413,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27860,8 +27860,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28318,8 +28318,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28778,8 +28778,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29227,8 +29227,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29667,8 +29667,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30107,8 +30107,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30562,8 +30562,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) 
DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31011,8 +31011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31458,8 +31458,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31916,8 +31916,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32376,8 +32376,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32825,8 +32825,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32845,8 +32845,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33281,8 +33281,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33301,8 +33301,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33737,8 +33737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33765,8 +33765,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34208,8 +34208,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34234,8 +34234,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT 
NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
@@ -34673,8 +34673,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -34697,8 +34697,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
@@ -35136,8 +35136,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -35164,8 +35164,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
@@ -35610,8 +35610,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -35638,8 +35638,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
@@ -36086,8 +36086,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -36110,8 +36110,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
index 8c7bc5ef296..6838b33d89d 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
@@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4;
 SET @max_int_4 = 2147483647;
 DROP TABLE IF EXISTS t0_template;
 CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) ,
@@ -27,8 +27,8 @@ file_list VARBINARY(10000),
 PRIMARY KEY (state)
 ) ENGINE = MEMORY;
 DROP TABLE IF EXISTS t0_aux;
-CREATE TABLE t0_aux ( f_int1 INTEGER,
-f_int2 INTEGER,
+CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) )
@@ -48,8 +48,8 @@ SET @@session.sql_mode= '';
 # 1.1.1 no PRIMARY KEY or UNIQUE INDEX 
exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -504,8 +504,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -524,7 +524,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -960,8 +960,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -988,7 +988,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1431,8 +1431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1457,7 +1457,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1920,7 +1920,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2361,8 +2361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2389,7 +2389,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2835,8 +2835,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2863,7 +2863,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT 
NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3311,8 +3311,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3335,7 +3335,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.1.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3798,7 +3798,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4237,6 +4237,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4269,8 +4271,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4290,7 +4292,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4729,6 +4731,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4761,8 +4765,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4790,7 +4794,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5236,6 +5240,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5268,8 +5274,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5295,7 +5301,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5737,6 +5743,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5769,8 +5777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5794,7 +5802,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6238,6 +6246,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6270,8 +6280,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6299,7 +6309,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6748,6 +6758,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6780,8 +6792,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6809,7 +6821,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7260,6 +7272,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7292,8 +7306,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7317,7 +7331,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` 
bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7760,6 +7774,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7793,8 +7809,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7814,7 +7830,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8253,6 +8269,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8285,8 +8303,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8306,7 +8324,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8745,6 +8763,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8777,8 +8797,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8806,7 +8826,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9252,6 +9272,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9284,8 +9306,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9311,7 +9333,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9753,6 +9775,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9785,8 +9809,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9810,7 +9834,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10254,6 +10278,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10286,8 +10312,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10315,7 +10341,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10764,6 +10790,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10796,8 +10824,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10825,7 +10853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11276,6 +11304,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11308,8 +11338,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11333,7 +11363,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11776,6 +11806,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11810,8 +11842,8 @@ DROP TABLE t1; # 1.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11830,7 +11862,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12318,8 +12350,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12338,7 +12370,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12826,8 +12858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12854,7 +12886,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13349,8 +13381,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13375,7 +13407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13866,8 +13898,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13890,7 +13922,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14383,8 +14415,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14411,7 +14443,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14909,8 
+14941,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14937,7 +14969,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15437,8 +15469,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15461,7 +15493,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15954,8 +15986,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15974,7 +16006,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16462,8 +16494,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16482,7 +16514,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16970,8 +17002,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16998,7 +17030,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17493,8 +17525,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17519,7 +17551,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18010,8 +18042,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18034,7 +18066,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18527,8 +18559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18555,7 +18587,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19053,8 +19085,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19081,7 +19113,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19581,8 +19613,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19605,7 +19637,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result index 92cfa5d59b9..f6e98be3c98 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -44,8 +44,8 @@ SET @@session.sql_mode= ''; # 1.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -500,8 +500,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -956,8 +956,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1427,8 +1427,8 @@ 
TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1892,8 +1892,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2355,8 +2355,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2829,8 +2829,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3305,8 +3305,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3771,8 +3771,8 @@ DROP TABLE t1; # 1.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3791,8 +3791,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4231,6 +4231,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4263,8 +4266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4283,8 +4286,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4723,6 +4726,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4755,8 +4761,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4783,8 +4789,8 @@ create_command 
SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5230,6 +5236,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5262,8 +5271,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5288,8 +5297,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5731,6 +5740,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5763,8 +5775,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5787,8 +5799,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6230,6 +6242,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6262,8 +6277,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6290,8 +6305,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6740,6 +6755,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' 
doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6772,8 +6790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6800,8 +6818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7252,6 +7270,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7284,8 +7305,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7308,8 +7329,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7752,6 +7773,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7785,8 +7809,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7805,8 +7829,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8245,6 +8269,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8277,8 +8304,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8297,8 +8324,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, 
`f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8737,6 +8764,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8769,8 +8799,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8797,8 +8827,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9244,6 +9274,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9276,8 +9309,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9302,8 +9335,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9745,6 +9778,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9777,8 +9813,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9801,8 +9837,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10244,6 +10280,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10276,8 +10315,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER 
DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10304,8 +10343,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10754,6 +10793,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10786,8 +10828,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10814,8 +10856,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11266,6 +11308,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11298,8 +11343,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11322,8 +11367,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL DEFAULT '0', + `f_int1` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11766,6 +11811,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11800,8 +11848,8 @@ DROP TABLE t1; # 1.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12308,8 +12356,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12816,8 +12864,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 
CHAR(20), f_charbig VARCHAR(1000) @@ -13339,8 +13387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13856,8 +13904,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14371,8 +14419,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14897,8 +14945,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15425,8 +15473,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15942,8 +15990,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16450,8 +16498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16958,8 +17006,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17481,8 +17529,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17998,8 +18046,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18513,8 +18561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19039,8 +19087,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19567,8 +19615,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result 
b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result index 1ae379ccc07..883c9ec3453 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 2.1.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -526,7 +526,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -964,8 +964,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -992,7 +992,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1437,8 +1437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1463,7 +1463,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1902,8 +1902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1926,7 +1926,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ 
-2369,8 +2369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2397,7 +2397,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2843,8 +2843,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2871,7 +2871,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3321,8 +3321,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3345,7 +3345,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3788,8 +3788,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3809,7 +3809,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4250,6 +4250,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4282,8 +4284,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4303,7 +4305,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4744,6 +4746,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4776,8 +4780,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 
CHAR(20), f_charbig VARCHAR(1000) @@ -4805,7 +4809,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5253,6 +5257,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5285,8 +5291,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5312,7 +5318,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5754,6 +5760,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5786,8 +5794,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5811,7 +5819,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6257,6 +6265,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6289,8 +6299,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6318,7 +6328,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6767,6 +6777,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6799,8 +6811,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6828,7 
+6840,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7281,6 +7293,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7313,8 +7327,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7338,7 +7352,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7783,6 +7797,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7816,8 +7832,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7837,7 +7853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8278,6 +8294,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8310,8 +8328,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8331,7 +8349,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8772,6 +8790,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8804,8 +8824,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8833,7 +8853,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9281,6 +9301,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9313,8 +9335,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9340,7 +9362,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9782,6 +9804,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9814,8 +9838,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9839,7 +9863,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10285,6 +10309,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10317,8 +10343,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10346,7 +10372,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10795,6 +10821,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10827,8 +10855,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10856,7 +10884,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL 
DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11309,6 +11337,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11341,8 +11371,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11366,7 +11396,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11811,6 +11841,8 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11845,8 +11877,8 @@ DROP TABLE t1; # 2.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11865,7 +11897,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12355,8 +12387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12375,7 +12407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12865,8 +12897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12893,7 +12925,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13390,8 +13422,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13416,7 +13448,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13907,8 +13939,8 @@ TRUNCATE 
t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13931,7 +13963,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14426,8 +14458,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14454,7 +14486,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14952,8 +14984,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14980,7 +15012,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15482,8 +15514,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15506,7 +15538,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16001,8 +16033,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16021,7 +16053,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16511,8 +16543,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16531,7 +16563,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17021,8 +17053,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17049,7 +17081,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT 
NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
@@ -17546,8 +17578,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -17572,7 +17604,7 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
@@ -18063,8 +18095,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -18087,7 +18119,7 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
@@ -18582,8 +18614,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -18610,7 +18642,7 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
@@ -19108,8 +19140,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -19136,7 +19168,7 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
@@ -19638,8 +19670,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -19662,7 +19694,7 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
 `f_int2` mediumint(9) DEFAULT NULL,
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
index 9ed9866a42f..993025c9fb2 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
@@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4;
 SET @max_int_4 = 2147483647;
 DROP TABLE IF EXISTS t0_template;
 CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) ,
@@ -27,8 +27,8 @@ file_list VARBINARY(10000),
 PRIMARY KEY (state)
 )
ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -43,8 +43,8 @@ SET @@session.sql_mode= ''; # 2.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -502,8 +502,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -961,8 +961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1435,8 +1435,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1905,8 +1905,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2373,8 +2373,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2852,8 +2852,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3331,8 +3331,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3800,8 +3800,8 @@ DROP TABLE t1; # 2.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3820,8 +3820,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4263,6 +4263,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4295,8 +4298,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 
CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4315,8 +4318,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4758,6 +4761,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4790,8 +4796,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4818,8 +4824,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5268,6 +5274,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5300,8 +5309,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5326,8 +5335,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5774,6 +5783,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5806,8 +5818,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5830,8 +5842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6278,6 +6290,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 
+ 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6310,8 +6325,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6338,8 +6353,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6793,6 +6808,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6825,8 +6843,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6853,8 +6871,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7308,6 +7326,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7340,8 +7361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7364,8 +7385,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7811,6 +7832,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7844,8 +7868,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7864,8 +7888,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT 
'0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8307,6 +8331,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8339,8 +8366,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8359,8 +8386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8802,6 +8829,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8834,8 +8864,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8862,8 +8892,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9312,6 +9342,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9344,8 +9377,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9370,8 +9403,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9818,6 +9851,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 
success: 1 @@ -9850,8 +9886,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9874,8 +9910,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10322,6 +10358,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10354,8 +10393,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10382,8 +10421,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10837,6 +10876,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10869,8 +10911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10897,8 +10939,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11352,6 +11394,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11384,8 +11429,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11408,8 +11453,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL DEFAULT '0', + `f_int1` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11855,6 +11900,9 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; +Warnings: +Warning 1364 Field 'f_int1' doesn't have a default value +Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11889,8 +11937,8 @@ DROP TABLE t1; # 2.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12400,8 +12448,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12911,8 +12959,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13437,8 +13485,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13959,8 +14007,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14479,8 +14527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15010,8 +15058,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15541,8 +15589,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16061,8 +16109,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16572,8 +16620,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17083,8 +17131,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17609,8 +17657,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), 
f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18131,8 +18179,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18651,8 +18699,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19182,8 +19230,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19713,8 +19761,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result index b4e8e47b7d9..8412c7b37b0 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER, -f_int2 INTEGER, +CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; # 1.1 ALTER ... 
ANALYZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69,8 +69,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -528,8 +528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -968,8 +968,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -998,8 +998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1441,8 +1441,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1469,8 +1469,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1908,8 +1908,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1934,8 +1934,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2375,8 +2375,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2405,8 +2405,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2851,8 +2851,8 
@@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2881,8 +2881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3329,8 +3329,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3355,8 +3355,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3797,8 +3797,8 @@ DROP TABLE t1; # 1.2 ALTER ... ANALYZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3819,8 +3819,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4256,8 +4256,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4278,8 +4278,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4718,8 +4718,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4748,8 +4748,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5191,8 +5191,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5219,8 +5219,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL @@ -5658,8 +5658,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5684,8 +5684,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6125,8 +6125,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6155,8 +6155,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6601,8 +6601,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6631,8 +6631,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7079,8 +7079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7105,8 +7105,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7547,8 +7547,8 @@ DROP TABLE t1; # 1.3 ALTER ... 
ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7570,8 +7570,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8007,8 +8007,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8030,8 +8030,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8470,8 +8470,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8501,8 +8501,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8944,8 +8944,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8973,8 +8973,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9412,8 +9412,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9439,8 +9439,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9880,8 +9880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9911,8 +9911,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -10357,8 +10357,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10388,8 +10388,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10836,8 +10836,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10863,8 +10863,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11305,8 +11305,8 @@ DROP TABLE t1; # 1.4 ALTER ... ANALYZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11328,8 +11328,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11765,8 +11765,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11788,8 +11788,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12228,8 +12228,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12259,8 +12259,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12702,8 +12702,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12731,8 +12731,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) 
DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13170,8 +13170,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13197,8 +13197,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13638,8 +13638,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13669,8 +13669,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14115,8 +14115,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14146,8 +14146,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14594,8 +14594,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14621,8 +14621,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15063,8 +15063,8 @@ DROP TABLE t1; # 1.5 ALTER ... 
ANALYZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15085,8 +15085,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15522,8 +15522,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15544,8 +15544,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15984,8 +15984,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16014,8 +16014,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16457,8 +16457,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16485,8 +16485,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16924,8 +16924,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16950,8 +16950,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17391,8 +17391,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17421,8 +17421,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -17867,8 +17867,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17897,8 +17897,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18345,8 +18345,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18371,8 +18371,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18816,8 +18816,8 @@ DROP TABLE t1; # 2.1 ALTER ... CHECK PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18838,8 +18838,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19275,8 +19275,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19297,8 +19297,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19737,8 +19737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19767,8 +19767,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20210,8 +20210,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20238,8 +20238,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20677,8 +20677,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20703,8 +20703,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21144,8 +21144,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21174,8 +21174,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21620,8 +21620,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21650,8 +21650,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22098,8 +22098,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22124,8 +22124,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22566,8 +22566,8 @@ DROP TABLE t1; # 2.2 ALTER ... 
CHECK PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22588,8 +22588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23025,8 +23025,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23047,8 +23047,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23487,8 +23487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23517,8 +23517,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23960,8 +23960,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23988,8 +23988,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24427,8 +24427,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24453,8 +24453,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24894,8 +24894,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24924,8 +24924,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -25370,8 +25370,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25400,8 +25400,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25848,8 +25848,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25874,8 +25874,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26316,8 +26316,8 @@ DROP TABLE t1; # 2.3 ALTER ... CHECK PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26339,8 +26339,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26776,8 +26776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26799,8 +26799,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27239,8 +27239,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27270,8 +27270,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27713,8 +27713,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27742,8 +27742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28181,8 +28181,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28208,8 +28208,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28649,8 +28649,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28680,8 +28680,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29126,8 +29126,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29157,8 +29157,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29605,8 +29605,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29632,8 +29632,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30074,8 +30074,8 @@ DROP TABLE t1; # 2.4 ALTER ... 
CHECK PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30097,8 +30097,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30534,8 +30534,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30557,8 +30557,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30997,8 +30997,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31028,8 +31028,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31471,8 +31471,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31500,8 +31500,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31939,8 +31939,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31966,8 +31966,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32407,8 +32407,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32438,8 +32438,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -32884,8 +32884,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32915,8 +32915,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33363,8 +33363,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33390,8 +33390,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33832,8 +33832,8 @@ DROP TABLE t1; # 2.5 ALTER ... CHECK PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33854,8 +33854,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34291,8 +34291,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34313,8 +34313,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34753,8 +34753,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34783,8 +34783,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35226,8 +35226,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35254,8 +35254,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35693,8 +35693,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35719,8 +35719,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36160,8 +36160,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36190,8 +36190,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36636,8 +36636,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36666,8 +36666,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37114,8 +37114,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37140,8 +37140,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37585,8 +37585,8 @@ DROP TABLE t1; # 3.1 ALTER ... 
OPTIMIZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37608,8 +37608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38045,8 +38045,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38068,8 +38068,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38508,8 +38508,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38539,8 +38539,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38982,8 +38982,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39011,8 +39011,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39450,8 +39450,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39477,8 +39477,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39918,8 +39918,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39949,8 +39949,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -40395,8 +40395,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40426,8 +40426,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40874,8 +40874,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40901,8 +40901,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41343,8 +41343,8 @@ DROP TABLE t1; # 3.2 ALTER ... OPTIMIZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41366,8 +41366,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41803,8 +41803,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41826,8 +41826,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42266,8 +42266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42297,8 +42297,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42740,8 +42740,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42769,8 +42769,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43208,8 +43208,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -43235,8 +43235,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43676,8 +43676,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -43707,8 +43707,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44153,8 +44153,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44184,8 +44184,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44632,8 +44632,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44659,8 +44659,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45101,8 +45101,8 @@ DROP TABLE t1; # 3.3 ALTER ... 
OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45124,8 +45124,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45561,8 +45561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45584,8 +45584,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46024,8 +46024,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46055,8 +46055,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46498,8 +46498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46527,8 +46527,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46966,8 +46966,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46993,8 +46993,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47434,8 +47434,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47465,8 +47465,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47911,8 +47911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47942,8 +47942,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48390,8 +48390,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48417,8 +48417,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48859,8 +48859,8 @@ DROP TABLE t1; # 3.4 ALTER ... OPTIMIZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48882,8 +48882,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49319,8 +49319,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49342,8 +49342,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49782,8 +49782,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49813,8 +49813,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50256,8 +50256,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50285,8 +50285,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + 
`f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50724,8 +50724,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50751,8 +50751,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51192,8 +51192,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51223,8 +51223,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51669,8 +51669,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51700,8 +51700,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52148,8 +52148,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52175,8 +52175,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52617,8 +52617,8 @@ DROP TABLE t1; # 3.5 ALTER ... 
OPTIMIZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52640,8 +52640,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53077,8 +53077,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53100,8 +53100,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53540,8 +53540,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53571,8 +53571,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54014,8 +54014,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54043,8 +54043,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54482,8 +54482,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54509,8 +54509,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54950,8 +54950,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54981,8 +54981,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -55427,8 +55427,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55458,8 +55458,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55906,8 +55906,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55933,8 +55933,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56378,8 +56378,8 @@ DROP TABLE t1; # 4.1 ALTER ... REBUILD PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56398,8 +56398,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56835,8 +56835,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56855,8 +56855,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57295,8 +57295,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57323,8 +57323,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57766,8 +57766,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57792,8 +57792,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58231,8 +58231,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58255,8 +58255,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58696,8 +58696,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58724,8 +58724,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59170,8 +59170,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59198,8 +59198,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59646,8 +59646,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59670,8 +59670,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60112,8 +60112,8 @@ DROP TABLE t1; # 4.2 ALTER ... 
REBUILD PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60132,8 +60132,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60569,8 +60569,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60589,8 +60589,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61029,8 +61029,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61057,8 +61057,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61500,8 +61500,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61526,8 +61526,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61965,8 +61965,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61989,8 +61989,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62430,8 +62430,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62458,8 +62458,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -62904,8 +62904,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62932,8 +62932,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63380,8 +63380,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63404,8 +63404,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63846,8 +63846,8 @@ DROP TABLE t1; # 4.3 ALTER ... REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63861,8 +63861,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63876,8 +63876,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63899,8 +63899,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63920,8 +63920,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63939,8 +63939,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63962,8 +63962,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63985,8 +63985,8 @@ ALTER TABLE t1 REBUILD PARTITION 
part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64006,8 +64006,8 @@ DROP TABLE t1; # 4.4 ALTER ... REBUILD PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64021,8 +64021,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64036,8 +64036,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64059,8 +64059,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64080,8 +64080,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64099,8 +64099,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64122,8 +64122,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64145,8 +64145,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64166,8 +64166,8 @@ DROP TABLE t1; # 4.5 ALTER ... 
REBUILD PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64186,8 +64186,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64623,8 +64623,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64643,8 +64643,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65083,8 +65083,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65111,8 +65111,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65554,8 +65554,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65580,8 +65580,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66019,8 +66019,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66043,8 +66043,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66484,8 +66484,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66512,8 +66512,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL @@ -66958,8 +66958,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66986,8 +66986,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67434,8 +67434,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67458,8 +67458,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67903,8 +67903,8 @@ DROP TABLE t1; # 5.1 ALTER ... REPAIR PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67925,8 +67925,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68362,8 +68362,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68384,8 +68384,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68824,8 +68824,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68854,8 +68854,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69297,8 +69297,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69325,8 +69325,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69764,8 +69764,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69790,8 +69790,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70231,8 +70231,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70261,8 +70261,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70707,8 +70707,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70737,8 +70737,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71185,8 +71185,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71211,8 +71211,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71653,8 +71653,8 @@ DROP TABLE t1; # 5.2 ALTER ... 
REPAIR PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71675,8 +71675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72112,8 +72112,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72134,8 +72134,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72574,8 +72574,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72604,8 +72604,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73047,8 +73047,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73075,8 +73075,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73514,8 +73514,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73540,8 +73540,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73981,8 +73981,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74011,8 +74011,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -74457,8 +74457,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74487,8 +74487,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74935,8 +74935,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74961,8 +74961,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75403,8 +75403,8 @@ DROP TABLE t1; # 5.3 ALTER ... REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75426,8 +75426,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75863,8 +75863,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75886,8 +75886,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76326,8 +76326,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76357,8 +76357,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76800,8 +76800,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76829,8 +76829,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77268,8 +77268,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77295,8 +77295,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77736,8 +77736,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77767,8 +77767,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78213,8 +78213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78244,8 +78244,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78692,8 +78692,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78719,8 +78719,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79161,8 +79161,8 @@ DROP TABLE t1; # 5.4 ALTER ... 
REPAIR PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79184,8 +79184,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79621,8 +79621,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79644,8 +79644,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80084,8 +80084,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80115,8 +80115,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80558,8 +80558,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80587,8 +80587,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81026,8 +81026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81053,8 +81053,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81494,8 +81494,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81525,8 +81525,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL @@ -81971,8 +81971,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82002,8 +82002,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82450,8 +82450,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82477,8 +82477,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82919,8 +82919,8 @@ DROP TABLE t1; # 5.5 ALTER ... REPAIR PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82941,8 +82941,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83378,8 +83378,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83400,8 +83400,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83840,8 +83840,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83870,8 +83870,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84313,8 +84313,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84341,8 +84341,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84780,8 +84780,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84806,8 +84806,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85247,8 +85247,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85277,8 +85277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85723,8 +85723,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85753,8 +85753,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86201,8 +86201,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86227,8 +86227,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86672,8 +86672,8 @@ DROP TABLE t1; # 6.1 ALTER ... 
REMOVE PARTITIONING; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86692,8 +86692,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87126,8 +87126,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87146,8 +87146,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87580,8 +87580,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87608,8 +87608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88042,8 +88042,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88068,8 +88068,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88502,8 +88502,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88526,8 +88526,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88960,8 +88960,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88988,8 +88988,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL
@@ -89422,8 +89422,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -89450,8 +89450,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
@@ -89884,8 +89884,8 @@ TRUNCATE t1;
 # End usability test (inc/partition_check.inc)
 DROP TABLE t1;
 CREATE TABLE t1 (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000)
@@ -89908,8 +89908,8 @@ create_command
 SHOW CREATE TABLE t1;
 Table Create Table
 t1 CREATE TABLE `t1` (
- `f_int1` int(11) DEFAULT NULL,
- `f_int2` int(11) DEFAULT NULL,
+ `f_int1` int(11) DEFAULT '0',
+ `f_int2` int(11) DEFAULT '0',
 `f_char1` char(20) DEFAULT NULL,
 `f_char2` char(20) DEFAULT NULL,
 `f_charbig` varchar(1000) DEFAULT NULL
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
index a13a53bd5f9..8182dce5625 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
@@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4;
 SET @max_int_4 = 2147483647;
 DROP TABLE IF EXISTS t0_template;
 CREATE TABLE t0_template (
-f_int1 INTEGER,
-f_int2 INTEGER,
+f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) ,
@@ -27,8 +27,8 @@ file_list VARBINARY(10000),
 PRIMARY KEY (state)
 ) ENGINE = MEMORY;
 DROP TABLE IF EXISTS t0_aux;
-CREATE TABLE t0_aux ( f_int1 INTEGER,
-f_int2 INTEGER,
+CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0,
+f_int2 INTEGER DEFAULT 0,
 f_char1 CHAR(20),
 f_char2 CHAR(20),
 f_charbig VARCHAR(1000) )
@@ -52,8 +52,8 @@ SET @@session.sql_mode= '';
 # 1.1 The partitioning function contains one column.
DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67,8 +67,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -507,8 +507,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -522,8 +522,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -962,8 +962,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -985,8 +985,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1432,8 +1432,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1453,8 +1453,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1915,8 +1915,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2360,8 +2360,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2383,8 +2383,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2833,8 +2833,8 @@ TRUNCATE t1; # End 
usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2860,8 +2860,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3312,8 +3312,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3331,8 +3331,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.2 The partitioning function contains two columns. DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3792,8 +3792,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4232,8 +4232,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4247,8 +4247,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4687,8 +4687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4710,8 +4710,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5157,8 +5157,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5178,8 +5178,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -5621,8 +5621,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5640,8 +5640,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6083,8 +6083,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6106,8 +6106,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6556,8 +6556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6579,8 +6579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7031,8 +7031,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7050,8 +7050,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7500,8 +7500,8 @@ DROP TABLE t1; # 2.5 PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7992,8 +7992,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8484,8 +8484,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8991,8 +8991,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9492,8 +9492,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9993,8 +9993,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10503,8 +10503,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11019,8 +11019,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11520,8 +11520,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12012,8 +12012,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12504,8 +12504,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13011,8 +13011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13512,8 +13512,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14013,8 +14013,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14523,8 +14523,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15039,8 +15039,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15540,8 +15540,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15555,8 +15555,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` 
int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16048,8 +16048,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16063,8 +16063,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16556,8 +16556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16579,8 +16579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17079,8 +17079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17100,8 +17100,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17596,8 +17596,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17615,8 +17615,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18113,8 +18113,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18136,8 +18136,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18639,8 +18639,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18666,8 +18666,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - 
`f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19171,8 +19171,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19190,8 +19190,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19693,8 +19693,8 @@ DROP TABLE t1; # 3.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20185,8 +20185,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20677,8 +20677,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21184,8 +21184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21685,8 +21685,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22184,8 +22184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22694,8 +22694,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23206,8 +23206,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23707,8 +23707,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24199,8 +24199,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24691,8 +24691,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE 
t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25198,8 +25198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25699,8 +25699,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26198,8 +26198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26708,8 +26708,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27220,8 +27220,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27721,8 +27721,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27736,8 +27736,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28229,8 +28229,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28244,8 +28244,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28737,8 +28737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28760,8 +28760,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29260,8 +29260,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ 
-29281,8 +29281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29777,8 +29777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29796,8 +29796,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30292,8 +30292,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30315,8 +30315,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30818,8 +30818,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30841,8 +30841,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -31346,8 +31346,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER, -f_int2 INTEGER, +f_int1 INTEGER DEFAULT 0, +f_int2 INTEGER DEFAULT 0, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31365,8 +31365,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT NULL, - `f_int2` int(11) DEFAULT NULL, + `f_int1` int(11) DEFAULT '0', + `f_int2` int(11) DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result index 1a89df3eb1e..9155661d6d9 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result @@ -10,8 +10,9 @@ call mtr.add_suppression("TokuDB: Warning: MySQL is trying to drop table "); # after timed out COALESCE PARTITION # Extended crash recovery testing of fast_alter_partition_table. call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); +flush tables; # Crash testing ADD PARTITION -SET SESSION debug="+d,crash_add_partition_1"; +SET SESSION debug_dbug="+d,crash_add_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -69,8 +70,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_1"; -SET SESSION debug="+d,crash_add_partition_2"; +SET SESSION debug_dbug="-d,crash_add_partition_1"; +SET SESSION debug_dbug="+d,crash_add_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -130,8 +131,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_2"; -SET SESSION debug="+d,crash_add_partition_3"; +SET SESSION debug_dbug="-d,crash_add_partition_2"; +SET SESSION debug_dbug="+d,crash_add_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -191,8 +192,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_3"; -SET SESSION debug="+d,crash_add_partition_4"; +SET SESSION debug_dbug="-d,crash_add_partition_3"; +SET SESSION debug_dbug="+d,crash_add_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -252,8 +253,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_4"; -SET SESSION debug="+d,crash_add_partition_5"; +SET SESSION debug_dbug="-d,crash_add_partition_4"; +SET SESSION debug_dbug="+d,crash_add_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -313,8 +314,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_5"; -SET SESSION debug="+d,crash_add_partition_6"; +SET SESSION debug_dbug="-d,crash_add_partition_5"; +SET SESSION debug_dbug="+d,crash_add_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -374,8 +375,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_6"; -SET SESSION debug="+d,crash_add_partition_7"; +SET SESSION debug_dbug="-d,crash_add_partition_6"; +SET SESSION debug_dbug="+d,crash_add_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -435,8 +436,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_7"; -SET SESSION debug="+d,crash_add_partition_8"; +SET SESSION debug_dbug="-d,crash_add_partition_7"; +SET SESSION debug_dbug="+d,crash_add_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -497,8 +498,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_8"; -SET SESSION debug="+d,crash_add_partition_9"; +SET SESSION debug_dbug="-d,crash_add_partition_8"; +SET SESSION debug_dbug="+d,crash_add_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -557,8 +558,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_9"; -SET SESSION debug="+d,crash_add_partition_10"; +SET SESSION debug_dbug="-d,crash_add_partition_9"; +SET SESSION 
debug_dbug="+d,crash_add_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -617,9 +618,9 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_add_partition_10"; +SET SESSION debug_dbug="-d,crash_add_partition_10"; # Error recovery testing ADD PARTITION -SET SESSION debug="+d,fail_add_partition_1"; +SET SESSION debug_dbug="+d,fail_add_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -731,8 +732,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_1"; -SET SESSION debug="+d,fail_add_partition_2"; +SET SESSION debug_dbug="-d,fail_add_partition_1"; +SET SESSION debug_dbug="+d,fail_add_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -844,8 +845,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_2"; -SET SESSION debug="+d,fail_add_partition_3"; +SET SESSION debug_dbug="-d,fail_add_partition_2"; +SET SESSION debug_dbug="+d,fail_add_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -957,8 +958,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_3"; -SET SESSION debug="+d,fail_add_partition_4"; +SET SESSION debug_dbug="-d,fail_add_partition_3"; +SET SESSION debug_dbug="+d,fail_add_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1070,8 +1071,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_4"; -SET SESSION debug="+d,fail_add_partition_5"; +SET SESSION debug_dbug="-d,fail_add_partition_4"; +SET SESSION debug_dbug="+d,fail_add_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1183,8 +1184,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_5"; -SET SESSION debug="+d,fail_add_partition_6"; +SET SESSION debug_dbug="-d,fail_add_partition_5"; +SET SESSION debug_dbug="+d,fail_add_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1296,8 +1297,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_6"; -SET SESSION debug="+d,fail_add_partition_7"; +SET SESSION debug_dbug="-d,fail_add_partition_6"; +SET SESSION debug_dbug="+d,fail_add_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1409,8 +1410,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_7"; -SET SESSION debug="+d,fail_add_partition_8"; +SET SESSION debug_dbug="-d,fail_add_partition_7"; +SET SESSION debug_dbug="+d,fail_add_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1524,8 +1525,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_8"; -SET SESSION debug="+d,fail_add_partition_9"; +SET SESSION debug_dbug="-d,fail_add_partition_8"; +SET SESSION debug_dbug="+d,fail_add_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1639,8 +1640,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_9"; -SET SESSION 
debug="+d,fail_add_partition_10"; +SET SESSION debug_dbug="-d,fail_add_partition_9"; +SET SESSION debug_dbug="+d,fail_add_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1754,9 +1755,9 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_add_partition_10"; +SET SESSION debug_dbug="-d,fail_add_partition_10"; # Test DROP PARTITION -SET SESSION debug="+d,crash_drop_partition_1"; +SET SESSION debug_dbug="+d,crash_drop_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1813,8 +1814,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_1"; -SET SESSION debug="+d,crash_drop_partition_2"; +SET SESSION debug_dbug="-d,crash_drop_partition_1"; +SET SESSION debug_dbug="+d,crash_drop_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1873,8 +1874,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_2"; -SET SESSION debug="+d,crash_drop_partition_3"; +SET SESSION debug_dbug="-d,crash_drop_partition_2"; +SET SESSION debug_dbug="+d,crash_drop_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1933,8 +1934,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_3"; -SET SESSION debug="+d,crash_drop_partition_4"; +SET SESSION debug_dbug="-d,crash_drop_partition_3"; +SET SESSION debug_dbug="+d,crash_drop_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -1988,8 +1989,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_4"; -SET SESSION debug="+d,crash_drop_partition_5"; +SET SESSION debug_dbug="-d,crash_drop_partition_4"; +SET SESSION debug_dbug="+d,crash_drop_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2043,8 +2044,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_5"; -SET SESSION debug="+d,crash_drop_partition_6"; +SET SESSION debug_dbug="-d,crash_drop_partition_5"; +SET SESSION debug_dbug="+d,crash_drop_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2098,8 +2099,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_6"; -SET SESSION debug="+d,crash_drop_partition_7"; +SET SESSION debug_dbug="-d,crash_drop_partition_6"; +SET SESSION debug_dbug="+d,crash_drop_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2151,8 +2152,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_7"; -SET SESSION debug="+d,crash_drop_partition_8"; +SET SESSION debug_dbug="-d,crash_drop_partition_7"; +SET SESSION debug_dbug="+d,crash_drop_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2204,8 +2205,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_8"; -SET SESSION debug="+d,crash_drop_partition_9"; +SET SESSION debug_dbug="-d,crash_drop_partition_8"; +SET SESSION 
debug_dbug="+d,crash_drop_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2257,9 +2258,9 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_drop_partition_9"; +SET SESSION debug_dbug="-d,crash_drop_partition_9"; # Error recovery DROP PARTITION -SET SESSION debug="+d,fail_drop_partition_1"; +SET SESSION debug_dbug="+d,fail_drop_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2369,8 +2370,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_1"; -SET SESSION debug="+d,fail_drop_partition_2"; +SET SESSION debug_dbug="-d,fail_drop_partition_1"; +SET SESSION debug_dbug="+d,fail_drop_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2480,8 +2481,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_2"; -SET SESSION debug="+d,fail_drop_partition_3"; +SET SESSION debug_dbug="-d,fail_drop_partition_2"; +SET SESSION debug_dbug="+d,fail_drop_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2591,8 +2592,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_3"; -SET SESSION debug="+d,fail_drop_partition_4"; +SET SESSION debug_dbug="-d,fail_drop_partition_3"; +SET SESSION debug_dbug="+d,fail_drop_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2692,8 +2693,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_4"; -SET SESSION debug="+d,fail_drop_partition_5"; +SET SESSION debug_dbug="-d,fail_drop_partition_4"; +SET SESSION debug_dbug="+d,fail_drop_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2793,8 +2794,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_5"; -SET SESSION debug="+d,fail_drop_partition_6"; +SET SESSION debug_dbug="-d,fail_drop_partition_5"; +SET SESSION debug_dbug="+d,fail_drop_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2894,8 +2895,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_6"; -SET SESSION debug="+d,fail_drop_partition_7"; +SET SESSION debug_dbug="-d,fail_drop_partition_6"; +SET SESSION debug_dbug="+d,fail_drop_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -2995,8 +2996,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_7"; -SET SESSION debug="+d,fail_drop_partition_8"; +SET SESSION debug_dbug="-d,fail_drop_partition_7"; +SET SESSION debug_dbug="+d,fail_drop_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3096,8 +3097,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_drop_partition_8"; -SET SESSION debug="+d,fail_drop_partition_9"; +SET SESSION debug_dbug="-d,fail_drop_partition_8"; +SET SESSION debug_dbug="+d,fail_drop_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3197,10 +3198,10 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION 
debug="-d,fail_drop_partition_9"; +SET SESSION debug_dbug="-d,fail_drop_partition_9"; # Test change partition (REORGANIZE/REBUILD/COALESCE # or ADD HASH PARTITION). -SET SESSION debug="+d,crash_change_partition_1"; +SET SESSION debug_dbug="+d,crash_change_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3259,8 +3260,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_1"; -SET SESSION debug="+d,crash_change_partition_2"; +SET SESSION debug_dbug="-d,crash_change_partition_1"; +SET SESSION debug_dbug="+d,crash_change_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3321,8 +3322,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_2"; -SET SESSION debug="+d,crash_change_partition_3"; +SET SESSION debug_dbug="-d,crash_change_partition_2"; +SET SESSION debug_dbug="+d,crash_change_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3383,8 +3384,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_3"; -SET SESSION debug="+d,crash_change_partition_4"; +SET SESSION debug_dbug="-d,crash_change_partition_3"; +SET SESSION debug_dbug="+d,crash_change_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3445,8 +3446,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_4"; -SET SESSION debug="+d,crash_change_partition_5"; +SET SESSION debug_dbug="-d,crash_change_partition_4"; +SET SESSION debug_dbug="+d,crash_change_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3507,8 +3508,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_5"; -SET SESSION debug="+d,crash_change_partition_6"; +SET SESSION debug_dbug="-d,crash_change_partition_5"; +SET SESSION debug_dbug="+d,crash_change_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3569,8 +3570,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_6"; -SET SESSION debug="+d,crash_change_partition_7"; +SET SESSION debug_dbug="-d,crash_change_partition_6"; +SET SESSION debug_dbug="+d,crash_change_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3632,8 +3633,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_7"; -SET SESSION debug="+d,crash_change_partition_8"; +SET SESSION debug_dbug="-d,crash_change_partition_7"; +SET SESSION debug_dbug="+d,crash_change_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3695,8 +3696,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_8"; -SET SESSION debug="+d,crash_change_partition_9"; +SET SESSION debug_dbug="-d,crash_change_partition_8"; +SET SESSION debug_dbug="+d,crash_change_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3756,8 +3757,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; 
-SET SESSION debug="-d,crash_change_partition_9"; -SET SESSION debug="+d,crash_change_partition_10"; +SET SESSION debug_dbug="-d,crash_change_partition_9"; +SET SESSION debug_dbug="+d,crash_change_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3817,8 +3818,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_10"; -SET SESSION debug="+d,crash_change_partition_11"; +SET SESSION debug_dbug="-d,crash_change_partition_10"; +SET SESSION debug_dbug="+d,crash_change_partition_11"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3878,8 +3879,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_11"; -SET SESSION debug="+d,crash_change_partition_12"; +SET SESSION debug_dbug="-d,crash_change_partition_11"; +SET SESSION debug_dbug="+d,crash_change_partition_12"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -3939,10 +3940,10 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t1; -SET SESSION debug="-d,crash_change_partition_12"; +SET SESSION debug_dbug="-d,crash_change_partition_12"; # Error recovery change partition (REORGANIZE/REBUILD/COALESCE # or ADD HASH PARTITION). -SET SESSION debug="+d,fail_change_partition_1"; +SET SESSION debug_dbug="+d,fail_change_partition_1"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4056,8 +4057,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_1"; -SET SESSION debug="+d,fail_change_partition_2"; +SET SESSION debug_dbug="-d,fail_change_partition_1"; +SET SESSION debug_dbug="+d,fail_change_partition_2"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4171,8 +4172,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_2"; -SET SESSION debug="+d,fail_change_partition_3"; +SET SESSION debug_dbug="-d,fail_change_partition_2"; +SET SESSION debug_dbug="+d,fail_change_partition_3"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4286,8 +4287,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_3"; -SET SESSION debug="+d,fail_change_partition_4"; +SET SESSION debug_dbug="-d,fail_change_partition_3"; +SET SESSION debug_dbug="+d,fail_change_partition_4"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4401,8 +4402,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_4"; -SET SESSION debug="+d,fail_change_partition_5"; +SET SESSION debug_dbug="-d,fail_change_partition_4"; +SET SESSION debug_dbug="+d,fail_change_partition_5"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4516,8 +4517,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_5"; -SET SESSION debug="+d,fail_change_partition_6"; +SET SESSION debug_dbug="-d,fail_change_partition_5"; +SET SESSION debug_dbug="+d,fail_change_partition_6"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4631,8 +4632,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_6"; -SET SESSION 
debug="+d,fail_change_partition_7"; +SET SESSION debug_dbug="-d,fail_change_partition_6"; +SET SESSION debug_dbug="+d,fail_change_partition_7"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4748,8 +4749,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_7"; -SET SESSION debug="+d,fail_change_partition_8"; +SET SESSION debug_dbug="-d,fail_change_partition_7"; +SET SESSION debug_dbug="+d,fail_change_partition_8"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4865,8 +4866,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_8"; -SET SESSION debug="+d,fail_change_partition_9"; +SET SESSION debug_dbug="-d,fail_change_partition_8"; +SET SESSION debug_dbug="+d,fail_change_partition_9"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -4982,8 +4983,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_9"; -SET SESSION debug="+d,fail_change_partition_10"; +SET SESSION debug_dbug="-d,fail_change_partition_9"; +SET SESSION debug_dbug="+d,fail_change_partition_10"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5099,8 +5100,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_10"; -SET SESSION debug="+d,fail_change_partition_11"; +SET SESSION debug_dbug="-d,fail_change_partition_10"; +SET SESSION debug_dbug="+d,fail_change_partition_11"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5216,8 +5217,8 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_11"; -SET SESSION debug="+d,fail_change_partition_12"; +SET SESSION debug_dbug="-d,fail_change_partition_11"; +SET SESSION debug_dbug="+d,fail_change_partition_12"; CREATE TABLE t1 (a INT, b VARCHAR(64)) ENGINE = 'TokuDB' PARTITION BY LIST (a) @@ -5333,14 +5334,14 @@ a b 4 Original from partition p0 UNLOCK TABLES; DROP TABLE t1; -SET SESSION debug="-d,fail_change_partition_12"; +SET SESSION debug_dbug="-d,fail_change_partition_12"; # # WL#4445: EXCHANGE PARTITION WITH TABLE # Verify ddl_log and TokuDB in case of crashing. call mtr.add_suppression("TokuDB: Warning: allocated tablespace .*, old maximum was "); call mtr.add_suppression("Attempting backtrace. 
You can use the following information to find out"); call mtr.add_suppression("table .* does not exist in the TokuDB internal"); -SET SESSION debug="+d,exchange_partition_abort_1"; +SET SESSION debug_dbug="+d,exchange_partition_abort_1"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5435,8 +5436,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_1"; -SET SESSION debug="+d,exchange_partition_abort_2"; +SET SESSION debug_dbug="-d,exchange_partition_abort_1"; +SET SESSION debug_dbug="+d,exchange_partition_abort_2"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5531,8 +5532,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_2"; -SET SESSION debug="+d,exchange_partition_abort_3"; +SET SESSION debug_dbug="-d,exchange_partition_abort_2"; +SET SESSION debug_dbug="+d,exchange_partition_abort_3"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5627,8 +5628,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_3"; -SET SESSION debug="+d,exchange_partition_abort_4"; +SET SESSION debug_dbug="-d,exchange_partition_abort_3"; +SET SESSION debug_dbug="+d,exchange_partition_abort_4"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5723,8 +5724,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_4"; -SET SESSION debug="+d,exchange_partition_abort_5"; +SET SESSION debug_dbug="-d,exchange_partition_abort_4"; +SET SESSION debug_dbug="+d,exchange_partition_abort_5"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5819,8 +5820,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_5"; -SET SESSION debug="+d,exchange_partition_abort_6"; +SET SESSION debug_dbug="-d,exchange_partition_abort_5"; +SET SESSION debug_dbug="+d,exchange_partition_abort_6"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -5915,8 +5916,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_6"; -SET SESSION debug="+d,exchange_partition_abort_7"; +SET SESSION debug_dbug="-d,exchange_partition_abort_6"; +SET SESSION debug_dbug="+d,exchange_partition_abort_7"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, 
"Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6011,8 +6012,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_7"; -SET SESSION debug="+d,exchange_partition_abort_8"; +SET SESSION debug_dbug="-d,exchange_partition_abort_7"; +SET SESSION debug_dbug="+d,exchange_partition_abort_8"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6107,8 +6108,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_8"; -SET SESSION debug="+d,exchange_partition_abort_9"; +SET SESSION debug_dbug="-d,exchange_partition_abort_8"; +SET SESSION debug_dbug="+d,exchange_partition_abort_9"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6203,8 +6204,8 @@ a b 3 Original from partition p0 4 Original from partition p0 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_abort_9"; -SET SESSION debug="+d,exchange_partition_fail_1"; +SET SESSION debug_dbug="-d,exchange_partition_abort_9"; +SET SESSION debug_dbug="+d,exchange_partition_fail_1"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6295,8 +6296,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_1"; -SET SESSION debug="+d,exchange_partition_fail_2"; +SET SESSION debug_dbug="-d,exchange_partition_fail_1"; +SET SESSION debug_dbug="+d,exchange_partition_fail_2"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6387,8 +6388,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_2"; -SET SESSION debug="+d,exchange_partition_fail_3"; +SET SESSION debug_dbug="-d,exchange_partition_fail_2"; +SET SESSION debug_dbug="+d,exchange_partition_fail_3"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6437,7 +6438,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6479,8 +6480,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_3"; -SET SESSION debug="+d,exchange_partition_fail_4"; +SET SESSION debug_dbug="-d,exchange_partition_fail_3"; +SET SESSION debug_dbug="+d,exchange_partition_fail_4"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, 
"Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6571,8 +6572,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_4"; -SET SESSION debug="+d,exchange_partition_fail_5"; +SET SESSION debug_dbug="-d,exchange_partition_fail_4"; +SET SESSION debug_dbug="+d,exchange_partition_fail_5"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6621,7 +6622,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6663,8 +6664,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_5"; -SET SESSION debug="+d,exchange_partition_fail_6"; +SET SESSION debug_dbug="-d,exchange_partition_fail_5"; +SET SESSION debug_dbug="+d,exchange_partition_fail_6"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6755,8 +6756,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_6"; -SET SESSION debug="+d,exchange_partition_fail_7"; +SET SESSION debug_dbug="-d,exchange_partition_fail_6"; +SET SESSION debug_dbug="+d,exchange_partition_fail_7"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6805,7 +6806,7 @@ a b 3 Original from partition p0 4 Original from partition p0 ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2; -ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 - n/a) +ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)") # State after failure t1.frm t1.par @@ -6847,8 +6848,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_7"; -SET SESSION debug="+d,exchange_partition_fail_8"; +SET SESSION debug_dbug="-d,exchange_partition_fail_7"; +SET SESSION debug_dbug="+d,exchange_partition_fail_8"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE t2; @@ -6939,8 +6940,8 @@ a b 7 Original from table t2 8 Original from table t2 DROP TABLE t2; -SET SESSION debug="-d,exchange_partition_fail_8"; -SET SESSION debug="+d,exchange_partition_fail_9"; +SET SESSION debug_dbug="-d,exchange_partition_fail_8"; +SET SESSION debug_dbug="+d,exchange_partition_fail_9"; CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = TokuDB; INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2"); SHOW CREATE TABLE 
t2;
@@ -7031,4 +7032,4 @@ a b
 3 Original from partition p0
 4 Original from partition p0
 DROP TABLE t2;
-SET SESSION debug="-d,exchange_partition_fail_9";
+SET SESSION debug_dbug="-d,exchange_partition_fail_9";
-- cgit v1.2.1
From f12ebed0a46d3051bbc76d62a8cd73b2b572364b Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Fri, 8 Jul 2016 15:44:47 +0200
Subject: fixes for tokudb_parts --big suite
---
 .../r/part_supported_sql_func_tokudb.result | 32 +-
 .../r/partition_alter1_1_2_tokudb.result | 408 ++--
 .../r/partition_alter1_1_tokudb.result | 328 +--
 .../r/partition_alter1_2_tokudb.result | 616 +++---
 .../r/partition_alter2_1_1_tokudb.result | 360 ++--
 .../r/partition_alter2_1_2_tokudb.result | 360 ++--
 .../r/partition_alter2_2_1_tokudb.result | 360 ++--
 .../r/partition_alter2_2_2_tokudb.result | 360 ++--
 .../tokudb_parts/r/partition_alter4_tokudb.result | 2184 ++++++++++----------
 .../tokudb_parts/r/partition_basic_tokudb.result | 520 ++---
 .../tokudb_parts/r/partition_debug_tokudb.result | 516 ++---
 11 files changed, 2910 insertions(+), 3134 deletions(-)
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
index 5b860845490..0f2532c6f4b 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/part_supported_sql_func_tokudb.result
@@ -619,7 +619,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (abs(col1))
 SUBPARTITIONS 5
@@ -2316,7 +2316,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` int(11) DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (mod(col1,10))
 SUBPARTITIONS 5
@@ -3666,7 +3666,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` date DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (day(col1))
 SUBPARTITIONS 5
@@ -4183,7 +4183,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` date DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (dayofmonth(col1))
 SUBPARTITIONS 5
@@ -4700,7 +4700,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` date DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (dayofweek(col1))
 SUBPARTITIONS 5
@@ -5229,7 +5229,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` date DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib'
 /*!50100 PARTITION BY LIST (colint)
 SUBPARTITION BY HASH (dayofyear(col1))
 SUBPARTITIONS 5
@@ -5748,7 +5748,7 @@ Table Create Table
 t55 CREATE TABLE `t55` (
 `colint` int(11) DEFAULT NULL,
 `col1` date DEFAULT NULL
-) ENGINE=TokuDB DEFAULT CHARSET=latin1
+) ENGINE=TokuDB
DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (extract(month from col1)) SUBPARTITIONS 5 @@ -6267,7 +6267,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` time DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (hour(col1)) SUBPARTITIONS 5 @@ -6792,7 +6792,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` time(6) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (microsecond(col1)) SUBPARTITIONS 5 @@ -7313,7 +7313,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` time DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (minute(col1)) SUBPARTITIONS 5 @@ -7844,7 +7844,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` time DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (second(col1)) SUBPARTITIONS 5 @@ -8375,7 +8375,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` date DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (month(col1)) SUBPARTITIONS 5 @@ -8900,7 +8900,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` date DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (quarter(col1)) SUBPARTITIONS 5 @@ -9423,7 +9423,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` date DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (weekday(col1)) SUBPARTITIONS 5 @@ -9944,7 +9944,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` date DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (year(col1)-1990) SUBPARTITIONS 5 @@ -10469,7 +10469,7 @@ Table Create Table t55 CREATE TABLE `t55` ( `colint` int(11) DEFAULT NULL, `col1` date DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (colint) SUBPARTITION BY HASH (yearweek(col1)-200600) SUBPARTITIONS 5 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result index 3fb51c67d00..865df022890 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( 
-f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 1.2.1 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74,7 +74,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -540,8 +540,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -566,7 +566,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -1032,8 +1032,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1066,7 +1066,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1539,8 +1539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1571,7 +1571,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -2040,8 +2040,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2070,7 +2070,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) 
SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -2539,8 +2539,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2573,7 +2573,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -3049,8 +3049,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3083,7 +3083,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -3561,8 +3561,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3591,7 +3591,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -4062,8 +4062,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4088,7 +4088,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -4554,8 +4554,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4580,7 +4580,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -5046,8 +5046,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5080,7 +5080,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5553,8 +5553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5585,7 +5585,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -6054,8 +6054,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6084,7 +6084,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -6553,8 +6553,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6587,7 +6587,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -7063,8 +7063,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7097,7 +7097,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -7575,8 +7575,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7605,7 +7605,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -8077,8 +8077,8 @@ DROP TABLE t1; # 1.2.2 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, 
f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8097,13 +8097,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -8585,8 +8585,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8605,13 +8605,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -9093,8 +9093,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9121,13 +9121,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9616,8 +9616,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9642,13 +9642,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -10133,8 +10133,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10157,13 +10157,13 @@ create_command SHOW CREATE TABLE t1; 
Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -10648,8 +10648,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10676,13 +10676,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -11174,8 +11174,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11202,13 +11202,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -11702,8 +11702,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11726,13 +11726,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -12219,8 +12219,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12239,13 +12239,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -12727,8 +12727,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12747,13 +12747,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -13235,8 +13235,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13263,13 +13263,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13758,8 +13758,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13784,13 +13784,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -14275,8 +14275,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14299,13 +14299,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, 
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -14790,8 +14790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14818,13 +14818,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -15316,8 +15316,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15344,13 +15344,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -15844,8 +15844,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15868,13 +15868,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -16362,8 +16362,8 @@ DROP TABLE t1; # 1.2.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16389,7 +16389,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -16855,8 +16855,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER 
DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16882,7 +16882,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -17348,8 +17348,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17383,7 +17383,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17856,8 +17856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17889,7 +17889,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -18358,8 +18358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18389,7 +18389,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -18858,8 +18858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18893,7 +18893,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -19369,8 +19369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19404,7 +19404,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -19882,8 +19882,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19913,7 +19913,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -20384,8 +20384,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20411,7 +20411,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -20877,8 +20877,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20904,7 +20904,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -21370,8 +21370,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21405,7 +21405,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -21878,8 +21878,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21911,7 +21911,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -22380,8 +22380,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 
CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22411,7 +22411,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -22880,8 +22880,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22915,7 +22915,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -23391,8 +23391,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23426,7 +23426,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -23904,8 +23904,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23935,7 +23935,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -24406,8 +24406,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24433,7 +24433,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -24899,8 +24899,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24926,7 +24926,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -25392,8 +25392,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) 
DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25427,7 +25427,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -25900,8 +25900,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25933,7 +25933,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -26402,8 +26402,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26433,7 +26433,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -26902,8 +26902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26937,7 +26937,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -27413,8 +27413,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27448,7 +27448,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -27926,8 +27926,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27957,7 +27957,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY 
(`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result index 2cc7b4298fc..f34ca8939af 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; #------------------------------------------------------------------------ DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62,8 +62,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77,8 +77,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -100,8 +100,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -121,8 +121,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -140,8 +140,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -163,8 +163,8 @@ ALTER TABLE t1 ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -186,8 +186,8 @@ ALTER TABLE t1 
ADD PRIMARY KEY(f_int2); ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -206,8 +206,8 @@ ERROR HY000: A PRIMARY KEY must include all columns in the table's partitioning DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -221,8 +221,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -236,8 +236,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -259,8 +259,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -280,8 +280,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -299,8 +299,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -322,8 +322,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -345,8 +345,8 @@ ALTER TABLE t1 ADD UNIQUE INDEX uidx1 (f_int2); ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -366,8 +366,8 @@ DROP TABLE t1; # 1.1.3 PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -392,7 +392,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ 
@@ -858,8 +858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -884,7 +884,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -1350,8 +1350,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1384,7 +1384,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1857,8 +1857,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1889,7 +1889,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -2358,8 +2358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2388,7 +2388,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2859,8 +2859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2893,7 +2893,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -3369,8 +3369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3403,7 +3403,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -3881,8 +3881,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3911,7 +3911,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -4382,8 +4382,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4408,7 +4408,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -4874,8 +4874,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4900,7 +4900,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -5366,8 +5366,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5400,7 +5400,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5873,8 +5873,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5905,7 +5905,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -6374,8 +6374,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ 
-6404,7 +6404,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -6875,8 +6875,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6909,7 +6909,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -7385,8 +7385,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7419,7 +7419,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -7897,8 +7897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7927,7 +7927,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -8399,8 +8399,8 @@ DROP TABLE t1; # 1.1.4 UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8419,13 +8419,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -8907,8 +8907,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8927,13 +8927,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -9415,8 +9415,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9443,13 +9443,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9938,8 +9938,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9964,13 +9964,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -10455,8 +10455,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10479,13 +10479,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -10972,8 +10972,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11000,13 +11000,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -11498,8 +11498,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11526,13 +11526,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -12026,8 +12026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12050,13 +12050,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -12543,8 +12543,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12563,13 +12563,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -13051,8 +13051,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13071,13 +13071,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -13559,8 +13559,8 @@ TRUNCATE t1; # End 
usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13587,13 +13587,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -14082,8 +14082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14108,13 +14108,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -14599,8 +14599,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14623,13 +14623,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -15116,8 +15116,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15144,13 +15144,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -15642,8 +15642,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; 
CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15670,13 +15670,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -16170,8 +16170,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16194,13 +16194,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result index 4aded14f336..a9acdaa23d3 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; # 2.1.5 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72,7 +72,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -487,8 +487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -512,7 +512,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -927,8 +927,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -960,7 +960,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1382,8 +1382,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1413,7 +1413,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1831,8 +1831,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1860,7 +1860,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2280,8 +2280,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2313,7 +2313,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -2738,8 +2738,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2771,7 +2771,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -3198,8 +3198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 
INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3227,7 +3227,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -3647,8 +3647,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3672,7 +3672,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -4087,8 +4087,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4112,7 +4112,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -4527,8 +4527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4560,7 +4560,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -4982,8 +4982,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5013,7 +5013,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5431,8 +5431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5460,7 +5460,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -5880,8 +5880,8 @@ 
TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5913,7 +5913,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -6338,8 +6338,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6371,7 +6371,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -6798,8 +6798,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6827,7 +6827,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -7247,8 +7247,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7267,12 +7267,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -7703,8 +7703,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7723,12 +7723,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -8159,8 +8159,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, 
f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8187,12 +8187,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -8630,8 +8630,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8656,12 +8656,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9095,8 +9095,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9119,12 +9119,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -9560,8 +9560,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9588,12 +9588,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -10034,8 +10034,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10062,12 +10062,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + 
`f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -10510,8 +10510,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10534,12 +10534,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -10979,8 +10979,8 @@ DROP TABLE t1; # 2.2.1 DROP PRIMARY KEY consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11004,7 +11004,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -11419,8 +11419,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11444,7 +11444,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -11859,8 +11859,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11892,7 +11892,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -12314,8 +12314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12345,7 +12345,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -12763,8 +12763,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12792,7 +12792,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -13210,8 +13210,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13243,7 +13243,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -13668,8 +13668,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13701,7 +13701,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -14128,8 +14128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14157,7 +14157,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -14577,8 +14577,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14602,7 +14602,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -15017,8 +15017,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15042,7 +15042,7 @@ t1 CREATE TABLE 
`t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -15457,8 +15457,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15490,7 +15490,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -15912,8 +15912,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15943,7 +15943,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -16361,8 +16361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16390,7 +16390,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -16808,8 +16808,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16841,7 +16841,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -17266,8 +17266,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17299,7 +17299,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -17726,8 +17726,8 @@ TRUNCATE t1; 
# End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17755,7 +17755,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -18176,8 +18176,8 @@ DROP TABLE t1; # 2.2.2 DROP UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18196,12 +18196,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -18632,8 +18632,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18652,12 +18652,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -19088,8 +19088,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19116,12 +19116,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -19559,8 +19559,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19585,12 +19585,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` 
char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -20024,8 +20024,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20048,12 +20048,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -20487,8 +20487,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20515,12 +20515,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -20961,8 +20961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20989,12 +20989,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -21437,8 +21437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21461,12 +21461,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY 
(f_int2) SUBPARTITIONS 3 @@ -21902,8 +21902,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21922,12 +21922,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -22358,8 +22358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22378,12 +22378,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -22814,8 +22814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22842,12 +22842,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -23285,8 +23285,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23311,12 +23311,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -23750,8 +23750,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) 
@@ -23774,12 +23774,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -24213,8 +24213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24241,12 +24241,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -24687,8 +24687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24715,12 +24715,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -25163,8 +25163,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25187,12 +25187,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -25629,8 +25629,8 @@ DROP TABLE t1; # 2.2.3 DROP PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25654,7 +25654,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION 
BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -26069,8 +26069,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26094,7 +26094,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -26509,8 +26509,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26542,7 +26542,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -26964,8 +26964,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26995,7 +26995,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -27413,8 +27413,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27442,7 +27442,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -27860,8 +27860,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27893,7 +27893,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -28318,8 +28318,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28351,7 +28351,7 @@ t1 CREATE TABLE `t1` ( `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -28778,8 +28778,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28807,7 +28807,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -29227,8 +29227,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29252,7 +29252,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -29667,8 +29667,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29692,7 +29692,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -30107,8 +30107,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30140,7 +30140,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -30562,8 +30562,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30593,7 +30593,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -31011,8 +31011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE 
TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31040,7 +31040,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -31458,8 +31458,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31491,7 +31491,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -31916,8 +31916,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31949,7 +31949,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -32376,8 +32376,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32405,7 +32405,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -32825,8 +32825,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32845,12 +32845,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -33281,8 +33281,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33301,12 +33301,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` 
int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -33737,8 +33737,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33765,12 +33765,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -34208,8 +34208,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34234,12 +34234,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -34673,8 +34673,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34697,12 +34697,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -35136,8 +35136,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35164,12 +35164,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -35610,8 +35610,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35638,12 +35638,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -36086,8 +36086,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36110,12 +36110,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result index 6838b33d89d..9decb8c21f5 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 1.1.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,12 +68,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -504,8 +504,8 @@ TRUNCATE 
t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -524,12 +524,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -960,8 +960,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -988,12 +988,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1431,8 +1431,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1457,12 +1457,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1920,12 +1920,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2361,8 +2361,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2389,12 +2389,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) 
DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -2835,8 +2835,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2863,12 +2863,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -3311,8 +3311,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3335,12 +3335,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.1.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3798,12 +3798,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -4237,8 +4237,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4271,8 +4269,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4292,12 +4290,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY 
(`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -4731,8 +4729,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4765,8 +4761,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4794,12 +4790,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5240,8 +5236,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5274,8 +5268,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5301,12 +5295,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5743,8 +5737,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5777,8 +5769,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5802,12 +5794,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) 
SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -6246,8 +6238,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6280,8 +6270,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6309,12 +6299,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -6758,8 +6748,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6792,8 +6780,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6821,12 +6809,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -7272,8 +7260,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7306,8 +7292,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7331,12 +7317,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -7774,8 +7760,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND 
@max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7809,8 +7793,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7830,12 +7814,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -8269,8 +8253,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8303,8 +8285,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8324,12 +8306,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -8763,8 +8745,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8797,8 +8777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8826,12 +8806,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9272,8 +9252,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9306,8 +9284,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER 
DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9333,12 +9311,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9775,8 +9753,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9809,8 +9785,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9834,12 +9810,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -10278,8 +10254,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10312,8 +10286,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10341,12 +10315,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -10790,8 +10764,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10824,8 +10796,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10853,12 +10825,12 @@ 
SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -11304,8 +11276,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11338,8 +11308,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11363,12 +11333,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` bigint(20) NOT NULL, + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -11806,8 +11776,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11842,8 +11810,8 @@ DROP TABLE t1; # 1.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11862,13 +11830,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -12350,8 +12318,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12370,13 +12338,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -12858,8 +12826,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12886,13 +12854,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13381,8 +13349,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13407,13 +13375,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -13898,8 +13866,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13922,13 +13890,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -14415,8 +14383,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14443,13 +14411,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -14941,8 +14909,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) @@ -14969,13 +14937,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -15469,8 +15437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15493,13 +15461,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -15986,8 +15954,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16006,13 +15974,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -16494,8 +16462,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16514,13 +16482,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -17002,8 +16970,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17030,13 +16998,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17525,8 +17493,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17551,13 +17519,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -18042,8 +18010,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18066,13 +18034,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -18559,8 +18527,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18587,13 +18555,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -19085,8 +19053,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19113,13 +19081,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 
1) (PARTITION part1 VALUES IN (0) @@ -19613,8 +19581,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19637,13 +19605,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result index f6e98be3c98..f14da0a5749 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -44,8 +44,8 @@ SET @@session.sql_mode= ''; # 1.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69,7 +69,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -500,8 +500,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -525,7 +525,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -956,8 +956,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -989,7 +989,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 
+ f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1427,8 +1427,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1458,7 +1458,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1892,8 +1892,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1921,7 +1921,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -2355,8 +2355,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2388,7 +2388,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -2829,8 +2829,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2862,7 +2862,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -3305,8 +3305,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3334,7 +3334,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -3771,8 +3771,8 @@ DROP TABLE t1; # 1.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3791,13 +3791,13 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -4231,9 +4231,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4266,8 +4263,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4286,13 +4283,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -4726,9 +4723,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4761,8 +4755,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4789,13 +4783,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5236,9 +5230,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5271,8 +5262,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 
INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5297,13 +5288,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5740,9 +5731,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5775,8 +5763,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5799,13 +5787,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -6242,9 +6230,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6277,8 +6262,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6305,13 +6290,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -6755,9 +6740,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6790,8 +6772,8 @@ 
TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6818,13 +6800,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -7270,9 +7252,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7305,8 +7284,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7329,13 +7308,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -7773,9 +7752,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7809,8 +7785,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7829,13 +7805,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -8269,9 +8245,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP 
TRIGGER trg_3; # check trigger-12 success: 1 @@ -8304,8 +8277,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8324,13 +8297,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -8764,9 +8737,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8799,8 +8769,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8827,13 +8797,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9274,9 +9244,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9309,8 +9276,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9335,13 +9302,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9778,9 +9745,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 
BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9813,8 +9777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9837,13 +9801,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -10280,9 +10244,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10315,8 +10276,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10343,13 +10304,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -10793,9 +10754,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10828,8 +10786,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10856,13 +10814,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 
VALUES IN (0) @@ -11308,9 +11266,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11343,8 +11298,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11367,13 +11322,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` bigint(20) NOT NULL, - `f_int2` bigint(20) NOT NULL, + `f_int1` bigint(20) NOT NULL DEFAULT '0', + `f_int2` bigint(20) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -11811,9 +11766,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11848,8 +11800,8 @@ DROP TABLE t1; # 1.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11874,7 +11826,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -12356,8 +12308,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12382,7 +12334,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -12864,8 +12816,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12898,7 +12850,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13387,8 +13339,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE 
t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13419,7 +13371,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -13904,8 +13856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13934,7 +13886,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -14419,8 +14371,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14453,7 +14405,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -14945,8 +14897,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14979,7 +14931,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -15473,8 +15425,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15503,7 +15455,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -15990,8 +15942,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16016,7 +15968,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT 
CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -16498,8 +16450,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16524,7 +16476,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -17006,8 +16958,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17040,7 +16992,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17529,8 +17481,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17561,7 +17513,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -18046,8 +17998,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18076,7 +18028,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -18561,8 +18513,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18595,7 +18547,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -19087,8 +19039,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 
INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19121,7 +19073,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -19615,8 +19567,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19645,7 +19597,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result index 883c9ec3453..f63805fcab9 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -48,8 +48,8 @@ SET @@session.sql_mode= ''; # 2.1.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,12 +68,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -526,12 +526,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -964,8 +964,8 @@ TRUNCATE 
t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -992,12 +992,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1437,8 +1437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1463,12 +1463,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1902,8 +1902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1926,12 +1926,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2369,8 +2369,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2397,12 +2397,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -2843,8 +2843,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2871,12 +2871,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -3321,8 +3321,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3345,12 +3345,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -3788,8 +3788,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3809,12 +3809,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -4250,8 +4250,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4284,8 +4282,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4305,12 +4303,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -4746,8 +4744,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4780,8 +4776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -4809,12 +4805,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5257,8 +5253,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5291,8 +5285,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5318,12 +5312,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5760,8 +5754,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5794,8 +5786,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5819,12 +5811,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -6265,8 +6257,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6299,8 +6289,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6328,12 +6318,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) 
NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -6777,8 +6767,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6811,8 +6799,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6840,12 +6828,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -7293,8 +7281,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7327,8 +7313,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7352,12 +7338,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -7797,8 +7783,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7832,8 +7816,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7853,12 +7837,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, 
PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -8294,8 +8278,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8328,8 +8310,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8349,12 +8331,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -8790,8 +8772,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8824,8 +8804,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8853,12 +8833,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9301,8 +9281,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9335,8 +9313,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9362,12 +9340,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb 
VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9804,8 +9782,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9838,8 +9814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9863,12 +9839,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -10309,8 +10285,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10343,8 +10317,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10372,12 +10346,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -10821,8 +10795,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10855,8 +10827,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10884,12 +10856,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -11337,8 +11309,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN 
@max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11371,8 +11341,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11396,12 +11366,12 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` mediumint(9) NOT NULL, + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -11841,8 +11811,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11877,8 +11845,8 @@ DROP TABLE t1; # 2.1.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11897,13 +11865,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -12387,8 +12355,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12407,13 +12375,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -12897,8 +12865,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12925,13 +12893,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13422,8 +13390,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13448,13 +13416,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -13939,8 +13907,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13963,13 +13931,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -14458,8 +14426,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14486,13 +14454,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -14984,8 +14952,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15012,13 +14980,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -15514,8 +15482,8 @@ TRUNCATE t1; 
# End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15538,13 +15506,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -16033,8 +16001,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16053,13 +16021,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -16543,8 +16511,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16563,13 +16531,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -17053,8 +17021,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17081,13 +17049,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17578,8 +17546,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17604,13 +17572,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - 
`f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -18095,8 +18063,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18119,13 +18087,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -18614,8 +18582,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18642,13 +18610,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -19140,8 +19108,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19168,13 +19136,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -19670,8 +19638,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19694,13 +19662,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, 
UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result index 993025c9fb2..9d34287f794 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -43,8 +43,8 @@ SET @@session.sql_mode= ''; # 2.3.1 no PRIMARY KEY or UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68,7 +68,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -502,8 +502,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -527,7 +527,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -961,8 +961,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -994,7 +994,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1435,8 +1435,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1466,7 +1466,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1905,8 +1905,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1934,7 +1934,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -2373,8 +2373,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2406,7 +2406,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -2852,8 +2852,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2885,7 +2885,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -3331,8 +3331,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3360,7 +3360,7 @@ t1 CREATE TABLE `t1` ( `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -3800,8 +3800,8 @@ DROP TABLE t1; # 2.3.2 PRIMARY KEY exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3820,13 +3820,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -4263,9 +4263,6 @@ SELECT CAST(f_int1 AS CHAR), 
CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4298,8 +4295,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4318,13 +4315,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -4761,9 +4758,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -4796,8 +4790,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4824,13 +4818,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5274,9 +5268,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5309,8 +5300,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5335,13 +5326,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5783,9 +5774,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -5818,8 +5806,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5842,13 +5830,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -6290,9 +6278,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6325,8 +6310,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6353,13 +6338,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -6808,9 +6793,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -6843,8 +6825,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6871,13 +6853,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) 
DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -7326,9 +7308,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7361,8 +7340,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7385,13 +7364,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -7832,9 +7811,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -7868,8 +7844,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7888,13 +7864,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -8331,9 +8307,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8366,8 +8339,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8386,13 +8359,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` 
mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -8829,9 +8802,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -8864,8 +8834,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8892,13 +8862,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -9342,9 +9312,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9377,8 +9344,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9403,13 +9370,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9851,9 +9818,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -9886,8 +9850,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 
CHAR(20), f_charbig VARCHAR(1000) @@ -9910,13 +9874,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -10358,9 +10322,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10393,8 +10354,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10421,13 +10382,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -10876,9 +10837,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -10911,8 +10869,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10939,13 +10897,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -11394,9 +11352,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11429,8 +11384,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11453,13 +11408,13 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` mediumint(9) NOT NULL, - `f_int2` mediumint(9) NOT NULL, + `f_int1` mediumint(9) NOT NULL DEFAULT '0', + `f_int2` mediumint(9) NOT NULL DEFAULT '0', `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -11900,9 +11855,6 @@ SELECT CAST(f_int1 AS CHAR), CAST(f_int1 AS CHAR), 'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1 ORDER BY f_int1; -Warnings: -Warning 1364 Field 'f_int1' doesn't have a default value -Warning 1364 Field 'f_int2' doesn't have a default value DROP TRIGGER trg_3; # check trigger-12 success: 1 @@ -11937,8 +11889,8 @@ DROP TABLE t1; # 2.3.3 UNIQUE INDEX exists DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11963,7 +11915,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -12448,8 +12400,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12474,7 +12426,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -12959,8 +12911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12993,7 +12945,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13485,8 +13437,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13517,7 +13469,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -14007,8 +13959,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14037,7 +13989,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -14527,8 +14479,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14561,7 +14513,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -15058,8 +15010,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15092,7 +15044,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -15589,8 +15541,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15619,7 +15571,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -16109,8 +16061,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16135,7 +16087,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -16620,8 +16572,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16646,7 
+16598,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -17131,8 +17083,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17165,7 +17117,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17657,8 +17609,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17689,7 +17641,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -18179,8 +18131,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18209,7 +18161,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -18699,8 +18651,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18733,7 +18685,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -19230,8 +19182,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19264,7 +19216,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) 
(PARTITION part1 VALUES IN (0) @@ -19761,8 +19713,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19791,7 +19743,7 @@ t1 CREATE TABLE `t1` ( `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result index 8412c7b37b0..bdec60ed889 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -47,8 +47,8 @@ SET @@session.sql_mode= ''; # 1.1 ALTER ... ANALYZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69,12 +69,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -506,8 +506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -528,12 +528,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -968,8 +968,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -998,12 +998,12 @@ create_command 
SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1441,8 +1441,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1469,12 +1469,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -1908,8 +1908,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1934,12 +1934,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2375,8 +2375,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2405,12 +2405,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -2851,8 +2851,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2881,12 +2881,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, 
`f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -3329,8 +3329,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3355,12 +3355,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -3797,8 +3797,8 @@ DROP TABLE t1; # 1.2 ALTER ... ANALYZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3819,12 +3819,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -4256,8 +4256,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4278,12 +4278,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -4718,8 +4718,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4748,12 +4748,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION 
part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5191,8 +5191,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5219,12 +5219,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5658,8 +5658,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5684,12 +5684,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -6125,8 +6125,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6155,12 +6155,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -6601,8 +6601,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6631,12 +6631,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -7079,8 +7079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), 
f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7105,12 +7105,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -7547,8 +7547,8 @@ DROP TABLE t1; # 1.3 ALTER ... ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7560,8 +7560,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -7570,12 +7569,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -8007,8 +8006,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8020,8 +8019,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -8030,12 +8028,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -8470,8 +8468,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 
INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8491,8 +8489,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -8501,12 +8498,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -8944,8 +8941,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8963,8 +8960,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -8973,12 +8969,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9412,8 +9408,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9429,8 +9425,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -9439,12 +9434,12 @@ create_command SHOW CREATE TABLE t1; Table 
Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -9880,8 +9875,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9901,8 +9896,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -9911,12 +9905,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -10357,8 +10351,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10378,8 +10372,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -10388,12 +10381,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -10836,8 +10829,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10853,8 +10846,7 @@ SELECT 
f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -10863,12 +10855,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -11305,8 +11297,8 @@ DROP TABLE t1; # 1.4 ALTER ... ANALYZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11318,8 +11310,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -11328,12 +11319,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -11765,8 +11756,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11778,8 +11769,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -11788,12 +11778,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, 
`f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -12228,8 +12218,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12249,8 +12239,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -12259,12 +12248,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -12702,8 +12691,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12721,8 +12710,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -12731,12 +12719,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -13170,8 +13158,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13187,8 +13175,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; 
Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -13197,12 +13184,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -13638,8 +13625,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13659,8 +13646,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -13669,12 +13655,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -14115,8 +14101,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14136,8 +14122,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -14146,12 +14131,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY 
LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -14594,8 +14579,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14611,8 +14596,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 ANALYZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 analyze Error Error in list of partitions to test.t1 -test.t1 analyze status Operation failed +test.t1 analyze error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -14621,12 +14605,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -15063,8 +15047,8 @@ DROP TABLE t1; # 1.5 ALTER ... ANALYZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15085,12 +15069,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -15522,8 +15506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15544,12 +15528,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -15984,8 +15968,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16014,12 +15998,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 
NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -16457,8 +16441,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16485,12 +16469,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -16924,8 +16908,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16950,12 +16934,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -17391,8 +17375,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17421,12 +17405,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -17867,8 +17851,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17897,12 +17881,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT 
CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -18345,8 +18329,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18371,12 +18355,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -18816,8 +18800,8 @@ DROP TABLE t1; # 2.1 ALTER ... CHECK PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18838,12 +18822,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -19275,8 +19259,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19297,12 +19281,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -19737,8 +19721,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19767,12 +19751,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -20210,8 +20194,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP 
TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20238,12 +20222,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -20677,8 +20661,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20703,12 +20687,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -21144,8 +21128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21174,12 +21158,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -21620,8 +21604,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21650,12 +21634,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -22098,8 +22082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22124,12 +22108,12 @@ create_command SHOW CREATE TABLE t1; Table 
Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -22566,8 +22550,8 @@ DROP TABLE t1; # 2.2 ALTER ... CHECK PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22588,12 +22572,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -23025,8 +23009,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23047,12 +23031,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -23487,8 +23471,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23517,12 +23501,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -23960,8 +23944,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23988,12 +23972,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -24427,8 +24411,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24453,12 +24437,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -24894,8 +24878,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24924,12 +24908,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -25370,8 +25354,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25400,12 +25384,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -25848,8 +25832,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25874,12 +25858,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -26316,8 
+26300,8 @@ DROP TABLE t1; # 2.3 ALTER ... CHECK PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26329,8 +26313,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -26339,12 +26322,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -26776,8 +26759,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26789,8 +26772,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -26799,12 +26781,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -27239,8 +27221,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27260,8 +27242,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE 
f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -27270,12 +27251,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -27713,8 +27694,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27732,8 +27713,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -27742,12 +27722,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -28181,8 +28161,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28198,8 +28178,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -28208,12 +28187,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -28649,8 +28628,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, 
-f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28670,8 +28649,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -28680,12 +28658,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -29126,8 +29104,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29147,8 +29125,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -29157,12 +29134,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -29605,8 +29582,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29622,8 +29599,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -29632,12 +29608,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` 
int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -30074,8 +30050,8 @@ DROP TABLE t1; # 2.4 ALTER ... CHECK PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30087,8 +30063,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -30097,12 +30072,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -30534,8 +30509,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30547,8 +30522,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -30557,12 +30531,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -30997,8 +30971,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31018,8 +30992,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND 
@max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -31028,12 +31001,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -31471,8 +31444,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31490,8 +31463,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -31500,12 +31472,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -31939,8 +31911,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31956,8 +31928,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -31966,12 +31937,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) 
DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -32407,8 +32378,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32428,8 +32399,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -32438,12 +32408,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -32884,8 +32854,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -32905,8 +32875,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -32915,12 +32884,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -33363,8 +33332,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33380,8 +33349,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 CHECK PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 check Error Error in list of partitions to test.t1 -test.t1 check status Operation failed +test.t1 check error Error in list of 
partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -33390,12 +33358,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -33832,8 +33800,8 @@ DROP TABLE t1; # 2.5 ALTER ... CHECK PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -33854,12 +33822,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -34291,8 +34259,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34313,12 +34281,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -34753,8 +34721,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -34783,12 +34751,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -35226,8 +35194,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35254,12 +35222,12 @@ create_command SHOW CREATE TABLE t1; 
Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -35693,8 +35661,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -35719,12 +35687,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -36160,8 +36128,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36190,12 +36158,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -36636,8 +36604,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -36666,12 +36634,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -37114,8 +37082,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37140,12 +37108,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -37585,8 +37553,8 @@ DROP TABLE t1; # 3.1 ALTER ... OPTIMIZE PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -37608,12 +37576,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -38045,8 +38013,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38068,12 +38036,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -38508,8 +38476,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -38539,12 +38507,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -38982,8 +38950,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39011,12 +38979,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, 
PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -39450,8 +39418,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39477,12 +39445,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -39918,8 +39886,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -39949,12 +39917,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -40395,8 +40363,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40426,12 +40394,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -40874,8 +40842,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -40901,12 +40869,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -41343,8 +41311,8 @@ DROP TABLE t1; # 3.2 ALTER ... 
OPTIMIZE PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41366,12 +41334,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -41803,8 +41771,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -41826,12 +41794,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -42266,8 +42234,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42297,12 +42265,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -42740,8 +42708,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -42769,12 +42737,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -43208,8 +43176,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) @@ -43235,12 +43203,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -43676,8 +43644,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -43707,12 +43675,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -44153,8 +44121,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44184,12 +44152,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -44632,8 +44600,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -44659,12 +44627,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -45101,8 +45069,8 @@ DROP TABLE t1; # 3.3 ALTER ... 
OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45114,8 +45082,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -45124,12 +45091,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -45561,8 +45528,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -45574,8 +45541,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -45584,12 +45550,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -46024,8 +45990,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46045,8 +46011,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE 
f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -46055,12 +46020,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -46498,8 +46463,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46517,8 +46482,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -46527,12 +46491,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -46966,8 +46930,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -46983,8 +46947,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -46993,12 +46956,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -47434,8 +47397,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( 
-f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47455,8 +47418,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -47465,12 +47427,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -47911,8 +47873,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -47932,8 +47894,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -47942,12 +47903,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -48390,8 +48351,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48407,8 +48368,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -48417,12 +48377,12 @@ create_command SHOW CREATE 
TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -48859,8 +48819,8 @@ DROP TABLE t1; # 3.4 ALTER ... OPTIMIZE PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -48872,8 +48832,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -48882,12 +48841,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -49319,8 +49278,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49332,8 +49291,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -49342,12 +49300,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -49782,8 +49740,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -49803,8 +49761,7 @@ SELECT 
f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -49813,12 +49770,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -50256,8 +50213,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50275,8 +50232,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -50285,12 +50241,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -50724,8 +50680,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -50741,8 +50697,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -50751,12 +50706,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + 
`f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -51192,8 +51147,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51213,8 +51168,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -51223,12 +51177,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -51669,8 +51623,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -51690,8 +51644,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -51700,12 +51653,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -52148,8 +52101,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52165,8 +52118,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 OPTIMIZE PARTITION part_1,part_1,part_1; Table Op 
Msg_type Msg_text -test.t1 optimize Error Error in list of partitions to test.t1 -test.t1 optimize status Operation failed +test.t1 optimize error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -52175,12 +52127,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -52617,8 +52569,8 @@ DROP TABLE t1; # 3.5 ALTER ... OPTIMIZE PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -52640,12 +52592,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -53077,8 +53029,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53100,12 +53052,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -53540,8 +53492,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -53571,12 +53523,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -54014,8 +53966,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER 
DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54043,12 +53995,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -54482,8 +54434,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54509,12 +54461,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -54950,8 +54902,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -54981,12 +54933,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -55427,8 +55379,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55458,12 +55410,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -55906,8 +55858,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -55933,12 +55885,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - 
`f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -56378,8 +56330,8 @@ DROP TABLE t1; # 4.1 ALTER ... REBUILD PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56398,12 +56350,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -56835,8 +56787,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -56855,12 +56807,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -57295,8 +57247,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57323,12 +57275,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -57766,8 +57718,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -57792,12 +57744,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -58231,8 +58183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58255,12 +58207,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -58696,8 +58648,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -58724,12 +58676,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -59170,8 +59122,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59198,12 +59150,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -59646,8 +59598,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -59670,12 +59622,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -60112,8 +60064,8 @@ DROP TABLE t1; # 4.2 ALTER ... 
REBUILD PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60132,12 +60084,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -60569,8 +60521,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -60589,12 +60541,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -61029,8 +60981,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61057,12 +61009,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -61500,8 +61452,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -61526,12 +61478,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -61965,8 +61917,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) @@ -61989,12 +61941,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -62430,8 +62382,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62458,12 +62410,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -62904,8 +62856,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -62932,12 +62884,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -63380,8 +63332,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63404,12 +63356,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -63846,8 +63798,8 @@ DROP TABLE t1; # 4.3 ALTER ... 
REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63861,8 +63813,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63876,8 +63828,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63899,8 +63851,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63920,8 +63872,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63939,8 +63891,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63962,8 +63914,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -63985,8 +63937,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_2,part_5,part_6,part_10; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64006,8 +63958,8 @@ DROP TABLE t1; # 4.4 ALTER ... 
REBUILD PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64021,8 +63973,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64036,8 +63988,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64059,8 +64011,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64080,8 +64032,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64099,8 +64051,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64122,8 +64074,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64145,8 +64097,8 @@ ALTER TABLE t1 REBUILD PARTITION part_1,part_1,part_1; ERROR HY000: Error in list of partitions to REBUILD DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64166,8 +64118,8 @@ DROP TABLE t1; # 4.5 ALTER ... 
REBUILD PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64186,12 +64138,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -64623,8 +64575,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -64643,12 +64595,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -65083,8 +65035,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65111,12 +65063,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -65554,8 +65506,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -65580,12 +65532,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -66019,8 +65971,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -66043,12 +65995,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -66484,8 +66436,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66512,12 +66464,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -66958,8 +66910,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -66986,12 +66938,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -67434,8 +67386,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67458,12 +67410,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -67903,8 +67855,8 @@ DROP TABLE t1; # 5.1 ALTER ... 
REPAIR PARTITION part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67925,12 +67877,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -68362,8 +68314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68384,12 +68336,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -68824,8 +68776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -68854,12 +68806,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -69297,8 +69249,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -69325,12 +69277,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -69764,8 +69716,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig 
VARCHAR(1000) @@ -69790,12 +69742,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -70231,8 +70183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70261,12 +70213,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -70707,8 +70659,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -70737,12 +70689,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -71185,8 +71137,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71211,12 +71163,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -71653,8 +71605,8 @@ DROP TABLE t1; # 5.2 ALTER ... 
REPAIR PARTITION part_1,part_2; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -71675,12 +71627,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -72112,8 +72064,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72134,12 +72086,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -72574,8 +72526,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -72604,12 +72556,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -73047,8 +72999,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -73075,12 +73027,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -73514,8 +73466,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), 
f_charbig VARCHAR(1000) @@ -73540,12 +73492,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -73981,8 +73933,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74011,12 +73963,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -74457,8 +74409,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74487,12 +74439,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -74935,8 +74887,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -74961,12 +74913,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -75403,8 +75355,8 @@ DROP TABLE t1; # 5.3 ALTER ... 
REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75416,8 +75368,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -75426,12 +75377,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -75863,8 +75814,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -75876,8 +75827,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -75886,12 +75836,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -76326,8 +76276,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76347,8 +76297,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 
AND @max_row; @@ -76357,12 +76306,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -76800,8 +76749,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -76819,8 +76768,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -76829,12 +76777,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -77268,8 +77216,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77285,8 +77233,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -77295,12 +77242,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -77736,8 +77683,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER 
DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -77757,8 +77704,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -77767,12 +77713,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -78213,8 +78159,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78234,8 +78180,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -78244,12 +78189,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -78692,8 +78637,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -78709,8 +78654,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_2,part_5,part_6,part_10; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -78719,12 +78663,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` 
int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -79161,8 +79105,8 @@ DROP TABLE t1; # 5.4 ALTER ... REPAIR PARTITION part_1,part_1,part_1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79174,8 +79118,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -79184,12 +79127,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -79621,8 +79564,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -79634,8 +79577,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -79644,12 +79586,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -80084,8 +80026,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80105,8 +80047,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND 
@max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -80115,12 +80056,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -80558,8 +80499,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -80577,8 +80518,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -80587,12 +80527,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -81026,8 +80966,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81043,8 +80983,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -81053,12 +80992,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -81494,8 +81433,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81515,8 +81454,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -81525,12 +81463,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -81971,8 +81909,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -81992,8 +81930,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -82002,12 +81939,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -82450,8 +82387,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82467,8 +82404,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1; ALTER TABLE t1 REPAIR PARTITION part_1,part_1,part_1; Table Op Msg_type Msg_text -test.t1 repair Error Error in list of partitions to test.t1 -test.t1 repair status Operation failed +test.t1 
repair error Error in list of partitions to test.t1 INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 AND @max_row; @@ -82477,12 +82413,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -82919,8 +82855,8 @@ DROP TABLE t1; # 5.5 ALTER ... REPAIR PARTITION ALL; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -82941,12 +82877,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB) */ @@ -83378,8 +83314,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83400,12 +83336,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) (PARTITION part_1 ENGINE = TokuDB, PARTITION part_2 ENGINE = TokuDB, @@ -83840,8 +83776,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -83870,12 +83806,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -84313,8 +84249,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84341,12 +84277,12 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION part_1 VALUES LESS THAN (5) ENGINE = TokuDB, @@ -84780,8 +84716,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -84806,12 +84742,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -85247,8 +85183,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85277,12 +85213,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part_1 VALUES LESS THAN (0) @@ -85723,8 +85659,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -85753,12 +85689,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part_1 VALUES IN (0) @@ -86201,8 +86137,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86227,12 +86163,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, 
`f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -86672,8 +86608,8 @@ DROP TABLE t1; # 6.1 ALTER ... REMOVE PARTITIONING; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -86692,12 +86628,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -87126,8 +87062,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87146,12 +87082,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -87580,8 +87516,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -87608,12 +87544,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -88042,8 +87978,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88068,12 +88004,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -88502,8 +88438,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, 
+f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88526,12 +88462,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -88960,8 +88896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -88988,12 +88924,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -89422,8 +89358,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -89450,12 +89386,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 @@ -89884,8 +89820,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -89908,12 +89844,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' # check prerequisites-1 success: 1 # check COUNT(*) success: 1 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result index 8182dce5625..f3fd6b4012e 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result @@ -11,8 +11,8 @@ SELECT @max_row DIV 4 INTO @max_row_div4; SET @max_int_4 = 2147483647; DROP TABLE IF EXISTS t0_template; CREATE TABLE t0_template ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 
CHAR(20), f_charbig VARCHAR(1000) , @@ -27,8 +27,8 @@ file_list VARBINARY(10000), PRIMARY KEY (state) ) ENGINE = MEMORY; DROP TABLE IF EXISTS t0_aux; -CREATE TABLE t0_aux ( f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +CREATE TABLE t0_aux ( f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) ) @@ -52,8 +52,8 @@ SET @@session.sql_mode= ''; # 1.1 The partitioning function contains one column. DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -67,12 +67,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -507,8 +507,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -522,12 +522,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -962,8 +962,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -985,12 +985,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -1432,8 +1432,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1453,12 +1453,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES 
LESS THAN (5) ENGINE = TokuDB, @@ -1896,8 +1896,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -1915,12 +1915,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -2360,8 +2360,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2383,12 +2383,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -2833,8 +2833,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -2860,12 +2860,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -3312,8 +3312,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3331,12 +3331,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -3777,8 +3777,8 @@ DROP TABLE t1; # 1.2 The partitioning function contains two columns. 
DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -3792,12 +3792,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -4232,8 +4232,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4247,12 +4247,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -4687,8 +4687,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -4710,12 +4710,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -5157,8 +5157,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5178,12 +5178,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -5621,8 +5621,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -5640,12 +5640,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE 
TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -6083,8 +6083,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6106,12 +6106,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -6556,8 +6556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -6579,12 +6579,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -7031,8 +7031,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7050,12 +7050,12 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -7500,8 +7500,8 @@ DROP TABLE t1; # 2.5 PRIMARY KEY + UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -7522,7 +7522,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -7992,8 +7992,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8014,7 +8014,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -8484,8 +8484,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -8514,7 +8514,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -8991,8 +8991,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9019,7 +9019,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -9492,8 +9492,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -9518,7 +9518,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -9993,8 +9993,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10023,7 +10023,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -10503,8 +10503,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -10537,7 +10537,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -11019,8 +11019,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11045,7 +11045,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -11520,8 +11520,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -11542,7 +11542,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -12012,8 +12012,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12034,7 +12034,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -12504,8 +12504,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -12534,7 +12534,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -13011,8 +13011,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13039,7 +13039,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -13512,8 +13512,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 
CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -13538,7 +13538,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -14013,8 +14013,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14043,7 +14043,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -14523,8 +14523,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -14557,7 +14557,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -15039,8 +15039,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15065,7 +15065,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -15540,8 +15540,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -15555,14 +15555,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1) PARTITIONS 2 */ @@ -16048,8 +16048,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16063,14 +16063,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` 
int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1) PARTITIONS 5 */ @@ -16556,8 +16556,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -16579,14 +16579,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -17079,8 +17079,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17100,14 +17100,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -17596,8 +17596,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -17615,14 +17615,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1 DIV 2) SUBPARTITION BY HASH (f_int1) SUBPARTITIONS 2 @@ -18113,8 +18113,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18136,14 +18136,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` 
( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int1) (PARTITION part1 VALUES LESS THAN (0) @@ -18639,8 +18639,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -18666,14 +18666,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int1 + 1) (PARTITION part1 VALUES IN (0) @@ -19171,8 +19171,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19190,14 +19190,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int1) SUBPARTITIONS 3 @@ -19693,8 +19693,8 @@ DROP TABLE t1; # 3.3 PRIMARY KEY and UNIQUE INDEX consisting of two columns DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -19715,7 +19715,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -20185,8 +20185,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20207,7 +20207,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -20677,8 +20677,8 @@ 
TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -20707,7 +20707,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -21184,8 +21184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21212,7 +21212,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -21685,8 +21685,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -21711,7 +21711,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -22184,8 +22184,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22214,7 +22214,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -22694,8 +22694,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -22724,7 +22724,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -23206,8 +23206,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23232,7 +23232,7 @@ t1 CREATE TABLE 
`t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int2`,`f_int1`), UNIQUE KEY `uidx1` (`f_int1`,`f_int2`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -23707,8 +23707,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -23729,7 +23729,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -24199,8 +24199,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24221,7 +24221,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -24691,8 +24691,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -24721,7 +24721,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -25198,8 +25198,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25226,7 +25226,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -25699,8 +25699,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -25725,7 +25725,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -26198,8 +26198,8 @@ TRUNCATE t1; # End usability test 
(inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26228,7 +26228,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -26708,8 +26708,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -26738,7 +26738,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -27220,8 +27220,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27246,7 +27246,7 @@ t1 CREATE TABLE `t1` ( `f_charbig` varchar(1000) DEFAULT NULL, PRIMARY KEY (`f_int1`,`f_int2`), UNIQUE KEY `uidx1` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 @@ -27721,8 +27721,8 @@ TRUNCATE t1; DROP TABLE t1; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -27736,14 +27736,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY HASH (f_int1 + f_int2) PARTITIONS 2 */ @@ -28229,8 +28229,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28244,14 +28244,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY KEY (f_int1,f_int2) PARTITIONS 5 */ @@ -28737,8 +28737,8 @@ 
TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -28760,14 +28760,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (MOD(f_int1 + f_int2,4)) (PARTITION part_3 VALUES IN (-3) ENGINE = TokuDB, PARTITION part_2 VALUES IN (-2) ENGINE = TokuDB, @@ -29260,8 +29260,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29281,14 +29281,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE ((f_int1 + f_int2) DIV 2) (PARTITION parta VALUES LESS THAN (0) ENGINE = TokuDB, PARTITION partb VALUES LESS THAN (5) ENGINE = TokuDB, @@ -29777,8 +29777,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -29796,14 +29796,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (f_int1) SUBPARTITION BY HASH (f_int2) SUBPARTITIONS 2 @@ -30292,8 +30292,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30315,14 +30315,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 
PARTITION BY RANGE (f_int1) SUBPARTITION BY KEY (f_int2) (PARTITION part1 VALUES LESS THAN (0) @@ -30818,8 +30818,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -30841,14 +30841,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,3))) SUBPARTITION BY HASH (f_int2 + 1) (PARTITION part1 VALUES IN (0) @@ -31346,8 +31346,8 @@ TRUNCATE t1; # End usability test (inc/partition_check.inc) DROP TABLE t1; CREATE TABLE t1 ( -f_int1 INTEGER DEFAULT 0, -f_int2 INTEGER DEFAULT 0, +f_int1 INTEGER, +f_int2 INTEGER, f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000) @@ -31365,14 +31365,14 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT NULL, + `f_int2` int(11) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, UNIQUE KEY `uidx1` (`f_int1`,`f_int2`), UNIQUE KEY `uidx2` (`f_int2`,`f_int1`) -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (ABS(MOD(f_int1,2))) SUBPARTITION BY KEY (f_int2) SUBPARTITIONS 3 diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result index 9155661d6d9..dcc18cd1487 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result @@ -27,7 +27,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -55,7 +55,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -86,7 +86,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -116,7 +116,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT 
CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -147,7 +147,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -177,7 +177,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -208,7 +208,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -238,7 +238,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -269,7 +269,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -299,7 +299,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -330,7 +330,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -360,7 +360,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -391,7 +391,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -421,7 +421,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -452,7 +452,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -482,7 +482,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -514,7 +514,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -542,7 +542,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -574,7 +574,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -602,7 +602,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -635,7 +635,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -660,7 +660,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT 
CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -690,7 +690,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -716,7 +716,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -748,7 +748,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -773,7 +773,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -803,7 +803,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -829,7 +829,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -861,7 +861,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -886,7 +886,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -916,7 +916,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -942,7 +942,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -974,7 +974,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -999,7 +999,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1029,7 +1029,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1055,7 +1055,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1087,7 +1087,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1112,7 +1112,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1142,7 +1142,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1168,7 +1168,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1200,7 +1200,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1225,7 +1225,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1255,7 +1255,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1281,7 +1281,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1313,7 +1313,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1338,7 +1338,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1368,7 +1368,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1394,7 +1394,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1426,7 +1426,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1451,7 +1451,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1482,7 +1482,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1508,7 +1508,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1541,7 +1541,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1566,7 +1566,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1597,7 +1597,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1623,7 +1623,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1656,7 +1656,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1681,7 +1681,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1712,7 +1712,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1738,7 +1738,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -1772,7 +1772,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1799,7 +1799,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1830,7 +1830,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1859,7 +1859,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1890,7 +1890,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1919,7 +1919,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1950,7 +1950,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 
`compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -1979,7 +1979,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2005,7 +2005,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2034,7 +2034,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2060,7 +2060,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2089,7 +2089,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2115,7 +2115,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2142,7 +2142,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2168,7 +2168,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2195,7 +2195,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2221,7 +2221,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` 
int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2248,7 +2248,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2275,7 +2275,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2299,7 +2299,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2329,7 +2329,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2354,7 +2354,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2386,7 +2386,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2410,7 +2410,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2440,7 +2440,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2465,7 +2465,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) 
ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2497,7 +2497,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2521,7 +2521,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2551,7 +2551,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2576,7 +2576,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2608,7 +2608,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2632,7 +2632,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2657,7 +2657,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2682,7 +2682,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2709,7 +2709,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' 
/*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2733,7 +2733,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2758,7 +2758,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2783,7 +2783,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2810,7 +2810,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2834,7 +2834,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2859,7 +2859,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2884,7 +2884,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2911,7 +2911,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2935,7 +2935,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -2960,7 +2960,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` 
varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -2985,7 +2985,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -3012,7 +3012,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3036,7 +3036,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -3061,7 +3061,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3086,7 +3086,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -3113,7 +3113,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3137,7 +3137,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB) */ SELECT * FROM t1; @@ -3162,7 +3162,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3187,7 +3187,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE 
= TokuDB) */ SELECT * FROM t1; @@ -3216,7 +3216,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3245,7 +3245,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3276,7 +3276,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3307,7 +3307,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3338,7 +3338,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3369,7 +3369,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3400,7 +3400,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3431,7 +3431,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3462,7 +3462,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3493,7 +3493,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3524,7 +3524,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3555,7 +3555,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3586,7 +3586,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3617,7 +3617,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3649,7 +3649,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3680,7 +3680,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3712,7 +3712,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3741,7 +3741,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3773,7 +3773,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3802,7 +3802,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3834,7 +3834,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3863,7 +3863,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3895,7 +3895,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3924,7 +3924,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -3958,7 +3958,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -3984,7 +3984,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4014,7 +4014,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4041,7 +4041,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4073,7 +4073,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4099,7 +4099,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4129,7 +4129,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4156,7 +4156,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4188,7 +4188,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4214,7 +4214,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4244,7 +4244,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4271,7 +4271,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4303,7 +4303,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4329,7 +4329,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4359,7 +4359,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4386,7 +4386,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4418,7 +4418,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4444,7 +4444,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4474,7 +4474,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4501,7 +4501,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4533,7 +4533,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4559,7 +4559,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4589,7 +4589,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4616,7 +4616,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4648,7 +4648,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4674,7 +4674,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4705,7 +4705,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4732,7 +4732,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4765,7 +4765,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4791,7 +4791,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4822,7 +4822,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4849,7 +4849,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4882,7 +4882,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4908,7 +4908,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4939,7 +4939,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -4966,7 +4966,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -4999,7 +4999,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5025,7 +5025,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5056,7 +5056,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5083,7 +5083,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5116,7 +5116,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5142,7 +5142,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5173,7 +5173,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5200,7 +5200,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5233,7 +5233,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5259,7 +5259,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5290,7 +5290,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = TokuDB) */ @@ -5317,7 +5317,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY LIST (a) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = TokuDB, PARTITION p10 VALUES IN 
(10,11,12,13,14,15,16,17,18,19) ENGINE = TokuDB, @@ -5349,7 +5349,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5371,7 +5371,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5404,7 +5404,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5428,7 +5428,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5445,7 +5445,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5467,7 +5467,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5500,7 +5500,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5524,7 +5524,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5541,7 +5541,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5563,7 +5563,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5596,7 +5596,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES 
LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5620,7 +5620,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5637,7 +5637,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5659,7 +5659,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5692,7 +5692,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5716,7 +5716,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5733,7 +5733,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5755,7 +5755,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5788,7 +5788,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5812,7 +5812,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5829,7 +5829,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5851,7 +5851,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES 
LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5884,7 +5884,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5908,7 +5908,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5925,7 +5925,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -5947,7 +5947,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -5980,7 +5980,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6004,7 +6004,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6021,7 +6021,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6043,7 +6043,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6076,7 +6076,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6100,7 +6100,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6117,7 +6117,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6139,7 
+6139,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6172,7 +6172,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6196,7 +6196,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 1 Original from partition p0 @@ -6213,7 +6213,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6235,7 +6235,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6264,7 +6264,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6288,7 +6288,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6305,7 +6305,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6327,7 +6327,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6356,7 +6356,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6380,7 +6380,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * 
FROM t2; a b 5 Original from table t2 @@ -6397,7 +6397,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6419,7 +6419,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6448,7 +6448,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6472,7 +6472,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6489,7 +6489,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6511,7 +6511,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6540,7 +6540,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6564,7 +6564,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6581,7 +6581,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6603,7 +6603,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6632,7 +6632,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN 
(10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6656,7 +6656,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6673,7 +6673,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6695,7 +6695,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6724,7 +6724,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6748,7 +6748,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6765,7 +6765,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6787,7 +6787,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6816,7 +6816,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6840,7 +6840,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6857,7 +6857,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6879,7 +6879,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN 
MAXVALUE ENGINE = TokuDB) */ @@ -6908,7 +6908,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -6932,7 +6932,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6949,7 +6949,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 5 Original from table t2 @@ -6971,7 +6971,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -7000,7 +7000,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' /*!50100 PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (10) ENGINE = TokuDB, PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = TokuDB) */ @@ -7024,7 +7024,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, `b` varchar(64) DEFAULT NULL -) ENGINE=TokuDB DEFAULT CHARSET=latin1 +) ENGINE=TokuDB DEFAULT CHARSET=latin1 `compression`='tokudb_zlib' SELECT * FROM t2; a b 1 Original from partition p0 -- cgit v1.2.1 From 98b1bb0b8e7169030640378d25ad045a0ed1c8ed Mon Sep 17 00:00:00 2001 From: Rik Prohaska Date: Sat, 25 Jun 2016 16:44:48 -0400 Subject: fix MDEV-7225. see https://jira.mariadb.org/browse/MDEV-7225 for details. --- sql-bench/server-cfg.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 892d301c70d..7e83b7365e3 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -204,6 +204,11 @@ sub new $limits{'max_temporary_tables'}= $limits{"max_tables"}; $self->{'transactions'} = 1; # Transactions enabled } + if (defined($main::opt_create_options) && + $main::opt_create_options =~ /engine=tokudb/i) + { + $self->{'transactions'} = 1; # Transactions enabled + } return $self; } -- cgit v1.2.1 From 4b88cf33c25a2597925264aa4de91810fc53f4c4 Mon Sep 17 00:00:00 2001 From: Rik Prohaska Date: Wed, 29 Jun 2016 13:27:51 -0400 Subject: fix sql-bench test-table-elimination view leak. see MDEV-10310 for details --- sql-bench/test-table-elimination.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/sql-bench/test-table-elimination.sh b/sql-bench/test-table-elimination.sh index 7772cab42b0..de440fc0633 100755 --- a/sql-bench/test-table-elimination.sh +++ b/sql-bench/test-table-elimination.sh @@ -305,6 +305,7 @@ if ($opt_lock_tables) if (!$opt_skip_delete) { do_query($dbh,"drop table elim_facts, elim_attr1, elim_attr2" .
$server->{'drop_attr'}); + $dbh->do("drop view elim_current_facts"); } if ($opt_fast && defined($server->{vacuum})) -- cgit v1.2.1 From c5d73186c01998178eaed206e200334b160eb5e8 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 12 Jul 2016 13:02:26 +0200 Subject: MDEV-8227 simple_password_check_minimal_length gets adjusted without a warning --- mysql-test/suite/plugins/r/simple_password_check.result | 4 ++++ plugin/simple_password_check/simple_password_check.c | 16 +++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/mysql-test/suite/plugins/r/simple_password_check.result b/mysql-test/suite/plugins/r/simple_password_check.result index bbb96bb8c38..6848a37999c 100644 --- a/mysql-test/suite/plugins/r/simple_password_check.result +++ b/mysql-test/suite/plugins/r/simple_password_check.result @@ -85,7 +85,11 @@ grant select on *.* to foo1; drop user foo1; set global simple_password_check_digits=3; set global simple_password_check_letters_same_case=3; +Warnings: +Warning 1292 Adjusted the value of simple_password_check_minimal_length from 8 to 10 set global simple_password_check_other_characters=3; +Warnings: +Warning 1292 Adjusted the value of simple_password_check_minimal_length from 10 to 12 show variables like 'simple_password_check_%'; Variable_name Value simple_password_check_digits 3 diff --git a/plugin/simple_password_check/simple_password_check.c b/plugin/simple_password_check/simple_password_check.c index 1b7668204b6..2d2a0ce0e37 100644 --- a/plugin/simple_password_check/simple_password_check.c +++ b/plugin/simple_password_check/simple_password_check.c @@ -14,6 +14,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include +#include #include #include #include @@ -50,12 +52,20 @@ static int validate(MYSQL_LEX_STRING *username, MYSQL_LEX_STRING *password) } static void fix_min_length(MYSQL_THD thd __attribute__((unused)), - struct st_mysql_sys_var *var, + struct st_mysql_sys_var *var __attribute__((unused)), void *var_ptr, const void *save) { + uint new_min_length; *((unsigned int *)var_ptr)= *((unsigned int *)save); - if (min_length < min_digits + 2 * min_letters + min_others) - min_length= min_digits + 2 * min_letters + min_others; + new_min_length= min_digits + 2 * min_letters + min_others; + if (min_length < new_min_length) + { + my_printf_error(ER_TRUNCATED_WRONG_VALUE, + "Adjusted the value of simple_password_check_minimal_length " + "from %u to %u", ME_JUST_WARNING, + min_length, new_min_length); + min_length= new_min_length; + } } static MYSQL_SYSVAR_UINT(minimal_length, min_length, PLUGIN_VAR_RQCMDARG, -- cgit v1.2.1 From 0d5583b41bb500f0efc5ca9b5768eee5d81e803c Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 13 Jul 2016 08:55:20 +0200 Subject: cleanup --- sql/sql_acl.cc | 2 +- sql/sql_delete.cc | 1 - sql/sql_rename.cc | 2 +- sql/sys_vars.ic | 1 - 4 files changed, 2 insertions(+), 4 deletions(-) diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 06e38546f43..23302e1847e 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -31,7 +31,7 @@ #include "sql_base.h" // close_mysql_tables #include "key.h" // key_copy, key_cmp_if_same, key_restore #include "sql_show.h" // append_identifier -#include "sql_table.h" // build_table_filename +#include "sql_table.h" // write_bin_log #include "hash_filo.h" #include "sql_parse.h" // check_access #include "sql_view.h" // VIEW_ANY_ACL diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 
e077073c2df..3252e403226 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -27,7 +27,6 @@ #include "sql_delete.h" #include "sql_cache.h" // query_cache_* #include "sql_base.h" // open_temprary_table -#include "sql_table.h" // build_table_filename #include "lock.h" // unlock_table_name #include "sql_view.h" // check_key_in_view, mysql_frm_type #include "sql_parse.h" // mysql_init_select diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 17b297f63bd..b795f5d7f22 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -24,7 +24,7 @@ #include "unireg.h" #include "sql_rename.h" #include "sql_cache.h" // query_cache_* -#include "sql_table.h" // build_table_filename +#include "sql_table.h" // write_bin_log #include "sql_view.h" // mysql_frm_type, mysql_rename_view #include "sql_trigger.h" #include "lock.h" // MYSQL_OPEN_SKIP_TEMPORARY diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic index 7c62516d52b..334dd318643 100644 --- a/sql/sys_vars.ic +++ b/sql/sys_vars.ic @@ -96,7 +96,6 @@ enum charset_enum {IN_SYSTEM_CHARSET, IN_FS_CHARSET}; static const char *bool_values[3]= {"OFF", "ON", 0}; TYPELIB bool_typelib={ array_elements(bool_values)-1, "", bool_values, 0 }; -extern const char *encrypt_algorithm_names[]; /** A small wrapper class to pass getopt arguments as a pair -- cgit v1.2.1 From 12dc083a453dd12eb6563f622de2db05030f35de Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 12 Jul 2016 13:41:29 +0200 Subject: MDEV-8580 For some BOOLEAN or ENUM sysvars list of valid values is not generated set the 'typelib' member for plugin boolean sysvars --- mysql-test/suite/sys_vars/r/sysvars_aria.result | 6 +- .../suite/sys_vars/r/sysvars_innodb,xtradb.rdiff | 544 +++++++++++++++++++-- .../sys_vars/r/sysvars_server_notembedded.result | 4 +- sql/sql_plugin.cc | 2 + 4 files changed, 513 insertions(+), 43 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_aria.result b/mysql-test/suite/sys_vars/r/sysvars_aria.result index 2948b2ad3fc..1308992e771 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_aria.result +++ b/mysql-test/suite/sys_vars/r/sysvars_aria.result @@ -54,7 +54,7 @@ VARIABLE_COMMENT Encrypt tables (only for tables with ROW_FORMAT=PAGE (default) NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME ARIA_FORCE_START_AFTER_RECOVERY_FAILURES @@ -208,7 +208,7 @@ VARIABLE_COMMENT Maintain page checksums (can be overridden per table with PAGE_ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME ARIA_RECOVER @@ -292,6 +292,6 @@ VARIABLE_COMMENT Whether temporary tables should be MyISAM or Aria NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NULL diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff index 6d95a517b33..e083a8f8c1f 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff @@ -1,7 +1,20 @@ --- suite/sys_vars/r/sysvars_innodb.result +++ suite/sys_vars/r/sysvars_innodb,xtradb.reject -@@ -47,6 +47,20 @@ - ENUM_VALUE_LIST NULL +@@ -16,7 +16,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + 
COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_ADAPTIVE_FLUSHING_LWM +@@ -44,9 +44,23 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_ADAPTIVE_HASH_INDEX_PARTITIONS @@ -21,8 +34,93 @@ VARIABLE_NAME INNODB_ADAPTIVE_MAX_SLEEP_DELAY SESSION_VALUE NULL GLOBAL_VALUE 150000 -@@ -355,6 +369,20 @@ - ENUM_VALUE_LIST NULL +@@ -100,7 +114,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_API_ENABLE_BINLOG +@@ -114,7 +128,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_API_ENABLE_MDL +@@ -128,7 +142,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_API_TRX_LEVEL +@@ -198,7 +212,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BACKGROUND_SCRUB_DATA_INTERVAL +@@ -226,7 +240,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_AT_SHUTDOWN +@@ -240,7 +254,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_NOW +@@ -254,7 +268,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_PCT +@@ -324,7 +338,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_AT_STARTUP +@@ -338,7 +352,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_NOW +@@ -352,9 +366,23 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_BUFFER_POOL_POPULATE @@ -36,13 +134,22 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY YES +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_BUFFER_POOL_SIZE SESSION_VALUE NULL GLOBAL_VALUE 8388608 -@@ -446,7 +474,7 @@ +@@ -394,7 +422,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_CHANGE_BUFFERING +@@ -446,11 +474,11 @@ DEFAULT_VALUE ON VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN @@ -51,6 +158,11 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL 
++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_CHECKSUM_ALGORITHM @@ -467,6 +495,104 @@ ENUM_VALUE_LIST CRC32,STRICT_CRC32,INNODB,STRICT_INNODB,NONE,STRICT_NONE READ_ONLY NO @@ -66,7 +178,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_CLEANER_FLUSH_CHUNK_SIZE @@ -156,6 +268,15 @@ VARIABLE_NAME INNODB_CMP_PER_INDEX_ENABLED SESSION_VALUE NULL GLOBAL_VALUE OFF +@@ -478,7 +604,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_COMMIT_CONCURRENCY @@ -565,6 +691,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -177,6 +298,51 @@ VARIABLE_NAME INNODB_DATA_FILE_PATH SESSION_VALUE NULL GLOBAL_VALUE ibdata1:12M:autoextend +@@ -604,7 +744,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_DEFAULT_ENCRYPTION_KEY_ID +@@ -632,7 +772,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_DEFRAGMENT_FILL_FACTOR +@@ -716,7 +856,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_DISABLE_SORT_FILE_CACHE +@@ -730,7 +870,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_DOUBLEWRITE +@@ -744,7 +884,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_DOUBLEWRITE_BATCH_SIZE @@ -761,6 +901,20 @@ ENUM_VALUE_LIST NULL READ_ONLY YES @@ -198,6 +364,15 @@ VARIABLE_NAME INNODB_ENCRYPTION_ROTATE_KEY_AGE SESSION_VALUE NULL GLOBAL_VALUE 1 +@@ -814,7 +968,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_ENCRYPT_TABLES @@ -831,6 +985,20 @@ ENUM_VALUE_LIST OFF,ON,FORCE READ_ONLY NO @@ -213,12 +388,30 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FAST_SHUTDOWN SESSION_VALUE NULL GLOBAL_VALUE 1 +@@ -884,7 +1052,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_FILE_FORMAT_MAX +@@ -912,7 +1080,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_FIL_MAKE_PAGE_DIRTY_DEBUG @@ -958,11 +1126,11 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL @@ -233,6 +426,24 @@ VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Controls the durability/speed trade-off for commits. 
Set to 0 (write and flush redo log to disk only once per second), 1 (flush to disk at each commit), 2 (write to log at commit but flush to disk only once per second) or 3 (flush to disk at prepare and at commit, slower and usually redundant). 1 and 3 guarantees that after a crash, committed transactions will not be lost and will be consistent with the binlog and other transactional engines. 2 can get inconsistent and lose transactions if there is a power failure or kernel crash but not if mysqld crashes. 0 has no guarantees in case of crash. 0 and 2 can be faster than 1 or 3. NUMERIC_MIN_VALUE 0 +@@ -1010,7 +1178,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_FORCE_PRIMARY_KEY +@@ -1024,7 +1192,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_FORCE_RECOVERY @@ -1055,6 +1223,20 @@ ENUM_VALUE_LIST NULL READ_ONLY YES @@ -254,6 +465,42 @@ VARIABLE_NAME INNODB_FT_AUX_TABLE SESSION_VALUE NULL GLOBAL_VALUE +@@ -1094,7 +1276,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_FT_ENABLE_STOPWORD +@@ -1108,7 +1290,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_FT_MAX_TOKEN_SIZE +@@ -1248,7 +1430,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_INSTRUMENT_SEMAPHORES +@@ -1262,7 +1444,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_IO_CAPACITY @@ -1293,6 +1475,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -275,6 +522,15 @@ VARIABLE_NAME INNODB_LARGE_PREFIX SESSION_VALUE NULL GLOBAL_VALUE OFF +@@ -1304,7 +1500,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG @@ -1321,6 +1517,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -290,12 +546,21 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_LOCKS_UNSAFE_FOR_BINLOG SESSION_VALUE NULL GLOBAL_VALUE OFF +@@ -1332,7 +1542,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_LOCK_WAIT_TIMEOUT @@ -1349,6 +1559,62 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -311,7 +576,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_LOG_ARCH_DIR @@ -359,8 +624,12 @@ VARIABLE_NAME INNODB_LOG_BUFFER_SIZE SESSION_VALUE NULL GLOBAL_VALUE 1048576 -@@ -1377,6 +1643,20 @@ - ENUM_VALUE_LIST NULL +@@ -1374,9 +1640,23 @@ + 
NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_LOG_CHECKSUM_ALGORITHM @@ -380,6 +649,15 @@ VARIABLE_NAME INNODB_LOG_COMPRESSED_PAGES SESSION_VALUE NULL GLOBAL_VALUE OFF +@@ -1388,7 +1668,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_LOG_FILES_IN_GROUP @@ -1447,6 +1727,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -415,10 +693,32 @@ VARIABLE_NAME INNODB_MAX_DIRTY_PAGES_PCT SESSION_VALUE NULL GLOBAL_VALUE 75.000000 -@@ -1713,6 +2021,62 @@ - ENUM_VALUE_LIST NULL +@@ -1654,7 +1962,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_PAGE_HASH_LOCKS +@@ -1696,7 +2004,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_PRINT_ALL_DEADLOCKS +@@ -1710,7 +2018,63 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON ++READ_ONLY NO ++COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_CLEANER +SESSION_VALUE NULL +GLOBAL_VALUE OFF @@ -430,7 +730,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_IO @@ -444,7 +744,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_MASTER @@ -458,7 +758,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_PURGE @@ -472,12 +772,55 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL -+READ_ONLY NO -+COMMAND_LINE_ARGUMENT OPTIONAL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_BATCH_SIZE - SESSION_VALUE NULL - GLOBAL_VALUE 300 +@@ -1738,7 +2102,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_PURGE_STOP_NOW +@@ -1752,7 +2116,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_PURGE_THREADS +@@ -1780,7 +2144,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_READ_AHEAD_THRESHOLD +@@ -1822,7 +2186,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_REPLICATION_DELAY +@@ -1850,7 +2214,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL 
++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_ROLLBACK_SEGMENTS @@ -1881,6 +2245,48 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -527,6 +870,15 @@ VARIABLE_NAME INNODB_SCRUB_LOG SESSION_VALUE NULL GLOBAL_VALUE OFF +@@ -1892,7 +2298,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_SCRUB_LOG_SPEED @@ -1909,6 +2315,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -562,6 +914,15 @@ VARIABLE_NAME INNODB_SIMULATE_COMP_FAILURES SESSION_VALUE NULL GLOBAL_VALUE 0 +@@ -1962,7 +2396,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_STATS_METHOD @@ -1972,7 +2406,7 @@ DEFAULT_VALUE nulls_equal VARIABLE_SCOPE GLOBAL @@ -571,6 +932,78 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL +@@ -2004,7 +2438,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_STATS_PERSISTENT +@@ -2018,7 +2452,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_STATS_PERSISTENT_SAMPLE_PAGES +@@ -2060,7 +2494,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT REQUIRED + VARIABLE_NAME INNODB_STATS_TRANSIENT_SAMPLE_PAGES +@@ -2088,7 +2522,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_STATUS_OUTPUT_LOCKS +@@ -2102,7 +2536,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_STRICT_MODE +@@ -2116,7 +2550,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_SUPPORT_XA +@@ -2130,7 +2564,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_SYNC_ARRAY_SIZE +@@ -2172,7 +2606,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT OPTIONAL + VARIABLE_NAME INNODB_THREAD_CONCURRENCY @@ -2217,6 +2651,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -586,7 +1019,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_TRACK_REDO_LOG_NOW @@ -600,13 +1033,22 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_TRX_PURGE_VIEW_UPDATE_ONLY_DEBUG SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -2294,7 +2756,7 @@ +@@ -2228,7 +2690,7 @@ + 
NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO + COMMAND_LINE_ARGUMENT NULL + VARIABLE_NAME INNODB_TRX_RSEG_N_SLOTS_DEBUG +@@ -2294,11 +2756,11 @@ DEFAULT_VALUE OFF VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN @@ -615,8 +1057,17 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -@@ -2315,6 +2777,20 @@ - ENUM_VALUE_LIST NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_USE_FALLOCATE +@@ -2312,9 +2774,23 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_USE_GLOBAL_FLUSH_LOG_AT_TRX_COMMIT @@ -630,16 +1081,20 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_MTFLUSH SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -2329,6 +2805,20 @@ - ENUM_VALUE_LIST NULL - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE +@@ -2326,7 +2802,21 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON ++READ_ONLY YES ++COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_USE_STACKTRACE +SESSION_VALUE NULL +GLOBAL_VALUE OFF @@ -651,13 +1106,26 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL -+READ_ONLY YES -+COMMAND_LINE_ARGUMENT NONE ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_SYS_MALLOC - SESSION_VALUE NULL - GLOBAL_VALUE ON -@@ -2359,12 +2849,12 @@ +@@ -2340,7 +2830,7 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE + VARIABLE_NAME INNODB_USE_TRIM +@@ -2354,17 +2844,17 @@ + NUMERIC_MIN_VALUE NULL + NUMERIC_MAX_VALUE NULL + NUMERIC_BLOCK_SIZE NULL +-ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON + READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index e33decf0f0d..0797110926e 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -230,7 +230,7 @@ VARIABLE_COMMENT Run fast part of group commit in a single thread, to optimize k NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME BINLOG_ROW_IMAGE @@ -2470,7 +2470,7 @@ VARIABLE_COMMENT Use memory mapping for reading and writing MyISAM tables NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME MYSQL56_TEMPORAL_FORMAT diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 850d44f83e3..60248f3fef4 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -3575,6 +3575,7 @@ void plugin_opt_set_limits(struct my_option *options, case PLUGIN_VAR_BOOL: options->var_type= GET_BOOL; options->def_value= ((sysvar_bool_t*) opt)->def_val; + options->typelib= &bool_typelib; break; case PLUGIN_VAR_STR: options->var_type= ((opt->flags & 
PLUGIN_VAR_MEMALLOC) ? @@ -3623,6 +3624,7 @@ void plugin_opt_set_limits(struct my_option *options, case PLUGIN_VAR_BOOL | PLUGIN_VAR_THDLOCAL: options->var_type= GET_BOOL; options->def_value= ((thdvar_bool_t*) opt)->def_val; + options->typelib= &bool_typelib; break; case PLUGIN_VAR_STR | PLUGIN_VAR_THDLOCAL: options->var_type= ((opt->flags & PLUGIN_VAR_MEMALLOC) ? -- cgit v1.2.1 From 64583629aded09f74d6d14178475fe6b2c849c00 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 12 Jul 2016 16:36:43 +0200 Subject: MDEV-9588 Mariadb client-only build creates a useless mysqld_safe file --- scripts/CMakeLists.txt | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 759104c040a..d960f792a95 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -278,6 +278,15 @@ ELSE() wsrep_sst_xtrabackup-v2 ) ENDIF() + IF (NOT WITHOUT_SERVER) + SET(SERVER_SCRIPTS + mysql_fix_extensions + mysql_zap + mysqld_multi + mysqld_safe + mysqldumpslow + ) + ENDIF() # Configure this one, for testing, but do not install it. CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/mysql_config.pl.in ${CMAKE_CURRENT_BINARY_DIR}/mysql_config.pl ESCAPE_QUOTES @ONLY) @@ -285,18 +294,14 @@ ELSE() SET(BIN_SCRIPTS msql2mysql mysql_config - mysql_fix_extensions mysql_setpermission mysql_secure_installation - mysql_zap mysqlaccess mysql_convert_table_format mysql_find_rows mytop mysqlhotcopy - mysqldumpslow - mysqld_multi - mysqld_safe + ${SERVER_SCRIPTS} ${WSREP_BINARIES} ${SYSTEMD_SCRIPTS} ) -- cgit v1.2.1 From 12ac3ee11e8bc433432a2b3ba1bb79ea2e2d9965 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 13 Jul 2016 14:47:58 +0300 Subject: Update test results: make innodb_ext_key test stable --- mysql-test/r/innodb_ext_key.result | 4 ---- mysql-test/t/innodb_ext_key.test | 10 +++++++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result index c0b91c88d8a..f800676c863 100644 --- a/mysql-test/r/innodb_ext_key.result +++ b/mysql-test/r/innodb_ext_key.result @@ -1059,10 +1059,6 @@ concat('1234567890-', 1000+ A.a + 10*B.a + 100*C.a), repeat('filler-data-', 4) from t0 A, t0 B, t0 C; -# The following must use type=ALL (and NOT type=ref, rows=1) -explain select * from t1 where col1='1234567890-a'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ALL idx1 NULL NULL NULL # Using where drop table t0,t1; # # MDEV-10360: Extended keys: index properties depend on index order diff --git a/mysql-test/t/innodb_ext_key.test b/mysql-test/t/innodb_ext_key.test index ebea442d8ca..bf94b7dd3d5 100644 --- a/mysql-test/t/innodb_ext_key.test +++ b/mysql-test/t/innodb_ext_key.test @@ -715,9 +715,13 @@ select from t0 A, t0 B, t0 C; ---echo # The following must use type=ALL (and NOT type=ref, rows=1) ---replace_column 9 # -explain select * from t1 where col1='1234567890-a'; +let $q=explain select * from t1 where col1='1234567890-a'; +let $rows=query_get_value($q, rows, 1); +if ($rows < 2) +{ + --echo The EXPLAIN should not produce a query plan with type=ref, rows=1 + --die Fix for MDEV-10325 didnt work; +} drop table t0,t1; -- cgit v1.2.1 From 1168c1a3b984fb6b9cdf376392a7fcd301800162 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Thu, 14 Jul 2016 03:55:33 +0300 Subject: Fix embedded and 32-bit test results after MDEV-8580 --- .../suite/sys_vars/r/sysvars_debug,32bit.rdiff | 13 - .../sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff | 28 +- 
.../suite/sys_vars/r/sysvars_innodb,xtradb.rdiff | 518 +-------------------- mysql-test/suite/sys_vars/r/sysvars_innodb.result | 114 ++--- .../sys_vars/r/sysvars_server_embedded.result | 4 +- 5 files changed, 98 insertions(+), 579 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff index 799ad90a474..3164d094528 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_debug,32bit.rdiff @@ -9,16 +9,3 @@ VARIABLE_COMMENT Extra sleep (in microseconds) to add to binlog fsync(), for debugging NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 4294967295 -@@ -35,10 +35,10 @@ - GLOBAL_VALUE_ORIGIN COMPILE-TIME - DEFAULT_VALUE 0 - VARIABLE_SCOPE GLOBAL --VARIABLE_TYPE BIGINT UNSIGNED -+VARIABLE_TYPE INT UNSIGNED - VARIABLE_COMMENT Call my_debug_put_break_here() if crc matches this number (for debug) - NUMERIC_MIN_VALUE 0 --NUMERIC_MAX_VALUE 18446744073709551615 -+NUMERIC_MAX_VALUE 4294967295 - NUMERIC_BLOCK_SIZE 1 - ENUM_VALUE_LIST NULL - READ_ONLY NO diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff index 4d507126872..f2d46fe0120 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit,xtradb.rdiff @@ -111,7 +111,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY YES +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_BUFFER_POOL_SIZE @@ -150,7 +150,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_CLEANER_FLUSH_CHUNK_SIZE @@ -343,7 +343,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FAST_SHUTDOWN @@ -578,7 +578,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_LOCKS_UNSAFE_FOR_BINLOG @@ -624,7 +624,7 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL - ENUM_VALUE_LIST NULL + ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL -VARIABLE_NAME INNODB_LOG_COMPRESSED_PAGES @@ -695,7 +695,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_LOG_CHECKSUM_ALGORITHM @@ -850,7 +850,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_IO @@ -864,7 +864,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_MASTER @@ -878,7 +878,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_PURGE @@ -892,7 +892,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL 
++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_BATCH_SIZE @@ -1134,7 +1134,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_TRACK_REDO_LOG_NOW @@ -1148,7 +1148,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_TRX_PURGE_VIEW_UPDATE_ONLY_DEBUG @@ -1196,7 +1196,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_MTFLUSH @@ -1217,7 +1217,7 @@ +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL -+ENUM_VALUE_LIST NULL ++ENUM_VALUE_LIST OFF,ON +READ_ONLY YES +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_SYS_MALLOC diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff index e083a8f8c1f..f9679c9f6f4 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,xtradb.rdiff @@ -1,20 +1,7 @@ --- suite/sys_vars/r/sysvars_innodb.result +++ suite/sys_vars/r/sysvars_innodb,xtradb.reject -@@ -16,7 +16,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_ADAPTIVE_FLUSHING_LWM -@@ -44,9 +44,23 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON +@@ -47,6 +47,20 @@ + ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_ADAPTIVE_HASH_INDEX_PARTITIONS @@ -34,93 +21,8 @@ VARIABLE_NAME INNODB_ADAPTIVE_MAX_SLEEP_DELAY SESSION_VALUE NULL GLOBAL_VALUE 150000 -@@ -100,7 +114,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_API_ENABLE_BINLOG -@@ -114,7 +128,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_API_ENABLE_MDL -@@ -128,7 +142,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_API_TRX_LEVEL -@@ -198,7 +212,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BACKGROUND_SCRUB_DATA_INTERVAL -@@ -226,7 +240,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_AT_SHUTDOWN -@@ -240,7 +254,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_NOW -@@ -254,7 +268,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL 
--ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_PCT -@@ -324,7 +338,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_AT_STARTUP -@@ -338,7 +352,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_NOW -@@ -352,9 +366,23 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON +@@ -355,6 +369,20 @@ + ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_BUFFER_POOL_POPULATE @@ -140,16 +42,7 @@ VARIABLE_NAME INNODB_BUFFER_POOL_SIZE SESSION_VALUE NULL GLOBAL_VALUE 8388608 -@@ -394,7 +422,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_CHANGE_BUFFERING -@@ -446,11 +474,11 @@ +@@ -446,7 +474,7 @@ DEFAULT_VALUE ON VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN @@ -158,11 +51,6 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_CHECKSUM_ALGORITHM @@ -467,6 +495,104 @@ ENUM_VALUE_LIST CRC32,STRICT_CRC32,INNODB,STRICT_INNODB,NONE,STRICT_NONE READ_ONLY NO @@ -268,15 +156,6 @@ VARIABLE_NAME INNODB_CMP_PER_INDEX_ENABLED SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -478,7 +604,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_COMMIT_CONCURRENCY @@ -565,6 +691,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -298,51 +177,6 @@ VARIABLE_NAME INNODB_DATA_FILE_PATH SESSION_VALUE NULL GLOBAL_VALUE ibdata1:12M:autoextend -@@ -604,7 +744,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_DEFAULT_ENCRYPTION_KEY_ID -@@ -632,7 +772,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_DEFRAGMENT_FILL_FACTOR -@@ -716,7 +856,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_DISABLE_SORT_FILE_CACHE -@@ -730,7 +870,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_DOUBLEWRITE -@@ -744,7 +884,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_DOUBLEWRITE_BATCH_SIZE @@ -761,6 +901,20 @@ ENUM_VALUE_LIST NULL READ_ONLY YES @@ -364,15 +198,6 @@ VARIABLE_NAME INNODB_ENCRYPTION_ROTATE_KEY_AGE SESSION_VALUE NULL GLOBAL_VALUE 1 -@@ -814,7 +968,7 @@ - NUMERIC_MIN_VALUE 
NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_ENCRYPT_TABLES @@ -831,6 +985,20 @@ ENUM_VALUE_LIST OFF,ON,FORCE READ_ONLY NO @@ -394,24 +219,6 @@ VARIABLE_NAME INNODB_FAST_SHUTDOWN SESSION_VALUE NULL GLOBAL_VALUE 1 -@@ -884,7 +1052,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_FILE_FORMAT_MAX -@@ -912,7 +1080,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_FIL_MAKE_PAGE_DIRTY_DEBUG @@ -958,11 +1126,11 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL @@ -426,24 +233,6 @@ VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Controls the durability/speed trade-off for commits. Set to 0 (write and flush redo log to disk only once per second), 1 (flush to disk at each commit), 2 (write to log at commit but flush to disk only once per second) or 3 (flush to disk at prepare and at commit, slower and usually redundant). 1 and 3 guarantees that after a crash, committed transactions will not be lost and will be consistent with the binlog and other transactional engines. 2 can get inconsistent and lose transactions if there is a power failure or kernel crash but not if mysqld crashes. 0 has no guarantees in case of crash. 0 and 2 can be faster than 1 or 3. NUMERIC_MIN_VALUE 0 -@@ -1010,7 +1178,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_FORCE_PRIMARY_KEY -@@ -1024,7 +1192,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_FORCE_RECOVERY @@ -1055,6 +1223,20 @@ ENUM_VALUE_LIST NULL READ_ONLY YES @@ -465,42 +254,6 @@ VARIABLE_NAME INNODB_FT_AUX_TABLE SESSION_VALUE NULL GLOBAL_VALUE -@@ -1094,7 +1276,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_FT_ENABLE_STOPWORD -@@ -1108,7 +1290,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_FT_MAX_TOKEN_SIZE -@@ -1248,7 +1430,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_INSTRUMENT_SEMAPHORES -@@ -1262,7 +1444,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_IO_CAPACITY @@ -1293,6 +1475,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -522,15 +275,6 @@ VARIABLE_NAME INNODB_LARGE_PREFIX SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -1304,7 +1500,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG @@ 
-1321,6 +1517,20 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -552,15 +296,6 @@ VARIABLE_NAME INNODB_LOCKS_UNSAFE_FOR_BINLOG SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -1332,7 +1542,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_LOCK_WAIT_TIMEOUT @@ -1349,6 +1559,62 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -624,12 +359,8 @@ VARIABLE_NAME INNODB_LOG_BUFFER_SIZE SESSION_VALUE NULL GLOBAL_VALUE 1048576 -@@ -1374,9 +1640,23 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON +@@ -1377,6 +1643,20 @@ + ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_LOG_CHECKSUM_ALGORITHM @@ -649,15 +380,6 @@ VARIABLE_NAME INNODB_LOG_COMPRESSED_PAGES SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -1388,7 +1668,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_LOG_FILES_IN_GROUP @@ -1447,6 +1727,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -693,32 +415,10 @@ VARIABLE_NAME INNODB_MAX_DIRTY_PAGES_PCT SESSION_VALUE NULL GLOBAL_VALUE 75.000000 -@@ -1654,7 +1962,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_PAGE_HASH_LOCKS -@@ -1696,7 +2004,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON +@@ -1713,6 +2021,62 @@ + ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_PRINT_ALL_DEADLOCKS -@@ -1710,7 +2018,63 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON -+READ_ONLY NO -+COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PRIORITY_CLEANER +SESSION_VALUE NULL +GLOBAL_VALUE OFF @@ -773,54 +473,11 @@ +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL ++READ_ONLY NO ++COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_BATCH_SIZE -@@ -1738,7 +2102,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_PURGE_STOP_NOW -@@ -1752,7 +2116,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_PURGE_THREADS -@@ -1780,7 +2144,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_READ_AHEAD_THRESHOLD -@@ -1822,7 +2186,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_REPLICATION_DELAY -@@ -1850,7 +2214,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_ROLLBACK_SEGMENTS + SESSION_VALUE NULL + GLOBAL_VALUE 300 
@@ -1881,6 +2245,48 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -870,15 +527,6 @@ VARIABLE_NAME INNODB_SCRUB_LOG SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -1892,7 +2298,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_SCRUB_LOG_SPEED @@ -1909,6 +2315,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -914,15 +562,6 @@ VARIABLE_NAME INNODB_SIMULATE_COMP_FAILURES SESSION_VALUE NULL GLOBAL_VALUE 0 -@@ -1962,7 +2396,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_STATS_METHOD @@ -1972,7 +2406,7 @@ DEFAULT_VALUE nulls_equal VARIABLE_SCOPE GLOBAL @@ -932,78 +571,6 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -@@ -2004,7 +2438,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_STATS_PERSISTENT -@@ -2018,7 +2452,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_STATS_PERSISTENT_SAMPLE_PAGES -@@ -2060,7 +2494,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT REQUIRED - VARIABLE_NAME INNODB_STATS_TRANSIENT_SAMPLE_PAGES -@@ -2088,7 +2522,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_STATUS_OUTPUT_LOCKS -@@ -2102,7 +2536,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_STRICT_MODE -@@ -2116,7 +2550,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_SUPPORT_XA -@@ -2130,7 +2564,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_SYNC_ARRAY_SIZE -@@ -2172,7 +2606,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT OPTIONAL - VARIABLE_NAME INNODB_THREAD_CONCURRENCY @@ -2217,6 +2651,34 @@ ENUM_VALUE_LIST NULL READ_ONLY NO @@ -1039,16 +606,7 @@ VARIABLE_NAME INNODB_TRX_PURGE_VIEW_UPDATE_ONLY_DEBUG SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -2228,7 +2690,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO - COMMAND_LINE_ARGUMENT NULL - VARIABLE_NAME INNODB_TRX_RSEG_N_SLOTS_DEBUG -@@ -2294,11 +2756,11 @@ +@@ -2294,7 +2756,7 @@ DEFAULT_VALUE OFF VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN @@ -1057,17 +615,8 @@ NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME 
INNODB_USE_FALLOCATE -@@ -2312,9 +2774,23 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON +@@ -2315,6 +2777,20 @@ + ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_USE_GLOBAL_FLUSH_LOG_AT_TRX_COMMIT @@ -1087,14 +636,10 @@ VARIABLE_NAME INNODB_USE_MTFLUSH SESSION_VALUE NULL GLOBAL_VALUE OFF -@@ -2326,7 +2802,21 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON -+READ_ONLY YES -+COMMAND_LINE_ARGUMENT NONE +@@ -2329,6 +2805,20 @@ + ENUM_VALUE_LIST NULL + READ_ONLY YES + COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_USE_STACKTRACE +SESSION_VALUE NULL +GLOBAL_VALUE OFF @@ -1107,25 +652,12 @@ +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE ++READ_ONLY YES ++COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_SYS_MALLOC -@@ -2340,7 +2830,7 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY YES - COMMAND_LINE_ARGUMENT NONE - VARIABLE_NAME INNODB_USE_TRIM -@@ -2354,17 +2844,17 @@ - NUMERIC_MIN_VALUE NULL - NUMERIC_MAX_VALUE NULL - NUMERIC_BLOCK_SIZE NULL --ENUM_VALUE_LIST NULL -+ENUM_VALUE_LIST OFF,ON - READ_ONLY NO + SESSION_VALUE NULL + GLOBAL_VALUE ON +@@ -2359,12 +2849,12 @@ COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 9f92ea99437..ea7c8b5433e 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -16,7 +16,7 @@ VARIABLE_COMMENT Attempt flushing dirty pages to avoid IO bursts at checkpoints. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_ADAPTIVE_FLUSHING_LWM @@ -44,7 +44,7 @@ VARIABLE_COMMENT Enable InnoDB adaptive hash index (enabled by default). 
Disabl NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_ADAPTIVE_MAX_SLEEP_DELAY @@ -100,7 +100,7 @@ VARIABLE_COMMENT Disable row lock when direct access InnoDB through InnoDB APIs NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_API_ENABLE_BINLOG @@ -114,7 +114,7 @@ VARIABLE_COMMENT Enable binlog for applications direct access InnoDB through Inn NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_API_ENABLE_MDL @@ -128,7 +128,7 @@ VARIABLE_COMMENT Enable MDL for applications direct access InnoDB through InnoDB NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_API_TRX_LEVEL @@ -198,7 +198,7 @@ VARIABLE_COMMENT Enable scrubbing of compressed data by background threads (same NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BACKGROUND_SCRUB_DATA_INTERVAL @@ -226,7 +226,7 @@ VARIABLE_COMMENT Enable scrubbing of uncompressed data by background threads (sa NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_AT_SHUTDOWN @@ -240,7 +240,7 @@ VARIABLE_COMMENT Dump the buffer pool into a file named @@innodb_buffer_pool_fil NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_NOW @@ -254,7 +254,7 @@ VARIABLE_COMMENT Trigger an immediate dump of the buffer pool into a file named NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_PCT @@ -324,7 +324,7 @@ VARIABLE_COMMENT Abort a currently running load of the buffer pool NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_AT_STARTUP @@ -338,7 +338,7 @@ VARIABLE_COMMENT Load the buffer pool from a file named @@innodb_buffer_pool_fil NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_NOW @@ -352,7 +352,7 @@ VARIABLE_COMMENT Trigger an immediate load of the buffer pool from a file named NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_SIZE @@ -394,7 +394,7 @@ VARIABLE_COMMENT Force dirty page flush now NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_CHANGE_BUFFERING @@ -450,7 +450,7 @@ VARIABLE_COMMENT DEPRECATED. 
Use innodb_checksum_algorithm=NONE instead of setti NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_CHECKSUM_ALGORITHM @@ -478,7 +478,7 @@ VARIABLE_COMMENT Enable INFORMATION_SCHEMA.innodb_cmp_per_index, may have negati NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_COMMIT_CONCURRENCY @@ -604,7 +604,7 @@ VARIABLE_COMMENT Perform extra scrubbing to increase test exposure NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_DEFAULT_ENCRYPTION_KEY_ID @@ -632,7 +632,7 @@ VARIABLE_COMMENT Enable/disable InnoDB defragmentation (default FALSE). When set NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_DEFRAGMENT_FILL_FACTOR @@ -716,7 +716,7 @@ VARIABLE_COMMENT Disable change buffering merges by the master thread NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_DISABLE_SORT_FILE_CACHE @@ -730,7 +730,7 @@ VARIABLE_COMMENT Whether to disable OS system file cache for sort I/O NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_DOUBLEWRITE @@ -744,7 +744,7 @@ VARIABLE_COMMENT Enable InnoDB doublewrite buffer (enabled by default). Disable NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_DOUBLEWRITE_BATCH_SIZE @@ -814,7 +814,7 @@ VARIABLE_COMMENT Enable redo log encryption NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_ENCRYPT_TABLES @@ -884,7 +884,7 @@ VARIABLE_COMMENT Whether to perform system file format check. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_FILE_FORMAT_MAX @@ -912,7 +912,7 @@ VARIABLE_COMMENT Stores each InnoDB table to an .ibd file in the database dir. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_FIL_MAKE_PAGE_DIRTY_DEBUG @@ -1010,7 +1010,7 @@ VARIABLE_COMMENT Force InnoDB to load metadata of corrupted table. 
NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_FORCE_PRIMARY_KEY @@ -1024,7 +1024,7 @@ VARIABLE_COMMENT Do not allow to create table without primary key (off by defaul NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FORCE_RECOVERY @@ -1094,7 +1094,7 @@ VARIABLE_COMMENT Whether to enable additional FTS diagnostic printout NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FT_ENABLE_STOPWORD @@ -1108,7 +1108,7 @@ VARIABLE_COMMENT Create FTS index with stopword. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_FT_MAX_TOKEN_SIZE @@ -1248,7 +1248,7 @@ VARIABLE_COMMENT Enable scrubbing of data NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_INSTRUMENT_SEMAPHORES @@ -1262,7 +1262,7 @@ VARIABLE_COMMENT Enable semaphore request instrumentation. This could have some NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_IO_CAPACITY @@ -1304,7 +1304,7 @@ VARIABLE_COMMENT Support large index prefix length of REC_VERSION_56_MAX_INDEX_C NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG @@ -1332,7 +1332,7 @@ VARIABLE_COMMENT DEPRECATED. This option may be removed in future releases. Plea NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_LOCK_WAIT_TIMEOUT @@ -1374,7 +1374,7 @@ VARIABLE_COMMENT Force checkpoint now NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_LOG_COMPRESSED_PAGES @@ -1388,7 +1388,7 @@ VARIABLE_COMMENT Enables/disables the logging of entire compressed page images. 
NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_LOG_FILES_IN_GROUP @@ -1654,7 +1654,7 @@ VARIABLE_COMMENT Only optimize the Fulltext index of the table NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_PAGE_HASH_LOCKS @@ -1696,7 +1696,7 @@ VARIABLE_COMMENT Enable prefix optimization to sometimes avoid cluster index loo NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PRINT_ALL_DEADLOCKS @@ -1710,7 +1710,7 @@ VARIABLE_COMMENT Print all deadlocks to MySQL error log (off by default) NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_BATCH_SIZE @@ -1738,7 +1738,7 @@ VARIABLE_COMMENT Set purge state to RUN NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_STOP_NOW @@ -1752,7 +1752,7 @@ VARIABLE_COMMENT Set purge state to STOP NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_THREADS @@ -1780,7 +1780,7 @@ VARIABLE_COMMENT Whether to use read ahead for random access within an extent. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_READ_AHEAD_THRESHOLD @@ -1822,7 +1822,7 @@ VARIABLE_COMMENT Start InnoDB in read only mode (off by default) NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_REPLICATION_DELAY @@ -1850,7 +1850,7 @@ VARIABLE_COMMENT Roll back the complete transaction on lock wait timeout, for 4. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_ROLLBACK_SEGMENTS @@ -1892,7 +1892,7 @@ VARIABLE_COMMENT Enable background redo log (ib_logfile0, ib_logfile1...) 
scrubb NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_SCRUB_LOG_SPEED @@ -1962,7 +1962,7 @@ VARIABLE_COMMENT InnoDB automatic recalculation of persistent statistics enabled NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_STATS_METHOD @@ -2004,7 +2004,7 @@ VARIABLE_COMMENT Enable statistics gathering for metadata commands such as SHOW NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_STATS_PERSISTENT @@ -2018,7 +2018,7 @@ VARIABLE_COMMENT InnoDB persistent statistics enabled for all tables unless over NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_STATS_PERSISTENT_SAMPLE_PAGES @@ -2060,7 +2060,7 @@ VARIABLE_COMMENT Enable traditional statistic calculation based on number of con NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_STATS_TRANSIENT_SAMPLE_PAGES @@ -2088,7 +2088,7 @@ VARIABLE_COMMENT Enable InnoDB monitor output to the error log. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_STATUS_OUTPUT_LOCKS @@ -2102,7 +2102,7 @@ VARIABLE_COMMENT Enable InnoDB lock monitor output to the error log. Requires in NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_STRICT_MODE @@ -2116,7 +2116,7 @@ VARIABLE_COMMENT Use strict mode when evaluating create options. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_SUPPORT_XA @@ -2130,7 +2130,7 @@ VARIABLE_COMMENT Enable InnoDB support for the XA two-phase commit NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_SYNC_ARRAY_SIZE @@ -2172,7 +2172,7 @@ VARIABLE_COMMENT Enable InnoDB locking in LOCK TABLES NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_THREAD_CONCURRENCY @@ -2228,7 +2228,7 @@ VARIABLE_COMMENT Pause actual purging any delete-marked records, but merely upda NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NULL VARIABLE_NAME INNODB_TRX_RSEG_N_SLOTS_DEBUG @@ -2298,7 +2298,7 @@ VARIABLE_COMMENT Prevent partial page writes, via atomic writes.The option is us NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_FALLOCATE @@ -2312,7 +2312,7 @@ VARIABLE_COMMENT Preallocate files fast, using operating system functionality. 
O NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_MTFLUSH @@ -2326,7 +2326,7 @@ VARIABLE_COMMENT Use multi-threaded flush. Default FALSE. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_SYS_MALLOC @@ -2340,7 +2340,7 @@ VARIABLE_COMMENT DEPRECATED. This option may be removed in future releases, toge NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_TRIM @@ -2354,7 +2354,7 @@ VARIABLE_COMMENT Use trim. Default FALSE. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 8b373d7d77f..ae61832eb0a 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -230,7 +230,7 @@ VARIABLE_COMMENT Run fast part of group commit in a single thread, to optimize k NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME BINLOG_ROW_IMAGE @@ -2274,7 +2274,7 @@ VARIABLE_COMMENT Use memory mapping for reading and writing MyISAM tables NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL +ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME MYSQL56_TEMPORAL_FORMAT -- cgit v1.2.1 From 44012db6350b51ad7e78f286dfa6e5d4ae84f807 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 14 Jul 2016 20:12:22 +0200 Subject: All changes made on 10.1 for last 11 commits --- storage/connect/ApacheInterface.java | 58 ++ storage/connect/CMakeLists.txt | 32 +- storage/connect/Client.java | 183 +++++ storage/connect/JdbcApacheInterface.class | Bin 15357 -> 0 bytes storage/connect/JdbcApacheInterface.java | 709 -------------------- storage/connect/JdbcDSInterface.class | Bin 16175 -> 0 bytes storage/connect/JdbcDSInterface.java | 743 --------------------- storage/connect/JdbcInterface.class | Bin 15215 -> 0 bytes storage/connect/JdbcInterface.java | 105 ++- storage/connect/MariadbInterface.java | 69 ++ storage/connect/MysqlInterface.java | 69 ++ storage/connect/OracleInterface.java | 69 ++ storage/connect/PostgresqlInterface.java | 69 ++ storage/connect/connect.h | 3 +- storage/connect/csort.cpp | 13 +- storage/connect/ha_connect.cc | 45 +- storage/connect/jdbconn.cpp | 233 ++++--- storage/connect/jdbconn.h | 13 +- storage/connect/jsonudf.cpp | 12 +- storage/connect/mysql-test/connect/r/jdbc.result | 20 +- .../connect/mysql-test/connect/r/jdbc_new.result | 21 +- .../mysql-test/connect/std_data/JdbcMariaDB.jar | Bin 0 -> 5993273 bytes storage/connect/mysql-test/connect/t/jdbc.test | 12 +- storage/connect/mysql-test/connect/t/jdbc_new.test | 5 + storage/connect/mysql-test/connect/t/jdbconn.inc | 13 +- .../mysql-test/connect/t/jdbconn_cleanup.inc | 2 + storage/connect/odbccat.h | 2 +- storage/connect/odbconn.cpp | 6 +- storage/connect/odbconn.h | 2 +- storage/connect/tabjdbc.cpp | 24 +- storage/connect/tabjdbc.h 
| 11 +- storage/connect/tabodbc.cpp | 29 +- storage/connect/tabodbc.h | 15 +- 33 files changed, 910 insertions(+), 1677 deletions(-) create mode 100644 storage/connect/ApacheInterface.java create mode 100644 storage/connect/Client.java delete mode 100644 storage/connect/JdbcApacheInterface.class delete mode 100644 storage/connect/JdbcApacheInterface.java delete mode 100644 storage/connect/JdbcDSInterface.class delete mode 100644 storage/connect/JdbcDSInterface.java delete mode 100644 storage/connect/JdbcInterface.class create mode 100644 storage/connect/MariadbInterface.java create mode 100644 storage/connect/MysqlInterface.java create mode 100644 storage/connect/OracleInterface.java create mode 100644 storage/connect/PostgresqlInterface.java create mode 100644 storage/connect/mysql-test/connect/std_data/JdbcMariaDB.jar diff --git a/storage/connect/ApacheInterface.java b/storage/connect/ApacheInterface.java new file mode 100644 index 00000000000..b4c8a4e9885 --- /dev/null +++ b/storage/connect/ApacheInterface.java @@ -0,0 +1,58 @@ +package wrappers; + +import java.sql.*; +import java.util.Hashtable; +import org.apache.commons.dbcp2.BasicDataSource; + +public class ApacheInterface extends JdbcInterface { + static Hashtable pool = new Hashtable(); + + public ApacheInterface() { + this(true); + } // end of default constructor + + public ApacheInterface(boolean b) { + super(b); + } // end of constructor + + @Override + public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { + int rc = 0; + String url = parms[1]; + BasicDataSource ds = null; + + if (DEBUG) + System.out.println("Connecting to Apache data source"); + + try { + CheckURL(url, null); + + if ((ds = pool.get(url)) == null) { + ds = new BasicDataSource(); + ds.setDriverClassName(parms[0]); + ds.setUrl(url); + ds.setUsername(parms[2]); + ds.setPassword(parms[3]); + pool.put(url, ds); + } // endif ds + + // Get a connection from the data source + conn = ds.getConnection(); + + // Get the data base meta data object + dbmd = conn.getMetaData(); + + // Get a statement from the connection + stmt = GetStmt(fsize, scrollable); + } catch (SQLException se) { + SetErrmsg(se); + rc = -2; + } catch (Exception e) { + SetErrmsg(e); + rc = -3; + } // end try/catch + + return rc; + } // end of JdbcConnect + +} // end of class ApacheInterface diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 254d074612a..95d88538119 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -235,25 +235,29 @@ ENDIF(CONNECT_WITH_ODBC) # # JDBC # - -OPTION(CONNECT_WITH_JDBC "Compile CONNECT storage engine with JDBC support" ON) +IF(APPLE) + OPTION(CONNECT_WITH_JDBC "some comment" OFF) +ELSE() + OPTION(CONNECT_WITH_JDBC "some comment" ON) +ENDIF() IF(CONNECT_WITH_JDBC) - # TODO: detect Java SDK and the presence of JDBC connectors - # TODO: Find how to compile and install the java wrapper class - # Find required libraries and include directories - - FIND_PACKAGE(Java) - FIND_PACKAGE(JNI) + FIND_PACKAGE(Java 1.6) + FIND_PACKAGE(JNI) IF (JAVA_FOUND AND JNI_FOUND) + INCLUDE(UseJava) INCLUDE_DIRECTORIES(${JAVA_INCLUDE_PATH}) INCLUDE_DIRECTORIES(${JAVA_INCLUDE_PATH2}) - # SET(JDBC_LIBRARY ${JAVA_JVM_LIBRARY}) - SET(CONNECT_SOURCES ${CONNECT_SOURCES} - JdbcInterface.java JdbcInterface.class - JdbcDSInterface.java JdbcDSInterface.class - JdbcApacheInterface.java JdbcApacheInterface.class - jdbconn.cpp tabjdbc.cpp jdbconn.h tabjdbc.h jdbccat.h) + # SET(JDBC_LIBRARY ${JAVA_JVM_LIBRARY}) will be dynamically 
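The new ApacheInterface wrapper above caches one BasicDataSource per JDBC URL in a static Hashtable, so repeated JdbcConnect calls for the same URL reuse the same Apache Commons DBCP2 connection pool. Outside the wrapper, the same pooling pattern looks roughly like the sketch below; the driver class, URL and credentials are placeholders, not values taken from the patch.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import org.apache.commons.dbcp2.BasicDataSource;

public class Dbcp2PoolSketch {
    public static void main(String[] args) throws Exception {
        // Configure the pool once, as ApacheInterface.JdbcConnect does per URL.
        BasicDataSource ds = new BasicDataSource();
        ds.setDriverClassName("org.mariadb.jdbc.Driver");  // placeholder driver
        ds.setUrl("jdbc:mariadb://localhost:3306/test");   // placeholder URL
        ds.setUsername("user");                            // placeholder
        ds.setPassword("secret");                          // placeholder

        // Each getConnection() call borrows a connection from the pool.
        try (Connection conn = ds.getConnection();
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
            while (rs.next())
                System.out.println(rs.getInt(1));
        }

        ds.close(); // shuts the pool down
    }
}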
linked + SET(CONNECT_SOURCES ${CONNECT_SOURCES} + jdbconn.cpp tabjdbc.cpp jdbconn.h tabjdbc.h jdbccat.h + JdbcInterface.java ApacheInterface.java MariadbInterface.java + MysqlInterface.java OracleInterface.java PostgresqlInterface.java) + # TODO: Find how to compile and install the java wrapper classes + # Find required libraries and include directories + SET (JAVA_SOURCES JdbcInterface.java) + add_jar(JdbcInterface ${JAVA_SOURCES}) + install_jar(JdbcInterface DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) add_definitions(-DJDBC_SUPPORT) ELSE() SET(JDBC_LIBRARY "") diff --git a/storage/connect/Client.java b/storage/connect/Client.java new file mode 100644 index 00000000000..aaf1b7bf2f8 --- /dev/null +++ b/storage/connect/Client.java @@ -0,0 +1,183 @@ +package wrappers; + +import java.io.BufferedReader; +import java.io.Console; +import java.io.IOException; +import java.io.InputStreamReader; + +public class Client { + static boolean DEBUG = true; + static final Console c = System.console(); + static JdbcInterface jdi = null; + + public static void main(String[] args) { + int rc, n, ncol, i = 0, fsize = 0; + boolean scrollable = false; + String s; + String[] parms = new String[4]; + + if (args.length > 0) + try { + i = Integer.parseInt(args[i]); + } catch (NumberFormatException e) { + i = 0; + } // end try/catch + + switch (i) { + case 1: + jdi = new ApacheInterface(DEBUG); + break; + case 2: + jdi = new MysqlInterface(DEBUG); + break; + case 3: + jdi = new MariadbInterface(DEBUG); + break; + case 4: + jdi = new OracleInterface(DEBUG); + break; + case 5: + jdi = new PostgresqlInterface(DEBUG); + break; + default: + jdi = new JdbcInterface(DEBUG); + } // endswitch i + + parms[0] = getLine("Driver: ", false); + parms[1] = getLine("URL: ", false); + parms[2] = getLine("User: ", false); + parms[3] = getLine("Password: ", true); + s = getLine("Fsize: ", false); + fsize = (s != null) ? Integer.parseInt(s) : 0; + s = getLine("Scrollable: ", false); + scrollable = (s != null) ? 
s.toLowerCase().charAt(0) != 'n' : false; + + rc = jdi.JdbcConnect(parms, fsize, scrollable); + + if (rc == 0) { + String query; + System.out.println("Successfully connected to " + parms[1]); + + while ((query = getLine("Query: ", false)) != null) { + n = jdi.Execute(query); + System.out.println("Returned n = " + n); + + if ((ncol = jdi.GetResult()) > 0) + PrintResult(ncol); + else + System.out.println("Affected rows = " + n); + + } // endwhile + + rc = jdi.JdbcDisconnect(); + System.out.println("Disconnect returned " + rc); + } else + System.out.println(jdi.GetErrmsg() + " rc=" + rc); + + } // end of main + + private static void PrintResult(int ncol) { + // Get result set meta data + int i; + String columnName; + + // Get the column names; column indices start from 1 + for (i = 1; i <= ncol; i++) { + columnName = jdi.ColumnName(i); + + if (columnName == null) + return; + + // Get the name of the column's table name + //String tableName = rsmd.getTableName(i); + + if (i > 1) + System.out.print("\t"); + + System.out.print(columnName); + } // endfor i + + System.out.println(); + + // Loop through the result set + while (jdi.ReadNext() > 0) { + for (i = 1; i <= ncol; i++) { + if (i > 1) + System.out.print("\t"); + + if (DEBUG) + System.out.print("(" + jdi.ColumnType(i, null) + ")"); + + switch (jdi.ColumnType(i, null)) { + case java.sql.Types.VARCHAR: + case java.sql.Types.LONGVARCHAR: + case java.sql.Types.CHAR: + System.out.print(jdi.StringField(i, null)); + break; + case java.sql.Types.INTEGER: + System.out.print(jdi.IntField(i, null)); + break; + case java.sql.Types.BIGINT: + System.out.print(jdi.BigintField(i, null)); + break; + case java.sql.Types.TIMESTAMP: + System.out.print(jdi.TimestampField(i, null)); + break; + case java.sql.Types.TIME: + System.out.print(jdi.TimeField(i, null)); + break; + case java.sql.Types.DATE: + System.out.print(jdi.DateField(i, null)); + break; + case java.sql.Types.SMALLINT: + System.out.print(jdi.IntField(i, null)); + break; + case java.sql.Types.DOUBLE: + case java.sql.Types.REAL: + case java.sql.Types.FLOAT: + case java.sql.Types.DECIMAL: + System.out.print(jdi.DoubleField(i, null)); + break; + case java.sql.Types.BOOLEAN: + System.out.print(jdi.BooleanField(i, null)); + default: + break; + } // endswitch Type + + } // endfor i + + System.out.println(); + } // end while rs + + } // end of PrintResult + + // ================================================================== + private static String getLine(String p, boolean b) { + String response; + + if (c != null) { + // Standard console mode + if (b) { + response = new String(c.readPassword(p)); + } else + response = c.readLine(p); + + } else { + // For instance when testing from Eclipse + BufferedReader in = new BufferedReader(new InputStreamReader(System.in)); + + System.out.print(p); + + try { + // Cannot suppress echo for password entry + response = in.readLine(); + } catch (IOException e) { + response = ""; + } // end of try/catch + + } // endif c + + return (response.isEmpty()) ? 
null : response; + } // end of getLine + +} // end of class Client diff --git a/storage/connect/JdbcApacheInterface.class b/storage/connect/JdbcApacheInterface.class deleted file mode 100644 index acd4258e3d3..00000000000 Binary files a/storage/connect/JdbcApacheInterface.class and /dev/null differ diff --git a/storage/connect/JdbcApacheInterface.java b/storage/connect/JdbcApacheInterface.java deleted file mode 100644 index fdbc5bff203..00000000000 --- a/storage/connect/JdbcApacheInterface.java +++ /dev/null @@ -1,709 +0,0 @@ -import java.math.*; -import java.sql.*; -import java.util.Collections; -import java.util.Hashtable; -import java.util.List; - -import org.apache.commons.dbcp2.BasicDataSource; - -public class JdbcApacheInterface { - boolean DEBUG = false; - String Errmsg = "No error"; - Connection conn = null; - DatabaseMetaData dbmd = null; - Statement stmt = null; - PreparedStatement pstmt = null; - ResultSet rs = null; - ResultSetMetaData rsmd = null; - static Hashtable pool = new Hashtable(); - - // === Constructors/finalize ========================================= - public JdbcApacheInterface() { - this(true); - } // end of default constructor - - public JdbcApacheInterface(boolean b) { - DEBUG = b; - } // end of constructor - - private void SetErrmsg(Exception e) { - if (DEBUG) - System.out.println(e.getMessage()); - - Errmsg = e.toString(); - } // end of SetErrmsg - - private void SetErrmsg(String s) { - if (DEBUG) - System.out.println(s); - - Errmsg = s; - } // end of SetErrmsg - - public String GetErrmsg() { - String err = Errmsg; - - Errmsg = "No error"; - return err; - } // end of GetErrmsg - - public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { - int rc = 0; - String url = parms[1]; - BasicDataSource ds = null; - - if (url == null) { - SetErrmsg("URL cannot be null"); - return -1; - } // endif url - - try { - if ((ds = pool.get(url)) == null) { - ds = new BasicDataSource(); - ds.setDriverClassName(parms[0]); - ds.setUrl(url); - ds.setUsername(parms[2]); - ds.setPassword(parms[3]); - pool.put(url, ds); - } // endif ds - - // Get a connection from the data source - conn = ds.getConnection(); - - // Get the data base meta data object - dbmd = conn.getMetaData(); - - // Get a statement from the connection - if (scrollable) - stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY); - else - stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY); - - if (DEBUG) - System.out.println("Statement type = " + stmt.getResultSetType() - + " concurrency = " + stmt.getResultSetConcurrency()); - - if (DEBUG) // Get the fetch size of a statement - System.out.println("Default fetch size = " + stmt.getFetchSize()); - - if (fsize != 0) { - // Set the fetch size - stmt.setFetchSize(fsize); - - if (DEBUG) - System.out.println("New fetch size = " + stmt.getFetchSize()); - - } // endif fsize - - } catch (SQLException se) { - SetErrmsg(se); - rc = -2; - } catch( Exception e ) { - SetErrmsg(e); - rc = -3; - } // end try/catch - - return rc; - } // end of JdbcConnect - - public int CreatePrepStmt(String sql) { - int rc = 0; - - try { - pstmt = conn.prepareStatement(sql); - } catch (SQLException se) { - SetErrmsg(se); - rc = -1; - } catch (Exception e) { - SetErrmsg(e); - rc = -2; - } // end try/catch - - return rc; - } // end of CreatePrepStmt - - public void SetStringParm(int i, String s) { - try { - pstmt.setString(i, s); - } catch (Exception e) { - SetErrmsg(e); - } // end 
try/catch - - } // end of SetStringParm - - public void SetIntParm(int i, int n) { - try { - pstmt.setInt(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetIntParm - - public void SetShortParm(int i, short n) { - try { - pstmt.setShort(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetShortParm - - public void SetBigintParm(int i, long n) { - try { - pstmt.setLong(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetBigintParm - - public void SetFloatParm(int i, float f) { - try { - pstmt.setFloat(i, f); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetFloatParm - - public void SetDoubleParm(int i, double d) { - try { - pstmt.setDouble(i, d); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetDoubleParm - - public void SetTimestampParm(int i, Timestamp t) { - try { - pstmt.setTimestamp(i, t); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetTimestampParm - - public int ExecutePrep() { - int n = -3; - - if (pstmt != null) try { - n = pstmt.executeUpdate(); - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of ExecutePrep - - public boolean ClosePrepStmt() { - boolean b = false; - - if (pstmt != null) try { - pstmt.close(); - pstmt = null; - } catch (SQLException se) { - SetErrmsg(se); - b = true; - } catch (Exception e) { - SetErrmsg(e); - b = true; - } // end try/catch - - return b; - } // end of ClosePrepStmt - - public int JdbcDisconnect() { - int rc = 0; - - // Cancel pending statement - if (stmt != null) - try { - System.out.println("Cancelling statement"); - stmt.cancel(); - } catch(SQLException se) { - SetErrmsg(se); - rc += 1; - } // nothing more we can do - - // Close the statement and the connection - if (rs != null) - try { - if (DEBUG) - System.out.println("Closing result set"); - - rs.close(); - } catch(SQLException se) { - SetErrmsg(se); - rc = 2; - } // nothing more we can do - - if (stmt != null) - try { - if (DEBUG) - System.out.println("Closing statement"); - - stmt.close(); - } catch(SQLException se) { - SetErrmsg(se); - rc += 4; - } // nothing more we can do - - ClosePrepStmt(); - - if (conn != null) - try { - if (DEBUG) - System.out.println("Closing connection"); - - conn.close(); - } catch (SQLException se) { - SetErrmsg(se); - rc += 8; - } //end try/catch - - if (DEBUG) - System.out.println("All closed"); - - return rc; - } // end of JdbcDisconnect - - public int GetMaxValue(int n) { - int m = 0; - - try { - switch (n) { - case 1: // Max columns in table - m = dbmd.getMaxColumnsInTable(); - break; - case 2: // Max catalog name length - m = dbmd.getMaxCatalogNameLength(); - break; - case 3: // Max schema name length - m = dbmd.getMaxSchemaNameLength(); - break; - case 4: // Max table name length - m = dbmd.getMaxTableNameLength(); - break; - case 5: // Max column name length - m = dbmd.getMaxColumnNameLength(); - break; - } // endswitch n - - } catch(Exception e) { - SetErrmsg(e); - m = -1; - } // end try/catch - - return m; - } // end of GetMaxValue - - public int GetColumns(String[] parms) { - int ncol = 0; - - try { - if (rs != null) rs.close(); - rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - } // endif rs - - } catch(SQLException se) { - SetErrmsg(se); - } // 
end try/catch - - return ncol; - } // end of GetColumns - - public int GetTables(String[] parms) { - int ncol = 0; - String[] typ = null; - - if (parms[3] != null) { - typ = new String[1]; - typ[0] = parms[3]; - } // endif parms - - try { - if (rs != null) rs.close(); - rs = dbmd.getTables(parms[0], parms[1], parms[2], typ); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - } // endif rs - - } catch(SQLException se) { - SetErrmsg(se); - } // end try/catch - - return ncol; - } // end of GetColumns - - public int Execute(String query) { - int n = 0; - - if (DEBUG) - System.out.println("Executing '" + query + "'"); - - try { - boolean b = stmt.execute(query); - - if (b == false) { - n = stmt.getUpdateCount(); - if (rs != null) rs.close(); - } // endif b - - if (DEBUG) - System.out.println("Query '" + query + "' executed: n = " + n); - - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of Execute - - public int GetResult() { - int ncol = 0; - - try { - rs = stmt.getResultSet(); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - - if (DEBUG) - System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)"); - - } // endif rs - - } catch (SQLException se) { - SetErrmsg(se); - ncol = -1; - } catch (Exception e) { - SetErrmsg(e); - ncol = -2; - } //end try/catch - - return ncol; - } // end of GetResult - - public int ExecuteQuery(String query) { - int ncol = 0; - - if (DEBUG) - System.out.println("Executing query '" + query + "'"); - - try { - rs = stmt.executeQuery(query); - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - - if (DEBUG) { - System.out.println("Query '" + query + "' executed successfully"); - System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)"); - } // endif DEBUG - - } catch (SQLException se) { - SetErrmsg(se); - ncol = -1; - } catch (Exception e) { - SetErrmsg(e); - ncol = -2; - } //end try/catch - - return ncol; - } // end of ExecuteQuery - - public int ExecuteUpdate(String query) { - int n = 0; - - if (DEBUG) - System.out.println("Executing update query '" + query + "'"); - - try { - n = stmt.executeUpdate(query); - - if (DEBUG) - System.out.println("Update Query '" + query + "' executed: n = " + n); - - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of ExecuteUpdate - - public int ReadNext() { - if (rs != null) { - try { - return rs.next() ? 
1 : 0; - } catch (SQLException se) { - SetErrmsg(se); - return -1; - } //end try/catch - - } else - return 0; - - } // end of ReadNext - - public boolean Fetch(int row) { - if (rs != null) { - try { - return rs.absolute(row); - } catch (SQLException se) { - SetErrmsg(se); - return false; - } //end try/catch - - } else - return false; - - } // end of Fetch - - public String ColumnName(int n) { - if (rsmd == null) { - System.out.println("No result metadata"); - } else try { - return rsmd.getColumnLabel(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ColumnName - - public int ColumnType(int n, String name) { - if (rsmd == null) { - System.out.println("No result metadata"); - } else try { - if (n == 0) - n = rs.findColumn(name); - - return rsmd.getColumnType(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 666; // Not a type - } // end of ColumnType - - public String ColumnDesc(int n, int[] val) { - if (rsmd == null) { - System.out.println("No result metadata"); - return null; - } else try { - val[0] = rsmd.getColumnType(n); - val[1] = rsmd.getPrecision(n); - val[2] = rsmd.getScale(n); - val[3] = rsmd.isNullable(n); - return rsmd.getColumnLabel(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ColumnDesc - - public String StringField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getString(n) : rs.getString(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of StringField - - public int IntField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getInt(n) : rs.getInt(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of IntField - - public long BigintField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - BigDecimal bigDecimal = (n > 0) ? rs.getBigDecimal(n) : rs.getBigDecimal(name); - return bigDecimal != null ? bigDecimal.longValue() : 0; - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of BiginttField - - public double DoubleField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getDouble(n) : rs.getDouble(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0.; - } // end of DoubleField - - public float FloatField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getFloat(n) : rs.getFloat(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of FloatField - - public boolean BooleanField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getBoolean(n) : rs.getBoolean(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return false; - } // end of BooleanField - - public Date DateField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? 
rs.getDate(n) : rs.getDate(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of DateField - - public Time TimeField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getTime(n) : rs.getTime(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of TimeField - - public Timestamp TimestampField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of TimestampField - - public String ObjectField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString(); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ObjectField - - public int GetDrivers(String[] s, int mxs) { - int n = 0; - List drivers = Collections.list(DriverManager.getDrivers()); - int size = Math.min(mxs, drivers.size()); - - for (int i = 0; i < size; i++) { - Driver driver = (Driver)drivers.get(i); - - // Get name of driver - s[n++] = driver.getClass().getName(); - - // Get version info - s[n++] = driver.getMajorVersion() + "." + driver.getMinorVersion(); - s[n++] = driver.jdbcCompliant() ? "Yes" : "No"; - s[n++] = driver.toString(); - } // endfor i - - return size; - } // end of GetDrivers - - /** - * Adds the specified path to the java library path - * from Fahd Shariff blog - * - * @param pathToAdd the path to add - static public int addLibraryPath(String pathToAdd) { - System.out.println("jpath = " + pathToAdd); - - try { - Field usrPathsField = ClassLoader.class.getDeclaredField("usr_paths"); - usrPathsField.setAccessible(true); - - //get array of paths - String[] paths = (String[])usrPathsField.get(null); - - //check if the path to add is already present - for (String path : paths) { - System.out.println("path = " + path); - - if (path.equals(pathToAdd)) - return -5; - - } // endfor path - - //add the new path - String[] newPaths = Arrays.copyOf(paths, paths.length + 1); - newPaths[paths.length] = pathToAdd; - usrPathsField.set(null, newPaths); - System.setProperty("java.library.path", - System.getProperty("java.library.path") + File.pathSeparator + pathToAdd); - Field fieldSysPath = ClassLoader.class.getDeclaredField("sys_paths"); - fieldSysPath.setAccessible(true); - fieldSysPath.set(null, null); - } catch (Exception e) { - SetErrmsg(e); - return -1; - } // end try/catch - - return 0; - } // end of addLibraryPath - */ - -} // end of class JdbcApacheInterface diff --git a/storage/connect/JdbcDSInterface.class b/storage/connect/JdbcDSInterface.class deleted file mode 100644 index d56c04bd81f..00000000000 Binary files a/storage/connect/JdbcDSInterface.class and /dev/null differ diff --git a/storage/connect/JdbcDSInterface.java b/storage/connect/JdbcDSInterface.java deleted file mode 100644 index 09f545bfb74..00000000000 --- a/storage/connect/JdbcDSInterface.java +++ /dev/null @@ -1,743 +0,0 @@ -import java.math.*; -import java.sql.*; -import java.util.Collections; -import java.util.Hashtable; -import java.util.List; - -import javax.sql.DataSource; - -import org.mariadb.jdbc.MariaDbDataSource; -import org.postgresql.jdbc2.optional.PoolingDataSource; -import com.mysql.cj.jdbc.MysqlDataSource; -import 
oracle.jdbc.pool.OracleDataSource; - -public class JdbcDSInterface { - boolean DEBUG = false; - String Errmsg = "No error"; - Connection conn = null; - DatabaseMetaData dbmd = null; - Statement stmt = null; - PreparedStatement pstmt = null; - ResultSet rs = null; - ResultSetMetaData rsmd = null; - Hashtable dst = null; - - // === Constructors/finalize ========================================= - public JdbcDSInterface() { - this(true); - } // end of default constructor - - public JdbcDSInterface(boolean b) { - DEBUG = b; - dst = new Hashtable(); - } // end of constructor - - private void SetErrmsg(Exception e) { - if (DEBUG) - System.out.println(e.getMessage()); - - Errmsg = e.toString(); - } // end of SetErrmsg - - private void SetErrmsg(String s) { - if (DEBUG) - System.out.println(s); - - Errmsg = s; - } // end of SetErrmsg - - public String GetErrmsg() { - String err = Errmsg; - - Errmsg = "No error"; - return err; - } // end of GetErrmsg - - public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { - int rc = 0; - String url = parms[1]; - DataSource ds = null; - MysqlDataSource mds = null; - MariaDbDataSource ads = null; - OracleDataSource ods = null; - PoolingDataSource pds = null; - - if (url == null) { - SetErrmsg("URL cannot be null"); - return -1; - } // endif driver - - try { - if ((ds = dst.get(url)) == null) { - if (url.toLowerCase().contains("mysql")) { - mds = new MysqlDataSource(); - mds.setURL(url); - mds.setUser(parms[2]); - mds.setPassword(parms[3]); - ds = mds; - } else if (url.toLowerCase().contains("mariadb")) { - ads = new MariaDbDataSource(); - ads.setUrl(url); - ads.setUser(parms[2]); - ads.setPassword(parms[3]); - ds = ads; - } else if (url.toLowerCase().contains("oracle")) { - ods = new OracleDataSource(); - ods.setURL(url); - ods.setUser(parms[2]); - ods.setPassword(parms[3]); - ds = ods; - } else if (url.toLowerCase().contains("postgresql")) { - pds = new PoolingDataSource(); - pds.setUrl(url); - pds.setUser(parms[2]); - pds.setPassword(parms[3]); - ds = pds; - } else { - SetErrmsg("Unsupported driver"); - return -4; - } // endif driver - - dst.put(url, ds); - } // endif ds - - // Get a connection from the data source - conn = ds.getConnection(); - - // Get the data base meta data object - dbmd = conn.getMetaData(); - - // Get a statement from the connection - if (scrollable) - stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY); - else - stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY); - - if (DEBUG) - System.out.println("Statement type = " + stmt.getResultSetType() - + " concurrency = " + stmt.getResultSetConcurrency()); - - if (DEBUG) // Get the fetch size of a statement - System.out.println("Default fetch size = " + stmt.getFetchSize()); - - if (fsize != 0) { - // Set the fetch size - stmt.setFetchSize(fsize); - - if (DEBUG) - System.out.println("New fetch size = " + stmt.getFetchSize()); - - } // endif fsize - - } catch (SQLException se) { - SetErrmsg(se); - rc = -2; - } catch( Exception e ) { - SetErrmsg(e); - rc = -3; - } // end try/catch - - return rc; - } // end of JdbcConnect - - public int CreatePrepStmt(String sql) { - int rc = 0; - - try { - pstmt = conn.prepareStatement(sql); - } catch (SQLException se) { - SetErrmsg(se); - rc = -1; - } catch (Exception e) { - SetErrmsg(e); - rc = -2; - } // end try/catch - - return rc; - } // end of CreatePrepStmt - - public void SetStringParm(int i, String s) { - try { - pstmt.setString(i, 
s); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetStringParm - - public void SetIntParm(int i, int n) { - try { - pstmt.setInt(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetIntParm - - public void SetShortParm(int i, short n) { - try { - pstmt.setShort(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetShortParm - - public void SetBigintParm(int i, long n) { - try { - pstmt.setLong(i, n); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetBigintParm - - public void SetFloatParm(int i, float f) { - try { - pstmt.setFloat(i, f); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetFloatParm - - public void SetDoubleParm(int i, double d) { - try { - pstmt.setDouble(i, d); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetDoubleParm - - public void SetTimestampParm(int i, Timestamp t) { - try { - pstmt.setTimestamp(i, t); - } catch (Exception e) { - SetErrmsg(e); - } // end try/catch - - } // end of SetTimestampParm - - public int ExecutePrep() { - int n = -3; - - if (pstmt != null) try { - n = pstmt.executeUpdate(); - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of ExecutePrep - - public boolean ClosePrepStmt() { - boolean b = false; - - if (pstmt != null) try { - pstmt.close(); - pstmt = null; - } catch (SQLException se) { - SetErrmsg(se); - b = true; - } catch (Exception e) { - SetErrmsg(e); - b = true; - } // end try/catch - - return b; - } // end of ClosePrepStmt - - public int JdbcDisconnect() { - int rc = 0; - - // Cancel pending statement - if (stmt != null) - try { - System.out.println("Cancelling statement"); - stmt.cancel(); - } catch(SQLException se) { - SetErrmsg(se); - rc += 1; - } // nothing more we can do - - // Close the statement and the connection - if (rs != null) - try { - if (DEBUG) - System.out.println("Closing result set"); - - rs.close(); - } catch(SQLException se) { - SetErrmsg(se); - rc = 2; - } // nothing more we can do - - if (stmt != null) - try { - if (DEBUG) - System.out.println("Closing statement"); - - stmt.close(); - } catch(SQLException se) { - SetErrmsg(se); - rc += 4; - } // nothing more we can do - - ClosePrepStmt(); - - if (conn != null) - try { - if (DEBUG) - System.out.println("Closing connection"); - - conn.close(); - } catch (SQLException se) { - SetErrmsg(se); - rc += 8; - } //end try/catch - - if (DEBUG) - System.out.println("All closed"); - - return rc; - } // end of JdbcDisconnect - - public int GetMaxValue(int n) { - int m = 0; - - try { - switch (n) { - case 1: // Max columns in table - m = dbmd.getMaxColumnsInTable(); - break; - case 2: // Max catalog name length - m = dbmd.getMaxCatalogNameLength(); - break; - case 3: // Max schema name length - m = dbmd.getMaxSchemaNameLength(); - break; - case 4: // Max table name length - m = dbmd.getMaxTableNameLength(); - break; - case 5: // Max column name length - m = dbmd.getMaxColumnNameLength(); - break; - } // endswitch n - - } catch(Exception e) { - SetErrmsg(e); - m = -1; - } // end try/catch - - return m; - } // end of GetMaxValue - - public int GetColumns(String[] parms) { - int ncol = 0; - - try { - if (rs != null) rs.close(); - rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - } // endif 
rs - - } catch(SQLException se) { - SetErrmsg(se); - } // end try/catch - - return ncol; - } // end of GetColumns - - public int GetTables(String[] parms) { - int ncol = 0; - String[] typ = null; - - if (parms[3] != null) { - typ = new String[1]; - typ[0] = parms[3]; - } // endif parms - - try { - if (rs != null) rs.close(); - rs = dbmd.getTables(parms[0], parms[1], parms[2], typ); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - } // endif rs - - } catch(SQLException se) { - SetErrmsg(se); - } // end try/catch - - return ncol; - } // end of GetColumns - - public int Execute(String query) { - int n = 0; - - if (DEBUG) - System.out.println("Executing '" + query + "'"); - - try { - boolean b = stmt.execute(query); - - if (b == false) { - n = stmt.getUpdateCount(); - if (rs != null) rs.close(); - } // endif b - - if (DEBUG) - System.out.println("Query '" + query + "' executed: n = " + n); - - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of Execute - - public int GetResult() { - int ncol = 0; - - try { - rs = stmt.getResultSet(); - - if (rs != null) { - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - - if (DEBUG) - System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)"); - - } // endif rs - - } catch (SQLException se) { - SetErrmsg(se); - ncol = -1; - } catch (Exception e) { - SetErrmsg(e); - ncol = -2; - } //end try/catch - - return ncol; - } // end of GetResult - - public int ExecuteQuery(String query) { - int ncol = 0; - - if (DEBUG) - System.out.println("Executing query '" + query + "'"); - - try { - rs = stmt.executeQuery(query); - rsmd = rs.getMetaData(); - ncol = rsmd.getColumnCount(); - - if (DEBUG) { - System.out.println("Query '" + query + "' executed successfully"); - System.out.println("Result set has " + rsmd.getColumnCount() + " column(s)"); - } // endif DEBUG - - } catch (SQLException se) { - SetErrmsg(se); - ncol = -1; - } catch (Exception e) { - SetErrmsg(e); - ncol = -2; - } //end try/catch - - return ncol; - } // end of ExecuteQuery - - public int ExecuteUpdate(String query) { - int n = 0; - - if (DEBUG) - System.out.println("Executing update query '" + query + "'"); - - try { - n = stmt.executeUpdate(query); - - if (DEBUG) - System.out.println("Update Query '" + query + "' executed: n = " + n); - - } catch (SQLException se) { - SetErrmsg(se); - n = -1; - } catch (Exception e) { - SetErrmsg(e); - n = -2; - } //end try/catch - - return n; - } // end of ExecuteUpdate - - public int ReadNext() { - if (rs != null) { - try { - return rs.next() ? 
1 : 0; - } catch (SQLException se) { - SetErrmsg(se); - return -1; - } //end try/catch - - } else - return 0; - - } // end of ReadNext - - public boolean Fetch(int row) { - if (rs != null) { - try { - return rs.absolute(row); - } catch (SQLException se) { - SetErrmsg(se); - return false; - } //end try/catch - - } else - return false; - - } // end of Fetch - - public String ColumnName(int n) { - if (rsmd == null) { - System.out.println("No result metadata"); - } else try { - return rsmd.getColumnLabel(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ColumnName - - public int ColumnType(int n, String name) { - if (rsmd == null) { - System.out.println("No result metadata"); - } else try { - if (n == 0) - n = rs.findColumn(name); - - return rsmd.getColumnType(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 666; // Not a type - } // end of ColumnType - - public String ColumnDesc(int n, int[] val) { - if (rsmd == null) { - System.out.println("No result metadata"); - return null; - } else try { - val[0] = rsmd.getColumnType(n); - val[1] = rsmd.getPrecision(n); - val[2] = rsmd.getScale(n); - val[3] = rsmd.isNullable(n); - return rsmd.getColumnLabel(n); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ColumnDesc - - public String StringField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getString(n) : rs.getString(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of StringField - - public int IntField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getInt(n) : rs.getInt(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of IntField - - public long BigintField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - BigDecimal bigDecimal = (n > 0) ? rs.getBigDecimal(n) : rs.getBigDecimal(name); - return bigDecimal != null ? bigDecimal.longValue() : 0; - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of BiginttField - - public double DoubleField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getDouble(n) : rs.getDouble(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0.; - } // end of DoubleField - - public float FloatField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getFloat(n) : rs.getFloat(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return 0; - } // end of FloatField - - public boolean BooleanField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getBoolean(n) : rs.getBoolean(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return false; - } // end of BooleanField - - public Date DateField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? 
rs.getDate(n) : rs.getDate(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of DateField - - public Time TimeField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getTime(n) : rs.getTime(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of TimeField - - public Timestamp TimestampField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of TimestampField - - public String ObjectField(int n, String name) { - if (rs == null) { - System.out.println("No result set"); - } else try { - return (n > 0) ? rs.getObject(n).toString() : rs.getObject(name).toString(); - } catch (SQLException se) { - SetErrmsg(se); - } //end try/catch - - return null; - } // end of ObjectField - - public int GetDrivers(String[] s, int mxs) { - int n = 0; - List drivers = Collections.list(DriverManager.getDrivers()); - int size = Math.min(mxs, drivers.size()); - - for (int i = 0; i < size; i++) { - Driver driver = (Driver)drivers.get(i); - - // Get name of driver - s[n++] = driver.getClass().getName(); - - // Get version info - s[n++] = driver.getMajorVersion() + "." + driver.getMinorVersion(); - s[n++] = driver.jdbcCompliant() ? "Yes" : "No"; - s[n++] = driver.toString(); - } // endfor i - - return size; - } // end of GetDrivers - - /** - * Adds the specified path to the java library path - * from Fahd Shariff blog - * - * @param pathToAdd the path to add - static public int addLibraryPath(String pathToAdd) { - System.out.println("jpath = " + pathToAdd); - - try { - Field usrPathsField = ClassLoader.class.getDeclaredField("usr_paths"); - usrPathsField.setAccessible(true); - - //get array of paths - String[] paths = (String[])usrPathsField.get(null); - - //check if the path to add is already present - for (String path : paths) { - System.out.println("path = " + path); - - if (path.equals(pathToAdd)) - return -5; - - } // endfor path - - //add the new path - String[] newPaths = Arrays.copyOf(paths, paths.length + 1); - newPaths[paths.length] = pathToAdd; - usrPathsField.set(null, newPaths); - System.setProperty("java.library.path", - System.getProperty("java.library.path") + File.pathSeparator + pathToAdd); - Field fieldSysPath = ClassLoader.class.getDeclaredField("sys_paths"); - fieldSysPath.setAccessible(true); - fieldSysPath.set(null, null); - } catch (Exception e) { - SetErrmsg(e); - return -1; - } // end try/catch - - return 0; - } // end of addLibraryPath - */ - -} // end of class JdbcDSInterface diff --git a/storage/connect/JdbcInterface.class b/storage/connect/JdbcInterface.class deleted file mode 100644 index 8c5ba6439f3..00000000000 Binary files a/storage/connect/JdbcInterface.class and /dev/null differ diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java index f9a6e734454..793d29936c8 100644 --- a/storage/connect/JdbcInterface.java +++ b/storage/connect/JdbcInterface.java @@ -1,13 +1,19 @@ +package wrappers; + import java.math.*; import java.sql.*; -//import java.util.Arrays; import java.util.Collections; +import java.util.Hashtable; import java.util.List; -//import java.io.File; -//import java.lang.reflect.Field; + +import javax.sql.DataSource; public class JdbcInterface { + // This is used by DS classes + 
static Hashtable dst = null; + boolean DEBUG = false; + boolean CatisSchema = false; String Errmsg = "No error"; Connection conn = null; DatabaseMetaData dbmd = null; @@ -18,14 +24,14 @@ public class JdbcInterface { // === Constructors/finalize ========================================= public JdbcInterface() { - this(true); + this(false); } // end of default constructor public JdbcInterface(boolean b) { DEBUG = b; } // end of constructor - private void SetErrmsg(Exception e) { + protected void SetErrmsg(Exception e) { if (DEBUG) System.out.println(e.getMessage()); @@ -38,6 +44,22 @@ public class JdbcInterface { Errmsg = "No error"; return err; } // end of GetErrmsg + + protected void CheckURL(String url, String vendor) throws Exception { + if (url == null) + throw new Exception("URL cannot be null"); + + String[] tk = url.split(":", 3); + + if (!tk[0].equals("jdbc") || tk[1] == null) + throw new Exception("Invalid URL"); + + if (vendor != null && !tk[1].equals(vendor)) + throw new Exception("Wrong URL for this wrapper"); + + // Some drivers use Catalog as Schema + CatisSchema = tk[1].equals("mysql") || tk[1].equals("mariadb"); + } // end of CatalogIsSchema public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { int rc = 0; @@ -58,6 +80,8 @@ public class JdbcInterface { if (DEBUG) System.out.println("URL=" + parms[1]); + + CheckURL(parms[1], null); if (parms[2] != null && !parms[2].isEmpty()) { if (DEBUG) @@ -74,27 +98,7 @@ public class JdbcInterface { dbmd = conn.getMetaData(); // Get a statement from the connection - if (scrollable) - stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY); - else - stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY); - - if (DEBUG) - System.out.println("Statement type = " + stmt.getResultSetType() - + " concurrency = " + stmt.getResultSetConcurrency()); - - if (DEBUG) // Get the fetch size of a statement - System.out.println("Default fetch size = " + stmt.getFetchSize()); - - if (fsize != 0) { - // Set the fetch size - stmt.setFetchSize(fsize); - - if (DEBUG) - System.out.println("New fetch size = " + stmt.getFetchSize()); - - } // endif fsize - + stmt = GetStmt(fsize, scrollable); } catch(ClassNotFoundException e) { SetErrmsg(e); rc = -1; @@ -109,6 +113,34 @@ public class JdbcInterface { return rc; } // end of JdbcConnect + protected Statement GetStmt(int fsize, boolean scrollable) throws SQLException, Exception { + Statement stmt = null; + + if (scrollable) + stmt = conn.createStatement(java.sql.ResultSet.TYPE_SCROLL_INSENSITIVE, java.sql.ResultSet.CONCUR_READ_ONLY); + else + stmt = conn.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, java.sql.ResultSet.CONCUR_READ_ONLY); + + if (DEBUG) + System.out.println("Statement type = " + stmt.getResultSetType() + + " concurrency = " + stmt.getResultSetConcurrency()); + + if (DEBUG) // Get the fetch size of a statement + System.out.println("Default fetch size = " + stmt.getFetchSize()); + + if (fsize != 0) { + // Set the fetch size + stmt.setFetchSize(fsize); + + if (DEBUG) + System.out.println("New fetch size = " + stmt.getFetchSize()); + + } // endif fsize + + return stmt; + } // end of GetStmt + + public int CreatePrepStmt(String sql) { int rc = 0; @@ -227,7 +259,9 @@ public class JdbcInterface { // Cancel pending statement if (stmt != null) try { - System.out.println("Cancelling statement"); + if (DEBUG) + System.out.println("Cancelling statement"); + stmt.cancel(); } 
catch(SQLException se) { SetErrmsg(se); @@ -307,11 +341,15 @@ public class JdbcInterface { } // end of GetMaxValue public int GetColumns(String[] parms) { - int ncol = 0; + int ncol = -1; try { if (rs != null) rs.close(); - rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]); + + if (CatisSchema) + rs = dbmd.getColumns(parms[1], null, parms[2], parms[3]); + else + rs = dbmd.getColumns(parms[0], parms[1], parms[2], parms[3]); if (rs != null) { rsmd = rs.getMetaData(); @@ -326,7 +364,7 @@ public class JdbcInterface { } // end of GetColumns public int GetTables(String[] parms) { - int ncol = 0; + int ncol = -1; String[] typ = null; if (parms[3] != null) { @@ -336,7 +374,11 @@ public class JdbcInterface { try { if (rs != null) rs.close(); - rs = dbmd.getTables(parms[0], parms[1], parms[2], typ); + + if (CatisSchema) + rs = dbmd.getTables(parms[1], null, parms[2], typ); + else + rs = dbmd.getTables(parms[0], parms[1], parms[2], typ); if (rs != null) { rsmd = rs.getMetaData(); @@ -710,3 +752,4 @@ public class JdbcInterface { */ } // end of class JdbcInterface + diff --git a/storage/connect/MariadbInterface.java b/storage/connect/MariadbInterface.java new file mode 100644 index 00000000000..26ff7a82301 --- /dev/null +++ b/storage/connect/MariadbInterface.java @@ -0,0 +1,69 @@ +package wrappers; + +import java.sql.*; +import java.util.Hashtable; + +import javax.sql.DataSource; +import org.mariadb.jdbc.MariaDbDataSource; + +public class MariadbInterface extends JdbcInterface { + public MariadbInterface() { + this(true); + } // end of default constructor + + public MariadbInterface(boolean b) { + super(b); + + if (dst == null) + dst = new Hashtable(); + + } // end of default constructor + + @Override + public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { + int rc = 0; + String url = parms[1]; + DataSource ds = null; + MariaDbDataSource ads = null; + + if (DEBUG) + System.out.println("Connecting to MariaDB data source"); + + try { + CheckURL(url, "mariadb"); + + if ((ds = dst.get(url)) == null) { + ads = new MariaDbDataSource(); + ads.setUrl(url); + + if (parms[2] != null) + ads.setUser(parms[2]); + + if (parms[3] != null) + ads.setPassword(parms[3]); + + ds = ads; + + dst.put(url, ds); + } // endif ds + + // Get a connection from the data source + conn = ds.getConnection(); + + // Get the data base meta data object + dbmd = conn.getMetaData(); + + // Get a statement from the connection + stmt = GetStmt(fsize, scrollable); + } catch (SQLException se) { + SetErrmsg(se); + rc = -2; + } catch( Exception e ) { + SetErrmsg(e); + rc = -3; + } // end try/catch + + return rc; + } // end of JdbcConnect + +} diff --git a/storage/connect/MysqlInterface.java b/storage/connect/MysqlInterface.java new file mode 100644 index 00000000000..a13020e30b0 --- /dev/null +++ b/storage/connect/MysqlInterface.java @@ -0,0 +1,69 @@ +package wrappers; + +import java.sql.*; +import java.util.Hashtable; + +import javax.sql.DataSource; +import com.mysql.cj.jdbc.MysqlDataSource; + +public class MysqlInterface extends JdbcInterface { + public MysqlInterface() { + this(true); + } // end of default constructor + + public MysqlInterface(boolean b) { + super(b); + + if (dst == null) + dst = new Hashtable(); + + } // end of default constructor + + @Override + public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { + int rc = 0; + String url = parms[1]; + DataSource ds = null; + MysqlDataSource mds = null; + + if (DEBUG) + System.out.println("Connecting to MySQL data source"); + + try { + 
CheckURL(url, "mysql"); + + if ((ds = dst.get(url)) == null) { + mds = new MysqlDataSource(); + mds.setUrl(url); + + if (parms[2] != null) + mds.setUser(parms[2]); + + if (parms[3] != null) + mds.setPassword(parms[3]); + + ds = mds; + + dst.put(url, ds); + } // endif ds + + // Get a connection from the data source + conn = ds.getConnection(); + + // Get the data base meta data object + dbmd = conn.getMetaData(); + + // Get a statement from the connection + stmt = GetStmt(fsize, scrollable); + } catch (SQLException se) { + SetErrmsg(se); + rc = -2; + } catch( Exception e ) { + SetErrmsg(e); + rc = -3; + } // end try/catch + + return rc; + } // end of JdbcConnect + +} // end of class MysqlInterface diff --git a/storage/connect/OracleInterface.java b/storage/connect/OracleInterface.java new file mode 100644 index 00000000000..0bfdd20e032 --- /dev/null +++ b/storage/connect/OracleInterface.java @@ -0,0 +1,69 @@ +package wrappers; + +import java.sql.*; +import java.util.Hashtable; + +import javax.sql.DataSource; +import oracle.jdbc.pool.OracleDataSource; + +public class OracleInterface extends JdbcInterface { + public OracleInterface() { + this(true); + } // end of OracleInterface constructor + + public OracleInterface(boolean b) { + super(b); + + if (dst == null) + dst = new Hashtable(); + + } // end of OracleInterface constructor + + @Override + public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { + int rc = 0; + String url = parms[1]; + DataSource ds = null; + OracleDataSource ods = null; + + if (DEBUG) + System.out.println("Connecting to Oracle data source"); + + try { + CheckURL(url, "oracle"); + + if ((ds = dst.get(url)) == null) { + ods = new OracleDataSource(); + ods.setURL(url); + + if (parms[2] != null) + ods.setUser(parms[2]); + + if (parms[3] != null) + ods.setPassword(parms[3]); + + ds = ods; + + dst.put(url, ds); + } // endif ds + + // Get a connection from the data source + conn = ds.getConnection(); + + // Get the data base meta data object + dbmd = conn.getMetaData(); + + // Get a statement from the connection + stmt = GetStmt(fsize, scrollable); + } catch (SQLException se) { + SetErrmsg(se); + rc = -2; + } catch( Exception e ) { + SetErrmsg(e); + rc = -3; + } // end try/catch + + return rc; + } // end of JdbcConnect + +} // end of class OracleInterface diff --git a/storage/connect/PostgresqlInterface.java b/storage/connect/PostgresqlInterface.java new file mode 100644 index 00000000000..adce0616a1b --- /dev/null +++ b/storage/connect/PostgresqlInterface.java @@ -0,0 +1,69 @@ +package wrappers; + +import java.sql.*; +import java.util.Hashtable; + +import javax.sql.DataSource; +import org.postgresql.jdbc2.optional.PoolingDataSource; + +public class PostgresqlInterface extends JdbcInterface { + public PostgresqlInterface() { + this(true); + } // end of constructor + + public PostgresqlInterface(boolean b) { + super(b); + + if (dst == null) + dst = new Hashtable(); + + } // end of constructor + + @Override + public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { + int rc = 0; + String url = parms[1]; + DataSource ds = null; + PoolingDataSource pds = null; + + if (DEBUG) + System.out.println("Connecting to Postgresql data source"); + + try { + CheckURL(url, "postgresql"); + + if ((ds = dst.get(url)) == null) { + pds = new PoolingDataSource(); + pds.setUrl(url); + + if (parms[2] != null) + pds.setUser(parms[2]); + + if (parms[3] != null) + pds.setPassword(parms[3]); + + ds = pds; + + dst.put(url, ds); + } // endif ds + + // Get a connection from 
the data source + conn = ds.getConnection(); + + // Get the data base meta data object + dbmd = conn.getMetaData(); + + // Get a statement from the connection + stmt = GetStmt(fsize, scrollable); + } catch (SQLException se) { + SetErrmsg(se); + rc = -2; + } catch( Exception e ) { + SetErrmsg(e); + rc = -3; + } // end try/catch + + return rc; + } // end of JdbcConnect + +} // end of class PostgresqlInterface diff --git a/storage/connect/connect.h b/storage/connect/connect.h index bbefda52274..ce4cf9bf8b9 100644 --- a/storage/connect/connect.h +++ b/storage/connect/connect.h @@ -65,7 +65,8 @@ class TDBDOX: public TDBDOS { friend int CntIndexRange(PGLOBAL, PTDB, const uchar**, uint*, bool*, key_part_map*); friend class ha_connect; - }; // end of class TDBDOX + TDBDOX() : TDBDOS((PGLOBAL)0, (PTDBDOS)0) {} /* Never called */ +}; // end of class TDBDOX class XKPDEF: public KPARTDEF { friend class TDBDOX; diff --git a/storage/connect/csort.cpp b/storage/connect/csort.cpp index 2f918782c80..13f325d8f3f 100644 --- a/storage/connect/csort.cpp +++ b/storage/connect/csort.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier Bertrand 1995-2012 */ +/* (C) Copyright to the author Olivier Bertrand 1995-2016 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -721,8 +721,8 @@ int CSORT::Qsortc(void) void CSORT::Qstc(int *base, int *max) { register int *i, *j, *jj, *lt, *eq, *gt, *mid; - int c, lo, hi, rc; - size_t zlo, zhi, cnm; + int c = 0, lo, hi, rc; + size_t zlo, zhi, cnm; zlo = zhi = cnm = 0; // Avoid warning message @@ -774,8 +774,11 @@ void CSORT::Qstc(int *base, int *max) /*****************************************************************/ /* Small group. Do special quicker processing. */ /*****************************************************************/ - if ((rc = Qcompare(base, (i = base + 1))) > 0) - c = *base, *base = *i, *i = c; + if ((rc = Qcompare(base, (i = base + 1))) > 0) { + c = *base; + *base = *i; + *i = c; + } // endif rc if (Pof) Pof[base - Pex] = Pof[i - Pex] = (rc) ? 
1 : 2; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 45ad4a484e1..85380860652 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -195,7 +195,6 @@ extern "C" { #if defined(JDBC_SUPPORT) char *JvmPath; char *ClassPath; - char *Wrapper; #endif // JDBC_SUPPORT #if defined(__WIN__) @@ -211,7 +210,7 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, bool info); PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info); PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); -int TranslateJDBCType(int stp, int prec, int& len, char& v); +int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v); void PushWarning(PGLOBAL g, THD *thd, int level); bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, const char *db, char *tab, const char *src, int port); @@ -220,6 +219,7 @@ USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); uint GetJsonGrpSize(void); +char *GetJavaWrapper(void); uint GetWorkSize(void); void SetWorkSize(uint); extern "C" const char *msglang(void); @@ -332,6 +332,15 @@ static MYSQL_THDVAR_UINT(json_grp_size, "max number of rows for JSON aggregate functions.", NULL, NULL, JSONMAX, 1, INT_MAX, 1); +#if defined(JDBC_SUPPORT) +// Default java wrapper to use with JDBC tables +static MYSQL_THDVAR_STR(java_wrapper, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, + "Java wrapper class name", + // check_class_path, update_class_path, + NULL, NULL, "wrappers/JdbcInterface"); +#endif // JDBC_SUPPORT + #if defined(XMSG) || defined(NEWMSG) const char *language_names[]= { @@ -384,6 +393,12 @@ extern "C" const char *msglang(void) return language_names[THDVAR(current_thd, msg_lang)]; } // end of msglang #else // !XMSG && !NEWMSG + +#if defined(JDBC_SUPPORT) +char *GetJavaWrapper(void) +{return connect_hton ? THDVAR(current_thd, java_wrapper) : (char*)"wrappers/JdbcInterface";} +#endif // JDBC_SUPPORT + extern "C" const char *msglang(void) { #if defined(FRENCH) @@ -1123,7 +1138,7 @@ bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef) /****************************************************************************/ int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef) { - ulonglong opval= NO_IVAL; + ulonglong opval= (ulonglong) NO_IVAL; if (!options) return idef; @@ -5508,7 +5523,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, break; case FNC_TABLE: - qrp= ODBCTables(g, dsn, shm, tab, mxr, true, sop); + qrp= ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); break; case FNC_DSN: qrp= ODBCDataSources(g, mxr, true); @@ -5633,6 +5648,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else { char *schem= NULL; + char *tn= NULL; // Not a catalog table if (!qrp->Nblin) { @@ -5649,7 +5665,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, typ= len= prec= dec= 0; tm= NOT_NULL_FLAG; cnm= (char*)"noname"; - dft= xtra= key= fmt= NULL; + dft= xtra= key= fmt= tn= NULL; v= ' '; rem= NULL; @@ -5669,7 +5685,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, typ= crp->Kdata->GetIntValue(i); v = (crp->Nulls) ? 
crp->Nulls[i] : 0; break; - case FLD_PREC: + case FLD_TYPENAME: + tn= crp->Kdata->GetCharValue(i); + break; + case FLD_PREC: // PREC must be always before LENGTH len= prec= crp->Kdata->GetIntValue(i); break; @@ -5713,8 +5732,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, break; case FLD_SCHEM: -#if defined(ODBC_SUPPORT) - if (ttp == TAB_ODBC && crp->Kdata) { +#if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT) + if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { sprintf(g->Message, "Several %s tables found, specify DBNAME", tab); @@ -5724,7 +5743,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, schem= crp->Kdata->GetCharValue(i); } // endif ttp -#endif // ODBC_SUPPORT +#endif // ODBC_SUPPORT || JDBC_SUPPORT default: break; // Ignore } // endswitch Fld @@ -5777,7 +5796,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, int plgtyp; // typ must be PLG type, not SQL type - if (!(plgtyp= TranslateJDBCType(typ, dec, prec, v))) { + if (!(plgtyp= TranslateJDBCType(typ, tn, dec, prec, v))) { if (GetTypeConv() == TPC_SKIP) { // Skip this column sprintf(g->Message, "Column %s skipped (unsupported type %d)", @@ -6875,12 +6894,6 @@ static MYSQL_SYSVAR_STR(class_path, ClassPath, "Java class path", // check_class_path, update_class_path, NULL, NULL, NULL); - -static MYSQL_SYSVAR_STR(java_wrapper, Wrapper, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, - "Java wrapper class", - // check_class_path, update_class_path, - NULL, NULL, "JdbcInterface"); #endif // JDBC_SUPPORT diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index a8c0b193dcd..d3e573e692c 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -6,6 +6,13 @@ /* This file contains the JDBC connection classes functions. */ /***********************************************************************/ +#if defined(__WIN__) +// This is needed for RegGetValue +#define _WINVER 0x0601 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0601 +#endif // __WIN__ + /***********************************************************************/ /* Include relevant MariaDB header file. */ /***********************************************************************/ @@ -55,7 +62,8 @@ extern "C" HINSTANCE s_hModule; // Saved module handle int GetConvSize(); extern char *JvmPath; // The connect_jvm_path global variable value extern char *ClassPath; // The connect_class_path global variable value -extern char *Wrapper; // The connect_java_wrapper global variable value + +char *GetJavaWrapper(void); // The connect_java_wrapper variable value /***********************************************************************/ /* Static JDBConn objects. */ @@ -79,7 +87,7 @@ GETDEF JDBConn::GetDefaultJavaVMInitArgs = NULL; #endif // !_DEBUG // To avoid gcc warning -int TranslateJDBCType(int stp, int prec, int& len, char& v); +int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v); /***********************************************************************/ /* GetJDBCType: returns the SQL_TYPE corresponding to a PLG type. */ @@ -107,7 +115,7 @@ static short GetJDBCType(int type) /***********************************************************************/ /* TranslateJDBCType: translate a JDBC Type to a PLG type. 
*/ /***********************************************************************/ -int TranslateJDBCType(int stp, int prec, int& len, char& v) +int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) { int type; @@ -139,17 +147,24 @@ int TranslateJDBCType(int stp, int prec, int& len, char& v) case 8: // DOUBLE type = TYPE_DOUBLE; break; - case 93: // TIMESTAMP + case 93: // TIMESTAMP, DATETIME type = TYPE_DATE; len = 19 + ((prec) ? (prec+1) : 0); - v = 'S'; + v = (tn && toupper(tn[0]) == 'T') ? 'S' : 'E'; break; - case 91: // TYPE_DATE + case 91: // DATE, YEAR type = TYPE_DATE; - len = 10; - v = 'D'; + + if (!tn || toupper(tn[0]) != 'Y') { + len = 10; + v = 'D'; + } else { + len = 4; + v = 'Y'; + } // endif len + break; - case 92: // TYPE_TIME + case 92: // TIME type = TYPE_DATE; len = 8 + ((prec) ? (prec+1) : 0); v = 'T'; @@ -174,42 +189,20 @@ int TranslateJDBCType(int stp, int prec, int& len, char& v) static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db, char *tab, PQRYRES qrp) { -//size_t m, n; JCATPARM *cap; #if defined(_DEBUG) assert(qrp); #endif - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - cap = NULL; - goto fin; - } // endif rc - -//m = (size_t)qrp->Maxres; -//n = (size_t)qrp->Nbcol; - cap = (JCATPARM *)PlugSubAlloc(g, NULL, sizeof(JCATPARM)); - memset(cap, 0, sizeof(JCATPARM)); - cap->Id = fid; - cap->Qrp = qrp; - cap->DB = (PUCHAR)db; - cap->Tab = (PUCHAR)tab; -//cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *)); - -//for (i = 0; i < n; i++) -// cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN)); - -//cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); + if ((cap = (JCATPARM *)PlgDBSubAlloc(g, NULL, sizeof(JCATPARM)))) { + memset(cap, 0, sizeof(JCATPARM)); + cap->Id = fid; + cap->Qrp = qrp; + cap->DB = db; + cap->Tab = tab; + } // endif cap -fin: - g->jump_level--; return cap; } // end of AllocCatInfo @@ -291,7 +284,8 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat, if (!(cap = AllocCatInfo(g, CAT_COL, db, table, qrp))) return NULL; - cap->Pat = (PUCHAR)colpat; + // Colpat cannot be null or empty for some drivers + cap->Pat = (colpat && *colpat) ? colpat : PlugDup(g, "%"); /************************************************************************/ /* Now get the results into blocks. */ @@ -399,10 +393,12 @@ PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, char *tabtyp, if (info || !qrp) return qrp; - if (!(cap = AllocCatInfo(g, CAT_TAB, db, tabpat, qrp))) + // Tabpat cannot be null or empty for some drivers + if (!(cap = AllocCatInfo(g, CAT_TAB, db, + (tabpat && *tabpat) ? 
tabpat : PlugDup(g, "%"), qrp))) return NULL; - cap->Pat = (PUCHAR)tabtyp; + cap->Pat = tabtyp; if (trace) htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol); @@ -650,11 +646,20 @@ JDBConn::JDBConn(PGLOBAL g, TDBJDBC *tdbp) job = nullptr; // The java wrapper class object xqid = xuid = xid = grs = readid = fetchid = typid = errid = nullptr; prepid = xpid = pcid = nullptr; - chrfldid = intfldid = dblfldid = fltfldid = datfldid = bigfldid = nullptr; -//m_LoginTimeout = DEFAULT_LOGIN_TIMEOUT; + chrfldid = intfldid = dblfldid = fltfldid = bigfldid = nullptr; + datfldid = timfldid = tspfldid = nullptr; + //m_LoginTimeout = DEFAULT_LOGIN_TIMEOUT; //m_QueryTimeout = DEFAULT_QUERY_TIMEOUT; //m_UpdateOptions = 0; Msg = NULL; + m_Wrap = (tdbp && tdbp->WrapName) ? tdbp->WrapName : GetJavaWrapper(); + + if (!strchr(m_Wrap, '/')) { + // Add the wrapper package name + char *wn = (char*)PlugSubAlloc(g, NULL, strlen(m_Wrap) + 10); + m_Wrap = strcat(strcpy(wn, "wrappers/"), m_Wrap); + } // endif m_Wrap + m_Driver = NULL; m_Url = NULL; m_User = NULL; @@ -830,17 +835,52 @@ void JDBConn::ResetJVM(void) /***********************************************************************/ bool JDBConn::GetJVM(PGLOBAL g) { + int ntry; + if (!LibJvm) { char soname[512]; #if defined(__WIN__) - if (JvmPath) - strcat(strcpy(soname, JvmPath), "\\jvm.dll"); - else - strcpy(soname, "jvm.dll"); + for (ntry = 0; !LibJvm && ntry < 3; ntry++) { + if (!ntry && JvmPath) { + strcat(strcpy(soname, JvmPath), "\\jvm.dll"); + ntry = 3; // No other try + } else if (ntry < 2 && getenv("JAVA_HOME")) { + strcpy(soname, getenv("JAVA_HOME")); - // Load the desired shared library - if (!(LibJvm = LoadLibrary(soname))) { + if (ntry == 1) + strcat(soname, "\\jre"); + + strcat(soname, "\\bin\\client\\jvm.dll"); + } else { + // Try to find it through the registry + char version[16]; + char javaKey[64] = "SOFTWARE\\JavaSoft\\Java Runtime Environment"; + LONG rc; + DWORD BufferSize = 16; + + strcpy(soname, "jvm.dll"); // In case it fails + + if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "CurrentVersion", + RRF_RT_ANY, NULL, (PVOID)&version, &BufferSize)) == ERROR_SUCCESS) { + strcat(strcat(javaKey, "\\"), version); + BufferSize = sizeof(soname); + + if ((rc = RegGetValue(HKEY_LOCAL_MACHINE, javaKey, "RuntimeLib", + RRF_RT_ANY, NULL, (PVOID)&soname, &BufferSize)) != ERROR_SUCCESS) + printf("RegGetValue: rc=%ld\n", rc); + + } // endif rc + + ntry = 3; // Try this only once + } // endelse + + // Load the desired shared library + LibJvm = LoadLibrary(soname); + } // endfor ntry + + // Get the needed entries + if (!LibJvm) { char buf[256]; DWORD rc = GetLastError(); @@ -871,13 +911,23 @@ bool JDBConn::GetJVM(PGLOBAL g) #else // !__WIN__ const char *error = NULL; - if (JvmPath) - strcat(strcpy(soname, JvmPath), "/libjvm.so"); - else - strcpy(soname, "libjvm.so"); + for (ntry = 0; !LibJvm && ntry < 2; ntry++) { + if (!ntry && JvmPath) { + strcat(strcpy(soname, JvmPath), "/libjvm.so"); + ntry = 2; + } else if (!ntry && getenv("JAVA_HOME")) { + // TODO: Replace i386 by a better guess + strcat(strcpy(soname, getenv("JAVA_HOME")), "/jre/lib/i386/client/libjvm.so"); + } else { // Will need LD_LIBRARY_PATH to be set + strcpy(soname, "libjvm.so"); + ntry = 2; + } // endelse + + LibJvm = dlopen(soname, RTLD_LAZY); + } // endfor ntry // Load the desired shared library - if (!(LibJvm = dlopen(soname, RTLD_LAZY))) { + if (!LibJvm) { error = dlerror(); sprintf(g->Message, MSG(SHARED_LIB_ERR), soname, SVP(error)); } else if (!(CreateJavaVM = (CRTJVM)dlsym(LibJvm, 
"JNI_CreateJavaVM"))) { @@ -911,7 +961,9 @@ bool JDBConn::GetJVM(PGLOBAL g) /***********************************************************************/ int JDBConn::Open(PJPARM sop) { + bool err = false; + jboolean jt = (trace > 0); PGLOBAL& g = m_G; // Link or check whether jvm library was linked @@ -951,6 +1003,11 @@ int JDBConn::Open(PJPARM sop) #define N 1 #endif + // Java source will be compiled as ajar file installed in the plugin dir + jpop->Append(sep); + jpop->Append(GetPluginDir()); + jpop->Append("JdbcInterface.jar"); + //================== prepare loading of Java VM ============================ JavaVMInitArgs vm_args; // Initialization arguments JavaVMOption* options = new JavaVMOption[N]; // JVM invocation options @@ -1021,19 +1078,16 @@ int JDBConn::Open(PJPARM sop) return RC_FX; } // endswitch rc + //=============== Display JVM version =============== + jint ver = env->GetVersion(); + printf("JVM Version %d.%d\n", ((ver>>16)&0x0f), (ver&0x0f)); } // endif rc - //=============== Display JVM version ======================================= -#if defined(_DEBUG) - jint ver = env->GetVersion(); - printf("JVM Version %d.%d\n", ((ver>>16)&0x0f), (ver&0x0f)); -#endif //_DEBUG - // try to find the java wrapper class - jdi = env->FindClass(Wrapper); + jdi = env->FindClass(m_Wrap); if (jdi == nullptr) { - sprintf(g->Message, "ERROR: class %s not found!", Wrapper); + sprintf(g->Message, "ERROR: class %s not found!", m_Wrap); return RC_FX; } // endif jdi @@ -1076,19 +1130,19 @@ int JDBConn::Open(PJPARM sop) #endif // 0 // if class found, continue - jmethodID ctor = env->GetMethodID(jdi, "", "()V"); + jmethodID ctor = env->GetMethodID(jdi, "", "(Z)V"); if (ctor == nullptr) { - sprintf(g->Message, "ERROR: %s constructor not found!", Wrapper); + sprintf(g->Message, "ERROR: %s constructor not found!", m_Wrap); return RC_FX; } else - job = env->NewObject(jdi, ctor); + job = env->NewObject(jdi, ctor, jt); // If the object is successfully constructed, // we can then search for the method we want to call, // and invoke it for the object: if (job == nullptr) { - sprintf(g->Message, "%s class object not constructed!", Wrapper); + sprintf(g->Message, "%s class object not constructed!", m_Wrap); return RC_FX; } // endif job @@ -1289,9 +1343,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) { PGLOBAL& g = m_G; jint ctyp; - jlong dtv; jstring cn, jn = nullptr; - jobject dob; if (rank == 0) if (!name || (jn = env->NewStringUTF(name)) == nullptr) { @@ -1354,31 +1406,22 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) break; case 91: // DATE - case 92: // TIME - case 93: // TIMESTAMP - if (!gmID(g, datfldid, "TimestampField", - "(ILjava/lang/String;)Ljava/sql/Timestamp;")) { - dob = env->CallObjectMethod(job, datfldid, (jint)rank, jn); - - if (dob) { - jclass jts = env->FindClass("java/sql/Timestamp"); - - if (env->ExceptionCheck()) { - val->Reset(); - } else { - jmethodID getTime = env->GetMethodID(jts, "getTime", "()J"); - - if (getTime != nullptr) { - dtv = env->CallLongMethod(dob, getTime); - val->SetValue((int)(dtv / 1000)); - } else - val->Reset(); - - } // endif check + if (!gmID(g, datfldid, "DateField", "(ILjava/lang/String;)I")) { + val->SetValue((int)env->CallIntMethod(job, datfldid, (jint)rank, jn)); + } else + val->Reset(); - } else - val->Reset(); + break; + case 92: // TIME + if (!gmID(g, timfldid, "TimeField", "(ILjava/lang/String;)I")) { + val->SetValue((int)env->CallIntMethod(job, timfldid, (jint)rank, jn)); + } else + val->Reset(); + break; + case 93: // 
TIMESTAMP + if (!gmID(g, tspfldid, "TimestampField", "(ILjava/lang/String;)I")) { + val->SetValue((int)env->CallIntMethod(job, tspfldid, (jint)rank, jn)); } else val->Reset(); @@ -1931,9 +1974,9 @@ bool JDBConn::SetParam(JDBCCOL *colp) { PGLOBAL& g = m_G; // void *buffer; - int i; + int i, ncol; PSZ fnc = "Unknown"; - uint n, ncol; + uint n; short len, tp; int crow = 0; PQRYRES qrp = cap->Qrp; @@ -1956,9 +1999,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) env->SetObjectArrayElement(parms, 0, env->NewStringUTF(name.ptr(2))); env->SetObjectArrayElement(parms, 1, env->NewStringUTF(name.ptr(1))); env->SetObjectArrayElement(parms, 2, env->NewStringUTF(name.ptr(0))); - - if (cap->Pat) - env->SetObjectArrayElement(parms, 3, env->NewStringUTF((const char*)cap->Pat)); + env->SetObjectArrayElement(parms, 3, env->NewStringUTF((const char*)cap->Pat)); // Now do call the proper JDBC API switch (cap->Id) { diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h index db8a11716e5..095b1565bd2 100644 --- a/storage/connect/jdbconn.h +++ b/storage/connect/jdbconn.h @@ -24,7 +24,7 @@ //efine MAX_FNAME_LEN 256 // Max size of field names //efine MAX_STRING_INFO 256 // Max size of string from SQLGetInfo //efine MAX_DNAME_LEN 256 // Max size of Recordset names -#define MAX_CONNECT_LEN 512 // Max size of Connect string +//efine MAX_CONNECT_LEN 512 // Max size of Connect string //efine MAX_CURSOR_NAME 18 // Max size of a cursor name #define DEFAULT_FIELD_TYPE 0 // TYPE_NULL @@ -46,9 +46,9 @@ enum JCATINFO { typedef struct tagJCATPARM { JCATINFO Id; // Id to indicate function PQRYRES Qrp; // Result set pointer - PUCHAR DB; // Database (Schema) - PUCHAR Tab; // Table name or pattern - PUCHAR Pat; // Table type or column pattern + char *DB; // Database (Schema) + char *Tab; // Table name or pattern + char *Pat; // Table type or column pattern } JCATPARM; typedef jint(JNICALL *CRTJVM) (JavaVM **, void **, void *); @@ -169,12 +169,15 @@ protected: jmethodID intfldid; // The IntField method ID jmethodID dblfldid; // The DoubleField method ID jmethodID fltfldid; // The FloatField method ID - jmethodID datfldid; // The TimestampField method ID + jmethodID datfldid; // The DateField method ID + jmethodID timfldid; // The TimeField method ID + jmethodID tspfldid; // The TimestampField method ID jmethodID bigfldid; // The BigintField method ID //DWORD m_LoginTimeout; //DWORD m_QueryTimeout; //DWORD m_UpdateOptions; char *Msg; + char *m_Wrap; char m_IDQuoteChar[2]; PSZ m_Driver; PSZ m_Url; diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index e94d3817926..8bddc68e2ae 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -30,6 +30,10 @@ uint GetJsonGrpSize(void); static int IsJson(UDF_ARGS *args, uint i); static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i); +static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error); +static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error); static uint JsonGrpSize = 10; @@ -1302,7 +1306,7 @@ static my_bool CalcLen(UDF_ARGS *args, my_bool obj, { char fn[_MAX_PATH]; unsigned long i, k, m, n; - long fl= 0, j = -1; + long fl = 0, j = -1; reslen = args->arg_count + 2; @@ -2126,7 +2130,7 @@ my_bool json_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *, char *) { - char *str= 0; + char *str 
= NULL; PGLOBAL g = (PGLOBAL)initid->ptr; if (!g->Xchk) { @@ -2699,7 +2703,7 @@ char *json_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif Xchk if (!CheckMemory(g, initid, args, 2, false, false, true)) { - PJSON top= 0; + PJSON top = NULL; PJVAL jvp; PJSON jsp[2] = {NULL, NULL}; @@ -4899,7 +4903,7 @@ char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, my_bool b = true; PJSON jsp; PJSNX jsx; - PJVAL jvp= 0; + PJVAL jvp = NULL; PBSON bsp = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; PGLOBAL gb = GetMemPtr(g, args, 0); diff --git a/storage/connect/mysql-test/connect/r/jdbc.result b/storage/connect/mysql-test/connect/r/jdbc.result index 5e844bc9900..9b6f0e65102 100644 --- a/storage/connect/mysql-test/connect/r/jdbc.result +++ b/storage/connect/mysql-test/connect/r/jdbc.result @@ -1,3 +1,4 @@ +SET GLOBAL time_zone='+1:00'; CREATE DATABASE connect; USE connect; CREATE TABLE t2 ( @@ -99,8 +100,8 @@ George San Jose 1981-08-10 2010-06-02 Sam Chicago 1979-11-22 2007-10-10 James Dallas 1992-05-13 2009-12-14 Bill Boston 1986-09-11 2008-02-10 -Donald Atlanta 1999-04-01 2016-03-31 -Mick New York 1980-01-20 2002-09-11 +Donald Atlanta 1999-03-31 2016-03-30 +Mick New York 1980-01-20 2002-09-10 Tom Seatle 2002-03-15 1970-01-01 DROP TABLE t3; # @@ -110,7 +111,7 @@ CREATE TABLE t3 ( name CHAR(9) NOT NULL, city CHAR(12) NOT NULL, age INT(2)) -engine=CONNECT table_type=FIX file_name='girls.txt'; +ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=2; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN connect.boys b where g.city = b.city; name name city Mary John Boston @@ -167,8 +168,11 @@ serialno name sex title manager department secretary salary 00137 BROWNY 1 ENGINEER 40567 0319 12345 10500.00 73111 WHEELFOR 1 SALESMAN 70012 0318 24888 10030.00 00023 MARTIN 1 ENGINEER 40567 0319 12345 10000.00 +# +# Option Driver is required to find the Driver class inside the executable jar file +# USE test; -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root' OPTION_LIST='Driver=org.mariadb.jdbc.Driver'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -180,7 +184,7 @@ t1 CREATE TABLE `t1` ( `department` char(4) NOT NULL, `secretary` char(5) NOT NULL, `salary` double(12,2) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root' `TABLE_TYPE`='JDBC' `TABNAME`='emp' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mariadb://localhost:PORT/connect?user=root' `TABLE_TYPE`='JDBC' `TABNAME`='emp' `OPTION_LIST`='Driver=org.mariadb.jdbc.Driver' SELECT * FROM t1; serialno name sex title manager department secretary salary 74200 BANCROFT 2 SALESMAN 70012 0318 24888 9600.00 @@ -260,10 +264,8 @@ DROP TABLE t2; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:mariadb://localhost:PORT/connect' option_list='User=root,Maxres=50'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Table_Type Remark -connect NULL tx1 BASE TABLE +connect NULL tx1 TABLE DROP TABLE t1; DROP TABLE connect.tx1; DROP DATABASE connect; -SET GLOBAL connect_jvm_path=NULL; -SET GLOBAL connect_class_path=NULL; -SET GLOBAL time_zone = SYSTEM; +SET GLOBAL time_zone=SYSTEM; diff --git a/storage/connect/mysql-test/connect/r/jdbc_new.result b/storage/connect/mysql-test/connect/r/jdbc_new.result index e5356edd5d8..14381b0b11f 
100644 --- a/storage/connect/mysql-test/connect/r/jdbc_new.result +++ b/storage/connect/mysql-test/connect/r/jdbc_new.result @@ -1,3 +1,4 @@ +SET GLOBAL time_zone='+1:00'; CREATE TABLE t1 (a int, b char(10)); INSERT INTO t1 VALUES (NULL,NULL),(0,'test00'),(1,'test01'),(2,'test02'),(3,'test03'); SELECT * FROM t1; @@ -10,6 +11,7 @@ NULL NULL # # Testing errors # +SET GLOBAL time_zone='+1:00'; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=unknown'; SELECT * FROM t1; @@ -32,15 +34,13 @@ t1 CREATE TABLE `t1` ( `y` char(10) DEFAULT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`=JDBC SELECT * FROM t1; -ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Unknown column 'x' in 'field list' -Query is : SELECT x, y FROM t1' from CONNECT +ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Unknown column 'x' in 'field list'' from CONNECT DROP TABLE t1; CREATE TABLE t1 (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root'; ALTER TABLE t1 RENAME t1backup; SELECT * FROM t1; -ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Table 'test.t1' doesn't exist -Query is : SELECT a, b FROM t1' from CONNECT +ERROR HY000: Got error 174 'ExecuteQuery: java.sql.SQLSyntaxErrorException: Table 'test.t1' doesn't exist' from CONNECT ALTER TABLE t1backup RENAME t1; DROP TABLE t1; # @@ -201,16 +201,15 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` date DEFAULT NULL, - `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `b` datetime DEFAULT NULL, `c` time DEFAULT NULL, - `d` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `e` date DEFAULT NULL + `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `e` year(4) DEFAULT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=root' `TABLE_TYPE`='JDBC' SELECT * FROM t1; a b c d e -2003-05-27 2003-05-27 10:45:23 10:45:23 2003-05-27 10:45:23 1970-01-01 +2003-05-27 2003-05-27 11:45:23 10:45:23 2003-05-27 10:45:23 2003 DROP TABLE t1; DROP TABLE t1; -SET GLOBAL connect_jvm_path=NULL; -SET GLOBAL connect_class_path=NULL; -SET GLOBAL time_zone = SYSTEM; +SET GLOBAL time_zone=SYSTEM; +SET GLOBAL time_zone=SYSTEM; diff --git a/storage/connect/mysql-test/connect/std_data/JdbcMariaDB.jar b/storage/connect/mysql-test/connect/std_data/JdbcMariaDB.jar new file mode 100644 index 00000000000..81f91e4465a Binary files /dev/null and b/storage/connect/mysql-test/connect/std_data/JdbcMariaDB.jar differ diff --git a/storage/connect/mysql-test/connect/t/jdbc.test b/storage/connect/mysql-test/connect/t/jdbc.test index 9389747ad9c..247bd406100 100644 --- a/storage/connect/mysql-test/connect/t/jdbc.test +++ b/storage/connect/mysql-test/connect/t/jdbc.test @@ -1,4 +1,5 @@ -- source jdbconn.inc +SET GLOBAL time_zone='+1:00'; let $MYSQLD_DATADIR= `select @@datadir`; --copy_file $MTR_SUITE_DIR/std_data/girls.txt $MYSQLD_DATADIR/test/girls.txt @@ -80,7 +81,7 @@ CREATE TABLE t3 ( name CHAR(9) NOT NULL, city CHAR(12) NOT NULL, age INT(2)) -engine=CONNECT table_type=FIX file_name='girls.txt'; +ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=2; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN connect.boys b where g.city = b.city; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN t1 b where g.city = b.city; 
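A note on the date/time columns exercised above: jdbconn.cpp now resolves DateField, TimeField and TimestampField with the JNI signature "(ILjava/lang/String;)I", so the wrapper hands back an int number of seconds since the epoch instead of a java.sql.Timestamp object, and the conversion to a displayable value then goes through the session time zone, which is presumably why these tests now pin time_zone to '+1:00'. The accessors actually shipped inside JdbcInterface.jar are not visible in this diff; the helper below is only a sketch of what they could look like (the class name EpochFieldSketch and its static methods are illustrative, and the millisecond-to-second division mirrors the getTime()/1000 conversion the old C++ code performed).

import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;

public class EpochFieldSketch {
  // Fetch a DATE column by rank or by label and return it as epoch seconds,
  // matching the int return type implied by "(ILjava/lang/String;)I".
  public static int dateField(ResultSet rs, int n, String name) throws SQLException {
    Date d = (n > 0) ? rs.getDate(n) : rs.getDate(name);
    return (d != null) ? (int)(d.getTime() / 1000L) : 0;
  } // end of dateField

  // Same idea for TIMESTAMP values.
  public static int timestampField(ResultSet rs, int n, String name) throws SQLException {
    Timestamp t = (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name);
    return (t != null) ? (int)(t.getTime() / 1000L) : 0;
  } // end of timestampField
} // end of class EpochFieldSketch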
DROP TABLE t1, t3, connect.boys; @@ -102,9 +103,12 @@ CREATE TABLE emp ( ENGINE=connect TABLE_TYPE=fix FILE_NAME='employee.dat' ENDING=1; SELECT * FROM emp; +--echo # +--echo # Option Driver is required to find the Driver class inside the executable jar file +--echo # USE test; --replace_result $PORT PORT ---eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:$PORT/connect?user=root' +--eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME=emp CONNECTION='jdbc:mariadb://localhost:$PORT/connect?user=root' OPTION_LIST='Driver=org.mariadb.jdbc.Driver' --replace_result $PORT PORT --eval SHOW CREATE TABLE t1 SELECT * FROM t1; @@ -113,7 +117,7 @@ SELECT name, title, salary FROM t1 WHERE sex = 1; DROP TABLE t1, connect.emp; # -# Testing remote command execution +# Testing remote command execution (Driver option is no more necessary) # --replace_result $PORT PORT --eval CREATE TABLE t2 (command varchar(128) not null,number int(5) not null flag=1,message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mariadb://localhost:$PORT/connect' OPTION_LIST='User=root,Execsrc=1' @@ -139,5 +143,5 @@ DROP TABLE connect.tx1; --remove_file $MYSQLD_DATADIR/connect/employee.dat DROP DATABASE connect; --remove_file $MYSQLD_DATADIR/test/girls.txt - +SET GLOBAL time_zone=SYSTEM; -- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/jdbc_new.test b/storage/connect/mysql-test/connect/t/jdbc_new.test index 33ec1b343cc..d1ad5117b72 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_new.test +++ b/storage/connect/mysql-test/connect/t/jdbc_new.test @@ -8,6 +8,8 @@ connection master; -- source jdbconn.inc connection slave; +SET GLOBAL time_zone='+1:00'; + CREATE TABLE t1 (a int, b char(10)); INSERT INTO t1 VALUES (NULL,NULL),(0,'test00'),(1,'test01'),(2,'test02'),(3,'test03'); SELECT * FROM t1; @@ -16,6 +18,7 @@ SELECT * FROM t1; --echo # Testing errors --echo # connection master; +SET GLOBAL time_zone='+1:00'; # Bad user name # Suppress "mysql_real_connect failed:" (printed in _DEBUG build) @@ -173,7 +176,9 @@ DROP TABLE t1; connection slave; DROP TABLE t1; +SET GLOBAL time_zone=SYSTEM; connection master; +SET GLOBAL time_zone=SYSTEM; -- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/jdbconn.inc b/storage/connect/mysql-test/connect/t/jdbconn.inc index 0bac0b35fc4..05122f51924 100644 --- a/storage/connect/mysql-test/connect/t/jdbconn.inc +++ b/storage/connect/mysql-test/connect/t/jdbconn.inc @@ -12,19 +12,20 @@ if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES } DROP TABLE t1; -# This is specific and explains why this test is disabled. -# You should edit this file to reflect what is the required files location on your machine. +# You cand edit this file to reflect what is the required files location on your machine. # This is the path to the JVM library (dll or so) -SET GLOBAL connect_jvm_path='C:\\Program Files\\Java\\jdk1.8.0_77\\jre\\bin\\client'; +# If not set CONNECT will try to use the JAVA_HOME environment variable +# and if not found try to find it in the registers (Windows only) +#SET GLOBAL connect_jvm_path='C:\\Program Files\\Java\\jdk1.8.0_77\\jre\\bin\\client'; # The complete class path send when creating the Java Virtual Machine is, in that order: # 1 - The current directory. # 2 - The paths of the connect_class_path global variable. # 3 - The paths of the CLASSPATH environment variable. -# These are the paths to the needed classes or jar files. 
The Apache ones are only for the JdbcApacheInterface wrapper. -SET GLOBAL connect_class_path='E:\\MariaDB-10.1\\Connect\\storage\\connect;E:\\MariaDB-10.1\\Connect\\sql\\data\\postgresql-9.4.1208.jar;E:\\Oracle\\ojdbc6.jar;E:\\Apache\\commons-dbcp2-2.1.1\\commons-dbcp2-2.1.1.jar;E:\\Apache\\commons-pool2-2.4.2\\commons-pool2-2.4.2.jar;E:\\Apache\\commons-logging-1.2\\commons-logging-1.2.jar'; +# In this test we use an executable jar file that contains all what is needed. +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/JdbcMariaDB.jar'; -# On my machine, paths to the JDK classes and to the MySQL and MariaDB drivers are defined in the CLASSPATH environment variable +# Paths to the JDK classes and to the MySQL and MariaDB drivers can be defined in the CLASSPATH environment variable #CREATE FUNCTION envar RETURNS STRING SONAME 'ha_connect.dll'; #SELECT envar('CLASSPATH'); diff --git a/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc b/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc index 48e321495ad..d70e594df63 100644 --- a/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc +++ b/storage/connect/mysql-test/connect/t/jdbconn_cleanup.inc @@ -1,6 +1,8 @@ +--disable_query_log --disable_warnings #DROP FUNCTION envar; SET GLOBAL connect_jvm_path=NULL; SET GLOBAL connect_class_path=NULL; SET GLOBAL time_zone = SYSTEM; --enable_warnings +--enable_query_log diff --git a/storage/connect/odbccat.h b/storage/connect/odbccat.h index 1b5febadd3a..3b729bcb4bb 100644 --- a/storage/connect/odbccat.h +++ b/storage/connect/odbccat.h @@ -21,5 +21,5 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, char *colpat, int maxres, bool info, POPARM sop); PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop); PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - int maxres, bool info, POPARM sop); + char *tabtyp, int maxres, bool info, POPARM sop); PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info); diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 8b2626fe962..863d3320f7f 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -606,7 +606,7 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info) /* an ODBC database that will be retrieved by GetData commands. 
*/ /**************************************************************************/ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - int maxres, bool info, POPARM sop) + char *tabtyp, int maxres, bool info, POPARM sop) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING}; @@ -668,7 +668,7 @@ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, if (!(cap = AllocCatInfo(g, CAT_TAB, db, tabpat, qrp))) return NULL; -//cap->Pat = (PUCHAR)tabtyp; + cap->Pat = (PUCHAR)tabtyp; if (trace) htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol); @@ -1752,7 +1752,7 @@ bool ODBConn::BindParam(ODBCCOL *colp) void *buf; int buftype = colp->GetResultType(); SQLUSMALLINT n = colp->GetRank(); - SQLSMALLINT ct, sqlt, dec, nul; + SQLSMALLINT ct, sqlt, dec, nul __attribute__((unused)); SQLULEN colsize; SQLLEN len; SQLLEN *strlen = colp->GetStrLen(); diff --git a/storage/connect/odbconn.h b/storage/connect/odbconn.h index 6a24334f08c..063985218ec 100644 --- a/storage/connect/odbconn.h +++ b/storage/connect/odbconn.h @@ -25,7 +25,7 @@ //efine MAX_FNAME_LEN 256 // Max size of field names #define MAX_STRING_INFO 256 // Max size of string from SQLGetInfo //efine MAX_DNAME_LEN 256 // Max size of Recordset names -#define MAX_CONNECT_LEN 512 // Max size of Connect string +#define MAX_CONNECT_LEN 1024 // Max size of Connect string //efine MAX_CURSOR_NAME 18 // Max size of a cursor name #define DEFAULT_FIELD_TYPE SQL_TYPE_NULL // pick "C" data type to match SQL data type diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index f507e3df3ea..06a2c025827 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -96,7 +96,7 @@ bool ExactInfo(void); /***********************************************************************/ JDBCDEF::JDBCDEF(void) { - Driver = Url = Tabname = Tabschema = Username = NULL; + Driver = Url = Wrapname =Tabname = Tabschema = Username = Colpat = NULL; Password = Tabcat = Tabtype = Srcdef = Qchar = Qrystr = Sep = NULL; Options = Quoted = Maxerr = Maxres = Memory = 0; Scrollable = Xsrc = false; @@ -233,11 +233,18 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) Read_Only = true; + Wrapname = GetStringCatInfo(g, "Wrapper", NULL); Tabcat = GetStringCatInfo(g, "Qualifier", NULL); Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); Tabschema = GetStringCatInfo(g, "Dbname", NULL); Tabschema = GetStringCatInfo(g, "Schema", Tabschema); - Tabtype = GetStringCatInfo(g, "Tabtype", NULL); + + if (Catfunc == FNC_COL) + Colpat = GetStringCatInfo(g, "Colpat", NULL); + + if (Catfunc == FNC_TABLE) + Tabtype = GetStringCatInfo(g, "Tabtype", NULL); + Qrystr = GetStringCatInfo(g, "Query_String", "?"); Sep = GetStringCatInfo(g, "Separator", NULL); Xsrc = GetBoolCatInfo("Execsrc", FALSE); @@ -325,6 +332,7 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp) if (tdp) { Ops.Driver = tdp->Driver; Ops.Url = tdp->Url; + WrapName = tdp->Wrapname; TableName = tdp->Tabname; Schema = tdp->Tabschema; Ops.User = tdp->Username; @@ -341,6 +349,7 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp) Memory = tdp->Memory; Ops.Scrollable = tdp->Scrollable; } else { + WrapName = NULL; TableName = NULL; Schema = NULL; Ops.Driver = NULL; @@ -386,6 +395,7 @@ TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBASE(tdbp) { Jcp = tdbp->Jcp; // is that right ? 
Cnp = tdbp->Cnp; + WrapName = tdbp->WrapName; TableName = tdbp->TableName; Schema = tdbp->Schema; Ops = tdbp->Ops; @@ -1787,12 +1797,20 @@ PQRYRES TDBJTB::GetResult(PGLOBAL g) /* --------------------------TDBJDBCL class -------------------------- */ +/***********************************************************************/ +/* TDBJDBCL class constructor. */ +/***********************************************************************/ +TDBJDBCL::TDBJDBCL(PJDBCDEF tdp) : TDBJTB(tdp) +{ + Colpat = tdp->Colpat; +} // end of TDBJDBCL constructor + /***********************************************************************/ /* GetResult: Get the list of JDBC table columns. */ /***********************************************************************/ PQRYRES TDBJDBCL::GetResult(PGLOBAL g) { - return JDBCColumns(g, Schema, Tab, NULL, Maxres, false, &Ops); + return JDBCColumns(g, Schema, Tab, Colpat, Maxres, false, &Ops); } // end of GetResult #if 0 diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 537276a6a7f..7244ebd3832 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -26,6 +26,7 @@ class DllExport JDBCDEF : public TABDEF { /* Logical table description */ friend class TDBXJDC; friend class TDBJDRV; friend class TDBJTB; + friend class TDBJDBCL; public: // Constructor JDBCDEF(void); @@ -53,11 +54,13 @@ protected: PSZ Driver; /* JDBC driver */ PSZ Url; /* JDBC driver URL */ PSZ Tabname; /* External table name */ + PSZ Wrapname; /* Java wrapper name */ PSZ Tabschema; /* External table schema */ PSZ Username; /* User connect name */ PSZ Password; /* Password connect info */ PSZ Tabcat; /* External table catalog */ PSZ Tabtype; /* External table type */ + PSZ Colpat; /* Catalog column pattern */ PSZ Srcdef; /* The source table SQL definition */ PSZ Qchar; /* Identifier quoting character */ PSZ Qrystr; /* The original query */ @@ -131,6 +134,7 @@ protected: JDBCCOL *Cnp; // Points to count(*) column JDBCPARM Ops; // Additional parameters PSTRG Query; // Constructed SQL query + char *WrapName; // Points to Java wrapper name char *TableName; // Points to JDBC table name char *Schema; // Points to JDBC table Schema char *User; // User connect info @@ -317,14 +321,15 @@ protected: class TDBJDBCL : public TDBJTB { public: // Constructor - TDBJDBCL(PJDBCDEF tdp) : TDBJTB(tdp) {} + TDBJDBCL(PJDBCDEF tdp); protected: // Specific routines virtual PQRYRES GetResult(PGLOBAL g); - // No additional Members -}; // end of class TDBJCL + // Members + char *Colpat; // Points to catalog column pattern +}; // end of class TDBJDBCL #if 0 /***********************************************************************/ diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 5fd0534210d..e76d9c46bd3 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -1,7 +1,7 @@ /************* Tabodbc C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABODBC */ /* ------------- */ -/* Version 3.0 */ +/* Version 3.1 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -96,7 +96,7 @@ bool ExactInfo(void); ODBCDEF::ODBCDEF(void) { Connect = Tabname = Tabschema = Username = Password = NULL; - Tabcat = Srcdef = Qchar = Qrystr = Sep = NULL; + Tabcat = Colpat = Srcdef = Qchar = Qrystr = Sep = NULL; Catver = Options = Cto = Qto = Quoted = Maxerr = Maxres = Memory = 0; Scrollable = Xsrc = UseCnc = false; } // end of ODBCDEF constructor @@ -120,7 +120,7 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) Tabschema = GetStringCatInfo(g, "Schema", 
Tabschema); Tabcat = GetStringCatInfo(g, "Qualifier", NULL); Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); - Username = GetStringCatInfo(g, "User", NULL); + Username = GetStringCatInfo(g, "User", NULL); Password = GetStringCatInfo(g, "Password", NULL); if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) @@ -141,7 +141,13 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt) Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch - UseCnc = GetBoolCatInfo("UseDSN", false); + if (Catfunc == FNC_COL) + Colpat = GetStringCatInfo(g, "Colpat", NULL); + + if (Catfunc == FNC_TABLE) + Tabtyp = GetStringCatInfo(g, "Tabtype", NULL); + + UseCnc = GetBoolCatInfo("UseDSN", false); // Memory was Boolean, it is now integer if (!(Memory = GetIntCatInfo("Memory", 0))) @@ -681,7 +687,7 @@ bool TDBODBC::MakeCommand(PGLOBAL g) } else { sprintf(g->Message, "Cannot use this %s command", (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE"); - return NULL; + return false; } // endif p Query = new(g) STRING(g, 0, stmt); @@ -1768,6 +1774,7 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp) Dsn = tdp->GetConnect(); Schema = tdp->GetTabschema(); Tab = tdp->GetTabname(); + Tabtyp = tdp->Tabtyp; Ops.User = tdp->Username; Ops.Pwd = tdp->Password; Ops.Cto = tdp->Cto; @@ -1780,17 +1787,25 @@ TDBOTB::TDBOTB(PODEF tdp) : TDBDRV(tdp) /***********************************************************************/ PQRYRES TDBOTB::GetResult(PGLOBAL g) { - return ODBCTables(g, Dsn, Schema, Tab, Maxres, false, &Ops); + return ODBCTables(g, Dsn, Schema, Tab, Tabtyp, Maxres, false, &Ops); } // end of GetResult /* ---------------------------TDBOCL class --------------------------- */ +/***********************************************************************/ +/* TDBOCL class constructor. */ +/***********************************************************************/ +TDBOCL::TDBOCL(PODEF tdp) : TDBOTB(tdp) +{ + Colpat = tdp->Colpat; +} // end of TDBOTB constructor + /***********************************************************************/ /* GetResult: Get the list of ODBC table columns. 
*/ /***********************************************************************/ PQRYRES TDBOCL::GetResult(PGLOBAL g) { - return ODBCColumns(g, Dsn, Schema, Tab, NULL, Maxres, false, &Ops); + return ODBCColumns(g, Dsn, Schema, Tab, Colpat, Maxres, false, &Ops); } // end of GetResult /* ------------------------ End of Tabodbc --------------------------- */ diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index 6440dee830d..aa6592d8abf 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -25,7 +25,8 @@ class DllExport ODBCDEF : public TABDEF { /* Logical table description */ friend class TDBXDBC; friend class TDBDRV; friend class TDBOTB; - public: + friend class TDBOCL; +public: // Constructor ODBCDEF(void); @@ -54,7 +55,9 @@ class DllExport ODBCDEF : public TABDEF { /* Logical table description */ PSZ Username; /* User connect name */ PSZ Password; /* Password connect info */ PSZ Tabcat; /* External table catalog */ - PSZ Srcdef; /* The source table SQL definition */ + PSZ Tabtyp; /* Catalog table type */ + PSZ Colpat; /* Catalog column pattern */ + PSZ Srcdef; /* The source table SQL definition */ PSZ Qchar; /* Identifier quoting character */ PSZ Qrystr; /* The original query */ PSZ Sep; /* Decimal separator */ @@ -326,7 +329,8 @@ class TDBOTB : public TDBDRV { char *Dsn; // Points to connection string char *Schema; // Points to schema name or NULL char *Tab; // Points to ODBC table name or pattern - ODBCPARM Ops; // Additional parameters + char *Tabtyp; // Points to ODBC table type + ODBCPARM Ops; // Additional parameters }; // end of class TDBOTB /***********************************************************************/ @@ -335,13 +339,14 @@ class TDBOTB : public TDBDRV { class TDBOCL : public TDBOTB { public: // Constructor - TDBOCL(PODEF tdp) : TDBOTB(tdp) {} + TDBOCL(PODEF tdp); protected: // Specific routines virtual PQRYRES GetResult(PGLOBAL g); - // No additional Members + // Members + char *Colpat; // Points to column pattern }; // end of class TDBOCL #endif // !NODBC -- cgit v1.2.1 From ec725089cfde2886d74c7939c64ad6e2266dc853 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 15 Jul 2016 00:50:18 +0200 Subject: Change jdbc test to reflect girls.txt LF ending --- storage/connect/mysql-test/connect/r/jdbc.result | 2 +- storage/connect/mysql-test/connect/t/jdbc.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/connect/mysql-test/connect/r/jdbc.result b/storage/connect/mysql-test/connect/r/jdbc.result index 9b6f0e65102..895b4070d70 100644 --- a/storage/connect/mysql-test/connect/r/jdbc.result +++ b/storage/connect/mysql-test/connect/r/jdbc.result @@ -111,7 +111,7 @@ CREATE TABLE t3 ( name CHAR(9) NOT NULL, city CHAR(12) NOT NULL, age INT(2)) -ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=2; +ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=1; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN connect.boys b where g.city = b.city; name name city Mary John Boston diff --git a/storage/connect/mysql-test/connect/t/jdbc.test b/storage/connect/mysql-test/connect/t/jdbc.test index 247bd406100..41fd298776b 100644 --- a/storage/connect/mysql-test/connect/t/jdbc.test +++ b/storage/connect/mysql-test/connect/t/jdbc.test @@ -81,7 +81,7 @@ CREATE TABLE t3 ( name CHAR(9) NOT NULL, city CHAR(12) NOT NULL, age INT(2)) -ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=2; +ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='girls.txt' ENDING=1; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN 
connect.boys b where g.city = b.city; SELECT g.name, b.name, g.city FROM t3 g STRAIGHT_JOIN t1 b where g.city = b.city; DROP TABLE t1, t3, connect.boys; -- cgit v1.2.1 From 5cf49cdf92bb57c2e20f72422a22768be8c7a8a8 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Fri, 15 Jul 2016 23:51:30 +0300 Subject: MDEV-10248 Cannot Remove Test Tables While dropping the test database, use IF EXISTS to avoid bogus errors --- scripts/mysql_secure_installation.pl.in | 2 +- scripts/mysql_secure_installation.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mysql_secure_installation.pl.in b/scripts/mysql_secure_installation.pl.in index 188a6bd7104..32331c3d601 100644 --- a/scripts/mysql_secure_installation.pl.in +++ b/scripts/mysql_secure_installation.pl.in @@ -217,7 +217,7 @@ sub remove_remote_root { sub remove_test_database { print " - Dropping test database...\n"; - if (do_query("DROP DATABASE test;")) { + if (do_query("DROP DATABASE IF EXISTS test;")) { print " ... Success!\n"; } else { print " ... Failed! Not critical, keep moving...\n"; diff --git a/scripts/mysql_secure_installation.sh b/scripts/mysql_secure_installation.sh index 9e9bce9fa87..7fb8b73ef8f 100644 --- a/scripts/mysql_secure_installation.sh +++ b/scripts/mysql_secure_installation.sh @@ -324,7 +324,7 @@ remove_remote_root() { remove_test_database() { echo " - Dropping test database..." - do_query "DROP DATABASE test;" + do_query "DROP DATABASE IF EXISTS test;" if [ $? -eq 0 ]; then echo " ... Success!" else -- cgit v1.2.1 From bf2e31500c9a0a7fcdccfb724e9447347a3ab684 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Mon, 18 Jul 2016 11:50:08 +0400 Subject: MDEV-8569 build_table_filename() doesn't support temporary tables. Temporary tables support added for RENAME and ALTER TABLE. --- mysql-test/r/grant2.result | 2 +- mysql-test/r/temp_table.result | 3 +++ .../suite/innodb/r/innodb-fk-warnings.result | 14 ++++++++++ mysql-test/suite/innodb/t/innodb-fk-warnings.test | 20 +++++++------- mysql-test/t/temp_table.test | 4 +++ sql/sql_parse.cc | 1 + sql/sql_rename.cc | 31 ++++++++++++++++++++-- sql/sql_table.cc | 10 ++++--- sql/sql_table.h | 3 ++- 9 files changed, 70 insertions(+), 18 deletions(-) diff --git a/mysql-test/r/grant2.result b/mysql-test/r/grant2.result index 3df9a5480d3..9e9b3ffc4e5 100644 --- a/mysql-test/r/grant2.result +++ b/mysql-test/r/grant2.result @@ -708,7 +708,7 @@ mysqltest_db1.t3 preload_keys status OK # RENAME (doesn't work for temporary tables, thus should fail). # RENAME TABLE t3 TO t3_1; -ERROR 42000: DROP, ALTER command denied to user 'mysqltest_u1'@'localhost' for table 't3' +ERROR 42000: INSERT, CREATE command denied to user 'mysqltest_u1'@'localhost' for table 't3_1' # # HANDLER OPEN/READ/CLOSE. # diff --git a/mysql-test/r/temp_table.result b/mysql-test/r/temp_table.result index 0a1701be0d7..dd8bab31d75 100644 --- a/mysql-test/r/temp_table.result +++ b/mysql-test/r/temp_table.result @@ -291,3 +291,6 @@ test.t1 repair status OK test.t2 repair status OK test.t3 repair status OK DROP TABLES t1, t2, t3; +CREATE TEMPORARY TABLE t1 (a int); +RENAME TABLE t1 TO t2; +DROP TABLE t2; diff --git a/mysql-test/suite/innodb/r/innodb-fk-warnings.result b/mysql-test/suite/innodb/r/innodb-fk-warnings.result index eddedfc3620..d7c7acfb424 100644 --- a/mysql-test/suite/innodb/r/innodb-fk-warnings.result +++ b/mysql-test/suite/innodb/r/innodb-fk-warnings.result @@ -70,6 +70,20 @@ Level Code Message Warning 150 Alter table `mysqld.1`.`t1` with foreign key constraint failed. 
Referenced table `mysqld.1`.`t1` not found in the data dictionary close to foreign key(b) references t1(a). Error 1005 Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed") Warning 1215 Cannot add foreign key constraint +create temporary table t2(a int, foreign key(a) references t1(a)) engine=innodb; +ERROR HY000: Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed") +show warnings; +Level Code Message +Warning 150 Create table `mysqld.1`.`t2` with foreign key constraint failed. Referenced table `mysqld.1`.`t1` not found in the data dictionary near 'foreign key(a) references t1(a)) engine=innodb'. +Error 1005 Can't create table `test`.`t2` (errno: 150 "Foreign key constraint is incorrectly formed") +Warning 1215 Cannot add foreign key constraint +alter table t1 add foreign key(b) references t1(a); +ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed") +show warnings; +Level Code Message +Warning 150 Alter table `mysqld.1`.`t1` with foreign key constraint failed. Referenced table `mysqld.1`.`t1` not found in the data dictionary near 'foreign key(b) references t1(a)'. +Error 1005 Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed") +Warning 1215 Cannot add foreign key constraint drop table t1; create table t1(a int not null primary key, b int, key(b)) engine=innodb; alter table t1 add foreign key(a,b) references t1(a); diff --git a/mysql-test/suite/innodb/t/innodb-fk-warnings.test b/mysql-test/suite/innodb/t/innodb-fk-warnings.test index a95a7f55a40..f45ae00d788 100644 --- a/mysql-test/suite/innodb/t/innodb-fk-warnings.test +++ b/mysql-test/suite/innodb/t/innodb-fk-warnings.test @@ -87,16 +87,16 @@ create temporary table t1(a int not null primary key, b int, key(b)) engine=inno --echo Warning 150 Alter table `mysqld.1`.`t1` with foreign key constraint failed. Referenced table `mysqld.1`.`t1` not found in the data dictionary close to foreign key(b) references t1(a). 
--echo Error 1005 Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed") --echo Warning 1215 Cannot add foreign key constraint -#--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ -#--error 1005 -#create temporary table t2(a int, foreign key(a) references t1(a)) engine=innodb; -#--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ -#show warnings; -#--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ -#--error 1005 -#alter table t1 add foreign key(b) references t1(a); -#--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ -#show warnings; +--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ +--error 1005 +create temporary table t2(a int, foreign key(a) references t1(a)) engine=innodb; +--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ +show warnings; +--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ +--error 1005 +alter table t1 add foreign key(b) references t1(a); +--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/ +show warnings; drop table t1; # diff --git a/mysql-test/t/temp_table.test b/mysql-test/t/temp_table.test index f594f0c6c48..1de0f086a5e 100644 --- a/mysql-test/t/temp_table.test +++ b/mysql-test/t/temp_table.test @@ -319,3 +319,7 @@ INSERT INTO t3 VALUES (101), (102), (103); REPAIR TABLE t1, t2, t3; DROP TABLES t1, t2, t3; + +CREATE TEMPORARY TABLE t1 (a int); +RENAME TABLE t1 TO t2; +DROP TABLE t2; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index fdaf3323366..118602c5127 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -508,6 +508,7 @@ void init_update_queries(void) sql_command_flags[SQLCOM_INSERT_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DELETE]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DELETE_MULTI]|= CF_PREOPEN_TMP_TABLES; + sql_command_flags[SQLCOM_RENAME_TABLE]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_REPLACE_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_SET_OPTION]|= CF_PREOPEN_TMP_TABLES; diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index e0fd7005cd5..1a9cb842e6a 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -212,6 +212,28 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) } +static bool +do_rename_temporary(THD *thd, TABLE_LIST *ren_table, TABLE_LIST *new_table, + bool skip_error) +{ + const char *new_alias; + DBUG_ENTER("do_rename_temporary"); + + new_alias= (lower_case_table_names == 2) ? new_table->alias : + new_table->table_name; + + if (is_temporary_table(new_table)) + { + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); + DBUG_RETURN(1); // This can't be skipped + } + + + DBUG_RETURN(rename_temporary_table(thd, ren_table->table, + new_table->db, new_alias)); +} + + /* Rename a single table or a view @@ -317,6 +339,8 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name, DBUG_RETURN(0); } + + /* Rename all tables in list; Return pointer to wrong entry if something goes wrong. Note that the table_list may be empty! @@ -351,8 +375,11 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) for (ren_table= table_list; ren_table; ren_table= new_table->next_local) { new_table= ren_table->next_local; - if (do_rename(thd, ren_table, new_table->db, new_table->table_name, - new_table->alias, skip_error)) + + if (is_temporary_table(ren_table) ? 
+ do_rename_temporary(thd, ren_table, new_table, skip_error) : + do_rename(thd, ren_table, new_table->db, new_table->table_name, + new_table->alias, skip_error)) DBUG_RETURN(ren_table); } DBUG_RETURN(0); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 7bf008870b9..7cf31ee4fe8 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2710,14 +2710,15 @@ bool log_drop_table(THD *thd, const char *db_name, size_t db_name_length, */ bool quick_rm_table(THD *thd, handlerton *base, const char *db, - const char *table_name, uint flags) + const char *table_name, uint flags, const char *table_path) { char path[FN_REFLEN + 1]; bool error= 0; DBUG_ENTER("quick_rm_table"); - uint path_length= build_table_filename(path, sizeof(path) - 1, - db, table_name, reg_ext, flags); + uint path_length= table_path ? + (strxnmov(path, sizeof(path) - 1, table_path, reg_ext, NullS) - path) : + build_table_filename(path, sizeof(path)-1, db, table_name, reg_ext, flags); if (mysql_file_delete(key_file_frm, path, MYF(0))) error= 1; /* purecov: inspected */ path[path_length - reg_ext_length]= '\0'; // Remove reg_ext @@ -9220,7 +9221,8 @@ err_new_table_cleanup: else (void) quick_rm_table(thd, new_db_type, alter_ctx.new_db, alter_ctx.tmp_name, - (FN_IS_TMP | (no_ha_table ? NO_HA_TABLE : 0))); + (FN_IS_TMP | (no_ha_table ? NO_HA_TABLE : 0)), + alter_ctx.get_tmp_path()); /* No default value was provided for a DATE/DATETIME field, the diff --git a/sql/sql_table.h b/sql/sql_table.h index 2b383623873..6c74586b2f5 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -247,7 +247,8 @@ bool log_drop_table(THD *thd, const char *db_name, size_t db_name_length, const char *table_name, size_t table_name_length, bool temporary_table); bool quick_rm_table(THD *thd, handlerton *base, const char *db, - const char *table_name, uint flags); + const char *table_name, uint flags, + const char *table_path=0); void close_cached_table(THD *thd, TABLE *table); void sp_prepare_create_field(THD *thd, Create_field *sql_field); int prepare_create_field(Create_field *sql_field, -- cgit v1.2.1 From 9b668d7c8ae9cc2a72e2d968f8a6332db0742cd8 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Mon, 18 Jul 2016 11:01:03 -0400 Subject: bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index ac291a97eb1..eda3118b090 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=1 -MYSQL_VERSION_PATCH=16 +MYSQL_VERSION_PATCH=17 -- cgit v1.2.1 From f0386598dd825e3a42c463699988f13c123dd6be Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 19 Jul 2016 20:44:02 +0000 Subject: MDEV-10314 : wsrep_client_thread was not set in threadpool. Fixed threadpool_add_connection to use thd_prepare_connection() to match thread-per-conection flow. 
--- sql/threadpool_common.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 5bcea767aae..ae8a81b1bcd 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -148,9 +148,8 @@ int threadpool_add_connection(THD *thd) if (!setup_connection_thread_globals(thd)) { - if (!login_connection(thd)) + if (!thd_prepare_connection(thd)) { - prepare_new_connection_state(thd); /* Check if THD is ok, as prepare_new_connection_state() -- cgit v1.2.1 From 3db92ee43358f5df256bf1b0db4955ec86bdceee Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Tue, 3 May 2016 16:22:01 +0200 Subject: MW-265 Add support for wsrep_max_ws_rows Variable wsrep_max_ws_rows limits the number of rows that a transaction can insert/update/delete. --- .../suite/galera/r/galera_var_max_ws_rows.result | 93 ++++++++++++++++ .../suite/galera/t/galera_var_max_ws_rows.test | 118 +++++++++++++++++++++ sql/handler.cc | 33 ++++++ sql/sql_class.cc | 8 +- sql/sql_class.h | 1 + sql/sys_vars.cc | 2 +- 6 files changed, 252 insertions(+), 3 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_var_max_ws_rows.result create mode 100644 mysql-test/suite/galera/t/galera_var_max_ws_rows.test diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_rows.result b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result new file mode 100644 index 00000000000..e41f0f96c95 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result @@ -0,0 +1,93 @@ +CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); +CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; +SET GLOBAL wsrep_max_ws_rows = 4; +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +INSERT INTO t1 (f2) VALUES (5); +ERROR HY000: wsrep_max_ws_rows exceeded +COMMIT; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +UPDATE t1 SET f2 = 10 WHERE f2 = 4; +ERROR HY000: wsrep_max_ws_rows exceeded +COMMIT; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +DELETE FROM t1 WHERE f2 = 1; +ERROR HY000: wsrep_max_ws_rows exceeded +COMMIT; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +SET GLOBAL wsrep_max_ws_rows = 5; +INSERT INTO t1 (f2) VALUES (1),(2),(3),(4),(5); +SET GLOBAL wsrep_max_ws_rows = 4; +UPDATE t1 SET f2 = f2 + 10; +ERROR HY000: wsrep_max_ws_rows exceeded +SELECT COUNT(*) = 5 FROM t1; +COUNT(*) = 5 +1 +DELETE FROM t1 WHERE f2 < 10; +ERROR HY000: wsrep_max_ws_rows exceeded +SELECT COUNT(*) = 5 FROM t1; +COUNT(*) = 5 +1 +INSERT INTO t1 (f2) SELECT * FROM ten; +ERROR HY000: wsrep_max_ws_rows exceeded +SELECT COUNT(*) = 5 FROM t1; +COUNT(*) = 5 +1 +INSERT INTO t1 (f2) VALUES (10),(20),(30),(40),(50); +ERROR HY000: wsrep_max_ws_rows exceeded +SELECT COUNT(*) = 5 FROM t1; +COUNT(*) = 5 +1 +SET GLOBAL wsrep_max_ws_rows = 10; +DELETE FROM t1 WHERE f2 < 10; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +SET GLOBAL wsrep_max_ws_rows = 100; +SELECT COUNT(*) = 100 FROM t1; +COUNT(*) = 100 +1 +DELETE FROM t1 WHERE f2 < 101; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +SET GLOBAL wsrep_max_ws_rows = 9999; 
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; +ERROR HY000: wsrep_max_ws_rows exceeded +SET GLOBAL wsrep_max_ws_rows = 10000; +INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; +SET GLOBAL wsrep_max_ws_rows = 9999; +UPDATE t1 SET f2 = 2 WHERE f2 = 1; +ERROR HY000: wsrep_max_ws_rows exceeded +SET GLOBAL wsrep_max_ws_rows = 10000; +UPDATE t1 SET f2 = 2 WHERE f2 = 1; +SET GLOBAL wsrep_max_ws_rows = 9999; +DELETE FROM t1 WHERE f2 = 2; +ERROR HY000: wsrep_max_ws_rows exceeded +SET GLOBAL wsrep_max_ws_rows = 10000; +DELETE FROM t1 WHERE f2 = 2; +SELECT COUNT(*) = 0 FROM t1; +COUNT(*) = 0 +1 +DROP TABLE t1; +DROP TABLE ten; diff --git a/mysql-test/suite/galera/t/galera_var_max_ws_rows.test b/mysql-test/suite/galera/t/galera_var_max_ws_rows.test new file mode 100644 index 00000000000..d086142d1e1 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_var_max_ws_rows.test @@ -0,0 +1,118 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc + +CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); + +CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB; + +--let $wsrep_max_ws_rows_orig = `SELECT @@wsrep_max_ws_rows` +SET GLOBAL wsrep_max_ws_rows = 4; + +# Test that wsrep_max_ws_rows is enforced with multi statement transactions + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (5); +COMMIT; +SELECT COUNT(*) = 0 FROM t1; + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +--error ER_ERROR_DURING_COMMIT +UPDATE t1 SET f2 = 10 WHERE f2 = 4; +COMMIT; +SELECT COUNT(*) = 0 FROM t1; + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +INSERT INTO t1 (f2) VALUES (3); +INSERT INTO t1 (f2) VALUES (4); +--error ER_ERROR_DURING_COMMIT +DELETE FROM t1 WHERE f2 = 1; +COMMIT; +SELECT COUNT(*) = 0 FROM t1; + + +# Test that wsrep_max_ws_rows is enforced on sigle statements + +SET GLOBAL wsrep_max_ws_rows = 5; +INSERT INTO t1 (f2) VALUES (1),(2),(3),(4),(5); +SET GLOBAL wsrep_max_ws_rows = 4; + +--error ER_ERROR_DURING_COMMIT +UPDATE t1 SET f2 = f2 + 10; +SELECT COUNT(*) = 5 FROM t1; + +--error ER_ERROR_DURING_COMMIT +DELETE FROM t1 WHERE f2 < 10; +SELECT COUNT(*) = 5 FROM t1; + +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) SELECT * FROM ten; +SELECT COUNT(*) = 5 FROM t1; + +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (10),(20),(30),(40),(50); +SELECT COUNT(*) = 5 FROM t1; + +# Fewer than wsrep_max_ws_rows is OK + +SET GLOBAL wsrep_max_ws_rows = 10; +DELETE FROM t1 WHERE f2 < 10; +SELECT COUNT(*) = 0 FROM t1; + +# Test a series of transactions + +--disable_query_log +SET GLOBAL wsrep_max_ws_rows = 5; +let $i= 100; +while ($i) +{ + START TRANSACTION; + --eval INSERT INTO t1 (f2) VALUES ($i); + COMMIT; + dec $i; +} +--enable_query_log +SET GLOBAL wsrep_max_ws_rows = 100; +SELECT COUNT(*) = 100 FROM t1; +DELETE FROM t1 WHERE f2 < 101; +SELECT COUNT(*) = 0 FROM t1; + +# Test large statements + +SET GLOBAL wsrep_max_ws_rows = 9999; +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4; +SET GLOBAL wsrep_max_ws_rows = 10000; +INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS 
a3, ten AS a4; + +SET GLOBAL wsrep_max_ws_rows = 9999; +--error ER_ERROR_DURING_COMMIT +UPDATE t1 SET f2 = 2 WHERE f2 = 1; +SET GLOBAL wsrep_max_ws_rows = 10000; +UPDATE t1 SET f2 = 2 WHERE f2 = 1; + +SET GLOBAL wsrep_max_ws_rows = 9999; +--error ER_ERROR_DURING_COMMIT +DELETE FROM t1 WHERE f2 = 2; +SET GLOBAL wsrep_max_ws_rows = 10000; +DELETE FROM t1 WHERE f2 = 2; + +SELECT COUNT(*) = 0 FROM t1; + +--disable_query_log +--eval SET GLOBAL wsrep_max_ws_rows = $wsrep_max_ws_rows_orig +--enable_query_log + +DROP TABLE t1; +DROP TABLE ten; diff --git a/sql/handler.cc b/sql/handler.cc index e84e1b52ca2..6fa937faa84 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6098,6 +6098,17 @@ int handler::ha_write_row(uchar *buf) rows_changed++; if (unlikely(error= binlog_log_row(table, 0, buf, log_func))) DBUG_RETURN(error); /* purecov: inspected */ +#ifdef WITH_WSREP + current_thd->wsrep_affected_rows++; + if (wsrep_max_ws_rows && + current_thd->wsrep_exec_mode != REPL_RECV && + current_thd->wsrep_affected_rows > wsrep_max_ws_rows) + { + current_thd->transaction_rollback_request= TRUE; + my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); + DBUG_RETURN(ER_ERROR_DURING_COMMIT); + } +#endif /* WITH_WSREP */ DEBUG_SYNC_C("ha_write_row_end"); DBUG_RETURN(0); @@ -6131,6 +6142,17 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data) rows_changed++; if (unlikely(error= binlog_log_row(table, old_data, new_data, log_func))) return error; +#ifdef WITH_WSREP + current_thd->wsrep_affected_rows++; + if (wsrep_max_ws_rows && + current_thd->wsrep_exec_mode != REPL_RECV && + current_thd->wsrep_affected_rows > wsrep_max_ws_rows) + { + current_thd->transaction_rollback_request= TRUE; + my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); + return ER_ERROR_DURING_COMMIT; + } +#endif /* WITH_WSREP */ return 0; } @@ -6158,6 +6180,17 @@ int handler::ha_delete_row(const uchar *buf) rows_changed++; if (unlikely(error= binlog_log_row(table, buf, 0, log_func))) return error; +#ifdef WITH_WSREP + current_thd->wsrep_affected_rows++; + if (wsrep_max_ws_rows && + current_thd->wsrep_exec_mode != REPL_RECV && + current_thd->wsrep_affected_rows > wsrep_max_ws_rows) + { + current_thd->transaction_rollback_request= TRUE; + my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); + return ER_ERROR_DURING_COMMIT; + } +#endif /* WITH_WSREP */ return 0; } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 5705694208a..4873586aba5 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1213,7 +1213,8 @@ THD::THD() wsrep_mysql_replicated = 0; wsrep_TOI_pre_query = NULL; wsrep_TOI_pre_query_len = 0; - wsrep_sync_wait_gtid= WSREP_GTID_UNDEFINED; + wsrep_sync_wait_gtid = WSREP_GTID_UNDEFINED; + wsrep_affected_rows = 0; #endif /* Call to init() below requires fully initialized Open_tables_state. 
*/ reset_open_tables_state(this); @@ -1629,7 +1630,8 @@ void THD::init(void) wsrep_mysql_replicated = 0; wsrep_TOI_pre_query = NULL; wsrep_TOI_pre_query_len = 0; - wsrep_sync_wait_gtid= WSREP_GTID_UNDEFINED; + wsrep_sync_wait_gtid = WSREP_GTID_UNDEFINED; + wsrep_affected_rows = 0; /* @@wsrep_causal_reads is now being handled via wsrep_sync_wait, update it @@ -2383,6 +2385,8 @@ void THD::cleanup_after_query() #ifdef WITH_WSREP wsrep_sync_wait_gtid= WSREP_GTID_UNDEFINED; + if (!in_active_multi_stmt_transaction()) + wsrep_affected_rows= 0; #endif /* WITH_WSREP */ DBUG_VOID_RETURN; diff --git a/sql/sql_class.h b/sql/sql_class.h index b9f0c0a0ae7..9ee5a40dc99 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3878,6 +3878,7 @@ public: bool wsrep_apply_toi; /* applier processing in TOI */ bool wsrep_skip_append_keys; wsrep_gtid_t wsrep_sync_wait_gtid; + ulong wsrep_affected_rows; #endif /* WITH_WSREP */ }; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c49a5de75e3..0bfa0bf5eb0 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4694,7 +4694,7 @@ static Sys_var_ulong Sys_wsrep_max_ws_size ( static Sys_var_ulong Sys_wsrep_max_ws_rows ( "wsrep_max_ws_rows", "Max number of rows in write set", GLOBAL_VAR(wsrep_max_ws_rows), CMD_LINE(REQUIRED_ARG), - VALID_RANGE(1, 1048576), DEFAULT(131072), BLOCK_SIZE(1)); + VALID_RANGE(0, 1048576), DEFAULT(0), BLOCK_SIZE(1)); static Sys_var_charptr Sys_wsrep_notify_cmd( "wsrep_notify_cmd", "", -- cgit v1.2.1 From e373f60fd161eaf050eb117c9a05d8d83fe0e501 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 20 Jul 2016 18:12:17 -0400 Subject: MW-265 Add support for wsrep_max_ws_rows Update test results. --- mysql-test/suite/sys_vars/r/wsrep_max_ws_rows_basic.result | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/wsrep_max_ws_rows_basic.result b/mysql-test/suite/sys_vars/r/wsrep_max_ws_rows_basic.result index 15438a2afd5..d96bc8708c5 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_max_ws_rows_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_max_ws_rows_basic.result @@ -6,7 +6,7 @@ SET @wsrep_max_ws_rows_global_saved = @@global.wsrep_max_ws_rows; # default SELECT @@global.wsrep_max_ws_rows; @@global.wsrep_max_ws_rows -131072 +0 # scope SELECT @@session.wsrep_max_ws_rows; @@ -26,11 +26,9 @@ SELECT @@global.wsrep_max_ws_rows; @@global.wsrep_max_ws_rows 131073 SET @@global.wsrep_max_ws_rows=0; -Warnings: -Warning 1292 Truncated incorrect wsrep_max_ws_rows value: '0' SELECT @@global.wsrep_max_ws_rows; @@global.wsrep_max_ws_rows -1 +0 SET @@global.wsrep_max_ws_rows=default; SELECT @global.wsrep_max_ws_rows; @global.wsrep_max_ws_rows -- cgit v1.2.1 From 1b5da2ca49f69605ccfe4d98e9207e7b8551e21f Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 21 Jul 2016 15:32:28 +0400 Subject: MDEV-10316 - main.type_date fails around midnight sporadically A better fix for MySQL Bug#41776: use hard timestamp rather than unreliable sleep. 
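The mechanism relied on here is that SET timestamp pins the session's statement time, so NOW(), CURDATE() and DATE(NOW()) in the test become deterministic instead of racing the wall clock around midnight. A minimal illustration of the idiom (the constant matches the one used in the test, but any fixed value works):

  SET timestamp=UNIX_TIMESTAMP('2016-07-21 14:48:18');
  SELECT NOW(), DATE(NOW());   -- both derived from the pinned timestamp
  SET timestamp=DEFAULT;       -- restore the real clock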
--- mysql-test/r/type_date.result | 2 ++ mysql-test/t/type_date.test | 15 +++------------ 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index ecbda1d13e6..ad7560fa3f8 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -165,6 +165,7 @@ str_to_date( '', a ) NULL DROP TABLE t1; CREATE TABLE t1 (a DATE, b INT, PRIMARY KEY (a,b)); +SET timestamp=UNIX_TIMESTAMP('2016-07-21 14:48:18'); INSERT INTO t1 VALUES (DATE(NOW()), 1); SELECT COUNT(*) FROM t1 WHERE a = NOW(); COUNT(*) @@ -192,6 +193,7 @@ COUNT(*) EXPLAIN SELECT COUNT(*) FROM t1 WHERE a = NOW(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where +SET timestamp=DEFAULT; DROP TABLE t1; CREATE TABLE t1 (a DATE); CREATE TABLE t2 (a DATE); diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test index 8b0c5dcf330..832e72520ce 100644 --- a/mysql-test/t/type_date.test +++ b/mysql-test/t/type_date.test @@ -169,18 +169,8 @@ DROP TABLE t1; # CREATE TABLE t1 (a DATE, b INT, PRIMARY KEY (a,b)); -## The current sub test could fail (difference to expected result) if we -## have just reached midnight. -## (Bug#41776 type_date.test may fail if run around midnight) -## Therefore we sleep a bit if we are too close to midnight. -## The complete test itself needs in average less than 1 second. -## Therefore a time_distance to midnight of 5 seconds should be sufficient. -if (`SELECT CURTIME() > SEC_TO_TIME(24 * 3600 - 5)`) -{ - # We are here when CURTIME() is between '23:59:56' and '23:59:59'. - # So a sleep time of 5 seconds brings us between '00:00:01' and '00:00:04'. - --real_sleep 5 -} + +SET timestamp=UNIX_TIMESTAMP('2016-07-21 14:48:18'); INSERT INTO t1 VALUES (DATE(NOW()), 1); SELECT COUNT(*) FROM t1 WHERE a = NOW(); EXPLAIN SELECT COUNT(*) FROM t1 WHERE a = NOW(); @@ -192,6 +182,7 @@ EXPLAIN SELECT COUNT(*) FROM t1 WHERE a = NOW() AND b = 1; ALTER TABLE t1 DROP PRIMARY KEY; SELECT COUNT(*) FROM t1 WHERE a = NOW(); EXPLAIN SELECT COUNT(*) FROM t1 WHERE a = NOW(); +SET timestamp=DEFAULT; DROP TABLE t1; -- cgit v1.2.1 From e6a64e8f0ea36f12bd24ba906aa1f4e2e367a8e0 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 22 Jul 2016 09:19:35 +1000 Subject: MDEV-10294: MTR using --valgrind-option to specify a tool / fixing callgrind [10.1] (#200) * MDEV-10294: Put testname into environment as MTR_TEST_NAME during MTR * MDEV-10294: restructure mtr to allow --valgrind-option=--tool=XXX * MDEV-10294: mtr valgrind - supressions all tools + feedback --- mysql-test/mysql-test-run.pl | 48 ++++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index dbd58f6da00..d0bff1af99e 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -317,7 +317,6 @@ our $opt_user = "root"; our $opt_valgrind= 0; my $opt_valgrind_mysqld= 0; my $opt_valgrind_mysqltest= 0; -my @default_valgrind_args= ("--show-reachable=yes"); my @valgrind_args; my $opt_strace= 0; my $opt_strace_client; @@ -1722,17 +1721,27 @@ sub command_line_setup { # Set special valgrind options unless options passed on command line push(@valgrind_args, "--trace-children=yes") unless @valgrind_args; + unshift(@valgrind_args, "--tool=callgrind"); } - if ( $opt_valgrind ) + # default to --tool=memcheck + if ($opt_valgrind && ! 
grep(/^--tool=/i, @valgrind_args)) { - # Set valgrind_options to default unless already defined - push(@valgrind_args, @default_valgrind_args) + # Set valgrind_option unless already defined + push(@valgrind_args, ("--show-reachable=yes", "--leak-check=yes", + "--num-callers=16")) unless @valgrind_args; + unshift(@valgrind_args, "--tool=memcheck"); + } + if ( $opt_valgrind ) + { # Make valgrind run in quiet mode so it only print errors push(@valgrind_args, "--quiet" ); + push(@valgrind_args, "--suppressions=${glob_mysql_test_dir}/valgrind.supp") + if -f "$glob_mysql_test_dir/valgrind.supp"; + mtr_report("Running valgrind with options \"", join(" ", @valgrind_args), "\""); } @@ -3764,6 +3773,7 @@ sub run_testcase ($$) { my $print_freq=20; mtr_verbose("Running test:", $tinfo->{name}); + $ENV{'MTR_TEST_NAME'} = $tinfo->{name}; resfile_report_test($tinfo) if $opt_resfile; # Allow only alpanumerics pluss _ - + . in combination names, @@ -5819,29 +5829,15 @@ sub valgrind_arguments { my $args= shift; my $exe= shift; - if ( $opt_callgrind) + # Ensure the jemalloc works with mysqld + if ($$exe =~ /mysqld/) { - mtr_add_arg($args, "--tool=callgrind"); - mtr_add_arg($args, "--base=$opt_vardir/log"); - } - else - { - mtr_add_arg($args, "--tool=memcheck"); # From >= 2.1.2 needs this option - mtr_add_arg($args, "--leak-check=yes"); - mtr_add_arg($args, "--num-callers=16"); - mtr_add_arg($args, "--suppressions=%s/valgrind.supp", $glob_mysql_test_dir) - if -f "$glob_mysql_test_dir/valgrind.supp"; - - # Ensure the jemalloc works with mysqld - if ($$exe =~ /mysqld/) - { - my %somalloc=( - 'system jemalloc' => 'libjemalloc*', - 'bundled jemalloc' => 'NONE' - ); - my ($syn) = $somalloc{$mysqld_variables{'version-malloc-library'}}; - mtr_add_arg($args, '--soname-synonyms=somalloc=%s', $syn) if $syn; - } + my %somalloc=( + 'system jemalloc' => 'libjemalloc*', + 'bundled jemalloc' => 'NONE' + ); + my ($syn) = $somalloc{$mysqld_variables{'version-malloc-library'}}; + mtr_add_arg($args, '--soname-synonyms=somalloc=%s', $syn) if $syn; } # Add valgrind options, can be overriden by user -- cgit v1.2.1 From a52d3aa831454aa2e7dd4dfde9c65d4b87532caa Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 22 Jun 2016 11:17:44 +0200 Subject: MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops Do not set 'optimized' flag until whole optimization procedure is finished. 
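In practice the JOIN::optimized boolean is replaced by a three-valued state (NOT_OPTIMIZED, OPTIMIZATION_IN_PROGRESS, OPTIMIZATION_DONE), and Item_subselect::is_expensive() now treats anything other than OPTIMIZATION_DONE as still expensive, so optimization-time evaluation of the subquery is avoided while the surrounding join is only partly optimized. A query of the shape added to the test below is enough to exercise that window, e.g.:

  CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM;
  CREATE TABLE t2 (f3 INT) ENGINE=MyISAM;
  SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' );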
--- mysql-test/r/subselect.result | 16 +++++++++++ mysql-test/r/subselect_no_exists_to_in.result | 16 +++++++++++ mysql-test/r/subselect_no_mat.result | 16 +++++++++++ mysql-test/r/subselect_no_opts.result | 16 +++++++++++ mysql-test/r/subselect_no_scache.result | 16 +++++++++++ mysql-test/r/subselect_no_semijoin.result | 16 +++++++++++ mysql-test/t/subselect.test | 17 ++++++++++++ sql/item_subselect.cc | 39 ++++++++++++++++++++------- sql/sql_select.cc | 32 +++++++--------------- sql/sql_select.h | 5 ++-- 10 files changed, 155 insertions(+), 34 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 6a531997d79..428cf89c36c 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -7157,5 +7157,21 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests diff --git a/mysql-test/r/subselect_no_exists_to_in.result b/mysql-test/r/subselect_no_exists_to_in.result index aa6843409c0..b24edd438b2 100644 --- a/mysql-test/r/subselect_no_exists_to_in.result +++ b/mysql-test/r/subselect_no_exists_to_in.result @@ -7157,8 +7157,24 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests set optimizer_switch=default; select @@optimizer_switch like '%exists_to_in=off%'; @@optimizer_switch like '%exists_to_in=off%' diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result index 754aec1db20..23b8ade7ef5 100644 --- a/mysql-test/r/subselect_no_mat.result +++ b/mysql-test/r/subselect_no_mat.result @@ -7150,8 +7150,24 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests set optimizer_switch=default; select @@optimizer_switch like '%materialization=on%'; @@optimizer_switch like '%materialization=on%' diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result index e05dd4d140d..2907fd3f4b3 100644 --- 
a/mysql-test/r/subselect_no_opts.result +++ b/mysql-test/r/subselect_no_opts.result @@ -7148,6 +7148,22 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests set @optimizer_switch_for_subselect_test=null; diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result index 71ade62b423..08394bc6332 100644 --- a/mysql-test/r/subselect_no_scache.result +++ b/mysql-test/r/subselect_no_scache.result @@ -7163,8 +7163,24 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests set optimizer_switch=default; select @@optimizer_switch like '%subquery_cache=on%'; @@optimizer_switch like '%subquery_cache=on%' diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result index 43d191b1225..9b1d4d24031 100644 --- a/mysql-test/r/subselect_no_semijoin.result +++ b/mysql-test/r/subselect_no_semijoin.result @@ -7148,7 +7148,23 @@ INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); f foo +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); +f +foo drop table t1; SET NAMES default; +# +# MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +# +SET NAMES utf8; +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +f1 f2 f3 +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo'); +f1 f2 f3 +DROP TABLE t1, t2; +SET NAMES default; +End of 10.1 tests set @optimizer_switch_for_subselect_test=null; set @join_cache_level_for_subselect_test=NULL; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index 3599b523d91..dba2154ef73 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -6009,5 +6009,22 @@ SET NAMES utf8; CREATE TABLE t1 (f VARCHAR(8)) ENGINE=MyISAM; INSERT INTO t1 VALUES ('foo'); SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar' UNION SELECT 'baz' ); +SELECT f FROM t1 WHERE f > ALL ( SELECT 'bar'); drop table t1; SET NAMES default; + +--echo # +--echo # MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops +--echo # +SET NAMES utf8; + +CREATE TABLE t1 (f1 VARCHAR(3), f2 INT UNSIGNED) ENGINE=MyISAM; +CREATE TABLE t2 (f3 INT) ENGINE=MyISAM; + +SELECT * FROM t1, t2 WHERE f3 = f2 AND f1 > ANY ( SELECT 'foo' UNION SELECT 'bar' ); +SELECT * FROM t1, t2 WHERE f3 = f2 
AND f1 > ANY ( SELECT 'foo'); + +DROP TABLE t1, t2; +SET NAMES default; + +--echo End of 10.1 tests diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 301856ea3b8..2999e01d166 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -560,6 +560,21 @@ void Item_subselect::recalc_used_tables(st_select_lex *new_parent, bool Item_subselect::is_expensive() { double examined_rows= 0; + bool all_are_simple= true; + + /* check extremely simple select */ + if (!unit->first_select()->next_select()) // no union + { + /* + such single selects works even without optimization because + can not makes loops + */ + SELECT_LEX *sl= unit->first_select(); + JOIN *join = sl->join; + if (join && !join->tables_list && !sl->first_inner_unit()) + return false; + } + for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) { @@ -569,23 +584,27 @@ bool Item_subselect::is_expensive() if (!cur_join) return true; - /* very simple subquery */ - if (!cur_join->tables_list && !sl->first_inner_unit()) - return false; - /* If the subquery is not optimised or in the process of optimization it supposed to be expensive */ - if (!cur_join->optimized) + if (cur_join->optimization_state != JOIN::OPTIMIZATION_DONE) return true; + if (!cur_join->tables_list && !sl->first_inner_unit()) + continue; + /* Subqueries whose result is known after optimization are not expensive. Such subqueries have all tables optimized away, thus have no join plan. */ if ((cur_join->zero_result_cause || !cur_join->tables_list)) - return false; + continue; + + /* + This is not simple SELECT in union so we can not go by simple condition + */ + all_are_simple= false; /* If a subquery is not optimized we cannot estimate its cost. A subquery is @@ -606,7 +625,8 @@ bool Item_subselect::is_expensive() examined_rows+= cur_join->get_examined_rows(); } - return (examined_rows > thd->variables.expensive_subquery_limit); + return !all_are_simple && + (examined_rows > thd->variables.expensive_subquery_limit); } @@ -3672,7 +3692,7 @@ int subselect_single_select_engine::exec() SELECT_LEX *save_select= thd->lex->current_select; thd->lex->current_select= select_lex; - if (!join->optimized) + if (join->optimization_state == JOIN::NOT_OPTIMIZED) { SELECT_LEX_UNIT *unit= select_lex->master_unit(); @@ -5321,7 +5341,8 @@ int subselect_hash_sj_engine::exec() */ thd->lex->current_select= materialize_engine->select_lex; /* The subquery should be optimized, and materialized only once. 
*/ - DBUG_ASSERT(materialize_join->optimized && !is_materialized); + DBUG_ASSERT(materialize_join->optimization_state == JOIN::OPTIMIZATION_DONE && + !is_materialized); materialize_join->exec(); if ((res= MY_TEST(materialize_join->error || thd->is_fatal_error || thd->is_error()))) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 387e0403f96..96ac6f43c45 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -694,7 +694,7 @@ JOIN::prepare(Item ***rref_pointer_array, DBUG_ENTER("JOIN::prepare"); // to prevent double initialization on EXPLAIN - if (optimized) + if (optimization_state != JOIN::NOT_OPTIMIZED) DBUG_RETURN(0); conds= conds_init; @@ -1032,24 +1032,13 @@ err: int JOIN::optimize() { - bool was_optimized= optimized; + // to prevent double initialization on EXPLAIN + if (optimization_state != JOIN::NOT_OPTIMIZED) + return FALSE; + optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS; + int res= optimize_inner(); - /* - If we're inside a non-correlated subquery, this function may be - called for the second time after the subquery has been executed - and deleted. The second call will not produce a valid query plan, it will - short-circuit because optimized==TRUE. - - "was_optimized != optimized" is here to handle this case: - - first optimization starts, gets an error (from a const. cheap - subquery), returns 1 - - another JOIN::optimize() call made, and now join->optimize() will - return 0, even though we never had a query plan. - - Can have QEP_NOT_PRESENT_YET for degenerate queries (for example, - SELECT * FROM tbl LIMIT 0) - */ - if (was_optimized != optimized && !res && have_query_plan != QEP_DELETED) + if (!res && have_query_plan != QEP_DELETED) { create_explain_query_if_not_exists(thd->lex, thd->mem_root); have_query_plan= QEP_AVAILABLE; @@ -1058,6 +1047,7 @@ int JOIN::optimize() !skip_sort_order && !no_order && (order || group_list), select_distinct); } + optimization_state= JOIN::OPTIMIZATION_DONE; return res; } @@ -1083,10 +1073,6 @@ JOIN::optimize_inner() DBUG_ENTER("JOIN::optimize"); do_send_rows = (unit->select_limit_cnt) ? 
1 : 0; - // to prevent double initialization on EXPLAIN - if (optimized) - DBUG_RETURN(0); - optimized= 1; DEBUG_SYNC(thd, "before_join_optimize"); THD_STAGE_INFO(thd, stage_optimizing); @@ -2060,7 +2046,7 @@ int JOIN::init_execution() { DBUG_ENTER("JOIN::init_execution"); - DBUG_ASSERT(optimized); + DBUG_ASSERT(optimization_state == JOIN::OPTIMIZATION_DONE); DBUG_ASSERT(!(select_options & SELECT_DESCRIBE)); initialized= true; diff --git a/sql/sql_select.h b/sql/sql_select.h index 89ee63e87b0..dfa96f1c81c 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1290,7 +1290,8 @@ public: enum join_optimization_state { NOT_OPTIMIZED=0, OPTIMIZATION_IN_PROGRESS=1, OPTIMIZATION_DONE=2}; - bool optimized; ///< flag to avoid double optimization in EXPLAIN + // state of JOIN optimization + enum join_optimization_state optimization_state; bool initialized; ///< flag to avoid double init_execution calls Explain_select *explain; @@ -1378,7 +1379,7 @@ public: ref_pointer_array= items0= items1= items2= items3= 0; ref_pointer_array_size= 0; zero_result_cause= 0; - optimized= 0; + optimization_state= JOIN::NOT_OPTIMIZED; have_query_plan= QEP_NOT_PRESENT_YET; initialized= 0; cleaned= 0; -- cgit v1.2.1 From 2f5ae0fbe76d749518b8f943b61c198e4b9315aa Mon Sep 17 00:00:00 2001 From: Rik Prohaska Date: Sun, 24 Jul 2016 10:06:18 -0400 Subject: MDEV-10412 fix WITH_ASAN option for 10.1 --- CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index ab981bc330f..cc696e36682 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -187,12 +187,16 @@ IF (WITH_ASAN) # gcc 4.8.1 and new versions of clang MY_CHECK_AND_SET_COMPILER_FLAG("-fsanitize=address -O1 -Wno-error -fPIC" DEBUG RELWITHDEBINFO) + SET(HAVE_C_FSANITIZE ${HAVE_C__fsanitize_address__O1__Wno_error__fPIC}) + SET(HAVE_CXX_FSANITIZE ${HAVE_CXX__fsanitize_address__O1__Wno_error__fPIC}) IF(HAVE_C_FSANITIZE AND HAVE_CXX_FSANITIZE) SET(WITH_ASAN_OK 1) ELSE() # older versions of clang MY_CHECK_AND_SET_COMPILER_FLAG("-faddress-sanitizer -O1 -fPIC" DEBUG RELWITHDEBINFO) + SET(HAVE_C_FADDRESS ${HAVE_C__faddress_sanitizer__O1__fPIC}) + SET(HAVE_CXX_FADDRESS ${HAVE_CXX__faddress_sanitizer__O1__fPIC}) IF(HAVE_C_FADDRESS AND HAVE_CXX_FADDRESS) SET(WITH_ASAN_OK 1) ENDIF() -- cgit v1.2.1 From df4fddb8961355fc68f32375d2bac5735057f205 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Mon, 25 Jul 2016 01:57:00 +0300 Subject: MDEV-10428 main.information_schema_stats fails sporadically in buildbot Resultsets which contain more than one row need to be sorted --- mysql-test/r/information_schema_stats.result | 4 ++-- mysql-test/t/information_schema_stats.test | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/information_schema_stats.result b/mysql-test/r/information_schema_stats.result index cd73636879c..bc2ce9f0294 100644 --- a/mysql-test/r/information_schema_stats.result +++ b/mysql-test/r/information_schema_stats.result @@ -16,8 +16,8 @@ count(*) 2 select * from information_schema.index_statistics where table_schema='test' and table_name='just_a_test'; TABLE_SCHEMA TABLE_NAME INDEX_NAME ROWS_READ -test just_a_test IND_just_a_test_state 2 test just_a_test IND_just_a_test_first_name_last_name 1 +test just_a_test IND_just_a_test_state 2 select * from information_schema.table_statistics where table_schema='test' and table_name='just_a_test'; TABLE_SCHEMA TABLE_NAME ROWS_READ ROWS_CHANGED ROWS_CHANGED_X_INDEXES test just_a_test 18 5 5 @@ -56,9 +56,9 @@ count(*) 3 select * from 
information_schema.index_statistics where table_schema='test' and table_name='just_a_test'; TABLE_SCHEMA TABLE_NAME INDEX_NAME ROWS_READ +test just_a_test PRIMARY 5 test just_a_test first_name 1 test just_a_test state 2 -test just_a_test PRIMARY 5 select * from information_schema.table_statistics where table_schema='test' and table_name='just_a_test'; TABLE_SCHEMA TABLE_NAME ROWS_READ ROWS_CHANGED ROWS_CHANGED_X_INDEXES test just_a_test 8 5 15 diff --git a/mysql-test/t/information_schema_stats.test b/mysql-test/t/information_schema_stats.test index 38248063d68..c7f39894ce7 100644 --- a/mysql-test/t/information_schema_stats.test +++ b/mysql-test/t/information_schema_stats.test @@ -13,6 +13,7 @@ alter table just_a_test add key IND_just_a_test_first_name_last_name(first_name, alter table just_a_test add key IND_just_a_test_state(state); select count(*) from just_a_test where first_name='fc' and last_name='lc'; select count(*) from just_a_test where state = 'California'; +--sorted_result select * from information_schema.index_statistics where table_schema='test' and table_name='just_a_test'; select * from information_schema.table_statistics where table_schema='test' and table_name='just_a_test'; alter table just_a_test drop key IND_just_a_test_first_name_last_name; @@ -36,6 +37,7 @@ insert into just_a_test values(1,'fa','la','china_a',11111111,'fa_la@163.com','C select count(*) from just_a_test where first_name='fc' and last_name='lc'; select count(*) from just_a_test where state = 'California'; select count(*) from just_a_test where id between 2 and 4; +--sorted_result select * from information_schema.index_statistics where table_schema='test' and table_name='just_a_test'; select * from information_schema.table_statistics where table_schema='test' and table_name='just_a_test'; drop table just_a_test; -- cgit v1.2.1 From 5197fcf6b4611a26b3847d1101f1a4fb6d17570a Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 5 May 2016 13:20:32 +0200 Subject: MW-269 Fix outstanding issues with wsrep_max_ws_rows This patch includes two fixes: 1) Rollback when wsrep_max_ws_rows is exceeded would not switch back to previous autocommit mode; and 2) Internal rows counter would not be reset on implicit commits. 
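A condensed SQL sketch of the behaviour the two fixes guarantee (hypothetical session on a single Galera node; the table definition and the limit of 1 are illustrative, the authoritative coverage is the galera_var_max_ws_rows.test hunk below):

CREATE TABLE t1 (f1 INT AUTO_INCREMENT PRIMARY KEY, f2 INT) ENGINE=InnoDB;
SET GLOBAL wsrep_max_ws_rows = 1;

# Fix 1: hitting the limit rolls back the statement and the transaction,
# but the session must return to its previous autocommit mode.
SET AUTOCOMMIT = ON;
START TRANSACTION;
INSERT INTO t1 (f2) VALUES (1);
INSERT INTO t1 (f2) VALUES (2);   # ERROR HY000: wsrep_max_ws_rows exceeded
INSERT INTO t1 (f2) VALUES (1);   # autocommit is effective again: succeeds
INSERT INTO t1 (f2) VALUES (2);   # each statement is its own writeset: succeeds

# Fix 2: START TRANSACTION inside an open transaction commits it implicitly,
# and that implicit commit must reset the internal row counter.
START TRANSACTION;
INSERT INTO t1 (f2) VALUES (1);
START TRANSACTION;                # implicit commit, counter back to zero
INSERT INTO t1 (f2) VALUES (1);   # first row of the new writeset: succeeds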
--- mysql-test/suite/galera/r/galera_defaults.result | 2 +- .../suite/galera/r/galera_var_max_ws_rows.result | 22 +++++++++++++ .../suite/galera/t/galera_var_max_ws_rows.test | 37 ++++++++++++++++++++++ sql/handler.cc | 6 ++-- sql/wsrep_hton.cc | 1 + 5 files changed, 64 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result index e15301c019e..6442cfebcb0 100644 --- a/mysql-test/suite/galera/r/galera_defaults.result +++ b/mysql-test/suite/galera/r/galera_defaults.result @@ -29,7 +29,7 @@ WSREP_DRUPAL_282555_WORKAROUND OFF WSREP_FORCED_BINLOG_FORMAT NONE WSREP_LOAD_DATA_SPLITTING ON WSREP_LOG_CONFLICTS OFF -WSREP_MAX_WS_ROWS 131072 +WSREP_MAX_WS_ROWS 0 WSREP_MAX_WS_SIZE 1073741824 WSREP_MYSQL_REPLICATION_BUNDLE 0 WSREP_NOTIFY_CMD diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_rows.result b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result index e41f0f96c95..6e239c70a3e 100644 --- a/mysql-test/suite/galera/r/galera_var_max_ws_rows.result +++ b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result @@ -89,5 +89,27 @@ DELETE FROM t1 WHERE f2 = 2; SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +SET AUTOCOMMIT = ON; +SET GLOBAL wsrep_max_ws_rows = 1; +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +ERROR HY000: wsrep_max_ws_rows exceeded +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +SET AUTOCOMMIT = OFF; +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +ERROR HY000: wsrep_max_ws_rows exceeded +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +ERROR HY000: wsrep_max_ws_rows exceeded +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); +ERROR HY000: wsrep_max_ws_rows exceeded DROP TABLE t1; DROP TABLE ten; diff --git a/mysql-test/suite/galera/t/galera_var_max_ws_rows.test b/mysql-test/suite/galera/t/galera_var_max_ws_rows.test index d086142d1e1..944238bf1aa 100644 --- a/mysql-test/suite/galera/t/galera_var_max_ws_rows.test +++ b/mysql-test/suite/galera/t/galera_var_max_ws_rows.test @@ -110,6 +110,43 @@ DELETE FROM t1 WHERE f2 = 2; SELECT COUNT(*) = 0 FROM t1; + +# Test that wsrep_max_ws_rows is reset when switching autocommit mode + +SET AUTOCOMMIT = ON; +SET GLOBAL wsrep_max_ws_rows = 1; + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (2); + +INSERT INTO t1 (f2) VALUES (1); +INSERT INTO t1 (f2) VALUES (2); + + +SET AUTOCOMMIT = OFF; +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (2); + +INSERT INTO t1 (f2) VALUES (1); +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (2); + + +# Test that wsrep_max_ws_rows is reset on implicit commits + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); + +START TRANSACTION; +INSERT INTO t1 (f2) VALUES (1); +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 (f2) VALUES (2); + + --disable_query_log --eval SET GLOBAL wsrep_max_ws_rows = $wsrep_max_ws_rows_orig --enable_query_log diff --git a/sql/handler.cc b/sql/handler.cc index 6fa937faa84..0eef23ad753 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6104,7 +6104,7 @@ int handler::ha_write_row(uchar *buf) current_thd->wsrep_exec_mode != REPL_RECV && current_thd->wsrep_affected_rows > wsrep_max_ws_rows) { - current_thd->transaction_rollback_request= TRUE; + 
trans_rollback_stmt(current_thd) || trans_rollback(current_thd); my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); DBUG_RETURN(ER_ERROR_DURING_COMMIT); } @@ -6148,7 +6148,7 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data) current_thd->wsrep_exec_mode != REPL_RECV && current_thd->wsrep_affected_rows > wsrep_max_ws_rows) { - current_thd->transaction_rollback_request= TRUE; + trans_rollback_stmt(current_thd) || trans_rollback(current_thd); my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); return ER_ERROR_DURING_COMMIT; } @@ -6186,7 +6186,7 @@ int handler::ha_delete_row(const uchar *buf) current_thd->wsrep_exec_mode != REPL_RECV && current_thd->wsrep_affected_rows > wsrep_max_ws_rows) { - current_thd->transaction_rollback_request= TRUE; + trans_rollback_stmt(current_thd) || trans_rollback(current_thd); my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); return ER_ERROR_DURING_COMMIT; } diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc index e5ff462eb19..e1bf63cd31f 100644 --- a/sql/wsrep_hton.cc +++ b/sql/wsrep_hton.cc @@ -42,6 +42,7 @@ void wsrep_cleanup_transaction(THD *thd) thd->wsrep_trx_meta.gtid= WSREP_GTID_UNDEFINED; thd->wsrep_trx_meta.depends_on= WSREP_SEQNO_UNDEFINED; thd->wsrep_exec_mode= LOCAL_STATE; + thd->wsrep_affected_rows= 0; return; } -- cgit v1.2.1 From 74f80b349924c7f0c091a0973dea0ec61191c2c9 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Fri, 6 May 2016 16:07:53 +0200 Subject: MW-267 Enforce wsrep_max_ws_size limit in wsrep provider This changes variable wsrep_max_ws_size so that its value is linked to the value of provider option repl.max_ws_size. That is, changing the value of variable wsrep_max_ws_size will change the value of provider option repl.max_ws_size, and viceversa. The writeset size limit is always enforced in the provider, regardless of which option is used. 
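A minimal sketch of the resulting linkage, assuming a running Galera node (the values 10000 and 20000 are arbitrary; galera_var_max_ws_size.test below exercises the same round trip):

# Changing the provider option is reflected in the system variable ...
SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=10000';
SELECT @@wsrep_max_ws_size;       # now reports 10000

# ... and changing the system variable pushes the limit back into the
# provider, so there is a single effective writeset size limit.
SET GLOBAL wsrep_max_ws_size = 20000;
SHOW GLOBAL VARIABLES LIKE 'wsrep_provider_options';
# repl.max_ws_size in the provider options now reports 20000 as well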
--- mysql-test/suite/galera/r/galera_defaults.result | 2 +- .../suite/galera/r/galera_var_max_ws_size.result | 9 ++++ .../suite/galera/t/galera_var_max_ws_size.test | 23 +++++++++++ sql/sys_vars.cc | 5 ++- sql/wsrep_applier.cc | 10 +---- sql/wsrep_binlog.h | 2 +- sql/wsrep_var.cc | 48 ++++++++++++++++++++-- sql/wsrep_var.h | 2 + 8 files changed, 86 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result index 6442cfebcb0..3b89ccb7dbe 100644 --- a/mysql-test/suite/galera/r/galera_defaults.result +++ b/mysql-test/suite/galera/r/galera_defaults.result @@ -30,7 +30,7 @@ WSREP_FORCED_BINLOG_FORMAT NONE WSREP_LOAD_DATA_SPLITTING ON WSREP_LOG_CONFLICTS OFF WSREP_MAX_WS_ROWS 0 -WSREP_MAX_WS_SIZE 1073741824 +WSREP_MAX_WS_SIZE 2147483647 WSREP_MYSQL_REPLICATION_BUNDLE 0 WSREP_NOTIFY_CMD WSREP_ON ON diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_size.result b/mysql-test/suite/galera/r/galera_var_max_ws_size.result index 5a1b5cf621a..0940b5f12c0 100644 --- a/mysql-test/suite/galera/r/galera_var_max_ws_size.result +++ b/mysql-test/suite/galera/r/galera_var_max_ws_size.result @@ -5,4 +5,13 @@ ERROR HY000: Got error 5 "Input/output error" during COMMIT SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=10000'; +SELECT @@wsrep_max_ws_size = 10000; +@@wsrep_max_ws_size = 10000 +1 +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=20000'; +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=10000'; +SET GLOBAL wsrep_max_ws_size = 20000; +provider_options_match +1 DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_var_max_ws_size.test b/mysql-test/suite/galera/t/galera_var_max_ws_size.test index b66ef2d5ee2..8eb93bda9be 100644 --- a/mysql-test/suite/galera/t/galera_var_max_ws_size.test +++ b/mysql-test/suite/galera/t/galera_var_max_ws_size.test @@ -16,6 +16,29 @@ SET GLOBAL wsrep_max_ws_size = 1024; INSERT INTO t1 VALUES (DEFAULT, REPEAT('X', 1024)); SELECT COUNT(*) = 0 FROM t1; +# +# Changing repl.max_ws_size also changes wsrep_max_ws_size +# + +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=10000'; +SELECT @@wsrep_max_ws_size = 10000; + + +# +# Changing wsrep_max_ws_size is equivalent to changing repl.max_ws_size +# + +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=20000'; +--let $provider_options = `SELECT @@wsrep_provider_options` +SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=10000'; + +SET GLOBAL wsrep_max_ws_size = 20000; +--let $provider_options_updated = `SELECT @@wsrep_provider_options` + +--disable_query_log +--eval SELECT STRCMP('$provider_options', '$provider_options_updated') = 0 AS provider_options_match +--enable_query_log + --disable_query_log --eval SET GLOBAL wsrep_max_ws_size = $wsrep_max_ws_size_orig --enable_query_log diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 0bfa0bf5eb0..0ac8d40fbae 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4688,8 +4688,9 @@ static Sys_var_charptr Sys_wsrep_start_position ( static Sys_var_ulong Sys_wsrep_max_ws_size ( "wsrep_max_ws_size", "Max write set size (bytes)", GLOBAL_VAR(wsrep_max_ws_size), CMD_LINE(REQUIRED_ARG), - /* Upper limit is 65K short of 4G to avoid overlows on 32-bit systems */ - VALID_RANGE(1024, WSREP_MAX_WS_SIZE), DEFAULT(1073741824UL), BLOCK_SIZE(1)); + VALID_RANGE(1024, WSREP_MAX_WS_SIZE), DEFAULT(WSREP_MAX_WS_SIZE), + BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(wsrep_max_ws_size_update)); static Sys_var_ulong 
Sys_wsrep_max_ws_rows ( "wsrep_max_ws_rows", "Max number of rows in write set", diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc index 90c84f1c2cc..73a43185162 100644 --- a/sql/wsrep_applier.cc +++ b/sql/wsrep_applier.cc @@ -39,15 +39,9 @@ static Log_event* wsrep_read_log_event( const char *error= 0; Log_event *res= 0; - if (data_len > wsrep_max_ws_size) - { - error = "Event too big"; - goto err; - } - - res= Log_event::read_log_event(buf, data_len, &error, description_event, true); + res= Log_event::read_log_event(buf, data_len, &error, description_event, + true); -err: if (!res) { DBUG_ASSERT(error != 0); diff --git a/sql/wsrep_binlog.h b/sql/wsrep_binlog.h index a7b680f616b..c29d51caf2c 100644 --- a/sql/wsrep_binlog.h +++ b/sql/wsrep_binlog.h @@ -19,7 +19,7 @@ #include "sql_class.h" // THD, IO_CACHE #define HEAP_PAGE_SIZE 65536 /* 64K */ -#define WSREP_MAX_WS_SIZE (0xFFFFFFFFUL - HEAP_PAGE_SIZE) +#define WSREP_MAX_WS_SIZE 2147483647 /* 2GB */ /* Write the contents of a cache to a memory buffer. diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 7ac68df66bd..44d17e3e78a 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -179,6 +179,32 @@ void wsrep_start_position_init (const char* val) wsrep_set_local_position (val, false); } +static int get_provider_option_value(const char* opts, + const char* opt_name, + ulong* opt_value) +{ + int ret= 1; + ulong opt_value_tmp; + char *opt_value_str, *s, *opts_copy= my_strdup(opts, MYF(MY_WME)); + + if ((opt_value_str= strstr(opts_copy, opt_name)) == NULL) + goto end; + opt_value_str= strtok_r(opt_value_str, "=", &s); + if (opt_value_str == NULL) goto end; + opt_value_str= strtok_r(NULL, ";", &s); + if (opt_value_str == NULL) goto end; + + opt_value_tmp= strtoul(opt_value_str, NULL, 10); + if (errno == ERANGE) goto end; + + *opt_value= opt_value_tmp; + ret= 0; + +end: + my_free(opts_copy); + return ret; +} + static bool refresh_provider_options() { WSREP_DEBUG("refresh_provider_options: %s", @@ -186,9 +212,10 @@ static bool refresh_provider_options() char* opts= wsrep->options_get(wsrep); if (opts) { - if (wsrep_provider_options) my_free((void *)wsrep_provider_options); - wsrep_provider_options = (char*)my_memdup(opts, strlen(opts) + 1, - MYF(MY_WME)); + wsrep_provider_options_init(opts); + get_provider_option_value(wsrep_provider_options, + (char*)"repl.max_ws_size", + &wsrep_max_ws_size); } else { @@ -531,6 +558,21 @@ bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) return false; } +bool wsrep_max_ws_size_update (sys_var *self, THD *thd, enum_var_type) +{ + char max_ws_size_opt[128]; + my_snprintf(max_ws_size_opt, sizeof(max_ws_size_opt), + "repl.max_ws_size=%d", wsrep_max_ws_size); + wsrep_status_t ret= wsrep->options_set(wsrep, max_ws_size_opt); + if (ret != WSREP_OK) + { + WSREP_ERROR("Set options returned %d", ret); + refresh_provider_options(); + return true; + } + return refresh_provider_options(); +} + /* * Status variables stuff below */ diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h index 524dabfd9c0..f72df9d098a 100644 --- a/sql/wsrep_var.h +++ b/sql/wsrep_var.h @@ -83,4 +83,6 @@ extern bool wsrep_slave_threads_update UPDATE_ARGS; extern bool wsrep_desync_check CHECK_ARGS; extern bool wsrep_desync_update UPDATE_ARGS; +extern bool wsrep_max_ws_size_update UPDATE_ARGS; + #endif /* WSREP_VAR_H */ -- cgit v1.2.1 From cbc8a84fa2e65cad7561fa53799ca0273e8a5ff5 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 25 Jul 2016 11:51:21 -0400 Subject: MW-267 Enforce wsrep_max_ws_size limit in wsrep provider 
Update test results. --- mysql-test/r/mysqld--help.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index ef07db5ac52..8ff9073b625 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -1458,8 +1458,8 @@ wsrep-drupal-282555-workaround FALSE wsrep-forced-binlog-format NONE wsrep-load-data-splitting TRUE wsrep-log-conflicts FALSE -wsrep-max-ws-rows 131072 -wsrep-max-ws-size 1073741824 +wsrep-max-ws-rows 0 +wsrep-max-ws-size 2147483647 wsrep-mysql-replication-bundle 0 wsrep-new-cluster FALSE wsrep-node-address -- cgit v1.2.1 From 7431368eafb3e531ba3926e85cc515778f6b5e2f Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 14 Jul 2016 14:29:59 +0200 Subject: MW-292 Reset timestamp after transaction replay Transaction replay causes the THD to re-apply the replication events from execution, using the same path appliers do. While applying the log events, the THD's timestamp is set to the timestamp of the event. Setting the timestamp explicitly causes function NOW() to always the timestamp that was set. To avoid this behavior we reset the timestamp after replaying is done. --- sql/sql_class.h | 1 + sql/wsrep_thd.cc | 2 ++ 2 files changed, 3 insertions(+) diff --git a/sql/sql_class.h b/sql/sql_class.h index 9ee5a40dc99..bf3d043cc1a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -68,6 +68,7 @@ struct wsrep_thd_shadow { ulong tx_isolation; char *db; size_t db_length; + my_hrtime_t user_time; }; #endif class Reprepare_observer; diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 464a68a8221..9c2fa4ba856 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -160,6 +160,7 @@ static void wsrep_prepare_bf_thd(THD *thd, struct wsrep_thd_shadow* shadow) shadow->db = thd->db; shadow->db_length = thd->db_length; + shadow->user_time = thd->user_time; thd->reset_db(NULL, 0); } @@ -170,6 +171,7 @@ static void wsrep_return_from_bf_mode(THD *thd, struct wsrep_thd_shadow* shadow) thd->wsrep_exec_mode = shadow->wsrep_exec_mode; thd->net.vio = shadow->vio; thd->variables.tx_isolation = shadow->tx_isolation; + thd->user_time = shadow->user_time; thd->reset_db(shadow->db, shadow->db_length); delete thd->system_thread_info.rpl_sql_info; -- cgit v1.2.1 From e57287866fd33b4494839c21ccd7875480c8558d Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 15 Jul 2016 01:13:32 -0700 Subject: Galera MTR Tests: Test case for MW-292 : NOW() returns stale timestamp after transaction replay --- mysql-test/suite/galera/r/MW-292.result | 30 +++++++++++++ mysql-test/suite/galera/t/MW-292.test | 79 +++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 mysql-test/suite/galera/r/MW-292.result create mode 100644 mysql-test/suite/galera/t/MW-292.test diff --git a/mysql-test/suite/galera/r/MW-292.result b/mysql-test/suite/galera/r/MW-292.result new file mode 100644 index 00000000000..f038f880efa --- /dev/null +++ b/mysql-test/suite/galera/r/MW-292.result @@ -0,0 +1,30 @@ +CREATE TABLE rand_table (f1 FLOAT); +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (2, 'a'); +SET AUTOCOMMIT=ON; +START TRANSACTION; +UPDATE t1 SET f2 = 'b' WHERE f1 = 1; +SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE; +f1 f2 +2 a +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +COMMIT;; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +UPDATE t1 SET f2 = 'c' WHERE f1 = 2; 
+SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SELECT TIMEDIFF(SYSDATE(), NOW()) < 2; +TIMEDIFF(SYSDATE(), NOW()) < 2 +1 +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +SELECT COUNT(DISTINCT f1) = 10 FROM rand_table; +COUNT(DISTINCT f1) = 10 +1 +wsrep_local_replays +1 +DROP TABLE t1; +DROP TABLE rand_table; diff --git a/mysql-test/suite/galera/t/MW-292.test b/mysql-test/suite/galera/t/MW-292.test new file mode 100644 index 00000000000..945d9f42458 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-292.test @@ -0,0 +1,79 @@ +# +# MW-292 Reset timestamp after transaction replay +# +# We force transaction replay to happen and then we check that NOW() is not stuck in time. +# As a bonus we also check that RAND() continues to return random values after replay +# +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source suite/galera/include/galera_have_debug_sync.inc + +--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` + +CREATE TABLE rand_table (f1 FLOAT); +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (2, 'a'); + +--connection node_1 +SET AUTOCOMMIT=ON; +START TRANSACTION; + +UPDATE t1 SET f2 = 'b' WHERE f1 = 1; +SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE; + +# Block the commit +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +--connection node_1 +--send COMMIT; + +# Wait until commit is blocked +--connection node_1a +SET SESSION wsrep_sync_wait = 0; +--source include/galera_wait_sync_point.inc + +# Issue a conflicting update on node #2 +--connection node_2 +UPDATE t1 SET f2 = 'c' WHERE f1 = 2; + +# Wait for both transactions to be blocked +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'System lock'; +--source include/wait_condition.inc + +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; +--source include/wait_condition.inc + +# Unblock the commit +--connection node_1a +--source include/galera_clear_sync_point.inc +--source include/galera_signal_sync_point.inc + +# Commit succeeds via replay +--connection node_1 +--reap + +# Confirm that NOW() is not stuck in time relative to SYSDATE(); +--sleep 3 +SELECT TIMEDIFF(SYSDATE(), NOW()) < 2; + +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); + +SELECT COUNT(DISTINCT f1) = 10 FROM rand_table; + +# wsrep_local_replays has increased by 1 +--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` +--disable_query_log +--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays; +--enable_query_log + +--connection node_2 +DROP TABLE t1; +DROP TABLE rand_table; -- cgit v1.2.1 From 963673e7af5ecdfd31279ed733bcdc964b9d0619 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 25 Jul 2016 21:52:02 -0400 Subject: MW-292: Fix test case --- mysql-test/suite/galera/t/MW-292.test | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/t/MW-292.test b/mysql-test/suite/galera/t/MW-292.test index 945d9f42458..ecb1273759e 100644 --- a/mysql-test/suite/galera/t/MW-292.test +++ b/mysql-test/suite/galera/t/MW-292.test @@ -44,7 +44,7 @@ UPDATE t1 SET f2 = 'c' WHERE f1 = 2; # Wait for both transactions to be blocked --connection node_1a ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'System lock'; +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event::find_row%'; --source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; -- cgit v1.2.1 From 9f211d49562da522c8b1dec35ff871fa20f5d89f Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 19 Jul 2016 20:44:02 +0000 Subject: MDEV-10314 : wsrep_client_thread was not set in threadpool. Fixed threadpool_add_connection to use thd_prepare_connection() to match thread-per-conection flow. --- sql/threadpool_common.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 5bcea767aae..ae8a81b1bcd 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -148,9 +148,8 @@ int threadpool_add_connection(THD *thd) if (!setup_connection_thread_globals(thd)) { - if (!login_connection(thd)) + if (!thd_prepare_connection(thd)) { - prepare_new_connection_state(thd); /* Check if THD is ok, as prepare_new_connection_state() -- cgit v1.2.1 From 15ef38d2ea97575c71b83db6669ee20000c23a6b Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 27 Jul 2016 00:38:51 +0300 Subject: MDEV-10228: Delete missing rows with OR conditions Fix get_quick_keys(): When building range tree from a condition in form keypart1=const AND (keypart2 < 0 OR keypart2>=0) the SEL_ARG for keypart2 represents an interval (-inf, +inf). However, the logic that sets UNIQUE_RANGE flag fails to recognize this, and sets UNIQUE_RANGE flag if (keypart1, keypart2) covered a unique key. As a result, range access executor assumes the interval can have at most one row and only reads the first row from it. 
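A self-contained SQL sketch of the failure (column names shortened from the range.test case added below; the always-true disjunction on the second key part is what used to set the bogus UNIQUE_RANGE flag):

CREATE TABLE t1 (
  k1 VARCHAR(14) NOT NULL,
  k2 INT NOT NULL DEFAULT '0',
  col1 INT,
  PRIMARY KEY (k1, k2)
);
INSERT INTO t1 VALUES ('value1',0,0), ('value1',1,0), ('value1',2,0);

UPDATE t1 FORCE INDEX (PRIMARY) SET col1 = 1
WHERE k1 = 'value1' AND (k2 <= 1 OR k2 > 1);
# Before the fix only the first row of the ('value1', -inf..+inf) interval
# was updated; with the fix all three rows end up with col1 = 1.
SELECT * FROM t1;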
--- mysql-test/r/range.result | 31 +++++++++++++++++++++++++++++++ mysql-test/t/range.test | 29 +++++++++++++++++++++++++++++ sql/opt_range.cc | 2 ++ 3 files changed, 62 insertions(+) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index a19d906b645..630a692cef6 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -2113,3 +2113,34 @@ a b 0 0 1 1 drop table t2; +# +# MDEV-10228: Delete missing rows with OR conditions +# (The example uses UPDATE, because UPDATE allows to use index hints +# and so it's possible to make an example that works with any storage +# engine) +# +CREATE TABLE t1 ( +key1varchar varchar(14) NOT NULL, +key2int int(11) NOT NULL DEFAULT '0', +col1 int, +PRIMARY KEY (key1varchar,key2int), +KEY key1varchar (key1varchar), +KEY key2int (key2int) +) DEFAULT CHARSET=utf8; +insert into t1 values +('value1',0, 0), +('value1',1, 0), +('value1',1000685, 0), +('value1',1003560, 0), +('value1',1004807, 0); +update t1 force index (PRIMARY) set col1=12345 +where (key1varchar='value1' AND (key2int <=1 OR key2int > 1)); +# The following must show col1=12345 for all rows: +select * from t1; +key1varchar key2int col1 +value1 0 12345 +value1 1 12345 +value1 1000685 12345 +value1 1003560 12345 +value1 1004807 12345 +drop table t1; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index b73b09dffd5..393ca68e945 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -1689,3 +1689,32 @@ insert into t2 values (0, 0, 0, 0), (1, 1, 1, 1); analyze table t2; select a, b from t2 where (a, b) in ((0, 0), (1, 1)); drop table t2; + +--echo # +--echo # MDEV-10228: Delete missing rows with OR conditions +--echo # (The example uses UPDATE, because UPDATE allows to use index hints +--echo # and so it's possible to make an example that works with any storage +--echo # engine) +--echo # + +CREATE TABLE t1 ( + key1varchar varchar(14) NOT NULL, + key2int int(11) NOT NULL DEFAULT '0', + col1 int, + PRIMARY KEY (key1varchar,key2int), + KEY key1varchar (key1varchar), + KEY key2int (key2int) +) DEFAULT CHARSET=utf8; + +insert into t1 values + ('value1',0, 0), + ('value1',1, 0), + ('value1',1000685, 0), + ('value1',1003560, 0), + ('value1',1004807, 0); + +update t1 force index (PRIMARY) set col1=12345 +where (key1varchar='value1' AND (key2int <=1 OR key2int > 1)); +--echo # The following must show col1=12345 for all rows: +select * from t1; +drop table t1; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index f4ac47fee96..a40363ff9ab 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -10409,8 +10409,10 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, KEY *table_key=quick->head->key_info+quick->index; flag=EQ_RANGE; if ((table_key->flags & HA_NOSAME) && + min_part == key_tree->part && key_tree->part == table_key->key_parts-1) { + DBUG_ASSERT(min_part == max_part); if ((table_key->flags & HA_NULL_PART_KEY) && null_part_in_key(key, param->min_key, -- cgit v1.2.1 From f982d1074a3bc880462ab2372803b278af8dc4dd Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 26 Jul 2016 22:58:33 -0700 Subject: Fixed the following problem: Temporary tables created for recursive CTE were instantiated at the prepare phase. As a result these temporary tables missed indexes for look-ups and optimizer could not use them. 
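A condensed form of the test added below (it assumes the populated folks table from cte_recursive.test with a primary key on id): once the temporary table behind the recursive CTE is instantiated late enough to receive a generated key, the recursive members can join into it through that key instead of scanning it on every iteration.

EXPLAIN
WITH RECURSIVE ancestors AS (
  SELECT * FROM folks WHERE name = 'Me'
  UNION
  SELECT p.* FROM folks AS p, ancestors AS fa WHERE p.id = fa.father
  UNION
  SELECT p.* FROM folks AS p, ancestors AS ma WHERE p.id = ma.mother
)
SELECT * FROM ancestors;
# The recursive members are expected to show ref access on key0 into the
# CTE temporary table (keyed look-ups, as in the updated cte_recursive.result)
# rather than a full scan of the materialized rows.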
--- mysql-test/r/cte_recursive.result | 85 +++++++++++++++++++++++++++++++-------- mysql-test/t/cte_recursive.test | 40 ++++++++++++++++++ sql/sql_cte.cc | 18 +++++++++ sql/sql_cte.h | 34 ++++++++-------- sql/sql_derived.cc | 9 +++-- sql/sql_lex.cc | 14 +++++++ sql/sql_lex.h | 2 + sql/sql_select.cc | 5 --- sql/sql_select.h | 4 ++ sql/sql_union.cc | 14 +++++-- sql/table.cc | 14 +------ 11 files changed, 184 insertions(+), 55 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 22faade0b9f..be6617b6a1e 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -113,7 +113,7 @@ select t2.a from t1,t2 where t1.a+1=t2.a ) select * from t1; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY ALL NULL NULL NULL NULL 30 +1 PRIMARY ALL NULL NULL NULL NULL 5 2 SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where 3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 5 3 UNCACHEABLE UNION t2 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join) @@ -595,18 +595,18 @@ select h.name, h.dob, w.name, w.dob from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w where c.h_id = h.id and c.w_id= w.id; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY ALL NULL NULL NULL NULL 36 100.00 -1 PRIMARY ALL NULL NULL NULL NULL 468 100.00 Using where; Using join buffer (flat, BNL join) -1 PRIMARY ALL NULL NULL NULL NULL 468 100.00 Using where; Using join buffer (incremental, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 2 100.00 Using where +1 PRIMARY ref key0 key0 5 c.h_id 2 100.00 +1 PRIMARY ref key0 key0 5 c.w_id 2 100.00 3 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where -4 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 -4 UNCACHEABLE UNION ALL NULL NULL NULL NULL 36 100.00 Using where; Using join buffer (flat, BNL join) +4 UNCACHEABLE UNION ALL NULL NULL NULL NULL 2 100.00 +4 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) 5 UNCACHEABLE UNION ALL NULL NULL NULL NULL 2 100.00 5 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL -2 UNCACHEABLE SUBQUERY ALL NULL NULL NULL NULL 36 100.00 Using where +2 UNCACHEABLE SUBQUERY ALL NULL NULL NULL NULL 12 100.00 Using where Warnings: -Note 1003 with recursive ancestor_couple_ids as (select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where ((`a`.`father` is not null) and (`a`.`mother` is not null)))coupled_ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where (`test`.`folks`.`name` = 'Me') union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where (`fa`.`h_id` = `test`.`p`.`id`) union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where (`test`.`p`.`id` = `ma`.`w_id`)), select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` 
where ((`h`.`id` = `c`.`h_id`) and (`w`.`id` = `c`.`w_id`)) +Note 1003 with recursive ancestor_couple_ids as (select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where ((`a`.`father` is not null) and (`a`.`mother` is not null)))coupled_ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where (`test`.`folks`.`name` = 'Me') union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where (`test`.`p`.`id` = `fa`.`h_id`) union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where (`test`.`p`.`id` = `ma`.`w_id`)), select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` where ((`h`.`id` = `c`.`h_id`) and (`w`.`id` = `c`.`w_id`)) with recursive ancestor_couple_ids(h_id, w_id) as @@ -779,7 +779,7 @@ where p.id = a.father or p.id = a.mother ) select * from ancestors; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY ALL NULL NULL NULL NULL 156 100.00 +1 PRIMARY ALL NULL NULL NULL NULL 12 100.00 2 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where 3 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) @@ -809,9 +809,9 @@ id name dob father mother 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL -8 Grandpa Ben 1940-10-21 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL with recursive ancestors @@ -896,9 +896,9 @@ generation name 1 Dad 1 Mom 2 Grandpa Bill -2 Grandpa Ben 2 Grandma Ann 2 Grandma Sally +2 Grandpa Ben 3 Grandgrandma Martha set standards_compliant_cte=1; with recursive @@ -951,9 +951,9 @@ id name dob father mother 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL -8 Grandpa Ben 1940-10-21 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL with recursive ancestor_ids (id) as @@ -998,10 +998,10 @@ id name dob father mother 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL -8 Grandpa Ben 1940-10-21 NULL NULL -25 Uncle Jim 1968-11-18 8 7 9 Grandma Ann 1941-10-15 NULL NULL +25 Uncle Jim 1968-11-18 8 7 7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL 27 Auntie Melinda 1971-03-29 NULL NULL with recursive @@ -1029,9 +1029,9 @@ generation name 1 Dad 1 Mom 2 Grandpa Bill -2 Grandpa Ben 2 Grandma Ann 2 Grandma Sally +2 Grandpa Ben 3 Grandgrandma Martha with recursive ancestor_ids (id, generation) @@ -1112,7 +1112,60 @@ generation name 1 Dad 1 Mom 2 Grandpa Bill -2 Grandpa Ben 2 Grandma Ann 2 Grandma Sally +2 Grandpa Ben +alter table folks add primary key (id); +explain +with recursive +ancestors +as +( +select * +from folks +where name = 'Me' + union +select p.* +from folks as p, ancestors as fa +where p.id = 
fa.father +union +select p.* +from folks as p, ancestors as ma +where p.id = ma.mother +) +select * from ancestors; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 12 +2 SUBQUERY folks ALL NULL NULL NULL NULL 12 Using where +3 UNCACHEABLE UNION p ALL PRIMARY NULL NULL NULL 12 +3 UNCACHEABLE UNION ref key0 key0 5 test.p.id 2 +4 UNCACHEABLE UNION p ALL PRIMARY NULL NULL NULL 12 +4 UNCACHEABLE UNION ref key0 key0 5 test.p.id 2 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +with recursive +ancestors +as +( +select * +from folks +where name = 'Me' + union +select p.* +from folks as p, ancestors as fa +where p.id = fa.father +union +select p.* +from folks as p, ancestors as ma +where p.id = ma.mother +) +select * from ancestors; +id name dob father mother +100 Me 2000-01-01 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +8 Grandpa Ben 1940-10-21 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +6 Grandgrandma Martha 1923-05-17 NULL NULL drop table folks; diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 8f85c7b0480..8262b53c374 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -937,5 +937,45 @@ as ) select * from ancestors; +alter table folks add primary key (id); + +explain +with recursive +ancestors +as +( + select * + from folks + where name = 'Me' + union + select p.* + from folks as p, ancestors as fa + where p.id = fa.father + union + select p.* + from folks as p, ancestors as ma + where p.id = ma.mother +) +select * from ancestors; + +with recursive +ancestors +as +( + select * + from folks + where name = 'Me' + union + select p.* + from folks as p, ancestors as fa + where p.id = fa.father + union + select p.* + from folks as p, ancestors as ma + where p.id = ma.mother +) +select * from ancestors; + + drop table folks; diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 3c663d7d260..dd877b5598a 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -3,6 +3,7 @@ #include "sql_cte.h" #include "sql_view.h" // for make_valid_column_names #include "sql_parse.h" +#include "sql_select.h" /** @@ -956,3 +957,20 @@ void With_element::print(String *str, enum_query_type query_type) } +bool With_element::instantiate_tmp_tables() +{ + List_iterator_fast
li(rec_result->rec_tables); + TABLE *rec_table; + while ((rec_table= li++)) + { + if (!rec_table->is_created() && + instantiate_tmp_table(rec_table, + rec_result->tmp_table_param.keyinfo, + rec_result->tmp_table_param.start_recinfo, + &rec_result->tmp_table_param.recinfo, + 0)) + return true; + } + return false; +} + diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 8b81644e838..52b2b8f4f77 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -6,15 +6,15 @@ class select_union; struct st_unit_ctxt_elem; -/** - @class With_clause - @brief Set of with_elements - It has a reference to the first with element from this with clause. - This reference allows to navigate through all the elements of the with clause. - It contains a reference to the unit to which this with clause is attached. - It also contains a flag saying whether this with clause was specified as recursive. -*/ +/** + @class With_element + @brief Definition of a CTE table + + It contains a reference to the name of the table introduced by this with element, + and a reference to the unit that specificies this table. Also it contains + a reference to the with clause to which this element belongs to. +*/ class With_element : public Sql_alloc { @@ -184,18 +184,20 @@ public: void set_result_table(TABLE *tab) { result_table= tab; } + bool instantiate_tmp_tables(); + friend class With_clause; }; - /** - @class With_element - @brief Definition of a CTE table - - It contains a reference to the name of the table introduced by this with element, - and a reference to the unit that specificies this table. Also it contains - a reference to the with clause to which this element belongs to. -*/ + @class With_clause + @brief Set of with_elements + + It has a reference to the first with element from this with clause. + This reference allows to navigate through all the elements of the with clause. + It contains a reference to the unit to which this with clause is attached. + It also contains a flag saying whether this with clause was specified as recursive. +*/ class With_clause : public Sql_alloc { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index b1f665ad70b..e17896f9f24 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -653,7 +653,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) (first_select->options | thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS), - derived->alias, FALSE, TRUE); + derived->alias, FALSE, FALSE); thd->create_tmp_table_for_derived= FALSE; if (!res && !derived->table) @@ -681,7 +681,9 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select()) { sl->context.outer_context= 0; - if (!derived->is_with_table_recursive_reference()) + if (!derived->is_with_table_recursive_reference() || + (!derived->with->with_anchor && + !derived->with->is_with_prepared_anchor())) { // Prepare underlying views/DT first. 
if ((res= sl->handle_derived(lex, DT_PREPARE))) @@ -928,7 +930,8 @@ bool TABLE_LIST::fill_recursive(THD *thd) rc= unit->exec_recursive(false); else { - while(!with->all_are_stabilized() && !rc) + rc= with->instantiate_tmp_tables(); + while(!rc && !with->all_are_stabilized()) { rc= unit->exec_recursive(true); } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index ee1ffcba336..cffa779a27f 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -2065,6 +2065,7 @@ void st_select_lex_unit::init_query() offset_limit_cnt= 0; union_distinct= 0; prepared= optimized= executed= 0; + optimize_started= 0; item= 0; union_result= 0; table= 0; @@ -4393,6 +4394,19 @@ void SELECT_LEX::increase_derived_records(ha_rows records) SELECT_LEX_UNIT *unit= master_unit(); DBUG_ASSERT(unit->derived); + if (unit->with_element && unit->with_element->is_recursive) + { + st_select_lex *first_recursive= unit->with_element->first_recursive; + st_select_lex *sl= unit->first_select(); + for ( ; sl != first_recursive; sl= sl->next_select()) + { + if (sl == this) + break; + } + if (sl == first_recursive) + return; + } + select_union *result= (select_union*)unit->result; result->records+= records; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 785908d9750..de3ccfc08a9 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -611,6 +611,8 @@ public: executed, // already executed cleaned; + bool optimize_started; + // list of fields which points to temporary table for union List item_list; /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9b537a61c29..4415596f5b8 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -165,10 +165,6 @@ static COND *optimize_cond(JOIN *join, COND *conds, int flags= 0); bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); static int do_select(JOIN *join, Procedure *procedure); -static bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, - MARIA_COLUMNDEF *start_recinfo, - MARIA_COLUMNDEF **recinfo, - ulonglong options); static enum_nested_loop_state evaluate_join_record(JOIN *, JOIN_TAB *, int); static enum_nested_loop_state @@ -17915,7 +17911,6 @@ int rr_sequential_and_unpack(READ_RECORD *info) TRUE - Error */ -static bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, MARIA_COLUMNDEF *start_recinfo, MARIA_COLUMNDEF **recinfo, diff --git a/sql/sql_select.h b/sql/sql_select.h index c143d58c2e8..535cf5a29e0 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -2235,6 +2235,10 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, TMP_ENGINE_COLUMNDEF *start_recinfo, TMP_ENGINE_COLUMNDEF **recinfo, ulonglong options); +bool instantiate_tmp_table(TABLE *table, KEY *keyinfo, + MARIA_COLUMNDEF *start_recinfo, + MARIA_COLUMNDEF **recinfo, + ulonglong options); bool open_tmp_table(TABLE *table); void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps); double prev_record_reads(POSITION *positions, uint idx, table_map found_ref); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 4c32779f347..23c3801f4ab 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -229,7 +229,7 @@ select_union_recursive::create_result_table(THD *thd_arg, if (! 
(rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, (ORDER*) 0, false, 1, options, HA_POS_ERROR, alias, - !create_table, keep_row_order))) + true, keep_row_order))) return true; rec_table->keys_in_use_for_query.clear_all(); @@ -283,8 +283,11 @@ void select_union_recursive::cleanup() TABLE *tab; while ((tab= it++)) { - tab->file->extra(HA_EXTRA_RESET_STATE); - tab->file->ha_delete_all_rows(); + if (tab->is_created()) + { + tab->file->extra(HA_EXTRA_RESET_STATE); + tab->file->ha_delete_all_rows(); + } free_tmp_table(thd, tab); } } @@ -840,6 +843,10 @@ bool st_select_lex_unit::optimize() if (optimized && !uncacheable && !describe) DBUG_RETURN(FALSE); + if (with_element && with_element->is_recursive && optimize_started) + DBUG_RETURN(FALSE); + optimize_started= true; + if (uncacheable || !item || !item->assigned() || describe) { if (item) @@ -1315,6 +1322,7 @@ bool st_select_lex_unit::cleanup() void st_select_lex_unit::reinit_exec_mechanism() { prepared= optimized= executed= 0; + optimize_started= 0; #ifndef DBUG_OFF if (is_union()) { diff --git a/sql/table.cc b/sql/table.cc index e11ea59bcf8..3dff8ac7f59 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -7198,24 +7198,14 @@ bool TABLE_LIST::handle_derived(LEX *lex, uint phases) DBUG_ENTER("handle_derived"); DBUG_PRINT("enter", ("phases: 0x%x", phases)); - if (is_with_table_recursive_reference()) + if (unit) { - if (!(with->with_anchor || with->is_with_prepared_anchor())) + if (!is_with_table_recursive_reference()) { for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) if (sl->handle_derived(lex, phases)) DBUG_RETURN(TRUE); } - else if (mysql_handle_single_derived(lex, this, phases)) - DBUG_RETURN(TRUE); - DBUG_RETURN(FALSE); - } - - if (unit) - { - for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select()) - if (sl->handle_derived(lex, phases)) - DBUG_RETURN(TRUE); if (mysql_handle_single_derived(lex, this, phases)) DBUG_RETURN(TRUE); } -- cgit v1.2.1 From 5d0dfcbecb3026fc105bd0dcc7a9b2a5c9536130 Mon Sep 17 00:00:00 2001 From: iangilfillan Date: Wed, 27 Jul 2016 15:29:32 +0200 Subject: Update contributors --- CREDITS | 34 ++++++++++++++++++---------------- mysql-test/r/contributors.result | 17 +++++++++-------- sql/contributors.h | 21 ++++++++++----------- 3 files changed, 37 insertions(+), 35 deletions(-) diff --git a/CREDITS b/CREDITS index 1f071e4fb44..f0e6de7f08f 100644 --- a/CREDITS +++ b/CREDITS @@ -1,24 +1,26 @@ MariaDB is brought to you by the MariaDB Foundation, a non profit organization registered in the USA. 
-The current main members and sponsors of the MariaDB Foundation are: - -MariaDB Corporation http://www.mariadb.com (2013 - 2016) -Booking.com http://www.booking.com (2013 - 2016) -Parallels http://www.parallels.com/products/plesk (2013 - 2016) -Automattic http://automattic.com (2014 - 2016) -Verkkokauppa.com http://verkkokauppa.com (2015 - 2016) -Visma http://visma.com/ (2015 - 2016) -Webyog http://webyog.com (2015 - 2016) -Wikimedia Foundation http://wikimedia.org (2015 - 2016) -Acronis http://acronis.com (2016) - -For a full list of supporters and sponsors see +The current main sponsors of the MariaDB Foundation are: + +Booking.com http://www.booking.com (2013 - 2016) +Development Bank of Singapore http://dbs.com (2016) +MariaDB Corporation https://www.mariadb.com (2013 - 2016) +Visma http://visma.com (2015 - 2016) +Acronis http://acronis.com (2016) +Nexedi https://www.nexedi.com (2016) +Automattic https://automattic.com (2014 - 2016) +Verkkokauppa.com https://www.verkkokauppa.com (2015 - 2016) +Virtuozzo https://virtuozzo.com (2016) + +For a full list of sponsors, see https://mariadb.org/about/supporters/ +and for individual contributors, see +https://mariadb.org/donate/individual-sponsors/ -You can also do this by running SHOW CONTRIBUTORS. +You can also get the list of sponsors by running SHOW CONTRIBUTORS. -For all corporate memberships and sponsorships please contact the +For all corporate sponsorships please contact the MariaDB Foundation Board via foundation@mariadb.org. The MariaDB Foundation is responsible for the MariaDB source @@ -38,7 +40,7 @@ following services to the MariaDB community: To be able to do the above we need help from corporations and individuals! You can help support MariaDB by becoming a MariaDB developer or a -member or sponsor of the MariaDB Foundation. To donate or sponsor, +sponsor of the MariaDB Foundation. 
To donate or sponsor, go to https://mariadb.org/donate/ You can get a list of all the main authors of MariaDB / MySQL by running diff --git a/mysql-test/r/contributors.result b/mysql-test/r/contributors.result index 1e01ca81990..03f5ad2ab15 100644 --- a/mysql-test/r/contributors.result +++ b/mysql-test/r/contributors.result @@ -1,13 +1,14 @@ SHOW CONTRIBUTORS; Name Location Comment -Booking.com http://www.booking.com Founding member of the MariaDB Foundation -MariaDB Corporation https://mariadb.com Founding member of the MariaDB Foundation -Auttomattic http://automattic.com Member of the MariaDB Foundation -Visma http://visma.com Member of the MariaDB Foundation -Nexedi http://www.nexedi.com Member of the MariaDB Foundation -Acronis http://www.acronis.com Member of the MariaDB Foundation -Verkkokauppa.com Finland Sponsor of the MariaDB Foundation -Virtuozzo https://virtuozzo.com/ Sponsor of the MariaDB Foundation +Booking.com http://www.booking.com Founding member, Platinum Sponsor of the MariaDB Foundation +MariaDB Corporation https://mariadb.com Founding member, Gold Sponsor of the MariaDB Foundation +Visma http://visma.com Gold Sponsor of the MariaDB Foundation +DBS http://dbs.com Gold Sponsor of the MariaDB Foundation +Nexedi https://www.nexedi.com Silver Sponsor of the MariaDB Foundation +Acronis http://www.acronis.com Silver Sponsor of the MariaDB Foundation +Auttomattic https://automattic.com Bronze Sponsor of the MariaDB Foundation +Verkkokauppa.com https://virtuozzo.com Bronze Sponsor of the MariaDB Foundation +Virtuozzo https://virtuozzo.com/ Bronze Sponsor of the MariaDB Foundation Google USA Sponsoring encryption, parallel replication and GTID Facebook USA Sponsoring non-blocking API, LIMIT ROWS EXAMINED etc Ronald Bradford Brisbane, Australia EFF contribution for UC2006 Auction diff --git a/sql/contributors.h b/sql/contributors.h index 76674d654e5..f52d3243453 100644 --- a/sql/contributors.h +++ b/sql/contributors.h @@ -36,17 +36,16 @@ struct show_table_contributors_st { */ struct show_table_contributors_st show_table_contributors[]= { - /* MariaDB foundation members, in contribution, size , time order */ - {"Booking.com", "http://www.booking.com", "Founding member of the MariaDB Foundation"}, - {"MariaDB Corporation", "https://mariadb.com", "Founding member of the MariaDB Foundation"}, - {"Auttomattic", "http://automattic.com", "Member of the MariaDB Foundation"}, - {"Visma", "http://visma.com", "Member of the MariaDB Foundation"}, - {"Nexedi", "http://www.nexedi.com", "Member of the MariaDB Foundation"}, - {"Acronis", "http://www.acronis.com", "Member of the MariaDB Foundation"}, - - /* Smaller sponsors, newer per year */ - {"Verkkokauppa.com", "Finland", "Sponsor of the MariaDB Foundation"}, - {"Virtuozzo", "https://virtuozzo.com/", "Sponsor of the MariaDB Foundation"}, + /* MariaDB foundation sponsors, in contribution, size , time order */ + {"Booking.com", "http://www.booking.com", "Founding member, Platinum Sponsor of the MariaDB Foundation"}, + {"MariaDB Corporation", "https://mariadb.com", "Founding member, Gold Sponsor of the MariaDB Foundation"}, + {"Visma", "http://visma.com", "Gold Sponsor of the MariaDB Foundation"}, + {"DBS", "http://dbs.com", "Gold Sponsor of the MariaDB Foundation"}, + {"Nexedi", "https://www.nexedi.com", "Silver Sponsor of the MariaDB Foundation"}, + {"Acronis", "http://www.acronis.com", "Silver Sponsor of the MariaDB Foundation"}, + {"Auttomattic", "https://automattic.com", "Bronze Sponsor of the MariaDB Foundation"}, + {"Verkkokauppa.com", 
"https://virtuozzo.com", "Bronze Sponsor of the MariaDB Foundation"}, + {"Virtuozzo", "https://virtuozzo.com/", "Bronze Sponsor of the MariaDB Foundation"}, /* Sponsors of important features */ {"Google", "USA", "Sponsoring encryption, parallel replication and GTID"}, -- cgit v1.2.1 From a63ceaeab10fc1fc72f5853ce14b278da95579b4 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 27 Jul 2016 17:01:45 +0300 Subject: MDEV-10389: Query returns different results on a debug vs non-debug build The problem was caused by a merged semi-join, which contained a non-merged semi-join, which used references to the top-level query in the left_expr. When moving non-merged semi-join from the subquery to its parent, do not forget to call fix_after_pullout for its Item_subselect. We need to do that specifically, because non-merged semi-joins do not have their IN-equality in the WHERE clause at this stage. --- mysql-test/r/subselect_mat.result | 13 +++++++++++++ mysql-test/r/subselect_sj_mat.result | 13 +++++++++++++ mysql-test/t/subselect_sj_mat.test | 12 ++++++++++++ sql/opt_subselect.cc | 5 +++++ 4 files changed, 43 insertions(+) diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index c9fa7c0c672..d0c5c2eda26 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -2261,6 +2261,19 @@ PREPARE stmt FROM ' EXECUTE stmt; EXECUTE stmt; DROP TABLE t1,t2,t3; +# +# MDEV-10389: Query returns different results on a debug vs non-debug build of the same revision +# +CREATE TABLE t1 (i1 INT, i2 INT NOT NULL); +INSERT INTO t1 VALUES (1,4),(2,6); +SELECT * FROM t1 AS alias1 +WHERE alias1.i1 IN ( +SELECT i1 FROM t1 WHERE alias1.i2 IN ( SELECT i2 FROM t1 HAVING i2 <> 7 ) +); +i1 i2 +1 4 +2 6 +DROP TABLE t1; set @subselect_mat_test_optimizer_switch_value=null; set @@optimizer_switch='materialization=on,in_to_exists=off,semijoin=off'; set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; diff --git a/mysql-test/r/subselect_sj_mat.result b/mysql-test/r/subselect_sj_mat.result index 2f3777a47fd..e846549f406 100644 --- a/mysql-test/r/subselect_sj_mat.result +++ b/mysql-test/r/subselect_sj_mat.result @@ -2301,3 +2301,16 @@ PREPARE stmt FROM ' EXECUTE stmt; EXECUTE stmt; DROP TABLE t1,t2,t3; +# +# MDEV-10389: Query returns different results on a debug vs non-debug build of the same revision +# +CREATE TABLE t1 (i1 INT, i2 INT NOT NULL); +INSERT INTO t1 VALUES (1,4),(2,6); +SELECT * FROM t1 AS alias1 +WHERE alias1.i1 IN ( +SELECT i1 FROM t1 WHERE alias1.i2 IN ( SELECT i2 FROM t1 HAVING i2 <> 7 ) +); +i1 i2 +1 4 +2 6 +DROP TABLE t1; diff --git a/mysql-test/t/subselect_sj_mat.test b/mysql-test/t/subselect_sj_mat.test index d2bafa86028..559b380a7dc 100644 --- a/mysql-test/t/subselect_sj_mat.test +++ b/mysql-test/t/subselect_sj_mat.test @@ -1925,3 +1925,15 @@ EXECUTE stmt; DROP TABLE t1,t2,t3; +--echo # +--echo # MDEV-10389: Query returns different results on a debug vs non-debug build of the same revision +--echo # +CREATE TABLE t1 (i1 INT, i2 INT NOT NULL); +INSERT INTO t1 VALUES (1,4),(2,6); + +SELECT * FROM t1 AS alias1 +WHERE alias1.i1 IN ( + SELECT i1 FROM t1 WHERE alias1.i2 IN ( SELECT i2 FROM t1 HAVING i2 <> 7 ) +); +DROP TABLE t1; + diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 537774aea2a..361da4e6279 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -1558,7 +1558,12 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred) { tl->set_tablenr(table_no); if (tl->is_jtbm()) + { 
tl->jtbm_table_no= table_no; + Item *dummy= tl->jtbm_subselect; + tl->jtbm_subselect->fix_after_pullout(parent_lex, &dummy); + DBUG_ASSERT(dummy == tl->jtbm_subselect); + } SELECT_LEX *old_sl= tl->select_lex; tl->select_lex= parent_join->select_lex; for (TABLE_LIST *emb= tl->embedding; -- cgit v1.2.1 From b522c71e4be54708dc935f0ad5347fd4b06199ef Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 22 Jul 2016 19:00:49 -0400 Subject: MDEV-10396: MariaDB does not restart after upgrade on debian 8 During wsrep position recovery, galera_recovery.sh script redirected mysqld's error log to a temporary file in order to find the start position. This, however, will not work if --log-error is configured for the server. Fixed by using --log-error in command line instead of redirection. [Patch contributed by Philippe MARASSE] --- scripts/galera_recovery.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/galera_recovery.sh b/scripts/galera_recovery.sh index 0e5a42b1676..d734ceb7ac7 100644 --- a/scripts/galera_recovery.sh +++ b/scripts/galera_recovery.sh @@ -68,7 +68,8 @@ parse_arguments() { wsrep_recover_position() { # Redirect server's error log to the log file. - eval /usr/sbin/mysqld $cmdline_args --user=$user --wsrep_recover 2> "$log_file" + eval /usr/sbin/mysqld $cmdline_args --user=$user --wsrep_recover \ + --log-error="$log_file" ret=$? if [ $ret -ne 0 ]; then # Something went wrong, let us also print the error log so that it -- cgit v1.2.1 From 67480fc5fd7fafc5838a6b8230100a3fc4810481 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Thu, 28 Jul 2016 15:49:59 -0400 Subject: MDEV-10429: sys_vars.sysvars_wsrep fails in buildbot on host 'work' (valgrind builder) - Fixed the test - Improved comment for wsrep_drupal_282555_workaround --- mysql-test/suite/sys_vars/r/sysvars_wsrep.result | 2 +- mysql-test/suite/sys_vars/t/sysvars_wsrep.test | 4 +++- sql/sys_vars.cc | 7 ++++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result index f1745a1ad34..3d7ec08166d 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result +++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result @@ -162,7 +162,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE OFF VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT To use a workaround forbad autoincrement value +VARIABLE_COMMENT Enable a workaround to handle the cases where inserting a DEFAULT value into an auto-increment column could fail with duplicate key error NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL diff --git a/mysql-test/suite/sys_vars/t/sysvars_wsrep.test b/mysql-test/suite/sys_vars/t/sysvars_wsrep.test index 25454191e2e..700b129fd62 100644 --- a/mysql-test/suite/sys_vars/t/sysvars_wsrep.test +++ b/mysql-test/suite/sys_vars/t/sysvars_wsrep.test @@ -3,7 +3,9 @@ --let $datadir = `SELECT @@datadir` --let $hostname = `SELECT @@hostname` ---replace_result $datadir DATADIR $hostname HOSTNAME +--replace_result $datadir DATADIR +--let $hostname_regex=/^$hostname\$/HOSTNAME/ +--replace_regex $hostname_regex --vertical_results select * from information_schema.system_variables where variable_name like 'wsrep%' diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c733976452c..1943408678c 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4782,9 +4782,10 @@ static Sys_var_mybool Sys_wsrep_auto_increment_control( CMD_LINE(OPT_ARG), DEFAULT(TRUE)); static Sys_var_mybool 
Sys_wsrep_drupal_282555_workaround( - "wsrep_drupal_282555_workaround", "To use a workaround for" - "bad autoincrement value", - GLOBAL_VAR(wsrep_drupal_282555_workaround), + "wsrep_drupal_282555_workaround", "Enable a workaround to handle the " + "cases where inserting a DEFAULT value into an auto-increment column " + "could fail with duplicate key error", + GLOBAL_VAR(wsrep_drupal_282555_workaround), CMD_LINE(OPT_ARG), DEFAULT(FALSE)); static Sys_var_charptr sys_wsrep_sst_method( -- cgit v1.2.1 From 247632e67ea49227978cfc50f4df6274ccda4a33 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Fri, 29 Jul 2016 01:10:00 -0700 Subject: Fixed bug mdev-10344. The patch for bug mdev-9937 actually did not fix the problem of name resolution for tables used in views referred in queries with WITH clauses. This fix corrects the patch. --- mysql-test/r/cte_nonrecursive.result | 23 +++++++++++++++++++++++ mysql-test/t/cte_nonrecursive.test | 18 ++++++++++++++++++ sql/sql_view.cc | 2 +- 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 295c7546ca0..713ec4bcec3 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -780,3 +780,26 @@ a 2 3 DROP TABLE t1,t2,t3; +# +# Bug mdev-10344: the WITH clause of the query refers to a view that uses +# a base table with the same name as a CTE table from the clause +# +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; +create view v1 as select * from ten; +select * from v1; +a +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +drop view v1; +drop table ten, one_k; diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 9a0e43bf8f7..8caf0832df4 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -471,3 +471,21 @@ SELECT * FROM (WITH a AS (SELECT * FROM t1) SELECT * FROM t2 NATURAL JOIN t3) AS DROP TABLE t1,t2,t3; +--echo # +--echo # Bug mdev-10344: the WITH clause of the query refers to a view that uses +--echo # a base table with the same name as a CTE table from the clause +--echo # + + +create table ten(a int primary key); +insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +create table one_k(a int primary key); +insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C; + +create view v1 as select * from ten; + +select * from v1; + +drop view v1; +drop table ten, one_k; diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 4fd4fb8dd01..36f5c294663 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -1622,7 +1622,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, sl->context.error_processor_data= (void *)table; } - table->select_lex->master_unit()->is_view= true; + view_select->master_unit()->is_view= true; /* check MERGE algorithm ability -- cgit v1.2.1 From c6aaa2adbefa04463bb9b67264c09a04b9c4bfcd Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Sat, 30 Jul 2016 10:53:01 +0300 Subject: MDEV-10228: update test results --- mysql-test/r/range_mrr_icp.result | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/mysql-test/r/range_mrr_icp.result b/mysql-test/r/range_mrr_icp.result index 9a089106c76..3f5de5b0189 100644 --- a/mysql-test/r/range_mrr_icp.result +++ b/mysql-test/r/range_mrr_icp.result @@ -2115,4 +2115,35 @@ a b 0 0 1 
1 drop table t2; +# +# MDEV-10228: Delete missing rows with OR conditions +# (The example uses UPDATE, because UPDATE allows to use index hints +# and so it's possible to make an example that works with any storage +# engine) +# +CREATE TABLE t1 ( +key1varchar varchar(14) NOT NULL, +key2int int(11) NOT NULL DEFAULT '0', +col1 int, +PRIMARY KEY (key1varchar,key2int), +KEY key1varchar (key1varchar), +KEY key2int (key2int) +) DEFAULT CHARSET=utf8; +insert into t1 values +('value1',0, 0), +('value1',1, 0), +('value1',1000685, 0), +('value1',1003560, 0), +('value1',1004807, 0); +update t1 force index (PRIMARY) set col1=12345 +where (key1varchar='value1' AND (key2int <=1 OR key2int > 1)); +# The following must show col1=12345 for all rows: +select * from t1; +key1varchar key2int col1 +value1 0 12345 +value1 1 12345 +value1 1000685 12345 +value1 1003560 12345 +value1 1004807 12345 +drop table t1; set optimizer_switch=@mrr_icp_extra_tmp; -- cgit v1.2.1 From 5fdb3cfcd432b85dc305a1a61c2d018a798a6ac3 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 29 Jul 2016 18:21:08 +0200 Subject: MDEV-10419: crash in mariadb 10.1.16-MariaDB-1~trusty Fixed initialization and usage of THD reference in subselect engines. --- mysql-test/r/view.result | 15 +++++++++++++++ mysql-test/t/view.test | 15 +++++++++++++++ sql/item_subselect.cc | 8 +++++--- sql/item_subselect.h | 5 +++-- 4 files changed, 38 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index dbfdf3f0f56..6848ba30245 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -5520,6 +5520,21 @@ test.v1 check Error 'test.v1' is not BASE TABLE test.v1 check status Operation failed drop view v1; drop table t1; +# +# MDEV-10419: crash in mariadb 10.1.16-MariaDB-1~trusty +# +CREATE TABLE t1 (c1 CHAR(13)); +CREATE TABLE t2 (c2 CHAR(13)); +CREATE FUNCTION f() RETURNS INT RETURN 0; +CREATE OR REPLACE VIEW v1 AS select f() from t1 where c1 in (select c2 from t2); +DROP FUNCTION f; +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `f`() AS `f()` from `t1` where `test`.`t1`.`c1` in (select `test`.`t2`.`c2` from `t2`) latin1 latin1_swedish_ci +Warnings: +Warning 1356 View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them +drop view v1; +drop table t1,t2; # ----------------------------------------------------------------- # -- End of 5.5 tests. # ----------------------------------------------------------------- diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index a25a4d129aa..ebd68587a47 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -5490,6 +5490,21 @@ alter table v1 check partition p1; drop view v1; drop table t1; + +--echo # +--echo # MDEV-10419: crash in mariadb 10.1.16-MariaDB-1~trusty +--echo # +CREATE TABLE t1 (c1 CHAR(13)); +CREATE TABLE t2 (c2 CHAR(13)); + +CREATE FUNCTION f() RETURNS INT RETURN 0; +CREATE OR REPLACE VIEW v1 AS select f() from t1 where c1 in (select c2 from t2); +DROP FUNCTION f; + +SHOW CREATE VIEW v1; + +drop view v1; +drop table t1,t2; --echo # ----------------------------------------------------------------- --echo # -- End of 5.5 tests. 
--echo # ----------------------------------------------------------------- diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 690318c610a..3727711a395 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -3308,7 +3308,7 @@ int subselect_uniquesubquery_engine::scan_table() } table->file->extra_opt(HA_EXTRA_CACHE, - current_thd->variables.read_buff_size); + get_thd()->variables.read_buff_size); table->null_row= 0; for (;;) { @@ -3746,7 +3746,7 @@ table_map subselect_union_engine::upper_select_const_tables() void subselect_single_select_engine::print(String *str, enum_query_type query_type) { - select_lex->print(thd, str, query_type); + select_lex->print(get_thd(), str, query_type); } @@ -4276,6 +4276,7 @@ bitmap_init_memroot(MY_BITMAP *map, uint n_bits, MEM_ROOT *mem_root) bool subselect_hash_sj_engine::init(List *tmp_columns, uint subquery_id) { + THD *thd= get_thd(); select_union *result_sink; /* Options to create_tmp_table. */ ulonglong tmp_create_options= thd->variables.option_bits | TMP_TABLE_ALL_COLUMNS; @@ -5500,6 +5501,7 @@ bool subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts, MY_BITMAP *partial_match_key_parts) { + THD *thd= get_thd(); /* The length in bytes of the rowids (positions) of tmp_table. */ uint rowid_length= tmp_table->file->ref_length; ha_rows row_count= tmp_table->file->stats.records; @@ -6038,7 +6040,7 @@ bool subselect_table_scan_engine::partial_match() } tmp_table->file->extra_opt(HA_EXTRA_CACHE, - current_thd->variables.read_buff_size); + get_thd()->variables.read_buff_size); for (;;) { error= tmp_table->file->ha_rnd_next(tmp_table->record[0]); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 0abfe0d5abc..a44503b4471 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -716,7 +716,8 @@ public: ROWID_MERGE_ENGINE, TABLE_SCAN_ENGINE}; subselect_engine(Item_subselect *si, - select_result_interceptor *res) + select_result_interceptor *res): + thd(NULL) { result= res; item= si; @@ -732,7 +733,7 @@ public: Should be called before prepare(). */ void set_thd(THD *thd_arg); - THD * get_thd() { return thd; } + THD * get_thd() { return thd ? 
thd : current_thd; } virtual int prepare(THD *)= 0; virtual void fix_length_and_dec(Item_cache** row)= 0; /* -- cgit v1.2.1 From 558c8ce0f0aabbe38c37c0a3737adde05a862971 Mon Sep 17 00:00:00 2001 From: Hyeonseok Oh Date: Mon, 1 Aug 2016 12:13:14 +0900 Subject: Remove unnecessary semicolon --- storage/xtradb/os/os0sync.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/xtradb/os/os0sync.cc b/storage/xtradb/os/os0sync.cc index 1569fa7a865..a494be537d9 100644 --- a/storage/xtradb/os/os0sync.cc +++ b/storage/xtradb/os/os0sync.cc @@ -320,7 +320,7 @@ os_event_t os_event_create(void) /*==================*/ { - os_event_t event = static_cast(ut_malloc(sizeof(*event)));; + os_event_t event = static_cast(ut_malloc(sizeof(*event))); os_event_create(event); -- cgit v1.2.1 From 84a9e05003e2af253b6e4679cc85a0f0d624c49a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 1 Aug 2016 12:19:29 +0300 Subject: MDEV-10470: main.derived fails, buildbot is broken - Update test result (checked) --- mysql-test/r/derived.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/derived.result b/mysql-test/r/derived.result index 1d643333424..08709c6def0 100644 --- a/mysql-test/r/derived.result +++ b/mysql-test/r/derived.result @@ -621,13 +621,13 @@ SELECT f3 FROM t2 HAVING f3 >= 8 ); id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY system NULL NULL NULL NULL 1 100.00 -1 PRIMARY eq_ref distinct_key distinct_key 4 sq.f2 1 100.00 +1 PRIMARY eq_ref distinct_key distinct_key 4 const 1 100.00 1 PRIMARY t2 ALL NULL NULL NULL NULL 2 100.00 Using where; FirstMatch(); Using join buffer (flat, BNL join) 4 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 100.00 2 DERIVED t1 system NULL NULL NULL NULL 1 100.00 Warnings: Note 1276 Field or reference 'sq.f2' of SELECT #3 was resolved in SELECT #1 -Note 1003 select 6 AS `f1` from (select `test`.`t2`.`f3` from `test`.`t2` having (`test`.`t2`.`f3` >= 8)) semi join (`test`.`t2`) where ((`test`.`t2`.`f3` = 6) and (9 = ``.`f3`)) +Note 1003 select 6 AS `f1` from (select `test`.`t2`.`f3` from `test`.`t2` having (`test`.`t2`.`f3` >= 8)) semi join (`test`.`t2`) where ((`test`.`t2`.`f3` = 6) and (``.`f3` = 9)) DROP TABLE t2,t1; # # MDEV-9462: Out of memory using explain on 2 empty tables -- cgit v1.2.1 From ed48fcf177bf1077db731a4d5bb0b6dd7468fc9c Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 1 Aug 2016 17:02:28 -0400 Subject: MDEV-10478: Trx abort does not work in autocommit mode THD's statement transaction handle (st_transactions::stmt) should also be looked for registered htons while serving a request to abort a transaction. --- sql/handler.cc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sql/handler.cc b/sql/handler.cc index 947ca005cdb..e4aa1660c82 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6078,7 +6078,10 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) DBUG_RETURN(0); } - THD_TRANS *trans= &victim_thd->transaction.all; + /* Try statement transaction if standard one is not set. */ + THD_TRANS *trans= (victim_thd->transaction.all.ha_list) ? 
+ &victim_thd->transaction.all : &victim_thd->transaction.stmt; + Ha_trx_info *ha_info= trans->ha_list, *ha_info_next; for (; ha_info; ha_info= ha_info_next) @@ -6086,8 +6089,8 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) handlerton *hton= ha_info->ht(); if (!hton->abort_transaction) { - /* Skip warning for binlog SE */ - if (hton->db_type != DB_TYPE_BINLOG) + /* Skip warning for binlog & wsrep. */ + if (hton->db_type != DB_TYPE_BINLOG && hton != wsrep_hton) { WSREP_WARN("Cannot abort transaction."); } -- cgit v1.2.1 From b5fb2a685b6ec67d37033b020a8145d1aac1fc93 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Tue, 2 Aug 2016 14:29:55 +0400 Subject: Fixed main.contributors failure --- mysql-test/r/contributors.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/contributors.result b/mysql-test/r/contributors.result index 03f5ad2ab15..918ceaa496f 100644 --- a/mysql-test/r/contributors.result +++ b/mysql-test/r/contributors.result @@ -2,11 +2,11 @@ SHOW CONTRIBUTORS; Name Location Comment Booking.com http://www.booking.com Founding member, Platinum Sponsor of the MariaDB Foundation MariaDB Corporation https://mariadb.com Founding member, Gold Sponsor of the MariaDB Foundation -Visma http://visma.com Gold Sponsor of the MariaDB Foundation +Visma http://visma.com Gold Sponsor of the MariaDB Foundation DBS http://dbs.com Gold Sponsor of the MariaDB Foundation Nexedi https://www.nexedi.com Silver Sponsor of the MariaDB Foundation Acronis http://www.acronis.com Silver Sponsor of the MariaDB Foundation -Auttomattic https://automattic.com Bronze Sponsor of the MariaDB Foundation +Auttomattic https://automattic.com Bronze Sponsor of the MariaDB Foundation Verkkokauppa.com https://virtuozzo.com Bronze Sponsor of the MariaDB Foundation Virtuozzo https://virtuozzo.com/ Bronze Sponsor of the MariaDB Foundation Google USA Sponsoring encryption, parallel replication and GTID -- cgit v1.2.1 From 6b71a6d2d935d997f43a658d45d1e518620cf0ad Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 2 Aug 2016 18:52:51 +0200 Subject: MDEV-10383 Named pipes : multiple servers can listen on the same pipename Use FILE_FLAG_FIRST_PIPE_INSTANCE with the first CreateNamedPipe() call to make sure the pipe does not already exist. 
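For illustration, a minimal standalone Win32 sketch of the technique this fix relies on: with FILE_FLAG_FIRST_PIPE_INSTANCE set, CreateNamedPipe() refuses to create the first instance of a pipe name that already exists (typically failing with ERROR_ACCESS_DENIED), which is what lets a second server detect the clash and abort. The pipe name, buffer sizes and error handling below are made-up placeholders, not the actual mysqld.cc code shown in the hunk that follows.

#include <windows.h>
#include <stdio.h>

int main()
{
  /* Hypothetical pipe name; mysqld derives its own from the configured socket name. */
  const char *pipe_name= "\\\\.\\pipe\\example_mysqld_pipe";

  HANDLE h= CreateNamedPipeA(pipe_name,
                             PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED |
                             FILE_FLAG_FIRST_PIPE_INSTANCE,  /* fail if the name is already taken */
                             PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
                             PIPE_UNLIMITED_INSTANCES,
                             16384, 16384,                   /* illustrative buffer sizes */
                             NMPWAIT_USE_DEFAULT_WAIT,
                             NULL);                          /* default security, for brevity */
  if (h == INVALID_HANDLE_VALUE)
  {
    /* A second server trying to bind the same name lands here,
       typically with GetLastError() == ERROR_ACCESS_DENIED. */
    fprintf(stderr, "Create named pipe failed, error %lu\n", GetLastError());
    return 1;
  }
  CloseHandle(h);
  return 0;
}
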
--- mysql-test/t/named_pipe.test | 9 +++++++++ sql/mysqld.cc | 31 +++++++++++-------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/mysql-test/t/named_pipe.test b/mysql-test/t/named_pipe.test index 8dcab3329e4..af74c200e96 100644 --- a/mysql-test/t/named_pipe.test +++ b/mysql-test/t/named_pipe.test @@ -22,3 +22,12 @@ connect(pipe_con,localhost,root,,,,,PIPE); connection default; disconnect pipe_con; + +# MDEV-10383 : check that other server cannot 'bind' on the same pipe +let $MYSQLD_DATADIR= `select @@datadir`; +--error 1 +--exec $MYSQLD_CMD --enable-named-pipe --skip-networking --log-error=second-mysqld.err +let SEARCH_FILE=$MYSQLD_DATADIR/second-mysqld.err; +let SEARCH_RANGE= -50; +let SEARCH_PATTERN=\[ERROR\] Create named pipe failed; +source include/search_pattern_in_file.inc; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3450447ceb9..9b8f964629d 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2316,26 +2316,17 @@ static void network_init(void) saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor; saPipeSecurity.bInheritHandle = FALSE; if ((hPipe= CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX|FILE_FLAG_OVERLAPPED, - PIPE_TYPE_BYTE | - PIPE_READMODE_BYTE | - PIPE_WAIT, - PIPE_UNLIMITED_INSTANCES, - (int) global_system_variables.net_buffer_length, - (int) global_system_variables.net_buffer_length, - NMPWAIT_USE_DEFAULT_WAIT, - &saPipeSecurity)) == INVALID_HANDLE_VALUE) - { - LPVOID lpMsgBuf; - int error=GetLastError(); - FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM, - NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPTSTR) &lpMsgBuf, 0, NULL ); - sql_perror((char *)lpMsgBuf); - LocalFree(lpMsgBuf); - unireg_abort(1); - } + PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE, + PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, + (int) global_system_variables.net_buffer_length, + (int) global_system_variables.net_buffer_length, + NMPWAIT_USE_DEFAULT_WAIT, + &saPipeSecurity)) == INVALID_HANDLE_VALUE) + { + sql_perror("Create named pipe failed"); + unireg_abort(1); + } } #endif -- cgit v1.2.1 From 35c9c856347fe340f3d564f33e76bb6f9ea05e76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 3 Aug 2016 13:40:53 +0300 Subject: MDEV-10217: innodb.innodb_bug59641 fails sporadically in buildbot: InnoDB: Failing assertion: current_rec != insert_rec in file page0cur.c line 1052 Added record printout when current_rec == insert_rec with lengths for debug builds. 
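The hunks below keep the extra diagnostics inside UNIV_DEBUG so release builds are unaffected. As a rough, self-contained sketch of the same pattern (hypothetical function and names, plain assert() standing in for InnoDB's ut_a()): dump the conflicting pointers and lengths first, then fail the assertion, so a sporadic buildbot failure leaves something to analyse in the log.

#include <cassert>
#include <cstddef>
#include <cstdio>

#define UNIV_DEBUG   /* pretend this is a debug build */

/* Hypothetical stand-in for the page-insert step that owns the
   "current_rec != insert_rec" invariant. */
static void link_record(const unsigned char *current_rec,
                        const unsigned char *insert_rec,
                        size_t extra_len, size_t data_len)
{
#ifdef UNIV_DEBUG
  if (current_rec == insert_rec)
  {
    /* Print the context before asserting so the error log shows
       what the record pointers and lengths were. */
    fprintf(stderr,
            "Error: current_rec == insert_rec, extra_len %zu data_len %zu rec %p\n",
            extra_len, data_len, (const void*) insert_rec);
    assert(current_rec != insert_rec);
  }
#endif
  assert(current_rec != insert_rec);   /* the original invariant */
  /* ... linking insert_rec into the page's record list would follow ... */
}

int main()
{
  unsigned char a[8], b[8];
  link_record(a, b, sizeof a, sizeof b);   /* distinct records: both asserts pass */
  return 0;
}
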
--- storage/innobase/page/page0cur.c | 20 ++++++++++++++++++++ storage/xtradb/page/page0cur.c | 20 ++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/storage/innobase/page/page0cur.c b/storage/innobase/page/page0cur.c index a722f5b188d..4b6a1551ada 100644 --- a/storage/innobase/page/page0cur.c +++ b/storage/innobase/page/page0cur.c @@ -1048,6 +1048,26 @@ use_heap: insert_rec = rec_copy(insert_buf, rec, offsets); rec_offs_make_valid(insert_rec, index, offsets); + /* This is because assertion below is debug assertion */ +#ifdef UNIV_DEBUG + if (UNIV_UNLIKELY(current_rec == insert_rec)) { + ulint extra_len, data_len; + extra_len = rec_offs_extra_size(offsets); + data_len = rec_offs_data_size(offsets); + + fprintf(stderr, "InnoDB: Error: current_rec == insert_rec " + " extra_len %lu data_len %lu insert_buf %p rec %p\n", + extra_len, data_len, insert_buf, rec); + fprintf(stderr, "InnoDB; Physical record: \n"); + rec_print(stderr, rec, index); + fprintf(stderr, "InnoDB: Inserted record: \n"); + rec_print(stderr, insert_rec, index); + fprintf(stderr, "InnoDB: Current record: \n"); + rec_print(stderr, current_rec, index); + ut_a(current_rec != insert_rec); + } +#endif /* UNIV_DEBUG */ + /* 4. Insert the record in the linked list of records */ ut_ad(current_rec != insert_rec); diff --git a/storage/xtradb/page/page0cur.c b/storage/xtradb/page/page0cur.c index a722f5b188d..4b6a1551ada 100644 --- a/storage/xtradb/page/page0cur.c +++ b/storage/xtradb/page/page0cur.c @@ -1048,6 +1048,26 @@ use_heap: insert_rec = rec_copy(insert_buf, rec, offsets); rec_offs_make_valid(insert_rec, index, offsets); + /* This is because assertion below is debug assertion */ +#ifdef UNIV_DEBUG + if (UNIV_UNLIKELY(current_rec == insert_rec)) { + ulint extra_len, data_len; + extra_len = rec_offs_extra_size(offsets); + data_len = rec_offs_data_size(offsets); + + fprintf(stderr, "InnoDB: Error: current_rec == insert_rec " + " extra_len %lu data_len %lu insert_buf %p rec %p\n", + extra_len, data_len, insert_buf, rec); + fprintf(stderr, "InnoDB; Physical record: \n"); + rec_print(stderr, rec, index); + fprintf(stderr, "InnoDB: Inserted record: \n"); + rec_print(stderr, insert_rec, index); + fprintf(stderr, "InnoDB: Current record: \n"); + rec_print(stderr, current_rec, index); + ut_a(current_rec != insert_rec); + } +#endif /* UNIV_DEBUG */ + /* 4. Insert the record in the linked list of records */ ut_ad(current_rec != insert_rec); -- cgit v1.2.1 From ecb7ce7844237e2366ab5e8d9963f370cb1042aa Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Wed, 3 Aug 2016 15:55:48 +0400 Subject: MDEV-10467 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() Backporting MDEV-5781 from 10.0. 
--- mysql-test/r/func_math.result | 12 ++++++++++++ mysql-test/t/func_math.test | 12 ++++++++++++ sql/item_func.cc | 3 +++ 3 files changed, 27 insertions(+) diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index d122d435ac7..66bbb25b309 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -761,3 +761,15 @@ select 5 div 2.0; select 5.9 div 2, 1.23456789e3 DIV 2, 1.23456789e9 DIV 2, 1.23456789e19 DIV 2; 5.9 div 2 1.23456789e3 DIV 2 1.23456789e9 DIV 2 1.23456789e19 DIV 2 2 617 617283945 6172839450000000000 +# +# MDEV-10467 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() +# +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +SELECT STDDEV_SAMP(ROUND('0', 309)) FROM t1; +STDDEV_SAMP(ROUND('0', 309)) +0 +DROP TABLE t1; +# +# End of 5.5 tests +# diff --git a/mysql-test/t/func_math.test b/mysql-test/t/func_math.test index cd90184ebf5..d31b33b5df9 100644 --- a/mysql-test/t/func_math.test +++ b/mysql-test/t/func_math.test @@ -567,3 +567,15 @@ select 5.0 div 2.0; select 5.0 div 2; select 5 div 2.0; select 5.9 div 2, 1.23456789e3 DIV 2, 1.23456789e9 DIV 2, 1.23456789e19 DIV 2; + +--echo # +--echo # MDEV-10467 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() +--echo # +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +SELECT STDDEV_SAMP(ROUND('0', 309)) FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 5.5 tests +--echo # diff --git a/sql/item_func.cc b/sql/item_func.cc index 4b5f96cd3e7..6c80c7d3d86 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2661,6 +2661,9 @@ double my_double_round(double value, longlong dec, bool dec_unsigned, volatile double value_div_tmp= value / tmp; volatile double value_mul_tmp= value * tmp; + if (!dec_negative && my_isinf(tmp)) // "dec" is too large positive number + return value; + if (dec_negative && my_isinf(tmp)) tmp2= 0.0; else if (!dec_negative && my_isinf(value_mul_tmp)) -- cgit v1.2.1 From 141f88d1d5bd61ed736d200a9dd9d5c8d1a437ab Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 3 Aug 2016 12:41:38 +0000 Subject: MDEV-10357 my_context_continue() does not store current fiber on Windows Make sure current fiber is saved in my_context::app_fiber in both my_context_spawn() and my_context_continue() --- mysys/my_context.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/mysys/my_context.c b/mysys/my_context.c index 01d6f404627..5ddb2ccd566 100644 --- a/mysys/my_context.c +++ b/mysys/my_context.c @@ -698,30 +698,27 @@ my_context_destroy(struct my_context *c) int my_context_spawn(struct my_context *c, void (*f)(void *), void *d) { - void *current_fiber; c->user_func= f; c->user_arg= d; + return my_context_continue(c); +} + +int +my_context_continue(struct my_context *c) +{ /* This seems to be a common trick to run ConvertThreadToFiber() only on the first occurence in a thread, in a way that works on multiple Windows versions. 
*/ - current_fiber= GetCurrentFiber(); + void *current_fiber= GetCurrentFiber(); if (current_fiber == NULL || current_fiber == (void *)0x1e00) current_fiber= ConvertThreadToFiber(c); c->app_fiber= current_fiber; DBUG_SWAP_CODE_STATE(&c->dbug_state); SwitchToFiber(c->lib_fiber); DBUG_SWAP_CODE_STATE(&c->dbug_state); - return c->return_value; -} -int -my_context_continue(struct my_context *c) -{ - DBUG_SWAP_CODE_STATE(&c->dbug_state); - SwitchToFiber(c->lib_fiber); - DBUG_SWAP_CODE_STATE(&c->dbug_state); return c->return_value; } -- cgit v1.2.1 From 511313b9d640b8e4b2860980e79889f125d3cd5e Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 3 Aug 2016 13:42:46 +0000 Subject: MDEV-10010 - potential deadlock on windows due to recursive SRWLock acquisition Backport patch from 10.1 --- sql/sql_plugin.cc | 126 ++++++++++++++++++++++++++++-------------------------- sql/sql_plugin.h | 2 + sql/sql_show.cc | 19 ++++++-- 3 files changed, 82 insertions(+), 65 deletions(-) diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index dbbc2622866..c8c8c8ba324 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2896,68 +2896,8 @@ static uchar *intern_sys_var_ptr(THD* thd, int offset, bool global_lock) if (!thd->variables.dynamic_variables_ptr || (uint)offset > thd->variables.dynamic_variables_head) { - uint idx; - mysql_rwlock_rdlock(&LOCK_system_variables_hash); - - thd->variables.dynamic_variables_ptr= (char*) - my_realloc(thd->variables.dynamic_variables_ptr, - global_variables_dynamic_size, - MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); - - if (global_lock) - mysql_mutex_lock(&LOCK_global_system_variables); - - mysql_mutex_assert_owner(&LOCK_global_system_variables); - - memcpy(thd->variables.dynamic_variables_ptr + - thd->variables.dynamic_variables_size, - global_system_variables.dynamic_variables_ptr + - thd->variables.dynamic_variables_size, - global_system_variables.dynamic_variables_size - - thd->variables.dynamic_variables_size); - - /* - now we need to iterate through any newly copied 'defaults' - and if it is a string type with MEMALLOC flag, we need to strdup - */ - for (idx= 0; idx < bookmark_hash.records; idx++) - { - sys_var_pluginvar *pi; - sys_var *var; - st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx); - - if (v->version <= thd->variables.dynamic_variables_version) - continue; /* already in thd->variables */ - - if (!(var= intern_find_sys_var(v->key + 1, v->name_len)) || - !(pi= var->cast_pluginvar()) || - v->key[0] != plugin_var_bookmark_key(pi->plugin_var->flags)) - continue; - - /* Here we do anything special that may be required of the data types */ - - if ((pi->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR && - pi->plugin_var->flags & PLUGIN_VAR_MEMALLOC) - { - char **pp= (char**) (thd->variables.dynamic_variables_ptr + - *(int*)(pi->plugin_var + 1)); - if ((*pp= *(char**) (global_system_variables.dynamic_variables_ptr + - *(int*)(pi->plugin_var + 1)))) - *pp= my_strdup(*pp, MYF(MY_WME|MY_FAE)); - } - } - - if (global_lock) - mysql_mutex_unlock(&LOCK_global_system_variables); - - thd->variables.dynamic_variables_version= - global_system_variables.dynamic_variables_version; - thd->variables.dynamic_variables_head= - global_system_variables.dynamic_variables_head; - thd->variables.dynamic_variables_size= - global_system_variables.dynamic_variables_size; - + sync_dynamic_session_variables(thd, global_lock); mysql_rwlock_unlock(&LOCK_system_variables_hash); } DBUG_RETURN((uchar*)thd->variables.dynamic_variables_ptr + offset); @@ -3037,6 
+2977,70 @@ void plugin_thdvar_init(THD *thd) } + +void sync_dynamic_session_variables(THD* thd, bool global_lock) +{ + uint idx; + + thd->variables.dynamic_variables_ptr= (char*) + my_realloc(thd->variables.dynamic_variables_ptr, + global_variables_dynamic_size, + MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); + + if (global_lock) + mysql_mutex_lock(&LOCK_global_system_variables); + + mysql_mutex_assert_owner(&LOCK_global_system_variables); + + memcpy(thd->variables.dynamic_variables_ptr + + thd->variables.dynamic_variables_size, + global_system_variables.dynamic_variables_ptr + + thd->variables.dynamic_variables_size, + global_system_variables.dynamic_variables_size - + thd->variables.dynamic_variables_size); + + /* + now we need to iterate through any newly copied 'defaults' + and if it is a string type with MEMALLOC flag, we need to strdup + */ + for (idx= 0; idx < bookmark_hash.records; idx++) + { + sys_var_pluginvar *pi; + sys_var *var; + st_bookmark *v= (st_bookmark*) my_hash_element(&bookmark_hash,idx); + + if (v->version <= thd->variables.dynamic_variables_version) + continue; /* already in thd->variables */ + + if (!(var= intern_find_sys_var(v->key + 1, v->name_len)) || + !(pi= var->cast_pluginvar()) || + v->key[0] != plugin_var_bookmark_key(pi->plugin_var->flags)) + continue; + + /* Here we do anything special that may be required of the data types */ + + if ((pi->plugin_var->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR && + pi->plugin_var->flags & PLUGIN_VAR_MEMALLOC) + { + int offset= ((thdvar_str_t *)(pi->plugin_var))->offset; + char **pp= (char**) (thd->variables.dynamic_variables_ptr + offset); + if (*pp) + *pp= my_strdup(*pp, MYF(MY_WME|MY_FAE)); + } + } + + if (global_lock) + mysql_mutex_unlock(&LOCK_global_system_variables); + + thd->variables.dynamic_variables_version= + global_system_variables.dynamic_variables_version; + thd->variables.dynamic_variables_head= + global_system_variables.dynamic_variables_head; + thd->variables.dynamic_variables_size= + global_system_variables.dynamic_variables_size; +} + + /* Unlocks all system variables which hold a reference */ diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index be1cfcdcc4f..fcc73c83adb 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -174,4 +174,6 @@ typedef my_bool (plugin_foreach_func)(THD *thd, #define plugin_foreach(A,B,C,D) plugin_foreach_with_mask(A,B,C,PLUGIN_IS_READY,D) extern bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func, int type, uint state_mask, void *arg); + +extern void sync_dynamic_session_variables(THD* thd, bool global_lock); #endif diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 98cade49962..d8ea232caea 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -6937,19 +6937,30 @@ int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) const char *wild= lex->wild ? 
lex->wild->ptr() : NullS; enum enum_schema_tables schema_table_idx= get_schema_table_idx(tables->schema_table); - enum enum_var_type option_type= OPT_SESSION; + enum enum_var_type scope= OPT_SESSION; bool upper_case_names= (schema_table_idx != SCH_VARIABLES); bool sorted_vars= (schema_table_idx == SCH_VARIABLES); if ((sorted_vars && lex->option_type == OPT_GLOBAL) || schema_table_idx == SCH_GLOBAL_VARIABLES) - option_type= OPT_GLOBAL; + scope= OPT_GLOBAL; COND *partial_cond= make_cond_for_info_schema(cond, tables); mysql_rwlock_rdlock(&LOCK_system_variables_hash); - res= show_status_array(thd, wild, enumerate_sys_vars(thd, sorted_vars, option_type), - option_type, NULL, "", tables->table, + + /* + Avoid recursive LOCK_system_variables_hash acquisition in + intern_sys_var_ptr() by pre-syncing dynamic session variables. + */ + if (scope == OPT_SESSION && + (!thd->variables.dynamic_variables_ptr || + global_system_variables.dynamic_variables_head > + thd->variables.dynamic_variables_head)) + sync_dynamic_session_variables(thd, true); + + res= show_status_array(thd, wild, enumerate_sys_vars(thd, sorted_vars, scope), + scope, NULL, "", tables->table, upper_case_names, partial_cond); mysql_rwlock_unlock(&LOCK_system_variables_hash); DBUG_RETURN(res); -- cgit v1.2.1 From 19fe10c3e9609d48c1240667e4400395dd8e9a3b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 20:39:47 +0200 Subject: MDEV-6581 Writing to TEMPORARY TABLE not possible in read-only don't mark transactions read-write if no real storage engine is affected (only binlog writes). --- sql/handler.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/handler.cc b/sql/handler.cc index d528c0aea7a..5fc75602039 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1238,7 +1238,8 @@ int ha_commit_trans(THD *thd, bool all) uint rw_ha_count= ha_check_and_coalesce_trx_read_only(thd, ha_info, all); /* rw_trans is TRUE when we in a transaction changing data */ - bool rw_trans= is_real_trans && (rw_ha_count > 0); + bool rw_trans= is_real_trans && + (rw_ha_count > !thd->is_current_stmt_binlog_disabled()); MDL_request mdl_request; if (rw_trans) -- cgit v1.2.1 From e316c46f439bbbe1888656fc022fd5fedfd315b1 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 20:43:29 +0200 Subject: 5.5.50-38.0 --- storage/xtradb/include/log0online.h | 4 ++-- storage/xtradb/include/univ.i | 2 +- storage/xtradb/log/log0online.c | 4 ++-- storage/xtradb/log/log0recv.c | 46 ++++++++++++++++++++++++------------- 4 files changed, 35 insertions(+), 21 deletions(-) diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h index a20eef57d7a..02d75001505 100644 --- a/storage/xtradb/include/log0online.h +++ b/storage/xtradb/include/log0online.h @@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 59 Temple -Place, Suite 330, Boston, MA 02111-1307 USA +this program; if not, write to the Free Software Foundation, Inc., 51 Franklin +Street, Fifth Floor, Boston, MA 02110-1301, USA *****************************************************************************/ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index d5ec85ce995..b158a12027f 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -64,7 +64,7 @@ component, i.e. we show M.N.P as M.N */ (INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR) #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 37.9 +#define PERCONA_INNODB_VERSION 38.0 #endif #define INNODB_VERSION_STR MYSQL_SERVER_VERSION diff --git a/storage/xtradb/log/log0online.c b/storage/xtradb/log/log0online.c index a8444199ea9..d0127488f67 100644 --- a/storage/xtradb/log/log0online.c +++ b/storage/xtradb/log/log0online.c @@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 59 Temple -Place, Suite 330, Boston, MA 02111-1307 USA +this program; if not, write to the Free Software Foundation, Inc., 51 Franklin +Street, Fifth Floor, Boston, MA 02110-1301, USA *****************************************************************************/ diff --git a/storage/xtradb/log/log0recv.c b/storage/xtradb/log/log0recv.c index 3429361c74c..6c2a121967e 100644 --- a/storage/xtradb/log/log0recv.c +++ b/storage/xtradb/log/log0recv.c @@ -659,6 +659,7 @@ recv_check_cp_is_consistent( } #ifndef UNIV_HOTBACKUP + /********************************************************//** Looks for the maximum consistent checkpoint from the log groups. @return error code or DB_SUCCESS */ @@ -685,8 +686,37 @@ recv_find_max_checkpoint( buf = log_sys->checkpoint_buf; while (group) { + + ulint log_hdr_log_block_size; + group->state = LOG_GROUP_CORRUPTED; + /* Assert that we can reuse log_sys->checkpoint_buf to read the + part of the header that contains the log block size. 
*/ + ut_ad(LOG_FILE_OS_FILE_LOG_BLOCK_SIZE + 4 + < OS_FILE_LOG_BLOCK_SIZE); + + fil_io(OS_FILE_READ | OS_FILE_LOG, TRUE, group->space_id, 0, + 0, 0, OS_FILE_LOG_BLOCK_SIZE, + log_sys->checkpoint_buf, NULL); + log_hdr_log_block_size + = mach_read_from_4(log_sys->checkpoint_buf + + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE); + if (log_hdr_log_block_size == 0) { + /* 0 means default value */ + log_hdr_log_block_size = 512; + } + if (log_hdr_log_block_size != srv_log_block_size) { + fprintf(stderr, + "InnoDB: Error: The block size of ib_logfile " + "%lu is not equal to innodb_log_block_size " + "%lu.\n" + "InnoDB: Error: Suggestion - Recreate log " + "files.\n", + log_hdr_log_block_size, srv_log_block_size); + return(DB_ERROR); + } + for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { @@ -2982,7 +3012,6 @@ recv_recovery_from_checkpoint_start_func( log_group_t* max_cp_group; log_group_t* up_to_date_group; ulint max_cp_field; - ulint log_hdr_log_block_size; ib_uint64_t checkpoint_lsn; ib_uint64_t checkpoint_no; ib_uint64_t old_scanned_lsn; @@ -3085,21 +3114,6 @@ recv_recovery_from_checkpoint_start_func( log_hdr_buf, max_cp_group); } - log_hdr_log_block_size - = mach_read_from_4(log_hdr_buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE); - if (log_hdr_log_block_size == 0) { - /* 0 means default value */ - log_hdr_log_block_size = 512; - } - if (log_hdr_log_block_size != srv_log_block_size) { - fprintf(stderr, - "InnoDB: Error: The block size of ib_logfile (%lu) " - "is not equal to innodb_log_block_size.\n" - "InnoDB: Error: Suggestion - Recreate log files.\n", - log_hdr_log_block_size); - return(DB_ERROR); - } - #ifdef UNIV_LOG_ARCHIVE group = UT_LIST_GET_FIRST(log_sys->log_groups); -- cgit v1.2.1 From 75891eda111b399a9a5da24c2e21a5083d8811bf Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 17:50:45 +0200 Subject: improve pam_cleartext.test a bit --- mysql-test/suite/plugins/r/pam_cleartext.result | 3 +++ mysql-test/suite/plugins/t/pam_cleartext.test | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/plugins/r/pam_cleartext.result b/mysql-test/suite/plugins/r/pam_cleartext.result index 00e0e94618e..b9eee74ec3e 100644 --- a/mysql-test/suite/plugins/r/pam_cleartext.result +++ b/mysql-test/suite/plugins/r/pam_cleartext.result @@ -5,6 +5,9 @@ grant proxy on pam_test to test_pam; show variables like 'pam%'; Variable_name Value pam_use_cleartext_plugin ON +# +# same test as in pam.test now fails +# drop user test_pam; drop user pam_test; uninstall plugin pam; diff --git a/mysql-test/suite/plugins/t/pam_cleartext.test b/mysql-test/suite/plugins/t/pam_cleartext.test index e80cff5f476..c1f710118dc 100644 --- a/mysql-test/suite/plugins/t/pam_cleartext.test +++ b/mysql-test/suite/plugins/t/pam_cleartext.test @@ -3,10 +3,20 @@ show variables like 'pam%'; +--write_file $MYSQLTEST_VARDIR/tmp/pam_good.txt +not very secret challenge +9225 +select user(), current_user(), database(); +EOF + +--echo # +--echo # same test as in pam.test now fails +--echo # --error 1 ---exec echo FAIL | $MYSQL_TEST -u test_pam --plugin-dir=$plugindir +--exec $MYSQL_TEST -u test_pam --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/pam_good.txt + +--remove_file $MYSQLTEST_VARDIR/tmp/pam_good.txt drop user test_pam; drop user pam_test; uninstall plugin pam; - -- cgit v1.2.1 From 9d2f8929994a5401a53dbbaef64ac161b6171757 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 17:58:56 +0200 Subject: MDEV-7329 
plugins.pam_cleartext fails sporadically in buildbot wait until the failed connection thread completely dies before uninstalling pam plugin --- mysql-test/suite/plugins/t/pam.test | 3 ++- mysql-test/suite/plugins/t/pam_cleartext.test | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/plugins/t/pam.test b/mysql-test/suite/plugins/t/pam.test index 1871e5801a3..8a95d6baed2 100644 --- a/mysql-test/suite/plugins/t/pam.test +++ b/mysql-test/suite/plugins/t/pam.test @@ -29,5 +29,6 @@ EOF --remove_file $MYSQLTEST_VARDIR/tmp/pam_bad.txt drop user test_pam; drop user pam_test; +let $count_sessions= 1; +--source include/wait_until_count_sessions.inc uninstall plugin pam; - diff --git a/mysql-test/suite/plugins/t/pam_cleartext.test b/mysql-test/suite/plugins/t/pam_cleartext.test index c1f710118dc..8476c39fd89 100644 --- a/mysql-test/suite/plugins/t/pam_cleartext.test +++ b/mysql-test/suite/plugins/t/pam_cleartext.test @@ -19,4 +19,6 @@ EOF drop user test_pam; drop user pam_test; +let $count_sessions= 1; +--source include/wait_until_count_sessions.inc uninstall plugin pam; -- cgit v1.2.1 From 03dec1aa493517e846b6cecd67e4a9f72a44b92b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 18:05:29 +0200 Subject: MDEV-10350 "./mtr --report-features" doesn't work removed --- mysql-test/include/report-features.test | 12 ------------ mysql-test/mysql-test-run.pl | 18 ------------------ 2 files changed, 30 deletions(-) delete mode 100644 mysql-test/include/report-features.test diff --git a/mysql-test/include/report-features.test b/mysql-test/include/report-features.test deleted file mode 100644 index 75879f67165..00000000000 --- a/mysql-test/include/report-features.test +++ /dev/null @@ -1,12 +0,0 @@ -# -# show server variables -# - ---disable_query_log ---echo ===== ENGINES ===== -show engines; ---echo ===== VARIABLES ===== -show variables; ---echo ===== STOP ===== ---enable_query_log -exit; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index a85bed88395..7bbbcead665 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -276,7 +276,6 @@ my $opt_port_base= $ENV{'MTR_PORT_BASE'} || "auto"; my $build_thread= 0; my $opt_record; -my $opt_report_features; our $opt_resfile= $ENV{'MTR_RESULT_FILE'} || 0; @@ -422,21 +421,6 @@ sub main { my $tests= collect_test_cases($opt_reorder, $opt_suites, \@opt_cases, \@opt_skip_test_list); mark_time_used('collect'); - if ( $opt_report_features ) { - # Put "report features" as the first test to run - my $tinfo = My::Test->new - ( - name => 'report_features', - # No result_file => Prints result - path => 'include/report-features.test', - template_path => "include/default_my.cnf", - master_opt => [], - slave_opt => [], - suite => 'main', - ); - unshift(@$tests, $tinfo); - } - ####################################################################### my $num_tests= @$tests; if ( $opt_parallel eq "auto" ) { @@ -1203,7 +1187,6 @@ sub command_line_setup { 'client-libdir=s' => \$path_client_libdir, # Misc - 'report-features' => \$opt_report_features, 'comment=s' => \$opt_comment, 'fast' => \$opt_fast, 'force-restart' => \$opt_force_restart, @@ -6569,7 +6552,6 @@ Misc options gprof Collect profiling information using gprof. experimental= Refer to list of tests considered experimental; failures will be marked exp-fail instead of fail. 
- report-features First run a "test" that reports mysql features timestamp Print timestamp before each test report line timediff With --timestamp, also print time passed since *previous* test started -- cgit v1.2.1 From 0214115c7f8007a325cf3466a5bc6680e575a119 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 1 Aug 2016 16:53:57 +0200 Subject: trivial cleanup --- sql/sys_vars.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 63d3b388a36..bf7ed231d77 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3025,14 +3025,16 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var) if (!var->save_result.string_value.str) return true; - if (var->save_result.string_value.length > FN_REFLEN) + LEX_STRING *val= &var->save_result.string_value; + + if (val->length > FN_REFLEN) { // path is too long my_error(ER_PATH_LENGTH, MYF(0), self->name.str); return true; } char path[FN_REFLEN]; - size_t path_length= unpack_filename(path, var->save_result.string_value.str); + size_t path_length= unpack_filename(path, val->str); if (!path_length) return true; @@ -3046,9 +3048,9 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var) return false; } - (void) dirname_part(path, var->save_result.string_value.str, &path_length); + (void) dirname_part(path, val->str, &path_length); - if (var->save_result.string_value.length - path_length >= FN_LEN) + if (val->length - path_length >= FN_LEN) { // filename is too long my_error(ER_PATH_LENGTH, MYF(0), self->name.str); return true; -- cgit v1.2.1 From 470f2598cca350b79531bf0b88463a47d94abec3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 3 Aug 2016 20:56:24 +0200 Subject: MDEV-10465 general_log_file can be abused This issue was discovered by Dawid Golunski (http://legalhackers.com) --- mysql-test/suite/sys_vars/r/general_log_file_basic.result | 6 ++++++ mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result | 6 ++++++ mysql-test/suite/sys_vars/t/general_log_file_basic.test | 10 ++++++++++ mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test | 10 ++++++++++ sql/sys_vars.cc | 7 +++++++ 5 files changed, 39 insertions(+) diff --git a/mysql-test/suite/sys_vars/r/general_log_file_basic.result b/mysql-test/suite/sys_vars/r/general_log_file_basic.result index 369ef7844db..54b450a2fce 100644 --- a/mysql-test/suite/sys_vars/r/general_log_file_basic.result +++ b/mysql-test/suite/sys_vars/r/general_log_file_basic.result @@ -12,6 +12,12 @@ SET @@global.general_log_file = mytest.log; ERROR 42000: Incorrect argument type to variable 'general_log_file' SET @@global.general_log_file = 12; ERROR 42000: Incorrect argument type to variable 'general_log_file' +SET @@global.general_log_file = 'my.cnf'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.cnf' +SET @@global.general_log_file = '/tmp/my.cnf'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of '/tmp/my.cnf' +SET @@global.general_log_file = '.my.cnf'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.general_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result index f45c568ff4a..e2ed7d63fdb 100644 --- a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result +++ 
b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result @@ -9,6 +9,12 @@ SET @@global.slow_query_log_file = mytest.log; ERROR 42000: Incorrect argument type to variable 'slow_query_log_file' SET @@global.slow_query_log_file = 12; ERROR 42000: Incorrect argument type to variable 'slow_query_log_file' +SET @@global.slow_query_log_file = 'my.cnf'; +ERROR 42000: Variable 'slow_query_log_file' can't be set to the value of 'my.cnf' +SET @@global.slow_query_log_file = '/tmp/my.cnf'; +ERROR 42000: Variable 'slow_query_log_file' can't be set to the value of '/tmp/my.cnf' +SET @@global.general_log_file = '.my.cnf'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.slow_query_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/t/general_log_file_basic.test b/mysql-test/suite/sys_vars/t/general_log_file_basic.test index 12362fa123c..cdb2cc4b36e 100644 --- a/mysql-test/suite/sys_vars/t/general_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/general_log_file_basic.test @@ -58,6 +58,16 @@ SET @@global.general_log_file = mytest.log; --error ER_WRONG_TYPE_FOR_VAR SET @@global.general_log_file = 12; +# +# MDEV-10465 +# +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = 'my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = '/tmp/my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = '.my.cnf'; + --echo '#----------------------FN_DYNVARS_004_03------------------------#' ############################################################################## diff --git a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test index 28fc17f6077..835cb251e39 100644 --- a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test @@ -56,6 +56,16 @@ SET @@global.slow_query_log_file = mytest.log; --error ER_WRONG_TYPE_FOR_VAR SET @@global.slow_query_log_file = 12; +# +# MDEV-10465 +# +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.slow_query_log_file = 'my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.slow_query_log_file = '/tmp/my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = '.my.cnf'; + --echo '#----------------------FN_DYNVARS_004_03------------------------#' ############################################################################## # Check if the value in GLOBAL Tables matches values in variable # diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index bf7ed231d77..2ed5be3bf3b 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3033,6 +3033,13 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var) return true; } + static const LEX_CSTRING my_cnf= { STRING_WITH_LEN("my.cnf") }; + if (val->length >= my_cnf.length) + { + if (strcasecmp(val->str + val->length - my_cnf.length, my_cnf.str) == 0) + return true; // log file name ends with "my.cnf" + } + char path[FN_REFLEN]; size_t path_length= unpack_filename(path, val->str); -- cgit v1.2.1 From 44e3046d3b09a21e21295979d6ddad9f332ebadd Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 3 Aug 2016 22:15:57 -0400 Subject: MDEV-10487: Galera SST using rsync does not filter out lost+found In rsync based SST method, during third phase of data transfer, 'lost+found' should be filtered out while recursively transferring files from various directories under data directory. 
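The actual fix is the one-line find(1) filter in the hunk below. As a language-neutral illustration of the idea — enumerate only the real first-level directories under the datadir and skip the fsck artifact before handing them to the transfer step — a hypothetical C++17 sketch (the datadir path is made up):

#include <filesystem>
#include <system_error>
#include <cstdio>

int main()
{
  namespace fs = std::filesystem;
  const fs::path datadir("/var/lib/mysql");        /* hypothetical datadir */

  std::error_code ec;
  for (const auto &entry : fs::directory_iterator(datadir, ec))
  {
    if (!entry.is_directory())
      continue;                                    /* phase 3 only walks directories */
    if (entry.path().filename() == "lost+found")
      continue;                                    /* fsck artifact, never MariaDB data */
    std::printf("%s\n", entry.path().c_str());     /* would be passed on to rsync */
  }
  return 0;
}
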
--- scripts/wsrep_sst_rsync.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index 3202087f526..7f0901c10fb 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -176,7 +176,8 @@ then [ "$OS" == "Linux" ] && count=$(grep -c processor /proc/cpuinfo) [ "$OS" == "Darwin" -o "$OS" == "FreeBSD" ] && count=$(sysctl -n hw.ncpu) - find . -maxdepth 1 -mindepth 1 -type d -print0 | xargs -I{} -0 -P $count \ + find . -maxdepth 1 -mindepth 1 -type d -not -name "lost+found" -print0 | \ + xargs -I{} -0 -P $count \ rsync --owner --group --perms --links --specials \ --ignore-times --inplace --recursive --delete --quiet \ $WHOLE_FILE_OPT --exclude '*/ib_logfile*' "$WSREP_SST_OPT_DATA"/{}/ \ -- cgit v1.2.1 From eb32dfd8092a656e2eb77107d8b5d31e143e2cc4 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 3 Aug 2016 11:49:35 +0400 Subject: MDEV-10365 - Race condition in error handling of INSERT DELAYED Shared variables of Delayed_insert may be updated without mutex protection when delayed insert thread gets an error. Re-acquire mutex earlier, so that shared variables are protected. --- sql/sql_insert.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index c60ef6fcc6e..70a12faafb5 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3238,6 +3238,7 @@ bool Delayed_insert::handle_inserts(void) max_rows= 0; // For DBUG output #endif /* Remove all not used rows */ + mysql_mutex_lock(&mutex); while ((row=rows.get())) { if (table->s->blob_fields) @@ -3254,7 +3255,6 @@ bool Delayed_insert::handle_inserts(void) } DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows)); thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status); - mysql_mutex_lock(&mutex); DBUG_RETURN(1); } #endif /* EMBEDDED_LIBRARY */ -- cgit v1.2.1 From 93d5cdf03f22df5cc6e071edd623a00f82b0e6e7 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 4 Aug 2016 13:14:45 +0300 Subject: MDEV-9946: main.xtradb_mrr fails sporadically Make the testcase stable by adding FORCE INDEX --- mysql-test/r/xtradb_mrr.result | 4 ++-- mysql-test/t/xtradb_mrr.test | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/xtradb_mrr.result b/mysql-test/r/xtradb_mrr.result index 15b750d2fd3..c238d0530af 100644 --- a/mysql-test/r/xtradb_mrr.result +++ b/mysql-test/r/xtradb_mrr.result @@ -311,10 +311,10 @@ concat('c-', 1000 + C.a, '-c'), 'filler' from t1 A, t1 B, t1 C; explain -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; +select count(length(a) + length(filler)) from t2 force index (a) where a>='a-1000-a' and a <'a-1001-a'; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 range a a 9 NULL 99 Using index condition; Rowid-ordered scan -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; +select count(length(a) + length(filler)) from t2 force index (a) where a>='a-1000-a' and a <'a-1001-a'; count(length(a) + length(filler)) 100 drop table t2; diff --git a/mysql-test/t/xtradb_mrr.test b/mysql-test/t/xtradb_mrr.test index 260eb9f3955..d994c182ccc 100644 --- a/mysql-test/t/xtradb_mrr.test +++ b/mysql-test/t/xtradb_mrr.test @@ -33,8 +33,8 @@ insert into t2 select from t1 A, t1 B, t1 C; explain -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a <'a-1001-a'; -select count(length(a) + length(filler)) from t2 where a>='a-1000-a' and a 
<'a-1001-a'; +select count(length(a) + length(filler)) from t2 force index (a) where a>='a-1000-a' and a <'a-1001-a'; +select count(length(a) + length(filler)) from t2 force index (a) where a>='a-1000-a' and a <'a-1001-a'; drop table t2; # Try a very big rowid -- cgit v1.2.1 From e1c92a6ca9130ae07c9fa596c969a4b4f3a95ee3 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Fri, 5 Aug 2016 14:12:01 -0700 Subject: Fixed a problem with unreferenced CTE: explain for the query containing WITH clause with an unreferenced CTE caused a crash. Added a test covered this case. Also added a test for usage CTE in different parts of union. --- mysql-test/r/cte_nonrecursive.result | 48 ++++++++++++++++++++++++++++++++++++ mysql-test/t/cte_nonrecursive.test | 32 ++++++++++++++++++++++++ sql/sql_select.cc | 13 +++++++--- 3 files changed, 89 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 713ec4bcec3..7481f26591f 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -309,6 +309,41 @@ c c 1 1 4 4 4 4 +# t two references of t used in different parts of a union +with t as (select a from t1 where b >= 'c') +select * from t where a < 2 +union +select * from t where a >= 4; +a +1 +4 +select * from (select a from t1 where b >= 'c') as t +where t.a < 2 +union +select * from (select a from t1 where b >= 'c') as t +where t.a >= 4; +a +1 +4 +explain +with t as (select a from t1 where b >= 'c') +select * from t where a < 2 +union +select * from t where a >= 4; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where +3 UNION t1 ALL NULL NULL NULL NULL 8 Using where +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain +select * from (select a from t1 where b >= 'c') as t +where t.a < 2 +union +select * from (select a from t1 where b >= 'c') as t +where t.a >= 4; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where +3 UNION t1 ALL NULL NULL NULL NULL 8 Using where +NULL UNION RESULT ALL NULL NULL NULL NULL NULL # specification of t contains union with t as (select a from t1 where b >= 'f' union @@ -749,6 +784,19 @@ ERROR HY000: WITH column list and SELECT field list have different column counts with t(f1,f1) as (select * from t1 where b >= 'c') select t1.b from t2,t1 where t1.a = t2.c; ERROR 42S21: Duplicate column name 'f1' +# explain for query with unreferenced with table +explain +with t as (select a from t1 where b >= 'c') +select t1.b from t2,t1 where t1.a = t2.c; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 4 +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) +explain +with t as (select a, count(*) from t1 where b >= 'c' group by a) +select t1.b from t2,t1 where t1.a = t2.c; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 4 +1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) drop table t1,t2; # # Bug mdev-9937: View used in the specification of with table diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 8caf0832df4..eb6677e7f75 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -152,6 +152,28 @@ select * from (select * from t1 where b >= 'c') as r1, with t(c) as (select a from t1 where b >= 'c') select 
* from t r1, t r2 where r1.c=r2.c; +--echo # t two references of t used in different parts of a union +with t as (select a from t1 where b >= 'c') + select * from t where a < 2 + union + select * from t where a >= 4; +select * from (select a from t1 where b >= 'c') as t + where t.a < 2 +union +select * from (select a from t1 where b >= 'c') as t + where t.a >= 4; +explain +with t as (select a from t1 where b >= 'c') + select * from t where a < 2 + union + select * from t where a >= 4; +explain +select * from (select a from t1 where b >= 'c') as t + where t.a < 2 +union +select * from (select a from t1 where b >= 'c') as t + where t.a >= 4; + --echo # specification of t contains union with t as (select a from t1 where b >= 'f' union @@ -437,6 +459,16 @@ with t(f) as (select * from t1 where b >= 'c') with t(f1,f1) as (select * from t1 where b >= 'c') select t1.b from t2,t1 where t1.a = t2.c; +--echo # explain for query with unreferenced with table + +explain +with t as (select a from t1 where b >= 'c') + select t1.b from t2,t1 where t1.a = t2.c; + +explain +with t as (select a, count(*) from t1 where b >= 'c' group by a) + select t1.b from t2,t1 where t1.a = t2.c; + drop table t1,t2; --echo # diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 4415596f5b8..e4257d6e94d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -24402,11 +24402,14 @@ int JOIN::save_explain_data_intern(Explain_query *output, (1) they are not parts of ON clauses that were eliminated by table elimination. (2) they are not merged derived tables + (3) they are not unreferenced CTE */ if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1) (!tmp_unit->derived || - tmp_unit->derived->is_materialized_derived())) // (2) - { + tmp_unit->derived->is_materialized_derived()) && // (2) + !(tmp_unit->with_element && + !tmp_unit->with_element->is_referenced())) // (3) + { explain->add_child(tmp_unit->first_select()->select_number); } } @@ -24466,9 +24469,11 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, Save plans for child subqueries, when (1) they are not parts of eliminated WHERE/ON clauses. (2) they are not VIEWs that were "merged for INSERT". + (3) they are not unreferenced CTE. 
*/ - if (!(unit->item && unit->item->eliminated) && // (1) - !(unit->derived && unit->derived->merged_for_insert)) // (2) + if (!(unit->item && unit->item->eliminated) && // (1) + !(unit->derived && unit->derived->merged_for_insert) && // (2) + !(unit->with_element && !unit->with_element->is_referenced())) // (3) { if (mysql_explain_union(thd, unit, result)) DBUG_VOID_RETURN; -- cgit v1.2.1 From 5e23b6344f3b229edcb0d9c42ec23b689c329a38 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 7 Aug 2016 11:02:42 +0200 Subject: MDEV-10506 Protocol::end_statement(): Assertion `0' failed upon ALTER TABLE thd->clear_error() destroyed already existing error status --- mysql-test/r/myisam_enable_keys-10506.result | 114 ++++++++++++++++++++++++++ mysql-test/t/myisam_enable_keys-10506.test | 117 +++++++++++++++++++++++++++ storage/myisam/ha_myisam.cc | 3 +- 3 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/myisam_enable_keys-10506.result create mode 100644 mysql-test/t/myisam_enable_keys-10506.test diff --git a/mysql-test/r/myisam_enable_keys-10506.result b/mysql-test/r/myisam_enable_keys-10506.result new file mode 100644 index 00000000000..547f001fe34 --- /dev/null +++ b/mysql-test/r/myisam_enable_keys-10506.result @@ -0,0 +1,114 @@ +CREATE TABLE t1 ( +pk INT AUTO_INCREMENT, +i INT, +d DATE, +dt DATETIME, +v VARCHAR(1), +PRIMARY KEY (pk), +KEY (dt) +) ENGINE=MyISAM; +INSERT INTO t1 (i, d, dt, v) VALUES +(9, '2005-07-23', '2004-05-13 01:01:39', 't'), +(2, '2009-11-01', '2003-12-24 07:39:29', 'h'), +(6, NULL, '2008-07-03 05:32:22', 'l'), +(6, '2007-07-16', '2008-08-28 18:46:11', 'j'), +(5, NULL, '2001-07-12 21:27:00', 'h'), +(3, '2007-07-22', '1900-01-01 00:00:00', 'p'), +(2, '2000-11-21', '2007-05-25 11:58:54', 'g'), +(6, '1900-01-01', '2009-06-03 17:11:10', 'i'), +(2, '2008-02-10', '2001-06-15 16:20:07', 'p'), +(3, '2009-06-04', '1900-01-01 00:00:00', 'h'), +(9, '2007-04-25', '1900-01-01 00:00:00', 'e'), +(9, '2006-03-02', '1900-01-01 00:00:00', 'e'), +(1, '1900-01-01', '2002-11-08 09:33:27', 'u'), +(7, '2008-07-13', '2007-08-07 17:35:52', 'j'), +(0, '2004-11-12', '2006-05-01 00:00:00', 'e'), +(0, '1900-01-01', '2003-05-01 00:00:00', 'z'), +(1, '2009-09-02', '2007-02-12 09:30:49', 'w'), +(0, '2004-11-06', '1900-01-01 00:00:00', 't'), +(4, '2003-01-06', '2002-07-03 02:51:11', 'i'), +(6, '2006-01-14', '2008-02-26 04:57:32', 'i'), +(0, '2002-01-19', '2009-02-12 00:00:00', 'i'), +(8, '2007-02-12', '1900-01-01 00:00:00', 'b'), +(4, '1900-01-01', '2001-05-16 05:28:40', 'm'), +(2, '2005-07-16', NULL, 'j'), +(1, '2004-09-04', '2001-01-24 21:45:18', 'v'), +(3, '2009-07-01', NULL, NULL), +(2, '2009-07-21', '2002-07-24 00:00:00', 'h'), +(4, NULL, '2001-11-03 12:22:30', 'q'), +(1, '2002-06-22', '2008-06-17 03:17:59', 'f'), +(7, '2005-06-23', '2005-12-24 00:00:00', 'p'), +(6, '2001-05-20', '2008-10-23 00:00:00', NULL), +(3, '2001-10-01', '2000-10-12 16:32:35', 'o'), +(3, '2001-01-07', '2005-09-11 10:09:54', 'w'), +(6, '2007-11-02', '2009-09-10 01:44:18', 'l'), +(6, NULL, NULL, 'i'), +(9, NULL, '2002-05-18 15:21:55', 'd'), +(4, '2008-12-21', '2004-10-15 10:09:54', 'j'), +(6, '2003-10-05', '2009-07-13 03:51:02', 'e'), +(2, '2001-03-03', '1900-01-01 00:00:00', 'e'), +(2, '2007-04-04', '2001-11-08 21:14:52', 'q'), +(5, NULL, '2006-12-02 00:00:00', 'm'), +(0, '2009-01-04', '1900-01-01 00:00:00', NULL), +(8, '2008-04-03', '2005-01-01 11:55:18', 'q'), +(8, NULL, '2005-02-28 03:44:02', 'w'), +(0, '2003-08-22', NULL, 'c'), +(9, '1900-01-01', NULL, 'y'), +(NULL, NULL, '2006-08-25 
16:28:09', 'g'), +(5, '2004-07-04', '2002-08-11 00:00:00', 'z'), +(1, '1900-01-01', '2007-07-22 21:19:18', 'm'), +(2, '2007-02-04', '2006-02-10 18:41:38', 't'), +(2, '1900-01-01', '2009-02-16 14:58:58', 'd'), +(7, '2001-03-14', '2007-08-14 00:00:00', 'h'), +(0, NULL, '1900-01-01 00:00:00', NULL), +(1, '2008-10-05', NULL, 'f'), +(6, '2001-11-25', '2008-12-03 06:59:23', 'l'), +(NULL, '2003-01-27', '2008-10-04 00:00:00', 'g'), +(8, '2008-08-08', '2009-07-07 07:00:21', 'v'), +(8, '2006-07-03', '2001-04-15 00:00:00', NULL), +(5, '2002-11-21', '2007-07-08 04:01:58', 'm'), +(5, '2006-04-08', '2007-09-23 00:01:35', 'i'), +(5, '2001-05-06', '2008-05-15 00:00:00', 'h'), +(7, '1900-01-01', '1900-01-01 00:00:00', 'u'), +(30, '2007-04-16', '2004-03-05 23:35:38', 'o'), +(NULL, '1900-01-01', '2007-08-25 01:32:47', 'z'), +(6, '2004-12-03', '1900-01-01 00:00:00', 'o'), +(8, '2001-06-23', '1900-01-01 00:00:00', 'f'), +(NULL, '2008-12-15', '2001-05-19 08:28:28', 'a'), +(9, '2000-02-15', '2009-09-03 06:07:22', 'd'), +(2, '2001-08-05', '2006-10-08 07:17:27', 'k'), +(5, '2004-01-17', '2003-09-06 20:36:01', 'd'), +(4, '2003-10-01', '2001-02-05 18:10:49', 'u'), +(4, '2003-07-28', '2001-01-07 16:11:37', 'h'), +(0, '1900-01-01', '2008-08-01 05:26:38', 'w'), +(9, '1900-01-01', '2001-05-08 00:00:00', 't'), +(1, '2000-04-17', '2008-07-10 21:26:28', 'i'), +(8, '2002-01-05', '2006-08-06 20:56:35', 'k'), +(9, '2001-04-10', '2003-02-17 00:00:00', 'z'), +(0, '2009-12-04', NULL, 'h'), +(7, NULL, '2004-10-27 00:29:57', 'h'), +(2, '2006-03-07', '2008-03-04 06:14:13', 'b'), +(0, '2001-10-15', '2001-03-17 00:00:00', 'm'), +(5, '1900-01-01', '2009-02-21 11:35:50', 'i'), +(4, NULL, '1900-01-01 00:00:00', 'w'), +(5, '2009-04-05', '1900-01-01 00:00:00', 'm'), +(6, '2001-03-19', '2001-04-12 00:00:00', 'q'), +(NULL, '2009-12-08', '2001-12-04 20:21:01', 'k'), +(2, '2005-02-09', '2001-05-27 08:41:01', 'l'), +(9, '2004-05-25', '2004-09-18 00:00:00', 'c'), +(3, '2005-01-17', '2002-09-12 11:18:48', 'd'), +(0, '2003-08-28', '1900-01-01 00:00:00', 'k'), +(6, '2006-10-11', '2003-10-28 03:31:02', 'a'), +(5, '1900-01-01', '2001-08-22 10:20:09', 'p'), +(8, '1900-01-01', '2008-04-24 00:00:00', 'o'), +(4, '2005-08-18', '2006-11-10 10:08:49', 'e'), +(NULL, '2007-03-12', '2007-10-16 00:00:00', 'n'), +(1, '2000-11-18', '2009-05-27 12:25:07', 't'), +(4, '2001-03-03', NULL, 'u'), +(3, '2003-09-11', '2001-09-10 18:10:10', 'f'), +(4, '2007-06-17', '1900-01-01 00:00:00', 't'), +(NULL, '2008-09-11', '2004-06-07 23:17:09', 'k'); +ALTER TABLE t1 ADD UNIQUE KEY ind1 (pk, d, i, v); +ALTER TABLE t1 ADD UNIQUE KEY ind2 (d, v); +ERROR 23000: Duplicate entry '1900-01-01-m' for key 'ind2' +DROP TABLE t1; diff --git a/mysql-test/t/myisam_enable_keys-10506.test b/mysql-test/t/myisam_enable_keys-10506.test new file mode 100644 index 00000000000..8e1c058c3f0 --- /dev/null +++ b/mysql-test/t/myisam_enable_keys-10506.test @@ -0,0 +1,117 @@ +# +# MDEV-10506 Protocol::end_statement(): Assertion `0' failed upon ALTER TABLE +# +CREATE TABLE t1 ( + pk INT AUTO_INCREMENT, + i INT, + d DATE, + dt DATETIME, + v VARCHAR(1), + PRIMARY KEY (pk), + KEY (dt) +) ENGINE=MyISAM; +INSERT INTO t1 (i, d, dt, v) VALUES + (9, '2005-07-23', '2004-05-13 01:01:39', 't'), + (2, '2009-11-01', '2003-12-24 07:39:29', 'h'), + (6, NULL, '2008-07-03 05:32:22', 'l'), + (6, '2007-07-16', '2008-08-28 18:46:11', 'j'), + (5, NULL, '2001-07-12 21:27:00', 'h'), + (3, '2007-07-22', '1900-01-01 00:00:00', 'p'), + (2, '2000-11-21', '2007-05-25 11:58:54', 'g'), + (6, '1900-01-01', '2009-06-03 17:11:10', 'i'), + 
(2, '2008-02-10', '2001-06-15 16:20:07', 'p'), + (3, '2009-06-04', '1900-01-01 00:00:00', 'h'), + (9, '2007-04-25', '1900-01-01 00:00:00', 'e'), + (9, '2006-03-02', '1900-01-01 00:00:00', 'e'), + (1, '1900-01-01', '2002-11-08 09:33:27', 'u'), + (7, '2008-07-13', '2007-08-07 17:35:52', 'j'), + (0, '2004-11-12', '2006-05-01 00:00:00', 'e'), + (0, '1900-01-01', '2003-05-01 00:00:00', 'z'), + (1, '2009-09-02', '2007-02-12 09:30:49', 'w'), + (0, '2004-11-06', '1900-01-01 00:00:00', 't'), + (4, '2003-01-06', '2002-07-03 02:51:11', 'i'), + (6, '2006-01-14', '2008-02-26 04:57:32', 'i'), + (0, '2002-01-19', '2009-02-12 00:00:00', 'i'), + (8, '2007-02-12', '1900-01-01 00:00:00', 'b'), + (4, '1900-01-01', '2001-05-16 05:28:40', 'm'), + (2, '2005-07-16', NULL, 'j'), + (1, '2004-09-04', '2001-01-24 21:45:18', 'v'), + (3, '2009-07-01', NULL, NULL), + (2, '2009-07-21', '2002-07-24 00:00:00', 'h'), + (4, NULL, '2001-11-03 12:22:30', 'q'), + (1, '2002-06-22', '2008-06-17 03:17:59', 'f'), + (7, '2005-06-23', '2005-12-24 00:00:00', 'p'), + (6, '2001-05-20', '2008-10-23 00:00:00', NULL), + (3, '2001-10-01', '2000-10-12 16:32:35', 'o'), + (3, '2001-01-07', '2005-09-11 10:09:54', 'w'), + (6, '2007-11-02', '2009-09-10 01:44:18', 'l'), + (6, NULL, NULL, 'i'), + (9, NULL, '2002-05-18 15:21:55', 'd'), + (4, '2008-12-21', '2004-10-15 10:09:54', 'j'), + (6, '2003-10-05', '2009-07-13 03:51:02', 'e'), + (2, '2001-03-03', '1900-01-01 00:00:00', 'e'), + (2, '2007-04-04', '2001-11-08 21:14:52', 'q'), + (5, NULL, '2006-12-02 00:00:00', 'm'), + (0, '2009-01-04', '1900-01-01 00:00:00', NULL), + (8, '2008-04-03', '2005-01-01 11:55:18', 'q'), + (8, NULL, '2005-02-28 03:44:02', 'w'), + (0, '2003-08-22', NULL, 'c'), + (9, '1900-01-01', NULL, 'y'), + (NULL, NULL, '2006-08-25 16:28:09', 'g'), + (5, '2004-07-04', '2002-08-11 00:00:00', 'z'), + (1, '1900-01-01', '2007-07-22 21:19:18', 'm'), + (2, '2007-02-04', '2006-02-10 18:41:38', 't'), + (2, '1900-01-01', '2009-02-16 14:58:58', 'd'), + (7, '2001-03-14', '2007-08-14 00:00:00', 'h'), + (0, NULL, '1900-01-01 00:00:00', NULL), + (1, '2008-10-05', NULL, 'f'), + (6, '2001-11-25', '2008-12-03 06:59:23', 'l'), + (NULL, '2003-01-27', '2008-10-04 00:00:00', 'g'), + (8, '2008-08-08', '2009-07-07 07:00:21', 'v'), + (8, '2006-07-03', '2001-04-15 00:00:00', NULL), + (5, '2002-11-21', '2007-07-08 04:01:58', 'm'), + (5, '2006-04-08', '2007-09-23 00:01:35', 'i'), + (5, '2001-05-06', '2008-05-15 00:00:00', 'h'), + (7, '1900-01-01', '1900-01-01 00:00:00', 'u'), + (30, '2007-04-16', '2004-03-05 23:35:38', 'o'), + (NULL, '1900-01-01', '2007-08-25 01:32:47', 'z'), + (6, '2004-12-03', '1900-01-01 00:00:00', 'o'), + (8, '2001-06-23', '1900-01-01 00:00:00', 'f'), + (NULL, '2008-12-15', '2001-05-19 08:28:28', 'a'), + (9, '2000-02-15', '2009-09-03 06:07:22', 'd'), + (2, '2001-08-05', '2006-10-08 07:17:27', 'k'), + (5, '2004-01-17', '2003-09-06 20:36:01', 'd'), + (4, '2003-10-01', '2001-02-05 18:10:49', 'u'), + (4, '2003-07-28', '2001-01-07 16:11:37', 'h'), + (0, '1900-01-01', '2008-08-01 05:26:38', 'w'), + (9, '1900-01-01', '2001-05-08 00:00:00', 't'), + (1, '2000-04-17', '2008-07-10 21:26:28', 'i'), + (8, '2002-01-05', '2006-08-06 20:56:35', 'k'), + (9, '2001-04-10', '2003-02-17 00:00:00', 'z'), + (0, '2009-12-04', NULL, 'h'), + (7, NULL, '2004-10-27 00:29:57', 'h'), + (2, '2006-03-07', '2008-03-04 06:14:13', 'b'), + (0, '2001-10-15', '2001-03-17 00:00:00', 'm'), + (5, '1900-01-01', '2009-02-21 11:35:50', 'i'), + (4, NULL, '1900-01-01 00:00:00', 'w'), + (5, '2009-04-05', '1900-01-01 00:00:00', 'm'), + (6, 
'2001-03-19', '2001-04-12 00:00:00', 'q'), + (NULL, '2009-12-08', '2001-12-04 20:21:01', 'k'), + (2, '2005-02-09', '2001-05-27 08:41:01', 'l'), + (9, '2004-05-25', '2004-09-18 00:00:00', 'c'), + (3, '2005-01-17', '2002-09-12 11:18:48', 'd'), + (0, '2003-08-28', '1900-01-01 00:00:00', 'k'), + (6, '2006-10-11', '2003-10-28 03:31:02', 'a'), + (5, '1900-01-01', '2001-08-22 10:20:09', 'p'), + (8, '1900-01-01', '2008-04-24 00:00:00', 'o'), + (4, '2005-08-18', '2006-11-10 10:08:49', 'e'), + (NULL, '2007-03-12', '2007-10-16 00:00:00', 'n'), + (1, '2000-11-18', '2009-05-27 12:25:07', 't'), + (4, '2001-03-03', NULL, 'u'), + (3, '2003-09-11', '2001-09-10 18:10:10', 'f'), + (4, '2007-06-17', '1900-01-01 00:00:00', 't'), + (NULL, '2008-09-11', '2004-06-07 23:17:09', 'k'); +ALTER TABLE t1 ADD UNIQUE KEY ind1 (pk, d, i, v); +--error ER_DUP_ENTRY +ALTER TABLE t1 ADD UNIQUE KEY ind2 (d, v); +DROP TABLE t1; diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index a2e62d8ae1e..784da17d790 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1433,6 +1433,7 @@ int ha_myisam::enable_indexes(uint mode) else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE) { THD *thd= table->in_use; + int was_error= thd->is_error(); HA_CHECK ¶m= *(HA_CHECK*) thd->alloc(sizeof(param)); const char *save_proc_info=thd->proc_info; @@ -1475,7 +1476,7 @@ int ha_myisam::enable_indexes(uint mode) might have been set by the first repair. They can still be seen with SHOW WARNINGS then. */ - if (! error) + if (! error && ! was_error) thd->clear_error(); } info(HA_STATUS_CONST); -- cgit v1.2.1 From 1b3430a5ae6b2f6d5c251f1ff07f5c273b1dc175 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 8 Aug 2016 16:04:40 +0400 Subject: MDEV-10500 CASE/IF Statement returns multiple values and shifts further result values to the next column We assume all around the code that null_value==true is in sync with NULL value returned by val_str()/val_decimal(). Item_sum_sum::val_decimal() erroneously returned a non-NULL value together with null_value set to true. Fixing to return NULL instead. 
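The contract this message describes can be shown with a small standalone sketch. ToySum below is a made-up stand-in, not the server's Item hierarchy; it only illustrates why a non-NULL pointer paired with null_value==true lets stale buffer contents leak into the result row.

    #include <cstdio>

    // Toy stand-in for an aggregate item: not the server's Item_sum_sum, just
    // an illustration of the null_value / return-value contract the fix restores.
    struct ToySum {
      bool null_value;       // "the last evaluated result was SQL NULL"
      long long dec_buf;     // accumulated value, meaningful only when !null_value

      // A NULL result must be reported both via null_value *and* via the
      // returned pointer.  Before the fix the pointer was always non-NULL.
      const long long *val_decimal() const {
        return null_value ? nullptr : &dec_buf;
      }
    };

    int main() {
      ToySum rows[] = { {false, 42}, {true, 0} };

      // Result-writing code that trusts the pointer alone would otherwise pick
      // up a stale buffer for the NULL case and shift values between columns.
      for (const ToySum &s : rows) {
        if (const long long *d = s.val_decimal())
          std::printf("SUM = %lld\n", *d);
        else
          std::printf("SUM = NULL\n");
      }
      return 0;
    }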
--- mysql-test/r/func_group.result | 27 +++++++++++++++++++++++++++ mysql-test/t/func_group.test | 25 +++++++++++++++++++++++++ sql/item_sum.cc | 2 +- 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index ac076ec4348..0253548236c 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -2270,3 +2270,30 @@ t2_id GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) EXECUTE stmt; t2_id GROUP_CONCAT(IF (t6.b, t6.f, t5.f) ORDER BY 1) DROP TABLE t1,t2,t3,t4,t5,t6; +# +# MDEV-10500 CASE/IF Statement returns multiple values and shifts further result values to the next column +# +CREATE TABLE t1 ( +id int not null AUTO_INCREMENT, +active bool not null, +data1 bigint, +data2 bigint, +data3 bigint, +primary key (id) +); +INSERT INTO t1 (active,data1,data2,data3) VALUES (1,null,100,200); +SELECT +CASE WHEN active THEN SUM(data1) END AS C_1, +SUM(data2) AS C_2, +SUM(data3) AS C_3 +FROM t1; +C_1 C_2 C_3 +NULL 100 200 +SELECT +IF(active, SUM(data1), 5) AS C_1, +SUM(data2) AS C_2, +SUM(data3) AS C_3 +FROM t1; +C_1 C_2 C_3 +NULL 100 200 +DROP TABLE t1; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index bd3ed4ad32d..5824d99afa5 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1565,3 +1565,28 @@ EXECUTE stmt; EXECUTE stmt; DROP TABLE t1,t2,t3,t4,t5,t6; + +--echo # +--echo # MDEV-10500 CASE/IF Statement returns multiple values and shifts further result values to the next column +--echo # + +CREATE TABLE t1 ( + id int not null AUTO_INCREMENT, + active bool not null, + data1 bigint, + data2 bigint, + data3 bigint, + primary key (id) +); +INSERT INTO t1 (active,data1,data2,data3) VALUES (1,null,100,200); +SELECT + CASE WHEN active THEN SUM(data1) END AS C_1, + SUM(data2) AS C_2, + SUM(data3) AS C_3 +FROM t1; +SELECT + IF(active, SUM(data1), 5) AS C_1, + SUM(data2) AS C_2, + SUM(data3) AS C_3 +FROM t1; +DROP TABLE t1; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index adf48f6feec..445895111ed 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1462,7 +1462,7 @@ my_decimal *Item_sum_sum::val_decimal(my_decimal *val) if (aggr) aggr->endup(); if (hybrid_type == DECIMAL_RESULT) - return (dec_buffs + curr_dec_buff); + return null_value ? 
NULL : (dec_buffs + curr_dec_buff); return val_decimal_from_real(val); } -- cgit v1.2.1 From 5269d378dfd576ecd03c82b3e763a98c83235636 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 8 Aug 2016 18:37:02 +0400 Subject: MDEV-10468 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() --- mysql-test/r/func_group.result | 12 ++++++++++++ mysql-test/t/func_group.test | 6 ++++++ sql/item_sum.cc | 2 ++ 3 files changed, 20 insertions(+) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 0253548236c..38fae2f0a4f 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -2297,3 +2297,15 @@ FROM t1; C_1 C_2 C_3 NULL 100 200 DROP TABLE t1; +# +# MDEV-10468 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() +# +SELECT STDDEV_POP(f) FROM (SELECT "1e+309" AS f UNION SELECT "-1e+309" AS f) tbl; +STDDEV_POP(f) +1.7976931348623157e308 +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: '1e+309' +Warning 1292 Truncated incorrect DOUBLE value: '-1e+309' +SELECT STDDEV(f) FROM (SELECT 1.7976931348623157e+308 AS f UNION SELECT -1.7976931348623157e+308 AS f) tbl; +STDDEV(f) +1.7976931348623157e308 diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 5824d99afa5..7013009fae7 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1590,3 +1590,9 @@ SELECT SUM(data3) AS C_3 FROM t1; DROP TABLE t1; + +--echo # +--echo # MDEV-10468 Assertion `nr >= 0.0' failed in Item_sum_std::val_real() +--echo # +SELECT STDDEV_POP(f) FROM (SELECT "1e+309" AS f UNION SELECT "-1e+309" AS f) tbl; +SELECT STDDEV(f) FROM (SELECT 1.7976931348623157e+308 AS f UNION SELECT -1.7976931348623157e+308 AS f) tbl; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 445895111ed..02d2875195d 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1762,6 +1762,8 @@ double Item_sum_std::val_real() { DBUG_ASSERT(fixed == 1); double nr= Item_sum_variance::val_real(); + if (my_isinf(nr)) + return DBL_MAX; DBUG_ASSERT(nr >= 0.0); return sqrt(nr); } -- cgit v1.2.1 From a7c43a684ac390636f2859dfe5cf65fb4be8f75b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 26 Jan 2016 14:49:25 +0200 Subject: MDEV-9304: MariaDB crash with specific query tmp_join may get its tables freed twice during JOIN cleanup. Set them to NULL when the tmp_join is different than the current join. 
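A minimal standalone sketch of the aliasing problem and of the chosen remedy (clearing the secondary owner's view before it gets a chance to run its own cleanup). ToyJoin is hypothetical, not the server's JOIN class; only the shape of the fix is the same.

    #include <cstdio>
    #include <cstdlib>

    // Simplified illustration of the double-cleanup pattern.
    struct ToyJoin {
      int  *tables;       // owned storage describing the join's tables
      int   table_count;
      ToyJoin *tmp_join;  // an auxiliary copy that may alias the same storage

      void cleanup() {
        // Free our view of the tables ...
        std::free(tables);
        tables = nullptr;
        table_count = 0;
        // ... and make sure an aliasing tmp_join cannot free them again later.
        if (tmp_join && tmp_join != this) {
          tmp_join->tables = nullptr;     // the fix: drop the stale alias
          tmp_join->table_count = 0;
        }
      }
    };

    int main() {
      ToyJoin aux{};
      ToyJoin main_join{static_cast<int*>(std::calloc(4, sizeof(int))), 4, &aux};
      aux = main_join;        // shallow copy: aux now aliases the same tables
      aux.tmp_join = nullptr; // keep the toy simple: aux has no aux of its own

      main_join.cleanup();    // frees once and clears the alias held by aux
      aux.cleanup();          // without the clearing step this would free twice
      std::printf("cleaned up without a double free\n");
      return 0;
    }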
--- sql/sql_union.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 87d3e86b2c7..95ce1c9b940 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -887,6 +887,12 @@ bool st_select_lex_unit::cleanup() join->tables_list= 0; join->table_count= 0; join->top_join_tab_count= 0; + if (join->tmp_join && join->tmp_join != join) + { + join->tmp_join->tables_list= 0; + join->tmp_join->table_count= 0; + join->tmp_join->top_join_tab_count= 0; + } } error|= fake_select_lex->cleanup(); /* -- cgit v1.2.1 From 2a54a530a9ba96a9a57607dd156a42192dae0873 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 8 Aug 2016 10:27:22 +0200 Subject: MDEV-10465 general_log_file can be abused followup --- mysql-test/suite/sys_vars/r/general_log_file_basic.result | 2 ++ .../suite/sys_vars/r/slow_query_log_file_basic.result | 2 ++ mysql-test/suite/sys_vars/t/general_log_file_basic.test | 2 ++ mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test | 2 ++ sql/sys_vars.cc | 14 +++++++------- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/general_log_file_basic.result b/mysql-test/suite/sys_vars/r/general_log_file_basic.result index 54b450a2fce..4c26cab8956 100644 --- a/mysql-test/suite/sys_vars/r/general_log_file_basic.result +++ b/mysql-test/suite/sys_vars/r/general_log_file_basic.result @@ -18,6 +18,8 @@ SET @@global.general_log_file = '/tmp/my.cnf'; ERROR 42000: Variable 'general_log_file' can't be set to the value of '/tmp/my.cnf' SET @@global.general_log_file = '.my.cnf'; ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' +SET @@global.general_log_file = 'my.cnf\0foo'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.cnf' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.general_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result index e2ed7d63fdb..db7eb238c43 100644 --- a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result +++ b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result @@ -15,6 +15,8 @@ SET @@global.slow_query_log_file = '/tmp/my.cnf'; ERROR 42000: Variable 'slow_query_log_file' can't be set to the value of '/tmp/my.cnf' SET @@global.general_log_file = '.my.cnf'; ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' +SET @@global.general_log_file = 'my.cnf\0foo'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.cnf' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.slow_query_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/t/general_log_file_basic.test b/mysql-test/suite/sys_vars/t/general_log_file_basic.test index cdb2cc4b36e..fdc99fb6dea 100644 --- a/mysql-test/suite/sys_vars/t/general_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/general_log_file_basic.test @@ -67,6 +67,8 @@ SET @@global.general_log_file = 'my.cnf'; SET @@global.general_log_file = '/tmp/my.cnf'; --error ER_WRONG_VALUE_FOR_VAR SET @@global.general_log_file = '.my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = 'my.cnf\0foo'; --echo '#----------------------FN_DYNVARS_004_03------------------------#' diff --git a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test 
b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test index 835cb251e39..79132a1bdc5 100644 --- a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test @@ -65,6 +65,8 @@ SET @@global.slow_query_log_file = 'my.cnf'; SET @@global.slow_query_log_file = '/tmp/my.cnf'; --error ER_WRONG_VALUE_FOR_VAR SET @@global.general_log_file = '.my.cnf'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = 'my.cnf\0foo'; --echo '#----------------------FN_DYNVARS_004_03------------------------#' ############################################################################## diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 2ed5be3bf3b..7d43984c9c0 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3033,19 +3033,19 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var) return true; } - static const LEX_CSTRING my_cnf= { STRING_WITH_LEN("my.cnf") }; - if (val->length >= my_cnf.length) - { - if (strcasecmp(val->str + val->length - my_cnf.length, my_cnf.str) == 0) - return true; // log file name ends with "my.cnf" - } - char path[FN_REFLEN]; size_t path_length= unpack_filename(path, val->str); if (!path_length) return true; + static const LEX_CSTRING my_cnf= { STRING_WITH_LEN("my.cnf") }; + if (path_length >= my_cnf.length) + { + if (strcasecmp(path + path_length - my_cnf.length, my_cnf.str) == 0) + return true; // log file name ends with "my.cnf" + } + MY_STAT f_stat; if (my_stat(path, &f_stat, MYF(0))) -- cgit v1.2.1 From a3f642415a8f8c52ed8a6b38ba5b48f814ab6bd8 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 8 Aug 2016 12:58:27 +0200 Subject: MDEV-6128:[PATCH] mysqlcheck wrongly escapes '.' in table names a correct fix: * store properly quoted table names in tables4repair/etc lists * tell handle_request_for_tables whether the name is aalready properly quoted * test cases for all uses of fix_table_name() --- client/mysqlcheck.c | 67 +++++++++++++++---------------------- mysql-test/r/mysqlcheck.result | 76 ++++++++++++++++++++++++++++++++++++++++-- mysql-test/t/mysqlcheck.test | 47 ++++++++++++++++++++++++-- 3 files changed, 146 insertions(+), 44 deletions(-) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index 75f841f3c5e..a07fc773726 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -232,7 +232,7 @@ static int process_selected_tables(char *db, char **table_names, int tables); static int process_all_tables_in_db(char *database); static int process_one_db(char *database); static int use_db(char *database); -static int handle_request_for_tables(char *tables, size_t length, my_bool view); +static int handle_request_for_tables(char *, size_t, my_bool, my_bool); static int dbConnect(char *host, char *user,char *passwd); static void dbDisconnect(char *host); static void DBerror(MYSQL *mysql, const char *when); @@ -566,7 +566,7 @@ static int process_selected_tables(char *db, char **table_names, int tables) } *--end = 0; handle_request_for_tables(table_names_comma_sep + 1, tot_length - 1, - opt_do_views != 0); + opt_do_views != 0, opt_all_in_1); my_free(table_names_comma_sep); } else @@ -577,7 +577,7 @@ static int process_selected_tables(char *db, char **table_names, int tables) view= is_view(table); if (view < 0) continue; - handle_request_for_tables(table, table_len, (view == 1)); + handle_request_for_tables(table, table_len, view == 1, opt_all_in_1); } DBUG_RETURN(0); } /* process_selected_tables */ @@ -605,13 +605,9 @@ static char *fix_table_name(char *dest, char *src) 
*dest++= '`'; for (; *src; src++) { - switch (*src) { - case '`': /* escape backtick character */ + if (*src == '`') *dest++= '`'; - /* fall through */ - default: - *dest++= *src; - } + *dest++= *src; } *dest++= '`'; @@ -700,9 +696,9 @@ static int process_all_tables_in_db(char *database) *--end = 0; *--views_end = 0; if (tot_length) - handle_request_for_tables(tables + 1, tot_length - 1, FALSE); + handle_request_for_tables(tables + 1, tot_length - 1, FALSE, opt_all_in_1); if (tot_views_length) - handle_request_for_tables(views + 1, tot_views_length - 1, TRUE); + handle_request_for_tables(views + 1, tot_views_length - 1, TRUE, opt_all_in_1); my_free(tables); my_free(views); } @@ -728,7 +724,7 @@ static int process_all_tables_in_db(char *database) !strcmp(row[0], "slow_log"))) continue; /* Skip logging tables */ - handle_request_for_tables(row[0], fixed_name_length(row[0]), view); + handle_request_for_tables(row[0], fixed_name_length(row[0]), view, opt_all_in_1); } } mysql_free_result(res); @@ -787,13 +783,11 @@ static int rebuild_table(char *name) int rc= 0; DBUG_ENTER("rebuild_table"); - query= (char*)my_malloc(sizeof(char) * (12 + fixed_name_length(name) + 6 + 1), + query= (char*)my_malloc(sizeof(char) * (12 + strlen(name) + 6 + 1), MYF(MY_WME)); if (!query) DBUG_RETURN(1); - ptr= strmov(query, "ALTER TABLE "); - ptr= fix_table_name(ptr, name); - ptr= strxmov(ptr, " FORCE", NullS); + ptr= strxmov(query, "ALTER TABLE ", name, " FORCE", NullS); if (mysql_real_query(sock, query, (ulong)(ptr - query))) { fprintf(stderr, "Failed to %s\n", query); @@ -849,7 +843,8 @@ static int disable_binlog() return run_query(stmt); } -static int handle_request_for_tables(char *tables, size_t length, my_bool view) +static int handle_request_for_tables(char *tables, size_t length, + my_bool view, my_bool dont_quote) { char *query, *end, options[100], message[100]; char table_name_buff[NAME_CHAR_LEN*2*2+1], *table_name; @@ -907,7 +902,7 @@ static int handle_request_for_tables(char *tables, size_t length, my_bool view) if (!(query =(char *) my_malloc(query_size, MYF(MY_WME)))) DBUG_RETURN(1); - if (opt_all_in_1) + if (dont_quote) { DBUG_ASSERT(strlen(op)+strlen(tables)+strlen(options)+8+1 <= query_size); @@ -950,6 +945,13 @@ static int handle_request_for_tables(char *tables, size_t length, my_bool view) DBUG_RETURN(0); } +static void insert_table_name(DYNAMIC_ARRAY *arr, char *in, size_t dblen) +{ + char buf[NAME_LEN*2+2]; + in[dblen]= 0; + my_snprintf(buf, sizeof(buf), "%`s.%`s", in, in + dblen + 1); + insert_dynamic(arr, (uchar*) buf); +} static void print_result() { @@ -957,16 +959,13 @@ static void print_result() MYSQL_ROW row; char prev[(NAME_LEN+9)*3+2]; char prev_alter[MAX_ALTER_STR_SIZE]; - char *db_name; - uint length_of_db; + size_t length_of_db= strlen(sock->db); uint i; my_bool found_error=0, table_rebuild=0; DYNAMIC_ARRAY *array4repair= &tables4repair; DBUG_ENTER("print_result"); res = mysql_use_result(sock); - db_name= sock->db; - length_of_db= strlen(db_name); prev[0] = '\0'; prev_alter[0]= 0; @@ -990,16 +989,10 @@ static void print_result() if (prev_alter[0]) insert_dynamic(&alter_table_cmds, (uchar*) prev_alter); else - { - char *table_name= prev + (length_of_db+1); - insert_dynamic(&tables4rebuild, (uchar*) table_name); - } + insert_table_name(&tables4rebuild, prev, length_of_db); } else - { - char *table_name= prev + (length_of_db+1); - insert_dynamic(array4repair, (uchar*) table_name); - } + insert_table_name(array4repair, prev, length_of_db); } array4repair= &tables4repair; found_error=0; @@ 
-1066,16 +1059,10 @@ static void print_result() if (prev_alter[0]) insert_dynamic(&alter_table_cmds, (uchar*) prev_alter); else - { - char *table_name= prev + (length_of_db+1); - insert_dynamic(&tables4rebuild, (uchar*) table_name); - } + insert_table_name(&tables4rebuild, prev, length_of_db); } else - { - char *table_name= prev + (length_of_db+1); - insert_dynamic(array4repair, (uchar*) table_name); - } + insert_table_name(array4repair, prev, length_of_db); } mysql_free_result(res); DBUG_VOID_RETURN; @@ -1209,7 +1196,7 @@ int main(int argc, char **argv) for (i = 0; i < tables4repair.elements ; i++) { char *name= (char*) dynamic_array_ptr(&tables4repair, i); - handle_request_for_tables(name, fixed_name_length(name), FALSE); + handle_request_for_tables(name, fixed_name_length(name), FALSE, TRUE); } for (i = 0; i < tables4rebuild.elements ; i++) rebuild_table((char*) dynamic_array_ptr(&tables4rebuild, i)); @@ -1220,7 +1207,7 @@ int main(int argc, char **argv) for (i = 0; i < views4repair.elements ; i++) { char *name= (char*) dynamic_array_ptr(&views4repair, i); - handle_request_for_tables(name, fixed_name_length(name), TRUE); + handle_request_for_tables(name, fixed_name_length(name), TRUE, TRUE); } } ret= test(first_error); diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result index d2f4745c5f1..56556fffb77 100644 --- a/mysql-test/r/mysqlcheck.result +++ b/mysql-test/r/mysqlcheck.result @@ -312,10 +312,37 @@ DROP TABLE bug47205; # #MDEV-6128:[PATCH] mysqlcheck wrongly escapes '.' in table names # -CREATE TABLE test.`t.1` (id int); +create table `t.1` (id int); +create view `v.1` as select 1; mysqlcheck test t.1 test.t.1 OK -drop table test.`t.1`; +mysqlcheck --all-in-1 test t.1 +test.t.1 OK +mysqlcheck --all-in-1 --databases --process-views test +test.t.1 OK +test.v.1 OK +create table `t.2`(a varchar(20) primary key) default character set utf8 collate utf8_general_ci engine=innodb; +flush table `t.2`; +mysqlcheck --check-upgrade --auto-repair test +test.t.1 OK +test.t.2 +error : Table rebuild required. Please do "ALTER TABLE `t.2` FORCE" or dump/reload to fix it! 
+test.t.3 Needs upgrade + +Repairing tables +test.t.3 OK +check table `t.1`, `t.2`, `t.3`; +Table Op Msg_type Msg_text +test.t.1 check status OK +test.t.2 check status OK +test.t.3 check status OK +check table `t.1`, `t.2`, `t.3` for upgrade; +Table Op Msg_type Msg_text +test.t.1 check status OK +test.t.2 check status OK +test.t.3 check status OK +drop view `v.1`; +drop table test.`t.1`, `t.2`, `t.3`; create view v1 as select 1; mysqlcheck --process-views test test.v1 OK @@ -344,3 +371,48 @@ show tables; Tables_in_test t1`1 drop table `t1``1`; +call mtr.add_suppression("ha_myisam"); +call mtr.add_suppression("Checking table"); +create database mysqltest1; +create table mysqltest1.t1 (a int) engine=myisam; +create table t2 (a int); +check table mysqltest1.t1; +Table Op Msg_type Msg_text +mysqltest1.t1 check warning Size of datafile is: 4 Should be: 0 +mysqltest1.t1 check error got error: 0 when reading datafile at record: 0 +mysqltest1.t1 check error Corrupt +mtr.global_suppressions Table is already up to date +mtr.test_suppressions Table is already up to date +mysql.columns_priv Table is already up to date +mysql.db Table is already up to date +mysql.event Table is already up to date +mysql.func Table is already up to date +mysql.help_category Table is already up to date +mysql.help_keyword Table is already up to date +mysql.help_relation Table is already up to date +mysql.help_topic Table is already up to date +mysql.host Table is already up to date +mysql.ndb_binlog_index Table is already up to date +mysql.plugin Table is already up to date +mysql.proc Table is already up to date +mysql.procs_priv Table is already up to date +mysql.proxies_priv Table is already up to date +mysql.servers Table is already up to date +mysql.tables_priv Table is already up to date +mysql.time_zone Table is already up to date +mysql.time_zone_leap_second Table is already up to date +mysql.time_zone_name Table is already up to date +mysql.time_zone_transition Table is already up to date +mysql.time_zone_transition_type Table is already up to date +mysql.user Table is already up to date +mysqltest1.t1 +warning : Table is marked as crashed +warning : Size of datafile is: 4 Should be: 0 +error : got error: 0 when reading datafile at record: 0 +error : Corrupt +test.t2 Table is already up to date + +Repairing tables +mysqltest1.t1 OK +drop table t2; +drop database mysqltest1; diff --git a/mysql-test/t/mysqlcheck.test b/mysql-test/t/mysqlcheck.test index 7da14e3742a..781af357408 100644 --- a/mysql-test/t/mysqlcheck.test +++ b/mysql-test/t/mysqlcheck.test @@ -310,15 +310,36 @@ CHECK TABLE bug47205 FOR UPGRADE; DROP TABLE bug47205; + --echo # --echo #MDEV-6128:[PATCH] mysqlcheck wrongly escapes '.' 
in table names --echo # -CREATE TABLE test.`t.1` (id int); +create table `t.1` (id int); +create view `v.1` as select 1; --echo mysqlcheck test t.1 --exec $MYSQL_CHECK test t.1 +--echo mysqlcheck --all-in-1 test t.1 +--exec $MYSQL_CHECK --all-in-1 test t.1 +--echo mysqlcheck --all-in-1 --databases --process-views test +--exec $MYSQL_CHECK --all-in-1 --databases --process-views test + +create table `t.2`(a varchar(20) primary key) default character set utf8 collate utf8_general_ci engine=innodb; +flush table `t.2`; +--remove_file $MYSQLD_DATADIR/test/t@002e2.frm +--copy_file std_data/bug47205.frm $MYSQLD_DATADIR/test/t@002e2.frm + +--copy_file std_data/bug36055.frm $MYSQLD_DATADIR/test/t@002e3.frm +--copy_file std_data/bug36055.MYD $MYSQLD_DATADIR/test/t@002e3.MYD +--copy_file std_data/bug36055.MYI $MYSQLD_DATADIR/test/t@002e3.MYI -drop table test.`t.1`; +--echo mysqlcheck --check-upgrade --auto-repair test +--exec $MYSQL_CHECK --check-upgrade --auto-repair test + +check table `t.1`, `t.2`, `t.3`; +check table `t.1`, `t.2`, `t.3` for upgrade; +drop view `v.1`; +drop table test.`t.1`, `t.2`, `t.3`; # # MDEV-8123 mysqlcheck: new --process-views option conflicts with --quick, --extended and such @@ -355,3 +376,25 @@ create table `#mysql50#t1``1` (a int) engine=myisam; --exec $MYSQL_CHECK --fix-table-names --databases test show tables; drop table `t1``1`; + +# +# MDEV-9440 mysqlcheck -A --auto-repair selects wrong database when trying to repair broken table +# +call mtr.add_suppression("ha_myisam"); +call mtr.add_suppression("Checking table"); +create database mysqltest1; +create table mysqltest1.t1 (a int) engine=myisam; +create table t2 (a int); + +let $datadir= `select @@datadir`; +remove_file $datadir/mysqltest1/t1.MYD; +write_file $datadir/mysqltest1/t1.MYD; +foo +EOF + +check table mysqltest1.t1; + +--exec $MYSQL_CHECK -A --auto-repair --fast + +drop table t2; +drop database mysqltest1; -- cgit v1.2.1 From a2f245e49f8747f9fd202cae451861a8371e6151 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 8 Aug 2016 23:02:52 +0300 Subject: MDEV-10372: EXPLAIN fixes for recursive CTEs, including FORMAT=JSON - Tabular EXPLAIN now prints "RECURSIVE UNION". - There is a basic implementation of EXPLAIN FORMAT=JSON. - it produces "recursive_union" JSON struct - No other details or ANALYZE support, yet. 
--- mysql-test/r/cte_recursive.result | 366 ++++++++++++++++++++++++++++++++++++-- mysql-test/t/cte_recursive.test | 101 +++++++++++ sql/sql_explain.cc | 7 +- sql/sql_explain.h | 2 + sql/sql_lex.cc | 25 ++- sql/sql_select.cc | 3 +- sql/sql_select.h | 4 + sql/sql_union.cc | 2 +- 8 files changed, 493 insertions(+), 17 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index be6617b6a1e..a21416b5f43 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -115,8 +115,8 @@ select * from t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY ALL NULL NULL NULL NULL 5 2 SUBQUERY t2 ALL NULL NULL NULL NULL 5 Using where -3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 5 -3 UNCACHEABLE UNION t2 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join) +3 RECURSIVE UNION ALL NULL NULL NULL NULL 5 +3 RECURSIVE UNION t2 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL # just WITH : types of t1 columns are determined by all parts of union create view v1 as @@ -599,10 +599,10 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY ref key0 key0 5 c.h_id 2 100.00 1 PRIMARY ref key0 key0 5 c.w_id 2 100.00 3 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where -4 UNCACHEABLE UNION ALL NULL NULL NULL NULL 2 100.00 -4 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) -5 UNCACHEABLE UNION ALL NULL NULL NULL NULL 2 100.00 -5 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) +4 RECURSIVE UNION ALL NULL NULL NULL NULL 2 100.00 +4 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) +5 RECURSIVE UNION ALL NULL NULL NULL NULL 2 100.00 +5 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL 2 UNCACHEABLE SUBQUERY ALL NULL NULL NULL NULL 12 100.00 Using where Warnings: @@ -781,8 +781,8 @@ select * from ancestors; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY ALL NULL NULL NULL NULL 12 100.00 2 SUBQUERY folks ALL NULL NULL NULL NULL 12 100.00 Using where -3 UNCACHEABLE UNION p ALL NULL NULL NULL NULL 12 100.00 -3 UNCACHEABLE UNION ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) +3 RECURSIVE UNION p ALL NULL NULL NULL NULL 12 100.00 +3 RECURSIVE UNION ALL NULL NULL NULL NULL 12 100.00 Using where; Using join buffer (flat, BNL join) NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1003 with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where ((`test`.`folks`.`name` = 'Me') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` @@ -1137,10 +1137,10 @@ select * from ancestors; id select_type table type 
possible_keys key key_len ref rows Extra 1 PRIMARY ALL NULL NULL NULL NULL 12 2 SUBQUERY folks ALL NULL NULL NULL NULL 12 Using where -3 UNCACHEABLE UNION p ALL PRIMARY NULL NULL NULL 12 -3 UNCACHEABLE UNION ref key0 key0 5 test.p.id 2 -4 UNCACHEABLE UNION p ALL PRIMARY NULL NULL NULL 12 -4 UNCACHEABLE UNION ref key0 key0 5 test.p.id 2 +3 RECURSIVE UNION p ALL PRIMARY NULL NULL NULL 12 +3 RECURSIVE UNION ref key0 key0 5 test.p.id 2 +4 RECURSIVE UNION p ALL PRIMARY NULL NULL NULL 12 +4 RECURSIVE UNION ref key0 key0 5 test.p.id 2 NULL UNION RESULT ALL NULL NULL NULL NULL NULL with recursive ancestors @@ -1168,4 +1168,346 @@ id name dob father mother 9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL +# +# EXPLAIN FORMAT=JSON on a query where one recursive CTE uses another: +# +explain +with recursive +prev_gen +as +( +select folks.* +from folks, prev_gen +where folks.id=prev_gen.father or folks.id=prev_gen.mother +union +select * +from folks +where name='Me' +), +ancestors +as +( +select * +from folks +where name='Me' + union +select * +from ancestors +union +select * +from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 24 +4 SUBQUERY folks ALL NULL NULL NULL NULL 12 Using where +6 RECURSIVE UNION ALL NULL NULL NULL NULL 12 +5 RECURSIVE UNION ALL NULL NULL NULL NULL 24 +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +3 SUBQUERY folks ALL NULL NULL NULL NULL 12 Using where +2 RECURSIVE UNION folks ALL PRIMARY NULL NULL NULL 12 +2 RECURSIVE UNION ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join) +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain FORMAT=JSON +with recursive +prev_gen +as +( +select folks.* +from folks, prev_gen +where folks.id=prev_gen.father or folks.id=prev_gen.mother +union +select * +from folks +where name='Me' +), +ancestors +as +( +select * +from folks +where name='Me2' + union +select * +from ancestors where id < 234 +union +select * +from prev_gen where id < 345 +) +select ancestors.name, ancestors.dob from ancestors; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 24, + "filtered": 100, + "materialized": { + "query_block": { + "recursive_union": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 4, + "table": { + "table_name": "folks", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(folks.`name` = 'Me2')" + } + } + }, + { + "query_block": { + "select_id": 6, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(prev_gen.`id` < 345)", + "materialized": { + "query_block": { + "recursive_union": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 3, + "table": { + "table_name": "folks", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(folks.`name` = 'Me')" + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "folks", + "access_type": "ALL", + "possible_keys": ["PRIMARY"], + "rows": 12, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 12, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": 
"((prev_gen.father = folks.`id`) or (prev_gen.mother = folks.`id`))" + } + } + } + ] + } + } + } + } + } + }, + { + "query_block": { + "select_id": 5, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 24, + "filtered": 100, + "attached_condition": "(ancestors.`id` < 234)" + } + } + } + ] + } + } + } + } + } +} +# +explain format=json +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) +as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +union +select h.*, w.* +from folks v, folks h, folks w +where v.name = 'Me' and +(v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union all +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "materialized": { + "query_block": { + "recursive_union": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 3, + "table": { + "table_name": "v", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "((v.`name` = 'Me') and (v.father is not null) and (v.mother is not null))" + }, + "table": { + "table_name": "h", + "access_type": "eq_ref", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["test.v.father"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "w", + "access_type": "eq_ref", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["test.v.mother"], + "rows": 1, + "filtered": 100 + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 2, + "filtered": 100, + "attached_condition": "((a.father is not null) and (a.mother is not null))" + }, + "table": { + "table_name": "h", + "access_type": "eq_ref", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["a.father"], + "rows": 1, + "filtered": 100 + }, + "table": { + "table_name": "w", + "access_type": "eq_ref", + "possible_keys": ["PRIMARY"], + "key": "PRIMARY", + "key_length": "4", + "used_key_parts": ["id"], + "ref": ["a.mother"], + "rows": 1, + "filtered": 100 + } + } + } + ] + } + } + } + } + } +} drop table folks; +# +# MDEV-10372: [bb-10.2-mdev9864 tree] EXPLAIN with recursive CTE enters endless recursion +# +create table t1(a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +explain format=json +with recursive T as (select a from t1 union select a+10 from T where a < 1000) +select * from T; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 10, + "filtered": 100, + "materialized": { + "query_block": { + "recursive_union": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 10, + "filtered": 100 + } + } + }, + { + "query_block": { + "select_id": 3, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 10, + "filtered": 100, + "attached_condition": "(T.a < 1000)" + } + 
} + } + ] + } + } + } + } + } +} +drop table t1; diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 8262b53c374..60f058b15f7 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -977,5 +977,106 @@ as select * from ancestors; +--echo # +--echo # EXPLAIN FORMAT=JSON on a query where one recursive CTE uses another: +--echo # +explain +with recursive +prev_gen +as +( + select folks.* + from folks, prev_gen + where folks.id=prev_gen.father or folks.id=prev_gen.mother + union + select * + from folks + where name='Me' +), +ancestors +as +( + select * + from folks + where name='Me' + union + select * + from ancestors + union + select * + from prev_gen +) +select ancestors.name, ancestors.dob from ancestors; + +explain FORMAT=JSON +with recursive +prev_gen +as +( + select folks.* + from folks, prev_gen + where folks.id=prev_gen.father or folks.id=prev_gen.mother + union + select * + from folks + where name='Me' +), +ancestors +as +( + select * + from folks + where name='Me2' + union + select * + from ancestors where id < 234 + union + select * + from prev_gen where id < 345 +) +select ancestors.name, ancestors.dob from ancestors; + +--echo # +explain format=json +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id + union + select h.*, w.* + from folks v, folks h, folks w + where v.name = 'Me' and + (v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select h_id, h_name, h_dob, h_father, h_mother + from ancestor_couples + union all + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + + drop table folks; +--echo # +--echo # MDEV-10372: [bb-10.2-mdev9864 tree] EXPLAIN with recursive CTE enters endless recursion +--echo # +create table t1(a int); +insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +explain format=json +with recursive T as (select a from t1 union select a+10 from T where a < 1000) +select * from T; + +drop table t1; + + diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1f8b4f2dcb1..131c5a3bcfa 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -547,7 +547,12 @@ void Explain_union::print_explain_json(Explain_query *query, bool started_object= print_explain_json_cache(writer, is_analyze); writer->add_member("query_block").start_object(); - writer->add_member("union_result").start_object(); + + if (is_recursive_cte) + writer->add_member("recursive_union").start_object(); + else + writer->add_member("union_result").start_object(); + // using_temporary_table make_union_table_name(table_name_buffer); writer->add_member("table_name").add_str(table_name_buffer); diff --git a/sql/sql_explain.h b/sql/sql_explain.h index abdb1bb978b..5793599f4e1 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -327,6 +327,7 @@ class Explain_union : public Explain_node public: Explain_union(MEM_ROOT *root, bool is_analyze) : Explain_node(root), + is_recursive_cte(false), fake_select_lex_explain(root, is_analyze) {} @@ -362,6 +363,7 @@ public: const char *fake_select_type; bool using_filesort; bool using_tmp; + bool is_recursive_cte; /* Explain data structure for "fake_select_lex" (i.e. 
for the degenerate diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index cffa779a27f..d0fa273b0b0 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -4368,7 +4368,26 @@ void st_select_lex::set_explain_type(bool on_the_fly) type= is_uncacheable ? "UNCACHEABLE UNION": "UNION"; if (this == master_unit()->fake_select_lex) type= "UNION RESULT"; - + /* + join below may be =NULL when this functions is called at an early + stage. It will be later called again and we will set the correct + value. + */ + if (join) + { + bool uses_cte= false; + for (JOIN_TAB *tab= first_explain_order_tab(join); tab; + tab= next_explain_order_tab(join, tab)) + { + if (tab->table->pos_in_table_list->with) + { + uses_cte= true; + break; + } + } + if (uses_cte) + type= "RECURSIVE UNION"; + } } } } @@ -4683,7 +4702,9 @@ int st_select_lex_unit::save_union_explain(Explain_query *output) new (output->mem_root) Explain_union(output->mem_root, thd->lex->analyze_stmt); - + if (with_element && with_element->is_recursive) + eu->is_recursive_cte= true; + if (derived) eu->connection_type= Explain_node::EXPLAIN_NODE_DERIVED; /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index e4257d6e94d..55d2838e869 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -24186,7 +24186,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, In case this is a derived table, here we remember the number of subselect that used to produce it. */ - eta->derived_select_number= table->derived_select_number; + if (!(table_list && table_list->is_with_table_recursive_reference())) + eta->derived_select_number= table->derived_select_number; /* The same for non-merged semi-joins */ eta->non_merged_sjm_number = get_non_merged_semijoin_select(); diff --git a/sql/sql_select.h b/sql/sql_select.h index 535cf5a29e0..12a40996258 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -2278,4 +2278,8 @@ public: bool test_if_order_compatible(SQL_I_List &a, SQL_I_List &b); int test_if_group_changed(List &list); int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); + +JOIN_TAB *first_explain_order_tab(JOIN* join); +JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab); + #endif /* SQL_SELECT_INCLUDED */ diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 23c3801f4ab..4a73a503ebe 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1166,7 +1166,7 @@ err: } - +// One step of recursive execution bool st_select_lex_unit::exec_recursive(bool is_driving_recursive) { st_select_lex *lex_select_save= thd->lex->current_select; -- cgit v1.2.1 From 0098d789c9d8be15d62230289f603ac8f3d5b275 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 9 Aug 2016 13:25:40 +0200 Subject: MDEV-10465 general_log_file can be abused Windows! 
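The intent of this and the preceding MDEV-10465 patch, rejecting log file names that would overwrite a server configuration file, now including the Windows my.ini, can be sketched standalone. The helper below is illustrative only; it is not the server's check_log_path(), which also runs the name through unpack_filename() and stat()s the target first.

    #include <cstring>
    #include <cstdio>
    #include <strings.h>   // strcasecmp (POSIX); _stricmp would be used on Windows

    // Standalone sketch of the case-insensitive suffix check.
    static bool log_name_is_config_file(const char *path) {
      static const char *const banned[] = { "my.cnf", "my.ini" };
      const size_t len = std::strlen(path);
      for (const char *suffix : banned) {
        const size_t slen = std::strlen(suffix);
        if (len >= slen && strcasecmp(path + len - slen, suffix) == 0)
          return true;   // refuse: the log would overwrite a config file
      }
      return false;
    }

    int main() {
      const char *candidates[] = { "general.log", "/etc/my.cnf", "C:\\MY.INI" };
      for (const char *p : candidates)
        std::printf("%-12s -> %s\n", p,
                    log_name_is_config_file(p) ? "rejected" : "accepted");
      return 0;
    }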
--- mysql-test/suite/sys_vars/r/general_log_file_basic.result | 2 ++ mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result | 2 ++ mysql-test/suite/sys_vars/t/general_log_file_basic.test | 2 ++ mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test | 2 ++ sql/sys_vars.cc | 4 ++++ 5 files changed, 12 insertions(+) diff --git a/mysql-test/suite/sys_vars/r/general_log_file_basic.result b/mysql-test/suite/sys_vars/r/general_log_file_basic.result index 4c26cab8956..c7c24f155ca 100644 --- a/mysql-test/suite/sys_vars/r/general_log_file_basic.result +++ b/mysql-test/suite/sys_vars/r/general_log_file_basic.result @@ -20,6 +20,8 @@ SET @@global.general_log_file = '.my.cnf'; ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' SET @@global.general_log_file = 'my.cnf\0foo'; ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.cnf' +SET @@global.general_log_file = 'my.ini'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.ini' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.general_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result index db7eb238c43..a64666f6298 100644 --- a/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result +++ b/mysql-test/suite/sys_vars/r/slow_query_log_file_basic.result @@ -17,6 +17,8 @@ SET @@global.general_log_file = '.my.cnf'; ERROR 42000: Variable 'general_log_file' can't be set to the value of '.my.cnf' SET @@global.general_log_file = 'my.cnf\0foo'; ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.cnf' +SET @@global.general_log_file = 'my.ini'; +ERROR 42000: Variable 'general_log_file' can't be set to the value of 'my.ini' '#----------------------FN_DYNVARS_004_03------------------------#' SELECT @@global.slow_query_log_file = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/t/general_log_file_basic.test b/mysql-test/suite/sys_vars/t/general_log_file_basic.test index fdc99fb6dea..0a169b472e2 100644 --- a/mysql-test/suite/sys_vars/t/general_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/general_log_file_basic.test @@ -69,6 +69,8 @@ SET @@global.general_log_file = '/tmp/my.cnf'; SET @@global.general_log_file = '.my.cnf'; --error ER_WRONG_VALUE_FOR_VAR SET @@global.general_log_file = 'my.cnf\0foo'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = 'my.ini'; --echo '#----------------------FN_DYNVARS_004_03------------------------#' diff --git a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test index 79132a1bdc5..69ca5f21f62 100644 --- a/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test +++ b/mysql-test/suite/sys_vars/t/slow_query_log_file_basic.test @@ -67,6 +67,8 @@ SET @@global.slow_query_log_file = '/tmp/my.cnf'; SET @@global.general_log_file = '.my.cnf'; --error ER_WRONG_VALUE_FOR_VAR SET @@global.general_log_file = 'my.cnf\0foo'; +--error ER_WRONG_VALUE_FOR_VAR +SET @@global.general_log_file = 'my.ini'; --echo '#----------------------FN_DYNVARS_004_03------------------------#' ############################################################################## diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 7d43984c9c0..7b898906184 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3040,10 +3040,14 @@ static bool 
check_log_path(sys_var *self, THD *thd, set_var *var) return true; static const LEX_CSTRING my_cnf= { STRING_WITH_LEN("my.cnf") }; + static const LEX_CSTRING my_ini= { STRING_WITH_LEN("my.ini") }; if (path_length >= my_cnf.length) { if (strcasecmp(path + path_length - my_cnf.length, my_cnf.str) == 0) return true; // log file name ends with "my.cnf" + DBUG_ASSERT(my_cnf.length == my_ini.length); + if (strcasecmp(path + path_length - my_ini.length, my_ini.str) == 0) + return true; // log file name ends with "my.ini" } MY_STAT f_stat; -- cgit v1.2.1 From 5ad02062d928cccbd29c0a2db6f0f7ceb33195d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 9 Aug 2016 16:15:10 +0300 Subject: MDEV-10341: InnoDB: Failing assertion: mutex_own(mutex) - mutex_exit_func Fix memory barrier issues on releasing mutexes. We must have a full memory barrier between releasing a mutex lock and reading its waiters. This prevents us from missing to release waiters due to reading the number of waiters speculatively before releasing the lock. If threads try and wait between us reading the waiters count and releasing the lock, those threads might stall indefinitely. Also, we must use proper ACQUIRE/RELEASE semantics for atomic operations, not ACQUIRE/ACQUIRE. --- storage/innobase/include/os0sync.h | 11 ++--------- storage/innobase/include/sync0sync.ic | 5 +++++ storage/xtradb/include/os0sync.h | 11 ++--------- storage/xtradb/include/sync0sync.ic | 5 +++++ 4 files changed, 14 insertions(+), 18 deletions(-) diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h index 213974b01de..af58a232746 100644 --- a/storage/innobase/include/os0sync.h +++ b/storage/innobase/include/os0sync.h @@ -348,20 +348,13 @@ os_atomic_test_and_set(volatile lock_word_t* ptr) } /** Do an atomic release. - -In theory __sync_lock_release should be used to release the lock. -Unfortunately, it does not work properly alone. The workaround is -that more conservative __sync_lock_test_and_set is used instead. - -Performance regression was observed at some conditions for Intel -architecture. Disable release barrier on Intel architecture for now. @param[in,out] ptr Memory location to write to @return the previous value */ static inline -lock_word_t +void os_atomic_clear(volatile lock_word_t* ptr) { - return(__sync_lock_test_and_set(ptr, 0)); + __sync_lock_release(ptr); } # elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET) diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic index 1120da8a3be..d0f266309fc 100644 --- a/storage/innobase/include/sync0sync.ic +++ b/storage/innobase/include/sync0sync.ic @@ -178,6 +178,11 @@ mutex_exit_func( to wake up possible hanging threads if they are missed in mutex_signal_object. */ + /* We add a memory barrier to prevent reading of the + number of waiters before releasing the lock. */ + + os_mb; + if (mutex_get_waiters(mutex) != 0) { mutex_signal_object(mutex); diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index b52c078fa54..08da9dff4e3 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -381,20 +381,13 @@ os_atomic_test_and_set(volatile lock_word_t* ptr) } /** Do an atomic release. - -In theory __sync_lock_release should be used to release the lock. -Unfortunately, it does not work properly alone. The workaround is -that more conservative __sync_lock_test_and_set is used instead. - -Performance regression was observed at some conditions for Intel -architecture. 
Disable release barrier on Intel architecture for now. @param[in,out] ptr Memory location to write to @return the previous value */ static inline -lock_word_t +void os_atomic_clear(volatile lock_word_t* ptr) { - return(__sync_lock_test_and_set(ptr, 0)); + __sync_lock_release(ptr); } # elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET) diff --git a/storage/xtradb/include/sync0sync.ic b/storage/xtradb/include/sync0sync.ic index 48039c854d9..c733becf6df 100644 --- a/storage/xtradb/include/sync0sync.ic +++ b/storage/xtradb/include/sync0sync.ic @@ -178,6 +178,11 @@ mutex_exit_func( to wake up possible hanging threads if they are missed in mutex_signal_object. */ + /* We add a memory barrier to prevent reading of the + number of waiters before releasing the lock. */ + + os_mb; + if (mutex_get_waiters(mutex) != 0) { mutex_signal_object(mutex); -- cgit v1.2.1 From e20e28bd2eca6bb4961a298c2765885155bfdd57 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 10 Aug 2016 01:13:09 +0300 Subject: Fix for the previous cset: make first_explain_order_tab handle degenerate joins --- sql/sql_select.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 55d2838e869..5885529d37d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8365,6 +8365,8 @@ JOIN_TAB *first_explain_order_tab(JOIN* join) { JOIN_TAB* tab; tab= join->join_tab; + if (!tab) + return NULL; /* Can happen when when the tables were optimized away */ return (tab->bush_children) ? tab->bush_children->start : tab; } -- cgit v1.2.1 From 9a809fe31be15131baf909e898c1ad2c02976728 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 14 Jul 2016 14:29:59 +0200 Subject: MW-292 Reset timestamp after transaction replay Transaction replay causes the THD to re-apply the replication events from execution, using the same path appliers do. While applying the log events, the THD's timestamp is set to the timestamp of the event. Setting the timestamp explicitly causes function NOW() to always the timestamp that was set. To avoid this behavior we reset the timestamp after replaying is done. 
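The fix below reuses the save/restore pattern already applied to the other session attributes kept in wsrep_thd_shadow: the THD's user_time is captured before replay and put back afterwards, so NOW() resumes following the wall clock. A minimal sketch of that pattern, under the assumption of invented stand-in names — Session, Shadow, prepare_replay() and return_from_replay() are not the real THD/wsrep_thd_shadow code, which lives in wsrep_prepare_bf_thd() and wsrep_return_from_bf_mode() in sql/wsrep_thd.cc:

#include <cstdint>

/* Hypothetical stand-ins for THD and wsrep_thd_shadow, for illustration only. */
struct Session { uint64_t user_time; };   /* 0 = not pinned, follow the clock */
struct Shadow  { uint64_t user_time; };

static void prepare_replay(Session *s, Shadow *shadow)
{
  shadow->user_time= s->user_time;  /* remember the pre-replay timestamp state */
  /* ... the applier path may now pin s->user_time per replicated event ... */
}

static void return_from_replay(Session *s, const Shadow *shadow)
{
  s->user_time= shadow->user_time;  /* drop any timestamp pinned during replay */
}

int main()
{
  Session s= { 0 };                 /* NOW() tracks the wall clock */
  Shadow  shadow;
  prepare_replay(&s, &shadow);
  s.user_time= 1468531200;          /* replay pinned NOW() to an event timestamp */
  return_from_replay(&s, &shadow);
  return s.user_time == 0 ? 0 : 1;  /* 0: restored, NOW() is no longer stuck */
}

The test case added in the next commit checks exactly this observable effect: after a forced replay, TIMEDIFF(SYSDATE(), NOW()) stays small instead of growing with the sleep.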
--- sql/sql_class.h | 1 + sql/wsrep_thd.cc | 2 ++ 2 files changed, 3 insertions(+) diff --git a/sql/sql_class.h b/sql/sql_class.h index ad3e94d43ca..e42eeacfff0 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -55,6 +55,7 @@ struct wsrep_thd_shadow { ulong tx_isolation; char *db; size_t db_length; + my_hrtime_t user_time; }; #endif class Reprepare_observer; diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 50e8a09706b..4d665775f2d 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -127,6 +127,7 @@ static void wsrep_prepare_bf_thd(THD *thd, struct wsrep_thd_shadow* shadow) shadow->db = thd->db; shadow->db_length = thd->db_length; + shadow->user_time = thd->user_time; thd->reset_db(NULL, 0); } @@ -137,6 +138,7 @@ static void wsrep_return_from_bf_mode(THD *thd, struct wsrep_thd_shadow* shadow) thd->wsrep_exec_mode = shadow->wsrep_exec_mode; thd->net.vio = shadow->vio; thd->variables.tx_isolation = shadow->tx_isolation; + thd->user_time = shadow->user_time; thd->reset_db(shadow->db, shadow->db_length); thd->wsrep_rli->cleanup_after_session(); -- cgit v1.2.1 From dfadb3680d0ffc211ce4f36fed28e59e3fec0842 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 15 Jul 2016 01:13:32 -0700 Subject: Galera MTR Tests: Test case for MW-292 : NOW() returns stale timestamp after transaction replay --- mysql-test/suite/galera/r/MW-292.result | 30 +++++++++++++ mysql-test/suite/galera/t/MW-292.test | 79 +++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 mysql-test/suite/galera/r/MW-292.result create mode 100644 mysql-test/suite/galera/t/MW-292.test diff --git a/mysql-test/suite/galera/r/MW-292.result b/mysql-test/suite/galera/r/MW-292.result new file mode 100644 index 00000000000..f038f880efa --- /dev/null +++ b/mysql-test/suite/galera/r/MW-292.result @@ -0,0 +1,30 @@ +CREATE TABLE rand_table (f1 FLOAT); +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (2, 'a'); +SET AUTOCOMMIT=ON; +START TRANSACTION; +UPDATE t1 SET f2 = 'b' WHERE f1 = 1; +SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE; +f1 f2 +2 a +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +COMMIT;; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +UPDATE t1 SET f2 = 'c' WHERE f1 = 2; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SELECT TIMEDIFF(SYSDATE(), NOW()) < 2; +TIMEDIFF(SYSDATE(), NOW()) < 2 +1 +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +SELECT COUNT(DISTINCT f1) = 10 FROM rand_table; +COUNT(DISTINCT f1) = 10 +1 +wsrep_local_replays +1 +DROP TABLE t1; +DROP TABLE rand_table; diff --git a/mysql-test/suite/galera/t/MW-292.test b/mysql-test/suite/galera/t/MW-292.test new file mode 100644 index 00000000000..945d9f42458 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-292.test @@ -0,0 +1,79 @@ +# +# MW-292 Reset timestamp after transaction replay +# +# We force transaction replay to happen and then we check that NOW() is not stuck in time. 
+# As a bonus we also check that RAND() continues to return random values after replay +# +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source suite/galera/include/galera_have_debug_sync.inc + +--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` + +CREATE TABLE rand_table (f1 FLOAT); +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); +INSERT INTO t1 VALUES (1, 'a'); +INSERT INTO t1 VALUES (2, 'a'); + +--connection node_1 +SET AUTOCOMMIT=ON; +START TRANSACTION; + +UPDATE t1 SET f2 = 'b' WHERE f1 = 1; +SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE; + +# Block the commit +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +--connection node_1 +--send COMMIT; + +# Wait until commit is blocked +--connection node_1a +SET SESSION wsrep_sync_wait = 0; +--source include/galera_wait_sync_point.inc + +# Issue a conflicting update on node #2 +--connection node_2 +UPDATE t1 SET f2 = 'c' WHERE f1 = 2; + +# Wait for both transactions to be blocked +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'System lock'; +--source include/wait_condition.inc + +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; +--source include/wait_condition.inc + +# Unblock the commit +--connection node_1a +--source include/galera_clear_sync_point.inc +--source include/galera_signal_sync_point.inc + +# Commit succeeds via replay +--connection node_1 +--reap + +# Confirm that NOW() is not stuck in time relative to SYSDATE(); +--sleep 3 +SELECT TIMEDIFF(SYSDATE(), NOW()) < 2; + +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); +INSERT INTO rand_table VALUES (RAND()),(RAND()),(RAND()),(RAND()),(RAND()); + +SELECT COUNT(DISTINCT f1) = 10 FROM rand_table; + +# wsrep_local_replays has increased by 1 +--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` +--disable_query_log +--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays; +--enable_query_log + +--connection node_2 +DROP TABLE t1; +DROP TABLE rand_table; -- cgit v1.2.1 From a2934d2710d24c20ec205007bdad2495a492ad83 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 10 Aug 2016 18:27:31 +0200 Subject: - JdbcInterface: change return type of ...Field function modified: storage/connect/JdbcInterface.java - Change Version number and date modified: storage/connect/ha_connect.cc - Implement the test on connect_type_conv YES/NO modified: storage/connect/jdbconn.cpp modified: storage/connect/odbconn.cpp - Fix MDEV-10520. Local schema was confused with remote schema modified: storage/connect/tabjdbc.cpp modified: storage/connect/tabodbc.cpp - Fix crash when using mapped indices. Was trying to write in a mapped file declared as read only. 
modified: storage/connect/xindex.cpp --- storage/connect/JdbcInterface.java | 21 ++++++++++++--------- storage/connect/ha_connect.cc | 6 +++--- storage/connect/jdbconn.cpp | 6 +++++- storage/connect/odbconn.cpp | 11 ++++++++--- storage/connect/tabjdbc.cpp | 14 ++++++++------ storage/connect/tabodbc.cpp | 18 ++++++++++++------ storage/connect/xindex.cpp | 11 ++++++----- 7 files changed, 54 insertions(+), 33 deletions(-) diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java index 793d29936c8..f765052915d 100644 --- a/storage/connect/JdbcInterface.java +++ b/storage/connect/JdbcInterface.java @@ -641,40 +641,43 @@ public class JdbcInterface { return false; } // end of BooleanField - public Date DateField(int n, String name) { + public int DateField(int n, String name) { if (rs == null) { System.out.println("No result set"); } else try { - return (n > 0) ? rs.getDate(n) : rs.getDate(name); + Date d = (n > 0) ? rs.getDate(n) : rs.getDate(name); + return (d != null) ? (int)(d.getTime() / 1000) : 0; } catch (SQLException se) { SetErrmsg(se); } //end try/catch - return null; + return 0; } // end of DateField - public Time TimeField(int n, String name) { + public int TimeField(int n, String name) { if (rs == null) { System.out.println("No result set"); } else try { - return (n > 0) ? rs.getTime(n) : rs.getTime(name); + Time t = (n > 0) ? rs.getTime(n) : rs.getTime(name); + return (t != null) ? (int)(t.getTime() / 1000) : 0; } catch (SQLException se) { SetErrmsg(se); } //end try/catch - return null; + return 0; } // end of TimeField - public Timestamp TimestampField(int n, String name) { + public int TimestampField(int n, String name) { if (rs == null) { System.out.println("No result set"); } else try { - return (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name); + Timestamp ts = (n > 0) ? rs.getTimestamp(n) : rs.getTimestamp(name); + return (ts != null) ? 
(int)(ts.getTime() / 1000) : 0; } catch (SQLException se) { SetErrmsg(se); } //end try/catch - return null; + return 0; } // end of TimestampField public String ObjectField(int n, String name) { diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 85380860652..ea6fb1b08c1 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -171,9 +171,9 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.04.0006 May 08, 2016"; + char version[]= "Version 1.04.0008 August 10, 2016"; #if defined(__WIN__) - char compver[]= "Version 1.04.0006 " __DATE__ " " __TIME__; + char compver[]= "Version 1.04.0008 " __DATE__ " " __TIME__; char slash= '\\'; #else // !__WIN__ char slash= '/'; @@ -6935,7 +6935,7 @@ maria_declare_plugin(connect) 0x0104, /* version number (1.04) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.04.0006", /* string version */ + "1.04.0008", /* string version */ MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index d3e573e692c..3b8de3e975b 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -59,6 +59,7 @@ extern "C" HINSTANCE s_hModule; // Saved module handle #define nullptr 0 #endif // !__WIN__ +TYPCONV GetTypeConv(); int GetConvSize(); extern char *JvmPath; // The connect_jvm_path global variable value extern char *ClassPath; // The connect_class_path global variable value @@ -121,7 +122,10 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) switch (stp) { case -1: // LONGVARCHAR - len = MY_MIN(abs(len), GetConvSize()); + if (GetTypeConv() != TPC_YES) + return TYPE_ERROR; + else + len = MY_MIN(abs(len), GetConvSize()); case 12: // VARCHAR v = 'V'; case 1: // CHAR diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 863d3320f7f..7320f4cc1d9 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -53,6 +53,7 @@ extern "C" HINSTANCE s_hModule; // Saved module handle #endif // __WIN__ +TYPCONV GetTypeConv(); int GetConvSize(); /***********************************************************************/ @@ -135,9 +136,13 @@ int TranslateSQLType(int stp, int prec, int& len, char& v, bool& w) case SQL_WLONGVARCHAR: // (-10) w = true; case SQL_LONGVARCHAR: // (-1) - v = 'V'; - type = TYPE_STRING; - len = MY_MIN(abs(len), GetConvSize()); + if (GetTypeConv() == TPC_YES) { + v = 'V'; + type = TYPE_STRING; + len = MY_MIN(abs(len), GetConvSize()); + } else + type = TYPE_ERROR; + break; case SQL_NUMERIC: // 2 case SQL_DECIMAL: // 3 diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 06a2c025827..86fd831b262 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -522,9 +522,10 @@ bool TDBJDBC::MakeSQL(PGLOBAL g, bool cnt) if (Catalog && *Catalog) catp = Catalog; - if (tablep->GetSchema()) - schmp = (char*)tablep->GetSchema(); - else if (Schema && *Schema) + //if (tablep->GetSchema()) + // schmp = (char*)tablep->GetSchema(); + //else + if (Schema && *Schema) schmp = Schema; if (catp) { @@ -606,9 +607,10 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) if (catp) len += strlen(catp) + 1; - if (tablep->GetSchema()) - schmp = (char*)tablep->GetSchema(); - else if (Schema && *Schema) + //if (tablep->GetSchema()) + // schmp = (char*)tablep->GetSchema(); + //else + if (Schema && *Schema) schmp = Schema; if (schmp) diff --git a/storage/connect/tabodbc.cpp 
b/storage/connect/tabodbc.cpp index e76d9c46bd3..f3ffc99ac15 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -458,9 +458,14 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt) if (Catalog && *Catalog) catp = Catalog; - if (tablep->GetSchema()) - schmp = (char*)tablep->GetSchema(); - else if (Schema && *Schema) + // Following lines are commented because of MSDEV-10520 + // Indeed the schema in the tablep is the local table database and + // is normally not related to the remote table database. + // TODO: Try to remember why this was done and if it was useful in some case. + //if (tablep->GetSchema()) + // schmp = (char*)tablep->GetSchema(); + //else + if (Schema && *Schema) schmp = Schema; if (catp) { @@ -541,9 +546,10 @@ bool TDBODBC::MakeInsert(PGLOBAL g) if (catp) len += strlen(catp) + 1; - if (tablep->GetSchema()) - schmp = (char*)tablep->GetSchema(); - else if (Schema && *Schema) + //if (tablep->GetSchema()) + // schmp = (char*)tablep->GetSchema(); + //else + if (Schema && *Schema) schmp = Schema; if (schmp) diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 69aa7e2c20e..56312630278 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -1198,7 +1198,7 @@ bool XINDEX::MapInit(PGLOBAL g) const char *ftype; BYTE *mbase; char fn[_MAX_PATH]; - int *nv, k, n, id = -1; + int *nv, nv0, k, n, id = -1; bool estim; PCOL colp; PXCOL prev = NULL, kcp = NULL; @@ -1288,25 +1288,26 @@ bool XINDEX::MapInit(PGLOBAL g) if (nv[0] >= MAX_INDX) { // New index format Srtd = nv[7] != 0; - nv[0] -= MAX_INDX; + nv0 = nv[0] - MAX_INDX; mbase += NZ * sizeof(int); } else { Srtd = false; mbase += (NZ - 1) * sizeof(int); + nv0 = nv[0]; } // endif nv if (trace) htrc("nv=%d %d %d %d %d %d %d %d\n", - nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); + nv0, nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); // The test on ID was suppressed because MariaDB can change an index ID // when other indexes are added or deleted - if (/*nv[0] != ID ||*/ nv[1] != Nk) { + if (/*nv0 != ID ||*/ nv[1] != Nk) { // Not this index sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); if (trace) - htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); + htrc("nv0=%d ID=%d nv[1]=%d Nk=%d\n", nv0, ID, nv[1], Nk); goto err; } // endif nv -- cgit v1.2.1 From b4f97a1499c8c5de454f0593d9d694057d0a46c9 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 10 Aug 2016 19:23:00 +0200 Subject: 5.6.32 --- storage/innobase/fts/fts0fts.cc | 67 ++++++++++++++++++++++++++++------- storage/innobase/fts/fts0opt.cc | 2 +- storage/innobase/handler/ha_innodb.cc | 7 ++-- storage/innobase/handler/i_s.cc | 39 ++++++++++++++++++-- storage/innobase/include/fts0fts.h | 4 ++- storage/innobase/row/row0merge.cc | 2 +- storage/innobase/srv/srv0mon.cc | 7 ++-- 7 files changed, 107 insertions(+), 21 deletions(-) diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 22278338072..25059db96b0 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -265,13 +265,15 @@ FTS auxiliary INDEX table and clear the cache at the end. 
@param[in,out] sync sync state @param[in] unlock_cache whether unlock cache lock when write node @param[in] wait whether wait when a sync is in progress +@param[in] has_dict whether has dict operation lock @return DB_SUCCESS if all OK */ static dberr_t fts_sync( fts_sync_t* sync, bool unlock_cache, - bool wait); + bool wait, + bool has_dict); /****************************************************************//** Release all resources help by the words rb tree e.g., the node ilist. */ @@ -3566,7 +3568,7 @@ fts_add_doc_by_id( DBUG_EXECUTE_IF( "fts_instrument_sync_debug", - fts_sync(cache->sync, true, true); + fts_sync(cache->sync, true, true, false); ); DEBUG_SYNC_C("fts_instrument_sync_request"); @@ -4378,13 +4380,11 @@ fts_sync_index( } /** Check if index cache has been synced completely -@param[in,out] sync sync state @param[in,out] index_cache index cache @return true if index is synced, otherwise false. */ static bool fts_sync_index_check( - fts_sync_t* sync, fts_index_cache_t* index_cache) { const ib_rbt_node_t* rbt_node; @@ -4407,14 +4407,36 @@ fts_sync_index_check( return(true); } -/*********************************************************************//** -Commit the SYNC, change state of processed doc ids etc. +/** Reset synced flag in index cache when rollback +@param[in,out] index_cache index cache */ +static +void +fts_sync_index_reset( + fts_index_cache_t* index_cache) +{ + const ib_rbt_node_t* rbt_node; + + for (rbt_node = rbt_first(index_cache->words); + rbt_node != NULL; + rbt_node = rbt_next(index_cache->words, rbt_node)) { + + fts_tokenizer_word_t* word; + word = rbt_value(fts_tokenizer_word_t, rbt_node); + + fts_node_t* fts_node; + fts_node = static_cast(ib_vector_last(word->nodes)); + + fts_node->synced = false; + } +} + +/** Commit the SYNC, change state of processed doc ids etc. +@param[in,out] sync sync state @return DB_SUCCESS if all OK */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_commit( -/*============*/ - fts_sync_t* sync) /*!< in: sync state */ + fts_sync_t* sync) { dberr_t error; trx_t* trx = sync->trx; @@ -4467,6 +4489,8 @@ fts_sync_commit( (double) n_nodes/ (double) elapsed_time); } + /* Avoid assertion in trx_free(). */ + trx->dict_operation_lock_mode = 0; trx_free_for_background(trx); return(error); @@ -4489,6 +4513,10 @@ fts_sync_rollback( index_cache = static_cast( ib_vector_get(cache->indexes, i)); + /* Reset synced flag so nodes will not be skipped + in the next sync, see fts_sync_write_words(). */ + fts_sync_index_reset(index_cache); + for (j = 0; fts_index_selector[j].value; ++j) { if (index_cache->ins_graph[j] != NULL) { @@ -4514,6 +4542,9 @@ fts_sync_rollback( rw_lock_x_unlock(&cache->lock); fts_sql_rollback(trx); + + /* Avoid assertion in trx_free(). */ + trx->dict_operation_lock_mode = 0; trx_free_for_background(trx); } @@ -4522,13 +4553,15 @@ FTS auxiliary INDEX table and clear the cache at the end. @param[in,out] sync sync state @param[in] unlock_cache whether unlock cache lock when write node @param[in] wait whether wait when a sync is in progress +@param[in] has_dict whether has dict operation lock @return DB_SUCCESS if all OK */ static dberr_t fts_sync( fts_sync_t* sync, bool unlock_cache, - bool wait) + bool wait, + bool has_dict) { ulint i; dberr_t error = DB_SUCCESS; @@ -4557,6 +4590,12 @@ fts_sync( DEBUG_SYNC_C("fts_sync_begin"); fts_sync_begin(sync); + /* When sync in background, we hold dict operation lock + to prevent DDL like DROP INDEX, etc. 
*/ + if (has_dict) { + sync->trx->dict_operation_lock_mode = RW_S_LATCH; + } + begin_sync: if (cache->total_size > fts_max_cache_size) { /* Avoid the case: sync never finish when @@ -4597,7 +4636,7 @@ begin_sync: ib_vector_get(cache->indexes, i)); if (index_cache->index->to_be_dropped - || fts_sync_index_check(sync, index_cache)) { + || fts_sync_index_check(index_cache)) { continue; } @@ -4612,6 +4651,7 @@ end_sync: } rw_lock_x_lock(&cache->lock); + sync->interrupted = false; sync->in_progress = false; os_event_set(sync->event); rw_lock_x_unlock(&cache->lock); @@ -4635,20 +4675,23 @@ FTS auxiliary INDEX table and clear the cache at the end. @param[in,out] table fts table @param[in] unlock_cache whether unlock cache when write node @param[in] wait whether wait for existing sync to finish +@param[in] has_dict whether has dict operation lock @return DB_SUCCESS on success, error code on failure. */ UNIV_INTERN dberr_t fts_sync_table( dict_table_t* table, bool unlock_cache, - bool wait) + bool wait, + bool has_dict) { dberr_t err = DB_SUCCESS; ut_ad(table->fts); if (!dict_table_is_discarded(table) && table->fts->cache) { - err = fts_sync(table->fts->cache->sync, unlock_cache, wait); + err = fts_sync(table->fts->cache->sync, + unlock_cache, wait, has_dict); } return(err); diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index 1cf45961ae2..0d45a195c95 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -2986,7 +2986,7 @@ fts_optimize_sync_table( if (table) { if (dict_table_has_fts_index(table) && table->fts->cache) { - fts_sync_table(table, true, false); + fts_sync_table(table, true, false, true); } dict_table_close(table, FALSE, FALSE); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 2a16f8daf24..95a0e08a806 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -6529,6 +6529,7 @@ dberr_t ha_innobase::innobase_lock_autoinc(void) /*====================================*/ { + DBUG_ENTER("ha_innobase::innobase_lock_autoinc"); dberr_t error = DB_SUCCESS; ut_ad(!srv_read_only_mode); @@ -6563,6 +6564,8 @@ ha_innobase::innobase_lock_autoinc(void) /* Fall through to old style locking. */ case AUTOINC_OLD_STYLE_LOCKING: + DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", + ut_ad(0);); error = row_lock_table_autoinc_for_mysql(prebuilt); if (error == DB_SUCCESS) { @@ -6576,7 +6579,7 @@ ha_innobase::innobase_lock_autoinc(void) ut_error; } - return(error); + DBUG_RETURN(error); } /********************************************************************//** @@ -11392,7 +11395,7 @@ ha_innobase::optimize( if (innodb_optimize_fulltext_only) { if (prebuilt->table->fts && prebuilt->table->fts->cache && !dict_table_is_discarded(prebuilt->table)) { - fts_sync_table(prebuilt->table, false, true); + fts_sync_table(prebuilt->table, false, true, false); fts_optimize_table(prebuilt->table); } return(HA_ADMIN_OK); diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 192003a19d2..43905f4a32b 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3004,15 +3004,26 @@ i_s_fts_deleted_generic_fill( DBUG_RETURN(0); } - deleted = fts_doc_ids_create(); + /* Prevent DDL to drop fts aux tables. */ + rw_lock_s_lock(&dict_operation_lock); user_table = dict_table_open_on_name( fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); if (!user_table) { + rw_lock_s_unlock(&dict_operation_lock); + + DBUG_RETURN(0); + } else if (!dict_table_has_fts_index(user_table)) { + dict_table_close(user_table, FALSE, FALSE); + + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } + deleted = fts_doc_ids_create(); + trx = trx_allocate_for_background(); trx->op_info = "Select for FTS DELETE TABLE"; @@ -3040,6 +3051,8 @@ i_s_fts_deleted_generic_fill( dict_table_close(user_table, FALSE, FALSE); + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } @@ -3421,6 +3434,12 @@ i_s_fts_index_cache_fill( DBUG_RETURN(0); } + if (user_table->fts == NULL || user_table->fts->cache == NULL) { + dict_table_close(user_table, FALSE, FALSE); + + DBUG_RETURN(0); + } + cache = user_table->fts->cache; ut_a(cache); @@ -3859,10 +3878,15 @@ i_s_fts_index_table_fill( DBUG_RETURN(0); } + /* Prevent DDL to drop fts aux tables. */ + rw_lock_s_lock(&dict_operation_lock); + user_table = dict_table_open_on_name( fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); if (!user_table) { + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } @@ -3875,6 +3899,8 @@ i_s_fts_index_table_fill( dict_table_close(user_table, FALSE, FALSE); + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } @@ -4014,14 +4040,21 @@ i_s_fts_config_fill( fields = table->field; + /* Prevent DDL to drop fts aux tables. */ + rw_lock_s_lock(&dict_operation_lock); + user_table = dict_table_open_on_name( fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); if (!user_table) { + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } else if (!dict_table_has_fts_index(user_table)) { dict_table_close(user_table, FALSE, FALSE); + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } @@ -4077,6 +4110,8 @@ i_s_fts_config_fill( dict_table_close(user_table, FALSE, FALSE); + rw_lock_s_unlock(&dict_operation_lock); + DBUG_RETURN(0); } diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 68d4d333245..87b5787d416 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -840,13 +840,15 @@ FTS auxiliary INDEX table and clear the cache at the end. @param[in,out] table fts table @param[in] unlock_cache whether unlock cache when write node @param[in] wait whether wait for existing sync to finish +@param[in] has_dict whether has dict operation lock @return DB_SUCCESS on success, error code on failure. */ UNIV_INTERN dberr_t fts_sync_table( dict_table_t* table, bool unlock_cache, - bool wait); + bool wait, + bool has_dict); /****************************************************************//** Free the query graph but check whether dict_sys->mutex is already diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index dbb7a79cf58..c094be8a23b 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -1987,7 +1987,7 @@ wait_again: /* Sync fts cache for other fts indexes to keep all fts indexes consistent in sync_doc_id. 
*/ err = fts_sync_table(const_cast(new_table), - false, true); + false, true, false); if (err == DB_SUCCESS) { fts_update_next_doc_id( diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 80c8f7fadbc..1aab9495644 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -1347,7 +1347,10 @@ srv_mon_set_module_control( module */ set_current_module = FALSE; } else if (module_id == MONITOR_ALL_COUNTER) { - continue; + if (!(innodb_counter_info[ix].monitor_type + & MONITOR_GROUP_MODULE)) { + continue; + } } else { /* Hitting the next module, stop */ break; -- cgit v1.2.1 From 64752acf72175ce65250f5c15a93b1850e5640d1 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 10 Aug 2016 19:24:58 +0200 Subject: 5.6.31-77.0 --- storage/xtradb/btr/btr0btr.cc | 20 +-- storage/xtradb/btr/btr0cur.cc | 16 +- storage/xtradb/btr/btr0sea.cc | 4 +- storage/xtradb/buf/buf0buddy.cc | 6 +- storage/xtradb/buf/buf0buf.cc | 4 +- storage/xtradb/buf/buf0dump.cc | 8 +- storage/xtradb/buf/buf0flu.cc | 20 +-- storage/xtradb/buf/buf0lru.cc | 18 +-- storage/xtradb/data/data0data.cc | 7 +- storage/xtradb/dict/dict0crea.cc | 12 +- storage/xtradb/dict/dict0dict.cc | 14 +- storage/xtradb/dict/dict0load.cc | 26 ++-- storage/xtradb/dict/dict0mem.cc | 4 +- storage/xtradb/dict/dict0stats_bg.cc | 12 +- storage/xtradb/fil/fil0fil.cc | 21 ++- storage/xtradb/fsp/fsp0fsp.cc | 39 ++--- storage/xtradb/fts/fts0blex.cc | 26 ++-- storage/xtradb/fts/fts0fts.cc | 64 ++++---- storage/xtradb/fts/fts0opt.cc | 52 ++++--- storage/xtradb/fts/fts0que.cc | 28 ++-- storage/xtradb/fts/fts0tlex.cc | 26 ++-- storage/xtradb/fts/make_parser.sh | 28 ++-- storage/xtradb/handler/ha_innodb.cc | 95 +++++++----- storage/xtradb/handler/ha_innodb.h | 20 +-- storage/xtradb/handler/handler0alter.cc | 74 ++++----- storage/xtradb/ibuf/ibuf0ibuf.cc | 141 +++++++---------- storage/xtradb/include/api0api.h | 4 +- storage/xtradb/include/btr0btr.h | 84 +++++------ storage/xtradb/include/btr0btr.ic | 18 ++- storage/xtradb/include/btr0cur.h | 38 ++--- storage/xtradb/include/btr0pcur.h | 4 +- storage/xtradb/include/btr0sea.h | 16 +- storage/xtradb/include/btr0types.h | 18 +-- storage/xtradb/include/buf0buddy.h | 6 +- storage/xtradb/include/buf0buddy.ic | 6 +- storage/xtradb/include/buf0buf.h | 74 ++++----- storage/xtradb/include/buf0flu.h | 8 +- storage/xtradb/include/buf0lru.h | 10 +- storage/xtradb/include/data0data.h | 84 +++++------ storage/xtradb/include/data0data.ic | 8 +- storage/xtradb/include/dict0boot.h | 8 +- storage/xtradb/include/dict0crea.h | 8 +- storage/xtradb/include/dict0crea.ic | 4 +- storage/xtradb/include/dict0dict.h | 260 ++++++++++++++++---------------- storage/xtradb/include/dict0dict.ic | 7 +- storage/xtradb/include/dict0load.h | 4 +- storage/xtradb/include/dict0mem.h | 18 +-- storage/xtradb/include/dict0stats.h | 10 +- storage/xtradb/include/dict0stats_bg.h | 4 +- storage/xtradb/include/dyn0dyn.h | 22 +-- storage/xtradb/include/dyn0dyn.ic | 4 +- storage/xtradb/include/fil0fil.h | 20 +-- storage/xtradb/include/fsp0fsp.h | 18 +-- storage/xtradb/include/fts0ast.h | 10 +- storage/xtradb/include/fts0fts.h | 46 +++--- 
storage/xtradb/include/fts0priv.h | 88 +++++------ storage/xtradb/include/fts0priv.ic | 8 +- storage/xtradb/include/ha_prototypes.h | 22 +-- storage/xtradb/include/handler0alter.h | 10 +- storage/xtradb/include/ibuf0ibuf.h | 44 +++--- storage/xtradb/include/lock0lock.h | 36 ++--- storage/xtradb/include/lock0priv.h | 4 +- storage/xtradb/include/log0online.h | 4 +- storage/xtradb/include/log0recv.h | 4 +- storage/xtradb/include/mach0data.h | 38 ++--- storage/xtradb/include/mem0mem.h | 4 +- storage/xtradb/include/mem0mem.ic | 6 +- storage/xtradb/include/mtr0mtr.h | 10 +- storage/xtradb/include/mtr0mtr.ic | 4 +- storage/xtradb/include/os0file.h | 16 +- storage/xtradb/include/os0thread.h | 4 +- storage/xtradb/include/page0cur.h | 8 +- storage/xtradb/include/page0page.h | 56 +++---- storage/xtradb/include/page0types.h | 10 +- storage/xtradb/include/page0zip.h | 50 +++--- storage/xtradb/include/pars0pars.h | 6 +- storage/xtradb/include/read0read.h | 4 +- storage/xtradb/include/rem0cmp.h | 6 +- storage/xtradb/include/rem0rec.h | 152 +++++++++---------- storage/xtradb/include/rem0rec.ic | 5 +- storage/xtradb/include/row0ftsort.h | 6 +- storage/xtradb/include/row0import.h | 8 +- storage/xtradb/include/row0ins.h | 14 +- storage/xtradb/include/row0log.h | 32 ++-- storage/xtradb/include/row0merge.h | 38 ++--- storage/xtradb/include/row0mysql.h | 44 +++--- storage/xtradb/include/row0purge.h | 8 +- storage/xtradb/include/row0quiesce.h | 8 +- storage/xtradb/include/row0row.h | 30 ++-- storage/xtradb/include/row0sel.h | 6 +- storage/xtradb/include/row0uins.h | 4 +- storage/xtradb/include/row0umod.h | 4 +- storage/xtradb/include/row0upd.h | 18 +-- storage/xtradb/include/row0vers.h | 6 +- storage/xtradb/include/srv0srv.h | 6 +- storage/xtradb/include/srv0start.h | 6 +- storage/xtradb/include/sync0arr.h | 4 +- storage/xtradb/include/sync0rw.h | 8 +- storage/xtradb/include/sync0rw.ic | 4 +- storage/xtradb/include/sync0sync.h | 12 +- storage/xtradb/include/trx0rec.h | 14 +- storage/xtradb/include/trx0roll.h | 16 +- storage/xtradb/include/trx0sys.h | 6 +- storage/xtradb/include/trx0trx.h | 28 ++-- storage/xtradb/include/trx0trx.ic | 2 +- storage/xtradb/include/trx0undo.h | 14 +- storage/xtradb/include/univ.i | 10 +- storage/xtradb/include/ut0byte.h | 8 +- storage/xtradb/include/ut0dbg.h | 4 +- storage/xtradb/include/ut0mem.h | 4 +- storage/xtradb/include/ut0rnd.h | 12 +- storage/xtradb/include/ut0ut.h | 8 +- storage/xtradb/lock/lock0lock.cc | 23 +-- storage/xtradb/lock/lock0wait.cc | 4 +- storage/xtradb/log/log0log.cc | 4 +- storage/xtradb/log/log0online.cc | 4 +- storage/xtradb/log/log0recv.cc | 52 ++++--- storage/xtradb/mem/mem0dbg.cc | 6 +- storage/xtradb/mtr/mtr0mtr.cc | 8 +- storage/xtradb/os/os0file.cc | 18 +-- storage/xtradb/page/page0page.cc | 4 +- storage/xtradb/page/page0zip.cc | 11 +- storage/xtradb/pars/lexyy.cc | 10 +- storage/xtradb/pars/make_flex.sh | 12 +- storage/xtradb/pars/pars0pars.cc | 8 +- storage/xtradb/rem/rem0cmp.cc | 19 ++- storage/xtradb/rem/rem0rec.cc | 12 +- storage/xtradb/row/row0ftsort.cc | 18 ++- storage/xtradb/row/row0import.cc | 35 +++-- storage/xtradb/row/row0ins.cc | 30 ++-- storage/xtradb/row/row0log.cc | 34 ++--- storage/xtradb/row/row0merge.cc | 28 ++-- storage/xtradb/row/row0mysql.cc | 23 +-- storage/xtradb/row/row0purge.cc | 24 +-- storage/xtradb/row/row0quiesce.cc | 16 +- storage/xtradb/row/row0row.cc | 18 ++- storage/xtradb/row/row0sel.cc | 20 +-- storage/xtradb/row/row0uins.cc | 10 +- storage/xtradb/row/row0umod.cc | 27 ++-- storage/xtradb/row/row0undo.cc | 7 +- 
storage/xtradb/row/row0upd.cc | 25 +-- storage/xtradb/srv/srv0srv.cc | 24 +-- storage/xtradb/srv/srv0start.cc | 10 +- storage/xtradb/sync/sync0sync.cc | 6 +- storage/xtradb/trx/trx0purge.cc | 4 +- storage/xtradb/trx/trx0rec.cc | 10 +- storage/xtradb/trx/trx0roll.cc | 6 +- storage/xtradb/trx/trx0trx.cc | 14 +- storage/xtradb/trx/trx0undo.cc | 14 +- 149 files changed, 1631 insertions(+), 1564 deletions(-) diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc index ee36302b75b..95b8892415c 100644 --- a/storage/xtradb/btr/btr0btr.cc +++ b/storage/xtradb/btr/btr0btr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -1122,7 +1122,7 @@ that the caller has made the reservation for free extents! @retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) buf_block_t* btr_page_alloc_low( /*===============*/ @@ -2013,7 +2013,7 @@ IBUF_BITMAP_FREE is unaffected by reorganization. @retval true if the operation was successful @retval false if it is a compressed page, and recompression failed */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) bool btr_page_reorganize_block( /*======================*/ @@ -2075,7 +2075,8 @@ btr_parse_page_reorganize( { ulint level; - ut_ad(ptr && end_ptr); + ut_ad(ptr != NULL); + ut_ad(end_ptr != NULL); /* If dealing with a compressed page the record has the compression level used during original compression written in @@ -2542,7 +2543,7 @@ func_exit: Returns TRUE if the insert fits on the appropriate half-page with the chosen split_rec. @return true if fits */ -static __attribute__((nonnull(1,3,4,6), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,3,4,6), warn_unused_result)) bool btr_page_insert_fits( /*=================*/ @@ -2685,7 +2686,7 @@ btr_insert_on_non_leaf_level_func( /**************************************************************//** Attaches the halves of an index page on the appropriate level in an index tree. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void btr_attach_half_pages( /*==================*/ @@ -2821,7 +2822,7 @@ btr_attach_half_pages( /*************************************************************//** Determine if a tuple is smaller than any record on the page. @return TRUE if smaller */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool btr_page_tuple_smaller( /*===================*/ @@ -3397,7 +3398,7 @@ Removes a page from the level list of pages. /*************************************************************//** Removes a page from the level list of pages. 
*/ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void btr_level_list_remove_func( /*=======================*/ @@ -3413,7 +3414,8 @@ btr_level_list_remove_func( ulint prev_page_no; ulint next_page_no; - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); ut_ad(space == page_get_space_id(page)); /* Get the previous and next page numbers of page */ diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index 6e6e2e08390..c1efe9ca91c 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. @@ -1185,7 +1185,7 @@ This has to be done either within the same mini-transaction, or by invoking ibuf_reset_free_bits() before mtr_commit(). @return pointer to inserted record if succeed, else NULL */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) rec_t* btr_cur_insert_if_possible( /*=======================*/ @@ -1228,7 +1228,7 @@ btr_cur_insert_if_possible( /*************************************************************//** For an insert, checks the locks and does the undo logging if desired. @return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ -UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,5,6))) +UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,5,6))) dberr_t btr_cur_ins_lock_and_undo( /*======================*/ @@ -1782,7 +1782,7 @@ btr_cur_pessimistic_insert( /*************************************************************//** For an update, checks the locks and does the undo logging. 
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */ -UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,6,7))) +UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7))) dberr_t btr_cur_upd_lock_and_undo( /*======================*/ @@ -1801,7 +1801,7 @@ btr_cur_upd_lock_and_undo( const rec_t* rec; dberr_t err; - ut_ad(thr || (flags & BTR_NO_LOCKING_FLAG)); + ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG)); if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) { /* skip LOCK, UNDO */ @@ -3141,7 +3141,7 @@ btr_cur_del_mark_set_clust_rec( ut_ad(page_is_leaf(page_align(rec))); #ifdef UNIV_DEBUG - if (btr_cur_print_record_ops && thr) { + if (btr_cur_print_record_ops && (thr != NULL)) { btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark "); rec_print_new(stderr, rec, offsets); } @@ -3299,7 +3299,7 @@ btr_cur_del_mark_set_sec_rec( rec = btr_cur_get_rec(cursor); #ifdef UNIV_DEBUG - if (btr_cur_print_record_ops && thr) { + if (btr_cur_print_record_ops && (thr != NULL)) { btr_cur_trx_report(thr_get_trx(thr)->id, cursor->index, "del mark "); rec_print(stderr, rec, cursor->index); @@ -5209,7 +5209,7 @@ btr_free_externally_stored_field( ulint i, /*!< in: field number of field_ref; ignored if rec == NULL */ enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ - mtr_t* local_mtr __attribute__((unused))) /*!< in: mtr + mtr_t* local_mtr MY_ATTRIBUTE((unused))) /*!< in: mtr containing the latch to data an an X-latch to the index tree */ { diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc index ac5e9aec67b..78cb9dfd6e0 100644 --- a/storage/xtradb/btr/btr0sea.cc +++ b/storage/xtradb/btr/btr0sea.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -512,7 +512,7 @@ btr_search_update_block_hash_info( /*==============================*/ btr_search_t* info, /*!< in: search info */ buf_block_t* block, /*!< in: buffer block */ - btr_cur_t* cursor __attribute__((unused))) + btr_cur_t* cursor MY_ATTRIBUTE((unused))) /*!< in: cursor */ { #ifdef UNIV_SYNC_DEBUG diff --git a/storage/xtradb/buf/buf0buddy.cc b/storage/xtradb/buf/buf0buddy.cc index 8f6be0cf2af..8cb880c1169 100644 --- a/storage/xtradb/buf/buf0buddy.cc +++ b/storage/xtradb/buf/buf0buddy.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -112,7 +112,7 @@ buf_buddy_mem_invalid( /**********************************************************************//** Check if a buddy is stamped free. @return whether the buddy is free */ -UNIV_INLINE __attribute__((warn_unused_result)) +UNIV_INLINE MY_ATTRIBUTE((warn_unused_result)) bool buf_buddy_stamp_is_free( /*====================*/ @@ -225,7 +225,7 @@ Checks if a buf is free i.e.: in the zip_free[]. @retval BUF_BUDDY_STATE_FREE if fully free @retval BUF_BUDDY_STATE_USED if currently in use @retval BUF_BUDDY_STATE_PARTIALLY_USED if partially in use. 
*/ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) buf_buddy_state_t buf_buddy_is_free( /*==============*/ diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 882f0af5aad..321a1d9f673 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -3621,7 +3621,7 @@ buf_page_init_low( /********************************************************************//** Inits a page to the buffer buf_pool. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void buf_page_init( /*==========*/ diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc index bd2343c223d..88ff4399115 100644 --- a/storage/xtradb/buf/buf0dump.cc +++ b/storage/xtradb/buf/buf0dump.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -105,7 +105,7 @@ SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_DUMP_STATUS'; or by: SHOW STATUS LIKE 'innodb_buffer_pool_dump_status'; */ -static __attribute__((nonnull, format(printf, 2, 3))) +static MY_ATTRIBUTE((nonnull, format(printf, 2, 3))) void buf_dump_status( /*============*/ @@ -141,7 +141,7 @@ SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'INNODB_BUFFER_POOL_LOAD_STATUS'; or by: SHOW STATUS LIKE 'innodb_buffer_pool_load_status'; */ -static __attribute__((nonnull, format(printf, 2, 3))) +static MY_ATTRIBUTE((nonnull, format(printf, 2, 3))) void buf_load_status( /*============*/ @@ -594,7 +594,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(buf_dump_thread)( /*============================*/ - void* arg __attribute__((unused))) /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { ut_ad(!srv_read_only_mode); diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index e151ca47c2b..03504b15599 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1488,7 +1488,7 @@ It attempts to make 'max' blocks available in the free list. Note that it is a best effort attempt and it is not guaranteed that after a call to this function there will be 'max' blocks in the free list. @return number of blocks for which the write request was queued. 
*/ -__attribute__((nonnull)) +MY_ATTRIBUTE((nonnull)) static void buf_flush_LRU_list_batch( @@ -1634,7 +1634,7 @@ Whether LRU or unzip_LRU is used depends on the state of the system. @return number of blocks for which either the write request was queued or in case of unzip_LRU the number of blocks actually moved to the free list */ -__attribute__((nonnull)) +MY_ATTRIBUTE((nonnull)) static void buf_do_LRU_batch( @@ -1754,7 +1754,7 @@ pages: to avoid deadlocks, this function must be written so that it cannot end up waiting for these latches! NOTE 2: in the case of a flush list flush, the calling thread is not allowed to own any latches on pages! @return number of blocks for which the write request was queued */ -__attribute__((nonnull)) +MY_ATTRIBUTE((nonnull)) static void buf_flush_batch( @@ -1932,7 +1932,7 @@ list. NOTE: The calling thread is not allowed to own any latches on pages! @return true if a batch was queued successfully. false if another batch of same type was already running. */ -__attribute__((nonnull)) +MY_ATTRIBUTE((nonnull)) static bool buf_flush_LRU( @@ -2581,7 +2581,7 @@ page_cleaner_sleep_if_needed( /*********************************************************************//** Returns the aggregate free list length over all buffer pool instances. @return total free list length. */ -__attribute__((warn_unused_result)) +MY_ATTRIBUTE((warn_unused_result)) static ulint buf_get_total_free_list_length(void) @@ -2599,7 +2599,7 @@ buf_get_total_free_list_length(void) /*********************************************************************//** Adjust the desired page cleaner thread sleep time for LRU flushes. */ -__attribute__((nonnull)) +MY_ATTRIBUTE((nonnull)) static void page_cleaner_adapt_lru_sleep_time( @@ -2638,7 +2638,7 @@ page_cleaner_adapt_lru_sleep_time( /*********************************************************************//** Get the desired page cleaner thread sleep time for flush list flushes. @return desired sleep time */ -__attribute__((warn_unused_result)) +MY_ATTRIBUTE((warn_unused_result)) static ulint page_cleaner_adapt_flush_sleep_time(void) @@ -2665,7 +2665,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(buf_flush_page_cleaner_thread)( /*==========================================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -2821,7 +2821,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(buf_flush_lru_manager_thread)( /*==========================================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc index bb0f4d44052..a40ed6a3256 100644 --- a/storage/xtradb/buf/buf0lru.cc +++ b/storage/xtradb/buf/buf0lru.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -143,7 +143,7 @@ If a compressed page is freed other compressed pages may be relocated. caller needs to free the page to the free list @retval false if BUF_BLOCK_ZIP_PAGE was removed from page_hash. In this case the block is already returned to the buddy allocator. 
*/ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool buf_LRU_block_remove_hashed( /*========================*/ @@ -368,7 +368,7 @@ want to hog the CPU and resources. Release the buffer pool and block mutex and try to force a context switch. Then reacquire the same mutexes. The current page is "fixed" before the release of the mutexes and then "unfixed" again once we have reacquired the mutexes. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void buf_flush_yield( /*============*/ @@ -407,7 +407,7 @@ If we have hogged the resources for too long then release the buffer pool and flush list mutex and do a thread yield. Set the current page to "sticky" so that it is not relocated during the yield. @return true if yielded */ -static __attribute__((nonnull(1), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1), warn_unused_result)) bool buf_flush_try_yield( /*================*/ @@ -476,7 +476,7 @@ buf_flush_try_yield( Removes a single page from a given tablespace inside a specific buffer pool instance. @return true if page was removed. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool buf_flush_or_remove_page( /*=====================*/ @@ -578,7 +578,7 @@ the list as they age towards the tail of the LRU. @retval DB_SUCCESS if all freed @retval DB_FAIL if not all freed @retval DB_INTERRUPTED if the transaction was interrupted */ -static __attribute__((nonnull(1), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1), warn_unused_result)) dberr_t buf_flush_or_remove_pages( /*======================*/ @@ -699,7 +699,7 @@ Remove or flush all the dirty pages that belong to a given tablespace inside a specific buffer pool instance. The pages will remain in the LRU list and will be evicted from the LRU list as they age and move towards the tail of the LRU list. */ -static __attribute__((nonnull(1))) +static MY_ATTRIBUTE((nonnull(1))) void buf_flush_dirty_pages( /*==================*/ @@ -739,7 +739,7 @@ buf_flush_dirty_pages( /******************************************************************//** Remove all pages that belong to a given tablespace inside a specific buffer pool instance when we are DISCARDing the tablespace. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void buf_LRU_remove_all_pages( /*=====================*/ @@ -895,7 +895,7 @@ buffer pool instance when we are deleting the data file(s) of that tablespace. The pages still remain a part of LRU and are evicted from the list as they age towards the tail of the LRU only if buf_remove is BUF_REMOVE_FLUSH_NO_WRITE. */ -static __attribute__((nonnull(1))) +static MY_ATTRIBUTE((nonnull(1))) void buf_LRU_remove_pages( /*=================*/ diff --git a/storage/xtradb/data/data0data.cc b/storage/xtradb/data/data0data.cc index 179de79b69f..593af089b00 100644 --- a/storage/xtradb/data/data0data.cc +++ b/storage/xtradb/data/data0data.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -67,7 +67,8 @@ dtuple_coll_cmp( ulint n_fields; ulint i; - ut_ad(tuple1 && tuple2); + ut_ad(tuple1 != NULL); + ut_ad(tuple2 != NULL); ut_ad(tuple1->magic_n == DATA_TUPLE_MAGIC_N); ut_ad(tuple2->magic_n == DATA_TUPLE_MAGIC_N); ut_ad(dtuple_check_typed(tuple1)); @@ -715,7 +716,7 @@ UNIV_INTERN void dtuple_convert_back_big_rec( /*========================*/ - dict_index_t* index __attribute__((unused)), /*!< in: index */ + dict_index_t* index MY_ATTRIBUTE((unused)), /*!< in: index */ dtuple_t* entry, /*!< in: entry whose data was put to vector */ big_rec_t* vector) /*!< in, own: big rec vector; it is freed in this function */ diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc index 95643eef50c..a4fcf57c028 100644 --- a/storage/xtradb/dict/dict0crea.cc +++ b/storage/xtradb/dict/dict0crea.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -246,7 +246,7 @@ dict_create_sys_columns_tuple( /***************************************************************//** Builds a table definition to insert. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_build_table_def_step( /*======================*/ @@ -573,7 +573,7 @@ dict_create_search_tuple( /***************************************************************//** Builds an index definition row to insert. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_build_index_def_step( /*======================*/ @@ -648,7 +648,7 @@ dict_build_field_def_step( /***************************************************************//** Creates an index tree for the index if it is not a member of a cluster. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_create_index_tree_step( /*========================*/ @@ -1464,7 +1464,7 @@ dict_create_or_check_foreign_constraint_tables(void) /****************************************************************//** Evaluate the given foreign key SQL statement. @return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_foreign_eval_sql( /*==================*/ @@ -1530,7 +1530,7 @@ dict_foreign_eval_sql( Add a single foreign key field definition to the data dictionary tables in the database. 
@return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_create_add_foreign_field_to_dictionary( /*========================================*/ diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index d5fd3722ba1..f1fbf25c3a6 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -5787,7 +5787,7 @@ dict_set_corrupted_index_cache_only( dict_index_t* index, /*!< in/out: index */ dict_table_t* table) /*!< in/out: table */ { - ut_ad(index); + ut_ad(index != NULL); ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); @@ -5797,8 +5797,9 @@ dict_set_corrupted_index_cache_only( if (dict_index_is_clust(index)) { dict_table_t* corrupt_table; - corrupt_table = table ? table : index->table; - ut_ad(!index->table || !table || index->table == table); + corrupt_table = (table != NULL) ? table : index->table; + ut_ad((index->table == NULL) || (table == NULL) + || index->table == table); if (corrupt_table) { corrupt_table->corrupted = TRUE; @@ -5916,11 +5917,6 @@ dict_table_get_index_on_name( { dict_index_t* index; - /* If name is NULL, just return */ - if (!name) { - return(NULL); - } - index = dict_table_get_first_index(table); while (index != NULL) { diff --git a/storage/xtradb/dict/dict0load.cc b/storage/xtradb/dict/dict0load.cc index ef8a2896b28..988351dbca5 100644 --- a/storage/xtradb/dict/dict0load.cc +++ b/storage/xtradb/dict/dict0load.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1744,7 +1744,7 @@ err_len: goto err_len; } type = mach_read_from_4(field); - if (type & (~0 << DICT_IT_BITS)) { + if (type & (~0U << DICT_IT_BITS)) { return("unknown SYS_INDEXES.TYPE bits"); } @@ -1784,7 +1784,7 @@ Loads definitions for table indexes. Adds them to the data dictionary cache. 
@return DB_SUCCESS if ok, DB_CORRUPTION if corruption of dictionary table or DB_UNSUPPORTED if table has unknown index type */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t dict_load_indexes( /*==============*/ @@ -2532,6 +2532,7 @@ func_exit: /* the table->fts could be created in dict_load_column when a user defined FTS_DOC_ID is present, but no FTS */ + fts_optimize_remove_table(table); fts_free(table); } else { fts_optimize_add_table(table); @@ -2597,14 +2598,13 @@ dict_load_table_on_id( btr_pcur_open_on_user_rec(sys_table_ids, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); -check_rec: rec = btr_pcur_get_rec(&pcur); if (page_rec_is_user_rec(rec)) { /*---------------------------------------------------*/ /* Now we have the record in the secondary index containing the table ID and NAME */ - +check_rec: field = rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLE_IDS__ID, &len); ut_ad(len == 8); @@ -2614,12 +2614,14 @@ check_rec: if (rec_get_deleted_flag(rec, 0)) { /* Until purge has completed, there may be delete-marked duplicate records - for the same SYS_TABLES.ID. - Due to Bug #60049, some delete-marked - records may survive the purge forever. */ - if (btr_pcur_move_to_next(&pcur, &mtr)) { - - goto check_rec; + for the same SYS_TABLES.ID, but different + SYS_TABLES.NAME. */ + while (btr_pcur_move_to_next(&pcur, &mtr)) { + rec = btr_pcur_get_rec(&pcur); + + if (page_rec_is_user_rec(rec)) { + goto check_rec; + } } } else { /* Now we get the table name from the record */ @@ -2788,7 +2790,7 @@ dict_load_foreign_cols( /***********************************************************************//** Loads a foreign key constraint to the dictionary cache. @return DB_SUCCESS or error code */ -static __attribute__((nonnull(1), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1), warn_unused_result)) dberr_t dict_load_foreign( /*==============*/ diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc index 58876d15f1d..24a9e00412a 100644 --- a/storage/xtradb/dict/dict0mem.cc +++ b/storage/xtradb/dict/dict0mem.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -267,7 +267,7 @@ dict_mem_table_add_col( /**********************************************************************//** Renames a column of a table in the data dictionary cache. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void dict_mem_table_col_rename_low( /*==========================*/ diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc index a5e0e2cf044..6f01c379776 100644 --- a/storage/xtradb/dict/dict0stats_bg.cc +++ b/storage/xtradb/dict/dict0stats_bg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -331,7 +331,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(dict_stats_thread)( /*==============================*/ - void* arg __attribute__((unused))) /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { ut_a(!srv_read_only_mode); @@ -352,14 +352,6 @@ DECLARE_THREAD(dict_stats_thread)( break; } -#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG - if (srv_ibuf_disable_background_merge) { - usleep(100000); - os_event_reset(dict_stats_event); - continue; - } -#endif - dict_stats_process_entry_from_recalc_pool(); os_event_reset(dict_stats_event); diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 1dd5620825f..75bb811198a 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -822,12 +822,19 @@ fil_node_open_file( ut_error; } - if (UNIV_UNLIKELY(space->flags != flags)) { + /* Validate the flags but do not compare the data directory + flag, in case this tablespace was relocated. */ + const unsigned relevant_space_flags + = space->flags & ~FSP_FLAGS_MASK_DATA_DIR; + const unsigned relevant_flags + = flags & ~FSP_FLAGS_MASK_DATA_DIR; + if (UNIV_UNLIKELY(relevant_space_flags != relevant_flags)) { fprintf(stderr, - "InnoDB: Error: table flags are 0x%lx" + "InnoDB: Error: table flags are 0x%x" " in the data dictionary\n" - "InnoDB: but the flags in file %s are 0x%lx!\n", - space->flags, node->name, flags); + "InnoDB: but the flags in file %s are 0x%x!\n", + relevant_space_flags, node->name, + relevant_flags); ut_error; } @@ -1919,7 +1926,7 @@ fil_set_max_space_id_if_bigger( Writes the flushed lsn and the latest archived log number to the page header of the first page of a data file of the system tablespace (space 0), which is uncompressed. */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) dberr_t fil_write_lsn_and_arch_no_to_file( /*==============================*/ @@ -1927,7 +1934,7 @@ fil_write_lsn_and_arch_no_to_file( ulint sum_of_sizes, /*!< in: combined size of previous files in space, in database pages */ lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no __attribute__((unused))) + ulint arch_log_no MY_ATTRIBUTE((unused))) /*!< in: archived log number to write */ { byte* buf1; @@ -2014,7 +2021,7 @@ Checks the consistency of the first data page of a tablespace at database startup. @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) const char* fil_check_first_page( /*=================*/ diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index 59328dadc38..c6a25f53c87 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -93,7 +93,7 @@ fseg_n_reserved_pages_low( /********************************************************************//** Marks a page used. 
The page must reside within the extents of the given segment. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void fseg_mark_page_used( /*================*/ @@ -132,7 +132,7 @@ fsp_fill_free_list( ulint space, /*!< in: space */ fsp_header_t* header, /*!< in/out: space header */ mtr_t* mtr) /*!< in/out: mini-transaction */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Allocates a single free page from a segment. This function implements the intelligent allocation strategy which tries to minimize file space @@ -161,7 +161,7 @@ fseg_alloc_free_page_low( in which the page should be initialized. If init_mtr!=mtr, but the page is already latched in mtr, do not initialize the page. */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -428,7 +428,7 @@ descriptor resides is x-locked. This function no longer extends the data file. @return pointer to the extent descriptor, NULL if the page does not exist in the space or if the offset is >= the free limit */ -UNIV_INLINE __attribute__((nonnull, warn_unused_result)) +UNIV_INLINE MY_ATTRIBUTE((nonnull, warn_unused_result)) xdes_t* xdes_get_descriptor_with_space_hdr( /*===============================*/ @@ -490,7 +490,7 @@ is necessary to make the descriptor defined, as they are uninitialized above the free limit. @return pointer to the extent descriptor, NULL if the page does not exist in the space or if the offset exceeds the free limit */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) xdes_t* xdes_get_descriptor( /*================*/ @@ -620,7 +620,7 @@ byte* fsp_parse_init_file_page( /*=====================*/ byte* ptr, /*!< in: buffer */ - byte* end_ptr __attribute__((unused)), /*!< in: buffer end */ + byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ buf_block_t* block) /*!< in: block or NULL */ { ut_ad(ptr && end_ptr); @@ -856,7 +856,7 @@ fsp_header_get_tablespace_size(void) Tries to extend a single-table tablespace so that a page would fit in the data file. @return TRUE if success */ -static UNIV_COLD __attribute__((nonnull, warn_unused_result)) +static UNIV_COLD MY_ATTRIBUTE((nonnull, warn_unused_result)) ibool fsp_try_extend_data_file_with_pages( /*================================*/ @@ -888,7 +888,7 @@ fsp_try_extend_data_file_with_pages( /***********************************************************************//** Tries to extend the last data file of a tablespace if it is auto-extending. @return FALSE if not auto-extending */ -static UNIV_COLD __attribute__((nonnull)) +static UNIV_COLD MY_ATTRIBUTE((nonnull)) ibool fsp_try_extend_data_file( /*=====================*/ @@ -1070,7 +1070,8 @@ fsp_fill_free_list( ulint i; mtr_t ibuf_mtr; - ut_ad(header && mtr); + ut_ad(header != NULL); + ut_ad(mtr != NULL); ut_ad(page_offset(header) == FSP_HEADER_OFFSET); /* Check if we can fill free list from above the free list limit */ @@ -1242,7 +1243,7 @@ fsp_alloc_free_extent( /**********************************************************************//** Allocates a single free page from a space. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void fsp_alloc_from_free_frag( /*=====================*/ @@ -1333,7 +1334,7 @@ Allocates a single free page from a space. The page is marked as used. 
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) buf_block_t* fsp_alloc_free_page( /*================*/ @@ -1582,9 +1583,9 @@ fsp_seg_inode_page_get_nth_inode( /*=============================*/ page_t* page, /*!< in: segment inode page */ ulint i, /*!< in: inode index on page */ - ulint zip_size __attribute__((unused)), + ulint zip_size MY_ATTRIBUTE((unused)), /*!< in: compressed page size, or 0 */ - mtr_t* mtr __attribute__((unused))) + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in/out: mini-transaction */ { ut_ad(i < FSP_SEG_INODES_PER_PAGE(zip_size)); @@ -1889,7 +1890,7 @@ fseg_get_nth_frag_page_no( /*======================*/ fseg_inode_t* inode, /*!< in: segment inode */ ulint n, /*!< in: slot index */ - mtr_t* mtr __attribute__((unused))) + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in/out: mini-transaction */ { ut_ad(inode && mtr); @@ -2971,7 +2972,7 @@ fsp_get_available_space_in_free_extents( /********************************************************************//** Marks a page used. The page must reside within the extents of the given segment. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void fseg_mark_page_used( /*================*/ @@ -3043,7 +3044,8 @@ fseg_free_page_low( ib_id_t seg_id; ulint i; - ut_ad(seg_inode && mtr); + ut_ad(seg_inode != NULL); + ut_ad(mtr != NULL); ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); @@ -3258,7 +3260,8 @@ fseg_free_extent( ulint descr_n_used; ulint i; - ut_ad(seg_inode && mtr); + ut_ad(seg_inode != NULL); + ut_ad(mtr != NULL); descr = xdes_get_descriptor(space, zip_size, page, mtr); diff --git a/storage/xtradb/fts/fts0blex.cc b/storage/xtradb/fts/fts0blex.cc index 7d0acb00a3b..2d71934fa0e 100644 --- a/storage/xtradb/fts/fts0blex.cc +++ b/storage/xtradb/fts/fts0blex.cc @@ -305,9 +305,9 @@ YY_BUFFER_STATE fts0b_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner YY_BUFFER_STATE fts0b_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); YY_BUFFER_STATE fts0b_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner ); -void *fts0balloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); -void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); -void fts0bfree (void * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); +void *fts0balloc (yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) 
MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); +void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); +void fts0bfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); #define yy_new_buffer fts0b_create_buffer @@ -347,7 +347,7 @@ typedef int yy_state_type; static yy_state_type yy_get_previous_state (yyscan_t yyscanner ); static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner); static int yy_get_next_buffer (yyscan_t yyscanner ); -static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); +static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); /* Done after the current pattern has been matched and before the * corresponding action - sets up yytext. @@ -451,7 +451,7 @@ static yyconst flex_int16_t yy_chk[32] = #line 1 "fts0blex.l" /***************************************************************************** -Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -579,11 +579,11 @@ extern int fts0bwrap (yyscan_t yyscanner ); #endif #ifndef yytext_ptr -static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))); +static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))); #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))); +static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))); #endif #ifndef YY_NO_INPUT @@ -1609,7 +1609,7 @@ YY_BUFFER_STATE fts0b_scan_bytes (yyconst char * yybytes, int _yybytes_len , y #define YY_EXIT_FAILURE 2 #endif -static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); @@ -1910,7 +1910,7 @@ int fts0blex_destroy (yyscan_t yyscanner) */ #ifndef yytext_ptr -static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { register int i; for ( i = 0; i < n; ++i ) @@ -1919,7 +1919,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yys #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) 
__attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { register int n; for ( n = 0; s[n]; ++n ) @@ -1929,12 +1929,12 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __at } #endif -void *fts0balloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void *fts0balloc (yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { return (void *) malloc( size ); } -void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { /* The cast to (char *) in the following accommodates both * implementations that use char* generic pointers, and those @@ -1946,7 +1946,7 @@ void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner return (void *) realloc( (char *) ptr, size ); } -void fts0bfree (void * ptr , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void fts0bfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { free( (char *) ptr ); /* see fts0brealloc() for (char *) cast */ } diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc index 25047b38b9d..22278338072 100644 --- a/storage/xtradb/fts/fts0fts.cc +++ b/storage/xtradb/fts/fts0fts.cc @@ -280,7 +280,7 @@ void fts_words_free( /*===========*/ ib_rbt_t* words) /*!< in: rb tree of words */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef FTS_CACHE_SIZE_DEBUG /****************************************************************//** Read the max cache size parameter from the config table. 
*/ @@ -302,7 +302,7 @@ fts_add_doc_by_id( /*==============*/ fts_trx_table_t*ftt, /*!< in: FTS trx table */ doc_id_t doc_id, /*!< in: doc id */ - ib_vector_t* fts_indexes __attribute__((unused))); + ib_vector_t* fts_indexes MY_ATTRIBUTE((unused))); /*!< in: affected fts indexes */ #ifdef FTS_DOC_STATS_DEBUG /****************************************************************//** @@ -317,7 +317,7 @@ fts_is_word_in_index( fts_table_t* fts_table, /*!< in: table instance */ const fts_string_t* word, /*!< in: the word to check */ ibool* found) /*!< out: TRUE if exists */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* FTS_DOC_STATS_DEBUG */ /******************************************************************//** @@ -332,7 +332,7 @@ fts_update_sync_doc_id( const char* table_name, /*!< in: table name, or NULL */ doc_id_t doc_id, /*!< in: last document id */ trx_t* trx) /*!< in: update trx, or NULL */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /****************************************************************//** This function loads the default InnoDB stopword list */ @@ -1075,13 +1075,12 @@ fts_words_free( } } -/*********************************************************************//** -Clear cache. */ +/** Clear cache. +@param[in,out] cache fts cache */ UNIV_INTERN void fts_cache_clear( -/*============*/ - fts_cache_t* cache) /*!< in: cache */ + fts_cache_t* cache) { ulint i; @@ -1477,7 +1476,7 @@ fts_cache_add_doc( /****************************************************************//** Drops a table. If the table can't be found we return a SUCCESS code. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_drop_table( /*===========*/ @@ -1519,7 +1518,7 @@ fts_drop_table( /****************************************************************//** Rename a single auxiliary table due to database name change. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_rename_one_aux_table( /*=====================*/ @@ -1628,7 +1627,7 @@ Drops the common ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_drop_common_tables( /*===================*/ @@ -1755,7 +1754,7 @@ Drops FTS ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_drop_all_index_tables( /*======================*/ @@ -2663,7 +2662,7 @@ fts_get_next_doc_id( This function fetch the Doc ID from CONFIG table, and compare with the Doc ID supplied. And store the larger one to the CONFIG table. @return DB_SUCCESS if OK */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t fts_cmp_set_sync_doc_id( /*====================*/ @@ -2917,7 +2916,7 @@ fts_add( /*********************************************************************//** Do commit-phase steps necessary for the deletion of a row. 
@return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_delete( /*=======*/ @@ -3008,7 +3007,7 @@ fts_delete( /*********************************************************************//** Do commit-phase steps necessary for the modification of a row. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_modify( /*=======*/ @@ -3079,7 +3078,7 @@ fts_create_doc_id( The given transaction is about to be committed; do whatever is necessary from the FTS system's POV. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_commit_table( /*=============*/ @@ -3412,7 +3411,7 @@ fts_add_doc_by_id( /*==============*/ fts_trx_table_t*ftt, /*!< in: FTS trx table */ doc_id_t doc_id, /*!< in: doc id */ - ib_vector_t* fts_indexes __attribute__((unused))) + ib_vector_t* fts_indexes MY_ATTRIBUTE((unused))) /*!< in: affected fts indexes */ { mtr_t mtr; @@ -3532,7 +3531,7 @@ fts_add_doc_by_id( get_doc, clust_index, doc_pcur, offsets, &doc); if (doc.found) { - ibool success __attribute__((unused)); + ibool success MY_ATTRIBUTE((unused)); btr_pcur_store_position(doc_pcur, &mtr); mtr_commit(&mtr); @@ -3641,7 +3640,7 @@ fts_get_max_doc_id( dict_table_t* table) /*!< in: user table */ { dict_index_t* index; - dict_field_t* dfield __attribute__((unused)) = NULL; + dict_field_t* dfield MY_ATTRIBUTE((unused)) = NULL; doc_id_t doc_id = 0; mtr_t mtr; btr_pcur_t pcur; @@ -3899,7 +3898,7 @@ fts_write_node( /*********************************************************************//** Add rows to the DELETED_CACHE table. @return DB_SUCCESS if all went well else error code*/ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_add_deleted_cache( /*=======================*/ @@ -3953,7 +3952,7 @@ fts_sync_add_deleted_cache( @param[in] index_cache index cache @param[in] unlock_cache whether unlock cache when write node @return DB_SUCCESS if all went well else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_write_words( trx_t* trx, @@ -4089,7 +4088,7 @@ fts_sync_write_words( /*********************************************************************//** Write a single documents statistics to disk. @return DB_SUCCESS if all went well else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_write_doc_stat( /*====================*/ @@ -4343,7 +4342,7 @@ fts_sync_begin( Run SYNC on the table, i.e., write out data from the index specific cache to the FTS aux INDEX table and FTS aux doc id stats table. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_index( /*===========*/ @@ -4411,7 +4410,7 @@ fts_sync_index_check( /*********************************************************************//** Commit the SYNC, change state of processed doc ids etc. 
@return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_sync_commit( /*============*/ @@ -4473,13 +4472,12 @@ fts_sync_commit( return(error); } -/*********************************************************************//** -Rollback a sync operation */ +/** Rollback a sync operation +@param[in,out] sync sync state */ static void fts_sync_rollback( -/*==============*/ - fts_sync_t* sync) /*!< in: sync state */ + fts_sync_t* sync) { trx_t* trx = sync->trx; fts_cache_t* cache = sync->table->fts->cache; @@ -6169,7 +6167,7 @@ fts_update_hex_format_flag( /*********************************************************************//** Rename an aux table to HEX format. It's called when "%016llu" is used to format an object id in table name, which only happens in Windows. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_rename_one_aux_table_to_hex_format( /*===================================*/ @@ -6260,7 +6258,7 @@ Note the ids in tables are correct but the names are old ambiguous ones. This function should make sure that either all the parent table and aux tables are set DICT_TF2_FTS_AUX_HEX_NAME with flags2 or none of them are set */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_rename_aux_tables_to_hex_format_low( /*====================================*/ @@ -6414,14 +6412,14 @@ fts_fake_hex_to_dec( { ib_id_t dec_id = 0; char tmp_id[FTS_AUX_MIN_TABLE_ID_LENGTH]; - int ret __attribute__((unused)); + int ret MY_ATTRIBUTE((unused)); ret = sprintf(tmp_id, UINT64PFx, id); ut_ad(ret == 16); #ifdef _WIN32 ret = sscanf(tmp_id, "%016llu", &dec_id); #else - ret = sscanf(tmp_id, "%016"PRIu64, &dec_id); + ret = sscanf(tmp_id, "%016" PRIu64, &dec_id); #endif /* _WIN32 */ ut_ad(ret == 1); @@ -6736,7 +6734,7 @@ fts_drop_aux_table_from_vector( Check and drop all orphaned FTS auxiliary tables, those that don't have a parent table or FTS index defined on them. @return DB_SUCCESS or error code */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void fts_check_and_drop_orphaned_tables( /*===============================*/ diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc index 711c5f53d01..1cf45961ae2 100644 --- a/storage/xtradb/fts/fts0opt.cc +++ b/storage/xtradb/fts/fts0opt.cc @@ -797,7 +797,7 @@ fts_zip_deflate_end( Read the words from the FTS INDEX. @return DB_SUCCESS if all OK, DB_TABLE_NOT_FOUND if no more indexes to search else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_index_fetch_words( /*==================*/ @@ -1131,7 +1131,7 @@ fts_optimize_lookup( /**********************************************************************//** Encode the word pos list into the node @return DB_SUCCESS or error code*/ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t fts_optimize_encode_node( /*=====================*/ @@ -1220,7 +1220,7 @@ fts_optimize_encode_node( /**********************************************************************//** Optimize the data contained in a node. 
@return DB_SUCCESS or error code*/ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t fts_optimize_node( /*==============*/ @@ -1318,7 +1318,7 @@ test_again: /**********************************************************************//** Determine the starting pos within the deleted doc id vector for a word. @return delete position */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) int fts_optimize_deleted_pos( /*=====================*/ @@ -1447,7 +1447,7 @@ fts_optimize_word( /**********************************************************************//** Update the FTS index table. This is a delete followed by an insert. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_write_word( /*====================*/ @@ -1550,7 +1550,7 @@ fts_word_free( /**********************************************************************//** Optimize the word ilist and rewrite data to the FTS index. @return status one of RESTART, EXIT, ERROR */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_compact( /*=================*/ @@ -1645,7 +1645,7 @@ fts_optimize_create( /**********************************************************************//** Get optimize start time of an FTS index. @return DB_SUCCESS if all OK else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_get_index_start_time( /*==============================*/ @@ -1661,7 +1661,7 @@ fts_optimize_get_index_start_time( /**********************************************************************//** Set the optimize start time of an FTS index. @return DB_SUCCESS if all OK else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_set_index_start_time( /*==============================*/ @@ -1677,7 +1677,7 @@ fts_optimize_set_index_start_time( /**********************************************************************//** Get optimize end time of an FTS index. @return DB_SUCCESS if all OK else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_get_index_end_time( /*============================*/ @@ -1692,7 +1692,7 @@ fts_optimize_get_index_end_time( /**********************************************************************//** Set the optimize end time of an FTS index. @return DB_SUCCESS if all OK else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_set_index_end_time( /*============================*/ @@ -1912,7 +1912,7 @@ fts_optimize_set_next_word( Optimize is complete. Set the completion time, and reset the optimize start string for this FTS index to "". @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_index_completed( /*=========================*/ @@ -1952,7 +1952,7 @@ fts_optimize_index_completed( Read the list of words from the FTS auxiliary index that will be optimized in this pass. 
@return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_index_read_words( /*==========================*/ @@ -2009,7 +2009,7 @@ fts_optimize_index_read_words( Run OPTIMIZE on the given FTS index. Note: this can take a very long time (hours). @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_index( /*===============*/ @@ -2080,7 +2080,7 @@ fts_optimize_index( /**********************************************************************//** Delete the document ids in the delete, and delete cache tables. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_purge_deleted_doc_ids( /*===============================*/ @@ -2149,7 +2149,7 @@ fts_optimize_purge_deleted_doc_ids( /**********************************************************************//** Delete the document ids in the pending delete, and delete tables. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_purge_deleted_doc_id_snapshot( /*=======================================*/ @@ -2199,7 +2199,7 @@ Copy the deleted doc ids that will be purged during this optimize run to the being deleted FTS auxiliary tables. The transaction is committed upon successfull copy and rolled back on DB_DUPLICATE_KEY error. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_create_deleted_doc_id_snapshot( /*========================================*/ @@ -2237,7 +2237,7 @@ fts_optimize_create_deleted_doc_id_snapshot( Read in the document ids that are to be purged during optimize. The transaction is committed upon successfully read. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_read_deleted_doc_id_snapshot( /*======================================*/ @@ -2274,7 +2274,7 @@ Optimze all the FTS indexes, skipping those that have already been optimized, since the FTS auxiliary indexes are not guaranteed to be of the same cardinality. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_indexes( /*=================*/ @@ -2344,7 +2344,7 @@ fts_optimize_indexes( /*********************************************************************//** Cleanup the snapshot tables and the master deleted table. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_purge_snapshot( /*========================*/ @@ -2373,7 +2373,7 @@ fts_optimize_purge_snapshot( /*********************************************************************//** Reset the start time to 0 so that a new optimize can be started. 
@return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_optimize_reset_start_time( /*==========================*/ @@ -2412,7 +2412,7 @@ fts_optimize_reset_start_time( /*********************************************************************//** Run OPTIMIZE on the given table by a background thread. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t fts_optimize_table_bk( /*==================*/ @@ -2757,6 +2757,7 @@ fts_optimize_new_table( empty_slot = i; } else if (slot->table->id == table->id) { /* Already exists in our optimize queue. */ + ut_ad(slot->table_id == table->id); return(FALSE); } } @@ -2974,6 +2975,13 @@ fts_optimize_sync_table( { dict_table_t* table = NULL; + /* Prevent DROP INDEX etc. from running when we are syncing + cache in background. */ + if (!rw_lock_s_lock_nowait(&dict_operation_lock, __FILE__, __LINE__)) { + /* Exit when fail to get dict operation lock. */ + return; + } + table = dict_table_open_on_id(table_id, FALSE, DICT_TABLE_OP_NORMAL); if (table) { @@ -2983,6 +2991,8 @@ fts_optimize_sync_table( dict_table_close(table, FALSE, FALSE); } + + rw_lock_s_unlock(&dict_operation_lock); } /**********************************************************************//** diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc index fcae6561764..2c44a21a8f2 100644 --- a/storage/xtradb/fts/fts0que.cc +++ b/storage/xtradb/fts/fts0que.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -287,7 +287,7 @@ fts_expand_query( dict_index_t* index, /*!< in: FTS index to search */ fts_query_t* query) /*!< in: query result, to be freed by the client */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** This function finds documents that contain all words in a phrase or proximity search. And if proximity search, verify @@ -1128,7 +1128,7 @@ cont_search: /*****************************************************************//** Set difference. @return DB_SUCCESS if all go well */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_difference( /*=================*/ @@ -1220,7 +1220,7 @@ fts_query_difference( /*****************************************************************//** Intersect the token doc ids with the current set. @return DB_SUCCESS if all go well */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_intersect( /*================*/ @@ -1398,7 +1398,7 @@ fts_query_cache( /*****************************************************************//** Set union. @return DB_SUCCESS if all go well */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_union( /*============*/ @@ -2014,7 +2014,7 @@ fts_query_select( Read the rows from the FTS index, that match word and where the doc id is between first and last doc id. 
@return DB_SUCCESS if all go well else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_find_term( /*================*/ @@ -2154,7 +2154,7 @@ fts_query_sum( /******************************************************************** Calculate the total documents that contain a particular word (term). @return DB_SUCCESS if all go well else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_total_docs_containing_term( /*=================================*/ @@ -2233,7 +2233,7 @@ fts_query_total_docs_containing_term( /******************************************************************** Get the total number of words in a documents. @return DB_SUCCESS if all go well else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_terms_in_document( /*========================*/ @@ -2314,7 +2314,7 @@ fts_query_terms_in_document( /*****************************************************************//** Retrieve the document and match the phrase tokens. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_match_document( /*=====================*/ @@ -2360,7 +2360,7 @@ fts_query_match_document( This function fetches the original documents and count the words in between matching words to see that is in specified distance @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool fts_query_is_in_proximity_range( /*============================*/ @@ -2415,7 +2415,7 @@ fts_query_is_in_proximity_range( Iterate over the matched document ids and search the for the actual phrase in the text. @return DB_SUCCESS if all OK */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_search_phrase( /*====================*/ @@ -2503,7 +2503,7 @@ func_exit: /*****************************************************************//** Text/Phrase search. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_phrase_search( /*====================*/ @@ -2754,7 +2754,7 @@ func_exit: /*****************************************************************//** Find the word and evaluate. @return DB_SUCCESS if all go well */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_query_execute( /*==============*/ @@ -4123,7 +4123,7 @@ words in documents found in the first search pass will be used as search arguments to search the document again, thus "expand" the search result set. 
@return DB_SUCCESS if success, otherwise the error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fts_expand_query( /*=============*/ diff --git a/storage/xtradb/fts/fts0tlex.cc b/storage/xtradb/fts/fts0tlex.cc index b744fbf0763..d4d9b4c48d1 100644 --- a/storage/xtradb/fts/fts0tlex.cc +++ b/storage/xtradb/fts/fts0tlex.cc @@ -305,9 +305,9 @@ YY_BUFFER_STATE fts0t_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner YY_BUFFER_STATE fts0t_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); YY_BUFFER_STATE fts0t_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner ); -void *fts0talloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); -void *fts0trealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); -void fts0tfree (void * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); +void *fts0talloc (yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); +void *fts0trealloc (void *,yy_size_t , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); +void fts0tfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); #define yy_new_buffer fts0t_create_buffer @@ -347,7 +347,7 @@ typedef int yy_state_type; static yy_state_type yy_get_previous_state (yyscan_t yyscanner ); static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner); static int yy_get_next_buffer (yyscan_t yyscanner ); -static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) ); +static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) ); /* Done after the current pattern has been matched and 
before the * corresponding action - sets up yytext. @@ -447,7 +447,7 @@ static yyconst flex_int16_t yy_chk[29] = #line 1 "fts0tlex.l" /***************************************************************************** -Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -575,11 +575,11 @@ extern int fts0twrap (yyscan_t yyscanner ); #endif #ifndef yytext_ptr -static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))); +static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))); #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))); +static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))); #endif #ifndef YY_NO_INPUT @@ -1602,7 +1602,7 @@ YY_BUFFER_STATE fts0t_scan_bytes (yyconst char * yybytes, int _yybytes_len , y #define YY_EXIT_FAILURE 2 #endif -static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); @@ -1903,7 +1903,7 @@ int fts0tlex_destroy (yyscan_t yyscanner) */ #ifndef yytext_ptr -static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) 
MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { register int i; for ( i = 0; i < n; ++i ) @@ -1912,7 +1912,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yys #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { register int n; for ( n = 0; s[n]; ++n ) @@ -1922,12 +1922,12 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __at } #endif -void *fts0talloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void *fts0talloc (yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { return (void *) malloc( size ); } -void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { /* The cast to (char *) in the following accommodates both * implementations that use char* generic pointers, and those @@ -1939,7 +1939,7 @@ void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner return (void *) realloc( (char *) ptr, size ); } -void fts0tfree (void * ptr , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused))) +void fts0tfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { free( (char *) ptr ); /* see fts0trealloc() for (char *) cast */ } diff --git a/storage/xtradb/fts/make_parser.sh b/storage/xtradb/fts/make_parser.sh index 2c072914c8b..52b63eff674 100755 --- a/storage/xtradb/fts/make_parser.sh +++ b/storage/xtradb/fts/make_parser.sh @@ -1,6 +1,6 @@ #!/bin/sh # 
-# Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +# Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software @@ -22,15 +22,15 @@ make -f Makefile.query echo '#include "univ.i"' > $TMPF # This is to avoid compiler warning about unused parameters. -# FIXME: gcc extension "__attribute__" causing compilation errors on windows +# FIXME: gcc extension "MY_ATTRIBUTE" causing compilation errors on windows # platform. Quote them out for now. sed -e ' -s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; +s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; ' < fts0blex.cc >> $TMPF mv $TMPF fts0blex.cc @@ -38,12 +38,12 @@ mv $TMPF fts0blex.cc echo '#include "univ.i"' > $TMPF sed -e ' -s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; -s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 __attribute__((unused))/; +s/^\(static.*void.*yy_fatal_error.*msg.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(static.*void.*yy_flex_strncpy.*n.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(static.*int.*yy_flex_strlen.*s.*,\)\(.*yyscanner\)/\1 \2 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]alloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]realloc.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; +s/^\(\(static\|void\).*fts0[bt]free.*,\)\(.*yyscanner\)/\1 \3 MY_ATTRIBUTE((unused))/; ' < fts0tlex.cc >> $TMPF mv $TMPF fts0tlex.cc diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 06230a95076..c492bfcdd1f 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1737,7 +1737,7 @@ thd_expand_fast_index_creation( /********************************************************************//** Obtain the InnoDB transaction of a MySQL thread. 
@return reference to transaction pointer */ -__attribute__((warn_unused_result, nonnull)) +MY_ATTRIBUTE((warn_unused_result, nonnull)) static inline trx_t*& thd_to_trx( @@ -1763,8 +1763,8 @@ static int innobase_release_temporary_latches( /*===============================*/ - handlerton* hton __attribute__((unused)), /*!< in: handlerton */ - THD* thd __attribute__((unused))) /*!< in: MySQL thread */ + handlerton* hton MY_ATTRIBUTE((unused)), /*!< in: handlerton */ + THD* thd MY_ATTRIBUTE((unused))) /*!< in: MySQL thread */ { #ifdef UNIV_DEBUG DBUG_ASSERT(hton == innodb_hton_ptr); @@ -4006,7 +4006,7 @@ int innobase_end( /*=========*/ handlerton* hton, /*!< in/out: InnoDB handlerton */ - ha_panic_function type __attribute__((unused))) + ha_panic_function type MY_ATTRIBUTE((unused))) /*!< in: ha_panic() parameter */ { int err= 0; @@ -4093,7 +4093,7 @@ static my_bool innobase_is_fake_change( /*====================*/ - handlerton *hton __attribute__((unused)), + handlerton *hton MY_ATTRIBUTE((unused)), /*!< in: InnoDB handlerton */ THD* thd) /*!< in: MySQL thread handle of the user for whom the transaction is being committed */ @@ -9456,7 +9456,7 @@ create_table_check_doc_id_col( /*****************************************************************//** Creates a table definition to an InnoDB database. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) int create_table_def( /*=============*/ @@ -10913,6 +10913,25 @@ ha_innobase::discard_or_import_tablespace( /* Commit the transaction in order to release the table lock. */ trx_commit_for_mysql(prebuilt->trx); + if (err == DB_SUCCESS && !discard + && dict_stats_is_persistent_enabled(dict_table)) { + dberr_t ret; + + /* Adjust the persistent statistics. */ + ret = dict_stats_update(dict_table, + DICT_STATS_RECALC_PERSISTENT); + + if (ret != DB_SUCCESS) { + push_warning_printf( + ha_thd(), + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "Error updating stats for table '%s'" + " after table rebuild: %s", + dict_table->name, ut_strerr(ret)); + } + } + DBUG_RETURN(convert_error_code_to_mysql(err, dict_table->flags, NULL)); } @@ -11188,7 +11207,7 @@ innobase_drop_database( /*********************************************************************//** Renames an InnoDB table. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t innobase_rename_table( /*==================*/ @@ -16192,7 +16211,7 @@ static char* srv_buffer_pool_evict; Evict all uncompressed pages of compressed tables from the buffer pool. Keep the compressed pages in the buffer pool. 
@return whether all uncompressed pages were evicted */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) bool innodb_buffer_pool_evict_uncompressed(void) /*=======================================*/ @@ -16785,13 +16804,13 @@ void purge_run_now_set( /*==============*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -16808,13 +16827,13 @@ void purge_stop_now_set( /*===============*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -16830,13 +16849,13 @@ void checkpoint_now_set( /*===============*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -16857,13 +16876,13 @@ void buf_flush_list_now_set( /*===================*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system - variable */ - __attribute__((unused)), + variable */ + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal - string goes */ - __attribute__((unused)), + string goes */ + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -16880,13 +16899,13 @@ void track_redo_log_now_set( /*===================*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -16995,13 +17014,13 @@ void buffer_pool_dump_now( /*=================*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -17018,13 +17037,13 @@ void buffer_pool_load_now( /*=================*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -17041,13 +17060,13 @@ void buffer_pool_load_abort( 
/*===================*/ THD* thd /*!< in: thread handle */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), struct st_mysql_sys_var* var /*!< in: pointer to system variable */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), void* var_ptr /*!< out: where the formal string goes */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const void* save) /*!< in: immediate result from check function */ { @@ -17065,10 +17084,10 @@ which control InnoDB "status monitor" output to the error log. static void innodb_status_output_update( - THD* thd __attribute__((unused)), - struct st_mysql_sys_var* var __attribute__((unused)), - void* var_ptr __attribute__((unused)), - const void* save __attribute__((unused))) + THD* thd MY_ATTRIBUTE((unused)), + struct st_mysql_sys_var* var MY_ATTRIBUTE((unused)), + void* var_ptr MY_ATTRIBUTE((unused)), + const void* save MY_ATTRIBUTE((unused))) { *static_cast(var_ptr) = *static_cast(save); /* The lock timeout monitor thread also takes care of this diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index b4c0e0b7cf4..c9f9cfabc1f 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -432,14 +432,14 @@ enum durability_properties thd_get_durability_property(const MYSQL_THD thd); @param off auto_increment_offset @param inc auto_increment_increment */ void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc) -__attribute__((nonnull)); +MY_ATTRIBUTE((nonnull)); /** Is strict sql_mode set. @param thd Thread object @return True if sql_mode has strict mode (all or trans), false otherwise. */ bool thd_is_strict_mode(const MYSQL_THD thd) -__attribute__((nonnull)); +MY_ATTRIBUTE((nonnull)); } /* extern "C" */ struct trx_t; @@ -477,7 +477,7 @@ innobase_index_name_is_reserved( const KEY* key_info, /*!< in: Indexes to be created */ ulint num_of_keys) /*!< in: Number of indexes to be created. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Determines InnoDB table flags. @@ -494,7 +494,7 @@ innobase_table_flags( outside system tablespace */ ulint* flags, /*!< out: DICT_TF flags */ ulint* flags2) /*!< out: DICT_TF2 flags */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Validates the create options. We may build on this function @@ -511,7 +511,7 @@ create_options_are_invalid( columns and indexes */ HA_CREATE_INFO* create_info, /*!< in: create info. 
*/ bool use_tablespace) /*!< in: srv_file_per_table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Retrieve the FTS Relevance Ranking result for doc with doc_id @@ -541,7 +541,7 @@ void innobase_fts_close_ranking( /*=======================*/ FT_INFO* fts_hdl) /*!< in: FTS handler */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*****************************************************************//** Initialize the table FTS stopword list @return TRUE if success */ @@ -552,7 +552,7 @@ innobase_fts_load_stopword( dict_table_t* table, /*!< in: Table has the FTS */ trx_t* trx, /*!< in: transaction */ THD* thd) /*!< in: current thread */ - __attribute__((nonnull(1,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,3), warn_unused_result)); /** Some defines for innobase_fts_check_doc_id_index() return value */ enum fts_doc_id_index_enum { @@ -574,7 +574,7 @@ innobase_fts_check_doc_id_index( that is being altered */ ulint* fts_doc_col_no) /*!< out: The column number for Doc ID */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*******************************************************************//** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME @@ -587,7 +587,7 @@ innobase_fts_check_doc_id_index_in_def( /*===================================*/ ulint n_key, /*!< in: Number of keys */ const KEY* key_info) /*!< in: Key definitions */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************** @return version of the extended FTS API */ diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index cc016d8cb6d..1a39f70614d 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -98,7 +98,7 @@ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_NOREBUILD | Alter_inplace_info::ALTER_COLUMN_NAME; /* Report an InnoDB error to the client by invoking my_error(). */ -static UNIV_COLD __attribute__((nonnull)) +static UNIV_COLD MY_ATTRIBUTE((nonnull)) void my_error_innodb( /*============*/ @@ -195,7 +195,7 @@ innobase_fulltext_exist( Determine if ALTER TABLE needs to rebuild the table. 
@param ha_alter_info the DDL operation @return whether it is necessary to rebuild the table */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_need_rebuild( /*==================*/ @@ -515,7 +515,7 @@ ha_innobase::check_if_supported_inplace_alter( /*************************************************************//** Initialize the dict_foreign_t structure with supplied info @return true if added, false if duplicate foreign->id */ -static __attribute__((nonnull(1,3,5,7))) +static MY_ATTRIBUTE((nonnull(1,3,5,7))) bool innobase_init_foreign( /*==================*/ @@ -604,7 +604,7 @@ innobase_init_foreign( /*************************************************************//** Check whether the foreign key options is legit @return true if it is */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_check_fk_option( /*=====================*/ @@ -636,7 +636,7 @@ innobase_check_fk_option( /*************************************************************//** Set foreign key options @return true if successfully set */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_set_foreign_key_option( /*============================*/ @@ -681,7 +681,7 @@ innobase_set_foreign_key_option( Check if a foreign key constraint can make use of an index that is being created. @return useable index, or NULL if none found */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const KEY* innobase_find_equiv_index( /*======================*/ @@ -737,7 +737,7 @@ no_match: Find an index whose first fields are the columns in the array in the same order and is not marked for deletion @return matching index, NULL if not found */ -static __attribute__((nonnull(1,2,6), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,2,6), warn_unused_result)) dict_index_t* innobase_find_fk_index( /*===================*/ @@ -784,7 +784,7 @@ next_rec: Create InnoDB foreign key structure from MySQL alter_info @retval true if successful @retval false on error (will call my_error()) */ -static __attribute__((nonnull(1,2,3,7,8), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,2,3,7,8), warn_unused_result)) bool innobase_get_foreign_key_info( /*==========================*/ @@ -1269,7 +1269,7 @@ innobase_rec_reset( /*******************************************************************//** This function checks that index keys are sensible. 
@return 0 or error number */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) int innobase_check_index_keys( /*======================*/ @@ -1390,7 +1390,7 @@ name_ok: /*******************************************************************//** Create index field definition for key part */ -static __attribute__((nonnull(2,3))) +static MY_ATTRIBUTE((nonnull(2,3))) void innobase_create_index_field_def( /*============================*/ @@ -1437,7 +1437,7 @@ innobase_create_index_field_def( /*******************************************************************//** Create index definition for key */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void innobase_create_index_def( /*======================*/ @@ -1721,7 +1721,7 @@ ELSE ENDIF @return key definitions */ -static __attribute__((nonnull, warn_unused_result, malloc)) +static MY_ATTRIBUTE((nonnull, warn_unused_result, malloc)) index_def_t* innobase_create_key_defs( /*=====================*/ @@ -1940,7 +1940,7 @@ created_clustered: /*******************************************************************//** Check each index column size, make sure they do not exceed the max limit @return true if index column size exceeds limit */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_check_column_length( /*=========================*/ @@ -2090,7 +2090,7 @@ online_retry_drop_indexes_low( /********************************************************************//** Drop any indexes that we were not able to free previously due to open table handles. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void online_retry_drop_indexes( /*======================*/ @@ -2120,7 +2120,7 @@ online_retry_drop_indexes( /********************************************************************//** Commit a dictionary transaction and drop any indexes that we were not able to free previously due to open table handles. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void online_retry_drop_indexes_with_trx( /*===============================*/ @@ -2149,7 +2149,7 @@ online_retry_drop_indexes_with_trx( @param drop_fk constraints being dropped @param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ -inline __attribute__((pure, nonnull, warn_unused_result)) +inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool innobase_dropping_foreign( /*======================*/ @@ -2176,7 +2176,7 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static __attribute__((pure, nonnull, warn_unused_result)) +static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool innobase_check_foreigns_low( /*========================*/ @@ -2276,7 +2276,7 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static __attribute__((pure, nonnull, warn_unused_result)) +static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool innobase_check_foreigns( /*====================*/ @@ -2321,7 +2321,7 @@ innobase_check_foreigns( @param dfield InnoDB data field to copy to @param field MySQL value for the column @param comp nonzero if in compact format */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void innobase_build_col_map_add( /*=======================*/ @@ -2355,7 +2355,7 @@ adding columns. 
@param heap Memory heap where allocated @return array of integers, mapping column numbers in the table to column numbers in altered_table */ -static __attribute__((nonnull(1,2,3,4,5,7), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,2,3,4,5,7), warn_unused_result)) const ulint* innobase_build_col_map( /*===================*/ @@ -2492,7 +2492,7 @@ innobase_drop_fts_index_table( @param user_table InnoDB table as it is before the ALTER operation @param heap Memory heap for the allocation @return array of new column names in rebuilt_table, or NULL if not renamed */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const char** innobase_get_col_names( Alter_inplace_info* ha_alter_info, @@ -2555,7 +2555,7 @@ while preparing ALTER TABLE. @retval true Failure @retval false Success */ -static __attribute__((warn_unused_result, nonnull(1,2,3,4))) +static MY_ATTRIBUTE((warn_unused_result, nonnull(1,2,3,4))) bool prepare_inplace_alter_table_dict( /*=============================*/ @@ -3196,7 +3196,7 @@ err_exit: /* Check whether an index is needed for the foreign key constraint. If so, if it is dropped, is there an equivalent index can play its role. @return true if the index is needed and can't be dropped */ -static __attribute__((nonnull(1,2,3,5), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,2,3,5), warn_unused_result)) bool innobase_check_foreign_key_index( /*=============================*/ @@ -4076,7 +4076,7 @@ temparary index prefix @param locked TRUE=table locked, FALSE=may need to do a lazy drop @param trx the transaction */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void innobase_rollback_sec_index( /*========================*/ @@ -4110,7 +4110,7 @@ during prepare, but might not be during commit). @retval true Failure @retval false Success */ -inline __attribute__((nonnull, warn_unused_result)) +inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool rollback_inplace_alter_table( /*=========================*/ @@ -4242,7 +4242,7 @@ func_exit: @param foreign_id Foreign key constraint identifier @retval true Failure @retval false Success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_drop_foreign_try( /*======================*/ @@ -4299,7 +4299,7 @@ innobase_drop_foreign_try( @param new_clustered whether the table has been rebuilt @retval true Failure @retval false Success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_rename_column_try( /*=======================*/ @@ -4508,7 +4508,7 @@ rename_foreign: @param table_name Table name in MySQL @retval true Failure @retval false Success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_rename_columns_try( /*========================*/ @@ -4558,7 +4558,7 @@ as part of commit_cache_norebuild(). @param ha_alter_info Data used during in-place alter. 
@param table the TABLE @param user_table InnoDB table that was being altered */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void innobase_rename_columns_cache( /*==========================*/ @@ -4602,7 +4602,7 @@ processed_field: @param altered_table MySQL table that is being altered @param old_table MySQL table as it is before the ALTER operation @return the next auto-increment value (0 if not present) */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulonglong commit_get_autoinc( /*===============*/ @@ -4684,7 +4684,7 @@ but do not touch the data dictionary cache. @retval true Failure @retval false Success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_update_foreign_try( /*========================*/ @@ -4767,7 +4767,7 @@ after the changes to data dictionary tables were committed. @param ctx In-place ALTER TABLE context @param user_thd MySQL connection @return InnoDB error code (should always be DB_SUCCESS) */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t innobase_update_foreign_cache( /*==========================*/ @@ -4852,7 +4852,7 @@ when rebuilding the table. @retval true Failure @retval false Success */ -inline __attribute__((nonnull, warn_unused_result)) +inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool commit_try_rebuild( /*===============*/ @@ -5014,7 +5014,7 @@ commit_try_rebuild( /** Apply the changes made during commit_try_rebuild(), to the data dictionary cache and the file system. @param ctx In-place ALTER TABLE context */ -inline __attribute__((nonnull)) +inline MY_ATTRIBUTE((nonnull)) void commit_cache_rebuild( /*=================*/ @@ -5111,7 +5111,7 @@ when not rebuilding the table. @retval true Failure @retval false Success */ -inline __attribute__((nonnull, warn_unused_result)) +inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool commit_try_norebuild( /*=================*/ @@ -5221,7 +5221,7 @@ after a successful commit_try_norebuild() call. @param trx Data dictionary transaction object (will be started and committed) @return whether all replacements were found for dropped indexes */ -inline __attribute__((nonnull, warn_unused_result)) +inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool commit_cache_norebuild( /*===================*/ diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc index 4bca5bbdf2a..4334fd8c6dd 100644 --- a/storage/xtradb/ibuf/ibuf0ibuf.cc +++ b/storage/xtradb/ibuf/ibuf0ibuf.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -698,7 +698,7 @@ byte* ibuf_parse_bitmap_init( /*===================*/ byte* ptr, /*!< in: buffer */ - byte* end_ptr __attribute__((unused)), /*!< in: buffer end */ + byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ buf_block_t* block, /*!< in: block or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ { @@ -2535,7 +2535,7 @@ ibuf_get_merge_page_nos_func( /*******************************************************************//** Get the matching records for space id. 
@return current rec or NULL */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const rec_t* ibuf_get_user_rec( /*===============*/ @@ -2557,7 +2557,7 @@ ibuf_get_user_rec( Reads page numbers for a space id from an ibuf tree. @return a lower limit for the combined volume of records which will be merged */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulint ibuf_get_merge_pages( /*=================*/ @@ -2665,40 +2665,22 @@ ibuf_merge_pages( } /*********************************************************************//** -Get the table instance from the table id. -@return table instance */ -static __attribute__((warn_unused_result)) -dict_table_t* -ibuf_get_table( -/*===========*/ - table_id_t table_id) /*!< in: valid table id */ -{ - rw_lock_s_lock_func(&dict_operation_lock, 0, __FILE__, __LINE__); - - dict_table_t* table = dict_table_open_on_id( - table_id, FALSE, DICT_TABLE_OP_NORMAL); - - rw_lock_s_unlock_gen(&dict_operation_lock, 0); - - return(table); -} - -/*********************************************************************//** -Contracts insert buffer trees by reading pages to the buffer pool. -@return a lower limit for the combined size in bytes of entries which -will be merged from ibuf trees to the pages read, 0 if ibuf is -empty */ -static +Contracts insert buffer trees by reading pages referring to space_id +to the buffer pool. +@returns number of pages merged.*/ +UNIV_INTERN ulint ibuf_merge_space( /*=============*/ - ulint space, /*!< in: tablespace id to merge */ - ulint* n_pages)/*!< out: number of pages to which merged */ + ulint space) /*!< in: tablespace id to merge */ { mtr_t mtr; btr_pcur_t pcur; mem_heap_t* heap = mem_heap_create(512); dtuple_t* tuple = ibuf_search_tuple_build(space, 0, heap); + ulint n_pages = 0; + + ut_ad(space < SRV_LOG_SPACE_FIRST_ID); ibuf_mtr_start(&mtr); @@ -2730,52 +2712,46 @@ ibuf_merge_space( } else { sum_sizes = ibuf_get_merge_pages( - &pcur, space, IBUF_MAX_N_PAGES_MERGED, - &pages[0], &spaces[0], &versions[0], n_pages, - &mtr); - - if (*n_pages > 0) { - ++sum_sizes; - } + &pcur, space, IBUF_MAX_N_PAGES_MERGED, + &pages[0], &spaces[0], &versions[0], &n_pages, + &mtr); + ib_logf(IB_LOG_LEVEL_INFO,"\n Size of pages merged %lu" + ,sum_sizes); } ibuf_mtr_commit(&mtr); btr_pcur_close(&pcur); - if (sum_sizes > 0) { - - ut_a(*n_pages > 0 || sum_sizes == 1); + if (n_pages > 0) { #ifdef UNIV_DEBUG - ut_ad(*n_pages <= UT_ARR_SIZE(pages)); + ut_ad(n_pages <= UT_ARR_SIZE(pages)); - for (ulint i = 0; i < *n_pages; ++i) { + for (ulint i = 0; i < n_pages; ++i) { ut_ad(spaces[i] == space); ut_ad(i == 0 || versions[i] == versions[i - 1]); } #endif /* UNIV_DEBUG */ buf_read_ibuf_merge_pages( - true, spaces, versions, pages, *n_pages); + true, spaces, versions, pages, n_pages); } - return(sum_sizes); + return(n_pages); } -/*********************************************************************//** -Contracts insert buffer trees by reading pages to the buffer pool. +/** Contract the change buffer by reading pages to the buffer pool. 
+@param[out] n_pages number of pages merged +@param[in] sync whether the caller waits for +the issued reads to complete @return a lower limit for the combined size in bytes of entries which will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulint ibuf_merge( /*=======*/ - table_id_t table_id, /*!< in: if merge should be - done only for a specific - table, for all tables this - should be 0 */ ulint* n_pages, /*!< out: number of pages to which merged */ bool sync) /*!< in: TRUE if the caller @@ -2783,8 +2759,6 @@ ibuf_merge( read with the highest tablespace address to complete */ { - dict_table_t* table; - *n_pages = 0; /* We perform a dirty read of ibuf->empty, without latching @@ -2798,55 +2772,45 @@ ibuf_merge( } else if (ibuf_debug) { return(0); #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ - } else if (table_id == 0) { + } else { return(ibuf_merge_pages(n_pages, sync)); - } else if ((table = ibuf_get_table(table_id)) == 0) { - /* Table has been dropped. */ - return(0); } - - ulint volume = ibuf_merge_space(table->space, n_pages); - - dict_table_close(table, FALSE, FALSE); - - return(volume); } -/*********************************************************************//** -Contracts insert buffer trees by reading pages to the buffer pool. +/** Contract the change buffer by reading pages to the buffer pool. +@param[in] sync whether the caller waits for +the issued reads to complete @return a lower limit for the combined size in bytes of entries which -will be merged from ibuf trees to the pages read, 0 if ibuf is -empty */ +will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ static ulint ibuf_contract( /*==========*/ - ibool sync) /*!< in: TRUE if the caller wants to wait for the + bool sync) /*!< in: TRUE if the caller wants to wait for the issued read with the highest tablespace address to complete */ { ulint n_pages; - return(ibuf_merge(0, &n_pages, sync)); + return(ibuf_merge_pages(&n_pages, sync)); } -/*********************************************************************//** -Contracts insert buffer trees by reading pages to the buffer pool. +/** Contract the change buffer by reading pages to the buffer pool. +@param[in] full If true, do a full contraction based +on PCT_IO(100). If false, the size of contract batch is determined +based on the current size of the change buffer. @return a lower limit for the combined size in bytes of entries which will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ UNIV_INTERN ulint -ibuf_contract_in_background( -/*========================*/ - table_id_t table_id, /*!< in: if merge should be done only - for a specific table, for all tables - this should be 0 */ - ibool full) /*!< in: TRUE if the caller wants to - do a full contract based on PCT_IO(100). - If FALSE then the size of contract - batch is determined based on the - current size of the ibuf tree. */ +ibuf_merge_in_background( +/*=====================*/ + bool full) /*!< in: TRUE if the caller wants to + do a full contract based on PCT_IO(100). + If FALSE then the size of contract + batch is determined based on the + current size of the ibuf tree. 
*/ { ulint sum_bytes = 0; ulint sum_pages = 0; @@ -2854,7 +2818,7 @@ ibuf_contract_in_background( ulint n_pages; #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG - if (srv_ibuf_disable_background_merge && table_id == 0) { + if (srv_ibuf_disable_background_merge) { return(0); } #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ @@ -2883,7 +2847,7 @@ ibuf_contract_in_background( while (sum_pages < n_pages) { ulint n_bytes; - n_bytes = ibuf_merge(table_id, &n_pag2, FALSE); + n_bytes = ibuf_merge(&n_pag2, false); if (n_bytes == 0) { return(sum_bytes); @@ -3489,7 +3453,7 @@ ibuf_get_entry_counter_func( Buffer an operation in the insert/delete buffer, instead of doing it directly to the disk page, if this is possible. @return DB_SUCCESS, DB_STRONG_FAIL or other error */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t ibuf_insert_low( /*============*/ @@ -3558,8 +3522,7 @@ ibuf_insert_low( #ifdef UNIV_IBUF_DEBUG fputs("Ibuf too big\n", stderr); #endif - /* Use synchronous contract (== TRUE) */ - ibuf_contract(TRUE); + ibuf_contract(true); return(DB_STRONG_FAIL); } @@ -3982,7 +3945,7 @@ skip_watch: During merge, inserts to an index page a secondary index entry extracted from the insert buffer. @return newly inserted record */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) rec_t* ibuf_insert_to_index_page_low( /*==========================*/ @@ -4413,7 +4376,7 @@ ibuf_delete( /*********************************************************************//** Restores insert buffer tree cursor position @return TRUE if the position was restored; FALSE if not */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) ibool ibuf_restore_pos( /*=============*/ @@ -4468,7 +4431,7 @@ Deletes from ibuf the record on which pcur is positioned. If we have to resort to a pessimistic delete, this function commits mtr and closes the cursor. @return TRUE if mtr was committed and pcur closed in this operation */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) ibool ibuf_delete_rec( /*============*/ diff --git a/storage/xtradb/include/api0api.h b/storage/xtradb/include/api0api.h index e4c9c941de5..500bf4fe3b2 100644 --- a/storage/xtradb/include/api0api.h +++ b/storage/xtradb/include/api0api.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,7 +36,7 @@ InnoDB Native API #endif #if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER) -#define UNIV_NO_IGNORE __attribute__ ((warn_unused_result)) +#define UNIV_NO_IGNORE MY_ATTRIBUTE ((warn_unused_result)) #else #define UNIV_NO_IGNORE #endif /* __GNUC__ && __GNUC__ > 2 && !__INTEL_COMPILER */ diff --git a/storage/xtradb/include/btr0btr.h b/storage/xtradb/include/btr0btr.h index a3f7cee2733..add45db441e 100644 --- a/storage/xtradb/include/btr0btr.h +++ b/storage/xtradb/include/btr0btr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
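The ibuf0ibuf.cc hunks above reshape the change-buffer merge entry points: ibuf_merge_space() now takes only a tablespace id and reports the number of pages merged, ibuf_merge() and ibuf_contract() drop the table_id parameter, and ibuf_contract_in_background() becomes ibuf_merge_in_background(bool). The toy sketch below shows only the post-patch calling convention; the stub bodies and return values are placeholders so the example compiles standalone and are not InnoDB code.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long ulint;

/* Stubs carrying the post-patch signatures from the hunks above; the bodies
   are fake and exist only so this sketch builds outside the server. */
static ulint ibuf_merge_space(ulint space)       { (void) space; return 3; }
static ulint ibuf_merge_in_background(bool full) { (void) full;  return 7; }

int main(void)
{
	/* Background merge: the table id argument is gone and only the
	   "full contract" flag remains; the result is still a lower bound
	   on the volume of entries merged. */
	ulint bytes_merged = ibuf_merge_in_background(false);

	/* Per-tablespace merge: keyed by space id, returns pages merged. */
	ulint pages_merged = ibuf_merge_space(42);

	printf("sketch values: %lu and %lu from the stubs\n",
	       bytes_merged, pages_merged);
	return 0;
}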
This program is free software; you can redistribute it and/or modify it under @@ -117,7 +117,7 @@ btr_corruption_report( /*==================*/ const buf_block_t* block, /*!< in: corrupted block */ const dict_index_t* index) /*!< in: index tree */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /** Assert that a B-tree page is not corrupted. @param block buffer block containing a B-tree page @@ -159,7 +159,7 @@ btr_blob_dbg_add_blob( ulint page_no, /*!< in: start page of the column */ dict_index_t* index, /*!< in/out: index tree */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Display the references to off-page columns. This function is to be called from a debugger, @@ -169,7 +169,7 @@ void btr_blob_dbg_print( /*===============*/ const dict_index_t* index) /*!< in: index tree */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Check that there are no references to off-page columns from or to the given page. Invoked when freeing or clearing a page. @@ -180,7 +180,7 @@ btr_blob_dbg_is_empty( /*==================*/ dict_index_t* index, /*!< in: index */ ulint page_no) /*!< in: page number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Modify the 'deleted' flag of a record. */ @@ -192,7 +192,7 @@ btr_blob_dbg_set_deleted_flag( dict_index_t* index, /*!< in/out: index */ const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ ibool del) /*!< in: TRUE=deleted, FALSE=exists */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Change the ownership of an off-page column. */ UNIV_INTERN @@ -204,7 +204,7 @@ btr_blob_dbg_owner( const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ ulint i, /*!< in: ith field in rec */ ibool own) /*!< in: TRUE=owned, FALSE=disowned */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Assert that there are no BLOB references to or from the given page. */ # define btr_blob_dbg_assert_empty(index, page_no) \ ut_a(btr_blob_dbg_is_empty(index, page_no)) @@ -224,7 +224,7 @@ btr_root_get( /*=========*/ const dict_index_t* index, /*!< in: index tree */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Checks and adjusts the root node of a tree during IMPORT TABLESPACE. @@ -234,7 +234,7 @@ dberr_t btr_root_adjust_on_import( /*======================*/ const dict_index_t* index) /*!< in: index tree */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Gets the height of the B-tree (the level of the root, when the leaf @@ -247,7 +247,7 @@ btr_height_get( /*===========*/ dict_index_t* index, /*!< in: index tree */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Gets a buffer page and declares its latching order level. 
*/ UNIV_INLINE @@ -309,7 +309,7 @@ index_id_t btr_page_get_index_id( /*==================*/ const page_t* page) /*!< in: index page */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #ifndef UNIV_HOTBACKUP /********************************************************//** Gets the node level field in an index page. @@ -319,7 +319,7 @@ ulint btr_page_get_level_low( /*===================*/ const page_t* page) /*!< in: index page */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #define btr_page_get_level(page, mtr) btr_page_get_level_low(page) /********************************************************//** Gets the next index page number. @@ -330,7 +330,7 @@ btr_page_get_next( /*==============*/ const page_t* page, /*!< in: index page */ mtr_t* mtr) /*!< in: mini-transaction handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************//** Gets the previous index page number. @return prev page number */ @@ -340,7 +340,7 @@ btr_page_get_prev( /*==============*/ const page_t* page, /*!< in: index page */ mtr_t* mtr) /*!< in: mini-transaction handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Gets pointer to the previous user record in the tree. It is assumed that the caller has appropriate latches on the page and its neighbor. @@ -352,7 +352,7 @@ btr_get_prev_user_rec( rec_t* rec, /*!< in: record on leaf level */ mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if needed, also to the previous page */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Gets pointer to the next user record in the tree. It is assumed that the caller has appropriate latches on the page and its neighbor. @@ -364,7 +364,7 @@ btr_get_next_user_rec( rec_t* rec, /*!< in: record on leaf level */ mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if needed, also to the next page */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Releases the latch on a leaf page and bufferunfixes it. */ UNIV_INLINE @@ -375,7 +375,7 @@ btr_leaf_page_release( ulint latch_mode, /*!< in: BTR_SEARCH_LEAF or BTR_MODIFY_LEAF */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Gets the child node file address in a node pointer. NOTE: the offsets array must contain all offsets for the record since @@ -389,7 +389,7 @@ btr_node_ptr_get_child_page_no( /*===========================*/ const rec_t* rec, /*!< in: node pointer record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /************************************************************//** Creates the root node for a new index tree. 
@return page number of the created root, FIL_NULL if did not succeed */ @@ -404,7 +404,7 @@ btr_create( index_id_t index_id,/*!< in: index id */ dict_index_t* index, /*!< in: index */ mtr_t* mtr) /*!< in: mini-transaction handle */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /************************************************************//** Frees a B-tree except the root page, which MUST be freed after this by calling btr_free_root. */ @@ -427,7 +427,7 @@ btr_free_root( or 0 for uncompressed pages */ ulint root_page_no, /*!< in: root page number */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Makes tree one level higher by splitting the root, and inserts the tuple. It is assumed that mtr contains an x-latch on the tree. @@ -450,7 +450,7 @@ btr_root_raise_and_insert( const dtuple_t* tuple, /*!< in: tuple to insert */ ulint n_ext, /*!< in: number of externally stored columns */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Reorganizes an index page. @@ -476,7 +476,7 @@ btr_page_reorganize_low( page_cur_t* cursor, /*!< in/out: page cursor */ dict_index_t* index, /*!< in: the index tree of the page */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Reorganizes an index page. @@ -495,7 +495,7 @@ btr_page_reorganize( page_cur_t* cursor, /*!< in/out: page cursor */ dict_index_t* index, /*!< in: the index tree of the page */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to left. @@ -508,7 +508,7 @@ btr_page_get_split_rec_to_left( rec_t** split_rec)/*!< out: if split recommended, the first record on upper half page, or NULL if tuple should be first */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to right. @@ -521,7 +521,7 @@ btr_page_get_split_rec_to_right( rec_t** split_rec)/*!< out: if split recommended, the first record on upper half page, or NULL if tuple should be first */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Splits an index page to halves and inserts the tuple. It is assumed that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is @@ -545,7 +545,7 @@ btr_page_split_and_insert( const dtuple_t* tuple, /*!< in: tuple to insert */ ulint n_ext, /*!< in: number of externally stored columns */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************//** Inserts a data tuple to a tree on a non-leaf level. It is assumed that mtr holds an x-latch on the tree. 
*/ @@ -560,7 +560,7 @@ btr_insert_on_non_leaf_level_func( const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); # define btr_insert_on_non_leaf_level(f,i,l,t,m) \ btr_insert_on_non_leaf_level_func(f,i,l,t,__FILE__,__LINE__,m) #endif /* !UNIV_HOTBACKUP */ @@ -572,7 +572,7 @@ btr_set_min_rec_mark( /*=================*/ rec_t* rec, /*!< in/out: record */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /*************************************************************//** Deletes on the upper level the node pointer to a page. */ @@ -583,7 +583,7 @@ btr_node_ptr_delete( dict_index_t* index, /*!< in: index tree */ buf_block_t* block, /*!< in: page whose node pointer is deleted */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG /************************************************************//** Checks that the node pointer to a page is appropriate. @@ -595,7 +595,7 @@ btr_check_node_ptr( dict_index_t* index, /*!< in: index tree */ buf_block_t* block, /*!< in: index page */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ /*************************************************************//** Tries to merge the page first to the left immediate brother if such a @@ -618,7 +618,7 @@ btr_compress( ibool adjust, /*!< in: TRUE if should adjust the cursor position even if compression occurs */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Discards a page from a B-tree. This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. This cannot @@ -630,7 +630,7 @@ btr_discard_page( btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on the root page */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /****************************************************************//** Parses the redo log record for setting an index record as the predefined @@ -645,7 +645,7 @@ btr_parse_set_min_rec_mark( ulint comp, /*!< in: nonzero=compact page format */ page_t* page, /*!< in: page or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ - __attribute__((nonnull(1,2), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2), warn_unused_result)); /***********************************************************//** Parses a redo log record of reorganizing a page. @return end of log record or NULL */ @@ -659,7 +659,7 @@ btr_parse_page_reorganize( bool compressed,/*!< in: true if compressed page */ buf_block_t* block, /*!< in: page to be reorganized, or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ - __attribute__((nonnull(1,2,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result)); #ifndef UNIV_HOTBACKUP /**************************************************************//** Gets the number of pages in a B-tree. @@ -672,7 +672,7 @@ btr_get_size( ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */ mtr_t* mtr) /*!< in/out: mini-transaction where index is s-latched */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Allocates a new file page to be used in an index tree. 
NOTE: we assume that the caller has made the reservation for free extents! @@ -695,7 +695,7 @@ btr_page_alloc( mtr_t* init_mtr) /*!< in/out: mini-transaction for x-latching and initializing the page */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Frees a file page used in an index tree. NOTE: cannot free field external storage pages because the page must contain info on its level. */ @@ -706,7 +706,7 @@ btr_page_free( dict_index_t* index, /*!< in: index tree */ buf_block_t* block, /*!< in: block to be freed, x-latched */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Frees a file page used in an index tree. Can be used also to BLOB external storage pages, because the page level 0 can be given as an @@ -719,7 +719,7 @@ btr_page_free_low( buf_block_t* block, /*!< in: block to be freed, x-latched */ ulint level, /*!< in: page level */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_BTR_PRINT /*************************************************************//** Prints size info of a B-tree. */ @@ -728,7 +728,7 @@ void btr_print_size( /*===========*/ dict_index_t* index) /*!< in: index tree */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Prints directories and other info of all nodes in the index. */ UNIV_INTERN @@ -738,7 +738,7 @@ btr_print_index( dict_index_t* index, /*!< in: index */ ulint width) /*!< in: print this many entries from start and end */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* UNIV_BTR_PRINT */ /************************************************************//** Checks the size and number of fields in a record based on the definition of @@ -753,7 +753,7 @@ btr_index_rec_validate( ibool dump_on_error) /*!< in: TRUE if the function should print hex dump of record and page on error */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Checks the consistency of an index tree. @return TRUE if ok */ @@ -763,7 +763,7 @@ btr_validate_index( /*===============*/ dict_index_t* index, /*!< in: index */ const trx_t* trx) /*!< in: transaction or 0 */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); #define BTR_N_LEAF_PAGES 1 #define BTR_TOTAL_SIZE 2 diff --git a/storage/xtradb/include/btr0btr.ic b/storage/xtradb/include/btr0btr.ic index 9cc611ee450..0227cfdb5af 100644 --- a/storage/xtradb/include/btr0btr.ic +++ b/storage/xtradb/include/btr0btr.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -161,10 +161,11 @@ ulint btr_page_get_next( /*==============*/ const page_t* page, /*!< in: index page */ - mtr_t* mtr __attribute__((unused))) + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mini-transaction handle */ { - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX) || mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_S_FIX)); @@ -183,7 +184,8 @@ btr_page_set_next( ulint next, /*!< in: next page number */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); if (page_zip) { mach_write_to_4(page + FIL_PAGE_NEXT, next); @@ -201,9 +203,10 @@ ulint btr_page_get_prev( /*==============*/ const page_t* page, /*!< in: index page */ - mtr_t* mtr __attribute__((unused))) /*!< in: mini-transaction handle */ + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mini-transaction handle */ { - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); return(mach_read_from_4(page + FIL_PAGE_PREV)); } @@ -220,7 +223,8 @@ btr_page_set_prev( ulint prev, /*!< in: previous page number */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); if (page_zip) { mach_write_to_4(page + FIL_PAGE_PREV, prev); diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h index 4ed66e76fe0..0f0bdcfb9bf 100644 --- a/storage/xtradb/include/btr0cur.h +++ b/storage/xtradb/include/btr0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -186,7 +186,7 @@ btr_cur_open_at_index_side_func( const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #define btr_cur_open_at_index_side(f,i,l,c,lv,m) \ btr_cur_open_at_index_side_func(f,i,l,c,lv,__FILE__,__LINE__,m) /**********************************************************************//** @@ -235,7 +235,7 @@ btr_cur_optimistic_insert( compressed tablespace, the caller must mtr_commit(mtr) before latching any further pages */ - __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result)); /*************************************************************//** Performs an insert on a page of an index tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the insert is @@ -266,7 +266,7 @@ btr_cur_pessimistic_insert( ulint n_ext, /*!< in: number of externally stored columns */ que_thr_t* thr, /*!< in: query thread or NULL */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result)); /*************************************************************//** See if there is enough place in the page modification log to log an update-in-place. 
@@ -295,9 +295,9 @@ btr_cur_update_alloc_zip_func( mtr_t* mtr, /*!< in/out: mini-transaction */ trx_t* trx) /*!< in: NULL or transaction */ #ifdef UNIV_DEBUG - __attribute__((nonnull (1, 2, 3, 4, 7), warn_unused_result)); + MY_ATTRIBUTE((nonnull (1, 2, 3, 4, 7), warn_unused_result)); #else - __attribute__((nonnull (1, 2, 3, 6), warn_unused_result)); + MY_ATTRIBUTE((nonnull (1, 2, 3, 6), warn_unused_result)); #endif #ifdef UNIV_DEBUG @@ -331,7 +331,7 @@ btr_cur_update_in_place( is a secondary index, the caller must mtr_commit(mtr) before latching any further pages */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /***********************************************************//** Writes a redo log record of updating a record in-place. */ UNIV_INTERN @@ -345,7 +345,7 @@ btr_cur_update_in_place_log( trx_id_t trx_id, /*!< in: transaction id */ roll_ptr_t roll_ptr, /*!< in: roll ptr */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Tries to update a record on a page in an index tree. It is assumed that mtr holds an x-latch on the page. The operation does not succeed if there is too @@ -377,7 +377,7 @@ btr_cur_optimistic_update( is a secondary index, the caller must mtr_commit(mtr) before latching any further pages */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /*************************************************************//** Performs an update of a record on a page of a tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the @@ -411,7 +411,7 @@ btr_cur_pessimistic_update( trx_id_t trx_id, /*!< in: transaction id */ mtr_t* mtr) /*!< in/out: mini-transaction; must be committed before latching any further pages */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /***********************************************************//** Marks a clustered index record deleted. Writes an undo log record to undo log on this delete marking. Writes in the trx id field the id @@ -428,7 +428,7 @@ btr_cur_del_mark_set_clust_rec( const ulint* offsets,/*!< in: rec_get_offsets(rec) */ que_thr_t* thr, /*!< in: query thread */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************//** Sets a secondary index record delete mark to TRUE or FALSE. @return DB_SUCCESS, DB_LOCK_WAIT, or error number */ @@ -441,7 +441,7 @@ btr_cur_del_mark_set_sec_rec( ibool val, /*!< in: value to set */ que_thr_t* thr, /*!< in: query thread */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Tries to compress a page of the tree if it seems useful. It is assumed that mtr holds an x-latch on the tree and on the cursor page. To avoid @@ -459,7 +459,7 @@ btr_cur_compress_if_useful( ibool adjust, /*!< in: TRUE if should adjust the cursor position even if compression occurs */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************//** Removes the record on which the tree cursor is positioned. 
It is assumed that the mtr has an x-latch on the page where the cursor is positioned, @@ -480,7 +480,7 @@ btr_cur_optimistic_delete_func( TRUE on a leaf page of a secondary index, the mtr must be committed before latching any further pages */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); # ifdef UNIV_DEBUG # define btr_cur_optimistic_delete(cursor, flags, mtr) \ btr_cur_optimistic_delete_func(cursor, flags, mtr) @@ -516,7 +516,7 @@ btr_cur_pessimistic_delete( ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */ enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses a redo log record of updating a record in-place. @@ -609,7 +609,7 @@ btr_cur_disown_inherited_fields( const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ const upd_t* update, /*!< in: update vector */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull(2,3,4,5,6))); + MY_ATTRIBUTE((nonnull(2,3,4,5,6))); /** Operation code for btr_store_big_rec_extern_fields(). */ enum blob_op { @@ -629,7 +629,7 @@ ibool btr_blob_op_is_update( /*==================*/ enum blob_op op) /*!< in: operation */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*******************************************************************//** Stores the fields in big_rec_vec to the tablespace and puts pointers to @@ -654,7 +654,7 @@ btr_store_big_rec_extern_fields( mtr_t* btr_mtr, /*!< in: mtr containing the latches to the clustered index */ enum blob_op op) /*! in: operation code */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Frees the space in an externally stored field to the file space @@ -748,7 +748,7 @@ btr_push_update_extern_fields( dtuple_t* tuple, /*!< in/out: data tuple */ const upd_t* update, /*!< in: update vector */ mem_heap_t* heap) /*!< in: memory heap */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***********************************************************//** Sets a secondary index record's delete mark to the given value. This function is only used by the insert buffer merge mechanism. */ diff --git a/storage/xtradb/include/btr0pcur.h b/storage/xtradb/include/btr0pcur.h index cfbaacf4de3..e5b40040615 100644 --- a/storage/xtradb/include/btr0pcur.h +++ b/storage/xtradb/include/btr0pcur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -155,7 +155,7 @@ btr_pcur_open_at_index_side( ulint level, /*!< in: level to search for (0=leaf) */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Gets the up_match value for a pcur after a search. 
@return number of matched fields at the cursor or to the right if diff --git a/storage/xtradb/include/btr0sea.h b/storage/xtradb/include/btr0sea.h index d40094461ff..8f438bf640e 100644 --- a/storage/xtradb/include/btr0sea.h +++ b/storage/xtradb/include/btr0sea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -69,7 +69,7 @@ btr_search_t* btr_search_get_info( /*================*/ dict_index_t* index) /*!< in: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*****************************************************************//** Creates and initializes a search info struct. @return own: search info struct */ @@ -200,7 +200,7 @@ hash_table_t* btr_search_get_hash_table( /*======================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((pure,warn_unused_result)); + MY_ATTRIBUTE((pure,warn_unused_result)); /********************************************************************//** Returns the adaptive hash index latch for a given index key. @@ -210,7 +210,7 @@ prio_rw_lock_t* btr_search_get_latch( /*=================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((pure,warn_unused_result)); + MY_ATTRIBUTE((pure,warn_unused_result)); /*********************************************************************//** Returns the AHI partition number corresponding to a given index ID. */ @@ -219,7 +219,7 @@ ulint btr_search_get_key( /*===============*/ index_id_t index_id) /*!< in: index ID */ - __attribute__((pure,warn_unused_result)); + MY_ATTRIBUTE((pure,warn_unused_result)); /*********************************************************************//** Initializes AHI-related fields in a newly created index. */ @@ -228,7 +228,7 @@ void btr_search_index_init( /*===============*/ dict_index_t* index) /*!< in: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Latches all adaptive hash index latches in exclusive mode. */ @@ -256,7 +256,7 @@ bool btr_search_own_all( /*===============*/ ulint lock_type) - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** Checks if the thread owns any adaptive hash latches in either S or X mode. @return true if the thread owns at least one latch in any mode. */ @@ -264,7 +264,7 @@ UNIV_INLINE bool btr_search_own_any(void) /*=====================*/ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /** The search info struct in an index */ diff --git a/storage/xtradb/include/btr0types.h b/storage/xtradb/include/btr0types.h index cd0392e7951..4bc9c72eccc 100644 --- a/storage/xtradb/include/btr0types.h +++ b/storage/xtradb/include/btr0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -82,7 +82,7 @@ btr_blob_dbg_rbt_insert( dict_index_t* index, /*!< in/out: index tree */ const btr_blob_dbg_t* b, /*!< in: the reference */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Remove from index->blobs a reference to an off-page column. @param index the index tree @@ -95,7 +95,7 @@ btr_blob_dbg_rbt_delete( dict_index_t* index, /*!< in/out: index tree */ const btr_blob_dbg_t* b, /*!< in: the reference */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Add to index->blobs any references to off-page columns from a record. @@ -108,7 +108,7 @@ btr_blob_dbg_add_rec( dict_index_t* index, /*!< in/out: index */ const ulint* offsets,/*!< in: offsets */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Remove from index->blobs any references to off-page columns from a record. @return number of references removed */ @@ -120,7 +120,7 @@ btr_blob_dbg_remove_rec( dict_index_t* index, /*!< in/out: index */ const ulint* offsets,/*!< in: offsets */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Count and add to index->blobs any references to off-page columns from records on a page. @@ -132,7 +132,7 @@ btr_blob_dbg_add( const page_t* page, /*!< in: rewritten page */ dict_index_t* index, /*!< in/out: index */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Count and remove from index->blobs any references to off-page columns from records on a page. @@ -145,7 +145,7 @@ btr_blob_dbg_remove( const page_t* page, /*!< in: b-tree page */ dict_index_t* index, /*!< in/out: index */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Restore in index->blobs any references to off-page columns Used when page reorganize fails due to compressed page overflow. 
*/ @@ -157,7 +157,7 @@ btr_blob_dbg_restore( const page_t* page, /*!< in: copy of original page */ dict_index_t* index, /*!< in/out: index */ const char* ctx) /*!< in: context (for logging) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Operation that processes the BLOB references of an index record @param[in] rec record on index page @@ -181,7 +181,7 @@ btr_blob_dbg_op( dict_index_t* index, /*!< in/out: index */ const char* ctx, /*!< in: context (for logging) */ const btr_blob_dbg_op_f op) /*!< in: operation on records */ - __attribute__((nonnull(1,3,4,5))); + MY_ATTRIBUTE((nonnull(1,3,4,5))); #else /* UNIV_BLOB_DEBUG */ # define btr_blob_dbg_add_rec(rec, index, offsets, ctx) ((void) 0) # define btr_blob_dbg_add(page, index, ctx) ((void) 0) diff --git a/storage/xtradb/include/buf0buddy.h b/storage/xtradb/include/buf0buddy.h index a86fc87e3d3..09768dda92f 100644 --- a/storage/xtradb/include/buf0buddy.h +++ b/storage/xtradb/include/buf0buddy.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -54,7 +54,7 @@ buf_buddy_alloc( storage was allocated from the LRU list and buf_pool->LRU_list_mutex was temporarily released */ - __attribute__((malloc, nonnull)); + MY_ATTRIBUTE((malloc, nonnull)); /**********************************************************************//** Deallocate a block. */ @@ -68,7 +68,7 @@ buf_buddy_free( be pointed to by the buffer pool */ ulint size) /*!< in: block size, up to UNIV_PAGE_SIZE */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_NONINL # include "buf0buddy.ic" diff --git a/storage/xtradb/include/buf0buddy.ic b/storage/xtradb/include/buf0buddy.ic index 020442016d0..9bc8e9e8762 100644 --- a/storage/xtradb/include/buf0buddy.ic +++ b/storage/xtradb/include/buf0buddy.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -50,7 +50,7 @@ buf_buddy_alloc_low( allocated from the LRU list and buf_pool->LRU_list_mutex was temporarily released */ - __attribute__((malloc, nonnull)); + MY_ATTRIBUTE((malloc, nonnull)); /**********************************************************************//** Deallocate a block. */ @@ -63,7 +63,7 @@ buf_buddy_free_low( pointed to by the buffer pool */ ulint i) /*!< in: index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Get the index of buf_pool->zip_free[] for a given block size. diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index 158fc423403..b265b8661c8 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -243,7 +243,7 @@ buf_relocate( buf_page_get_state(bpage) must be BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ buf_page_t* dpage) /*!< in/out: destination control block */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the current size of buffer buf_pool in bytes. @return size in bytes */ @@ -274,7 +274,7 @@ UNIV_INLINE buf_page_t* buf_page_alloc_descriptor(void) /*===========================*/ - __attribute__((malloc)); + MY_ATTRIBUTE((malloc)); /********************************************************************//** Free a buf_page_t descriptor. */ UNIV_INLINE @@ -282,7 +282,7 @@ void buf_page_free_descriptor( /*=====================*/ buf_page_t* bpage) /*!< in: bpage descriptor to free. */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Allocates a buffer block. @@ -519,7 +519,7 @@ ulint buf_page_get_freed_page_clock( /*==========================*/ const buf_page_t* bpage) /*!< in: block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /********************************************************************//** Reads the freed_page_clock of a buffer block. @return freed_page_clock */ @@ -528,7 +528,7 @@ ulint buf_block_get_freed_page_clock( /*===========================*/ const buf_block_t* block) /*!< in: block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /********************************************************************//** Tells if a block is still close enough to the MRU end of the LRU list @@ -591,7 +591,7 @@ buf_block_buf_fix_inc_func( ulint line, /*!< in: line */ # endif /* UNIV_SYNC_DEBUG */ buf_block_t* block) /*!< in/out: block to bufferfix */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Increments the bufferfix count. */ @@ -637,7 +637,7 @@ buf_page_is_corrupted( const byte* read_buf, /*!< in: a database page */ ulint zip_size) /*!< in: size of compressed page; 0 for uncompressed pages */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Checks if a page is all zeroes. @return TRUE if the page is all zeroes */ @@ -667,7 +667,7 @@ ulint buf_block_get_lock_hash_val( /*========================*/ const buf_block_t* block) /*!< in: block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); #ifdef UNIV_DEBUG /*********************************************************************//** Finds a block in the buffer pool that points to a @@ -725,10 +725,10 @@ buf_page_print( const byte* read_buf, /*!< in: a database page */ ulint zip_size, /*!< in: compressed page size, or 0 for uncompressed pages */ - ulint flags); /*!< in: 0 or + ulint flags) /*!< in: 0 or BUF_PAGE_PRINT_NO_CRASH or BUF_PAGE_PRINT_NO_FULL */ - + UNIV_COLD MY_ATTRIBUTE((nonnull)); /********************************************************************//** Decompress a block. 
@return TRUE if successful */ @@ -855,7 +855,7 @@ enum buf_page_state buf_block_get_state( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Sets the state of a block. */ UNIV_INLINE @@ -880,7 +880,7 @@ ibool buf_page_in_file( /*=============*/ const buf_page_t* bpage) /*!< in: pointer to control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); #ifndef UNIV_HOTBACKUP /*********************************************************************//** Determines if a block should be on unzip_LRU list. @@ -890,7 +890,7 @@ ibool buf_page_belongs_to_unzip_LRU( /*==========================*/ const buf_page_t* bpage) /*!< in: pointer to control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the mutex of a block. @@ -900,7 +900,7 @@ ib_mutex_t* buf_page_get_mutex( /*===============*/ const buf_page_t* bpage) /*!< in: pointer to control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Get the flush type of a page. @@ -910,7 +910,7 @@ buf_flush_t buf_page_get_flush_type( /*====================*/ const buf_page_t* bpage) /*!< in: buffer page */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Set the flush type of a page. */ UNIV_INLINE @@ -936,7 +936,7 @@ enum buf_io_fix buf_page_get_io_fix( /*================*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the io_fix state of a block. @return io_fix state */ @@ -945,7 +945,7 @@ enum buf_io_fix buf_block_get_io_fix( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the io_fix state of a block. Does not assert that the buf_page_get_mutex() mutex is held, to be used in the cases where it is safe @@ -956,7 +956,7 @@ enum buf_io_fix buf_page_get_io_fix_unlocked( /*=========================*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Sets the io_fix state of a block. */ UNIV_INLINE @@ -1002,7 +1002,7 @@ ibool buf_page_can_relocate( /*==================*/ const buf_page_t* bpage) /*!< control block being relocated */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Determine if a block has been flagged old. @@ -1012,7 +1012,7 @@ ibool buf_page_is_old( /*============*/ const buf_page_t* bpage) /*!< in: control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Flag a block old. */ UNIV_INLINE @@ -1029,7 +1029,7 @@ unsigned buf_page_is_accessed( /*=================*/ const buf_page_t* bpage) /*!< in: control block */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************************//** Flag a block accessed. 
*/ UNIV_INLINE @@ -1037,7 +1037,7 @@ void buf_page_set_accessed( /*==================*/ buf_page_t* bpage) /*!< in/out: control block */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the buf_block_t handle of a buffered file block if an uncompressed page frame exists, or NULL. Note: even though bpage is not declared a @@ -1048,7 +1048,7 @@ buf_block_t* buf_page_get_block( /*===============*/ buf_page_t* bpage) /*!< in: control block, or NULL */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); #endif /* !UNIV_HOTBACKUP */ #ifdef UNIV_DEBUG /*********************************************************************//** @@ -1059,7 +1059,7 @@ buf_frame_t* buf_block_get_frame( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); #else /* UNIV_DEBUG */ # define buf_block_get_frame(block) (block ? (block)->frame : 0) #endif /* UNIV_DEBUG */ @@ -1071,7 +1071,7 @@ ulint buf_page_get_space( /*===============*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the space id of a block. @return space id */ @@ -1080,7 +1080,7 @@ ulint buf_block_get_space( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the page number of a block. @return page number */ @@ -1089,7 +1089,7 @@ ulint buf_page_get_page_no( /*=================*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the page number of a block. @return page number */ @@ -1098,7 +1098,7 @@ ulint buf_block_get_page_no( /*==================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the compressed page size of a block. @return compressed page size, or 0 */ @@ -1107,7 +1107,7 @@ ulint buf_page_get_zip_size( /*==================*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the compressed page size of a block. @return compressed page size, or 0 */ @@ -1116,7 +1116,7 @@ ulint buf_block_get_zip_size( /*===================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the compressed page descriptor corresponding to an uncompressed page if applicable. */ @@ -1205,7 +1205,7 @@ buf_page_address_fold( /*==================*/ ulint space, /*!< in: space id */ ulint offset) /*!< in: offset of the page within space */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /********************************************************************//** Calculates the index of a buffer pool to the buf_pool[] array. 
@return the position of the buffer pool in buf_pool[] */ @@ -1214,7 +1214,7 @@ ulint buf_pool_index( /*===========*/ const buf_pool_t* buf_pool) /*!< in: buffer pool */ - __attribute__((nonnull, const)); + MY_ATTRIBUTE((nonnull, const)); /******************************************************************//** Returns the buffer pool instance given a page instance @return buf_pool */ @@ -1354,7 +1354,7 @@ buf_pool_watch_is_sentinel( /*=======================*/ buf_pool_t* buf_pool, /*!< buffer pool instance */ const buf_page_t* bpage) /*!< in: block */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** Add watch for the given page to be read in. Caller must have appropriate hash_lock for the bpage and hold the LRU list mutex to avoid a race @@ -1368,7 +1368,7 @@ buf_pool_watch_set( ulint space, /*!< in: space id */ ulint offset, /*!< in: page number */ ulint fold) /*!< in: buf_page_address_fold(space, offset) */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /****************************************************************//** Stop watching if the page has been read in. buf_pool_watch_set(space,offset) must have returned NULL before. */ @@ -1389,7 +1389,7 @@ buf_pool_watch_occurred( /*====================*/ ulint space, /*!< in: space id */ ulint offset) /*!< in: page number */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** Get total buffer pool statistics. */ UNIV_INTERN @@ -1447,7 +1447,7 @@ bool buf_own_zip_mutex_for_page( /*=======================*/ const buf_page_t* bpage) - __attribute__((nonnull,warn_unused_result)); + MY_ATTRIBUTE((nonnull,warn_unused_result)); #endif /* UNIV_DEBUG */ /** The common buffer control block structure diff --git a/storage/xtradb/include/buf0flu.h b/storage/xtradb/include/buf0flu.h index 83f1275e4d2..96035452e6e 100644 --- a/storage/xtradb/include/buf0flu.h +++ b/storage/xtradb/include/buf0flu.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -88,7 +88,7 @@ buf_flush_page_try( /*===============*/ buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */ buf_block_t* block) /*!< in/out: buffer control block */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); # endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ /*******************************************************************//** This utility flushes dirty blocks from the end of the flush list of @@ -269,7 +269,7 @@ buf_flush_ready_for_flush( buf_page_t* bpage, /*!< in: buffer control block, must be buf_page_in_file(bpage) */ buf_flush_t flush_type)/*!< in: type of flush */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #ifdef UNIV_DEBUG /******************************************************************//** @@ -302,7 +302,7 @@ UNIV_INLINE bool buf_flush_flush_list_in_progress(void) /*==================================*/ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /** If LRU list of a buf_pool is less than this size then LRU eviction should not happen. This is because when we do LRU flushing we also put diff --git a/storage/xtradb/include/buf0lru.h b/storage/xtradb/include/buf0lru.h index f421e329bf0..f056c6c4116 100644 --- a/storage/xtradb/include/buf0lru.h +++ b/storage/xtradb/include/buf0lru.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -97,7 +97,7 @@ buf_LRU_free_page( buf_page_t* bpage, /*!< in: block to be freed */ bool zip) /*!< in: true if should remove also the compressed page of an uncompressed page */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Try to free a replaceable block. @return TRUE if found and freed */ @@ -109,7 +109,7 @@ buf_LRU_scan_and_free_block( ibool scan_all) /*!< in: scan whole LRU list if TRUE, otherwise scan only 'old' blocks. */ - __attribute__((nonnull,warn_unused_result)); + MY_ATTRIBUTE((nonnull,warn_unused_result)); /******************************************************************//** Returns a free block from the buf_pool. The block is taken off the free list. If it is empty, returns NULL. @@ -150,7 +150,7 @@ buf_block_t* buf_LRU_get_free_block( /*===================*/ buf_pool_t* buf_pool) /*!< in/out: buffer pool instance */ - __attribute__((nonnull,warn_unused_result)); + MY_ATTRIBUTE((nonnull,warn_unused_result)); /******************************************************************//** Determines if the unzip_LRU list should be used for evicting a victim instead of the general LRU list. 
@@ -233,7 +233,7 @@ buf_LRU_free_one_page( buf_page_t* bpage) /*!< in/out: block, must contain a file page and be in a state where it can be freed; there may or may not be a hash index to the page */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** diff --git a/storage/xtradb/include/data0data.h b/storage/xtradb/include/data0data.h index a548c7b89b3..1d954bfc07c 100644 --- a/storage/xtradb/include/data0data.h +++ b/storage/xtradb/include/data0data.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -46,7 +46,7 @@ dtype_t* dfield_get_type( /*============*/ const dfield_t* field) /*!< in: SQL data field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets pointer to the data in a field. @return pointer to data */ @@ -55,7 +55,7 @@ void* dfield_get_data( /*============*/ const dfield_t* field) /*!< in: field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #else /* UNIV_DEBUG */ # define dfield_get_type(field) (&(field)->type) # define dfield_get_data(field) ((field)->data) @@ -68,7 +68,7 @@ dfield_set_type( /*============*/ dfield_t* field, /*!< in: SQL data field */ const dtype_t* type) /*!< in: pointer to data type struct */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets length of field data. @return length of data; UNIV_SQL_NULL if SQL null data */ @@ -77,7 +77,7 @@ ulint dfield_get_len( /*===========*/ const dfield_t* field) /*!< in: field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets length in a field. 
*/ UNIV_INLINE @@ -86,7 +86,7 @@ dfield_set_len( /*===========*/ dfield_t* field, /*!< in: field */ ulint len) /*!< in: length or UNIV_SQL_NULL */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Determines if a field is SQL NULL @return nonzero if SQL null data */ @@ -95,7 +95,7 @@ ulint dfield_is_null( /*===========*/ const dfield_t* field) /*!< in: field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Determines if a field is externally stored @return nonzero if externally stored */ @@ -104,7 +104,7 @@ ulint dfield_is_ext( /*==========*/ const dfield_t* field) /*!< in: field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets the "external storage" flag */ UNIV_INLINE @@ -112,7 +112,7 @@ void dfield_set_ext( /*===========*/ dfield_t* field) /*!< in/out: field */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Sets pointer to the data and length in a field. */ UNIV_INLINE @@ -122,7 +122,7 @@ dfield_set_data( dfield_t* field, /*!< in: field */ const void* data, /*!< in: data */ ulint len) /*!< in: length or UNIV_SQL_NULL */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*********************************************************************//** Sets a data field to SQL NULL. */ UNIV_INLINE @@ -130,7 +130,7 @@ void dfield_set_null( /*============*/ dfield_t* field) /*!< in/out: field */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Writes an SQL null field full of zeros. */ UNIV_INLINE @@ -139,7 +139,7 @@ data_write_sql_null( /*================*/ byte* data, /*!< in: pointer to a buffer of size len */ ulint len) /*!< in: SQL null size in bytes */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Copies the data and len fields. */ UNIV_INLINE @@ -148,7 +148,7 @@ dfield_copy_data( /*=============*/ dfield_t* field1, /*!< out: field to copy to */ const dfield_t* field2) /*!< in: field to copy from */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Copies a data field to another. */ UNIV_INLINE @@ -157,7 +157,7 @@ dfield_copy( /*========*/ dfield_t* field1, /*!< out: field to copy to */ const dfield_t* field2) /*!< in: field to copy from */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Copies the data pointed to by a data field. */ UNIV_INLINE @@ -166,7 +166,7 @@ dfield_dup( /*=======*/ dfield_t* field, /*!< in/out: data field */ mem_heap_t* heap) /*!< in: memory heap where allocated */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /*********************************************************************//** Tests if two data fields are equal. 
@@ -181,7 +181,7 @@ dfield_datas_are_binary_equal( const dfield_t* field2, /*!< in: field */ ulint len) /*!< in: maximum prefix to compare, or 0 to compare the whole field length */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Tests if dfield data length and content is equal to the given. @return TRUE if equal */ @@ -192,7 +192,7 @@ dfield_data_is_binary_equal( const dfield_t* field, /*!< in: field */ ulint len, /*!< in: data length or UNIV_SQL_NULL */ const byte* data) /*!< in: data */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Gets number of fields in a data tuple. @@ -202,7 +202,7 @@ ulint dtuple_get_n_fields( /*================*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifdef UNIV_DEBUG /*********************************************************************//** Gets nth field of a tuple. @@ -224,7 +224,7 @@ ulint dtuple_get_info_bits( /*=================*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets info bits in a data tuple. */ UNIV_INLINE @@ -233,7 +233,7 @@ dtuple_set_info_bits( /*=================*/ dtuple_t* tuple, /*!< in: tuple */ ulint info_bits) /*!< in: info bits */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets number of fields used in record comparisons. @return number of fields used in comparisons in rem0cmp.* */ @@ -242,7 +242,7 @@ ulint dtuple_get_n_fields_cmp( /*====================*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets number of fields used in record comparisons. */ UNIV_INLINE @@ -252,7 +252,7 @@ dtuple_set_n_fields_cmp( dtuple_t* tuple, /*!< in: tuple */ ulint n_fields_cmp) /*!< in: number of fields used in comparisons in rem0cmp.* */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /* Estimate the number of bytes that are going to be allocated when creating a new dtuple_t object */ @@ -272,7 +272,7 @@ dtuple_create_from_mem( void* buf, /*!< in, out: buffer to use */ ulint buf_size, /*!< in: buffer size */ ulint n_fields) /*!< in: number of fields */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Creates a data tuple to a memory heap. The default value for number @@ -286,7 +286,7 @@ dtuple_create( is created, DTUPLE_EST_ALLOC(n_fields) bytes will be allocated from this heap */ ulint n_fields)/*!< in: number of fields */ - __attribute__((nonnull, malloc)); + MY_ATTRIBUTE((nonnull, malloc)); /*********************************************************************//** Sets number of fields used in a tuple. 
Normally this is set in @@ -297,7 +297,7 @@ dtuple_set_n_fields( /*================*/ dtuple_t* tuple, /*!< in: tuple */ ulint n_fields) /*!< in: number of fields */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Copies a data tuple to another. This is a shallow copy; if a deep copy is desired, dfield_dup() will have to be invoked on each field. @@ -309,7 +309,7 @@ dtuple_copy( const dtuple_t* tuple, /*!< in: tuple to copy from */ mem_heap_t* heap) /*!< in: memory heap where the tuple is created */ - __attribute__((nonnull, malloc)); + MY_ATTRIBUTE((nonnull, malloc)); /**********************************************************//** The following function returns the sum of data lengths of a tuple. The space occupied by the field structs or the tuple struct is not counted. @@ -320,7 +320,7 @@ dtuple_get_data_size( /*=================*/ const dtuple_t* tuple, /*!< in: typed data tuple */ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Computes the number of externally stored fields in a data tuple. @return number of fields */ @@ -329,7 +329,7 @@ ulint dtuple_get_n_ext( /*=============*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /************************************************************//** Compare two data tuples, respecting the collation of character fields. @return 1, 0 , -1 if tuple1 is greater, equal, less, respectively, @@ -340,7 +340,7 @@ dtuple_coll_cmp( /*============*/ const dtuple_t* tuple1, /*!< in: tuple 1 */ const dtuple_t* tuple2) /*!< in: tuple 2 */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /************************************************************//** Folds a prefix given as the number of fields of a tuple. @return the folded value */ @@ -353,7 +353,7 @@ dtuple_fold( ulint n_bytes,/*!< in: number of bytes to fold in an incomplete last field */ index_id_t tree_id)/*!< in: index tree id */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /*******************************************************************//** Sets types of fields binary in a tuple. */ UNIV_INLINE @@ -362,7 +362,7 @@ dtuple_set_types_binary( /*====================*/ dtuple_t* tuple, /*!< in: data tuple */ ulint n) /*!< in: number of fields to set */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Checks if a dtuple contains an SQL null value. @return TRUE if some field is SQL null */ @@ -371,7 +371,7 @@ ibool dtuple_contains_null( /*=================*/ const dtuple_t* tuple) /*!< in: dtuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data field is typed. Asserts an error if not. @return TRUE if ok */ @@ -380,7 +380,7 @@ ibool dfield_check_typed( /*===============*/ const dfield_t* field) /*!< in: data field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data tuple is typed. Asserts an error if not. 
@return TRUE if ok */ @@ -389,7 +389,7 @@ ibool dtuple_check_typed( /*===============*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data tuple is typed. @return TRUE if ok */ @@ -398,7 +398,7 @@ ibool dtuple_check_typed_no_assert( /*=========================*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifdef UNIV_DEBUG /**********************************************************//** Validates the consistency of a tuple which must be complete, i.e, @@ -409,7 +409,7 @@ ibool dtuple_validate( /*============*/ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ /*************************************************************//** Pretty prints a dfield value according to its data type. */ @@ -418,7 +418,7 @@ void dfield_print( /*=========*/ const dfield_t* dfield) /*!< in: dfield */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Pretty prints a dfield value according to its data type. Also the hex string is printed if a string contains non-printable characters. */ @@ -427,7 +427,7 @@ void dfield_print_also_hex( /*==================*/ const dfield_t* dfield) /*!< in: dfield */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************//** The following function prints the contents of a tuple. */ UNIV_INTERN @@ -436,7 +436,7 @@ dtuple_print( /*=========*/ FILE* f, /*!< in: output stream */ const dtuple_t* tuple) /*!< in: tuple */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Moves parts of long fields in entry to the big record vector so that the size of tuple drops below the maximum record size allowed in the @@ -453,7 +453,7 @@ dtuple_convert_big_rec( dtuple_t* entry, /*!< in/out: index entry */ ulint* n_ext) /*!< in/out: number of externally stored columns */ - __attribute__((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); /**************************************************************//** Puts back to entry the data stored in vector. Note that to ensure the fields in entry can accommodate the data, vector must have been created @@ -466,7 +466,7 @@ dtuple_convert_back_big_rec( dtuple_t* entry, /*!< in: entry whose data was put to vector */ big_rec_t* vector) /*!< in, own: big rec vector; it is freed in this function */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Frees the memory in a big rec vector. 
*/ UNIV_INLINE @@ -475,7 +475,7 @@ dtuple_big_rec_free( /*================*/ big_rec_t* vector) /*!< in, own: big rec vector; it is freed in this function */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*######################################################################*/ diff --git a/storage/xtradb/include/data0data.ic b/storage/xtradb/include/data0data.ic index 6937d55d211..11499ab928c 100644 --- a/storage/xtradb/include/data0data.ic +++ b/storage/xtradb/include/data0data.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -56,7 +56,8 @@ dfield_set_type( dfield_t* field, /*!< in: SQL data field */ const dtype_t* type) /*!< in: pointer to data type struct */ { - ut_ad(field && type); + ut_ad(field != NULL); + ut_ad(type != NULL); field->type = *type; } @@ -194,7 +195,8 @@ dfield_copy_data( dfield_t* field1, /*!< out: field to copy to */ const dfield_t* field2) /*!< in: field to copy from */ { - ut_ad(field1 && field2); + ut_ad(field1 != NULL); + ut_ad(field2 != NULL); field1->data = field2->data; field1->len = field2->len; diff --git a/storage/xtradb/include/dict0boot.h b/storage/xtradb/include/dict0boot.h index a994c9d8ff1..477e1150f43 100644 --- a/storage/xtradb/include/dict0boot.h +++ b/storage/xtradb/include/dict0boot.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -95,7 +95,7 @@ UNIV_INTERN dberr_t dict_boot(void) /*===========*/ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*****************************************************************//** Creates and initializes the data dictionary at the server bootstrap. @@ -104,7 +104,7 @@ UNIV_INTERN dberr_t dict_create(void) /*=============*/ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*********************************************************************//** Check if a table id belongs to system table. @@ -114,7 +114,7 @@ bool dict_is_sys_table( /*==============*/ table_id_t id) /*!< in: table id to check */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /* Space id and page no where the dictionary header resides */ #define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */ diff --git a/storage/xtradb/include/dict0crea.h b/storage/xtradb/include/dict0crea.h index 67eab9058da..6146917469a 100644 --- a/storage/xtradb/include/dict0crea.h +++ b/storage/xtradb/include/dict0crea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -123,7 +123,7 @@ dict_create_add_foreign_id( incremented if used */ const char* name, /*!< in: table name */ dict_foreign_t* foreign)/*!< in/out: foreign key */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Adds the given set of foreign key objects to the dictionary tables in the database. This function does not modify the dictionary cache. The @@ -142,7 +142,7 @@ dict_create_add_foreigns_to_dictionary( const dict_foreign_set& local_fk_set, const dict_table_t* table, trx_t* trx) - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** Creates the tablespaces and datafiles system tables inside InnoDB at server bootstrap or server start if they are not found or are @@ -177,7 +177,7 @@ dict_create_add_foreign_to_dictionary( const char* name, /*!< in: table name */ const dict_foreign_t* foreign,/*!< in: foreign key */ trx_t* trx) /*!< in/out: dictionary transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /* Table create node structure */ struct tab_node_t{ diff --git a/storage/xtradb/include/dict0crea.ic b/storage/xtradb/include/dict0crea.ic index 2d0d9dcb858..1cbaa47032b 100644 --- a/storage/xtradb/include/dict0crea.ic +++ b/storage/xtradb/include/dict0crea.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -33,7 +33,7 @@ UNIV_INTERN bool row_is_mysql_tmp_table_name( /*========================*/ - const char* name) __attribute__((warn_unused_result)); + const char* name) MY_ATTRIBUTE((warn_unused_result)); /*!< in: table name in the form 'database/tablename' */ diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index 7c3e51b3c81..f539f62960b 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -53,7 +53,7 @@ void dict_casedn_str( /*============*/ char* a) /*!< in/out: string to put in lower case */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Get the database name length in a table name. @return database name length */ @@ -63,7 +63,7 @@ dict_get_db_name_len( /*=================*/ const char* name) /*!< in: table name in the form dbname '/' tablename */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Open a table from its database and table name, this is currently used by foreign constraint parser to get the referenced table. 
@@ -107,7 +107,7 @@ dict_remove_db_name( /*================*/ const char* name) /*!< in: table name in the form dbname '/' tablename */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Operation to perform when opening a table */ enum dict_table_op_t { @@ -130,7 +130,7 @@ dict_table_open_on_id( table_id_t table_id, /*!< in: table id */ ibool dict_locked, /*!< in: TRUE=data dictionary locked */ dict_table_op_t table_op) /*!< in: operation to perform */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** Decrements the count of open handles to a table. */ UNIV_INTERN @@ -142,7 +142,7 @@ dict_table_close( ibool try_drop) /*!< in: TRUE=try to drop any orphan indexes after an aborted online index creation */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Inits the data dictionary module. */ UNIV_INTERN @@ -167,7 +167,7 @@ ulint dict_col_get_mbminlen( /*==================*/ const dict_col_t* col) /*!< in: column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the maximum number of bytes per character. @return maximum multi-byte char size, in bytes */ @@ -176,7 +176,7 @@ ulint dict_col_get_mbmaxlen( /*==================*/ const dict_col_t* col) /*!< in: column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets the minimum and maximum number of bytes per character. */ UNIV_INLINE @@ -188,7 +188,7 @@ dict_col_set_mbminmaxlen( character size, in bytes */ ulint mbmaxlen) /*!< in: minimum multi-byte character size, in bytes */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the column data type. */ UNIV_INLINE @@ -197,7 +197,7 @@ dict_col_copy_type( /*===============*/ const dict_col_t* col, /*!< in: column */ dtype_t* type) /*!< out: data type */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Determine bytes of column prefix to be stored in the undo log. 
Please note if the table format is UNIV_FORMAT_A (< UNIV_FORMAT_B), no prefix @@ -210,7 +210,7 @@ dict_max_field_len_store_undo( dict_table_t* table, /*!< in: table */ const dict_col_t* col) /*!< in: column which index prefix is based on */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ #ifdef UNIV_DEBUG /*********************************************************************//** @@ -222,7 +222,7 @@ dict_col_type_assert_equal( /*=======================*/ const dict_col_t* col, /*!< in: column */ const dtype_t* type) /*!< in: data type */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ #ifndef UNIV_HOTBACKUP /***********************************************************************//** @@ -233,7 +233,7 @@ ulint dict_col_get_min_size( /*==================*/ const dict_col_t* col) /*!< in: column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Returns the maximum size of the column. @return maximum size */ @@ -242,7 +242,7 @@ ulint dict_col_get_max_size( /*==================*/ const dict_col_t* col) /*!< in: column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Returns the size of a fixed size column, 0 if not a fixed size column. @return fixed size, or 0 */ @@ -252,7 +252,7 @@ dict_col_get_fixed_size( /*====================*/ const dict_col_t* col, /*!< in: column */ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column. For fixed length types it is the fixed length of the type, otherwise 0. @@ -263,7 +263,7 @@ dict_col_get_sql_null_size( /*=======================*/ const dict_col_t* col, /*!< in: column */ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the column number. @return col->ind, table column position (starting from 0) */ @@ -272,7 +272,7 @@ ulint dict_col_get_no( /*============*/ const dict_col_t* col) /*!< in: column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the column position in the clustered index. */ UNIV_INLINE @@ -281,7 +281,7 @@ dict_col_get_clust_pos( /*===================*/ const dict_col_t* col, /*!< in: table column */ const dict_index_t* clust_index) /*!< in: clustered index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** If the given column name is reserved for InnoDB system columns, return TRUE. @@ -291,7 +291,7 @@ ibool dict_col_name_is_reserved( /*======================*/ const char* name) /*!< in: column name */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Acquire the autoinc lock. 
*/ UNIV_INTERN @@ -299,7 +299,7 @@ void dict_table_autoinc_lock( /*====================*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Unconditionally set the autoinc counter. */ UNIV_INTERN @@ -308,7 +308,7 @@ dict_table_autoinc_initialize( /*==========================*/ dict_table_t* table, /*!< in/out: table */ ib_uint64_t value) /*!< in: next value to assign to a row */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Store autoinc value when the table is evicted. @param[in] table table evicted */ @@ -333,7 +333,7 @@ ib_uint64_t dict_table_autoinc_read( /*====================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Updates the autoinc counter if the value supplied is greater than the current value. */ @@ -344,7 +344,7 @@ dict_table_autoinc_update_if_greater( dict_table_t* table, /*!< in/out: table */ ib_uint64_t value) /*!< in: value which was assigned to a row */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Release the autoinc lock. */ UNIV_INTERN @@ -352,7 +352,7 @@ void dict_table_autoinc_unlock( /*======================*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Adds system columns to a table object. */ @@ -362,7 +362,7 @@ dict_table_add_system_columns( /*==========================*/ dict_table_t* table, /*!< in/out: table */ mem_heap_t* heap) /*!< in: temporary heap */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /**********************************************************************//** Adds a table object to the dictionary cache. */ @@ -373,7 +373,7 @@ dict_table_add_to_cache( dict_table_t* table, /*!< in: table */ ibool can_be_evicted, /*!< in: TRUE if can be evicted*/ mem_heap_t* heap) /*!< in: temporary heap */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a table object from the dictionary cache. */ UNIV_INTERN @@ -381,7 +381,7 @@ void dict_table_remove_from_cache( /*=========================*/ dict_table_t* table) /*!< in, own: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a table object from the dictionary cache. */ UNIV_INTERN @@ -404,7 +404,7 @@ dict_table_rename_in_cache( /*!< in: in ALTER TABLE we want to preserve the original table name in constraints which reference it */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Removes an index from the dictionary cache. */ UNIV_INTERN @@ -413,7 +413,7 @@ dict_index_remove_from_cache( /*=========================*/ dict_table_t* table, /*!< in/out: table */ dict_index_t* index) /*!< in, own: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Change the id of a table object in the dictionary cache. This is used in DISCARD TABLESPACE. 
*/ @@ -423,7 +423,7 @@ dict_table_change_id_in_cache( /*==========================*/ dict_table_t* table, /*!< in/out: table object already in cache */ table_id_t new_id) /*!< in: new id to set */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. */ UNIV_INTERN @@ -431,7 +431,7 @@ void dict_foreign_remove_from_cache( /*===========================*/ dict_foreign_t* foreign) /*!< in, own: foreign constraint */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Adds a foreign key constraint object to the dictionary cache. May free the object if there already is an object with the same identifier in. @@ -452,7 +452,7 @@ dict_foreign_add_to_cache( compatibility */ dict_err_ignore_t ignore_err) /*!< in: error to be ignored */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /*********************************************************************//** Checks if a table is referenced by foreign keys. @return TRUE if table is referenced by a foreign key */ @@ -461,7 +461,7 @@ ibool dict_table_is_referenced_by_foreign_key( /*====================================*/ const dict_table_t* table) /*!< in: InnoDB table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Replace the index passed in with another equivalent index in the foreign key lists of the table. @@ -475,7 +475,7 @@ dict_foreign_replace_index( /*!< in: column names, or NULL to use table->col_names */ const dict_index_t* index) /*!< in: index to be replaced */ - __attribute__((nonnull(1,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,3), warn_unused_result)); /**********************************************************************//** Determines whether a string starts with the specified keyword. @return TRUE if str starts with keyword */ @@ -486,7 +486,7 @@ dict_str_starts_with_keyword( THD* thd, /*!< in: MySQL thread handle */ const char* str, /*!< in: string to scan for keyword */ const char* keyword) /*!< in: keyword to look for */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Scans a table create SQL string and adds to the data dictionary the foreign key constraints declared in the string. This function @@ -515,7 +515,7 @@ dict_create_foreign_constraints( ibool reject_fks) /*!< in: if TRUE, fail with error code DB_CANNOT_ADD_CONSTRAINT if any foreign keys are found. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement. @return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the @@ -532,7 +532,7 @@ dict_foreign_parse_drop_constraints( to drop */ const char*** constraints_to_drop) /*!< out: id's of the constraints to drop */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Returns a table object and increments its open handle count. NOTE! 
This is a high-level function to be used mainly from outside the @@ -551,7 +551,7 @@ dict_table_open_on_name( dict_err_ignore_t ignore_err) /*!< in: error to be ignored when loading the table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Tries to find an index whose first fields are the columns in the array, @@ -580,7 +580,7 @@ dict_foreign_find_index( /*!< in: nonzero if none of the columns must be declared NOT NULL */ - __attribute__((nonnull(1,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,3), warn_unused_result)); /**********************************************************************//** Returns a column's name. @return column name. NOTE: not guaranteed to stay valid if table is @@ -591,7 +591,7 @@ dict_table_get_col_name( /*====================*/ const dict_table_t* table, /*!< in: table */ ulint col_nr) /*!< in: column number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Prints a table data. */ UNIV_INTERN @@ -599,7 +599,7 @@ void dict_table_print( /*=============*/ dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Outputs info on foreign keys of a table. */ UNIV_INTERN @@ -613,7 +613,7 @@ dict_print_info_on_foreign_keys( FILE* file, /*!< in: file where to print */ trx_t* trx, /*!< in: transaction */ dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Outputs info on a foreign key of a table in a format suitable for CREATE TABLE. */ @@ -625,7 +625,7 @@ dict_print_info_on_foreign_key_in_create_format( trx_t* trx, /*!< in: transaction */ dict_foreign_t* foreign, /*!< in: foreign key constraint */ ibool add_newline) /*!< in: whether to add a newline */ - __attribute__((nonnull(1,3))); + MY_ATTRIBUTE((nonnull(1,3))); /********************************************************************//** Displays the names of the index and the table. */ UNIV_INTERN @@ -635,7 +635,7 @@ dict_index_name_print( FILE* file, /*!< in: output stream */ const trx_t* trx, /*!< in: transaction */ const dict_index_t* index) /*!< in: index to print */ - __attribute__((nonnull(1,3))); + MY_ATTRIBUTE((nonnull(1,3))); /*********************************************************************//** Tries to find an index whose first fields are the columns in the array, in the same order and is not marked for deletion and is not the same @@ -664,7 +664,7 @@ dict_foreign_qualify_index( /*!< in: nonzero if none of the columns must be declared NOT NULL */ - __attribute__((nonnull(1,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,3), warn_unused_result)); #ifdef UNIV_DEBUG /********************************************************************//** Gets the first index on the table (the clustered index). @@ -674,7 +674,7 @@ dict_index_t* dict_table_get_first_index( /*=======================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the last index on the table. 
@return index, NULL if none exists */ @@ -683,7 +683,7 @@ dict_index_t* dict_table_get_last_index( /*=======================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the next index on the table. @return index, NULL if none left */ @@ -692,7 +692,7 @@ dict_index_t* dict_table_get_next_index( /*======================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #else /* UNIV_DEBUG */ # define dict_table_get_first_index(table) UT_LIST_GET_FIRST((table)->indexes) # define dict_table_get_last_index(table) UT_LIST_GET_LAST((table)->indexes) @@ -721,7 +721,7 @@ ulint dict_index_is_clust( /*================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Check whether the index is unique. @return nonzero for unique index, zero for other indexes */ @@ -730,7 +730,7 @@ ulint dict_index_is_unique( /*=================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Check whether the index is the insert buffer tree. @return nonzero for insert buffer, zero for other indexes */ @@ -739,7 +739,7 @@ ulint dict_index_is_ibuf( /*===============*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Check whether the index is a secondary index or the insert buffer tree. @return nonzero for insert buffer, zero for other indexes */ @@ -748,7 +748,7 @@ ulint dict_index_is_sec_or_ibuf( /*======================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /************************************************************************ Gets the all the FTS indexes for the table. NOTE: must not be called for @@ -760,7 +760,7 @@ dict_table_get_all_fts_indexes( /* out: number of indexes collected */ dict_table_t* table, /* in: table */ ib_vector_t* indexes)/* out: vector for collecting FTS indexes */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Gets the number of user-defined columns in a table in the dictionary cache. @@ -770,7 +770,7 @@ ulint dict_table_get_n_user_cols( /*=======================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Gets the number of system columns in a table in the dictionary cache. 
@return number of system (e.g., ROW_ID) columns of a table */ @@ -779,7 +779,7 @@ ulint dict_table_get_n_sys_cols( /*======================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Gets the number of all columns (also system) in a table in the dictionary cache. @@ -789,7 +789,7 @@ ulint dict_table_get_n_cols( /*==================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Gets the approximately estimated number of rows in the table. @return estimated number of rows */ @@ -798,7 +798,7 @@ ib_uint64_t dict_table_get_n_rows( /*==================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Increment the number of rows in the table by one. Notice that this operation is not protected by any latch, the number is @@ -808,7 +808,7 @@ void dict_table_n_rows_inc( /*==================*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Decrement the number of rows in the table by one. Notice that this operation is not protected by any latch, the number is @@ -818,7 +818,7 @@ void dict_table_n_rows_dec( /*==================*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth column of a table. @@ -829,7 +829,7 @@ dict_table_get_nth_col( /*===================*/ const dict_table_t* table, /*!< in: table */ ulint pos) /*!< in: position of column */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the given system column of a table. @return pointer to column object */ @@ -839,7 +839,7 @@ dict_table_get_sys_col( /*===================*/ const dict_table_t* table, /*!< in: table */ ulint sys) /*!< in: DATA_ROW_ID, ... */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #else /* UNIV_DEBUG */ #define dict_table_get_nth_col(table, pos) \ ((table)->cols + (pos)) @@ -855,7 +855,7 @@ dict_table_get_sys_col_no( /*======================*/ const dict_table_t* table, /*!< in: table */ ulint sys) /*!< in: DATA_ROW_ID, ... */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_HOTBACKUP /********************************************************************//** Returns the minimum data size of an index record. @@ -865,7 +865,7 @@ ulint dict_index_get_min_size( /*====================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /********************************************************************//** Check whether the table uses the compact page format. 
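/*
 * Note on the hunks above and below: they mechanically replace the
 * GCC-specific __attribute__((...)) spelling with the MY_ATTRIBUTE((...))
 * wrapper used elsewhere in the server.  The wrapper's definition is not
 * part of this patch; the following is only a minimal sketch of such a
 * portability macro, assuming the usual __GNUC__ check, and the example
 * function name below it is hypothetical.
 */
#include <stddef.h>

#ifndef MY_ATTRIBUTE
# if defined(__GNUC__)
#  define MY_ATTRIBUTE(A)	__attribute__(A)	/* pass through on GCC/Clang */
# else
#  define MY_ATTRIBUTE(A)				/* expands to nothing elsewhere */
# endif
#endif

/* Used the same way as the declarations in these headers. */
size_t example_buf_used(const char* buf)
	MY_ATTRIBUTE((nonnull, warn_unused_result));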
@@ -875,7 +875,7 @@ ibool dict_table_is_comp( /*===============*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Determine the file format of a table. @return file format version */ @@ -884,7 +884,7 @@ ulint dict_table_get_format( /*==================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Determine the file format from a dict_table_t::flags. @return file format version */ @@ -893,7 +893,7 @@ ulint dict_tf_get_format( /*===============*/ ulint flags) /*!< in: dict_table_t::flags */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** Set the various values in a dict_table_t::flags pointer. */ UNIV_INLINE @@ -904,7 +904,7 @@ dict_tf_set( rec_format_t format, /*!< in: file format */ ulint zip_ssize, /*!< in: zip shift size */ bool remote_path) /*!< in: table uses DATA DIRECTORY */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Convert a 32 bit integer table flags to the 32 bit integer that is written into the tablespace header at the offset FSP_SPACE_FLAGS and is @@ -921,7 +921,7 @@ ulint dict_tf_to_fsp_flags( /*=================*/ ulint flags) /*!< in: dict_table_t::flags */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /********************************************************************//** Extract the compressed page size from table flags. @return compressed page size, or 0 if not compressed */ @@ -930,7 +930,7 @@ ulint dict_tf_get_zip_size( /*=================*/ ulint flags) /*!< in: flags */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /********************************************************************//** Check whether the table uses the compressed compact page format. @return compressed page size, or 0 if not compressed */ @@ -939,7 +939,7 @@ ulint dict_table_zip_size( /*================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_HOTBACKUP /*********************************************************************//** Obtain exclusive locks on all index trees of the table. This is to prevent @@ -950,7 +950,7 @@ void dict_table_x_lock_indexes( /*======================*/ dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Release the exclusive locks on all index tree. */ UNIV_INLINE @@ -958,7 +958,7 @@ void dict_table_x_unlock_indexes( /*========================*/ dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Checks if a column is in the ordering columns of the clustered index of a table. Column prefixes are treated like whole columns. 
@@ -969,7 +969,7 @@ dict_table_col_in_clustered_key( /*============================*/ const dict_table_t* table, /*!< in: table */ ulint n) /*!< in: column number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Check if the table has an FTS index. @return TRUE if table has an FTS index */ @@ -978,7 +978,7 @@ ibool dict_table_has_fts_index( /*=====================*/ dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Copies types of columns contained in table to tuple and sets all fields of the tuple to the SQL NULL value. This function should @@ -989,7 +989,7 @@ dict_table_copy_types( /*==================*/ dtuple_t* tuple, /*!< in/out: data tuple */ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************** Wait until all the background threads of the given table have exited, i.e., bg_threads == 0. Note: bg_threads_mutex must be reserved when @@ -1001,7 +1001,7 @@ dict_table_wait_for_bg_threads_to_exit( dict_table_t* table, /* in: table */ ulint delay) /* in: time in microseconds to wait between checks of bg_threads. */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Looks for an index with the given id. NOTE that we do not reserve the dictionary mutex: this function is for emergency purposes like @@ -1012,7 +1012,7 @@ dict_index_t* dict_index_find_on_id_low( /*======================*/ index_id_t id) /*!< in: index id */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /**********************************************************************//** Make room in the table cache by evicting an unused table. The unused table should not be part of FK relationship and currently not used in any user @@ -1038,7 +1038,7 @@ dict_index_add_to_cache( ibool strict) /*!< in: TRUE=refuse to create the index if records could be too big to fit in an B-tree page */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Removes an index from the dictionary cache. 
*/ UNIV_INTERN @@ -1047,7 +1047,7 @@ dict_index_remove_from_cache( /*=========================*/ dict_table_t* table, /*!< in/out: table */ dict_index_t* index) /*!< in, own: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /********************************************************************//** Gets the number of fields in the internal representation of an index, @@ -1060,7 +1060,7 @@ dict_index_get_n_fields( const dict_index_t* index) /*!< in: an internal representation of index (in the dictionary cache) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the number of fields in the internal representation of an index that uniquely determine the position of an index entry in the index, if @@ -1073,7 +1073,7 @@ dict_index_get_n_unique( /*====================*/ const dict_index_t* index) /*!< in: an internal representation of index (in the dictionary cache) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the number of fields in the internal representation of an index which uniquely determine the position of an index entry in the index, if @@ -1085,7 +1085,7 @@ dict_index_get_n_unique_in_tree( /*============================*/ const dict_index_t* index) /*!< in: an internal representation of index (in the dictionary cache) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the number of user-defined ordering fields in the index. In the internal representation we add the row id to the ordering fields to make all indexes @@ -1098,7 +1098,7 @@ dict_index_get_n_ordering_defined_by_user( /*======================================*/ const dict_index_t* index) /*!< in: an internal representation of index (in the dictionary cache) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth field of an index. @@ -1109,7 +1109,7 @@ dict_index_get_nth_field( /*=====================*/ const dict_index_t* index, /*!< in: index */ ulint pos) /*!< in: position of field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #else /* UNIV_DEBUG */ # define dict_index_get_nth_field(index, pos) ((index)->fields + (pos)) #endif /* UNIV_DEBUG */ @@ -1122,7 +1122,7 @@ dict_index_get_nth_col( /*===================*/ const dict_index_t* index, /*!< in: index */ ulint pos) /*!< in: position of the field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the column number of the nth field in an index. @return column number */ @@ -1132,7 +1132,7 @@ dict_index_get_nth_col_no( /*======================*/ const dict_index_t* index, /*!< in: index */ ulint pos) /*!< in: position of the field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Looks for column n in an index. 
@return position in internal representation of the index; @@ -1143,7 +1143,7 @@ dict_index_get_nth_col_pos( /*=======================*/ const dict_index_t* index, /*!< in: index */ ulint n) /*!< in: column number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Looks for column n in an index. @return position in internal representation of the index; @@ -1156,7 +1156,7 @@ dict_index_get_nth_col_or_prefix_pos( ulint n, /*!< in: column number */ ibool inc_prefix) /*!< in: TRUE=consider column prefixes too */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Returns TRUE if the index contains a column or a prefix of that column. @return TRUE if contains the column or its prefix */ @@ -1166,7 +1166,7 @@ dict_index_contains_col_or_prefix( /*==============================*/ const dict_index_t* index, /*!< in: index */ ulint n) /*!< in: column number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Looks for a matching field in an index. The column has to be the same. The column in index must be complete, or must contain a prefix longer than the @@ -1181,7 +1181,7 @@ dict_index_get_nth_field_pos( const dict_index_t* index, /*!< in: index from which to search */ const dict_index_t* index2, /*!< in: index */ ulint n) /*!< in: field number in index2 */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Looks for column n position in the clustered index. @return position in internal representation of the clustered index */ @@ -1191,7 +1191,7 @@ dict_table_get_nth_col_pos( /*=======================*/ const dict_table_t* table, /*!< in: table */ ulint n) /*!< in: column number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Returns the position of a system column in an index. @return position, ULINT_UNDEFINED if not contained */ @@ -1201,7 +1201,7 @@ dict_index_get_sys_col_pos( /*=======================*/ const dict_index_t* index, /*!< in: index */ ulint type) /*!< in: DATA_ROW_ID, ... */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Adds a column to index. */ UNIV_INTERN @@ -1212,7 +1212,7 @@ dict_index_add_col( const dict_table_t* table, /*!< in: table */ dict_col_t* col, /*!< in: column */ ulint prefix_len) /*!< in: column prefix length */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /*******************************************************************//** Copies types of fields contained in index to tuple. */ @@ -1224,7 +1224,7 @@ dict_index_copy_types( const dict_index_t* index, /*!< in: index */ ulint n_fields) /*!< in: number of field types to copy */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Gets the field column. 
@@ -1234,7 +1234,7 @@ const dict_col_t* dict_field_get_col( /*===============*/ const dict_field_t* field) /*!< in: index field */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_HOTBACKUP /**********************************************************************//** Returns an index object if it is found in the dictionary cache. @@ -1245,7 +1245,7 @@ dict_index_t* dict_index_get_if_in_cache_low( /*===========================*/ index_id_t index_id) /*!< in: index id */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Returns an index object if it is found in the dictionary cache. @@ -1255,7 +1255,7 @@ dict_index_t* dict_index_get_if_in_cache( /*=======================*/ index_id_t index_id) /*!< in: index id */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #ifdef UNIV_DEBUG /**********************************************************************//** @@ -1268,7 +1268,7 @@ dict_index_check_search_tuple( /*==========================*/ const dict_index_t* index, /*!< in: index tree */ const dtuple_t* tuple) /*!< in: tuple used in a search */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Whether and when to allow temporary index names */ enum check_name { /** Require all indexes to be complete. */ @@ -1288,7 +1288,7 @@ dict_table_check_for_dup_indexes( in this table */ enum check_name check) /*!< in: whether and when to allow temporary index names */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* UNIV_DEBUG */ /**********************************************************************//** Builds a node pointer out of a physical record and a page number. @@ -1306,7 +1306,7 @@ dict_index_build_node_ptr( created */ ulint level) /*!< in: level of rec in tree: 0 means leaf level */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Copies an initial segment of a physical record, long enough to specify an index entry uniquely. @@ -1322,7 +1322,7 @@ dict_index_copy_rec_order_prefix( byte** buf, /*!< in/out: memory buffer for the copied prefix, or NULL */ ulint* buf_size)/*!< in/out: buffer size */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Builds a typed data tuple out of a physical record. @return own: data tuple */ @@ -1334,7 +1334,7 @@ dict_index_build_data_tuple( rec_t* rec, /*!< in: record for which to build data tuple */ ulint n_fields,/*!< in: number of data fields */ mem_heap_t* heap) /*!< in: memory heap where tuple created */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the space id of the root of the index tree. @return space id */ @@ -1343,7 +1343,7 @@ ulint dict_index_get_space( /*=================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets the space id of the root of the index tree. 
*/ UNIV_INLINE @@ -1352,7 +1352,7 @@ dict_index_set_space( /*=================*/ dict_index_t* index, /*!< in/out: index */ ulint space) /*!< in: space id */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the page number of the root of the index tree. @return page number */ @@ -1361,7 +1361,7 @@ ulint dict_index_get_page( /*================*/ const dict_index_t* tree) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the read-write lock of the index tree. @return read-write lock */ @@ -1370,7 +1370,7 @@ prio_rw_lock_t* dict_index_get_lock( /*================*/ dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Returns free space reserved for future updates of records. This is relevant only in the case of many consecutive inserts, as updates @@ -1390,7 +1390,7 @@ enum online_index_status dict_index_get_online_status( /*=========================*/ const dict_index_t* index) /*!< in: secondary index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Sets the status of online index creation. */ UNIV_INLINE @@ -1399,7 +1399,7 @@ dict_index_set_online_status( /*=========================*/ dict_index_t* index, /*!< in/out: index */ enum online_index_status status) /*!< in: status */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Determines if a secondary index is being or has been created online, or if the table is being rebuilt online, allowing concurrent modifications @@ -1413,7 +1413,7 @@ bool dict_index_is_online_ddl( /*=====================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Calculates the minimum record length in an index. */ UNIV_INTERN @@ -1421,7 +1421,7 @@ ulint dict_index_calc_min_rec_len( /*========================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Reserves the dictionary system mutex for MySQL. 
*/ UNIV_INTERN @@ -1485,7 +1485,7 @@ dict_tables_have_same_db( dbname '/' tablename */ const char* name2) /*!< in: table name in the form dbname '/' tablename */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Removes an index from the cache */ UNIV_INTERN @@ -1494,7 +1494,7 @@ dict_index_remove_from_cache( /*=========================*/ dict_table_t* table, /*!< in/out: table */ dict_index_t* index) /*!< in, own: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Get index by name @return index, NULL if does not exist */ @@ -1504,7 +1504,7 @@ dict_table_get_index_on_name( /*=========================*/ dict_table_t* table, /*!< in: table */ const char* name) /*!< in: name of the index to find */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** In case there is more than one index with the same name return the index with the min(id). @@ -1515,7 +1515,7 @@ dict_table_get_index_on_name_and_min_id( /*====================================*/ dict_table_t* table, /*!< in: table */ const char* name) /*!< in: name of the index to find */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************** Check whether a column exists in an FTS index. */ UNIV_INLINE @@ -1526,7 +1526,7 @@ dict_table_is_fts_column( the offset within the vector */ ib_vector_t* indexes,/* in: vector containing only FTS indexes */ ulint col_no) /* in: col number to search for */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Move a table to the non LRU end of the LRU list. */ UNIV_INTERN @@ -1534,7 +1534,7 @@ void dict_table_move_from_lru_to_non_lru( /*================================*/ dict_table_t* table) /*!< in: table to move from LRU to non-LRU */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Move a table to the LRU list from the non-LRU list. */ UNIV_INTERN @@ -1542,7 +1542,7 @@ void dict_table_move_from_non_lru_to_lru( /*================================*/ dict_table_t* table) /*!< in: table to move from non-LRU to LRU */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Move to the most recently used segment of the LRU list. */ UNIV_INTERN @@ -1550,7 +1550,7 @@ void dict_move_to_mru( /*=============*/ dict_table_t* table) /*!< in: table to move to MRU */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Maximum number of columns in a foreign key constraint. Please Note MySQL has a much lower limit on the number of columns allowed in a foreign key @@ -1674,7 +1674,7 @@ dict_table_schema_check( != DB_TABLE_NOT_FOUND is returned */ size_t errstr_sz) /*!< in: errstr size */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /* @} */ /*********************************************************************//** @@ -1692,7 +1692,7 @@ dict_fs2utf8( size_t db_utf8_size, /*!< in: dbname_utf8 size */ char* table_utf8, /*!< out: table name, e.g. 
aюbØc */ size_t table_utf8_size)/*!< in: table_utf8 size */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Closes the data dictionary module. */ @@ -1709,7 +1709,7 @@ ulint dict_table_is_corrupted( /*====================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Check whether the index is corrupted. @@ -1719,7 +1719,7 @@ ulint dict_index_is_corrupted( /*====================*/ const dict_index_t* index) /*!< in: index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -1732,7 +1732,7 @@ dict_set_corrupted( dict_index_t* index, /*!< in/out: index */ trx_t* trx, /*!< in/out: transaction */ const char* ctx) /*!< in: context */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Flags an index corrupted in the data dictionary cache only. This @@ -1744,7 +1744,7 @@ dict_set_corrupted_index_cache_only( /*================================*/ dict_index_t* index, /*!< in/out: index */ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Flags a table with specified space_id corrupted in the table dictionary @@ -1764,7 +1764,7 @@ bool dict_tf_is_valid( /*=============*/ ulint flags) /*!< in: table flags */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /********************************************************************//** Check if the tablespace for the table has been discarded. @@ -1774,7 +1774,7 @@ bool dict_table_is_discarded( /*====================*/ const dict_table_t* table) /*!< in: table to check */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Check if it is a temporary table. @@ -1784,7 +1784,7 @@ bool dict_table_is_temporary( /*====================*/ const dict_table_t* table) /*!< in: table to check */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #ifndef UNIV_HOTBACKUP /*********************************************************************//** @@ -1795,7 +1795,7 @@ void dict_index_zip_success( /*===================*/ dict_index_t* index) /*!< in/out: index to be updated. */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** This function should be called whenever a page compression attempt fails. Updates the compression padding information. */ @@ -1804,7 +1804,7 @@ void dict_index_zip_failure( /*===================*/ dict_index_t* index) /*!< in/out: index to be updated. */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Return the optimal page size, for which page will likely compress.
@return page size beyond which page may not compress*/ @@ -1814,7 +1814,7 @@ dict_index_zip_pad_optimal_page_size( /*=================================*/ dict_index_t* index) /*!< in: index for which page size is requested */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Convert table flag to row format string. @return row format name */ diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic index 753f10aba86..58a9ef4d65d 100644 --- a/storage/xtradb/include/dict0dict.ic +++ b/storage/xtradb/include/dict0dict.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -80,7 +80,8 @@ dict_col_copy_type( const dict_col_t* col, /*!< in: column */ dtype_t* type) /*!< out: data type */ { - ut_ad(col && type); + ut_ad(col != NULL); + ut_ad(type != NULL); type->mtype = col->mtype; type->prtype = col->prtype; @@ -357,7 +358,7 @@ UNIV_INLINE ulint dict_table_get_n_sys_cols( /*======================*/ - const dict_table_t* table __attribute__((unused))) /*!< in: table */ + const dict_table_t* table MY_ATTRIBUTE((unused))) /*!< in: table */ { ut_ad(table); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); diff --git a/storage/xtradb/include/dict0load.h b/storage/xtradb/include/dict0load.h index 030190b1a8e..dcbc3de8e94 100644 --- a/storage/xtradb/include/dict0load.h +++ b/storage/xtradb/include/dict0load.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -243,7 +243,7 @@ dict_load_foreigns( bool check_charsets, /*!< in: whether to check charset compatibility */ dict_err_ignore_t ignore_err) /*!< in: error to be ignored */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /********************************************************************//** Prints to the standard output information on all tables found in the data dictionary system table. */ diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h index 754c3810a84..a64cb71370e 100644 --- a/storage/xtradb/include/dict0mem.h +++ b/storage/xtradb/include/dict0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -155,19 +155,19 @@ allows InnoDB to update_create_info() accordingly. 
*/ /** Bit mask of the COMPACT field */ #define DICT_TF_MASK_COMPACT \ - ((~(~0 << DICT_TF_WIDTH_COMPACT)) \ + ((~(~0U << DICT_TF_WIDTH_COMPACT)) \ << DICT_TF_POS_COMPACT) /** Bit mask of the ZIP_SSIZE field */ #define DICT_TF_MASK_ZIP_SSIZE \ - ((~(~0 << DICT_TF_WIDTH_ZIP_SSIZE)) \ + ((~(~0U << DICT_TF_WIDTH_ZIP_SSIZE)) \ << DICT_TF_POS_ZIP_SSIZE) /** Bit mask of the ATOMIC_BLOBS field */ #define DICT_TF_MASK_ATOMIC_BLOBS \ - ((~(~0 << DICT_TF_WIDTH_ATOMIC_BLOBS)) \ + ((~(~0U << DICT_TF_WIDTH_ATOMIC_BLOBS)) \ << DICT_TF_POS_ATOMIC_BLOBS) /** Bit mask of the DATA_DIR field */ #define DICT_TF_MASK_DATA_DIR \ - ((~(~0 << DICT_TF_WIDTH_DATA_DIR)) \ + ((~(~0U << DICT_TF_WIDTH_DATA_DIR)) \ << DICT_TF_POS_DATA_DIR) /** Return the value of the COMPACT field */ @@ -182,7 +182,7 @@ allows InnoDB to update_create_info() accordingly. */ #define DICT_TF_HAS_ATOMIC_BLOBS(flags) \ ((flags & DICT_TF_MASK_ATOMIC_BLOBS) \ >> DICT_TF_POS_ATOMIC_BLOBS) -/** Return the value of the ATOMIC_BLOBS field */ +/** Return the value of the DATA_DIR field */ #define DICT_TF_HAS_DATA_DIR(flags) \ ((flags & DICT_TF_MASK_DATA_DIR) \ >> DICT_TF_POS_DATA_DIR) @@ -203,7 +203,7 @@ for unknown bits in order to protect backward incompatibility. */ /* @{ */ /** Total number of bits in table->flags2. */ #define DICT_TF2_BITS 7 -#define DICT_TF2_BIT_MASK ~(~0 << DICT_TF2_BITS) +#define DICT_TF2_BIT_MASK ~(~0U << DICT_TF2_BITS) /** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */ #define DICT_TF2_TEMPORARY 1 @@ -283,7 +283,7 @@ dict_mem_table_add_col( ulint mtype, /*!< in: main datatype */ ulint prtype, /*!< in: precise type */ ulint len) /*!< in: precision */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /**********************************************************************//** Renames a column of a table in the data dictionary cache. */ UNIV_INTERN @@ -294,7 +294,7 @@ dict_mem_table_col_rename( unsigned nth_col,/*!< in: column index */ const char* from, /*!< in: old column name */ const char* to) /*!< in: new column name */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** This function populates a dict_col_t memory structure with supplied information. */ diff --git a/storage/xtradb/include/dict0stats.h b/storage/xtradb/include/dict0stats.h index 186f90e3694..35ee1a00d8a 100644 --- a/storage/xtradb/include/dict0stats.h +++ b/storage/xtradb/include/dict0stats.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2009, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -77,7 +77,7 @@ dict_stats_set_persistent( dict_table_t* table, /*!< in/out: table */ ibool ps_on, /*!< in: persistent stats explicitly enabled */ ibool ps_off) /*!< in: persistent stats explicitly disabled */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Check whether persistent statistics is enabled for a given table. 
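/*
 * The dict0mem.h bit-mask macros above (and the fsp0fsp.h masks further
 * below) also change ~0 to ~0U.  ~0 is the signed int value -1, and
 * left-shifting a negative value is undefined behaviour in C (and in C++
 * before C++20), which newer compilers flag with -Wshift-negative-value;
 * with ~0U the whole mask computation stays in unsigned arithmetic.  The
 * following is a compilable toy version of the same idiom, with made-up
 * width/position values standing in for the DICT_TF constants.
 */
#include <stdio.h>

#define EXAMPLE_WIDTH	4	/* hypothetical field width in bits */
#define EXAMPLE_POS	1	/* hypothetical field position      */

/* Old spelling: (~(~0 << EXAMPLE_WIDTH)) << EXAMPLE_POS -- the inner shift
operates on the negative value -1. */

/* New spelling: every operand is unsigned, so both shifts are well defined. */
#define EXAMPLE_MASK	((~(~0U << EXAMPLE_WIDTH)) << EXAMPLE_POS)

int main(void)
{
	printf("0x%x\n", EXAMPLE_MASK);	/* prints 0x1e: four bits starting at position 1 */
	return 0;
}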
@@ -87,7 +87,7 @@ ibool dict_stats_is_persistent_enabled( /*=============================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Set the auto recalc flag for a given table (only honored for a persistent @@ -127,7 +127,7 @@ void dict_stats_deinit( /*==============*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Calculates new estimates for table and index statistics. The statistics @@ -179,7 +179,7 @@ void dict_stats_update_for_index( /*========================*/ dict_index_t* index) /*!< in/out: index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Renames a table in InnoDB persistent stats storage. diff --git a/storage/xtradb/include/dict0stats_bg.h b/storage/xtradb/include/dict0stats_bg.h index e866ab419fe..82cd2b468b9 100644 --- a/storage/xtradb/include/dict0stats_bg.h +++ b/storage/xtradb/include/dict0stats_bg.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -74,7 +74,7 @@ bool dict_stats_stop_bg( /*===============*/ dict_table_t* table) /*!< in/out: table */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*****************************************************************//** Wait until background stats thread has stopped using the specified table. diff --git a/storage/xtradb/include/dyn0dyn.h b/storage/xtradb/include/dyn0dyn.h index 7f23302d1ff..1bd10b6bf58 100644 --- a/storage/xtradb/include/dyn0dyn.h +++ b/storage/xtradb/include/dyn0dyn.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,7 +48,7 @@ dyn_array_create( /*=============*/ dyn_array_t* arr) /*!< in/out memory buffer of size sizeof(dyn_array_t) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /************************************************************//** Frees a dynamic array. */ UNIV_INLINE @@ -56,7 +56,7 @@ void dyn_array_free( /*===========*/ dyn_array_t* arr) /*!< in,own: dyn array */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Makes room on top of a dyn array and returns a pointer to a buffer in it. After copying the elements, the caller must close the buffer using @@ -69,7 +69,7 @@ dyn_array_open( dyn_array_t* arr, /*!< in: dynamic array */ ulint size) /*!< in: size in bytes of the buffer; MUST be smaller than DYN_ARRAY_DATA_SIZE! 
*/ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Closes the buffer returned by dyn_array_open. */ UNIV_INLINE @@ -78,7 +78,7 @@ dyn_array_close( /*============*/ dyn_array_t* arr, /*!< in: dynamic array */ const byte* ptr) /*!< in: end of used space */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Makes room on top of a dyn array and returns a pointer to the added element. The caller must copy the element to @@ -90,7 +90,7 @@ dyn_array_push( /*===========*/ dyn_array_t* arr, /*!< in/out: dynamic array */ ulint size) /*!< in: size in bytes of the element */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /************************************************************//** Returns pointer to an element in dyn array. @return pointer to element */ @@ -101,7 +101,7 @@ dyn_array_get_element( const dyn_array_t* arr, /*!< in: dyn array */ ulint pos) /*!< in: position of element in bytes from array start */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /************************************************************//** Returns the size of stored data in a dyn array. @return data size in bytes */ @@ -110,7 +110,7 @@ ulint dyn_array_get_data_size( /*====================*/ const dyn_array_t* arr) /*!< in: dyn array */ - __attribute__((nonnull, warn_unused_result, pure)); + MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); /************************************************************//** Gets the first block in a dyn array. @param arr dyn array @@ -144,7 +144,7 @@ ulint dyn_block_get_used( /*===============*/ const dyn_block_t* block) /*!< in: dyn array block */ - __attribute__((nonnull, warn_unused_result, pure)); + MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); /********************************************************************//** Gets pointer to the start of data in a dyn array block. @return pointer to data */ @@ -153,7 +153,7 @@ byte* dyn_block_get_data( /*===============*/ const dyn_block_t* block) /*!< in: dyn array block */ - __attribute__((nonnull, warn_unused_result, pure)); + MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); /********************************************************//** Pushes n bytes to a dyn array. */ UNIV_INLINE @@ -163,7 +163,7 @@ dyn_push_string( dyn_array_t* arr, /*!< in/out: dyn array */ const byte* str, /*!< in: string to write */ ulint len) /*!< in: string length */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*#################################################################*/ diff --git a/storage/xtradb/include/dyn0dyn.ic b/storage/xtradb/include/dyn0dyn.ic index 0296554e2ee..f18f2e6dff9 100644 --- a/storage/xtradb/include/dyn0dyn.ic +++ b/storage/xtradb/include/dyn0dyn.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,7 +36,7 @@ dyn_block_t* dyn_array_add_block( /*================*/ dyn_array_t* arr) /*!< in/out: dyn array */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the number of used bytes in a dyn array block. diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 29d3ed98779..14dfa296435 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -231,7 +231,7 @@ fil_node_create( ulint id, /*!< in: space id where to append */ ibool is_raw) /*!< in: TRUE if a raw device or a raw disk partition */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifdef UNIV_LOG_ARCHIVE /****************************************************************//** Drops files from the start of a file space, so that its size is cut by @@ -400,7 +400,7 @@ fil_read_first_page( lsn values in data files */ lsn_t* max_flushed_lsn) /*!< out: max of flushed lsn values in data files */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*******************************************************************//** Increments the count of pending operation, if space is not being deleted. @return TRUE if being deleted, and operation should be skipped */ @@ -488,7 +488,7 @@ dberr_t fil_discard_tablespace( /*===================*/ ulint id) /*!< in: space id */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /** Test if a tablespace file can be renamed to a new filepath by checking @@ -597,7 +597,7 @@ fil_create_new_single_table_tablespace( ulint size) /*!< in: the initial size of the tablespace file in pages, must be >= FIL_IBD_FILE_INITIAL_SIZE */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_HOTBACKUP /********************************************************************//** Tries to open a single-table tablespace and optionally checks the space id is @@ -631,7 +631,7 @@ fil_open_single_table_tablespace( const char* tablename, /*!< in: table name in the databasename/tablename format */ const char* filepath) /*!< in: tablespace filepath */ - __attribute__((nonnull(5), warn_unused_result)); + MY_ATTRIBUTE((nonnull(5), warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /********************************************************************//** @@ -784,7 +784,7 @@ _fil_io( void* message, /*!< in: message for aio handler if non-sync aio used, else ignored */ trx_t* trx) - __attribute__((nonnull(8))); + MY_ATTRIBUTE((nonnull(8))); /**********************************************************************//** Waits for an aio operation to complete. This function is used to write the handler for completed requests. 
The aio array of pending requests is divided @@ -979,7 +979,7 @@ fil_tablespace_iterate( dict_table_t* table, ulint n_io_buffers, PageCallback& callback) - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Checks if a single-table tablespace for a given table name exists in the @@ -1003,7 +1003,7 @@ fil_get_space_names( /*================*/ space_name_list_t& space_name_list) /*!< in/out: Vector for collecting the names. */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /** Generate redo log for swapping two .ibd files @param[in] old_table old table @@ -1018,7 +1018,7 @@ fil_mtr_rename_log( const dict_table_t* new_table, const char* tmp_name, mtr_t* mtr) - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Finds the given page_no of the given space id from the double write buffer, diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h index a587ccc9f20..099cb8edc14 100644 --- a/storage/xtradb/include/fsp0fsp.h +++ b/storage/xtradb/include/fsp0fsp.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -83,23 +83,23 @@ is found in a remote location, not the default data directory. */ /** Bit mask of the POST_ANTELOPE field */ #define FSP_FLAGS_MASK_POST_ANTELOPE \ - ((~(~0 << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \ + ((~(~0U << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \ << FSP_FLAGS_POS_POST_ANTELOPE) /** Bit mask of the ZIP_SSIZE field */ #define FSP_FLAGS_MASK_ZIP_SSIZE \ - ((~(~0 << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \ + ((~(~0U << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \ << FSP_FLAGS_POS_ZIP_SSIZE) /** Bit mask of the ATOMIC_BLOBS field */ #define FSP_FLAGS_MASK_ATOMIC_BLOBS \ - ((~(~0 << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \ + ((~(~0U << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \ << FSP_FLAGS_POS_ATOMIC_BLOBS) /** Bit mask of the PAGE_SSIZE field */ #define FSP_FLAGS_MASK_PAGE_SSIZE \ - ((~(~0 << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \ + ((~(~0U << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \ << FSP_FLAGS_POS_PAGE_SSIZE) /** Bit mask of the DATA_DIR field */ #define FSP_FLAGS_MASK_DATA_DIR \ - ((~(~0 << FSP_FLAGS_WIDTH_DATA_DIR)) \ + ((~(~0U << FSP_FLAGS_WIDTH_DATA_DIR)) \ << FSP_FLAGS_POS_DATA_DIR) /** Return the value of the POST_ANTELOPE field */ @@ -510,7 +510,7 @@ fseg_alloc_free_page_general( in which the page should be initialized. If init_mtr!=mtr, but the page is already latched in mtr, do not initialize the page. */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /**********************************************************************//** Reserves free pages from a tablespace. 
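The fsp0fsp.h hunks above also switch the FSP_FLAGS_MASK_* macros from ~0 to ~0U. Left-shifting the negative value ~0 (that is, -1) is undefined behaviour in C and in pre-C++20 C++, and newer compilers warn about it (e.g. GCC's -Wshift-negative-value); starting from ~0U keeps every intermediate value unsigned. A small self-contained illustration with made-up width and position values:

    #include <cstdio>

    /* Invented field layout, for illustration only. */
    #define EX_FLAGS_WIDTH_ZIP_SSIZE  4U
    #define EX_FLAGS_POS_ZIP_SSIZE    1U

    /* ~0U shifted by the width, inverted and moved into position: every
       intermediate value stays unsigned, unlike the old ~0 variant. */
    #define EX_FLAGS_MASK_ZIP_SSIZE \
            ((~(~0U << EX_FLAGS_WIDTH_ZIP_SSIZE)) << EX_FLAGS_POS_ZIP_SSIZE)

    int main()
    {
            std::printf("mask = 0x%x\n", EX_FLAGS_MASK_ZIP_SSIZE);  /* prints 0x1e */
            return 0;
    }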
All mini-transactions which may use several pages from the tablespace should call this function beforehand @@ -579,7 +579,7 @@ fseg_page_is_free( fseg_header_t* seg_header, /*!< in: segment header */ ulint space, /*!< in: space id */ ulint page) /*!< in: page offset */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Frees part of a segment. This function can be used to free a segment by repeatedly calling this function in different mini-transactions. @@ -675,7 +675,7 @@ bool fsp_flags_is_valid( /*===============*/ ulint flags) /*!< in: tablespace flags */ - __attribute__((warn_unused_result, const)); + MY_ATTRIBUTE((warn_unused_result, const)); /********************************************************************//** Determine if the tablespace is compressed from dict_table_t::flags. @return TRUE if compressed, FALSE if not compressed */ diff --git a/storage/xtradb/include/fts0ast.h b/storage/xtradb/include/fts0ast.h index b2380f78b39..50f62063893 100644 --- a/storage/xtradb/include/fts0ast.h +++ b/storage/xtradb/include/fts0ast.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -200,7 +200,7 @@ fts_ast_visit( and ignored processing an operator, currently we only ignore FTS_IGNORE operator */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Process (nested) sub-expression, create a new result set to store the sub-expression result by processing nodes under current sub-expression @@ -213,7 +213,7 @@ fts_ast_visit_sub_exp( fts_ast_node_t* node, /*!< in: instance to traverse*/ fts_ast_callback visitor, /*!< in: callback */ void* arg) /*!< in: callback arg */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************** Create a lex instance.*/ UNIV_INTERN @@ -223,7 +223,7 @@ fts_lexer_create( ibool boolean_mode, /*!< in: query type */ const byte* query, /*!< in: query string */ ulint query_len) /*!< in: query string len */ - __attribute__((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); /******************************************************************** Free an fts_lexer_t instance.*/ UNIV_INTERN @@ -232,7 +232,7 @@ fts_lexer_free( /*===========*/ fts_lexer_t* fts_lexer) /*!< in: lexer instance to free */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Create an ast string object, with NUL-terminator, so the string diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h index 9f7b0216d9b..68d4d333245 100644 --- a/storage/xtradb/include/fts0fts.h +++ b/storage/xtradb/include/fts0fts.h @@ -94,7 +94,10 @@ those defined in mysql file ft_global.h */ /** Threshold where our optimize thread automatically kicks in */ #define FTS_OPTIMIZE_THRESHOLD 10000000 -#define FTS_DOC_ID_MAX_STEP 10000 +/** Threshold to avoid exhausting of doc ids. 
Consecutive doc id difference +should not exceed FTS_DOC_ID_MAX_STEP */ +#define FTS_DOC_ID_MAX_STEP 65535 + /** Variable specifying the FTS parallel sort degree */ extern ulong fts_sort_pll_degree; @@ -408,7 +411,7 @@ fts_get_next_doc_id( /*================*/ const dict_table_t* table, /*!< in: table */ doc_id_t* doc_id) /*!< out: new document id */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Update the next and last Doc ID in the CONFIG table to be the input "doc_id" value (+ 1). We would do so after each FTS index build or @@ -421,7 +424,7 @@ fts_update_next_doc_id( const dict_table_t* table, /*!< in: table */ const char* table_name, /*!< in: table name, or NULL */ doc_id_t doc_id) /*!< in: DOC ID to set */ - __attribute__((nonnull(2))); + MY_ATTRIBUTE((nonnull(2))); /******************************************************************//** Create a new document id . @@ -437,7 +440,7 @@ fts_create_doc_id( current row that is being inserted. */ mem_heap_t* heap) /*!< in: heap */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Create a new fts_doc_ids_t. @return new fts_doc_ids_t. */ @@ -466,7 +469,7 @@ fts_trx_add_op( fts_row_state state, /*!< in: state of the row */ ib_vector_t* fts_indexes) /*!< in: FTS indexes affected (NULL=all) */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /******************************************************************//** Free an FTS trx. */ @@ -491,7 +494,7 @@ fts_create_common_tables( index */ const char* name, /*!< in: table name */ bool skip_doc_id_index) /*!< in: Skip index on doc id */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Wrapper function of fts_create_index_tables_low(), create auxiliary tables for an FTS index @@ -503,7 +506,7 @@ fts_create_index_tables( trx_t* trx, /*!< in: transaction handle */ const dict_index_t* index) /*!< in: the FTS index instance */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Creates the column specific ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have @@ -519,7 +522,7 @@ fts_create_index_tables_low( instance */ const char* table_name, /*!< in: the table name */ table_id_t table_id) /*!< in: the table id */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Add the FTS document id hidden column. 
*/ UNIV_INTERN @@ -528,7 +531,7 @@ fts_add_doc_id_column( /*==================*/ dict_table_t* table, /*!< in/out: Table with FTS index */ mem_heap_t* heap) /*!< in: temporary memory heap, or NULL */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*********************************************************************//** Drops the ancillary tables needed for supporting an FTS index on the @@ -542,7 +545,7 @@ fts_drop_tables( trx_t* trx, /*!< in: transaction */ dict_table_t* table) /*!< in: table has the FTS index */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** The given transaction is about to be committed; do whatever is necessary from the FTS system's POV. @@ -552,7 +555,7 @@ dberr_t fts_commit( /*=======*/ trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** FTS Query entry point. @@ -569,7 +572,7 @@ fts_query( in bytes */ fts_result_t** result) /*!< out: query result, to be freed by the caller.*/ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Retrieve the FTS Relevance Ranking result for doc with doc_id @@ -687,7 +690,7 @@ dberr_t fts_optimize_table( /*===============*/ dict_table_t* table) /*!< in: table to optimiza */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Startup the optimize thread and create the work queue. */ @@ -713,7 +716,7 @@ fts_drop_index_tables( /*==================*/ trx_t* trx, /*!< in: transaction */ dict_index_t* index) /*!< in: Index to drop */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Remove the table from the OPTIMIZER's list. We do wait for @@ -754,7 +757,7 @@ fts_savepoint_take( trx_t* trx, /*!< in: transaction */ fts_trx_t* fts_trx, /*!< in: fts transaction */ const char* name) /*!< in: savepoint name */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Refresh last statement savepoint. */ UNIV_INTERN @@ -762,7 +765,7 @@ void fts_savepoint_laststmt_refresh( /*===========================*/ trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Release the savepoint data identified by name. */ UNIV_INTERN @@ -780,13 +783,12 @@ fts_cache_destroy( /*==============*/ fts_cache_t* cache); /*!< in: cache*/ -/*********************************************************************//** -Clear cache. */ +/** Clear cache. +@param[in,out] cache fts cache */ UNIV_INTERN void fts_cache_clear( -/*============*/ - fts_cache_t* cache); /*!< in: cache */ + fts_cache_t* cache); /*********************************************************************//** Initialize things in cache. 
*/ @@ -831,7 +833,7 @@ fts_drop_index_split_tables( /*========================*/ trx_t* trx, /*!< in: transaction */ dict_index_t* index) /*!< in: fts instance */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Run SYNC on the table, i.e., write out data from the cache to the FTS auxiliary INDEX table and clear the cache at the end. @@ -1023,7 +1025,7 @@ fts_drop_index( dict_table_t* table, /*!< in: Table where indexes are dropped */ dict_index_t* index, /*!< in: Index to be dropped */ trx_t* trx) /*!< in: Transaction for the drop */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /****************************************************************//** Rename auxiliary tables for all fts index for a table diff --git a/storage/xtradb/include/fts0priv.h b/storage/xtradb/include/fts0priv.h index b4d9e1d41ec..2d4e9d88fd1 100644 --- a/storage/xtradb/include/fts0priv.h +++ b/storage/xtradb/include/fts0priv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -121,7 +121,7 @@ fts_parse_sql( fts_table_t* fts_table, /*!< in: FTS aux table */ pars_info_t* info, /*!< in: info struct, or NULL */ const char* sql) /*!< in: SQL string to evaluate */ - __attribute__((nonnull(3), malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result)); /******************************************************************//** Evaluate a parsed SQL statement @return DB_SUCCESS or error code */ @@ -131,7 +131,7 @@ fts_eval_sql( /*=========*/ trx_t* trx, /*!< in: transaction */ que_t* graph) /*!< in: Parsed statement */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Construct the name of an ancillary FTS table for the given table. @return own: table name, must be freed with mem_free() */ @@ -141,7 +141,7 @@ fts_get_table_name( /*===============*/ const fts_table_t* fts_table) /*!< in: FTS aux table info */ - __attribute__((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); /******************************************************************//** Construct the column specification part of the SQL string for selecting the indexed FTS columns for the given table. 
Adds the necessary bound @@ -164,7 +164,7 @@ fts_get_select_columns_str( dict_index_t* index, /*!< in: FTS index */ pars_info_t* info, /*!< in/out: parser info */ mem_heap_t* heap) /*!< in: memory heap */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** define for fts_doc_fetch_by_doc_id() "option" value, defines whether we want to get Doc whose ID is equal to or greater or smaller than supplied @@ -191,7 +191,7 @@ fts_doc_fetch_by_doc_id( callback, /*!< in: callback to read records */ void* arg) /*!< in: callback arg */ - __attribute__((nonnull(6))); + MY_ATTRIBUTE((nonnull(6))); /*******************************************************************//** Callback function for fetch that stores the text of an FTS document, @@ -203,7 +203,7 @@ fts_query_expansion_fetch_doc( /*==========================*/ void* row, /*!< in: sel_node_t* */ void* user_arg) /*!< in: fts_doc_t* */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************** Write out a single word's data as new entry/entries in the INDEX table. @return DB_SUCCESS if all OK. */ @@ -216,7 +216,7 @@ fts_write_node( fts_table_t* fts_table, /*!< in: the FTS aux index */ fts_string_t* word, /*!< in: word in UTF-8 */ fts_node_t* node) /*!< in: node columns */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Tokenize a document. */ UNIV_INTERN @@ -227,7 +227,7 @@ fts_tokenize_document( tokenize */ fts_doc_t* result) /*!< out: if provided, save result tokens here */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*******************************************************************//** Continue to tokenize a document. */ @@ -241,7 +241,7 @@ fts_tokenize_document_next( tokens from this tokenization */ fts_doc_t* result) /*!< out: if provided, save result tokens here */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /******************************************************************//** Initialize a document. */ UNIV_INTERN @@ -249,7 +249,7 @@ void fts_doc_init( /*=========*/ fts_doc_t* doc) /*!< in: doc to initialize */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Do a binary search for a doc id in the array @@ -263,7 +263,7 @@ fts_bsearch( int lower, /*!< in: lower bound of array*/ int upper, /*!< in: upper bound of array*/ doc_id_t doc_id) /*!< in: doc id to lookup */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Free document. 
*/ UNIV_INTERN @@ -271,7 +271,7 @@ void fts_doc_free( /*=========*/ fts_doc_t* doc) /*!< in: document */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Free fts_optimizer_word_t instanace.*/ UNIV_INTERN @@ -279,7 +279,7 @@ void fts_word_free( /*==========*/ fts_word_t* word) /*!< in: instance to free.*/ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Read the rows from the FTS inde @return DB_SUCCESS or error code */ @@ -293,7 +293,7 @@ fts_index_fetch_nodes( const fts_string_t* word, /*!< in: the word to fetch */ fts_fetch_t* fetch) /*!< in: fetch callback.*/ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Create a fts_optimizer_word_t instance. @return new instance */ @@ -304,7 +304,7 @@ fts_word_init( fts_word_t* word, /*!< in: word to initialize */ byte* utf8, /*!< in: UTF-8 string */ ulint len) /*!< in: length of string in bytes */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Compare two fts_trx_table_t instances, we actually compare the table id's here. @@ -315,7 +315,7 @@ fts_trx_table_cmp( /*==============*/ const void* v1, /*!< in: id1 */ const void* v2) /*!< in: id2 */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Compare a table id with a trx_table_t table id. @return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ @@ -325,7 +325,7 @@ fts_trx_table_id_cmp( /*=================*/ const void* p1, /*!< in: id1 */ const void* p2) /*!< in: id2 */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Commit a transaction. @return DB_SUCCESS if all OK */ @@ -334,7 +334,7 @@ dberr_t fts_sql_commit( /*===========*/ trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Rollback a transaction. @return DB_SUCCESS if all OK */ @@ -343,7 +343,7 @@ dberr_t fts_sql_rollback( /*=============*/ trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Parse an SQL string. %s is replaced with the table's id. Don't acquire the dict mutex @@ -355,7 +355,7 @@ fts_parse_sql_no_dict_lock( fts_table_t* fts_table, /*!< in: table with FTS index */ pars_info_t* info, /*!< in: parser info */ const char* sql) /*!< in: SQL string to evaluate */ - __attribute__((nonnull(3), malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result)); /******************************************************************//** Get value from config table. The caller must ensure that enough space is allocated for value to hold the column contents @@ -370,7 +370,7 @@ fts_config_get_value( this parameter name */ fts_string_t* value) /*!< out: value read from config table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Get value specific to an FTS index from the config table. 
The caller must ensure that enough space is allocated for value to hold the @@ -386,7 +386,7 @@ fts_config_get_index_value( this parameter name */ fts_string_t* value) /*!< out: value read from config table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Set the value in the config table for name. @return DB_SUCCESS or error code */ @@ -400,7 +400,7 @@ fts_config_set_value( this parameter name */ const fts_string_t* value) /*!< in: value to update */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /****************************************************************//** Set an ulint value in the config table. @return DB_SUCCESS if all OK else error code */ @@ -412,7 +412,7 @@ fts_config_set_ulint( fts_table_t* fts_table, /*!< in: the indexed FTS table */ const char* name, /*!< in: param name */ ulint int_value) /*!< in: value */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Set the value specific to an FTS index in the config table. @return DB_SUCCESS or error code */ @@ -426,7 +426,7 @@ fts_config_set_index_value( this parameter name */ fts_string_t* value) /*!< out: value read from config table */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Increment the value in the config table for column name. @return DB_SUCCESS or error code */ @@ -439,7 +439,7 @@ fts_config_increment_value( const char* name, /*!< in: increment config value for this parameter name */ ulint delta) /*!< in: increment by this much */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Increment the per index value in the config table for column name. @return DB_SUCCESS or error code */ @@ -452,7 +452,7 @@ fts_config_increment_index_value( const char* name, /*!< in: increment config value for this parameter name */ ulint delta) /*!< in: increment by this much */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Get an ulint value from the config table. @return DB_SUCCESS or error code */ @@ -464,7 +464,7 @@ fts_config_get_index_ulint( dict_index_t* index, /*!< in: FTS index */ const char* name, /*!< in: param name */ ulint* int_value) /*!< out: value */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Set an ulint value int the config table. @return DB_SUCCESS or error code */ @@ -476,7 +476,7 @@ fts_config_set_index_ulint( dict_index_t* index, /*!< in: FTS index */ const char* name, /*!< in: param name */ ulint int_value) /*!< in: value */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Get an ulint value from the config table. 
@return DB_SUCCESS or error code */ @@ -488,7 +488,7 @@ fts_config_get_ulint( fts_table_t* fts_table, /*!< in: the indexed FTS table */ const char* name, /*!< in: param name */ ulint* int_value) /*!< out: value */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Search cache for word. @return the word node vector if found else NULL */ @@ -500,7 +500,7 @@ fts_cache_find_word( index_cache, /*!< in: cache to search */ const fts_string_t* text) /*!< in: word to search for */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Check cache for deleted doc id. @return TRUE if deleted */ @@ -511,7 +511,7 @@ fts_cache_is_deleted_doc_id( const fts_cache_t* cache, /*!< in: cache ito search */ doc_id_t doc_id) /*!< in: doc id to search for */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Append deleted doc ids to vector and sort the vector. */ UNIV_INTERN @@ -546,7 +546,7 @@ fts_get_total_word_count( trx_t* trx, /*!< in: transaction */ dict_index_t* index, /*!< in: for this index */ ulint* total) /*!< out: total words */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /******************************************************************//** Search the index specific cache for a particular FTS index. @@ -559,7 +559,7 @@ fts_find_index_cache( cache, /*!< in: cache to search */ const dict_index_t* index) /*!< in: index to search for */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Write the table id to the given buffer (including final NUL). Buffer must be at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long. @@ -570,10 +570,10 @@ fts_write_object_id( /*================*/ ib_id_t id, /*!< in: a table/index id */ char* str, /*!< in: buffer to write the id to */ - bool hex_format __attribute__((unused))) + bool hex_format MY_ATTRIBUTE((unused))) /*!< in: true for fixed hex format, false for old ambiguous format */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Read the table id from the string generated by fts_write_object_id(). @return TRUE if parse successful */ @@ -583,7 +583,7 @@ fts_read_object_id( /*===============*/ ib_id_t* id, /*!< out: a table id */ const char* str) /*!< in: buffer to read from */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Get the table id. @return number of bytes written */ @@ -596,7 +596,7 @@ fts_get_table_id( char* table_id) /*!< out: table id, must be at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Add the table to add to the OPTIMIZER's list. */ UNIV_INTERN @@ -604,7 +604,7 @@ void fts_optimize_add_table( /*===================*/ dict_table_t* table) /*!< in: table to add */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Optimize a table. 
*/ UNIV_INTERN @@ -612,7 +612,7 @@ void fts_optimize_do_table( /*==================*/ dict_table_t* table) /*!< in: table to optimize */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Construct the prefix name of an FTS table. @return own: table name, must be freed with mem_free() */ @@ -622,7 +622,7 @@ fts_get_table_name_prefix( /*======================*/ const fts_table_t* fts_table) /*!< in: Auxiliary table type */ - __attribute__((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); /******************************************************************//** Add node positions. */ UNIV_INTERN @@ -633,7 +633,7 @@ fts_cache_node_add_positions( fts_node_t* node, /*!< in: word node */ doc_id_t doc_id, /*!< in: doc id */ ib_vector_t* positions) /*!< in: fts_token_t::positions */ - __attribute__((nonnull(2,4))); + MY_ATTRIBUTE((nonnull(2,4))); /******************************************************************//** Create the config table name for retrieving index specific value. @@ -644,7 +644,7 @@ fts_config_create_index_param_name( /*===============================*/ const char* param, /*!< in: base name of param */ const dict_index_t* index) /*!< in: index for config */ - __attribute__((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); #ifndef UNIV_NONINL #include "fts0priv.ic" diff --git a/storage/xtradb/include/fts0priv.ic b/storage/xtradb/include/fts0priv.ic index 2d07c60f980..88f2d67c7b8 100644 --- a/storage/xtradb/include/fts0priv.ic +++ b/storage/xtradb/include/fts0priv.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -33,7 +33,7 @@ fts_write_object_id( /*================*/ ib_id_t id, /* in: a table/index id */ char* str, /* in: buffer to write the id to */ - bool hex_format __attribute__((unused))) + bool hex_format MY_ATTRIBUTE((unused))) /* in: true for fixed hex format, false for old ambiguous format */ { @@ -53,7 +53,7 @@ fts_write_object_id( /* Use this to construct old(5.6.14 and 5.7.3) windows ambiguous aux table names */ DBUG_EXECUTE_IF("innodb_test_wrong_windows_fts_aux_table_name", - return(sprintf(str, "%016"PRIu64, id));); + return(sprintf(str, "%016" PRIu64, id));); DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name", return(sprintf(str, UINT64PFx, id));); @@ -66,7 +66,7 @@ fts_write_object_id( // FIXME: Use ut_snprintf(), so does following one. return(sprintf(str, "%016llu", id)); #else /* _WIN32 */ - return(sprintf(str, "%016"PRIu64, id)); + return(sprintf(str, "%016" PRIu64, id)); #endif /* _WIN32 */ } diff --git a/storage/xtradb/include/ha_prototypes.h b/storage/xtradb/include/ha_prototypes.h index 5a7e24c6b4a..70f2f47f120 100644 --- a/storage/xtradb/include/ha_prototypes.h +++ b/storage/xtradb/include/ha_prototypes.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2006, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. 
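The fts0priv.ic hunk above inserts a space in "%016" PRIu64. As general background (not taken from the patch description): C++11 introduced user-defined literals, so a string literal immediately followed by an identifier such as PRIu64 is parsed as a literal with a ud-suffix rather than two tokens, and the macro never expands; with the space, ordinary string-literal concatenation applies again. A standalone illustration:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
            std::uint64_t id = 42;
            char          buf[32];

            /* "%016" PRIu64 concatenates to a plain printf format such as
               "%016llu" after preprocessing; writing "%016"PRIu64 would make
               a C++11 compiler look for a user-defined literal suffix. */
            std::snprintf(buf, sizeof(buf), "%016" PRIu64, id);
            std::printf("%s\n", buf);
            return 0;
    }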
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -142,7 +142,7 @@ enum durability_properties thd_requested_durability( /*=====================*/ const THD* thd) /*!< in: thread handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Returns true if the transaction this thread is processing has edited @@ -183,7 +183,7 @@ innobase_mysql_cmp( const unsigned char* b, /*!< in: data field */ unsigned int b_length) /*!< in: data field length, not UNIV_SQL_NULL */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Converts a MySQL type to an InnoDB type. Note that this function returns the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 @@ -199,7 +199,7 @@ get_innobase_type_from_mysql_type( and unsigned integer types are 'unsigned types' */ const void* field) /*!< in: MySQL Field */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Get the variable length bounds of the given character set. */ @@ -297,7 +297,7 @@ innobase_get_stmt( /*==============*/ THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** This function is used to find the storage length in bytes of the first n characters for prefix indexes using a multibyte character set. The function @@ -323,7 +323,7 @@ enum icp_result innobase_index_cond( /*================*/ void* file) /*!< in/out: pointer to ha_innobase */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Returns true if the thread supports XA, global value of innodb_supports_xa if thd is NULL. @@ -476,7 +476,7 @@ innobase_format_name( const char* name, /*!< in: index or table name to format */ ibool is_index_name) /*!< in: index name */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Corresponds to Sql_condition:enum_warning_level. */ enum ib_log_level_t { @@ -506,7 +506,7 @@ ib_errf( ib_uint32_t code, /*!< MySQL error code */ const char* format, /*!< printf format */ ...) /*!< Args */ - __attribute__((format(printf, 4, 5))); + MY_ATTRIBUTE((format(printf, 4, 5))); /******************************************************************//** Use this when the args are passed to the format string from @@ -537,7 +537,7 @@ ib_logf( ib_log_level_t level, /*!< in: warning level */ const char* format, /*!< printf format */ ...) /*!< Args */ - __attribute__((format(printf, 2, 3))); + MY_ATTRIBUTE((format(printf, 2, 3))); /******************************************************************//** Returns the NUL terminated value of glob_hostname. @@ -593,7 +593,7 @@ innobase_next_autoinc( ulonglong step, /*!< in: AUTOINC increment step */ ulonglong offset, /*!< in: AUTOINC offset */ ulonglong max_value) /*!< in: max value for type */ - __attribute__((pure, warn_unused_result)); + MY_ATTRIBUTE((pure, warn_unused_result)); /********************************************************************//** Get the upper limit of the MySQL integral and floating-point type. 
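Several of the declarations above keep format(printf, N, M) inside the renamed attribute, e.g. ib_errf with (printf, 4, 5) and ib_logf with (printf, 2, 3): N is the position of the format string, M the position of the first variadic argument, and the compiler then checks every call site against the format. A sketch with invented names, shaped like ib_logf but without the level argument:

    #include <cstdarg>
    #include <cstdio>

    #if defined(__GNUC__)
    # define EXAMPLE_ATTRIBUTE(A) __attribute__(A)
    #else
    # define EXAMPLE_ATTRIBUTE(A)
    #endif

    /* Argument 1 is the format string, the variadic list starts at argument 2. */
    static void example_logf(const char* fmt, ...)
            EXAMPLE_ATTRIBUTE((format(printf, 1, 2)));

    static void example_logf(const char* fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            std::vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    int main()
    {
            example_logf("space id %lu\n", 5UL);
            /* example_logf("space id %lu\n", "x");  <-- -Wformat would flag this */
            return 0;
    }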
@@ -603,7 +603,7 @@ ulonglong innobase_get_int_col_max_value( /*===========================*/ const Field* field) /*!< in: MySQL field */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************** Check if the length of the identifier exceeds the maximum allowed. diff --git a/storage/xtradb/include/handler0alter.h b/storage/xtradb/include/handler0alter.h index 66b963ae39a..3dd6c99eb6d 100644 --- a/storage/xtradb/include/handler0alter.h +++ b/storage/xtradb/include/handler0alter.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -32,7 +32,7 @@ innobase_rec_to_mysql( const dict_index_t* index, /*!< in: index */ const ulint* offsets)/*!< in: rec_get_offsets( rec, index, ...) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Copies an InnoDB index entry to table->record[0]. */ @@ -43,7 +43,7 @@ innobase_fields_to_mysql( struct TABLE* table, /*!< in/out: MySQL table */ const dict_index_t* index, /*!< in: InnoDB index */ const dfield_t* fields) /*!< in: InnoDB index fields */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Copies an InnoDB row to table->record[0]. */ @@ -54,7 +54,7 @@ innobase_row_to_mysql( struct TABLE* table, /*!< in/out: MySQL table */ const dict_table_t* itab, /*!< in: InnoDB table */ const dtuple_t* row) /*!< in: InnoDB row */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Resets table->record[0]. */ @@ -63,7 +63,7 @@ void innobase_rec_reset( /*===============*/ struct TABLE* table) /*!< in/out: MySQL table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Generate the next autoinc based on a snapshot of the session auto_increment_increment and auto_increment_offset variables. */ diff --git a/storage/xtradb/include/ibuf0ibuf.h b/storage/xtradb/include/ibuf0ibuf.h index ac16b10e097..1afdabdeb63 100644 --- a/storage/xtradb/include/ibuf0ibuf.h +++ b/storage/xtradb/include/ibuf0ibuf.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -118,7 +118,7 @@ void ibuf_mtr_start( /*===========*/ mtr_t* mtr) /*!< out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***************************************************************//** Commits an insert buffer mini-transaction. */ UNIV_INLINE @@ -126,7 +126,7 @@ void ibuf_mtr_commit( /*============*/ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Initializes an ibuf bitmap page. 
*/ UNIV_INTERN @@ -252,7 +252,7 @@ ibool ibuf_inside( /*========*/ const mtr_t* mtr) /*!< in: mini-transaction */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /***********************************************************************//** Checks if a page address is an ibuf bitmap page (level 3 page) address. @return TRUE if a bitmap page */ @@ -285,7 +285,7 @@ ibuf_page_low( is not one of the fixed address ibuf pages, or NULL, in which case a new transaction is created. */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #ifdef UNIV_DEBUG /** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages. Must not be called when recv_no_ibuf_operations==TRUE. @@ -364,23 +364,31 @@ void ibuf_delete_for_discarded_space( /*============================*/ ulint space); /*!< in: space id */ -/*********************************************************************//** -Contracts insert buffer trees by reading pages to the buffer pool. +/** Contract the change buffer by reading pages to the buffer pool. +@param[in] full If true, do a full contraction based +on PCT_IO(100). If false, the size of contract batch is determined +based on the current size of the change buffer. @return a lower limit for the combined size in bytes of entries which will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ UNIV_INTERN ulint -ibuf_contract_in_background( -/*========================*/ - table_id_t table_id, /*!< in: if merge should be done only - for a specific table, for all tables - this should be 0 */ - ibool full); /*!< in: TRUE if the caller wants to - do a full contract based on PCT_IO(100). - If FALSE then the size of contract - batch is determined based on the - current size of the ibuf tree. */ +ibuf_merge_in_background( + bool full); /*!< in: TRUE if the caller wants to + do a full contract based on PCT_IO(100). + If FALSE then the size of contract + batch is determined based on the + current size of the ibuf tree. */ + +/** Contracts insert buffer trees by reading pages referring to space_id +to the buffer pool. +@returns number of pages merged.*/ +UNIV_INTERN +ulint +ibuf_merge_space( +/*=============*/ + ulint space); /*!< in: space id */ + #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Parses a redo log record of an ibuf bitmap page init. @@ -461,7 +469,7 @@ ibuf_check_bitmap_on_import( /*========================*/ const trx_t* trx, /*!< in: transaction */ ulint space_id) /*!< in: tablespace identifier */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #define IBUF_HEADER_PAGE_NO FSP_IBUF_HEADER_PAGE_NO #define IBUF_TREE_ROOT_PAGE_NO FSP_IBUF_TREE_ROOT_PAGE_NO diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h index cb95c58fe3c..ee89f3512fb 100644 --- a/storage/xtradb/include/lock0lock.h +++ b/storage/xtradb/include/lock0lock.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -268,7 +268,7 @@ lock_rec_expl_exist_on_page( /*========================*/ ulint space, /*!< in: space id */ ulint page_no)/*!< in: page number */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate insert of a record. If they do, first tests if the query thread should anyway @@ -291,7 +291,7 @@ lock_rec_insert_check_and_lock( inserted record maybe should inherit LOCK_GAP type locks from the successor record */ - __attribute__((nonnull(2,3,4,6,7), warn_unused_result)); + MY_ATTRIBUTE((nonnull(2,3,4,6,7), warn_unused_result)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. If they do, @@ -312,7 +312,7 @@ lock_clust_rec_modify_check_and_lock( dict_index_t* index, /*!< in: clustered index */ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (delete mark or delete unmark) of a secondary index record. @@ -333,7 +333,7 @@ lock_sec_rec_modify_check_and_lock( que_thr_t* thr, /*!< in: query thread (can be NULL if BTR_NO_LOCKING_FLAG) */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((warn_unused_result, nonnull(2,3,4,6))); + MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,4,6))); /*********************************************************************//** Like lock_clust_rec_read_check_and_lock(), but reads a secondary index record. @@ -420,7 +420,7 @@ lock_clust_rec_read_check_and_lock_alt( ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or LOCK_REC_NOT_GAP */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Checks that a record is seen in a consistent read. @return true if sees, or false if an earlier version of the record @@ -452,7 +452,7 @@ lock_sec_rec_cons_read_sees( should be read or passed over by a read cursor */ const read_view_t* view) /*!< in: consistent read view */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Locks the specified database table in the mode given. If the lock cannot be granted immediately, the query thread is put to wait. @@ -467,7 +467,7 @@ lock_table( in dictionary cache */ enum lock_mode mode, /*!< in: lock mode */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Creates a table IX lock object for a resurrected transaction. 
*/ UNIV_INTERN @@ -532,7 +532,7 @@ lock_rec_fold( /*==========*/ ulint space, /*!< in: space */ ulint page_no)/*!< in: page number */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*********************************************************************//** Calculates the hash value of a page file address: used in inserting or searching for a lock in the hash table. @@ -582,7 +582,7 @@ lock_is_table_exclusive( /*====================*/ const dict_table_t* table, /*!< in: table */ const trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Checks if a lock request lock1 has to wait for request lock2. @return TRUE if lock1 has to wait for lock2 to be removed */ @@ -606,7 +606,7 @@ lock_report_trx_id_insanity( dict_index_t* index, /*!< in: index */ const ulint* offsets, /*!< in: rec_get_offsets(rec, index) */ trx_id_t max_trx_id) /*!< in: trx_sys_get_max_trx_id() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Prints info of a table lock. */ UNIV_INTERN @@ -633,7 +633,7 @@ lock_print_info_summary( /*====================*/ FILE* file, /*!< in: file where to print */ ibool nowait) /*!< in: whether to wait for the lock mutex */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Prints info of locks for each transaction. This function assumes that the caller holds the lock mutex and more importantly it will release the lock @@ -653,7 +653,7 @@ ulint lock_number_of_rows_locked( /*=======================*/ const trx_lock_t* trx_lock) /*!< in: transaction locks */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Gets the type of a lock. Non-inline version for using outside of the @@ -811,7 +811,7 @@ dberr_t lock_trx_handle_wait( /*=================*/ trx_t* trx) /*!< in/out: trx lock state */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Get the number of locks on a table. @return number of locks */ @@ -820,7 +820,7 @@ ulint lock_table_get_n_locks( /*===================*/ const dict_table_t* table) /*!< in: table */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG /*********************************************************************//** Checks that a transaction id is sensible, i.e., not in the future. @@ -833,7 +833,7 @@ lock_check_trx_id_sanity( const rec_t* rec, /*!< in: user record */ dict_index_t* index, /*!< in: index */ const ulint* offsets) /*!< in: rec_get_offsets(rec, index) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Check if the transaction holds any locks on the sys tables or its records. @@ -843,7 +843,7 @@ const lock_t* lock_trx_has_sys_table_locks( /*=========================*/ const trx_t* trx) /*!< in: transaction to check */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*******************************************************************//** Check if the transaction holds an exclusive lock on a record. 
@@ -856,7 +856,7 @@ lock_trx_has_rec_x_lock( const dict_table_t* table, /*!< in: table to check */ const buf_block_t* block, /*!< in: buffer block of the record */ ulint heap_no)/*!< in: record heap number */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ /** Lock modes and types */ diff --git a/storage/xtradb/include/lock0priv.h b/storage/xtradb/include/lock0priv.h index e564387ec53..e7ea0c08845 100644 --- a/storage/xtradb/include/lock0priv.h +++ b/storage/xtradb/include/lock0priv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -108,7 +108,7 @@ lock_clust_rec_some_has_impl( const rec_t* rec, /*!< in: user record */ const dict_index_t* index, /*!< in: clustered index */ const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "lock0priv.ic" diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h index 67dc0d72b4b..5706f3af4b0 100644 --- a/storage/xtradb/include/log0online.h +++ b/storage/xtradb/include/log0online.h @@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 59 Temple -Place, Suite 330, Boston, MA 02111-1307 USA +this program; if not, write to the Free Software Foundation, Inc., 51 Franklin +Street, Fifth Floor, Boston, MA 02110-1301, USA *****************************************************************************/ diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h index 674f68bd1dc..6955491bac8 100644 --- a/storage/xtradb/include/log0recv.h +++ b/storage/xtradb/include/log0recv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -73,7 +73,7 @@ recv_read_checkpoint_info_for_backup( lsn_t* first_header_lsn) /*!< out: lsn of of the start of the first log file */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Scans the log segment and n_bytes_scanned is set to the length of valid log scanned. */ diff --git a/storage/xtradb/include/mach0data.h b/storage/xtradb/include/mach0data.h index d0087f56aaa..9859def0adc 100644 --- a/storage/xtradb/include/mach0data.h +++ b/storage/xtradb/include/mach0data.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,7 +53,7 @@ ulint mach_read_from_1( /*=============*/ const byte* b) /*!< in: pointer to byte */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*******************************************************//** The following function is used to store data in two consecutive bytes. We store the most significant byte to the lower address. */ @@ -72,7 +72,7 @@ ulint mach_read_from_2( /*=============*/ const byte* b) /*!< in: pointer to two bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /********************************************************//** The following function is used to convert a 16-bit data item @@ -84,7 +84,7 @@ uint16 mach_encode_2( /*==========*/ ulint n) /*!< in: integer in machine-dependent format */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /********************************************************//** The following function is used to convert a 16-bit data item from the canonical format, for fast bytewise equality test @@ -95,7 +95,7 @@ ulint mach_decode_2( /*==========*/ uint16 n) /*!< in: 16-bit integer in canonical format */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*******************************************************//** The following function is used to store data in 3 consecutive bytes. We store the most significant byte to the lowest address. */ @@ -114,7 +114,7 @@ ulint mach_read_from_3( /*=============*/ const byte* b) /*!< in: pointer to 3 bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*******************************************************//** The following function is used to store data in four consecutive bytes. We store the most significant byte to the lowest address. */ @@ -133,7 +133,7 @@ ulint mach_read_from_4( /*=============*/ const byte* b) /*!< in: pointer to four bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a ulint in a compressed form (1..5 bytes). @return stored size in bytes */ @@ -151,7 +151,7 @@ ulint mach_get_compressed_size( /*=====================*/ ulint n) /*!< in: ulint integer to be stored */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*********************************************************//** Reads a ulint in a compressed form. @return read integer */ @@ -160,7 +160,7 @@ ulint mach_read_compressed( /*=================*/ const byte* b) /*!< in: pointer to memory from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*******************************************************//** The following function is used to store data in 6 consecutive bytes. We store the most significant byte to the lowest address. */ @@ -179,7 +179,7 @@ ib_uint64_t mach_read_from_6( /*=============*/ const byte* b) /*!< in: pointer to 6 bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*******************************************************//** The following function is used to store data in 7 consecutive bytes. We store the most significant byte to the lowest address. 
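The mach0data.h declarations above distinguish pure from const inside the renamed attributes. As general GCC/Clang background: const promises the result depends only on the argument values and reads no memory at all, pure additionally allows reading memory reachable from the arguments, and both let the compiler fold or reuse calls. An illustrative pair, loosely modelled on the byte readers above (names and bodies are invented):

    #include <cstdio>

    #if defined(__GNUC__)
    # define EXAMPLE_ATTRIBUTE(A) __attribute__(A)
    #else
    # define EXAMPLE_ATTRIBUTE(A)
    #endif

    /* Reads the two bytes b points to, most significant byte first, so it
       may be 'pure' but not 'const'. */
    static unsigned read_from_2(const unsigned char* b)
            EXAMPLE_ATTRIBUTE((nonnull, pure));

    /* Depends only on the value of n, so the stricter 'const' applies. */
    static unsigned encode_2(unsigned n)
            EXAMPLE_ATTRIBUTE((const));

    static unsigned read_from_2(const unsigned char* b)
    {
            return ((unsigned) b[0] << 8) | b[1];
    }

    static unsigned encode_2(unsigned n)
    {
            return n & 0xffffU;     /* placeholder transformation */
    }

    int main()
    {
            const unsigned char buf[2] = { 0x12, 0x34 };

            std::printf("%x %x\n", read_from_2(buf), encode_2(0x1234));
            return 0;
    }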
*/ @@ -198,7 +198,7 @@ ib_uint64_t mach_read_from_7( /*=============*/ const byte* b) /*!< in: pointer to 7 bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*******************************************************//** The following function is used to store data in 8 consecutive bytes. We store the most significant byte to the lowest address. */ @@ -217,7 +217,7 @@ ib_uint64_t mach_read_from_8( /*=============*/ const byte* b) /*!< in: pointer to 8 bytes */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a 64-bit integer in a compressed form (5..9 bytes). @return size in bytes */ @@ -243,7 +243,7 @@ ib_uint64_t mach_ull_read_compressed( /*=====================*/ const byte* b) /*!< in: pointer to memory from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a 64-bit integer in a compressed form (1..11 bytes). @return size in bytes */ @@ -261,7 +261,7 @@ ulint mach_ull_get_much_compressed_size( /*==============================*/ ib_uint64_t n) /*!< in: 64-bit integer to be stored */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*********************************************************//** Reads a 64-bit integer in a compressed form. @return the value read */ @@ -270,7 +270,7 @@ ib_uint64_t mach_ull_read_much_compressed( /*==========================*/ const byte* b) /*!< in: pointer to memory from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Reads a ulint in a compressed form if the log record fully contains it. @return pointer to end of the stored field, NULL if not complete */ @@ -301,7 +301,7 @@ double mach_double_read( /*=============*/ const byte* b) /*!< in: pointer to memory from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a double. It is stored in a little-endian format. */ UNIV_INLINE @@ -318,7 +318,7 @@ float mach_float_read( /*============*/ const byte* b) /*!< in: pointer to memory from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a float. It is stored in a little-endian format. */ UNIV_INLINE @@ -336,7 +336,7 @@ mach_read_from_n_little_endian( /*===========================*/ const byte* buf, /*!< in: from where to read */ ulint buf_size) /*!< in: from how many bytes to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a ulint in the little-endian format. */ UNIV_INLINE @@ -354,7 +354,7 @@ ulint mach_read_from_2_little_endian( /*===========================*/ const byte* buf) /*!< in: from where to read */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************//** Writes a ulint in the little-endian format. */ UNIV_INLINE diff --git a/storage/xtradb/include/mem0mem.h b/storage/xtradb/include/mem0mem.h index f30034f3074..de9b8b29fd9 100644 --- a/storage/xtradb/include/mem0mem.h +++ b/storage/xtradb/include/mem0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Oracle and/or its affiliates. 
All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -353,7 +353,7 @@ mem_heap_printf( /*============*/ mem_heap_t* heap, /*!< in: memory heap */ const char* format, /*!< in: format string */ - ...) __attribute__ ((format (printf, 2, 3))); + ...) MY_ATTRIBUTE ((format (printf, 2, 3))); #ifdef MEM_PERIODIC_CHECK /******************************************************************//** diff --git a/storage/xtradb/include/mem0mem.ic b/storage/xtradb/include/mem0mem.ic index 0d983d69e1a..63e68150b61 100644 --- a/storage/xtradb/include/mem0mem.ic +++ b/storage/xtradb/include/mem0mem.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2010, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -476,9 +476,9 @@ void mem_heap_free_func( /*===============*/ mem_heap_t* heap, /*!< in, own: heap to be freed */ - const char* file_name __attribute__((unused)), + const char* file_name MY_ATTRIBUTE((unused)), /*!< in: file name where freed */ - ulint line __attribute__((unused))) + ulint line MY_ATTRIBUTE((unused))) { mem_block_t* block; mem_block_t* prev_block; diff --git a/storage/xtradb/include/mtr0mtr.h b/storage/xtradb/include/mtr0mtr.h index 0730e870b3f..ce5ab6d2218 100644 --- a/storage/xtradb/include/mtr0mtr.h +++ b/storage/xtradb/include/mtr0mtr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -207,7 +207,7 @@ void mtr_start( /*======*/ mtr_t* mtr) /*!< out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***************************************************************//** Commits a mini-transaction. */ UNIV_INTERN @@ -215,7 +215,7 @@ void mtr_commit( /*=======*/ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************//** Sets and returns a savepoint in mtr. @return savepoint */ @@ -308,7 +308,7 @@ mtr_memo_release( mtr_t* mtr, /*!< in/out: mini-transaction */ void* object, /*!< in: object */ ulint type) /*!< in: object type: MTR_MEMO_S_LOCK, ... */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG # ifndef UNIV_HOTBACKUP /**********************************************************//** @@ -321,7 +321,7 @@ mtr_memo_contains( mtr_t* mtr, /*!< in: mtr */ const void* object, /*!< in: object to search */ ulint type) /*!< in: type of object */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /**********************************************************//** Checks if memo contains the given page. 
diff --git a/storage/xtradb/include/mtr0mtr.ic b/storage/xtradb/include/mtr0mtr.ic index cc021038001..04c39cf7f7e 100644 --- a/storage/xtradb/include/mtr0mtr.ic +++ b/storage/xtradb/include/mtr0mtr.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -37,7 +37,7 @@ ibool mtr_block_dirtied( /*==============*/ const buf_block_t* block) /*!< in: block being x-fixed */ - __attribute__((nonnull,warn_unused_result)); + MY_ATTRIBUTE((nonnull,warn_unused_result)); /***************************************************************//** Starts a mini-transaction. */ diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index de2c7287dbe..4293ad45052 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -549,7 +549,7 @@ os_file_create_simple_no_error_handling_func( OS_FILE_READ_WRITE_CACHED (disable O_DIRECT if it would be enabled otherwise) */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** Tries to disable OS caching on an opened file descriptor. */ UNIV_INTERN @@ -583,7 +583,7 @@ os_file_create_func( function source code for the exact rules */ ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Deletes a file. The file has to be closed before calling this. @return TRUE if success */ @@ -649,7 +649,7 @@ pfs_os_file_create_simple_func( ibool* success,/*!< out: TRUE if succeed, FALSE if error */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** NOTE! Please use the corresponding macro @@ -674,7 +674,7 @@ pfs_os_file_create_simple_no_error_handling_func( ibool* success,/*!< out: TRUE if succeed, FALSE if error */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** NOTE! 
Please use the corresponding macro os_file_create(), not directly @@ -702,7 +702,7 @@ pfs_os_file_create_func( ibool* success,/*!< out: TRUE if succeed, FALSE if error */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** NOTE! Please use the corresponding macro os_file_close(), not directly @@ -882,7 +882,7 @@ os_offset_t os_file_get_size( /*=============*/ os_file_t file) /*!< in: handle to a file */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /***********************************************************************//** Write the specified number of zeros to a newly created file. @return TRUE if success */ @@ -894,7 +894,7 @@ os_file_set_size( null-terminated string */ os_file_t file, /*!< in: handle to a file */ os_offset_t size) /*!< in: file size */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Truncates a file at its current position. @return TRUE if success */ diff --git a/storage/xtradb/include/os0thread.h b/storage/xtradb/include/os0thread.h index d84eff99519..e36f836e0be 100644 --- a/storage/xtradb/include/os0thread.h +++ b/storage/xtradb/include/os0thread.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -139,7 +139,7 @@ os_thread_exit( /*===========*/ void* exit_value) /*!< in: exit value; in Windows this void* is cast as a DWORD */ - UNIV_COLD __attribute__((noreturn)); + UNIV_COLD MY_ATTRIBUTE((noreturn)); /*****************************************************************//** Returns the thread identifier of current thread. @return current thread identifier */ diff --git a/storage/xtradb/include/page0cur.h b/storage/xtradb/include/page0cur.h index b1ad49b4915..f04667ff29c 100644 --- a/storage/xtradb/include/page0cur.h +++ b/storage/xtradb/include/page0cur.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -180,7 +180,7 @@ page_cur_tuple_insert( mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ ulint n_ext, /*!< in: number of externally stored columns */ mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */ - __attribute__((nonnull(1,2,3,4,5), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,4,5), warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Inserts a record next to page cursor. 
Returns pointer to inserted record if @@ -218,7 +218,7 @@ page_cur_insert_rec_low( const rec_t* rec, /*!< in: pointer to a physical record */ ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */ mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */ - __attribute__((nonnull(1,2,3,4), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result)); /***********************************************************//** Inserts a record next to page cursor on a compressed and uncompressed page. Returns pointer to inserted record if succeed, i.e., @@ -240,7 +240,7 @@ page_cur_insert_rec_zip( const rec_t* rec, /*!< in: pointer to a physical record */ ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */ mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */ - __attribute__((nonnull(1,2,3,4), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result)); /*************************************************************//** Copies records from page to a newly created page, from a given record onward, including that record. Infimum and supremum records are not copied. diff --git a/storage/xtradb/include/page0page.h b/storage/xtradb/include/page0page.h index 5de1655a3ac..543c9df17ff 100644 --- a/storage/xtradb/include/page0page.h +++ b/storage/xtradb/include/page0page.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -169,7 +169,7 @@ page_t* page_align( /*=======*/ const void* ptr) /*!< in: pointer to page frame */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** Gets the offset within a page. @return offset from the start of the page */ @@ -178,7 +178,7 @@ ulint page_offset( /*========*/ const void* ptr) /*!< in: pointer to page frame */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*************************************************************//** Returns the max trx id field value. */ UNIV_INLINE @@ -236,7 +236,7 @@ page_header_get_offs( /*=================*/ const page_t* page, /*!< in: page */ ulint field) /*!< in: PAGE_FREE, ... */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*************************************************************//** Returns the pointer stored in the given header field, or NULL. */ @@ -296,7 +296,7 @@ page_rec_get_nth_const( /*===================*/ const page_t* page, /*!< in: page */ ulint nth) /*!< in: nth record */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /************************************************************//** Returns the nth record of the record list. This is the inverse function of page_rec_get_n_recs_before(). 
@@ -307,7 +307,7 @@ page_rec_get_nth( /*=============*/ page_t* page, /*< in: page */ ulint nth) /*!< in: nth record */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_HOTBACKUP /************************************************************//** @@ -320,7 +320,7 @@ rec_t* page_get_middle_rec( /*================*/ page_t* page) /*!< in: page */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************//** Compares a data tuple to a physical record. Differs from the function cmp_dtuple_rec_with_match in the way that the record must reside on an @@ -528,7 +528,7 @@ bool page_is_leaf( /*=========*/ const page_t* page) /*!< in: page */ - __attribute__((pure)); + MY_ATTRIBUTE((nonnull, pure)); /************************************************************//** Determine whether the page is empty. @return true if the page is empty (PAGE_N_RECS = 0) */ @@ -537,7 +537,7 @@ bool page_is_empty( /*==========*/ const page_t* page) /*!< in: page */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /************************************************************//** Determine whether the page contains garbage. @return true if the page contains garbage (PAGE_GARBAGE is not 0) */ @@ -546,7 +546,7 @@ bool page_has_garbage( /*=============*/ const page_t* page) /*!< in: page */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /************************************************************//** Gets the pointer to the next record on the page. @return pointer to next record */ @@ -618,7 +618,7 @@ ibool page_rec_is_user_rec_low( /*=====================*/ ulint offset) /*!< in: record offset on page */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** TRUE if the record is the supremum record on a page. @return TRUE if the supremum record */ @@ -627,7 +627,7 @@ ibool page_rec_is_supremum_low( /*=====================*/ ulint offset) /*!< in: record offset on page */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** TRUE if the record is the infimum record on a page. @return TRUE if the infimum record */ @@ -636,7 +636,7 @@ ibool page_rec_is_infimum_low( /*====================*/ ulint offset) /*!< in: record offset on page */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** TRUE if the record is a user record on the page. @@ -646,7 +646,7 @@ ibool page_rec_is_user_rec( /*=================*/ const rec_t* rec) /*!< in: record */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** TRUE if the record is the supremum record on a page. @return TRUE if the supremum record */ @@ -655,7 +655,7 @@ ibool page_rec_is_supremum( /*=================*/ const rec_t* rec) /*!< in: record */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /************************************************************//** TRUE if the record is the infimum record on a page. @@ -665,7 +665,7 @@ ibool page_rec_is_infimum( /*================*/ const rec_t* rec) /*!< in: record */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /***************************************************************//** Looks for the record which owns the given record. 
@return the owner record */ @@ -685,7 +685,7 @@ page_rec_write_field( ulint i, /*!< in: index of the field to update */ ulint val, /*!< in: value to write */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /************************************************************//** Returns the maximum combined size of records which can be inserted on top @@ -715,7 +715,7 @@ ulint page_get_free_space_of_empty( /*=========================*/ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /**********************************************************//** Returns the base extra size of a physical record. This is the size of the fixed header, independent of the record size. @@ -801,7 +801,7 @@ page_create_zip( ulint level, /*!< in: the B-tree level of the page */ trx_id_t max_trx_id, /*!< in: PAGE_MAX_TRX_ID */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************//** Empty a previously created B-tree index page. */ UNIV_INTERN @@ -811,7 +811,7 @@ page_create_empty( buf_block_t* block, /*!< in/out: B-tree block */ dict_index_t* index, /*!< in: the index of the page */ mtr_t* mtr) /*!< in/out: mini-transaction */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /*************************************************************//** Differs from page_copy_rec_list_end, because this function does not touch the lock table and max trx id on page or compress the page. @@ -850,7 +850,7 @@ page_copy_rec_list_end( rec_t* rec, /*!< in: record on page */ dict_index_t* index, /*!< in: record descriptor */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Copies records from page to new_page, up to the given record, NOT including that record. Infimum and supremum records are not copied. @@ -872,7 +872,7 @@ page_copy_rec_list_start( rec_t* rec, /*!< in: record on page */ dict_index_t* index, /*!< in: record descriptor */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Deletes records from a page from a given record onward, including that record. The infimum and supremum records are not deleted. */ @@ -889,7 +889,7 @@ page_delete_rec_list_end( records in the end of the chain to delete, or ULINT_UNDEFINED if not known */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Deletes records from page, up to the given record, NOT including that record. Infimum and supremum records are not deleted. */ @@ -901,7 +901,7 @@ page_delete_rec_list_start( buf_block_t* block, /*!< in: buffer block of the page */ dict_index_t* index, /*!< in: record descriptor */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*************************************************************//** Moves record list end to another page. Moved records include split_rec. 
@@ -922,7 +922,7 @@ page_move_rec_list_end( rec_t* split_rec, /*!< in: first record to move */ dict_index_t* index, /*!< in: record descriptor */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull(1, 2, 4, 5))); + MY_ATTRIBUTE((nonnull(1, 2, 4, 5))); /*************************************************************//** Moves record list start to another page. Moved records do not include split_rec. @@ -942,7 +942,7 @@ page_move_rec_list_start( rec_t* split_rec, /*!< in: first record not to move */ dict_index_t* index, /*!< in: record descriptor */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull(1, 2, 4, 5))); + MY_ATTRIBUTE((nonnull(1, 2, 4, 5))); /****************************************************************//** Splits a directory slot which owns too many records. */ UNIV_INTERN @@ -953,7 +953,7 @@ page_dir_split_slot( page_zip_des_t* page_zip,/*!< in/out: compressed page whose uncompressed part will be written, or NULL */ ulint slot_no)/*!< in: the directory slot */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*************************************************************//** Tries to balance the given directory slot with too few records with the upper neighbor, so that there are at least the minimum number @@ -966,7 +966,7 @@ page_dir_balance_slot( page_t* page, /*!< in/out: index page */ page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */ ulint slot_no)/*!< in: the directory slot */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /**********************************************************//** Parses a log record of a record list end or start deletion. @return end of log record or NULL */ diff --git a/storage/xtradb/include/page0types.h b/storage/xtradb/include/page0types.h index 74ad6f72f7e..3b53de6cc2b 100644 --- a/storage/xtradb/include/page0types.h +++ b/storage/xtradb/include/page0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -133,7 +133,7 @@ page_zip_rec_set_deleted( page_zip_des_t* page_zip,/*!< in/out: compressed page */ const byte* rec, /*!< in: record on the uncompressed page */ ulint flag) /*!< in: the deleted flag (nonzero=TRUE) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Write the "owned" flag of a record on a compressed page. The n_owned field @@ -145,7 +145,7 @@ page_zip_rec_set_owned( page_zip_des_t* page_zip,/*!< in/out: compressed page */ const byte* rec, /*!< in: record on the uncompressed page */ ulint flag) /*!< in: the owned flag (nonzero=TRUE) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Shift the dense page directory when a record is deleted. */ @@ -158,7 +158,7 @@ page_zip_dir_delete( dict_index_t* index, /*!< in: index of rec */ const ulint* offsets,/*!< in: rec_get_offsets(rec) */ const byte* free) /*!< in: previous start of the free list */ - __attribute__((nonnull(1,2,3,4))); + MY_ATTRIBUTE((nonnull(1,2,3,4))); /**********************************************************************//** Add a slot to the dense page directory. 
*/ @@ -169,5 +169,5 @@ page_zip_dir_add_slot( page_zip_des_t* page_zip, /*!< in/out: compressed page */ ulint is_clustered) /*!< in: nonzero for clustered index, zero for others */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif diff --git a/storage/xtradb/include/page0zip.h b/storage/xtradb/include/page0zip.h index 41eb1e35d78..81068e7bd29 100644 --- a/storage/xtradb/include/page0zip.h +++ b/storage/xtradb/include/page0zip.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -63,7 +63,7 @@ ulint page_zip_get_size( /*==============*/ const page_zip_des_t* page_zip) /*!< in: compressed page */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /**********************************************************************//** Set the size of a compressed page in bytes. */ UNIV_INLINE @@ -86,7 +86,7 @@ page_zip_rec_needs_ext( ulint n_fields, /*!< in: number of fields in the record; ignored if zip_size == 0 */ ulint zip_size) /*!< in: compressed page size in bytes, or 0 */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /**********************************************************************//** Determine the guaranteed free space on an empty page. @@ -97,7 +97,7 @@ page_zip_empty_size( /*================*/ ulint n_fields, /*!< in: number of columns in the index */ ulint zip_size) /*!< in: compressed page size in bytes */ - __attribute__((const)); + MY_ATTRIBUTE((const)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -132,7 +132,7 @@ page_zip_compress( dict_index_t* index, /*!< in: index of the B-tree node */ ulint level, /*!< in: compression level */ mtr_t* mtr) /*!< in: mini-transaction, or NULL */ - __attribute__((nonnull(1,3))); + MY_ATTRIBUTE((nonnull(1,2,3))); /**********************************************************************//** Decompress a page. This function should tolerate errors on the compressed @@ -150,7 +150,7 @@ page_zip_decompress( FALSE=verify but do not copy some page header fields that should not change after page creation */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); #ifdef UNIV_DEBUG /**********************************************************************//** @@ -179,7 +179,7 @@ page_zip_validate_low( const dict_index_t* index, /*!< in: index of the page, if known */ ibool sloppy) /*!< in: FALSE=strict, TRUE=ignore the MIN_REC_FLAG */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************************//** Check that the compressed and decompressed pages match. 
*/ UNIV_INTERN @@ -189,7 +189,7 @@ page_zip_validate( const page_zip_des_t* page_zip,/*!< in: compressed page */ const page_t* page, /*!< in: uncompressed page */ const dict_index_t* index) /*!< in: index of the page, if known */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); #endif /* UNIV_ZIP_DEBUG */ #ifndef UNIV_INNOCHECKSUM @@ -203,7 +203,7 @@ page_zip_max_ins_size( /*==================*/ const page_zip_des_t* page_zip,/*!< in: compressed page */ ibool is_clust)/*!< in: TRUE if clustered index */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /**********************************************************************//** Determine if enough space is available in the modification log. @@ -217,7 +217,7 @@ page_zip_available( ulint length, /*!< in: combined size of the record */ ulint create) /*!< in: nonzero=add the record to the heap */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /**********************************************************************//** Write data to the uncompressed header portion of a page. The data must @@ -230,7 +230,7 @@ page_zip_write_header( const byte* str, /*!< in: address on the uncompressed page */ ulint length, /*!< in: length of the data */ mtr_t* mtr) /*!< in: mini-transaction, or NULL */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************************//** Write an entire record on the compressed page. The data must already @@ -244,7 +244,7 @@ page_zip_write_rec( dict_index_t* index, /*!< in: the index the record belongs to */ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ ulint create) /*!< in: nonzero=insert, zero=update */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***********************************************************//** Parses a log record of writing a BLOB pointer of a record. @@ -273,7 +273,7 @@ page_zip_write_blob_ptr( ulint n, /*!< in: column index */ mtr_t* mtr) /*!< in: mini-transaction handle, or NULL if no logging is needed */ - __attribute__((nonnull(1,2,3,4))); + MY_ATTRIBUTE((nonnull(1,2,3,4))); /***********************************************************//** Parses a log record of writing the node pointer of a record. @@ -298,7 +298,7 @@ page_zip_write_node_ptr( ulint size, /*!< in: data size of rec */ ulint ptr, /*!< in: node pointer */ mtr_t* mtr) /*!< in: mini-transaction, or NULL */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************************//** Write the trx_id and roll_ptr of a record on a B-tree leaf node page. */ @@ -312,7 +312,7 @@ page_zip_write_trx_id_and_roll_ptr( ulint trx_id_col,/*!< in: column number of TRX_ID in rec */ trx_id_t trx_id, /*!< in: transaction identifier */ roll_ptr_t roll_ptr)/*!< in: roll_ptr */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Write the "deleted" flag of a record on a compressed page. The flag must @@ -324,7 +324,7 @@ page_zip_rec_set_deleted( page_zip_des_t* page_zip,/*!< in/out: compressed page */ const byte* rec, /*!< in: record on the uncompressed page */ ulint flag) /*!< in: the deleted flag (nonzero=TRUE) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Write the "owned" flag of a record on a compressed page. 
The n_owned field @@ -336,7 +336,7 @@ page_zip_rec_set_owned( page_zip_des_t* page_zip,/*!< in/out: compressed page */ const byte* rec, /*!< in: record on the uncompressed page */ ulint flag) /*!< in: the owned flag (nonzero=TRUE) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Insert a record to the dense page directory. */ @@ -363,7 +363,7 @@ page_zip_dir_delete( const ulint* offsets, /*!< in: rec_get_offsets(rec) */ const byte* free) /*!< in: previous start of the free list */ - __attribute__((nonnull(1,2,3,4))); + MY_ATTRIBUTE((nonnull(1,2,3,4))); /**********************************************************************//** Add a slot to the dense page directory. */ @@ -374,7 +374,7 @@ page_zip_dir_add_slot( page_zip_des_t* page_zip, /*!< in/out: compressed page */ ulint is_clustered) /*!< in: nonzero for clustered index, zero for others */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***********************************************************//** Parses a log record of writing to the header of a page. @@ -402,7 +402,7 @@ page_zip_write_header( const byte* str, /*!< in: address on the uncompressed page */ ulint length, /*!< in: length of the data */ mtr_t* mtr) /*!< in: mini-transaction, or NULL */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************************//** Reorganize and compress a page. This is a low-level operation for @@ -425,7 +425,7 @@ page_zip_reorganize( m_start, m_end, m_nonempty */ dict_index_t* index, /*!< in: index of the B-tree node */ mtr_t* mtr) /*!< in: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /**********************************************************************//** Copy the records of a page byte for byte. Do not copy the page header @@ -444,7 +444,7 @@ page_zip_copy_recs( const page_t* src, /*!< in: page */ dict_index_t* index, /*!< in: index of the B-tree */ mtr_t* mtr) /*!< in: mini-transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** @@ -458,7 +458,7 @@ page_zip_parse_compress( byte* end_ptr,/*!< in: buffer end */ page_t* page, /*!< out: uncompressed page */ page_zip_des_t* page_zip)/*!< out: compressed page */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); #endif /* !UNIV_INNOCHECKSUM */ @@ -472,7 +472,7 @@ page_zip_calc_checksum( const void* data, /*!< in: compressed page */ ulint size, /*!< in: size of compressed page */ srv_checksum_algorithm_t algo) /*!< in: algorithm to use */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Verify a compressed page's checksum. 
@@ -509,7 +509,7 @@ page_zip_parse_compress_no_data( page_t* page, /*!< in: uncompressed page */ page_zip_des_t* page_zip, /*!< out: compressed page */ dict_index_t* index) /*!< in: index */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************************//** Reset the counters used for filling diff --git a/storage/xtradb/include/pars0pars.h b/storage/xtradb/include/pars0pars.h index 65ff7533828..73585c78a6a 100644 --- a/storage/xtradb/include/pars0pars.h +++ b/storage/xtradb/include/pars0pars.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -472,7 +472,7 @@ pars_complete_graph_for_exec( query graph, or NULL for dummy graph */ trx_t* trx, /*!< in: transaction handle */ mem_heap_t* heap) /*!< in: memory heap from which allocated */ - __attribute__((nonnull(2,3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(2,3), warn_unused_result)); /****************************************************************//** Create parser info struct. @@ -628,7 +628,7 @@ pars_info_bind_ull_literal( pars_info_t* info, /*!< in: info struct */ const char* name, /*!< in: name */ const ib_uint64_t* val) /*!< in: value */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /****************************************************************//** Add bound id. */ diff --git a/storage/xtradb/include/read0read.h b/storage/xtradb/include/read0read.h index 0352f129c30..2d6885884f7 100644 --- a/storage/xtradb/include/read0read.h +++ b/storage/xtradb/include/read0read.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -116,7 +116,7 @@ read_view_sees_trx_id( /*==================*/ const read_view_t* view, /*!< in: read view */ trx_id_t trx_id) /*!< in: trx id */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Prints a read view to file. */ UNIV_INTERN diff --git a/storage/xtradb/include/rem0cmp.h b/storage/xtradb/include/rem0cmp.h index cb3c85ac2c8..65116229fdc 100644 --- a/storage/xtradb/include/rem0cmp.h +++ b/storage/xtradb/include/rem0cmp.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -174,7 +174,7 @@ cmp_dtuple_rec_with_match_low( bytes within the first field not completely matched; when function returns, contains the value for current comparison */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #define cmp_dtuple_rec_with_match(tuple,rec,offsets,fields,bytes) \ cmp_dtuple_rec_with_match_low( \ tuple,rec,offsets,dtuple_get_n_fields_cmp(tuple),fields,bytes) @@ -218,7 +218,7 @@ cmp_rec_rec_simple( struct TABLE* table) /*!< in: MySQL table, for reporting duplicate key value if applicable, or NULL */ - __attribute__((nonnull(1,2,3,4), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,4), warn_unused_result)); /*************************************************************//** This function is used to compare two physical records. Only the common first fields are compared, and if an externally stored field is diff --git a/storage/xtradb/include/rem0rec.h b/storage/xtradb/include/rem0rec.h index 8e7d5ff2d48..971709fd2c8 100644 --- a/storage/xtradb/include/rem0rec.h +++ b/storage/xtradb/include/rem0rec.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -98,7 +98,7 @@ rec_get_next_ptr_const( /*===================*/ const rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to get the pointer of the next chained record on the same page. @@ -109,7 +109,7 @@ rec_get_next_ptr( /*=============*/ rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to get the offset of the next chained record on the same page. @@ -120,7 +120,7 @@ rec_get_next_offs( /*==============*/ const rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the next record offset field of an old-style record. */ @@ -130,7 +130,7 @@ rec_set_next_offs_old( /*==================*/ rec_t* rec, /*!< in: old-style physical record */ ulint next) /*!< in: offset of the next record */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to set the next record offset field of a new-style record. 
*/ @@ -140,7 +140,7 @@ rec_set_next_offs_new( /*==================*/ rec_t* rec, /*!< in/out: new-style physical record */ ulint next) /*!< in: offset of the next record */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to get the number of fields in an old-style record. @@ -150,7 +150,7 @@ ulint rec_get_n_fields_old( /*=================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to get the number of fields in a record. @@ -161,7 +161,7 @@ rec_get_n_fields( /*=============*/ const rec_t* rec, /*!< in: physical record */ const dict_index_t* index) /*!< in: record descriptor */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to get the number of records owned by the previous directory record. @@ -171,7 +171,7 @@ ulint rec_get_n_owned_old( /*================*/ const rec_t* rec) /*!< in: old-style physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the number of owned records. */ UNIV_INLINE @@ -180,7 +180,7 @@ rec_set_n_owned_old( /*================*/ rec_t* rec, /*!< in: old-style physical record */ ulint n_owned) /*!< in: the number of owned */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to get the number of records owned by the previous directory record. @@ -190,7 +190,7 @@ ulint rec_get_n_owned_new( /*================*/ const rec_t* rec) /*!< in: new-style physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the number of owned records. */ UNIV_INLINE @@ -200,7 +200,7 @@ rec_set_n_owned_new( rec_t* rec, /*!< in/out: new-style physical record */ page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */ ulint n_owned)/*!< in: the number of owned */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /******************************************************//** The following function is used to retrieve the info bits of a record. @@ -211,7 +211,7 @@ rec_get_info_bits( /*==============*/ const rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the info bits of a record. */ UNIV_INLINE @@ -220,7 +220,7 @@ rec_set_info_bits_old( /*==================*/ rec_t* rec, /*!< in: old-style physical record */ ulint bits) /*!< in: info bits */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to set the info bits of a record. 
*/ UNIV_INLINE @@ -229,7 +229,7 @@ rec_set_info_bits_new( /*==================*/ rec_t* rec, /*!< in/out: new-style physical record */ ulint bits) /*!< in: info bits */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function retrieves the status bits of a new-style record. @return status bits */ @@ -238,7 +238,7 @@ ulint rec_get_status( /*===========*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the status bits of a new-style record. */ @@ -248,7 +248,7 @@ rec_set_status( /*===========*/ rec_t* rec, /*!< in/out: physical record */ ulint bits) /*!< in: info bits */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to retrieve the info and status @@ -260,7 +260,7 @@ rec_get_info_and_status_bits( /*=========================*/ const rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the info and status bits of a record. (Only compact records have status bits.) */ @@ -270,7 +270,7 @@ rec_set_info_and_status_bits( /*=========================*/ rec_t* rec, /*!< in/out: compact physical record */ ulint bits) /*!< in: info bits */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function tells if record is delete marked. @@ -281,7 +281,7 @@ rec_get_deleted_flag( /*=================*/ const rec_t* rec, /*!< in: physical record */ ulint comp) /*!< in: nonzero=compact page format */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the deleted bit. */ UNIV_INLINE @@ -290,7 +290,7 @@ rec_set_deleted_flag_old( /*=====================*/ rec_t* rec, /*!< in: old-style physical record */ ulint flag) /*!< in: nonzero if delete marked */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to set the deleted bit. */ UNIV_INLINE @@ -300,7 +300,7 @@ rec_set_deleted_flag_new( rec_t* rec, /*!< in/out: new-style physical record */ page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */ ulint flag) /*!< in: nonzero if delete marked */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /******************************************************//** The following function tells if a new-style record is a node pointer. @return TRUE if node pointer */ @@ -309,7 +309,7 @@ ibool rec_get_node_ptr_flag( /*==================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to get the order number of an old-style record in the heap of the index page. 
@@ -319,7 +319,7 @@ ulint rec_get_heap_no_old( /*================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the heap number field in an old-style record. */ @@ -329,7 +329,7 @@ rec_set_heap_no_old( /*================*/ rec_t* rec, /*!< in: physical record */ ulint heap_no)/*!< in: the heap number */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to get the order number of a new-style record in the heap of the index page. @@ -339,7 +339,7 @@ ulint rec_get_heap_no_new( /*================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the heap number field in a new-style record. */ @@ -349,7 +349,7 @@ rec_set_heap_no_new( /*================*/ rec_t* rec, /*!< in/out: physical record */ ulint heap_no)/*!< in: the heap number */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** The following function is used to test whether the data offsets in the record are stored in one-byte or two-byte format. @@ -359,7 +359,7 @@ ibool rec_get_1byte_offs_flag( /*====================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** The following function is used to set the 1-byte offsets flag. */ @@ -369,7 +369,7 @@ rec_set_1byte_offs_flag( /*====================*/ rec_t* rec, /*!< in: physical record */ ibool flag) /*!< in: TRUE if 1byte form */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** Returns the offset of nth field end if the record is stored in the 1-byte @@ -382,7 +382,7 @@ rec_1_get_field_end_info( /*=====================*/ const rec_t* rec, /*!< in: record */ ulint n) /*!< in: field index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Returns the offset of nth field end if the record is stored in the 2-byte @@ -396,7 +396,7 @@ rec_2_get_field_end_info( /*=====================*/ const rec_t* rec, /*!< in: record */ ulint n) /*!< in: field index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Returns nonzero if the field is stored off-page. 
@@ -408,7 +408,7 @@ rec_2_is_field_extern( /*==================*/ const rec_t* rec, /*!< in: record */ ulint n) /*!< in: field index */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Determine how many of the first n columns in a compact @@ -421,7 +421,7 @@ rec_get_n_extern_new( const rec_t* rec, /*!< in: compact physical record */ const dict_index_t* index, /*!< in: record descriptor */ ulint n) /*!< in: number of columns to scan */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************//** The following function determines the offsets to each field @@ -446,9 +446,9 @@ rec_get_offsets_func( #endif /* UNIV_DEBUG */ mem_heap_t** heap) /*!< in/out: memory heap */ #ifdef UNIV_DEBUG - __attribute__((nonnull(1,2,5,7),warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,5,7),warn_unused_result)); #else /* UNIV_DEBUG */ - __attribute__((nonnull(1,2,5),warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,5),warn_unused_result)); #endif /* UNIV_DEBUG */ #ifdef UNIV_DEBUG @@ -475,7 +475,7 @@ rec_get_offsets_reverse( 0=leaf node */ ulint* offsets)/*!< in/out: array consisting of offsets[0] allocated elements */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG /************************************************************//** Validates offsets returned by rec_get_offsets(). @@ -488,7 +488,7 @@ rec_offs_validate( const dict_index_t* index, /*!< in: record descriptor or NULL */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull(3), warn_unused_result)); + MY_ATTRIBUTE((nonnull(3), warn_unused_result)); /************************************************************//** Updates debug data in offsets, in order to avoid bogus rec_offs_validate() failures. */ @@ -500,7 +500,7 @@ rec_offs_make_valid( const dict_index_t* index, /*!< in: record descriptor */ ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #else # define rec_offs_make_valid(rec, index, offsets) ((void) 0) #endif /* UNIV_DEBUG */ @@ -517,7 +517,7 @@ rec_get_nth_field_offs_old( ulint n, /*!< in: index of the field */ ulint* len) /*!< out: length of the field; UNIV_SQL_NULL if SQL null */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #define rec_get_nth_field_old(rec, n, len) \ ((rec) + rec_get_nth_field_offs_old(rec, n, len)) /************************************************************//** @@ -531,7 +531,7 @@ rec_get_nth_field_size( /*===================*/ const rec_t* rec, /*!< in: record */ ulint n) /*!< in: index of the field */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /************************************************************//** The following function is used to get an offset to the nth data field in a record. 
@@ -544,7 +544,7 @@ rec_get_nth_field_offs( ulint n, /*!< in: index of the field */ ulint* len) /*!< out: length of the field; UNIV_SQL_NULL if SQL null */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #define rec_get_nth_field(rec, offsets, n, len) \ ((rec) + rec_get_nth_field_offs(offsets, n, len)) /******************************************************//** @@ -556,7 +556,7 @@ ulint rec_offs_comp( /*==========*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Determine if the offsets are for a record containing externally stored columns. @@ -566,7 +566,7 @@ ulint rec_offs_any_extern( /*================*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Determine if the offsets are for a record containing null BLOB pointers. @return first field containing a null BLOB pointer, or NULL if none found */ @@ -576,7 +576,7 @@ rec_offs_any_null_extern( /*=====================*/ const rec_t* rec, /*!< in: record */ const ulint* offsets) /*!< in: rec_get_offsets(rec) */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Returns nonzero if the extern bit is set in nth field of rec. @return nonzero if externally stored */ @@ -586,7 +586,7 @@ rec_offs_nth_extern( /*================*/ const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ ulint n) /*!< in: nth field */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Returns nonzero if the SQL NULL bit is set in nth field of rec. @return nonzero if SQL NULL */ @@ -596,7 +596,7 @@ rec_offs_nth_sql_null( /*==================*/ const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ ulint n) /*!< in: nth field */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Gets the physical size of a field. @return length of field */ @@ -606,7 +606,7 @@ rec_offs_nth_size( /*==============*/ const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ ulint n) /*!< in: nth field */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /******************************************************//** Returns the number of extern bits set in a record. @@ -616,7 +616,7 @@ ulint rec_offs_n_extern( /*==============*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /***********************************************************//** This is used to modify the value of an already existing field in a record. The previous value must have exactly the same size as the new value. If len @@ -636,7 +636,7 @@ rec_set_nth_field( length as the previous value. If SQL null, previous value must be SQL null. 
*/ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /**********************************************************//** The following function returns the data size of an old-style physical record, that is the sum of field lengths. SQL null fields @@ -648,7 +648,7 @@ ulint rec_get_data_size_old( /*==================*/ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** The following function returns the number of allocated elements for an array of offsets. @@ -658,7 +658,7 @@ ulint rec_offs_get_n_alloc( /*=================*/ const ulint* offsets)/*!< in: array for rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** The following function sets the number of allocated elements for an array of offsets. */ @@ -669,7 +669,7 @@ rec_offs_set_n_alloc( ulint* offsets, /*!< out: array for rec_get_offsets(), must be allocated */ ulint n_alloc) /*!< in: number of elements */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #define rec_offs_init(offsets) \ rec_offs_set_n_alloc(offsets, (sizeof offsets) / sizeof *offsets) /**********************************************************//** @@ -680,7 +680,7 @@ ulint rec_offs_n_fields( /*==============*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** The following function returns the data size of a physical record, that is the sum of field lengths. SQL null fields @@ -692,7 +692,7 @@ ulint rec_offs_data_size( /*===============*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** Returns the total size of record minus data size of record. The value returned by the function is the distance from record @@ -703,7 +703,7 @@ ulint rec_offs_extra_size( /*================*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** Returns the total size of a physical record. @return size */ @@ -712,7 +712,7 @@ ulint rec_offs_size( /*==========*/ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #ifdef UNIV_DEBUG /**********************************************************//** Returns a pointer to the start of the record. @@ -723,7 +723,7 @@ rec_get_start( /*==========*/ const rec_t* rec, /*!< in: pointer to record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /**********************************************************//** Returns a pointer to the end of the record. 
@return pointer to end */ @@ -733,7 +733,7 @@ rec_get_end( /*========*/ const rec_t* rec, /*!< in: pointer to record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #else /* UNIV_DEBUG */ # define rec_get_start(rec, offsets) ((rec) - rec_offs_extra_size(offsets)) # define rec_get_end(rec, offsets) ((rec) + rec_offs_data_size(offsets)) @@ -748,7 +748,7 @@ rec_copy( void* buf, /*!< in: buffer */ const rec_t* rec, /*!< in: physical record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /**********************************************************//** Determines the size of a data tuple prefix in a temporary file. @@ -761,7 +761,7 @@ rec_get_converted_size_temp( const dfield_t* fields, /*!< in: array of data fields */ ulint n_fields,/*!< in: number of data fields */ ulint* extra) /*!< out: extra size */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /******************************************************//** Determine the offset to each field in temporary file. @@ -774,7 +774,7 @@ rec_init_offsets_temp( const dict_index_t* index, /*!< in: record descriptor */ ulint* offsets)/*!< in/out: array of offsets; in: n=rec_offs_n_fields(offsets) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************//** Builds a temporary file record out of a data tuple. @@ -787,7 +787,7 @@ rec_convert_dtuple_to_temp( const dict_index_t* index, /*!< in: record descriptor */ const dfield_t* fields, /*!< in: array of data fields */ ulint n_fields) /*!< in: number of fields */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**************************************************************//** Copies the first n fields of a physical record to a new physical record in @@ -805,7 +805,7 @@ rec_copy_prefix_to_buf( for the copied prefix, or NULL */ ulint* buf_size) /*!< in/out: buffer size */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /************************************************************//** Folds a prefix of a physical record to a ulint. @return the folded value */ @@ -821,7 +821,7 @@ rec_fold( ulint n_bytes, /*!< in: number of bytes to fold in an incomplete last field */ index_id_t tree_id) /*!< in: index tree id */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /*********************************************************//** Builds a physical record out of a data tuple and @@ -837,7 +837,7 @@ rec_convert_dtuple_to_rec( const dtuple_t* dtuple, /*!< in: data tuple */ ulint n_ext) /*!< in: number of externally stored columns */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Returns the extra size of an old-style physical record if we know its data size and number of fields. @@ -849,7 +849,7 @@ rec_get_converted_extra_size( ulint data_size, /*!< in: data size */ ulint n_fields, /*!< in: number of fields */ ulint n_ext) /*!< in: number of externally stored columns */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /**********************************************************//** Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT. 
@return total size */ @@ -861,7 +861,7 @@ rec_get_converted_size_comp_prefix( const dfield_t* fields, /*!< in: array of data fields */ ulint n_fields,/*!< in: number of data fields */ ulint* extra) /*!< out: extra size */ - __attribute__((warn_unused_result, nonnull(1,2))); + MY_ATTRIBUTE((warn_unused_result, nonnull(1,2))); /**********************************************************//** Determines the size of a data tuple in ROW_FORMAT=COMPACT. @return total size */ @@ -877,7 +877,7 @@ rec_get_converted_size_comp( const dfield_t* fields, /*!< in: array of data fields */ ulint n_fields,/*!< in: number of data fields */ ulint* extra) /*!< out: extra size */ - __attribute__((nonnull(1,3))); + MY_ATTRIBUTE((nonnull(1,3))); /**********************************************************//** The following function returns the size of a data tuple when converted to a physical record. @@ -889,7 +889,7 @@ rec_get_converted_size( dict_index_t* index, /*!< in: record descriptor */ const dtuple_t* dtuple, /*!< in: data tuple */ ulint n_ext) /*!< in: number of externally stored columns */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); #ifndef UNIV_HOTBACKUP /**************************************************************//** Copies the first n fields of a physical record to a data tuple. @@ -904,7 +904,7 @@ rec_copy_prefix_to_dtuple( ulint n_fields, /*!< in: number of fields to copy */ mem_heap_t* heap) /*!< in: memory heap */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***************************************************************//** Validates the consistency of a physical record. @@ -915,7 +915,7 @@ rec_validate( /*=========*/ const rec_t* rec, /*!< in: physical record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***************************************************************//** Prints an old-style physical record. */ UNIV_INTERN @@ -924,7 +924,7 @@ rec_print_old( /*==========*/ FILE* file, /*!< in: file where to print */ const rec_t* rec) /*!< in: physical record */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP /***************************************************************//** Prints a physical record in ROW_FORMAT=COMPACT. Ignores the @@ -936,7 +936,7 @@ rec_print_comp( FILE* file, /*!< in: file where to print */ const rec_t* rec, /*!< in: physical record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***************************************************************//** Prints a physical record. */ UNIV_INTERN @@ -946,7 +946,7 @@ rec_print_new( FILE* file, /*!< in: file where to print */ const rec_t* rec, /*!< in: physical record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***************************************************************//** Prints a physical record. 
*/ UNIV_INTERN @@ -956,7 +956,7 @@ rec_print( FILE* file, /*!< in: file where to print */ const rec_t* rec, /*!< in: physical record */ const dict_index_t* index) /*!< in: record descriptor */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); # ifdef UNIV_DEBUG /************************************************************//** @@ -968,7 +968,7 @@ rec_get_trx_id( /*===========*/ const rec_t* rec, /*!< in: record */ const dict_index_t* index) /*!< in: clustered index */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); # endif /* UNIV_DEBUG */ #endif /* UNIV_HOTBACKUP */ diff --git a/storage/xtradb/include/rem0rec.ic b/storage/xtradb/include/rem0rec.ic index a539320dd2a..5811a77a48b 100644 --- a/storage/xtradb/include/rem0rec.ic +++ b/storage/xtradb/include/rem0rec.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1540,7 +1540,8 @@ rec_copy( ulint extra_len; ulint data_len; - ut_ad(rec && buf); + ut_ad(rec != NULL); + ut_ad(buf != NULL); ut_ad(rec_offs_validate(rec, NULL, offsets)); ut_ad(rec_validate(rec, offsets)); diff --git a/storage/xtradb/include/row0ftsort.h b/storage/xtradb/include/row0ftsort.h index 4e04a099140..e949ba302b9 100644 --- a/storage/xtradb/include/row0ftsort.h +++ b/storage/xtradb/include/row0ftsort.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -187,7 +187,7 @@ row_fts_psort_info_init( instantiated */ fts_psort_t** merge) /*!< out: parallel merge info to be instantiated */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Clean up and deallocate FTS parallel sort structures, and close temparary merge sort files */ @@ -275,5 +275,5 @@ row_fts_merge_insert( fts_psort_t* psort_info, /*!< parallel sort info */ ulint id) /* !< in: which auxiliary table's data to insert to */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* row0ftsort_h */ diff --git a/storage/xtradb/include/row0import.h b/storage/xtradb/include/row0import.h index aa46fdb7c27..a821c230a3b 100644 --- a/storage/xtradb/include/row0import.h +++ b/storage/xtradb/include/row0import.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -46,7 +46,7 @@ row_import_for_mysql( dict_table_t* table, /*!< in/out: table */ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Update the DICT_TF2_DISCARDED flag in SYS_TABLES. @@ -64,7 +64,7 @@ row_import_update_discarded_flag( bool dict_locked) /*!< in: Set to true if the caller already owns the dict_sys_t:: mutex. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Update the (space, root page) of a table's indexes from the values @@ -83,7 +83,7 @@ row_import_update_index_root( bool dict_locked) /*!< in: Set to true if the caller already owns the dict_sys_t:: mutex. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "row0import.ic" #endif diff --git a/storage/xtradb/include/row0ins.h b/storage/xtradb/include/row0ins.h index 2a892d2f5df..71ee39070ef 100644 --- a/storage/xtradb/include/row0ins.h +++ b/storage/xtradb/include/row0ins.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,7 +53,7 @@ row_ins_check_foreign_constraint( table, else the referenced table */ dtuple_t* entry, /*!< in: index entry for index */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Creates an insert node struct. @return own: insert node struct */ @@ -98,7 +98,7 @@ row_ins_clust_index_entry_low( dtuple_t* entry, /*!< in/out: index entry to insert */ ulint n_ext, /*!< in: number of externally stored columns */ que_thr_t* thr) /*!< in: query thread or NULL */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***************************************************************//** Tries to insert an entry into a secondary index. If a record with exactly the same fields is found, the other record is necessarily marked deleted. @@ -123,7 +123,7 @@ row_ins_sec_index_entry_low( trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during row_log_table_apply(), or 0 */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***************************************************************//** Tries to insert the externally stored fields (off-page columns) of a clustered index entry. 
@@ -142,7 +142,7 @@ row_ins_index_entry_big_rec_func( const void* thd, /*!< in: connection, or NULL */ #endif /* DBUG_OFF */ ulint line) /*!< in: line number of caller */ - __attribute__((nonnull(1,2,3,4,5,6), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,4,5,6), warn_unused_result)); #ifdef DBUG_OFF # define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd,file,line) \ row_ins_index_entry_big_rec_func(e,big,ofs,heap,index,file,line) @@ -164,7 +164,7 @@ row_ins_clust_index_entry( dtuple_t* entry, /*!< in/out: index entry to insert */ que_thr_t* thr, /*!< in: query thread */ ulint n_ext) /*!< in: number of externally stored columns */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***************************************************************//** Inserts an entry into a secondary index. Tries first optimistic, then pessimistic descent down the tree. If the entry matches enough @@ -178,7 +178,7 @@ row_ins_sec_index_entry( dict_index_t* index, /*!< in: secondary index */ dtuple_t* entry, /*!< in/out: index entry to insert */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************//** Inserts a row to a table. This is a high-level function used in SQL execution graphs. diff --git a/storage/xtradb/include/row0log.h b/storage/xtradb/include/row0log.h index 5eed390aced..ec14556588b 100644 --- a/storage/xtradb/include/row0log.h +++ b/storage/xtradb/include/row0log.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -54,7 +54,7 @@ row_log_allocate( const ulint* col_map,/*!< in: mapping of old column numbers to new ones, or NULL if !table */ const char* path) /*!< in: where to create temporary file */ - __attribute__((nonnull(1), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /******************************************************//** Free the row log for an index that was being created online. */ @@ -63,7 +63,7 @@ void row_log_free( /*=========*/ row_log_t*& log) /*!< in,own: row log */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** Free the row log for an index on which online creation was aborted. */ @@ -72,7 +72,7 @@ void row_log_abort_sec( /*==============*/ dict_index_t* index) /*!< in/out: index (x-latched) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************//** Try to log an operation to a secondary index that is @@ -87,7 +87,7 @@ row_log_online_op_try( const dtuple_t* tuple, /*!< in: index tuple */ trx_id_t trx_id) /*!< in: transaction ID for insert, or 0 for delete */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************//** Logs an operation to a secondary index that is (or was) being created. 
*/ UNIV_INTERN @@ -98,7 +98,7 @@ row_log_online_op( const dtuple_t* tuple, /*!< in: index tuple */ trx_id_t trx_id) /*!< in: transaction ID for insert, or 0 for delete */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /******************************************************//** Gets the error status of the online index rebuild log. @@ -109,7 +109,7 @@ row_log_table_get_error( /*====================*/ const dict_index_t* index) /*!< in: clustered index of a table that is being rebuilt online */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************//** Logs a delete operation to a table that is being rebuilt. @@ -125,7 +125,7 @@ row_log_table_delete( const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ const byte* sys) /*!< in: DB_TRX_ID,DB_ROLL_PTR that should be logged, or NULL to use those in rec */ - UNIV_COLD __attribute__((nonnull(1,2,3))); + UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,3))); /******************************************************//** Logs an update operation to a table that is being rebuilt. @@ -141,7 +141,7 @@ row_log_table_update( const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */ const dtuple_t* old_pk) /*!< in: row_log_table_get_pk() before the update */ - UNIV_COLD __attribute__((nonnull(1,2,3))); + UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,3))); /******************************************************//** Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR @@ -161,7 +161,7 @@ row_log_table_get_pk( byte* sys, /*!< out: DB_TRX_ID,DB_ROLL_PTR for row_log_table_delete(), or NULL */ mem_heap_t** heap) /*!< in/out: memory heap where allocated */ - UNIV_COLD __attribute__((nonnull(1,2,5), warn_unused_result)); + UNIV_COLD MY_ATTRIBUTE((nonnull(1,2,5), warn_unused_result)); /******************************************************//** Logs an insert to a table that is being rebuilt. @@ -175,7 +175,7 @@ row_log_table_insert( dict_index_t* index, /*!< in/out: clustered index, S-latched or X-latched */ const ulint* offsets)/*!< in: rec_get_offsets(rec,index) */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /******************************************************//** Notes that a BLOB is being freed during online ALTER TABLE. */ UNIV_INTERN @@ -184,7 +184,7 @@ row_log_table_blob_free( /*====================*/ dict_index_t* index, /*!< in/out: clustered index, X-latched */ ulint page_no)/*!< in: starting page number of the BLOB */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /******************************************************//** Notes that a BLOB is being allocated during online ALTER TABLE. */ UNIV_INTERN @@ -193,7 +193,7 @@ row_log_table_blob_alloc( /*=====================*/ dict_index_t* index, /*!< in/out: clustered index, X-latched */ ulint page_no)/*!< in: starting page number of the BLOB */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /******************************************************//** Apply the row_log_table log to a table upon completing rebuild. 
@return DB_SUCCESS, or error code on failure */ @@ -206,7 +206,7 @@ row_log_table_apply( /*!< in: old table */ struct TABLE* table) /*!< in/out: MySQL table (for reporting duplicates) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************//** Get the latest transaction ID that has invoked row_log_online_op() @@ -217,7 +217,7 @@ trx_id_t row_log_get_max_trx( /*================*/ dict_index_t* index) /*!< in: index, must be locked */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************//** Merge the row log to the index upon completing index creation. @@ -231,7 +231,7 @@ row_log_apply( dict_index_t* index, /*!< in/out: secondary index */ struct TABLE* table) /*!< in/out: MySQL table (for reporting duplicates) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "row0log.ic" diff --git a/storage/xtradb/include/row0merge.h b/storage/xtradb/include/row0merge.h index 06e9fec544b..9d3395f3734 100644 --- a/storage/xtradb/include/row0merge.h +++ b/storage/xtradb/include/row0merge.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -127,7 +127,7 @@ row_merge_dup_report( /*=================*/ row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */ const dfield_t* entry) /*!< in: duplicate index entry */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Sets an exclusive lock on a table, for the duration of creating indexes. @return error code or DB_SUCCESS */ @@ -138,7 +138,7 @@ row_merge_lock_table( trx_t* trx, /*!< in/out: transaction */ dict_table_t* table, /*!< in: table to lock */ enum lock_mode mode) /*!< in: LOCK_X or LOCK_S */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Drop indexes that were created before an error occurred. The data dictionary must have been locked exclusively by the caller, @@ -149,7 +149,7 @@ row_merge_drop_indexes_dict( /*========================*/ trx_t* trx, /*!< in/out: dictionary transaction */ table_id_t table_id)/*!< in: table identifier */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Drop those indexes which were created before an error occurred. The data dictionary must have been locked exclusively by the caller, @@ -162,7 +162,7 @@ row_merge_drop_indexes( dict_table_t* table, /*!< in/out: table containing the indexes */ ibool locked) /*!< in: TRUE=table locked, FALSE=may need to do a lazy drop */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Drop all partially created indexes during crash recovery. 
*/ UNIV_INTERN @@ -178,7 +178,7 @@ UNIV_INTERN int row_merge_file_create_low( const char* path) - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*********************************************************************//** Destroy a merge file. And de-register the file from Performance Schema if UNIV_PFS_IO is defined. */ @@ -214,7 +214,7 @@ row_merge_rename_tables_dict( old_table->name */ const char* tmp_name, /*!< in: new name for old_table */ trx_t* trx) /*!< in/out: dictionary transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Rename an index in the dictionary that was created. The data @@ -228,7 +228,7 @@ row_merge_rename_index_to_add( trx_t* trx, /*!< in/out: transaction */ table_id_t table_id, /*!< in: table identifier */ index_id_t index_id) /*!< in: index identifier */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Rename an index in the dictionary that is to be dropped. The data dictionary must have been locked exclusively by the caller, because @@ -241,7 +241,7 @@ row_merge_rename_index_to_drop( trx_t* trx, /*!< in/out: transaction */ table_id_t table_id, /*!< in: table identifier */ index_id_t index_id) /*!< in: index identifier */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Create the index and load in to the dictionary. @return index, or NULL on error */ @@ -274,7 +274,7 @@ row_merge_drop_table( /*=================*/ trx_t* trx, /*!< in: transaction */ dict_table_t* table) /*!< in: table instance to drop */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Build indexes on a table by reading a clustered index, creating a temporary file containing index entries, merge sorting @@ -307,7 +307,7 @@ row_merge_build_indexes( AUTO_INCREMENT column, or ULINT_UNDEFINED if none is added */ ib_sequence_t& sequence) /*!< in/out: autoinc sequence */ - __attribute__((nonnull(1,2,3,5,6,8), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,5,6,8), warn_unused_result)); /********************************************************************//** Write a buffer to a block. */ UNIV_INTERN @@ -317,7 +317,7 @@ row_merge_buf_write( const row_merge_buf_t* buf, /*!< in: sorted buffer */ const merge_file_t* of, /*!< in: output file */ row_merge_block_t* block) /*!< out: buffer for writing to file */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Sort a buffer. */ UNIV_INTERN @@ -327,7 +327,7 @@ row_merge_buf_sort( row_merge_buf_t* buf, /*!< in/out: sort buffer */ row_merge_dup_t* dup) /*!< in/out: reporter of duplicates (NULL if non-unique index) */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /********************************************************************//** Write a merge block to the file system. @return TRUE if request was successful, FALSE if fail */ @@ -347,7 +347,7 @@ row_merge_buf_t* row_merge_buf_empty( /*================*/ row_merge_buf_t* buf) /*!< in,own: sort buffer */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /** Create a merge file in the given location. 
@param[out] merge_file merge file structure @@ -373,7 +373,7 @@ row_merge_sort( index entries */ row_merge_block_t* block, /*!< in/out: 3 buffers */ int* tmpfd) /*!< in/out: temporary file handle */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Allocate a sort buffer. @return own: sort buffer */ @@ -382,7 +382,7 @@ row_merge_buf_t* row_merge_buf_create( /*=================*/ dict_index_t* index) /*!< in: secondary index */ - __attribute__((warn_unused_result, nonnull, malloc)); + MY_ATTRIBUTE((warn_unused_result, nonnull, malloc)); /*********************************************************************//** Deallocate a sort buffer. */ UNIV_INTERN @@ -390,7 +390,7 @@ void row_merge_buf_free( /*===============*/ row_merge_buf_t* buf) /*!< in,own: sort buffer to be freed */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Destroy a merge file. */ UNIV_INTERN @@ -398,7 +398,7 @@ void row_merge_file_destroy( /*===================*/ merge_file_t* merge_file) /*!< in/out: merge file structure */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Read a merge block from the file system. @return TRUE if request was successful, FALSE if fail */ @@ -428,5 +428,5 @@ row_merge_read_rec( or NULL on end of list (non-NULL on I/O error) */ ulint* offsets)/*!< out: offsets of mrec */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* row0merge.h */ diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h index 06c07002c2b..fc1846b76f3 100644 --- a/storage/xtradb/include/row0mysql.h +++ b/storage/xtradb/include/row0mysql.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -167,7 +167,7 @@ row_mysql_handle_errors( trx_t* trx, /*!< in: transaction */ que_thr_t* thr, /*!< in: query thread, or NULL */ trx_savept_t* savept) /*!< in: savepoint, or NULL */ - __attribute__((nonnull(1,2))); + MY_ATTRIBUTE((nonnull(1,2))); /********************************************************************//** Create a prebuilt struct for a MySQL table handle. @return own: a prebuilt struct */ @@ -209,7 +209,7 @@ row_lock_table_autoinc_for_mysql( /*=============================*/ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in the MySQL table handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Sets a table lock on the table mentioned in prebuilt. @return error code or DB_SUCCESS */ @@ -225,7 +225,7 @@ row_lock_table_for_mysql( prebuilt->select_lock_type */ ulint mode) /*!< in: lock mode of table (ignored if table==NULL) */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*********************************************************************//** Does an insert for MySQL. 
@return error code or DB_SUCCESS */ @@ -236,7 +236,7 @@ row_insert_for_mysql( byte* mysql_rec, /*!< in: row in the MySQL format */ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Builds a dummy query graph used in selects. */ UNIV_INTERN @@ -276,7 +276,7 @@ row_update_for_mysql( the MySQL format */ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** This can only be used when srv_locks_unsafe_for_binlog is TRUE or this session is using a READ COMMITTED or READ UNCOMMITTED isolation level. @@ -297,7 +297,7 @@ row_unlock_for_mysql( the records under pcur and clust_pcur, and we do not need to reposition the cursors. */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Checks if a table name contains the string "/#sql" which denotes temporary tables in MySQL. @@ -306,7 +306,7 @@ UNIV_INTERN bool row_is_mysql_tmp_table_name( /*========================*/ - const char* name) __attribute__((warn_unused_result)); + const char* name) MY_ATTRIBUTE((warn_unused_result)); /*!< in: table name in the form 'database/tablename' */ @@ -331,7 +331,7 @@ row_update_cascade_for_mysql( upd_node_t* node, /*!< in: update node used in the cascade or set null operation */ dict_table_t* table) /*!< in: table where we do the operation */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Locks the data dictionary exclusively for performing a table create or other data dictionary modification operation. */ @@ -387,7 +387,7 @@ row_create_table_for_mysql( added to the data dictionary cache) */ trx_t* trx, /*!< in/out: transaction */ bool commit) /*!< in: if true, commit the transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Does an index creation operation for MySQL. TODO: currently failure to create an index results in dropping the whole table! This is no problem @@ -406,7 +406,7 @@ row_create_index_for_mysql( index columns, which are then checked for not being too large. */ - __attribute__((nonnull(1,2), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2), warn_unused_result)); /*********************************************************************//** Scans a table create SQL string and adds to the data dictionary the foreign key constraints declared in the string. This function @@ -432,7 +432,7 @@ row_table_add_foreign_constraints( ibool reject_fks) /*!< in: if TRUE, fail with error code DB_CANNOT_ADD_CONSTRAINT if any foreign keys are found. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** The master thread in srv0srv.cc calls this regularly to drop tables which we must drop in background after queries to them have ended. 
Such lazy @@ -461,7 +461,7 @@ row_mysql_lock_table( dict_table_t* table, /*!< in: table to lock */ enum lock_mode mode, /*!< in: LOCK_X or LOCK_S */ const char* op_info) /*!< in: string for trx->op_info */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Truncates a table for MySQL. @@ -472,7 +472,7 @@ row_truncate_table_for_mysql( /*=========================*/ dict_table_t* table, /*!< in: table handle */ trx_t* trx) /*!< in: transaction handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Drops a table for MySQL. If the name of the dropped table ends in one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor", @@ -491,7 +491,7 @@ row_drop_table_for_mysql( bool nonatomic = true) /*!< in: whether it is permitted to release and reacquire dict_operation_lock */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Drop all temporary tables during crash recovery. */ UNIV_INTERN @@ -510,7 +510,7 @@ row_discard_tablespace_for_mysql( /*=============================*/ const char* name, /*!< in: table name */ trx_t* trx) /*!< in: transaction handle */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Imports a tablespace. The space id in the .ibd file must match the space id of the table in the data dictionary. @@ -521,7 +521,7 @@ row_import_tablespace_for_mysql( /*============================*/ dict_table_t* table, /*!< in/out: table */ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Drops a database for MySQL. @return error code or DB_SUCCESS */ @@ -531,7 +531,7 @@ row_drop_database_for_mysql( /*========================*/ const char* name, /*!< in: database name which ends to '/' */ trx_t* trx) /*!< in: transaction handle */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Renames a table for MySQL. @return error code or DB_SUCCESS */ @@ -543,7 +543,7 @@ row_rename_table_for_mysql( const char* new_name, /*!< in: new table name */ trx_t* trx, /*!< in/out: transaction */ bool commit) /*!< in: whether to commit trx */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Checks that the index contains entries in an ascending order, unique constraint is not broken, and calculates the number of index entries @@ -558,7 +558,7 @@ row_check_index_for_mysql( const dict_index_t* index, /*!< in: index */ ulint* n_rows) /*!< out: number of entries seen in the consistent read */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Determines if a table is a magic monitor table. 
@return true if monitor table */ @@ -568,7 +568,7 @@ row_is_magic_monitor_table( /*=======================*/ const char* table_name) /*!< in: name of the table, in the form database/table_name */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Initialize this module */ UNIV_INTERN @@ -593,7 +593,7 @@ row_mysql_table_id_reassign( dict_table_t* table, /*!< in/out: table */ trx_t* trx, /*!< in/out: transaction */ table_id_t* new_id) /*!< out: new table id */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /* A struct describing a place for an individual column in the MySQL row format which is presented to the table handler in ha_innobase. diff --git a/storage/xtradb/include/row0purge.h b/storage/xtradb/include/row0purge.h index 888289a6c79..5df899bc399 100644 --- a/storage/xtradb/include/row0purge.h +++ b/storage/xtradb/include/row0purge.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -47,7 +47,7 @@ row_purge_node_create( que_thr_t* parent, /*!< in: parent node, i.e., a thr node */ mem_heap_t* heap) /*!< in: memory heap where created */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************//** Determines if it is possible to remove a secondary index entry. Removal is possible if the secondary index entry does not refer to any @@ -70,7 +70,7 @@ row_purge_poss_sec( purge_node_t* node, /*!< in/out: row purge node */ dict_index_t* index, /*!< in: secondary index */ const dtuple_t* entry) /*!< in: secondary index entry */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*************************************************************** Does the purge operation for a single undo log record. This is a high-level function used in an SQL execution graph. @@ -80,7 +80,7 @@ que_thr_t* row_purge_step( /*===========*/ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /* Purge node structure */ diff --git a/storage/xtradb/include/row0quiesce.h b/storage/xtradb/include/row0quiesce.h index 1d6d11291b8..35d8184d33c 100644 --- a/storage/xtradb/include/row0quiesce.h +++ b/storage/xtradb/include/row0quiesce.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -43,7 +43,7 @@ row_quiesce_table_start( /*====================*/ dict_table_t* table, /*!< in: quiesce this table */ trx_t* trx) /*!< in/out: transaction/session */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Set a table's quiesce state. 
@@ -55,7 +55,7 @@ row_quiesce_set_state( dict_table_t* table, /*!< in: quiesce this table */ ib_quiesce_t state, /*!< in: quiesce state to set */ trx_t* trx) /*!< in/out: transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Cleanup after table quiesce. */ @@ -65,7 +65,7 @@ row_quiesce_table_complete( /*=======================*/ dict_table_t* table, /*!< in: quiesce this table */ trx_t* trx) /*!< in/out: transaction/session */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_NONINL #include "row0quiesce.ic" diff --git a/storage/xtradb/include/row0row.h b/storage/xtradb/include/row0row.h index a4e5e0dd2fa..b04068c5a5d 100644 --- a/storage/xtradb/include/row0row.h +++ b/storage/xtradb/include/row0row.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -47,7 +47,7 @@ row_get_trx_id_offset( /*==================*/ const dict_index_t* index, /*!< in: clustered index */ const ulint* offsets)/*!< in: record offsets */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Reads the trx id field from a clustered index record. @return value of the field */ @@ -58,7 +58,7 @@ row_get_rec_trx_id( const rec_t* rec, /*!< in: record */ const dict_index_t* index, /*!< in: clustered index */ const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Reads the roll pointer field from a clustered index record. @return value of the field */ @@ -69,7 +69,7 @@ row_get_rec_roll_ptr( const rec_t* rec, /*!< in: record */ const dict_index_t* index, /*!< in: clustered index */ const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** When an insert or purge to a table is performed, this function builds the entry to be inserted into or purged from an index on the table. @@ -88,7 +88,7 @@ row_build_index_entry_low( mem_heap_t* heap) /*!< in: memory heap from which the memory for the index entry is allocated */ - __attribute__((warn_unused_result, nonnull(1,3,4))); + MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4))); /*****************************************************************//** When an insert or purge to a table is performed, this function builds the entry to be inserted into or purged from an index on the table. @@ -107,7 +107,7 @@ row_build_index_entry( mem_heap_t* heap) /*!< in: memory heap from which the memory for the index entry is allocated */ - __attribute__((warn_unused_result, nonnull(1,3,4))); + MY_ATTRIBUTE((warn_unused_result, nonnull(1,3,4))); /*******************************************************************//** An inverse function to row_build_index_entry. Builds a row from a record in a clustered index. 
@@ -155,7 +155,7 @@ row_build( prefixes, or NULL */ mem_heap_t* heap) /*!< in: memory heap from which the memory needed is allocated */ - __attribute__((nonnull(2,3,9))); + MY_ATTRIBUTE((nonnull(2,3,9))); /*******************************************************************//** Converts an index record to a typed data tuple. @return index entry built; does not set info_bits, and the data fields @@ -171,7 +171,7 @@ row_rec_to_index_entry_low( stored columns */ mem_heap_t* heap) /*!< in: memory heap from which the memory needed is allocated */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Converts an index record to a typed data tuple. NOTE that externally stored (often big) fields are NOT copied to heap. @@ -187,7 +187,7 @@ row_rec_to_index_entry( stored columns */ mem_heap_t* heap) /*!< in: memory heap from which the memory needed is allocated */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Builds from a secondary index record a row reference with which we can search the clustered index record. @@ -210,7 +210,7 @@ row_build_row_ref( as long as the row reference is used! */ mem_heap_t* heap) /*!< in: memory heap from which the memory needed is allocated */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Builds from a secondary index record a row reference with which we can search the clustered index record. */ @@ -232,7 +232,7 @@ row_build_row_ref_in_tuple( ulint* offsets,/*!< in: rec_get_offsets(rec, index) or NULL */ trx_t* trx) /*!< in: transaction or NULL */ - __attribute__((nonnull(1,2,3))); + MY_ATTRIBUTE((nonnull(1,2,3))); /*******************************************************************//** Builds from a secondary index record a row reference with which we can search the clustered index record. */ @@ -263,7 +263,7 @@ row_search_on_row_ref( const dict_table_t* table, /*!< in: table */ const dtuple_t* ref, /*!< in: row reference */ mtr_t* mtr) /*!< in/out: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Fetches the clustered index record for a secondary index record. The latches on the secondary index record are preserved. 
@@ -277,7 +277,7 @@ row_get_clust_rec( dict_index_t* index, /*!< in: secondary index */ dict_index_t** clust_index,/*!< out: clustered index */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Result of row_search_index_entry */ enum row_search_result { @@ -305,7 +305,7 @@ row_search_index_entry( btr_pcur_t* pcur, /*!< in/out: persistent cursor, which must be closed by the caller */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #define ROW_COPY_DATA 1 #define ROW_COPY_POINTERS 2 @@ -334,7 +334,7 @@ row_raw_format( char* buf, /*!< out: output buffer */ ulint buf_size) /*!< in: output buffer size in bytes */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "row0row.ic" diff --git a/storage/xtradb/include/row0sel.h b/storage/xtradb/include/row0sel.h index c8be80f89d9..fd5bc755a22 100644 --- a/storage/xtradb/include/row0sel.h +++ b/storage/xtradb/include/row0sel.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -168,7 +168,7 @@ row_search_for_mysql( then prebuilt must have a pcur with stored position! In opening of a cursor 'direction' should be 0. */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Checks if MySQL at the moment is allowed for this table to retrieve a consistent read result, or store it to the query cache. @@ -190,7 +190,7 @@ row_search_max_autoinc( dict_index_t* index, /*!< in: index to search */ const char* col_name, /*!< in: autoinc column name */ ib_uint64_t* value) /*!< out: AUTOINC value read */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /** A structure for caching column values for prefetched rows */ struct sel_buf_t{ diff --git a/storage/xtradb/include/row0uins.h b/storage/xtradb/include/row0uins.h index ebf4881208a..89e334e5433 100644 --- a/storage/xtradb/include/row0uins.h +++ b/storage/xtradb/include/row0uins.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -46,7 +46,7 @@ dberr_t row_undo_ins( /*=========*/ undo_node_t* node) /*!< in: row undo node */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "row0uins.ic" #endif diff --git a/storage/xtradb/include/row0umod.h b/storage/xtradb/include/row0umod.h index f89d5a334fc..4f1d8e1f66c 100644 --- a/storage/xtradb/include/row0umod.h +++ b/storage/xtradb/include/row0umod.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -43,7 +43,7 @@ row_undo_mod( /*=========*/ undo_node_t* node, /*!< in: row undo node */ que_thr_t* thr) /*!< in: query thread */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #ifndef UNIV_NONINL #include "row0umod.ic" diff --git a/storage/xtradb/include/row0upd.h b/storage/xtradb/include/row0upd.h index 27dedeb65a7..e59ec58b63c 100644 --- a/storage/xtradb/include/row0upd.h +++ b/storage/xtradb/include/row0upd.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -91,7 +91,7 @@ upd_get_field_by_field_no( /*======================*/ const upd_t* update, /*!< in: update vector */ ulint no) /*!< in: field_no */ - __attribute__((nonnull, pure)); + MY_ATTRIBUTE((nonnull, pure)); /*********************************************************************//** Writes into the redo log the values of trx id and roll ptr and enough info to determine their positions within a clustered index record. @@ -174,7 +174,7 @@ bool row_upd_changes_disowned_external( /*==============================*/ const upd_t* update) /*!< in: update vector */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Replaces the new column values stored in the update vector to the @@ -207,7 +207,7 @@ row_upd_build_sec_rec_difference_binary( const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ const dtuple_t* entry, /*!< in: entry to insert */ mem_heap_t* heap) /*!< in: memory heap from which allocated */ - __attribute__((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull)); /***************************************************************//** Builds an update vector from those fields, excluding the roll ptr and trx id fields, which in an index entry differ from a record that has @@ -227,7 +227,7 @@ row_upd_build_difference_binary( trx_t* trx, /*!< in: transaction (for diagnostics), or NULL */ mem_heap_t* heap) /*!< in: memory heap from which allocated */ - __attribute__((nonnull(1,2,3,7), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,3,7), warn_unused_result)); /***********************************************************//** Replaces the new column values stored in the update vector to the index entry given. */ @@ -250,7 +250,7 @@ row_upd_index_replace_new_col_vals_index_pos( does not work for non-clustered indexes. */ mem_heap_t* heap) /*!< in: memory heap for allocating and copying the new values */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***********************************************************//** Replaces the new column values stored in the update vector to the index entry given. 
*/ @@ -269,7 +269,7 @@ row_upd_index_replace_new_col_vals( an upd_field is the clustered index position */ mem_heap_t* heap) /*!< in: memory heap for allocating and copying the new values */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /***********************************************************//** Replaces the new column values stored in the update vector. */ UNIV_INTERN @@ -311,7 +311,7 @@ row_upd_changes_ord_field_binary_func( compile time */ const row_ext_t*ext) /*!< NULL, or prefixes of the externally stored columns in the old row */ - __attribute__((nonnull(1,2), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2), warn_unused_result)); #ifdef UNIV_DEBUG # define row_upd_changes_ord_field_binary(index,update,thr,row,ext) \ row_upd_changes_ord_field_binary_func(index,update,thr,row,ext) @@ -338,7 +338,7 @@ row_upd_changes_doc_id( /*===================*/ dict_table_t* table, /*!< in: table */ upd_field_t* upd_field) /*!< in: field to check */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************//** Checks if an update vector changes an ordering field of an index record. This function is fast if the update vector is short or the number of ordering diff --git a/storage/xtradb/include/row0vers.h b/storage/xtradb/include/row0vers.h index 1df5b4d3e98..7b850215701 100644 --- a/storage/xtradb/include/row0vers.h +++ b/storage/xtradb/include/row0vers.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -110,7 +110,7 @@ row_vers_build_for_consistent_read( if the history is missing or the record does not exist in the view, that is, it was freshly inserted afterwards */ - __attribute__((nonnull(1,2,3,4,5,6,7))); + MY_ATTRIBUTE((nonnull(1,2,3,4,5,6,7))); /*****************************************************************//** Constructs the last committed version of a clustered index record, @@ -136,7 +136,7 @@ row_vers_build_for_semi_consistent_read( const rec_t** old_vers)/*!< out: rec, old version, or NULL if the record does not exist in the view, that is, it was freshly inserted afterwards */ - __attribute__((nonnull(1,2,3,4,5))); + MY_ATTRIBUTE((nonnull(1,2,3,4,5))); #ifndef UNIV_NONINL diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index 480d1a2ac2a..95065d69974 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. 
@@ -913,7 +913,7 @@ UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_purge_coordinator_thread)( /*=========================================*/ - void* arg __attribute__((unused))); /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))); /*!< in: a dummy parameter required by os_thread_create */ /*********************************************************************//** @@ -923,7 +923,7 @@ UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_worker_thread)( /*==============================*/ - void* arg __attribute__((unused))); /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))); /*!< in: a dummy parameter required by os_thread_create */ } /* extern "C" */ diff --git a/storage/xtradb/include/srv0start.h b/storage/xtradb/include/srv0start.h index 40d502f4459..963b767f0fb 100644 --- a/storage/xtradb/include/srv0start.h +++ b/storage/xtradb/include/srv0start.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -105,7 +105,7 @@ srv_path_copy( ulint dest_len, /*!< in: max bytes to copy */ const char* basedir, /*!< in: base directory */ const char* table_name) /*!< in: source table name */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*****************************************************************//** Get the meta-data filename from the table name. */ @@ -116,7 +116,7 @@ srv_get_meta_data_filename( dict_table_t* table, /*!< in: table */ char* filename, /*!< out: filename */ ulint max_len) /*!< in: filename max length */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /** Log sequence number at shutdown */ extern lsn_t srv_shutdown_lsn; diff --git a/storage/xtradb/include/sync0arr.h b/storage/xtradb/include/sync0arr.h index 15dbdcb540d..a9e66f261c6 100644 --- a/storage/xtradb/include/sync0arr.h +++ b/storage/xtradb/include/sync0arr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -110,7 +110,7 @@ sync_array_print_long_waits( /*========================*/ os_thread_id_t* waiter, /*!< out: longest waiting thread */ const void** sema) /*!< out: longest-waited-for semaphore */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /********************************************************************//** Validates the integrity of the wait array. Checks that the number of reserved cells equals the count variable. */ diff --git a/storage/xtradb/include/sync0rw.h b/storage/xtradb/include/sync0rw.h index 93f184b6147..4f058190bd9 100644 --- a/storage/xtradb/include/sync0rw.h +++ b/storage/xtradb/include/sync0rw.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -370,7 +370,7 @@ ibool rw_lock_s_lock_low( /*===============*/ rw_lock_t* lock, /*!< in: pointer to rw-lock */ - ulint pass __attribute__((unused)), + ulint pass MY_ATTRIBUTE((unused)), /*!< in: pass value; != 0, if the lock will be passed to another thread to unlock */ const char* file_name, /*!< in: file name where lock requested */ @@ -633,7 +633,7 @@ rw_lock_own( rw_lock_t* lock, /*!< in: rw-lock */ ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED, RW_LOCK_EX */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Checks if the thread has locked the priority rw-lock in the specified mode, with the pass value == 0. */ @@ -644,7 +644,7 @@ rw_lock_own( prio_rw_lock_t* lock, /*!< in: rw-lock */ ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED, RW_LOCK_EX */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /* UNIV_SYNC_DEBUG */ /******************************************************************//** Checks if somebody has locked the rw-lock in the specified mode. */ diff --git a/storage/xtradb/include/sync0rw.ic b/storage/xtradb/include/sync0rw.ic index d7898befe8c..73b1ae2d361 100644 --- a/storage/xtradb/include/sync0rw.ic +++ b/storage/xtradb/include/sync0rw.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -363,7 +363,7 @@ ibool rw_lock_s_lock_low( /*===============*/ rw_lock_t* lock, /*!< in: pointer to rw-lock */ - ulint pass __attribute__((unused)), + ulint pass MY_ATTRIBUTE((unused)), /*!< in: pass value; != 0, if the lock will be passed to another thread to unlock */ const char* file_name, /*!< in: file name where lock requested */ diff --git a/storage/xtradb/include/sync0sync.h b/storage/xtradb/include/sync0sync.h index 93f37e6208b..77502ea1461 100644 --- a/storage/xtradb/include/sync0sync.h +++ b/storage/xtradb/include/sync0sync.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. @@ -566,7 +566,7 @@ ibool mutex_own( /*======*/ const ib_mutex_t* mutex) /*!< in: mutex */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Checks that the current thread owns the priority mutex. Works only in the debug version. 
@@ -576,7 +576,7 @@ ibool mutex_own( /*======*/ const ib_prio_mutex_t* mutex) /*!< in: priority mutex */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /* UNIV_DEBUG */ #ifdef UNIV_SYNC_DEBUG /******************************************************************//** @@ -591,7 +591,7 @@ sync_thread_add_level( ulint level, /*!< in: level in the latching order; if SYNC_LEVEL_VARYING, nothing is done */ ibool relock) /*!< in: TRUE if re-entering an x-lock */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /******************************************************************//** Removes a latch from the thread level array if it is found there. @return TRUE if found in the array; it is no error if the latch is @@ -621,7 +621,7 @@ sync_thread_levels_nonempty_gen( /*============================*/ ibool dict_mutex_allowed) /*!< in: TRUE if dictionary mutex is allowed to be owned by the thread */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Checks if the level array for the current thread is empty, except for data dictionary latches. */ @@ -638,7 +638,7 @@ sync_thread_levels_nonempty_trx( ibool has_search_latch) /*!< in: TRUE if and only if the thread is supposed to hold btr_search_latch */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Gets the debug information for a reserved mutex. */ diff --git a/storage/xtradb/include/trx0rec.h b/storage/xtradb/include/trx0rec.h index 96e7d595035..359937e3583 100644 --- a/storage/xtradb/include/trx0rec.h +++ b/storage/xtradb/include/trx0rec.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -86,7 +86,7 @@ ulint trx_undo_rec_get_offset( /*====================*/ undo_no_t undo_no) /*!< in: undo no read from node */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /**********************************************************************//** Returns the start of the undo record data area. */ @@ -109,7 +109,7 @@ trx_undo_rec_get_pars( externally stored fild */ undo_no_t* undo_no, /*!< out: undo log record number */ table_id_t* table_id) /*!< out: table id */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Builds a row reference from an undo log record. @return pointer to remaining part of undo record */ @@ -201,7 +201,7 @@ trx_undo_rec_get_partial_row( only in the assertion. */ mem_heap_t* heap) /*!< in: memory heap from which the memory needed is allocated */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Writes information to an undo log about an insert, update, or a delete marking of a clustered index record. 
This information is used in a rollback of the @@ -233,7 +233,7 @@ trx_undo_report_row_operation( inserted undo log record, 0 if BTR_NO_UNDO_LOG flag was specified */ - __attribute__((nonnull(4,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result)); /******************************************************************//** Copies an undo record to heap. This function can be called if we know that the undo log record exists. @@ -244,7 +244,7 @@ trx_undo_get_undo_rec_low( /*======================*/ roll_ptr_t roll_ptr, /*!< in: roll pointer to record */ mem_heap_t* heap) /*!< in: memory heap where copied */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Build a previous version of a clustered index record. The caller must hold a latch on the index page of the clustered index record. @@ -268,7 +268,7 @@ trx_undo_prev_version_build( rec_t** old_vers)/*!< out, own: previous version, or NULL if rec is the first inserted version, or if history data has been deleted */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses a redo log record of adding an undo log record. diff --git a/storage/xtradb/include/trx0roll.h b/storage/xtradb/include/trx0roll.h index 629b41569f6..b2e9d8a077f 100644 --- a/storage/xtradb/include/trx0roll.h +++ b/storage/xtradb/include/trx0roll.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -126,7 +126,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( /*================================================*/ - void* arg __attribute__((unused))); + void* arg MY_ATTRIBUTE((unused))); /*!< in: a dummy parameter required by os_thread_create */ /*********************************************************************//** @@ -153,7 +153,7 @@ dberr_t trx_rollback_for_mysql( /*===================*/ trx_t* trx) /*!< in/out: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Rollback the latest SQL statement for MySQL. @return error code or DB_SUCCESS */ @@ -162,7 +162,7 @@ dberr_t trx_rollback_last_sql_stat_for_mysql( /*=================================*/ trx_t* trx) /*!< in/out: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Rollback a transaction to a given savepoint or do a complete rollback. @return error code or DB_SUCCESS */ @@ -174,7 +174,7 @@ trx_rollback_to_savepoint( trx_savept_t* savept) /*!< in: pointer to savepoint undo number, if partial rollback requested, or NULL for complete rollback */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /*******************************************************************//** Rolls back a transaction back to a named savepoint. 
Modifications after the savepoint are undone but InnoDB does NOT release the corresponding locks @@ -196,7 +196,7 @@ trx_rollback_to_savepoint_for_mysql( information to remove the binlog entries of the queries executed after the savepoint */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Creates a named savepoint. If the transaction is not yet started, starts it. If there is already a savepoint of the same name, this call erases that old @@ -213,7 +213,7 @@ trx_savepoint_for_mysql( position corresponding to this connection at the time of the savepoint */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /*******************************************************************//** Releases a named savepoint. Savepoints which were set after this savepoint are deleted. @@ -225,7 +225,7 @@ trx_release_savepoint_for_mysql( /*============================*/ trx_t* trx, /*!< in: transaction handle */ const char* savepoint_name) /*!< in: savepoint name */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Frees savepoint structs starting from savep. */ UNIV_INTERN diff --git a/storage/xtradb/include/trx0sys.h b/storage/xtradb/include/trx0sys.h index 7b97c6e99cd..7fcdf71e1cc 100644 --- a/storage/xtradb/include/trx0sys.h +++ b/storage/xtradb/include/trx0sys.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -288,7 +288,7 @@ ibool trx_in_trx_list( /*============*/ const trx_t* in_trx) /*!< in: transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /***********************************************************//** @@ -299,7 +299,7 @@ ibool trx_assert_recovered( /*=================*/ trx_id_t trx_id) /*!< in: transaction identifier */ - __attribute__((warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ /*****************************************************************//** Updates the offset information about the end of the MySQL binlog entry diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index 9ae25ef2d7d..93dda121b6b 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -51,7 +51,7 @@ UNIV_INLINE void trx_search_latch_release_if_reserved( /*=================================*/ - trx_t* trx __attribute__((unused))); /*!< in: transaction */ + trx_t* trx MY_ATTRIBUTE((unused))); /*!< in: transaction */ /******************************************************************//** Set detailed error message for the transaction. */ UNIV_INTERN @@ -105,7 +105,7 @@ void trx_free_prepared( /*==============*/ trx_t* trx) /*!< in, own: trx object */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); /********************************************************************//** Frees a transaction object for MySQL. */ UNIV_INTERN @@ -171,7 +171,7 @@ trx_start_for_ddl_low( /*==================*/ trx_t* trx, /*!< in/out: transaction */ trx_dict_op_t op) /*!< in: dictionary operation type */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG #define trx_start_for_ddl(t, o) \ @@ -193,7 +193,7 @@ void trx_commit( /*=======*/ trx_t* trx) /*!< in/out: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /****************************************************************//** Commits a transaction and a mini-transaction. */ UNIV_INTERN @@ -203,7 +203,7 @@ trx_commit_low( trx_t* trx, /*!< in/out: transaction */ mtr_t* mtr) /*!< in/out: mini-transaction (will be committed), or NULL if trx made no modifications */ - __attribute__((nonnull(1))); + MY_ATTRIBUTE((nonnull(1))); /****************************************************************//** Cleans up a transaction at database startup. The cleanup is needed if the transaction already got to the middle of a commit when the database @@ -257,7 +257,7 @@ void trx_commit_complete_for_mysql( /*==========================*/ trx_t* trx) /*!< in/out: transaction */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Marks the latest SQL statement ended. */ UNIV_INTERN @@ -285,7 +285,7 @@ trx_clone_read_view( /*================*/ trx_t* trx, /*!< in: receiver transaction */ trx_t* from_trx) /*!< in: donor transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** Prepares a transaction for commit/rollback. */ UNIV_INTERN @@ -330,7 +330,7 @@ trx_print_low( /*!< in: length of trx->lock.trx_locks */ ulint heap_size) /*!< in: mem_heap_get_size(trx->lock.lock_heap) */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Prints info about a transaction. @@ -344,7 +344,7 @@ trx_print_latched( const trx_t* trx, /*!< in: transaction */ ulint max_query_len) /*!< in: max query length to print, or 0 to use the default max length */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Prints info about a transaction. @@ -357,7 +357,7 @@ trx_print( const trx_t* trx, /*!< in: transaction */ ulint max_query_len) /*!< in: max query length to print, or 0 to use the default max length */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Determine if a transaction is a dictionary operation. 
@@ -367,7 +367,7 @@ enum trx_dict_op_t trx_get_dict_operation( /*===================*/ const trx_t* trx) /*!< in: transaction */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /**********************************************************************//** Flag a transaction a dictionary operation. */ UNIV_INLINE @@ -396,7 +396,7 @@ trx_state_eq( if state != TRX_STATE_NOT_STARTED asserts that trx->state != TRX_STATE_NOT_STARTED */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); # ifdef UNIV_DEBUG /**********************************************************************//** Asserts that a transaction has been started. @@ -407,7 +407,7 @@ ibool trx_assert_started( /*===============*/ const trx_t* trx) /*!< in: transaction */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); # endif /* UNIV_DEBUG */ /**********************************************************************//** diff --git a/storage/xtradb/include/trx0trx.ic b/storage/xtradb/include/trx0trx.ic index 787931dc4b6..8582b63f806 100644 --- a/storage/xtradb/include/trx0trx.ic +++ b/storage/xtradb/include/trx0trx.ic @@ -171,7 +171,7 @@ UNIV_INLINE void trx_search_latch_release_if_reserved( /*=================================*/ - trx_t* trx __attribute__((unused))) /*!< in: transaction */ + trx_t* trx MY_ATTRIBUTE((unused))) /*!< in: transaction */ { ut_ad(!trx->has_search_latch); } diff --git a/storage/xtradb/include/trx0undo.h b/storage/xtradb/include/trx0undo.h index 61b0dabb1e6..0148cc61579 100644 --- a/storage/xtradb/include/trx0undo.h +++ b/storage/xtradb/include/trx0undo.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -73,7 +73,7 @@ bool trx_undo_trx_id_is_insert( /*======================*/ const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */ - __attribute__((nonnull, pure, warn_unused_result)); + MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /*****************************************************************//** Writes a roll ptr to an index page. In case that the size changes in @@ -214,7 +214,7 @@ trx_undo_add_page( mtr_t* mtr) /*!< in: mtr which does not have a latch to any undo log page; the caller must have reserved the rollback segment mutex */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Frees the last undo log page. The caller must hold the rollback segment mutex. 
*/ @@ -229,7 +229,7 @@ trx_undo_free_last_page_func( mtr_t* mtr) /*!< in/out: mini-transaction which does not have a latch to any undo log page or which has allocated the undo log page */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG # define trx_undo_free_last_page(trx,undo,mtr) \ trx_undo_free_last_page_func(trx,undo,mtr) @@ -251,7 +251,7 @@ trx_undo_truncate_end_func( trx_undo_t* undo, /*!< in/out: undo log */ undo_no_t limit) /*!< in: all undo records with undo number >= this value should be truncated */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifdef UNIV_DEBUG # define trx_undo_truncate_end(trx,undo,limit) \ trx_undo_truncate_end_func(trx,undo,limit) @@ -300,7 +300,7 @@ trx_undo_assign_undo( /*=================*/ trx_t* trx, /*!< in: transaction */ ulint type) /*!< in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Sets the state of the undo log segment at a transaction finish. @return undo log segment header page, x-latched */ @@ -350,7 +350,7 @@ void trx_undo_free_prepared( /*===================*/ trx_t* trx) /*!< in/out: PREPARED transaction */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses the redo log entry of an undo log page initialization. diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 592265c15bd..ffae4af4c56 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 76.3 +#define PERCONA_INNODB_VERSION 77.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ @@ -261,7 +261,7 @@ that are only referenced from within InnoDB, not from MySQL. We disable the GCC visibility directive on all Sun operating systems because there is no easy way to get it to work. See http://bugs.mysql.com/bug.php?id=52263. */ #if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(sun) || defined(__INTEL_COMPILER) -# define UNIV_INTERN __attribute__((visibility ("hidden"))) +# define UNIV_INTERN MY_ATTRIBUTE((visibility ("hidden"))) #else # define UNIV_INTERN #endif @@ -276,7 +276,7 @@ appears close together improving code locality of non-cold parts of program. The paths leading to call of cold functions within code are marked as unlikely by the branch prediction mechanism. optimize a rarely invoked function for size instead for speed. */ -# define UNIV_COLD __attribute__((cold)) +# define UNIV_COLD MY_ATTRIBUTE((cold)) #else # define UNIV_COLD /* empty */ #endif @@ -547,7 +547,7 @@ contains the sum of the following flag and the locally stored len. */ #if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER) #define HAVE_GCC_GT_2 /* Tell the compiler that variable/function is unused. 
*/ -# define UNIV_UNUSED __attribute__ ((unused)) +# define UNIV_UNUSED MY_ATTRIBUTE ((unused)) #else # define UNIV_UNUSED #endif /* CHECK FOR GCC VER_GT_2 */ diff --git a/storage/xtradb/include/ut0byte.h b/storage/xtradb/include/ut0byte.h index 5bdd553ca80..4893ab9f9af 100644 --- a/storage/xtradb/include/ut0byte.h +++ b/storage/xtradb/include/ut0byte.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,7 +39,7 @@ ut_ull_create( /*==========*/ ulint high, /*!< in: high-order 32 bits */ ulint low) /*!< in: low-order 32 bits */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /********************************************************//** Rounds a 64-bit integer downward to a multiple of a power of 2. @@ -80,7 +80,7 @@ ut_align_down( /*==========*/ const void* ptr, /*!< in: pointer */ ulint align_no) /*!< in: align by this number */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*********************************************************//** The following function computes the offset of a pointer from the nearest aligned address. @@ -91,7 +91,7 @@ ut_align_offset( /*============*/ const void* ptr, /*!< in: pointer */ ulint align_no) /*!< in: align by this number */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*****************************************************************//** Gets the nth bit of a ulint. @return TRUE if nth bit is 1; 0th bit is defined to be the least significant */ diff --git a/storage/xtradb/include/ut0dbg.h b/storage/xtradb/include/ut0dbg.h index 6a4afe99597..3f5baef0a3c 100644 --- a/storage/xtradb/include/ut0dbg.h +++ b/storage/xtradb/include/ut0dbg.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -59,7 +59,7 @@ ut_dbg_assertion_failed( const char* expr, /*!< in: the failed assertion */ const char* file, /*!< in: source file containing the assertion */ ulint line) /*!< in: line number of the assertion */ - UNIV_COLD __attribute__((nonnull(2))); + UNIV_COLD MY_ATTRIBUTE((nonnull(2))); /** Abort the execution. */ # define UT_DBG_PANIC abort() diff --git a/storage/xtradb/include/ut0mem.h b/storage/xtradb/include/ut0mem.h index af7eb4e9b1d..81470358f2f 100644 --- a/storage/xtradb/include/ut0mem.h +++ b/storage/xtradb/include/ut0mem.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -87,7 +87,7 @@ ut_malloc_low( ulint n, /*!< in: number of bytes to allocate */ ibool assert_on_error) /*!< in: if TRUE, we crash mysqld if the memory cannot be allocated */ - __attribute__((malloc)); + MY_ATTRIBUTE((malloc)); /**********************************************************************//** Allocates memory. */ #define ut_malloc(n) ut_malloc_low(n, TRUE) diff --git a/storage/xtradb/include/ut0rnd.h b/storage/xtradb/include/ut0rnd.h index 53b769849a5..6ed3ee3b2e5 100644 --- a/storage/xtradb/include/ut0rnd.h +++ b/storage/xtradb/include/ut0rnd.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -96,7 +96,7 @@ ulint ut_fold_ull( /*========*/ ib_uint64_t d) /*!< in: 64-bit integer */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*************************************************************//** Folds a character string ending in the null character. @return folded value */ @@ -105,7 +105,7 @@ ulint ut_fold_string( /*===========*/ const char* str) /*!< in: null-terminated string */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); /***********************************************************//** Looks for a prime number slightly greater than the given argument. The prime is chosen so that it is not near any power of 2. @@ -115,7 +115,7 @@ ulint ut_find_prime( /*==========*/ ulint n) /*!< in: positive number > 100 */ - __attribute__((const)); + MY_ATTRIBUTE((const)); #endif /* !UNIV_INNOCHECKSUM */ @@ -128,7 +128,7 @@ ut_fold_ulint_pair( /*===============*/ ulint n1, /*!< in: ulint */ ulint n2) /*!< in: ulint */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /*************************************************************//** Folds a binary string. @return folded value */ @@ -138,7 +138,7 @@ ut_fold_binary( /*===========*/ const byte* str, /*!< in: string of bytes */ ulint len) /*!< in: length */ - __attribute__((pure)); + MY_ATTRIBUTE((pure)); #ifndef UNIV_NONINL diff --git a/storage/xtradb/include/ut0ut.h b/storage/xtradb/include/ut0ut.h index 0caf379d8fa..ef887ed5e58 100644 --- a/storage/xtradb/include/ut0ut.h +++ b/storage/xtradb/include/ut0ut.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -217,7 +217,7 @@ ulint ut_2_power_up( /*==========*/ ulint n) /*!< in: number != 0 */ - __attribute__((const)); + MY_ATTRIBUTE((const)); /** Determine how many bytes (groups of 8 bits) are needed to store the given number of bits. 
@@ -297,7 +297,7 @@ void ut_print_timestamp( /*===============*/ FILE* file) /*!< in: file where to print */ - UNIV_COLD __attribute__((nonnull)); + UNIV_COLD MY_ATTRIBUTE((nonnull)); #ifndef UNIV_INNOCHECKSUM @@ -485,7 +485,7 @@ ut_ulint_sort( ulint* aux_arr, /*!< in/out: aux array to use in sort */ ulint low, /*!< in: lower bound */ ulint high) /*!< in: upper bound */ - __attribute__((nonnull)); + MY_ATTRIBUTE((nonnull)); #ifndef UNIV_NONINL #include "ut0ut.ic" diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 38b9257f5ea..afef5101f25 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -409,7 +409,7 @@ ibool lock_rec_validate_page( /*===================*/ const buf_block_t* block) /*!< in: buffer block */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ /* The lock system */ @@ -493,7 +493,7 @@ Checks that a transaction id is sensible, i.e., not in the future. #ifdef UNIV_DEBUG UNIV_INTERN #else -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) #endif bool lock_check_trx_id_sanity( @@ -1532,7 +1532,7 @@ lock_rec_has_expl( /*********************************************************************//** Checks if some other transaction has a lock request in the queue. @return lock or NULL */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const lock_t* lock_rec_other_has_expl_req( /*========================*/ @@ -3650,7 +3650,7 @@ lock_get_next_lock( ut_ad(heap_no == ULINT_UNDEFINED); ut_ad(lock_get_type_low(lock) == LOCK_TABLE); - lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock); + lock = UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock); } } while (lock != NULL && lock->trx->lock.deadlock_mark > ctx->mark_start); @@ -3700,7 +3700,8 @@ lock_get_first_lock( } else { *heap_no = ULINT_UNDEFINED; ut_ad(lock_get_type_low(lock) == LOCK_TABLE); - lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock); + dict_table_t* table = lock->un_member.tab_lock.table; + lock = UT_LIST_GET_FIRST(table->locks); } ut_a(lock != NULL); @@ -4048,8 +4049,6 @@ lock_deadlock_check_and_resolve( lock_deadlock_joining_trx_print(trx, lock); } - MONITOR_INC(MONITOR_DEADLOCK); - } else if (victim_trx_id != 0 && victim_trx_id != trx->id) { ut_ad(victim_trx_id == ctx.wait_lock->trx->id); @@ -4058,14 +4057,15 @@ lock_deadlock_check_and_resolve( lock_deadlock_found = TRUE; MONITOR_INC(MONITOR_DEADLOCK); + srv_stats.lock_deadlock_count.inc(); } - } while (victim_trx_id != 0 && victim_trx_id != trx->id); /* If the joining transaction was selected as the victim. 
*/ if (victim_trx_id != 0) { ut_a(victim_trx_id == trx->id); + MONITOR_INC(MONITOR_DEADLOCK); srv_stats.lock_deadlock_count.inc(); lock_deadlock_fputs("*** WE ROLL BACK TRANSACTION (2)\n"); @@ -4435,7 +4435,8 @@ lock_table( dberr_t err; const lock_t* wait_for; - ut_ad(table && thr); + ut_ad(table != NULL); + ut_ad(thr != NULL); if (flags & BTR_NO_LOCKING_FLAG) { @@ -5838,7 +5839,7 @@ lock_validate_table_locks( /*********************************************************************//** Validate record locks up to a limit. @return lock at limit or NULL if no more locks in the hash bucket */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const lock_t* lock_rec_validate( /*==============*/ diff --git a/storage/xtradb/lock/lock0wait.cc b/storage/xtradb/lock/lock0wait.cc index a1c35e20ead..e2e7c4207a1 100644 --- a/storage/xtradb/lock/lock0wait.cc +++ b/storage/xtradb/lock/lock0wait.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -479,7 +479,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(lock_wait_timeout_thread)( /*=====================================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /* in: a dummy parameter required by os_thread_create */ { diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 4c5a8b37076..e6e5762b1e9 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -2468,7 +2468,7 @@ log_archived_file_name_gen( /*=======================*/ char* buf, /*!< in: buffer where to write */ ulint buf_len,/*!< in: buffer length */ - ulint id __attribute__((unused)), + ulint id MY_ATTRIBUTE((unused)), /*!< in: group id; currently we only archive the first group */ lsn_t file_no)/*!< in: file number */ diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index 0d6140f137e..4e58755e1f2 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -11,8 +11,8 @@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
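The lock0lock.cc hunks above switch the deadlock checker's table-lock iteration from UT_LIST_GET_PREV to UT_LIST_GET_FIRST/UT_LIST_GET_NEXT, i.e. the queue of locks on a table is now walked forward from its head. The toy program below is only a simplified model of that traversal point; it does not use InnoDB's UT_LIST macros, and struct lock here is a stand-in type.

#include <stdio.h>

/* Simplified model of a table's lock queue; not InnoDB's UT_LIST, just an
 * illustration of the traversal direction used by the hunks above. */
struct lock {
        int          id;
        struct lock *prev;
        struct lock *next;
};

/* Visit every lock in the queue: start from the first element and follow
 * the next pointers, as lock_get_first_lock()/lock_get_next_lock() now do. */
static void visit_all(const struct lock *first)
{
        for (const struct lock *l = first; l != NULL; l = l->next) {
                printf("lock %d\n", l->id);
        }
}

int main(void)
{
        struct lock a = {1, NULL, NULL}, b = {2, NULL, NULL}, c = {3, NULL, NULL};
        a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

        /* Walking prev from an arbitrary element (say b) would only reach a
         * and miss c; walking next from the head reaches the whole queue. */
        visit_all(&a);
        return 0;
}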
You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., 59 Temple -Place, Suite 330, Boston, MA 02111-1307 USA +this program; if not, write to the Free Software Foundation, Inc., 51 Franklin +Street, Fifth Floor, Boston, MA 02110-1301, USA *****************************************************************************/ diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index a99527f53ca..23fadfb0bf2 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -320,7 +320,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(recv_writer_thread)( /*===============================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -735,7 +735,7 @@ recv_check_cp_is_consistent( /********************************************************//** Looks for the maximum consistent checkpoint from the log groups. @return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t recv_find_max_checkpoint( /*=====================*/ @@ -758,8 +758,38 @@ recv_find_max_checkpoint( buf = log_sys->checkpoint_buf; while (group) { + + ulint log_hdr_log_block_size; + group->state = LOG_GROUP_CORRUPTED; + /* Assert that we can reuse log_sys->checkpoint_buf to read the + part of the header that contains the log block size. 
*/ + ut_ad(LOG_FILE_OS_FILE_LOG_BLOCK_SIZE + 4 + < OS_FILE_LOG_BLOCK_SIZE); + + fil_io(OS_FILE_READ | OS_FILE_LOG, true, group->space_id, 0, + 0, 0, OS_FILE_LOG_BLOCK_SIZE, + log_sys->checkpoint_buf, NULL); + log_hdr_log_block_size + = mach_read_from_4(log_sys->checkpoint_buf + + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE); + if (log_hdr_log_block_size == 0) { + /* 0 means default value */ + log_hdr_log_block_size = 512; + } + if (UNIV_UNLIKELY(log_hdr_log_block_size + != srv_log_block_size)) { + fprintf(stderr, + "InnoDB: Error: The block size of ib_logfile " + "%lu is not equal to innodb_log_block_size " + "%lu.\n" + "InnoDB: Error: Suggestion - Recreate log " + "files.\n", + log_hdr_log_block_size, srv_log_block_size); + return(DB_ERROR); + } + for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { @@ -3075,7 +3105,6 @@ recv_recovery_from_checkpoint_start_func( log_group_t* group; log_group_t* max_cp_group; ulint max_cp_field; - ulint log_hdr_log_block_size; lsn_t checkpoint_lsn; ib_uint64_t checkpoint_no; lsn_t group_scanned_lsn = 0; @@ -3182,21 +3211,6 @@ recv_recovery_from_checkpoint_start_func( log_hdr_buf, max_cp_group); } - log_hdr_log_block_size - = mach_read_from_4(log_hdr_buf + LOG_FILE_OS_FILE_LOG_BLOCK_SIZE); - if (log_hdr_log_block_size == 0) { - /* 0 means default value */ - log_hdr_log_block_size = 512; - } - if (UNIV_UNLIKELY(log_hdr_log_block_size != srv_log_block_size)) { - fprintf(stderr, - "InnoDB: Error: The block size of ib_logfile (" ULINTPF - ") is not equal to innodb_log_block_size.\n" - "InnoDB: Error: Suggestion - Recreate log files.\n", - log_hdr_log_block_size); - return(DB_ERROR); - } - #ifdef UNIV_LOG_ARCHIVE group = UT_LIST_GET_FIRST(log_sys->log_groups); diff --git a/storage/xtradb/mem/mem0dbg.cc b/storage/xtradb/mem/mem0dbg.cc index 308c2979551..a77785a369a 100644 --- a/storage/xtradb/mem/mem0dbg.cc +++ b/storage/xtradb/mem/mem0dbg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -248,7 +248,7 @@ void mem_field_erase( /*============*/ byte* buf, /*!< in: memory field */ - ulint n __attribute__((unused))) + ulint n MY_ATTRIBUTE((unused))) /*!< in: how many bytes the user requested */ { byte* usr_buf; @@ -450,7 +450,7 @@ void mem_heap_validate_or_print( /*=======================*/ mem_heap_t* heap, /*!< in: memory heap */ - byte* top __attribute__((unused)), + byte* top MY_ATTRIBUTE((unused)), /*!< in: calculate and validate only until this top pointer in the heap is reached, if this pointer is NULL, ignored */ diff --git a/storage/xtradb/mtr/mtr0mtr.cc b/storage/xtradb/mtr/mtr0mtr.cc index d17b5c5259d..349adeaf5f5 100644 --- a/storage/xtradb/mtr/mtr0mtr.cc +++ b/storage/xtradb/mtr/mtr0mtr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. 
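The log0recv.cc hunk above moves the ib_logfile block-size validation into recv_find_max_checkpoint(): the 4-byte value stored in the log file header is read, 0 is treated as the default of 512, and a mismatch with innodb_log_block_size is reported as an error. The sketch below models only that check; the offset constant and function names are illustrative stand-ins, not the real LOG_FILE_* symbols or fil_io()/mach_read_from_4() calls.

#include <stdint.h>
#include <stdio.h>

#define HDR_BLOCK_SIZE_OFFSET 64U   /* stand-in for LOG_FILE_OS_FILE_LOG_BLOCK_SIZE */

/* Big-endian 32-bit read, in the style of mach_read_from_4(). */
static uint32_t read_be32(const unsigned char *p)
{
        return ((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16)
             | ((uint32_t) p[2] << 8)  |  (uint32_t) p[3];
}

static int check_log_block_size(const unsigned char *hdr, uint32_t configured)
{
        uint32_t recorded = read_be32(hdr + HDR_BLOCK_SIZE_OFFSET);

        if (recorded == 0) {
                recorded = 512;  /* 0 in the header means the default size */
        }
        if (recorded != configured) {
                fprintf(stderr,
                        "block size mismatch: header %u, configured %u\n",
                        (unsigned) recorded, (unsigned) configured);
                return -1;       /* the hunk returns DB_ERROR at this point */
        }
        return 0;
}

int main(void)
{
        unsigned char hdr[512] = {0};   /* all-zero header: defaults to 512 */
        return check_log_block_size(hdr, 512) == 0 ? 0 : 1;
}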
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -58,7 +58,7 @@ mtr_block_dirtied( /*****************************************************************//** Releases the item in the slot given. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void mtr_memo_slot_release_func( /*=======================*/ @@ -105,7 +105,7 @@ mtr_memo_slot_release_func( Releases the mlocks and other objects stored in an mtr memo. They are released in the order opposite to which they were pushed to the memo. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void mtr_memo_pop_all( /*=============*/ @@ -397,7 +397,7 @@ mtr_read_ulint( /*===========*/ const byte* ptr, /*!< in: pointer from where to read */ ulint type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */ - mtr_t* mtr __attribute__((unused))) + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mini-transaction handle */ { ut_ad(mtr->state == MTR_ACTIVE); diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 0d94534d139..1e2a01c8a88 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -1543,11 +1543,11 @@ void os_file_set_nocache( /*================*/ int fd /*!< in: file descriptor to alter */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), const char* file_name /*!< in: used in the diagnostic message */ - __attribute__((unused)), - const char* operation_name __attribute__((unused))) + MY_ATTRIBUTE((unused)), + const char* operation_name MY_ATTRIBUTE((unused))) /*!< in: "open" or "create"; used in the diagnostic message */ { @@ -1598,14 +1598,14 @@ short_warning: Tries to enable the atomic write feature, if available, for the specified file handle. @return TRUE if success */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) ibool os_file_set_atomic_writes( /*======================*/ const char* name /*!< in: name of the file */ - __attribute__((unused)), + MY_ATTRIBUTE((unused)), os_file_t file /*!< in: handle to the file */ - __attribute__((unused))) + MY_ATTRIBUTE((unused))) { #ifdef DFS_IOCTL_ATOMIC_WRITE_SET @@ -2500,7 +2500,7 @@ os_file_flush_func( /*******************************************************************//** Does a synchronous read operation in Posix. @return number of bytes read, -1 if error */ -static __attribute__((nonnull(2), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(2), warn_unused_result)) ssize_t os_file_pread( /*==========*/ @@ -2663,7 +2663,7 @@ os_file_pread( /*******************************************************************//** Does a synchronous write operation in Posix. 
@return number of bytes written, -1 if error */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ssize_t os_file_pwrite( /*===========*/ diff --git a/storage/xtradb/page/page0page.cc b/storage/xtradb/page/page0page.cc index b7a17164d4f..48c4b53aaa4 100644 --- a/storage/xtradb/page/page0page.cc +++ b/storage/xtradb/page/page0page.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -309,7 +309,7 @@ byte* page_parse_create( /*==============*/ byte* ptr, /*!< in: buffer */ - byte* end_ptr __attribute__((unused)), /*!< in: buffer end */ + byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ ulint comp, /*!< in: nonzero=compact page format */ buf_block_t* block, /*!< in: block or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc index 1605394c958..06b88e3c3af 100644 --- a/storage/xtradb/page/page0zip.cc +++ b/storage/xtradb/page/page0zip.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -134,7 +134,7 @@ Compare at most sizeof(field_ref_zero) bytes. independently of any UNIV_ debugging conditions. */ #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG # include -__attribute__((format (printf, 1, 2))) +MY_ATTRIBUTE((format (printf, 1, 2))) /**********************************************************************//** Report a failure to decompress or compress. @return number of characters printed */ @@ -754,8 +754,8 @@ static void page_zip_free( /*==========*/ - void* opaque __attribute__((unused)), /*!< in: memory heap */ - void* address __attribute__((unused)))/*!< in: object to free */ + void* opaque MY_ATTRIBUTE((unused)), /*!< in: memory heap */ + void* address MY_ATTRIBUTE((unused)))/*!< in: object to free */ { } @@ -4789,7 +4789,8 @@ page_zip_parse_compress( ulint size; ulint trailer_size; - ut_ad(ptr && end_ptr); + ut_ad(ptr != NULL); + ut_ad(end_ptr != NULL); ut_ad(!page == !page_zip); if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) { diff --git a/storage/xtradb/pars/lexyy.cc b/storage/xtradb/pars/lexyy.cc index 1c01becd9ed..bfa8e2ea950 100644 --- a/storage/xtradb/pars/lexyy.cc +++ b/storage/xtradb/pars/lexyy.cc @@ -295,7 +295,7 @@ static int yy_start = 0; /* start state number */ static int yy_did_buffer_switch_on_eof; void yyrestart (FILE *input_file ); -__attribute__((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); +MY_ATTRIBUTE((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); static YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ); void yy_delete_buffer (YY_BUFFER_STATE b ); void yy_flush_buffer (YY_BUFFER_STATE b ); @@ -916,7 +916,7 @@ char *yytext; #line 1 "pars0lex.l" /***************************************************************************** -Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. 
All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1027,7 +1027,7 @@ static int yy_init_globals (void ); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. */ -__attribute__((unused)) static int yylex_destroy (void ); +MY_ATTRIBUTE((unused)) static int yylex_destroy (void ); int yyget_debug (void ); @@ -2664,7 +2664,7 @@ static int yy_get_next_buffer (void) * @param new_buffer The new input buffer. * */ - __attribute__((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ) + MY_ATTRIBUTE((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ) { /* TODO. We should be able to replace this entire function body @@ -3042,7 +3042,7 @@ static int yy_init_globals (void) } /* yylex_destroy is for both reentrant and non-reentrant scanners. */ -__attribute__((unused)) static int yylex_destroy (void) +MY_ATTRIBUTE((unused)) static int yylex_destroy (void) { /* Pop the buffer stack, destroying each element. */ diff --git a/storage/xtradb/pars/make_flex.sh b/storage/xtradb/pars/make_flex.sh index 581fc2342aa..c3db8aea298 100755 --- a/storage/xtradb/pars/make_flex.sh +++ b/storage/xtradb/pars/make_flex.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +# Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software @@ -33,15 +33,15 @@ sed -e ' s/'"$TMPFILE"'/'"$OUTFILE"'/; s/\(int offset = \)\((yy_c_buf_p) - (yytext_ptr)\);/\1(int)(\2);/; s/\(void yy\(restart\|_\(delete\|flush\)_buffer\)\)/static \1/; -s/\(void yy_switch_to_buffer\)/__attribute__((unused)) static \1/; -s/\(void yy\(push\|pop\)_buffer_state\)/__attribute__((unused)) static \1/; +s/\(void yy_switch_to_buffer\)/MY_ATTRIBUTE((unused)) static \1/; +s/\(void yy\(push\|pop\)_buffer_state\)/MY_ATTRIBUTE((unused)) static \1/; s/\(YY_BUFFER_STATE yy_create_buffer\)/static \1/; -s/\(\(int\|void\) yy[gs]et_\)/__attribute__((unused)) static \1/; +s/\(\(int\|void\) yy[gs]et_\)/MY_ATTRIBUTE((unused)) static \1/; s/\(void \*\?yy\(\(re\)\?alloc\|free\)\)/static \1/; s/\(extern \)\?\(int yy\(leng\|lineno\|_flex_debug\)\)/static \2/; -s/\(int yylex_destroy\)/__attribute__((unused)) static \1/; +s/\(int yylex_destroy\)/MY_ATTRIBUTE((unused)) static \1/; s/\(extern \)\?\(int yylex \)/UNIV_INTERN \2/; -s/^\(\(FILE\|char\) *\* *yyget\)/__attribute__((unused)) static \1/; +s/^\(\(FILE\|char\) *\* *yyget\)/MY_ATTRIBUTE((unused)) static \1/; s/^\(extern \)\?\(\(FILE\|char\) *\* *yy\)/static \2/; ' < $TMPFILE >> $OUTFILE diff --git a/storage/xtradb/pars/pars0pars.cc b/storage/xtradb/pars/pars0pars.cc index 655e5ba1324..b116357c5b9 100644 --- a/storage/xtradb/pars/pars0pars.cc +++ b/storage/xtradb/pars/pars0pars.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1925,7 +1925,7 @@ pars_create_table( sym_node_t* column_defs, /*!< in: list of column names */ sym_node_t* compact, /* in: non-NULL if COMPACT table. */ sym_node_t* block_size, /* in: block size (can be NULL) */ - void* not_fit_in_memory __attribute__((unused))) + void* not_fit_in_memory MY_ATTRIBUTE((unused))) /*!< in: a non-NULL pointer means that this is a table which in simulations should be simulated as not fitting @@ -2141,7 +2141,7 @@ UNIV_INTERN que_fork_t* pars_stored_procedure_call( /*=======================*/ - sym_node_t* sym_node __attribute__((unused))) + sym_node_t* sym_node MY_ATTRIBUTE((unused))) /*!< in: stored procedure name */ { ut_error; @@ -2201,7 +2201,7 @@ UNIV_INTERN void yyerror( /*====*/ - const char* s __attribute__((unused))) + const char* s MY_ATTRIBUTE((unused))) /*!< in: error message string */ { ut_ad(s); diff --git a/storage/xtradb/rem/rem0cmp.cc b/storage/xtradb/rem/rem0cmp.cc index 426cf9e3ac5..616ef322fb5 100644 --- a/storage/xtradb/rem/rem0cmp.cc +++ b/storage/xtradb/rem/rem0cmp.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -75,7 +75,7 @@ cmp_debug_dtuple_rec_with_match( completely matched fields; when function returns, contains the value for current comparison */ - __attribute__((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull, warn_unused_result)); #endif /* UNIV_DEBUG */ /*************************************************************//** This function is used to compare two data fields for which the data type @@ -659,7 +659,10 @@ cmp_dtuple_rec_with_match_low( in current field */ int ret; /* return value */ - ut_ad(dtuple && rec && matched_fields && matched_bytes); + ut_ad(dtuple != NULL); + ut_ad(rec != NULL); + ut_ad(matched_fields != NULL); + ut_ad(matched_bytes != NULL); ut_ad(dtuple_check_typed(dtuple)); ut_ad(rec_offs_validate(rec, NULL, offsets)); @@ -920,7 +923,7 @@ Compare two physical record fields. 
@retval 1 if rec1 field is greater than rec2 @retval -1 if rec1 field is less than rec2 @retval 0 if rec1 field equals to rec2 */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) int cmp_rec_rec_simple_field( /*=====================*/ @@ -1139,7 +1142,9 @@ cmp_rec_rec_with_match( int ret = 0; /* return value */ ulint comp; - ut_ad(rec1 && rec2 && index); + ut_ad(rec1 != NULL); + ut_ad(rec2 != NULL); + ut_ad(index != NULL); ut_ad(rec_offs_validate(rec1, index, offsets1)); ut_ad(rec_offs_validate(rec2, index, offsets2)); ut_ad(rec_offs_comp(offsets1) == rec_offs_comp(offsets2)); @@ -1375,7 +1380,9 @@ cmp_debug_dtuple_rec_with_match( int ret; /* return value */ ulint cur_field; /* current field number */ - ut_ad(dtuple && rec && matched_fields); + ut_ad(dtuple != NULL); + ut_ad(rec != NULL); + ut_ad(matched_fields != NULL); ut_ad(dtuple_check_typed(dtuple)); ut_ad(rec_offs_validate(rec, NULL, offsets)); diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc index 0d7b7c16785..a95e9c23613 100644 --- a/storage/xtradb/rem/rem0rec.cc +++ b/storage/xtradb/rem/rem0rec.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -241,7 +241,7 @@ rec_get_n_extern_new( Determine the offset to each field in a leaf-page record in ROW_FORMAT=COMPACT. This is a special case of rec_init_offsets() and rec_get_offsets_func(). */ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE MY_ATTRIBUTE((nonnull)) void rec_init_offsets_comp_ordinary( /*===========================*/ @@ -785,7 +785,7 @@ rec_get_nth_field_offs_old( /**********************************************************//** Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT. @return total size */ -UNIV_INLINE __attribute__((warn_unused_result, nonnull(1,2))) +UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(1,2))) ulint rec_get_converted_size_comp_prefix_low( /*===================================*/ @@ -1130,7 +1130,7 @@ rec_convert_dtuple_to_rec_old( /*********************************************************//** Builds a ROW_FORMAT=COMPACT record out of a data tuple. */ -UNIV_INLINE __attribute__((nonnull)) +UNIV_INLINE MY_ATTRIBUTE((nonnull)) void rec_convert_dtuple_to_rec_comp( /*===========================*/ @@ -1338,7 +1338,9 @@ rec_convert_dtuple_to_rec( { rec_t* rec; - ut_ad(buf && index && dtuple); + ut_ad(buf != NULL); + ut_ad(index != NULL); + ut_ad(dtuple != NULL); ut_ad(dtuple_validate(dtuple)); ut_ad(dtuple_check_typed(dtuple)); diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc index 37706898e65..6fac6c0d317 100644 --- a/storage/xtradb/row/row0ftsort.cc +++ b/storage/xtradb/row/row0ftsort.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved. 
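Several hunks in this patch (lock_table, page_zip_parse_compress and the rem0cmp.cc/rem0rec.cc functions above) split compound debug assertions such as ut_ad(a && b && c) into one ut_ad per operand, so that a failed assertion names the exact pointer that was NULL. Below is a standalone sketch of the same idea, using the standard assert() as a stand-in for ut_ad() and a hypothetical function name.

#include <assert.h>
#include <stddef.h>

/* Sketch of the assertion style adopted in the hunks above, with assert()
 * standing in for InnoDB's ut_ad(). One condition per assertion means the
 * failure report points at the exact NULL operand instead of the whole
 * conjunction. */
static void use_buffers(const char *dtuple, const char *rec, const char *index)
{
        /* before: assert(dtuple && rec && index); */
        assert(dtuple != NULL);
        assert(rec != NULL);
        assert(index != NULL);

        (void) dtuple;
        (void) rec;
        (void) index;
}

int main(void)
{
        use_buffers("d", "r", "i");
        return 0;
}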
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -515,8 +515,18 @@ row_merge_fts_doc_tokenize( dfield_dup(field, buf->heap); /* One variable length column, word with its lenght less than - fts_max_token_size, add one extra size and one extra byte */ - cur_len += 2; + fts_max_token_size, add one extra size and one extra byte. + + Since the max length for FTS token now is larger than 255, + so we will need to signify length byte itself, so only 1 to 128 + bytes can be used for 1 bytes, larger than that 2 bytes. */ + if (t_str.f_len < 128) { + /* Extra size is one byte. */ + cur_len += 2; + } else { + /* Extra size is two bytes. */ + cur_len += 3; + } /* Reserve one byte for the end marker of row_merge_block_t. */ if (buf->total_size + data_size[idx] + cur_len @@ -979,7 +989,7 @@ row_fts_start_parallel_merge( /********************************************************************//** Insert processed FTS data to auxillary index tables. @return DB_SUCCESS if insertion runs fine */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t row_merge_write_fts_word( /*=====================*/ diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 89496b4176b..11c4333577e 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1335,12 +1335,15 @@ row_import::match_schema( THD* thd) UNIV_NOTHROW { /* Do some simple checks. */ + const unsigned relevant_flags = m_flags & ~DICT_TF_MASK_DATA_DIR; + const unsigned relevant_table_flags + = m_table->flags & ~DICT_TF_MASK_DATA_DIR; - if (m_flags != m_table->flags) { + if (relevant_flags != relevant_table_flags) { ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH, - "Table flags don't match, server table has 0x%lx " - "and the meta-data file has 0x%lx", - (ulong) m_table->n_cols, (ulong) m_flags); + "Table flags don't match, server table has 0x%x " + "and the meta-data file has 0x%x", + relevant_table_flags, relevant_flags); return(DB_ERROR); } else if (m_table->n_cols != m_n_cols) { @@ -2179,7 +2182,7 @@ PageConverter::operator() ( Clean up after import tablespace failure, this function will acquire the dictionary latches on behalf of the transaction if the transaction hasn't already acquired them. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_import_discard_changes( /*=======================*/ @@ -2230,7 +2233,7 @@ row_import_discard_changes( /*****************************************************************//** Clean up after import tablespace. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_cleanup( /*===============*/ @@ -2265,7 +2268,7 @@ row_import_cleanup( /*****************************************************************//** Report error during tablespace import. 
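The row0ftsort.cc hunk above adjusts the per-token size estimate in row_merge_fts_doc_tokenize(): tokens shorter than 128 bytes still add two extra bytes to cur_len, while longer tokens add three, because their length prefix needs two bytes. The sketch below restates only that accounting; the function name is illustrative.

#include <stdio.h>
#include <stddef.h>

/* Sketch of the extra-byte accounting changed by the hunk above: tokens
 * whose length fits in one byte add two extra bytes, longer tokens three. */
static size_t fts_token_extra_bytes(size_t token_len)
{
        if (token_len < 128) {
                return 2;   /* one length byte plus one extra byte */
        }
        return 3;           /* two length bytes plus one extra byte */
}

int main(void)
{
        printf("short token: +%zu bytes\n", fts_token_extra_bytes(10));
        printf("long token:  +%zu bytes\n", fts_token_extra_bytes(200));
        return 0;
}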
*/ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_error( /*=============*/ @@ -2293,7 +2296,7 @@ row_import_error( Adjust the root page index node and leaf node segment headers, update with the new space id. For all the table's secondary indexes. @return error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_adjust_root_pages_of_secondary_indexes( /*==============================================*/ @@ -2409,7 +2412,7 @@ row_import_adjust_root_pages_of_secondary_indexes( /*****************************************************************//** Ensure that dict_sys->row_id exceeds SELECT MAX(DB_ROW_ID). @return error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_set_sys_max_row_id( /*==========================*/ @@ -2559,7 +2562,7 @@ row_import_cfg_read_string( /*********************************************************************//** Write the meta data (index user fields) config file. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_cfg_read_index_fields( /*=============================*/ @@ -2642,7 +2645,7 @@ row_import_cfg_read_index_fields( Read the index names and root page numbers of the indexes and set the values. Row format [root_page_no, len of str, str ... ] @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_index_data( /*=======================*/ @@ -2837,7 +2840,7 @@ row_import_read_indexes( /*********************************************************************//** Read the meta data (table columns) config file. Deserialise the contents of dict_col_t structure, along with the column name. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_columns( /*====================*/ @@ -2962,7 +2965,7 @@ row_import_read_columns( /*****************************************************************//** Read the contents of the .cfg file. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_v1( /*===============*/ @@ -3128,7 +3131,7 @@ row_import_read_v1( /** Read the contents of the .cfg file. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_meta_data( /*======================*/ @@ -3171,7 +3174,7 @@ row_import_read_meta_data( /** Read the contents of the .cfg file. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_import_read_cfg( /*================*/ diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index 113aacce39a..b6ca72747b3 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. 
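The row_import::match_schema() hunk above relaxes the table-flags check: the DATA DIRECTORY bit only records where the source server kept the .ibd file, so it is masked out on both sides before comparing, and the error message now prints the two flag values (the old code mistakenly printed m_table->n_cols). A sketch of the masked comparison, using a placeholder mask value rather than the real DICT_TF_MASK_DATA_DIR:

    #include <cstdio>

    static const unsigned MASK_DATA_DIR_SKETCH = 1U << 6;  /* placeholder */

    static bool
    table_flags_match(unsigned cfg_flags, unsigned table_flags)
    {
            const unsigned a = cfg_flags   & ~MASK_DATA_DIR_SKETCH;
            const unsigned b = table_flags & ~MASK_DATA_DIR_SKETCH;

            if (a != b) {
                    std::printf("Table flags don't match, server table has 0x%x "
                                "and the meta-data file has 0x%x\n", b, a);
                    return(false);
            }

            return(true);
    }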
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -224,7 +224,7 @@ Does an insert operation by updating a delete-marked existing record in the index. This situation can occur if the delete-marked record is kept in the index for consistent reads. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_sec_index_entry_by_modify( /*==============================*/ @@ -325,7 +325,7 @@ Does an insert operation by delete unmarking and updating a delete marked existing record in the index. This situation can occur if the delete marked record is kept in the index for consistent reads. @return DB_SUCCESS, DB_FAIL, or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_clust_index_entry_by_modify( /*================================*/ @@ -433,7 +433,7 @@ row_ins_cascade_ancestor_updates_table( Returns the number of ancestor UPDATE or DELETE nodes of a cascaded update/delete node. @return number of ancestors */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulint row_ins_cascade_n_ancestors( /*========================*/ @@ -459,7 +459,7 @@ a cascaded update. can also be 0 if no foreign key fields changed; the returned value is ULINT_UNDEFINED if the column type in the child table is too short to fit the new value in the parent table: that means the update fails */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulint row_ins_cascade_calc_update_vec( /*============================*/ @@ -932,7 +932,7 @@ Perform referential actions or checks when a parent row is deleted or updated and the constraint had an ON DELETE or ON UPDATE condition which was not RESTRICT. @return DB_SUCCESS, DB_LOCK_WAIT, or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_foreign_check_on_constraint( /*================================*/ @@ -1765,7 +1765,7 @@ Otherwise does searches to the indexes of referenced tables and sets shared locks which lock either the success or the failure of a constraint. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_check_foreign_constraints( /*==============================*/ @@ -1906,7 +1906,7 @@ Scans a unique non-clustered index at a given index entry to determine whether a uniqueness violation has occurred for the key value of the entry. Set shared locks on possible duplicate records. 
@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_scan_sec_index_for_duplicate( /*=================================*/ @@ -2048,7 +2048,7 @@ end_scan: @retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or a newer version of entry (the entry should not be inserted) @retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_online( /*=====================*/ @@ -2089,7 +2089,7 @@ row_ins_duplicate_online( @retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or a newer version of entry (the entry should not be inserted) @retval DB_DUPLICATE_KEY when entry is a duplicate of rec */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust_online( /*====================================*/ @@ -2132,7 +2132,7 @@ for a clustered index! record @retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust( /*=============================*/ @@ -2581,7 +2581,7 @@ func_exit: /***************************************************************//** Starts a mini-transaction and checks if the index will be dropped. @return true if the index is to be dropped */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_ins_sec_mtr_start_and_check_if_aborted( /*=======================================*/ @@ -3046,7 +3046,7 @@ row_ins_index_entry( /***********************************************************//** Sets the values of the dtuple fields in entry from the values of appropriate columns in row. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_ins_index_entry_set_vals( /*=========================*/ @@ -3099,7 +3099,7 @@ row_ins_index_entry_set_vals( Inserts a single index entry to the table. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_index_entry_step( /*=====================*/ @@ -3222,7 +3222,7 @@ row_ins_get_row_from_select( Inserts a row to a table. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins( /*====*/ diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index bb473ca92cf..a6751b208f7 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -201,7 +201,7 @@ struct row_log_t { /** Create the file or online log if it does not exist. 
@param[in,out] log online rebuild log @return file descriptor. */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) int row_log_tmpfile( row_log_t* log) @@ -217,7 +217,7 @@ row_log_tmpfile( /** Allocate the memory for the log buffer. @param[in,out] log_buf Buffer used for log operation @return TRUE if success, false if not */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) bool row_log_block_allocate( row_log_buf_t& log_buf) @@ -407,7 +407,7 @@ row_log_table_get_error( /******************************************************//** Starts logging an operation to a table that is being rebuilt. @return pointer to log, or NULL if no logging is necessary */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) byte* row_log_table_open( /*===============*/ @@ -442,7 +442,7 @@ err_exit: /******************************************************//** Stops logging an operation to a table that is being rebuilt. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_log_table_close_func( /*=====================*/ @@ -812,7 +812,7 @@ row_log_table_low_redundant( /******************************************************//** Logs an insert or update to a table that is being rebuilt. */ -static __attribute__((nonnull(1,2,3))) +static MY_ATTRIBUTE((nonnull(1,2,3))) void row_log_table_low( /*==============*/ @@ -1312,7 +1312,7 @@ row_log_table_blob_alloc( /******************************************************//** Converts a log record to a table row. @return converted row, or NULL if the conversion fails */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const dtuple_t* row_log_table_apply_convert_mrec( /*=============================*/ @@ -1466,7 +1466,7 @@ blob_done: /******************************************************//** Replays an insert operation on a table that was rebuilt. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_insert_low( /*===========================*/ @@ -1548,7 +1548,7 @@ row_log_table_apply_insert_low( /******************************************************//** Replays an insert operation on a table that was rebuilt. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_insert( /*=======================*/ @@ -1600,7 +1600,7 @@ row_log_table_apply_insert( /******************************************************//** Deletes a record from a table that is being rebuilt. @return DB_SUCCESS or error code */ -static __attribute__((nonnull(1, 2, 4, 5), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1, 2, 4, 5), warn_unused_result)) dberr_t row_log_table_apply_delete_low( /*===========================*/ @@ -1698,7 +1698,7 @@ flag_ok: /******************************************************//** Replays a delete operation on a table that was rebuilt. @return DB_SUCCESS or error code */ -static __attribute__((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result)) dberr_t row_log_table_apply_delete( /*=======================*/ @@ -1820,7 +1820,7 @@ all_done: /******************************************************//** Replays an update operation on a table that was rebuilt. 
@return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_update( /*=======================*/ @@ -2183,7 +2183,7 @@ func_exit_committed: Applies an operation to a table that was rebuilt. @return NULL on failure (mrec corruption) or when out of data; pointer to next record on success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const mrec_t* row_log_table_apply_op( /*===================*/ @@ -2474,7 +2474,7 @@ row_log_table_apply_op( /******************************************************//** Applies operations to a table was rebuilt. @return DB_SUCCESS, or error code on failure */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_log_table_apply_ops( /*====================*/ @@ -2971,7 +2971,7 @@ row_log_get_max_trx( /******************************************************//** Applies an operation to a secondary index that was being created. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_log_apply_op_low( /*=================*/ @@ -3198,7 +3198,7 @@ func_exit: Applies an operation to a secondary index that was being created. @return NULL on failure (mrec corruption) or when out of data; pointer to next record on success */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) const mrec_t* row_log_apply_op( /*=============*/ @@ -3323,7 +3323,7 @@ corrupted: /******************************************************//** Applies operations to a secondary index that was being created. @return DB_SUCCESS, or error code on failure */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t row_log_apply_ops( /*==============*/ diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 48c165bbc54..0bba529d167 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -70,7 +70,7 @@ UNIV_INTERN char srv_disable_sort_file_cache; #ifdef UNIV_DEBUG /******************************************************//** Display a merge tuple. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_merge_tuple_print( /*==================*/ @@ -105,7 +105,7 @@ row_merge_tuple_print( /******************************************************//** Encode an index record. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_merge_buf_encode( /*=================*/ @@ -142,7 +142,7 @@ row_merge_buf_encode( /******************************************************//** Allocate a sort buffer. @return own: sort buffer */ -static __attribute__((malloc, nonnull)) +static MY_ATTRIBUTE((malloc, nonnull)) row_merge_buf_t* row_merge_buf_create_low( /*=====================*/ @@ -642,7 +642,7 @@ row_merge_dup_report( /*************************************************************//** Compare two tuples. @return 1, 0, -1 if a is greater, equal, less, respectively, than b */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) int row_merge_tuple_cmp( /*================*/ @@ -721,7 +721,7 @@ UT_SORT_FUNCTION_BODY(). /**********************************************************************//** Merge sort the tuple buffer in main memory. 
*/ -static __attribute__((nonnull(4,5))) +static MY_ATTRIBUTE((nonnull(4,5))) void row_merge_tuple_sort( /*=================*/ @@ -1245,7 +1245,7 @@ row_merge_write_eof( @param[in,out] tmpfd temporary file handle @param[in] path path to create temporary file @return file descriptor, or -1 on failure */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) int row_merge_tmpfile_if_needed( int* tmpfd, @@ -1264,7 +1264,7 @@ row_merge_tmpfile_if_needed( @param[in] nrec number of records in the file @param[in] path path to create temporary files @return file descriptor, or -1 on failure */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) int row_merge_file_create_if_needed( merge_file_t* file, @@ -1310,7 +1310,7 @@ containing the index entries for the indexes to be built. @param[in,out] block file buffer @param[in,out] tmpfd temporary file handle return DB_SUCCESS or error */ -static __attribute__((nonnull(1,2,3,4,6,9,10,16), warn_unused_result)) +static MY_ATTRIBUTE((nonnull(1,2,3,4,6,9,10,16), warn_unused_result)) dberr_t row_merge_read_clustered_index( trx_t* trx, @@ -2034,7 +2034,7 @@ wait_again: /*************************************************************//** Merge two blocks of records on disk and write a bigger block. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_merge_blocks( /*=============*/ @@ -2145,7 +2145,7 @@ done1: /*************************************************************//** Copy a block of index entries. @return TRUE on success, FALSE on failure */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ibool row_merge_blocks_copy( /*==================*/ @@ -2218,7 +2218,7 @@ done0: /*************************************************************//** Merge disk files. @return DB_SUCCESS or error code */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) dberr_t row_merge( /*======*/ @@ -2404,7 +2404,7 @@ row_merge_sort( /*************************************************************//** Copy externally stored columns to the data tuple. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_merge_copy_blobs( /*=================*/ @@ -2449,7 +2449,7 @@ row_merge_copy_blobs( Read sorted file containing index data tuples and insert these data tuples to the index @return DB_SUCCESS or error number */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_merge_insert_index_tuples( /*==========================*/ @@ -3462,7 +3462,7 @@ row_merge_rename_tables_dict( /*********************************************************************//** Create and execute a query graph for creating an index. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_merge_create_index_graph( /*=========================*/ diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index a1bdc732fa4..466ff113127 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1421,9 +1421,12 @@ error_exit: } /* Difference between Doc IDs are restricted within - 4 bytes integer. See fts_get_encoded_len() */ + 4 bytes integer. See fts_get_encoded_len(). Consecutive + doc_ids difference should not exceed + FTS_DOC_ID_MAX_STEP value. */ - if (doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { + if (next_doc_id > 1 + && doc_id - next_doc_id >= FTS_DOC_ID_MAX_STEP) { fprintf(stderr, "InnoDB: Doc ID " UINT64PF " is too" " big. Its difference with largest" @@ -1684,7 +1687,8 @@ row_update_for_mysql( trx_t* trx = prebuilt->trx; ulint fk_depth = 0; - ut_ad(prebuilt && trx); + ut_ad(prebuilt != NULL); + ut_ad(trx != NULL); UT_NOT_USED(mysql_rec); if (prebuilt->table->ibd_file_missing) { @@ -1883,7 +1887,8 @@ row_unlock_for_mysql( btr_pcur_t* clust_pcur = &prebuilt->clust_pcur; trx_t* trx = prebuilt->trx; - ut_ad(prebuilt && trx); + ut_ad(prebuilt != NULL); + ut_ad(trx != NULL); if (UNIV_UNLIKELY (!srv_locks_unsafe_for_binlog @@ -4490,7 +4495,7 @@ row_mysql_drop_temp_tables(void) Drop all foreign keys in a database, see Bug#18942. Called at the end of row_drop_database_for_mysql(). @return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t drop_all_foreign_keys_in_db( /*========================*/ @@ -4682,7 +4687,7 @@ loop: Checks if a table name contains the string "/#sql" which denotes temporary tables in MySQL. @return true if temporary table */ -UNIV_INTERN __attribute__((warn_unused_result)) +UNIV_INTERN MY_ATTRIBUTE((warn_unused_result)) bool row_is_mysql_tmp_table_name( /*========================*/ @@ -4696,7 +4701,7 @@ row_is_mysql_tmp_table_name( /****************************************************************//** Delete a single constraint. @return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_delete_constraint_low( /*======================*/ @@ -4719,7 +4724,7 @@ row_delete_constraint_low( /****************************************************************//** Delete a single constraint. @return error code or DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_delete_constraint( /*==================*/ diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc index b26ba971a95..bc2e0b0e1cb 100644 --- a/storage/xtradb/row/row0purge.cc +++ b/storage/xtradb/row/row0purge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -69,7 +69,8 @@ row_purge_node_create( { purge_node_t* node; - ut_ad(parent && heap); + ut_ad(parent != NULL); + ut_ad(heap != NULL); node = static_cast( mem_heap_zalloc(heap, sizeof(*node))); @@ -120,7 +121,7 @@ row_purge_reposition_pcur( Removes a delete marked clustered index record if possible. 
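The row0mysql.cc hunk near the top of this file's diff tightens the FTS Doc ID check: consecutive Doc IDs may not differ by more than FTS_DOC_ID_MAX_STEP (the difference must fit the 4-byte encoding used by fts_get_encoded_len()), but the check is now skipped while no previous Doc ID has been assigned yet (next_doc_id is 0 or 1), where the subtraction would be meaningless. A sketch of the guard, with an illustrative constant value:

    #include <cstdint>

    static const uint64_t DOC_ID_MAX_STEP_SKETCH = 65535;  /* illustrative */

    static bool
    fts_doc_id_step_too_big(uint64_t doc_id, uint64_t next_doc_id)
    {
            return(next_doc_id > 1
                   && doc_id - next_doc_id >= DOC_ID_MAX_STEP_SKETCH);
    }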
@retval true if the row was not found, or it was successfully removed @retval false if the row was modified after the delete marking */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_purge_remove_clust_if_poss_low( /*===============================*/ @@ -202,7 +203,7 @@ marking. @retval true if the row was not found, or it was successfully removed @retval false the purge needs to be suspended because of running out of file space. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_purge_remove_clust_if_poss( /*===========================*/ @@ -274,7 +275,7 @@ row_purge_poss_sec( Removes a secondary index entry if possible, by modifying the index tree. Does not try to buffer the delete. @return TRUE if success or if not found */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) ibool row_purge_remove_sec_if_poss_tree( /*==============================*/ @@ -396,7 +397,7 @@ Removes a secondary index entry without modifying the index tree, if possible. @retval true if success or if not found @retval false if row_purge_remove_sec_if_poss_tree() should be invoked */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_purge_remove_sec_if_poss_leaf( /*==============================*/ @@ -507,7 +508,7 @@ row_purge_remove_sec_if_poss_leaf( /***********************************************************//** Removes a secondary index entry if possible. */ -UNIV_INLINE __attribute__((nonnull(1,2))) +UNIV_INLINE MY_ATTRIBUTE((nonnull(1,2))) void row_purge_remove_sec_if_poss( /*=========================*/ @@ -554,7 +555,7 @@ Purges a delete marking of a record. @retval true if the row was not found, or it was successfully removed @retval false the purge needs to be suspended because of running out of file space */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_purge_del_mark( /*===============*/ @@ -745,7 +746,8 @@ row_purge_parse_undo_rec( ulint info_bits; ulint type; - ut_ad(node && thr); + ut_ad(node != NULL); + ut_ad(thr != NULL); ptr = trx_undo_rec_get_pars( undo_rec, &type, &node->cmpl_info, @@ -830,7 +832,7 @@ err_exit: /***********************************************************//** Purges the parsed record. @return true if purged, false if skipped */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool row_purge_record_func( /*==================*/ @@ -895,7 +897,7 @@ row_purge_record_func( Fetches an undo log record and does the purge for the recorded operation. If none left, or the current purge completed, returns the control to the parent node, which is always a query thread node. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_purge( /*======*/ diff --git a/storage/xtradb/row/row0quiesce.cc b/storage/xtradb/row/row0quiesce.cc index ecd6f47947b..583fbe60fb3 100644 --- a/storage/xtradb/row/row0quiesce.cc +++ b/storage/xtradb/row/row0quiesce.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -37,7 +37,7 @@ Created 2012-02-08 by Sunny Bains. /*********************************************************************//** Write the meta data (index user fields) config file. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_quiesce_write_index_fields( /*===========================*/ @@ -97,7 +97,7 @@ row_quiesce_write_index_fields( /*********************************************************************//** Write the meta data config file index information. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_quiesce_write_indexes( /*======================*/ @@ -210,7 +210,7 @@ Write the meta data (table columns) config file. Serialise the contents of dict_col_t structure, along with the column name. All fields are serialized as ib_uint32_t. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_quiesce_write_table( /*====================*/ @@ -293,7 +293,7 @@ row_quiesce_write_table( /*********************************************************************//** Write the meta data config file header. @return DB_SUCCESS or error code. */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_quiesce_write_header( /*=====================*/ @@ -415,7 +415,7 @@ row_quiesce_write_header( /*********************************************************************//** Write the table meta data after quiesce. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_quiesce_write_cfg( /*==================*/ @@ -530,10 +530,8 @@ row_quiesce_table_start( trx_purge_stop(); } - ut_a(table->id > 0); - for (ulint count = 0; - ibuf_contract_in_background(table->id, TRUE) != 0 + ibuf_merge_space(table->space) != 0 && !trx_is_interrupted(trx); ++count) { if (!(count % 20)) { diff --git a/storage/xtradb/row/row0row.cc b/storage/xtradb/row/row0row.cc index be786f954fb..96d25e15777 100644 --- a/storage/xtradb/row/row0row.cc +++ b/storage/xtradb/row/row0row.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -240,7 +240,9 @@ row_build( ulint offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs_init(offsets_); - ut_ad(index && rec && heap); + ut_ad(index != NULL); + ut_ad(rec != NULL); + ut_ad(heap != NULL); ut_ad(dict_index_is_clust(index)); ut_ad(!mutex_own(&trx_sys->mutex)); ut_ad(!col_map || col_table); @@ -409,7 +411,9 @@ row_rec_to_index_entry_low( ulint len; ulint rec_len; - ut_ad(rec && heap && index); + ut_ad(rec != NULL); + ut_ad(heap != NULL); + ut_ad(index != NULL); /* Because this function may be invoked by row0merge.cc on a record whose header is in different format, the check rec_offs_validate(rec, index, offsets) must be avoided here. 
*/ @@ -464,7 +468,9 @@ row_rec_to_index_entry( byte* buf; const rec_t* copy_rec; - ut_ad(rec && heap && index); + ut_ad(rec != NULL); + ut_ad(heap != NULL); + ut_ad(index != NULL); ut_ad(rec_offs_validate(rec, index, offsets)); /* Take a copy of rec to heap */ @@ -523,7 +529,9 @@ row_build_row_ref( ulint* offsets = offsets_; rec_offs_init(offsets_); - ut_ad(index && rec && heap); + ut_ad(index != NULL); + ut_ad(rec != NULL); + ut_ad(heap != NULL); ut_ad(!dict_index_is_clust(index)); offsets = rec_get_offsets(rec, index, offsets, diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 388668afb47..74579687a9b 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by @@ -676,7 +676,7 @@ sel_enqueue_prefetched_row( /*********************************************************************//** Builds a previous version of a clustered index record for a consistent read @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_sel_build_prev_vers( /*====================*/ @@ -711,7 +711,7 @@ row_sel_build_prev_vers( /*********************************************************************//** Builds the last committed version of a clustered index record for a semi-consistent read. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_sel_build_committed_vers_for_mysql( /*===================================*/ @@ -809,7 +809,7 @@ row_sel_test_other_conds( Retrieves the clustered index record corresponding to a record in a non-clustered index. Does the necessary locking. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_sel_get_clust_rec( /*==================*/ @@ -1314,7 +1314,7 @@ func_exit: /*********************************************************************//** Performs a select step. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_sel( /*====*/ @@ -2566,7 +2566,7 @@ row_sel_store_row_id_to_prebuilt( /**************************************************************//** Stores a non-SQL-NULL field in the MySQL format. The counterpart of this function is row_mysql_store_col_in_innobase_format() in row0mysql.cc. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_sel_field_store_in_mysql_format_func( /*=====================================*/ @@ -2755,7 +2755,7 @@ row_sel_field_store_in_mysql_format_func( #endif /* UNIV_DEBUG */ /**************************************************************//** Convert a field in the Innobase format to a field in the MySQL format. */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_field_func( /*===========================*/ @@ -2908,7 +2908,7 @@ Note that the template in prebuilt may advise us to copy only a few columns to mysql_rec, other columns are left blank. All columns may not be needed in the query. 
@return TRUE on success, FALSE if not all columns could be retrieved */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_rec( /*====================*/ @@ -2970,7 +2970,7 @@ row_sel_store_mysql_rec( /*********************************************************************//** Builds a previous version of a clustered index record for a consistent read @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_sel_build_prev_vers_for_mysql( /*==============================*/ @@ -3007,7 +3007,7 @@ Retrieves the clustered index record corresponding to a record in a non-clustered index. Does the necessary locking. Used in the MySQL interface. @return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_sel_get_clust_rec_for_mysql( /*============================*/ diff --git a/storage/xtradb/row/row0uins.cc b/storage/xtradb/row/row0uins.cc index 849bf096492..651042fb820 100644 --- a/storage/xtradb/row/row0uins.cc +++ b/storage/xtradb/row/row0uins.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -61,7 +61,7 @@ introduced where a call to log_free_check() is bypassed. */ Removes a clustered index record. The pcur in node was positioned on the record, now it is detached. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_ins_remove_clust_rec( /*==========================*/ @@ -176,7 +176,7 @@ func_exit: /***************************************************************//** Removes a secondary index entry if found. @return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_ins_remove_sec_low( /*========================*/ @@ -251,7 +251,7 @@ func_exit_no_pcur: Removes a secondary index entry from the index if found. Tries first optimistic, then pessimistic descent down the tree. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_ins_remove_sec( /*====================*/ @@ -350,7 +350,7 @@ close_table: /***************************************************************//** Removes secondary index records. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_ins_remove_sec_rec( /*========================*/ diff --git a/storage/xtradb/row/row0umod.cc b/storage/xtradb/row/row0umod.cc index 29252c7834a..4b44245bd96 100644 --- a/storage/xtradb/row/row0umod.cc +++ b/storage/xtradb/row/row0umod.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. 
All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -72,7 +72,7 @@ introduced where a call to log_free_check() is bypassed. */ /***********************************************************//** Undoes a modify in a clustered index record. @return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_clust_low( /*===================*/ @@ -154,7 +154,7 @@ This is attempted when the record was inserted by updating a delete-marked record and there no longer exist transactions that would see the delete-marked record. @return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_remove_clust_low( /*==========================*/ @@ -243,7 +243,7 @@ row_undo_mod_remove_clust_low( Undoes a modify in a clustered index record. Sets also the node state for the next round of undo. @return DB_SUCCESS or error code: we may run out of file space */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_clust( /*===============*/ @@ -380,7 +380,7 @@ row_undo_mod_clust( /***********************************************************//** Delete marks or removes a secondary index entry if found. @return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_del_mark_or_remove_sec_low( /*====================================*/ @@ -516,7 +516,7 @@ not cause problems because in row0sel.cc, in queries we always retrieve the clustered index record or an earlier version of it, if the secondary index record through which we do the search is delete-marked. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_del_mark_or_remove_sec( /*================================*/ @@ -549,7 +549,7 @@ fields but alphabetically they stayed the same, e.g., 'abc' -> 'aBc'. @retval DB_OUT_OF_FILE_SPACE when running out of tablespace @retval DB_DUPLICATE_KEY if the value was missing and an insert would lead to a duplicate exists */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_del_unmark_sec_and_undo_update( /*========================================*/ @@ -745,7 +745,7 @@ func_exit_no_pcur: /***********************************************************//** Flags a secondary index corrupted. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_undo_mod_sec_flag_corrupted( /*============================*/ @@ -777,7 +777,7 @@ row_undo_mod_sec_flag_corrupted( /***********************************************************//** Undoes a modify in secondary indexes when undo record type is UPD_DEL. 
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_upd_del_sec( /*=====================*/ @@ -844,7 +844,7 @@ row_undo_mod_upd_del_sec( /***********************************************************//** Undoes a modify in secondary indexes when undo record type is DEL_MARK. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_del_mark_sec( /*======================*/ @@ -912,7 +912,7 @@ row_undo_mod_del_mark_sec( /***********************************************************//** Undoes a modify in secondary indexes when undo record type is UPD_EXIST. @return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo_mod_upd_exist_sec( /*=======================*/ @@ -1028,7 +1028,7 @@ row_undo_mod_upd_exist_sec( /***********************************************************//** Parses the row reference and other info in a modify undo log record. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void row_undo_mod_parse_undo_rec( /*========================*/ @@ -1105,7 +1105,8 @@ row_undo_mod( dberr_t err; ibool dict_locked; - ut_ad(node && thr); + ut_ad(node != NULL); + ut_ad(thr != NULL); ut_ad(node->state == UNDO_NODE_MODIFY); dict_locked = thr_get_trx(thr)->dict_operation_lock_mode == RW_X_LATCH; diff --git a/storage/xtradb/row/row0undo.cc b/storage/xtradb/row/row0undo.cc index 9977a1e8f04..149dc671930 100644 --- a/storage/xtradb/row/row0undo.cc +++ b/storage/xtradb/row/row0undo.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -245,7 +245,7 @@ Fetches an undo log record and does the undo for the recorded operation. If none left, or a partial rollback completed, returns control to the parent node, which is always a query thread node. @return DB_SUCCESS if operation successfully completed, else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_undo( /*=====*/ @@ -257,7 +257,8 @@ row_undo( roll_ptr_t roll_ptr; ibool locked_data_dict; - ut_ad(node && thr); + ut_ad(node != NULL); + ut_ad(thr != NULL); trx = node->trx; diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc index b1002a1db03..fe765edf421 100644 --- a/storage/xtradb/row/row0upd.cc +++ b/storage/xtradb/row/row0upd.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -172,7 +172,7 @@ NOTE that this function will temporarily commit mtr and lose the pcur position! 
@return DB_SUCCESS or an error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_check_references_constraints( /*=================================*/ @@ -609,7 +609,7 @@ row_upd_write_sys_vals_to_log( roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */ byte* log_ptr,/*!< pointer to a buffer of size > 20 opened in mlog */ - mtr_t* mtr __attribute__((unused))) /*!< in: mtr */ + mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mtr */ { ut_ad(dict_index_is_clust(index)); ut_ad(mtr); @@ -1644,7 +1644,7 @@ row_upd_store_row( Updates a secondary index entry of a row. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_sec_index_entry( /*====================*/ @@ -1849,7 +1849,7 @@ Updates the secondary index record if it is changed in the row update or deletes it if this is a delete. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_sec_step( /*=============*/ @@ -1882,7 +1882,7 @@ updated. We must mark them as inherited in entry, so that they are not freed in a rollback. A limited version of this function used to be called btr_cur_mark_dtuple_inherited_extern(). @return TRUE if any columns were inherited */ -static __attribute__((warn_unused_result)) +static MY_ATTRIBUTE((warn_unused_result)) ibool row_upd_clust_rec_by_insert_inherit_func( /*=====================================*/ @@ -1961,7 +1961,7 @@ fields of the clustered index record change. This should be quite rare in database applications. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_clust_rec_by_insert( /*========================*/ @@ -2089,7 +2089,7 @@ Updates a clustered index record of a row when the ordering fields do not change. @return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_clust_rec( /*==============*/ @@ -2251,7 +2251,7 @@ func_exit: /***********************************************************//** Delete marks a clustered index record. @return DB_SUCCESS if operation successfully completed, else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_del_mark_clust_rec( /*=======================*/ @@ -2303,7 +2303,7 @@ row_upd_del_mark_clust_rec( Updates the clustered index record. @return DB_SUCCESS if operation successfully completed, DB_LOCK_WAIT in case of a lock wait, else error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd_clust_step( /*===============*/ @@ -2520,7 +2520,7 @@ to this node, we assume that we have a persistent cursor which was on a record, and the position of the cursor is stored in the cursor. 
@return DB_SUCCESS if operation successfully completed, else error code or DB_LOCK_WAIT */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_upd( /*====*/ @@ -2529,7 +2529,8 @@ row_upd( { dberr_t err = DB_SUCCESS; - ut_ad(node && thr); + ut_ad(node != NULL); + ut_ad(thr != NULL); if (UNIV_LIKELY(node->in_mysql_interface)) { diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 14d272ac4c0..cba87df32b0 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -496,16 +496,16 @@ UNIV_INTERN ulint srv_available_undo_logs = 0; /* Ensure status variables are on separate cache lines */ #define CACHE_LINE_SIZE 64 -#define CACHE_ALIGNED __attribute__ ((aligned (CACHE_LINE_SIZE))) +#define CACHE_ALIGNED MY_ATTRIBUTE((aligned (CACHE_LINE_SIZE))) UNIV_INTERN byte -counters_pad_start[CACHE_LINE_SIZE] __attribute__((unused)) = {0}; +counters_pad_start[CACHE_LINE_SIZE] MY_ATTRIBUTE((unused)) = {0}; UNIV_INTERN ulint srv_read_views_memory CACHE_ALIGNED = 0; UNIV_INTERN ulint srv_descriptors_memory CACHE_ALIGNED = 0; UNIV_INTERN byte -counters_pad_end[CACHE_LINE_SIZE] __attribute__((unused)) = {0}; +counters_pad_end[CACHE_LINE_SIZE] MY_ATTRIBUTE((unused)) = {0}; /* Set the following to 0 if you want InnoDB to write messages on stderr on startup/shutdown. */ @@ -1889,7 +1889,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_monitor_thread)( /*===============================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -2066,7 +2066,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_error_monitor_thread)( /*=====================================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -2302,7 +2302,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_redo_log_follow_thread)( /*=======================================*/ - void* arg __attribute__((unused))) /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -2749,7 +2749,7 @@ srv_master_do_active_tasks(void) /* Do an ibuf merge */ srv_main_thread_op_info = "doing insert buffer merge"; counter_time = ut_time_us(NULL); - ibuf_contract_in_background(0, FALSE); + ibuf_merge_in_background(false); MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); @@ -2841,7 +2841,7 @@ srv_master_do_idle_tasks(void) /* Do an ibuf merge */ counter_time = ut_time_us(NULL); srv_main_thread_op_info = "doing insert buffer merge"; - ibuf_contract_in_background(0, TRUE); + ibuf_merge_in_background(true); MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); @@ -2927,7 +2927,7 @@ srv_master_do_shutdown_tasks( /* Do an ibuf merge */ srv_main_thread_op_info = "doing insert buffer merge"; - n_bytes_merged = ibuf_contract_in_background(0, TRUE); + n_bytes_merged = ibuf_merge_in_background(true); /* Flush logs if needed */ srv_sync_log_buffer_in_background(); @@ -2967,7 +2967,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_master_thread)( /*==============================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { @@ -3127,7 +3127,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_worker_thread)( 
/*==============================*/ - void* arg __attribute__((unused))) /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { srv_slot_t* slot; @@ -3395,7 +3395,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(srv_purge_coordinator_thread)( /*=========================================*/ - void* arg __attribute__((unused))) /*!< in: a dummy parameter + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { srv_slot_t* slot; diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 20a5e5e80f6..930694ac0af 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. @@ -532,7 +532,7 @@ UNIV_INTERN void srv_normalize_path_for_win( /*=======================*/ - char* str __attribute__((unused))) /*!< in/out: null-terminated + char* str MY_ATTRIBUTE((unused))) /*!< in/out: null-terminated character string */ { #ifdef __WIN__ @@ -549,7 +549,7 @@ srv_normalize_path_for_win( /*********************************************************************//** Creates a log file. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t create_log_file( /*============*/ @@ -766,7 +766,7 @@ create_log_files_rename( /*********************************************************************//** Opens a log file. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_log_file( /*==========*/ @@ -794,7 +794,7 @@ open_log_file( /*********************************************************************//** Creates or opens database data files and closes them. @return DB_SUCCESS or error code */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_or_create_data_files( /*======================*/ diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index fe50e17f106..b795100972c 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. 
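The srv0srv.cc hunks a little earlier keep the server status counters on separate cache lines so that concurrent updates from different threads do not invalidate each other's lines (false sharing); the patch does this with MY_ATTRIBUTE((aligned(CACHE_LINE_SIZE))) plus explicit padding arrays. The same idea expressed with standard alignas, as a sketch assuming a 64-byte cache line:

    #include <cstddef>

    constexpr std::size_t kCacheLineSketch = 64;  /* assumed line size */

    /* alignas raises both the alignment and the size of the struct to a
       full cache line, so adjacent counters can never share one. */
    struct alignas(kCacheLineSketch) PaddedCounter {
            unsigned long value = 0;
    };

    static PaddedCounter read_views_memory_sketch;
    static PaddedCounter descriptors_memory_sketch;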
Portions of this file contain modifications contributed and copyrighted by @@ -441,10 +441,10 @@ ulint mutex_enter_nowait_func( /*====================*/ ib_mutex_t* mutex, /*!< in: pointer to mutex */ - const char* file_name __attribute__((unused)), + const char* file_name MY_ATTRIBUTE((unused)), /*!< in: file name where mutex requested */ - ulint line __attribute__((unused))) + ulint line MY_ATTRIBUTE((unused))) /*!< in: line where requested */ { ut_ad(mutex_validate(mutex)); diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc index 9d9fe73de6e..d9e40c5d6f5 100644 --- a/storage/xtradb/trx/trx0purge.cc +++ b/storage/xtradb/trx/trx0purge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -917,7 +917,7 @@ Fetches the next undo log record from the history list to purge. It must be released with the corresponding release function. @return copy of an undo log record or pointer to trx_purge_dummy_rec, if the whole undo log can skipped in purge; NULL if none left */ -static __attribute__((warn_unused_result, nonnull)) +static MY_ATTRIBUTE((warn_unused_result, nonnull)) trx_undo_rec_t* trx_purge_fetch_next_rec( /*=====================*/ diff --git a/storage/xtradb/trx/trx0rec.cc b/storage/xtradb/trx/trx0rec.cc index a698b37c2a6..868a8a6c0b6 100644 --- a/storage/xtradb/trx/trx0rec.cc +++ b/storage/xtradb/trx/trx0rec.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1132,7 +1132,7 @@ trx_undo_rec_get_partial_row( /***********************************************************************//** Erases the unused undo log page end. @return TRUE if the page contained something, FALSE if it was empty */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) ibool trx_undo_erase_page_end( /*====================*/ @@ -1158,7 +1158,7 @@ byte* trx_undo_parse_erase_page_end( /*==========================*/ byte* ptr, /*!< in: buffer */ - byte* end_ptr __attribute__((unused)), /*!< in: buffer end */ + byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ page_t* page, /*!< in: page or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ { @@ -1441,7 +1441,7 @@ NOTE: the caller must have latches on the clustered index page. 
@retval true if the undo log has been truncated and we cannot fetch the old version @retval false if the undo log record is available */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool trx_undo_get_undo_rec( /*==================*/ @@ -1469,7 +1469,7 @@ trx_undo_get_undo_rec( #ifdef UNIV_DEBUG #define ATTRIB_USED_ONLY_IN_DEBUG #else /* UNIV_DEBUG */ -#define ATTRIB_USED_ONLY_IN_DEBUG __attribute__((unused)) +#define ATTRIB_USED_ONLY_IN_DEBUG MY_ATTRIBUTE((unused)) #endif /* UNIV_DEBUG */ /*******************************************************************//** diff --git a/storage/xtradb/trx/trx0roll.cc b/storage/xtradb/trx/trx0roll.cc index bc11f1d76bd..e1e253cbb76 100644 --- a/storage/xtradb/trx/trx0roll.cc +++ b/storage/xtradb/trx/trx0roll.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -336,7 +336,7 @@ the row, these locks are naturally released in the rollback. Savepoints which were set after this savepoint are deleted. @return if no savepoint of the name found then DB_NO_SAVEPOINT, otherwise DB_SUCCESS */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t trx_rollback_to_savepoint_for_mysql_low( /*====================================*/ @@ -796,7 +796,7 @@ extern "C" UNIV_INTERN os_thread_ret_t DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( /*================================================*/ - void* arg __attribute__((unused))) + void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc index a5e578c547a..45f2a1c68aa 100644 --- a/storage/xtradb/trx/trx0trx.cc +++ b/storage/xtradb/trx/trx0trx.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1165,7 +1165,7 @@ trx_serialisation_number_get( /****************************************************************//** Assign the transaction its history serialisation number and write the update UNDO log record to the assigned rollback segment. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void trx_write_serialisation_history( /*============================*/ @@ -1236,7 +1236,7 @@ trx_write_serialisation_history( /******************************************************************** Finalize a transaction containing updates for a FTS table. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void trx_finalize_for_fts_table( /*=======================*/ @@ -1269,7 +1269,7 @@ trx_finalize_for_fts_table( /******************************************************************//** Finalize a transaction containing updates to FTS tables. 
*/ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void trx_finalize_for_fts( /*=================*/ @@ -1343,7 +1343,7 @@ trx_flush_log_if_needed_low( /**********************************************************************//** If required, flushes the log to disk based on the value of innodb_flush_log_at_trx_commit. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void trx_flush_log_if_needed( /*====================*/ @@ -1358,7 +1358,7 @@ trx_flush_log_if_needed( /****************************************************************//** Commits a transaction in memory. */ -static __attribute__((nonnull)) +static MY_ATTRIBUTE((nonnull)) void trx_commit_in_memory( /*=================*/ @@ -2399,7 +2399,7 @@ which is in the prepared state @return trx on match, the trx->xid will be invalidated; note that the trx may have been committed, unless the caller is holding lock_sys->mutex */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) trx_t* trx_get_trx_by_xid_low( /*===================*/ diff --git a/storage/xtradb/trx/trx0undo.cc b/storage/xtradb/trx/trx0undo.cc index 290271c6cab..2ddb35d5ad0 100644 --- a/storage/xtradb/trx/trx0undo.cc +++ b/storage/xtradb/trx/trx0undo.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -419,11 +419,11 @@ trx_undo_page_init( Creates a new undo log segment in file. @return DB_SUCCESS if page creation OK possible error codes are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t trx_undo_seg_create( /*================*/ - trx_rseg_t* rseg __attribute__((unused)),/*!< in: rollback segment */ + trx_rseg_t* rseg MY_ATTRIBUTE((unused)),/*!< in: rollback segment */ trx_rsegf_t* rseg_hdr,/*!< in: rollback segment header, page x-latched */ ulint type, /*!< in: type of the segment: TRX_UNDO_INSERT or @@ -443,7 +443,9 @@ trx_undo_seg_create( ibool success; dberr_t err = DB_SUCCESS; - ut_ad(mtr && id && rseg_hdr); + ut_ad(mtr != NULL); + ut_ad(id != NULL); + ut_ad(rseg_hdr != NULL); ut_ad(mutex_own(&(rseg->mutex))); /* fputs(type == TRX_UNDO_INSERT @@ -827,7 +829,7 @@ byte* trx_undo_parse_discard_latest( /*==========================*/ byte* ptr, /*!< in: buffer */ - byte* end_ptr __attribute__((unused)), /*!< in: buffer end */ + byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ page_t* page, /*!< in: page or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ { @@ -1557,7 +1559,7 @@ Creates a new undo log. 
@return DB_SUCCESS if successful in creating the new undo lob object, possible error codes are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_OUT_OF_MEMORY */ -static __attribute__((nonnull, warn_unused_result)) +static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t trx_undo_create( /*============*/ -- cgit v1.2.1 From 0d8bb019b690894fde0f00cbd931aa023778ee48 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 10 Aug 2016 19:26:54 +0200 Subject: 5.6.32 --- storage/perfschema/table_events_statements.cc | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/storage/perfschema/table_events_statements.cc b/storage/perfschema/table_events_statements.cc index 6931584895f..233994dc9c1 100644 --- a/storage/perfschema/table_events_statements.cc +++ b/storage/perfschema/table_events_statements.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -340,11 +340,17 @@ void table_events_statements_common::make_row_part_1(PFS_events_statements *stat CHARSET_INFO *cs= get_charset(statement->m_sqltext_cs_number, MYF(0)); size_t valid_length= statement->m_sqltext_length; - if (cs->mbmaxlen > 1) + if (cs != NULL) { - int well_formed_error; - valid_length= cs->cset->well_formed_len(cs, statement->m_sqltext, statement->m_sqltext + valid_length, - valid_length, &well_formed_error); + if (cs->mbmaxlen > 1) + { + int well_formed_error; + valid_length= cs->cset->well_formed_len(cs, + statement->m_sqltext, + statement->m_sqltext + valid_length, + valid_length, + &well_formed_error); + } } m_row.m_sqltext.set_charset(cs); -- cgit v1.2.1 From 4f2d2143599bd63889265a6be00d0b8494d525ef Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 10 Aug 2016 19:30:20 +0200 Subject: 5.6.31-77.0 --- storage/tokudb/CMakeLists.txt | 6 +- storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 2 + storage/tokudb/PerconaFT/src/ydb-internal.h | 1 + storage/tokudb/PerconaFT/src/ydb.cc | 152 +++++++++++++++------ storage/tokudb/hatoku_defines.h | 4 - storage/tokudb/hatoku_hton.cc | 4 +- storage/tokudb/mysql-test/tokudb/bulk-fetch-gen.py | 1 + .../tokudb/locks-blocking-row-locks-testgen.py | 1 + .../tokudb/mysql-test/tokudb/replace-ignore-gen.py | 1 + .../mysql-test/tokudb/t/change_column_bin.py | 1 + .../tokudb/t/change_column_bin_rename.py | 1 + .../mysql-test/tokudb/t/change_column_blob.py | 1 + .../mysql-test/tokudb/t/change_column_char.py | 1 + .../tokudb/t/change_column_char_binary.py | 1 + .../tokudb/t/change_column_char_charbinary.py | 1 + .../tokudb/t/change_column_char_rename.py | 1 + .../mysql-test/tokudb/t/change_column_int.py | 1 + .../mysql-test/tokudb/t/change_column_int_key.py | 1 + .../tokudb/t/change_column_int_not_supported.py | 1 + .../tokudb/t/change_column_int_rename.py | 1 + .../tokudb/t/change_column_multiple_columns.py | 1 + .../mysql-test/tokudb/t/change_column_text.py | 1 + .../tokudb/t/i_s_tokudb_lock_waits_released.test | 3 + .../tokudb/t/i_s_tokudb_locks_released.test | 3 + storage/tokudb/mysql-test/tokudb/t/suite.opt | 2 +- .../tokudb/mysql-test/tokudb_add_index/t/suite.opt | 2 +- .../mysql-test/tokudb_alter_table/t/suite.opt | 2 +- .../tokudb/mysql-test/tokudb_backup/t/suite.opt | 2 +- storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt | 2 +- storage/tokudb/mysql-test/tokudb_parts/t/suite.opt | 2 
+- .../t/rpl_rfr_disable_on_expl_pk_absence-slave.opt | 2 +- storage/tokudb/mysql-test/tokudb_rpl/t/suite.opt | 2 +- .../tokudb/mysql-test/tokudb_sys_vars/t/suite.opt | 2 +- storage/tokudb/tokudb_sysvars.cc | 19 +-- storage/tokudb/tokudb_sysvars.h | 4 +- 35 files changed, 153 insertions(+), 79 deletions(-) mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_bin.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_char.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_int.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py mode change 100644 => 100755 storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index b1b5a38fd75..4ec539f7d0b 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(TOKUDB_VERSION 5.6.30-76.3) +SET(TOKUDB_VERSION 5.6.31-77.0) # PerconaFT only supports x86-64 and cmake-2.8.9+ IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT CMAKE_VERSION VERSION_LESS "2.8.9") @@ -52,10 +52,6 @@ IF(DEFINED TOKUDB_NOPATCH_CONFIG) ADD_DEFINITIONS("-DTOKUDB_NOPATCH_CONFIG=${TOKUDB_NOPATCH_CONFIG}") ENDIF() -IF(DEFINED TOKUDB_CHECK_JEMALLOC) - ADD_DEFINITIONS("-DTOKUDB_CHECK_JEMALLOC=${TOKUDB_CHECK_JEMALLOC}") -ENDIF() - macro(set_cflags_if_supported) foreach(flag ${ARGN}) string(REGEX REPLACE "-" "_" temp_flag ${flag}) diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc index 5c29209e19d..4b62703480f 100644 --- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc +++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc @@ -420,6 +420,8 @@ static void print_db_env_struct (void) { "int (*set_client_pool_threads)(DB_ENV *, uint32_t)", "int (*set_cachetable_pool_threads)(DB_ENV *, uint32_t)", "int (*set_checkpoint_pool_threads)(DB_ENV *, uint32_t)", + "void (*set_check_thp)(DB_ENV *, bool new_val)", + "bool (*get_check_thp)(DB_ENV *)", NULL}; sort_and_dump_fields("db_env", true, extra); diff --git a/storage/tokudb/PerconaFT/src/ydb-internal.h b/storage/tokudb/PerconaFT/src/ydb-internal.h index 3737a1caf99..462a2a3d861 100644 --- a/storage/tokudb/PerconaFT/src/ydb-internal.h +++ b/storage/tokudb/PerconaFT/src/ydb-internal.h @@ -132,6 +132,7 @@ struct __toku_db_env_internal { int datadir_lockfd; int logdir_lockfd; int tmpdir_lockfd; + bool check_thp; // if set check if transparent huge pages are disables uint64_t (*get_loader_memory_size_callback)(void); uint64_t default_lock_timeout_msec; uint64_t (*get_lock_timeout_callback)(uint64_t default_lock_timeout_msec); diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc index 55da418a0de..aed271bce40 100644 --- a/storage/tokudb/PerconaFT/src/ydb.cc +++ b/storage/tokudb/PerconaFT/src/ydb.cc @@ -623,32 +623,39 @@ ydb_recover_log_exists(DB_ENV *env) { } // Validate that all required files are present, no side effects. 
-// Return 0 if all is well, ENOENT if some files are present but at least one is missing, +// Return 0 if all is well, ENOENT if some files are present but at least one is +// missing, // other non-zero value if some other error occurs. // Set *valid_newenv if creating a new environment (all files missing). -// (Note, if special dictionaries exist, then they were created transactionally and log should exist.) -static int -validate_env(DB_ENV * env, bool * valid_newenv, bool need_rollback_cachefile) { +// (Note, if special dictionaries exist, then they were created transactionally +// and log should exist.) +static int validate_env(DB_ENV *env, + bool *valid_newenv, + bool need_rollback_cachefile) { int r; - bool expect_newenv = false; // set true if we expect to create a new env + bool expect_newenv = false; // set true if we expect to create a new env toku_struct_stat buf; - char* path = NULL; + char *path = NULL; // Test for persistent environment - path = toku_construct_full_name(2, env->i->dir, toku_product_name_strings.environmentdictionary); + path = toku_construct_full_name( + 2, env->i->dir, toku_product_name_strings.environmentdictionary); assert(path); r = toku_stat(path, &buf); if (r == 0) { expect_newenv = false; // persistent info exists - } - else { + } else { int stat_errno = get_error_errno(); if (stat_errno == ENOENT) { expect_newenv = true; r = 0; - } - else { - r = toku_ydb_do_error(env, stat_errno, "Unable to access persistent environment\n"); + } else { + r = toku_ydb_do_error( + env, + stat_errno, + "Unable to access persistent environment [%s] in [%s]\n", + toku_product_name_strings.environmentdictionary, + env->i->dir); assert(r); } } @@ -656,23 +663,40 @@ validate_env(DB_ENV * env, bool * valid_newenv, bool need_rollback_cachefile) { // Test for existence of rollback cachefile if it is expected to exist if (r == 0 && need_rollback_cachefile) { - path = toku_construct_full_name(2, env->i->dir, toku_product_name_strings.rollback_cachefile); + path = toku_construct_full_name( + 2, env->i->dir, toku_product_name_strings.rollback_cachefile); assert(path); r = toku_stat(path, &buf); - if (r == 0) { - if (expect_newenv) // rollback cachefile exists, but persistent env is missing - r = toku_ydb_do_error(env, ENOENT, "Persistent environment is missing\n"); - } - else { + if (r == 0) { + if (expect_newenv) // rollback cachefile exists, but persistent env + // is missing + r = toku_ydb_do_error( + env, + ENOENT, + "Persistent environment is missing while looking for " + "rollback cachefile [%s] in [%s]\n", + toku_product_name_strings.rollback_cachefile, env->i->dir); + } else { int stat_errno = get_error_errno(); if (stat_errno == ENOENT) { - if (!expect_newenv) // rollback cachefile is missing but persistent env exists - r = toku_ydb_do_error(env, ENOENT, "rollback cachefile directory is missing\n"); - else - r = 0; // both rollback cachefile and persistent env are missing - } - else { - r = toku_ydb_do_error(env, stat_errno, "Unable to access rollback cachefile\n"); + if (!expect_newenv) // rollback cachefile is missing but + // persistent env exists + r = toku_ydb_do_error( + env, + ENOENT, + "rollback cachefile [%s] is missing from [%s]\n", + toku_product_name_strings.rollback_cachefile, + env->i->dir); + else + r = 0; // both rollback cachefile and persistent env are + // missing + } else { + r = toku_ydb_do_error( + env, + stat_errno, + "Unable to access rollback cachefile [%s] in [%s]\n", + toku_product_name_strings.rollback_cachefile, + env->i->dir); assert(r); } } 
@@ -681,23 +705,41 @@ validate_env(DB_ENV * env, bool * valid_newenv, bool need_rollback_cachefile) { // Test for fileops directory if (r == 0) { - path = toku_construct_full_name(2, env->i->dir, toku_product_name_strings.fileopsdirectory); + path = toku_construct_full_name( + 2, env->i->dir, toku_product_name_strings.fileopsdirectory); assert(path); r = toku_stat(path, &buf); - if (r == 0) { - if (expect_newenv) // fileops directory exists, but persistent env is missing - r = toku_ydb_do_error(env, ENOENT, "Persistent environment is missing\n"); - } - else { + if (r == 0) { + if (expect_newenv) // fileops directory exists, but persistent env + // is missing + r = toku_ydb_do_error( + env, + ENOENT, + "Persistent environment is missing while looking for " + "fileops directory [%s] in [%s]\n", + toku_product_name_strings.fileopsdirectory, + env->i->dir); + } else { int stat_errno = get_error_errno(); if (stat_errno == ENOENT) { - if (!expect_newenv) // fileops directory is missing but persistent env exists - r = toku_ydb_do_error(env, ENOENT, "Fileops directory is missing\n"); - else - r = 0; // both fileops directory and persistent env are missing - } - else { - r = toku_ydb_do_error(env, stat_errno, "Unable to access fileops directory\n"); + if (!expect_newenv) // fileops directory is missing but + // persistent env exists + r = toku_ydb_do_error( + env, + ENOENT, + "Fileops directory [%s] is missing from [%s]\n", + toku_product_name_strings.fileopsdirectory, + env->i->dir); + else + r = 0; // both fileops directory and persistent env are + // missing + } else { + r = toku_ydb_do_error( + env, + stat_errno, + "Unable to access fileops directory [%s] in [%s]\n", + toku_product_name_strings.fileopsdirectory, + env->i->dir); assert(r); } } @@ -709,16 +751,26 @@ validate_env(DB_ENV * env, bool * valid_newenv, bool need_rollback_cachefile) { // if using transactions, test for existence of log r = ydb_recover_log_exists(env); // return 0 or ENOENT if (expect_newenv && (r != ENOENT)) - r = toku_ydb_do_error(env, ENOENT, "Persistent environment information is missing (but log exists)\n"); + r = toku_ydb_do_error(env, + ENOENT, + "Persistent environment information is " + "missing (but log exists) while looking for " + "recovery log files in [%s]\n", + env->i->real_log_dir); else if (!expect_newenv && r == ENOENT) - r = toku_ydb_do_error(env, ENOENT, "Recovery log is missing (persistent environment information is present)\n"); + r = toku_ydb_do_error(env, + ENOENT, + "Recovery log is missing (persistent " + "environment information is present) while " + "looking for recovery log files in [%s]\n", + env->i->real_log_dir); else r = 0; } if (r == 0) *valid_newenv = expect_newenv; - else + else *valid_newenv = false; return r; } @@ -768,7 +820,7 @@ env_open(DB_ENV * env, const char *home, uint32_t flags, int mode) { goto cleanup; } - if (toku_os_huge_pages_enabled()) { + if (env->get_check_thp(env) && toku_os_huge_pages_enabled()) { r = toku_ydb_do_error(env, TOKUDB_HUGE_PAGES_ENABLED, "Huge pages are enabled, disable them before continuing\n"); goto cleanup; @@ -1234,6 +1286,18 @@ env_set_checkpoint_pool_threads(DB_ENV * env, uint32_t threads) { return 0; } +static void +env_set_check_thp(DB_ENV * env, bool new_val) { + assert(env); + env->i->check_thp = new_val; +} + +static bool +env_get_check_thp(DB_ENV * env) { + assert(env); + return env->i->check_thp; +} + static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags); static int @@ -2634,6 +2698,8 @@ 
toku_env_create(DB_ENV ** envp, uint32_t flags) { USENV(get_loader_memory_size); USENV(set_killed_callback); USENV(do_backtrace); + USENV(set_check_thp); + USENV(get_check_thp); #undef USENV // unlocked methods @@ -2659,6 +2725,8 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) { env_fs_init(result); env_fsync_log_init(result); + result->i->check_thp = true; + result->i->bt_compare = toku_builtin_compare_fun; r = toku_logger_create(&result->i->logger); diff --git a/storage/tokudb/hatoku_defines.h b/storage/tokudb/hatoku_defines.h index 174a885d36f..0269c47ffa3 100644 --- a/storage/tokudb/hatoku_defines.h +++ b/storage/tokudb/hatoku_defines.h @@ -69,10 +69,6 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #pragma interface /* gcc class implementation */ #endif -#if !defined(TOKUDB_CHECK_JEMALLOC) -#define TOKUDB_CHECK_JEMALLOC 1 -#endif - #if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099 // mariadb 10.0 #define TOKU_USE_DB_TYPE_TOKUDB 1 diff --git a/storage/tokudb/hatoku_hton.cc b/storage/tokudb/hatoku_hton.cc index a8876cddcf0..2b121189e83 100644 --- a/storage/tokudb/hatoku_hton.cc +++ b/storage/tokudb/hatoku_hton.cc @@ -278,7 +278,6 @@ static int tokudb_init_func(void *p) { db_env = NULL; tokudb_hton = (handlerton *) p; -#if TOKUDB_CHECK_JEMALLOC if (tokudb::sysvars::check_jemalloc) { typedef int (*mallctl_type)( const char*, @@ -305,7 +304,6 @@ static int tokudb_init_func(void *p) { goto error; } } -#endif r = tokudb_set_product_name(); if (r) { @@ -550,6 +548,8 @@ static int tokudb_init_func(void *p) { db_env, tokudb_get_loader_memory_size_callback); + db_env->set_check_thp(db_env, tokudb::sysvars::check_jemalloc); + r = db_env->open( db_env, tokudb_home, diff --git a/storage/tokudb/mysql-test/tokudb/bulk-fetch-gen.py b/storage/tokudb/mysql-test/tokudb/bulk-fetch-gen.py index f125ce771da..4e343772511 100644 --- a/storage/tokudb/mysql-test/tokudb/bulk-fetch-gen.py +++ b/storage/tokudb/mysql-test/tokudb/bulk-fetch-gen.py @@ -71,6 +71,7 @@ tables = [ ] # Code generation stats here +print "source include/have_tokudb.inc;" print "# Tokutek" print "# Test that bulk fetch works with various table types" print "" diff --git a/storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py b/storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py index 372431ada04..28bea5dea56 100644 --- a/storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py +++ b/storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py @@ -56,6 +56,7 @@ write_range_queries = [ timeouts = [0, 500] # Here's where all the magic happens +print "source include/have_tokudb.inc;" print "# Tokutek" print "# Blocking row lock tests;" print "# Generated by %s on %s;" % (__file__, datetime.date.today()) diff --git a/storage/tokudb/mysql-test/tokudb/replace-ignore-gen.py b/storage/tokudb/mysql-test/tokudb/replace-ignore-gen.py index d8227da96a5..65c0f317abe 100644 --- a/storage/tokudb/mysql-test/tokudb/replace-ignore-gen.py +++ b/storage/tokudb/mysql-test/tokudb/replace-ignore-gen.py @@ -23,6 +23,7 @@ def sqlgen_explain_and_do(query): def sqlgen_drop_table(): print "drop table t;" +print "source include/have_tokudb.inc;" print "# Tokutek" print "# Test that replace into and insert ignore insertions " print "# work under various index schemas. 
" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py b/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py old mode 100644 new mode 100755 index 0ddee301d1b..d21f0cb9a96 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_bin.py @@ -22,6 +22,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_bin.py" print "# test binary expansion is hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py old mode 100644 new mode 100755 index b574f15735a..6bd5de38fe8 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_bin_rename.py @@ -26,6 +26,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_char_rename.py" print "# test char expansion + rename is hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_blob.py b/storage/tokudb/mysql-test/tokudb/t/change_column_blob.py index d884932307f..e25fce54d0e 100644 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_blob.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_blob.py @@ -1,5 +1,6 @@ import sys def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_blob.py" print "# generate hot blob expansion test cases" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char.py old mode 100644 new mode 100755 index 99d99d1c017..d2ac40eec16 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_char.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char.py @@ -22,6 +22,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_char.py" print "# test char expansion" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py old mode 100644 new mode 100755 index 4b94d2fc98b..e92797918d5 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_binary.py @@ -23,6 +23,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_char_binary.py" print "# test that char(X) <-> binary(X) is not hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py old mode 100644 new mode 100755 index 81cf2058a3e..065e37b186d --- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_charbinary.py @@ -23,6 +23,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_char_charbinary.py" print "# test that char(X) <-> char(X) binary is not hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py old mode 100644 new mode 100755 
index 6bc8ae80c7d..fe73fce0d53 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_char_rename.py @@ -26,6 +26,7 @@ def gen_test(n): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_char_rename.py" print "# test char expansion + rename is hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int.py old mode 100644 new mode 100755 index f93b9d7bb69..6f69156e260 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_int.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int.py @@ -25,6 +25,7 @@ def gen_test(types, values): print "DROP TABLE ti;" print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_int.py" print "# test int expansion is hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py old mode 100644 new mode 100755 index 8b217b59ab1..fd7e5868c40 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_key.py @@ -25,6 +25,7 @@ def gen_test(types): print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_int_key.py" print "# ensure that changing an int column that is part of a key is not hot" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py old mode 100644 new mode 100755 index 6fbb2343fea..1708c65efde --- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_not_supported.py @@ -21,6 +21,7 @@ def gen_tests(int_types, modifiers): for from_modifier in range(len(modifiers)): gen_tests_for_int(from_int, from_modifier, int_types, modifiers) def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_int_not_supported.py" print "# ensure that int types are only expanded and are not cnverted to some other type" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py b/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py old mode 100644 new mode 100755 index a1cd155f3de..5222564a9a2 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_int_rename.py @@ -27,6 +27,7 @@ def gen_test(types, values): print "DROP TABLE ti;" print "DROP TABLE t;" def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_int_rename.py" print "--disable_warnings" print "DROP TABLE IF EXISTS t, ti;" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_multiple_columns.py b/storage/tokudb/mysql-test/tokudb/t/change_column_multiple_columns.py index 7e562f38a0f..05f3683a14f 100644 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_multiple_columns.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_multiple_columns.py @@ -6,6 +6,7 @@ old_types = [ 'VARCHAR(1)', 'VARBINARY(1)', 'INT', 'CHAR(1)', 'BINARY(1)' ] new_types = [ 'VARCHAR(2)', 'VARBINARY(2)', 'BIGINT', 'CHAR(2)', 'BINARY(2)' ] def main(): + print "source include/have_tokudb.inc;" print "# this test 
generated by change_multiple_columns.py" print "# this test generated multiple column changes which should all fail since we support only one at a time" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/change_column_text.py b/storage/tokudb/mysql-test/tokudb/t/change_column_text.py index 13de13dc222..fada9b0852e 100644 --- a/storage/tokudb/mysql-test/tokudb/t/change_column_text.py +++ b/storage/tokudb/mysql-test/tokudb/t/change_column_text.py @@ -1,5 +1,6 @@ import sys def main(): + print "source include/have_tokudb.inc;" print "# this test is generated by change_text.py" print "# generate hot text expansion test cases" print "--disable_warnings" diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test index 1e9eecb98cf..6488f27cfbb 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_released.test @@ -48,6 +48,9 @@ select trx_id,trx_mysql_thread_id from information_schema.tokudb_trx; connection conn_a; commit; # verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction +let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main'; +source include/wait_condition.inc; + replace_column 1 TRX_ID 2 MYSQL_ID; select * from information_schema.tokudb_locks; select * from information_schema.tokudb_lock_waits; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test index 52a40e470ab..455c5100407 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks_released.test @@ -39,6 +39,9 @@ eval select * from information_schema.tokudb_locks; connection conn_a; commit; # verify that the lock on the 1st transaction is released and replaced by the lock for the 2nd transaction +let $wait_condition= select count(*)=1 from information_schema.tokudb_locks where locks_dname='./test/t-main'; +source include/wait_condition.inc; + replace_column 1 TRX_ID 2 MYSQL_ID; select * from information_schema.tokudb_locks; diff --git a/storage/tokudb/mysql-test/tokudb/t/suite.opt b/storage/tokudb/mysql-test/tokudb/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt b/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_add_index/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt b/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_alter_table/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT 
$TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt index e52bd6327e0..0d80cf85a91 100644 --- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt b/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt b/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_parts/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt index fb12af6c5bd..c87bbb62418 100644 --- a/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt +++ b/storage/tokudb/mysql-test/tokudb_rpl/t/rpl_rfr_disable_on_expl_pk_absence-slave.opt @@ -1 +1 @@ ---read-only=true --tokudb-rpl-unique-checks=false --tokudb-rpl-lookup-rows=false +--read-only=true --loose-tokudb-rpl-unique-checks=false --loose-tokudb-rpl-lookup-rows=false diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/suite.opt b/storage/tokudb/mysql-test/tokudb_rpl/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_rpl/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_rpl/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt b/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt index 23511b05020..15acdf7397f 100644 --- a/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_sys_vars/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 +$TOKUDB_OPT $TOKUDB_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index b3f598f845f..84f1c873a26 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -74,9 +74,7 @@ char* gdb_path = NULL; my_bool gdb_on_fatal = FALSE; #endif -#if TOKUDB_CHECK_JEMALLOC -uint check_jemalloc = 0; -#endif +my_bool check_jemalloc = TRUE; static 
MYSQL_SYSVAR_ULONGLONG( cache_size, @@ -416,19 +414,14 @@ static MYSQL_SYSVAR_BOOL( true); #endif -#if TOKUDB_CHECK_JEMALLOC -static MYSQL_SYSVAR_UINT( +static MYSQL_SYSVAR_BOOL( check_jemalloc, check_jemalloc, - 0, - "check if jemalloc is linked", + PLUGIN_VAR_READONLY|PLUGIN_VAR_RQCMDARG, + "check if jemalloc is linked and transparent huge pages are disabled", NULL, NULL, - 1, - 0, - 1, - 0); -#endif + TRUE); //****************************************************************************** @@ -948,9 +941,7 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(gdb_on_fatal), #endif -#if TOKUDB_CHECK_JEMALLOC MYSQL_SYSVAR(check_jemalloc), -#endif // session vars MYSQL_SYSVAR(alter_print_error), diff --git a/storage/tokudb/tokudb_sysvars.h b/storage/tokudb/tokudb_sysvars.h index adc5a50c0a7..70784fdcae3 100644 --- a/storage/tokudb/tokudb_sysvars.h +++ b/storage/tokudb/tokudb_sysvars.h @@ -88,9 +88,7 @@ extern char* gdb_path; extern my_bool gdb_on_fatal; #endif -#if TOKUDB_CHECK_JEMALLOC -extern uint check_jemalloc; -#endif +extern my_bool check_jemalloc; #if TOKUDB_DEBUG // used to control background job manager -- cgit v1.2.1 From abfbe80840e4b8ad63b31ea65b59f52ef7d151a2 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 10 Aug 2016 14:48:44 -0400 Subject: MW-292: Fix test case Also backported missing test include files. --- mysql-test/include/galera_clear_sync_point.inc | 1 + mysql-test/include/galera_set_sync_point.inc | 1 + mysql-test/include/galera_signal_sync_point.inc | 1 + mysql-test/include/galera_wait_sync_point.inc | 6 ++++++ mysql-test/suite/galera/include/galera_have_debug_sync.inc | 9 +++++++++ mysql-test/suite/galera/t/MW-292.test | 6 +++--- 6 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 mysql-test/include/galera_clear_sync_point.inc create mode 100644 mysql-test/include/galera_set_sync_point.inc create mode 100644 mysql-test/include/galera_signal_sync_point.inc create mode 100644 mysql-test/include/galera_wait_sync_point.inc create mode 100644 mysql-test/suite/galera/include/galera_have_debug_sync.inc diff --git a/mysql-test/include/galera_clear_sync_point.inc b/mysql-test/include/galera_clear_sync_point.inc new file mode 100644 index 00000000000..589522a55b0 --- /dev/null +++ b/mysql-test/include/galera_clear_sync_point.inc @@ -0,0 +1 @@ +SET GLOBAL wsrep_provider_options = 'dbug='; diff --git a/mysql-test/include/galera_set_sync_point.inc b/mysql-test/include/galera_set_sync_point.inc new file mode 100644 index 00000000000..5fe4e8c38c0 --- /dev/null +++ b/mysql-test/include/galera_set_sync_point.inc @@ -0,0 +1 @@ +--eval SET GLOBAL wsrep_provider_options = 'dbug=d,$galera_sync_point' diff --git a/mysql-test/include/galera_signal_sync_point.inc b/mysql-test/include/galera_signal_sync_point.inc new file mode 100644 index 00000000000..eaa5cdd43f5 --- /dev/null +++ b/mysql-test/include/galera_signal_sync_point.inc @@ -0,0 +1 @@ +--eval SET GLOBAL wsrep_provider_options = 'signal=$galera_sync_point' diff --git a/mysql-test/include/galera_wait_sync_point.inc b/mysql-test/include/galera_wait_sync_point.inc new file mode 100644 index 00000000000..cf3a4980186 --- /dev/null +++ b/mysql-test/include/galera_wait_sync_point.inc @@ -0,0 +1,6 @@ +--let $wait_timeout = 10 +--let $wsrep_on_orig = `SELECT @@wsrep_on` +SET SESSION wsrep_on = 0; +--let $wait_condition = SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_debug_sync_waiters' AND VARIABLE_VALUE = '$galera_sync_point' +--source include/wait_condition.inc +--eval SET SESSION 
wsrep_on = $wsrep_on_orig diff --git a/mysql-test/suite/galera/include/galera_have_debug_sync.inc b/mysql-test/suite/galera/include/galera_have_debug_sync.inc new file mode 100644 index 00000000000..7c0156052d8 --- /dev/null +++ b/mysql-test/suite/galera/include/galera_have_debug_sync.inc @@ -0,0 +1,9 @@ +--disable_query_log + +--let $galera_have_debug_sync = `SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_debug_sync_waiters'` + +--if (!$galera_have_debug_sync) { + --skip "Test requires Galera debug library with debug_sync functionality" +} + +--enable_query_log diff --git a/mysql-test/suite/galera/t/MW-292.test b/mysql-test/suite/galera/t/MW-292.test index 945d9f42458..7e4cf9050ae 100644 --- a/mysql-test/suite/galera/t/MW-292.test +++ b/mysql-test/suite/galera/t/MW-292.test @@ -44,11 +44,11 @@ UPDATE t1 SET f2 = 'c' WHERE f1 = 2; # Wait for both transactions to be blocked --connection node_1a ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'System lock'; +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event::find_row%'; --source include/wait_condition.inc ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; ---source include/wait_condition.inc +#--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; +#--source include/wait_condition.inc # Unblock the commit --connection node_1a -- cgit v1.2.1 From d40d3f4e57f375897aa29e72e947e372e6bc229d Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Tue, 19 Jul 2016 20:44:02 +0000 Subject: MDEV-10314 : wsrep_client_thread was not set in threadpool. Fixed threadpool_add_connection to use thd_prepare_connection() to match thread-per-conection flow. 
--- sql/threadpool_common.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 9e0cb07b86c..75b8f821d22 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -148,9 +148,8 @@ int threadpool_add_connection(THD *thd) if (!setup_connection_thread_globals(thd)) { - if (!login_connection(thd)) + if (!thd_prepare_connection(thd)) { - prepare_new_connection_state(thd); /* Check if THD is ok, as prepare_new_connection_state() -- cgit v1.2.1 From 191f7b0fc1a1c319ea360ffe5440280a26703506 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 10 Aug 2016 21:15:51 +0200 Subject: after merge fixes --- .../suite/innodb/r/innodb-wl5522,xtradb.rdiff | 56 ++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 mysql-test/suite/innodb/r/innodb-wl5522,xtradb.rdiff diff --git a/mysql-test/suite/innodb/r/innodb-wl5522,xtradb.rdiff b/mysql-test/suite/innodb/r/innodb-wl5522,xtradb.rdiff new file mode 100644 index 00000000000..4aea0b451ec --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-wl5522,xtradb.rdiff @@ -0,0 +1,56 @@ +--- suite/innodb/r/innodb-wl5522.result ++++ suite/innodb/r/innodb-wl5522.reject +@@ -580,7 +580,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x5 and the meta-data file has 0x0) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x1 and the meta-data file has 0x0) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; +@@ -592,7 +592,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x5 and the meta-data file has 0x0) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x21 and the meta-data file has 0x0) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; +@@ -766,7 +766,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x5 and the meta-data file has 0x1) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x0 and the meta-data file has 0x1) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; +@@ -778,7 +778,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x5 and the meta-data file has 0x1) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x21 and the meta-data file has 0x1) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; +@@ -955,7 +955,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x5 and the meta-data file has 0x21) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x1 and the meta-data file has 0x21) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; +@@ -967,7 +967,7 @@ + ERROR HY000: Tablespace has been discarded for table 't1' + restore: t1 .ibd and .cfg files + ALTER TABLE t1 IMPORT TABLESPACE; +-ERROR HY000: Schema mismatch (Table flags don't match, server table 
has 0x5 and the meta-data file has 0x21) ++ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x0 and the meta-data file has 0x21) + unlink: t1.ibd + unlink: t1.cfg + DROP TABLE t1; -- cgit v1.2.1 From 2f9555c40f96a956184a97e99d1b8f4cafbab024 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 10 Aug 2016 15:51:40 -0700 Subject: Removed the parameter from st_select_lex_unit::exec_recursive. Moved checking whether the limit set for the number of iterations when executing a recursive query has been reached from st_select_lex_unit::exec_recursive to TABLE_LIST::fill_recursive. Changed the name of the system variable max_recursion_level for max_recursive_iterations. Adjusted test cases. --- mysql-test/r/cte_recursive.result | 8 ++++---- mysql-test/r/mysqld--help.result | 4 ++-- .../suite/sys_vars/r/sysvars_server_notembedded.result | 2 +- mysql-test/t/cte_recursive.test | 6 +++--- sql/sql_class.h | 2 +- sql/sql_cte.h | 16 ++++++++++++++++ sql/sql_derived.cc | 11 ++++++++--- sql/sql_lex.h | 2 +- sql/sql_union.cc | 17 +---------------- sql/sys_vars.cc | 6 +++--- 10 files changed, 40 insertions(+), 34 deletions(-) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index a21416b5f43..f6ab5ee5dd7 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -1086,7 +1086,7 @@ generation name 1 Mom 2 Grandpa Bill 2 Grandma Ann -set statement max_recursion_level=2 for +set statement max_recursive_iterations=1 for with recursive ancestor_ids (id, generation) as @@ -1463,8 +1463,8 @@ drop table folks; create table t1(a int); insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); explain format=json -with recursive T as (select a from t1 union select a+10 from T where a < 1000) -select * from T; +with recursive t as (select a from t1 union select a+10 from t where a < 1000) +select * from t; EXPLAIN { "query_block": { @@ -1499,7 +1499,7 @@ EXPLAIN "access_type": "ALL", "rows": 10, "filtered": 100, - "attached_condition": "(T.a < 1000)" + "attached_condition": "(t.a < 1000)" } } } diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 9717b358f13..38075ef8520 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -450,7 +450,7 @@ The following options may be given as the first argument: max_allowed_packet instead. 
--max-prepared-stmt-count=# Maximum number of prepared statements in the server - --max-recursion-level[=#] + --max-recursive-iterations[=#] Maximum number of iterations when executing recursive queries --max-relay-log-size=# @@ -1273,7 +1273,7 @@ max-join-size 18446744073709551615 max-length-for-sort-data 1024 max-long-data-size 4194304 max-prepared-stmt-count 16382 -max-recursion-level 18446744073709551615 +max-recursive-iterations 18446744073709551615 max-relay-log-size 1073741824 max-seeks-for-key 18446744073709551615 max-sort-length 1024 diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index d534669ac58..04709f014f7 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -2165,7 +2165,7 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED -VARIABLE_NAME MAX_RECURSION_LEVEL +VARIABLE_NAME MAX_RECURSIVE_ITERATIONS SESSION_VALUE 4294967295 GLOBAL_VALUE 4294967295 GLOBAL_VALUE_ORIGIN COMPILE-TIME diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 60f058b15f7..98fe159e174 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -914,7 +914,7 @@ as ) select * from ancestors; -set statement max_recursion_level=2 for +set statement max_recursive_iterations=1 for with recursive ancestor_ids (id, generation) as @@ -1074,8 +1074,8 @@ create table t1(a int); insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); explain format=json -with recursive T as (select a from t1 union select a+10 from T where a < 1000) -select * from T; +with recursive t as (select a from t1 union select a+10 from t where a < 1000) +select * from t; drop table t1; diff --git a/sql/sql_class.h b/sql/sql_class.h index 04ca37295bb..04a80166ad1 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -558,7 +558,7 @@ typedef struct system_variables ulong max_allowed_packet; ulong max_error_count; ulong max_length_for_sort_data; - ulong max_recursion_level; + ulong max_recursive_iterations; ulong max_sort_length; ulong max_tmp_tables; ulong max_insert_delayed_threads; diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 52b2b8f4f77..dfe673dcce9 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -2,6 +2,7 @@ #define SQL_CTE_INCLUDED #include "sql_list.h" #include "sql_lex.h" +#include "sql_select.h" class select_union; struct st_unit_ctxt_elem; @@ -186,6 +187,8 @@ public: bool instantiate_tmp_tables(); + void prepare_for_next_iteration(); + friend class With_clause; }; @@ -356,6 +359,19 @@ bool With_element::all_are_stabilized() } +inline +void With_element::prepare_for_next_iteration() +{ + With_element *with_elem= this; + while ((with_elem= with_elem->get_next_mutually_recursive()) != this) + { + TABLE *rec_table= with_elem->first_rec_table_to_update; + if (rec_table) + rec_table->reginfo.join_tab->preread_init_done= false; + } +} + + inline void st_select_lex_unit::set_with_clause(With_clause *with_cl) { diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index e17896f9f24..33befdd4639 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -927,13 +927,18 @@ bool TABLE_LIST::fill_recursive(THD *thd) bool rc= false; st_select_lex_unit *unit= get_unit(); if (is_with_table_recursive_reference()) - rc= unit->exec_recursive(false); + { + rc= unit->exec_recursive(); + } else { rc= with->instantiate_tmp_tables(); - while(!rc && 
!with->all_are_stabilized()) + while (!rc && !with->all_are_stabilized()) { - rc= unit->exec_recursive(true); + if (with->level > thd->variables.max_recursive_iterations) + break; + with->prepare_for_next_iteration(); + rc= unit->exec_recursive(); } if (!rc) { diff --git a/sql/sql_lex.h b/sql/sql_lex.h index de3ccfc08a9..91741961db5 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -706,7 +706,7 @@ public: bool prepare(THD *thd, select_result *result, ulong additional_options); bool optimize(); bool exec(); - bool exec_recursive(bool is_driving_recursive); + bool exec_recursive(); bool cleanup(); inline void unclean() { cleaned= 0; } void reinit_exec_mechanism(); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 4a73a503ebe..382fabd39d7 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1167,7 +1167,7 @@ err: // One step of recursive execution -bool st_select_lex_unit::exec_recursive(bool is_driving_recursive) +bool st_select_lex_unit::exec_recursive() { st_select_lex *lex_select_save= thd->lex->current_select; st_select_lex *start= with_element->first_recursive; @@ -1189,18 +1189,6 @@ bool st_select_lex_unit::exec_recursive(bool is_driving_recursive) if ((saved_error= incr_table->file->ha_delete_all_rows())) goto err; - if (is_driving_recursive) - { - With_element *with_elem= with_element; - while ((with_elem= with_elem->get_next_mutually_recursive()) != - with_element) - { - rec_table= with_elem->first_rec_table_to_update; - if (rec_table) - rec_table->reginfo.join_tab->preread_init_done= false; - } - } - if (with_element->level == 0) { start= first_select(); @@ -1248,9 +1236,6 @@ bool st_select_lex_unit::exec_recursive(bool is_driving_recursive) if (with_element->level == 1) rec_table->reginfo.join_tab->preread_init_done= true; } - - if (with_element->level == thd->variables.max_recursion_level) - with_element->set_as_stabilized(); thd->lex->current_select= lex_select_save; err: diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index f63549ba3d7..29bb49a0083 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2145,10 +2145,10 @@ static Sys_var_ulong Sys_max_prepared_stmt_count( VALID_RANGE(0, 1024*1024), DEFAULT(16382), BLOCK_SIZE(1), &PLock_prepared_stmt_count); -static Sys_var_ulong Sys_max_recursion_level( - "max_recursion_level", +static Sys_var_ulong Sys_max_recursive_iterations( + "max_recursive_iterations", "Maximum number of iterations when executing recursive queries", - SESSION_VAR(max_recursion_level), CMD_LINE(OPT_ARG), + SESSION_VAR(max_recursive_iterations), CMD_LINE(OPT_ARG), VALID_RANGE(0, UINT_MAX), DEFAULT(UINT_MAX), BLOCK_SIZE(1)); static Sys_var_ulong Sys_max_sort_length( -- cgit v1.2.1 From 66ac894c40ad089175aaf6d4922f7250c23b9b3d Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 11 Aug 2016 17:50:21 +0200 Subject: MDEV-10455: libmariadbclient18 + MySQL-python leaks memory on failed connections Support of CLIENT_REMEMBER_OPTIONS and freeing options added. 
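
The change above is easiest to see from the caller's side. Below is a minimal, stand-alone C sketch that is not part of this patch (host, credentials and the timeout value are placeholders) showing the intent of CLIENT_REMEMBER_OPTIONS: options set with mysql_options() before connecting are kept across a failed attempt when the flag is passed, while without the flag the patched client frees them as soon as the attempt fails.

/* Illustrative sketch only; connection parameters are placeholders. */
#include <mysql.h>
#include <stdio.h>

int main(void)
{
  MYSQL mysql;
  unsigned int timeout= 5;

  mysql_init(&mysql);
  /* An option set before connecting; it survives a failed connect only
     because CLIENT_REMEMBER_OPTIONS is passed below. */
  mysql_options(&mysql, MYSQL_OPT_CONNECT_TIMEOUT, (const void *) &timeout);

  if (!mysql_real_connect(&mysql, "localhost", "user", "password", "test",
                          0, NULL, CLIENT_REMEMBER_OPTIONS))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(&mysql));
    /* The remembered options are still in place, so the caller may
       adjust them and retry. */
  }

  /* Releases whatever is still allocated, connected or not. */
  mysql_close(&mysql);
  return 0;
}

For the same reason the patch itself makes mysql_reconnect() and the slave's connect_to_master() always pass the flag: their retry paths must not lose the options they were configured with.
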
--- sql-common/client.c | 4 +++- sql/slave.cc | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/sql-common/client.c b/sql-common/client.c index 9105b72d57c..184d7983a00 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -3644,6 +3644,8 @@ error: /* Free alloced memory */ end_server(mysql); mysql_close_free(mysql); + if (!(client_flag & CLIENT_REMEMBER_OPTIONS)) + mysql_close_free_options(mysql); } DBUG_RETURN(0); } @@ -3714,7 +3716,7 @@ my_bool mysql_reconnect(MYSQL *mysql) } if (!mysql_real_connect(&tmp_mysql,mysql->host,mysql->user,mysql->passwd, mysql->db, mysql->port, mysql->unix_socket, - mysql->client_flag)) + mysql->client_flag | CLIENT_REMEMBER_OPTIONS)) { if (ctxt) my_context_install_suspend_resume_hook(ctxt, NULL, NULL); diff --git a/sql/slave.cc b/sql/slave.cc index 5d44fb2b6a8..a309fc5cdc5 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -6061,7 +6061,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, #ifndef DBUG_OFF mi->events_till_disconnect = disconnect_slave_event_count; #endif - ulong client_flag= 0; + ulong client_flag= CLIENT_REMEMBER_OPTIONS; if (opt_slave_compressed_protocol) client_flag=CLIENT_COMPRESS; /* We will use compression */ -- cgit v1.2.1 From f33c35240de0e2a1d33da80a081e1c8c7d941378 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 11 Aug 2016 14:39:26 -0700 Subject: Adjusted test result. --- mysql-test/suite/sys_vars/r/sysvars_server_embedded.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 9fd1e249e64..11f97cd309d 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -1983,7 +1983,7 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED -VARIABLE_NAME MAX_RECURSION_LEVEL +VARIABLE_NAME MAX_RECURSIVE_ITERATIONS SESSION_VALUE 4294967295 GLOBAL_VALUE 4294967295 GLOBAL_VALUE_ORIGIN COMPILE-TIME -- cgit v1.2.1 From 98e36b299915fe30bc935380d6bc1f6a1986eb52 Mon Sep 17 00:00:00 2001 From: Monty Date: Fri, 12 Aug 2016 20:02:23 +0300 Subject: With parallel replication we have had a couple of bugs where DDL's (like DROP TABLE) has been scheduled before conflicting DDL's (like INSERT) are commited. What makes these bugs hard to detect is that in most cases any wrong schduling are caught by MDL locks. It's only when there are timing issues that the bugs (usually deadlocks) are noticed. This patch adds a DBUG_ASSERT() that detects, in parallel replication, if a DDL is scheduled before any depending DML'S are commited. It does this be checking if there are any conflicting replication locks when the DDL is about to wait for getting it's MDL lock. I also did some minor code cleanups in sql_base.cc to make this code similar to other related code. 
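
Before the mdl.cc changes themselves, the new rule can be read compactly: a parallel-replication worker that is about to wait for an exclusive MDL lock must never find that lock held by an earlier worker of the same replication stream that has not yet reached its commit point. The stand-alone C toy below uses entirely hypothetical worker/lock structures, not the server's MDL_context or MDL_lock classes, and mirrors only the shape of that check.

/* Toy model of the invariant; not server code. */
#include <assert.h>
#include <stddef.h>

struct worker
{
  int stream_id;                 /* which replication stream (rli) it serves */
  int did_mark_start_commit;     /* has it reached its commit ordering point? */
};

static int conflicts_with_uncommitted_peer(const struct worker *holders[],
                                           size_t n_holders,
                                           const struct worker *ddl)
{
  size_t i;
  for (i= 0; i < n_holders; i++)
  {
    const struct worker *held_by= holders[i];
    if (held_by != ddl &&
        held_by->stream_id == ddl->stream_id &&
        !held_by->did_mark_start_commit)
      return 1;                  /* DDL would wait on an uncommitted peer */
  }
  return 0;
}

int main(void)
{
  struct worker dml= { 1, 0 };   /* earlier transaction, not yet committing */
  struct worker ddl= { 1, 0 };   /* later DDL in the same stream */
  const struct worker *holders[]= { &dml };

  /* The scheduling bug the assertion guards against: the DDL would have
     to wait for the uncommitted DML, so the check reports a conflict. */
  assert(conflicts_with_uncommitted_peer(holders, 1, &ddl));

  dml.did_mark_start_commit= 1;  /* the DML reached its commit point */
  assert(!conflicts_with_uncommitted_peer(holders, 1, &ddl));
  return 0;
}

In the patch the equivalent scan runs over the granted tickets inside MDL_lock::check_if_conflicting_replication_locks(), and its result only feeds a DBUG_ASSERT, so release builds are unaffected.
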
--- sql/mdl.cc | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ sql/mdl.h | 2 +- sql/sql_base.cc | 21 +++++---------------- sql/sql_class.cc | 10 +++++++++- sql/sql_class.h | 8 ++++---- 5 files changed, 72 insertions(+), 22 deletions(-) diff --git a/sql/mdl.cc b/sql/mdl.cc index 28d2006b023..37699f1847b 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -17,6 +17,7 @@ #include "sql_class.h" #include "debug_sync.h" #include "sql_array.h" +#include "rpl_rli.h" #include #include #include @@ -442,6 +443,7 @@ public: virtual void notify_conflicting_locks(MDL_context *ctx) = 0; virtual bitmap_t hog_lock_types_bitmap() const = 0; + bool check_if_conflicting_replication_locks(MDL_context *ctx); /** List of granted tickets for this lock. */ Ticket_list m_granted; @@ -2290,6 +2292,44 @@ void MDL_scoped_lock::notify_conflicting_locks(MDL_context *ctx) } } +/** + Check if there is any conflicting lock that could cause this thread + to wait for another thread which is not ready to commit. + This is always an error, as the upper level of parallel replication + should not allow a scheduling of a conflicting DDL until all earlier + transactions has commited. + + This function is only called for a slave using parallel replication + and trying to get an exclusive lock for the table. +*/ + +bool MDL_lock::check_if_conflicting_replication_locks(MDL_context *ctx) +{ + Ticket_iterator it(m_granted); + MDL_ticket *conflicting_ticket; + + while ((conflicting_ticket= it++)) + { + if (conflicting_ticket->get_ctx() != ctx) + { + MDL_context *conflicting_ctx= conflicting_ticket->get_ctx(); + + /* + If the conflicting thread is another parallel replication + thread for the same master and it's not in commit stage, then + the current transaction has started too early and something is + seriously wrong. + */ + if (conflicting_ctx->get_thd()->rgi_slave && + conflicting_ctx->get_thd()->rgi_slave->rli == + ctx->get_thd()->rgi_slave->rli && + !conflicting_ctx->get_thd()->rgi_slave->did_mark_start_commit) + return 1; // Fatal error + } + } + return 0; +} + /** Acquire one lock with waiting for conflicting locks to go away if needed. @@ -2355,6 +2395,19 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) if (lock->needs_notification(ticket) && lock_wait_timeout) lock->notify_conflicting_locks(this); + /* + Ensure that if we are trying to get an exclusive lock for a slave + running parallel replication, then we are not blocked by another + parallel slave thread that is not committed. This should never happen as + the parallel replication scheduler should never schedule a DDL while + DML's are still running. 
+ */ + DBUG_ASSERT((mdl_request->type != MDL_INTENTION_EXCLUSIVE && + mdl_request->type != MDL_EXCLUSIVE) || + !(get_thd()->rgi_slave && + get_thd()->rgi_slave->is_parallel_exec && + lock->check_if_conflicting_replication_locks(this))); + mysql_prlock_unlock(&lock->m_rwlock); will_wait_for(ticket); diff --git a/sql/mdl.h b/sql/mdl.h index c4d792acd29..13de60284da 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -910,7 +910,6 @@ private: */ MDL_wait_for_subgraph *m_waiting_for; private: - THD *get_thd() const { return m_owner->get_thd(); } MDL_ticket *find_ticket(MDL_request *mdl_req, enum_mdl_duration *duration); void release_locks_stored_before(enum_mdl_duration duration, MDL_ticket *sentinel); @@ -919,6 +918,7 @@ private: MDL_ticket **out_ticket); public: + THD *get_thd() const { return m_owner->get_thd(); } void find_deadlock(); ulong get_thread_id() const { return thd_get_thread_id(get_thd()); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index fc1d716667e..21a004c4ec6 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -648,7 +648,6 @@ bool close_cached_connection_tables(THD *thd, LEX_STRING *connection) static void mark_temp_tables_as_free_for_reuse(THD *thd) { - rpl_group_info *rgi_slave; DBUG_ENTER("mark_temp_tables_as_free_for_reuse"); if (thd->query_id == 0) @@ -657,9 +656,7 @@ static void mark_temp_tables_as_free_for_reuse(THD *thd) DBUG_VOID_RETURN; } - rgi_slave=thd->rgi_slave; - if ((!rgi_slave && thd->temporary_tables) || - (rgi_slave && unlikely(rgi_slave->rli->save_temporary_tables))) + if (thd->have_temporary_tables()) { thd->lock_temporary_tables(); for (TABLE *table= thd->temporary_tables ; table ; table= table->next) @@ -667,15 +664,7 @@ static void mark_temp_tables_as_free_for_reuse(THD *thd) if ((table->query_id == thd->query_id) && ! table->open_by_handler) mark_tmp_table_for_reuse(table); } - thd->unlock_temporary_tables(); - if (rgi_slave) - { - /* - Temporary tables are shared with other by sql execution threads. - As a safety messure, clear the pointer to the common area. - */ - thd->temporary_tables= 0; - } + thd->unlock_temporary_tables(1); } DBUG_VOID_RETURN; } @@ -1643,7 +1632,7 @@ TABLE *find_temporary_table(THD *thd, break; } } - thd->unlock_temporary_tables(); + thd->unlock_temporary_tables(0); return result; } @@ -1746,7 +1735,7 @@ void close_temporary_table(THD *thd, TABLE *table, thread_safe_decrement32(&slave_open_temp_tables, &thread_running_lock); table->in_use= 0; // No statistics } - thd->unlock_temporary_tables(); + thd->unlock_temporary_tables(0); close_temporary(table, free_share, delete_table); DBUG_VOID_RETURN; } @@ -5705,7 +5694,7 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton, { thread_safe_increment32(&slave_open_temp_tables, &thread_running_lock); } - thd->unlock_temporary_tables(); + thd->unlock_temporary_tables(0); } tmp_table->pos_in_table_list= 0; DBUG_PRINT("tmptable", ("opened table: '%s'.'%s' 0x%lx", tmp_table->s->db.str, diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 1ee5e4b4113..94462924686 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -6477,10 +6477,18 @@ void THD::rgi_lock_temporary_tables() temporary_tables= rgi_slave->rli->save_temporary_tables; } -void THD::rgi_unlock_temporary_tables() +void THD::rgi_unlock_temporary_tables(bool clear) { rgi_slave->rli->save_temporary_tables= temporary_tables; mysql_mutex_unlock(&rgi_slave->rli->data_lock); + if (clear) + { + /* + Temporary tables are shared with other by sql execution threads. + As a safety messure, clear the pointer to the common area. 
+ */ + temporary_tables= 0; + } } bool THD::rgi_have_temporary_tables() diff --git a/sql/sql_class.h b/sql/sql_class.h index 46eeeceb112..9da3387e75f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3778,7 +3778,7 @@ private: /* Protect against add/delete of temporary tables in parallel replication */ void rgi_lock_temporary_tables(); - void rgi_unlock_temporary_tables(); + void rgi_unlock_temporary_tables(bool clear); bool rgi_have_temporary_tables(); public: /* @@ -3802,15 +3802,15 @@ public: if (rgi_slave) rgi_lock_temporary_tables(); } - inline void unlock_temporary_tables() + inline void unlock_temporary_tables(bool clear) { if (rgi_slave) - rgi_unlock_temporary_tables(); + rgi_unlock_temporary_tables(clear); } inline bool have_temporary_tables() { return (temporary_tables || - (rgi_slave && rgi_have_temporary_tables())); + (rgi_slave && unlikely(rgi_have_temporary_tables()))); } }; -- cgit v1.2.1 From b3df257cfde490066933c4dc8329f9670aa8de58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 9 Aug 2016 16:51:35 +0300 Subject: MDEV-10469: innodb.innodb-alter-tempfile fails in buildbot: InnoDB: Warning: database page corruption or a failed Test case intentionally crashes the server and that could lead partially written pages that are then restored from doublewrite buffer. --- .../suite/innodb/r/innodb-alter-tempfile.result | 4 + .../suite/innodb/t/innodb-alter-tempfile.test | 7 ++ storage/innobase/buf/buf0buf.cc | 95 +++++++++++----------- storage/xtradb/buf/buf0buf.cc | 95 +++++++++++----------- 4 files changed, 107 insertions(+), 94 deletions(-) diff --git a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result index ce13ad0978b..3cc973ca3a3 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result +++ b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result @@ -4,6 +4,10 @@ # Temporary tablename will be unique. This makes sure that future # in-place ALTERs of the same table will not be blocked due to # temporary tablename. +call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed +"); +call mtr.add_suppression("InnoDB: file read of space .* page .*"); +call mtr.add_suppression("InnoDB: Trying to recover it from the doublewrite buffer."); # Crash the server in ha_innobase::commit_inplace_alter_table() CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL) ENGINE=innodb; SET debug='d,innodb_alter_commit_crash_before_commit'; diff --git a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test index ec1ea35f1cf..e1e736fc678 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-tempfile.test +++ b/mysql-test/suite/innodb/t/innodb-alter-tempfile.test @@ -20,6 +20,13 @@ --echo # in-place ALTERs of the same table will not be blocked due to --echo # temporary tablename. 
+# As we intentionally crash below, there could be partially written +# pages that are then recovered from the doublewrite buffer +call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed +"); +call mtr.add_suppression("InnoDB: file read of space .* page .*"); +call mtr.add_suppression("InnoDB: Trying to recover it from the doublewrite buffer."); + let datadir= `select @@datadir`; --let $_server_id= `SELECT @@server_id` diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 5b1f479168a..2c7c578150d 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -614,7 +614,7 @@ buf_page_is_corrupted( "InnoDB: " REFMAN "forcing-innodb-recovery.html\n" "InnoDB: for more information.\n", - (ulong) mach_read_from_4( + (ulint) mach_read_from_4( read_buf + FIL_PAGE_OFFSET), (lsn_t) mach_read_from_8( read_buf + FIL_PAGE_LSN), @@ -802,7 +802,7 @@ buf_page_print( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Page dump in ascii and hex (%lu bytes):\n", - (ulong) size); + size); ut_print_buf(stderr, read_buf, size); fputs("\nInnoDB: End of page dump\n", stderr); } @@ -2301,9 +2301,9 @@ buf_zip_decompress( } fprintf(stderr, - "InnoDB: unable to decompress space %lu page %lu\n", - (ulong) block->page.space, - (ulong) block->page.offset); + "InnoDB: unable to decompress space %u page %u\n", + block->page.space, + block->page.offset); return(FALSE); case FIL_PAGE_TYPE_ALLOCATED: @@ -3536,8 +3536,8 @@ buf_page_init( fprintf(stderr, "InnoDB: Error: page %lu %lu already found" " in the hash table: %p, %p\n", - (ulong) space, - (ulong) offset, + space, + offset, (const void*) hash_page, (const void*) block); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG mutex_exit(&block->mutex); @@ -3898,7 +3898,7 @@ buf_page_create( #ifdef UNIV_DEBUG if (buf_debug_prints) { fprintf(stderr, "Creating space %lu page %lu to buffer\n", - (ulong) space, (ulong) offset); + space, offset); } #endif /* UNIV_DEBUG */ @@ -4200,10 +4200,10 @@ buf_page_io_complete( ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: Error: reading page %lu\n" + " InnoDB: Error: reading page %u\n" "InnoDB: which is in the" " doublewrite buffer!\n", - (ulong) bpage->offset); + bpage->offset); } else if (!read_space_id && !read_page_no) { /* This is likely an uninitialized page. 
*/ } else if ((bpage->space @@ -4219,10 +4219,11 @@ buf_page_io_complete( " InnoDB: Error: space id and page n:o" " stored in the page\n" "InnoDB: read in are %lu:%lu," - " should be %lu:%lu!\n", - (ulong) read_space_id, (ulong) read_page_no, - (ulong) bpage->space, - (ulong) bpage->offset); + " should be %u:%u!\n", + read_space_id, + read_page_no, + bpage->space, + bpage->offset); } /* From version 3.23.38 up we store the page checksum @@ -4246,19 +4247,19 @@ corrupt: fprintf(stderr, "InnoDB: Database page corruption on disk" " or a failed\n" - "InnoDB: file read of page %lu.\n" + "InnoDB: file read of page %u.\n" "InnoDB: You may have to recover" " from a backup.\n", - (ulong) bpage->offset); + bpage->offset); buf_page_print(frame, buf_page_get_zip_size(bpage), BUF_PAGE_PRINT_NO_CRASH); fprintf(stderr, "InnoDB: Database page corruption on disk" " or a failed\n" - "InnoDB: file read of page %lu.\n" + "InnoDB: file read of page %u.\n" "InnoDB: You may have to recover" " from a backup.\n", - (ulong) bpage->offset); + bpage->offset); fputs("InnoDB: It is also possible that" " your operating\n" "InnoDB: system has corrupted its" @@ -4374,8 +4375,8 @@ corrupt: if (buf_debug_prints) { fprintf(stderr, "Has %s page space %lu page no %lu\n", io_type == BUF_IO_READ ? "read" : "written", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); + buf_page_get_space(bpage), + buf_page_get_page_no(bpage)); } #endif /* UNIV_DEBUG */ @@ -4410,17 +4411,17 @@ buf_all_freed_instance( if (UNIV_LIKELY_NULL(block)) { fil_space_t* space = fil_space_get(block->page.space); ib_logf(IB_LOG_LEVEL_ERROR, - "Page %lu %lu still fixed or dirty.", - (ulong) block->page.space, - (ulong) block->page.offset); + "Page %u %u still fixed or dirty.", + block->page.space, + block->page.offset); ib_logf(IB_LOG_LEVEL_ERROR, "Page oldest_modification %lu fix_count %d io_fix %d.", block->page.oldest_modification, block->page.buf_fix_count, buf_page_get_io_fix(&block->page)); ib_logf(IB_LOG_LEVEL_ERROR, - "Page space_id %lu name %s.", - (ulong)block->page.space, + "Page space_id %u name %s.", + block->page.space, (space && space->name) ? 
space->name : "NULL"); ut_error; } @@ -4721,16 +4722,16 @@ assert_s_latched: if (n_lru + n_free > buf_pool->curr_size + n_zip) { fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n", - (ulong) n_lru, (ulong) n_free, - (ulong) buf_pool->curr_size, (ulong) n_zip); + n_lru, n_free, + buf_pool->curr_size, n_zip); ut_error; } ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru); if (UT_LIST_GET_LEN(buf_pool->free) != n_free) { fprintf(stderr, "Free list len %lu, free blocks %lu\n", - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) n_free); + UT_LIST_GET_LEN(buf_pool->free), + n_free); ut_error; } @@ -4809,20 +4810,20 @@ buf_print_instance( "n pending flush LRU %lu list %lu single page %lu\n" "pages made young %lu, not young %lu\n" "pages read %lu, created %lu, written %lu\n", - (ulong) size, - (ulong) UT_LIST_GET_LEN(buf_pool->LRU), - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) UT_LIST_GET_LEN(buf_pool->flush_list), - (ulong) buf_pool->n_pend_unzip, - (ulong) buf_pool->n_pend_reads, - (ulong) buf_pool->n_flush[BUF_FLUSH_LRU], - (ulong) buf_pool->n_flush[BUF_FLUSH_LIST], - (ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE], - (ulong) buf_pool->stat.n_pages_made_young, - (ulong) buf_pool->stat.n_pages_not_made_young, - (ulong) buf_pool->stat.n_pages_read, - (ulong) buf_pool->stat.n_pages_created, - (ulong) buf_pool->stat.n_pages_written); + (ulint) size, + (ulint) UT_LIST_GET_LEN(buf_pool->LRU), + (ulint) UT_LIST_GET_LEN(buf_pool->free), + (ulint) UT_LIST_GET_LEN(buf_pool->flush_list), + (ulint) buf_pool->n_pend_unzip, + (ulint) buf_pool->n_pend_reads, + (ulint) buf_pool->n_flush[BUF_FLUSH_LRU], + (ulint) buf_pool->n_flush[BUF_FLUSH_LIST], + (ulint) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE], + (ulint) buf_pool->stat.n_pages_made_young, + (ulint) buf_pool->stat.n_pages_not_made_young, + (ulint) buf_pool->stat.n_pages_read, + (ulint) buf_pool->stat.n_pages_created, + (ulint) buf_pool->stat.n_pages_written); buf_flush_list_mutex_exit(buf_pool); @@ -4873,7 +4874,7 @@ buf_print_instance( fprintf(stderr, "Block count for index %llu in buffer is about %lu", (ullint) index_ids[i], - (ulong) counts[i]); + (ulint) counts[i]); if (index) { putc(' ', stderr); @@ -5321,10 +5322,10 @@ buf_print_io_instance( fprintf(file, "Buffer pool hit rate %lu / 1000," " young-making rate %lu / 1000 not %lu / 1000\n", - (ulong) hit_rate, - (ulong) (1000 * pool_info->young_making_delta + (ulint) hit_rate, + (ulint) (1000 * pool_info->young_making_delta / pool_info->n_page_get_delta), - (ulong) (1000 * pool_info->not_young_making_delta + (ulint) (1000 * pool_info->not_young_making_delta / pool_info->n_page_get_delta)); } else { fputs("No buffer pool page gets since the last printout\n", diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 489c690d9f9..40a6fe6545f 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -683,7 +683,7 @@ buf_page_is_corrupted( "InnoDB: " REFMAN "forcing-innodb-recovery.html\n" "InnoDB: for more information.\n", - (ulong) mach_read_from_4( + (ulint) mach_read_from_4( read_buf + FIL_PAGE_OFFSET), (lsn_t) mach_read_from_8( read_buf + FIL_PAGE_LSN), @@ -878,7 +878,7 @@ buf_page_print( ut_print_timestamp(stderr); fprintf(stderr, " InnoDB: Page dump in ascii and hex (%lu bytes):\n", - (ulong) size); + size); ut_print_buf(stderr, read_buf, size); fputs("\nInnoDB: End of page dump\n", stderr); } @@ -2426,9 +2426,9 @@ buf_zip_decompress( } fprintf(stderr, - "InnoDB: unable to decompress space %lu page %lu\n", - (ulong) block->page.space, - 
(ulong) block->page.offset); + "InnoDB: unable to decompress space %u page %u\n", + block->page.space, + block->page.offset); return(FALSE); case FIL_PAGE_TYPE_ALLOCATED: @@ -3735,8 +3735,8 @@ buf_page_init( fprintf(stderr, "InnoDB: Error: page %lu %lu already found" " in the hash table: %p, %p\n", - (ulong) space, - (ulong) offset, + space, + offset, (const void*) hash_page, (const void*) block); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG mutex_exit(&block->mutex); @@ -4102,7 +4102,7 @@ buf_page_create( #ifdef UNIV_DEBUG if (buf_debug_prints) { fprintf(stderr, "Creating space %lu page %lu to buffer\n", - (ulong) space, (ulong) offset); + space, offset); } #endif /* UNIV_DEBUG */ @@ -4412,10 +4412,10 @@ buf_page_io_complete( ut_print_timestamp(stderr); fprintf(stderr, - " InnoDB: Error: reading page %lu\n" + " InnoDB: Error: reading page %u\n" "InnoDB: which is in the" " doublewrite buffer!\n", - (ulong) bpage->offset); + bpage->offset); } else if (!read_space_id && !read_page_no) { /* This is likely an uninitialized page. */ } else if ((bpage->space @@ -4431,10 +4431,11 @@ buf_page_io_complete( " InnoDB: Error: space id and page n:o" " stored in the page\n" "InnoDB: read in are %lu:%lu," - " should be %lu:%lu!\n", - (ulong) read_space_id, (ulong) read_page_no, - (ulong) bpage->space, - (ulong) bpage->offset); + " should be %u:%u!\n", + read_space_id, + read_page_no, + bpage->space, + bpage->offset); } if (UNIV_LIKELY(!bpage->is_corrupt || @@ -4460,19 +4461,19 @@ corrupt: fprintf(stderr, "InnoDB: Database page corruption on disk" " or a failed\n" - "InnoDB: file read of page %lu.\n" + "InnoDB: file read of page %u.\n" "InnoDB: You may have to recover" " from a backup.\n", - (ulong) bpage->offset); + bpage->offset); buf_page_print(frame, buf_page_get_zip_size(bpage), BUF_PAGE_PRINT_NO_CRASH); fprintf(stderr, "InnoDB: Database page corruption on disk" " or a failed\n" - "InnoDB: file read of page %lu.\n" + "InnoDB: file read of page %u.\n" "InnoDB: You may have to recover" " from a backup.\n", - (ulong) bpage->offset); + bpage->offset); fputs("InnoDB: It is also possible that" " your operating\n" "InnoDB: system has corrupted its" @@ -4658,8 +4659,8 @@ retry_mutex: if (buf_debug_prints) { fprintf(stderr, "Has %s page space %lu page no %lu\n", io_type == BUF_IO_READ ? "read" : "written", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); + buf_page_get_space(bpage), + buf_page_get_page_no(bpage)); } #endif /* UNIV_DEBUG */ @@ -4696,17 +4697,17 @@ buf_all_freed_instance( if (UNIV_LIKELY_NULL(block)) { fil_space_t* space = fil_space_get(block->page.space); ib_logf(IB_LOG_LEVEL_ERROR, - "Page %lu %lu still fixed or dirty.", - (ulong) block->page.space, - (ulong) block->page.offset); + "Page %u %u still fixed or dirty.", + block->page.space, + block->page.offset); ib_logf(IB_LOG_LEVEL_ERROR, "Page oldest_modification %lu fix_count %d io_fix %d.", block->page.oldest_modification, block->page.buf_fix_count, buf_page_get_io_fix(&block->page)); ib_logf(IB_LOG_LEVEL_ERROR, - "Page space_id %lu name %s.", - (ulong)block->page.space, + "Page space_id %u name %s.", + block->page.space, (space && space->name) ? 
space->name : "NULL"); ut_error; } @@ -4999,8 +5000,8 @@ buf_pool_validate_instance( if (n_lru + n_free > buf_pool->curr_size + n_zip) { fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n", - (ulong) n_lru, (ulong) n_free, - (ulong) buf_pool->curr_size, (ulong) n_zip); + n_lru, n_free, + buf_pool->curr_size, n_zip); ut_error; } @@ -5010,8 +5011,8 @@ buf_pool_validate_instance( if (UT_LIST_GET_LEN(buf_pool->free) != n_free) { fprintf(stderr, "Free list len %lu, free blocks %lu\n", - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) n_free); + UT_LIST_GET_LEN(buf_pool->free), + n_free); ut_error; } @@ -5092,20 +5093,20 @@ buf_print_instance( "n pending flush LRU %lu list %lu single page %lu\n" "pages made young %lu, not young %lu\n" "pages read %lu, created %lu, written %lu\n", - (ulong) size, - (ulong) UT_LIST_GET_LEN(buf_pool->LRU), - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) UT_LIST_GET_LEN(buf_pool->flush_list), - (ulong) buf_pool->n_pend_unzip, - (ulong) buf_pool->n_pend_reads, - (ulong) buf_pool->n_flush[BUF_FLUSH_LRU], - (ulong) buf_pool->n_flush[BUF_FLUSH_LIST], - (ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE], - (ulong) buf_pool->stat.n_pages_made_young, - (ulong) buf_pool->stat.n_pages_not_made_young, - (ulong) buf_pool->stat.n_pages_read, - (ulong) buf_pool->stat.n_pages_created, - (ulong) buf_pool->stat.n_pages_written); + (ulint) size, + (ulint) UT_LIST_GET_LEN(buf_pool->LRU), + (ulint) UT_LIST_GET_LEN(buf_pool->free), + (ulint) UT_LIST_GET_LEN(buf_pool->flush_list), + (ulint) buf_pool->n_pend_unzip, + (ulint) buf_pool->n_pend_reads, + (ulint) buf_pool->n_flush[BUF_FLUSH_LRU], + (ulint) buf_pool->n_flush[BUF_FLUSH_LIST], + (ulint) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE], + (ulint) buf_pool->stat.n_pages_made_young, + (ulint) buf_pool->stat.n_pages_not_made_young, + (ulint) buf_pool->stat.n_pages_read, + (ulint) buf_pool->stat.n_pages_created, + (ulint) buf_pool->stat.n_pages_written); /* Count the number of blocks belonging to each index in the buffer */ @@ -5156,7 +5157,7 @@ buf_print_instance( fprintf(stderr, "Block count for index %llu in buffer is about %lu", (ullint) index_ids[i], - (ulong) counts[i]); + (ulint) counts[i]); if (index) { putc(' ', stderr); @@ -5610,10 +5611,10 @@ buf_print_io_instance( fprintf(file, "Buffer pool hit rate %lu / 1000," " young-making rate %lu / 1000 not %lu / 1000\n", - (ulong) hit_rate, - (ulong) (1000 * pool_info->young_making_delta + (ulint) hit_rate, + (ulint) (1000 * pool_info->young_making_delta / pool_info->n_page_get_delta), - (ulong) (1000 * pool_info->not_young_making_delta + (ulint) (1000 * pool_info->not_young_making_delta / pool_info->n_page_get_delta)); } else { fputs("No buffer pool page gets since the last printout\n", -- cgit v1.2.1 From 9b23f8054d2f37458901b4505429c30eddc440bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 11 Aug 2016 14:39:47 +0300 Subject: MDEV-10535: ALTER TABLE causes standalone/wsrep cluster crash When checking is any of the renamed columns part of the columns for new indexes we accessed NULL pointer if checked column used on index was added on same statement. Additionally, we tried to check too many indexes, added_index_count is enough here. 
--- .../suite/innodb/r/innodb-alter-table.result | 50 ++++++++++++++++++++++ mysql-test/suite/innodb/t/innodb-alter-table.test | 33 ++++++++++++++ storage/innobase/handler/handler0alter.cc | 6 +-- storage/xtradb/handler/handler0alter.cc | 6 +-- 4 files changed, 89 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/innodb/r/innodb-alter-table.result b/mysql-test/suite/innodb/r/innodb-alter-table.result index 514b8b7935f..c4460a7226b 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-table.result +++ b/mysql-test/suite/innodb/r/innodb-alter-table.result @@ -135,3 +135,53 @@ child CREATE TABLE `child` ( CONSTRAINT `child_ibfk_1` FOREIGN KEY (`c`) REFERENCES `parent` (`a`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE child, parent; +CREATE TABLE IF NOT EXISTS ticket ( +id INT UNSIGNED NOT NULL AUTO_INCREMENT, +mask VARCHAR(16) DEFAULT '' NOT NULL, +subject VARCHAR(255) DEFAULT '' NOT NULL, +is_closed TINYINT(1) UNSIGNED DEFAULT 0 NOT NULL, +is_deleted TINYINT(1) UNSIGNED DEFAULT 0 NOT NULL, +team_id INT UNSIGNED DEFAULT 0 NOT NULL, +category_id INT UNSIGNED DEFAULT 0 NOT NULL, +first_message_id INT UNSIGNED DEFAULT 0 NOT NULL, +created_date INT UNSIGNED, +updated_date INT UNSIGNED, +due_date INT UNSIGNED, +first_wrote_address_id INT UNSIGNED NOT NULL DEFAULT 0, +last_wrote_address_id INT UNSIGNED NOT NULL DEFAULT 0, +spam_score DECIMAL(4,4) NOT NULL DEFAULT 0, +spam_training VARCHAR(1) NOT NULL DEFAULT '', +interesting_words VARCHAR(255) NOT NULL DEFAULT '', +next_action VARCHAR(255) NOT NULL DEFAULT '', +PRIMARY KEY (id) +) ENGINE=InnoDB; +ALTER TABLE ticket +CHANGE COLUMN team_id group_id INT UNSIGNED NOT NULL DEFAULT 0, +CHANGE COLUMN category_id bucket_id INT UNSIGNED NOT NULL DEFAULT 0, +ADD COLUMN org_id INT UNSIGNED NOT NULL DEFAULT 0, +ADD INDEX org_id (org_id); +SHOW CREATE TABLE ticket; +Table Create Table +ticket CREATE TABLE `ticket` ( + `id` int(10) unsigned NOT NULL AUTO_INCREMENT, + `mask` varchar(16) NOT NULL DEFAULT '', + `subject` varchar(255) NOT NULL DEFAULT '', + `is_closed` tinyint(1) unsigned NOT NULL DEFAULT '0', + `is_deleted` tinyint(1) unsigned NOT NULL DEFAULT '0', + `group_id` int(10) unsigned NOT NULL DEFAULT '0', + `bucket_id` int(10) unsigned NOT NULL DEFAULT '0', + `first_message_id` int(10) unsigned NOT NULL DEFAULT '0', + `created_date` int(10) unsigned DEFAULT NULL, + `updated_date` int(10) unsigned DEFAULT NULL, + `due_date` int(10) unsigned DEFAULT NULL, + `first_wrote_address_id` int(10) unsigned NOT NULL DEFAULT '0', + `last_wrote_address_id` int(10) unsigned NOT NULL DEFAULT '0', + `spam_score` decimal(4,4) NOT NULL DEFAULT '0.0000', + `spam_training` varchar(1) NOT NULL DEFAULT '', + `interesting_words` varchar(255) NOT NULL DEFAULT '', + `next_action` varchar(255) NOT NULL DEFAULT '', + `org_id` int(10) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + KEY `org_id` (`org_id`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE ticket; diff --git a/mysql-test/suite/innodb/t/innodb-alter-table.test b/mysql-test/suite/innodb/t/innodb-alter-table.test index 2ad9c8791cb..45342b4a218 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-table.test +++ b/mysql-test/suite/innodb/t/innodb-alter-table.test @@ -138,3 +138,36 @@ SHOW CREATE TABLE child; DROP TABLE child, parent; +# +# MDEV-10535: ALTER TABLE causes standalone/wsrep cluster crash +# +CREATE TABLE IF NOT EXISTS ticket ( + id INT UNSIGNED NOT NULL AUTO_INCREMENT, + mask VARCHAR(16) DEFAULT '' NOT NULL, + subject VARCHAR(255) DEFAULT '' NOT NULL, + is_closed TINYINT(1) UNSIGNED 
DEFAULT 0 NOT NULL, + is_deleted TINYINT(1) UNSIGNED DEFAULT 0 NOT NULL, + team_id INT UNSIGNED DEFAULT 0 NOT NULL, + category_id INT UNSIGNED DEFAULT 0 NOT NULL, + first_message_id INT UNSIGNED DEFAULT 0 NOT NULL, + created_date INT UNSIGNED, + updated_date INT UNSIGNED, + due_date INT UNSIGNED, + first_wrote_address_id INT UNSIGNED NOT NULL DEFAULT 0, + last_wrote_address_id INT UNSIGNED NOT NULL DEFAULT 0, + spam_score DECIMAL(4,4) NOT NULL DEFAULT 0, + spam_training VARCHAR(1) NOT NULL DEFAULT '', + interesting_words VARCHAR(255) NOT NULL DEFAULT '', + next_action VARCHAR(255) NOT NULL DEFAULT '', + PRIMARY KEY (id) +) ENGINE=InnoDB; + +ALTER TABLE ticket + CHANGE COLUMN team_id group_id INT UNSIGNED NOT NULL DEFAULT 0, + CHANGE COLUMN category_id bucket_id INT UNSIGNED NOT NULL DEFAULT 0, + ADD COLUMN org_id INT UNSIGNED NOT NULL DEFAULT 0, + ADD INDEX org_id (org_id); + +SHOW CREATE TABLE ticket; + +DROP TABLE ticket; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index f7900bd167f..753352bb3e1 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -229,7 +229,7 @@ innobase_need_rebuild( & Alter_inplace_info::ADD_INDEX) || (ha_alter_info->handler_flags & Alter_inplace_info::ADD_FOREIGN_KEY))) { - for (ulint i = 0; i < ha_alter_info->key_count; i++) { + for (ulint i = 0; i < ha_alter_info->index_add_count; i++) { const KEY* key = &ha_alter_info->key_info_buffer[ ha_alter_info->index_add_buffer[i]]; @@ -240,13 +240,13 @@ innobase_need_rebuild( /* Field used on added index is renamed on this same alter table. We need table rebuild. */ - if (field->flags & FIELD_IS_RENAMED) { + if (field && field->flags & FIELD_IS_RENAMED) { return (true); } } } } - + return(!!(ha_alter_info->handler_flags & INNOBASE_ALTER_REBUILD)); } diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 17ace78d6a2..94945885ae9 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -229,7 +229,7 @@ innobase_need_rebuild( & Alter_inplace_info::ADD_INDEX) || (ha_alter_info->handler_flags & Alter_inplace_info::ADD_FOREIGN_KEY))) { - for (ulint i = 0; i < ha_alter_info->key_count; i++) { + for (ulint i = 0; i < ha_alter_info->index_add_count; i++) { const KEY* key = &ha_alter_info->key_info_buffer[ ha_alter_info->index_add_buffer[i]]; @@ -240,13 +240,13 @@ innobase_need_rebuild( /* Field used on added index is renamed on this same alter table. We need table rebuild. */ - if (field->flags & FIELD_IS_RENAMED) { + if (field && field->flags & FIELD_IS_RENAMED) { return (true); } } } } - + return(!!(ha_alter_info->handler_flags & INNOBASE_ALTER_REBUILD)); } -- cgit v1.2.1 From df09d5e724af93fea096fd841fad9e93d2615ad5 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 15 Aug 2016 16:28:19 +0200 Subject: MDEV-10559: main.mysql_client_test_nonblock crashes in buildbot on 10.0 Fix tests which possibly fail connects in non_blocking mode. 
--- tests/mysql_client_test.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index fcc89482f4e..640ba298556 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18440,9 +18440,10 @@ static void test_bug58036() /* Part1: try to connect with ucs2 client character set */ conn= mysql_client_init(NULL); mysql_options(conn, MYSQL_SET_CHARSET_NAME, "ucs2"); + if (mysql_real_connect(conn, opt_host, opt_user, opt_password, opt_db ? opt_db : "test", - opt_port, opt_unix_socket, 0)) + opt_port, opt_unix_socket, CLIENT_REMEMBER_OPTIONS)) { if (!opt_silent) printf("mysql_real_connect() succeeded (failure expected)\n"); @@ -18468,7 +18469,7 @@ static void test_bug58036() mysql_options(conn, MYSQL_SET_CHARSET_NAME, "latin1"); if (!mysql_real_connect(conn, opt_host, opt_user, opt_password, opt_db ? opt_db : "test", - opt_port, opt_unix_socket, 0)) + opt_port, opt_unix_socket, CLIENT_REMEMBER_OPTIONS)) { if (!opt_silent) printf("mysql_real_connect() failed: %s (%d)\n", @@ -18490,7 +18491,6 @@ static void test_bug58036() printf("Got mysql_change_user() error (expected): %s (%d)\n", mysql_error(conn), mysql_errno(conn)); mysql_close(conn); - DBUG_VOID_RETURN; } @@ -19344,8 +19344,9 @@ static void test_big_packet() if (!(mysql_real_connect(mysql_local, opt_host, opt_user, opt_password, current_db, opt_port, - opt_unix_socket, 0))) + opt_unix_socket, CLIENT_REMEMBER_OPTIONS))) { + mysql_close(mysql_local); fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); exit(1); } -- cgit v1.2.1 From 48fbb2bf07515425edaf511ac2e17a575ae37713 Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Tue, 16 Aug 2016 12:34:58 +0200 Subject: MDEV-10553: Semi-sync replication hangs when master opens new binlog file In the AFTER_SYNC case, semi-sync was taking the binlog file name from the wrong place, so around binlog rotation it could be using the new name with a position belonging to the previous binlog file name. 
Signed-off-by: Kristian Nielsen --- sql/log.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/log.cc b/sql/log.cc index 7efec982de7..b77a6b32016 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -7712,7 +7712,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) last= current->next == NULL; if (!current->error && RUN_HOOK(binlog_storage, after_sync, - (current->thd, log_file_name, + (current->thd, current->cache_mngr->last_commit_pos_file, current->cache_mngr->last_commit_pos_offset, first, last))) { -- cgit v1.2.1 From 05f61ba46046ca835071a73b9255e787dcce9255 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 16 Aug 2016 21:23:57 +0200 Subject: MDEV-10559: main.mysql_client_test_nonblock crashes in buildbot on 10.0 fix for async operations --- sql-common/client.c | 3 ++- sql-common/mysql_async.c | 6 +++++- tests/mysql_client_test.c | 6 +++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/sql-common/client.c b/sql-common/client.c index 184d7983a00..78c5426367c 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -3644,7 +3644,8 @@ error: /* Free alloced memory */ end_server(mysql); mysql_close_free(mysql); - if (!(client_flag & CLIENT_REMEMBER_OPTIONS)) + if (!(client_flag & CLIENT_REMEMBER_OPTIONS) && + !mysql->options.extension->async_context) mysql_close_free_options(mysql); } DBUG_RETURN(0); diff --git a/sql-common/mysql_async.c b/sql-common/mysql_async.c index 80b4f390641..decf48e0e69 100644 --- a/sql-common/mysql_async.c +++ b/sql-common/mysql_async.c @@ -455,7 +455,11 @@ MK_ASYNC_START_BODY( parms.db= db; parms.port= port; parms.unix_socket= unix_socket; - parms.client_flags= client_flags; + /* + async wrapper enforce the CLIENT_REMEMBER_OPTIONS flag to be + functional (otherwise it can't operate) + */ + parms.client_flags= client_flags | CLIENT_REMEMBER_OPTIONS; }, NULL, r_ptr, diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 640ba298556..2acec60166e 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -18443,7 +18443,7 @@ static void test_bug58036() if (mysql_real_connect(conn, opt_host, opt_user, opt_password, opt_db ? opt_db : "test", - opt_port, opt_unix_socket, CLIENT_REMEMBER_OPTIONS)) + opt_port, opt_unix_socket, 0)) { if (!opt_silent) printf("mysql_real_connect() succeeded (failure expected)\n"); @@ -18469,7 +18469,7 @@ static void test_bug58036() mysql_options(conn, MYSQL_SET_CHARSET_NAME, "latin1"); if (!mysql_real_connect(conn, opt_host, opt_user, opt_password, opt_db ? opt_db : "test", - opt_port, opt_unix_socket, CLIENT_REMEMBER_OPTIONS)) + opt_port, opt_unix_socket, 0)) { if (!opt_silent) printf("mysql_real_connect() failed: %s (%d)\n", @@ -19344,7 +19344,7 @@ static void test_big_packet() if (!(mysql_real_connect(mysql_local, opt_host, opt_user, opt_password, current_db, opt_port, - opt_unix_socket, CLIENT_REMEMBER_OPTIONS))) + opt_unix_socket, 0))) { mysql_close(mysql_local); fprintf(stderr, "\n connection failed(%s)", mysql_error(mysql_local)); -- cgit v1.2.1 From 1e160e5cb387900df8c47e87b9378c6e7df05777 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 17 Aug 2016 13:57:34 +0400 Subject: MDEV-10404 - Improved systemd service hardening causes SELinux problems Disabled NoNewPrivileges until SELinux policy is fixed. 
--- support-files/mariadb.service.in | 3 ++- support-files/mariadb@.service.in | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 879c4d90a6c..6b8b2ba0ba3 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -48,7 +48,8 @@ CapabilityBoundingSet=CAP_IPC_LOCK # Prevent writes to /usr, /boot, and /etc ProtectSystem=full -NoNewPrivileges=true +# Doesn't yet work properly with SELinux enabled +# NoNewPrivileges=true PrivateDevices=true diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index b7ac3b808bf..965e85260e4 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -55,7 +55,8 @@ CapabilityBoundingSet=CAP_IPC_LOCK # Prevent writes to /usr, /boot, and /etc ProtectSystem=full -NoNewPrivileges=true +# Doesn't yet work properly with SELinux enabled +# NoNewPrivileges=true PrivateDevices=true -- cgit v1.2.1 From 8d5a0d650b123e963d5fead2424783a9b52da395 Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 21 Aug 2016 20:14:13 +0300 Subject: Cleanups and minor fixes - Fixed typos - Added --core-on-failure to mysql-test-run - More DBUG_PRINT in viosocket.c - Don't forget CLIENT_REMEMBER_OPTIONS for compressed slave protocol - Removed not used stage variables --- mysql-test/lib/My/SafeProcess.pm | 2 +- mysql-test/mysql-test-run.pl | 16 ++++++++++++++-- sql/mysqld.cc | 10 ---------- sql/mysqld.h | 5 ----- sql/slave.cc | 2 +- sql/sql_class.h | 4 ++-- vio/viosocket.c | 26 ++++++++++++++++++++++++-- 7 files changed, 42 insertions(+), 23 deletions(-) diff --git a/mysql-test/lib/My/SafeProcess.pm b/mysql-test/lib/My/SafeProcess.pm index a9b4f9a4ecc..f3ee772cca3 100644 --- a/mysql-test/lib/My/SafeProcess.pm +++ b/mysql-test/lib/My/SafeProcess.pm @@ -338,7 +338,7 @@ sub dump_core { my ($self)= @_; return if IS_WINDOWS; my $pid= $self->{SAFE_PID}; - die "Can't cet core from not started process" unless defined $pid; + die "Can't get core from not started process" unless defined $pid; _verbose("Sending ABRT to $self"); kill ("ABRT", $pid); return 1; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 5af19c8b9b6..07bf941de05 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -356,6 +356,7 @@ my $source_dist= -d "../sql"; my $opt_max_save_core= env_or_val(MTR_MAX_SAVE_CORE => 5); my $opt_max_save_datadir= env_or_val(MTR_MAX_SAVE_DATADIR => 20); my $opt_max_test_fail= env_or_val(MTR_MAX_TEST_FAIL => 10); +my $opt_core_on_failure= 0; my $opt_parallel= $ENV{MTR_PARALLEL} || 1; @@ -1165,6 +1166,7 @@ sub command_line_setup { 'max-save-core=i' => \$opt_max_save_core, 'max-save-datadir=i' => \$opt_max_save_datadir, 'max-test-fail=i' => \$opt_max_test_fail, + 'core-on-failure' => \$opt_core_on_failure, # Coverage, profiling etc 'gcov' => \$opt_gcov, @@ -4565,7 +4567,7 @@ sub run_testcase ($$) { } # Try to dump core for mysqltest and all servers - foreach my $proc ($test, started(all_servers())) + foreach my $proc ($test, started(all_servers())) { mtr_print("Trying to dump core for $proc"); if ($proc->dump_core()) @@ -5231,7 +5233,9 @@ sub after_failure ($) { sub report_failure_and_restart ($) { my $tinfo= shift; - if ($opt_valgrind_mysqld && ($tinfo->{'warnings'} || $tinfo->{'timeout'})) { + if ($opt_valgrind_mysqld && ($tinfo->{'warnings'} || $tinfo->{'timeout'}) && + $opt_core_on_failure == 0) + { # In these cases we may want valgrind report from normal termination 
$tinfo->{'dont_kill_server'}= 1; } @@ -5891,6 +5895,13 @@ sub start_mysqltest ($) { mtr_add_arg($args, "--sleep=%d", $opt_sleep); } + if ( $opt_valgrind_mysqld ) + { + # We are running server under valgrind, which causes some replication + # test to be much slower, notable rpl_mdev6020. Increase timeout. + mtr_add_arg($args, "--wait-for-pos-timeout=1500"); + } + if ( $opt_ssl ) { # Turn on SSL for _all_ test cases if option --ssl was used @@ -6533,6 +6544,7 @@ Options for debugging the product the current test run. Defaults to $opt_max_test_fail, set to 0 for no limit. Set it's default with MTR_MAX_TEST_FAIL + core-in-failure Generate a core even if run server is run with valgrind Options for valgrind diff --git a/sql/mysqld.cc b/sql/mysqld.cc index dd4b3953256..a1c13505304 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -9539,11 +9539,6 @@ PSI_stage_info stage_waiting_for_the_next_event_in_relay_log= { 0, "Waiting for PSI_stage_info stage_waiting_for_the_slave_thread_to_advance_position= { 0, "Waiting for the slave SQL thread to advance position", 0}; PSI_stage_info stage_waiting_to_finalize_termination= { 0, "Waiting to finalize termination", 0}; PSI_stage_info stage_waiting_to_get_readlock= { 0, "Waiting to get readlock", 0}; -PSI_stage_info stage_slave_waiting_workers_to_exit= { 0, "Waiting for workers to exit", 0}; -PSI_stage_info stage_slave_waiting_worker_to_release_partition= { 0, "Waiting for Slave Worker to release partition", 0}; -PSI_stage_info stage_slave_waiting_worker_to_free_events= { 0, "Waiting for Slave Workers to free pending events", 0}; -PSI_stage_info stage_slave_waiting_worker_queue= { 0, "Waiting for Slave Worker queue", 0}; -PSI_stage_info stage_slave_waiting_event_from_coordinator= { 0, "Waiting for an event from Coordinator", 0}; PSI_stage_info stage_binlog_waiting_background_tasks= { 0, "Waiting for background binlog tasks", 0}; PSI_stage_info stage_binlog_processing_checkpoint_notify= { 0, "Processing binlog checkpoint notification", 0}; PSI_stage_info stage_binlog_stopping_background_thread= { 0, "Stopping binlog background thread", 0}; @@ -9637,11 +9632,6 @@ PSI_stage_info *all_server_stages[]= & stage_setup, & stage_show_explain, & stage_slave_has_read_all_relay_log, - & stage_slave_waiting_event_from_coordinator, - & stage_slave_waiting_worker_queue, - & stage_slave_waiting_worker_to_free_events, - & stage_slave_waiting_worker_to_release_partition, - & stage_slave_waiting_workers_to_exit, & stage_sorting, & stage_sorting_for_group, & stage_sorting_for_order, diff --git a/sql/mysqld.h b/sql/mysqld.h index dbc65cd2a43..732f00c887a 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -443,11 +443,6 @@ extern PSI_stage_info stage_waiting_for_the_next_event_in_relay_log; extern PSI_stage_info stage_waiting_for_the_slave_thread_to_advance_position; extern PSI_stage_info stage_waiting_to_finalize_termination; extern PSI_stage_info stage_waiting_to_get_readlock; -extern PSI_stage_info stage_slave_waiting_worker_to_release_partition; -extern PSI_stage_info stage_slave_waiting_worker_to_free_events; -extern PSI_stage_info stage_slave_waiting_worker_queue; -extern PSI_stage_info stage_slave_waiting_event_from_coordinator; -extern PSI_stage_info stage_slave_waiting_workers_to_exit; extern PSI_stage_info stage_binlog_waiting_background_tasks; extern PSI_stage_info stage_binlog_processing_checkpoint_notify; extern PSI_stage_info stage_binlog_stopping_background_thread; diff --git a/sql/slave.cc b/sql/slave.cc index a309fc5cdc5..d8ec946ad16 100644 --- a/sql/slave.cc +++ 
b/sql/slave.cc @@ -6063,7 +6063,7 @@ static int connect_to_master(THD* thd, MYSQL* mysql, Master_info* mi, #endif ulong client_flag= CLIENT_REMEMBER_OPTIONS; if (opt_slave_compressed_protocol) - client_flag=CLIENT_COMPRESS; /* We will use compression */ + client_flag|= CLIENT_COMPRESS; /* We will use compression */ mysql_options(mysql, MYSQL_OPT_CONNECT_TIMEOUT, (char *) &slave_net_timeout); mysql_options(mysql, MYSQL_OPT_READ_TIMEOUT, (char *) &slave_net_timeout); diff --git a/sql/sql_class.h b/sql/sql_class.h index 9da3387e75f..8820205e8a2 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2981,12 +2981,12 @@ public: set_start_time(); start_utime= utime_after_lock= microsecond_interval_timer(); } - inline void set_time(my_hrtime_t t) + inline void set_time(my_hrtime_t t) { user_time= t; set_time(); } - inline void set_time(my_time_t t, ulong sec_part) + inline void set_time(my_time_t t, ulong sec_part) { my_hrtime_t hrtime= { hrtime_from_time(t) + sec_part }; set_time(hrtime); diff --git a/vio/viosocket.c b/vio/viosocket.c index e724165612c..e11460dd4b2 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -190,6 +190,12 @@ size_t vio_read(Vio *vio, uchar *buf, size_t size) { DBUG_PRINT("vio_error", ("Got error %d during read", errno)); } +#ifndef DEBUG_DATA_PACKETS + else + { + DBUG_DUMP("read_data", buf, ret); + } +#endif /* DEBUG_DATA_PACKETS */ #endif /* DBUG_OFF */ DBUG_PRINT("exit", ("%d", (int) ret)); DBUG_RETURN(ret); @@ -416,6 +422,13 @@ int vio_blocking(Vio *vio, my_bool set_blocking_mode, my_bool *old_mode) DBUG_RETURN(r); } +/* + Check if vio is blocking + + @retval 0 is not blocking + @retval 1 is blocking +*/ + my_bool vio_is_blocking(Vio * vio) { @@ -570,7 +583,9 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive) my_bool vio_should_retry(Vio *vio) { - return (vio_errno(vio) == SOCKET_EINTR); + DBUG_ENTER("vio_should_retry"); + DBUG_PRINT("info", ("vio_errno: %d", vio_errno(vio))); + DBUG_RETURN(vio_errno(vio) == SOCKET_EINTR); } @@ -595,8 +610,9 @@ int vio_close(Vio *vio) { int r=0; DBUG_ENTER("vio_close"); + DBUG_PRINT("enter", ("sd: %d", mysql_socket_getfd(vio->mysql_socket))); - if (vio->type != VIO_CLOSED) + if (vio->type != VIO_CLOSED) { DBUG_ASSERT(vio->type == VIO_TYPE_TCPIP || vio->type == VIO_TYPE_SOCKET || @@ -927,6 +943,7 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) my_socket sd= mysql_socket_getfd(vio->mysql_socket); MYSQL_SOCKET_WAIT_VARIABLES(locker, state) /* no ';' */ DBUG_ENTER("vio_io_wait"); + DBUG_PRINT("enter", ("timeout: %d", timeout)); /* Note that if zero timeout, then we will not block, so we do not need to @@ -938,7 +955,10 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) PSI_SOCKET_SELECT, timeout); ret= my_io_wait_async(vio->async_context, event, timeout); if (ret == 0) + { + DBUG_PRINT("info", ("timeout")); errno= SOCKET_ETIMEDOUT; + } END_SOCKET_WAIT(locker,timeout); DBUG_RETURN(ret); } @@ -972,6 +992,7 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) switch ((ret= poll(&pfd, 1, timeout))) { case -1: + DBUG_PRINT("error", ("poll returned -1")); /* On error, -1 is returned. */ break; case 0: @@ -979,6 +1000,7 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) Set errno to indicate a timeout error. (This is not compiled in on WIN32.) 
*/ + DBUG_PRINT("info", ("poll timeout")); errno= SOCKET_ETIMEDOUT; break; default: -- cgit v1.2.1 From 6f31dd093a245a21a69fd990f947611a5dcfb77b Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 21 Aug 2016 20:18:39 +0300 Subject: Added new status variables to make it easier to debug certain problems: - Handler_read_retry - Update_scan - Delete_scan --- mysql-test/r/create.result | 1 + mysql-test/r/derived_view.result | 10 ++ mysql-test/r/group_min_max.result | 8 + mysql-test/r/handler_read_last.result | 4 + mysql-test/r/innodb_ext_key.result | 28 +++ mysql-test/r/insert_select.result | 1 + mysql-test/r/join.result | 2 + mysql-test/r/join_outer.result | 3 + mysql-test/r/join_outer_jcl6.result | 3 + mysql-test/r/limit_rows_examined.result | 2 + mysql-test/r/null_key.result | 1 + mysql-test/r/order_by.result | 2 + mysql-test/r/partition.result | 6 + mysql-test/r/partition_explicit_prune.result | 190 ++++++++++----------- mysql-test/r/ps.result | 4 + mysql-test/r/range_vs_index_merge.result | 3 + mysql-test/r/range_vs_index_merge_innodb.result | 3 + mysql-test/r/select.result | 1 + mysql-test/r/select_jcl6.result | 1 + mysql-test/r/select_pkeycache.result | 1 + mysql-test/r/single_delete_update.result | 42 +++++ mysql-test/r/sp.result | 2 + mysql-test/r/status.result | 4 +- mysql-test/r/status_user.result | 1 + mysql-test/r/subselect.result | 2 + mysql-test/r/subselect3.result | 1 + mysql-test/r/subselect3_jcl6.result | 1 + mysql-test/r/subselect_cache.result | 30 ++++ mysql-test/r/subselect_mat.result | 1 + mysql-test/r/subselect_no_exists_to_in.result | 2 + mysql-test/r/subselect_no_mat.result | 2 + mysql-test/r/subselect_no_opts.result | 2 + mysql-test/r/subselect_no_scache.result | 2 + mysql-test/r/subselect_no_semijoin.result | 2 + mysql-test/r/subselect_sj_mat.result | 1 + mysql-test/r/update.result | 11 ++ mysql-test/r/view.result | 2 + sql/handler.cc | 9 + sql/handler.h | 1 + sql/multi_range_read.cc | 2 +- sql/mysqld.cc | 3 + sql/sql_class.h | 4 + sql/sql_delete.cc | 3 + sql/sql_update.cc | 8 +- .../mysql-test/tokudb/r/ext_key_1_innodb.result | 6 + .../mysql-test/tokudb/r/ext_key_1_tokudb.result | 6 + .../mysql-test/tokudb/r/ext_key_2_innodb.result | 2 + .../mysql-test/tokudb/r/ext_key_2_tokudb.result | 2 + 48 files changed, 329 insertions(+), 99 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 372e2baa02b..20120bf7663 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -1721,6 +1721,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 7 diff --git a/mysql-test/r/derived_view.result b/mysql-test/r/derived_view.result index fa0a69a487d..a3b1e37a403 100644 --- a/mysql-test/r/derived_view.result +++ b/mysql-test/r/derived_view.result @@ -79,6 +79,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -93,6 +94,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 12 @@ -166,6 +168,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -180,6 +183,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 
0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 12 @@ -232,6 +236,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -248,6 +253,7 @@ Handler_read_key 11 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 11 Handler_read_rnd_deleted 0 Handler_read_rnd_next 36 @@ -323,6 +329,7 @@ Handler_read_key 22 Handler_read_last 0 Handler_read_next 22 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 60 @@ -340,6 +347,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -356,6 +364,7 @@ Handler_read_key 11 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 11 Handler_read_rnd_deleted 0 Handler_read_rnd_next 36 @@ -451,6 +460,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 39 diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result index 9421ea9e740..3fd87425e23 100644 --- a/mysql-test/r/group_min_max.result +++ b/mysql-test/r/group_min_max.result @@ -2378,6 +2378,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 EXPLAIN SELECT max(b), a FROM t1 GROUP BY a; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range NULL a 5 NULL 8 Using index for group-by @@ -2387,6 +2388,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 FLUSH STATUS; SELECT * FROM (SELECT max(b), a FROM t1 GROUP BY a) b; max(b) a @@ -2398,6 +2400,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 FLUSH STATUS; (SELECT max(b), a FROM t1 GROUP BY a) UNION (SELECT max(b), a FROM t1 GROUP BY a); @@ -2410,6 +2413,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 16 Handler_read_next 0 +Handler_read_retry 0 EXPLAIN (SELECT max(b), a FROM t1 GROUP BY a) UNION (SELECT max(b), a FROM t1 GROUP BY a); id select_type table type possible_keys key key_len ref rows Extra @@ -2462,6 +2466,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 DELETE FROM t3; FLUSH STATUS; INSERT INTO t3 SELECT 1, (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2) @@ -2470,12 +2475,14 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 FLUSH STATUS; DELETE FROM t3 WHERE (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2) > 10000; SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 0 +Handler_read_retry 0 FLUSH STATUS; DELETE FROM t3 WHERE (SELECT (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2) x FROM t1) > 10000; @@ -2484,6 +2491,7 @@ SHOW STATUS LIKE 'handler_read__e%'; Variable_name Value Handler_read_key 8 Handler_read_next 1 +Handler_read_retry 0 DROP TABLE t1,t2,t3; CREATE TABLE t1 (a int, INDEX idx(a)); INSERT INTO t1 VALUES diff --git a/mysql-test/r/handler_read_last.result b/mysql-test/r/handler_read_last.result index 574c3c25ab1..9dd3784673a 100644 
--- a/mysql-test/r/handler_read_last.result +++ b/mysql-test/r/handler_read_last.result @@ -12,6 +12,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -26,6 +27,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -42,6 +44,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -58,6 +61,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 2 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/innodb_ext_key.result b/mysql-test/r/innodb_ext_key.result index 9140f306f77..2b3b98eb26a 100644 --- a/mysql-test/r/innodb_ext_key.result +++ b/mysql-test/r/innodb_ext_key.result @@ -20,6 +20,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -39,6 +40,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 1 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -60,6 +62,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -81,6 +84,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -102,6 +106,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 6 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -123,6 +128,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 1 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -146,6 +152,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 6 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -169,6 +176,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -188,6 +196,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 6 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -207,6 +216,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -228,6 +238,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 6 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -249,6 +260,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -270,6 +282,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -291,6 +304,7 @@ 
Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -317,6 +331,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 9 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 9 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -343,6 +358,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 2 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -370,6 +386,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 9 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 9 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -397,6 +414,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 3 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -422,6 +440,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 9 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 3 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -447,6 +466,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 3 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -477,6 +497,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 294 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -507,6 +528,7 @@ Handler_read_key 21 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -529,6 +551,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 1230 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -551,6 +574,7 @@ Handler_read_key 6 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -581,6 +605,7 @@ Handler_read_key 3 Handler_read_last 0 Handler_read_next 26 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -610,6 +635,7 @@ Handler_read_key 3 Handler_read_last 0 Handler_read_next 3 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -807,6 +833,7 @@ Handler_read_key 10 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 11 @@ -828,6 +855,7 @@ Handler_read_key 10 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 11 diff --git a/mysql-test/r/insert_select.result b/mysql-test/r/insert_select.result index 8bfc4e9215e..3f4a26a728e 100644 --- a/mysql-test/r/insert_select.result +++ b/mysql-test/r/insert_select.result @@ -701,6 +701,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1 diff --git a/mysql-test/r/join.result b/mysql-test/r/join.result index e7292e8ddce..d500b38a8dc 100644 --- a/mysql-test/r/join.result +++ b/mysql-test/r/join.result @@ -860,6 +860,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 
Handler_read_rnd_deleted 0 Handler_read_rnd_next 5 @@ -1271,6 +1272,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1 diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index 40abc197a36..ac838997e41 100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ -1244,6 +1244,7 @@ Handler_read_key 5 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6 @@ -1805,6 +1806,7 @@ Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1048581 @@ -1819,6 +1821,7 @@ Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1048581 diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result index 81395612269..38518c45eae 100644 --- a/mysql-test/r/join_outer_jcl6.result +++ b/mysql-test/r/join_outer_jcl6.result @@ -1255,6 +1255,7 @@ Handler_read_key 5 Handler_read_last 0 Handler_read_next 9 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 3 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6 @@ -1816,6 +1817,7 @@ Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1048581 @@ -1830,6 +1832,7 @@ Handler_read_key 4 Handler_read_last 0 Handler_read_next 5 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 1048581 diff --git a/mysql-test/r/limit_rows_examined.result b/mysql-test/r/limit_rows_examined.result index 130d17ae270..1f829d545f6 100644 --- a/mysql-test/r/limit_rows_examined.result +++ b/mysql-test/r/limit_rows_examined.result @@ -751,6 +751,7 @@ Handler_read_key 5 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 46 @@ -775,6 +776,7 @@ Handler_read_key 5 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 2 Handler_read_rnd_deleted 1 Handler_read_rnd_next 47 diff --git a/mysql-test/r/null_key.result b/mysql-test/r/null_key.result index e80851329a4..81bfa1ac994 100644 --- a/mysql-test/r/null_key.result +++ b/mysql-test/r/null_key.result @@ -429,6 +429,7 @@ Handler_read_key 6 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 5 diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result index 294142737d9..9c63570561d 100644 --- a/mysql-test/r/order_by.result +++ b/mysql-test/r/order_by.result @@ -2862,6 +2862,7 @@ Handler_read_key 250 Handler_read_last 0 Handler_read_next 249 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 249 Handler_read_rnd_deleted 0 Handler_read_rnd_next 250 @@ -2880,6 +2881,7 @@ Handler_read_key 250 Handler_read_last 0 Handler_read_next 249 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 250 diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 233494238a5..09b22761bcb 100644 --- a/mysql-test/r/partition.result +++ 
b/mysql-test/r/partition.result @@ -562,6 +562,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -582,6 +583,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -614,6 +616,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -632,6 +635,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -650,6 +654,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -668,6 +673,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/partition_explicit_prune.result b/mysql-test/r/partition_explicit_prune.result index 765803d6332..3ca1e688e8f 100644 --- a/mysql-test/r/partition_explicit_prune.result +++ b/mysql-test/r/partition_explicit_prune.result @@ -19,7 +19,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 3 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; SELECT a FROM t1 PARTITION (p0); @@ -31,7 +31,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 3 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; INSERT INTO v1 VALUES (10); @@ -39,14 +39,14 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 2 locks (1 table, all partitions pruned) FLUSH STATUS; SELECT * FROM v1; @@ -59,7 +59,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 4 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; SELECT a FROM t1 PARTITION (p0); @@ -72,7 +72,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 4 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 locks (1 table, 1 partition lock/unlock) SELECT * FROM t1; a @@ -89,14 +89,14 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 
23 # 2 locks (1 table, all partitions pruned) SELECT * FROM v1; a @@ -121,7 +121,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # 4 locks (1 table, 1 partition lock/unlock) FLUSH STATUS; @@ -131,7 +131,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 2 locks (1 table, all partitions pruned) FLUSH STATUS; INSERT INTO v1 VALUES (32); @@ -140,7 +140,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 locks (1 table, 1 partition lock/unlock) SELECT * FROM v1; a @@ -166,12 +166,12 @@ FLUSH STATUS; SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 # OK, seems to add number of variables processed before HANDLER_WRITE # and number of variables + 1 evaluated in the previous call in RND_NEXT CREATE TABLE t1 @@ -233,7 +233,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # Should be 1 commit # 4 external locks (due to pruning of locks) @@ -250,7 +250,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # Should be 1 commit # 4 external locks @@ -262,7 +262,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # should be 1 commit # 9 locks (1 ha_partition + 8 ha_innobase) # 17 writes (internal I_S) @@ -271,8 +271,8 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 HANDLER_WRITE 2 # + 1 commit # + 19 rnd next (internal I_S) @@ -282,8 +282,8 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 -HANDLER_READ_RND_NEXT 52 -HANDLER_TMP_WRITE 72 +HANDLER_READ_RND_NEXT 54 +HANDLER_TMP_WRITE 75 HANDLER_WRITE 2 # + 9 locks (unlocks) # + 19 rnd next (internal I_S) @@ -338,7 +338,7 @@ ERROR HY000: Unknown partition 'pNonexistent' in table 't1' SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # should have failed before locking (only 17 internal I_S writes) FLUSH STATUS; SELECT * FROM t1 PARTITION (subp2); @@ -348,7 +348,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' 
AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Should be 1 commit # 4 locks (1 ha_partition + 1 ha_innobase) x 2 (lock/unlock) # 1 read first (also calls index_read) @@ -366,7 +366,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 3 HANDLER_READ_NEXT 3 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Should be 1 commit # 8 locks (1 ha_partition + 2 + 1 ha_innobase) x 2 # 3 read first (one for each partition) @@ -379,7 +379,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 1 commit # 18 locks # 18 READ KEY from opening a new partition table instance, @@ -398,8 +398,8 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 HANDLER_READ_FIRST 1 HANDLER_READ_NEXT 3 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 # + 1 commit # + 1 read first (read first key from index in one partition) # + 2 read key (innobase_get_index from index_init + from index_first) @@ -415,8 +415,8 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 3 HANDLER_READ_FIRST 3 HANDLER_READ_NEXT 4 -HANDLER_READ_RND_NEXT 52 -HANDLER_TMP_WRITE 72 +HANDLER_READ_RND_NEXT 54 +HANDLER_TMP_WRITE 75 # + 1 commit # + 2 read first (one for each subpart) # + 4 read key (innobase_get_index from index_init + from index_first) @@ -431,8 +431,8 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 4 HANDLER_READ_FIRST 3 HANDLER_READ_NEXT 4 -HANDLER_READ_RND_NEXT 78 -HANDLER_TMP_WRITE 97 +HANDLER_READ_RND_NEXT 81 +HANDLER_TMP_WRITE 101 # No matching partition, only internal I_S. SELECT * FROM t1 PARTITION (pNeg) WHERE a = 100; a b @@ -443,8 +443,8 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 5 HANDLER_READ_FIRST 3 HANDLER_READ_NEXT 4 -HANDLER_READ_RND_NEXT 104 -HANDLER_TMP_WRITE 122 +HANDLER_READ_RND_NEXT 108 +HANDLER_TMP_WRITE 127 # + 18 for unlock (same as lock above) (100 is not in pNeg, no match) # Test that EXPLAIN PARTITION works EXPLAIN PARTITIONS SELECT * FROM t1 PARTITION (pNonexistent); @@ -460,7 +460,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 8 locks (1 ha_partition + 3 ha_innobase) x 2 (lock/unlock) EXPLAIN PARTITIONS SELECT * FROM t1 PARTITION (subp3) AS TableAlias; id select_type table partitions type possible_keys key key_len ref rows Extra @@ -513,7 +513,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 1 # 2 locks (1 ha_partition) x 2 (lock/unlock), Was 4 locks before WL4443 # explicit pruning says part_id 0 and implicit pruning says part_id 1 @@ -526,7 +526,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 2 # 1 commit # 4 locks (1 ha_partition + 1 ha_innobase) x 2 (lock/unlock) @@ -544,7 +544,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_KEY 2 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 2 HANDLER_WRITE 2 # 1 commit @@ -566,7 +566,7 @@ SELECT * FROM 
INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 1 commit # 9 locks # 17 write (internal I_S) @@ -578,8 +578,8 @@ HANDLER_COMMIT 2 HANDLER_DELETE 2 HANDLER_READ_KEY 1 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 # + 1 commit # + 1 delete (one row deleted) # + 3 read key (1 innodb_get_index in records_in_range, @@ -596,9 +596,9 @@ HANDLER_COMMIT 2 HANDLER_DELETE 2 HANDLER_READ_KEY 1 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 52 +HANDLER_READ_RND_NEXT 54 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 72 +HANDLER_TMP_WRITE 75 HANDLER_WRITE 1 # Failed before start_stmt/execution. # + 19 rnd next (internal I_S) @@ -612,9 +612,9 @@ HANDLER_COMMIT 3 HANDLER_DELETE 2 HANDLER_READ_KEY 1 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 78 +HANDLER_READ_RND_NEXT 81 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 97 +HANDLER_TMP_WRITE 101 HANDLER_WRITE 3 # + 1 commit # + 19 rnd next (internal I_S) @@ -627,9 +627,9 @@ HANDLER_COMMIT 4 HANDLER_DELETE 2 HANDLER_READ_KEY 3 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 104 +HANDLER_READ_RND_NEXT 108 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 122 +HANDLER_TMP_WRITE 127 HANDLER_UPDATE 2 HANDLER_WRITE 5 # + 1 commit @@ -650,9 +650,9 @@ HANDLER_DELETE 2 HANDLER_READ_FIRST 1 HANDLER_READ_KEY 3 HANDLER_READ_NEXT 4 -HANDLER_READ_RND_NEXT 130 +HANDLER_READ_RND_NEXT 135 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 147 +HANDLER_TMP_WRITE 153 HANDLER_UPDATE 2 HANDLER_WRITE 5 # + 1 commit @@ -670,9 +670,9 @@ HANDLER_DELETE 2 HANDLER_READ_FIRST 1 HANDLER_READ_KEY 3 HANDLER_READ_NEXT 4 -HANDLER_READ_RND_NEXT 156 +HANDLER_READ_RND_NEXT 162 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 172 +HANDLER_TMP_WRITE 179 HANDLER_UPDATE 2 HANDLER_WRITE 5 # + 9 locks @@ -696,7 +696,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 4 HANDLER_READ_NEXT 5 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 1 commit # 10 locks (1 ha_partition + 4 ha_innobase) x 2 (lock/unlock) # 4 read first (for reading the first row in 4 partitions) @@ -709,7 +709,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 10 locks (table + 4 partition) x (lock + unlock) SELECT * FROM t1 PARTITION (pNeg, `p10-99`); a b @@ -720,7 +720,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 3 # 6 locks (1 ha_partition + 2 ha_innobase) x 2 (lock+unlock) # 1 rollback @@ -732,7 +732,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 10 # 10 lock (1 ha_partition + 4 ha_innobase) x 2 (lock + unlock) ALTER TABLE t1 TRUNCATE PARTITION pNeg, `p10-99`; @@ -742,7 +742,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 9 locks # 18 read key (ALTER forces table to be closed, see above for open) LOAD DATA INFILE 'loadtest.txt' INTO TABLE t1 PARTITION (pNeg, `p10-99`); @@ -750,8 +750,8 @@ SELECT * FROM 
INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 HANDLER_WRITE 10 # + 23 write (18 internal I_S + 5 rows) UNLOCK TABLES; @@ -759,8 +759,8 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 -HANDLER_READ_RND_NEXT 52 -HANDLER_TMP_WRITE 72 +HANDLER_READ_RND_NEXT 54 +HANDLER_TMP_WRITE 75 HANDLER_WRITE 10 # + 9 locks # @@ -773,7 +773,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 2 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 2 # 1 commit # 4 lock (1 ha_partition + 1 ha_innobase) x 2 (lock + unlock) @@ -791,7 +791,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_KEY 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 2 # 1 commit # 4 lock @@ -806,7 +806,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_KEY 1 HANDLER_READ_RND 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 2 # 1 commit # 4 lock @@ -819,7 +819,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Nothing, since impossible PARTITION+WHERE clause. FLUSH STATUS; UPDATE t1 PARTITION(subp0) SET a = -2, b = concat(b, ', Updated from a = 100') @@ -828,7 +828,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Nothing, since impossible PARTITION+WHERE clause. 
FLUSH STATUS; UPDATE t1 PARTITION(`p100-99999`) SET a = -2, b = concat(b, ', Updated from a = 100') @@ -840,7 +840,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_READ_KEY 1 HANDLER_READ_RND 1 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 1 # 6 lock # 4 read key (1 index init + 1 index read + 1 rnd init + 1 rnd pos) @@ -856,7 +856,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_READ_KEY 1 HANDLER_READ_RND 1 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 1 HANDLER_WRITE 1 # 10 locks @@ -874,7 +874,7 @@ HANDLER_COMMIT 1 HANDLER_DELETE 1 HANDLER_READ_KEY 1 HANDLER_READ_RND 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_UPDATE 1 HANDLER_WRITE 1 # 1 commit @@ -902,7 +902,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Nothing (no matching partition found) FLUSH STATUS; UPDATE t1 PARTITION (pNeg) SET b = concat(b, ', Updated2') WHERE a = 1000000; @@ -910,7 +910,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Nothing (no matching partition found) FLUSH STATUS; LOCK TABLE t1 WRITE; @@ -918,7 +918,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 9 locks UPDATE t1 PARTITION (subp7) SET b = concat(b, ', Updated to 103'), a = 103 WHERE a = 101; SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS @@ -927,8 +927,8 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 HANDLER_READ_KEY 1 HANDLER_READ_RND 1 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 HANDLER_UPDATE 2 # + 4 read key # + 1 read rnd @@ -941,8 +941,8 @@ HANDLER_COMMIT 3 HANDLER_DELETE 1 HANDLER_READ_KEY 2 HANDLER_READ_RND 2 -HANDLER_READ_RND_NEXT 52 -HANDLER_TMP_WRITE 72 +HANDLER_READ_RND_NEXT 54 +HANDLER_TMP_WRITE 75 HANDLER_UPDATE 3 HANDLER_WRITE 1 # + 1 delete @@ -957,8 +957,8 @@ HANDLER_COMMIT 3 HANDLER_DELETE 1 HANDLER_READ_KEY 2 HANDLER_READ_RND 2 -HANDLER_READ_RND_NEXT 78 -HANDLER_TMP_WRITE 97 +HANDLER_READ_RND_NEXT 81 +HANDLER_TMP_WRITE 101 HANDLER_UPDATE 3 HANDLER_WRITE 1 + 9 locks @@ -986,7 +986,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_DELETE 2 HANDLER_READ_KEY 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 1 delete # 4 locks (pruning works!). 
# 1 read key (index read) @@ -998,7 +998,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_DELETE 2 HANDLER_READ_RND_NEXT 3 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 1 delete # 4 locks # 1 read first @@ -1010,7 +1010,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 9 locks DELETE FROM t1 PARTITION (subp1) WHERE b = 'p0-9:subp3'; SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS @@ -1018,8 +1018,8 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 2 HANDLER_READ_KEY 1 -HANDLER_READ_RND_NEXT 26 -HANDLER_TMP_WRITE 47 +HANDLER_READ_RND_NEXT 27 +HANDLER_TMP_WRITE 49 # + 3 read key (1 innodb_get_index in records_in_range # + 1 innobase_get_index in index_init + 1 index read) DELETE FROM t1 PARTITION (`p0-9`) WHERE b = 'p0-9:subp3'; @@ -1030,8 +1030,8 @@ HANDLER_COMMIT 3 HANDLER_DELETE 2 HANDLER_READ_KEY 3 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 52 -HANDLER_TMP_WRITE 72 +HANDLER_READ_RND_NEXT 54 +HANDLER_TMP_WRITE 75 # + 1 delete # + 6 read key (same as above, but for two subpartitions) # + 1 read next (read next after found row) @@ -1043,8 +1043,8 @@ HANDLER_COMMIT 3 HANDLER_DELETE 2 HANDLER_READ_KEY 3 HANDLER_READ_NEXT 1 -HANDLER_READ_RND_NEXT 78 -HANDLER_TMP_WRITE 97 +HANDLER_READ_RND_NEXT 81 +HANDLER_TMP_WRITE 101 # + 9 locks # Test multi-table DELETE # Can be expressed in two different ways. @@ -1057,7 +1057,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 5 HANDLER_READ_NEXT 5 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 10 # 24 locks (2 table, 5 + 5 subpartitions lock/unlock) FLUSH STATUS; @@ -1066,7 +1066,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 14 locks (1 table, 6 subpartitions lock/unlock) FLUSH STATUS; INSERT INTO t2 PARTITION (subp3) SELECT * FROM t1 PARTITION (subp3, `p10-99`, `p100-99999`); @@ -1076,7 +1076,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_READ_FIRST 5 HANDLER_ROLLBACK 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 1 # 16 locks (2 tables, 1 + 5 subpartitions lock/unlock) FLUSH STATUS; @@ -1091,7 +1091,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 5 HANDLER_READ_NEXT 5 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 7 # 16 locks (2 tables, 1 + 5 subpartitions lock/unlock) TRUNCATE TABLE t2; @@ -1103,7 +1103,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 5 HANDLER_READ_NEXT 5 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 10 # 30 locks (2 table, 8 + 5 subpartitions lock/unlock) FLUSH STATUS; @@ -1114,7 +1114,7 @@ VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_FIRST 5 HANDLER_READ_NEXT 7 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 HANDLER_WRITE 7 # 14 locks (2 table, 5 subpartitions lock/unlock) SHOW CREATE TABLE t1; @@ -1201,7 +1201,7 @@ WHERE t1.a = t3.a AND t3.b = 'subp3'' at line 1 SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # Multi table delete without any matching rows FLUSH STATUS; DELETE t1, t2 FROM t1 PARTITION (pNeg), t3, t2 PARTITION (subp3) @@ -1211,7 +1211,7 
@@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 HANDLER_READ_RND_NEXT 3 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 12 locks (3 in t1, 1 in t3, 2 in t2) x 2 (lock + unlock) # 1 read first (first rnd_next in t2) # 4 read key (1 innodb_get_index in rnd_init in t2 + index read in t2 @@ -1231,7 +1231,7 @@ HANDLER_READ_KEY 2 HANDLER_READ_NEXT 2 HANDLER_READ_RND 4 HANDLER_READ_RND_NEXT 16 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 4 delete (2 in t2 + 2 in t3) # 12 locks (3 in t2, 1 in t3, 2 in t1) x 2 (lock + unlock) # 3 read first (1 in t1 + 1 in t3 + 1 in t3, for second row in t1) @@ -1279,7 +1279,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0; VARIABLE_NAME VARIABLE_VALUE HANDLER_COMMIT 1 -HANDLER_TMP_WRITE 22 +HANDLER_TMP_WRITE 23 # 6 locks (lock/unlock two subpartitions + table) # Test on non partitioned table SELECT * FROM t3 PARTITION (pNeg); diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index 8d073f8da1e..517e2d23915 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -3949,6 +3949,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -3963,6 +3964,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -3977,6 +3979,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 8 @@ -3991,6 +3994,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/range_vs_index_merge.result b/mysql-test/r/range_vs_index_merge.result index 1b12a9f5512..9af359a55f3 100644 --- a/mysql-test/r/range_vs_index_merge.result +++ b/mysql-test/r/range_vs_index_merge.result @@ -1310,6 +1310,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 385 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 377 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1338,6 +1339,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 59 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1367,6 +1369,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 59 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/range_vs_index_merge_innodb.result b/mysql-test/r/range_vs_index_merge_innodb.result index 8428936d25f..601ae9b7613 100644 --- a/mysql-test/r/range_vs_index_merge_innodb.result +++ b/mysql-test/r/range_vs_index_merge_innodb.result @@ -1311,6 +1311,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 385 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 377 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1339,6 +1340,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 59 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1368,6 +1370,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 59 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 
Handler_read_rnd_next 0 diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result index 7219a9c4462..8d5ee08e44e 100644 --- a/mysql-test/r/select.result +++ b/mysql-test/r/select.result @@ -4351,6 +4351,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6 diff --git a/mysql-test/r/select_jcl6.result b/mysql-test/r/select_jcl6.result index 3c62d0676ae..db7f2c473a8 100644 --- a/mysql-test/r/select_jcl6.result +++ b/mysql-test/r/select_jcl6.result @@ -4362,6 +4362,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 10 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 10 Handler_read_rnd_deleted 1 Handler_read_rnd_next 6 diff --git a/mysql-test/r/select_pkeycache.result b/mysql-test/r/select_pkeycache.result index 7219a9c4462..8d5ee08e44e 100644 --- a/mysql-test/r/select_pkeycache.result +++ b/mysql-test/r/select_pkeycache.result @@ -4351,6 +4351,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6 diff --git a/mysql-test/r/single_delete_update.result b/mysql-test/r/single_delete_update.result index 9332effeb56..84380216b74 100644 --- a/mysql-test/r/single_delete_update.result +++ b/mysql-test/r/single_delete_update.result @@ -29,6 +29,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -48,6 +49,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -84,6 +86,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -103,6 +106,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -136,6 +140,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -155,6 +160,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -192,6 +198,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -211,6 +218,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -242,6 +250,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -261,6 +270,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -290,6 +300,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -309,6 +320,7 @@ Handler_read_key 0 
Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -341,6 +353,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 7 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -365,6 +378,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 7 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -403,6 +417,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -422,6 +437,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -458,6 +474,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -477,6 +494,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -522,6 +540,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -547,6 +566,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -566,6 +586,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -603,6 +624,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -622,6 +644,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -663,6 +686,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -682,6 +706,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -720,6 +745,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -739,6 +765,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -776,6 +803,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -795,6 +823,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -826,6 +855,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 
Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -845,6 +875,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -875,6 +906,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -894,6 +926,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -927,6 +960,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 7 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -951,6 +985,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 7 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -993,6 +1028,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1012,6 +1048,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1053,6 +1090,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -1072,6 +1110,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -1111,6 +1150,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 4 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1136,6 +1176,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -1155,6 +1196,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 4 +Handler_read_retry 0 Handler_read_rnd 5 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index 471576e563f..45cd071e732 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -7713,6 +7713,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -7724,6 +7725,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 4097 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/status.result b/mysql-test/r/status.result index 140db7c5721..600a4fbd59f 100644 --- a/mysql-test/r/status.result +++ b/mysql-test/r/status.result @@ -287,6 +287,7 @@ Handler_read_key 4 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 7 Handler_read_rnd_deleted 0 Handler_read_rnd_next 23 @@ -304,7 +305,7 @@ Created_tmp_files 0 Created_tmp_tables 2 Handler_tmp_update 2 Handler_tmp_write 7 -Rows_tmp_read 42 +Rows_tmp_read 43 drop table t1; CREATE TABLE t1 (i int(11) DEFAULT NULL, KEY i (i) ) ENGINE=MyISAM; insert into t1 values (1),(2),(3),(4),(5); @@ -329,6 +330,7 
@@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 2 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 1 Handler_read_rnd_next 2 diff --git a/mysql-test/r/status_user.result b/mysql-test/r/status_user.result index c6248a85d3a..d00a7d3047f 100644 --- a/mysql-test/r/status_user.result +++ b/mysql-test/r/status_user.result @@ -112,6 +112,7 @@ Handler_read_key 9 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 5 diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index fa6a0624dcb..a58d35708fe 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -6868,6 +6868,7 @@ Handler_read_key 8 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 @@ -6905,6 +6906,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result index 24d9f0de35a..0c03959a96a 100644 --- a/mysql-test/r/subselect3.result +++ b/mysql-test/r/subselect3.result @@ -124,6 +124,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 41 diff --git a/mysql-test/r/subselect3_jcl6.result b/mysql-test/r/subselect3_jcl6.result index 19d3d25148f..415963af882 100644 --- a/mysql-test/r/subselect3_jcl6.result +++ b/mysql-test/r/subselect3_jcl6.result @@ -134,6 +134,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 41 diff --git a/mysql-test/r/subselect_cache.result b/mysql-test/r/subselect_cache.result index e5a2fe12526..03d683de0d8 100644 --- a/mysql-test/r/subselect_cache.result +++ b/mysql-test/r/subselect_cache.result @@ -36,6 +36,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 31 @@ -64,6 +65,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 61 @@ -91,6 +93,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 31 @@ -117,6 +120,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 61 @@ -144,6 +148,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 31 @@ -170,6 +175,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 61 @@ -269,6 +275,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 442 @@ -367,6 +374,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 
Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 472 @@ -390,6 +398,7 @@ Handler_read_key 17 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 36 @@ -412,6 +421,7 @@ Handler_read_key 10 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 86 @@ -435,6 +445,7 @@ Handler_read_key 17 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 41 @@ -457,6 +468,7 @@ Handler_read_key 10 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 0 Handler_read_rnd_next 91 @@ -486,6 +498,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 31 @@ -514,6 +527,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 61 @@ -537,6 +551,7 @@ Handler_read_key 7 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 1 Handler_read_rnd_next 36 @@ -559,6 +574,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 4 Handler_read_rnd_deleted 1 Handler_read_rnd_next 66 @@ -658,6 +674,7 @@ Handler_read_key 70 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 141 @@ -756,6 +773,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 671 @@ -1381,6 +1399,7 @@ Handler_read_key 11 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 145 @@ -1411,6 +1430,7 @@ Handler_read_key 32 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 84 @@ -1444,6 +1464,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 127 @@ -1474,6 +1495,7 @@ Handler_read_key 13 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 69 @@ -1665,6 +1687,7 @@ Handler_read_key 15 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 57 @@ -1691,6 +1714,7 @@ Handler_read_key 21 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 37 @@ -1718,6 +1742,7 @@ Handler_read_key 15 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 Handler_read_rnd_deleted 0 Handler_read_rnd_next 57 @@ -1744,6 +1769,7 @@ Handler_read_key 22 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 8 
Handler_read_rnd_deleted 0 Handler_read_rnd_next 41 @@ -1786,6 +1812,7 @@ Handler_read_key 15 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 @@ -1824,6 +1851,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 151 @@ -1861,6 +1889,7 @@ Handler_read_key 15 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 43 @@ -1898,6 +1927,7 @@ Handler_read_key 18 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 27 diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index 5f3495aae6f..1ea71b6ee32 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -2209,6 +2209,7 @@ Handler_read_key 5000 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6003 diff --git a/mysql-test/r/subselect_no_exists_to_in.result b/mysql-test/r/subselect_no_exists_to_in.result index 012d7a80610..8b2d12c50f6 100644 --- a/mysql-test/r/subselect_no_exists_to_in.result +++ b/mysql-test/r/subselect_no_exists_to_in.result @@ -6868,6 +6868,7 @@ Handler_read_key 8 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 @@ -6905,6 +6906,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result index e83697fcb4f..6051a58c3be 100644 --- a/mysql-test/r/subselect_no_mat.result +++ b/mysql-test/r/subselect_no_mat.result @@ -6862,6 +6862,7 @@ Handler_read_key 8 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 @@ -6899,6 +6900,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result index c620f788cf7..718cf8b6099 100644 --- a/mysql-test/r/subselect_no_opts.result +++ b/mysql-test/r/subselect_no_opts.result @@ -6859,6 +6859,7 @@ Handler_read_key 8 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 @@ -6896,6 +6897,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result index b2b2518bdd3..2b84f569fc5 100644 --- a/mysql-test/r/subselect_no_scache.result +++ b/mysql-test/r/subselect_no_scache.result @@ -6874,6 +6874,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 58 @@ -6911,6 +6912,7 @@ Handler_read_key 0 Handler_read_last 0 
Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result index 2bd82dffd36..a2841793f15 100644 --- a/mysql-test/r/subselect_no_semijoin.result +++ b/mysql-test/r/subselect_no_semijoin.result @@ -6859,6 +6859,7 @@ Handler_read_key 8 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 22 @@ -6896,6 +6897,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 16 diff --git a/mysql-test/r/subselect_sj_mat.result b/mysql-test/r/subselect_sj_mat.result index 34d5834bff4..fee63d8204e 100644 --- a/mysql-test/r/subselect_sj_mat.result +++ b/mysql-test/r/subselect_sj_mat.result @@ -2249,6 +2249,7 @@ Handler_read_key 5000 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 6003 diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result index a0c35c6e0ca..49141e33709 100644 --- a/mysql-test/r/update.result +++ b/mysql-test/r/update.result @@ -276,6 +276,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -289,6 +290,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 2 Handler_read_rnd_deleted 0 Handler_read_rnd_next 9 @@ -301,6 +303,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -313,6 +316,7 @@ Handler_read_key 0 Handler_read_last 1 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -326,6 +330,7 @@ Handler_read_key 0 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 2 Handler_read_rnd_next 7 @@ -381,6 +386,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 1 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 1 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -420,6 +426,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -432,6 +439,7 @@ Handler_read_key 2 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -443,6 +451,7 @@ Handler_read_key 3 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -454,6 +463,7 @@ Handler_read_key 3 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -699,6 +709,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 1 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 2 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index a92ccbdde94..52c379d03af 100644 --- 
a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -4255,6 +4255,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 @@ -4278,6 +4279,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 17 diff --git a/sql/handler.cc b/sql/handler.cc index 1816b9aa7eb..0edff665a33 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2757,6 +2757,15 @@ int handler::ha_index_next_same(uchar *buf, const uchar *key, uint keylen) return result; } + +bool handler::ha_was_semi_consistent_read() +{ + bool result= was_semi_consistent_read(); + if (result) + increment_statistics(&SSV::ha_read_retry_count); + return result; +} + /* Initialize handler for random reading, with error handling */ int handler::ha_rnd_init_with_error(bool scan) diff --git a/sql/handler.h b/sql/handler.h index e2176f04933..772f2e68dab 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3090,6 +3090,7 @@ public: If this method returns nonzero, it will also signal the storage engine that the next read will be a locking re-read of the row. */ + bool ha_was_semi_consistent_read(); virtual bool was_semi_consistent_read() { return 0; } /** Tell the engine whether it should avoid unnecessary lock waits. diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index 3f55ff3684d..e856400466d 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -267,7 +267,7 @@ int handler::multi_range_read_next(range_id_t *range_info) } else { - if (was_semi_consistent_read()) + if (ha_was_semi_consistent_read()) { /* The following assignment is redundant, but for extra safety and to diff --git a/sql/mysqld.cc b/sql/mysqld.cc index a1c13505304..826f2af3a85 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -7887,6 +7887,7 @@ SHOW_VAR status_vars[]= { {"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG}, {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_NOFLUSH}, {"Delayed_writes", (char*) &delayed_insert_writes, SHOW_LONG}, + {"Delete_scan", (char*) offsetof(STATUS_VAR, delete_scan_count), SHOW_LONG_STATUS}, {"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS}, {"Executed_events", (char*) &executed_events, SHOW_LONG_NOFLUSH }, {"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS}, @@ -7915,6 +7916,7 @@ SHOW_VAR status_vars[]= { {"Handler_read_last", (char*) offsetof(STATUS_VAR, ha_read_last_count), SHOW_LONG_STATUS}, {"Handler_read_next", (char*) offsetof(STATUS_VAR, ha_read_next_count), SHOW_LONG_STATUS}, {"Handler_read_prev", (char*) offsetof(STATUS_VAR, ha_read_prev_count), SHOW_LONG_STATUS}, + {"Handler_read_retry", (char*) offsetof(STATUS_VAR, ha_read_retry_count), SHOW_LONG_STATUS}, {"Handler_read_rnd", (char*) offsetof(STATUS_VAR, ha_read_rnd_count), SHOW_LONG_STATUS}, {"Handler_read_rnd_deleted", (char*) offsetof(STATUS_VAR, ha_read_rnd_deleted_count), SHOW_LONG_STATUS}, {"Handler_read_rnd_next", (char*) offsetof(STATUS_VAR, ha_read_rnd_next_count), SHOW_LONG_STATUS}, @@ -8031,6 +8033,7 @@ SHOW_VAR status_vars[]= { {"Threads_connected", (char*) &connection_count, SHOW_INT}, {"Threads_created", (char*) &thread_created, SHOW_LONG_NOFLUSH}, {"Threads_running", (char*) &thread_running, SHOW_INT}, + {"Update_scan", (char*) offsetof(STATUS_VAR, update_scan_count), SHOW_LONG_STATUS}, {"Uptime", (char*) 
&show_starttime, SHOW_SIMPLE_FUNC}, #ifdef ENABLED_PROFILING {"Uptime_since_flush_status",(char*) &show_flushstatustime, SHOW_SIMPLE_FUNC}, diff --git a/sql/sql_class.h b/sql/sql_class.h index 8820205e8a2..f56ffbc1cc3 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -666,9 +666,11 @@ typedef struct system_status_var ulong ha_read_key_count; ulong ha_read_next_count; ulong ha_read_prev_count; + ulong ha_read_retry_count; ulong ha_read_rnd_count; ulong ha_read_rnd_next_count; ulong ha_read_rnd_deleted_count; + /* This number doesn't include calls to the default implementation and calls made by range access. The intent is to count only calls made by @@ -702,6 +704,8 @@ typedef struct system_status_var ulong select_range_count_; ulong select_range_check_count_; ulong select_scan_count_; + ulong update_scan_count; + ulong delete_scan_count; ulong executed_triggers; ulong long_query_count; ulong filesort_merge_passes_; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 64f5c85ef22..ce7da3cd33a 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -466,6 +466,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, DBUG_EXECUTE_IF("show_explain_probe_delete_exec_start", dbug_serve_apcs(thd, 1);); + if (!(select && select->quick)) + status_var_increment(thd->status_var.delete_scan_count); + if (query_plan.using_filesort) { ha_rows examined_rows; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index b2af075e2f4..55dc26ef043 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -528,6 +528,9 @@ int mysql_update(THD *thd, DBUG_EXECUTE_IF("show_explain_probe_update_exec_start", dbug_serve_apcs(thd, 1);); + if (!(select && select->quick)) + status_var_increment(thd->status_var.update_scan_count); + if (query_plan.using_filesort || query_plan.using_io_buffer) { /* @@ -594,6 +597,7 @@ int mysql_update(THD *thd, close_cached_file(&tempfile); goto err; } + table->file->try_semi_consistent_read(1); /* @@ -631,7 +635,7 @@ int mysql_update(THD *thd, thd->inc_examined_row_count(1); if (!select || (error= select->skip_record(thd)) > 0) { - if (table->file->was_semi_consistent_read()) + if (table->file->ha_was_semi_consistent_read()) continue; /* repeat the read of the same row if it still exists */ table->file->position(table->record[0]); @@ -746,7 +750,7 @@ int mysql_update(THD *thd, thd->inc_examined_row_count(1); if (!select || select->skip_record(thd) > 0) { - if (table->file->was_semi_consistent_read()) + if (table->file->ha_was_semi_consistent_read()) continue; /* repeat the read of the same row if it still exists */ store_record(table,record[1]); diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result index 152e7f31d08..703cbb02f39 100644 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result +++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result @@ -18,6 +18,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -35,6 +36,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -51,6 +53,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -67,6 +70,7 @@ Handler_read_key 1 Handler_read_last 0 
Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -84,6 +88,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -100,6 +105,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result index 429fb82aaab..805cd60e167 100644 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result @@ -18,6 +18,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -35,6 +36,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -51,6 +53,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -67,6 +70,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -84,6 +88,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -100,6 +105,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result index a8953d1233f..bb9f05f2e72 100644 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result +++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result @@ -18,6 +18,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -35,6 +36,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result index fd0ed8b367d..a9c77661077 100644 --- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result @@ -18,6 +18,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 @@ -35,6 +36,7 @@ Handler_read_key 1 Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 +Handler_read_retry 0 Handler_read_rnd 0 Handler_read_rnd_deleted 0 Handler_read_rnd_next 0 -- cgit v1.2.1 From 5932fa789043fad1602a5ebb335adf4e7c860cdf Mon Sep 17 00:00:00 2001 From: Monty Date: Sun, 21 Aug 2016 20:38:47 +0300 Subject: Fixed "Packets out of order" warning message on stdout in clients, compiled for debugging, when the 
server goes down This happens in the following scenario: - Server gets a shutdown message - Servers sends error ER_CONNECTION_KILLED to the clients connection - The client sends a query to the server, before the server has time to close the connection to the client - Client reads the ER_CONNECTION_KILLED error message In the above case, the packet number for the reply is one less than what the client expected and the client prints "Packets out of order". Fixed the following way: - The client accepts now error packages with a packet number one less than expected. - To ensure that this issue can be detected early in my_real_read(), error messages sent to the client are not compressed, even when compressed protocol is used. --- include/mysql.h.pp | 2 +- include/mysql_com.h | 2 +- mysql-test/include/wait_until_connected_again.inc | 2 +- mysql-test/include/wait_until_disconnected.inc | 2 +- sql/net_serv.cc | 97 ++++++++++++++++------- sql/protocol.cc | 15 +++- 6 files changed, 84 insertions(+), 36 deletions(-) diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 2c8e47b454f..a593f526c6e 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -27,7 +27,7 @@ typedef struct st_net { char save_char; char net_skip_rest_factor; my_bool thread_specific_malloc; - my_bool compress; + unsigned char compress; my_bool unused3; unsigned char *unused; unsigned int last_errno; diff --git a/include/mysql_com.h b/include/mysql_com.h index 8fdac38dd66..96514a28310 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -386,7 +386,7 @@ typedef struct st_net { char save_char; char net_skip_rest_factor; my_bool thread_specific_malloc; - my_bool compress; + unsigned char compress; my_bool unused3; /* Please remove with the next incompatible ABI change. */ /* Pointer to query object in query cache, do not equal NULL (0) for diff --git a/mysql-test/include/wait_until_connected_again.inc b/mysql-test/include/wait_until_connected_again.inc index 96240e36db7..6f64ef45440 100644 --- a/mysql-test/include/wait_until_connected_again.inc +++ b/mysql-test/include/wait_until_connected_again.inc @@ -14,7 +14,7 @@ while ($mysql_errno) # Strangely enough, the server might return "Too many connections" # while being shutdown, thus 1040 is an "allowed" error # See BUG#36228 - --error 0,1040,1053,2002,2003,2005,2006,2013 + --error 0,1040,1053,2002,2003,2005,2006,2013,1927 show status; dec $counter; diff --git a/mysql-test/include/wait_until_disconnected.inc b/mysql-test/include/wait_until_disconnected.inc index 71361682442..658bf0be739 100644 --- a/mysql-test/include/wait_until_disconnected.inc +++ b/mysql-test/include/wait_until_disconnected.inc @@ -12,7 +12,7 @@ while (!$mysql_errno) # Strangely enough, the server might return "Too many connections" # while being shutdown, thus 1040 is an "allowed" error. # See BUG#36228. 
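Context, not part of the patch series: the two include files touched here are what restart-type tests source around a server shutdown, which is exactly the window in which the late error packet described above can arrive. A rough usage sketch in mysqltest form; the expect-file path and server index are assumptions for illustration only:

  --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
  --shutdown_server
  --source include/wait_until_disconnected.inc
  --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
  --enable_reconnect
  --source include/wait_until_connected_again.inc

With this change both wait loops also tolerate error 1927 (ER_CONNECTION_KILLED), since a server that is going down may answer one last query with that error instead of simply dropping the connection.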
- --error 0,1040,1053,2002,2003,2005,2006,2013 + --error 0,1040,1053,2002,2003,2005,2006,2013,1927 show status; dec $counter; diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 91a17606d68..b48d36700c6 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -540,7 +540,7 @@ net_write_buff(NET *net, const uchar *packet, ulong len) left_length= (ulong) (net->buff_end - net->write_pos); #ifdef DEBUG_DATA_PACKETS - DBUG_DUMP("data", packet, len); + DBUG_DUMP("data_written", packet, len); #endif if (len > left_length) { @@ -629,7 +629,8 @@ net_real_write(NET *net,const uchar *packet, size_t len) } memcpy(b+header_length,packet,len); - if (my_compress(b+header_length, &len, &complen)) + /* Don't compress error packets (compress == 2) */ + if (net->compress == 2 || my_compress(b+header_length, &len, &complen)) complen=0; int3store(&b[NET_HEADER_SIZE],complen); int3store(b,len); @@ -640,7 +641,7 @@ net_real_write(NET *net,const uchar *packet, size_t len) #endif /* HAVE_COMPRESS */ #ifdef DEBUG_DATA_PACKETS - DBUG_DUMP("data", packet, len); + DBUG_DUMP("data_written", packet, len); #endif #ifndef NO_ALARM @@ -830,6 +831,7 @@ my_real_read(NET *net, size_t *complen, size_t length; uint i,retry_count=0; ulong len=packet_error; + my_bool expect_error_packet __attribute((unused))= 0; thr_alarm_t alarmed; #ifndef NO_ALARM ALARM alarm_buff; @@ -878,6 +880,7 @@ my_real_read(NET *net, size_t *complen, if (i== 0 && thd_net_is_killed()) { + DBUG_PRINT("info", ("thd is killed")); len= packet_error; net->error= 0; net->last_errno= ER_CONNECTION_KILLED; @@ -947,39 +950,34 @@ my_real_read(NET *net, size_t *complen, pos+= length; update_statistics(thd_increment_bytes_received(length)); } + +#ifdef DEBUG_DATA_PACKETS + DBUG_DUMP("data_read", net->buff+net->where_b, length); +#endif if (i == 0) { /* First parts is packet length */ ulong helping; +#ifndef DEBUG_DATA_PACKETS DBUG_DUMP("packet_header", net->buff+net->where_b, NET_HEADER_SIZE); +#endif if (net->buff[net->where_b + 3] != (uchar) net->pkt_nr) - { - if (net->buff[net->where_b] != (uchar) 255) - { - DBUG_PRINT("error", - ("Packets out of order (Found: %d, expected %u)", - (int) net->buff[net->where_b + 3], - net->pkt_nr)); - /* - We don't make noise server side, since the client is expected - to break the protocol for e.g. --send LOAD DATA .. LOCAL where - the server expects the client to send a file, but the client - may reply with a new command instead. - */ + { #ifndef MYSQL_SERVER - EXTRA_DEBUG_fflush(stdout); - EXTRA_DEBUG_fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n", - (int) net->buff[net->where_b + 3], - (uint) (uchar) net->pkt_nr); - EXTRA_DEBUG_fflush(stderr); + if (net->buff[net->where_b + 3] == (uchar) (net->pkt_nr -1)) + { + /* + If the server was killed then the server may have missed the + last sent client packet and the packet numbering may be one off. + */ + DBUG_PRINT("warning", ("Found possible out of order packets")); + expect_error_packet= 1; + } + else #endif - } - len= packet_error; - /* Not a NET error on the client. XXX: why? 
*/ - MYSQL_SERVER_my_error(ER_NET_PACKETS_OUT_OF_ORDER, MYF(0)); - goto end; - } - net->compress_pkt_nr= ++net->pkt_nr; + goto packets_out_of_order; + } + net->compress_pkt_nr= ++net->pkt_nr; #ifdef HAVE_COMPRESS if (net->compress) { @@ -1027,6 +1025,21 @@ my_real_read(NET *net, size_t *complen, } #endif } +#ifndef MYSQL_SERVER + else if (expect_error_packet) + { + /* + This check is safe both for compressed and not compressed protocol + as for the compressed protocol errors are not compressed anymore. + */ + if (net->buff[net->where_b] != (uchar) 255) + { + /* Restore pkt_nr to original value */ + net->pkt_nr--; + goto packets_out_of_order; + } + } +#endif } end: @@ -1040,7 +1053,7 @@ end: net->reading_or_writing=0; #ifdef DEBUG_DATA_PACKETS if (len != packet_error) - DBUG_DUMP("data", net->buff+net->where_b, len); + DBUG_DUMP("data_read", net->buff+net->where_b, len); #endif #ifdef MYSQL_SERVER if (server_extension != NULL) @@ -1051,9 +1064,35 @@ end: } #endif return(len); + +packets_out_of_order: + { + DBUG_PRINT("error", + ("Packets out of order (Found: %d, expected %u)", + (int) net->buff[net->where_b + 3], + net->pkt_nr)); + DBUG_ASSERT(0); + /* + We don't make noise server side, since the client is expected + to break the protocol for e.g. --send LOAD DATA .. LOCAL where + the server expects the client to send a file, but the client + may reply with a new command instead. + */ +#ifndef MYSQL_SERVER + EXTRA_DEBUG_fflush(stdout); + EXTRA_DEBUG_fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n", + (int) net->buff[net->where_b + 3], + (uint) (uchar) net->pkt_nr); + EXTRA_DEBUG_fflush(stderr); +#endif + len= packet_error; + MYSQL_SERVER_my_error(ER_NET_PACKETS_OUT_OF_ORDER, MYF(0)); + goto end; + } } + /* Old interface. See my_net_read_packet() for function description */ #undef my_net_read diff --git a/sql/protocol.cc b/sql/protocol.cc index c1614f4e7e4..777f124f502 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -373,7 +373,8 @@ bool net_send_error_packet(THD *thd, uint sql_errno, const char *err, uint error; char converted_err[MYSQL_ERRMSG_SIZE]; char buff[2+1+SQLSTATE_LENGTH+MYSQL_ERRMSG_SIZE], *pos; - + my_bool ret; + uint8 save_compress; DBUG_ENTER("send_error_packet"); if (net->vio == 0) @@ -401,8 +402,16 @@ bool net_send_error_packet(THD *thd, uint sql_errno, const char *err, /* Converted error message is always null-terminated. */ length= (uint) (strmake(pos, converted_err, MYSQL_ERRMSG_SIZE - 1) - buff); - DBUG_RETURN(net_write_command(net,(uchar) 255, (uchar*) "", 0, (uchar*) buff, - length)); + /* + Ensure that errors are not compressed. This is to ensure we can + detect out of bands error messages in the client + */ + if ((save_compress= net->compress)) + net->compress= 2; + ret= net_write_command(net,(uchar) 255, (uchar*) "", 0, (uchar*) buff, + length); + net->compress= save_compress; + DBUG_RETURN(ret); } #endif /* EMBEDDED_LIBRARY */ -- cgit v1.2.1 From 58386ca04dffd5fc9006d8f70a44a3a82f385b96 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Mon, 11 Jan 2016 22:43:27 +0200 Subject: refs codership/mysql-wsrep#239 Synced xtrabackup SST scripts from PXC source tree as of PXC 5.6.27-25.13 - PXC#480: xtrabackup-v2 SST fails with multiple log_bin directives in my.cn - PXC#460: wsrep_sst_auth don't work in Percona-XtraDB-Cluster-56-5.6.25-25. - PXC-416: Fix SST related issues. 
- PXC-389: Merge remote-tracking branch 'wsrep/5.6' into 5.6-wsrep-pxc389 - Bug #1431101: SST does not clobber backup-my.cnf --- scripts/wsrep_sst_common.sh | 4 ++-- scripts/wsrep_sst_xtrabackup-v2.sh | 6 ++++-- scripts/wsrep_sst_xtrabackup.sh | 6 ++++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 0aa338510e0..25ec0baae80 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -134,7 +134,7 @@ if $MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF sst | grep -q "wsrep_sst_auth" then if wsrep_auth_not_set then - WSREP_SST_OPT_AUTH=$(MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF sst | grep -- "--wsrep_sst_auth" | cut -d= -f2) + WSREP_SST_OPT_AUTH=$($MY_PRINT_DEFAULTS -c $WSREP_SST_OPT_CONF sst | grep -- "--wsrep_sst_auth" | cut -d= -f2) fi fi readonly WSREP_SST_OPT_AUTH @@ -176,7 +176,7 @@ wsrep_log_info() wsrep_cleanup_progress_file() { - [ -n "$SST_PROGRESS_FILE" ] && rm -f "$SST_PROGRESS_FILE" 2>/dev/null + [ -n "${SST_PROGRESS_FILE:-}" ] && rm -f "$SST_PROGRESS_FILE" 2>/dev/null || true } wsrep_check_program() diff --git a/scripts/wsrep_sst_xtrabackup-v2.sh b/scripts/wsrep_sst_xtrabackup-v2.sh index 074f36a11e9..b5556c60c64 100644 --- a/scripts/wsrep_sst_xtrabackup-v2.sh +++ b/scripts/wsrep_sst_xtrabackup-v2.sh @@ -674,6 +674,7 @@ then if [ $WSREP_SST_OPT_BYPASS -eq 0 ] then + usrst=0 if [[ -z $sst_ver ]];then wsrep_log_error "Upgrade joiner to 5.6.21 or higher for backup locks support" wsrep_log_error "The joiner is not supported for this version of donor" @@ -689,13 +690,14 @@ then itmpdir=$(mktemp -d) wsrep_log_info "Using $itmpdir as innobackupex temporary directory" - if [ "$WSREP_SST_OPT_USER" != "(null)" ]; then + if [[ -n "${WSREP_SST_OPT_USER:-}" && "$WSREP_SST_OPT_USER" != "(null)" ]]; then INNOEXTRA+=" --user=$WSREP_SST_OPT_USER" + usrst=1 fi if [ -n "${WSREP_SST_OPT_PSWD:-}" ]; then INNOEXTRA+=" --password=$WSREP_SST_OPT_PSWD" - else + elif [[ $usrst -eq 1 ]];then # Empty password, used for testing, debugging etc. INNOEXTRA+=" --password=" fi diff --git a/scripts/wsrep_sst_xtrabackup.sh b/scripts/wsrep_sst_xtrabackup.sh index 4acf2854301..b40be208be7 100644 --- a/scripts/wsrep_sst_xtrabackup.sh +++ b/scripts/wsrep_sst_xtrabackup.sh @@ -436,15 +436,17 @@ then if [ $WSREP_SST_OPT_BYPASS -eq 0 ] then + usrst=0 TMPDIR="${TMPDIR:-/tmp}" - if [ "$WSREP_SST_OPT_USER" != "(null)" ]; then + if [[ -n "${WSREP_SST_OPT_USER:-}" && "$WSREP_SST_OPT_USER" != "(null)" ]]; then INNOEXTRA+=" --user=$WSREP_SST_OPT_USER" + usrst=1 fi if [ -n "${WSREP_SST_OPT_PSWD:-}" ]; then INNOEXTRA+=" --password=$WSREP_SST_OPT_PSWD" - else + elif [[ $usrst -eq 1 ]];then # Empty password, used for testing, debugging etc. INNOEXTRA+=" --password=" fi -- cgit v1.2.1 From a53ac77c4265c2bf8ad4d07b924f6155e8b39458 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 10 Aug 2016 12:30:57 -0400 Subject: Cleanup: Remove dead code --- sql/sql_parse.cc | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 581a8212668..34c4474a9e2 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2641,31 +2641,10 @@ mysql_execute_command(THD *thd) lex->sql_command == SQLCOM_SELECT) && !wsrep_is_show_query(lex->sql_command)) { -#if DIRTY_HACK - /* Dirty hack for lp:1002714 - trying to recognize mysqldump connection - * and allow it to continue. 
Actuall mysqldump_magic_str may be longer - * and is obviously version dependent and may be issued by any client - * connection after which connection becomes non-replicating. */ - static char const mysqldump_magic_str[]= -"SELECT LOGFILE_GROUP_NAME, FILE_NAME, TOTAL_EXTENTS, INITIAL_SIZE, ENGINE, EXTRA FROM INFORMATION_SCHEMA.FILES WHERE FILE_TYPE = 'UNDO LOG' AND FILE_NAME IS NOT NULL"; - static const size_t mysqldump_magic_str_len= sizeof(mysqldump_magic_str) -1; - if (SQLCOM_SELECT != lex->sql_command || - thd->query_length() < mysqldump_magic_str_len || - strncmp(thd->query(), mysqldump_magic_str, mysqldump_magic_str_len)) - { -#endif /* DIRTY_HACK */ my_message(ER_UNKNOWN_COM_ERROR, "WSREP has not yet prepared node for application use", MYF(0)); goto error; -#if DIRTY_HACK - } - else - { - /* mysqldump connection, allow all further queries to pass */ - thd->variables.wsrep_on= FALSE; - } -#endif /* DIRTY_HACK */ } } #endif /* WITH_WSREP */ -- cgit v1.2.1 From df96eb5d049db22157ad0c01ac0e50c7beb79a88 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Thu, 18 Feb 2016 14:34:53 +0200 Subject: Refs: MW-248 - test cases from PXC for reproducing the issue - initial fix --- .../suite/galera/r/galera_as_slave_autoinc.result | 61 +++++++++++ .../galera/r/galera_binlog_stmt_autoinc.result | 74 +++++++++++++ .../suite/galera/t/galera_as_slave_autoinc.cnf | 1 + .../suite/galera/t/galera_as_slave_autoinc.test | 79 ++++++++++++++ .../suite/galera/t/galera_binlog_stmt_autoinc.test | 114 +++++++++++++++++++++ sql/sql_class.h | 1 + sql/sql_parse.cc | 23 ++--- 7 files changed, 341 insertions(+), 12 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_as_slave_autoinc.result create mode 100644 mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result create mode 100644 mysql-test/suite/galera/t/galera_as_slave_autoinc.cnf create mode 100644 mysql-test/suite/galera/t/galera_as_slave_autoinc.test create mode 100644 mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test diff --git a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result new file mode 100644 index 00000000000..b3a4cd77044 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result @@ -0,0 +1,61 @@ +START SLAVE USER='root'; +Warnings: +Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. 
+SET SESSION binlog_format='STATEMENT'; +CREATE TABLE t1 ( +i int(11) NOT NULL AUTO_INCREMENT, +c char(32) DEFAULT 'dummy_text', +PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +insert into t1(i) values(null); +select * from t1; +i c +1 dummy_text +insert into t1(i) values(null), (null), (null); +select * from t1; +i c +1 dummy_text +2 dummy_text +3 dummy_text +4 dummy_text +show variables like 'binlog_format'; +Variable_name Value +binlog_format STATEMENT +show variables like '%auto_increment%'; +Variable_name Value +auto_increment_increment 1 +auto_increment_offset 1 +wsrep_auto_increment_control ON +select * from t1; +i c +1 dummy_text +2 dummy_text +3 dummy_text +4 dummy_text +show variables like 'binlog_format'; +Variable_name Value +binlog_format ROW +show variables like '%auto_increment%'; +Variable_name Value +auto_increment_increment 2 +auto_increment_offset 1 +wsrep_auto_increment_control ON +select * from t1; +i c +1 dummy_text +2 dummy_text +3 dummy_text +4 dummy_text +show variables like 'binlog_format'; +Variable_name Value +binlog_format ROW +show variables like '%auto_increment%'; +Variable_name Value +auto_increment_increment 2 +auto_increment_offset 2 +wsrep_auto_increment_control ON +DROP TABLE t1; +STOP SLAVE; +RESET SLAVE ALL; +SET GLOBAL binlog_format='ROW'; +RESET MASTER; diff --git a/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result b/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result new file mode 100644 index 00000000000..6237ed7d5d8 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result @@ -0,0 +1,74 @@ +SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; +SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; +CREATE TABLE t1 ( +i int(11) NOT NULL AUTO_INCREMENT, +c char(32) DEFAULT 'dummy_text', +PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +insert into t1(i) values(null); +select * from t1; +i c +1 dummy_text +insert into t1(i) values(null), (null), (null); +select * from t1; +i c +1 dummy_text +3 dummy_text +5 dummy_text +7 dummy_text +select * from t1; +i c +1 dummy_text +3 dummy_text +5 dummy_text +7 dummy_text +SET GLOBAL wsrep_forced_binlog_format='none'; +SET GLOBAL wsrep_forced_binlog_format='none'; +drop table t1; +SET SESSION binlog_format='STATEMENT'; +show variables like 'binlog_format'; +Variable_name Value +binlog_format STATEMENT +SET GLOBAL wsrep_auto_increment_control='OFF'; +SET SESSION auto_increment_increment = 3; +SET SESSION auto_increment_offset = 1; +CREATE TABLE t1 ( +i int(11) NOT NULL AUTO_INCREMENT, +c char(32) DEFAULT 'dummy_text', +PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +insert into t1(i) values(null); +select * from t1; +i c +1 dummy_text +insert into t1(i) values(null), (null), (null); +select * from t1; +i c +1 dummy_text +4 dummy_text +7 dummy_text +10 dummy_text +select * from t1; +i c +1 dummy_text +4 dummy_text +7 dummy_text +10 dummy_text +SET GLOBAL wsrep_auto_increment_control='ON'; +SET SESSION binlog_format='ROW'; +show variables like 'binlog_format'; +Variable_name Value +binlog_format ROW +show variables like '%auto_increment%'; +Variable_name Value +auto_increment_increment 2 +auto_increment_offset 1 +wsrep_auto_increment_control ON +SET GLOBAL wsrep_auto_increment_control='OFF'; +show variables like '%auto_increment%'; +Variable_name Value +auto_increment_increment 3 +auto_increment_offset 1 +wsrep_auto_increment_control OFF +SET GLOBAL wsrep_auto_increment_control='ON'; +drop table t1; diff --git 
a/mysql-test/suite/galera/t/galera_as_slave_autoinc.cnf b/mysql-test/suite/galera/t/galera_as_slave_autoinc.cnf new file mode 100644 index 00000000000..9449ec9cf40 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_as_slave_autoinc.cnf @@ -0,0 +1 @@ +!include ../galera_2nodes_as_slave.cnf diff --git a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test new file mode 100644 index 00000000000..a1520a73905 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test @@ -0,0 +1,79 @@ +# +# Test Galera as a slave to a MySQL master +# +# The galera/galera_2node_slave.cnf describes the setup of the nodes +# + +--source include/have_innodb.inc +--source include/have_log_bin.inc + +# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 +--source include/galera_cluster.inc + +--connection node_2 +--disable_query_log +--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_1; +--enable_query_log +START SLAVE USER='root'; + +--connection node_1 + +## +## Verify the correct operation of the auto-increment when +## the binlog format set to the 'STATEMENT' on the master node: +## + +SET SESSION binlog_format='STATEMENT'; + +CREATE TABLE t1 ( + i int(11) NOT NULL AUTO_INCREMENT, + c char(32) DEFAULT 'dummy_text', + PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +insert into t1(i) values(null); + +select * from t1; + +insert into t1(i) values(null), (null), (null); + +select * from t1; + +show variables like 'binlog_format'; +show variables like '%auto_increment%'; + +--connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +--source include/wait_condition.inc + +--let $wait_condition = SELECT COUNT(*) = 4 FROM t1; +--source include/wait_condition.inc + +select * from t1; + +show variables like 'binlog_format'; +show variables like '%auto_increment%'; + +--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 + +select * from t1; + +show variables like 'binlog_format'; +show variables like '%auto_increment%'; + +--connection node_1 +DROP TABLE t1; + +--connection node_2 +--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; +--source include/wait_condition.inc + +STOP SLAVE; +RESET SLAVE ALL; + +--connection node_1 + +SET GLOBAL binlog_format='ROW'; + +RESET MASTER; diff --git a/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test b/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test new file mode 100644 index 00000000000..e35aa4642f5 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test @@ -0,0 +1,114 @@ +## +## Tests the auto-increment with binlog in STATEMENT mode. 
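Background for this test, not part of the file being added: with wsrep_auto_increment_control=ON the provider keeps auto_increment_increment and auto_increment_offset in step with the cluster membership, so keys generated on different nodes interleave instead of colliding. A minimal interactive sketch, assuming a two-node cluster and a throwaway table name:

  CREATE TABLE autoinc_demo (
    i INT NOT NULL AUTO_INCREMENT,
    c CHAR(32) DEFAULT 'dummy_text',
    PRIMARY KEY (i)
  ) ENGINE=InnoDB;
  SHOW VARIABLES LIKE 'auto_increment%';   # typically increment=2 on a 2-node cluster
  INSERT INTO autoinc_demo(i) VALUES (NULL), (NULL), (NULL), (NULL);
  SELECT i FROM autoinc_demo;              # e.g. 1,3,5,7 on the first node
  DROP TABLE autoinc_demo;

The fix that follows restricts this automatic adjustment to locally executed transactions, so an async replication slave applying STATEMENT events keeps the increment and offset recorded in the binlog rather than the cluster-forced values.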
+## + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +## +## Verify the correct operation of the auto-increment when the binlog +## format artificially set to the 'STATEMENT' (although this mode is +## not recommended in the current version): +## + +--connection node_2 +SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; + +--connection node_1 +SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; + +CREATE TABLE t1 ( + i int(11) NOT NULL AUTO_INCREMENT, + c char(32) DEFAULT 'dummy_text', + PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +insert into t1(i) values(null); + +select * from t1; + +insert into t1(i) values(null), (null), (null); + +select * from t1; + +--connection node_2 + +select * from t1; + +SET GLOBAL wsrep_forced_binlog_format='none'; + +--connection node_1 + +SET GLOBAL wsrep_forced_binlog_format='none'; + +drop table t1; + +## +## Check the operation when the automatic control over the auto-increment +## settings is switched off, that is, when we use the increment step and +## the offset specified by the user. In the current session, the binlog +## format is set to 'STATEMENT'. It is important that the values of the +## auto-increment options does not changed on other node - it allows us +## to check the correct transmission of the auto-increment options to +## other nodes: +## + +--disable_warnings +SET SESSION binlog_format='STATEMENT'; +--enable_warnings + +show variables like 'binlog_format'; + +SET GLOBAL wsrep_auto_increment_control='OFF'; + +SET SESSION auto_increment_increment = 3; +SET SESSION auto_increment_offset = 1; + +CREATE TABLE t1 ( + i int(11) NOT NULL AUTO_INCREMENT, + c char(32) DEFAULT 'dummy_text', + PRIMARY KEY (i) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +insert into t1(i) values(null); + +select * from t1; + +insert into t1(i) values(null), (null), (null); + +select * from t1; + +--connection node_2 + +select * from t1; + +--connection node_1 + +## +## Verify the return to automatic calculation of the step +## and offset of the auto-increment: +## + +SET GLOBAL wsrep_auto_increment_control='ON'; + +SET SESSION binlog_format='ROW'; + +show variables like 'binlog_format'; +show variables like '%auto_increment%'; + +## +## Verify the recovery of original user-defined values after +## stopping the automatic control over auto-increment: +## + +SET GLOBAL wsrep_auto_increment_control='OFF'; + +show variables like '%auto_increment%'; + +## +## Restore original options and drop test table: +## + +SET GLOBAL wsrep_auto_increment_control='ON'; + +drop table t1; diff --git a/sql/sql_class.h b/sql/sql_class.h index bf3d043cc1a..ee637b3726d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -660,6 +660,7 @@ typedef struct system_variables uint wsrep_sync_wait; ulong wsrep_retry_autocommit; ulong wsrep_OSU_method; + ulong wsrep_auto_increment_control; #endif double long_query_time_double; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 34c4474a9e2..c8f3c5ae4bf 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6805,18 +6805,17 @@ void THD::reset_for_next_command() transactions. Appliers and replayers are either processing ROW events or get autoinc variable values from Query_log_event. 
*/ - if (WSREP(thd) && thd->wsrep_exec_mode == LOCAL_STATE) { - if (wsrep_auto_increment_control) - { - if (thd->variables.auto_increment_offset != - global_system_variables.auto_increment_offset) - thd->variables.auto_increment_offset= - global_system_variables.auto_increment_offset; - if (thd->variables.auto_increment_increment != - global_system_variables.auto_increment_increment) - thd->variables.auto_increment_increment= - global_system_variables.auto_increment_increment; - } + if (WSREP(thd) && thd->wsrep_exec_mode == LOCAL_STATE && + !thd->slave_thread && wsrep_auto_increment_control) + { + if (thd->variables.auto_increment_offset != + global_system_variables.auto_increment_offset) + thd->variables.auto_increment_offset= + global_system_variables.auto_increment_offset; + if (thd->variables.auto_increment_increment != + global_system_variables.auto_increment_increment) + thd->variables.auto_increment_increment= + global_system_variables.auto_increment_increment; } #endif /* WITH_WSREP */ thd->query_start_used= 0; -- cgit v1.2.1 From 5edf55be631d86a92e1faaa9e0c2792be8f41c29 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Fri, 19 Feb 2016 11:48:09 +0200 Subject: Refs: MW-248 - fixed the test case and extended with autoinc modification is master side --- .../suite/galera/r/galera_as_slave_autoinc.result | 41 +++++++++++++++++----- .../suite/galera/t/galera_as_slave_autoinc.test | 17 +++++---- 2 files changed, 43 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result index b3a4cd77044..a8f5ec8e37e 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result +++ b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result @@ -18,13 +18,29 @@ i c 2 dummy_text 3 dummy_text 4 dummy_text +SET SESSION auto_increment_increment=7; +insert into t1(i) values(null), (null), (null); +SET SESSION auto_increment_offset=5; +insert into t1(i) values(null), (null), (null); +select * from t1; +i c +1 dummy_text +2 dummy_text +3 dummy_text +4 dummy_text +8 dummy_text +15 dummy_text +22 dummy_text +33 dummy_text +40 dummy_text +47 dummy_text show variables like 'binlog_format'; Variable_name Value binlog_format STATEMENT show variables like '%auto_increment%'; Variable_name Value -auto_increment_increment 1 -auto_increment_offset 1 +auto_increment_increment 7 +auto_increment_offset 5 wsrep_auto_increment_control ON select * from t1; i c @@ -32,30 +48,37 @@ i c 2 dummy_text 3 dummy_text 4 dummy_text +8 dummy_text +15 dummy_text +22 dummy_text +33 dummy_text +40 dummy_text +47 dummy_text show variables like 'binlog_format'; Variable_name Value binlog_format ROW -show variables like '%auto_increment%'; +show variables like 'auto_increment_increment'; Variable_name Value auto_increment_increment 2 -auto_increment_offset 1 -wsrep_auto_increment_control ON select * from t1; i c 1 dummy_text 2 dummy_text 3 dummy_text 4 dummy_text +8 dummy_text +15 dummy_text +22 dummy_text +33 dummy_text +40 dummy_text +47 dummy_text show variables like 'binlog_format'; Variable_name Value binlog_format ROW -show variables like '%auto_increment%'; +show variables like 'auto_increment_increment'; Variable_name Value auto_increment_increment 2 -auto_increment_offset 2 -wsrep_auto_increment_control ON DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; -SET GLOBAL binlog_format='ROW'; RESET MASTER; diff --git a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test index 
a1520a73905..bf04b274ca7 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test +++ b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test @@ -40,6 +40,14 @@ insert into t1(i) values(null), (null), (null); select * from t1; +SET SESSION auto_increment_increment=7; +insert into t1(i) values(null), (null), (null); + +SET SESSION auto_increment_offset=5; +insert into t1(i) values(null), (null), (null); + +select * from t1; + show variables like 'binlog_format'; show variables like '%auto_increment%'; @@ -47,20 +55,20 @@ show variables like '%auto_increment%'; --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'; --source include/wait_condition.inc ---let $wait_condition = SELECT COUNT(*) = 4 FROM t1; +--let $wait_condition = SELECT COUNT(*) = 10 FROM t1; --source include/wait_condition.inc select * from t1; show variables like 'binlog_format'; -show variables like '%auto_increment%'; +show variables like 'auto_increment_increment'; --connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3 select * from t1; show variables like 'binlog_format'; -show variables like '%auto_increment%'; +show variables like 'auto_increment_increment'; --connection node_1 DROP TABLE t1; @@ -73,7 +81,4 @@ STOP SLAVE; RESET SLAVE ALL; --connection node_1 - -SET GLOBAL binlog_format='ROW'; - RESET MASTER; -- cgit v1.2.1 From ae0fec9c365a7a870b180ebbde6c68b01839fed4 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Fri, 19 Feb 2016 13:02:59 +0200 Subject: refs: MW-248 - removed the off topic mtr test --- .../galera/r/galera_binlog_stmt_autoinc.result | 74 ------------- .../suite/galera/t/galera_binlog_stmt_autoinc.test | 114 --------------------- 2 files changed, 188 deletions(-) delete mode 100644 mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result delete mode 100644 mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test diff --git a/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result b/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result deleted file mode 100644 index 6237ed7d5d8..00000000000 --- a/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result +++ /dev/null @@ -1,74 +0,0 @@ -SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; -SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; -CREATE TABLE t1 ( -i int(11) NOT NULL AUTO_INCREMENT, -c char(32) DEFAULT 'dummy_text', -PRIMARY KEY (i) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -insert into t1(i) values(null); -select * from t1; -i c -1 dummy_text -insert into t1(i) values(null), (null), (null); -select * from t1; -i c -1 dummy_text -3 dummy_text -5 dummy_text -7 dummy_text -select * from t1; -i c -1 dummy_text -3 dummy_text -5 dummy_text -7 dummy_text -SET GLOBAL wsrep_forced_binlog_format='none'; -SET GLOBAL wsrep_forced_binlog_format='none'; -drop table t1; -SET SESSION binlog_format='STATEMENT'; -show variables like 'binlog_format'; -Variable_name Value -binlog_format STATEMENT -SET GLOBAL wsrep_auto_increment_control='OFF'; -SET SESSION auto_increment_increment = 3; -SET SESSION auto_increment_offset = 1; -CREATE TABLE t1 ( -i int(11) NOT NULL AUTO_INCREMENT, -c char(32) DEFAULT 'dummy_text', -PRIMARY KEY (i) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; -insert into t1(i) values(null); -select * from t1; -i c -1 dummy_text -insert into t1(i) values(null), (null), (null); -select * from t1; -i c -1 dummy_text -4 dummy_text -7 dummy_text -10 dummy_text -select * from t1; -i c -1 dummy_text -4 dummy_text -7 dummy_text -10 dummy_text -SET GLOBAL wsrep_auto_increment_control='ON'; 
-SET SESSION binlog_format='ROW'; -show variables like 'binlog_format'; -Variable_name Value -binlog_format ROW -show variables like '%auto_increment%'; -Variable_name Value -auto_increment_increment 2 -auto_increment_offset 1 -wsrep_auto_increment_control ON -SET GLOBAL wsrep_auto_increment_control='OFF'; -show variables like '%auto_increment%'; -Variable_name Value -auto_increment_increment 3 -auto_increment_offset 1 -wsrep_auto_increment_control OFF -SET GLOBAL wsrep_auto_increment_control='ON'; -drop table t1; diff --git a/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test b/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test deleted file mode 100644 index e35aa4642f5..00000000000 --- a/mysql-test/suite/galera/t/galera_binlog_stmt_autoinc.test +++ /dev/null @@ -1,114 +0,0 @@ -## -## Tests the auto-increment with binlog in STATEMENT mode. -## - ---source include/galera_cluster.inc ---source include/have_innodb.inc - -## -## Verify the correct operation of the auto-increment when the binlog -## format artificially set to the 'STATEMENT' (although this mode is -## not recommended in the current version): -## - ---connection node_2 -SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; - ---connection node_1 -SET GLOBAL wsrep_forced_binlog_format='STATEMENT'; - -CREATE TABLE t1 ( - i int(11) NOT NULL AUTO_INCREMENT, - c char(32) DEFAULT 'dummy_text', - PRIMARY KEY (i) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -insert into t1(i) values(null); - -select * from t1; - -insert into t1(i) values(null), (null), (null); - -select * from t1; - ---connection node_2 - -select * from t1; - -SET GLOBAL wsrep_forced_binlog_format='none'; - ---connection node_1 - -SET GLOBAL wsrep_forced_binlog_format='none'; - -drop table t1; - -## -## Check the operation when the automatic control over the auto-increment -## settings is switched off, that is, when we use the increment step and -## the offset specified by the user. In the current session, the binlog -## format is set to 'STATEMENT'. 
It is important that the values of the -## auto-increment options does not changed on other node - it allows us -## to check the correct transmission of the auto-increment options to -## other nodes: -## - ---disable_warnings -SET SESSION binlog_format='STATEMENT'; ---enable_warnings - -show variables like 'binlog_format'; - -SET GLOBAL wsrep_auto_increment_control='OFF'; - -SET SESSION auto_increment_increment = 3; -SET SESSION auto_increment_offset = 1; - -CREATE TABLE t1 ( - i int(11) NOT NULL AUTO_INCREMENT, - c char(32) DEFAULT 'dummy_text', - PRIMARY KEY (i) -) ENGINE=InnoDB DEFAULT CHARSET=latin1; - -insert into t1(i) values(null); - -select * from t1; - -insert into t1(i) values(null), (null), (null); - -select * from t1; - ---connection node_2 - -select * from t1; - ---connection node_1 - -## -## Verify the return to automatic calculation of the step -## and offset of the auto-increment: -## - -SET GLOBAL wsrep_auto_increment_control='ON'; - -SET SESSION binlog_format='ROW'; - -show variables like 'binlog_format'; -show variables like '%auto_increment%'; - -## -## Verify the recovery of original user-defined values after -## stopping the automatic control over auto-increment: -## - -SET GLOBAL wsrep_auto_increment_control='OFF'; - -show variables like '%auto_increment%'; - -## -## Restore original options and drop test table: -## - -SET GLOBAL wsrep_auto_increment_control='ON'; - -drop table t1; -- cgit v1.2.1 From da9650a36a9e1d9b78a55d6f40a37b984d03bce4 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Fri, 19 Feb 2016 13:08:22 +0200 Subject: Refs: MW-248 - some more code cleanup --- sql/sql_parse.cc | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c8f3c5ae4bf..3c8254ac651 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6803,19 +6803,18 @@ void THD::reset_for_next_command() /* Autoinc variables should be adjusted only for locally executed transactions. Appliers and replayers are either processing ROW - events or get autoinc variable values from Query_log_event. + events or get autoinc variable values from Query_log_event and + mysql slave may be processing STATEMENT format events, but he should + use autoinc values passed in binlog events, not the values forced by + the cluster. */ if (WSREP(thd) && thd->wsrep_exec_mode == LOCAL_STATE && !thd->slave_thread && wsrep_auto_increment_control) { - if (thd->variables.auto_increment_offset != - global_system_variables.auto_increment_offset) - thd->variables.auto_increment_offset= - global_system_variables.auto_increment_offset; - if (thd->variables.auto_increment_increment != - global_system_variables.auto_increment_increment) - thd->variables.auto_increment_increment= - global_system_variables.auto_increment_increment; + thd->variables.auto_increment_offset= + global_system_variables.auto_increment_offset; + thd->variables.auto_increment_increment= + global_system_variables.auto_increment_increment; } #endif /* WITH_WSREP */ thd->query_start_used= 0; -- cgit v1.2.1 From 4290117b79bccec71b4a92c99beb3e15668627e7 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Mon, 29 Feb 2016 15:24:06 +0200 Subject: Refs MW-252 - enveloped FTWRL processing with wsrep desync/resync calls. 
This way FTWRL processing node will not cause flow control to kick in - donor servicing thread is unfortunate exception, we must let him to pause provider as part of FTWRL phase, but not desync/resync as this is done as part of donor control on higher level --- sql/lock.cc | 41 ++++++++++++++++++++++++++++++++++++++++- sql/sql_class.cc | 3 ++- sql/sql_class.h | 1 + sql/wsrep_sst.cc | 1 + 4 files changed, 44 insertions(+), 2 deletions(-) diff --git a/sql/lock.cc b/sql/lock.cc index 07ea0b1f6dc..f724606a46d 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1071,6 +1071,16 @@ void Global_read_lock::unlock_global_read_lock(THD *thd) #ifdef WITH_WSREP wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; wsrep->resume(wsrep); + if (!wsrep_desync && !thd->wsrep_donor) + { + int ret = wsrep->resync(wsrep); + if (ret != WSREP_OK) + { + WSREP_WARN("resync failed %d for FTWRL: db: %s, query: %s", ret, + (thd->db ? thd->db : "(null)"), thd->query()); + DBUG_VOID_RETURN; + } + } #endif /* WITH_WSREP */ } thd->mdl_context.release_lock(m_mdl_global_shared_lock); @@ -1106,7 +1116,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) */ #ifdef WITH_WSREP - if (m_mdl_blocks_commits_lock) + if (WSREP(thd) && m_mdl_blocks_commits_lock) { WSREP_DEBUG("GRL was in block commit mode when entering " "make_global_read_lock_block_commit"); @@ -1131,6 +1141,35 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) m_state= GRL_ACQUIRED_AND_BLOCKS_COMMIT; #ifdef WITH_WSREP + /* Native threads should bail out before wsrep oprations to follow. + Donor servicing thread is an exception, it should pause provider but not desync, + as it is already desynced in donor state + */ + if (!WSREP(thd) && !thd->wsrep_donor) + { + DBUG_RETURN(FALSE); + } + + /* if already desynced or donor, avoid double desyncing */ + if (wsrep_desync || thd->wsrep_donor) + { + WSREP_DEBUG("desync set upfont, skipping implicit desync for FTWRL: %d", + wsrep_desync); + } + else + { + int rcode; + WSREP_DEBUG("running implicit desync for node"); + rcode = wsrep->desync(wsrep); + if (rcode != WSREP_OK) + { + WSREP_WARN("FTWRL desync failed %d for schema: %s, query: %s", + rcode, (thd->db ? 
thd->db : "(null)"), thd->query()); + my_message(ER_LOCK_DEADLOCK, "wsrep desync failed for FTWRL", MYF(0)); + DBUG_RETURN(TRUE); + } + } + long long ret = wsrep->pause(wsrep); if (ret >= 0) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 4873586aba5..6495069971b 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1088,7 +1088,8 @@ THD::THD() wsrep_po_in_trans(FALSE), wsrep_apply_format(0), wsrep_apply_toi(false), - wsrep_skip_append_keys(false) + wsrep_skip_append_keys(false), + wsrep_donor(false) #endif { ulong tmp; diff --git a/sql/sql_class.h b/sql/sql_class.h index ee637b3726d..6181333e1b2 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3880,6 +3880,7 @@ public: bool wsrep_apply_toi; /* applier processing in TOI */ bool wsrep_skip_append_keys; wsrep_gtid_t wsrep_sync_wait_gtid; + my_bool wsrep_donor; /* true if thread is SST donor servicing */ ulong wsrep_affected_rows; #endif /* WITH_WSREP */ }; diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index b697a557476..d13148b3d48 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -979,6 +979,7 @@ static void* sst_donor_thread (void* a) wsp::thd thd(FALSE); // we turn off wsrep_on for this THD so that it can // operate with wsrep_ready == OFF + thd.ptr->wsrep_donor = true; wsp::process proc(arg->cmd, "r", arg->env); err= proc.error(); -- cgit v1.2.1 From b159b666e5191b22618e631e9ed48159be541f7e Mon Sep 17 00:00:00 2001 From: sjaakola Date: Mon, 29 Feb 2016 16:36:17 +0200 Subject: Refs MW-252 - Calling FTWRL two times in a row caused desync error, this is fixed by making sub-sequent FTWRL calls bail out before wsrep operations --- sql/lock.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sql/lock.cc b/sql/lock.cc index f724606a46d..04529308266 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1120,11 +1120,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) { WSREP_DEBUG("GRL was in block commit mode when entering " "make_global_read_lock_block_commit"); - thd->mdl_context.release_lock(m_mdl_blocks_commits_lock); - m_mdl_blocks_commits_lock= NULL; - wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; - wsrep->resume(wsrep); - m_state= GRL_ACQUIRED; + DBUG_RETURN(FALSE); } #endif /* WITH_WSREP */ -- cgit v1.2.1 From 8ec50ebda3c558e34f263c08b2661929f0bdad2d Mon Sep 17 00:00:00 2001 From: sjaakola Date: Mon, 29 Feb 2016 22:54:58 +0200 Subject: Refs MW-252 - reverted from tracking donor servicing thread. 
With xtrabackup SST, xtrabackup thread will call FTWRL and node is desynced upfront - Skipping desync in FTWRL if node is operating as donor --- sql/lock.cc | 6 +++--- sql/sql_class.cc | 3 +-- sql/sql_class.h | 1 - sql/wsrep_mysqld.cc | 5 +++++ sql/wsrep_mysqld.h | 1 + sql/wsrep_sst.cc | 1 - 6 files changed, 10 insertions(+), 7 deletions(-) diff --git a/sql/lock.cc b/sql/lock.cc index 04529308266..8c426deda17 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1071,7 +1071,7 @@ void Global_read_lock::unlock_global_read_lock(THD *thd) #ifdef WITH_WSREP wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; wsrep->resume(wsrep); - if (!wsrep_desync && !thd->wsrep_donor) + if (!wsrep_desync && !wsrep_node_is_donor()) { int ret = wsrep->resync(wsrep); if (ret != WSREP_OK) @@ -1141,13 +1141,13 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) Donor servicing thread is an exception, it should pause provider but not desync, as it is already desynced in donor state */ - if (!WSREP(thd) && !thd->wsrep_donor) + if (!WSREP(thd) && !wsrep_node_is_donor()) { DBUG_RETURN(FALSE); } /* if already desynced or donor, avoid double desyncing */ - if (wsrep_desync || thd->wsrep_donor) + if (wsrep_desync || wsrep_node_is_donor()) { WSREP_DEBUG("desync set upfont, skipping implicit desync for FTWRL: %d", wsrep_desync); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 6495069971b..4873586aba5 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1088,8 +1088,7 @@ THD::THD() wsrep_po_in_trans(FALSE), wsrep_apply_format(0), wsrep_apply_toi(false), - wsrep_skip_append_keys(false), - wsrep_donor(false) + wsrep_skip_append_keys(false) #endif { ulong tmp; diff --git a/sql/sql_class.h b/sql/sql_class.h index 6181333e1b2..ee637b3726d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3880,7 +3880,6 @@ public: bool wsrep_apply_toi; /* applier processing in TOI */ bool wsrep_skip_append_keys; wsrep_gtid_t wsrep_sync_wait_gtid; - my_bool wsrep_donor; /* true if thread is SST donor servicing */ ulong wsrep_affected_rows; #endif /* WITH_WSREP */ }; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 0dd5c4dba14..734406080e9 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -1587,3 +1587,8 @@ wsrep_grant_mdl_exception(MDL_context *requestor_ctx, } return ret; } + +bool wsrep_node_is_donor() +{ + return (WSREP_ON) ? (local_status.get() == 2) : false; +} diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 242227fd2a0..514c1f32cff 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -330,4 +330,5 @@ int wsrep_alter_event_query(THD *thd, uchar** buf, size_t* buf_len); void wsrep_init_sidno(const wsrep_uuid_t&); #endif /* GTID_SUPPORT */ +bool wsrep_node_is_donor(); #endif /* WSREP_MYSQLD_H */ diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index d13148b3d48..b697a557476 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -979,7 +979,6 @@ static void* sst_donor_thread (void* a) wsp::thd thd(FALSE); // we turn off wsrep_on for this THD so that it can // operate with wsrep_ready == OFF - thd.ptr->wsrep_donor = true; wsp::process proc(arg->cmd, "r", arg->env); err= proc.error(); -- cgit v1.2.1 From a03c45fa980ab170cd509d10923916aa9d9c4f86 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Tue, 1 Mar 2016 10:56:21 +0200 Subject: Refs: MW-252 - if wsrep_on==OFF, unlock tables would resume provider even though it was not passed in FTWRL processing. This is fixed in this patch. 
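To make the intended end state of this MW-252 series concrete (the MTR test added in the next commit automates the same check): while FLUSH TABLES WITH READ LOCK is held the node should report itself as desynced and stop generating flow-control pressure for the rest of the cluster, and it should resync again on UNLOCK TABLES. A rough interactive sketch, assuming a healthy multi-node cluster:

  FLUSH TABLES WITH READ LOCK;
  SHOW STATUS LIKE 'wsrep_local_state_comment';   # expected: Donor/Desynced
  # writes on the other nodes keep committing without flow control kicking in
  UNLOCK TABLES;
  SHOW STATUS LIKE 'wsrep_local_state_comment';   # expected: Synced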
--- sql/lock.cc | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/sql/lock.cc b/sql/lock.cc index 8c426deda17..fea28753739 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1069,16 +1069,19 @@ void Global_read_lock::unlock_global_read_lock(THD *thd) thd->mdl_context.release_lock(m_mdl_blocks_commits_lock); m_mdl_blocks_commits_lock= NULL; #ifdef WITH_WSREP - wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; - wsrep->resume(wsrep); - if (!wsrep_desync && !wsrep_node_is_donor()) + if (WSREP(thd) || wsrep_node_is_donor()) { - int ret = wsrep->resync(wsrep); - if (ret != WSREP_OK) + wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; + wsrep->resume(wsrep); + if (!wsrep_desync && !wsrep_node_is_donor()) { - WSREP_WARN("resync failed %d for FTWRL: db: %s, query: %s", ret, - (thd->db ? thd->db : "(null)"), thd->query()); - DBUG_VOID_RETURN; + int ret = wsrep->resync(wsrep); + if (ret != WSREP_OK) + { + WSREP_WARN("resync failed %d for FTWRL: db: %s, query: %s", ret, + (thd->db ? thd->db : "(null)"), thd->query()); + DBUG_VOID_RETURN; + } } } #endif /* WITH_WSREP */ -- cgit v1.2.1 From fe6ebb657ea02fcb5993ca5d503161056c5a5b86 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Tue, 1 Mar 2016 08:32:06 -0800 Subject: Refs: MW-252 MTR tests for FTWRL and desync --- mysql-test/suite/galera/r/MW-252.result | 7 ++++ .../suite/galera/r/galera_as_slave_nonprim.result | 2 +- .../suite/galera/r/galera_gcs_fc_limit.result | 2 +- .../suite/galera/r/galera_var_desync_on.result | 2 ++ mysql-test/suite/galera/t/MW-252.test | 41 ++++++++++++++++++++++ .../suite/galera/t/galera_as_slave_nonprim.test | 2 +- mysql-test/suite/galera/t/galera_gcs_fc_limit.test | 2 +- .../suite/galera/t/galera_var_desync_on.test | 4 +++ 8 files changed, 58 insertions(+), 4 deletions(-) create mode 100644 mysql-test/suite/galera/r/MW-252.result create mode 100644 mysql-test/suite/galera/t/MW-252.test diff --git a/mysql-test/suite/galera/r/MW-252.result b/mysql-test/suite/galera/r/MW-252.result new file mode 100644 index 00000000000..c422edcb82a --- /dev/null +++ b/mysql-test/suite/galera/r/MW-252.result @@ -0,0 +1,7 @@ +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +FLUSH TABLES WITH READ LOCK; +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +VARIABLE_VALUE = 2 +1 +UNLOCK TABLES; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_as_slave_nonprim.result b/mysql-test/suite/galera/r/galera_as_slave_nonprim.result index fd20f8db2f9..365ea31f292 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_nonprim.result +++ b/mysql-test/suite/galera/r/galera_as_slave_nonprim.result @@ -12,7 +12,7 @@ STOP SLAVE; RESET SLAVE ALL; CALL mtr.add_suppression("Slave SQL: Error 'Unknown command' on query"); CALL mtr.add_suppression("Slave: Unknown command Error_code: 1047"); -CALL mtr.add_suppression("Send action {\\(nil\\), 328, TORDERED} returned -107 \\(Transport endpoint is not connected\\)"); +CALL mtr.add_suppression("Transport endpoint is not connected"); CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed, 'Deadlock found when trying to get lock; try restarting transaction', Error_code: 1213"); CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047"); RESET MASTER; diff --git a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result index ad60ead4b8a..9463b5f8eef 100644 --- a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result +++ 
b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result @@ -4,7 +4,7 @@ SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1'; -FLUSH TABLES WITH READ LOCK; +LOCK TABLE t1 WRITE; INSERT INTO t1 VALUES (2); INSERT INTO t1 VALUES (3); INSERT INTO t1 VALUES (4); diff --git a/mysql-test/suite/galera/r/galera_var_desync_on.result b/mysql-test/suite/galera/r/galera_var_desync_on.result index 0b5f34688b7..f286ae72308 100644 --- a/mysql-test/suite/galera/r/galera_var_desync_on.result +++ b/mysql-test/suite/galera/r/galera_var_desync_on.result @@ -26,4 +26,6 @@ INSERT INTO t1 VALUES (11); SELECT COUNT(*) = 11 FROM t1; COUNT(*) = 11 1 +CALL mtr.add_suppression("Protocol violation"); DROP TABLE t1; +CALL mtr.add_suppression("Protocol violation"); diff --git a/mysql-test/suite/galera/t/MW-252.test b/mysql-test/suite/galera/t/MW-252.test new file mode 100644 index 00000000000..3137aea7011 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-252.test @@ -0,0 +1,41 @@ +# +# MW-252 - Check that FTWRL causes the node to become desynced +# and not subject to flow control +# + +--source include/galera_cluster.inc + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; + +FLUSH TABLES WITH READ LOCK; + +# Node #1 is now desynced +--let $wait_condition = SELECT VARIABLE_VALUE = 'Donor/Desynced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment' +--source include/wait_condition.inc + +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; + +# Node #2 can issue updates without flow control kicking in +--connection node_2 + +--let $count = 100 +--disable_query_log +while ($count) +{ + INSERT INTO t1 VALUES (1); + --dec $count +} +--enable_query_log + +# Restore cluster +--connection node_1 +UNLOCK TABLES; + +--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment' +--source include/wait_condition.inc + +--let $wait_condition = SELECT COUNT(*) = 100 FROM t1 +--source include/wait_condition.inc + +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test index 5914e52d851..46a93458271 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test +++ b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test @@ -87,7 +87,7 @@ RESET SLAVE ALL; CALL mtr.add_suppression("Slave SQL: Error 'Unknown command' on query"); CALL mtr.add_suppression("Slave: Unknown command Error_code: 1047"); -CALL mtr.add_suppression("Send action {\\(nil\\), 328, TORDERED} returned -107 \\(Transport endpoint is not connected\\)"); +CALL mtr.add_suppression("Transport endpoint is not connected"); CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed, 'Deadlock found when trying to get lock; try restarting transaction', Error_code: 1213"); CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047"); diff --git a/mysql-test/suite/galera/t/galera_gcs_fc_limit.test b/mysql-test/suite/galera/t/galera_gcs_fc_limit.test index fd77ec0a0eb..721d84ecb05 100644 --- a/mysql-test/suite/galera/t/galera_gcs_fc_limit.test +++ b/mysql-test/suite/galera/t/galera_gcs_fc_limit.test @@ -16,7 +16,7 @@ SELECT COUNT(*) = 1 FROM t1; SET GLOBAL wsrep_provider_options = 'gcs.fc_limit=1'; # Block the slave applier thread -FLUSH TABLES WITH READ LOCK; +LOCK TABLE t1 WRITE; --connection node_1 diff 
--git a/mysql-test/suite/galera/t/galera_var_desync_on.test b/mysql-test/suite/galera/t/galera_var_desync_on.test index fb0fb9f762a..06c5d30a769 100644 --- a/mysql-test/suite/galera/t/galera_var_desync_on.test +++ b/mysql-test/suite/galera/t/galera_var_desync_on.test @@ -55,4 +55,8 @@ INSERT INTO t1 VALUES (11); # Replication continues normally SELECT COUNT(*) = 11 FROM t1; +CALL mtr.add_suppression("Protocol violation"); DROP TABLE t1; + +--connection node_1 +CALL mtr.add_suppression("Protocol violation"); -- cgit v1.2.1 From 65cf1d354a5089ed3df328db69c79bd87b891278 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Thu, 11 Aug 2016 22:28:57 -0400 Subject: Refs: MW-252 Test fix post-merge --- mysql-test/suite/galera/t/MW-252.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/suite/galera/t/MW-252.test b/mysql-test/suite/galera/t/MW-252.test index 3137aea7011..dfb82e8070a 100644 --- a/mysql-test/suite/galera/t/MW-252.test +++ b/mysql-test/suite/galera/t/MW-252.test @@ -4,6 +4,7 @@ # --source include/galera_cluster.inc +--source include/have_innodb.inc --connection node_1 CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; -- cgit v1.2.1 From 8b998a48ccee617b7eb1232cf2793d6da67ccead Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 12 Aug 2016 12:56:41 -0400 Subject: Update galera version-dependent tests. --- mysql-test/suite/galera/r/galera_defaults.result | 5 +++-- mysql-test/suite/galera/t/galera_defaults.test | 2 +- mysql-test/suite/wsrep/r/variables.result | 2 ++ mysql-test/suite/wsrep/t/variables.test | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result index 3b89ccb7dbe..a4e6d31d406 100644 --- a/mysql-test/suite/galera/r/galera_defaults.result +++ b/mysql-test/suite/galera/r/galera_defaults.result @@ -47,12 +47,12 @@ WSREP_SST_DONOR WSREP_SST_DONOR_REJECTS_QUERIES OFF WSREP_SST_METHOD rsync WSREP_SYNC_WAIT 7 -; ; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; ; gcache.keep_pages_size = 0; gcache.mem_size = 0; ; gcache.page_size = 128M; gcache.size = 10M; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; ; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; ; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; +; ; ; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; 
evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; ; gcache.keep_pages_size = 0; gcache.mem_size = 0; ; gcache.page_size = 128M; gcache.size = 10M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; ; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; ; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992; SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_%' AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'; COUNT(*) -57 +58 SELECT VARIABLE_NAME FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_%' AND VARIABLE_NAME != 'wsrep_debug_sync_waiters' @@ -73,6 +73,7 @@ WSREP_COMMIT_OOOE WSREP_COMMIT_OOOL WSREP_COMMIT_WINDOW WSREP_CONNECTED +WSREP_DESYNC_COUNT WSREP_EVS_DELAYED WSREP_EVS_EVICT_LIST WSREP_EVS_REPL_LATENCY diff --git a/mysql-test/suite/galera/t/galera_defaults.test b/mysql-test/suite/galera/t/galera_defaults.test index 9897d0d55dc..87eddedafe9 100644 --- a/mysql-test/suite/galera/t/galera_defaults.test +++ b/mysql-test/suite/galera/t/galera_defaults.test @@ -13,7 +13,7 @@ # Make sure that the test is operating on the right version of galera library. 
--disable_query_log ---let $galera_version=3.9 +--let $galera_version=25.3.17 source ../wsrep/include/check_galera_version.inc; --enable_query_log diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result index 9c047177ca8..ab2692bed1e 100644 --- a/mysql-test/suite/wsrep/r/variables.result +++ b/mysql-test/suite/wsrep/r/variables.result @@ -60,6 +60,7 @@ wsrep_cert_index_size # wsrep_causal_reads # wsrep_cert_interval # wsrep_incoming_addresses # +wsrep_debug_sync_waiters # wsrep_cluster_conf_id # wsrep_cluster_size # wsrep_cluster_state_uuid # @@ -115,6 +116,7 @@ wsrep_cert_index_size # wsrep_causal_reads # wsrep_cert_interval # wsrep_incoming_addresses # +wsrep_debug_sync_waiters # wsrep_cluster_conf_id # wsrep_cluster_size # wsrep_cluster_state_uuid # diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test index 6922e2d1444..19e900b3f78 100644 --- a/mysql-test/suite/wsrep/t/variables.test +++ b/mysql-test/suite/wsrep/t/variables.test @@ -28,7 +28,7 @@ CALL mtr.add_suppression("WSREP: Could not open saved state file for reading.*") --disable_query_log eval SET GLOBAL wsrep_provider= '$WSREP_PROVIDER'; ---let $galera_version=3.9 +--let $galera_version=25.3.17 source include/check_galera_version.inc; --enable_query_log -- cgit v1.2.1 From f3444c4a436ecab23b535bb2ef24e02298aa7ae9 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 4 Mar 2016 14:20:58 +0200 Subject: Bump WSREP_PATCH_VERSION to 14 --- cmake/wsrep.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index 61e41778166..6c256471115 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -18,7 +18,7 @@ # so WSREP_VERSION is produced regardless # Set the patch version -SET(WSREP_PATCH_VERSION "13") +SET(WSREP_PATCH_VERSION "14") # MariaDB addition: Revision number of the last revision merged from # codership branch visible in @@visible_comment. -- cgit v1.2.1 From d246630d739717a58d9b1c33705ce7f1d8504b43 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Mon, 7 Mar 2016 23:34:03 +0200 Subject: Refs MW-252 - changed the condition for when to do implicit desync as part of FTWRL to cover only the case when the node is PC and synced. A donor node has already desynced, and the other states mean that the node is not in the cluster, so desync is not even possible.
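The effect is observable from plain SQL; a minimal sketch, assuming a healthy two-node cluster like the one set up by MW-252.test above (the status values are the ones that test waits for; exact timing may require polling as the test does):

  # on the node taking the global read lock
  FLUSH TABLES WITH READ LOCK;
  # the node desyncs itself and eventually reports Donor/Desynced
  SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
  WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
  UNLOCK TABLES;
  # after the implicit resync the node returns to Synced
  SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS
  WHERE VARIABLE_NAME = 'wsrep_local_state_comment';

While the lock is held, the other node can keep committing without flow control kicking in, which is what MW-252.test exercises with its INSERT loop.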
--- sql/lock.cc | 9 ++++++--- sql/wsrep_mysqld.cc | 4 ++++ sql/wsrep_mysqld.h | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/sql/lock.cc b/sql/lock.cc index fea28753739..7153b85d740 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1073,7 +1073,8 @@ void Global_read_lock::unlock_global_read_lock(THD *thd) { wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED; wsrep->resume(wsrep); - if (!wsrep_desync && !wsrep_node_is_donor()) + /* resync here only if we did implicit desync earlier */ + if (!wsrep_desync && wsrep_node_is_synced()) { int ret = wsrep->resync(wsrep); if (ret != WSREP_OK) @@ -1149,8 +1150,10 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd) DBUG_RETURN(FALSE); } - /* if already desynced or donor, avoid double desyncing */ - if (wsrep_desync || wsrep_node_is_donor()) + /* if already desynced or donor, avoid double desyncing + if not in PC and synced, desyncing is not possible either + */ + if (wsrep_desync || !wsrep_node_is_synced()) { WSREP_DEBUG("desync set upfont, skipping implicit desync for FTWRL: %d", wsrep_desync); diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 734406080e9..e392aef32eb 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -1592,3 +1592,7 @@ bool wsrep_node_is_donor() { return (WSREP_ON) ? (local_status.get() == 2) : false; } +bool wsrep_node_is_synced() +{ + return (WSREP_ON) ? (local_status.get() == 4) : false; +} diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 514c1f32cff..f499443ca2c 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -331,4 +331,5 @@ void wsrep_init_sidno(const wsrep_uuid_t&); #endif /* GTID_SUPPORT */ bool wsrep_node_is_donor(); +bool wsrep_node_is_synced(); #endif /* WSREP_MYSQLD_H */ -- cgit v1.2.1 From 4e4ad17163709a50314cbf72d72cec2596467513 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Tue, 8 Mar 2016 18:10:21 +0200 Subject: Refs MW-255 - popping PS reprepare observer before BF aborted PS replaying begins; a dangling observer will cause failure in open_table() later on - test case for this anomaly --- .../galera/r/galera_transaction_replay.result | 27 +++++++++++ .../suite/galera/t/galera_transaction_replay.test | 53 ++++++++++++++++++++++ sql/wsrep_thd.cc | 9 ++++ 3 files changed, 89 insertions(+) diff --git a/mysql-test/suite/galera/r/galera_transaction_replay.result b/mysql-test/suite/galera/r/galera_transaction_replay.result index bfafa506fe6..eec9ba03ef5 100644 --- a/mysql-test/suite/galera/r/galera_transaction_replay.result +++ b/mysql-test/suite/galera/r/galera_transaction_replay.result @@ -30,3 +30,30 @@ SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; COUNT(*) = 1 1 DROP TABLE t1; +CREATE TABLE t1 (i int primary key, j int) ENGINE=INNODB; +INSERT INTO t1 VALUES (1, 0), (3, 0); +SELECT * FROM t1; +i j +1 0 +3 0 +PREPARE stmt1 FROM "UPDATE t1 SET j = 1 where i > 0"; +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +EXECUTE stmt1;; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +INSERT INTO t1 VALUES(2,2); +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SELECT * FROM t1; +i j +1 1 +2 2 +3 1 +SELECT * FROM t1; +i j +1 1 +2 2 +3 1 +DEALLOCATE PREPARE stmt1; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_transaction_replay.test b/mysql-test/suite/galera/t/galera_transaction_replay.test index bd5288a51c6..8e9bfa4c449 100644 --- a/mysql-test/suite/galera/t/galera_transaction_replay.test +++ 
b/mysql-test/suite/galera/t/galera_transaction_replay.test @@ -69,3 +69,56 @@ SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b'; SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c'; DROP TABLE t1; + +#echo "# test for PS replaying" + +# +# test replaying of prepared statements +# +--connection node_1 +CREATE TABLE t1 (i int primary key, j int) ENGINE=INNODB; +INSERT INTO t1 VALUES (1, 0), (3, 0); +SELECT * FROM t1; + +PREPARE stmt1 FROM "UPDATE t1 SET j = 1 where i > 0"; + +# block the commit of PS +--connection node_1a +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +--connection node_1 +--send EXECUTE stmt1; + +# Wait until commit is blocked +--connection node_1a +SET SESSION wsrep_sync_wait = 0; +--source include/galera_wait_sync_point.inc + +# Issue a conflicting update on node_2 +--connection node_2 +#UPDATE t1 SET j=2; +INSERT INTO t1 VALUES(2,2); + + +# Wait until applying begins in node_1 +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Write_rows_log_event::write_row%'; +--source include/wait_condition.inc + +# Unblock the PS commit +--connection node_1a +--source include/galera_clear_sync_point.inc +--source include/galera_signal_sync_point.inc + +# Commit succeeds +--connection node_1 +--reap +SELECT * FROM t1; + +--connection node_2 +SELECT * FROM t1; + +--connection node_1 +DEALLOCATE PREPARE stmt1; +DROP TABLE t1; diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 9c2fa4ba856..09ffdbd54f5 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -185,6 +185,7 @@ static void wsrep_return_from_bf_mode(THD *thd, struct wsrep_thd_shadow* shadow) void wsrep_replay_transaction(THD *thd) { + DBUG_ENTER("wsrep_replay_transaction"); /* checking if BF trx must be replayed */ if (thd->wsrep_conflict_state== MUST_REPLAY) { DBUG_ASSERT(wsrep_thd_trx_seqno(thd)); @@ -193,6 +194,13 @@ void wsrep_replay_transaction(THD *thd) { WSREP_ERROR("replay issue, thd has reported status already"); } + + /* + PS reprepare observer should have been removed already. + open_table() will fail if we have dangling observer here. + */ + DBUG_ASSERT(thd->m_reprepare_observer == NULL); + thd->get_stmt_da()->reset_diagnostics_area(); thd->wsrep_conflict_state= REPLAYING; @@ -299,6 +307,7 @@ void wsrep_replay_transaction(THD *thd) mysql_mutex_unlock(&LOCK_wsrep_replaying); } } + DBUG_VOID_RETURN; } static void wsrep_replication_process(THD *thd) -- cgit v1.2.1 From b758e9238aac8d8b167621a9787b6d2dd92cb082 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 12 Aug 2016 13:42:12 -0400 Subject: Fix galera_transaction_replay.test. 
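For context, the replay tests touched here drive the server through provider-level dbug sync points. The rough shape of that pattern, as used in galera_transaction_replay.test above (stmt1, the connection names and the include helpers all come from that test and the galera suite):

  --connection node_1a
  --let $galera_sync_point = commit_monitor_enter_sync
  --source include/galera_set_sync_point.inc
  --connection node_1
  --send EXECUTE stmt1
  --connection node_1a
  --source include/galera_wait_sync_point.inc
  # issue a conflicting write from node_2 here, then wait for the
  # applier to appear in INFORMATION_SCHEMA.PROCESSLIST
  --source include/galera_clear_sync_point.inc
  --source include/galera_signal_sync_point.inc
  --connection node_1
  --reap

The wait-condition change below only adjusts which PROCESSLIST state the test polls for while the applier is at work.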
--- mysql-test/suite/galera/t/galera_transaction_replay.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/t/galera_transaction_replay.test b/mysql-test/suite/galera/t/galera_transaction_replay.test index 8e9bfa4c449..29870829ba3 100644 --- a/mysql-test/suite/galera/t/galera_transaction_replay.test +++ b/mysql-test/suite/galera/t/galera_transaction_replay.test @@ -40,7 +40,7 @@ UPDATE t1 SET f2 = 'c' WHERE f1 = 2; # Wait for both transactions to be blocked --connection node_1a ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'System lock'; +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event::find_row%'; --source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT'; -- cgit v1.2.1 From a00f4b29b5a50c46641fa522c2b3235fe72ae697 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Tue, 15 Mar 2016 03:38:31 -0700 Subject: Refs codership/galera#105 An MTR test for ist.recv_bind --- .../suite/galera/r/galera_ist_recv_bind.result | 13 ++++++ mysql-test/suite/galera/t/galera_ist_recv_bind.cnf | 8 ++++ .../suite/galera/t/galera_ist_recv_bind.test | 53 ++++++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 mysql-test/suite/galera/r/galera_ist_recv_bind.result create mode 100644 mysql-test/suite/galera/t/galera_ist_recv_bind.cnf create mode 100644 mysql-test/suite/galera/t/galera_ist_recv_bind.test diff --git a/mysql-test/suite/galera/r/galera_ist_recv_bind.result b/mysql-test/suite/galera/r/galera_ist_recv_bind.result new file mode 100644 index 00000000000..de4e07fbe41 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_ist_recv_bind.result @@ -0,0 +1,13 @@ +SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'; +@@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%' +1 +SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'; +@@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%' +1 +SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1'; +SET SESSION wsrep_on = OFF; +SET SESSION wsrep_on = ON; +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); +SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0'; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_ist_recv_bind.cnf b/mysql-test/suite/galera/t/galera_ist_recv_bind.cnf new file mode 100644 index 00000000000..2628f05eaef --- /dev/null +++ b/mysql-test/suite/galera/t/galera_ist_recv_bind.cnf @@ -0,0 +1,8 @@ +!include ../galera_2nodes.cnf + +[mysqld.1] +wsrep_provider_options='base_port=@mysqld.1.#galera_port;ist.recv_bind=127.0.0.1;pc.ignore_sb=true' + +[mysqld.2] +wsrep_provider_options='base_port=@mysqld.2.#galera_port;ist.recv_bind=127.0.0.1' + diff --git a/mysql-test/suite/galera/t/galera_ist_recv_bind.test b/mysql-test/suite/galera/t/galera_ist_recv_bind.test new file mode 100644 index 00000000000..c04238d6cca --- /dev/null +++ b/mysql-test/suite/galera/t/galera_ist_recv_bind.test @@ -0,0 +1,53 @@ +# +# Test ist.recv_bind option. 
Since MTR can not do proper testing with multiple interfaces and such, we +# simply confirm that the option can be set (in the galera_ist_recv_bind.cnf file) and that IST works as expected +# + +--source include/galera_cluster.inc + +--connection node_1 +SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'; + +--connection node_2 +SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'; + +# Isolate node #2 + +SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1'; +--connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +--connection node_2 +SET SESSION wsrep_on = OFF; +--let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; +--source include/wait_condition.inc +SET SESSION wsrep_on = ON; + +# Node #2 is now isolated. Run some transactions to accumulate writesets for IST + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1); + +# Restore node #2 + +--connection node_2 +SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0'; + +--connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +--connection node_2 +--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; +--source include/wait_condition.inc + +# Confirm that IST has taken place + +--let $wait_condition = SELECT COUNT(*) = 1 FROM t1; +--source include/wait_condition.inc + +# Cleanup + +DROP TABLE t1; -- cgit v1.2.1 From 90d92d2b49eb54ea3c8d17e7db2525f20e475e94 Mon Sep 17 00:00:00 2001 From: Alexey Yurchenko Date: Sat, 2 Apr 2016 21:51:26 -0300 Subject: MW-258 - RSU DDL should not rely on the global wsrep_desync variable value and should always try to desync on its own. 
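A minimal SQL sketch of the intended behaviour, using the table name and the wsrep_desync_count status variable from the MW-258 test added below:

  SET SESSION wsrep_OSU_method = RSU;
  # the DDL now desyncs and resyncs the node on its own,
  # independently of the global wsrep_desync setting
  ALTER TABLE t1 ADD COLUMN f2 INTEGER;
  # non-zero only while RSU DDL is in flight
  SHOW STATUS LIKE 'wsrep_desync_count';
  # the user-visible variable itself is left untouched
  SHOW VARIABLES LIKE 'wsrep_desync';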
--- mysql-test/suite/galera/r/MW-258.result | 42 +++++++++++++++++++++++++++++ mysql-test/suite/galera/t/MW-258.test | 43 ++++++++++++++++++++++++++++++ sql/wsrep_mysqld.cc | 47 +++++++++++++-------------------- 3 files changed, 104 insertions(+), 28 deletions(-) create mode 100644 mysql-test/suite/galera/r/MW-258.result create mode 100644 mysql-test/suite/galera/t/MW-258.test diff --git a/mysql-test/suite/galera/r/MW-258.result b/mysql-test/suite/galera/r/MW-258.result new file mode 100644 index 00000000000..28b1e4049ab --- /dev/null +++ b/mysql-test/suite/galera/r/MW-258.result @@ -0,0 +1,42 @@ +CREATE TABLE t1 (f1 INTEGER); +LOCK TABLE t1 WRITE; +value prior to RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +Variable_name Value +wsrep_desync_count 0 +SHOW VARIABLES LIKE 'wsrep_desync'; +Variable_name Value +wsrep_desync OFF +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_osu_method = RSU; +ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_osu_method = RSU; +ALTER TABLE t1 ADD COLUMN f3 INTEGER;; +value during RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +Variable_name Value +wsrep_desync_count 2 +SHOW VARIABLES LIKE 'wsrep_desync'; +Variable_name Value +wsrep_desync OFF +SHOW PROCESSLIST; +Id User Host db Command Time State Info Progress +# system user # NULL Sleep # NULL NULL 0.000 +# system user # NULL Sleep # wsrep aborter idle NULL 0.000 +# root # test Sleep # NULL 0.000 +# root # test Query # init SHOW PROCESSLIST 0.000 +# root # test Query # Waiting for table metadata lock ALTER TABLE t1 ADD COLUMN f2 INTEGER 0.000 +# root # test Query # checking permissions ALTER TABLE t1 ADD COLUMN f3 INTEGER 0.000 +UNLOCK TABLES; +value after RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +Variable_name Value +wsrep_desync_count 0 +SHOW VARIABLES LIKE 'wsrep_desync'; +Variable_name Value +wsrep_desync OFF +SET GLOBAL wsrep_desync=0; +Warnings: +Warning 1231 'wsrep_desync' is already OFF. 
+DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MW-258.test b/mysql-test/suite/galera/t/MW-258.test new file mode 100644 index 00000000000..7745ef5ea9f --- /dev/null +++ b/mysql-test/suite/galera/t/MW-258.test @@ -0,0 +1,43 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER); +LOCK TABLE t1 WRITE; +--echo value prior to RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +SHOW VARIABLES LIKE 'wsrep_desync'; + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1a +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_osu_method = RSU; +--send ALTER TABLE t1 ADD COLUMN f2 INTEGER; + +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1b +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_osu_method = RSU; +--send ALTER TABLE t1 ADD COLUMN f3 INTEGER; + +--sleep 5 +--connection node_1 +--echo value during RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +SHOW VARIABLES LIKE 'wsrep_desync'; +--replace_column 1 # 3 # 6 # +SHOW PROCESSLIST; +UNLOCK TABLES; + +--connection node_1a +--reap +--connection node_1b +--reap + +--connection node_1 +--echo value after RSU: +SHOW STATUS LIKE 'wsrep_desync_count'; +SHOW VARIABLES LIKE 'wsrep_desync'; +SET GLOBAL wsrep_desync=0; + +DROP TABLE t1; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index e392aef32eb..17e3577a6eb 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -1308,19 +1308,15 @@ static int wsrep_RSU_begin(THD *thd, char *db_, char *table_) WSREP_DEBUG("RSU BEGIN: %lld, %d : %s", (long long)wsrep_thd_trx_seqno(thd), thd->wsrep_exec_mode, thd->query() ); - if (!wsrep_desync) + ret = wsrep->desync(wsrep); + if (ret != WSREP_OK) { - ret = wsrep->desync(wsrep); - if (ret != WSREP_OK) - { - WSREP_WARN("RSU desync failed %d for schema: %s, query: %s", - ret, (thd->db ? thd->db : "(null)"), thd->query()); - my_error(ER_LOCK_DEADLOCK, MYF(0)); - return(ret); - } + WSREP_WARN("RSU desync failed %d for schema: %s, query: %s", + ret, (thd->db ? thd->db : "(null)"), thd->query()); + my_error(ER_LOCK_DEADLOCK, MYF(0)); + return(ret); } - else - WSREP_DEBUG("RSU desync skipped: %d", wsrep_desync); + mysql_mutex_lock(&LOCK_wsrep_replaying); wsrep_replaying++; mysql_mutex_unlock(&LOCK_wsrep_replaying); @@ -1335,15 +1331,13 @@ static int wsrep_RSU_begin(THD *thd, char *db_, char *table_) wsrep_replaying--; mysql_mutex_unlock(&LOCK_wsrep_replaying); - if (!wsrep_desync) + ret = wsrep->resync(wsrep); + if (ret != WSREP_OK) { - ret = wsrep->resync(wsrep); - if (ret != WSREP_OK) - { - WSREP_WARN("resync failed %d for schema: %s, query: %s", - ret, (thd->db ? thd->db : "(null)"), thd->query()); - } + WSREP_WARN("resync failed %d for schema: %s, query: %s", + ret, (thd->db ? thd->db : "(null)"), thd->query()); } + my_error(ER_LOCK_DEADLOCK, MYF(0)); return(1); } @@ -1379,18 +1373,15 @@ static void wsrep_RSU_end(THD *thd) (thd->db ? thd->db : "(null)"), thd->query()); } - if (!wsrep_desync) + + ret = wsrep->resync(wsrep); + if (ret != WSREP_OK) { - ret = wsrep->resync(wsrep); - if (ret != WSREP_OK) - { - WSREP_WARN("resync failed %d for schema: %s, query: %s", ret, - (thd->db ? thd->db : "(null)"), thd->query()); - return; - } + WSREP_WARN("resync failed %d for schema: %s, query: %s", ret, + (thd->db ? 
thd->db : "(null)"), thd->query()); + return; } - else - WSREP_DEBUG("RSU resync skipped: %d", wsrep_desync); + thd->variables.wsrep_on = 1; } -- cgit v1.2.1 From 4582a4bccf406776702a3f866a8f21aa4daaaff9 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 12 Aug 2016 14:03:24 -0400 Subject: Fix galera_ist_recv_bind.test. --- mysql-test/suite/galera/t/galera_ist_recv_bind.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/suite/galera/t/galera_ist_recv_bind.test b/mysql-test/suite/galera/t/galera_ist_recv_bind.test index c04238d6cca..cb7329073ad 100644 --- a/mysql-test/suite/galera/t/galera_ist_recv_bind.test +++ b/mysql-test/suite/galera/t/galera_ist_recv_bind.test @@ -4,6 +4,7 @@ # --source include/galera_cluster.inc +--source include/have_innodb.inc --connection node_1 SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'; -- cgit v1.2.1 From d45b58263ddf815aa04d4dbc9255ed1081e33bdb Mon Sep 17 00:00:00 2001 From: Alexey Yurchenko Date: Sat, 2 Apr 2016 22:37:22 -0300 Subject: MW-259 - moved wsrep desync/resync calls from wsrep_desync_update() to wsrep_desync_check() method which does not hold the lock and is arguably a more fitting place to change provider state - before changing the actual variable value. --- mysql-test/suite/galera/r/MW-259.result | 12 ++++++++++ mysql-test/suite/galera/t/MW-259.test | 42 +++++++++++++++++++++++++++++++++ sql/wsrep_var.cc | 13 +++++----- 3 files changed, 61 insertions(+), 6 deletions(-) create mode 100644 mysql-test/suite/galera/r/MW-259.result create mode 100644 mysql-test/suite/galera/t/MW-259.test diff --git a/mysql-test/suite/galera/r/MW-259.result b/mysql-test/suite/galera/r/MW-259.result new file mode 100644 index 00000000000..df76e959de5 --- /dev/null +++ b/mysql-test/suite/galera/r/MW-259.result @@ -0,0 +1,12 @@ +CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; +SET GLOBAL wsrep_desync=0; +Warnings: +Warning 1231 'wsrep_desync' is already OFF. 
+SET wsrep_OSU_method=RSU; +SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; +ALTER TABLE t1 ADD COLUMN f2 INTEGER;; +SET GLOBAL wsrep_desync=1;; +SET DEBUG_SYNC= 'now SIGNAL continue'; +DROP TABLE t1; +SET GLOBAL wsrep_desync=0; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/t/MW-259.test b/mysql-test/suite/galera/t/MW-259.test new file mode 100644 index 00000000000..ff9a30deed3 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-259.test @@ -0,0 +1,42 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; + +SET GLOBAL wsrep_desync=0; +SET wsrep_OSU_method=RSU; + +SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; +--send ALTER TABLE t1 ADD COLUMN f2 INTEGER; + +--connection node_1a + +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: alter_table_before_open_tables' +--source include/wait_condition.inc + +# wsrep_desync=1 will block +--send SET GLOBAL wsrep_desync=1; + +--connection node_1b +--sleep 2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'exit open_tables()' and INFO = 'SET GLOBAL wsrep_desync=1' +--source include/wait_condition.inc + +SET DEBUG_SYNC= 'now SIGNAL continue'; +DROP TABLE t1; +SET GLOBAL wsrep_desync=0; + +--connection node_1 +--reap + +--connection node_1a +--reap + +# Cleanup +SET DEBUG_SYNC= 'RESET'; + diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 44d17e3e78a..2f13ffd6747 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -529,14 +529,10 @@ bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var) ER_WRONG_VALUE_FOR_VAR, "'wsrep_desync' is already OFF."); } + return false; } - return 0; -} - -bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) -{ wsrep_status_t ret(WSREP_WARNING); - if (wsrep_desync) { + if (new_wsrep_desync) { ret = wsrep->desync (wsrep); if (ret != WSREP_OK) { WSREP_WARN ("SET desync failed %d for schema: %s, query: %s", ret, @@ -558,6 +554,11 @@ bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) return false; } +bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) +{ + return false; +} + bool wsrep_max_ws_size_update (sys_var *self, THD *thd, enum_var_type) { char max_ws_size_opt[128]; -- cgit v1.2.1 From dda114461ecb3f8ea3448a61b3dad7d059dbdaec Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Mon, 4 Apr 2016 05:14:13 -0700 Subject: Galera MTR Tests: Fixed tests to account for GAL-391 , GAL-374 --- .../suite/galera/r/galera_rsu_wsrep_desync.result | 7 +++--- .../suite/galera/t/galera_rsu_wsrep_desync.test | 26 ++++++++++++++-------- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result index 62e327ffdee..08980389392 100644 --- a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result +++ b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result @@ -22,12 +22,11 @@ SET GLOBAL wsrep_desync=0; Warnings: Warning 1231 'wsrep_desync' is already OFF. 
SET wsrep_OSU_method=RSU; -SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; +SET DEBUG_SYNC = 'alter_table_before_create_table_no_lock WAIT_FOR continue'; ALTER TABLE t1 ADD COLUMN f2 INTEGER;; -SET GLOBAL wsrep_desync=1; -ERROR HY000: Operation 'desync' failed for SET GLOBAL wsrep_desync=1 -SET GLOBAL wsrep_desync=0; +SET GLOBAL wsrep_desync=1;; SET DEBUG_SYNC= 'now SIGNAL continue'; +SET GLOBAL wsrep_desync=0; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test b/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test index 36ec8563cbe..dc7ff11a9f5 100644 --- a/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test +++ b/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test @@ -17,9 +17,11 @@ SET wsrep_OSU_method=RSU; SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; --send ALTER TABLE t1 ADD COLUMN f2 INTEGER; + --connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 ---connection node_1a +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1a --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: alter_table_before_open_tables' --source include/wait_condition.inc @@ -44,24 +46,32 @@ CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; SET GLOBAL wsrep_desync=0; SET wsrep_OSU_method=RSU; -SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; +SET DEBUG_SYNC = 'alter_table_before_create_table_no_lock WAIT_FOR continue'; --send ALTER TABLE t1 ADD COLUMN f2 INTEGER; --connection node_1a ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: alter_table_before_open_tables' +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: alter_table_before_create_table_no_lock' --source include/wait_condition.inc -# This transition is currently not allowed ---error ER_CANNOT_USER -SET GLOBAL wsrep_desync=1; -SET GLOBAL wsrep_desync=0; +# wsrep_desync=1 will block +--send SET GLOBAL wsrep_desync=1; + + +--connection node_1b +--sleep 2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'exit open_tables()' and INFO = 'SET GLOBAL wsrep_desync=1' +--source include/wait_condition.inc SET DEBUG_SYNC= 'now SIGNAL continue'; --connection node_1 --reap +--connection node_1a +--reap +SET GLOBAL wsrep_desync=0; + SHOW CREATE TABLE t1; # Restore old state @@ -74,5 +84,3 @@ CALL mtr.add_suppression("desync failed"); --connection node_2 CALL mtr.add_suppression("Protocol violation"); - - -- cgit v1.2.1 From fce9217c21527938d279183fb05891505e79b25b Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Mon, 4 Apr 2016 05:32:50 -0700 Subject: Galera MTR Test: Fix for MW-258.test - do not use SHOW PROCESSLIST --- mysql-test/suite/galera/r/MW-258.result | 8 -------- mysql-test/suite/galera/t/MW-258.test | 2 -- 2 files changed, 10 deletions(-) diff --git a/mysql-test/suite/galera/r/MW-258.result b/mysql-test/suite/galera/r/MW-258.result index 28b1e4049ab..1b4d4ae0de8 100644 --- a/mysql-test/suite/galera/r/MW-258.result +++ b/mysql-test/suite/galera/r/MW-258.result @@ -20,14 +20,6 @@ wsrep_desync_count 2 SHOW VARIABLES LIKE 'wsrep_desync'; Variable_name Value wsrep_desync OFF -SHOW PROCESSLIST; -Id User Host db Command Time State Info Progress -# system user # NULL Sleep # NULL NULL 0.000 -# system user # NULL Sleep # wsrep aborter idle NULL 0.000 -# root # 
test Sleep # NULL 0.000 -# root # test Query # init SHOW PROCESSLIST 0.000 -# root # test Query # Waiting for table metadata lock ALTER TABLE t1 ADD COLUMN f2 INTEGER 0.000 -# root # test Query # checking permissions ALTER TABLE t1 ADD COLUMN f3 INTEGER 0.000 UNLOCK TABLES; value after RSU: SHOW STATUS LIKE 'wsrep_desync_count'; diff --git a/mysql-test/suite/galera/t/MW-258.test b/mysql-test/suite/galera/t/MW-258.test index 7745ef5ea9f..f5519f8a081 100644 --- a/mysql-test/suite/galera/t/MW-258.test +++ b/mysql-test/suite/galera/t/MW-258.test @@ -25,8 +25,6 @@ SET SESSION wsrep_osu_method = RSU; --echo value during RSU: SHOW STATUS LIKE 'wsrep_desync_count'; SHOW VARIABLES LIKE 'wsrep_desync'; ---replace_column 1 # 3 # 6 # -SHOW PROCESSLIST; UNLOCK TABLES; --connection node_1a -- cgit v1.2.1 From 9b42f09902f63249cc14abb173513cf9474e3408 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Mon, 4 Apr 2016 07:09:32 -0700 Subject: Galera MTR Tests: Add test for GAL-382, codership/galera#382 - InnoDB: Failing assertion: xid_seqno > trx_sys_cur_xid_seqno in trx0sys.cc line 356 --- mysql-test/suite/galera/r/GAL-382.result | 6 ++++++ mysql-test/suite/galera/t/GAL-382.test | 15 +++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 mysql-test/suite/galera/r/GAL-382.result create mode 100644 mysql-test/suite/galera/t/GAL-382.test diff --git a/mysql-test/suite/galera/r/GAL-382.result b/mysql-test/suite/galera/r/GAL-382.result new file mode 100644 index 00000000000..0c7365f3005 --- /dev/null +++ b/mysql-test/suite/galera/r/GAL-382.result @@ -0,0 +1,6 @@ +create table t1 (i int, j int, k int, primary key pk(i)) engine=innodb; +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +create table t2 (i int, j int, k int, primary key pk(i, j, k), index idx(i, k, j)) engine=innodb; +replace into t2 (i, j, k) select /*!99997 */ i, k, j from t1; +DROP TABLE t1; +DROP TABLE t2; diff --git a/mysql-test/suite/galera/t/GAL-382.test b/mysql-test/suite/galera/t/GAL-382.test new file mode 100644 index 00000000000..0cc90e26118 --- /dev/null +++ b/mysql-test/suite/galera/t/GAL-382.test @@ -0,0 +1,15 @@ +# +# GAL-382 InnoDB: Failing assertion: xid_seqno > trx_sys_cur_xid_seqno in trx0sys.cc line 356 +# + +--source include/galera_cluster.inc + +--connection node_1 + +create table t1 (i int, j int, k int, primary key pk(i)) engine=innodb; +insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3); +create table t2 (i int, j int, k int, primary key pk(i, j, k), index idx(i, k, j)) engine=innodb; +replace into t2 (i, j, k) select /*!99997 */ i, k, j from t1; + +DROP TABLE t1; +DROP TABLE t2; -- cgit v1.2.1 From 3f22e743c560676e6948bb1c7f9074134c2552e5 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 15 Aug 2016 11:14:57 -0400 Subject: Fix galera/GAL-382 test post-merge. 
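Several of the post-merge fixes in this series (MW-252, GAL-382, MW-44) only add the missing have_innodb.inc guard; the skeleton these galera tests follow is roughly:

  --source include/galera_cluster.inc
  --source include/have_innodb.inc

  --connection node_1
  CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
  # ... statements under test ...
  DROP TABLE t1;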
--- mysql-test/suite/galera/t/GAL-382.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/suite/galera/t/GAL-382.test b/mysql-test/suite/galera/t/GAL-382.test index 0cc90e26118..05cc7346055 100644 --- a/mysql-test/suite/galera/t/GAL-382.test +++ b/mysql-test/suite/galera/t/GAL-382.test @@ -3,6 +3,7 @@ # --source include/galera_cluster.inc +--source include/have_innodb.inc --connection node_1 -- cgit v1.2.1 From f49500a80337cad9aec1f5c9cf8caf7ace927dc1 Mon Sep 17 00:00:00 2001 From: Teemu Ollakka Date: Tue, 5 Apr 2016 14:08:39 +0300 Subject: MW-44 Disable general log for applier threads --- sql/wsrep_thd.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 09ffdbd54f5..5e530e84d43 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -139,6 +139,9 @@ static void wsrep_prepare_bf_thd(THD *thd, struct wsrep_thd_shadow* shadow) shadow->wsrep_exec_mode = thd->wsrep_exec_mode; shadow->vio = thd->net.vio; + // Disable general logging on applier threads + thd->variables.option_bits |= OPTION_LOG_OFF; + // Enable binlogging if opt_log_slave_updates is set if (opt_log_slave_updates) thd->variables.option_bits|= OPTION_BIN_LOG; else -- cgit v1.2.1 From 675bcf3b6d654861d7f9ca0279a3f56847587696 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 14 Apr 2016 01:03:37 -0700 Subject: Galera MTR Tests: A test for MW-44 - Disable general log for applier threads --- mysql-test/suite/galera/r/MW-44.result | 14 ++++++++++++++ mysql-test/suite/galera/t/MW-44-master.opt | 1 + mysql-test/suite/galera/t/MW-44.test | 24 ++++++++++++++++++++++++ 3 files changed, 39 insertions(+) create mode 100644 mysql-test/suite/galera/r/MW-44.result create mode 100644 mysql-test/suite/galera/t/MW-44-master.opt create mode 100644 mysql-test/suite/galera/t/MW-44.test diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result new file mode 100644 index 00000000000..28a6f1ac8dd --- /dev/null +++ b/mysql-test/suite/galera/r/MW-44.result @@ -0,0 +1,14 @@ +TRUNCATE TABLE mysql.general_log; +TRUNCATE TABLE mysql.general_log; +SET SESSION wsrep_osu_method=TOI; +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +SET SESSION wsrep_osu_method=RSU; +ALTER TABLE t1 ADD COLUMN f2 INTEGER; +SET SESSION wsrep_osu_method=TOI; +SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%'; +COUNT(*) = 2 +1 +SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%'; +COUNT(*) = 0 +1 +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MW-44-master.opt b/mysql-test/suite/galera/t/MW-44-master.opt new file mode 100644 index 00000000000..a15aa0a99d9 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-44-master.opt @@ -0,0 +1 @@ +--log-output=TABLE diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test new file mode 100644 index 00000000000..843d33ae525 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-44.test @@ -0,0 +1,24 @@ +# +# MW-44: DDL is logged in the general_log on the slave +# + +--source include/galera_cluster.inc + +--connection node_1 +TRUNCATE TABLE mysql.general_log; + +--connection node_2 +TRUNCATE TABLE mysql.general_log; + +--connection node_1 +SET SESSION wsrep_osu_method=TOI; +CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; +SET SESSION wsrep_osu_method=RSU; +ALTER TABLE t1 ADD COLUMN f2 INTEGER; +SET SESSION wsrep_osu_method=TOI; + +SELECT COUNT(*) = 2 FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%'; + +--connection node_2 
+SELECT COUNT(*) = 0 FROM mysql.general_log WHERE argument NOT LIKE 'SELECT%'; +DROP TABLE t1; -- cgit v1.2.1 From 182787f39e8f73781caeb6eaf536697fb747c9a0 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 14 Apr 2016 01:25:54 -0700 Subject: Galera MTR Tests: Adjust galera_log_output_csv.test to account for the fix for MW-44 --- mysql-test/suite/galera/r/galera_log_output_csv.result | 3 --- mysql-test/suite/galera/t/galera_log_output_csv.test | 3 --- 2 files changed, 6 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_log_output_csv.result b/mysql-test/suite/galera/r/galera_log_output_csv.result index 07a78469578..cdb5ee49f3e 100644 --- a/mysql-test/suite/galera/r/galera_log_output_csv.result +++ b/mysql-test/suite/galera/r/galera_log_output_csv.result @@ -9,9 +9,6 @@ SELECT 1 = 1 FROM t1; SELECT COUNT(*) = 1 FROM mysql.slow_log WHERE sql_text = 'SELECT 1 = 1 FROM t1'; COUNT(*) = 1 1 -SELECT COUNT(*) > 0 FROM mysql.general_log WHERE argument = 'CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB'; -COUNT(*) > 0 -1 SELECT 2 = 2 FROM t1; 2 = 2 1 diff --git a/mysql-test/suite/galera/t/galera_log_output_csv.test b/mysql-test/suite/galera/t/galera_log_output_csv.test index 00009396f6a..94ae3dd6168 100644 --- a/mysql-test/suite/galera/t/galera_log_output_csv.test +++ b/mysql-test/suite/galera/t/galera_log_output_csv.test @@ -17,9 +17,6 @@ SELECT COUNT(*) = 1 FROM mysql.slow_log WHERE sql_text = 'SELECT 1 = 1 FROM t1'; --connection node_2 -# CREATE TABLE from master is also present in the slave query log, but is logged twice, mysql-wsrep#44 -SELECT COUNT(*) > 0 FROM mysql.general_log WHERE argument = 'CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB'; - SELECT 2 = 2 FROM t1; SELECT COUNT(*) = 1 FROM mysql.slow_log WHERE sql_text = 'SELECT 2 = 2 FROM t1'; -- cgit v1.2.1 From 81174c9ab196e42b2f400d564eb5940fc888f38c Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 15 Aug 2016 11:29:48 -0400 Subject: Fix galera/MW-44 test post-merge. 
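The effect of MW-44 can also be checked directly when the general log is written to a table (MW-44-master.opt above sets --log-output=TABLE); a sketch, to be run on the node that applied rather than issued the DDL:

  # replicated DDL no longer appears in the applier's general log
  SELECT COUNT(*) FROM mysql.general_log
  WHERE argument LIKE 'CREATE%' OR argument LIKE 'ALTER%';
  # expected: 0 here, while the issuing node still logs its own DDL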
--- mysql-test/suite/galera/t/MW-44.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/suite/galera/t/MW-44.test b/mysql-test/suite/galera/t/MW-44.test index 843d33ae525..55a3fd57f80 100644 --- a/mysql-test/suite/galera/t/MW-44.test +++ b/mysql-test/suite/galera/t/MW-44.test @@ -3,6 +3,7 @@ # --source include/galera_cluster.inc +--source include/have_innodb.inc --connection node_1 TRUNCATE TABLE mysql.general_log; -- cgit v1.2.1 From db837fde87093f7b985568bd26ce57627b74e752 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Sun, 1 May 2016 23:29:55 -0700 Subject: Galera MTR Tests: Adjust tests for xtrabackup 2.4.2 --- mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test b/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test index af4a5fbf9d6..a6660bd08d1 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test +++ b/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test @@ -13,8 +13,8 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10); SELECT COUNT(*) = 10 FROM t1; --exec rm -rf $MYSQL_TMP_DIR/innobackupex_backup ---exec innobackupex $MYSQL_TMP_DIR/innobackupex_backup --galera-info --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp > $MYSQL_TMP_DIR/innobackupex-backup.log ---exec innobackupex $MYSQL_TMP_DIR/innobackupex_backup --apply-log --galera-info --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp > $MYSQL_TMP_DIR/innobackupex-apply.log +--exec innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 $MYSQL_TMP_DIR/innobackupex_backup --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp > $MYSQL_TMP_DIR/innobackupex-backup.log +--exec innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 $MYSQL_TMP_DIR/innobackupex_backup --apply-log --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp > $MYSQL_TMP_DIR/innobackupex-apply.log --source include/kill_galera.inc --sleep 1 @@ -23,7 +23,7 @@ SELECT COUNT(*) = 10 FROM t1; INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20); --exec rm -rf $MYSQLTEST_VARDIR/mysqld.2/data/* ---exec innobackupex --copy-back $MYSQL_TMP_DIR/innobackupex_backup --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --port=$NODE_MYPORT_2 --host=127.0.0.1 > $MYSQL_TMP_DIR/innobackupex-restore.log +--exec innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --copy-back $MYSQL_TMP_DIR/innobackupex_backup --port=$NODE_MYPORT_2 --host=127.0.0.1 > $MYSQL_TMP_DIR/innobackupex-restore.log # # Convert the xtrabackup_galera_info into a grastate.dat file -- cgit v1.2.1 From 137af55ca1064a605fed608572b34135823de6ac Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Tue, 17 May 2016 22:23:51 -0700 Subject: Galera MTR Tests: stability fixes --- mysql-test/suite/galera/r/galera_repl_max_ws_size.result | 4 ++++ mysql-test/suite/galera/r/galera_ssl_upgrade.result | 9 --------- mysql-test/suite/galera/t/galera_repl_max_ws_size.test | 4 ++++ mysql-test/suite/galera/t/galera_ssl_upgrade.test | 9 ++++++--- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result index 
6cfd10bdedd..70c09bda3f9 100644 --- a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result +++ b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result @@ -6,3 +6,7 @@ SELECT COUNT(*) = 0 FROM t1; COUNT(*) = 0 1 DROP TABLE t1; +CALL mtr.add_suppression("Maximum writeset size exceeded by"); +CALL mtr.add_suppression("transaction size limit"); +CALL mtr.add_suppression("transaction size exceeded"); +CALL mtr.add_suppression("rbr write fail"); diff --git a/mysql-test/suite/galera/r/galera_ssl_upgrade.result b/mysql-test/suite/galera/r/galera_ssl_upgrade.result index c0f2e84dc6f..b24671d120d 100644 --- a/mysql-test/suite/galera/r/galera_ssl_upgrade.result +++ b/mysql-test/suite/galera/r/galera_ssl_upgrade.result @@ -4,21 +4,12 @@ VARIABLE_VALUE = 'Synced' SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; -VARIABLE_VALUE = 'Synced' -1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; -VARIABLE_VALUE = 'Synced' -1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; -VARIABLE_VALUE = 'Synced' -1 SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 diff --git a/mysql-test/suite/galera/t/galera_repl_max_ws_size.test b/mysql-test/suite/galera/t/galera_repl_max_ws_size.test index 37a2f7d4ce3..60b866ae018 100644 --- a/mysql-test/suite/galera/t/galera_repl_max_ws_size.test +++ b/mysql-test/suite/galera/t/galera_repl_max_ws_size.test @@ -23,3 +23,7 @@ SELECT COUNT(*) = 0 FROM t1; DROP TABLE t1; +CALL mtr.add_suppression("Maximum writeset size exceeded by"); +CALL mtr.add_suppression("transaction size limit"); +CALL mtr.add_suppression("transaction size exceeded"); +CALL mtr.add_suppression("rbr write fail"); diff --git a/mysql-test/suite/galera/t/galera_ssl_upgrade.test b/mysql-test/suite/galera/t/galera_ssl_upgrade.test index 07aac0fbe92..a424942da30 100644 --- a/mysql-test/suite/galera/t/galera_ssl_upgrade.test +++ b/mysql-test/suite/galera/t/galera_ssl_upgrade.test @@ -18,7 +18,8 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N --source include/start_mysqld.inc --source include/wait_until_connected_again.inc -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--source include/wait_condition.inc SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; # 3. 
Restart node #2 with the new socket.ssl_ca , socket.ssl_cert and socket.ssl_key @@ -29,7 +30,8 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N --source include/start_mysqld.inc --source include/wait_until_connected_again.inc -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--source include/wait_condition.inc SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; # 4. Restart node #1 with the new socket.ssl_ca , socket.ssl_cert and socket.ssl_key @@ -40,7 +42,8 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N --source include/start_mysqld.inc --source include/wait_until_connected_again.inc -SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--let $wait_condition = SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment'; +--source include/wait_condition.inc SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; # Upgrade complete. Both nodes now use the new key and certificate -- cgit v1.2.1 From 92162e6d8761d8586299cfd88682a7704df2d7fa Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Wed, 18 May 2016 11:07:58 +0200 Subject: MW-175 Fix definitively lost memory in wsrep_get_params --- mysql-test/valgrind.supp | 110 ----------------------------------------------- sql/wsrep_var.cc | 1 + wsrep/wsrep_dummy.c | 2 +- 3 files changed, 2 insertions(+), 111 deletions(-) diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index fc3a2d08213..1cc5d177972 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -1228,25 +1228,6 @@ fun:dlopen@@GLIBC_2.2.5 } -{ - GitHub codership/mysql-wsrep#176 - Memcheck:Leak - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE - fun:galera_parameters_get - fun:_ZL24refresh_provider_optionsv - fun:_Z29wsrep_provider_options_updateP7sys_varP3THD13enum_var_type - fun:_ZN7sys_var6updateEP3THDP7set_var - fun:_ZN7set_var6updateEP3THD - fun:_Z17sql_set_variablesP3THDP4ListI12set_var_baseE - fun:_Z21mysql_execute_commandP3THD - fun:_Z11mysql_parseP3THDPcjP12Parser_state - fun:_ZL17wsrep_mysql_parseP3THDPcjP12Parser_state - fun:_Z16dispatch_command19enum_server_commandP3THDPcj - fun:_Z10do_commandP3THD - fun:_Z24do_handle_one_connectionP3THD - fun:handle_one_connection -} - { GitHub codership/galera#330 Memcheck:Leak @@ -1340,31 +1321,6 @@ g codership/mysql-wsrep/issues#176 fun:_Z16wsrep_set_paramsRN6galera10ReplicatorEPKc } -{ - codership/mysql-wsrep/issues#176 - Memcheck:Leak - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE -} - -{ - codership/mysql-wsrep/issues#176 - Memcheck:Leak - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE - fun:galera_parameters_get - fun:_ZL24refresh_provider_optionsv - fun:_Z21wsrep_provider_updateP7sys_varP3THD13enum_var_type - fun:_ZN7sys_var6updateEP3THDP7set_var - fun:_ZN7set_var6updateEP3THD - fun:_Z17sql_set_variablesP3THDP4ListI12set_var_baseE - fun:_Z21mysql_execute_commandP3THD - fun:_Z11mysql_parseP3THDPcjP12Parser_state - fun:_ZL17wsrep_mysql_parseP3THDPcjP12Parser_state - fun:_Z16dispatch_command19enum_server_commandP3THDPcj - fun:_Z10do_commandP3THD - 
fun:_Z24do_handle_one_connectionP3THD - fun:handle_one_connection -} - { codership/mysql-wsrep/issues#176 Memcheck:Leak @@ -1475,72 +1431,6 @@ g codership/mysql-wsrep/issues#176 fun:_Z24do_handle_one_connectionP3THD } -{ - codership/mysql-wsrep/issues#176 - Memcheck:Leak - match-leak-kinds: possible - fun:malloc - fun:strdup - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE - fun:galera_parameters_get - fun:_ZL24refresh_provider_optionsv - fun:_Z29wsrep_provider_options_updateP7sys_varP3THD13enum_var_type - fun:_ZN7sys_var6updateEP3THDP7set_var - fun:_ZN7set_var6updateEP3THD - fun:_Z17sql_set_variablesP3THDP4ListI12set_var_baseE - fun:_Z21mysql_execute_commandP3THD - fun:_Z11mysql_parseP3THDPcjP12Parser_state - fun:_ZL17wsrep_mysql_parseP3THDPcjP12Parser_state - fun:_Z16dispatch_command19enum_server_commandP3THDPcj - fun:_Z10do_commandP3THD - fun:_Z24do_handle_one_connectionP3THD - fun:handle_one_connection -} - -{ - codership/mysql-wsrep/issues#176 - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - fun:strdup - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE - fun:galera_parameters_get - fun:_ZL24refresh_provider_optionsv - fun:_Z29wsrep_provider_options_updateP7sys_varP3THD13enum_var_type - fun:_ZN7sys_var6updateEP3THDP7set_var - fun:_ZN7set_var6updateEP3THD - fun:_Z17sql_set_variablesP3THDP4ListI12set_var_baseE - fun:_Z21mysql_execute_commandP3THD - fun:_Z11mysql_parseP3THDPcjP12Parser_state - fun:_ZL17wsrep_mysql_parseP3THDPcjP12Parser_state - fun:_Z16dispatch_command19enum_server_commandP3THDPcj - fun:_Z10do_commandP3THD - fun:_Z24do_handle_one_connectionP3THD - fun:handle_one_connection -} - -{ - codership/mysql-wsrep/issues#176 - Memcheck:Leak - match-leak-kinds: definite - fun:malloc - fun:strdup - fun:_Z16wsrep_get_paramsRKN6galera10ReplicatorE - fun:galera_parameters_get - fun:_ZL24refresh_provider_optionsv - fun:_Z21wsrep_provider_updateP7sys_varP3THD13enum_var_type - fun:_ZN7sys_var6updateEP3THDP7set_var - fun:_ZN7set_var6updateEP3THD - fun:_Z17sql_set_variablesP3THDP4ListI12set_var_baseE - fun:_Z21mysql_execute_commandP3THD - fun:_Z11mysql_parseP3THDPcjP12Parser_state - fun:_ZL17wsrep_mysql_parseP3THDPcjP12Parser_state - fun:_Z16dispatch_command19enum_server_commandP3THDPcj - fun:_Z10do_commandP3THD - fun:_Z24do_handle_one_connectionP3THD - fun:handle_one_connection -} - { codership/galera#331 Memcheck:Leak diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 2f13ffd6747..8a507711daf 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -216,6 +216,7 @@ static bool refresh_provider_options() get_provider_option_value(wsrep_provider_options, (char*)"repl.max_ws_size", &wsrep_max_ws_size); + free(opts); } else { diff --git a/wsrep/wsrep_dummy.c b/wsrep/wsrep_dummy.c index bab5329dc02..5f1ea63cc40 100644 --- a/wsrep/wsrep_dummy.c +++ b/wsrep/wsrep_dummy.c @@ -86,7 +86,7 @@ static wsrep_status_t dummy_options_set( static char* dummy_options_get (wsrep_t* w) { WSREP_DBUG_ENTER(w); - return WSREP_DUMMY(w)->options; + return strdup(WSREP_DUMMY(w)->options); } static wsrep_status_t dummy_connect( -- cgit v1.2.1 From 1cb01fe7d2fd5651abf9df743c38fcae4541bd2a Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 2 Jun 2016 23:39:12 -0700 Subject: Galera MTR Tests: Fortify galera_restart_nochanges.test against sporadic failures due to node not being ready immediately after restart --- mysql-test/suite/galera/t/galera_restart_nochanges.test | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_restart_nochanges.test 
b/mysql-test/suite/galera/t/galera_restart_nochanges.test index a61332cefd6..ba12c4c409c 100644 --- a/mysql-test/suite/galera/t/galera_restart_nochanges.test +++ b/mysql-test/suite/galera/t/galera_restart_nochanges.test @@ -18,6 +18,10 @@ INSERT INTO t1 VALUES (1); --connection node_2 --source include/restart_mysqld.inc +--connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + --let $galera_connection_name = node_2a --let $galera_server_number = 2 --source include/galera_connect.inc -- cgit v1.2.1 From 5609020c7101f2ffb9ec68dfc68896b242da3de1 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 2 Jun 2016 23:56:16 -0700 Subject: Galera MTR Tests: fortify galera_parallel_simple.test against sporadic failures --- mysql-test/suite/galera/r/galera_parallel_simple.result | 7 ++++--- mysql-test/suite/galera/t/galera_parallel_simple.test | 13 ++++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_parallel_simple.result b/mysql-test/suite/galera/r/galera_parallel_simple.result index 294a94baed3..6d023c38a57 100644 --- a/mysql-test/suite/galera/r/galera_parallel_simple.result +++ b/mysql-test/suite/galera/r/galera_parallel_simple.result @@ -1,6 +1,7 @@ CREATE TABLE t1 (id INT) ENGINE=InnoDB; CREATE TABLE t2 (id INT) ENGINE=InnoDB; SET GLOBAL wsrep_slave_threads = 2; +LOCK TABLE t1 WRITE; INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); INSERT INTO t1 VALUES (1); @@ -13,15 +14,15 @@ INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); +SET SESSION wsrep_sync_wait = 0; +UNLOCK TABLES; +SET SESSION wsrep_sync_wait = 7; SELECT COUNT(*) = 10 FROM t1; COUNT(*) = 10 0 SELECT COUNT(*) = 10 FROM t2; COUNT(*) = 10 0 -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committed%'; -COUNT(*) = 2 -1 SET GLOBAL wsrep_slave_threads = 1;; DROP TABLE t1; DROP TABLE t2; diff --git a/mysql-test/suite/galera/t/galera_parallel_simple.test b/mysql-test/suite/galera/t/galera_parallel_simple.test index b1dc14deb5b..e078a342c16 100644 --- a/mysql-test/suite/galera/t/galera_parallel_simple.test +++ b/mysql-test/suite/galera/t/galera_parallel_simple.test @@ -13,6 +13,7 @@ CREATE TABLE t2 (id INT) ENGINE=InnoDB; --connection node_2 SET GLOBAL wsrep_slave_threads = 2; +LOCK TABLE t1 WRITE; --connection node_1 INSERT INTO t1 VALUES (1); @@ -34,10 +35,20 @@ INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); --connection node_2 +SET SESSION wsrep_sync_wait = 0; + +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%'; +--source include/wait_condition.inc + +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'applied write set%'; +--source include/wait_condition.inc + +UNLOCK TABLES; + +SET SESSION wsrep_sync_wait = 7; SELECT COUNT(*) = 10 FROM t1; SELECT COUNT(*) = 10 FROM t2; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committed%'; --eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig; -- cgit v1.2.1 From 0e83726edb4437fb40fcbb043ccf3721ac60fbac Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 3 Jun 2016 04:26:17 -0700 Subject: Galera MTR Tests: force galera_3nodes.galera_pc_bootstrap.test to run on a fresh cluster in order to avoid interaction with 
galera_3nodes.galera_innobackupex_backup.test --- mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.cnf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.cnf diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.cnf b/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.cnf new file mode 100644 index 00000000000..d560b675427 --- /dev/null +++ b/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.cnf @@ -0,0 +1,5 @@ +# We need a dedicated .cnf file, even if empty, in order to force this test to run +# alone on a freshly started cluster. Otherwise there are adverse interactions with +# prior tests such as galera_3nodes.galera_innobackupex_backup + +!include ../galera_3nodes.cnf -- cgit v1.2.1 From 5996c7baad2cc936881442ccb26bdde3b04ad6f2 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Tue, 7 Jun 2016 10:46:14 +0300 Subject: refs: MW-279 - At startup time global wsrep_on is set too late and some wsrep paths may be executed because of this. e.g. replication slave restart could happen before wsrep_on state is defined. - This fix checks both global wsrep_on and wsrep_provider values to determine if wsrep processing should happen - Fix affects all instances where WSREP_ON macro is used --- sql/mysqld.cc | 2 +- sql/wsrep_mysqld.h | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index f28225f5dad..1bd2a039ce8 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -9205,7 +9205,7 @@ static int mysql_init_variables(void) strmake_buf(mysql_home, tmpenv); #endif #ifdef WITH_WSREP - if (WSREP_ON && wsrep_init_vars()) + if (wsrep_init_vars()) return 1; #endif return 0; diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index f499443ca2c..57382d27e98 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -200,8 +200,10 @@ extern void wsrep_prepend_PATH (const char* path); /* Other global variables */ extern wsrep_seqno_t wsrep_locked_seqno; -#define WSREP_ON \ - (global_system_variables.wsrep_on) +#define WSREP_ON \ + ((global_system_variables.wsrep_on) && \ + wsrep_provider && \ + strcmp(wsrep_provider, WSREP_NONE)) #define WSREP(thd) \ (WSREP_ON && wsrep && (thd && thd->variables.wsrep_on)) -- cgit v1.2.1 From a12fa57d35c00897fd883434e6573a65e6edfb41 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Mon, 13 Jun 2016 06:17:33 -0700 Subject: Galera MTR Tests: Run galera_pc_weight on freshly started servers in order to prevent interaction with other tests --- mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf new file mode 100644 index 00000000000..57026ce6928 --- /dev/null +++ b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf @@ -0,0 +1,5 @@ +# We need a dedicated .cnf file, even if empty, in order to force this test to run +# alone on a freshly started cluster. 
Otherwise there are adverse interactions with +# following tests such as galera_3nodes.galera_var_dirty_reads2 + +!include ../galera_3nodes.cnf -- cgit v1.2.1 From 88a1592b0a785aff0941540a9543ef2964caaf21 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Tue, 14 Jun 2016 17:18:21 +0200 Subject: MW-286 Avoid spurious deadlock errors when wsrep_on is disabled If a conflict happens under wsrep_on, the THD's wsrep_conflict_state is typically set to MUST_ABORT and cleared later, when transaction is aborted. However, when wsrep_on is disabled, no check is performed to see whether wsrep_conflict_state is set. So this potentially creates spurious deadlock errors on the subsequent statement that runs with wsrep_on enabled. To avoid this problem wsrep_thd_set_conflict_state() sets the conflict state only if wsrep_on is enabled. --- mysql-test/suite/galera/r/MW-286.result | 13 +++++++++++++ mysql-test/suite/galera/t/MW-286.test | 32 ++++++++++++++++++++++++++++++++ sql/sql_class.cc | 2 +- 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 mysql-test/suite/galera/r/MW-286.result create mode 100644 mysql-test/suite/galera/t/MW-286.test diff --git a/mysql-test/suite/galera/r/MW-286.result b/mysql-test/suite/galera/r/MW-286.result new file mode 100644 index 00000000000..adc996c1cbe --- /dev/null +++ b/mysql-test/suite/galera/r/MW-286.result @@ -0,0 +1,13 @@ +CREATE TABLE ten (f1 INTEGER); +INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; +INSERT INTO t1 (f1) SELECT 000000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; +INSERT INTO t1 (f1) SELECT 100000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;; +SET GLOBAL wsrep_desync = TRUE; +SET wsrep_on = FALSE; +ALTER TABLE t1 ADD PRIMARY KEY (f1); +ERROR 70100: Query execution was interrupted +SET wsrep_on = TRUE; +SET GLOBAL wsrep_desync = FALSE; +DROP TABLE t1; +DROP TABLE ten; diff --git a/mysql-test/suite/galera/t/MW-286.test b/mysql-test/suite/galera/t/MW-286.test new file mode 100644 index 00000000000..1b2e322f078 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-286.test @@ -0,0 +1,32 @@ +# +# MW-286 Spurious deadlock error after error with wsrep_desync and wsrep_on +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/big_test.inc + +--connection node_1 +CREATE TABLE ten (f1 INTEGER); +INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); + +CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; + +# Insert some values before the ALTER +INSERT INTO t1 (f1) SELECT 000000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; + +# Insert more values while the ALTER is running +--send INSERT INTO t1 (f1) SELECT 100000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5; + +--connection node_2 +SET GLOBAL wsrep_desync = TRUE; +SET wsrep_on = FALSE; + +--error ER_QUERY_INTERRUPTED +ALTER TABLE t1 ADD PRIMARY KEY (f1); + +SET wsrep_on = TRUE; +SET GLOBAL wsrep_desync = FALSE; + +DROP TABLE t1; +DROP TABLE ten; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 4873586aba5..37bacc986f7 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -846,7 +846,7 @@ extern "C" void wsrep_thd_set_query_state( extern "C" 
void wsrep_thd_set_conflict_state( THD *thd, enum wsrep_conflict_state state) { - thd->wsrep_conflict_state= state; + if (WSREP(thd)) thd->wsrep_conflict_state= state; } -- cgit v1.2.1 From c9ac48f8451d213cdf7fd4cee091025846306526 Mon Sep 17 00:00:00 2001 From: Krunal Bauskar Date: Thu, 2 Jun 2016 16:44:54 +0530 Subject: - PXC#592: Tried closing fk-reference-table that was never opened. Function "wsrep_row_upd_check_foreign_constraints" tried to mark fk-reference-table opened without ensuring it table is really opened. --- storage/innobase/row/row0upd.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 44b3faa76c0..8d13d436ab8 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -410,7 +410,7 @@ wsrep_row_upd_check_foreign_constraints( dict_table_open_on_name( foreign->referenced_table_name_lookup, FALSE, FALSE, DICT_ERR_IGNORE_NONE); - opened = TRUE; + opened = (foreign->referenced_table) ? TRUE : FALSE; } if (foreign->referenced_table) { @@ -433,7 +433,7 @@ wsrep_row_upd_check_foreign_constraints( ->n_foreign_key_checks_running); if (opened == TRUE) { - dict_table_close(foreign->referenced_table, TRUE, FALSE); + dict_table_close(foreign->referenced_table, FALSE, FALSE); opened = FALSE; } } -- cgit v1.2.1 From dfa9012abbaaec15e99e3fb8cbe3c90cf6dc8e3b Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Mon, 20 Jun 2016 14:35:22 +0200 Subject: MW-285 MTR test case for broken foreign key constraints --- mysql-test/suite/galera/r/MW-285.result | 19 +++++++++++++++++++ mysql-test/suite/galera/t/MW-285.test | 31 +++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 mysql-test/suite/galera/r/MW-285.result create mode 100644 mysql-test/suite/galera/t/MW-285.test diff --git a/mysql-test/suite/galera/r/MW-285.result b/mysql-test/suite/galera/r/MW-285.result new file mode 100644 index 00000000000..8c5a21fcbee --- /dev/null +++ b/mysql-test/suite/galera/r/MW-285.result @@ -0,0 +1,19 @@ +CREATE TABLE parent1 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB; +CREATE TABLE parent2 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB; +CREATE TABLE child ( +id INT PRIMARY KEY, +parent1_id INT, +parent2_id INT, +FOREIGN KEY (parent1_id) REFERENCES parent1(id), +FOREIGN KEY (parent1_id) REFERENCES parent2(id) +) ENGINE=InnoDB; +INSERT INTO parent1 VALUES (1); +INSERT INTO parent2 VALUES (1); +INSERT INTO child VALUES (1,1,1); +INSERT INTO child VALUES (2,1,1); +SET foreign_key_checks=OFF; +DROP TABLE parent1; +UPDATE child SET parent1_id=2 WHERE id=1; +DROP TABLE child; +DROP TABLE parent2; +SET foreign_key_checks=ON; diff --git a/mysql-test/suite/galera/t/MW-285.test b/mysql-test/suite/galera/t/MW-285.test new file mode 100644 index 00000000000..1c567f7b250 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-285.test @@ -0,0 +1,31 @@ +# +# Broken FK constraints cause assertions +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +CREATE TABLE parent1 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB; +CREATE TABLE parent2 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB; + +CREATE TABLE child ( + id INT PRIMARY KEY, + parent1_id INT, + parent2_id INT, + FOREIGN KEY (parent1_id) REFERENCES parent1(id), + FOREIGN KEY (parent1_id) REFERENCES parent2(id) +) ENGINE=InnoDB; + +INSERT INTO parent1 VALUES (1); +INSERT INTO parent2 VALUES (1); +INSERT INTO child VALUES (1,1,1); +INSERT INTO child VALUES (2,1,1); + +SET foreign_key_checks=OFF; +DROP 
TABLE parent1; + +UPDATE child SET parent1_id=2 WHERE id=1; + +DROP TABLE child; +DROP TABLE parent2; +SET foreign_key_checks=ON; -- cgit v1.2.1 From bf19492e3b3d73af6ea6c9ff61aa1838a55965ea Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Mon, 13 Jun 2016 17:49:42 +0200 Subject: GCF-837 Check wsrep interface version before loading provider --- wsrep/wsrep_loader.c | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/wsrep/wsrep_loader.c b/wsrep/wsrep_loader.c index 0825d7e9ecf..5f98b0ace6a 100644 --- a/wsrep/wsrep_loader.c +++ b/wsrep/wsrep_loader.c @@ -37,6 +37,22 @@ static wsrep_log_cb_t logger = default_logger; * Library loader **************************************************************************/ +static int wsrep_check_iface_version(const char* found, const char* iface_ver) +{ + const size_t msg_len = 128; + char msg[128]; + + if (strcmp(found, iface_ver)) { + snprintf (msg, msg_len, + "provider interface version mismatch: need '%s', found '%s'", + iface_ver, found); + logger (WSREP_LOG_ERROR, msg); + return EINVAL; + } + + return 0; +} + static int verify(const wsrep_t *wh, const char *iface_ver) { char msg[128]; @@ -50,13 +66,8 @@ static int verify(const wsrep_t *wh, const char *iface_ver) VERIFY(wh); VERIFY(wh->version); - if (strcmp(wh->version, iface_ver)) { - snprintf (msg, sizeof(msg), - "provider interface version mismatch: need '%s', found '%s'", - iface_ver, wh->version); - logger (WSREP_LOG_ERROR, msg); + if (wsrep_check_iface_version(wh->version, iface_ver)) return EINVAL; - } VERIFY(wh->init); VERIFY(wh->options_set); @@ -107,6 +118,15 @@ static wsrep_loader_fun wsrep_dlf(void *dlh, const char *sym) return alias.dlfun; } +static int wsrep_check_version_symbol(void *dlh) +{ + char** dlversion = NULL; + dlversion = (char**) dlsym(dlh, "wsrep_interface_version"); + if (dlversion == NULL) + return 0; + return wsrep_check_iface_version(*dlversion, WSREP_INTERFACE_VERSION); +} + extern int wsrep_dummy_loader(wsrep_t *w); int wsrep_load(const char *spec, wsrep_t **hptr, wsrep_log_cb_t log_cb) @@ -151,6 +171,11 @@ int wsrep_load(const char *spec, wsrep_t **hptr, wsrep_log_cb_t log_cb) goto out; } + if (wsrep_check_version_symbol(dlh) != 0) { + ret = EINVAL; + goto out; + } + if ((ret = (*dlfun)(*hptr)) != 0) { snprintf(msg, sizeof(msg), "wsrep_load(): loader failed: %s", strerror(ret)); -- cgit v1.2.1 From ea3ff73031dd14664045bcbe7ad922b780d229c9 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 9 Jun 2016 09:21:43 +0200 Subject: GCF-837 Fix crash when loading wrong provider version mysqld would crash with "double free or corrruption message" if wrong provider version was given. --- sql/wsrep_mysqld.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 17e3577a6eb..3c2b2c07d55 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -484,8 +484,7 @@ int wsrep_init() WSREP_ERROR("wsrep_load(%s) failed: %s (%d). 
Reverting to no provider.", wsrep_provider, strerror(rcode), rcode); strcpy((char*)wsrep_provider, WSREP_NONE); // damn it's a dirty hack - (void) wsrep_init(); - return rcode; + return wsrep_init(); } else /* this is for recursive call above */ { @@ -671,6 +670,9 @@ void wsrep_init_startup (bool first) wsrep_thr_lock_init(wsrep_thd_is_BF, wsrep_abort_thd, wsrep_debug, wsrep_convert_LOCK_to_trx, wsrep_on); + /* Skip replication start if dummy wsrep provider is loaded */ + if (!strcmp(wsrep_provider, WSREP_NONE)) return; + /* Skip replication start if no cluster address */ if (!wsrep_cluster_address || strlen(wsrep_cluster_address) == 0) return; -- cgit v1.2.1 From 85b9718b22adcc26952c13509d81fa61fa107f44 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Wed, 13 Jul 2016 03:19:20 -0700 Subject: Galera MTR Tests: Test case for galera#414 - crash on shutdown with gcs.max_packet_size=2 --- mysql-test/suite/galera/r/galera#414.result | 5 +++++ mysql-test/suite/galera/t/galera#414.cnf | 8 ++++++++ mysql-test/suite/galera/t/galera#414.test | 32 +++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) create mode 100644 mysql-test/suite/galera/r/galera#414.result create mode 100644 mysql-test/suite/galera/t/galera#414.cnf create mode 100644 mysql-test/suite/galera/t/galera#414.test diff --git a/mysql-test/suite/galera/r/galera#414.result b/mysql-test/suite/galera/r/galera#414.result new file mode 100644 index 00000000000..029961f9463 --- /dev/null +++ b/mysql-test/suite/galera/r/galera#414.result @@ -0,0 +1,5 @@ +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_on = OFF; +SET SESSION wsrep_on = ON; +CALL mtr.add_suppression("Failed to set packet size"); +CALL mtr.add_suppression("Failed to set packet size"); diff --git a/mysql-test/suite/galera/t/galera#414.cnf b/mysql-test/suite/galera/t/galera#414.cnf new file mode 100644 index 00000000000..fbd1c58754f --- /dev/null +++ b/mysql-test/suite/galera/t/galera#414.cnf @@ -0,0 +1,8 @@ +!include ../galera_2nodes.cnf + +[mysqld.1] +wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcs.max_packet_size=2' + +[mysqld.2] +wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcs.max_packet_size=2' + diff --git a/mysql-test/suite/galera/t/galera#414.test b/mysql-test/suite/galera/t/galera#414.test new file mode 100644 index 00000000000..b426e6510b6 --- /dev/null +++ b/mysql-test/suite/galera/t/galera#414.test @@ -0,0 +1,32 @@ +# +# codership/galera#414 Shutdown crashes node if the node started with `gcs.max_packet_size=2` +# + +--source include/big_test.inc +--source include/galera_cluster.inc + +# We perform the shutdown/restart sequence in here. 
If there was a crash during shutdown, MTR will detect it + +--connection node_2 +--source include/shutdown_mysqld.inc + +--connection node_1 +SET SESSION wsrep_sync_wait = 0; +SET SESSION wsrep_on = OFF; +--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +--connection node_2 +--source include/start_mysqld.inc + +--connection node_1 +SET SESSION wsrep_on = ON; +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +--connection node_1 +CALL mtr.add_suppression("Failed to set packet size"); + +--connection node_2 +CALL mtr.add_suppression("Failed to set packet size"); + -- cgit v1.2.1 From 065645313528fcb8a996b6a5f08686193b0b696c Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 22 Jul 2016 04:16:09 -0700 Subject: Galera MTR Tests: increase timeouts and adjust some sporadically-failing tests so that the Galera suites can be run with --parallel=4 --- mysql-test/suite/galera/galera_2nodes.cnf | 4 ++-- mysql-test/suite/galera/r/galera_defaults.result | 2 +- mysql-test/suite/galera_3nodes/galera_3nodes.cnf | 6 +++--- mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result | 3 --- mysql-test/suite/galera_3nodes/r/galera_pc_weight.result | 2 ++ mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.test | 3 ++- mysql-test/suite/galera_3nodes/t/galera_pc_weight.test | 7 ++++++- 7 files changed, 16 insertions(+), 11 deletions(-) diff --git a/mysql-test/suite/galera/galera_2nodes.cnf b/mysql-test/suite/galera/galera_2nodes.cnf index d20080589f1..956e09e7b82 100644 --- a/mysql-test/suite/galera/galera_2nodes.cnf +++ b/mysql-test/suite/galera/galera_2nodes.cnf @@ -16,7 +16,7 @@ wsrep-sync-wait=7 #ist_port=@OPT.port #sst_port=@OPT.port wsrep-cluster-address=gcomm:// -wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;gcache.size=10M' +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' @@ -25,7 +25,7 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' #ist_port=@OPT.port #sst_port=@OPT.port wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port' -wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;gcache.size=10M' +wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port' diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result index a4e6d31d406..3d75b86172a 100644 --- a/mysql-test/suite/galera/r/galera_defaults.result +++ b/mysql-test/suite/galera/r/galera_defaults.result @@ -47,7 +47,7 @@ WSREP_SST_DONOR WSREP_SST_DONOR_REJECTS_QUERIES OFF WSREP_SST_METHOD rsync WSREP_SYNC_WAIT 7 -; ; ; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; 
evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT5S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; ; gcache.keep_pages_size = 0; gcache.mem_size = 0; ; gcache.page_size = 128M; gcache.size = 10M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; ; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; ; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992; +; ; ; cert.log_conflicts = no; debug = no; evs.auto_evict = 0; evs.causal_keepalive_period = PT1S; evs.debug_log_mask = 0x1; evs.delay_margin = PT1S; evs.delayed_keep_period = PT30S; evs.inactive_check_period = PT0.5S; evs.inactive_timeout = PT15S; evs.info_log_mask = 0; evs.install_timeout = PT7.5S; evs.join_retrans_period = PT1S; evs.keepalive_period = PT1S; evs.max_install_timeouts = 3; evs.send_window = 4; evs.stats_report_period = PT1M; evs.suspect_timeout = PT10S; evs.use_aggregate = true; evs.user_send_window = 2; evs.version = 0; evs.view_forget_timeout = P1D; ; gcache.keep_pages_size = 0; gcache.mem_size = 0; ; gcache.page_size = 128M; gcache.size = 10M; gcomm.thread_prio = ; gcs.fc_debug = 0; gcs.fc_factor = 1.0; gcs.fc_limit = 16; gcs.fc_master_slave = no; gcs.max_packet_size = 64500; gcs.max_throttle = 0.25; ; gcs.recv_q_soft_limit = 0.25; gcs.sync_donor = no; ; gmcast.mcast_addr = ; gmcast.mcast_ttl = 1; gmcast.peer_timeout = PT3S; gmcast.segment = 0; gmcast.time_wait = PT5S; gmcast.version = 0; ; pc.announce_timeout = PT3S; pc.checksum = false; pc.ignore_quorum = false; pc.ignore_sb = false; pc.linger = PT20S; pc.npvo = false; pc.recovery = true; pc.version = 0; pc.wait_prim = true; pc.wait_prim_timeout = P30S; pc.weight = 1; protonet.backend = asio; protonet.version = 0; repl.causal_read_timeout = PT90S; repl.commit_order = 3; repl.key_format = FLAT8; repl.max_ws_size = 2147483647; repl.proto_max = 7; socket.checksum = 2; socket.recv_buf_size = 212992; SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep_%' AND VARIABLE_NAME != 'wsrep_debug_sync_waiters'; diff --git a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf index 689fd4db26c..f19277bb36a 100644 --- a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf +++ b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf @@ -16,7 +16,7 @@ wsrep-sync-wait=7 #ist_port=@OPT.port #sst_port=@OPT.port wsrep-cluster-address=gcomm:// -wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M' +wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' @@ -25,7 +25,7 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port' #ist_port=@OPT.port #sst_port=@OPT.port 
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port' -wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M' +wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port' @@ -34,7 +34,7 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port' #ist_port=@OPT.port #sst_port=@OPT.port wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port' -wsrep_provider_options='base_port=@mysqld.3.#galera_port;gcache.size=10M' +wsrep_provider_options='base_port=@mysqld.3.#galera_port;gcache.size=10M;evs.suspect_timeout=PT10S' wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port' diff --git a/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result b/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result index f5a4cad4a23..69995acb982 100644 --- a/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result +++ b/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result @@ -3,9 +3,6 @@ SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1'; SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1'; SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1'; SET SESSION wsrep_sync_wait = 0; -SHOW STATUS LIKE 'wsrep_cluster_status'; -Variable_name Value -wsrep_cluster_status non-Primary SET GLOBAL wsrep_provider_options = 'pc.bootstrap=1'; SHOW STATUS LIKE 'wsrep_cluster_size'; Variable_name Value diff --git a/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result b/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result index 85f923ad55e..6fb931638ef 100644 --- a/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result +++ b/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result @@ -1,6 +1,8 @@ SET GLOBAL wsrep_provider_options = 'pc.weight=3'; Suspending node ... SET SESSION wsrep_sync_wait=0; +SET SESSION wsrep_on=OFF; +SET SESSION wsrep_on=ON; SHOW STATUS LIKE 'wsrep_cluster_size'; Variable_name Value wsrep_cluster_size 2 diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.test b/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.test index 6172ffcc743..f8381a3324b 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.test +++ b/mysql-test/suite/galera_3nodes/t/galera_pc_bootstrap.test @@ -23,7 +23,8 @@ SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1'; # Node #2 should be non-primary SET SESSION wsrep_sync_wait = 0; -SHOW STATUS LIKE 'wsrep_cluster_status'; +--let $wait_condition = SELECT variable_value = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_cluster_status'; +--source include/wait_condition.inc # Signal node #2 to bootstrap --connection node_2 diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test index 6585f1934a4..c118b7481bc 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test +++ b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test @@ -19,6 +19,11 @@ SET GLOBAL wsrep_provider_options = 'pc.weight=3'; SET SESSION wsrep_sync_wait=0; --source include/wait_until_connected_again.inc +SET SESSION wsrep_on=OFF; +--let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status' +--source include/wait_condition.inc +SET SESSION wsrep_on=ON; + # We can not use SELECT queries here, as only SHOW is allowed to run. 
# For nodes #2 and #3, we expect a non-primary component of size 2 @@ -45,7 +50,7 @@ SHOW STATUS LIKE 'wsrep_local_state_comment'; --connection node_1 --source include/galera_resume.inc ---sleep 5 +--sleep 10 --source include/wait_until_connected_again.inc # For Node #1, we expect a primary component of size 1 -- cgit v1.2.1 From 30c6ac3cd152c1280fe9ccfb86b42e8048e3dc91 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Wed, 3 Aug 2016 02:52:39 -0700 Subject: Galera MTR Tests: Attempt to fortify galera_kill_ddl.test against sporadic failures --- mysql-test/suite/galera/r/galera_kill_ddl.result | 3 --- mysql-test/suite/galera/t/galera_kill_ddl.test | 7 ++++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_kill_ddl.result b/mysql-test/suite/galera/r/galera_kill_ddl.result index 8dd36497dfb..b83226bbd42 100644 --- a/mysql-test/suite/galera/r/galera_kill_ddl.result +++ b/mysql-test/suite/galera/r/galera_kill_ddl.result @@ -5,7 +5,4 @@ ALTER TABLE t1 ADD COLUMN f2 INTEGER; SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='t1'; COUNT(*) = 2 1 -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -VARIABLE_VALUE = 2 -1 DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_kill_ddl.test b/mysql-test/suite/galera/t/galera_kill_ddl.test index 3c2bce5b9c9..90f3f30cc76 100644 --- a/mysql-test/suite/galera/t/galera_kill_ddl.test +++ b/mysql-test/suite/galera/t/galera_kill_ddl.test @@ -28,8 +28,13 @@ ALTER TABLE t1 ADD COLUMN f2 INTEGER; --source include/galera_connect.inc --connection node_2a +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; +--source include/wait_condition.inc + SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='t1'; -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; --connection node_1 --disable_query_log -- cgit v1.2.1 From f01a16b54196ef5f816ca5a1bb590262ce0381e6 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 4 Aug 2016 00:33:12 -0700 Subject: Galera MTR Tests: fortify galera_bf_abort_flush_for_export against sporadic failures. 
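Across these sporadic-failure fixes the fortification pattern is the same: instead of asserting a wsrep status value with a single SELECT or SHOW right after a restart, kill or resume, the test polls through include/wait_condition.inc until the cluster has actually reached the expected state. A minimal sketch of that pattern, assuming a two-node cluster (the expected values 2 and 'Primary' are the usual two-node expectations, not taken from any one test above):

--connection node_2
SET SESSION wsrep_sync_wait = 0;

# Poll rather than assert: wait_condition.inc re-runs the query until it
# returns a true value or the wait times out
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc

--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
--source include/wait_condition.inc

SET SESSION wsrep_sync_wait = 7;

Dropping wsrep_sync_wait to 0 for the duration of the poll and restoring the suite default of 7 afterwards mirrors what several of the adjusted tests do, since the status queries themselves must not block on cluster synchronization while the node is still catching up.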
--- mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result | 7 ++++--- mysql-test/suite/galera/t/galera_bf_abort_flush_for_export.test | 5 ++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result index 8c07d87eec3..96ed226c3ab 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result @@ -6,9 +6,10 @@ SET SESSION wsrep_sync_wait = 0; UNLOCK TABLES; COMMIT; SET AUTOCOMMIT=ON; -SELECT * FROM t1; -f1 -2 +SET SESSION wsrep_sync_wait = 7; +SELECT COUNT(*) = 1 FROM t1; +COUNT(*) = 1 +1 wsrep_local_aborts_increment 1 DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_bf_abort_flush_for_export.test b/mysql-test/suite/galera/t/galera_bf_abort_flush_for_export.test index dbbe3b3c483..e32089ce21e 100644 --- a/mysql-test/suite/galera/t/galera_bf_abort_flush_for_export.test +++ b/mysql-test/suite/galera/t/galera_bf_abort_flush_for_export.test @@ -27,9 +27,8 @@ UNLOCK TABLES; COMMIT; SET AUTOCOMMIT=ON; ---let $wait_condition = SELECT COUNT(*) = 1 FROM t1 ---source include/wait_condition.inc -SELECT * FROM t1; +SET SESSION wsrep_sync_wait = 7; +SELECT COUNT(*) = 1 FROM t1; --let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` -- cgit v1.2.1 From 2e56c7f3cdfc882aad25b606a4d14f9cb6295451 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Tue, 9 Aug 2016 12:34:03 +0300 Subject: Bump WSREP_PATCH_VERSION to 16 --- cmake/wsrep.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index 6c256471115..eeb602c9c48 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -18,7 +18,7 @@ # so WSREP_VERSION is produced regardless # Set the patch version -SET(WSREP_PATCH_VERSION "14") +SET(WSREP_PATCH_VERSION "16") # MariaDB addition: Revision number of the last revision merged from # codership branch visible in @@visible_comment. -- cgit v1.2.1 From fec296cc10f0d1319e032b72e92e3c824b7fc390 Mon Sep 17 00:00:00 2001 From: Damien Ciabrini Date: Fri, 12 Aug 2016 10:57:58 +0200 Subject: refs codership/mysql-wsrep#267 Fix Galera crash at startup when compiled with gcc 6 --- sql/wsrep_mysqld.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 3c2b2c07d55..91a9a1f210d 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -87,7 +87,7 @@ my_bool wsrep_creating_startup_threads = 0; */ my_bool wsrep_inited = 0; // initialized ? -static const wsrep_uuid_t cluster_uuid = WSREP_UUID_UNDEFINED; +static wsrep_uuid_t cluster_uuid = WSREP_UUID_UNDEFINED; static char cluster_uuid_str[40]= { 0, }; static const char* cluster_status_str[WSREP_VIEW_MAX] = { -- cgit v1.2.1 From 415823a41cb7f302e9620f2b0fb57bcc69140d3f Mon Sep 17 00:00:00 2001 From: sjaakola Date: Wed, 8 Jun 2016 15:19:01 +0300 Subject: Refs: MW-279 - fixes in innodb to skip wsrep processing (like kill victim) when running in native mysql mode - similar fixes in mysql server side - forcing tc_log_dummy in native mysql mode when no binlog used. wsrep hton messes up handler counter and used to lead in using tc_log_mmap instead. 
Bad news is that tc_log_mmap does not seem to work at all --- sql/mysqld.cc | 11 +++++++++-- sql/wsrep_hton.cc | 10 +++------- storage/innobase/lock/lock0lock.cc | 22 +++++++++++++++------- storage/xtradb/lock/lock0lock.cc | 22 +++++++++++++++------- 4 files changed, 42 insertions(+), 23 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 1bd2a039ce8..6fc03e6e9f8 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5288,8 +5288,15 @@ static int init_server_components() tc_log= get_tc_log_implementation(); #ifdef WITH_WSREP - if (WSREP_ON && tc_log == &tc_log_mmap) - tc_log= &tc_log_dummy; + if (tc_log == &tc_log_mmap) + { + /* + wsrep hton raises total_ha_2pc count to 2, even in native mysql mode. + Have to force using tc_log_dummy here, as tc_log_mmap segfaults. + */ + if (WSREP_ON || total_ha_2pc <= 2) + tc_log= &tc_log_dummy; + } WSREP_DEBUG("Initial TC log open: %s", (tc_log == &mysql_bin_log) ? "binlog" : diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc index e1bf63cd31f..9f8c328c353 100644 --- a/sql/wsrep_hton.cc +++ b/sql/wsrep_hton.cc @@ -37,6 +37,8 @@ enum wsrep_trx_status wsrep_run_wsrep_commit(THD *thd, handlerton *hton, */ void wsrep_cleanup_transaction(THD *thd) { + if (!WSREP(thd)) return; + if (wsrep_emulate_bin_log) thd_binlog_trx_reset(thd); thd->wsrep_ws_handle.trx_id= WSREP_UNDEFINED_TRX_ID; thd->wsrep_trx_meta.gtid= WSREP_GTID_UNDEFINED; @@ -112,13 +114,7 @@ void wsrep_register_hton(THD* thd, bool all) */ void wsrep_post_commit(THD* thd, bool all) { - /* - TODO: It can perhaps be fixed in a more elegant fashion by turning off - wsrep_emulate_binlog if wsrep_on=0 on server start. - https://github.com/codership/mysql-wsrep/issues/112 - */ - if (!WSREP_ON) - return; + if (!WSREP(thd)) return; switch (thd->wsrep_exec_mode) { diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index ea26298da01..c44aa490a81 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1685,6 +1685,10 @@ wsrep_kill_victim( { ut_ad(lock_mutex_own()); ut_ad(trx_mutex_own(lock->trx)); + + /* quit for native mysql */ + if (!wsrep_on(trx->mysql_thd)) return; + my_bool bf_this = wsrep_thd_is_BF(trx->mysql_thd, FALSE); my_bool bf_other = wsrep_thd_is_BF(lock->trx->mysql_thd, TRUE); @@ -1771,9 +1775,11 @@ lock_rec_other_has_conflicting( #ifdef WITH_WSREP if (lock_rec_has_to_wait(TRUE, trx, mode, lock, is_supremum)) { - trx_mutex_enter(lock->trx); - wsrep_kill_victim(trx, lock); - trx_mutex_exit(lock->trx); + if (wsrep_on(trx->mysql_thd)) { + trx_mutex_enter(lock->trx); + wsrep_kill_victim(trx, lock); + trx_mutex_exit(lock->trx); + } #else if (lock_rec_has_to_wait(trx, mode, lock, is_supremum)) { #endif /* WITH_WSREP */ @@ -2067,7 +2073,9 @@ lock_rec_create( ut_ad(index->table->n_ref_count > 0 || !index->table->can_be_evicted); #ifdef WITH_WSREP - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (c_lock && + wsrep_on(trx->mysql_thd) && + wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_t *hash = (lock_t *)c_lock->hash; lock_t *prev = NULL; @@ -4630,10 +4638,10 @@ lock_table_create( trx_mutex_exit(c_lock->trx); } } else { - UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); - } -#else +#endif /* WITH_WSREP */ UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); +#ifdef WITH_WSREP + } #endif /* WITH_WSREP */ if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) { diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index c9b9eea3808..b5b9f27e3aa 100644 --- 
a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -1695,6 +1695,10 @@ wsrep_kill_victim( { ut_ad(lock_mutex_own()); ut_ad(trx_mutex_own(lock->trx)); + + /* quit for native mysql */ + if (!wsrep_on(trx->mysql_thd)) return; + my_bool bf_this = wsrep_thd_is_BF(trx->mysql_thd, FALSE); my_bool bf_other = wsrep_thd_is_BF(lock->trx->mysql_thd, TRUE); @@ -1781,9 +1785,11 @@ lock_rec_other_has_conflicting( #ifdef WITH_WSREP if (lock_rec_has_to_wait(TRUE, trx, mode, lock, is_supremum)) { - trx_mutex_enter(lock->trx); - wsrep_kill_victim((trx_t *)trx, (lock_t *)lock); - trx_mutex_exit(lock->trx); + if (wsrep_on(trx->mysql_thd)) { + trx_mutex_enter(lock->trx); + wsrep_kill_victim(trx, lock); + trx_mutex_exit(lock->trx); + } #else if (lock_rec_has_to_wait(trx, mode, lock, is_supremum)) { #endif /* WITH_WSREP */ @@ -2089,7 +2095,9 @@ lock_rec_create( ut_ad(index->table->n_ref_count > 0 || !index->table->can_be_evicted); #ifdef WITH_WSREP - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (c_lock && + wsrep_on(trx->mysql_thd) && + wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_t *hash = (lock_t *)c_lock->hash; lock_t *prev = NULL; @@ -4667,10 +4675,10 @@ lock_table_create( trx_mutex_exit(c_lock->trx); } } else { - UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); - } -#else +#endif /* WITH_WSREP */ UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); +#ifdef WITH_WSREP + } #endif /* WITH_WSREP */ if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) { -- cgit v1.2.1 From cced23cf23f013bee9f137001f1d51142bace964 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 29 Jun 2016 16:50:53 -0400 Subject: MDEV-9423: cannot add new node to the cluser: Binlog.. .. file '/var/log/mysql/mariadb-bin.000001' not found in binlog index, needed for recovery. Aborting. In Galera cluster, while preparing for rsync/xtrabackup based SST, the donor node takes an FTWRL followed by (REFRESH_ENGINE_LOG in rsync based state transfer and) REFRESH_BINARY_LOG. The latter rotates the binary log and logs Binlog_checkpoint_log_event corresponding to the penultimate binary log file into the new file. The checkpoint event for the current file is later logged synchronously by binlog_background_thread. Now, since in rsync/xtrabackup based snapshot state transfer methods, only the last binary log file is transferred to the joiner node; the file could get transferred even before the checkpoint event for the same file gets written to it. As a result, the joiner node would fail to start complaining about the missing binlog file needed for recovery. In order to fix this, a mechanism has been put in place to make REFRESH_BINARY_LOG operation wait for Binlog_checkpoint_log_event to be logged for the current binary log file if the node is part of a Galera cluster. As further safety, during rsync based state transfer the donor node now acquires and owns LOCK_log for the duration of file transfer during SST. --- sql/log.cc | 29 ++++++++++++++++++++++++++--- sql/log.h | 1 + sql/sql_reload.cc | 6 ++++++ sql/wsrep_sst.cc | 20 ++++++++++++++++++++ 4 files changed, 53 insertions(+), 3 deletions(-) diff --git a/sql/log.cc b/sql/log.cc index 1d11b6ff01b..2479208b395 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3687,7 +3687,10 @@ bool MYSQL_BIN_LOG::open(const char *log_name, new_xid_list_entry->binlog_id= current_binlog_id; /* Remove any initial entries with no pending XIDs. 
*/ while ((b= binlog_xid_count_list.head()) && b->xid_count == 0) + { my_free(binlog_xid_count_list.get()); + } + mysql_cond_broadcast(&COND_xid_list); binlog_xid_count_list.push_back(new_xid_list_entry); mysql_mutex_unlock(&LOCK_xid_list); @@ -4208,6 +4211,7 @@ err: DBUG_ASSERT(b->xid_count == 0); my_free(binlog_xid_count_list.get()); } + mysql_cond_broadcast(&COND_xid_list); reset_master_pending--; mysql_mutex_unlock(&LOCK_xid_list); } @@ -4218,6 +4222,26 @@ err: } +void MYSQL_BIN_LOG::wait_for_last_checkpoint_event() +{ + mysql_mutex_lock(&LOCK_xid_list); + for (;;) + { + if (binlog_xid_count_list.is_last(binlog_xid_count_list.head())) + break; + mysql_cond_wait(&COND_xid_list, &LOCK_xid_list); + } + mysql_mutex_unlock(&LOCK_xid_list); + + /* + LOCK_xid_list and LOCK_log are chained, so the LOCK_log will only be + obtained after mark_xid_done() has written the last checkpoint event. + */ + mysql_mutex_lock(&LOCK_log); + mysql_mutex_unlock(&LOCK_log); +} + + /** Delete relay log files prior to rli->group_relay_log_name (i.e. all logs which are not involved in a non-finished group @@ -9260,7 +9284,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) */ if (unlikely(reset_master_pending)) { - mysql_cond_signal(&COND_xid_list); + mysql_cond_broadcast(&COND_xid_list); mysql_mutex_unlock(&LOCK_xid_list); DBUG_VOID_RETURN; } @@ -9298,8 +9322,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) mysql_mutex_lock(&LOCK_log); mysql_mutex_lock(&LOCK_xid_list); --mark_xid_done_waiting; - if (unlikely(reset_master_pending)) - mysql_cond_signal(&COND_xid_list); + mysql_cond_broadcast(&COND_xid_list); /* We need to reload current_binlog_id due to release/re-take of lock. */ current= current_binlog_id; diff --git a/sql/log.h b/sql/log.h index 7f44113f66d..9eb9f88031d 100644 --- a/sql/log.h +++ b/sql/log.h @@ -774,6 +774,7 @@ public: bool need_mutex); bool reset_logs(THD* thd, bool create_new_log, rpl_gtid *init_state, uint32 init_state_len); + void wait_for_last_checkpoint_event(); void close(uint exiting); void clear_inuse_flag_when_closing(File file); diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index f8c04af56bb..a83e91680da 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -155,6 +155,12 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, { if (mysql_bin_log.rotate_and_purge(true)) *write_to_binlog= -1; + + if (WSREP_ON) + { + /* Wait for last binlog checkpoint event to be logged. */ + mysql_bin_log.wait_for_last_checkpoint_event(); + } } } if (options & REFRESH_RELAY_LOG) diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index b697a557476..877a93eec44 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -1006,6 +1006,16 @@ wait_signal: if (!err) { sst_disallow_writes (thd.ptr, true); + /* + Lets also keep statements that modify binary logs (like RESET LOGS, + RESET MASTER) from proceeding until the files have been transferred + to the joiner node. 
+ */ + if (mysql_bin_log.is_open()) + { + mysql_mutex_lock(mysql_bin_log.get_log_lock()); + } + locked= true; goto wait_signal; } @@ -1014,6 +1024,11 @@ wait_signal: { if (locked) { + if (mysql_bin_log.is_open()) + { + mysql_mutex_assert_owner(mysql_bin_log.get_log_lock()); + mysql_mutex_unlock(mysql_bin_log.get_log_lock()); + } sst_disallow_writes (thd.ptr, false); thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr); locked= false; @@ -1046,6 +1061,11 @@ wait_signal: if (locked) // don't forget to unlock server before return { + if (mysql_bin_log.is_open()) + { + mysql_mutex_assert_owner(mysql_bin_log.get_log_lock()); + mysql_mutex_unlock(mysql_bin_log.get_log_lock()); + } sst_disallow_writes (thd.ptr, false); thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr); } -- cgit v1.2.1 From 3f481e52e41deecb05874989a51d6b009fda1a23 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Sun, 21 Aug 2016 20:09:05 -0400 Subject: Fixes for failing tests (post-merge). --- mysql-test/suite/galera/disabled.def | 1 + mysql-test/suite/galera/r/MW-284.result | 1 + mysql-test/suite/galera/r/galera_as_master.result | 1 + mysql-test/suite/galera/r/galera_as_slave_autoinc.result | 4 +--- mysql-test/suite/galera/t/MW-284.test | 1 + mysql-test/suite/galera/t/galera_as_master.test | 3 +++ mysql-test/suite/galera/t/galera_as_slave_autoinc.test | 5 ++--- mysql-test/suite/galera/t/galera_ist_restart_joiner.test | 15 +++++++++++++++ .../suite/galera_3nodes/t/galera_ist_gcache_rollover.test | 2 ++ mysql-test/suite/sys_vars/r/wsrep_desync_basic.result | 8 ++++++-- .../suite/sys_vars/r/wsrep_max_ws_size_basic.result | 4 +++- mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test | 2 ++ sql/sql_class.h | 1 - storage/xtradb/row/row0upd.cc | 4 ++-- wsrep/wsrep_dummy.c | 11 +++++++++-- 15 files changed, 49 insertions(+), 14 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 25f20e01521..4aa15d27661 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -28,3 +28,4 @@ galera_flush : mysql-wsrep/issues/229 galera_transaction_read_only : mysql-wsrep/issues/229 galera_gcs_fragment : Incorrect arguments to SET galera_flush_local : Fails sporadically +galera_binlog_stmt_autoinc : TODO: investigate \ No newline at end of file diff --git a/mysql-test/suite/galera/r/MW-284.result b/mysql-test/suite/galera/r/MW-284.result index 8b5119663ce..3ff131674ea 100644 --- a/mysql-test/suite/galera/r/MW-284.result +++ b/mysql-test/suite/galera/r/MW-284.result @@ -11,3 +11,4 @@ DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; CALL mtr.add_suppression('failed registering on master'); +CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work'); diff --git a/mysql-test/suite/galera/r/galera_as_master.result b/mysql-test/suite/galera/r/galera_as_master.result index aba93573ecf..bd7d63ad1ab 100644 --- a/mysql-test/suite/galera/r/galera_as_master.result +++ b/mysql-test/suite/galera/r/galera_as_master.result @@ -5,3 +5,4 @@ INSERT INTO t1 VALUES(2); DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; +CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work'); diff --git a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result index a8f5ec8e37e..b6314b862c2 100644 --- a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result +++ b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result @@ -1,6 +1,4 @@ -START SLAVE USER='root'; -Warnings: 
-Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure. +START SLAVE; SET SESSION binlog_format='STATEMENT'; CREATE TABLE t1 ( i int(11) NOT NULL AUTO_INCREMENT, diff --git a/mysql-test/suite/galera/t/MW-284.test b/mysql-test/suite/galera/t/MW-284.test index 5998e22ed1e..f3ce1b0dc91 100644 --- a/mysql-test/suite/galera/t/MW-284.test +++ b/mysql-test/suite/galera/t/MW-284.test @@ -55,3 +55,4 @@ STOP SLAVE; RESET SLAVE ALL; CALL mtr.add_suppression('failed registering on master'); +CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work'); diff --git a/mysql-test/suite/galera/t/galera_as_master.test b/mysql-test/suite/galera/t/galera_as_master.test index c42dbbf9683..b8f989d497b 100644 --- a/mysql-test/suite/galera/t/galera_as_master.test +++ b/mysql-test/suite/galera/t/galera_as_master.test @@ -36,3 +36,6 @@ DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; + +CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work'); + diff --git a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test index bf04b274ca7..9292badc480 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test +++ b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test @@ -5,7 +5,6 @@ # --source include/have_innodb.inc ---source include/have_log_bin.inc # As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 @@ -13,9 +12,9 @@ --connection node_2 --disable_query_log ---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_1; +--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1; --enable_query_log -START SLAVE USER='root'; +START SLAVE; --connection node_1 diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test index eae28bdbcd7..11664affe7c 100644 --- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test +++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test @@ -9,6 +9,12 @@ --source include/have_debug_sync.inc --source suite/galera/include/galera_have_debug_sync.inc +# Save original auto_increment_offset values. +--connection node_1 +let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; +--connection node_2 +let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; + CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a'); @@ -106,3 +112,12 @@ SELECT COUNT(*) = 0 FROM t3; --connection node_1 DROP TABLE t1, t2, t3; + +# Restore original auto_increment_offset values. 
+--disable_query_log +--connection node_1 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; +--connection node_2 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; +--enable_query_log + diff --git a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test index 7d8bbb39e35..8575d99f066 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test +++ b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test @@ -82,6 +82,8 @@ INSERT INTO t1 VALUES (51), (52), (53), (54), (55); --connection node_3 --source include/wait_until_connected_again.inc +sleep 5; + # Final checks --connection node_2 SELECT COUNT(*) = 30 FROM t1; diff --git a/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result b/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result index 6225b444cfd..5cd2a2e5720 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result @@ -22,10 +22,12 @@ SET @@global.wsrep_desync=ON; ERROR HY000: Operation 'desync' failed for SET @@global.wsrep_desync=ON SELECT @@global.wsrep_desync; @@global.wsrep_desync -1 +0 # valid values SET @@global.wsrep_desync='OFF'; +Warnings: +Warning 1231 'wsrep_desync' is already OFF. SELECT @@global.wsrep_desync; @@global.wsrep_desync 0 @@ -33,8 +35,10 @@ SET @@global.wsrep_desync=ON; ERROR HY000: Operation 'desync' failed for SET @@global.wsrep_desync=ON SELECT @@global.wsrep_desync; @@global.wsrep_desync -1 +0 SET @@global.wsrep_desync=default; +Warnings: +Warning 1231 'wsrep_desync' is already OFF. SELECT @@global.wsrep_desync; @@global.wsrep_desync 0 diff --git a/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result b/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result index 26d8d823a5c..d7e72869be3 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result @@ -3,10 +3,11 @@ # # save the initial value SET @wsrep_max_ws_size_global_saved = @@global.wsrep_max_ws_size; +SET @wsrep_provider_options_saved = @@global.wsrep_provider_options; # default SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size -1073741824 +2147483647 # scope SELECT @@session.wsrep_max_ws_size; @@ -55,4 +56,5 @@ NULL # restore the initial value SET @@global.wsrep_max_ws_size = @wsrep_max_ws_size_global_saved; +SET @@global.wsrep_provider_options = @wsrep_provider_options_saved; # End of test diff --git a/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test b/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test index e7af4558f24..2e302015136 100644 --- a/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test +++ b/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test @@ -6,6 +6,7 @@ --echo # save the initial value SET @wsrep_max_ws_size_global_saved = @@global.wsrep_max_ws_size; +SET @wsrep_provider_options_saved = @@global.wsrep_provider_options; --echo # default SELECT @@global.wsrep_max_ws_size; @@ -41,5 +42,6 @@ SELECT @global.wsrep_max_ws_size; --echo --echo # restore the initial value SET @@global.wsrep_max_ws_size = @wsrep_max_ws_size_global_saved; +SET @@global.wsrep_provider_options = @wsrep_provider_options_saved; --echo # End of test diff --git a/sql/sql_class.h b/sql/sql_class.h index ee637b3726d..bf3d043cc1a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -660,7 +660,6 @@ typedef struct system_variables uint wsrep_sync_wait; ulong 
wsrep_retry_autocommit; ulong wsrep_OSU_method; - ulong wsrep_auto_increment_control; #endif double long_query_time_double; diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc index 16795aed0db..e28cae052f3 100644 --- a/storage/xtradb/row/row0upd.cc +++ b/storage/xtradb/row/row0upd.cc @@ -412,7 +412,7 @@ wsrep_row_upd_check_foreign_constraints( dict_table_open_on_name( foreign->referenced_table_name_lookup, FALSE, FALSE, DICT_ERR_IGNORE_NONE); - opened = TRUE; + opened = (foreign->referenced_table) ? TRUE : FALSE; } if (foreign->referenced_table) { @@ -435,7 +435,7 @@ wsrep_row_upd_check_foreign_constraints( ->n_foreign_key_checks_running); if (opened == TRUE) { - dict_table_close(foreign->referenced_table, TRUE, FALSE); + dict_table_close(foreign->referenced_table, FALSE, FALSE); opened = FALSE; } } diff --git a/wsrep/wsrep_dummy.c b/wsrep/wsrep_dummy.c index 5f1ea63cc40..1780e91f89d 100644 --- a/wsrep/wsrep_dummy.c +++ b/wsrep/wsrep_dummy.c @@ -85,8 +85,15 @@ static wsrep_status_t dummy_options_set( static char* dummy_options_get (wsrep_t* w) { - WSREP_DBUG_ENTER(w); - return strdup(WSREP_DUMMY(w)->options); + char *options; + + WSREP_DBUG_ENTER(w); + options= WSREP_DUMMY(w)->options; + + if (options) + options= strdup(WSREP_DUMMY(w)->options); + + return options; } static wsrep_status_t dummy_connect( -- cgit v1.2.1 From f381ad5230e0537c63ad721d39aab1681e0a213a Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Sun, 21 Aug 2016 20:13:51 -0400 Subject: Update WSREP_PATCH_REVNO. --- cmake/wsrep.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index eeb602c9c48..f54efc274fb 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -23,7 +23,7 @@ SET(WSREP_PATCH_VERSION "16") # MariaDB addition: Revision number of the last revision merged from # codership branch visible in @@visible_comment. # Branch : https://github.com/codership/mysql-wsrep/tree/5.6 -SET(WSREP_PATCH_REVNO "af7f02e") # Should be updated on every merge. +SET(WSREP_PATCH_REVNO "c3fc46e") # Should be updated on every merge. # MariaDB: Obtain patch revision number: # Update WSREP_PATCH_REVNO if WSREP_REV environment variable is set. -- cgit v1.2.1 From b51109693e6abb0e58256192a648cdd158d47615 Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 22 Aug 2016 10:16:00 +0300 Subject: MDEV-10630 rpl.rpl_mdev6020 fails in buildbot with timeout The issue was that when running with valgrind the wait for master_pos_Wait() was not long enough. This patch also fixes two other failures that could affect rpl_mdev6020: - check_if_conflicting_replication_locks() didn't properly check domains - 'did_mark_start_commit' was after signals to other threads was sent which could get the variable read too early. 
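The timeout made configurable here is the one mysqltest applies inside sync_with_master, i.e. while waiting for the slave to apply events up to a position previously recorded with save_master_pos. A rough sketch of the kind of test that depends on it, with an illustrative table and statements rather than anything taken from rpl_mdev6020:

--source include/master-slave.inc

--connection master
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
--save_master_pos

--connection slave
# Blocks until the slave reaches the saved master position, giving up after
# the mysqltest wait-for-pos timeout (--wait-for-pos-timeout, 300 seconds
# by default)
--sync_with_master
SELECT COUNT(*) FROM t1;

--connection master
DROP TABLE t1;
--sync_slave_with_master
--source include/rpl_end.inc

Under valgrind the slave can be slow enough that a fixed limit of a few hundred seconds is not sufficient, which is why mysql-test-run.pl passes its own value for this option when the server runs under valgrind.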
--- client/mysqltest.cc | 7 ++++++- mysql-test/mysql-test-run.pl | 2 +- mysql-test/suite/rpl/t/rpl_mdev6020.test | 4 +++- sql/mdl.cc | 20 ++++++++++++++++---- sql/rpl_rli.cc | 6 +++--- 5 files changed, 29 insertions(+), 10 deletions(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 84d5abc1a67..66bcb6462e7 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -181,6 +181,7 @@ static uint my_end_arg= 0; static uint opt_tail_lines= 0; static uint opt_connect_timeout= 0; +static uint opt_wait_for_pos_timeout= 0; static char delimiter[MAX_DELIMITER_LENGTH]= ";"; static uint delimiter_length= 1; @@ -4659,7 +4660,7 @@ void do_sync_with_master2(struct st_command *command, long offset, MYSQL_ROW row; MYSQL *mysql= cur_con->mysql; char query_buf[FN_REFLEN+128]; - int timeout= 300; /* seconds */ + int timeout= opt_wait_for_pos_timeout; if (!master_pos.file[0]) die("Calling 'sync_with_master' without calling 'save_master_pos'"); @@ -7098,6 +7099,10 @@ static struct my_option my_long_options[] = "Number of seconds before connection timeout.", &opt_connect_timeout, &opt_connect_timeout, 0, GET_UINT, REQUIRED_ARG, 120, 0, 3600 * 12, 0, 0, 0}, + {"wait_for_pos_timeout", 0, + "Number of seconds to wait for master_pos_wait", + &opt_wait_for_pos_timeout, &opt_wait_for_pos_timeout, 0, GET_UINT, + REQUIRED_ARG, 300, 0, 3600 * 12, 0, 0, 0}, {"plugin_dir", 0, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 07bf941de05..f3b733a1eac 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5899,7 +5899,7 @@ sub start_mysqltest ($) { { # We are running server under valgrind, which causes some replication # test to be much slower, notable rpl_mdev6020. Increase timeout. - mtr_add_arg($args, "--wait-for-pos-timeout=1500"); + mtr_add_arg($args, "--wait-for-pos-timeout=0"); } if ( $opt_ssl ) diff --git a/mysql-test/suite/rpl/t/rpl_mdev6020.test b/mysql-test/suite/rpl/t/rpl_mdev6020.test index 2fd342f5eda..8484e3e11c0 100644 --- a/mysql-test/suite/rpl/t/rpl_mdev6020.test +++ b/mysql-test/suite/rpl/t/rpl_mdev6020.test @@ -1,8 +1,10 @@ +# Running this with valgrind can take > 5000 seconds with xtradb +--source include/not_valgrind.inc + --source include/have_innodb.inc --source include/have_partition.inc --source include/have_binlog_format_mixed_or_row.inc --source include/master-slave.inc - --connection slave --source include/stop_slave.inc diff --git a/sql/mdl.cc b/sql/mdl.cc index 37699f1847b..57d5d8e7283 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -443,7 +443,9 @@ public: virtual void notify_conflicting_locks(MDL_context *ctx) = 0; virtual bitmap_t hog_lock_types_bitmap() const = 0; +#ifndef DBUG_OFF bool check_if_conflicting_replication_locks(MDL_context *ctx); +#endif /** List of granted tickets for this lock. */ Ticket_list m_granted; @@ -2303,16 +2305,23 @@ void MDL_scoped_lock::notify_conflicting_locks(MDL_context *ctx) and trying to get an exclusive lock for the table. 
 */
+#ifndef DBUG_OFF
 bool MDL_lock::check_if_conflicting_replication_locks(MDL_context *ctx)
 {
   Ticket_iterator it(m_granted);
   MDL_ticket *conflicting_ticket;
+  rpl_group_info *rgi_slave= ctx->get_thd()->rgi_slave;
+
+  if (!rgi_slave->gtid_sub_id)
+    return 0;
 
   while ((conflicting_ticket= it++))
   {
     if (conflicting_ticket->get_ctx() != ctx)
     {
       MDL_context *conflicting_ctx= conflicting_ticket->get_ctx();
+      rpl_group_info *conflicting_rgi_slave;
+      conflicting_rgi_slave= conflicting_ctx->get_thd()->rgi_slave;
 
       /*
         If the conflicting thread is another parallel replication
@@ -2320,15 +2329,18 @@ bool MDL_lock::check_if_conflicting_replication_locks(MDL_context *ctx)
         the current transaction has started too early and something is
         seriously wrong.
       */
-      if (conflicting_ctx->get_thd()->rgi_slave &&
-          conflicting_ctx->get_thd()->rgi_slave->rli ==
-          ctx->get_thd()->rgi_slave->rli &&
-          !conflicting_ctx->get_thd()->rgi_slave->did_mark_start_commit)
+      if (conflicting_rgi_slave &&
+          conflicting_rgi_slave->gtid_sub_id &&
+          conflicting_rgi_slave->rli == rgi_slave->rli &&
+          conflicting_rgi_slave->current_gtid.domain_id ==
+          rgi_slave->current_gtid.domain_id &&
+          !conflicting_rgi_slave->did_mark_start_commit)
         return 1; // Fatal error
     }
   }
   return 0;
 }
+#endif
 
 
 /**
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 1fc92d4ecec..4aed8cdcd94 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1921,8 +1921,8 @@ rpl_group_info::mark_start_commit_no_lock()
 {
   if (did_mark_start_commit)
     return;
-  mark_start_commit_inner(parallel_entry, gco, this);
   did_mark_start_commit= true;
+  mark_start_commit_inner(parallel_entry, gco, this);
 }
 
 
@@ -1933,12 +1933,12 @@ rpl_group_info::mark_start_commit()
 
   if (did_mark_start_commit)
     return;
+  did_mark_start_commit= true;
 
   e= this->parallel_entry;
   mysql_mutex_lock(&e->LOCK_parallel_entry);
   mark_start_commit_inner(e, gco, this);
   mysql_mutex_unlock(&e->LOCK_parallel_entry);
-  did_mark_start_commit= true;
 }
 
 
@@ -1981,12 +1981,12 @@ rpl_group_info::unmark_start_commit()
 
   if (!did_mark_start_commit)
     return;
+  did_mark_start_commit= false;
 
   e= this->parallel_entry;
   mysql_mutex_lock(&e->LOCK_parallel_entry);
   --e->count_committing_event_groups;
   mysql_mutex_unlock(&e->LOCK_parallel_entry);
-  did_mark_start_commit= false;
 }
-- 
cgit v1.2.1

From a5051cd3b2a46eb01ef9afdb1d798bdf6db715ab Mon Sep 17 00:00:00 2001
From: Monty
Date: Mon, 22 Aug 2016 10:19:07 +0300
Subject: Minor cleanups

- Remove impossible test in test_quick_select
- Ensure that is_fatal_error is set if we run out of stack space
---
 sql/opt_range.cc | 3 ---
 sql/sql_parse.cc | 1 +
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 18859c3ad37..e0ca43e6d72 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -3015,8 +3015,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
   scan_time= read_time= DBL_MAX;
   if (limit < records)
     read_time= (double) records + scan_time + 1; // Force to use index
-  else if (read_time <= 2.0 && !force_quick_range)
-    DBUG_RETURN(0);                             /* No need for quick select */
 
   possible_keys.clear_all();
 
@@ -3285,7 +3283,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     thd->no_errors=0;
   }
 
-  DBUG_EXECUTE("info", print_quick(quick, &needed_reg););
 
   /*
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 118602c5127..cbf723c1b49 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -6181,6 +6181,7 @@ bool check_stack_overrun(THD *thd, long margin,
   if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >=
       (long) (my_thread_stack_size -
margin)) { + thd->is_fatal_error= 1; /* Do not use stack for the message buffer to ensure correct behaviour in cases we have close to no stack left. -- cgit v1.2.1 From eb2c1474752a5f743db638d5b06612c9e3f07f74 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Sun, 1 May 2016 22:29:47 +0300 Subject: The consolidated patch for mdev-9197. --- mysql-test/r/derived_cond_pushdown.result | 6436 +++++++++++++++++++++++++++++ mysql-test/t/derived_cond_pushdown.test | 812 ++++ sql/item.cc | 204 + sql/item.h | 121 +- sql/item_cmpfunc.cc | 40 + sql/item_cmpfunc.h | 104 +- sql/item_func.cc | 3 + sql/item_func.h | 158 + sql/item_geofunc.h | 69 + sql/item_inetfunc.h | 16 + sql/item_row.cc | 18 + sql/item_row.h | 3 + sql/item_strfunc.h | 137 +- sql/item_subselect.h | 3 + sql/item_sum.h | 38 + sql/item_timefunc.h | 90 + sql/item_windowfunc.h | 17 +- sql/item_xmlfunc.cc | 34 + sql/item_xmlfunc.h | 4 + sql/procedure.h | 1 + sql/sql_derived.cc | 134 + sql/sql_derived.h | 8 + sql/sql_lex.cc | 188 + sql/sql_lex.h | 23 + sql/sql_priv.h | 5 +- sql/sql_select.cc | 89 +- sql/sql_select.h | 2 +- sql/sys_vars.cc | 1 + sql/table.cc | 176 + sql/table.h | 2 + 30 files changed, 8923 insertions(+), 13 deletions(-) create mode 100644 mysql-test/r/derived_cond_pushdown.result create mode 100644 mysql-test/t/derived_cond_pushdown.test diff --git a/mysql-test/r/derived_cond_pushdown.result b/mysql-test/r/derived_cond_pushdown.result new file mode 100644 index 00000000000..3acf1965323 --- /dev/null +++ b/mysql-test/r/derived_cond_pushdown.result @@ -0,0 +1,6436 @@ +create table t1 (a int, b int, c int); +create table t2 (a int, b int, c int, d decimal); +insert into t1 values +(1,21,345), (1,33,7), (8,33,114), (1,21,500), (1,19,107), (5,14,787), +(8,33,123), (9,10,211), (5,16,207), (1,33,988), (5,27,132), (1,21,104), +(6,20,309), (6,20,315), (1,21,101), (8,33,404), (9,10,800), (1,21,123), +(7,11,708), (6,20,214); +insert into t2 values +(2,3,207,207.0000), (1,21,909,12.0000), (7,13,312,406.0000), +(8,64,248,107.0000), (6,20,315,279.3333), (1,19,203,107.0000), +(8,80,800,314.0000), (3,12,231,190.0000), (6,23,303,909.0000); +Warnings: +Note 1265 Data truncated for column 'd' at row 5 +create table t1_double(a int, b double, c double); +insert into t1_double values +(1,23.4,14.3333), (1,12.5,18.9), (3,12.5,18.9), +(4,33.4,14.3333), (4,14.3333,13.65), (5,17.89,7.22), +(6,33.4,14.3), (10,33.4,13.65), (11,33.4,13.65); +create table t2_double(a int, b double, c double); +insert into t2_double values +(1,22.4,14.3333), (1,12.5,18.9), (2,22.4,18.9), +(4,33.4,14.3333), (5,22.4,13.65), (7,17.89,18.9), +(6,33.4,14.3333), (10,31.4,13.65), (12,33.4,13.65); +create table t1_char(a char, b char(8), c int); +insert into t1_char values +('a','Ivan',1), ('b','Vika',2), ('b','Inga',6), ('c','Vika',7), +('b','Ivan',7), ('a','Alex',6), ('b','Inga',5), ('d','Ron',9), +('d','Harry',2), ('d','Hermione',3), ('c','Ivan',3), ('c','Harry',4); +create table t2_char(a char, b char(8), c int); +insert into t2_char values +('b','Ivan',1), ('c','Vinny',3), ('c','Inga',9), ('a','Vika',1), +('c','Ivan',2), ('b','Ali',6), ('c','Inga',2), ('a','Ron',9), +('d','Harry',1), ('b','Hermes',3), ('b','Ivan',11), ('b','Harry',4); +create table t1_decimal (a decimal(3,1), b decimal(3,1), c int); +insert into t1_decimal values +(1,1,23),(2,2,11),(3,3,16), +(1,1,12),(1,1,14),(2,3,15), +(2,1,13),(2,3,11),(3,3,16); +create table t2_decimal (a decimal(3,1), b decimal(3,1), c int); +insert into t2_decimal values +(2,1,13),(2,2,11),(3,3,16), +(1,3,22),(1,3,14),(2,2,15), 
+(2,1,43),(2,3,11),(2,3,16); +create view v1 as select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707; +create view v2 as select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707; +create view v3 as select a, b, min(c) as min_c from t1 +where t1.a<10 group by a,b having min_c > 109; +create view v4 as +select a, b, min(max_c) as min_c from v1 +where (v1.a<15) group by a,b; +create view v_union as +select a, b, min(c) as c from t1 +where t1.a<10 group by a,b having c > 109 +union +select a, b, max(c) as c from t1 +where t1.b>10 group by a,b having c < 300; +create view v2_union as +select a, b, min(c) as c from t1 +where t1.a<10 group by a,b having c > 109 +union +select a, b, max(c) as c from t1 +where t1.b>10 group by a,b having c < 300 +union +select a, b, avg(c) as c from t1 +where t1.c>300 group by a,b having c < 707; +create view v_double as +select a, avg(a/4) as avg_a, b, c from t1_double +where (b>12.2) group by b,c having (avg_a<22.333); +create view v_char as +select a, b, max(c) as max_c from t1_char +group by a,b having max_c < 9; +create view v_decimal as +select a, b, avg(c) as avg_c from t1_decimal +group by a,b having (avg_c>12); +# conjunctive subformula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +6 20 315 279.3333 7 13 312 406 +6 20 315 279.3333 8 64 248 107 +6 20 315 279.3333 8 80 800 314 +select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +6 20 315 279.3333 7 13 312 406 +6 20 315 279.3333 8 64 248 107 +6 20 315 279.3333 8 80 800 314 +explain select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.max_c > 214)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(t2.a > v1.a)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 214))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300); +a b max_c avg_c a b c d +1 21 
500 234.6000 1 21 909 12 +8 33 404 213.6667 8 64 248 107 +6 20 315 279.3333 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +6 20 315 279.3333 6 23 303 909 +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +8 33 404 213.6667 8 64 248 107 +6 20 315 279.3333 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +6 20 315 279.3333 6 23 303 909 +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.max_c > 300)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# extracted or formula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.max_c>400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.max_c>400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.max_c > 400) or (v1.max_c < 135))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.max_c > 400) and (t2.a > v1.a)) or ((v1.max_c < 135) and (t2.a < v1.a)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((max_c > 400) or (max_c < 135)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.max_c>300) and (v1.avg_c>t2.d) and (v1.b=t2.b)) or +((v1.max_c<135) and (v1.max_c300) and (v1.avg_c>t2.d) and (v1.b=t2.b)) or +((v1.max_c<135) and (v1.max_c300) and (v1.avg_c>t2.d) and (v1.b=t2.b)) or +((v1.max_c<135) and (v1.max_c ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort 
+explain format=json select * from v1,t2 where +((v1.max_c>300) and (v1.avg_c>t2.d) and (v1.b=t2.b)) or +((v1.max_c<135) and (v1.max_c", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.max_c > 300) or (v1.max_c < 135))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.b = t2.b) and (v1.max_c > 300) and (v1.avg_c > t2.d)) or ((v1.a = t2.a) and (v1.max_c < 135) and (v1.max_c < t2.c)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((max_c > 300) or (max_c < 135)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# conjunctive subformula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a>6) and (t2.b>v1.b); +a b max_c avg_c a b c d +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +select * from v1,t2 where (v1.a>6) and (t2.b>v1.b); +a b max_c avg_c a b c d +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +explain select * from v1,t2 where (v1.a>6) and (t2.b>v1.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a>6) and (t2.b>v1.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.a > 6)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(t2.b > v1.b)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 6)" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v2,t2 where (v2.b>25) and (t2.a25) and (t2.a25) and (t2.a ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v2,t2 where (v2.b>25) and (t2.a", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v2.b > 25)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(t2.a < v2.a)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b > 25))" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a>7) and (t2.c7) and (t2.c7) and (t2.c ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL 
join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.a>7) and (t2.c", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a > 7) or (v1.a < 2))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a > 7) and (t2.c < v1.max_c)) or ((v1.a < 2) and (t2.b < v1.b)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 7) or (t1.a < 2))" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v2,t2 where +((v2.a>7) and (t2.c5) and (t2.b7) and (t2.c5) and (t2.b7) and (t2.c5) and (t2.b ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v2,t2 where +((v2.a>7) and (t2.c5) and (t2.b", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v2.a > 7) or (v2.a > 5))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v2.a > 7) and (t2.c < v2.max_c)) or ((v2.a > 5) and (t2.b < v2.b)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and ((t1.a > 7) or (t1.a > 5)))" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a>4) and (v1.b>t2.b) and (v1.max_c=t2.d)) or +((v1.a<2) and (v1.max_c4) and (v1.b>t2.b) and (v1.max_c=t2.d)) or +((v1.a<2) and (v1.max_c4) and (v1.b>t2.b) and (v1.max_c=t2.d)) or +((v1.a<2) and (v1.max_c ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.a>4) and (v1.b>t2.b) and (v1.max_c=t2.d)) or +((v1.a<2) and (v1.max_c", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a > 4) or (v1.a < 2))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a > 4) and (v1.b > t2.b) and (v1.max_c = t2.d)) or ((v1.a < 2) and (v1.max_c < t2.c) and (v1.max_c = t2.d)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 4) or (t1.a < 2))" + } + } + } + } + } + } + } +} +# conjunctive subformulas : pushing into HAVING and WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a<2) and (v1.max_c>400) and (t2.b>v1.b); +a b max_c avg_c a b c d +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 6 23 303 909 +select * from v1,t2 where (v1.a<2) and (v1.max_c>400) and (t2.b>v1.b); +a b max_c avg_c a b c d +1 21 500 
234.6000 8 64 248 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 6 23 303 909 +explain select * from v1,t2 where (v1.a<2) and (v1.max_c>400) and (t2.b>v1.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a<2) and (v1.max_c>400) and (t2.b>v1.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 2) and (v1.max_c > 400))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(t2.b > v1.b)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 400))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a < 2)" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_double as v,t2_double as t where +(v.a=t.a) and (v.avg_a>0.45) and (v.b>10); +a avg_a b c a b c +1 0.50000000 12.5 18.9 1 22.4 14.3333 +1 0.50000000 12.5 18.9 1 12.5 18.9 +4 1.00000000 33.4 14.3333 4 33.4 14.3333 +4 1.00000000 14.3333 13.65 4 33.4 14.3333 +5 1.25000000 17.89 7.22 5 22.4 13.65 +6 1.50000000 33.4 14.3 6 33.4 14.3333 +10 2.62500000 33.4 13.65 10 31.4 13.65 +select * from v_double as v,t2_double as t where +(v.a=t.a) and (v.avg_a>0.45) and (v.b>10); +a avg_a b c a b c +1 0.50000000 12.5 18.9 1 22.4 14.3333 +1 0.50000000 12.5 18.9 1 12.5 18.9 +4 1.00000000 33.4 14.3333 4 33.4 14.3333 +4 1.00000000 14.3333 13.65 4 33.4 14.3333 +5 1.25000000 17.89 7.22 5 22.4 13.65 +6 1.50000000 33.4 14.3 6 33.4 14.3333 +10 2.62500000 33.4 13.65 10 31.4 13.65 +explain select * from v_double as v,t2_double as t where +(v.a=t.a) and (v.avg_a>0.45) and (v.b>10); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t.a 2 Using where +2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_double as v,t2_double as t where +(v.a=t.a) and (v.avg_a>0.45) and (v.b>10); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v.avg_a > 0.45) and (v.b > 10))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((avg_a < 22.333) and (avg_a > 0.45))", + "filesort": { + "sort_key": "t1_double.b, t1_double.c", + "temporary_table": { + "table": { + "table_name": "t1_double", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t1_double.b > 12.2) and (t1_double.b > 10))" + } + } + } + } + } + } + } +} +set statement 
optimizer_switch='condition_pushdown_for_derived=off' for select * from v_decimal as v,t2_decimal as t where +(v.a=t.a) and (v.avg_c>15) and (v.b>1); +a b avg_c a b c +3.0 3.0 16.0000 3.0 3.0 16 +select * from v_decimal as v,t2_decimal as t where +(v.a=t.a) and (v.avg_c>15) and (v.b>1); +a b avg_c a b c +3.0 3.0 16.0000 3.0 3.0 16 +explain select * from v_decimal as v,t2_decimal as t where +(v.a=t.a) and (v.avg_c>15) and (v.b>1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 3 test.t.a 2 Using where +2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_decimal as v,t2_decimal as t where +(v.a=t.a) and (v.avg_c>15) and (v.b>1); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "3", + "used_key_parts": ["a"], + "ref": ["test.t.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v.avg_c > 15) and (v.b > 1))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((avg_c > 12) and (avg_c > 15))", + "filesort": { + "sort_key": "t1_decimal.a, t1_decimal.b", + "temporary_table": { + "table": { + "table_name": "t1_decimal", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t1_decimal.b > 1)" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into HAVING and WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a>7) and (v1.max_c>300) and (t2.c7) and (v1.max_c>300) and (t2.c7) and (v1.max_c>300) and (t2.c ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.a>7) and (v1.max_c>300) and (t2.c", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a > 7) and (v1.max_c > 300)) or ((v1.a < 4) and (v1.max_c < 500)))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a > 7) and (v1.max_c > 300) and (t2.c < v1.max_c)) or ((v1.a < 4) and (v1.max_c < 500) and (t2.b < v1.b)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (((t1.a > 7) and (max_c > 300)) or ((t1.a < 4) and (max_c < 500))))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 7) or (t1.a < 4))" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where ((v1.a<2) and (v1.max_c>120)) or (v1.a>7); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 
213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +select * from v1,t2 where ((v1.a<2) and (v1.max_c>120)) or (v1.a>7); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +explain select * from v1,t2 where ((v1.a<2) and (v1.max_c>120)) or (v1.a>7); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where ((v1.a<2) and (v1.max_c>120)) or (v1.a>7); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a < 2) and (v1.max_c > 120)) or (v1.a > 7))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a < 2) and (v1.max_c > 120)) or (v1.a > 7))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (((t1.a < 2) and (max_c > 120)) or (t1.a > 7)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 2) or (t1.a > 7))" + } + } + } + } + } + } + } +} +# extracted or formulas : pushing into WHERE and HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a<2) and (v1.max_c>120) and (v1.b=t2.b)) or (v1.a>7); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +select * from v1,t2 where +((v1.a<2) and (v1.max_c>120) and (v1.b=t2.b)) or (v1.a>7); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +explain select * from v1,t2 where +((v1.a<2) and (v1.max_c>120) and (v1.b=t2.b)) or (v1.a>7); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.a<2) and (v1.max_c>120) and 
(v1.b=t2.b)) or (v1.a>7); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a < 2) and (v1.max_c > 120)) or (v1.a > 7))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.b = t2.b) and (v1.a < 2) and (v1.max_c > 120)) or (v1.a > 7))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (((t1.a < 2) and (max_c > 120)) or (t1.a > 7)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 2) or (t1.a > 7))" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a<2) and (v1.max_c<200) and (t2.c>v1.max_c) and (v1.max_c=t2.d)) or +((v1.a>4) and (v1.max_c<500) and (t2.bv1.max_c) and (v1.max_c=t2.d)) or +((v1.a>4) and (v1.max_c<500) and (t2.bv1.max_c) and (v1.max_c=t2.d)) or +((v1.a>4) and (v1.max_c<500) and (t2.b ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where +((v1.a<2) and (v1.max_c<200) and (t2.c>v1.max_c) and (v1.max_c=t2.d)) or +((v1.a>4) and (v1.max_c<500) and (t2.b", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a < 2) and (v1.max_c < 200)) or (v1.a > 4))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a < 2) and (v1.max_c < 200) and (t2.c > v1.max_c) and (v1.max_c = t2.d)) or ((v1.max_c = t2.c) and (v1.a > 4) and (t2.c < 500) and (t2.b < v1.b)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (((t1.a < 2) and (max_c < 200)) or ((t1.a > 4) and (max_c < 500))))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 2) or (t1.a > 4))" + } + } + } + } + } + } + } +} +# prepare of a query containing extracted or formula +prepare stmt from "select * from v1,t2 where + ((v1.max_c>400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.max_c > 400) or (v1.max_c < 135))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.max_c > 400) and (t2.a > v1.a)) or ((v1.max_c < 135) and (t2.a < v1.a)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((max_c > 400) or (max_c < 135)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +execute stmt; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": 
"((v1.max_c > 400) or (v1.max_c < 135))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.max_c > 400) and (t2.a > v1.a)) or ((v1.max_c < 135) and (t2.a < v1.a)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((max_c > 400) or (max_c < 135)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +deallocate prepare stmt; +# conjunctive subformula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (t2.a=v1.a) and (v1.b=t2.b) and (v1.a=1); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +select * from v1,t2 where (t2.a=v1.a) and (v1.b=t2.b) and (v1.a=1); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +explain select * from v1,t2 where (t2.a=v1.a) and (v1.b=t2.b) and (v1.a=1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.b 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (t2.a=v1.a) and (v1.b=t2.b) and (v1.a=1); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a = 1) and (t2.b is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["test.t2.b"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.a = 1)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 1)" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=5) and (v1.max_c=t2.d); +a b max_c avg_c a b c d +5 16 207 207.0000 2 3 207 207 +select * from v1,t2 where (v1.a=5) and (v1.max_c=t2.d); +a b max_c avg_c a b c d +5 16 207 207.0000 2 3 207 207 +explain select * from v1,t2 where (v1.a=5) and (v1.max_c=t2.d); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.d 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a=5) and (v1.max_c=t2.d); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.d is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["max_c"], + "ref": ["test.t2.d"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v1.a = 5) and (v1.max_c = t2.d))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + 
"table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 5)" + } + } + } + } + } + } + } +} +# conjunctive subformula : pushing into WHERE using equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (t2.a<5) and (v1.a=t2.a); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +1 19 107 107.0000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +1 19 107 107.0000 1 19 203 107 +select * from v1,t2 where (t2.a<5) and (v1.a=t2.a); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +1 19 107 107.0000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +1 19 107 107.0000 1 19 203 107 +explain select * from v1,t2 where (t2.a<5) and (v1.a=t2.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (t2.a<5) and (v1.a=t2.a); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a < 5) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a < 5)" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a); +a b max_c avg_c a b c d +select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a); +a b max_c avg_c a b c d +explain select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 10 test.t2.a,test.t2.a 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "10", + "used_key_parts": ["a", "b"], + "ref": ["test.t2.a", "test.t2.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.b = t1.a)" + } + } + } + } + } + } + } +} +# conjunctive subformula : pushing into HAVING using equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (t2.c>150) and (v1.max_c=t2.c); +a b max_c avg_c a b c d +5 16 207 207.0000 2 3 207 207 +6 20 315 279.3333 6 20 315 279 +select * from v1,t2 where (t2.c>150) and (v1.max_c=t2.c); +a b 
max_c avg_c a b c d +5 16 207 207.0000 2 3 207 207 +6 20 315 279.3333 6 20 315 279 +explain select * from v1,t2 where (t2.c>150) and (v1.max_c=t2.c); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.c 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where (t2.c>150) and (v1.max_c=t2.c); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.c > 150) and (t2.c is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["max_c"], + "ref": ["test.t2.c"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 150))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# extracted and formula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a) and (v1.a=3); +a b max_c avg_c a b c d +select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a) and (v1.a=3); +a b max_c avg_c a b c d +explain select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a) and (v1.a=3); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where +explain format=json select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a) and (v1.a=3); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 3)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 3) and (v1.b = 3))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 3) and (t1.b = 3))" + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=1) and (v1.b=21) and (t2.a=2); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +select * from v1,t2 where (v1.a=1) and (v1.b=21) and (t2.a=2); +a b max_c avg_c a b c d +1 21 500 234.6000 2 3 207 207 +explain select * from v1,t2 where (v1.a=1) and (v1.b=21) and (t2.a=2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where +explain format=json select * from v1,t2 where (v1.a=1) and (v1.b=21) and (t2.a=2); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 2)" + }, + 
"block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 1) and (v1.b = 21))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 1) and (t1.b = 21))" + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_char as v,t2_char as t where +(v.a='c') and (v.b<'Hermes') and ((v.b=t.b) or (v.max_c>20)); +a b max_c a b c +c Harry 4 d Harry 1 +c Harry 4 b Harry 4 +select * from v_char as v,t2_char as t where +(v.a='c') and (v.b<'Hermes') and ((v.b=t.b) or (v.max_c>20)); +a b max_c a b c +c Harry 4 d Harry 1 +c Harry 4 b Harry 4 +explain select * from v_char as v,t2_char as t where +(v.a='c') and (v.b<'Hermes') and ((v.b=t.b) or (v.max_c>20)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 12 Using where +1 PRIMARY t ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1_char ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort +explain format=json select * from v_char as v,t2_char as t where +(v.a='c') and (v.b<'Hermes') and ((v.b=t.b) or (v.max_c>20)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "((v.a = 'c') and (v.b < 'Hermes'))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 9)", + "filesort": { + "sort_key": "t1_char.b", + "temporary_table": { + "table": { + "table_name": "t1_char", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "((t1_char.a = 'c') and (t1_char.b < 'Hermes'))" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 12, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((t.b = v.b) or (v.max_c > 20))" + } + } +} +# extracted and formula : pushing into WHERE using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_decimal as v,t2_decimal as t where +(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1)); +a b avg_c a b c +1.0 1.0 16.3333 2.0 1.0 13 +3.0 3.0 16.0000 3.0 3.0 16 +3.0 3.0 16.0000 1.0 3.0 22 +3.0 3.0 16.0000 1.0 3.0 14 +1.0 1.0 16.3333 2.0 1.0 43 +3.0 3.0 16.0000 2.0 3.0 11 +3.0 3.0 16.0000 2.0 3.0 16 +select * from v_decimal as v,t2_decimal as t where +(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1)); +a b avg_c a b c +1.0 1.0 16.3333 2.0 1.0 13 +3.0 3.0 16.0000 3.0 3.0 16 +3.0 3.0 16.0000 1.0 3.0 22 +3.0 3.0 16.0000 1.0 3.0 14 +1.0 1.0 16.3333 2.0 1.0 43 +3.0 3.0 16.0000 2.0 3.0 11 +3.0 3.0 16.0000 2.0 3.0 16 +explain select * from v_decimal as v,t2_decimal as t where +(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 6 test.t.b,test.t.b 2 +2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_decimal as v,t2_decimal as t where +(v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1)); 
+EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(((t.b > 1) or (t.b = 1)) and (t.b is not null) and (t.b is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "6", + "used_key_parts": ["a", "b"], + "ref": ["test.t.b", "test.t.b"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(avg_c > 12)", + "filesort": { + "sort_key": "t1_decimal.a, t1_decimal.b", + "temporary_table": { + "table": { + "table_name": "t1_decimal", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t1_decimal.b = t1_decimal.a) and ((t1_decimal.a > 1) or (t1_decimal.a = 1)))" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into HAVING using equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 +where ((t2.a<4) and (v1.a=t2.a)) or ((t2.c>150) and (v1.max_c=t2.c)); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +5 16 207 207.0000 2 3 207 207 +6 20 315 279.3333 6 20 315 279 +select * from v1,t2 +where ((t2.a<4) and (v1.a=t2.a)) or ((t2.c>150) and (v1.max_c=t2.c)); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +5 16 207 207.0000 2 3 207 207 +6 20 315 279.3333 6 20 315 279 +explain select * from v1,t2 +where ((t2.a<4) and (v1.a=t2.a)) or ((t2.c>150) and (v1.max_c=t2.c)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 +where ((t2.a<4) and (v1.a=t2.a)) or ((t2.c>150) and (v1.max_c=t2.c)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a < 4) or (t2.c > 150))" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a = t2.a) and (t2.a < 4)) or ((v1.max_c = t2.c) and (t2.c > 150)))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((t1.a < 4) or (max_c > 150)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# conjunctive subformulas : pushing into WHERE and HAVING using equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 +where ((t2.a>5) and (v1.a=t2.a)) and ((t2.c>250) and (v1.max_c=t2.c)); +a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279 +select * from v1,t2 +where ((t2.a>5) and (v1.a=t2.a)) and ((t2.c>250) and (v1.max_c=t2.c)); +a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279 +explain select * from v1,t2 +where ((t2.a>5) and (v1.a=t2.a)) and ((t2.c>250) and (v1.max_c=t2.c)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 
ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 10 test.t2.a,test.t2.c 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,t2 +where ((t2.a>5) and (v1.a=t2.a)) and ((t2.c>250) and (v1.max_c=t2.c)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a > 5) and (t2.c > 250) and (t2.a is not null) and (t2.c is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "10", + "used_key_parts": ["a", "max_c"], + "ref": ["test.t2.a", "test.t2.c"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 250))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# conjunctive subformulas : pushing into WHERE and HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=8) and (v1.a=t2.a) and (v1.max_c=404); +a b max_c avg_c a b c d +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=8) and (v1.a=t2.a) and (v1.max_c=404); +a b max_c avg_c a b c d +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=8) and (v1.a=t2.a) and (v1.max_c=404); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where (v1.a=8) and (v1.a=t2.a) and (v1.max_c=404); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 8)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 8) and (v1.max_c = 404))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c = 404)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 8)" + } + } + } + } + } + } + } +} +# conjunctive subformulas : pushing into WHERE and HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +(v1.a>3) and (v1.max_c>200) and (t2.b3) and (v1.max_c>200) and (t2.b3) and (v1.max_c>200) and (t2.b ref key0 key0 5 test.t2.d 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort 
+explain format=json select * from v1,t2 where +(v1.a>3) and (v1.max_c>200) and (t2.b", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["max_c"], + "ref": ["test.t2.d"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v1.a > 3) and (v1.max_c > 200) and (t2.b < v1.b) and (t2.d = v1.max_c))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 3)" + } + } + } + } + } + } + } +} +# conjunctive subformula : pushing into WHERE +# extracted or formula : pushing into HAVING using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_double as v,t2_double as t where +(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1)); +a avg_a b c a b c +select * from v_double as v,t2_double as t where +(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1)); +a avg_a b c a b c +explain select * from v_double as v,t2_double as t where +(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 18 test.t.c,test.t.c 2 Using where +2 DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_double as v,t2_double as t where +(v.b=v.c) and (v.c=t.c) and ((t.c>10) or (v.a=1)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t.c is not null) and (t.c is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "18", + "used_key_parts": ["b", "c"], + "ref": ["test.t.c", "test.t.c"], + "rows": 2, + "filtered": 100, + "attached_condition": "((t.c > 10) or (v.a = 1))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((avg_a < 22.333) and ((t1_double.b > 10) or (t1_double.a = 1)))", + "filesort": { + "sort_key": "t1_double.b, t1_double.c", + "temporary_table": { + "table": { + "table_name": "t1_double", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t1_double.c = t1_double.b) and (t1_double.b > 12.2))" + } + } + } + } + } + } + } +} +# conjunctive subformula : pushing into WHERE +# extracted or formula : pushing into HAVING using equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_double as v,t2_double as t where +(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18); +a avg_a b c a b c +1 0.50000000 12.5 18.9 1 12.5 18.9 +1 0.50000000 12.5 18.9 2 22.4 18.9 +1 0.50000000 12.5 18.9 7 17.89 18.9 +select * from v_double as v,t2_double as t where +(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18); +a avg_a b c a b c +1 0.50000000 12.5 18.9 1 12.5 18.9 +1 0.50000000 12.5 18.9 2 22.4 18.9 +1 0.50000000 12.5 18.9 7 17.89 18.9 +explain select * from v_double as v,t2_double as t where +(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 9 test.t.c 2 Using where +2 
DERIVED t1_double ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_double as v,t2_double as t where +(((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t.c > 18) and (t.c is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "9", + "used_key_parts": ["c"], + "ref": ["test.t.c"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v.a > 0.2) or (v.b < 17) or (t.c > 17))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((avg_a < 22.333) and ((t1_double.a > 0.2) or (t1_double.b < 17) or (t1_double.c > 17)))", + "filesort": { + "sort_key": "t1_double.b, t1_double.c", + "temporary_table": { + "table": { + "table_name": "t1_double", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t1_double.b > 12.2) and (t1_double.c > 18))" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into WHERE +# conjunctive subformula : pushing into HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_decimal as v,t2_decimal as t where +(((v.a>4) or (v.a=2)) or (v.b>3)) and (v.avg_c=13); +a b avg_c a b c +2.0 1.0 13.0000 2.0 1.0 13 +2.0 3.0 13.0000 2.0 1.0 13 +2.0 1.0 13.0000 2.0 2.0 11 +2.0 3.0 13.0000 2.0 2.0 11 +2.0 1.0 13.0000 3.0 3.0 16 +2.0 3.0 13.0000 3.0 3.0 16 +2.0 1.0 13.0000 1.0 3.0 22 +2.0 3.0 13.0000 1.0 3.0 22 +2.0 1.0 13.0000 1.0 3.0 14 +2.0 3.0 13.0000 1.0 3.0 14 +2.0 1.0 13.0000 2.0 2.0 15 +2.0 3.0 13.0000 2.0 2.0 15 +2.0 1.0 13.0000 2.0 1.0 43 +2.0 3.0 13.0000 2.0 1.0 43 +2.0 1.0 13.0000 2.0 3.0 11 +2.0 3.0 13.0000 2.0 3.0 11 +2.0 1.0 13.0000 2.0 3.0 16 +2.0 3.0 13.0000 2.0 3.0 16 +select * from v_decimal as v,t2_decimal as t where +(((v.a>4) or (v.a=2)) or (v.b>3)) and (v.avg_c=13); +a b avg_c a b c +2.0 1.0 13.0000 2.0 1.0 13 +2.0 3.0 13.0000 2.0 1.0 13 +2.0 1.0 13.0000 2.0 2.0 11 +2.0 3.0 13.0000 2.0 2.0 11 +2.0 1.0 13.0000 3.0 3.0 16 +2.0 3.0 13.0000 3.0 3.0 16 +2.0 1.0 13.0000 1.0 3.0 22 +2.0 3.0 13.0000 1.0 3.0 22 +2.0 1.0 13.0000 1.0 3.0 14 +2.0 3.0 13.0000 1.0 3.0 14 +2.0 1.0 13.0000 2.0 2.0 15 +2.0 3.0 13.0000 2.0 2.0 15 +2.0 1.0 13.0000 2.0 1.0 43 +2.0 3.0 13.0000 2.0 1.0 43 +2.0 1.0 13.0000 2.0 3.0 11 +2.0 3.0 13.0000 2.0 3.0 11 +2.0 1.0 13.0000 2.0 3.0 16 +2.0 3.0 13.0000 2.0 3.0 16 +explain select * from v_decimal as v,t2_decimal as t where +(((v.a>4) or (v.a=2)) or (v.b>3)) and (v.avg_c=13); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY t ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join) +2 DERIVED t1_decimal ALL NULL NULL NULL NULL 9 Using where; Using temporary; Using filesort +explain format=json select * from v_decimal as v,t2_decimal as t where +(((v.a>4) or (v.a=2)) or (v.b>3)) and (v.avg_c=13); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(((v.a > 4) or (v.a = 2) or (v.b > 3)) and (v.avg_c = 13))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((avg_c > 12) and (avg_c = 13))", + "filesort": { + "sort_key": "t1_decimal.a, t1_decimal.b", + "temporary_table": { + "table": { 
+ "table_name": "t1_decimal", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t1_decimal.a > 4) or (t1_decimal.a = 2) or (t1_decimal.b > 3))" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL" + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b); +a b max_c avg_c a b c d +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b); +a b max_c avg_c a b c d +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 10 test.t2.a,test.t2.a 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "10", + "used_key_parts": ["a", "b"], + "ref": ["test.t2.a", "test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.max_c > 300)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b = t1.a) and (t1.a > 5))" + } + } + } + } + } + } + } +} +# nothing to push +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (t2.a<2) and (t2.c>900); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 21 500 234.6000 1 21 909 12 +5 16 207 207.0000 1 21 909 12 +5 27 132 132.0000 1 21 909 12 +6 20 315 279.3333 1 21 909 12 +8 33 404 213.6667 1 21 909 12 +select * from v1,t2 where (t2.a<2) and (t2.c>900); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 21 500 234.6000 1 21 909 12 +5 16 207 207.0000 1 21 909 12 +5 27 132 132.0000 1 21 909 12 +6 20 315 279.3333 1 21 909 12 +8 33 404 213.6667 1 21 909 12 +explain select * from v1,t2 where (t2.a<2) and (t2.c>900); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where (t2.a<2) and (t2.c>900); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + 
"attached_condition": "((t2.a < 2) and (t2.c > 900))" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +6 20 315 279.3333 6 20 315 279 +1 19 107 107.0000 1 19 203 107 +select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +6 20 315 279.3333 6 20 315 279 +1 19 107 107.0000 1 19 203 107 +explain select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 10 test.t2.a,test.t2.b 2 +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.b is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "10", + "used_key_parts": ["a", "b"], + "ref": ["test.t2.a", "test.t2.b"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +(t2.a=v1.a) or (v1.b=t2.b) and ((v1.a=1) or (v1.a=6)); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +select * from v1,t2 where +(t2.a=v1.a) or (v1.b=t2.b) and ((v1.a=1) or (v1.a=6)); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 80 800 314 +explain select * from v1,t2 where +(t2.a=v1.a) or (v1.b=t2.b) and ((v1.a=1) or (v1.a=6)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where +(t2.a=v1.a) or (v1.b=t2.b) and ((v1.a=1) or (v1.a=6)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 
20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a = t2.a) or ((v1.b = t2.b) and ((v1.a = 1) or (v1.a = 6))))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a=1) or (v1.b=21) or (t2.a=2); +a b max_c avg_c a b c d +1 19 107 107.0000 2 3 207 207 +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 7 13 312 406 +1 19 107 107.0000 8 64 248 107 +1 19 107 107.0000 6 20 315 279 +1 19 107 107.0000 1 19 203 107 +1 19 107 107.0000 8 80 800 314 +1 19 107 107.0000 3 12 231 190 +1 19 107 107.0000 6 23 303 909 +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +5 16 207 207.0000 2 3 207 207 +5 27 132 132.0000 2 3 207 207 +6 20 315 279.3333 2 3 207 207 +8 33 404 213.6667 2 3 207 207 +select * from v1,t2 where (v1.a=1) or (v1.b=21) or (t2.a=2); +a b max_c avg_c a b c d +1 19 107 107.0000 2 3 207 207 +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 7 13 312 406 +1 19 107 107.0000 8 64 248 107 +1 19 107 107.0000 6 20 315 279 +1 19 107 107.0000 1 19 203 107 +1 19 107 107.0000 8 80 800 314 +1 19 107 107.0000 3 12 231 190 +1 19 107 107.0000 6 23 303 909 +1 21 500 234.6000 2 3 207 207 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 7 13 312 406 +1 21 500 234.6000 8 64 248 107 +1 21 500 234.6000 6 20 315 279 +1 21 500 234.6000 1 19 203 107 +1 21 500 234.6000 8 80 800 314 +1 21 500 234.6000 3 12 231 190 +1 21 500 234.6000 6 23 303 909 +5 16 207 207.0000 2 3 207 207 +5 27 132 132.0000 2 3 207 207 +6 20 315 279.3333 2 3 207 207 +8 33 404 213.6667 2 3 207 207 +explain select * from v1,t2 where (v1.a=1) or (v1.b=21) or (t2.a=2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,t2 where (v1.a=1) or (v1.b=21) or (t2.a=2); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a = 1) or (v1.b = 21) or (t2.a = 2))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +(t2.a<2) and (t2.c>900) and ((v1.a900) and ((v1.a900) and ((v1.a ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json 
select * from v1,t2 where +(t2.a<2) and (t2.c>900) and ((v1.a 900))" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a < t2.a) or (t2.a < 11))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# using several derived tables : nothing to push +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where +(v1.a=v2.a) and (v1.a=t2.a) and (v2.b<50); +a b max_c avg_c a b max_c avg_c a b c d +8 33 404 213.6667 8 33 404 213.6667 8 64 248 107 +6 20 315 279.3333 6 20 315 279.3333 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 8 80 800 314 +6 20 315 279.3333 6 20 315 279.3333 6 23 303 909 +select * from v1,v2,t2 where +(v1.a=v2.a) and (v1.a=t2.a) and (v2.b<50); +a b max_c avg_c a b max_c avg_c a b c d +8 33 404 213.6667 8 33 404 213.6667 8 64 248 107 +6 20 315 279.3333 6 20 315 279.3333 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 8 80 800 314 +6 20 315 279.3333 6 20 315 279.3333 6 23 303 909 +explain select * from v1,v2,t2 where +(v1.a=v2.a) and (v1.a=t2.a) and (v2.b<50); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key1 key1 5 test.t2.a 2 +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where +(v1.a=v2.a) and (v1.a=t2.a) and (v2.b<50); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key1"], + "key": "key1", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v2.b < 50)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b < 50))" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and (t2.b<50) and (v1.b=v2.b); +a b max_c avg_c a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279.3333 2 3 207 207 +6 20 315 279.3333 6 20 315 279.3333 1 21 909 12 +6 20 315 279.3333 6 20 315 279.3333 7 13 312 406 +6 20 315 
279.3333 6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 20 315 279.3333 1 19 203 107 +6 20 315 279.3333 6 20 315 279.3333 3 12 231 190 +6 20 315 279.3333 6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 8 33 404 213.6667 6 23 303 909 +select * from v1,v2,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and (t2.b<50) and (v1.b=v2.b); +a b max_c avg_c a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279.3333 2 3 207 207 +6 20 315 279.3333 6 20 315 279.3333 1 21 909 12 +6 20 315 279.3333 6 20 315 279.3333 7 13 312 406 +6 20 315 279.3333 6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 20 315 279.3333 1 19 203 107 +6 20 315 279.3333 6 20 315 279.3333 3 12 231 190 +6 20 315 279.3333 6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 8 33 404 213.6667 6 23 303 909 +explain select * from v1,v2,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and (t2.b<50) and (v1.b=v2.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 5 v1.b 2 Using where +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and (t2.b<50) and (v1.b=v2.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.b < 50)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(v1.b is not null)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v1.b"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v2.a = v1.a) or (v1.a = t2.a))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where +((v1.a=v2.a) and (v1.a=t2.a)) or ((v2.b>13) and (t2.c<115)); +a b max_c avg_c a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 20 315 
279.3333 6 23 303 909 +8 33 404 213.6667 8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 33 404 213.6667 8 80 800 314 +select * from v1,v2,t2 where +((v1.a=v2.a) and (v1.a=t2.a)) or ((v2.b>13) and (t2.c<115)); +a b max_c avg_c a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 8 33 404 213.6667 8 80 800 314 +explain select * from v1,v2,t2 where +((v1.a=v2.a) and (v1.a=t2.a)) or ((v2.b>13) and (t2.c<115)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (incremental, BNL join) +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where +((v1.a=v2.a) and (v1.a=t2.a)) or ((v2.b>13) and (t2.c<115)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a = t2.a) or (t2.c < 115))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "incremental", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a = t2.a) and (v2.a = t2.a)) or ((v2.b > 13) and (t2.c < 115)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using several derived tables : pushing in all tables +# conjunctive subformula : pushing into HAVING +# extracted or formula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where ((v1.a=v2.a) or (v1.a=t2.a)) and +((v2.b<50) or (v2.b=19)) and (v1.max_c<300); +a b max_c avg_c a b max_c avg_c a b c d +1 19 107 107.0000 6 20 315 279.3333 1 21 909 12 +1 19 107 107.0000 6 20 315 279.3333 1 19 203 107 +1 19 107 107.0000 8 33 404 213.6667 1 21 909 12 +1 19 107 107.0000 8 33 404 213.6667 1 19 203 107 +select * from v1,v2,t2 where ((v1.a=v2.a) or (v1.a=t2.a)) and +((v2.b<50) or (v2.b=19)) and (v1.max_c<300); +a b max_c avg_c a b max_c avg_c a b c d +1 19 107 107.0000 6 20 315 279.3333 1 21 909 12 +1 19 107 107.0000 6 20 315 279.3333 1 19 203 107 +1 19 107 107.0000 8 33 404 213.6667 1 21 909 12 +1 19 107 107.0000 8 33 404 213.6667 1 19 203 107 +explain select * from v1,v2,t2 where ((v1.a=v2.a) or (v1.a=t2.a)) and +((v2.b<50) or (v2.b=19)) and (v1.max_c<300); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL 
NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (incremental, BNL join) +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where ((v1.a=v2.a) or (v1.a=t2.a)) and +((v2.b<50) or (v2.b=19)) and (v1.max_c<300); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.max_c < 300)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c < 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v2.b < 50) or (v2.b = 19))" + }, + "buffer_type": "incremental", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v2.a = v1.a) or (v1.a = t2.a)) and ((v2.b < 50) or (v2.b = 19)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and ((t1.b < 50) or (t1.b = 19)))" + } + } + } + } + } + } + } +} +# using several derived tables : pushing only in one table +# conjunctive subformula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where +(v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300); +a b max_c avg_c a b max_c avg_c a b c d +select * from v1,v2,t2 where +(v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300); +a b max_c avg_c a b max_c avg_c a b c d +explain select * from v1,v2,t2 where +(v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key1 key1 10 test.t2.a,test.t2.a 2 +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where +(v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key1"], + "key": "key1", + "key_length": "10", + "used_key_parts": ["a", "b"], + "ref": ["test.t2.a", "test.t2.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + 
"temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.b = t1.a)" + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v2.max_c < 300)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "((max_c < 707) and (max_c < 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using several derived tables : pushing only in one table +# extracted and formula : pushing into WHERE +# conjunctive subformula : pushing into WHERE using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,t2 where (v1.a=1) and (v1.b>10) and (v1.b=v2.b); +a b max_c avg_c a b max_c avg_c a b c d +select * from v1,v2,t2 where (v1.a=1) and (v1.b>10) and (v1.b=v2.b); +a b max_c avg_c a b max_c avg_c a b c d +explain select * from v1,v2,t2 where (v1.a=1) and (v1.b>10) and (v1.b=v2.b); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 5 v1.b 2 +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v1,v2,t2 where (v1.a=1) and (v1.b>10) and (v1.b=v2.b); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 1) and (v1.b > 10))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(v1.b is not null)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 1) and (t1.b > 10))" + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v1.b"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b > 10))" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into WHERE +# conjunctive subformula : pushing into WHERE using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_char as v,t2_char as t where +(v.a=t.a) and (t.a='b') and ((v.b='Vika') or (v.b='Ali')); +a b max_c a b c +b Vika 2 b Ivan 1 +b Vika 2 b Ali 6 +b Vika 2 b Hermes 3 +b Vika 2 b Ivan 11 +b Vika 2 b Harry 
4 +select * from v_char as v,t2_char as t where +(v.a=t.a) and (t.a='b') and ((v.b='Vika') or (v.b='Ali')); +a b max_c a b c +b Vika 2 b Ivan 1 +b Vika 2 b Ali 6 +b Vika 2 b Hermes 3 +b Vika 2 b Ivan 11 +b Vika 2 b Harry 4 +explain select * from v_char as v,t2_char as t where +(v.a=t.a) and (t.a='b') and ((v.b='Vika') or (v.b='Ali')); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 12 Using where +1 PRIMARY t ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1_char ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort +explain format=json select * from v_char as v,t2_char as t where +(v.a=t.a) and (t.a='b') and ((v.b='Vika') or (v.b='Ali')); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "((v.a = 'b') and ((v.b = 'Vika') or (v.b = 'Ali')))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(max_c < 9)", + "filesort": { + "sort_key": "t1_char.b", + "temporary_table": { + "table": { + "table_name": "t1_char", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "((t1_char.a = 'b') and ((t1_char.b = 'Vika') or (t1_char.b = 'Ali')))" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(t.a = 'b')" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL" + } + } +} +# using several derived tables : pushing in all tables +# extracted or formula : pushing into WHERE +# conjunctive subformulas : pushing into HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,v2,v3,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and ((v3.b<50) or (v3.b=33)) +and (v1.max_c<500) and (v3.a=t2.a) and (v2.max_c>300); +a b max_c avg_c a b max_c avg_c a b min_c a b c d +6 20 315 279.3333 6 20 315 279.3333 7 11 708 7 13 312 406 +6 20 315 279.3333 6 20 315 279.3333 8 33 114 8 64 248 107 +6 20 315 279.3333 6 20 315 279.3333 6 20 214 6 20 315 279 +6 20 315 279.3333 6 20 315 279.3333 8 33 114 8 80 800 314 +6 20 315 279.3333 6 20 315 279.3333 6 20 214 6 23 303 909 +6 20 315 279.3333 8 33 404 213.6667 6 20 214 6 20 315 279 +6 20 315 279.3333 8 33 404 213.6667 6 20 214 6 23 303 909 +8 33 404 213.6667 6 20 315 279.3333 8 33 114 8 64 248 107 +8 33 404 213.6667 6 20 315 279.3333 8 33 114 8 80 800 314 +8 33 404 213.6667 8 33 404 213.6667 7 11 708 7 13 312 406 +8 33 404 213.6667 8 33 404 213.6667 8 33 114 8 64 248 107 +8 33 404 213.6667 8 33 404 213.6667 6 20 214 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 8 33 114 8 80 800 314 +8 33 404 213.6667 8 33 404 213.6667 6 20 214 6 23 303 909 +select * from v1,v2,v3,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and ((v3.b<50) or (v3.b=33)) +and (v1.max_c<500) and (v3.a=t2.a) and (v2.max_c>300); +a b max_c avg_c a b max_c avg_c a b min_c a b c d +6 20 315 279.3333 6 20 315 279.3333 7 11 708 7 13 312 406 +6 20 315 279.3333 6 20 315 279.3333 8 33 114 8 64 248 107 +6 20 315 279.3333 6 20 315 279.3333 6 20 214 6 20 315 279 +6 20 315 279.3333 6 20 315 279.3333 8 33 114 8 80 800 314 +6 20 315 279.3333 6 20 315 279.3333 6 20 214 6 23 303 909 +6 20 315 279.3333 8 33 404 213.6667 6 20 214 6 20 315 279 +6 20 315 279.3333 8 33 404 213.6667 6 20 214 6 23 303 909 +8 33 404 213.6667 6 20 315 279.3333 8 33 114 8 64 248 107 +8 33 404 213.6667 6 
20 315 279.3333 8 33 114 8 80 800 314 +8 33 404 213.6667 8 33 404 213.6667 7 11 708 7 13 312 406 +8 33 404 213.6667 8 33 404 213.6667 8 33 114 8 64 248 107 +8 33 404 213.6667 8 33 404 213.6667 6 20 214 6 20 315 279 +8 33 404 213.6667 8 33 404 213.6667 8 33 114 8 80 800 314 +8 33 404 213.6667 8 33 404 213.6667 6 20 214 6 23 303 909 +explain select * from v1,v2,v3,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and ((v3.b<50) or (v3.b=33)) +and (v1.max_c<500) and (v3.a=t2.a) and (v2.max_c>300); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (incremental, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +explain format=json select * from v1,v2,v3,t2 where +((v1.a=v2.a) or (v1.a=t2.a)) and ((v3.b<50) or (v3.b=33)) +and (v1.max_c<500) and (v3.a=t2.a) and (v2.max_c>300); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v3.b < 50) or (v3.b = 33))", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(min_c > 109)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 10) and ((t1.b < 50) or (t1.b = 33)))" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v2.max_c > 300)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "((max_c < 707) and (max_c > 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.max_c < 500)" + }, + "buffer_type": "incremental", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a = v2.a) or (v1.a = t2.a))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c < 500))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# using several derived tables : pushing in all tables +# conjunctive subformulas : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as 
min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +t2 where (v1.a=v2.a) and (v1.b=t2.b) and (v1.max_c>130) and (v2.min_c<130); +a b max_c avg_c a b min_c a b c d +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +t2 where (v1.a=v2.a) and (v1.b=t2.b) and (v1.max_c>130) and (v2.min_c<130); +a b max_c avg_c a b min_c a b c d +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +t2 where (v1.a=v2.a) and (v1.b=t2.b) and (v1.max_c>130) and (v2.min_c<130); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key1 key1 5 test.t2.b 2 Using where +1 PRIMARY ref key0 key0 5 v1.a 2 Using where +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +t2 where (v1.a=v2.a) and (v1.b=t2.b) and (v1.max_c>130) and (v2.min_c<130); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.b is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key1"], + "key": "key1", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["test.t2.b"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v1.max_c > 130) and (v1.a is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 130))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["v1.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v2.min_c < 130)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "((min_c < 707) and (min_c < 130))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using several derived tables : pushing in all tables +# extracted or formulas : pushing into HAVING +# conjunctive subformula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +(select a, b, avg(c) as avg_c from t1 +where t1.a<8 group by a,b) v3, +t2 where (v1.a=v2.a) and (v1.b=v3.b) and ((v3.avg_c>170) or (v3.a<5)) +and ((v1.avg_c<400) or (v1.a>1)) and (v2.min_c<200); +a b max_c avg_c a b min_c a b 
avg_c a b c d +8 33 404 213.6667 8 33 114 1 33 497.5000 2 3 207 207 +8 33 404 213.6667 8 33 114 1 33 497.5000 1 21 909 12 +8 33 404 213.6667 8 33 114 1 33 497.5000 7 13 312 406 +8 33 404 213.6667 8 33 114 1 33 497.5000 8 64 248 107 +8 33 404 213.6667 8 33 114 1 33 497.5000 6 20 315 279 +8 33 404 213.6667 8 33 114 1 33 497.5000 1 19 203 107 +8 33 404 213.6667 8 33 114 1 33 497.5000 8 80 800 314 +8 33 404 213.6667 8 33 114 1 33 497.5000 3 12 231 190 +8 33 404 213.6667 8 33 114 1 33 497.5000 6 23 303 909 +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +(select a, b, avg(c) as avg_c from t1 +where t1.a<8 group by a,b) v3, +t2 where (v1.a=v2.a) and (v1.b=v3.b) and ((v3.avg_c>170) or (v3.a<5)) +and ((v1.avg_c<400) or (v1.a>1)) and (v2.min_c<200); +a b max_c avg_c a b min_c a b avg_c a b c d +8 33 404 213.6667 8 33 114 1 33 497.5000 2 3 207 207 +8 33 404 213.6667 8 33 114 1 33 497.5000 1 21 909 12 +8 33 404 213.6667 8 33 114 1 33 497.5000 7 13 312 406 +8 33 404 213.6667 8 33 114 1 33 497.5000 8 64 248 107 +8 33 404 213.6667 8 33 114 1 33 497.5000 6 20 315 279 +8 33 404 213.6667 8 33 114 1 33 497.5000 1 19 203 107 +8 33 404 213.6667 8 33 114 1 33 497.5000 8 80 800 314 +8 33 404 213.6667 8 33 114 1 33 497.5000 3 12 231 190 +8 33 404 213.6667 8 33 114 1 33 497.5000 6 23 303 909 +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +(select a, b, avg(c) as avg_c from t1 +where t1.a<8 group by a,b) v3, +t2 where (v1.a=v2.a) and (v1.b=v3.b) and ((v3.avg_c>170) or (v3.a<5)) +and ((v1.avg_c<400) or (v1.a>1)) and (v2.min_c<200); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 5 v1.a 2 Using where +1 PRIMARY ref key0 key0 5 v1.b 2 Using where +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +(select a, b, min(c) as min_c from t1 +where t1.a>5 group by a,b having min_c < 707) v2, +(select a, b, avg(c) as avg_c from t1 +where t1.a<8 group by a,b) v3, +t2 where (v1.a=v2.a) and (v1.b=v3.b) and ((v3.avg_c>170) or (v3.a<5)) +and ((v1.avg_c<400) or (v1.a>1)) and (v2.min_c<200); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.avg_c < 400) or (v1.a > 1))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.avg_c < 400) or (v1.a > 1)) and (v1.a is not null) and (v1.b is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((avg_c < 400) or (t1.a > 1)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + 
"table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["v1.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v2.min_c < 200)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "((min_c < 707) and (min_c < 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v1.b"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v3.avg_c > 170) or (v3.a < 5))", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((avg_c > 170) or (t1.a < 5))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a < 8)" + } + } + } + } + } + } + } +} +# extracted or formula : pushing into HAVING +# conjunctive subformula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where ((v1.a=1) or (v1.max_c<300)) and (v1.b>25); +a b max_c avg_c a b c d +5 27 132 132.0000 2 3 207 207 +5 27 132 132.0000 1 21 909 12 +5 27 132 132.0000 7 13 312 406 +5 27 132 132.0000 8 64 248 107 +5 27 132 132.0000 6 20 315 279 +5 27 132 132.0000 1 19 203 107 +5 27 132 132.0000 8 80 800 314 +5 27 132 132.0000 3 12 231 190 +5 27 132 132.0000 6 23 303 909 +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where ((v1.a=1) or (v1.max_c<300)) and (v1.b>25); +a b max_c avg_c a b c d +5 27 132 132.0000 2 3 207 207 +5 27 132 132.0000 1 21 909 12 +5 27 132 132.0000 7 13 312 406 +5 27 132 132.0000 8 64 248 107 +5 27 132 132.0000 6 20 315 279 +5 27 132 132.0000 1 19 203 107 +5 27 132 132.0000 8 80 800 314 +5 27 132 132.0000 3 12 231 190 +5 27 132 132.0000 6 23 303 909 +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where ((v1.a=1) or (v1.max_c<300)) and (v1.b>25); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +group by a,b having max_c < 707) v1, +t2 where ((v1.a=1) or (v1.max_c<300)) and (v1.b>25); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a = 1) or (v1.max_c < 300)) and (v1.b > 25))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.a = 1) or (v1.max_c < 300))", + 
"materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and ((t1.a = 1) or (max_c < 300)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.b > 25)" + } + } + } + } + } + } + } +} +# extracted and formula : pushing into WHERE +# conjunctive subformula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.b<30); +a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.b<30); +a b max_c avg_c a b c d +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +explain select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.b<30); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from +(select a, b, max(c) as max_c, avg(c) as avg_c from t1 +where t1.a>5 group by a,b having max_c < 707) v1, +t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.b<30); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v1.max_c > 300) and (v1.b < 30))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 707) and (max_c > 300))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b < 30))" + } + } + } + } + } + } + } +} +# using query with union +# conjunctive subformula : pushing into WHERE +# conjunctive subformulas : pushing into HAVING and WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (t2.c>800) +union +select * from v1,t2 where (v1.max_c>100) and (v1.a>7) and (t2.d>800); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +8 33 404 213.6667 6 23 303 909 +select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (t2.c>800) +union +select * from v1,t2 where (v1.max_c>100) and (v1.a>7) and (t2.d>800); +a b max_c avg_c a b c d +1 21 500 234.6000 1 21 909 12 +8 33 404 213.6667 6 23 303 909 +explain select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (t2.c>800) +union +select * from v1,t2 where (v1.max_c>100) and (v1.a>7) and (t2.d>800); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.b 2 Using where +3 DERIVED t1 ALL NULL NULL NULL NULL 20 
Using where; Using temporary; Using filesort +2 UNION t2 ALL NULL NULL NULL NULL 9 Using where +2 UNION ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (t2.c>800) +union +select * from v1,t2 where (v1.max_c>100) and (v1.a>7) and (t2.d>800); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.c > 800) and (t2.b is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["test.t2.b"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.a < 5)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a < 5)" + } + } + } + } + } + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.d > 800)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.max_c > 100) and (v1.a > 7))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and (max_c > 100))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 7)" + } + } + } + } + } + } + } + } + ] + } + } +} +# using query with union +# extracted and formula : pushing into WHERE +# extracted or formula : pushing into HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (v1.b=19) +union +select * from v1,t2 where ((v1.max_c>400) or (v1.avg_c>270)) and (v1.a400) or (v1.avg_c>270)) and (v1.a400) or (v1.avg_c>270)) and (v1.a ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 UNION t2 ALL NULL NULL NULL NULL 9 +2 UNION ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (v1.b=19) +union +select * from v1,t2 where ((v1.max_c>400) or (v1.avg_c>270)) and (v1.a", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.b = 19)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.b = 19) and (v1.a < 5))" + }, + 
"buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b = 19) and (t1.a < 5))" + } + } + } + } + } + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.max_c > 400) or (v1.avg_c > 270))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.max_c > 400) or (v1.avg_c > 270)) and (v1.a < t2.a))", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and ((max_c > 400) or (avg_c > 270)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } + } + ] + } + } +} +# using query with union +# extracted or formula : pushing into HAVING +# extracted or formula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((t2.a=v1.a) or (v1.b=t2.b)) and ((v1.a=1) or (v1.a=6)) +union +select * from v1,t2 where ((v1.a>3) and (v1.b>27)) or (v1.max_c>550); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +select * from v1,t2 where +((t2.a=v1.a) or (v1.b=t2.b)) and ((v1.a=1) or (v1.a=6)) +union +select * from v1,t2 where ((v1.a>3) and (v1.b>27)) or (v1.max_c>550); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +1 21 500 234.6000 1 21 909 12 +1 21 500 234.6000 1 19 203 107 +6 20 315 279.3333 6 20 315 279 +6 20 315 279.3333 6 23 303 909 +8 33 404 213.6667 2 3 207 207 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 7 13 312 406 +8 33 404 213.6667 8 64 248 107 +8 33 404 213.6667 6 20 315 279 +8 33 404 213.6667 1 19 203 107 +8 33 404 213.6667 8 80 800 314 +8 33 404 213.6667 3 12 231 190 +8 33 404 213.6667 6 23 303 909 +explain select * from v1,t2 where +((t2.a=v1.a) or (v1.b=t2.b)) and ((v1.a=1) or (v1.a=6)) +union +select * from v1,t2 where ((v1.a>3) and (v1.b>27)) or (v1.max_c>550); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 UNION t2 ALL NULL NULL NULL NULL 9 +2 UNION ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v1,t2 where +((t2.a=v1.a) or (v1.b=t2.b)) and ((v1.a=1) or 
(v1.a=6)) +union +select * from v1,t2 where ((v1.a>3) and (v1.b>27)) or (v1.max_c>550); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "<union1,2>", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "<derived3>", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 1) or (v1.a = 6))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a = t2.a) or (v1.b = t2.b)) and ((v1.a = 1) or (v1.a = 6)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 1) or (t1.a = 6))" + } + } + } + } + } + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "<derived4>", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v1.a > 3) and (v1.b > 27)) or (v1.max_c > 550))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "(((v1.a > 3) and (v1.b > 27)) or (v1.max_c > 550))", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and (((t1.a > 3) and (t1.b > 27)) or (max_c > 550)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } + } + ] + } + } +} +# using query with union +# extracted or formula : pushing into HAVING +# conjunctive subformulas : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v1,t2 where +((v1.a=1) and (v1.a=t2.a)) and ((v1.max_c<500) or (v1.avg_c>500)) +union +select * from v2,t2 where +((v2.a<t2.b) or (v2.max_c>200)) and (v2.b>10) and (t2.a<2) +union +select * from v2,t2 where +(v2.max_c=t2.c) and (v2.b<10); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +6 20 315 279.3333 1 21 909 12 +6 20 315 279.3333 1 19 203 107 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 1 19 203 107 +select * from v1,t2 where +((v1.a=1) and (v1.a=t2.a)) and ((v1.max_c<500) or (v1.avg_c>500)) +union +select * from v2,t2 where +((v2.a<t2.b) or (v2.max_c>200)) and (v2.b>10) and (t2.a<2) +union +select * from v2,t2 where +(v2.max_c=t2.c) and (v2.b<10); +a b max_c avg_c a b c d +1 19 107 107.0000 1 21 909 12 +1 19 107 107.0000 1 19 203 107 +6 20 315 279.3333 1 21 909 12 +6 20 315 279.3333 1 19 203 107 +8 33 404 213.6667 1 21 909 12 +8 33 404 213.6667 1 19 203 107 +explain select * from v1,t2 where +((v1.a=1) and (v1.a=t2.a)) and ((v1.max_c<500) or (v1.avg_c>500)) +union +select * from v2,t2 where +((v2.a<t2.b) or (v2.max_c>200)) and (v2.b>10) and (t2.a<2) +union +select * from v2,t2 where +(v2.max_c=t2.c) and (v2.b<10); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY <derived4> ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 UNION t2 ALL NULL NULL NULL NULL 9 Using where +2 
UNION ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +5 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t2 ALL NULL NULL NULL NULL 9 Using where +3 UNION ref key0 key0 5 test.t2.c 2 Using where +6 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v1,t2 where +((v1.a=1) and (v1.a=t2.a)) and ((v1.max_c<500) or (v1.avg_c>500)) +union +select * from v2,t2 where +((v2.a200)) and (v2.b>10) and (t2.a<2) +union +select * from v2,t2 where +(v2.max_c=t2.c) and (v2.b<10); +EXPLAIN +{ + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 1)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a = 1) and ((v1.max_c < 500) or (v1.avg_c > 500)))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v1.max_c < 500) or (v1.avg_c > 500))", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and ((max_c < 500) or (avg_c > 500)))", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 1)" + } + } + } + } + } + } + } + }, + { + "query_block": { + "select_id": 2, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a < 2)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v2.b > 10)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v2.a < t2.b) or (v2.max_c > 200))", + "materialized": { + "query_block": { + "select_id": 5, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b > 10))" + } + } + } + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.c is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["max_c"], + "ref": ["test.t2.c"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v2.b < 10)", + "materialized": { + "query_block": { + "select_id": 6, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b < 10))" + } + } + } + } + } + } + } + } + ] + } + } +} +# using derived table with union +# conjunctive subformulas : pushing into WHERE and HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_union,t2 where (v_union.a<3) and (v_union.c>100); +a b c a b c d +1 19 107 2 3 207 207 +1 19 107 1 21 909 12 +1 19 107 7 13 312 
406 +1 19 107 8 64 248 107 +1 19 107 6 20 315 279 +1 19 107 1 19 203 107 +1 19 107 8 80 800 314 +1 19 107 3 12 231 190 +1 19 107 6 23 303 909 +select * from v_union,t2 where (v_union.a<3) and (v_union.c>100); +a b c a b c d +1 19 107 2 3 207 207 +1 19 107 1 21 909 12 +1 19 107 7 13 312 406 +1 19 107 8 64 248 107 +1 19 107 6 20 315 279 +1 19 107 1 19 203 107 +1 19 107 8 80 800 314 +1 19 107 3 12 231 190 +1 19 107 6 23 303 909 +explain select * from v_union,t2 where (v_union.a<3) and (v_union.c>100); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v_union,t2 where (v_union.a<3) and (v_union.c>100); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "attached_condition": "((v_union.a < 3) and (v_union.c > 100))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "having_condition": "((c > 109) and (c > 100))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 10) and (t1.a < 3))" + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "having_condition": "((c < 300) and (c > 100))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b > 10) and (t1.a < 3))" + } + } + } + } + } + ] + } + } + } + } + } +} +# using derived table with union +# conjunctive subformula : pushing into WHERE +# extracted or formula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_union,t2 where +((v_union.a<2) or (v_union.c>800)) and (v_union.b>12); +a b c a b c d +1 19 107 2 3 207 207 +1 19 107 1 21 909 12 +1 19 107 7 13 312 406 +1 19 107 8 64 248 107 +1 19 107 6 20 315 279 +1 19 107 1 19 203 107 +1 19 107 8 80 800 314 +1 19 107 3 12 231 190 +1 19 107 6 23 303 909 +select * from v_union,t2 where +((v_union.a<2) or (v_union.c>800)) and (v_union.b>12); +a b c a b c d +1 19 107 2 3 207 207 +1 19 107 1 21 909 12 +1 19 107 7 13 312 406 +1 19 107 8 64 248 107 +1 19 107 6 20 315 279 +1 19 107 1 19 203 107 +1 19 107 8 80 800 314 +1 19 107 3 12 231 190 +1 19 107 6 23 303 909 +explain select * from v_union,t2 where +((v_union.a<2) or (v_union.c>800)) and (v_union.b>12); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain 
format=json select * from v_union,t2 where +((v_union.a<2) or (v_union.c>800)) and (v_union.b>12); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "attached_condition": "(((v_union.a < 2) or (v_union.c > 800)) and (v_union.b > 12))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v_union.a < 2) or (v_union.c > 800))", + "materialized": { + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "having_condition": "((c > 109) and ((t1.a < 2) or (c > 800)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 10) and (t1.b > 12))" + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "having_condition": "((c < 300) and ((t1.a < 2) or (c > 800)))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b > 10) and (t1.b > 12))" + } + } + } + } + } + ] + } + } + } + } + } +} +# using derived table with union +# conjunctive subformula : pushing into HAVING +# conjunctive subformula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_union,t2 where +(v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200); +a b c a b c d +1 19 107 1 21 909 12 +1 19 107 1 19 203 107 +select * from v_union,t2 where +(v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200); +a b c a b c d +1 19 107 1 21 909 12 +1 19 107 1 19 203 107 +explain select * from v_union,t2 where +(v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 40 Using where; Using join buffer (flat, BNL join) +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v_union,t2 where +(v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 1)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "attached_condition": "((v_union.a = 1) and (v_union.c < 200))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "having_condition": "((c > 109) and (c < 200))", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 1)" + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "having_condition": "((c < 300) and (c < 200))", + 
"filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 1) and (t1.b > 10))" + } + } + } + } + } + ] + } + } + } + } + } +} +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_char as v,t2_char as t where +(v.a=t.a) and (v.b='Vika') and (v.max_c>2); +a b max_c a b c +c Vika 7 c Vinny 3 +c Vika 7 c Inga 9 +c Vika 7 c Ivan 2 +c Vika 7 c Inga 2 +select * from v_char as v,t2_char as t where +(v.a=t.a) and (v.b='Vika') and (v.max_c>2); +a b max_c a b c +c Vika 7 c Vinny 3 +c Vika 7 c Inga 9 +c Vika 7 c Ivan 2 +c Vika 7 c Inga 2 +explain select * from v_char as v,t2_char as t where +(v.a=t.a) and (v.b='Vika') and (v.max_c>2); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t ALL NULL NULL NULL NULL 12 Using where +1 PRIMARY ref key0 key0 2 test.t.a 2 Using where +2 DERIVED t1_char ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort +explain format=json select * from v_char as v,t2_char as t where +(v.a=t.a) and (v.b='Vika') and (v.max_c>2); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(t.a is not null)" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "2", + "used_key_parts": ["a"], + "ref": ["test.t.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "((v.b = 'Vika') and (v.max_c > 2))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "((max_c < 9) and (max_c > 2))", + "filesort": { + "sort_key": "t1_char.a", + "temporary_table": { + "table": { + "table_name": "t1_char", + "access_type": "ALL", + "rows": 12, + "filtered": 100, + "attached_condition": "(t1_char.b = 'Vika')" + } + } + } + } + } + } + } +} +# using derived table with union +# using several derived tables : pushing in all tables +# conjunctive subformula : pushing into WHERE using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v_union,v1,t2 where +(v_union.a=v1.a) and (v1.a=t2.a) and (t2.a=1) +and ((v_union.c>800) or (v1.max_c>200)); +a b c a b max_c avg_c a b c d +1 19 107 1 21 500 234.6000 1 21 909 12 +1 19 107 1 21 500 234.6000 1 19 203 107 +select * from v_union,v1,t2 where +(v_union.a=v1.a) and (v1.a=t2.a) and (t2.a=1) +and ((v_union.c>800) or (v1.max_c>200)); +a b c a b max_c avg_c a b c d +1 19 107 1 21 500 234.6000 1 21 909 12 +1 19 107 1 21 500 234.6000 1 19 203 107 +explain select * from v_union,v1,t2 where +(v_union.a=v1.a) and (v1.a=t2.a) and (t2.a=1) +and ((v_union.c>800) or (v1.max_c>200)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ALL NULL NULL NULL NULL 40 Using where; Using join buffer (incremental, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v_union,v1,t2 where +(v_union.a=v1.a) and (v1.a=t2.a) and (t2.a=1) +and ((v_union.c>800) 
or (v1.max_c>200)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(t2.a = 1)" + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.a = 1)" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 1)" + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 40, + "filtered": 100, + "attached_condition": "(v_union.a = 1)" + }, + "buffer_type": "incremental", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((v_union.c > 800) or (v1.max_c > 200))", + "materialized": { + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "having_condition": "(c > 109)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a = 1)" + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "having_condition": "(c < 300)", + "filesort": { + "sort_key": "t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a = 1) and (t1.b > 10))" + } + } + } + } + } + ] + } + } + } + } + } +} +# using derived table with union +# extracted or formula : pushing into WHERE +# conjunctive subformula : pushing into HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v2_union as v,t2 where +((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a); +a b c a b c d +8 33 404.0000 8 64 248 107 +6 20 312.0000 6 20 315 279 +6 20 214.0000 6 20 315 279 +8 33 404.0000 8 80 800 314 +6 20 312.0000 6 23 303 909 +6 20 214.0000 6 23 303 909 +select * from v2_union as v,t2 where +((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a); +a b c a b c d +8 33 404.0000 8 64 248 107 +6 20 312.0000 6 20 315 279 +6 20 214.0000 6 20 315 279 +8 33 404.0000 8 80 800 314 +6 20 312.0000 6 23 303 909 +6 20 214.0000 6 23 303 909 +explain select * from v2_union as v,t2 where +((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 6 Using where +2 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +4 UNION t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +NULL UNION RESULT ALL NULL NULL NULL NULL NULL +explain format=json select * from v2_union as v,t2 where +((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "(((t2.a = 6) or (t2.a = 8)) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + 
"key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 6, + "filtered": 100, + "attached_condition": "(v.c > 200)", + "materialized": { + "query_block": { + "union_result": { + "table_name": "", + "access_type": "ALL", + "query_specifications": [ + { + "query_block": { + "select_id": 2, + "having_condition": "((c > 109) and (c > 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 10) and ((t1.a = 6) or (t1.a = 8)))" + } + } + } + } + }, + { + "query_block": { + "select_id": 3, + "having_condition": "((c < 300) and (c > 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b > 10) and ((t1.a = 6) or (t1.a = 8)))" + } + } + } + } + }, + { + "query_block": { + "select_id": 4, + "having_condition": "((c < 707) and (c > 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.c > 300) and ((t1.a = 6) or (t1.a = 8)))" + } + } + } + } + } + ] + } + } + } + } + } +} +# using embedded derived table : pushing the same conditions +# using several derived tables : pushing in all tables +# conjunctive subformula : pushing into WHERE +# extracted and formula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v1 where +(v4.a<13) and (v1.a>5) and (v1.b>12); +a b min_c a b max_c avg_c +1 19 107 6 20 315 279.3333 +1 21 500 6 20 315 279.3333 +5 16 207 6 20 315 279.3333 +5 27 132 6 20 315 279.3333 +6 20 315 6 20 315 279.3333 +8 33 404 6 20 315 279.3333 +1 19 107 8 33 404 213.6667 +1 21 500 8 33 404 213.6667 +5 16 207 8 33 404 213.6667 +5 27 132 8 33 404 213.6667 +6 20 315 8 33 404 213.6667 +8 33 404 8 33 404 213.6667 +select * from v4,v1 where +(v4.a<13) and (v1.a>5) and (v1.b>12); +a b min_c a b max_c avg_c +1 19 107 6 20 315 279.3333 +1 21 500 6 20 315 279.3333 +5 16 207 6 20 315 279.3333 +5 27 132 6 20 315 279.3333 +6 20 315 6 20 315 279.3333 +8 33 404 6 20 315 279.3333 +1 19 107 8 33 404 213.6667 +1 21 500 8 33 404 213.6667 +5 16 207 8 33 404 213.6667 +5 27 132 8 33 404 213.6667 +6 20 315 8 33 404 213.6667 +8 33 404 8 33 404 213.6667 +explain select * from v4,v1 where +(v4.a<13) and (v1.a>5) and (v1.b>12); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v1 where +(v4.a<13) and (v1.a>5) and (v1.b>12); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v4.a < 13)", + "materialized": { + "query_block": { + "select_id": 2, + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and (v1.a < 13))", 
+ "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and (t1.a < 13))" + } + } + } + } + } + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a > 5) and (v1.b > 12))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b > 12))" + } + } + } + } + } + } + } +} +# using embedded view : nothing to push +# using several derived tables : pushing only in one table +# conjunctive subformula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a=v1.a) and (v1.b>30); +a b min_c a b max_c avg_c a b c d +8 33 404 8 33 404 213.6667 8 64 248 107 +8 33 404 8 33 404 213.6667 8 80 800 314 +select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a=v1.a) and (v1.b>30); +a b min_c a b max_c avg_c a b c d +8 33 404 8 33 404 213.6667 8 64 248 107 +8 33 404 8 33 404 213.6667 8 80 800 314 +explain select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a=v1.a) and (v1.b>30); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key1 key1 5 test.t2.a 2 +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a=v1.a) and (v1.b>30); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key1"], + "key": "key1", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 2, + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(v1.a < 15)", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a < 15)" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.b > 30)", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { 
+ "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.b > 30)" + } + } + } + } + } + } + } +} +# using embedded view : pushing different conditions +# using several derived tables : pushing in all tables +# conjunctive subformula : pushing into WHERE using equalities +# extracted and formula : pushing into WHERE using equalities +# conjunctive subformula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a>1) and (v4.a=v1.a) and (v4.min_c>100) and (v1.b<30); +a b min_c a b max_c avg_c a b c d +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 6 20 315 279.3333 6 23 303 909 +select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a>1) and (v4.a=v1.a) and (v4.min_c>100) and (v1.b<30); +a b min_c a b max_c avg_c a b c d +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 6 20 315 279.3333 6 23 303 909 +explain select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a>1) and (v4.a=v1.a) and (v4.min_c>100) and (v1.b<30); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key1 key1 5 test.t2.a 2 Using where +1 PRIMARY ref key0 key0 5 test.t2.a 2 Using where +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v1,t2 where +(v4.a=t2.a) and (v4.a>1) and (v4.a=v1.a) and (v4.min_c>100) and (v1.b<30); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.a > 1) and (t2.a is not null) and (t2.a is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key1"], + "key": "key1", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v4.min_c > 100)", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(min_c > 100)", + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and (v1.a > 1))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and (t1.a > 1))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["test.t2.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.b < 30)", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 1) and (t1.b < 30))" + } + } + } + } + } + } + } +} +# using embedded view : pushing different conditions +# using several derived tables : 
pushing in all tables +# extracted or formula : pushing into WHERE +# conjunctive subformula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v1,t2 where +(((v4.b>10) and (v4.a>1)) or (v4.b<20)) and (v1.max_c>200) and (v1.a=v4.a); +a b min_c a b max_c avg_c a b c d +1 19 107 1 21 500 234.6000 2 3 207 207 +1 19 107 1 21 500 234.6000 1 21 909 12 +1 19 107 1 21 500 234.6000 7 13 312 406 +1 19 107 1 21 500 234.6000 8 64 248 107 +1 19 107 1 21 500 234.6000 6 20 315 279 +1 19 107 1 21 500 234.6000 1 19 203 107 +1 19 107 1 21 500 234.6000 8 80 800 314 +1 19 107 1 21 500 234.6000 3 12 231 190 +1 19 107 1 21 500 234.6000 6 23 303 909 +5 16 207 5 16 207 207.0000 2 3 207 207 +5 16 207 5 16 207 207.0000 1 21 909 12 +5 16 207 5 16 207 207.0000 7 13 312 406 +5 16 207 5 16 207 207.0000 8 64 248 107 +5 16 207 5 16 207 207.0000 6 20 315 279 +5 16 207 5 16 207 207.0000 1 19 203 107 +5 16 207 5 16 207 207.0000 8 80 800 314 +5 16 207 5 16 207 207.0000 3 12 231 190 +5 16 207 5 16 207 207.0000 6 23 303 909 +5 27 132 5 16 207 207.0000 2 3 207 207 +5 27 132 5 16 207 207.0000 1 21 909 12 +5 27 132 5 16 207 207.0000 7 13 312 406 +5 27 132 5 16 207 207.0000 8 64 248 107 +5 27 132 5 16 207 207.0000 6 20 315 279 +5 27 132 5 16 207 207.0000 1 19 203 107 +5 27 132 5 16 207 207.0000 8 80 800 314 +5 27 132 5 16 207 207.0000 3 12 231 190 +5 27 132 5 16 207 207.0000 6 23 303 909 +6 20 315 6 20 315 279.3333 2 3 207 207 +6 20 315 6 20 315 279.3333 1 21 909 12 +6 20 315 6 20 315 279.3333 7 13 312 406 +6 20 315 6 20 315 279.3333 8 64 248 107 +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 6 20 315 279.3333 1 19 203 107 +6 20 315 6 20 315 279.3333 8 80 800 314 +6 20 315 6 20 315 279.3333 3 12 231 190 +6 20 315 6 20 315 279.3333 6 23 303 909 +8 33 404 8 33 404 213.6667 2 3 207 207 +8 33 404 8 33 404 213.6667 1 21 909 12 +8 33 404 8 33 404 213.6667 7 13 312 406 +8 33 404 8 33 404 213.6667 8 64 248 107 +8 33 404 8 33 404 213.6667 6 20 315 279 +8 33 404 8 33 404 213.6667 1 19 203 107 +8 33 404 8 33 404 213.6667 8 80 800 314 +8 33 404 8 33 404 213.6667 3 12 231 190 +8 33 404 8 33 404 213.6667 6 23 303 909 +select * from v4,v1,t2 where +(((v4.b>10) and (v4.a>1)) or (v4.b<20)) and (v1.max_c>200) and (v1.a=v4.a); +a b min_c a b max_c avg_c a b c d +1 19 107 1 21 500 234.6000 2 3 207 207 +1 19 107 1 21 500 234.6000 1 21 909 12 +1 19 107 1 21 500 234.6000 7 13 312 406 +1 19 107 1 21 500 234.6000 8 64 248 107 +1 19 107 1 21 500 234.6000 6 20 315 279 +1 19 107 1 21 500 234.6000 1 19 203 107 +1 19 107 1 21 500 234.6000 8 80 800 314 +1 19 107 1 21 500 234.6000 3 12 231 190 +1 19 107 1 21 500 234.6000 6 23 303 909 +5 16 207 5 16 207 207.0000 2 3 207 207 +5 16 207 5 16 207 207.0000 1 21 909 12 +5 16 207 5 16 207 207.0000 7 13 312 406 +5 16 207 5 16 207 207.0000 8 64 248 107 +5 16 207 5 16 207 207.0000 6 20 315 279 +5 16 207 5 16 207 207.0000 1 19 203 107 +5 16 207 5 16 207 207.0000 8 80 800 314 +5 16 207 5 16 207 207.0000 3 12 231 190 +5 16 207 5 16 207 207.0000 6 23 303 909 +5 27 132 5 16 207 207.0000 2 3 207 207 +5 27 132 5 16 207 207.0000 1 21 909 12 +5 27 132 5 16 207 207.0000 7 13 312 406 +5 27 132 5 16 207 207.0000 8 64 248 107 +5 27 132 5 16 207 207.0000 6 20 315 279 +5 27 132 5 16 207 207.0000 1 19 203 107 +5 27 132 5 16 207 207.0000 8 80 800 314 +5 27 132 5 16 207 207.0000 3 12 231 190 +5 27 132 5 16 207 207.0000 6 23 303 909 +6 20 315 6 20 315 279.3333 2 3 207 207 +6 20 315 6 20 315 279.3333 1 21 909 12 +6 20 315 6 20 315 279.3333 7 13 312 406 +6 20 315 6 20 315 
279.3333 8 64 248 107 +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 6 20 315 279.3333 1 19 203 107 +6 20 315 6 20 315 279.3333 8 80 800 314 +6 20 315 6 20 315 279.3333 3 12 231 190 +6 20 315 6 20 315 279.3333 6 23 303 909 +8 33 404 8 33 404 213.6667 2 3 207 207 +8 33 404 8 33 404 213.6667 1 21 909 12 +8 33 404 8 33 404 213.6667 7 13 312 406 +8 33 404 8 33 404 213.6667 8 64 248 107 +8 33 404 8 33 404 213.6667 6 20 315 279 +8 33 404 8 33 404 213.6667 1 19 203 107 +8 33 404 8 33 404 213.6667 8 80 800 314 +8 33 404 8 33 404 213.6667 3 12 231 190 +8 33 404 8 33 404 213.6667 6 23 303 909 +explain select * from v4,v1,t2 where +(((v4.b>10) and (v4.a>1)) or (v4.b<20)) and (v1.max_c>200) and (v1.a=v4.a); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join) +1 PRIMARY ref key0 key0 5 v4.a 2 Using where +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v1,t2 where +(((v4.b>10) and (v4.a>1)) or (v4.b<20)) and (v1.max_c>200) and (v1.a=v4.a); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100 + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v4.b > 10) and (v4.a > 1)) or (v4.b < 20))" + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "attached_condition": "((((v4.b > 10) and (v4.a > 1)) or (v4.b < 20)) and (v4.a is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and (((v1.b > 10) and (v1.a > 1)) or (v1.b < 20)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and (((t1.b > 10) and (t1.a > 1)) or (t1.b < 20)))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["v4.a"], + "rows": 2, + "filtered": 100, + "attached_condition": "(v1.max_c > 200)", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and (max_c > 200))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + } + } + } + } + } + } + } +} +# using embedded view : pushing different conditions +# using several derived tables : pushing only in one table +# extracted or formula : pushing into WHERE +# extracted or formula : pushing into HAVING +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2 where +((v4.a>12) and (v4.min_c<300) and (v4.b>13)) or (v4.a<1); +a b min_c a b max_c avg_c +select * from v4,v2 where +((v4.a>12) and (v4.min_c<300) and (v4.b>13)) or (v4.a<1); +a b min_c 
a b max_c avg_c +explain select * from v4,v2 where +((v4.a>12) and (v4.min_c<300) and (v4.b>13)) or (v4.a<1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2 where +((v4.a>12) and (v4.min_c<300) and (v4.b>13)) or (v4.a<1); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(((v4.a > 12) and (v4.min_c < 300) and (v4.b > 13)) or (v4.a < 1))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(((v1.a > 12) and (min_c < 300) and (v1.b > 13)) or (v1.a < 1))", + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and (((v1.a > 12) and (v1.b > 13)) or (v1.a < 1)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and (((t1.a > 12) and (t1.b > 13)) or (t1.a < 1)))" + } + } + } + } + } + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using embedded view : pushing different conditions +# using several derived tables : pushing only in one table +# conjunctive subformula : pushing into WHERE +# conjunctive subformula : pushing into HAVING +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v4.min_c<100); +a b min_c a b max_c avg_c +select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v4.min_c<100); +a b min_c a b max_c avg_c +explain select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v4.min_c<100); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ref key0 key0 5 v4.a 2 +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v4.min_c<100); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v4.b = v4.a) and (v4.min_c < 100) and (v4.a is not null))", + "materialized": { + "query_block": { + 
"select_id": 2, + "having_condition": "(min_c < 100)", + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.b = v1.a) and (v1.a < 15))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b = t1.a) and (t1.a < 15))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v4.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using embedded view : pushing the same conditions +# using several derived tables : pushing in all tables +# extracted and formula : pushing into WHERE using equalities +# conjunctive subformula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v2.b<30); +a b min_c a b max_c avg_c +select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v2.b<30); +a b min_c a b max_c avg_c +explain select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v2.b<30); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ref key0 key0 5 v4.a 2 +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and (v2.b<30); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v4.b = v4.a) and (v4.a < 30) and (v4.a is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.b = v1.a) and (v1.a < 15) and (v1.a < 30))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b = t1.a) and (t1.a < 15) and (t1.a < 30))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v4.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + 
"rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and (t1.b < 30))" + } + } + } + } + } + } + } +} +# using embedded view : pushing the same conditions +# using several derived tables : pushing in all tables +# extracted or formula : pushing into WHERE using equalities +# extracted and formula : pushing into WHERE using equalities +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and ((v2.b<30) or (v4.a>2)); +a b min_c a b max_c avg_c +select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and ((v2.b<30) or (v4.a>2)); +a b min_c a b max_c avg_c +explain select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and ((v2.b<30) or (v4.a>2)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ref key0 key0 5 v4.a 2 +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2 where +(v4.a=v2.b) and (v4.a=v4.b) and ((v2.b<30) or (v4.a>2)); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v4.b = v4.a) and ((v4.a < 30) or (v4.a > 2)) and (v4.a is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.b = v1.a) and (v1.a < 15) and ((v1.a < 30) or (v1.a > 2)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.b = t1.a) and (t1.a < 15) and ((t1.a < 30) or (t1.a > 2)))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["b"], + "ref": ["v4.a"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a > 5) and ((t1.b < 30) or (t1.b > 2)))" + } + } + } + } + } + } + } +} +# using embedded view : pushing the same conditions +# using several derived tables : pushing in all tables +# extracted or formula : pushing into WHERE +# conjunctive subformula : pushing into WHERE +# pushing equalities +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2 where +(((v4.a<12) and (v4.b>13)) or (v4.a>10)) and +(v4.min_c=v2.max_c) and (v4.min_c>100); +a b min_c a b max_c avg_c +6 20 315 6 20 315 279.3333 +8 33 404 8 33 404 213.6667 +select * from v4,v2 where +(((v4.a<12) and (v4.b>13)) or (v4.a>10)) and +(v4.min_c=v2.max_c) and (v4.min_c>100); +a b min_c a b max_c avg_c +6 20 315 6 20 315 279.3333 +8 33 404 8 33 404 213.6667 +explain select * from v4,v2 where +(((v4.a<12) and (v4.b>13)) or (v4.a>10)) and 
+(v4.min_c=v2.max_c) and (v4.min_c>100); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY ALL NULL NULL NULL NULL 20 Using where +1 PRIMARY ref key0 key0 5 v4.min_c 2 +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2 where +(((v4.a<12) and (v4.b>13)) or (v4.a>10)) and +(v4.min_c=v2.max_c) and (v4.min_c>100); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((((v4.a < 12) and (v4.b > 13)) or (v4.a > 10)) and (v4.min_c > 100) and (v4.min_c is not null))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(min_c > 100)", + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and (((v1.a < 12) and (v1.b > 13)) or (v1.a > 10)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and (((t1.a < 12) and (t1.b > 13)) or (t1.a > 10)))" + } + } + } + } + } + } + } + } + } + } + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["max_c"], + "ref": ["v4.min_c"], + "rows": 2, + "filtered": 100, + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "((max_c < 707) and (max_c > 100))", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +# using embedded view : pushing the same conditions +# using several derived tables : pushing only in one table +# extracted or formula : pushing into WHERE +set statement optimizer_switch='condition_pushdown_for_derived=off' for select * from v4,v2,t2 where +(((v4.a<12) and (t2.b>13)) or (v4.a>10)) and +(v4.min_c=t2.c) and (t2.c>100); +a b min_c a b max_c avg_c a b c d +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 8 33 404 213.6667 6 20 315 279 +select * from v4,v2,t2 where +(((v4.a<12) and (t2.b>13)) or (v4.a>10)) and +(v4.min_c=t2.c) and (t2.c>100); +a b min_c a b max_c avg_c a b c d +6 20 315 6 20 315 279.3333 6 20 315 279 +6 20 315 8 33 404 213.6667 6 20 315 279 +explain select * from v4,v2,t2 where +(((v4.a<12) and (t2.b>13)) or (v4.a>10)) and +(v4.min_c=t2.c) and (t2.c>100); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where +1 PRIMARY ref key0 key0 5 test.t2.c 2 Using where +1 PRIMARY ALL NULL NULL NULL NULL 20 Using join buffer (flat, BNL join) +4 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +2 DERIVED ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +3 DERIVED t1 ALL NULL NULL NULL NULL 20 Using where; Using temporary; Using filesort +explain format=json select * from v4,v2,t2 where +(((v4.a<12) and (t2.b>13)) or (v4.a>10)) and +(v4.min_c=t2.c) and 
(t2.c>100); +EXPLAIN +{ + "query_block": { + "select_id": 1, + "table": { + "table_name": "t2", + "access_type": "ALL", + "rows": 9, + "filtered": 100, + "attached_condition": "((t2.c > 100) and (t2.c is not null))" + }, + "table": { + "table_name": "", + "access_type": "ref", + "possible_keys": ["key0"], + "key": "key0", + "key_length": "5", + "used_key_parts": ["min_c"], + "ref": ["test.t2.c"], + "rows": 2, + "filtered": 100, + "attached_condition": "(((v4.a < 12) and (t2.b > 13)) or (v4.a > 10))", + "materialized": { + "query_block": { + "select_id": 2, + "having_condition": "(min_c > 100)", + "filesort": { + "sort_key": "v1.a, v1.b", + "temporary_table": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((v1.a < 15) and ((v1.a < 12) or (v1.a > 10)))", + "materialized": { + "query_block": { + "select_id": 3, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "((t1.a < 15) and ((t1.a < 12) or (t1.a > 10)))" + } + } + } + } + } + } + } + } + } + } + }, + "block-nl-join": { + "table": { + "table_name": "", + "access_type": "ALL", + "rows": 20, + "filtered": 100 + }, + "buffer_type": "flat", + "buffer_size": "256Kb", + "join_type": "BNL", + "materialized": { + "query_block": { + "select_id": 4, + "having_condition": "(max_c < 707)", + "filesort": { + "sort_key": "t1.a, t1.b", + "temporary_table": { + "table": { + "table_name": "t1", + "access_type": "ALL", + "rows": 20, + "filtered": 100, + "attached_condition": "(t1.a > 5)" + } + } + } + } + } + } + } +} +drop view v1,v2,v3,v4,v_union,v2_union,v_double,v_char,v_decimal; +drop table t1,t2,t1_double,t2_double,t1_char,t2_char,t1_decimal,t2_decimal; diff --git a/mysql-test/t/derived_cond_pushdown.test b/mysql-test/t/derived_cond_pushdown.test new file mode 100644 index 00000000000..6bde221a55c --- /dev/null +++ b/mysql-test/t/derived_cond_pushdown.test @@ -0,0 +1,812 @@ +let $no_pushdown= set statement optimizer_switch='condition_pushdown_for_derived=off' for; + +create table t1 (a int, b int, c int); +create table t2 (a int, b int, c int, d decimal); +insert into t1 values + (1,21,345), (1,33,7), (8,33,114), (1,21,500), (1,19,107), (5,14,787), + (8,33,123), (9,10,211), (5,16,207), (1,33,988), (5,27,132), (1,21,104), + (6,20,309), (6,20,315), (1,21,101), (8,33,404), (9,10,800), (1,21,123), + (7,11,708), (6,20,214); +insert into t2 values + (2,3,207,207.0000), (1,21,909,12.0000), (7,13,312,406.0000), + (8,64,248,107.0000), (6,20,315,279.3333), (1,19,203,107.0000), + (8,80,800,314.0000), (3,12,231,190.0000), (6,23,303,909.0000); + +create table t1_double(a int, b double, c double); +insert into t1_double values + (1,23.4,14.3333), (1,12.5,18.9), (3,12.5,18.9), + (4,33.4,14.3333), (4,14.3333,13.65), (5,17.89,7.22), + (6,33.4,14.3), (10,33.4,13.65), (11,33.4,13.65); + +create table t2_double(a int, b double, c double); +insert into t2_double values + (1,22.4,14.3333), (1,12.5,18.9), (2,22.4,18.9), + (4,33.4,14.3333), (5,22.4,13.65), (7,17.89,18.9), + (6,33.4,14.3333), (10,31.4,13.65), (12,33.4,13.65); + +create table t1_char(a char, b char(8), c int); +insert into t1_char values + ('a','Ivan',1), ('b','Vika',2), ('b','Inga',6), ('c','Vika',7), + ('b','Ivan',7), ('a','Alex',6), ('b','Inga',5), ('d','Ron',9), + ('d','Harry',2), ('d','Hermione',3), ('c','Ivan',3), ('c','Harry',4); + +create table t2_char(a char, b 
char(8), c int); +insert into t2_char values + ('b','Ivan',1), ('c','Vinny',3), ('c','Inga',9), ('a','Vika',1), + ('c','Ivan',2), ('b','Ali',6), ('c','Inga',2), ('a','Ron',9), + ('d','Harry',1), ('b','Hermes',3), ('b','Ivan',11), ('b','Harry',4); + +create table t1_decimal (a decimal(3,1), b decimal(3,1), c int); +insert into t1_decimal values + (1,1,23),(2,2,11),(3,3,16), + (1,1,12),(1,1,14),(2,3,15), + (2,1,13),(2,3,11),(3,3,16); + +create table t2_decimal (a decimal(3,1), b decimal(3,1), c int); +insert into t2_decimal values + (2,1,13),(2,2,11),(3,3,16), + (1,3,22),(1,3,14),(2,2,15), + (2,1,43),(2,3,11),(2,3,16); + +create view v1 as select a, b, max(c) as max_c, avg(c) as avg_c from t1 + group by a,b having max_c < 707; + +create view v2 as select a, b, max(c) as max_c, avg(c) as avg_c from t1 + where t1.a>5 group by a,b having max_c < 707; + +create view v3 as select a, b, min(c) as min_c from t1 + where t1.a<10 group by a,b having min_c > 109; + +create view v4 as + select a, b, min(max_c) as min_c from v1 + where (v1.a<15) group by a,b; + +create view v_union as + select a, b, min(c) as c from t1 + where t1.a<10 group by a,b having c > 109 + union + select a, b, max(c) as c from t1 + where t1.b>10 group by a,b having c < 300; + +create view v2_union as + select a, b, min(c) as c from t1 + where t1.a<10 group by a,b having c > 109 + union + select a, b, max(c) as c from t1 + where t1.b>10 group by a,b having c < 300 + union + select a, b, avg(c) as c from t1 + where t1.c>300 group by a,b having c < 707; + +create view v_double as + select a, avg(a/4) as avg_a, b, c from t1_double + where (b>12.2) group by b,c having (avg_a<22.333); + +create view v_char as + select a, b, max(c) as max_c from t1_char + group by a,b having max_c < 9; + +create view v_decimal as + select a, b, avg(c) as avg_c from t1_decimal + group by a,b having (avg_c>12); + +--echo # conjunctive subformula : pushing into HAVING +let $query= select * from v1,t2 where (v1.max_c>214) and (t2.a>v1.a); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + group by a,b having max_c < 707) v1, + t2 where (v1.a=t2.a) and (v1.max_c>300); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into HAVING +let $query= + select * from v1,t2 where + ((v1.max_c>400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a300) and (v1.avg_c>t2.d) and (v1.b=t2.b)) or + ((v1.max_c<135) and (v1.max_c6) and (t2.b>v1.b); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= select * from v2,t2 where (v2.b>25) and (t2.a7) and (t2.c7) and (t2.c5) and (t2.b4) and (v1.b>t2.b) and (v1.max_c=t2.d)) or + ((v1.a<2) and (v1.max_c400) and (t2.b>v1.b); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v_double as v,t2_double as t where + (v.a=t.a) and (v.avg_a>0.45) and (v.b>10); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v_decimal as v,t2_decimal as t where + (v.a=t.a) and (v.avg_c>15) and (v.b>1); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into HAVING and WHERE +let $query= + select * from v1,t2 where + ((v1.a>7) and 
(v1.max_c>300) and (t2.c120)) or (v1.a>7); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formulas : pushing into WHERE and HAVING +let $query= + select * from v1,t2 where + ((v1.a<2) and (v1.max_c>120) and (v1.b=t2.b)) or (v1.a>7); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v1,t2 where + ((v1.a<2) and (v1.max_c<200) and (t2.c>v1.max_c) and (v1.max_c=t2.d)) or + ((v1.a>4) and (v1.max_c<500) and (t2.b400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a400) and (t2.a>v1.a)) or ((v1.max_c<135) and (t2.a150) and (v1.max_c=t2.c); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted and formula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v1,t2 where (v1.a=v1.b) and (v1.a=t2.a) and (v1.a=3); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v1,t2 where (v1.a=1) and (v1.b=21) and (t2.a=2); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v_char as v,t2_char as t where + (v.a='c') and (v.b<'Hermes') and ((v.b=t.b) or (v.max_c>20)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted and formula : pushing into WHERE using equalities +--echo # pushing equalities +let $query= +select * from v_decimal as v,t2_decimal as t where + (v.a=v.b) and (v.b=t.b) and ((t.b>1) or (v.a=1)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into HAVING using equalities +let $query= + select * from v1,t2 + where ((t2.a<4) and (v1.a=t2.a)) or ((t2.c>150) and (v1.max_c=t2.c)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # conjunctive subformulas : pushing into WHERE and HAVING using equalities +let $query= + select * from v1,t2 + where ((t2.a>5) and (v1.a=t2.a)) and ((t2.c>250) and (v1.max_c=t2.c)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # conjunctive subformulas : pushing into WHERE and HAVING +--echo # pushing equalities +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + group by a,b having max_c < 707) v1, + t2 where (v1.a=8) and (v1.a=t2.a) and (v1.max_c=404); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # conjunctive subformulas : pushing into WHERE and HAVING +let $query= + select * from v1,t2 where + (v1.a>3) and (v1.max_c>200) and (t2.b10) or (v.a=1)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # conjunctive subformula : pushing into WHERE +--echo # extracted or formula : pushing into HAVING using equalities +let $query= + select * from v_double as v,t2_double as t where + (((v.a>0.2) or (v.b<17)) or (t.c>17)) and (t.c=v.c) and (v.c>18); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformula : pushing into HAVING +--echo # pushing equalities +let $query= + select * from v_decimal as v,t2_decimal as t where + (((v.a>4) or (v.a=2)) or 
(v.b>3)) and (v.avg_c=13); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + where t1.a>5 group by a,b having max_c < 707) v1, + t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.a=v1.b); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # nothing to push +let $query= + select * from v1,t2 where (t2.a<2) and (t2.c>900); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= select * from v1,t2 where (v1.a=t2.a) and (v1.b=t2.b); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v1,t2 where + (t2.a=v1.a) or (v1.b=t2.b) and ((v1.a=1) or (v1.a=6)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v1,t2 where (v1.a=1) or (v1.b=21) or (t2.a=2); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v1,t2 where + (t2.a<2) and (t2.c>900) and ((v1.a13) and (t2.c<115)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing in all tables +--echo # conjunctive subformula : pushing into HAVING +--echo # extracted or formula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v1,v2,t2 where ((v1.a=v2.a) or (v1.a=t2.a)) and + ((v2.b<50) or (v2.b=19)) and (v1.max_c<300); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing only in one table +--echo # conjunctive subformula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v1,v2,t2 where + (v1.a=t2.a) and (v1.a=v1.b) and (v1.a=v2.a) and (v2.max_c<300); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing only in one table +--echo # extracted and formula : pushing into WHERE +--echo # conjunctive subformula : pushing into WHERE using equalities +--echo # pushing equalities +let $query= + select * from v1,v2,t2 where (v1.a=1) and (v1.b>10) and (v1.b=v2.b); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformula : pushing into WHERE using equalities +--echo # pushing equalities +let $query= + select * from v_char as v,t2_char as t where + (v.a=t.a) and (t.a='b') and ((v.b='Vika') or (v.b='Ali')); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing in all tables +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformulas : pushing into HAVING +--echo # pushing equalities +let $query= + select * from v1,v2,v3,t2 where + ((v1.a=v2.a) or (v1.a=t2.a)) and ((v3.b<50) or (v3.b=33)) + and (v1.max_c<500) and (v3.a=t2.a) and (v2.max_c>300); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing in all tables +--echo # conjunctive subformulas : pushing into HAVING +let $query= + select * from + (select a, b, max(c) as max_c, 
avg(c) as avg_c from t1 + where t1.a>5 group by a,b having max_c < 707) v1, + (select a, b, min(c) as min_c from t1 + where t1.a>5 group by a,b having min_c < 707) v2, + t2 where (v1.a=v2.a) and (v1.b=t2.b) and (v1.max_c>130) and (v2.min_c<130); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using several derived tables : pushing in all tables +--echo # extracted or formulas : pushing into HAVING +--echo # conjunctive subformula : pushing into HAVING +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + where t1.a>5 group by a,b having max_c < 707) v1, + (select a, b, min(c) as min_c from t1 + where t1.a>5 group by a,b having min_c < 707) v2, + (select a, b, avg(c) as avg_c from t1 + where t1.a<8 group by a,b) v3, + t2 where (v1.a=v2.a) and (v1.b=v3.b) and ((v3.avg_c>170) or (v3.a<5)) + and ((v1.avg_c<400) or (v1.a>1)) and (v2.min_c<200); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted or formula : pushing into HAVING +--echo # conjunctive subformula : pushing into WHERE +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + group by a,b having max_c < 707) v1, + t2 where ((v1.a=1) or (v1.max_c<300)) and (v1.b>25); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # extracted and formula : pushing into WHERE +--echo # conjunctive subformula : pushing into HAVING +let $query= + select * from + (select a, b, max(c) as max_c, avg(c) as avg_c from t1 + where t1.a>5 group by a,b having max_c < 707) v1, + t2 where (v1.a=t2.a) and (v1.max_c>300) and (v1.b<30); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using query with union +--echo # conjunctive subformula : pushing into WHERE +--echo # conjunctive subformulas : pushing into HAVING and WHERE +let $query= + select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (t2.c>800) + union + select * from v1,t2 where (v1.max_c>100) and (v1.a>7) and (t2.d>800); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using query with union +--echo # extracted and formula : pushing into WHERE +--echo # extracted or formula : pushing into HAVING +--echo # pushing equalities +let $query= + select * from v1,t2 where (v1.a<5) and (v1.b=t2.b) and (v1.b=19) + union + select * from v1,t2 where ((v1.max_c>400) or (v1.avg_c>270)) and (v1.a3) and (v1.b>27)) or (v1.max_c>550); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using query with union +--echo # extracted or formula : pushing into HAVING +--echo # conjunctive subformulas : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v1,t2 where + ((v1.a=1) and (v1.a=t2.a)) and ((v1.max_c<500) or (v1.avg_c>500)) + union + select * from v2,t2 where + ((v2.a200)) and (v2.b>10) and (t2.a<2) + union + select * from v2,t2 where + (v2.max_c=t2.c) and (v2.b<10); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using derived table with union +--echo # conjunctive subformulas : pushing into WHERE and HAVING +let $query= select * from v_union,t2 where (v_union.a<3) and (v_union.c>100); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using derived table with 
union +--echo # conjunctive subformula : pushing into WHERE +--echo # extracted or formula : pushing into HAVING +let $query= + select * from v_union,t2 where + ((v_union.a<2) or (v_union.c>800)) and (v_union.b>12); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using derived table with union +--echo # conjunctive subformula : pushing into HAVING +--echo # conjunctive subformula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v_union,t2 where + (v_union.a=1) and (v_union.a=t2.a) and (v_union.c<200); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +let $query= + select * from v_char as v,t2_char as t where + (v.a=t.a) and (v.b='Vika') and (v.max_c>2); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using derived table with union +--echo # using several derived tables : pushing in all tables +--echo # conjunctive subformula : pushing into WHERE using equalities +--echo # pushing equalities +let $query= + select * from v_union,v1,t2 where + (v_union.a=v1.a) and (v1.a=t2.a) and (t2.a=1) + and ((v_union.c>800) or (v1.max_c>200)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using derived table with union +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformula : pushing into HAVING +--echo # pushing equalities +let $query= + select * from v2_union as v,t2 where + ((v.a=6) or (v.a=8)) and (v.c>200) and (v.a=t2.a); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded derived table : pushing the same conditions +--echo # using several derived tables : pushing in all tables +--echo # conjunctive subformula : pushing into WHERE +--echo # extracted and formula : pushing into WHERE +let $query= +select * from v4,v1 where + (v4.a<13) and (v1.a>5) and (v1.b>12); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : nothing to push +--echo # using several derived tables : pushing only in one table +--echo # conjunctive subformula : pushing into WHERE +let $query= + select * from v4,v1,t2 where + (v4.a=t2.a) and (v4.a=v1.a) and (v1.b>30); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing different conditions +--echo # using several derived tables : pushing in all tables +--echo # conjunctive subformula : pushing into WHERE using equalities +--echo # extracted and formula : pushing into WHERE using equalities +--echo # conjunctive subformula : pushing into HAVING +let $query= + select * from v4,v1,t2 where + (v4.a=t2.a) and (v4.a>1) and (v4.a=v1.a) and (v4.min_c>100) and (v1.b<30); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing different conditions +--echo # using several derived tables : pushing in all tables +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformula : pushing into HAVING +let $query= + select * from v4,v1,t2 where + (((v4.b>10) and (v4.a>1)) or (v4.b<20)) and (v1.max_c>200) and (v1.a=v4.a); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing different conditions 
+--echo # using several derived tables : pushing only in one table +--echo # extracted or formula : pushing into WHERE +--echo # extracted or formula : pushing into HAVING +let $query= + select * from v4,v2 where + ((v4.a>12) and (v4.min_c<300) and (v4.b>13)) or (v4.a<1); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing different conditions +--echo # using several derived tables : pushing only in one table +--echo # conjunctive subformula : pushing into WHERE +--echo # conjunctive subformula : pushing into HAVING +--echo # pushing equalities +let $query= + select * from v4,v2 where + (v4.a=v2.b) and (v4.a=v4.b) and (v4.min_c<100); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing the same conditions +--echo # using several derived tables : pushing in all tables +--echo # extracted and formula : pushing into WHERE using equalities +--echo # conjunctive subformula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v4,v2 where + (v4.a=v2.b) and (v4.a=v4.b) and (v2.b<30); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing the same conditions +--echo # using several derived tables : pushing in all tables +--echo # extracted or formula : pushing into WHERE using equalities +--echo # extracted and formula : pushing into WHERE using equalities +--echo # pushing equalities +let $query= + select * from v4,v2 where + (v4.a=v2.b) and (v4.a=v4.b) and ((v2.b<30) or (v4.a>2)); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing the same conditions +--echo # using several derived tables : pushing in all tables +--echo # extracted or formula : pushing into WHERE +--echo # conjunctive subformula : pushing into WHERE +--echo # pushing equalities +let $query= + select * from v4,v2 where + (((v4.a<12) and (v4.b>13)) or (v4.a>10)) and + (v4.min_c=v2.max_c) and (v4.min_c>100); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +--echo # using embedded view : pushing the same conditions +--echo # using several derived tables : pushing only in one table +--echo # extracted or formula : pushing into WHERE +let $query= + select * from v4,v2,t2 where + (((v4.a<12) and (t2.b>13)) or (v4.a>10)) and + (v4.min_c=t2.c) and (t2.c>100); +eval $no_pushdown $query; +eval $query; +eval explain $query; +eval explain format=json $query; + +drop view v1,v2,v3,v4,v_union,v2_union,v_double,v_char,v_decimal; +drop table t1,t2,t1_double,t2_double,t1_char,t2_char,t1_decimal,t2_decimal; \ No newline at end of file diff --git a/sql/item.cc b/sql/item.cc index 65fb00d4757..6c24cf2d073 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -479,6 +479,7 @@ Item::Item(THD *thd): } } + /** Constructor used by Item_field, Item_ref & aggregate (sum) functions. @@ -2161,6 +2162,73 @@ bool Item_func_or_sum::agg_item_set_converter(const DTCollation &coll, } +/** + @brief + Building clone for Item_func_or_sum + + @param thd thread handle + @param mem_root part of the memory for the clone + + @details + This method gets copy of the current item and also + build clones for its referencies. For the referencies + build_copy is called again. 
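+  (For a function item with more than two arguments a fresh argument array is
+  allocated on the supplied mem_root; otherwise the embedded tmp_arg buffer is
+  reused. Every argument is then replaced by its own clone, so the copy shares
+  no item with the original tree.)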
+ + @retval + clone of the item + 0 if an error occured +*/ + +Item* Item_func_or_sum::build_clone(THD *thd, MEM_ROOT *mem_root) +{ + Item_func_or_sum *copy= (Item_func_or_sum *) get_copy(thd, mem_root); + if (!copy) + return 0; + if (arg_count > 2) + copy->args= + (Item**) alloc_root(mem_root, sizeof(Item*) * arg_count); + else if (arg_count > 0) + copy->args= copy->tmp_arg; + for (uint i= 0; i < arg_count; i++) + { + Item *arg_clone= args[i]->build_clone(thd, mem_root); + if (!arg_clone) + return 0; + copy->args[i]= arg_clone; + } + return copy; +} + + +/** + @brief + Building clone for Item_ref + + @param thd thread handle + @param mem_root part of the memory for the clone + + @details + This method gets copy of the current item and also + builds clone for its reference. + + @retval + clone of the item + 0 if an error occured +*/ + +Item* Item_ref::build_clone(THD *thd, MEM_ROOT *mem_root) +{ + Item_ref *copy= (Item_ref *) get_copy(thd, mem_root); + if (!copy) + return 0; + Item *item_clone= (* ref)->build_clone(thd, mem_root); + if (!item_clone) + return 0; + *copy->ref= item_clone; + return copy; +} + + void Item_ident_for_show::make_field(THD *thd, Send_field *tmp_field) { tmp_field->table_name= tmp_field->org_table_name= table_name; @@ -6554,6 +6622,85 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg) } +Item *Item_field::derived_field_transformer_for_having(THD *thd, uchar *arg) +{ + st_select_lex *sl= (st_select_lex *)arg; + table_map map= sl->master_unit()->derived->table->map; + if (!((Item_field*)this)->item_equal) + { + if (used_tables() == map) + { + Item_ref *rf= + new (thd->mem_root) Item_ref(thd, &sl->context, + NullS, NullS, + ((Item_field*) this)->field_name); + if (!rf) + return 0; + return rf; + } + } + else + { + Item_equal *cond= (Item_equal *) ((Item_field*)this)->item_equal; + Item_equal_fields_iterator li(*cond); + Item *item; + while ((item=li++)) + { + if (item->used_tables() == map && item->type() == FIELD_ITEM) + { + Item_ref *rf= + new (thd->mem_root) Item_ref(thd, &sl->context, + NullS, NullS, + ((Item_field*) item)->field_name); + if (!rf) + return 0; + return rf; + } + } + } + return this; +} + + +Item *Item_field::derived_field_transformer_for_where(THD *thd, uchar *arg) +{ + st_select_lex *sl= (st_select_lex *)arg; + List_iterator li(sl->grouping_tmp_fields); + Grouping_tmp_field *field; + table_map map= sl->master_unit()->derived->table->map; + if (used_tables() == map) + { + while ((field=li++)) + { + if (((Item_field*) this)->field == field->tmp_field) + return field->producing_item->build_clone(thd, thd->mem_root); + } + } + else if (((Item_field*)this)->item_equal) + { + Item_equal *cond= (Item_equal *) ((Item_field*)this)->item_equal; + Item_equal_fields_iterator it(*cond); + Item *item; + while ((item=it++)) + { + if (item->used_tables() == map && item->type() == FIELD_ITEM) + { + Item_field *field_item= (Item_field *) item; + li.rewind(); + while ((field=li++)) + { + if (field_item->field == field->tmp_field) + { + return field->producing_item->build_clone(thd, thd->mem_root); + } + } + } + } + } + return this; +} + + void Item_field::print(String *str, enum_query_type query_type) { if (field && field->table->const_table) @@ -9722,5 +9869,62 @@ const char *dbug_print_item(Item *item) return "Couldn't fit into buffer"; } + #endif /*DBUG_OFF*/ +bool Item_field::exclusive_dependence_on_table_processor(uchar *map) +{ + table_map tab_map= *((table_map *) map); + return !((used_tables() == tab_map || + (item_equal && 
item_equal->used_tables() & tab_map))); +} + +bool Item_field::exclusive_dependence_on_grouping_fields_processor(uchar *arg) +{ + st_select_lex *sl= (st_select_lex *)arg; + List_iterator li(sl->grouping_tmp_fields); + Grouping_tmp_field *field; + table_map map= sl->master_unit()->derived->table->map; + if (used_tables() == map) + { + while ((field=li++)) + { + if (((Item_field*) this)->field == field->tmp_field) + return false; + } + } + else if (((Item_field*)this)->item_equal) + { + Item_equal *cond= (Item_equal *) ((Item_field*)this)->item_equal; + Item_equal_fields_iterator it(*cond); + Item *item; + while ((item=it++)) + { + if (item->used_tables() == map && item->type() == FIELD_ITEM) + { + li.rewind(); + while ((field=li++)) + { + if (((Item_field *)item)->field == field->tmp_field) + return false; + } + } + } + } + return true; +} + + +/*Item *Item::get_copy(THD *thd, MEM_ROOT *mem_root) +{ + dbug_print_item(this); + DBUG_ASSERT(0); + return 0; +}*/ + + +void Item::register_in(THD *thd) +{ + next= thd->free_list; + thd->free_list= this; +} diff --git a/sql/item.h b/sql/item.h index 674ff6e99dc..0cbe30ca9ef 100644 --- a/sql/item.h +++ b/sql/item.h @@ -33,6 +33,8 @@ C_MODE_START C_MODE_END #ifndef DBUG_OFF +const char *dbug_print_item(Item *item); + static inline bool trace_unsupported_func(const char *where, const char *processor_name) { @@ -105,6 +107,10 @@ char_to_byte_length_safe(uint32 char_length_arg, uint32 mbmaxlen_arg) #define MY_COLL_ALLOW_CONV (MY_COLL_ALLOW_SUPERSET_CONV | MY_COLL_ALLOW_COERCIBLE_CONV) #define MY_COLL_CMP_CONV (MY_COLL_ALLOW_CONV | MY_COLL_DISALLOW_NONE) +#define NO_EXTRACTION_FL (1 << 6) +#define FULL_EXTRACTION_FL (1 << 7) +#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL) + class DTCollation { public: CHARSET_INFO *collation; @@ -607,7 +613,6 @@ class Item: public Value_source, public Type_std_attributes, public Type_handler { - Item(const Item &); /* Prevent use of these */ void operator=(Item &); /** The index in the JOIN::join_tab array of the JOIN_TAB this Item is attached @@ -1105,6 +1110,7 @@ public: virtual bool basic_const_item() const { return 0; } /* cloning of constant items (0 if it is not const) */ virtual Item *clone_item(THD *thd) { return 0; } + virtual Item* build_clone(THD *thd, MEM_ROOT *mem_root) { return get_copy(thd, mem_root); } virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? 
(DBL_DIG+2+decimals_par) : DBL_DIG+8;} @@ -1473,6 +1479,14 @@ public: virtual bool exists2in_processor(uchar *opt_arg) { return 0; } virtual bool find_selective_predicates_list_processor(uchar *opt_arg) { return 0; } + virtual bool exclusive_dependence_on_table_processor(uchar *map) + { return 0; } + virtual bool exclusive_dependence_on_grouping_fields_processor(uchar *arg) + { return 0; } + //virtual Item *get_copy(THD *thd, MEM_ROOT *mem_root); + + + virtual Item *get_copy(THD *thd, MEM_ROOT *mem_root)=0; /* To call bool function for all arguments */ struct bool_func_call_args @@ -1679,6 +1693,10 @@ public: { return this; } virtual Item *expr_cache_insert_transformer(THD *thd, uchar *unused) { return this; } + virtual Item *derived_field_transformer_for_having(THD *thd, uchar *arg) + { return this; } + virtual Item *derived_field_transformer_for_where(THD *thd, uchar *arg) + { return this; } virtual bool expr_cache_is_needed(THD *) { return FALSE; } virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs); bool needs_charset_converter(uint32 length, CHARSET_INFO *tocs) @@ -1834,9 +1852,35 @@ public: */ virtual void under_not(Item_func_not * upper __attribute__((unused))) {}; + + + void register_in(THD *thd); + + bool depends_only_on(table_map view_map) + { return marker & FULL_EXTRACTION_FL; } + int get_extraction_flag() + { return marker & EXTRACTION_MASK; } + void set_extraction_flag(int flags) + { + marker &= ~EXTRACTION_MASK; + marker|= flags; + } + void clear_extraction_flag() + { + marker &= ~EXTRACTION_MASK; + } }; +template +inline Item* get_item_copy (THD *thd, MEM_ROOT *mem_root, T* item) +{ + Item *copy= new (mem_root) T(*item); + copy->register_in(thd); + return copy; +} + + /** Compare two Items for List::add_unique() */ @@ -2112,6 +2156,8 @@ public: { return this; } bool append_for_log(THD *thd, String *str); + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } }; /***************************************************************************** @@ -2158,6 +2204,7 @@ public: purposes. 
*/ virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } private: uint m_case_expr_id; @@ -2238,6 +2285,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor("name_const"); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_num: public Item_basic_constant @@ -2372,6 +2421,8 @@ public: CHARSET_INFO *charset_for_protocol(void) const { return field->charset_for_protocol(); } enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + Item* get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2519,7 +2570,13 @@ public: Item_field *field_for_view_update() { return this; } int fix_outer_field(THD *thd, Field **field, Item **reference); virtual Item *update_value_transformer(THD *thd, uchar *select_arg); + virtual Item *derived_field_transformer_for_having(THD *thd, uchar *arg); + virtual Item *derived_field_transformer_for_where(THD *thd, uchar *arg); virtual void print(String *str, enum_query_type query_type); + bool exclusive_dependence_on_table_processor(uchar *map); + bool exclusive_dependence_on_grouping_fields_processor(uchar *arg); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } bool is_outer_field() const { DBUG_ASSERT(fixed); @@ -2613,6 +2670,8 @@ public: Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_null_result :public Item_null @@ -2773,6 +2832,8 @@ public: { return this; } bool append_for_log(THD *thd, String *str); + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } private: virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it); @@ -2824,6 +2885,8 @@ public: { return int_eq(value, item); } bool check_partition_func_processor(uchar *bool_arg) { return FALSE;} bool check_vcol_func_processor(uchar *arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2840,6 +2903,8 @@ public: virtual void print(String *str, enum_query_type query_type); Item_num *neg(THD *thd); uint decimal_precision() const { return max_length; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2893,6 +2958,8 @@ public: void set_decimal_value(my_decimal *value_par); bool check_partition_func_processor(uchar *bool_arg) { return FALSE;} bool check_vcol_func_processor(uchar *arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2941,6 +3008,8 @@ public: virtual void print(String *str, enum_query_type query_type); bool eq(const Item *item, bool binary_cmp) const { return real_eq(value, item); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3131,6 +3200,9 @@ public: } return MYSQL_TYPE_STRING; // Not a temporal literal } + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3360,6 +3432,8 @@ public: } enum Item_result cast_to_int_type() const { return INT_RESULT; } void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ 
-3400,6 +3474,8 @@ public: } enum Item_result cast_to_int_type() const { return STRING_RESULT; } void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3482,6 +3558,8 @@ public: void print(String *str, enum_query_type query_type); Item *clone_item(THD *thd); bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3501,6 +3579,8 @@ public: void print(String *str, enum_query_type query_type); Item *clone_item(THD *thd); bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3522,6 +3602,8 @@ public: void print(String *str, enum_query_type query_type); Item *clone_item(THD *thd); bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -3882,6 +3964,7 @@ public: virtual void fix_length_and_dec()= 0; bool const_item() const { return const_item_cache; } table_map used_tables() const { return used_tables_cache; } + Item* build_clone(THD *thd, MEM_ROOT *mem_root); }; @@ -4057,6 +4140,8 @@ public: DBUG_ASSERT(ref); return (*ref)->is_outer_field(); } + + Item* build_clone(THD *thd, MEM_ROOT *mem_root); /** Checks if the item tree that ref points to contains a subquery. @@ -4065,6 +4150,8 @@ public: { return (*ref)->has_subquery(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4107,6 +4194,8 @@ public: bool is_null(); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); virtual Ref_Type ref_type() { return DIRECT_REF; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4268,6 +4357,9 @@ public: { return trace_unsupported_by_check_vcol_func_processor("cache"); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; } }; @@ -4519,6 +4611,8 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); virtual void print(String *str, enum_query_type query_type); table_map used_tables() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* @@ -4678,6 +4772,8 @@ public: longlong val_int(); void copy(); int save_in_field(Field *field, bool no_conversions); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4700,6 +4796,8 @@ public: return null_value ? 0 : cached_value; } virtual void copy(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4716,6 +4814,8 @@ public: { return null_value ? 
0.0 : (double) (ulonglong) cached_value; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4742,6 +4842,8 @@ public: cached_value= item->val_real(); null_value= item->null_value; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -4761,6 +4863,8 @@ public: double val_real(); longlong val_int(); void copy(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5181,6 +5285,8 @@ public: enum Item_result result_type() const { return INT_RESULT; } bool cache_value(); int save_in_field(Field *field, bool no_conversions); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5205,6 +5311,8 @@ public: Important when storing packed datetime values. */ Item *clone_item(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5221,6 +5329,8 @@ public: my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return REAL_RESULT; } bool cache_value(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5237,6 +5347,8 @@ public: my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return DECIMAL_RESULT; } bool cache_value(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5263,6 +5375,8 @@ public: CHARSET_INFO *charset() const { return value->charset(); }; int save_in_field(Field *field, bool no_conversions); bool cache_value(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5286,6 +5400,8 @@ public: */ return Item::safe_charset_converter(thd, tocs); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5357,6 +5473,8 @@ public: } bool cache_value(); virtual void set_null(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -5412,6 +5530,7 @@ public: static uint32 display_length(Item *item); static enum_field_types get_real_type(Item *); Field::geometry_type get_geometry_type() const { return geometry_type; }; + Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 335228c37fa..34416d6422c 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -34,6 +34,9 @@ #include "sql_time.h" // make_truncated_value_warning #include "sql_base.h" // dynamic_column_error_message +#define FULL_EXTRACTION_FL (1 << 6) +#define NO_EXTRACTION_FL (1 << 7) + /** find an temporal type (item) that others will be converted to @@ -4844,6 +4847,43 @@ void Item_cond::neg_arguments(THD *thd) } +/** + @brief + Building clone for Item_cond + + @param thd thread handle + @param mem_root part of the memory for the clone + + @details + This method gets copy of the current item and also + build clones for its elements. For this elements + build_copy is called again. 
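+  (The copy's list is emptied first and then refilled with a clone of every
+  element, so an AND/OR condition is deep-copied element by element and the
+  clone can be rewritten without touching the original condition.)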
+ + @retval + clone of the item + 0 if an error occured +*/ + +Item *Item_cond::build_clone(THD *thd, MEM_ROOT *mem_root) +{ + List_iterator_fast li(list); + Item *item; + Item_cond *copy= (Item_cond *) get_copy(thd, mem_root); + if (!copy) + return 0; + copy->list.empty(); + while ((item= li++)) + { + Item *arg_clone= item->build_clone(thd, mem_root); + if (!arg_clone) + return 0; + if (copy->list.push_back(arg_clone, mem_root)) + return 0; + } + return copy; +} + + void Item_cond_and::mark_as_condition_AND_part(TABLE_LIST *embedding) { List_iterator li(list); diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 5789186dbe8..3d688223bcf 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -26,6 +26,7 @@ #include "item_func.h" /* Item_int_func, Item_bool_func */ #define PCRE_STATIC 1 /* Important on Windows */ #include "pcre.h" /* pcre header file */ +#include "item.h" extern Item_result item_cmp_type(Item_result a,Item_result b); inline Item_result item_cmp_type(const Item *a, const Item *b) @@ -124,6 +125,7 @@ public: comparators= 0; } friend class Item_func; + friend class Item_bool_rowready_func2; }; @@ -243,6 +245,8 @@ public: Item_func_istrue(THD *thd, Item *a): Item_func_truth(thd, a, true, true) {} ~Item_func_istrue() {} virtual const char* func_name() const { return "istrue"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -257,6 +261,8 @@ public: Item_func_truth(thd, a, true, false) {} ~Item_func_isnottrue() {} virtual const char* func_name() const { return "isnottrue"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -270,6 +276,8 @@ public: Item_func_isfalse(THD *thd, Item *a): Item_func_truth(thd, a, false, true) {} ~Item_func_isfalse() {} virtual const char* func_name() const { return "isfalse"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -284,6 +292,8 @@ public: Item_func_truth(thd, a, false, false) {} ~Item_func_isnotfalse() {} virtual const char* func_name() const { return "isnotfalse"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -345,6 +355,8 @@ public: void fix_after_pullout(st_select_lex *new_parent, Item **ref); bool invisible_mode(); void reset_cache() { cache= NULL; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -498,6 +510,17 @@ public: return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, false); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) + { + Item_bool_rowready_func2 *clone= + (Item_bool_rowready_func2 *) Item_func::build_clone(thd, mem_root); + if (clone) + { + clone->cmp.comparators= 0; + } + return clone; + } + }; /** @@ -520,6 +543,8 @@ public: Item_args::propagate_equal_fields(thd, Context_boolean(), cond); return this; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_not :public Item_bool_func @@ -536,6 +561,8 @@ public: Item *neg_transformer(THD *thd); bool fix_fields(THD *, Item **); virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_maxmin_subselect; @@ -583,6 +610,8 @@ public: void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, SARGABLE_PARAM **sargables); + Item *get_copy(THD *thd, 
MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_not_all :public Item_func_not @@ -658,6 +687,8 @@ public: uint in_equality_no; virtual uint exists2in_reserved_items() { return 1; }; friend class Arg_comparator; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_equal :public Item_bool_rowready_func2 @@ -680,6 +711,8 @@ public: return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, true); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -694,6 +727,8 @@ public: cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return ">="; } Item *negated_item(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -708,6 +743,8 @@ public: cond_result eq_cmp_result() const { return COND_FALSE; } const char *func_name() const { return ">"; } Item *negated_item(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -722,6 +759,8 @@ public: cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "<="; } Item *negated_item(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -736,6 +775,8 @@ public: cond_result eq_cmp_result() const { return COND_FALSE; } const char *func_name() const { return "<"; } Item *negated_item(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -759,6 +800,8 @@ public: Item *negated_item(THD *thd); void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, SARGABLE_PARAM **sargables); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -839,6 +882,8 @@ public: cond); return this; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -857,6 +902,8 @@ public: agg_arg_charsets_for_comparison(cmp_collation, args, 2); fix_char_length(2); // returns "1" or "0" or "-1" } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -887,6 +934,8 @@ public: str->append(func_name()); print_args(str, 0, query_type); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -909,6 +958,8 @@ public: } const char *func_name() const { return "coalesce"; } table_map not_null_tables() const { return 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -958,6 +1009,8 @@ public: { return Item_func_case_abbreviation2::decimal_precision2(args); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -981,6 +1034,8 @@ public: const char *func_name() const { return "if"; } bool eval_not_null_tables(uchar *opt_arg); void fix_after_pullout(st_select_lex *new_parent, Item **ref); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void cache_type_info(Item *source); }; @@ -1047,6 +1102,8 @@ public: cond, &args[2]); return this; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1230,7 +1287,6 @@ public: item_dec->set_decimal_value(dec); } Item_result result_type() { return DECIMAL_RESULT; 
} - }; @@ -1488,6 +1544,19 @@ public: CHARSET_INFO *compare_collation() const { return cmp_collation.collation; } void cleanup(); Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) + { + Item_func_case *clone= (Item_func_case *) Item_func::build_clone(thd, mem_root); + if (clone) + { + clone->case_item= 0; + clone->arg_buffer= 0; + bzero(&clone->cmp_items, sizeof(cmp_items)); + } + return clone; + } }; /* @@ -1584,6 +1653,18 @@ public: bool eval_not_null_tables(uchar *opt_arg); void fix_after_pullout(st_select_lex *new_parent, Item **ref); bool count_sargable_conds(uchar *arg); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) + { + Item_func_in *clone= (Item_func_in *) Item_func::build_clone(thd, mem_root); + if (clone) + { + clone->array= 0; + bzero(&clone->cmp_items, sizeof(cmp_items)); + } + return clone; + } }; class cmp_item_row :public cmp_item @@ -1678,6 +1759,8 @@ public: bool top_level); table_map not_null_tables() const { return 0; } Item *neg_transformer(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* Functions used by HAVING for rewriting IN subquery */ @@ -1723,6 +1806,8 @@ public: Item *neg_transformer(THD *thd); virtual void print(String *str, enum_query_type query_type); void top_level_item() { abort_on_null=1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1862,6 +1947,9 @@ public: void cleanup(); bool find_selective_predicates_list_processor(uchar *arg); + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1967,6 +2055,8 @@ public: longlong val_int(); void fix_length_and_dec(); const char *func_name() const { return "regexp"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } virtual inline void print(String *str, enum_query_type query_type) { @@ -1994,6 +2084,8 @@ public: longlong val_int(); void fix_length_and_dec(); const char *func_name() const { return "regexp_instr"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2071,6 +2163,7 @@ public: Item *compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, Item_transformer transformer, uchar *arg_t); bool eval_not_null_tables(uchar *opt_arg); + Item *build_clone(THD *thd, MEM_ROOT *mem_root); }; template class LI, class T> class Item_equal_iterator; @@ -2243,6 +2336,7 @@ public: void set_context_field(Item_field *ctx_field) { context_field= ctx_field; } void set_link_equal_fields(bool flag) { link_equal_fields= flag; } + Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } friend class Item_equal_fields_iterator; bool count_sargable_conds(uchar *arg); friend class Item_equal_iterator; @@ -2388,6 +2482,8 @@ public: void add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, table_map usable_tables, SARGABLE_PARAM **sargables); SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; inline bool is_cond_and(Item *item) @@ -2412,6 +2508,8 @@ public: table_map not_null_tables() const { return and_tables_cache; } Item *copy_andor_structure(THD *thd); Item 
*neg_transformer(THD *thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_dyncol_check :public Item_bool_func @@ -2420,6 +2518,8 @@ public: Item_func_dyncol_check(THD *thd, Item *str): Item_bool_func(thd, str) {} longlong val_int(); const char *func_name() const { return "column_check"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_dyncol_exists :public Item_bool_func @@ -2429,6 +2529,8 @@ public: Item_bool_func(thd, str, num) {} longlong val_int(); const char *func_name() const { return "column_exists"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; inline bool is_cond_or(Item *item) diff --git a/sql/item_func.cc b/sql/item_func.cc index 57bd004cf88..193575f44f4 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -6984,3 +6984,6 @@ void Item_func_last_value::fix_length_and_dec() Type_std_attributes::set(last_value); maybe_null= last_value->maybe_null; } + + + diff --git a/sql/item_func.h b/sql/item_func.h index 5c21535adbe..9cd5aea37fa 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -613,6 +613,8 @@ public: bool fix_fields(THD *thd, Item **ref); longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } bool check_vcol_func_processor(uchar *int_arg) { return TRUE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -633,6 +635,8 @@ public: } virtual void print(String *str, enum_query_type query_type); uint decimal_precision() const { return args[0]->decimal_precision(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -646,6 +650,8 @@ public: const char *func_name() const { return "cast_as_unsigned"; } longlong val_int(); virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -669,6 +675,8 @@ public: void fix_length_and_dec() {} const char *func_name() const { return "decimal_typecast"; } virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -686,6 +694,8 @@ public: void fix_length_and_dec() { maybe_null= 1; } const char *func_name() const { return "double_typecast"; } virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -709,6 +719,8 @@ public: longlong int_op(); double real_op(); my_decimal *decimal_op(my_decimal *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_minus :public Item_func_additive_op @@ -721,6 +733,8 @@ public: double real_op(); my_decimal *decimal_op(my_decimal *); void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -736,6 +750,8 @@ public: void result_precision(); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -750,6 +766,8 @@ public: const char *func_name() const { return "/"; } void fix_length_and_dec(); void result_precision(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, 
this); } }; @@ -769,6 +787,8 @@ public: bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -784,6 +804,8 @@ public: void fix_length_and_dec(); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -800,6 +822,8 @@ public: uint decimal_precision() const { return args[0]->decimal_precision(); } bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -814,6 +838,8 @@ public: void fix_length_and_dec(); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; // A class to handle logarithmic and trigonometric functions @@ -836,6 +862,8 @@ public: Item_func_exp(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "exp"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -845,6 +873,8 @@ public: Item_func_ln(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "ln"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -855,6 +885,8 @@ public: Item_func_log(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} double val_real(); const char *func_name() const { return "log"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -864,6 +896,8 @@ public: Item_func_log2(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "log2"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -873,6 +907,8 @@ public: Item_func_log10(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "log10"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -882,6 +918,8 @@ public: Item_func_sqrt(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "sqrt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -891,6 +929,8 @@ public: Item_func_pow(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} double val_real(); const char *func_name() const { return "pow"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -900,6 +940,8 @@ public: Item_func_acos(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "acos"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_asin :public Item_dec_func @@ -908,6 +950,8 @@ public: Item_func_asin(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "asin"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return 
get_item_copy(thd, mem_root, this); } }; class Item_func_atan :public Item_dec_func @@ -917,6 +961,8 @@ public: Item_func_atan(THD *thd, Item *a, Item *b): Item_dec_func(thd, a, b) {} double val_real(); const char *func_name() const { return "atan"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_cos :public Item_dec_func @@ -925,6 +971,8 @@ public: Item_func_cos(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "cos"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_sin :public Item_dec_func @@ -933,6 +981,8 @@ public: Item_func_sin(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "sin"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_tan :public Item_dec_func @@ -941,6 +991,8 @@ public: Item_func_tan(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "tan"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_cot :public Item_dec_func @@ -949,6 +1001,8 @@ public: Item_func_cot(THD *thd, Item *a): Item_dec_func(thd, a) {} double val_real(); const char *func_name() const { return "cot"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_integer :public Item_int_func @@ -977,6 +1031,8 @@ public: my_decimal *decimal_op(my_decimal *); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -990,6 +1046,8 @@ public: my_decimal *decimal_op(my_decimal *); bool check_partition_func_processor(uchar *int_arg) {return FALSE;} bool check_vcol_func_processor(uchar *int_arg) { return FALSE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* This handles round and truncate */ @@ -1005,6 +1063,8 @@ public: longlong int_op(); my_decimal *decimal_op(my_decimal *); void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1026,6 +1086,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void seed_random (Item * val); }; @@ -1037,6 +1099,8 @@ public: Item_func_sign(THD *thd, Item *a): Item_int_func(thd, a) {} const char *func_name() const { return "sign"; } longlong val_int(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1052,6 +1116,8 @@ public: const char *func_name() const { return name; } void fix_length_and_dec() { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1088,6 +1154,8 @@ class Item_func_min :public Item_func_min_max public: Item_func_min(THD *thd, List &list): Item_func_min_max(thd, list, 1) {} const char *func_name() const { return "least"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_max :public Item_func_min_max @@ -1095,6 +1163,8 
@@ class Item_func_max :public Item_func_min_max public: Item_func_max(THD *thd, List &list): Item_func_min_max(thd, list, -1) {} const char *func_name() const { return "greatest"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1127,6 +1197,8 @@ public: /* The item could be a NULL constant. */ null_value= args[0]->is_null(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1138,6 +1210,8 @@ public: longlong val_int(); const char *func_name() const { return "length"; } void fix_length_and_dec() { max_length=10; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_bit_length :public Item_func_length @@ -1147,6 +1221,8 @@ public: longlong val_int() { DBUG_ASSERT(fixed == 1); return Item_func_length::val_int()*8; } const char *func_name() const { return "bit_length"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_char_length :public Item_int_func @@ -1157,6 +1233,8 @@ public: longlong val_int(); const char *func_name() const { return "char_length"; } void fix_length_and_dec() { max_length=10; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_coercibility :public Item_int_func @@ -1170,6 +1248,8 @@ public: Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) { return this; } bool const_item() const { return true; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_locate :public Item_int_func @@ -1183,6 +1263,8 @@ public: longlong val_int(); void fix_length_and_dec(); virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1196,6 +1278,8 @@ public: longlong val_int(); const char *func_name() const { return "field"; } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1207,6 +1291,8 @@ public: longlong val_int(); const char *func_name() const { return "ascii"; } void fix_length_and_dec() { max_length=3; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_ord :public Item_int_func @@ -1216,6 +1302,8 @@ public: Item_func_ord(THD *thd, Item *a): Item_int_func(thd, a) {} longlong val_int(); const char *func_name() const { return "ord"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_find_in_set :public Item_int_func @@ -1230,6 +1318,8 @@ public: longlong val_int(); const char *func_name() const { return "find_in_set"; } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* Base class for all bit functions: '~', '|', '^', '&', '>>', '<<' */ @@ -1253,6 +1343,8 @@ public: Item_func_bit_or(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {} longlong val_int(); const char *func_name() const { return "|"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_bit_and :public Item_func_bit @@ -1261,6 +1353,8 @@ public: Item_func_bit_and(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {} longlong val_int(); const char *func_name() const { return "&"; 
} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_bit_count :public Item_int_func @@ -1270,6 +1364,8 @@ public: longlong val_int(); const char *func_name() const { return "bit_count"; } void fix_length_and_dec() { max_length=2; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_shift_left :public Item_func_bit @@ -1278,6 +1374,8 @@ public: Item_func_shift_left(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {} longlong val_int(); const char *func_name() const { return "<<"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_shift_right :public Item_func_bit @@ -1286,6 +1384,8 @@ public: Item_func_shift_right(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {} longlong val_int(); const char *func_name() const { return ">>"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_bit_neg :public Item_func_bit @@ -1299,6 +1399,8 @@ public: { Item_func::print(str, query_type); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1321,6 +1423,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1338,6 +1442,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1360,6 +1466,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1479,6 +1587,8 @@ class Item_func_udf_float :public Item_udf_func String *val_str(String *str); enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } void fix_length_and_dec() { fix_num_length_and_dec(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1496,6 +1606,8 @@ public: enum Item_result result_type () const { return INT_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } void fix_length_and_dec() { decimals= 0; max_length= 21; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1513,6 +1625,8 @@ public: enum Item_result result_type () const { return DECIMAL_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; } void fix_length_and_dec() { fix_num_length_and_dec(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1551,6 +1665,8 @@ public: enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return string_field_type(); } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #else /* Dummy functions to get sql_yacc.cc compiled */ @@ -1626,6 +1742,8 @@ class Item_func_get_lock :public Item_int_func { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_release_lock :public Item_int_func @@ -1646,6 +1764,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); 
} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* replication functions */ @@ -1666,6 +1786,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1682,6 +1804,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1795,6 +1919,8 @@ public: bool register_field_in_bitmap(uchar *arg); bool set_entry(THD *thd, bool create_if_not_exists); void cleanup(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1821,6 +1947,8 @@ public: table_map used_tables() const { return const_item() ? 0 : RAND_TABLE_BIT; } bool eq(const Item *item, bool binary_cmp) const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: bool set_value(THD *thd, sp_rcontext *ctx, Item **it); @@ -1860,6 +1988,8 @@ public: void set_null_value(CHARSET_INFO* cs); void set_value(const char *str, uint length, CHARSET_INFO* cs); enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1914,6 +2044,8 @@ public: void cleanup(); bool check_vcol_func_processor(uchar *int_arg) { return TRUE;} + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1964,6 +2096,9 @@ public: /* TODO: consider adding in support for the MATCH-based virtual columns */ return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; } private: /** Check whether storage engine for given table, @@ -2008,6 +2143,8 @@ public: Item_func_bit_xor(THD *thd, Item *a, Item *b): Item_func_bit(thd, a, b) {} longlong val_int(); const char *func_name() const { return "^"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_is_free_lock :public Item_int_func @@ -2022,6 +2159,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_is_used_lock :public Item_int_func @@ -2036,6 +2175,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* For type casts */ @@ -2087,6 +2228,8 @@ public: return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2217,6 +2360,15 @@ public: { return TRUE; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) + { + Item_func_sp *clone= (Item_func_sp *) Item_func::build_clone(thd, mem_root); + if (clone) + clone->sp_result_field= NULL; + return clone; + } }; @@ -2231,6 +2383,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2248,6 +2402,8 @@ 
public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -2273,6 +2429,8 @@ public: Item_func::update_used_tables(); maybe_null= last_value->maybe_null; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 56e2a729924..9a2a49f633b 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -51,6 +51,8 @@ public: Item_geometry_func(thd, a, srid) {} const char *func_name() const { return "st_geometryfromtext"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_geometry_from_wkb: public Item_geometry_func @@ -61,6 +63,8 @@ public: Item_geometry_func(thd, a, srid) {} const char *func_name() const { return "st_geometryfromwkb"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_as_wkt: public Item_str_ascii_func @@ -70,6 +74,8 @@ public: const char *func_name() const { return "st_astext"; } String *val_str_ascii(String *); void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_as_wkb: public Item_geometry_func @@ -79,6 +85,8 @@ public: const char *func_name() const { return "st_aswkb"; } String *val_str(String *); enum_field_types field_type() const { return MYSQL_TYPE_BLOB; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_geometry_type: public Item_str_ascii_func @@ -93,6 +101,8 @@ public: fix_length_and_charset(20, default_charset()); maybe_null= 1; }; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -125,6 +135,8 @@ public: {} const char *func_name() const { return "st_convexhull"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -135,6 +147,8 @@ public: const char *func_name() const { return "st_centroid"; } String *val_str(String *); Field::geometry_type get_geometry_type() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_envelope: public Item_geometry_func @@ -144,6 +158,8 @@ public: const char *func_name() const { return "st_envelope"; } String *val_str(String *); Field::geometry_type get_geometry_type() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -175,6 +191,8 @@ public: Item_func_boundary(THD *thd, Item *a): Item_geometry_func(thd, a) {} const char *func_name() const { return "st_boundary"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -187,6 +205,8 @@ public: const char *func_name() const { return "point"; } String *val_str(String *); Field::geometry_type get_geometry_type() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_spatial_decomp: public Item_geometry_func @@ -211,6 +231,8 @@ public: } } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_spatial_decomp_n: public Item_geometry_func @@ -235,6 +257,8 @@ 
public: } } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_spatial_collection: public Item_geometry_func @@ -268,6 +292,8 @@ public: } const char *func_name() const { return "st_multipoint"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -299,6 +325,7 @@ public: return add_key_fields_optimize_op(join, key_fields, and_level, usable_tables, sargables, false); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; } }; @@ -310,6 +337,8 @@ public: { } longlong val_int(); const char *func_name() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -324,6 +353,8 @@ public: { } longlong val_int(); const char *func_name() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -339,6 +370,8 @@ public: { } longlong val_int(); const char *func_name() const { return "st_relate"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -368,6 +401,8 @@ public: { Item_func::print(str, query_type); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -419,6 +454,8 @@ public: Item_geometry_func(thd, obj, distance) {} const char *func_name() const { return "st_buffer"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -429,6 +466,8 @@ public: longlong val_int(); const char *func_name() const { return "st_isempty"; } void fix_length_and_dec() { maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_issimple: public Item_int_func @@ -443,6 +482,8 @@ public: const char *func_name() const { return "st_issimple"; } void fix_length_and_dec() { decimals=0; max_length=2; } uint decimal_precision() const { return 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_isclosed: public Item_int_func @@ -453,6 +494,8 @@ public: const char *func_name() const { return "st_isclosed"; } void fix_length_and_dec() { decimals=0; max_length=2; } uint decimal_precision() const { return 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_isring: public Item_func_issimple @@ -461,6 +504,8 @@ public: Item_func_isring(THD *thd, Item *a): Item_func_issimple(thd, a) {} longlong val_int(); const char *func_name() const { return "st_isring"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_dimension: public Item_int_func @@ -471,6 +516,8 @@ public: longlong val_int(); const char *func_name() const { return "st_dimension"; } void fix_length_and_dec() { max_length= 10; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_x: public Item_real_func @@ -485,6 +532,8 @@ public: Item_real_func::fix_length_and_dec(); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -500,6 +549,8 @@ public: Item_real_func::fix_length_and_dec(); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -511,6 +562,8 @@ public: longlong 
val_int(); const char *func_name() const { return "st_numgeometries"; } void fix_length_and_dec() { max_length= 10; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -522,6 +575,8 @@ public: longlong val_int(); const char *func_name() const { return "st_numinteriorrings"; } void fix_length_and_dec() { max_length= 10; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -533,6 +588,8 @@ public: longlong val_int(); const char *func_name() const { return "st_numpoints"; } void fix_length_and_dec() { max_length= 10; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -548,6 +605,8 @@ public: Item_real_func::fix_length_and_dec(); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -563,6 +622,8 @@ public: Item_real_func::fix_length_and_dec(); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -574,6 +635,8 @@ public: longlong val_int(); const char *func_name() const { return "srid"; } void fix_length_and_dec() { max_length= 10; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -588,6 +651,8 @@ public: Item_func_distance(THD *thd, Item *a, Item *b): Item_real_func(thd, a, b) {} double val_real(); const char *func_name() const { return "st_distance"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -602,6 +667,8 @@ public: const char *func_name() const { return "st_pointonsurface"; } String *val_str(String *); Field::geometry_type get_geometry_type() const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -613,6 +680,8 @@ class Item_func_gis_debug: public Item_int_func { null_value= false; } const char *func_name() const { return "st_gis_debug"; } longlong val_int(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #endif diff --git a/sql/item_inetfunc.h b/sql/item_inetfunc.h index 82a4405df1e..9dcb3ab0db7 100644 --- a/sql/item_inetfunc.h +++ b/sql/item_inetfunc.h @@ -37,6 +37,8 @@ public: maybe_null= 1; unsigned_flag= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -57,6 +59,8 @@ public: fix_length_and_charset(3 * 8 + 7, default_charset()); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -123,6 +127,8 @@ public: fix_length_and_charset(16, &my_charset_bin); maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(String *arg, String *buffer); @@ -155,6 +161,8 @@ public: maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(String *arg, String *buffer); @@ -175,6 +183,8 @@ public: public: virtual const char *func_name() const { return "is_ipv4"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(const String *arg); @@ -195,6 +205,8 @@ public: public: virtual const char *func_name() const { return "is_ipv6"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) 
+ { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(const String *arg); @@ -215,6 +227,8 @@ public: public: virtual const char *func_name() const { return "is_ipv4_compat"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(const String *arg); @@ -235,6 +249,8 @@ public: public: virtual const char *func_name() const { return "is_ipv4_mapped"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual bool calc_value(const String *arg); diff --git a/sql/item_row.cc b/sql/item_row.cc index 56d73f7b759..24516a568c8 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -160,3 +160,21 @@ void Item_row::bring_value() for (uint i= 0; i < arg_count; i++) args[i]->bring_value(); } + + +Item* Item_row::build_clone(THD *thd, MEM_ROOT *mem_root) +{ + Item_row *copy= (Item_row *) get_copy(thd, mem_root); + if (!copy) + return 0; + copy->args= (Item**) alloc_root(mem_root, sizeof(Item*) * arg_count); + for (uint i= 0; i < arg_count; i++) + { + Item *arg_clone= args[i]->build_clone(thd, mem_root); + if (!arg_clone) + return 0; + copy->args[i]= arg_clone; + } + return copy; +} + diff --git a/sql/item_row.h b/sql/item_row.h index ddb6f0835f2..de8a4c991fc 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -120,6 +120,9 @@ public: bool null_inside() { return with_null; }; void bring_value(); bool check_vcol_func_processor(uchar *int_arg) {return FALSE; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + Item *build_clone(THD *thd, MEM_ROOT *mem_root); }; #endif /* ITEM_ROW_INCLUDED */ diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 0ff38157c25..b20fb4e1fbc 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -105,6 +105,8 @@ public: String *val_str_ascii(String *); void fix_length_and_dec(); const char *func_name() const { return "md5"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -114,7 +116,9 @@ public: Item_func_sha(THD *thd, Item *a): Item_str_ascii_func(thd, a) {} String *val_str_ascii(String *); void fix_length_and_dec(); - const char *func_name() const { return "sha"; } + const char *func_name() const { return "sha"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_sha2 :public Item_str_ascii_func @@ -124,6 +128,8 @@ public: String *val_str_ascii(String *); void fix_length_and_dec(); const char *func_name() const { return "sha2"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_to_base64 :public Item_str_ascii_func @@ -134,6 +140,8 @@ public: String *val_str_ascii(String *); void fix_length_and_dec(); const char *func_name() const { return "to_base64"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_from_base64 :public Item_str_func @@ -144,6 +152,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "from_base64"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #include @@ -167,6 +177,8 @@ public: Item_aes_crypt(thd, a, b) {} void fix_length_and_dec(); const char *func_name() const { return "aes_encrypt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; 
class Item_func_aes_decrypt :public Item_aes_crypt @@ -176,6 +188,8 @@ public: Item_aes_crypt(thd, a, b) {} void fix_length_and_dec(); const char *func_name() const { return "aes_decrypt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -188,6 +202,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "concat"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_decode_histogram :public Item_str_func @@ -204,6 +220,8 @@ public: maybe_null= 1; } const char *func_name() const { return "decode_histogram"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_concat_ws :public Item_str_func @@ -215,6 +233,8 @@ public: void fix_length_and_dec(); const char *func_name() const { return "concat_ws"; } table_map not_null_tables() const { return 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_reverse :public Item_str_func @@ -225,6 +245,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "reverse"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -237,6 +259,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "replace"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -260,6 +284,8 @@ public: String *val_str(String *str); void fix_length_and_dec(); const char *func_name() const { return "regexp_replace"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -280,6 +306,8 @@ public: String *val_str(String *str); void fix_length_and_dec(); const char *func_name() const { return "regexp_substr"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -293,6 +321,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "insert"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -314,6 +344,8 @@ public: Item_func_lcase(THD *thd, Item *item): Item_str_conv(thd, item) {} const char *func_name() const { return "lcase"; } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_ucase :public Item_str_conv @@ -322,6 +354,8 @@ public: Item_func_ucase(THD *thd, Item *item): Item_str_conv(thd, item) {} const char *func_name() const { return "ucase"; } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -333,6 +367,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "left"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -344,6 +380,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "right"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -356,6 +394,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "substr"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return 
get_item_copy(thd, mem_root, this); } }; @@ -368,6 +408,9 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "substring_index"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } + }; @@ -399,6 +442,8 @@ public: const char *func_name() const { return "trim"; } virtual void print(String *str, enum_query_type query_type); virtual const char *mode_name() const { return "both"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -410,6 +455,8 @@ public: String *val_str(String *); const char *func_name() const { return "ltrim"; } const char *mode_name() const { return "leading"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -421,6 +468,8 @@ public: String *val_str(String *); const char *func_name() const { return "rtrim"; } const char *mode_name() const { return "trailing"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -458,6 +507,8 @@ public: "password" : "old_password"); } static char *alloc(THD *thd, const char *password, size_t pass_len, enum PW_Alg al); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -476,6 +527,8 @@ public: max_length = args[0]->max_length + 9; } const char *func_name() const { return "des_encrypt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_des_decrypt :public Item_str_func @@ -494,6 +547,8 @@ public: max_length-= 9U; } const char *func_name() const { return "des_decrypt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_encrypt :public Item_str_func @@ -521,6 +576,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #include "sql_crypt.h" @@ -539,6 +596,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "encode"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: virtual void crypto_transform(String *); private: @@ -552,6 +611,8 @@ class Item_func_decode :public Item_func_encode public: Item_func_decode(THD *thd, Item *a, Item *seed_arg): Item_func_encode(thd, a, seed_arg) {} const char *func_name() const { return "decode"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } protected: void crypto_transform(String *); }; @@ -592,6 +653,8 @@ public: } const char *func_name() const { return "database"; } const char *fully_qualified_func_name() const { return "database()"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -622,6 +685,8 @@ public: { return save_str_value_in_field(field, &str_value); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -657,6 +722,8 @@ public: DBUG_ASSERT(fixed == 1); return (null_value ? 
0 : &str_value); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -668,6 +735,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "soundex"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -680,6 +749,8 @@ public: String *val_str(String *str); void fix_length_and_dec(); const char *func_name() const { return "elt"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -692,6 +763,8 @@ public: String *val_str(String *str); void fix_length_and_dec(); const char *func_name() const { return "make_set"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -710,6 +783,8 @@ public: void fix_length_and_dec(); const char *func_name() const { return "format"; } virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -727,6 +802,8 @@ public: max_length= arg_count * 4; } const char *func_name() const { return "char"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -739,6 +816,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "repeat"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -749,6 +828,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "space"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -761,6 +842,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "binlog_gtid_pos"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -773,6 +856,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "rpad"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -785,6 +870,8 @@ public: String *val_str(String *); void fix_length_and_dec(); const char *func_name() const { return "lpad"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -801,6 +888,8 @@ public: max_length=64; maybe_null= 1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -818,6 +907,8 @@ public: decimals=0; fix_char_length(args[0]->max_length * 2); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_unhex :public Item_str_func @@ -837,6 +928,8 @@ public: decimals=0; max_length=(1+args[0]->max_length)/2; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -867,6 +960,8 @@ public: Item_func_like_range_min(THD *thd, Item *a, Item *b): Item_func_like_range(thd, a, b, true) { } const char *func_name() const { return "like_range_min"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -876,6 +971,8 @@ public: Item_func_like_range_max(THD *thd, Item *a, Item *b): Item_func_like_range(thd, a, b, false) { } const char *func_name() const { return "like_range_max"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return 
get_item_copy(thd, mem_root, this); } }; #endif @@ -900,6 +997,8 @@ public: } virtual void print(String *str, enum_query_type query_type); const char *func_name() const { return "cast_as_binary"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -920,6 +1019,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -935,6 +1036,8 @@ class Item_func_export_set: public Item_str_func String *val_str(String *str); void fix_length_and_dec(); const char *func_name() const { return "export_set"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -952,6 +1055,8 @@ public: 2 * collation.collation->mbmaxlen; max_length= (uint32) MY_MIN(max_result_length, MAX_BLOB_WIDTH); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_conv_charset :public Item_str_func @@ -1034,6 +1139,8 @@ public: void fix_length_and_dec(); const char *func_name() const { return "convert"; } virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_set_collation :public Item_str_func @@ -1052,6 +1159,8 @@ public: /* this function is transparent for view updating */ return args[0]->field_for_view_update(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1079,6 +1188,8 @@ public: :Item_func_expr_str_metadata(thd, a) { } String *val_str(String *); const char *func_name() const { return "charset"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1089,6 +1200,8 @@ public: :Item_func_expr_str_metadata(thd, a) {} String *val_str(String *); const char *func_name() const { return "collation"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1121,6 +1234,8 @@ public: } Item* propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) { return this; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_crc32 :public Item_int_func @@ -1132,6 +1247,8 @@ public: const char *func_name() const { return "crc32"; } void fix_length_and_dec() { max_length=10; } longlong val_int(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_uncompressed_length : public Item_int_func @@ -1142,6 +1259,8 @@ public: const char *func_name() const{return "uncompressed_length";} void fix_length_and_dec() { max_length=10; maybe_null= true; } longlong val_int(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #ifdef HAVE_COMPRESS @@ -1158,6 +1277,8 @@ public: void fix_length_and_dec(){max_length= (args[0]->max_length*120)/100+12;} const char *func_name() const{return "compress";} String *val_str(String *) ZLIB_DEPENDED_FUNCTION + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_uncompress: public Item_str_func @@ -1168,6 +1289,8 @@ public: void fix_length_and_dec(){ maybe_null= 1; max_length= MAX_BLOB_WIDTH; } const char *func_name() const{return "uncompress";} String *val_str(String *) ZLIB_DEPENDED_FUNCTION + Item *get_copy(THD *thd, 
MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1187,6 +1310,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor(func_name()); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1208,6 +1333,8 @@ public: String *val_str(String *); virtual void print(String *str, enum_query_type query_type); virtual enum Functype functype() const { return DYNCOL_FUNC; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1220,6 +1347,8 @@ public: const char *func_name() const{ return "column_add"; } String *val_str(String *); virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_dyncol_json: public Item_str_func @@ -1235,6 +1364,8 @@ public: collation.set(&my_charset_bin); decimals= 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* @@ -1259,6 +1390,8 @@ public: bool get_dyn_value(THD *thd, DYNAMIC_COLUMN_VALUE *val, String *tmp); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1269,6 +1402,8 @@ public: void fix_length_and_dec() { maybe_null= 1; max_length= MAX_BLOB_WIDTH; }; const char *func_name() const{ return "column_list"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #endif /* ITEM_STRFUNC_INCLUDED */ diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 58b5a948048..ab7297f51b1 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -250,6 +250,9 @@ public: } void init_expr_cache_tracker(THD *thd); + + Item* build_clone(THD *thd, MEM_ROOT *mem_root) { return 0; } + Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } friend class select_result_interceptor; diff --git a/sql/item_sum.h b/sql/item_sum.h index e766e69a1c5..15cfd1f5b9c 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -772,6 +772,8 @@ public: } Item *copy_or_same(THD* thd); void remove(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void add_helper(bool perform_removal); @@ -829,6 +831,8 @@ class Item_sum_count :public Item_sum_int return has_with_distinct() ? 
"count(distinct " : "count("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -876,6 +880,8 @@ public: count= 0; Item_sum_sum::cleanup(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -934,6 +940,8 @@ public: count= 0; Item_sum_num::cleanup(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* @@ -953,6 +961,8 @@ class Item_sum_std :public Item_sum_variance Item *result_item(THD *thd, Field *field); const char *func_name() const { return "std("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; // This class is a string or number function depending on num_func @@ -1018,6 +1028,8 @@ public: bool add(); const char *func_name() const { return "min("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1031,6 +1043,8 @@ public: bool add(); const char *func_name() const { return "max("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1105,6 +1119,8 @@ public: bool add(); const char *func_name() const { return "bit_or("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void set_bits_from_counters(); @@ -1120,6 +1136,8 @@ public: bool add(); const char *func_name() const { return "bit_and("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void set_bits_from_counters(); @@ -1133,6 +1151,8 @@ public: bool add(); const char *func_name() const { return "bit_xor("; } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: void set_bits_from_counters(); @@ -1191,6 +1211,8 @@ public: my_decimal *val_decimal(my_decimal *dec) { return val_decimal_from_real(dec); } String *val_str(String *str) { return val_string_from_real(str); } double val_real(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1210,6 +1232,8 @@ public: longlong val_int() { return val_int_from_decimal(); } String *val_str(String *str) { return val_string_from_decimal(str); } my_decimal *val_decimal(my_decimal *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1234,6 +1258,8 @@ public: { return trace_unsupported_by_check_vcol_func_processor("var_field"); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1245,6 +1271,8 @@ public: { } enum Type type() const { return FIELD_STD_ITEM; } double val_real(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1332,6 +1360,8 @@ class Item_sum_udf_float :public Item_udf_sum enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } void fix_length_and_dec() { fix_num_length_and_dec(); } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1353,6 +1383,8 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } void fix_length_and_dec() { decimals=0; max_length=21; } Item *copy_or_same(THD* 
thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1393,6 +1425,8 @@ public: enum_field_types field_type() const { return string_field_type(); } void fix_length_and_dec(); Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1413,6 +1447,8 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; } void fix_length_and_dec() { fix_num_length_and_dec(); } Item *copy_or_same(THD* thd); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #else /* Dummy functions to get sql_yacc.cc compiled */ @@ -1606,6 +1642,8 @@ public: virtual void print(String *str, enum_query_type query_type); virtual bool change_context_processor(uchar *cntx) { context= (Name_resolution_context *)cntx; return FALSE; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #endif /* ITEM_SUM_INCLUDED */ diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 175f3b06c1a..0edfc4f6a82 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -53,6 +53,8 @@ public: { max_length=6*MY_CHARSET_BIN_MB_MAXLEN; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -67,6 +69,8 @@ public: decimals=0; max_length=6*MY_CHARSET_BIN_MB_MAXLEN; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -90,6 +94,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -123,6 +129,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -144,6 +152,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -178,6 +188,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -195,6 +207,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -216,6 +230,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -237,6 +253,8 @@ public: { return !has_time_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -258,6 +276,8 @@ public: { return !has_time_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -279,6 +299,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -300,6 +322,8 @@ public: { return !has_time_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -315,6 +339,8 @@ public: max_length=2*MY_CHARSET_BIN_MB_MAXLEN; maybe_null=1; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_yearweek :public Item_int_func @@ -335,6 +361,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -358,6 +386,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, 
MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -393,6 +423,8 @@ public: { return !has_date_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_dayname :public Item_func_weekday @@ -464,6 +496,8 @@ public: } longlong int_op(); my_decimal *decimal_op(my_decimal* buf); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -483,6 +517,8 @@ public: } longlong int_op(); my_decimal *decimal_op(my_decimal* buf); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -625,6 +661,8 @@ public: Item_func_curtime_local(THD *thd, uint dec): Item_func_curtime(thd, dec) {} const char *func_name() const { return "curtime"; } virtual void store_now_in_TIME(MYSQL_TIME *now_time); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -634,6 +672,8 @@ public: Item_func_curtime_utc(THD *thd, uint dec): Item_func_curtime(thd, dec) {} const char *func_name() const { return "utc_time"; } virtual void store_now_in_TIME(MYSQL_TIME *now_time); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -660,6 +700,8 @@ public: Item_func_curdate_local(THD *thd): Item_func_curdate(thd) {} const char *func_name() const { return "curdate"; } void store_now_in_TIME(MYSQL_TIME *now_time); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -669,6 +711,8 @@ public: Item_func_curdate_utc(THD *thd): Item_func_curdate(thd) {} const char *func_name() const { return "utc_date"; } void store_now_in_TIME(MYSQL_TIME *now_time); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -703,6 +747,8 @@ public: const char *func_name() const { return "now"; } virtual void store_now_in_TIME(MYSQL_TIME *now_time); virtual enum Functype functype() const { return NOW_FUNC; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -712,6 +758,8 @@ public: Item_func_now_utc(THD *thd, uint dec): Item_func_now(thd, dec) {} const char *func_name() const { return "utc_timestamp"; } virtual void store_now_in_TIME(MYSQL_TIME *now_time); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -733,6 +781,8 @@ public: maybe_null= 0; used_tables_cache|= RAND_TABLE_BIT; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -748,6 +798,8 @@ public: { return has_date_args() || has_time_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -766,6 +818,8 @@ public: void fix_length_and_dec(); uint format_length(const String *format); bool eq(const Item *item, bool binary_cmp) const; + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -777,6 +831,8 @@ class Item_func_from_unixtime :public Item_datetimefunc const char *func_name() const { return "from_unixtime"; } void fix_length_and_dec(); bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -811,6 +867,8 @@ class Item_func_convert_tz :public Item_datetimefunc void fix_length_and_dec(); bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); void cleanup(); + Item *get_copy(THD *thd, 
MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -825,6 +883,8 @@ public: Item_timefunc::fix_length_and_dec(); } const char *func_name() const { return "sec_to_time"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -842,6 +902,8 @@ public: bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); bool eq(const Item *item, bool binary_cmp) const; void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -896,6 +958,8 @@ class Item_extract :public Item_int_func } return true; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -919,6 +983,8 @@ public: String *val_str(String *a); void fix_length_and_dec(); void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -944,6 +1010,8 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); const char *cast_type() const { return "date"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -956,6 +1024,8 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); const char *cast_type() const { return "time"; } enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -968,6 +1038,8 @@ public: const char *cast_type() const { return "datetime"; } enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -979,6 +1051,8 @@ public: const char *func_name() const { return "makedate"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -995,6 +1069,8 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); void print(String *str, enum_query_type query_type); const char *func_name() const { return "add_time"; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_timediff :public Item_timefunc @@ -1009,6 +1085,8 @@ public: Item_timefunc::fix_length_and_dec(); } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; class Item_func_maketime :public Item_timefunc @@ -1024,6 +1102,8 @@ public: } const char *func_name() const { return "maketime"; } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1044,6 +1124,8 @@ public: { return !has_time_args(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1061,6 +1143,8 @@ public: maybe_null=1; } virtual void print(String *str, enum_query_type query_type); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1085,6 +1169,8 @@ public: fix_length_and_charset(17, default_charset()); } virtual void print(String *str, enum_query_type query_type); + Item 
*get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1103,6 +1189,8 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); const char *func_name() const { return "str_to_date"; } void fix_length_and_dec(); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -1112,6 +1200,8 @@ public: Item_func_last_day(THD *thd, Item *a): Item_datefunc(thd, a) {} const char *func_name() const { return "last_day"; } bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #endif /* ITEM_TIMEFUNC_INCLUDED */ diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h index 40f48cc7dc5..90ecd277d1f 100644 --- a/sql/item_windowfunc.h +++ b/sql/item_windowfunc.h @@ -119,7 +119,8 @@ public: { return "row_number"; } - + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -188,6 +189,8 @@ public: peer_tracker.cleanup(); Item_sum_int::cleanup(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -251,6 +254,8 @@ class Item_sum_dense_rank: public Item_sum_int peer_tracker.cleanup(); Item_sum_int::cleanup(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; /* @@ -342,6 +347,8 @@ class Item_sum_percent_rank: public Item_sum_window_with_row_count } void setup_window_func(THD *thd, Window_spec *window_spec); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: longlong cur_rank; // Current rank of the current row. @@ -419,6 +426,9 @@ class Item_sum_cume_dist: public Item_sum_window_with_row_count decimals = 10; // TODO-cvicentiu find out how many decimals the standard // requires. 
} + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: ulonglong current_row_count_; @@ -487,6 +497,9 @@ class Item_sum_ntile : public Item_sum_window_with_row_count enum Item_result result_type () const { return INT_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } private: longlong get_num_quantiles() { return args[0]->val_int(); } @@ -751,6 +764,8 @@ public: bool fix_fields(THD *thd, Item **ref); bool resolve_window_name(THD *thd); + + Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } }; diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index ba17d2c48c3..1736f7d4990 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -250,6 +250,8 @@ public: Item_nodeset_func(thd, pxml) {} const char *func_name() const { return "xpath_rootelement"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -261,6 +263,8 @@ public: Item_nodeset_func(thd, a, b, pxml) {} const char *func_name() const { return "xpath_union"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -293,6 +297,8 @@ public: Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} const char *func_name() const { return "xpath_selfbyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -305,6 +311,8 @@ public: Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} const char *func_name() const { return "xpath_childbyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -319,6 +327,8 @@ public: need_self(need_self_arg) {} const char *func_name() const { return "xpath_descendantbyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -333,6 +343,8 @@ public: need_self(need_self_arg) {} const char *func_name() const { return "xpath_ancestorbyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -345,6 +357,8 @@ public: Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} const char *func_name() const { return "xpath_parentbyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -357,6 +371,8 @@ public: Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {} const char *func_name() const { return "xpath_attributebyname"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -372,6 +388,8 @@ public: Item_nodeset_func(thd, a, b, pxml) {} const char *func_name() const { return "xpath_predicate"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -383,6 +401,8 @@ public: Item_nodeset_func(thd, a, b, pxml) { } const char *func_name() const { return "xpath_elementbyindex"; } String *val_nodeset(String *nodeset); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ 
-422,6 +442,8 @@ public: } return args[0]->val_real() ? 1 : 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -434,6 +456,8 @@ public: Item_xpath_cast_number(THD *thd, Item *a): Item_real_func(thd, a) {} const char *func_name() const { return "xpath_cast_number"; } virtual double val_real() { return args[0]->val_real(); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -449,6 +473,8 @@ public: String *val_nodeset(String *res) { return string_cache; } void fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -468,6 +494,8 @@ public: return ((MY_XPATH_FLT*)flt->ptr())->pos + 1; return 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -489,6 +517,8 @@ public: return predicate_supplied_context_size; return res->length() / sizeof(MY_XPATH_FLT); } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -532,6 +562,8 @@ public: } return sum; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -608,6 +640,8 @@ public: } return 0; } + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h index 3758025fc90..8daf8cbc281 100644 --- a/sql/item_xmlfunc.h +++ b/sql/item_xmlfunc.h @@ -105,6 +105,8 @@ public: Item_xml_str_func(thd, a, b) {} const char *func_name() const { return "extractvalue"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; @@ -119,6 +121,8 @@ public: Item_xml_str_func(thd, a, b, c) {} const char *func_name() const { return "updatexml"; } String *val_str(String *); + Item *get_copy(THD *thd, MEM_ROOT *mem_root) + { return get_item_copy(thd, mem_root, this); } }; #endif /* ITEM_XMLFUNC_INCLUDED */ diff --git a/sql/procedure.h b/sql/procedure.h index 1452f33652a..adbff842f95 100644 --- a/sql/procedure.h +++ b/sql/procedure.h @@ -57,6 +57,7 @@ public: { return trace_unsupported_by_check_vcol_func_processor("proc"); } + Item* get_copy(THD *thd, MEM_ROOT *mem_root) { return 0; } }; class Item_proc_real :public Item_proc diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 79e57cded81..5a2b435c069 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -986,3 +986,137 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived) unit->set_thd(thd); DBUG_RETURN(FALSE); } + + +/** + @brief + Extract the condition depended on derived table/view and pushed it there + + @param thd The thread handle + @param cond The condition from which to extract the pushed condition + @param derived The reference to the derived table/view + + @details + This functiom builds the most restrictive condition depending only on + the derived table/view that can be extracted from the condition cond. + The built condition is pushed into the having clauses of the + selects contained in the query specifying the derived table/view. + The function also checks for each select whether any condition depending + only on grouping fields can be extracted from the pushed condition. + If so, it pushes the condition over grouping fields into the where + clause of the select. 
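As a side illustration of the routing just described (pushed conjuncts over grouping fields go into the WHERE clause of a select, the remaining pushed conjuncts into its HAVING clause), here is a minimal self-contained C++ sketch. It is illustrative only: the Pred structure and the hard-coded derived table SELECT a, MAX(b) AS mx FROM t1 GROUP BY a are hypothetical and not part of the patch.

// Illustrative sketch only (hypothetical Pred struct, not server code):
// route each pushed conjunct either into the WHERE clause (when it refers
// only to grouping columns of the derived table) or into its HAVING clause.
#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Pred
{
  std::string text;                    // e.g. "a > 10"
  std::set<std::string> columns;       // derived-table columns it uses
};

int main()
{
  // Derived table:  SELECT a, MAX(b) AS mx FROM t1 GROUP BY a
  std::set<std::string> grouping= {"a"};

  std::vector<Pred> pushed= { {"a > 10", {"a"}}, {"mx < 100", {"mx"}} };

  std::vector<Pred> where_part, having_part;
  for (const Pred &p : pushed)
  {
    bool only_grouping=
      std::all_of(p.columns.begin(), p.columns.end(),
                  [&](const std::string &c) { return grouping.count(c) != 0; });
    (only_grouping ? where_part : having_part).push_back(p);
  }

  std::cout << "WHERE gets :";
  for (const Pred &p : where_part) std::cout << " (" << p.text << ")";
  std::cout << "\nHAVING gets:";
  for (const Pred &p : having_part) std::cout << " (" << p.text << ")";
  std::cout << "\n";
  return 0;
}

Running the sketch prints "WHERE gets : (a > 10)" and "HAVING gets: (mx < 100)", matching the split performed by pushdown_cond_for_derived below.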
+ + @retval + true if an error is reported + false otherwise +*/ + +bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived) +{ + if (!cond) + return false; + /* + Build the most restrictive condition extractable from 'cond' + that can be pushed into the derived table 'derived'. + All subexpressions of this condition are cloned from the + subexpressions of 'cond'. + This condition has to be fixed yet. + */ + Item *extracted_cond; + derived->check_pushable_cond_for_table(cond); + extracted_cond= derived->build_pushable_cond_for_table(thd, cond); + if (!extracted_cond) + { + /* Nothing can be pushed into the derived table */ + return false; + } + /* Push extracted_cond into every select of the unit specifying 'derived' */ + st_select_lex_unit *unit= derived->get_unit(); + st_select_lex *save_curr_select= thd->lex->current_select; + st_select_lex *sl= unit->first_select(); + for (; sl; sl= sl->next_select()) + { + thd->lex->current_select= sl; + /* + For each select of the unit except the last one + create a clone of extracted_cond + */ + Item *extracted_cond_copy= !sl->next_select() ? extracted_cond : + extracted_cond->build_clone(thd, thd->mem_root); + if (!extracted_cond_copy) + continue; + + /* + Figure out what can be extracted from the pushed condition + that could be pushed into the where clause of sl + */ + Item *cond_over_grouping_fields; + sl->collect_grouping_fields(thd); + sl->check_cond_extraction_for_grouping_fields(extracted_cond_copy, + &Item::exclusive_dependence_on_grouping_fields_processor); + cond_over_grouping_fields= + sl->build_cond_for_grouping_fields(thd, extracted_cond_copy, true); + + /* + Transform the references to the 'derived' columns from the condition + pushed into the where clause of sl to make them usable in the new context + */ + if (cond_over_grouping_fields) + cond_over_grouping_fields= cond_over_grouping_fields->transform(thd, + &Item::derived_field_transformer_for_where, + (uchar*) sl); + + if (cond_over_grouping_fields) + { + /* + In extracted_cond_copy remove top conjuncts that + has been pushed into the where clause of sl + */ + extracted_cond_copy= remove_pushed_top_conjuncts(thd, extracted_cond_copy); + + /* + Create the conjunction of the existing where condition of sl + and the pushed condition, take it as the new where condition of sl + and fix this new condition + */ + cond_over_grouping_fields->walk(&Item::cleanup_processor, 0, 0); + thd->change_item_tree(&sl->join->conds, + and_conds(thd, sl->join->conds, + cond_over_grouping_fields)); + + if (sl->join->conds->fix_fields(thd, &sl->join->conds)) + goto err; + + if (!extracted_cond_copy) + continue; + } + + /* + Transform the references to the 'derived' columns from the condition + pushed into the having clause of sl to make them usable in the new context + */ + extracted_cond_copy= extracted_cond_copy->transform(thd, + &Item::derived_field_transformer_for_having, + (uchar*) sl); + if (!extracted_cond_copy) + continue; + /* + Create the conjunction of the existing having condition of sl + and the pushed condition, take it as the new having condition of sl + and fix this new condition + */ + extracted_cond_copy->walk(&Item::cleanup_processor, 0, 0); + thd->change_item_tree(&sl->join->having, + and_conds(thd, sl->join->having, + extracted_cond_copy)); + sl->having_fix_field= 1; + if (sl->join->having->fix_fields(thd, &sl->join->having)) + return true; + sl->having_fix_field= 0; + } + thd->lex->current_select= save_curr_select; + return false; +err: + thd->lex->current_select= 
save_curr_select; + return true; +} diff --git a/sql/sql_derived.h b/sql/sql_derived.h index 1dffef7235b..c451e423032 100644 --- a/sql/sql_derived.h +++ b/sql/sql_derived.h @@ -37,4 +37,12 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived); */ bool mysql_derived_cleanup(THD *thd, LEX *lex, TABLE_LIST *derived); +Item *delete_not_needed_parts(THD *thd, Item *cond); + +#if 0 +bool pushdown_cond_for_derived(THD *thd, Item **cond, TABLE_LIST *derived); +#else +bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived); +#endif + #endif /* SQL_DERIVED_INCLUDED */ diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index de345b4dd1c..727fdd73265 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -4880,3 +4880,191 @@ void binlog_unsafe_map_init() } #endif + +/** + @brief + Finding fiels that are used in the GROUP BY of this st_select_lex + + @param thd The thread handle + + @details + This method looks through the fields which are used in the GROUP BY of this + st_select_lex and saves this fields. +*/ + +void st_select_lex::collect_grouping_fields(THD *thd) +{ + List_iterator li(join->fields_list); + Item *item= li++; + for (uint i= 0; i < master_unit()->derived->table->s->fields; i++, (item=li++)) + { + for (ORDER *ord= join->group_list; ord; ord= ord->next) + { + if ((*ord->item)->eq((Item*)item, 0)) + { + Grouping_tmp_field *grouping_tmp_field= + new Grouping_tmp_field(master_unit()->derived->table->field[i], item); + grouping_tmp_fields.push_back(grouping_tmp_field); + } + } + } +} + +/** + @brief + For a condition check possibility of exraction a formula over grouping fields + + @param cond The condition whose subformulas are to be analyzed + + @details + This method traverses the AND-OR condition cond and for each subformula of + the condition it checks whether it can be usable for the extraction of a + condition over the grouping fields of this select. The method uses + the call-back parameter check_processor to ckeck whether a primary formula + depends only on grouping fields. + The subformulas that are not usable are marked with the flag NO_EXTRACTION_FL. + The subformulas that can be entierly extracted are marked with the flag + FULL_EXTRACTION_FL. + @note + This method is called before any call of extract_cond_for_grouping_fields. + The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone + for the subformula when extracting the pushable condition. + The flag FULL_EXTRACTION_FL allows to delete later all top level conjuncts + from cond. 
+*/ + +void st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond, + Item_processor check_processor) +{ + cond->clear_extraction_flag(); + if (cond->type() == Item::COND_ITEM) + { + bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC; + List *arg_list= ((Item_cond*) cond)->argument_list(); + List_iterator li(*arg_list); + uint count= 0; // to count items not containing NO_EXTRACTION_FL + uint count_full= 0; // to count items with FULL_EXTRACTION_FL + Item *item; + while ((item=li++)) + { + check_cond_extraction_for_grouping_fields(item, check_processor); + if (item->get_extraction_flag() != NO_EXTRACTION_FL) + { + count++; + if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + count_full++; + } + else if (!and_cond) + break; + } + if ((and_cond && count == 0) || item) + cond->set_extraction_flag(NO_EXTRACTION_FL); + if (count_full == arg_list->elements) + cond->set_extraction_flag(FULL_EXTRACTION_FL); + if (cond->get_extraction_flag() != 0) + { + li.rewind(); + while ((item=li++)) + item->clear_extraction_flag(); + } + } + else + cond->set_extraction_flag(cond->walk(check_processor, + 0, (uchar *) this) ? + NO_EXTRACTION_FL : FULL_EXTRACTION_FL); +} + + +/** + @brief + Build condition extractable from the given one depended on grouping fields + + @param thd The thread handle + @param cond The condition from which the condition depended + on grouping fields is to be extracted + @param no_top_clones If it's true then no clones for the top fully + extractable conjuncts are built + + @details + For the given condition cond this method finds out what condition depended + only on the grouping fields can be extracted from cond. If such condition C + exists the method builds the item for it. + This method uses the flags NO_EXTRACTION_FL and FULL_EXTRACTION_FL set by the + preliminary call of st_select_lex::check_cond_extraction_for_grouping_fields + to figure out whether a subformula depends only on these fields or not. + @note + The built condition C is always implied by the condition cond + (cond => C). The method tries to build the most restictive such + condition (i.e. for any other condition C' such that cond => C' + we have C => C'). + @note + The build item is not ready for usage: substitution for the field items + has to be done and it has to be re-fixed. 
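The marking and building steps above implement a generic AND/OR extraction rule: under an AND operator a non-extractable conjunct is simply dropped, while under an OR operator a single non-extractable branch makes the whole disjunction unusable, which is why the built condition is always implied by the original one. Below is a minimal standalone C++ sketch of that rule; it is illustrative only, using a toy Cond tree with hypothetical names instead of the server's Item classes, and reducing the preliminary marking pass to a per-leaf 'extractable' flag.

// Illustrative sketch only: a toy AND/OR tree with hypothetical names,
// not the Item hierarchy the patch works on.  Shows the extraction rule:
// under AND a non-extractable conjunct is dropped, under OR one
// non-extractable branch makes the whole disjunction non-extractable,
// so the result is always implied by the original condition.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Cond
{
  enum Type { LEAF, AND, OR } type;
  std::string text;                        // leaf text, e.g. "dt.a > 10"
  bool extractable;                        // result of the marking pass
  std::vector<std::shared_ptr<Cond>> args;
};

static std::shared_ptr<Cond> leaf(const std::string &t, bool ok)
{ return std::make_shared<Cond>(Cond{Cond::LEAF, t, ok, {}}); }

// Build the part of 'cond' that consists only of extractable leaves.
static std::shared_ptr<Cond> build_extractable(const std::shared_ptr<Cond> &cond)
{
  if (cond->type == Cond::LEAF)
    return cond->extractable ? cond : nullptr;

  auto res= std::make_shared<Cond>(Cond{cond->type, "", true, {}});
  for (const auto &arg : cond->args)
  {
    auto part= build_extractable(arg);
    if (!part)
    {
      if (cond->type == Cond::AND)
        continue;                          // drop the conjunct
      return nullptr;                      // OR: whole disjunction unusable
    }
    res->args.push_back(part);
  }
  if (res->args.empty())
    return nullptr;
  return res->args.size() == 1 ? res->args[0] : res;
}

static void print(const std::shared_ptr<Cond> &c)
{
  if (!c) { std::cout << "<nothing>"; return; }
  if (c->type == Cond::LEAF) { std::cout << c->text; return; }
  std::cout << "(";
  for (size_t i= 0; i < c->args.size(); i++)
  {
    if (i) std::cout << (c->type == Cond::AND ? " AND " : " OR ");
    print(c->args[i]);
  }
  std::cout << ")";
}

int main()
{
  // (dt.a > 10 AND f(t2.b) < 7): only the first conjunct is extractable
  auto and_cond= std::make_shared<Cond>(Cond{Cond::AND, "", true,
    { leaf("dt.a > 10", true), leaf("f(t2.b) < 7", false) }});
  print(build_extractable(and_cond));      // prints: dt.a > 10
  std::cout << "\n";

  // (dt.a > 10 OR f(t2.b) < 7): nothing can be extracted
  auto or_cond= std::make_shared<Cond>(Cond{Cond::OR, "", true,
    { leaf("dt.a > 10", true), leaf("f(t2.b) < 7", false) }});
  print(build_extractable(or_cond));       // prints: <nothing>
  std::cout << "\n";
  return 0;
}

With the sample conditions in main(), the AND case yields just "dt.a > 10" while the OR case yields nothing, mirroring the NO_EXTRACTION_FL / FULL_EXTRACTION_FL handling in the functions here.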
+ + @retval + the built condition depended only on grouping fields if such a condition exists + NULL if there is no such a condition +*/ + +Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond, + bool no_top_clones) +{ + if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + { + if (no_top_clones) + return cond; + cond->clear_extraction_flag(); + return cond->build_clone(thd, thd->mem_root); + } + if (cond->type() == Item::COND_ITEM) + { + bool cond_and= false; + Item_cond *new_cond; + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + cond_and= true; + new_cond= new (thd->mem_root) Item_cond_and(thd); + } + else + new_cond= new (thd->mem_root) Item_cond_or(thd); + if (!new_cond) + return 0; + List_iterator li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + if (item->get_extraction_flag() == NO_EXTRACTION_FL) + { + DBUG_ASSERT(cond_and); + item->clear_extraction_flag(); + continue; + } + Item *fix= build_cond_for_grouping_fields(thd, item, + no_top_clones & cond_and); + if (!fix) + { + if (cond_and) + continue; + break; + } + new_cond->argument_list()->push_back(fix, thd->mem_root); + } + + if (!cond_and && item) + { + while((item= li++)) + item->clear_extraction_flag(); + return 0; + } + switch (new_cond->argument_list()->elements) + { + case 0: + return 0; + case 1: + return new_cond->argument_list()->head(); + default: + return new_cond; + } + } + return 0; +} diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 10247bd33a2..77c9b9c7f37 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -726,6 +726,21 @@ public: typedef class st_select_lex_unit SELECT_LEX_UNIT; typedef Bounds_checked_array Ref_ptr_array; + +/* + Structure which consists of the field and the item which + produces this field. 
+*/ + +class Grouping_tmp_field :public Sql_alloc +{ +public: + Field *tmp_field; + Item *producing_item; + Grouping_tmp_field(Field *fld, Item *item) + :tmp_field(fld), producing_item(item) {} +}; + /* SELECT_LEX - store information of parsed SELECT statment */ @@ -910,6 +925,8 @@ public: /* namp of nesting SELECT visibility (for aggregate functions check) */ nesting_map name_visibility_map; + + List grouping_tmp_fields; void init_query(); void init_select(); @@ -1097,6 +1114,11 @@ public: return master_unit()->with_element; } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); + void collect_grouping_fields(THD *thd); + void check_cond_extraction_for_grouping_fields(Item *cond, + Item_processor processor); + Item *build_cond_for_grouping_fields(THD *thd, Item *cond, + bool no_to_clones); List window_specs; void prepare_add_window_spec(THD *thd); @@ -3193,6 +3215,7 @@ public: } }; + extern sql_digest_state * digest_add_token(sql_digest_state *state, uint token, LEX_YYSTYPE yylval); diff --git a/sql/sql_priv.h b/sql/sql_priv.h index b15a80a889a..0d19ad1da17 100644 --- a/sql/sql_priv.h +++ b/sql/sql_priv.h @@ -224,7 +224,7 @@ #define OPTIMIZER_SWITCH_TABLE_ELIMINATION (1ULL << 26) #define OPTIMIZER_SWITCH_EXTENDED_KEYS (1ULL << 27) #define OPTIMIZER_SWITCH_EXISTS_TO_IN (1ULL << 28) -#define OPTIMIZER_SWITCH_USE_CONDITION_SELECTIVITY (1ULL << 29) +#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED (1ULL << 29) #define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \ OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \ @@ -248,7 +248,8 @@ OPTIMIZER_SWITCH_SEMIJOIN | \ OPTIMIZER_SWITCH_FIRSTMATCH | \ OPTIMIZER_SWITCH_LOOSE_SCAN | \ - OPTIMIZER_SWITCH_EXISTS_TO_IN) + OPTIMIZER_SWITCH_EXISTS_TO_IN | \ + OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED) /* Replication uses 8 bytes to store SQL_MODE in the binary log. The day you use strictly more than 64 bits by adding one more define above, you should diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a6bbfc806e1..c09f2e601f1 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1122,11 +1122,14 @@ int JOIN::optimize() int JOIN::optimize_inner() { +/* + if (conds) { Item *it_clone= conds->build_clone(thd,thd->mem_root); } +*/ ulonglong select_opts_for_readinfo; uint no_jbuf_after; JOIN_TAB *tab; DBUG_ENTER("JOIN::optimize"); - + dbug_print_item(conds); do_send_rows = (unit->select_limit_cnt) ? 1 : 0; // to prevent double initialization on EXPLAIN if (optimized) @@ -1139,10 +1142,6 @@ JOIN::optimize_inner() set_allowed_join_cache_types(); need_distinct= TRUE; - /* Run optimize phase for all derived tables/views used in this SELECT. */ - if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE)) - DBUG_RETURN(1); - if (select_lex->first_cond_optimization) { //Do it only for the first execution @@ -1254,9 +1253,32 @@ JOIN::optimize_inner() if (setup_jtbm_semi_joins(this, join_list, &conds)) DBUG_RETURN(1); - + conds= optimize_cond(this, conds, join_list, FALSE, &cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS); + + if (thd->lex->sql_command == SQLCOM_SELECT && + optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED)) + { + TABLE_LIST *tbl; + List_iterator_fast li(select_lex->leaf_tables); + while ((tbl= li++)) + { + if (tbl->is_materialized_derived()) + { + if (pushdown_cond_for_derived(thd, conds, tbl)) + DBUG_RETURN(1); + if (mysql_handle_single_derived(thd->lex, tbl, DT_OPTIMIZE)) + DBUG_RETURN(1); + } + } + } + else + { + /* Run optimize phase for all derived tables/views used in this SELECT. 
*/ + if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE)) + DBUG_RETURN(1); + } if (thd->is_error()) { @@ -26021,6 +26043,61 @@ AGGR_OP::end_send() } +/** + @brief + Remove marked top conjuncts of a condition + + @param thd The thread handle + @param cond The condition which subformulas are to be removed + + @details + The function removes all top conjuncts marked with the flag + FULL_EXTRACTION_FL from the condition 'cond'. The resulting + formula is returned a the result of the function + If 'cond' s marked with such flag the function returns 0. + The function clear the extraction flags for the removed + formulas + + @retval + condition without removed subformulas + 0 if the whole 'cond' is removed +*/ + +Item *remove_pushed_top_conjuncts(THD *thd, Item *cond) +{ + if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) + { + cond->clear_extraction_flag(); + return 0; + } + if (cond->type() == Item::COND_ITEM) + { + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + List_iterator li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item= li++)) + { + if (item->get_extraction_flag() == FULL_EXTRACTION_FL) + { + item->clear_extraction_flag(); + li.remove(); + } + } + switch (((Item_cond*) cond)->argument_list()->elements) + { + case 0: + return 0; + case 1: + return ((Item_cond*) cond)->argument_list()->head(); + default: + return cond; + } + } + } + return cond; +} + /** @} (end of group Query_Optimizer) */ diff --git a/sql/sql_select.h b/sql/sql_select.h index d4dc691c547..408dd6f9958 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -188,7 +188,7 @@ typedef enum_nested_loop_state Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab); int rr_sequential(READ_RECORD *info); int rr_sequential_and_unpack(READ_RECORD *info); - +Item *remove_pushed_top_conjuncts(THD *thd, Item *cond); #include "sql_explain.h" diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 4bf202813f3..881c6715ba2 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2382,6 +2382,7 @@ export const char *optimizer_switch_names[]= "table_elimination", "extended_keys", "exists_to_in", + "condition_pushdown_for_derived", "default", NullS }; static bool fix_optimizer_switch(sys_var *self, THD *thd, diff --git a/sql/table.cc b/sql/table.cc index dc1730b5b6f..63a54331083 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -7445,3 +7445,179 @@ double KEY::actual_rec_per_key(uint i) return (is_statistics_from_stat_tables ? read_stats->get_avg_frequency(i) : (double) rec_per_key[i]); } + + +/** + @brief + Mark subformulas of a condition unusable for the condition pushed into table + + @param cond The condition whose subformulas are to be marked + + @details + This method recursively traverses the AND-OR condition cond and for each subformula + of the codition it checks whether it can be usable for the extraction of a condition + that can be pushed into this table. The subformulas that are not usable are + marked with the flag NO_EXTRACTION_FL. + @note + This method is called before any call of TABLE_LIST::build_pushable_cond_for_table. + The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone + for the subformula when extracting the pushable condition. 
+*/ + +void TABLE_LIST::check_pushable_cond_for_table(Item *cond) +{ + table_map tab_map= table->map; + cond->clear_extraction_flag(); + if (cond->type() == Item::COND_ITEM) + { + bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC; + List_iterator li(*((Item_cond*) cond)->argument_list()); + uint count= 0; + Item *item; + while ((item=li++)) + { + check_pushable_cond_for_table(item); + if (item->get_extraction_flag() != NO_EXTRACTION_FL) + count++; + else if (!and_cond) + break; + } + if ((and_cond && count == 0) || item) + { + cond->set_extraction_flag(NO_EXTRACTION_FL); + if (and_cond) + li.rewind(); + while ((item= li++)) + item->clear_extraction_flag(); + } + } + else if (cond->walk(&Item::exclusive_dependence_on_table_processor, + 0, (uchar *) &tab_map)) + cond->set_extraction_flag(NO_EXTRACTION_FL); +} + + +/** + @brief + Build condition extractable from the given one depended only on this table + + @param thd The thread handle + @param cond The condition from which the pushable one is to be extracted + + @details + For the given condition cond this method finds out what condition depended + only on this table can be extracted from cond. If such condition C exists + the method builds the item for it. + The method uses the flag NO_EXTRACTION_FL set by the preliminary call of + the method TABLE_LIST::check_pushable_cond_for_table to figure out whether + a subformula depends only on this table or not. + @note + The built condition C is always implied by the condition cond + (cond => C). The method tries to build the most restictive such + condition (i.e. for any other condition C' such that cond => C' + we have C => C'). + @note + The build item is not ready for usage: substitution for the field items + has to be done and it has to be re-fixed. 
+ + @retval + the built condition pushable into this table if such a condition exists + NULL if there is no such a condition +*/ + +Item* TABLE_LIST::build_pushable_cond_for_table(THD *thd, Item *cond) +{ + table_map tab_map= table->map; + bool is_multiple_equality= cond->type() == Item::FUNC_ITEM && + ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC; + if (cond->get_extraction_flag() == NO_EXTRACTION_FL) + return 0; + if (cond->type() == Item::COND_ITEM) + { + bool cond_and= false; + Item_cond *new_cond; + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + cond_and= true; + new_cond=new (thd->mem_root) Item_cond_and(thd); + } + else + new_cond= new (thd->mem_root) Item_cond_or(thd); + if (!new_cond) + return 0; + List_iterator li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + if (item->get_extraction_flag() == NO_EXTRACTION_FL) + { + if (!cond_and) + return 0; + continue; + } + Item *fix= build_pushable_cond_for_table(thd, item); + if (!fix && !cond_and) + return 0; + if (!fix) + continue; + new_cond->argument_list()->push_back(fix, thd->mem_root); + } + switch (new_cond->argument_list()->elements) + { + case 0: + return 0; + case 1: + return new_cond->argument_list()->head(); + default: + return new_cond; + } + } + else if (is_multiple_equality) + { + if (!(cond->used_tables() & tab_map)) + return 0; + Item *new_cond= NULL; + int i= 0; + Item_equal *item_equal= (Item_equal *) cond; + Item *left_item = item_equal->get_const(); + Item_equal_fields_iterator it(*item_equal); + Item *item; + if (!left_item) + { + while ((item=it++)) + if (item->used_tables() == tab_map) + { + left_item= item; + break; + } + } + if (!left_item) + return 0; + while ((item=it++)) + { + if (!(item->used_tables() == tab_map)) + continue; + Item_func_eq *eq= + new (thd->mem_root) Item_func_eq(thd, item, left_item); + if (eq) + { + i++; + switch (i) + { + case 1: + new_cond= eq; + break; + case 2: + new_cond= new (thd->mem_root) Item_cond_and(thd, new_cond, eq); + break; + default: + ((Item_cond_and*)new_cond)->argument_list()->push_back(eq, thd->mem_root); + } + } + } + return new_cond; + } + else if (cond->get_extraction_flag() != NO_EXTRACTION_FL) + return cond->build_clone(thd, thd->mem_root); + return 0; +} diff --git a/sql/table.h b/sql/table.h index a105df31e93..e0d98627d89 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2311,6 +2311,8 @@ struct TABLE_LIST return false; } void set_lock_type(THD* thd, enum thr_lock_type lock); + void check_pushable_cond_for_table(Item *cond); + Item *build_pushable_cond_for_table(THD *thd, Item *cond); private: bool prep_check_option(THD *thd, uint8 check_opt_type); -- cgit v1.2.1 From 294961cc4d4d55730e807b3f0a7f93a3dd4d4f7b Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 22 Aug 2016 18:38:06 -0400 Subject: MDEV-10538: MariaDB fails to start without galera_recovery in systemd mode Update ExecStartPre scripts to not fail if 'galera_recovery' script is not available. --- support-files/mariadb.service.in | 6 ++++-- support-files/mariadb@.service.in | 15 +++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 6b8b2ba0ba3..9b97172e1b5 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -63,9 +63,11 @@ PermissionsStartOnly=true # galera_recovery simply returns an empty string. In any case, however, # the script is not expected to return with a non-zero status. 
# It is always safe to unset _WSREP_START_POSITION environment variable. +# Do not panic if galera_recovery script is not available. (MDEV-10538) ExecStartPre=/bin/sh -c "systemctl unset-environment _WSREP_START_POSITION" -ExecStartPre=/bin/sh -c "VAR=`/usr/bin/galera_recovery`; [ $? -eq 0 ] && \ - systemctl set-environment _WSREP_START_POSITION=$VAR || exit 1" +ExecStartPre=/bin/sh -c "[ ! -e /usr/bin/galera_recovery ] && VAR= || \ + VAR=`/usr/bin/galera_recovery`; [ $? -eq 0 ] \ + && systemctl set-environment _WSREP_START_POSITION=$VAR || exit 1" # Needed to create system tables etc. # ExecStartPre=/usr/bin/mysql_install_db -u mysql diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index 965e85260e4..e941f21526e 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -70,16 +70,19 @@ PermissionsStartOnly=true # galera_recovery simply returns an empty string. In any case, however, # the script is not expected to return with a non-zero status. # It is always safe to unset _WSREP_START_POSITION%I environment variable. +# Do not panic if galera_recovery script is not available. (MDEV-10538) ExecStartPre=/bin/sh -c "systemctl unset-environment _WSREP_START_POSITION%I" -ExecStartPre=/bin/sh -c "VAR=`/usr/bin/galera_recovery \ - --defaults-file=@INSTALL_SYSCONF2DIR@/my%I.cnf`; [ $? -eq 0 ] && \ - systemctl set-environment _WSREP_START_POSITION%I=$VAR || exit 1" + +ExecStartPre=/bin/sh -c "[ ! -e /usr/bin/galera_recovery ] && VAR= || \ + VAR=`/usr/bin/galera_recovery --defaults-file=@INSTALL_SYSCONF2DIR@/my%I.cnf`; [ $? -eq 0 ] \ + && systemctl set-environment _WSREP_START_POSITION%I=$VAR || exit 1" + # Alternate: (remove ConditionPathExists above) # use [mysqld.INSTANCENAME] as sections in my.cnf # -#ExecStartPre=/bin/sh -c "VAR=`/usr/bin/galera_recovery \ -# --defaults-group-suffix=%I`; [ $? -eq 0 ] && \ -# systemctl set-environment _WSREP_START_POSITION%I=$VAR || exit 1" +#ExecStartPre=/bin/sh -c "[ ! -e /usr/bin/galera_recovery ] && VAR= || \ +# VAR=`/usr/bin/galera_recovery --defaults-group-suffix=%I`; [ $? -eq 0 ] \ +# && systemctl set-environment _WSREP_START_POSITION%I=$VAR || exit 1" # Needed to create system tables etc. # ExecStartPre=/usr/bin/mysql_install_db -u mysql -- cgit v1.2.1 From 3ac0721a3c8eecf8843b527cdc4d08c20edb6268 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 22 Aug 2016 19:06:32 -0400 Subject: MDEV-10507: MariaDB 10.1 + wsrep fails to start under systemd post-reboot /var/run/mysqld must be created before wsrep recovery. --- support-files/mariadb.service.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 9b97172e1b5..15f41c6377f 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -59,6 +59,8 @@ ProtectHome=true # Execute pre and post scripts as root, otherwise it does it as User= PermissionsStartOnly=true +@SYSTEMD_EXECSTARTPRE@ + # Perform automatic wsrep recovery. When server is started without wsrep, # galera_recovery simply returns an empty string. In any case, however, # the script is not expected to return with a non-zero status. @@ -77,7 +79,6 @@ ExecStartPre=/bin/sh -c "[ ! -e /usr/bin/galera_recovery ] && VAR= || \ # Use the [service] section and Environment="MYSQLD_OPTS=...". # This isn't a replacement for my.cnf. 
# _WSREP_NEW_CLUSTER is for the exclusive use of the script galera_new_cluster -@SYSTEMD_EXECSTARTPRE@ ExecStart=/usr/sbin/mysqld $MYSQLD_OPTS $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION @SYSTEMD_EXECSTARTPOST@ -- cgit v1.2.1 From 2024cddaa412e7c4a2aff8bb50907868155019da Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Mon, 22 Aug 2016 21:27:20 -0400 Subject: MDEV-10518: Large wsrep_gtid_domain_id may break IST wsrep_gtid_domain_id was incorrectly being parsed and stored as a signed long number on the joiner node. --- mysql-test/suite/galera/r/mdev_10518.result | 74 +++++++++++++++++++++++++++++ mysql-test/suite/galera/t/mdev_10518.cnf | 17 +++++++ mysql-test/suite/galera/t/mdev_10518.test | 53 +++++++++++++++++++++ sql/wsrep_sst.cc | 6 +-- 4 files changed, 147 insertions(+), 3 deletions(-) create mode 100644 mysql-test/suite/galera/r/mdev_10518.result create mode 100644 mysql-test/suite/galera/t/mdev_10518.cnf create mode 100644 mysql-test/suite/galera/t/mdev_10518.test diff --git a/mysql-test/suite/galera/r/mdev_10518.result b/mysql-test/suite/galera/r/mdev_10518.result new file mode 100644 index 00000000000..b2a3e0a65ef --- /dev/null +++ b/mysql-test/suite/galera/r/mdev_10518.result @@ -0,0 +1,74 @@ +# On node_1 +list of GTID variables : +gtid_domain_id 1 +gtid_binlog_pos +gtid_binlog_state +gtid_current_pos +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_2 +list of GTID variables : +gtid_domain_id 2 +gtid_binlog_pos +gtid_binlog_state +gtid_current_pos +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_1 +CREATE TABLE t1(i INT) ENGINE=INNODB; +CREATE TABLE t2(i INT) ENGINE=MEMORY; +INSERT INTO t1 VALUES(1); +SELECT * FROM t1; +i +1 +SELECT * FROM t2; +i +list of GTID variables : +gtid_domain_id 1 +gtid_binlog_pos 4294967295-1-3 +gtid_binlog_state 4294967295-1-3 +gtid_current_pos 4294967295-1-3 +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_2 +SELECT * FROM t1; +i +1 +list of GTID variables : +gtid_domain_id 2 +gtid_binlog_pos 4294967295-1-3 +gtid_binlog_state 4294967295-1-3 +gtid_current_pos +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_1 +INSERT INTO t2 VALUES(1); +SELECT * FROM t2; +i +1 +list of GTID variables : +gtid_domain_id 1 +gtid_binlog_pos 1-1-1,4294967295-1-3 +gtid_binlog_state 1-1-1,4294967295-1-3 +gtid_current_pos 1-1-1,4294967295-1-3 +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_2 +SELECT * FROM t2; +i +list of GTID variables : +gtid_domain_id 2 +gtid_binlog_pos 4294967295-1-3 +gtid_binlog_state 4294967295-1-3 +gtid_current_pos +gtid_slave_pos +wsrep_gtid_domain_id 4294967295 +wsrep_gtid_mode 1 +# On node_1 +DROP TABLE t1, t2; +# End of test diff --git a/mysql-test/suite/galera/t/mdev_10518.cnf b/mysql-test/suite/galera/t/mdev_10518.cnf new file mode 100644 index 00000000000..482334c2f8a --- /dev/null +++ b/mysql-test/suite/galera/t/mdev_10518.cnf @@ -0,0 +1,17 @@ +!include ../galera_2nodes.cnf + +[mysqld] +log-bin +log-slave-updates + +[mysqld.1] +gtid_domain_id=1 +wsrep_gtid_mode=ON +# Maximum allowed wsrep_gtid_domain_id. 
+wsrep_gtid_domain_id=4294967295 + +[mysqld.2] +gtid_domain_id=2 +wsrep_gtid_mode=ON +#wsrep_gitd_domain_id value will be inherited from donor node (mysqld.1) +#wsrep_gitd_domain_id=X diff --git a/mysql-test/suite/galera/t/mdev_10518.test b/mysql-test/suite/galera/t/mdev_10518.test new file mode 100644 index 00000000000..c4127b4f655 --- /dev/null +++ b/mysql-test/suite/galera/t/mdev_10518.test @@ -0,0 +1,53 @@ +# Test for @@wsrep_gtid_mode and @@wsrep_gtid_domain_id variables +# +# When @@wsrep_gtid_mode=ON, all DDL/DML commands and transactions that +# are meant to be replicated over Galera cluster nodes are tagged with +# galera gtid_domain_id (@@wsrep_gtid_domain_id), while others are tagged +# with the local domain_id (@@gtid_domain_id). + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +--echo # On node_1 +--connection node_1 +# print initial GTIDs +source include/print_gtid.inc; + +--echo # On node_2 +--connection node_2 +# print initial GTIDs +source include/print_gtid.inc; + +--echo # On node_1 +--connection node_1 +CREATE TABLE t1(i INT) ENGINE=INNODB; +CREATE TABLE t2(i INT) ENGINE=MEMORY; +INSERT INTO t1 VALUES(1); +SELECT * FROM t1; +SELECT * FROM t2; +source include/print_gtid.inc; + +--echo # On node_2 +--connection node_2 +SELECT * FROM t1; +source include/print_gtid.inc; + +--echo # On node_1 +--connection node_1 +INSERT INTO t2 VALUES(1); +SELECT * FROM t2; +source include/print_gtid.inc; + +--echo # On node_2 +--connection node_2 +SELECT * FROM t2; +source include/print_gtid.inc; + +--echo # On node_1 +--connection node_1 +# Cleanup +DROP TABLE t1, t2; + +--source include/galera_end.inc +--echo # End of test + diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index be10d2762a0..d88263d75cb 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -477,7 +477,7 @@ static void* sst_joiner_thread (void* a) } else { // Scan state ID first followed by wsrep_gtid_domain_id. char uuid[512]; - long int domain_id; + unsigned long int domain_id; size_t len= pos - out + 1; if (len > sizeof(uuid)) goto err; // safety check @@ -491,11 +491,11 @@ static void* sst_joiner_thread (void* a) else if (wsrep_gtid_mode) { errno= 0; /* Reset the errno */ - domain_id= strtol(pos + 1, NULL, 10); + domain_id= strtoul(pos + 1, NULL, 10); err= errno; /* Check if we received a valid gtid_domain_id. */ - if (err == EINVAL || err == ERANGE || domain_id < 0x0 || domain_id > 0xFFFF) + if (err == EINVAL || err == ERANGE) { WSREP_ERROR("Failed to get donor wsrep_gtid_domain_id."); err= EINVAL; -- cgit v1.2.1 From 4da2b83af712492e4c3cb85e0005cde8511fa810 Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 23 Aug 2016 15:03:31 +0300 Subject: Fixed compiler error and some warnings on windows --- extra/perror.c | 2 +- mysys/my_error.c | 2 +- sql-common/my_time.c | 4 ++-- sql/derror.cc | 2 +- sql/handler.cc | 2 +- sql/net_serv.cc | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/extra/perror.c b/extra/perror.c index 8aa6aa35b08..55d1e921739 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -250,7 +250,7 @@ static my_bool print_win_error_msg(DWORD error, my_bool verbose) will ignore calls to register already registered error numbers. 
*/ -static const char **get_handler_error_messages() +static const char **get_handler_error_messages(void) { return handler_error_messages; } diff --git a/mysys/my_error.c b/mysys/my_error.c index 5d16091e0be..44d112bc049 100644 --- a/mysys/my_error.c +++ b/mysys/my_error.c @@ -217,7 +217,7 @@ void my_message(uint error, const char *str, register myf MyFlags) @retval != 0 Error */ -int my_error_register(const char** (*get_errmsgs) (), uint first, uint last) +int my_error_register(const char** (*get_errmsgs) (void), uint first, uint last) { struct my_err_head *meh_p; struct my_err_head **search_meh_pp; diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 28757a2c96c..7cf8692a3f6 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -224,7 +224,7 @@ my_bool check_datetime_range(const MYSQL_TIME *ltime) ltime->minute > 59 || ltime->second > 59 || ltime->second_part > TIME_MAX_SECOND_PART || (ltime->hour > - (ltime->time_type == MYSQL_TIMESTAMP_TIME ? TIME_MAX_HOUR : 23)); + (uint) (ltime->time_type == MYSQL_TIMESTAMP_TIME ? TIME_MAX_HOUR : 23)); } @@ -237,7 +237,7 @@ static void get_microseconds(ulong *val, MYSQL_TIME_STATUS *status, if (get_digits(&tmp, number_of_fields, str, end, 6)) status->warnings|= MYSQL_TIME_WARN_TRUNCATED; if ((status->precision= (*str - start)) < 6) - *val= tmp * log_10_int[6 - (*str - start)]; + *val= (ulong) (tmp * log_10_int[6 - (*str - start)]); else *val= tmp; if (skip_digits(str, end)) diff --git a/sql/derror.cc b/sql/derror.cc index f19f73238fb..776b2bb98e1 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -35,7 +35,7 @@ static void init_myfunc_errs(void); C_MODE_START -static const char **get_server_errmsgs() +static const char **get_server_errmsgs(void) { if (!current_thd) return DEFAULT_ERRMSGS; diff --git a/sql/handler.cc b/sql/handler.cc index 0edff665a33..8298d286259 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -284,7 +284,7 @@ handler *get_ha_partition(partition_info *part_info) static const char **handler_errmsgs; C_MODE_START -static const char **get_handler_errmsgs() +static const char **get_handler_errmsgs(void) { return handler_errmsgs; } diff --git a/sql/net_serv.cc b/sql/net_serv.cc index b48d36700c6..3e17ced92ba 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -831,7 +831,7 @@ my_real_read(NET *net, size_t *complen, size_t length; uint i,retry_count=0; ulong len=packet_error; - my_bool expect_error_packet __attribute((unused))= 0; + my_bool expect_error_packet __attribute__((unused))= 0; thr_alarm_t alarmed; #ifndef NO_ALARM ALARM alarm_buff; -- cgit v1.2.1 From 4eb898bb1663ab470a07e8419de4aa14b5afc667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 16 Aug 2016 11:25:11 +0300 Subject: MDEV-10563 Crash during shutdown in Master_info_index::any_slave_sql_running In well defined C code, the "this" pointer is never NULL. Currently, we were potentially dereferencing a NULL pointer (master_info_index). GCC v6 removes any "if (!this)" conditions as it assumes this is always a non-null pointer. In order to prevent undefined behaviour, check the pointer before dereferencing and remove the check within member functions. 
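To make the undefined-behaviour argument concrete, here is a minimal self-contained C++ sketch (hypothetical Registry and safe_count names, not the actual Master_info_index code): a member function that tests its own 'this' pointer can legally be reduced to nothing by the optimizer, so the NULL test has to move to the call site, which is exactly the shape of the changes below.

// Minimal self-contained sketch of the undefined-behaviour pattern the
// commit message describes (hypothetical Registry class, not the actual
// Master_info_index code).
#include <iostream>

struct Registry
{
  int records= 0;

  // Anti-pattern: in well-formed C++ 'this' can never be NULL, so an
  // optimizer (e.g. GCC 6) is allowed to drop the check below; calling
  // this function through a NULL pointer is undefined behaviour anyway.
  int count_unsafe()
  {
    if (!this)            // may be removed entirely by the compiler
      return 0;
    return records;
  }

  int count() const { return records; }
};

// The pattern used by the fix: test the pointer at the call site instead.
static int safe_count(const Registry *reg)
{
  return reg ? reg->count() : 0;
}

int main()
{
  Registry *reg= nullptr;                  // e.g. already freed on shutdown
  std::cout << safe_count(reg) << "\n";    // prints 0, no undefined behaviour
  return 0;
}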
--- sql/item_func.cc | 7 ++++--- sql/mysqld.cc | 5 ++++- sql/rpl_mi.cc | 7 +------ sql/slave.cc | 1 + sql/sys_vars.cc | 25 ++++++++++++++++--------- 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/sql/item_func.cc b/sql/item_func.cc index b637213bc2d..9ee1ba4c7a7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3942,7 +3942,7 @@ longlong Item_master_pos_wait::val_int() longlong timeout = (arg_count>=3) ? args[2]->val_int() : 0 ; String connection_name_buff; LEX_STRING connection_name; - Master_info *mi; + Master_info *mi= NULL; if (arg_count >= 4) { String *con; @@ -3962,8 +3962,9 @@ longlong Item_master_pos_wait::val_int() connection_name= thd->variables.default_master_connection; mysql_mutex_lock(&LOCK_active_mi); - mi= master_info_index->get_master_info(&connection_name, - Sql_condition::WARN_LEVEL_WARN); + if (master_info_index) // master_info_index is set to NULL on shutdown. + mi= master_info_index->get_master_info(&connection_name, + Sql_condition::WARN_LEVEL_WARN); mysql_mutex_unlock(&LOCK_active_mi); if (!mi) goto err; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 826f2af3a85..9748add6505 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -7307,7 +7307,10 @@ static int show_slaves_running(THD *thd, SHOW_VAR *var, char *buff) var->value= buff; mysql_mutex_lock(&LOCK_active_mi); - *((longlong *)buff)= master_info_index->any_slave_sql_running(); + if (master_info_index) + *((longlong *)buff)= master_info_index->any_slave_sql_running(); + else + *((longlong *)buff)= 0; mysql_mutex_unlock(&LOCK_active_mi); return 0; diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 9c6f4639717..249bf7608e5 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -1095,8 +1095,6 @@ Master_info_index::get_master_info(LEX_STRING *connection_name, connection_name->str)); mysql_mutex_assert_owner(&LOCK_active_mi); - if (!this) // master_info_index is set to NULL on server shutdown - DBUG_RETURN(NULL); /* Make name lower case for comparison */ res= strmake(buff, connection_name->str, connection_name->length); @@ -1250,8 +1248,6 @@ bool Master_info_index::give_error_if_slave_running() { DBUG_ENTER("give_error_if_slave_running"); mysql_mutex_assert_owner(&LOCK_active_mi); - if (!this) // master_info_index is set to NULL on server shutdown - DBUG_RETURN(TRUE); for (uint i= 0; i< master_info_hash.records; ++i) { @@ -1282,8 +1278,7 @@ uint Master_info_index::any_slave_sql_running() { uint count= 0; DBUG_ENTER("any_slave_sql_running"); - if (!this) // master_info_index is set to NULL on server shutdown - DBUG_RETURN(count); + mysql_mutex_assert_owner(&LOCK_active_mi); for (uint i= 0; i< master_info_hash.records; ++i) { diff --git a/sql/slave.cc b/sql/slave.cc index d8ec946ad16..6dc1a66a2ac 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -649,6 +649,7 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) mysql_mutex_unlock(log_lock); } if (opt_slave_parallel_threads > 0 && + master_info_index &&// master_info_index is set to NULL on server shutdown !master_info_index->any_slave_sql_running()) rpl_parallel_inactivate_pool(&global_rpl_thread_pool); if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 5b2b6e32314..689d35c9cc3 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1538,7 +1538,8 @@ Sys_var_gtid_slave_pos::do_check(THD *thd, set_var *var) } mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + 
master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); if (running) return true; @@ -1578,7 +1579,7 @@ Sys_var_gtid_slave_pos::global_update(THD *thd, set_var *var) mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); - if (master_info_index->give_error_if_slave_running()) + if (!master_info_index || master_info_index->give_error_if_slave_running()) err= true; else err= rpl_gtid_pos_update(thd, var->save_result.string_value.str, @@ -1767,7 +1768,8 @@ check_slave_parallel_threads(sys_var *self, THD *thd, set_var *var) bool running; mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); if (running) return true; @@ -1782,7 +1784,8 @@ fix_slave_parallel_threads(sys_var *self, THD *thd, enum_var_type type) mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); - err= master_info_index->give_error_if_slave_running(); + err= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); mysql_mutex_lock(&LOCK_global_system_variables); @@ -1809,7 +1812,8 @@ check_slave_domain_parallel_threads(sys_var *self, THD *thd, set_var *var) bool running; mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); if (running) return true; @@ -1824,7 +1828,8 @@ fix_slave_domain_parallel_threads(sys_var *self, THD *thd, enum_var_type type) mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); mysql_mutex_lock(&LOCK_global_system_variables); @@ -1862,7 +1867,8 @@ check_gtid_ignore_duplicates(sys_var *self, THD *thd, set_var *var) bool running; mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); if (running) return true; @@ -1877,7 +1883,8 @@ fix_gtid_ignore_duplicates(sys_var *self, THD *thd, enum_var_type type) mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); - running= master_info_index->give_error_if_slave_running(); + running= (!master_info_index || + master_info_index->give_error_if_slave_running()); mysql_mutex_unlock(&LOCK_active_mi); mysql_mutex_lock(&LOCK_global_system_variables); @@ -2830,7 +2837,7 @@ Sys_var_replicate_events_marked_for_skip::global_update(THD *thd, set_var *var) mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_active_mi); - if (!master_info_index->give_error_if_slave_running()) + if (master_info_index && !master_info_index->give_error_if_slave_running()) result= Sys_var_enum::global_update(thd, var); mysql_mutex_unlock(&LOCK_active_mi); mysql_mutex_lock(&LOCK_global_system_variables); -- cgit v1.2.1 From ed99e2cdd3928c03ed090ce3adcd3bcb698e91fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 18 Aug 2016 14:00:40 +0300 Subject: MDEV-10341: InnoDB: Failing assertion: mutex_own(mutex) - mutex_exit_func Followup from 5.5 patch. 
Removing memory barriers on intel is wrong as this doesn't prevent the compiler and/or processor from reorganizing reads before the mutex release. Forcing a memory barrier before reading the waiters will guarantee that no speculative reading takes place. --- storage/innobase/include/os0sync.h | 10 +--------- storage/xtradb/include/os0sync.h | 10 +--------- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h index bd9acb13028..0754210c47a 100644 --- a/storage/innobase/include/os0sync.h +++ b/storage/innobase/include/os0sync.h @@ -849,15 +849,7 @@ for synchronization */ } while (0); /** barrier definitions for memory ordering */ -#ifdef IB_STRONG_MEMORY_MODEL -/* Performance regression was observed at some conditions for Intel -architecture. Disable memory barrier for Intel architecture for now. */ -# define os_rmb do { } while(0) -# define os_wmb do { } while(0) -# define os_mb do { } while(0) -# define IB_MEMORY_BARRIER_STARTUP_MSG \ - "Memory barrier is not used" -#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) +#if defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) # define HAVE_MEMORY_BARRIER # define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE) # define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE) diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index c7a9318c841..f6207555f1a 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -900,15 +900,7 @@ for synchronization */ } while (0); /** barrier definitions for memory ordering */ -#ifdef IB_STRONG_MEMORY_MODEL -/* Performance regression was observed at some conditions for Intel -architecture. Disable memory barrier for Intel architecture for now. */ -# define os_rmb do { } while(0) -# define os_wmb do { } while(0) -# define os_mb do { } while(0) -# define IB_MEMORY_BARRIER_STARTUP_MSG \ - "Memory barrier is not used" -#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) +#if defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE) # define HAVE_MEMORY_BARRIER # define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE) # define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE) -- cgit v1.2.1 From 8cbc96b2f52fe7c1fde157603bb6c90eb279144c Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 24 Aug 2016 15:00:59 +1000 Subject: Markdown README for a prettier github representation --- CMakeLists.txt | 2 +- README | 55 --------------------------------------------- README.md | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 56 deletions(-) delete mode 100644 README create mode 100644 README.md diff --git a/CMakeLists.txt b/CMakeLists.txt index 3ace4084acd..a87d08cd1ae 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -443,7 +443,7 @@ ADD_CUSTOM_TARGET(INFO_BIN ALL WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) -INSTALL_DOCUMENTATION(README CREDITS COPYING COPYING.LESSER COPYING.thirdparty +INSTALL_DOCUMENTATION(README.md CREDITS COPYING COPYING.LESSER COPYING.thirdparty EXCEPTIONS-CLIENT COMPONENT Readme) # MDEV-6526 these files are not installed anymore #INSTALL_DOCUMENTATION(${CMAKE_BINARY_DIR}/Docs/INFO_SRC diff --git a/README b/README deleted file mode 100644 index a9413f16b04..00000000000 --- a/README +++ /dev/null @@ -1,55 +0,0 @@ -MariaDB is designed as a drop-in replacement of MySQL(R) with more -features, new storage engines, fewer bugs, and better performance. - -MariaDB is brought to you by the MariaDB Foundation. 
-Please read the CREDITS file for details about the MariaDB Foundation, -and who is developing MariaDB. - -MariaDB is developed by many of the original developers of MySQL who -now work for the MariadB Foundation and the MariaDB Corporation, and by many people in -the community. - -MySQL, which is the base of MariaDB, is a product and trademark of Oracle -Corporation, Inc. For a list of developers and other contributors, -see the Credits appendix. You can also run 'SHOW authors' to get a -list of active contributors. - -A description of the MariaDB project and a manual can be found at: -http://mariadb.org/ -https://mariadb.com/kb/en/ -https://mariadb.com/kb/en/mariadb-vs-mysql-features/ -https://mariadb.com/kb/en/mariadb-versus-mysql-features/ -https://mariadb.com/kb/en/mariadb-versus-mysql-compatibility/ - -As MariaDB is a full replacement of MySQL, the MySQL manual at -http://dev.mysql.com/doc is generally applicable. - -More help is available from the Maria Discuss mailing list -https://launchpad.net/~maria-discuss -and the #maria IRC channel on Freenode. - -*************************************************************************** - -NOTE: - -MariaDB is specifically available only under version 2 of the GNU -General Public License (GPLv2). (I.e. Without the "any later version" -clause.) This is inherited from MySQL. Please see the README file in -the MySQL distribution for more information. - -License information can be found in the COPYING, COPYING.LESSER, -and COPYING.thirdparty files. - -*************************************************************************** - -IMPORTANT: - -Bug and/or error reports regarding MariaDB should be submitted at -http://mariadb.org/jira - -Bugs in the MySQL code can also be submitted at http://bugs.mysql.com - -The code for MariaDB, including all revision history, can be found at: -https://github.com/MariaDB/server - -*************************************************************************** diff --git a/README.md b/README.md new file mode 100644 index 00000000000..f34e6a43b71 --- /dev/null +++ b/README.md @@ -0,0 +1,70 @@ +## MariaDB: drop-in replacement for MySQL + +MariaDB is designed as a drop-in replacement of MySQL(R) with more +features, new storage engines, fewer bugs, and better performance. + +MariaDB is brought to you by the MariaDB Foundation. +Please read the CREDITS file for details about the MariaDB Foundation, +and who is developing MariaDB. + +MariaDB is developed by many of the original developers of MySQL who +now work for the MariadB Foundation and the MariaDB Corporation, and by many people in +the community. + +MySQL, which is the base of MariaDB, is a product and trademark of Oracle +Corporation, Inc. For a list of developers and other contributors, +see the Credits appendix. You can also run 'SHOW authors' to get a +list of active contributors. + +A description of the MariaDB project and a manual can be found at: +http://mariadb.org/ +https://mariadb.com/kb/en/ +https://mariadb.com/kb/en/mariadb-vs-mysql-features/ +https://mariadb.com/kb/en/mariadb-versus-mysql-features/ +https://mariadb.com/kb/en/mariadb-versus-mysql-compatibility/ + +As MariaDB is a full replacement of MySQL, the MySQL manual at +http://dev.mysql.com/doc is generally applicable. + +Help: +----- + +More help is available from the Maria Discuss mailing list +https://launchpad.net/~maria-discuss +and the #maria IRC channel on Freenode. 
+ + +License: +-------- + +*************************************************************************** + +NOTE: + +MariaDB is specifically available only under version 2 of the GNU +General Public License (GPLv2). (I.e. Without the "any later version" +clause.) This is inherited from MySQL. Please see the README file in +the MySQL distribution for more information. + +License information can be found in the COPYING, COPYING.LESSER, +and COPYING.thirdparty files. + +*************************************************************************** + +Bug Reports: +------------ + +Bug and/or error reports regarding MariaDB should be submitted at +http://mariadb.org/jira + +Bugs in the MySQL code can also be submitted at http://bugs.mysql.com + +The code for MariaDB, including all revision history, can be found at: +https://github.com/MariaDB/server + +*************************************************************************** + +Code status: +------------ + +* [![tests status](https://secure.travis-ci.org/MariaDB/server.png?branch=10.2)](https://travis-ci.org/MariaDB/server) travis-ci.org (10.2 branch) -- cgit v1.2.1 From 5bbe929d706e26cb3f9b291da6009526a17b1545 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Wed, 24 Aug 2016 17:39:57 +0300 Subject: MDEV-10604 Create a list of unstable MTR tests to be disabled in distribution builds - mysql-test/unstable-tests list is created, it includes = tests identified as unstable by Debian; = tests which failed in buildbot on 10.0 over the last ~6 months and were not fixed; = tests which have been recently modified or newly added - '*' wildcard is now supported in skip lists --- mysql-test/lib/mtr_cases.pm | 23 +++- mysql-test/unstable-tests | 254 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 274 insertions(+), 3 deletions(-) create mode 100644 mysql-test/unstable-tests diff --git a/mysql-test/lib/mtr_cases.pm b/mysql-test/lib/mtr_cases.pm index b76b10d42ed..10e5fd5c337 100644 --- a/mysql-test/lib/mtr_cases.pm +++ b/mysql-test/lib/mtr_cases.pm @@ -313,6 +313,7 @@ sub combinations_from_file($$) } our %disabled; +our %disabled_wildcards; sub parse_disabled { my ($filename, $suitename) = @_; @@ -321,10 +322,18 @@ sub parse_disabled { chomp; next if /^\s*#/ or /^\s*$/; mtr_error("Syntax error in $filename line $.") - unless /^\s*(?:([-0-9A-Za-z_\/]+)\.)?([-0-9A-Za-z_]+)\s*:\s*(.*?)\s*$/; - mtr_error("Wrong suite name in $filename line $.") + unless /^\s*(?:([-0-9A-Za-z_\/]+)\.)?([-0-9A-Za-z_\*]+)\s*:\s*(.*?)\s*$/; + mtr_error("Wrong suite name in $filename line $.: suitename = $suitename but the file says $1") if defined $1 and defined $suitename and $1 ne $suitename; - $disabled{($1 || $suitename || '') . ".$2"} = $3; + my ($sname, $casename, $text)= (($1 || $suitename || ''), $2, $3); + + if ($casename =~ /\*/) { + # Wildcard + $disabled_wildcards{$sname . ".$casename"}= $text; + } + else { + $disabled{$sname . ".$casename"}= $text; + } } close DISABLED; } @@ -721,6 +730,14 @@ sub collect_one_test_case { # Check for disabled tests # ---------------------------------------------------------------------- my $disable = $disabled{".$tname"} || $disabled{$name}; + if (not $disable) { + foreach my $w (keys %disabled_wildcards) { + if ($name =~ /^$w/) { + $disable= $disabled_wildcards{$w}; + last; + } + } + } if (not defined $disable and $suite->{parent}) { $disable = $disabled{$suite->{parent}->{name} . 
".$tname"}; } diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests new file mode 100644 index 00000000000..6a46602eb07 --- /dev/null +++ b/mysql-test/unstable-tests @@ -0,0 +1,254 @@ +############################################################################## +# +# List the test cases which, unlike tests from disabled.def files, +# can still be run on the current tree meaningfully, but are known +# or suspected to fail sporadically on different reasons. +# +# Most common reasons are either test failures observed in buildbot, +# or recent modifications to the tests which make their stability +# unknown. +# +# Tests included due to recent modifications are later removed from the +# list, if during a certain period they do not fail (and are not +# modified again). Tests included due to intermittent failures are +# removed when corresponding bug reports are closed. +# +# Separate the test case name and the comment with ':'. +# +# . : MDEV-xxxxx - +# +# '*' wildcard in testcase names is supported. +# +# To use the list, run MTR with --skip-test-list=unstable-tests option. +# +############################################################################## + +main.bootstrap : Modified on 2016-06-18 (MDEV-9969) +main.create_delayed : MDEV-10605 - failed with timeout +main.create_or_replace : Modified on 2016-06-23 (MDEV-9728) +main.ctype_recoding : Modified on 2016-06-10 (MDEV-10181) +main.ctype_utf8 : Modified on 2016-06-21 (merge) +main.ctype_utf8mb4 : Modified on 2016-06-21 (merge) +main.events_1 : Modified on 2016-06-21 (MDEV-9524) +main.func_group : Modified on 2016-08-08 (MDEV-10468) +main.func_in : Modified on 2016-06-20 (MDEV-10020) +main.func_math : Modified on 2016-08-10 (merge) +main.func_misc : Modified on 2016-08-10 (merge) +main.grant2 : Modified on 2016-07-18 (MDEV-8569) +main.help : Modified on 2016-06-21 (MDEV-9524) +main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown +main.index_intersect_innodb : MDEV-10643 - failed with timeout +main.index_merge_innodb : MDEV-7142 - sporadic wrong execution plan +main.information_schema_stats : Modified on 2016-07-25 (MDEV-10428) +main.innodb_mysql_lock : MDEV-7861 - sporadic lock detection failure +main.insert_innodb : Modified on 2016-06-14 (merge from upstream) +main.loaddata : Modified on 2016-08-10 (merge) +main.locale : Modified on 2016-06-21 (merge) +main.mdev-504 : MDEV-10607 - sporadic "can't connect" +main.mdev375 : MDEV-10607 - sporadic "can't connect" +main.merge : MDEV-10607 - sporadic "can't connect" +main.multi_update : Modified on 2016-06-20 (MDEV-5973) +main.myisam_enable_keys-10506 : New test, added on 2016-08-10 (MDEV-10506) +main.mysqlcheck : Modified on 2016-08-10 (merge) +main.mysqldump : MDEV-10512 - sporadic assertion failure +main.mysqltest : MDEV-9269 - fails on Alpha +main.named_pipe : Modified on 2016-08-02 (MDEV-10383) +main.openssl_1 : Modified on 2016-07-11 (MDEV-10211) +main.parser : Modified on 2016-06-21 (merge) +main.pool_of_threads : MDEV-10100 - sporadic error on detecting max connections +main.ps_1general : Modified on 2016-07-12 (merge) +main.range : Modified on 2016-08-10 (merge) +main.range_mrr_icp : Modified on 2016-08-10 (merge) +main.query_cache : MDEV-10611 - sporadic mutex problem +main.shutdown : MDEV-10612 - sporadic crashes +main.sp-prelocking : Modified on 2016-08-10 (merge) +main.sp-security : MDEV-10607 - sporadic "can't connect" +main.ssl : MDEV-10211 - different ciphers on some platforms +main.ssl_ca : Modified on 2016-07-11 (MDEV-10211) 
+main.ssl_compress : Modified on 2016-07-11 (MDEV-10211) +main.ssl_timeout : Modified on 2016-07-11 (MDEV-10211) +main.stat_tables_par_innodb : MDEV-10515 - sporadic wrong results +main.status_user : Modified on 2016-06-20 (MDEV-8633) +main.subselect_innodb : MDEV-10614 - sporadic wrong results +main.temp_table : Modified on 2016-06-18 (MDEV-8569) +main.type_date : Modified on 2016-08-10 (merge) +main.type_datetime : Modified on 2016-06-16 (MDEV-9374) +main.view : Modified on 2016-08-10 (merge) +main.xtradb_mrr : Modified on 2016-08-04 (MDEV-9946) + +#---------------------------------------------------------------- + +archive.archive-big : MDEV-10615 - table is marked as crashed +archive.discover : MDEV-10510 - table is marked as crashed + +#---------------------------------------------------------------- + +binlog.binlog_commit_wait : MDEV-10150 - Error: too much time elapsed +binlog.binlog_dmls_on_tmp_tables_readonly : New test, added on 2016-05-04 (upstream) +binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint + +#---------------------------------------------------------------- + +connect.tbl : MDEV-9844, MDEV-10179 - sporadic crashes, valgrind warnings, wrong results +connect.jdbc : New test, added on 2016-07-15 +connect.jdbc-new : New test, added on 2016-07-14 +connect.jdbc-oracle : New test, added on 2016-07-13 +connect.jdbc-postgresql : New test, added on 2016-07-13 + +#---------------------------------------------------------------- + +federated.federatedx : MDEV-10617 - Wrong checksum, timeouts +federated.federated_innodb : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips +federated.federated_partition : MDEV-10417 - Fails on Mips +federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips + +#---------------------------------------------------------------- + +funcs_1.processlist_priv_no_prot : Include file modified on 2016-07-12 (merge) +funcs_1.processlist_priv_ps : Include file modified on 2016-07-12 (merge) + +#---------------------------------------------------------------- + +innodb.binlog_consistent : MDEV-10618 - Server fails to start +innodb.innodb-alter-table : MDEV-10619 - Testcase timeout +innodb.innodb-alter-tempfile : Modified on 2016-08-09 (MDEV-10469) +innodb.innodb_corrupt_bit : Modified on 2016-06-21 (merge) +innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan +innodb.innodb-fk-warnings : Modified on 2016-07-18 (MDEV-8569) +innodb.innodb-fkcheck : Modified on 2016-06-13 (MDEV-10083) +innodb.innodb-wl5522 : rdiff file modified on 2016-08-10 (merge) +innodb.innodb-wl5522-debug-zip : MDEV-10427 - Warning: database page corruption + +#---------------------------------------------------------------- + +mroonga/storage.column_datetime_32bit_2038 : Wrong result on Alpha +mroonga/storage.column_datetime_32bit_before_unix_epoch : Wrong result on Alpha +mroonga/storage.column_datetime_32bit_max : Wrong result on Alpha +mroonga/storage.column_datetime_32bit_out_of_range : Wrong result on Alpha +mroonga/storage.index_multiple_column_unique_date_32bit_equal : Wrong result on Alpha +mroonga/storage.index_multiple_column_unique_date_order_32bit_desc : Wrong result on Alpha + +#---------------------------------------------------------------- + +multi_source.gtid : MDEV-10620, MDEV-10417 - Timeout in wait condition, fails on Mips +multi_source.multisource : MDEV-10417 - Fails on Mips +multi_source.simple : MDEV-4633 - Wrong slave status output +multi_source.status_vars : MDEV-4632 - failed while 
waiting for Slave_received_heartbeats + +#---------------------------------------------------------------- + +parts.partition_float_myisam : MDEV-10621 - Testcase timeout +parts.partition_int_myisam : MDEV-10621 - Testcase timeout + +#---------------------------------------------------------------- + +perfschema.digest_table_full : Modified on 2016-06-21 (merge) +perfschema.func_file_io : MDEV-5708 - fails for s390x +perfschema.func_mutex : MDEV-5708 - fails for s390x +perfschema.rpl_gtid_func : Modified on 2016-06-21 (merge) +perfschema.sizing_low : Modified on 2016-04-26 (5.6.30 merge) +perfschema.socket_summary_by_event_name_func : MDEV-10622 - Socket summary tables do not match +perfschema.start_server_low_digest : Modified on 2016-06-21 (merge) +perfschema.statement_digest : Modified on 2016-06-21 (merge) +perfschema.statement_digest_consumers : Modified on 2016-06-21 (merge) +perfschema.statement_digest_long_query : Modified on 2016-06-21 (merge) +perfschema.table_name : New test, added on 2016-04-26 (5.6.30 merge) + +#---------------------------------------------------------------- + +plugins.feedback_plugin_send : MDEV-7932 - ssl failed for url +plugins.pam : Modified on 2016-08-03 (MDEV-7329) +plugins.pam_cleartext : Modified on 2016-08-03 +plugins.server_audit : MDEV-9562 - crashes on sol10-sparc +plugins.thread_pool_server_audit : MDEV-9562 - crashes on sol10-sparc + +#---------------------------------------------------------------- + +roles.rpl_grant_revoke_current_role-8638 : New test, added on 2016-06-20 (MDEV-8638) +roles.set_role-9614 : New test, added on 2016-05-30 (MDEV-9614) + +#---------------------------------------------------------------- + +rpl.last_insert_id : MDEV-10625 - warnings in error log +rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips +rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips +rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log +rpl.rpl_binlog_index : MDEV-9501 - Warning: failed registering on master +rpl.rpl_checksum_cache : MDEV-10626 - Testcase timeout +rpl.rpl_circular_for_4_hosts : MDEV-10627 - Testcase timeout +rpl.rpl_ddl : MDEV-10417 - Fails on Mips +rpl.rpl_gtid_crash : MDEV-9501 - Warning: failed registering on master +rpl.rpl_gtid_master_promote : MDEV-10628 - Timeout in sync_with_master +rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown +rpl.rpl_gtid_until : MDEV-10625 - warnings in error log +rpl.rpl_ignore_table : Modified on 2016-06-22 +rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips +rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips +rpl.rpl_mdev6020 : MDEV-10630, MDEV-10417 - Timeouts, fails on Mips +rpl.rpl_mdev6386 : MDEV-10631 - Wrong result on slave +rpl.rpl_parallel : MDEV-10632, MDEV-10653 - Failures to sync, timeouts +rpl.rpl_parallel_temptable : MDEV-10356 - Crash in close_thread_tables +rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips +rpl.rpl_row_drop_create_temp_table : MDEV-10626 - Testcase timeout +rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Wrong plugin status +rpl.rpl_slave_grp_exec : MDEV-10514 - Unexpected deadlock +rpl.rpl_switch_stm_row_mixed : MDEV-10611 - Wrong usage of mutex +rpl.rpl_sync : MDEV-10633 - Database page corruption +rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries +rpl.sec_behind_master-5114 : MDEV-8518 - Wrong value of Seconds_Behind_Master 
+rpl.rpl_skip_replication : MDEV-9268 - Fails with timeout in sync_slave_with_master on Alpha + +#---------------------------------------------------------------- + +spider.* : MDEV-9329 - tests are too memory-consuming + +spider/bg.direct_aggregate : MDEV-7098 - Trying to unlock mutex that wasn't locked +spider/bg.direct_aggregate_part : MDEV-7098 - Trying to unlock mutex that wasn't locked +spider/bg.ha : MDEV-7914, MDEV-9329 - Crash, failures on s390x +spider/bg.ha_part : MDEV-9329 - Fails on Ubuntu/s390x +spider/bg.spider_fixes : MDEV-7098, MDEV-9329 - Mutex problem, failures on s390x +spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x + +#---------------------------------------------------------------- + +stress.ddl_innodb : MDEV-10635 - Testcase timeout + +#---------------------------------------------------------------- + +sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu/s390x +sys_vars.general_log_file_basic : Modified on 2016-08-09 (MDEV-10465) +sys_vars.slow_query_log_file_basic : Modified on 2016-08-09 (MDEV-10465) +sys_vars.innodb_buffer_pool_dump_pct_basic : MDEV-10651 - sporadic failure on file_exists + +#---------------------------------------------------------------- + +tokudb.background_job_manager : MDEV-10327 - Assertion failure on server shutdown +tokudb.cluster_filter_unpack_varchar : MDEV-10636 - Wrong execution plan +tokudb.* : MDEV-9891 - massive crashes on shutdown +tokudb_alter_table.* : MDEV-9891 - massive crashes on shutdown +tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output +tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output +tokudb_bugs.* : MDEV-9891 - massive crashes on shutdown +tokudb_parts.* : MDEV-9891 - massive crashes on shutdown +rpl-tokudb.* : MDEV-9891 - massive crashes on shutdown, also modified on 2016-06-10 (Merge) +tokudb/tokudb_add_index.* : MDEV-9891 - massive crashes on shutdown +tokudb/tokudb_backup.* : MDEV-9891 - massive crashes on shutdown +tokudb/tokudb_mariadb.* : MDEV-9891 - massive crashes on shutdown +tokudb/tokudb_sys_vars.* : MDEV-9891 - massive crashes on shutdown +tokudb/tokudb_rpl.* : MDEV-9891 - massive crashes on shutdown + + +#---------------------------------------------------------------- + +unit.ma_test_loghandler : MDEV-10638 - record read not ok + +#---------------------------------------------------------------- + +vcol.charsets : Added on 2016-06-23 +vcol.not_supported : MDEV-10639 - Testcase timeout +vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout + +#---------------------------------------------------------------- -- cgit v1.2.1 From ea91bb6801b1b619d64fa137ea351eca9de683ec Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 26 Jul 2016 12:34:04 +0200 Subject: MDEV-10361 Crash in pam_securid.so with auth_pam connecting from SQLyog auth_pam: debug output --- mysql-test/include/default_mysqld.cnf | 2 ++ mysql-test/suite/plugins/r/pam_cleartext.result | 2 +- mysql-test/suite/plugins/t/pam_cleartext.test | 2 +- plugin/auth_pam/auth_pam.c | 30 +++++++++++++++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/mysql-test/include/default_mysqld.cnf b/mysql-test/include/default_mysqld.cnf index 17b2fd5b2bc..b5b16461781 100644 --- a/mysql-test/include/default_mysqld.cnf +++ b/mysql-test/include/default_mysqld.cnf @@ -45,6 +45,8 @@ loose-feedback-debug-startup-interval=20 loose-feedback-debug-first-interval=60 loose-feedback-debug-interval=60 +loose-pam-debug + loose-innodb_data_file_path= ibdata1:12M:autoextend loose-innodb_buffer_pool_size= 8M 
loose-innodb_lru_scan_depth= 100 diff --git a/mysql-test/suite/plugins/r/pam_cleartext.result b/mysql-test/suite/plugins/r/pam_cleartext.result index 00e0e94618e..3b7aada16b2 100644 --- a/mysql-test/suite/plugins/r/pam_cleartext.result +++ b/mysql-test/suite/plugins/r/pam_cleartext.result @@ -2,7 +2,7 @@ install plugin pam soname 'auth_pam.so'; create user test_pam identified via pam using 'mariadb_mtr'; create user pam_test; grant proxy on pam_test to test_pam; -show variables like 'pam%'; +show variables like 'pam_use_%'; Variable_name Value pam_use_cleartext_plugin ON drop user test_pam; diff --git a/mysql-test/suite/plugins/t/pam_cleartext.test b/mysql-test/suite/plugins/t/pam_cleartext.test index 6b9bf087ce5..aade924c43e 100644 --- a/mysql-test/suite/plugins/t/pam_cleartext.test +++ b/mysql-test/suite/plugins/t/pam_cleartext.test @@ -1,7 +1,7 @@ --source pam_init.inc -show variables like 'pam%'; +show variables like 'pam_use_%'; --error 1 --exec echo FAIL | $MYSQL_TEST -u test_pam --plugin-dir=$plugindir diff --git a/plugin/auth_pam/auth_pam.c b/plugin/auth_pam/auth_pam.c index 3e3462d3ba0..ac1b3b2da09 100644 --- a/plugin/auth_pam/auth_pam.c +++ b/plugin/auth_pam/auth_pam.c @@ -17,6 +17,7 @@ #define _GNU_SOURCE 1 /* for strndup */ #include +#include #include #include #include @@ -44,6 +45,13 @@ char *strndup(const char *from, size_t length) } #endif +#ifndef DBUG_OFF +static char pam_debug = 0; +#define PAM_DEBUG(X) do { if (pam_debug) { fprintf X; } } while(0) +#else +#define PAM_DEBUG(X) /* no-op */ +#endif + static int conv(int n, const struct pam_message **msg, struct pam_response **resp, void *data) { @@ -91,12 +99,17 @@ static int conv(int n, const struct pam_message **msg, 4 means "password-like input, echo disabled" C'est la vie. */ param->buf[0] = msg[i]->msg_style == PAM_PROMPT_ECHO_ON ? 2 : 4; + PAM_DEBUG((stderr, "PAM: conv: send(%.*s)\n", (int)(param->ptr - param->buf - 1), param->buf)); if (param->vio->write_packet(param->vio, param->buf, param->ptr - param->buf - 1)) return PAM_CONV_ERR; pkt_len = param->vio->read_packet(param->vio, &pkt); if (pkt_len < 0) + { + PAM_DEBUG((stderr, "PAM: conv: recv() ERROR\n")); return PAM_CONV_ERR; + } + PAM_DEBUG((stderr, "PAM: conv: recv(%.*s)\n", pkt_len, pkt)); /* allocate and copy the reply to the response array */ if (!((*resp)[i].resp= strndup((char*) pkt, pkt_len))) return PAM_CONV_ERR; @@ -134,9 +147,16 @@ static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) param.ptr = param.buf + 1; param.vio = vio; + PAM_DEBUG((stderr, "PAM: pam_start(%s, %s)\n", service, info->user_name)); DO( pam_start(service, info->user_name, &pam_start_arg, &pamh) ); + + PAM_DEBUG((stderr, "PAM: pam_authenticate(0)\n")); DO( pam_authenticate (pamh, 0) ); + + PAM_DEBUG((stderr, "PAM: pam_acct_mgmt(0)\n")); DO( pam_acct_mgmt(pamh, 0) ); + + PAM_DEBUG((stderr, "PAM: pam_get_item(PAM_USER)\n")); DO( pam_get_item(pamh, PAM_USER, (pam_get_item_3_arg) &new_username) ); if (new_username && strcmp(new_username, info->user_name)) @@ -145,6 +165,7 @@ static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) end: pam_end(pamh, status); + PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, new_username)); return status == PAM_SUCCESS ? 
CR_OK : CR_ERROR; } @@ -163,8 +184,17 @@ static MYSQL_SYSVAR_BOOL(use_cleartext_plugin, use_cleartext_plugin, "supports simple PAM policies that don't require anything besides " "a password", NULL, NULL, 0); +#ifndef DBUG_OFF +static MYSQL_SYSVAR_BOOL(debug, pam_debug, PLUGIN_VAR_OPCMDARG, + "Log all PAM activity", NULL, NULL, 0); +#endif + + static struct st_mysql_sys_var* vars[] = { MYSQL_SYSVAR(use_cleartext_plugin), +#ifndef DBUG_OFF + MYSQL_SYSVAR(debug), +#endif NULL }; -- cgit v1.2.1 From 1b7c5dedf7266d73c9c402cefee681251aea1e18 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 24 Aug 2016 15:32:48 -0400 Subject: MDEV-10566: Create role statement replicated inconsistently in Galera Cluster In galera cluster, the definer (and thus binlog invoker) must be set for CREATE ROLE before Query_log_event is created during TOI on the originating node. --- mysql-test/suite/galera/r/galera_roles.result | 41 ++++++++++++++++++++++++++- mysql-test/suite/galera/t/galera_roles.test | 32 +++++++++++++++++++++ sql/sql_class.cc | 4 +++ sql/wsrep_mysqld.cc | 6 ++++ 4 files changed, 82 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/r/galera_roles.result b/mysql-test/suite/galera/r/galera_roles.result index c0cdbc0e338..d8c13758797 100644 --- a/mysql-test/suite/galera/r/galera_roles.result +++ b/mysql-test/suite/galera/r/galera_roles.result @@ -69,8 +69,8 @@ SET ROLE role1; FLUSH TABLES; SELECT * FROM mysql.roles_mapping; Host User Role Admin_option - role1 Y localhost foo role1 N +localhost root role1 Y SHOW TABLES FROM test1; Tables_in_test1 t1 @@ -153,4 +153,43 @@ role1 # Connect with node_1 DROP USER foo@localhost; DROP DATABASE test1; +# +# MDEV-10566: Create role statement replicated inconsistently in Galera Cluster +# + +# On node_1 +CREATE USER foo@localhost; +CREATE ROLE role1; +CREATE ROLE role2 WITH ADMIN CURRENT_USER; +CREATE ROLE role3 WITH ADMIN foo@localhost; +CREATE ROLE role4 WITH ADMIN role1; +SELECT * FROM mysql.roles_mapping; +Host User Role Admin_option + role1 role4 Y +localhost foo role3 Y +localhost root role1 Y +localhost root role2 Y +SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; +GRANTEE ROLE_NAME IS_GRANTABLE +role1 role4 YES +root@localhost role1 YES +root@localhost role2 YES + +# On node_2 +SELECT * FROM mysql.roles_mapping; +Host User Role Admin_option + role1 role4 Y +localhost foo role3 Y +localhost root role1 Y +localhost root role2 Y +SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; +GRANTEE ROLE_NAME IS_GRANTABLE +role1 role4 YES +root@localhost role1 YES +root@localhost role2 YES +DROP ROLE role1; +DROP ROLE role2; +DROP ROLE role3; +DROP ROLE role4; +DROP USER foo@localhost; # End of test diff --git a/mysql-test/suite/galera/t/galera_roles.test b/mysql-test/suite/galera/t/galera_roles.test index f9a15126e5e..16e417d1fdb 100644 --- a/mysql-test/suite/galera/t/galera_roles.test +++ b/mysql-test/suite/galera/t/galera_roles.test @@ -163,5 +163,37 @@ disconnect foo_node_2; DROP USER foo@localhost; DROP DATABASE test1; +--echo # +--echo # MDEV-10566: Create role statement replicated inconsistently in Galera Cluster +--echo # +--echo +--echo # On node_1 +--connection node_1 +CREATE USER foo@localhost; +CREATE ROLE role1; +CREATE ROLE role2 WITH ADMIN CURRENT_USER; +CREATE ROLE role3 WITH ADMIN foo@localhost; +CREATE ROLE role4 WITH ADMIN role1; + +--sorted_result +SELECT * FROM mysql.roles_mapping; +--sorted_result +SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; + +--echo +--echo # On node_2 +--connection node_2 +--sorted_result +SELECT 
* FROM mysql.roles_mapping; +--sorted_result +SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; + +# Cleanup +DROP ROLE role1; +DROP ROLE role2; +DROP ROLE role3; +DROP ROLE role4; +DROP USER foo@localhost; + --source include/galera_end.inc --echo # End of test diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 37bacc986f7..76f8b98c55e 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -5249,7 +5249,11 @@ void THD::get_definer(LEX_USER *definer, bool role) { binlog_invoker(role); #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) +#ifdef WITH_WSREP + if ((wsrep_applier || slave_thread) && has_invoker()) +#else if (slave_thread && has_invoker()) +#endif { definer->user = invoker_user; definer->host= invoker_host; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 91a9a1f210d..4117d5231f4 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -1236,6 +1236,12 @@ static int wsrep_TOI_begin(THD *thd, char *db_, char *table_, case SQLCOM_ALTER_EVENT: buf_err= wsrep_alter_event_query(thd, &buf, &buf_len); break; + case SQLCOM_CREATE_ROLE: + if (sp_process_definer(thd)) + { + WSREP_WARN("Failed to set CREATE ROLE definer for TOI."); + } + /* fallthrough */ default: buf_err= wsrep_to_buf_helper(thd, thd->query(), thd->query_length(), &buf, &buf_len); -- cgit v1.2.1 From 8b09db8bfb81f1e7695cfcfa6ce2bec45247171f Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Wed, 24 Aug 2016 17:13:20 -0400 Subject: Fixes/improvements in galera test suite --- .../include/auto_increment_offset_restore.inc | 35 ++++++++++++++++++++ .../galera/include/auto_increment_offset_save.inc | 37 ++++++++++++++++++++++ .../suite/galera/t/galera_ist_restart_joiner.test | 16 ++++------ mysql-test/suite/galera/t/galera_pc_ignore_sb.test | 17 ++++------ .../suite/galera/t/galera_restart_nochanges.test | 17 ++++------ mysql-test/suite/galera/t/galera_split_brain.test | 21 ++++++------ .../suite/galera/t/galera_suspend_slave.test | 15 +++------ .../suite/galera/t/galera_var_dirty_reads.test | 16 +++------- mysql-test/suite/galera/t/mysql-wsrep#31.test | 14 ++++++++ .../galera_3nodes/t/galera_certification_ccc.test | 21 ++++-------- 10 files changed, 131 insertions(+), 78 deletions(-) create mode 100644 mysql-test/suite/galera/include/auto_increment_offset_restore.inc create mode 100644 mysql-test/suite/galera/include/auto_increment_offset_save.inc diff --git a/mysql-test/suite/galera/include/auto_increment_offset_restore.inc b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc new file mode 100644 index 00000000000..6218dfd6f2c --- /dev/null +++ b/mysql-test/suite/galera/include/auto_increment_offset_restore.inc @@ -0,0 +1,35 @@ +# See auto_increment_offset_restore.inc for details. + +if (!$node_1) +{ + --die ERROR IN TEST: $node_1 must be set before sourcing auto_increment_offset_save.inc +} + +if (!$node_2) +{ + --die ERROR IN TEST: $node_2 must be set before sourcing auto_increment_offset_save.inc +} + +if (!$auto_increment_offset_node_1) +{ + --die ERROR IN TEST: $auto_increment_offset_node_1 must be set before sourcing auto_increment_offset_save.inc +} + +if (!$auto_increment_offset_node_2) +{ + --die ERROR IN TEST: $auto_increment_offset_node_2 must be set before sourcing auto_increment_offset_save.inc +} + +# Restore original auto_increment_offset values. 
+--disable_query_log +--connection $node_1 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; +--connection $node_2 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; + +if ($node_3) +{ +--connection $node_3 +--eval SET @@global.auto_increment_offset = $auto_increment_offset_node_3; +} +--enable_query_log diff --git a/mysql-test/suite/galera/include/auto_increment_offset_save.inc b/mysql-test/suite/galera/include/auto_increment_offset_save.inc new file mode 100644 index 00000000000..3c4db3f381c --- /dev/null +++ b/mysql-test/suite/galera/include/auto_increment_offset_save.inc @@ -0,0 +1,37 @@ +# This file can be used to save the @@global.auto_increment_offset value at +# the beginning of any test that intends to restart any of the participating +# nodes. This is required as the node may get auto-assigned a different +# auto_increment_offset value on restart, which could cause MTR's internal +# post-check to fail. auto_increment_offset_restore.inc can be used at the +# end of the test to restore these saved values. + +# Parameters +# ---------- +# $node_1 +# Connection handle for 1st node +# $node_2 +# Connection handle for 2nd node +# $node_3 (optional) +# Connection handle for 3rd node + +if (!$node_1) +{ + --die ERROR IN TEST: $node_1 must be set before sourcing auto_increment_offset_save.inc +} + +if (!$node_2) +{ + --die ERROR IN TEST: $node_2 must be set before sourcing auto_increment_offset_save.inc +} + +--connection $node_1 +let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; +--connection $node_2 +let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; + +if ($node_3) +{ + --connection $node_3 + let $auto_increment_offset_node_3 = `SELECT @@global.auto_increment_offset`; +} + diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test index 11664affe7c..931daaad30d 100644 --- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test +++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test @@ -10,10 +10,9 @@ --source suite/galera/include/galera_have_debug_sync.inc # Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)); INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a'), (4, 'a'), (5, 'a'),(6, 'a'); @@ -114,10 +113,7 @@ SELECT COUNT(*) = 0 FROM t3; DROP TABLE t1, t2, t3; # Restore original auto_increment_offset values. ---disable_query_log ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc diff --git a/mysql-test/suite/galera/t/galera_pc_ignore_sb.test b/mysql-test/suite/galera/t/galera_pc_ignore_sb.test index f63215ebe4a..84fd3a91857 100644 --- a/mysql-test/suite/galera/t/galera_pc_ignore_sb.test +++ b/mysql-test/suite/galera/t/galera_pc_ignore_sb.test @@ -6,10 +6,9 @@ --source include/have_innodb.inc # Save original auto_increment_offset values. 
---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --connection node_1 --let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address` @@ -40,10 +39,8 @@ SET GLOBAL wsrep_cluster_address = ''; --source include/start_mysqld.inc --source include/wait_until_connected_again.inc ---disable_query_log # Restore original auto_increment_offset values. ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc + diff --git a/mysql-test/suite/galera/t/galera_restart_nochanges.test b/mysql-test/suite/galera/t/galera_restart_nochanges.test index ba12c4c409c..0a6a0c5ccbe 100644 --- a/mysql-test/suite/galera/t/galera_restart_nochanges.test +++ b/mysql-test/suite/galera/t/galera_restart_nochanges.test @@ -6,10 +6,9 @@ --source include/have_innodb.inc # Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --connection node_1 CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; @@ -33,11 +32,9 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N DROP TABLE t1; ---disable_query_log # Restore original auto_increment_offset values. ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2a ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--let $node_2=node_2a +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc diff --git a/mysql-test/suite/galera/t/galera_split_brain.test b/mysql-test/suite/galera/t/galera_split_brain.test index e0298e55422..22f6370241c 100644 --- a/mysql-test/suite/galera/t/galera_split_brain.test +++ b/mysql-test/suite/galera/t/galera_split_brain.test @@ -6,17 +6,16 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +# Save original auto_increment_offset values. +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + call mtr.add_suppression("WSREP: TO isolation failed for: "); --connection node_1 --let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address` -# Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; - --connection node_2 --source include/kill_galera.inc @@ -44,10 +43,8 @@ SET GLOBAL wsrep_cluster_address = ''; --source include/wait_until_connected_again.inc # Restore original auto_increment_offset values. 
---disable_query_log ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2a ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--let $node_2=node_2a +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc diff --git a/mysql-test/suite/galera/t/galera_suspend_slave.test b/mysql-test/suite/galera/t/galera_suspend_slave.test index 236c65b73a7..5c622085804 100644 --- a/mysql-test/suite/galera/t/galera_suspend_slave.test +++ b/mysql-test/suite/galera/t/galera_suspend_slave.test @@ -8,10 +8,9 @@ --source include/have_innodb.inc # Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --connection node_1 CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; @@ -56,11 +55,7 @@ SELECT COUNT(*) = 1 FROM t1; DROP TABLE t1; ---disable_query_log # Restore original auto_increment_offset values. ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2a ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--let $node_2=node_2a +--source include/auto_increment_offset_restore.inc diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index 9eea8efdaf3..dfd8d5ecf29 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -5,13 +5,10 @@ --source include/galera_cluster.inc --source include/have_innodb.inc ---disable_query_log # Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; ---enable_query_log +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --connection node_2 --let $wsrep_cluster_address_saved = `SELECT @@global.wsrep_cluster_address` @@ -49,13 +46,8 @@ SELECT * FROM t1; # Cleanup DROP TABLE t1; ---disable_query_log # Restore original auto_increment_offset values. ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---enable_query_log +--source include/auto_increment_offset_restore.inc --source include/galera_end.inc --echo # End of test diff --git a/mysql-test/suite/galera/t/mysql-wsrep#31.test b/mysql-test/suite/galera/t/mysql-wsrep#31.test index eaace5d50dd..c669d4834ba 100644 --- a/mysql-test/suite/galera/t/mysql-wsrep#31.test +++ b/mysql-test/suite/galera/t/mysql-wsrep#31.test @@ -1,6 +1,11 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +# Save original auto_increment_offset values. 
+--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + --connection node_1 CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; @@ -36,4 +41,13 @@ if ($galera_wsrep_start_position != $expected_position) DROP TABLE t1; DROP DATABASE db; +--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 +--source include/wait_until_connected_again.inc + +# Restore original auto_increment_offset values. +--let $node_2=node_2a +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc + diff --git a/mysql-test/suite/galera_3nodes/t/galera_certification_ccc.test b/mysql-test/suite/galera_3nodes/t/galera_certification_ccc.test index e19169a350c..b4fe10bff0d 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_certification_ccc.test +++ b/mysql-test/suite/galera_3nodes/t/galera_certification_ccc.test @@ -10,12 +10,10 @@ --source include/galera_connect.inc # Save original auto_increment_offset values. ---connection node_1 -let $auto_increment_offset_node_1 = `SELECT @@global.auto_increment_offset`; ---connection node_2 -let $auto_increment_offset_node_2 = `SELECT @@global.auto_increment_offset`; ---connection node_3 -let $auto_increment_offset_node_3 = `SELECT @@global.auto_increment_offset`; +--let $node_1=node_1 +--let $node_2=node_2 +--let $node_3=node_3 +--source ../galera/include/auto_increment_offset_save.inc --connection node_1 CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; @@ -49,12 +47,7 @@ SELECT COUNT(*) = 2 FROM t1; DROP TABLE t1; # Restore original auto_increment_offset values. ---disable_query_log ---connection node_1 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_1; ---connection node_2 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_2; ---connection node_3 ---eval SET @@global.auto_increment_offset = $auto_increment_offset_node_3; ---enable_query_log +--source ../galera/include/auto_increment_offset_restore.inc + +--source include/galera_end.inc -- cgit v1.2.1 From 080ac47661871da7be425a73d5aa068806e9946c Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Thu, 25 Aug 2016 15:56:53 +0600 Subject: remove die() from BUILD/autorun.sh This patch removes die() function from the BUILD/autorun.sh. It was introduced in the c682570431 commit (Fix BUILD/autorun.sh to really bail out on error.). Last users of die() was removed in the 8664de22 commit (WL#5665: Removal of the autotools-based build system) and since it is not used anywhere. No functionality changes. Just cleanup. --- BUILD/autorun.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/BUILD/autorun.sh b/BUILD/autorun.sh index 1d7cf5561ad..39346270af9 100755 --- a/BUILD/autorun.sh +++ b/BUILD/autorun.sh @@ -19,8 +19,6 @@ # Create MySQL cmake configure wrapper -die() { echo "$@"; exit 1; } - # Use a configure script that will call CMake. 
path=`dirname $0` cp $path/cmake_configure.sh $path/../configure -- cgit v1.2.1 From dfa3046db48d8439cf3eb02ba6623fe2f6e44f91 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 25 Aug 2016 15:11:21 +0200 Subject: fix a test for windows --- mysql-test/r/named_pipe.result | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/r/named_pipe.result b/mysql-test/r/named_pipe.result index ddd48f0ba91..43fb44beece 100644 --- a/mysql-test/r/named_pipe.result +++ b/mysql-test/r/named_pipe.result @@ -2154,3 +2154,4 @@ Privat (Private Nutzung) Mobilfunk Warnings: Warning 1052 Column 'kundentyp' in group statement is ambiguous drop table t1; +FOUND /\[ERROR\] Create named pipe failed/ in second-mysqld.err -- cgit v1.2.1 From 2d65679384c36ae2e46b2f62538223c3d71fb00a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 25 Aug 2016 19:47:38 +0300 Subject: MDEV-10665: Json_writer produces extra members in output Fix an issue in Single_line_formatting_helper: flush_on_one_line() didn't clean up the buffered items which could cause them to be printed for the second time. This can't be ever observed by a user (see MDEV text for details). --- sql/my_json_writer.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc index e97db210da7..d36fdd1192a 100644 --- a/sql/my_json_writer.cc +++ b/sql/my_json_writer.cc @@ -330,6 +330,8 @@ void Single_line_formatting_helper::flush_on_one_line() ptr++; } owner->output.append(']'); + /* We've printed out the contents of the buffer, mark it as empty */ + buf_ptr= buffer; } -- cgit v1.2.1 From 3575618237d543df8ae137fb640bf3c1e8259c8b Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Thu, 25 Aug 2016 21:28:26 -0400 Subject: Post merge fixes. --- mysql-test/suite/galera/disabled.def | 3 ++- mysql-test/suite/galera/r/galera_roles.result | 16 ++++++------- mysql-test/suite/galera/t/MW-259.test | 2 +- .../suite/galera/t/galera_rsu_wsrep_desync.test | 2 +- mysql-test/suite/sys_vars/r/sysvars_wsrep.result | 14 +++++------ .../suite/sys_vars/r/wsrep_desync_basic.result | 4 ---- .../sys_vars/r/wsrep_max_ws_size_basic.result | 27 ++++++++-------------- .../suite/sys_vars/t/wsrep_max_ws_size_basic.test | 15 +++++------- mysql-test/suite/wsrep/r/variables.result | 2 -- sql/sys_vars.cc | 4 ++-- sql/wsrep_mysqld.h | 3 +++ sql/wsrep_var.cc | 26 +++++++++++++++++---- sql/wsrep_var.h | 2 ++ 13 files changed, 63 insertions(+), 57 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 4aa15d27661..5f8d9c6ddff 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -28,4 +28,5 @@ galera_flush : mysql-wsrep/issues/229 galera_transaction_read_only : mysql-wsrep/issues/229 galera_gcs_fragment : Incorrect arguments to SET galera_flush_local : Fails sporadically -galera_binlog_stmt_autoinc : TODO: investigate \ No newline at end of file +galera_binlog_stmt_autoinc : TODO: investigate +galera_concurrent_ctas : Test times out, investigate diff --git a/mysql-test/suite/galera/r/galera_roles.result b/mysql-test/suite/galera/r/galera_roles.result index d8c13758797..6312250c18d 100644 --- a/mysql-test/suite/galera/r/galera_roles.result +++ b/mysql-test/suite/galera/r/galera_roles.result @@ -170,10 +170,10 @@ localhost foo role3 Y localhost root role1 Y localhost root role2 Y SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; -GRANTEE ROLE_NAME IS_GRANTABLE -role1 role4 YES -root@localhost role1 YES -root@localhost role2 YES +GRANTEE ROLE_NAME 
IS_GRANTABLE IS_DEFAULT +role1 role4 YES NULL +root@localhost role1 YES NO +root@localhost role2 YES NO # On node_2 SELECT * FROM mysql.roles_mapping; @@ -183,10 +183,10 @@ localhost foo role3 Y localhost root role1 Y localhost root role2 Y SELECT * FROM INFORMATION_SCHEMA.APPLICABLE_ROLES; -GRANTEE ROLE_NAME IS_GRANTABLE -role1 role4 YES -root@localhost role1 YES -root@localhost role2 YES +GRANTEE ROLE_NAME IS_GRANTABLE IS_DEFAULT +role1 role4 YES NULL +root@localhost role1 YES NO +root@localhost role2 YES NO DROP ROLE role1; DROP ROLE role2; DROP ROLE role3; diff --git a/mysql-test/suite/galera/t/MW-259.test b/mysql-test/suite/galera/t/MW-259.test index ff9a30deed3..7298285f6ff 100644 --- a/mysql-test/suite/galera/t/MW-259.test +++ b/mysql-test/suite/galera/t/MW-259.test @@ -24,7 +24,7 @@ SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue'; --connection node_1b --sleep 2 ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'exit open_tables()' and INFO = 'SET GLOBAL wsrep_desync=1' +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'SET GLOBAL wsrep_desync=1' --source include/wait_condition.inc SET DEBUG_SYNC= 'now SIGNAL continue'; diff --git a/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test b/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test index dc7ff11a9f5..882f846fe67 100644 --- a/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test +++ b/mysql-test/suite/galera/t/galera_rsu_wsrep_desync.test @@ -60,7 +60,7 @@ SET DEBUG_SYNC = 'alter_table_before_create_table_no_lock WAIT_FOR continue'; --connection node_1b --sleep 2 ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'exit open_tables()' and INFO = 'SET GLOBAL wsrep_desync=1' +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'SET GLOBAL wsrep_desync=1' --source include/wait_condition.inc SET DEBUG_SYNC= 'now SIGNAL continue'; diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result index 3d7ec08166d..36d04afb80d 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result +++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result @@ -241,13 +241,13 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME WSREP_MAX_WS_ROWS SESSION_VALUE NULL -GLOBAL_VALUE 131072 +GLOBAL_VALUE 0 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 131072 +DEFAULT_VALUE 0 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Max number of rows in write set -NUMERIC_MIN_VALUE 1 +NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 1048576 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL @@ -255,14 +255,14 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME WSREP_MAX_WS_SIZE SESSION_VALUE NULL -GLOBAL_VALUE 1073741824 +GLOBAL_VALUE 2147483647 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 1073741824 +DEFAULT_VALUE 2147483647 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Max write set size (bytes) NUMERIC_MIN_VALUE 1024 -NUMERIC_MAX_VALUE 4294901759 +NUMERIC_MAX_VALUE 2147483647 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO @@ -367,7 +367,7 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME WSREP_PATCH_VERSION SESSION_VALUE NULL -GLOBAL_VALUE wsrep_25.13 +GLOBAL_VALUE wsrep_25.16 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE NULL VARIABLE_SCOPE GLOBAL diff --git a/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result 
b/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result index e4d452c11b5..90925e71c32 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_desync_basic.result @@ -24,8 +24,6 @@ SELECT @@global.wsrep_desync; # valid values SET @@global.wsrep_desync='OFF'; -Warnings: -Warning 1231 'wsrep_desync' is already OFF. SELECT @@global.wsrep_desync; @@global.wsrep_desync 0 @@ -35,8 +33,6 @@ SELECT @@global.wsrep_desync; @@global.wsrep_desync 0 SET @@global.wsrep_desync=default; -Warnings: -Warning 1231 'wsrep_desync' is already OFF. SELECT @@global.wsrep_desync; @@global.wsrep_desync 0 diff --git a/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result b/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result index d7e72869be3..689da3e9cb0 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_max_ws_size_basic.result @@ -1,9 +1,6 @@ # # wsrep_max_ws_size # -# save the initial value -SET @wsrep_max_ws_size_global_saved = @@global.wsrep_max_ws_size; -SET @wsrep_provider_options_saved = @@global.wsrep_provider_options; # default SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size @@ -13,28 +10,29 @@ SELECT @@global.wsrep_max_ws_size; SELECT @@session.wsrep_max_ws_size; ERROR HY000: Variable 'wsrep_max_ws_size' is a GLOBAL variable SET @@global.wsrep_max_ws_size=1; -Warnings: -Warning 1292 Truncated incorrect wsrep_max_ws_size value: '1' +ERROR HY000: WSREP (galera) not started SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size -1024 +2147483647 # valid values SET @@global.wsrep_max_ws_size=1073741824; +ERROR HY000: WSREP (galera) not started SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size -1073741824 +2147483647 SET @@global.wsrep_max_ws_size=1073741825; +ERROR HY000: WSREP (galera) not started SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size -1073741825 +2147483647 SET @@global.wsrep_max_ws_size=0; -Warnings: -Warning 1292 Truncated incorrect wsrep_max_ws_size value: '0' +ERROR HY000: WSREP (galera) not started SELECT @@global.wsrep_max_ws_size; @@global.wsrep_max_ws_size -1024 +2147483647 SET @@global.wsrep_max_ws_size=default; +ERROR HY000: WSREP (galera) not started SELECT @global.wsrep_max_ws_size; @global.wsrep_max_ws_size NULL @@ -48,13 +46,8 @@ SELECT @global.wsrep_max_ws_size; @global.wsrep_max_ws_size NULL SET @@global.wsrep_max_ws_size=-1; -Warnings: -Warning 1292 Truncated incorrect wsrep_max_ws_size value: '-1' +ERROR HY000: WSREP (galera) not started SELECT @global.wsrep_max_ws_size; @global.wsrep_max_ws_size NULL - -# restore the initial value -SET @@global.wsrep_max_ws_size = @wsrep_max_ws_size_global_saved; -SET @@global.wsrep_provider_options = @wsrep_provider_options_saved; # End of test diff --git a/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test b/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test index 2e302015136..50506ed12df 100644 --- a/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test +++ b/mysql-test/suite/sys_vars/t/wsrep_max_ws_size_basic.test @@ -4,10 +4,6 @@ --echo # wsrep_max_ws_size --echo # ---echo # save the initial value -SET @wsrep_max_ws_size_global_saved = @@global.wsrep_max_ws_size; -SET @wsrep_provider_options_saved = @@global.wsrep_provider_options; - --echo # default SELECT @@global.wsrep_max_ws_size; @@ -15,17 +11,22 @@ SELECT @@global.wsrep_max_ws_size; --echo # scope --error ER_INCORRECT_GLOBAL_LOCAL_VAR SELECT @@session.wsrep_max_ws_size; +--error ER_WRONG_ARGUMENTS SET 
@@global.wsrep_max_ws_size=1; SELECT @@global.wsrep_max_ws_size; --echo --echo # valid values +--error ER_WRONG_ARGUMENTS SET @@global.wsrep_max_ws_size=1073741824; SELECT @@global.wsrep_max_ws_size; +--error ER_WRONG_ARGUMENTS SET @@global.wsrep_max_ws_size=1073741825; SELECT @@global.wsrep_max_ws_size; +--error ER_WRONG_ARGUMENTS SET @@global.wsrep_max_ws_size=0; SELECT @@global.wsrep_max_ws_size; +--error ER_WRONG_ARGUMENTS SET @@global.wsrep_max_ws_size=default; SELECT @global.wsrep_max_ws_size; @@ -36,12 +37,8 @@ SET @@global.wsrep_max_ws_size=NULL; --error ER_WRONG_TYPE_FOR_VAR SET @@global.wsrep_max_ws_size='junk'; SELECT @global.wsrep_max_ws_size; +--error ER_WRONG_ARGUMENTS SET @@global.wsrep_max_ws_size=-1; SELECT @global.wsrep_max_ws_size; ---echo ---echo # restore the initial value -SET @@global.wsrep_max_ws_size = @wsrep_max_ws_size_global_saved; -SET @@global.wsrep_provider_options = @wsrep_provider_options_saved; - --echo # End of test diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result index 928f1995072..62d7f62440f 100644 --- a/mysql-test/suite/wsrep/r/variables.result +++ b/mysql-test/suite/wsrep/r/variables.result @@ -36,7 +36,6 @@ wsrep_commit_oooe # wsrep_commit_oool # wsrep_commit_window # wsrep_connected # -wsrep_debug_sync_waiters # wsrep_flow_control_paused # wsrep_flow_control_paused_ns # wsrep_flow_control_recv # @@ -92,7 +91,6 @@ wsrep_commit_oooe # wsrep_commit_oool # wsrep_commit_window # wsrep_connected # -wsrep_debug_sync_waiters # wsrep_flow_control_paused # wsrep_flow_control_paused_ns # wsrep_flow_control_recv # diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 9ad70514852..306528c5c86 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -4865,8 +4865,8 @@ static Sys_var_ulong Sys_wsrep_max_ws_size ( "wsrep_max_ws_size", "Max write set size (bytes)", GLOBAL_VAR(wsrep_max_ws_size), CMD_LINE(REQUIRED_ARG), VALID_RANGE(1024, WSREP_MAX_WS_SIZE), DEFAULT(WSREP_MAX_WS_SIZE), - BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), - ON_UPDATE(wsrep_max_ws_size_update)); + BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, + ON_CHECK(wsrep_max_ws_size_check), ON_UPDATE(wsrep_max_ws_size_update)); static Sys_var_ulong Sys_wsrep_max_ws_rows ( "wsrep_max_ws_rows", "Max number of rows in write set", diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 296ff26962d..04ccc1a7e45 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -170,6 +170,9 @@ extern void wsrep_prepend_PATH (const char* path); extern wsrep_seqno_t wsrep_locked_seqno; #define WSREP_ON \ + (global_system_variables.wsrep_on) + +#define WSREP_ON_NEW \ ((global_system_variables.wsrep_on) && \ wsrep_provider && \ strcmp(wsrep_provider, WSREP_NONE)) diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 56a799c97c1..30c5bf900ff 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -195,6 +195,8 @@ end: static bool refresh_provider_options() { + DBUG_ASSERT(wsrep); + WSREP_DEBUG("refresh_provider_options: %s", (wsrep_provider_options) ? 
wsrep_provider_options : "null"); char* opts= wsrep->options_get(wsrep); @@ -318,18 +320,18 @@ void wsrep_provider_init (const char* value) } bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var) -{ - return 0; -} - -bool wsrep_provider_options_update(sys_var *self, THD* thd, enum_var_type type) { if (wsrep == NULL) { my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0)); return true; } + return false; +} +bool wsrep_provider_options_update(sys_var *self, THD* thd, enum_var_type type) +{ + DBUG_ASSERT(wsrep); wsrep_status_t ret= wsrep->options_set(wsrep, wsrep_provider_options); if (ret != WSREP_OK) { @@ -522,6 +524,12 @@ bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type) bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var) { + if (wsrep == NULL) + { + my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0)); + return true; + } + bool new_wsrep_desync= (bool) var->save_result.ulonglong_value; if (wsrep_desync == new_wsrep_desync) { if (new_wsrep_desync) { @@ -559,6 +567,12 @@ bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var) } bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) +{ + DBUG_ASSERT(wsrep); + return false; +} + +bool wsrep_max_ws_size_check(sys_var *self, THD* thd, set_var* var) { if (wsrep == NULL) { @@ -570,6 +584,8 @@ bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type) bool wsrep_max_ws_size_update (sys_var *self, THD *thd, enum_var_type) { + DBUG_ASSERT(wsrep); + char max_ws_size_opt[128]; my_snprintf(max_ws_size_opt, sizeof(max_ws_size_opt), "repl.max_ws_size=%d", wsrep_max_ws_size); diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h index 66b9a736b38..ca77b5c0039 100644 --- a/sql/wsrep_var.h +++ b/sql/wsrep_var.h @@ -92,7 +92,9 @@ extern bool wsrep_slave_threads_update UPDATE_ARGS; extern bool wsrep_desync_check CHECK_ARGS; extern bool wsrep_desync_update UPDATE_ARGS; +extern bool wsrep_max_ws_size_check CHECK_ARGS; extern bool wsrep_max_ws_size_update UPDATE_ARGS; + #else /* WITH_WSREP */ #define WSREP_NONE -- cgit v1.2.1 From e7f54437839a2ffe740792ebce473498f27ba9f6 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Fri, 26 Aug 2016 16:49:46 +0600 Subject: Call profiling.restart() and profiling.reset() only if profiling is enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit in other case we will get: ../alex/dev/server/sql/sql_class.cc: In member function ‘void THD::free_connection()’: ../server/sql/sql_class.cc:1664:3: error: ‘profiling’ was not declared in this scope profiling.restart(); // Reset profiling ^~~~~~~~~ ../server/sql/sql_class.cc: In member function ‘void THD::reset_for_reuse()’: ../server/sql/sql_class.cc:1689:3: error: ‘profiling’ was not declared in this scope profiling.reset(); ^~~~~~~~~ errors. 
--- sql/sql_class.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 9710ea8bbe3..4ace2be3e28 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1661,7 +1661,9 @@ void THD::free_connection() /* close all prepared statements, to save memory */ stmt_map.reset(); free_connection_done= 1; +#if defined(ENABLED_PROFILING) profiling.restart(); // Reset profiling +#endif } /* @@ -1686,7 +1688,9 @@ void THD::reset_for_reuse() abort_on_warning= 0; free_connection_done= 0; m_command= COM_CONNECT; +#if defined(ENABLED_PROFILING) profiling.reset(); +#endif #ifdef SIGNAL_WITH_VIO_CLOSE active_vio = 0; #endif -- cgit v1.2.1 From 467217e66951defe62083dc10e5d205d7b94a9b7 Mon Sep 17 00:00:00 2001 From: Nirbhay Choubey Date: Fri, 26 Aug 2016 12:45:48 -0400 Subject: MDEV-9510: Print extra info to error log Activated by enabling wsrep_debug. --- sql/log.cc | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/sql/log.cc b/sql/log.cc index b77a6b32016..be24bcd718a 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3138,6 +3138,22 @@ const char *MYSQL_LOG::generate_name(const char *log_name, } +/* + Print some additional information about addition/removal of + XID list entries. + TODO: Remove once MDEV-9510 is fixed. +*/ +#ifdef WITH_WSREP +#define WSREP_XID_LIST_ENTRY(X, Y) \ + if (wsrep_debug) \ + { \ + char buf[FN_REFLEN]; \ + strmake(buf, Y->binlog_name, Y->binlog_name_len); \ + WSREP_DEBUG(X, buf, Y->binlog_id); \ + } +#else +#define WSREP_XID_LIST_ENTRY(X, Y) do { } while(0) +#endif MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period) :reset_master_pending(0), mark_xid_done_waiting(0), @@ -3200,6 +3216,8 @@ void MYSQL_BIN_LOG::cleanup() */ DBUG_ASSERT(b->xid_count == 0); DBUG_ASSERT(!binlog_xid_count_list.head()); + WSREP_XID_LIST_ENTRY("MYSQL_BIN_LOG::cleanup(): Removing xid_list_entry " + "for %s (%lu)", b); my_free(b); } @@ -3691,9 +3709,13 @@ bool MYSQL_BIN_LOG::open(const char *log_name, /* Remove any initial entries with no pending XIDs. */ while ((b= binlog_xid_count_list.head()) && b->xid_count == 0) { + WSREP_XID_LIST_ENTRY("MYSQL_BIN_LOG::open(): Removing xid_list_entry for " + "%s (%lu)", b); my_free(binlog_xid_count_list.get()); } mysql_cond_broadcast(&COND_xid_list); + WSREP_XID_LIST_ENTRY("MYSQL_BIN_LOG::open(): Adding new xid_list_entry for " + "%s (%lu)", new_xid_list_entry); binlog_xid_count_list.push_back(new_xid_list_entry); mysql_mutex_unlock(&LOCK_xid_list); @@ -4228,6 +4250,8 @@ err: if (b->binlog_id == current_binlog_id) break; DBUG_ASSERT(b->xid_count == 0); + WSREP_XID_LIST_ENTRY("MYSQL_BIN_LOG::reset_logs(): Removing " + "xid_list_entry for %s (%lu)", b); my_free(binlog_xid_count_list.get()); } mysql_cond_broadcast(&COND_xid_list); @@ -9471,6 +9495,8 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint) DBUG_ASSERT(b); if (b->binlog_id == current || b->xid_count > 0) break; + WSREP_XID_LIST_ENTRY("TC_LOG_BINLOG::mark_xid_done(): Removing " + "xid_list_entry for %s (%lu)", b); my_free(binlog_xid_count_list.get()); } -- cgit v1.2.1 From c8f85bf263a81a625089507d747236852ec87024 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Fri, 26 Aug 2016 16:09:22 -0700 Subject: mdev-9864: cleanup, re-factoring. Added comments. 
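(Illustration only, not part of this patch: the comments added in the diff below describe how dependencies between the tables defined in a WITH clause are tracked and how the "anchor" part of a recursive definition is found and moved to the head of the definition. A generic recursive CTE of the kind these checks deal with might look as follows; the table and column names here are invented for the sketch.)

    WITH RECURSIVE ancestors AS
    (
      SELECT * FROM folks WHERE name = 'Alex'      -- anchor (non-recursive) part
      UNION ALL
      SELECT f.* FROM folks f, ancestors a         -- recursive part: refers back to 'ancestors'
      WHERE f.id = a.father OR f.id = a.mother
    )
    SELECT * FROM ancestors;

Here the definition of 'ancestors' depends on itself, so its dependency bitmap marks it as recursive, and the first select of the union serves as the anchor that the checks below require.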
--- sql/item_subselect.h | 5 + sql/share/errmsg-utf8.txt | 2 +- sql/sql_class.h | 4 +- sql/sql_cte.cc | 573 +++++++++++++++++++++++++++++++++++----------- sql/sql_cte.h | 114 +++++---- sql/sql_lex.h | 4 +- sql/sql_parse.cc | 2 +- sql/sql_prepare.cc | 2 +- sql/sql_union.cc | 39 ++-- sql/sql_view.cc | 4 +- sql/table.h | 5 +- 11 files changed, 547 insertions(+), 207 deletions(-) diff --git a/sql/item_subselect.h b/sql/item_subselect.h index c1e68247220..e72f75726ed 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -128,6 +128,11 @@ public: /* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */ bool is_correlated; + /* + TRUE <=> the subquery contains a recursive reference in the FROM list + of one of its selects. In this case some of subquery optimization + strategies cannot be applied for the subquery; + */ bool with_recursive_reference; enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 6c921789eca..e1db12d2544 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7157,7 +7157,7 @@ ER_RECURSIVE_WITHOUT_ANCHORS ER_UNACCEPTABLE_MUTUAL_RECURSION eng "Unacceptable mutual recursion with anchored table '%s'" ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED - eng "Reference to recursive WITH table '%s' in materiazed derived" + eng "Reference to recursive WITH table '%s' in materialized derived" ER_NOT_STANDARDS_COMPLIANT_RECURSIVE eng "Restrictions imposed on recursive definitions are violated for table '%s'" # diff --git a/sql/sql_class.h b/sql/sql_class.h index 04a80166ad1..be263a6b902 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4682,10 +4682,12 @@ class select_union_recursive :public select_union { public: TABLE *incr_table; + TABLE *first_rec_table_to_update; List
rec_tables; select_union_recursive(THD *thd_arg): - select_union(thd_arg), incr_table(0) {}; + select_union(thd_arg), + incr_table(0), first_rec_table_to_update(0) {}; int send_data(List &items); bool create_result_table(THD *thd, List *column_types, diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index dd877b5598a..82958333f65 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -14,21 +14,25 @@ with_clauses_list Pointer to the first clause in the list @details - The procedure just calls the method With_clause::check_dependencies - for each member of the given list. + For each with clause from the given list the procedure finds all + dependencies between tables defined in the clause by calling the + method With_clause::checked_dependencies. + Additionally, based on the info collected by this method the procedure + finds anchors for each recursive definition and moves them at the head + of the definition. @retval false on success true on failure */ -bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list) +bool check_dependencies_in_with_clauses(With_clause *with_clauses_list) { for (With_clause *with_clause= with_clauses_list; with_clause; with_clause= with_clause->next_with_clause) { - if (with_clause->check_dependencies(thd)) + if (with_clause->check_dependencies()) return true; if (with_clause->check_anchors()) return true; @@ -43,43 +47,38 @@ bool check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list Check dependencies between tables defined in this with clause @details - The method performs the following actions for this with clause: - - 1. Test for definitions of the tables with the same name. - 2. For each table T defined in this with clause look for tables - from the same with clause that are used in the query that - specifies T and set the dependencies of T on these tables - in dependency_map. - 3. Build the transitive closure of the above direct dependencies - to find out all recursive definitions. - 4. If this with clause is not specified as recursive then - for each with table T defined in this with clause check whether - it is used in any definition that follows the definition of T. + The method performs the following for this with clause: + - checks that there are no definitions of the tables with the same name + - for each table T defined in this with clause looks for the tables + from the same with clause that are used in the query that specifies T + and set the dependencies of T on these tables in a bitmap. + - builds the transitive closure of the above direct dependencies + to find out all recursive definitions. @retval true if an error is reported false otherwise */ -bool With_clause::check_dependencies(THD *thd) +bool With_clause::check_dependencies() { if (dependencies_are_checked) return false; /* Look for for definitions with the same query name. When found report an error and return true immediately. - For each table T defined in this with clause look for all other tables from - the same with with clause that are used in the specification of T. + For each table T defined in this with clause look for all other tables + from the same with clause that are used in the specification of T. For each such table set the dependency bit in the dependency map of - with element for T. + the with element for T. 
*/ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { - for (With_element *elem= first_elem; + for (With_element *elem= with_list.first; elem != with_elem; - elem= elem->next_elem) + elem= elem->next) { if (my_strcasecmp(system_charset_info, with_elem->query_name->str, elem->query_name->str) == 0) @@ -88,20 +87,20 @@ bool With_clause::check_dependencies(THD *thd) return true; } } - if (with_elem->check_dependencies_in_spec(thd)) + if (with_elem->check_dependencies_in_spec()) return true; } /* Build the transitive closure of the direct dependencies found above */ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) with_elem->derived_dep_map= with_elem->base_dep_map; - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { table_map with_elem_map= with_elem->get_elem_map(); - for (With_element *elem= first_elem; elem != NULL; elem= elem->next_elem) + for (With_element *elem= with_list.first; elem; elem= elem->next) { if (elem->derived_dep_map & with_elem_map) elem->derived_dep_map |= with_elem->derived_dep_map; @@ -109,12 +108,12 @@ bool With_clause::check_dependencies(THD *thd) } /* - Mark those elements where tables are defined with direct or indirect recursion. - Report an error when recursion (direct or indirect) is used to define a table. + Mark those elements where tables are defined with direct or indirect + recursion. */ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { if (with_elem->derived_dep_map & with_elem->get_elem_map()) with_elem->is_recursive= true; @@ -125,13 +124,35 @@ bool With_clause::check_dependencies(THD *thd) } +/* + This structure describes an element of the stack of embedded units. + The stack is used when looking for a definition of a table in + with clauses. The definition can be found only in the scopes + of the with clauses attached to the units from the stack. + The with clauses are looked through starting from the top + element of the stack. +*/ + struct st_unit_ctxt_elem { - st_unit_ctxt_elem *prev; - st_select_lex_unit *unit; + st_unit_ctxt_elem *prev; // the previous element of the stack + st_select_lex_unit *unit; }; -bool With_element::check_dependencies_in_spec(THD *thd) + +/** + @brief + Find the dependencies of this element on its siblings in its specification + + @details + For each table reference ref(T) from the FROM list of every select sl + immediately contained in the specification query of this element this + method searches for the definition of T in the with clause to which + this element belongs. If such a definition is found then the dependency + on it is set in sl->with_dep and in this->base_dep_map.
+*/ + +bool With_element::check_dependencies_in_spec() { for (st_select_lex *sl= spec->first_select(); sl; sl= sl->next_select()) { @@ -144,6 +165,62 @@ } +/** + @brief + Search for the definition of a table among the elements of this with clause + + @param table The reference to the table that is looked for + @param barrier The barrier with element for the search + + @details + The function looks through the elements of this with clause trying to find + the definition of the given table. When it encounters the element with + the same query name as the table's name it returns this element. If no + such definitions are found the function returns NULL. + + @retval + found with element if the search succeeded + NULL - otherwise +*/ + +With_element *With_clause::find_table_def(TABLE_LIST *table, + With_element *barrier) +{ + for (With_element *with_elem= with_list.first; + with_elem != barrier; + with_elem= with_elem->next) + { + if (my_strcasecmp(system_charset_info, with_elem->query_name->str, + table->table_name) == 0) + { + table->set_derived(); + return with_elem; + } + } + return NULL; +} + + +/** + @brief + Search for the definition of a table in with clauses + + @param tbl The reference to the table that is looked for + @param ctxt The context describing in what with clauses of the upper + levels the table has to be searched for. + + @details + The function looks for the definition of the table tbl in the definitions + of the with clauses from the upper levels specified by the parameter ctxt. + When it encounters the element with the same query name as the table's name + it returns this element. If no such definitions are found the function + returns NULL. + + @retval + found with element if the search succeeded + NULL - otherwise +*/ + With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl, st_unit_ctxt_elem *ctxt) { @@ -159,12 +236,41 @@ With_element *find_table_def_in_with_clauses(TABLE_LIST *tbl, return tbl->with; barrier= NULL; if (unit->with_element && !unit->with_element->get_owner()->with_recursive) + { + /* + This unit is the specification of the with element unit->with_element. + The with element belongs to a with clause without the specifier RECURSIVE. + So when searching for the matching definition of tbl this with clause must + be looked through only up to this with element. + */ barrier= unit->with_element; + } } return NULL; } +/** + @brief + Find the dependencies of this element on its siblings in a select + + @param sl The select where to look for the dependencies + @param ctxt The structure specifying the scope of the definitions + of the with elements of the upper levels + @param in_subq if true mark dependencies found in subqueries in + this->sq_dep_map + @param dep_map IN/OUT The bitmap where to mark the found dependencies + + @details + For each table reference ref(T) from the FROM list of the select sl + the method searches in with clauses for the definition of the table T. + If the found definition belongs to the same with clause as this with + element then the method sets the dependency on T in the in/out parameter + dep_map and, if required, in this->sq_dep_map. + The parameter ctxt describes the proper context for the search + of the definition of T.
+*/ + void With_element::check_dependencies_in_select(st_select_lex *sl, st_unit_ctxt_elem *ctxt, bool in_subq, @@ -176,36 +282,62 @@ void With_element::check_dependencies_in_select(st_select_lex *sl, if (tbl->derived || tbl->nested_join) continue; tbl->with_internal_reference_map= 0; + /* + If there is a with clause attached to the unit containing sl + look first for the definition of tbl in this with clause. + If such a definition is not found there, look in the with + clauses of the upper levels. + If the definition of tbl is found somewhere in with clauses + then tbl->with is set to point to this definition + */ if (with_clause && !tbl->with) tbl->with= with_clause->find_table_def(tbl, NULL); if (!tbl->with) tbl->with= find_table_def_in_with_clauses(tbl, ctxt); + if (tbl->with && tbl->with->owner== this->owner) - { + { + /* + The found definition T of tbl belongs to the same + with clause as this with element. In this case: + - set the dependence on T in the bitmap dep_map + - set tbl->with_internal_reference_map with + the bitmap for this definition + - set the dependence on T in the bitmap this->sq_dep_map + if needed + */ *dep_map|= tbl->with->get_elem_map(); tbl->with_internal_reference_map= get_elem_map(); if (in_subq) sq_dep_map|= tbl->with->get_elem_map(); } } + /* Now look for the dependencies in the subqueries of sl */ st_select_lex_unit *inner_unit= sl->first_inner_unit(); for (; inner_unit; inner_unit= inner_unit->next_unit()) check_dependencies_in_unit(inner_unit, ctxt, in_subq, dep_map); } - /** +/** @brief - Check dependencies on the sibling with tables used in the given unit - - @param unit The unit where the siblings are to be searched for + Find the dependencies of this element on its siblings in a unit + + @param unit The unit where to look for the dependencies + @param ctxt The structure specifying the scope of the definitions + of the with elements of the upper levels + @param in_subq if true mark dependencies found in subqueries in + this->sq_dep_map + @param dep_map IN/OUT The bitmap where to mark the found dependencies @details - The method recursively looks through all from lists encountered - the given unit. If it finds a reference to a table that is - defined in the same with clause to which this element belongs - the method set the bit of dependency on this table in the - dependency_map of this element. + This method searches in the unit 'unit' for the references in the FROM + lists of all selects contained in this unit and in the with clause + attached to this unit that refer to definitions of tables from the + same with clause as this element. + If such definitions are found then the dependencies on them are + set in the in/out parameter dep_map and optionally in this->sq_dep_map. + The parameter ctxt describes the proper context for the search.
*/ void With_element::check_dependencies_in_unit(st_select_lex_unit *unit, @@ -224,39 +356,83 @@ void With_element::check_dependencies_in_unit(st_select_lex_unit *unit, } } + +/** + @brief + Find the dependencies of this element on its siblings in a with clause + + @param with_clause The with clause where to look for the dependencies + @param ctxt The structure specifying the scope of the definitions + of the with elements of the upper levels + @param in_subq if true mark dependencies found in subqueries in + this->sq_dep_map + @param dep_map IN/OUT The bitmap where to mark the found dependencies + + @details + This method searches in the with_clause for the references in the FROM + lists of all selects contained in the specifications of the with elements + from this with_clause that refer to definitions of tables from the + same with clause as this element. + If such definitions are found then the dependencies on them are + set in the in/out parameter dep_map and optionally in this->sq_dep_map. + The parameter ctxt describes the proper context for the search. +*/ + void With_element::check_dependencies_in_with_clause(With_clause *with_clause, st_unit_ctxt_elem *ctxt, bool in_subq, table_map *dep_map) { - for (With_element *with_elem= with_clause->first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_clause->with_list.first; + with_elem; + with_elem= with_elem->next) { check_dependencies_in_unit(with_elem->spec, ctxt, in_subq, dep_map); } } +/** + @brief + Find mutually recursive with elements and check that they have anchors + + @details + This method performs the following: + - for each recursive with element finds all elements mutually recursive with it + - links each group of mutually recursive with elements into a ring chain + - checks that every group of mutually recursive with elements contains + at least one anchor + - checks that after removing any with element with an anchor the remaining + with elements mutually recursive with the removed one are not recursive + anymore + + @retval + true if an error is reported + false otherwise +*/ + bool With_clause::check_anchors() { - /* Find mutually recursive with elements */ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { if (!with_elem->is_recursive) continue; + /* + If with_elem is a recursive with element find all elements mutually recursive + with it (any recursive element is mutually recursive with itself). Mark all + these elements in the bitmap mutually_recursive. Also link all these + elements into a ring chain. + */ if (!with_elem->next_mutually_recursive) { With_element *last_mutually_recursive= with_elem; table_map with_elem_dep= with_elem->derived_dep_map; table_map with_elem_map= with_elem->get_elem_map(); - for (With_element *elem= with_elem; - elem != NULL; - elem= elem->next_elem) + for (With_element *elem= with_elem; elem; elem= elem->next) { if (!elem->is_recursive) continue; @@ -277,11 +453,16 @@ bool With_clause::check_anchors() elem->mutually_recursive= with_elem->mutually_recursive; } + /* + For each select from the specification of 'with_elem' check whether + it is an anchor, i.e. does not depend on any with elements mutually + recursive with 'with_elem'.
+ */ for (st_select_lex *sl= with_elem->spec->first_select(); sl; sl= sl->next_select()) { - if (!(with_elem->mutually_recursive & sl->with_dep)) + if (with_elem->is_anchor(sl)) { with_elem->with_anchor= true; break; @@ -289,15 +470,25 @@ } } - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + /* + Check that for any group of mutually recursive with elements + - there is at least one anchor + - after removing any with element with an anchor the remaining with elements + mutually recursive with the removed one are not recursive anymore + */ + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { if (!with_elem->is_recursive) continue; if (!with_elem->with_anchor) { + /* + Check that the other with elements mutually recursive with 'with_elem' + contain at least one anchor. + */ With_element *elem= with_elem; while ((elem= elem->get_next_mutually_recursive()) != with_elem) { @@ -313,7 +504,13 @@ } else { + /* 'with_elem' is a with element with an anchor */ With_element *elem= with_elem; + /* + For the other with elements mutually recursive with 'with_elem' + set dependency bits between those elements in the field work_dep_map + and build transitive closure of these dependencies + */ while ((elem= elem->get_next_mutually_recursive()) != with_elem) elem->work_dep_map= elem->base_dep_map & elem->mutually_recursive; elem= with_elem; @@ -327,6 +524,7 @@ el->work_dep_map|= elem->work_dep_map; } } + /* If the transitive closure displays any cycle report an error */ elem= with_elem; while ((elem= elem->get_next_mutually_recursive()) != with_elem) { @@ -346,36 +544,53 @@ /** @brief - Search for the definition of a table among the elements of this with clause + Move anchors to the beginning of the specifications for with elements - @param table The reference to the table that is looked for - @details - The function looks through the elements of this with clause trying to find - the definition of the given table. When it encounters the element with - the same query name as the table's name it returns this element. If no - such definitions are found the function returns NULL. + This method moves anchors to the beginning of the specifications for + all recursive with elements. +*/ - @retval - found with element if the search succeeded - NULL - otherwise -*/ +void With_clause::move_anchors_ahead() +{ + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) + { + if (with_elem->is_recursive) + with_elem->move_anchors_ahead(); + } +} + -With_element *With_clause::find_table_def(TABLE_LIST *table, - With_element *barrier) +/** + @brief + Move anchors to the beginning of the specification of this with element + + @details + If the specification of this with element contains anchors the method + moves them to the very beginning of the specification.
+*/ + +void With_element::move_anchors_ahead() { - for (With_element *with_elem= first_elem; - with_elem != barrier; - with_elem= with_elem->next_elem) + st_select_lex *next_sl; + st_select_lex *new_pos= spec->first_select(); + st_select_lex *last_sl; + new_pos->linkage= UNION_TYPE; + for (st_select_lex *sl= new_pos; sl; sl= next_sl) { - if (my_strcasecmp(system_charset_info, with_elem->query_name->str, - table->table_name) == 0) + next_sl= sl->next_select(); + if (is_anchor(sl)) { - table->set_derived(); - return with_elem; + sl->move_node(new_pos); + new_pos= sl->next_select(); } + last_sl= sl; } - return NULL; + if (spec->union_distinct) + spec->union_distinct= last_sl; + first_recursive= new_pos; } @@ -397,9 +612,9 @@ With_element *With_clause::find_table_def(TABLE_LIST *table, bool With_clause::prepare_unreferenced_elements(THD *thd) { - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { if (!with_elem->is_referenced() && with_elem->prepare_unreferenced(thd)) return true; @@ -418,9 +633,9 @@ bool With_clause::prepare_unreferenced_elements(THD *thd) @param spec_end The end of the specification in the input string @details - The method creates for a string copy of the specification used in this element. - The method is called when the element is parsed. The copy may be used to - create clones of the specification whenever they are needed. + The method creates for a string copy of the specification used in this + element. The method is called when the element is parsed. The copy may be + used to create clones of the specification whenever they are needed. @retval false on success @@ -646,41 +861,6 @@ bool With_element::prepare_unreferenced(THD *thd) } - -void With_clause::move_anchors_ahead() -{ - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) - { - if (with_elem->is_recursive) - with_elem->move_anchors_ahead(); - } -} - - -void With_element::move_anchors_ahead() -{ - st_select_lex *next_sl; - st_select_lex *new_pos= spec->first_select(); - st_select_lex *last_sl; - new_pos->linkage= UNION_TYPE; - for (st_select_lex *sl= new_pos; sl; sl= next_sl) - { - next_sl= sl->next_select(); - if (is_anchor(sl)) - { - sl->move_node(new_pos); - new_pos= sl->next_select(); - } - last_sl= sl; - } - if (spec->union_distinct) - spec->union_distinct= last_sl; - first_recursive= new_pos; -} - - bool With_element::is_anchor(st_select_lex *sel) { return !(mutually_recursive & sel->with_dep); @@ -694,10 +874,10 @@ bool With_element::is_anchor(st_select_lex *sel) @param table reference to the table whose definition is searched for @details - The method looks for the definition the table whose reference is occurred + The method looks for the definition of the table whose reference is occurred in the FROM list of this select node. First it searches for it in the with clause attached to the unit this select node belongs to. If such a - definition is not found there the embedding units are looked through. + definition is not found then the embedding units are looked through. @retval pointer to the found definition if the search has been successful @@ -770,6 +950,12 @@ bool TABLE_LIST::is_recursive_with_table() } +/* + A reference to a with table T is recursive if it occurs somewhere + in the query specifying T or in the query specifying one of the tables + mutually recursive with T. 
+*/ + bool TABLE_LIST::is_with_table_recursive_reference() { return (with_internal_reference_map && @@ -777,12 +963,67 @@ } +/* + Specifications of with tables with recursive table references + in non-mergeable derived tables are not allowed in this + implementation. +*/ + + +/* + We say that the specification of a with table T is restricted + if all of the following is true. + 1. Any immediate select of the specification contains at most one + recursive table reference taking into account table references + from mergeable derived tables. + 2. Any recursive table reference is not an inner operand of an + outer join operation used in an immediate select of the + specification. + 3. Any immediate select from the specification of T does not + contain aggregate functions. + 4. The specification of T does not contain recursive table references. + + If the specification of T is not restricted we call the corresponding + with element unrestricted. + + The SQL standard allows only with elements with restricted specifications. + By default we comply with the standard here. + + Yet we allow unrestricted specification if the status variable + 'standards_compliant_cte' is set to 'off' (0). +*/ + + +/** + @brief + Check if this select makes the including specification unrestricted + + @param + only_standards_compliant true if the system variable + 'standards_compliant_cte' is set to 'on' + @details + This method checks whether the conditions 1-4 (see the comment above) + are satisfied for this select. If not, then mark this element as + unrestricted and report an error if 'only_standards_compliant' is true. + + @retval + true if an error is reported + false otherwise +*/ bool st_select_lex::check_unrestricted_recursive(bool only_standards_compliant) { With_element *with_elem= get_with_element(); if (!with_elem ||!with_elem->is_recursive) + { + /* + If this select is not from the specification of a with element or + if this is not a recursive with element then there is nothing to check. + */ return false; + } + + /* Check conditions 1-2 for restricted specification */ table_map unrestricted= 0; table_map encountered= 0; if (with_elem->check_unrestricted_recursive(this, @@ -790,10 +1031,15 @@ bool st_select_lex::check_unrestricted_recursive(bool only_standards_compliant) encountered)) return true; with_elem->get_owner()->add_unrestricted(unrestricted); + + + /* Check conditions 3-4 for restricted specification */ if (with_sum_func || (with_elem->contains_sq_with_recursive_reference())) with_elem->get_owner()->add_unrestricted( with_elem->get_mutually_recursive()); + + /* Report an error on unrestricted specification if this is required */ if (only_standards_compliant && with_elem->is_unrestricted()) { my_error(ER_NOT_STANDARDS_COMPLIANT_RECURSIVE, @@ -805,10 +1051,30 @@ bool st_select_lex::check_unrestricted_recursive(bool only_standards_compliant) } +/** + @brief + Check if a select from the spec of this with element is partially restricted + + @param + sel select from the specification of this element where to check + whether conditions 1-2 are satisfied + unrestricted IN/OUT bitmap where to mark unrestricted specs + encountered IN/OUT bitmap where to mark encountered recursive references + @details + This method checks whether the conditions 1-2 (see the comment above) + are satisfied for the select sel. + This method is called recursively for derived tables.
+ + @retval + true if an error is reported + false otherwise +*/ + bool With_element::check_unrestricted_recursive(st_select_lex *sel, table_map &unrestricted, table_map &encountered) { + /* Check condition 1 for restricted specification */ List_iterator ti(sel->leaf_tables); TABLE_LIST *tbl; while ((tbl= ti++)) @@ -843,9 +1109,9 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, encountered|= with_elem->get_elem_map(); } } - for (With_element *with_elem= sel->get_with_element()->owner->first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= sel->get_with_element()->owner->with_list.first; + with_elem; + with_elem= with_elem->next) { if (!with_elem->is_recursive && (unrestricted & with_elem->get_elem_map())) continue; @@ -870,6 +1136,9 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, } } } + + + /* Check condition 2 for restricted specification */ ti.rewind(); while ((tbl= ti++)) { @@ -886,7 +1155,25 @@ bool With_element::check_unrestricted_recursive(st_select_lex *sel, } -void st_select_lex::check_subqueries_with_recursive_references() +/** + @brief + Check subqueries with recursive table references from the FROM list of this select + + @details + For each recursive table reference from the FROM list of this select + this method checks: + - whether this reference is within a materialized derived table and + if so reports an error + - whether this reference is within a subquery and if so sets a flag + in this subquery that disallows some optimization strategies for + this subquery. + + @retval + true if an error is reported + false otherwise +*/ + +bool st_select_lex::check_subqueries_with_recursive_references() { st_select_lex_unit *sl_master= master_unit(); List_iterator ti(leaf_tables); @@ -895,15 +1182,27 @@ { if (!(tbl->is_with_table_recursive_reference() && sl_master->item)) continue; + With_element *with_elem= tbl->with; + bool check_embedding_materialized_derived= true; for (st_select_lex *sl= this; sl; sl= sl_master->outer_select()) { sl_master= sl->master_unit(); + if (with_elem->get_owner() == sl_master->with_clause) + check_embedding_materialized_derived= false; + if (check_embedding_materialized_derived && !sl_master->with_element && + sl_master->derived && sl_master->derived->is_materialized_derived()) + { + my_error(ER_REF_TO_RECURSIVE_WITH_TABLE_IN_DERIVED, + MYF(0), with_elem->query_name->str); + return true; + } if (!sl_master->item) continue; Item_subselect *subq= (Item_subselect *) sl_master->item; subq->with_recursive_reference= true; } } + return false; } @@ -924,12 +1223,12 @@ void With_clause::print(String *str, enum_query_type query_type) str->append(STRING_WITH_LEN("with ")); if (with_recursive) str->append(STRING_WITH_LEN("recursive ")); - for (With_element *with_elem= first_elem; - with_elem != NULL; - with_elem= with_elem->next_elem) + for (With_element *with_elem= with_list.first; + with_elem; + with_elem= with_elem->next) { with_elem->print(str, query_type); - if (with_elem != first_elem) + if (with_elem != with_list.first) str->append(", "); } } diff --git a/sql/sql_cte.h b/sql/sql_cte.h index dfe673dcce9..20164174214 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -21,7 +21,7 @@ class With_element : public Sql_alloc { private: With_clause *owner; // with clause this object belongs to - With_element *next_elem; // next element in the with clause + With_element *next; // next element in the with clause uint
number; // number of the element in the with clause (starting from 0) table_map elem_map; // The map where with only one 1 set in this->number /* @@ -35,11 +35,23 @@ The map derived_dep_map has 1 in i-th position if this with element depends directly or indirectly from the i-th with element. */ - table_map derived_dep_map; + table_map derived_dep_map; + /* + The map sq_dep_map has 1 in i-th position if there is a reference to this + with element somewhere in subqueries of the specifications of the tables + defined in the with clause containing this element. + */ table_map sq_dep_map; table_map work_dep_map; // dependency map used for work /* Dependency map of with elements mutually recursive with this with element */ - table_map mutually_recursive; + table_map mutually_recursive; + /* + The next with element from the circular chain of the with elements + mutually recursive with this with element. + (If this element is simply recursive then next_mutually_recursive contains + the pointer to itself. If it's not recursive then next_mutually_recursive + is set to NULL.) + */ With_element *next_mutually_recursive; /* Total number of references to this element in the FROM lists of @@ -56,8 +68,6 @@ private: /* Return the map where 1 is set only in the position for this element */ table_map get_elem_map() { return 1 << number; } - TABLE *table; - public: /* The name of the table introduced by this with element. The name @@ -79,34 +89,48 @@ public: */ bool is_recursive; + /* + Any non-recursive select in the specification of a recursive + with element is called an anchor. In the case of mutually recursive + elements the specification of some of them may be without any anchor. + Yet at least one of them must contain an anchor. + All anchors of any recursive specification are moved ahead before + the prepare stage. + */ + /* Set to true if this is a recursive element with an anchor */ bool with_anchor; - + /* + Set to the first recursive select of the unit specifying the element + after all anchors have been moved to the head of the unit. + */ st_select_lex *first_recursive; - /* The number of the last performed iteration for recursive table */ + /* + The number of the last performed iteration for recursive table + (the number of the initial non-recursive step is 0, the number + of the first iteration is 1). + */ uint level; + /* + The pointer to the object used to materialize this with element + if it's recursive. This object is built at the end of the prepare + stage and is used at the execution stage.
+ */ select_union_recursive *rec_result; - TABLE *result_table; - - TABLE *first_rec_table_to_update; - - With_element(LEX_STRING *name, List list, st_select_lex_unit *unit) - : next_elem(NULL), base_dep_map(0), derived_dep_map(0), + : next(NULL), base_dep_map(0), derived_dep_map(0), sq_dep_map(0), work_dep_map(0), mutually_recursive(0), - next_mutually_recursive(NULL), - references(0), table(NULL), + next_mutually_recursive(NULL), references(0), query_name(name), column_list(list), spec(unit), is_recursive(false), with_anchor(false), - level(0), rec_result(NULL), result_table(NULL), - first_rec_table_to_update(NULL) + level(0), rec_result(NULL) {} - bool check_dependencies_in_spec(THD *thd); + bool check_dependencies_in_spec(); void check_dependencies_in_select(st_select_lex *sl, st_unit_ctxt_elem *ctxt, bool in_subq, table_map *dep_map); @@ -155,10 +179,6 @@ public: With_element *get_next_mutually_recursive() { return next_mutually_recursive; } - void set_table(TABLE *tab) { table= tab; } - - TABLE *get_table() { return table; } - bool is_anchor(st_select_lex *sel); void move_anchors_ahead(); @@ -173,7 +193,7 @@ public: void mark_as_cleaned(); - void reset_for_exec(); + void reset_recursive_for_exec(); void cleanup_stabilized(); @@ -183,8 +203,6 @@ public: bool all_are_stabilized(); - void set_result_table(TABLE *tab) { result_table= tab; } - bool instantiate_tmp_tables(); void prepare_for_next_iteration(); @@ -206,9 +224,9 @@ class With_clause : public Sql_alloc { private: st_select_lex_unit *owner; // the unit this with clause attached to - With_element *first_elem; // the first definition in this with clause - With_element **last_next; // here is set the link for the next added element - uint elements; // number of the elements/defintions in this with clauses + + /* The list of all with elements from this with clause */ + SQL_I_List with_list; /* The with clause immediately containing this with clause if there is any, otherwise NULL. Now used only at parsing. @@ -222,9 +240,22 @@ private: /* Set to true if dependencies between with elements have been checked */ bool dependencies_are_checked; + /* + The bitmap of all recursive with elements whose specifications + do not comply with the restrictions imposed by the SQL standard + on recursive specifications. + */ table_map unrestricted; + /* + The bitmap of all recursive with elements whose anchors + have already been prepared.
+ */ table_map with_prepared_anchor; table_map cleaned; + /* + The bitmap of all recursive with elements that + has been already materialized + */ table_map stabilized; public: @@ -232,23 +263,20 @@ public: bool with_recursive; With_clause(bool recursive_fl, With_clause *emb_with_clause) - : owner(NULL), first_elem(NULL), elements(0), + : owner(NULL), embedding_with_clause(emb_with_clause), next_with_clause(NULL), - dependencies_are_checked(false), - unrestricted(0), with_prepared_anchor(0), cleaned(0), - stabilized(0), + dependencies_are_checked(false), unrestricted(0), + with_prepared_anchor(0), cleaned(0), stabilized(0), with_recursive(recursive_fl) - { last_next= &first_elem; } + { } /* Add a new element to the current with clause */ bool add_with_element(With_element *elem) { elem->owner= this; - elem->number= elements; + elem->number= with_list.elements; elem->spec->with_element= elem; - *last_next= elem; - last_next= &elem->next_elem; - elements++; + with_list.link_in_list(elem, &elem->next); return false; } @@ -263,7 +291,7 @@ public: With_clause *pop() { return embedding_with_clause; } - bool check_dependencies(THD *thd); + bool check_dependencies(); bool check_anchors(); @@ -283,7 +311,7 @@ public: friend bool - check_dependencies_in_with_clauses(THD *thd, With_clause *with_clauses_list); + check_dependencies_in_with_clauses(With_clause *with_clauses_list); }; inline @@ -321,16 +349,18 @@ void With_element::mark_as_cleaned() inline -void With_element::reset_for_exec() +void With_element::reset_recursive_for_exec() { + DBUG_ASSERT(is_recursive); level= 0; owner->with_prepared_anchor&= ~mutually_recursive; owner->cleaned&= ~get_elem_map(); - first_rec_table_to_update= NULL; cleanup_stabilized(); + rec_result->first_rec_table_to_update= 0; } + inline void With_element::cleanup_stabilized() { @@ -365,7 +395,7 @@ void With_element::prepare_for_next_iteration() With_element *with_elem= this; while ((with_elem= with_elem->get_next_mutually_recursive()) != this) { - TABLE *rec_table= with_elem->first_rec_table_to_update; + TABLE *rec_table= with_elem->rec_result->first_rec_table_to_update; if (rec_table) rec_table->reginfo.join_tab->preread_init_done= false; } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 91741961db5..e8402abf861 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1109,7 +1109,7 @@ public: } With_element *find_table_def_in_with_clauses(TABLE_LIST *table); bool check_unrestricted_recursive(bool only_standards_compliant); - void check_subqueries_with_recursive_references(); + bool check_subqueries_with_recursive_references(); List window_specs; void prepare_add_window_spec(THD *thd); @@ -2475,7 +2475,7 @@ struct LEX: public Query_tables_list SELECT_LEX *all_selects_list; /* current with clause in parsing if any, otherwise 0*/ With_clause *curr_with_clause; - /* pointer to the first with clause in the current statemant */ + /* pointer to the first with clause in the current statement */ With_clause *with_clauses_list; /* (*with_clauses_list_last_next) contains a pointer to the last diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index e4acb213c07..7dc0ef42b71 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6233,7 +6233,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) new (thd->mem_root) Item_int(thd, (ulonglong) thd->variables.select_limit); } - if (check_dependencies_in_with_clauses(thd, lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(lex->with_clauses_list)) return 1; if (!(res= open_and_lock_tables(thd, all_tables, 
TRUE, 0))) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index b5cdd807cb7..0c7e26c7b04 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1509,7 +1509,7 @@ static int mysql_test_select(Prepared_statement *stmt, lex->select_lex.context.resolve_in_select_list= TRUE; ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL; - if (check_dependencies_in_with_clauses(thd,lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(lex->with_clauses_list)) goto error; if (tables) { diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 382fabd39d7..185d79ec77a 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -1231,8 +1231,8 @@ bool st_select_lex_unit::exec_recursive() { saved_error= incr_table->insert_all_rows_into(thd, rec_table, !is_unrestricted); - if (!with_element->first_rec_table_to_update) - with_element->first_rec_table_to_update= rec_table; + if (!with_element->rec_result->first_rec_table_to_update) + with_element->rec_result->first_rec_table_to_update= rec_table; if (with_element->level == 1) rec_table->reginfo.join_tab->preread_init_done= true; } @@ -1257,14 +1257,7 @@ bool st_select_lex_unit::cleanup() for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) error|= sl->cleanup(); - - if (union_result && with_element && with_element->is_recursive) - { - ((select_union_recursive *) union_result)->cleanup(); - delete union_result; - union_result= 0; - } - + if (fake_select_lex) { error|= fake_select_lex->cleanup(); @@ -1289,15 +1282,25 @@ bool st_select_lex_unit::cleanup() } if (with_element && with_element->is_recursive) + { + if (union_result ) + { + ((select_union_recursive *) union_result)->cleanup(); + delete union_result; + union_result= 0; + } with_element->mark_as_cleaned(); - - if (union_result && !(with_element &&with_element->is_recursive)) + } + else { - delete union_result; - union_result=0; // Safety - if (table) - free_tmp_table(thd, table); - table= 0; // Safety + if (union_result) + { + delete union_result; + union_result=0; // Safety + if (table) + free_tmp_table(thd, table); + table= 0; // Safety + } } DBUG_RETURN(error); @@ -1325,7 +1328,7 @@ void st_select_lex_unit::reinit_exec_mechanism() } #endif if (with_element && with_element->is_recursive) - with_element->reset_for_exec(); + with_element->reset_recursive_for_exec(); } diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 36f5c294663..75b4021d91a 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -430,7 +430,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, lex->link_first_table_back(view, link_to_local); view->open_type= OT_BASE_ONLY; - if (check_dependencies_in_with_clauses(thd, lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(lex->with_clauses_list)) { res= TRUE; goto err; @@ -1390,7 +1390,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table, TABLE_LIST *tbl; Security_context *security_ctx= 0; - if (check_dependencies_in_with_clauses(thd, thd->lex->with_clauses_list)) + if (check_dependencies_in_with_clauses(thd->lex->with_clauses_list)) goto err; /* diff --git a/sql/table.h b/sql/table.h index 143bf17f4d4..e0b993d5c05 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1857,8 +1857,9 @@ struct TABLE_LIST derived tables. Use TABLE_LIST::is_anonymous_derived_table(). 
*/ st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ - With_element *with; /* With element of with_table */ - table_map with_internal_reference_map; + With_element *with; /* With element defining this table (if any) */ + /* Bitmap of the defining with element */ + table_map with_internal_reference_map; bool block_handle_derived; ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ st_select_lex *schema_select_lex; -- cgit v1.2.1 From c5c9128af6f82d5ee13eedc040c5c7e8a902681b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 24 Jul 2016 16:54:01 +0200 Subject: cleanup: use multi_alloc_root --- sql/table.cc | 42 ++++++++++++++++-------------------------- 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index 640ab8267fb..6fa77ef8fae 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1431,48 +1431,38 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d vcol_screen_length: %d", interval_count,interval_parts, keys,n_length,int_length, com_length, vcol_screen_length)); + if (!multi_alloc_root(&share->mem_root, + &share->field, (uint)(share->fields+1)*sizeof(Field*), + &share->intervals, (uint)interval_count*sizeof(TYPELIB), + &share->check_constraints, (uint) share->table_check_constraints * sizeof(Virtual_column_info*), + &interval_array, (uint) (share->fields+interval_parts+ keys+3)*sizeof(char *), + &names, (uint) (n_length+int_length), + &comment_pos, (uint) com_length, + &vcol_screen_pos, vcol_screen_length, + NullS)) - if (!(field_ptr = (Field **) - alloc_root(&share->mem_root, - (uint) ((share->fields+1)*sizeof(Field*)+ - interval_count*sizeof(TYPELIB)+ - share->table_check_constraints * - sizeof(Virtual_column_info*)+ - (share->fields+interval_parts+ - keys+3)*sizeof(char *)+ - (n_length+int_length+com_length+ - vcol_screen_length))))) - goto err; /* purecov: inspected */ - - share->field= field_ptr; + goto err; + + field_ptr= share->field; + table_check_constraints= share->check_constraints; read_length=(uint) (share->fields * field_pack_length + pos+ (uint) (n_length+int_length+com_length+ vcol_screen_length)); strpos= disk_buff+pos; - share->intervals= (TYPELIB*) (field_ptr+share->fields+1); - share->check_constraints= ((Virtual_column_info**) - (share->intervals+interval_count)); - table_check_constraints= share->check_constraints; - interval_array= (const char **) (table_check_constraints+ - share->table_check_constraints); - names= (char*) (interval_array+share->fields+interval_parts+keys+3); if (!interval_count) share->intervals= 0; // For better debugging - memcpy((char*) names, strpos+(share->fields*field_pack_length), - (uint) (n_length+int_length)); - comment_pos= names+(n_length+int_length); + + memcpy(names, strpos+(share->fields*field_pack_length), n_length+int_length); memcpy(comment_pos, disk_buff+read_length-com_length-vcol_screen_length, com_length); - vcol_screen_pos= (uchar*) (names+(n_length+int_length+com_length)); memcpy(vcol_screen_pos, disk_buff+read_length-vcol_screen_length, vcol_screen_length); fix_type_pointers(&interval_array, &share->fieldnames, 1, &names); if (share->fieldnames.count != share->fields) goto err; - fix_type_pointers(&interval_array, share->intervals, interval_count, - &names); + fix_type_pointers(&interval_array, share->intervals, interval_count, &names); { /* Set ENUM and SET lengths */ -- cgit v1.2.1 From 6820bf9ca9c5992a7e9d382aa8aaabff6751fd46 Mon Sep 
17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 19 Jul 2016 11:18:58 +0200 Subject: do not quote numbers in the DEFAULT clause in SHOW CREATE --- mysql-test/r/alter_table.result | 2 +- mysql-test/r/alter_table_mdev539_maria.result | 16 +- mysql-test/r/alter_table_mdev539_myisam.result | 16 +- mysql-test/r/bug46760.result | 2 +- mysql-test/r/check_constraint.result | 6 +- mysql-test/r/create-uca.result | 4 +- mysql-test/r/create.result | 54 +- mysql-test/r/ctype_utf8.result | 2 +- mysql-test/r/ctype_utf8mb4.result | 2 +- mysql-test/r/ctype_utf8mb4_innodb.result | 2 +- mysql-test/r/ctype_utf8mb4_myisam.result | 2 +- mysql-test/r/default.result | 52 +- mysql-test/r/function_defaults.result | 16 +- mysql-test/r/function_defaults_innodb.result | 16 +- mysql-test/r/gis.result | 14 +- mysql-test/r/information_schema.result | 6 +- mysql-test/r/information_schema_parameters.result | 2 +- mysql-test/r/mysql5613mysql.result | 18 +- mysql-test/r/partition.result | 4 +- mysql-test/r/partition_cache_innodb.result | 4 +- mysql-test/r/partition_cache_myisam.result | 4 +- mysql-test/r/plugin_auth.result | 2 +- mysql-test/r/show_check.result | 2 +- mysql-test/r/sql_mode.result | 6 +- mysql-test/r/strict.result | 2 +- mysql-test/r/subselect.result | 12 +- mysql-test/r/subselect_no_exists_to_in.result | 12 +- mysql-test/r/subselect_no_mat.result | 12 +- mysql-test/r/subselect_no_opts.result | 12 +- mysql-test/r/subselect_no_scache.result | 12 +- mysql-test/r/subselect_no_semijoin.result | 12 +- mysql-test/r/system_mysql_db.result | 14 +- mysql-test/r/system_mysql_db_fix40123.result | 14 +- mysql-test/r/system_mysql_db_fix50030.result | 14 +- mysql-test/r/system_mysql_db_fix50117.result | 14 +- mysql-test/r/type_enum.result | 2 +- mysql-test/r/type_newdecimal.result | 16 +- mysql-test/r/union.result | 6 +- mysql-test/suite/archive/archive.result | 4 +- .../r/innodb_onlinealter_encryption.result | 10 +- .../engines/funcs/r/ta_set_drop_default.result | 44 +- .../funcs/r/tc_column_default_decimal.result | 32 +- .../funcs/r/tc_column_default_number.result | 24 +- .../funcs/r/tc_multicolumn_different.result | 3840 ++++++++++---------- .../suite/engines/rr_trx/r/init_innodb.result | 8 +- .../suite/federated/assisted_discovery.result | 4 +- .../suite/funcs_1/r/is_character_sets.result | 2 +- mysql-test/suite/funcs_1/r/is_collations.result | 4 +- mysql-test/suite/funcs_1/r/is_columns.result | 2 +- mysql-test/suite/funcs_1/r/is_events.result | 2 +- .../suite/funcs_1/r/is_key_column_usage.result | 2 +- mysql-test/suite/funcs_1/r/is_statistics.result | 4 +- mysql-test/suite/funcs_1/r/is_triggers.result | 2 +- .../funcs_1/r/processlist_priv_no_prot.result | 40 +- .../suite/funcs_1/r/processlist_priv_ps.result | 40 +- .../suite/funcs_1/r/processlist_val_no_prot.result | 20 +- .../suite/funcs_1/r/processlist_val_ps.result | 20 +- .../suite/innodb/r/innodb-alter-table.result | 2 +- mysql-test/suite/innodb/r/innodb-autoinc.result | 4 +- .../suite/innodb_fts/r/innodb_fts_misc.result | 6 +- .../parts/r/partition_alter1_1_2_innodb.result | 224 +- .../parts/r/partition_alter1_1_2_myisam.result | 64 +- .../suite/parts/r/partition_alter1_1_innodb.result | 128 +- .../suite/parts/r/partition_alter1_1_myisam.result | 64 +- .../suite/parts/r/partition_alter1_2_innodb.result | 320 +- .../suite/parts/r/partition_alter1_2_myisam.result | 128 +- .../parts/r/partition_alter2_1_1_innodb.result | 80 +- .../suite/parts/r/partition_alter2_1_maria.result | 48 +- .../suite/parts/r/partition_alter2_1_myisam.result | 48 +- 
.../parts/r/partition_alter2_2_1_innodb.result | 80 +- .../suite/parts/r/partition_alter2_2_maria.result | 48 +- .../suite/parts/r/partition_alter2_2_myisam.result | 48 +- .../suite/parts/r/partition_alter3_innodb.result | 52 +- .../suite/parts/r/partition_alter3_myisam.result | 52 +- .../suite/parts/r/partition_alter4_innodb.result | 768 ++-- .../suite/parts/r/partition_alter4_myisam.result | 768 ++-- .../suite/parts/r/partition_basic_innodb.result | 256 +- .../suite/parts/r/partition_basic_myisam.result | 128 +- .../parts/r/partition_basic_symlink_myisam.result | 156 +- .../suite/parts/r/partition_engine_innodb.result | 44 +- .../suite/parts/r/partition_engine_myisam.result | 44 +- .../suite/parts/r/partition_syntax_innodb.result | 44 +- .../suite/parts/r/partition_syntax_myisam.result | 44 +- .../suite/rpl/r/rpl_extra_col_slave_innodb.result | 2 +- .../suite/rpl/r/rpl_extra_col_slave_myisam.result | 2 +- mysql-test/suite/rpl/r/rpl_mixed_ddl_dml.result | 2 +- mysql-test/suite/rpl/r/rpl_multi_engine.result | 48 +- .../suite/rpl/r/rpl_row_basic_8partition.result | 72 +- .../mysql-test/query_response_time/basic.result | 2 +- sql/sql_show.cc | 4 + storage/connect/mysql-test/connect/r/bin.result | 4 +- .../mysql-test/connect/r/mysql_discovery.result | 4 +- .../mysql-test/sql_discovery/simple.result | 2 +- .../rpl/r/rpl_extra_col_slave_tokudb.result | 2 +- .../tokudb/r/background_job_manager.result | 2 +- .../tokudb/r/change_column_int_default.result | 40 +- .../tokudb/mysql-test/tokudb/r/type_enum.result | 2 +- .../mysql-test/tokudb/r/type_newdecimal.result | 4 +- .../r/alter_column_default.result | 6 +- .../mysql-test/tokudb_mariadb/r/alter.result | 10 +- .../tokudb_parts/r/partition_alter3_tokudb.result | 52 +- .../tokudb_parts/r/partition_engine_tokudb.result | 44 +- .../tokudb_parts/r/partition_syntax_tokudb.result | 44 +- 103 files changed, 4256 insertions(+), 4252 deletions(-) diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index 3461038f85e..0801890c384 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -1504,7 +1504,7 @@ ALTER TABLE t1 DROP KEY IF EXISTS transaction_id, ADD PRIMARY KEY IF NOT EXISTS SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `transaction_id` int(11) NOT NULL DEFAULT '0', + `transaction_id` int(11) NOT NULL DEFAULT 0, PRIMARY KEY (`transaction_id`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; diff --git a/mysql-test/r/alter_table_mdev539_maria.result b/mysql-test/r/alter_table_mdev539_maria.result index 703908825d2..769c8b11b00 100644 --- a/mysql-test/r/alter_table_mdev539_maria.result +++ b/mysql-test/r/alter_table_mdev539_maria.result @@ -10,10 +10,10 @@ drop index `primary` on lineitem; show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -39,10 +39,10 @@ alter table lineitem add primary key (l_orderkey, l_linenumber); show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL 
DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -80,10 +80,10 @@ ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY' show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -114,10 +114,10 @@ alter table lineitem add primary key (l_orderkey, l_linenumber); show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, diff --git a/mysql-test/r/alter_table_mdev539_myisam.result b/mysql-test/r/alter_table_mdev539_myisam.result index 7140c544836..bf2a3f49203 100644 --- a/mysql-test/r/alter_table_mdev539_myisam.result +++ b/mysql-test/r/alter_table_mdev539_myisam.result @@ -10,10 +10,10 @@ drop index `primary` on lineitem; show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -39,10 +39,10 @@ alter table lineitem add primary key (l_orderkey, l_linenumber); show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -80,10 +80,10 @@ ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY' show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, @@ -114,10 +114,10 @@ alter table lineitem add primary key (l_orderkey, l_linenumber); show create table lineitem; Table Create Table lineitem CREATE TABLE `lineitem` ( - `l_orderkey` int(11) NOT NULL DEFAULT '0', + `l_orderkey` int(11) NOT NULL DEFAULT 0, `l_partkey` int(11) DEFAULT NULL, `l_suppkey` int(11) DEFAULT NULL, - `l_linenumber` int(11) NOT NULL DEFAULT '0', + `l_linenumber` int(11) NOT NULL DEFAULT 0, `l_quantity` double DEFAULT NULL, `l_extendedprice` double DEFAULT NULL, `l_discount` double DEFAULT NULL, diff --git a/mysql-test/r/bug46760.result 
b/mysql-test/r/bug46760.result index 46b8c23b95c..f05d6c5ef99 100644 --- a/mysql-test/r/bug46760.result +++ b/mysql-test/r/bug46760.result @@ -18,7 +18,7 @@ info: Records: 0 Duplicates: 0 Warnings: 0 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '10' + `a` int(11) DEFAULT 10 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE t1; # diff --git a/mysql-test/r/check_constraint.result b/mysql-test/r/check_constraint.result index babb42d93b8..2185395d347 100644 --- a/mysql-test/r/check_constraint.result +++ b/mysql-test/r/check_constraint.result @@ -54,7 +54,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL CHECK (a>10), `b` int(11) DEFAULT NULL CHECK (b > 20), - `c` int(11) DEFAULT '0' CHECK (c < 10), + `c` int(11) DEFAULT 0 CHECK (c < 10), CONSTRAINT `min` CHECK (a+b > 100), CONSTRAINT `max` CHECK (a+b <500), CONSTRAINT `CONSTRAINT_1` CHECK (a+b+c < 500) @@ -77,7 +77,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL CHECK (a>10), `b` int(11) DEFAULT NULL CHECK (b > 20), - `c` int(11) DEFAULT '0' CHECK (c < 10), + `c` int(11) DEFAULT 0 CHECK (c < 10), CONSTRAINT `min` CHECK (a+b > 100), CONSTRAINT `max` CHECK (a+b <500), CONSTRAINT `CONSTRAINT_1` CHECK (a+b+c < 500) @@ -93,7 +93,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL CHECK (a>10), `b` int(11) DEFAULT NULL CHECK (b > 20), - `c` int(11) DEFAULT '0' CHECK (c < 10), + `c` int(11) DEFAULT 0 CHECK (c < 10), CONSTRAINT `max` CHECK (a+b <500), CONSTRAINT `CONSTRAINT_1` CHECK (a+b+c < 500) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 diff --git a/mysql-test/r/create-uca.result b/mysql-test/r/create-uca.result index 2d2522a36a3..f405cb72b40 100644 --- a/mysql-test/r/create-uca.result +++ b/mysql-test/r/create-uca.result @@ -10,7 +10,7 @@ COLLATE latin1_bin; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `c1` int(11) DEFAULT '12' COMMENT 'column1', + `c1` int(11) DEFAULT 12 COMMENT 'column1', `c2` int(11) DEFAULT NULL COMMENT 'column2', `c3` int(11) NOT NULL COMMENT 'column3', `c4` varchar(255) CHARACTER SET utf8 NOT NULL DEFAULT 'a', @@ -21,7 +21,7 @@ CREATE TABLE t2 AS SELECT * FROM t1; SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `c1` int(11) DEFAULT '12' COMMENT 'column1', + `c1` int(11) DEFAULT 12 COMMENT 'column1', `c2` int(11) DEFAULT NULL COMMENT 'column2', `c3` int(11) NOT NULL COMMENT 'column3', `c4` varchar(255) CHARACTER SET utf8 NOT NULL DEFAULT 'a', diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 782c7c7f5bd..e7dfa79c803 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -803,8 +803,8 @@ select a1,a2 from t1; show create table t2; Table Create Table t2 CREATE TABLE `t2` ( - `a` int(11) DEFAULT '3', - `b` int(11) DEFAULT '3', + `a` int(11) DEFAULT 3, + `b` int(11) DEFAULT 3, `a1` int(11) DEFAULT NULL, `a2` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 @@ -1133,46 +1133,46 @@ create table t1 like information_schema.processlist; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT 
NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 drop table t1; create temporary table t1 like information_schema.processlist; show create table t1; Table Create Table t1 CREATE TEMPORARY TABLE `t1` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 drop table t1; create table t1 like information_schema.character_sets; @@ -1182,7 +1182,7 @@ t1 CREATE TABLE `t1` ( `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '', `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', - `MAXLEN` bigint(3) NOT NULL DEFAULT '0' + `MAXLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 drop table t1; @@ -1241,8 +1241,8 @@ CREATE TABLE t1(c1 YEAR DEFAULT 2008, c2 YEAR DEFAULT 0); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `c1` year(4) DEFAULT '2008', - `c2` year(4) DEFAULT '0000' + `c1` year(4) DEFAULT 2008, + `c2` year(4) DEFAULT 0000 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES(); @@ -1256,8 +1256,8 @@ ALTER TABLE t1 MODIFY c1 YEAR DEFAULT 0; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `c1` year(4) DEFAULT '0000', - `c2` year(4) DEFAULT '0000' + `c1` year(4) DEFAULT 0000, + `c2` year(4) DEFAULT 0000 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES(); diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index bd40b82fa6e..a9c4f2d754f 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -2275,7 +2275,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `clipid` int(11) NOT NULL, - `mos` tinyint(4) DEFAULT '0', + `mos` tinyint(4) DEFAULT 0, `Tape` tinytext DEFAULT NULL, PRIMARY KEY (`clipid`), KEY `tape` (`Tape`(255)) diff --git a/mysql-test/r/ctype_utf8mb4.result 
b/mysql-test/r/ctype_utf8mb4.result index 32fa9bd74c0..67b659e25b4 100644 --- a/mysql-test/r/ctype_utf8mb4.result +++ b/mysql-test/r/ctype_utf8mb4.result @@ -2387,7 +2387,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `clipid` int(11) NOT NULL, - `mos` tinyint(4) DEFAULT '0', + `mos` tinyint(4) DEFAULT 0, `Tape` tinytext DEFAULT NULL, PRIMARY KEY (`clipid`), KEY `tape` (`Tape`(250)) diff --git a/mysql-test/r/ctype_utf8mb4_innodb.result b/mysql-test/r/ctype_utf8mb4_innodb.result index d0fc581498d..28b566c19eb 100644 --- a/mysql-test/r/ctype_utf8mb4_innodb.result +++ b/mysql-test/r/ctype_utf8mb4_innodb.result @@ -2347,7 +2347,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `clipid` int(11) NOT NULL, - `mos` tinyint(4) DEFAULT '0', + `mos` tinyint(4) DEFAULT 0, `Tape` tinytext DEFAULT NULL, PRIMARY KEY (`clipid`), KEY `tape` (`Tape`(191)) diff --git a/mysql-test/r/ctype_utf8mb4_myisam.result b/mysql-test/r/ctype_utf8mb4_myisam.result index 2eb8d56b888..34145417966 100644 --- a/mysql-test/r/ctype_utf8mb4_myisam.result +++ b/mysql-test/r/ctype_utf8mb4_myisam.result @@ -2347,7 +2347,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `clipid` int(11) NOT NULL, - `mos` tinyint(4) DEFAULT '0', + `mos` tinyint(4) DEFAULT 0, `Tape` tinytext DEFAULT NULL, PRIMARY KEY (`clipid`), KEY `tape` (`Tape`(250)) diff --git a/mysql-test/r/default.result b/mysql-test/r/default.result index 7817016606f..f7037c9df48 100644 --- a/mysql-test/r/default.result +++ b/mysql-test/r/default.result @@ -269,7 +269,7 @@ create or replace table t1 (a int default 1, b int default (a+1), c int default show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', + `a` int(11) DEFAULT 1, `b` int(11) DEFAULT (a+1), `c` int(11) DEFAULT (a+b) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 @@ -343,8 +343,8 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, - `b` int(11) DEFAULT '1', - `c` int(11) DEFAULT '-1', + `b` int(11) DEFAULT 1, + `c` int(11) DEFAULT -1, `d` int(11) DEFAULT (1+1), `e` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `f` int(11) DEFAULT (1+1+1), @@ -356,8 +356,8 @@ show create table t2; Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL, - `b` int(11) DEFAULT '1', - `c` int(11) DEFAULT '-1', + `b` int(11) DEFAULT 1, + `c` int(11) DEFAULT -1, `d` int(11) DEFAULT (1+1), `e` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `f` int(11) DEFAULT (1+1+1), @@ -374,9 +374,9 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT (1----1), - `b` int(11) DEFAULT '-1', - `c` int(11) DEFAULT '1', - `e` int(11) DEFAULT '1' + `b` int(11) DEFAULT -1, + `c` int(11) DEFAULT 1, + `e` int(11) DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 insert into t1 values(); insert into t1 values(); @@ -520,7 +520,7 @@ execute stmt using @a; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1' + `a` int(11) DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; set @a=-1; @@ -528,7 +528,7 @@ execute stmt using @a; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '-1' + `a` int(11) DEFAULT -1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; DEALLOCATE PREPARE stmt; @@ -538,8 +538,8 @@ execute stmt using @a,@b; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', - `b` int(11) DEFAULT '2' + `a` int(11) 
DEFAULT 1, + `b` int(11) DEFAULT 2 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; DEALLOCATE PREPARE stmt; @@ -570,12 +570,12 @@ d03 DATETIME DEFAULT (((TIMESTAMP'2001-01-01 10:20:30'))) SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `i01` int(11) DEFAULT '1', - `i02` int(11) DEFAULT '14649', - `i03` int(11) DEFAULT '1', - `i04` int(11) DEFAULT '1', + `i01` int(11) DEFAULT 1, + `i02` int(11) DEFAULT 14649, + `i03` int(11) DEFAULT 1, + `i04` int(11) DEFAULT 1, `i05` int(11) DEFAULT NULL, - `f01` double DEFAULT '3.141592653589793', + `f01` double DEFAULT 3.141592653589793, `s01` varchar(10) DEFAULT 'test', `s02` varchar(10) DEFAULT 'test', `s03` varchar(10) DEFAULT '@', @@ -672,7 +672,7 @@ CREATE TABLE t1 (a INT DEFAULT 1 NOT NULL); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL DEFAULT '1' + `a` int(11) NOT NULL DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT); SELECT * FROM t1; @@ -697,7 +697,7 @@ CREATE TABLE t1 (a INT DEFAULT '1' NOT NULL); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL DEFAULT '1' + `a` int(11) NOT NULL DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT); SELECT * FROM t1; @@ -755,7 +755,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1' + `a` int(11) DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT); SELECT * FROM t1; @@ -799,7 +799,7 @@ CREATE TABLE t1 (a INT DEFAULT 0x61 NOT NULL); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL DEFAULT '97' + `a` int(11) NOT NULL DEFAULT 97 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT); SELECT * FROM t1; @@ -923,7 +923,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT DEFAULT(b), - `b` int(11) DEFAULT '1' + `b` int(11) DEFAULT 1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT, DEFAULT); SELECT * FROM t1; @@ -934,7 +934,7 @@ CREATE TABLE t1 (a INT DEFAULT 1, b INT DEFAULT(DEFAULT(a))); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', + `a` int(11) DEFAULT 1, `b` int(11) DEFAULT DEFAULT(a) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (DEFAULT, DEFAULT); @@ -1118,7 +1118,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT (3+3), - `b` int(11) DEFAULT '1000' + `b` int(11) DEFAULT 1000 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 insert into t1 values (1,1),(2,2); insert into t1 values (default,default); @@ -3080,7 +3080,7 @@ create table t1 (a int default 1, b int default (1+1), c int); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', + `a` int(11) DEFAULT 1, `b` int(11) DEFAULT (1+1), `c` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 @@ -3092,7 +3092,7 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT (2+3), - `b` int(11) DEFAULT '4', + `b` int(11) DEFAULT 4, `c` int(11) DEFAULT (-a) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; diff --git a/mysql-test/r/function_defaults.result b/mysql-test/r/function_defaults.result index 6840e5bbb6c..987c505f1fb 100644 --- a/mysql-test/r/function_defaults.result +++ b/mysql-test/r/function_defaults.result @@ -1232,7 +1232,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `g` 
timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -1284,7 +1284,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -1351,7 +1351,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -1389,7 +1389,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -2778,7 +2778,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -2830,7 +2830,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -2897,7 +2897,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -2935,7 +2935,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: diff --git a/mysql-test/r/function_defaults_innodb.result b/mysql-test/r/function_defaults_innodb.result index a877d27dba7..b539f70a3cb 100644 --- a/mysql-test/r/function_defaults_innodb.result +++ b/mysql-test/r/function_defaults_innodb.result @@ -1233,7 +1233,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -1285,7 +1285,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -1352,7 +1352,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) 
ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -1390,7 +1390,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -2779,7 +2779,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -2831,7 +2831,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file1.dat" INTO table t1; Warnings: @@ -2898,7 +2898,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: @@ -2936,7 +2936,7 @@ t1 CREATE TABLE `t1` ( `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `g` timestamp(6) NULL DEFAULT NULL, `h` int(11) DEFAULT NULL, - `i` int(11) NOT NULL DEFAULT '42' + `i` int(11) NOT NULL DEFAULT 42 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 LOAD DATA INFILE "file2.dat" INTO table t1; Warnings: diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index ee65077fbc2..d10cfec6003 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -1714,18 +1714,18 @@ GEOMETRY_COLUMNS CREATE TEMPORARY TABLE `GEOMETRY_COLUMNS` ( `G_TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '', `G_TABLE_NAME` varchar(64) NOT NULL DEFAULT '', `G_GEOMETRY_COLUMN` varchar(64) NOT NULL DEFAULT '', - `STORAGE_TYPE` tinyint(2) NOT NULL DEFAULT '0', - `GEOMETRY_TYPE` int(7) NOT NULL DEFAULT '0', - `COORD_DIMENSION` tinyint(2) NOT NULL DEFAULT '0', - `MAX_PPR` tinyint(2) NOT NULL DEFAULT '0', - `SRID` smallint(5) NOT NULL DEFAULT '0' + `STORAGE_TYPE` tinyint(2) NOT NULL DEFAULT 0, + `GEOMETRY_TYPE` int(7) NOT NULL DEFAULT 0, + `COORD_DIMENSION` tinyint(2) NOT NULL DEFAULT 0, + `MAX_PPR` tinyint(2) NOT NULL DEFAULT 0, + `SRID` smallint(5) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 SHOW CREATE TABLE information_schema.spatial_ref_sys; Table Create Table SPATIAL_REF_SYS CREATE TEMPORARY TABLE `SPATIAL_REF_SYS` ( - `SRID` smallint(5) NOT NULL DEFAULT '0', + `SRID` smallint(5) NOT NULL DEFAULT 0, `AUTH_NAME` varchar(512) NOT NULL DEFAULT '', - `AUTH_SRID` int(5) NOT NULL DEFAULT '0', + `AUTH_SRID` int(5) NOT NULL DEFAULT 0, `SRTEXT` varchar(2048) NOT NULL DEFAULT '' ) ENGINE=MEMORY DEFAULT CHARSET=utf8 create table t1(g GEOMETRY, pt POINT); diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 098332a7de6..db2e77636d2 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -585,7 +585,7 @@ CHARACTER_SETS CREATE TEMPORARY TABLE `CHARACTER_SETS` ( `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '', `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', - `MAXLEN` bigint(3) NOT NULL DEFAULT '0' + 
`MAXLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 set names latin2; SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets; @@ -594,7 +594,7 @@ CHARACTER_SETS CREATE TEMPORARY TABLE `CHARACTER_SETS` ( `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '', `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', - `MAXLEN` bigint(3) NOT NULL DEFAULT '0' + `MAXLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 set names latin1; create table t1 select * from information_schema.CHARACTER_SETS @@ -609,7 +609,7 @@ t1 CREATE TABLE `t1` ( `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '', `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', - `MAXLEN` bigint(3) NOT NULL DEFAULT '0' + `MAXLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=utf8 drop table t1; create view v1 as select * from information_schema.TABLES; diff --git a/mysql-test/r/information_schema_parameters.result b/mysql-test/r/information_schema_parameters.result index 08339f79270..4643b26a259 100644 --- a/mysql-test/r/information_schema_parameters.result +++ b/mysql-test/r/information_schema_parameters.result @@ -6,7 +6,7 @@ PARAMETERS CREATE TEMPORARY TABLE `PARAMETERS` ( `SPECIFIC_CATALOG` varchar(512) NOT NULL DEFAULT '', `SPECIFIC_SCHEMA` varchar(64) NOT NULL DEFAULT '', `SPECIFIC_NAME` varchar(64) NOT NULL DEFAULT '', - `ORDINAL_POSITION` int(21) NOT NULL DEFAULT '0', + `ORDINAL_POSITION` int(21) NOT NULL DEFAULT 0, `PARAMETER_MODE` varchar(5) DEFAULT NULL, `PARAMETER_NAME` varchar(64) DEFAULT NULL, `DATA_TYPE` varchar(64) NOT NULL DEFAULT '', diff --git a/mysql-test/r/mysql5613mysql.result b/mysql-test/r/mysql5613mysql.result index 9f32cb57a38..1d2c3b97baf 100644 --- a/mysql-test/r/mysql5613mysql.result +++ b/mysql-test/r/mysql5613mysql.result @@ -82,7 +82,7 @@ SHOW CREATE TABLE func; Table Create Table func CREATE TABLE `func` ( `name` char(64) COLLATE utf8_bin NOT NULL DEFAULT '', - `ret` tinyint(1) NOT NULL DEFAULT '0', + `ret` tinyint(1) NOT NULL DEFAULT 0, `dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '', `type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL, PRIMARY KEY (`name`) @@ -152,7 +152,7 @@ proxies_priv CREATE TABLE `proxies_priv` ( `User` char(16) COLLATE utf8_bin NOT NULL DEFAULT '', `Proxied_host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '', `Proxied_user` char(16) COLLATE utf8_bin NOT NULL DEFAULT '', - `With_grant` tinyint(1) NOT NULL DEFAULT '0', + `With_grant` tinyint(1) NOT NULL DEFAULT 0, `Grantor` char(77) COLLATE utf8_bin NOT NULL DEFAULT '', `Timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`Host`,`User`,`Proxied_host`,`Proxied_user`), @@ -169,7 +169,7 @@ servers CREATE TABLE `servers` ( `Db` char(64) NOT NULL DEFAULT '', `Username` char(64) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', - `Port` int(4) NOT NULL DEFAULT '0', + `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', `Owner` char(64) NOT NULL DEFAULT '', @@ -241,8 +241,8 @@ Table Create Table time_zone_transition_type CREATE TABLE `time_zone_transition_type` ( `Time_zone_id` int(10) unsigned NOT NULL, `Transition_type_id` int(10) unsigned NOT NULL, - `Offset` int(11) NOT NULL DEFAULT '0', - `Is_DST` tinyint(3) unsigned NOT NULL DEFAULT '0', + `Offset` int(11) NOT NULL DEFAULT 0, + `Is_DST` tinyint(3) unsigned NOT NULL DEFAULT 0, `Abbreviation` 
char(8) NOT NULL DEFAULT '', PRIMARY KEY (`Time_zone_id`,`Transition_type_id`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Time zone transition types' @@ -288,10 +288,10 @@ user CREATE TABLE `user` ( `ssl_cipher` blob NOT NULL, `x509_issuer` blob NOT NULL, `x509_subject` blob NOT NULL, - `max_questions` int(11) unsigned NOT NULL DEFAULT '0', - `max_updates` int(11) unsigned NOT NULL DEFAULT '0', - `max_connections` int(11) unsigned NOT NULL DEFAULT '0', - `max_user_connections` int(11) unsigned NOT NULL DEFAULT '0', + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) unsigned NOT NULL DEFAULT 0, `plugin` char(64) COLLATE utf8_bin DEFAULT '', `authentication_string` text COLLATE utf8_bin DEFAULT NULL, `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 71adc194693..99b0e287975 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -182,8 +182,8 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `ID` int(11) NOT NULL, - `aaaa,aaaaa` tinyint(3) unsigned NOT NULL DEFAULT '0', - `ddddddddd` int(11) NOT NULL DEFAULT '0', + `aaaa,aaaaa` tinyint(3) unsigned NOT NULL DEFAULT 0, + `ddddddddd` int(11) NOT NULL DEFAULT 0, `new_field0` varchar(50) DEFAULT NULL, PRIMARY KEY (`ID`,`aaaa,aaaaa`,`ddddddddd`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 diff --git a/mysql-test/r/partition_cache_innodb.result b/mysql-test/r/partition_cache_innodb.result index 271b23eed92..c12ff3588b0 100644 --- a/mysql-test/r/partition_cache_innodb.result +++ b/mysql-test/r/partition_cache_innodb.result @@ -21,7 +21,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, `created_at` datetime NOT NULL, - `cool` tinyint(4) DEFAULT '0' + `cool` tinyint(4) DEFAULT 0 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (TO_DAYS(created_at)) (PARTITION month_2010_4 VALUES LESS THAN (734258) ENGINE = InnoDB, @@ -70,7 +70,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, `created_at` datetime NOT NULL, - `cool` tinyint(4) DEFAULT '0' + `cool` tinyint(4) DEFAULT 0 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (TO_DAYS(created_at)) SUBPARTITION BY HASH (cool) diff --git a/mysql-test/r/partition_cache_myisam.result b/mysql-test/r/partition_cache_myisam.result index b7d3dc53599..d20a8baeab7 100644 --- a/mysql-test/r/partition_cache_myisam.result +++ b/mysql-test/r/partition_cache_myisam.result @@ -21,7 +21,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, `created_at` datetime NOT NULL, - `cool` tinyint(4) DEFAULT '0' + `cool` tinyint(4) DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (TO_DAYS(created_at)) (PARTITION month_2010_4 VALUES LESS THAN (734258) ENGINE = MyISAM, @@ -70,7 +70,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, `created_at` datetime NOT NULL, - `cool` tinyint(4) DEFAULT '0' + `cool` tinyint(4) DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 /*!50100 PARTITION BY RANGE (TO_DAYS(created_at)) SUBPARTITION BY HASH (cool) diff --git a/mysql-test/r/plugin_auth.result b/mysql-test/r/plugin_auth.result index 345d6861ad4..436db33af36 100644 --- a/mysql-test/r/plugin_auth.result +++ b/mysql-test/r/plugin_auth.result @@ -25,7 +25,7 @@ proxies_priv CREATE TABLE `proxies_priv` ( `User` char(80) COLLATE 
utf8_bin NOT NULL DEFAULT '', `Proxied_host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '', `Proxied_user` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', - `With_grant` tinyint(1) NOT NULL DEFAULT '0', + `With_grant` tinyint(1) NOT NULL DEFAULT 0, `Grantor` char(141) COLLATE utf8_bin NOT NULL DEFAULT '', `Timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`Host`,`User`,`Proxied_host`,`Proxied_user`), diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index d0bc80af2bc..d3c4ec94cb7 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -332,7 +332,7 @@ index(type_short) show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `type_bool` tinyint(1) NOT NULL DEFAULT '0', + `type_bool` tinyint(1) NOT NULL DEFAULT 0, `type_tiny` tinyint(4) NOT NULL AUTO_INCREMENT, `type_short` smallint(3) DEFAULT NULL, `type_mediumint` mediumint(9) DEFAULT NULL, diff --git a/mysql-test/r/sql_mode.result b/mysql-test/r/sql_mode.result index ac1ad1c625b..e1afb964f0a 100644 --- a/mysql-test/r/sql_mode.result +++ b/mysql-test/r/sql_mode.result @@ -121,7 +121,7 @@ create table t1 ( min_num dec(6,6) default .000001); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `min_num` decimal(6,6) DEFAULT '0.000001' + `min_num` decimal(6,6) DEFAULT 0.000001 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1 ; set session sql_mode = 'IGNORE_SPACE'; @@ -129,14 +129,14 @@ create table t1 ( min_num dec(6,6) default 0.000001); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `min_num` decimal(6,6) DEFAULT '0.000001' + `min_num` decimal(6,6) DEFAULT 0.000001 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1 ; create table t1 ( min_num dec(6,6) default .000001); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `min_num` decimal(6,6) DEFAULT '0.000001' + `min_num` decimal(6,6) DEFAULT 0.000001 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1 ; set @@SQL_MODE=NULL; diff --git a/mysql-test/r/strict.result b/mysql-test/r/strict.result index 9dcd5975411..4da77f21792 100644 --- a/mysql-test/r/strict.result +++ b/mysql-test/r/strict.result @@ -1015,7 +1015,7 @@ CREATE TABLE t1 (col1 INT NOT NULL default 99, col2 CHAR(6) NOT NULL); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE "t1" ( - "col1" int(11) NOT NULL DEFAULT '99', + "col1" int(11) NOT NULL DEFAULT 99, "col2" char(6) NOT NULL ) INSERT INTO t1 VALUES (1, 'hello'); diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index f6b3fc7cf25..92e75c5088d 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1232,24 +1232,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT 
NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/subselect_no_exists_to_in.result b/mysql-test/r/subselect_no_exists_to_in.result index 961b23f1028..1802ff59ea2 100644 --- a/mysql-test/r/subselect_no_exists_to_in.result +++ b/mysql-test/r/subselect_no_exists_to_in.result @@ -1236,24 +1236,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result index a7170a9dc45..1eb33fc80e7 100644 --- a/mysql-test/r/subselect_no_mat.result +++ b/mysql-test/r/subselect_no_mat.result @@ -1239,24 +1239,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result index 1e9749c2eb5..f65b505eeca 100644 --- a/mysql-test/r/subselect_no_opts.result +++ b/mysql-test/r/subselect_no_opts.result @@ -1235,24 +1235,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result index 23a093b2669..21ea53bf1b1 100644 --- a/mysql-test/r/subselect_no_scache.result +++ b/mysql-test/r/subselect_no_scache.result @@ -1238,24 +1238,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result index dd1810e47f1..aabf0674b2f 100644 --- a/mysql-test/r/subselect_no_semijoin.result +++ b/mysql-test/r/subselect_no_semijoin.result @@ -1235,24 +1235,24 @@ CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT 1)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT 1)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT 1)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a)` int(1) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a)` int(1) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT * FROM (SELECT 1 as a,(SELECT a+0)) a; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(1) NOT NULL DEFAULT '0', - `(SELECT a+0)` int(3) NOT NULL DEFAULT '0' + `a` int(1) NOT NULL DEFAULT 0, + `(SELECT a+0)` int(3) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; CREATE TABLE t1 SELECT (SELECT 1 as a UNION SELECT 1+1 limit 1,1) as a; diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result index 332609e90f1..414815f02b7 100644 --- a/mysql-test/r/system_mysql_db.result +++ b/mysql-test/r/system_mysql_db.result @@ -122,23 +122,23 @@ user CREATE TABLE `user` ( `ssl_cipher` blob NOT NULL, 
`x509_issuer` blob NOT NULL, `x509_subject` blob NOT NULL, - `max_questions` int(11) unsigned NOT NULL DEFAULT '0', - `max_updates` int(11) unsigned NOT NULL DEFAULT '0', - `max_connections` int(11) unsigned NOT NULL DEFAULT '0', - `max_user_connections` int(11) NOT NULL DEFAULT '0', + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) NOT NULL DEFAULT 0, `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '', `authentication_string` text COLLATE utf8_bin NOT NULL, `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', - `max_statement_time` decimal(12,6) NOT NULL DEFAULT '0.000000', + `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, PRIMARY KEY (`Host`,`User`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' show create table func; Table Create Table func CREATE TABLE `func` ( `name` char(64) COLLATE utf8_bin NOT NULL DEFAULT '', - `ret` tinyint(1) NOT NULL DEFAULT '0', + `ret` tinyint(1) NOT NULL DEFAULT 0, `dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '', `type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL, PRIMARY KEY (`name`) @@ -191,7 +191,7 @@ servers CREATE TABLE `servers` ( `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', - `Port` int(4) NOT NULL DEFAULT '0', + `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', `Owner` char(64) NOT NULL DEFAULT '', diff --git a/mysql-test/r/system_mysql_db_fix40123.result b/mysql-test/r/system_mysql_db_fix40123.result index 332609e90f1..414815f02b7 100644 --- a/mysql-test/r/system_mysql_db_fix40123.result +++ b/mysql-test/r/system_mysql_db_fix40123.result @@ -122,23 +122,23 @@ user CREATE TABLE `user` ( `ssl_cipher` blob NOT NULL, `x509_issuer` blob NOT NULL, `x509_subject` blob NOT NULL, - `max_questions` int(11) unsigned NOT NULL DEFAULT '0', - `max_updates` int(11) unsigned NOT NULL DEFAULT '0', - `max_connections` int(11) unsigned NOT NULL DEFAULT '0', - `max_user_connections` int(11) NOT NULL DEFAULT '0', + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) NOT NULL DEFAULT 0, `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '', `authentication_string` text COLLATE utf8_bin NOT NULL, `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', - `max_statement_time` decimal(12,6) NOT NULL DEFAULT '0.000000', + `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, PRIMARY KEY (`Host`,`User`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' show create table func; Table Create Table func CREATE TABLE `func` ( `name` char(64) COLLATE utf8_bin NOT NULL DEFAULT '', - `ret` tinyint(1) NOT NULL DEFAULT '0', + `ret` tinyint(1) NOT NULL DEFAULT 0, `dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '', `type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL, PRIMARY KEY (`name`) @@ -191,7 +191,7 @@ servers CREATE TABLE 
`servers` ( `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', - `Port` int(4) NOT NULL DEFAULT '0', + `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', `Owner` char(64) NOT NULL DEFAULT '', diff --git a/mysql-test/r/system_mysql_db_fix50030.result b/mysql-test/r/system_mysql_db_fix50030.result index 200200f0089..ed86a35c910 100644 --- a/mysql-test/r/system_mysql_db_fix50030.result +++ b/mysql-test/r/system_mysql_db_fix50030.result @@ -122,23 +122,23 @@ user CREATE TABLE `user` ( `ssl_cipher` blob NOT NULL, `x509_issuer` blob NOT NULL, `x509_subject` blob NOT NULL, - `max_questions` int(11) unsigned NOT NULL DEFAULT '0', - `max_updates` int(11) unsigned NOT NULL DEFAULT '0', - `max_connections` int(11) unsigned NOT NULL DEFAULT '0', - `max_user_connections` int(11) NOT NULL DEFAULT '0', + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) NOT NULL DEFAULT 0, `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '', `authentication_string` text COLLATE utf8_bin NOT NULL, `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', - `max_statement_time` decimal(12,6) NOT NULL DEFAULT '0.000000', + `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, PRIMARY KEY (`Host`,`User`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' show create table func; Table Create Table func CREATE TABLE `func` ( `name` char(64) COLLATE utf8_bin NOT NULL DEFAULT '', - `ret` tinyint(1) NOT NULL DEFAULT '0', + `ret` tinyint(1) NOT NULL DEFAULT 0, `dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '', `type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL, PRIMARY KEY (`name`) @@ -191,7 +191,7 @@ servers CREATE TABLE `servers` ( `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', - `Port` int(4) NOT NULL DEFAULT '0', + `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', `Owner` char(64) NOT NULL DEFAULT '', diff --git a/mysql-test/r/system_mysql_db_fix50117.result b/mysql-test/r/system_mysql_db_fix50117.result index 332609e90f1..414815f02b7 100644 --- a/mysql-test/r/system_mysql_db_fix50117.result +++ b/mysql-test/r/system_mysql_db_fix50117.result @@ -122,23 +122,23 @@ user CREATE TABLE `user` ( `ssl_cipher` blob NOT NULL, `x509_issuer` blob NOT NULL, `x509_subject` blob NOT NULL, - `max_questions` int(11) unsigned NOT NULL DEFAULT '0', - `max_updates` int(11) unsigned NOT NULL DEFAULT '0', - `max_connections` int(11) unsigned NOT NULL DEFAULT '0', - `max_user_connections` int(11) NOT NULL DEFAULT '0', + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) NOT NULL DEFAULT 0, `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '', `authentication_string` text COLLATE utf8_bin NOT NULL, `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT 
'', - `max_statement_time` decimal(12,6) NOT NULL DEFAULT '0.000000', + `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, PRIMARY KEY (`Host`,`User`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' show create table func; Table Create Table func CREATE TABLE `func` ( `name` char(64) COLLATE utf8_bin NOT NULL DEFAULT '', - `ret` tinyint(1) NOT NULL DEFAULT '0', + `ret` tinyint(1) NOT NULL DEFAULT 0, `dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '', `type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL, PRIMARY KEY (`name`) @@ -191,7 +191,7 @@ servers CREATE TABLE `servers` ( `Db` char(64) NOT NULL DEFAULT '', `Username` char(80) NOT NULL DEFAULT '', `Password` char(64) NOT NULL DEFAULT '', - `Port` int(4) NOT NULL DEFAULT '0', + `Port` int(4) NOT NULL DEFAULT 0, `Socket` char(64) NOT NULL DEFAULT '', `Wrapper` char(64) NOT NULL DEFAULT '', `Owner` char(64) NOT NULL DEFAULT '', diff --git a/mysql-test/r/type_enum.result b/mysql-test/r/type_enum.result index 14f5d97f426..5c5821da110 100644 --- a/mysql-test/r/type_enum.result +++ b/mysql-test/r/type_enum.result @@ -1669,7 +1669,7 @@ b ENUM('value',' show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', + `a` int(11) DEFAULT 1, `b` enum('value','öäü_value','ÊÃÕ') NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 show columns from t1; diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result index 55dde117e80..ed837dda975 100644 --- a/mysql-test/r/type_newdecimal.result +++ b/mysql-test/r/type_newdecimal.result @@ -975,8 +975,8 @@ f1 decimal (0,0) zerofill not null default 0); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `f0` decimal(30,30) unsigned zerofill NOT NULL DEFAULT '0.000000000000000000000000000000', - `f1` decimal(10,0) unsigned zerofill NOT NULL DEFAULT '0000000000' + `f0` decimal(30,30) unsigned zerofill NOT NULL DEFAULT 0.000000000000000000000000000000, + `f1` decimal(10,0) unsigned zerofill NOT NULL DEFAULT 0000000000 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; drop procedure if exists wg2; @@ -2190,7 +2190,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.0' + `a` decimal(2,1) DEFAULT 0.0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (a DECIMAL(2,1) DEFAULT '0.1 '); @@ -2199,7 +2199,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.1' + `a` decimal(2,1) DEFAULT 0.1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (a DECIMAL(2,1) DEFAULT '0.10001 '); @@ -2208,7 +2208,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.1' + `a` decimal(2,1) DEFAULT 0.1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (a DECIMAL(2,1) DEFAULT '0.10001'); @@ -2217,7 +2217,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.1' + `a` decimal(2,1) DEFAULT 0.1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (a DECIMAL(2,1) DEFAULT 0.10001); @@ -2226,7 +2226,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.1' + `a` decimal(2,1) 
DEFAULT 0.1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (a DECIMAL(2,1) DEFAULT 0.10001e0); @@ -2235,7 +2235,7 @@ Note 1265 Data truncated for column 'a' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(2,1) DEFAULT '0.1' + `a` decimal(2,1) DEFAULT 0.1 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; # diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index 308a4c7c369..a692af93b37 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -629,7 +629,7 @@ a show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` decimal(3,1) NOT NULL DEFAULT '0.0' + `a` decimal(3,1) NOT NULL DEFAULT 0.0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; create table t2 (it1 tinyint, it2 tinyint not null, i int not null, ib bigint, f float, d double, y year, da date, dt datetime, sc char(10), sv varchar(10), b blob, tx text); @@ -653,7 +653,7 @@ it2 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `it2` int(11) NOT NULL DEFAULT '0' + `it2` int(11) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; create table t1 SELECT i from t2 UNION select f from t2; @@ -852,7 +852,7 @@ select * from t1; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `1` bigint(20) NOT NULL DEFAULT '0' + `1` bigint(20) NOT NULL DEFAULT 0 ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; create table t1 select _latin1"test" union select _latin2"testt" ; diff --git a/mysql-test/suite/archive/archive.result b/mysql-test/suite/archive/archive.result index d0ab319c4fa..603621abf2a 100644 --- a/mysql-test/suite/archive/archive.result +++ b/mysql-test/suite/archive/archive.result @@ -11136,8 +11136,8 @@ SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `auto` int(11) DEFAULT NULL, - `fld1` int(6) unsigned zerofill NOT NULL DEFAULT '000000', - `companynr` tinyint(2) unsigned zerofill NOT NULL DEFAULT '00', + `fld1` int(6) unsigned zerofill NOT NULL DEFAULT 000000, + `companynr` tinyint(2) unsigned zerofill NOT NULL DEFAULT 00, `fld3` char(30) NOT NULL DEFAULT '', `fld4` char(35) NOT NULL DEFAULT '', `fld5` char(35) NOT NULL DEFAULT '' diff --git a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result index f5dba1548df..47bcfea87c8 100644 --- a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result +++ b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result @@ -59,7 +59,7 @@ Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, `a` varchar(255) DEFAULT NULL, - `b` int(11) DEFAULT '2', + `b` int(11) DEFAULT 2, PRIMARY KEY (`id`), KEY `a` (`a`), KEY `b` (`b`) @@ -69,7 +69,7 @@ Table Create Table t2 CREATE TABLE `t2` ( `id` int(11) NOT NULL, `a` varchar(255) DEFAULT NULL, - `b` int(11) DEFAULT '2', + `b` int(11) DEFAULT 2, PRIMARY KEY (`id`), KEY `a` (`a`), KEY `b` (`b`) @@ -79,7 +79,7 @@ Table Create Table t3 CREATE TABLE `t3` ( `id` int(11) DEFAULT NULL, `a` varchar(255) DEFAULT NULL, - `c` int(11) DEFAULT '5', + `c` int(11) DEFAULT 5, KEY `a` (`a`), KEY `c` (`c`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 `encrypted`=yes @@ -88,7 +88,7 @@ Table Create Table t4 CREATE TABLE `t4` ( `id` int(11) DEFAULT NULL, `a` varchar(255) DEFAULT NULL, - `c` int(11) DEFAULT '5', + `c` int(11) DEFAULT 5, KEY `a` (`a`), KEY `c` (`c`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 @@ -116,7 +116,7 @@ Table Create Table t7 CREATE TABLE `t7` ( `id` int(11) NOT NULL, `a` 
varchar(255) DEFAULT NULL, - `b` int(11) DEFAULT '2', + `b` int(11) DEFAULT 2, PRIMARY KEY (`id`), KEY `a` (`a`), KEY `b` (`b`) diff --git a/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result b/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result index 2b45626f08d..ffd3c000c7d 100644 --- a/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result +++ b/mysql-test/suite/engines/funcs/r/ta_set_drop_default.result @@ -19,7 +19,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` tinyint(4) NOT NULL, - `c2` tinyint(4) DEFAULT '10', + `c2` tinyint(4) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -60,7 +60,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` smallint(6) NOT NULL, - `c2` smallint(6) DEFAULT '10', + `c2` smallint(6) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -101,7 +101,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` mediumint(9) NOT NULL, - `c2` mediumint(9) DEFAULT '10', + `c2` mediumint(9) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -142,7 +142,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11) DEFAULT '10', + `c2` int(11) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -183,7 +183,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11) DEFAULT '10', + `c2` int(11) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -224,7 +224,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` bigint(20) NOT NULL, - `c2` bigint(20) DEFAULT '10', + `c2` bigint(20) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -265,7 +265,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double DEFAULT '10', + `c2` double DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -306,7 +306,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double DEFAULT '10', + `c2` double DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -347,7 +347,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` float NOT NULL, - `c2` float DEFAULT '10', + `c2` float DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -388,7 +388,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0) DEFAULT '10', + `c2` decimal(10,0) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -429,7 +429,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0) DEFAULT '10', + `c2` decimal(10,0) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER c2 DROP DEFAULT; @@ -470,7 +470,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` tinyint(4) NOT NULL, - `c2` tinyint(4) DEFAULT '10', + `c2` tinyint(4) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -511,7 +511,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` smallint(6) NOT NULL, - `c2` smallint(6) DEFAULT '10', + `c2` smallint(6) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -552,7 +552,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` mediumint(9) NOT NULL, - `c2` mediumint(9) DEFAULT '10', + `c2` mediumint(9) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -593,7 +593,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11) DEFAULT '10', + `c2` int(11) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -634,7 +634,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` int(11) NOT NULL, - `c2` int(11) DEFAULT '10', + `c2` int(11) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -675,7 +675,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` bigint(20) NOT NULL, - `c2` bigint(20) DEFAULT '10', + `c2` bigint(20) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -716,7 +716,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double DEFAULT '10', + `c2` double DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -757,7 +757,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` double NOT NULL, - `c2` double DEFAULT '10', + `c2` double DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -798,7 +798,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` float NOT NULL, - `c2` float DEFAULT '10', + `c2` float DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -839,7 +839,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0) DEFAULT '10', + `c2` decimal(10,0) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; @@ -880,7 +880,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c1` decimal(10,0) NOT NULL, - `c2` decimal(10,0) DEFAULT '10', + `c2` decimal(10,0) DEFAULT 10, PRIMARY KEY (`c1`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 ALTER TABLE t1 ALTER COLUMN c2 DROP DEFAULT; diff --git a/mysql-test/suite/engines/funcs/r/tc_column_default_decimal.result b/mysql-test/suite/engines/funcs/r/tc_column_default_decimal.result index 8a70602c0cc..b049451b2b7 100644 --- a/mysql-test/suite/engines/funcs/r/tc_column_default_decimal.result +++ b/mysql-test/suite/engines/funcs/r/tc_column_default_decimal.result @@ -7,7 +7,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) DEFAULT '2' + `c1` decimal(5,0) DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -19,7 +19,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) DEFAULT '3.1234' + `c1` decimal(5,4) DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW 
TABLES; @@ -31,7 +31,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) DEFAULT '2' + `c1` decimal(5,0) DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -43,7 +43,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) DEFAULT '3.1234' + `c1` decimal(5,4) DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -55,7 +55,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) DEFAULT '2' + `c1` decimal(5,0) DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -67,7 +67,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) DEFAULT '3.1234' + `c1` decimal(5,4) DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -79,7 +79,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) DEFAULT '2' + `c1` decimal(5,0) DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -91,7 +91,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) DEFAULT '3.1234' + `c1` decimal(5,4) DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -103,7 +103,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) NOT NULL DEFAULT '2' + `c1` decimal(5,0) NOT NULL DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -115,7 +115,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) NOT NULL DEFAULT '3.1234' + `c1` decimal(5,4) NOT NULL DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -127,7 +127,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) NOT NULL DEFAULT '2' + `c1` decimal(5,0) NOT NULL DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -139,7 +139,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) NOT NULL DEFAULT '3.1234' + `c1` decimal(5,4) NOT NULL DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -151,7 +151,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) NOT NULL DEFAULT '2' + `c1` decimal(5,0) NOT NULL DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -163,7 +163,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) NOT NULL DEFAULT '3.1234' + `c1` decimal(5,4) NOT NULL DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; @@ -175,7 +175,7 @@ t12 SHOW CREATE TABLE t12; Table Create Table t12 CREATE TABLE `t12` ( - `c1` decimal(5,0) NOT NULL DEFAULT '2' + `c1` decimal(5,0) NOT NULL DEFAULT 2 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t12; SHOW TABLES; @@ -187,7 +187,7 @@ t13 SHOW CREATE TABLE t13; Table Create Table t13 CREATE TABLE `t13` ( - `c1` decimal(5,4) NOT NULL DEFAULT '3.1234' + `c1` decimal(5,4) NOT NULL DEFAULT 3.1234 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t13; SHOW TABLES; diff --git a/mysql-test/suite/engines/funcs/r/tc_column_default_number.result b/mysql-test/suite/engines/funcs/r/tc_column_default_number.result index 96326634a49..3cfb17f7e6b 100644 --- 
a/mysql-test/suite/engines/funcs/r/tc_column_default_number.result +++ b/mysql-test/suite/engines/funcs/r/tc_column_default_number.result @@ -6,7 +6,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` tinyint(5) DEFAULT '1' + `c1` tinyint(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -18,7 +18,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` smallint(5) DEFAULT '1' + `c1` smallint(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -30,7 +30,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` mediumint(5) DEFAULT '1' + `c1` mediumint(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -42,7 +42,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` int(5) DEFAULT '1' + `c1` int(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -54,7 +54,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` int(5) DEFAULT '1' + `c1` int(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -66,7 +66,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` bigint(5) DEFAULT '1' + `c1` bigint(5) DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -78,7 +78,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` tinyint(5) NOT NULL DEFAULT '1' + `c1` tinyint(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -90,7 +90,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` smallint(5) NOT NULL DEFAULT '1' + `c1` smallint(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -102,7 +102,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` mediumint(5) NOT NULL DEFAULT '1' + `c1` mediumint(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -114,7 +114,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` int(5) NOT NULL DEFAULT '1' + `c1` int(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -126,7 +126,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` int(5) NOT NULL DEFAULT '1' + `c1` int(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; @@ -138,7 +138,7 @@ t11 SHOW CREATE TABLE t11; Table Create Table t11 CREATE TABLE `t11` ( - `c1` bigint(5) NOT NULL DEFAULT '1' + `c1` bigint(5) NOT NULL DEFAULT 1 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 DROP TABLE t11; SHOW TABLES; diff --git a/mysql-test/suite/engines/funcs/r/tc_multicolumn_different.result b/mysql-test/suite/engines/funcs/r/tc_multicolumn_different.result index af518d114e3..16be4f4b08c 100644 --- a/mysql-test/suite/engines/funcs/r/tc_multicolumn_different.result +++ b/mysql-test/suite/engines/funcs/r/tc_multicolumn_different.result @@ -11,7 +11,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30,7 +30,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, 
- `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -49,7 +49,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -68,7 +68,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -87,7 +87,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -106,7 +106,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -125,7 +125,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -144,7 +144,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -163,7 +163,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -182,7 +182,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -201,7 +201,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -220,7 +220,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -239,7 +239,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -258,7 +258,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -277,7 +277,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -296,7 +296,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -315,7 +315,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -334,7 +334,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -353,7 +353,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -372,7 +372,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -391,7 +391,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -410,7 +410,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -429,7 +429,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -448,7 +448,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -467,7 +467,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL 
DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -486,7 +486,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -505,7 +505,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -524,7 +524,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -543,7 +543,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -562,7 +562,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -581,7 +581,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -600,7 +600,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -619,7 +619,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -638,7 +638,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -657,7 +657,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -676,7 +676,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -695,7 +695,7 @@ m3 
CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -714,7 +714,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -733,7 +733,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -752,7 +752,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -771,7 +771,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -790,7 +790,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -809,7 +809,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -828,7 +828,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -847,7 +847,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -866,7 +866,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -885,7 +885,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -904,7 +904,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -923,7 +923,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -942,7 +942,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -961,7 +961,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -980,7 +980,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -999,7 +999,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1018,7 +1018,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1037,7 +1037,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1056,7 +1056,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1075,7 +1075,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1094,7 +1094,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1113,7 +1113,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -1132,7 +1132,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1151,7 +1151,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1170,7 +1170,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1189,7 +1189,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1208,7 +1208,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1227,7 +1227,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1246,7 +1246,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1265,7 +1265,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1284,7 +1284,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1303,7 +1303,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1322,7 +1322,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1341,7 +1341,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + 
`c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1360,7 +1360,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1379,7 +1379,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1398,7 +1398,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1417,7 +1417,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1436,7 +1436,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1455,7 +1455,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1474,7 +1474,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1493,7 +1493,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1512,7 +1512,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1531,7 +1531,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1550,7 +1550,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1569,7 +1569,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` 
char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1588,7 +1588,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1607,7 +1607,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1626,7 +1626,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1645,7 +1645,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1664,7 +1664,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1683,7 +1683,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1702,7 +1702,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1721,7 +1721,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1740,7 +1740,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1759,7 +1759,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1778,7 +1778,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) 
ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1797,7 +1797,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1816,7 +1816,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1835,7 +1835,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1854,7 +1854,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1873,7 +1873,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1892,7 +1892,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1911,7 +1911,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1930,7 +1930,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1949,7 +1949,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1968,7 +1968,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -1987,7 +1987,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2006,7 +2006,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) 
NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2025,7 +2025,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2044,7 +2044,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2063,7 +2063,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2082,7 +2082,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2101,7 +2101,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2120,7 +2120,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2139,7 +2139,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2158,7 +2158,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2177,7 +2177,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2196,7 +2196,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2215,7 +2215,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2234,7 +2234,7 
@@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2253,7 +2253,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2272,7 +2272,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2291,7 +2291,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2310,7 +2310,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2329,7 +2329,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2348,7 +2348,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2367,7 +2367,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2386,7 +2386,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2405,7 +2405,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2424,7 +2424,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2443,7 +2443,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - 
`c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2462,7 +2462,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2481,7 +2481,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2500,7 +2500,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2519,7 +2519,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2538,7 +2538,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2557,7 +2557,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2576,7 +2576,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2595,7 +2595,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2614,7 +2614,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2633,7 +2633,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2652,7 +2652,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), 
UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2671,7 +2671,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2690,7 +2690,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2709,7 +2709,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2728,7 +2728,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2747,7 +2747,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2766,7 +2766,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2785,7 +2785,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2804,7 +2804,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2823,7 +2823,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2842,7 +2842,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2861,7 +2861,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2880,7 +2880,7 @@ m3 CREATE TABLE `m3` ( 
`c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2899,7 +2899,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2918,7 +2918,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2937,7 +2937,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2956,7 +2956,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2975,7 +2975,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -2994,7 +2994,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3013,7 +3013,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3032,7 +3032,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3051,7 +3051,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3070,7 +3070,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3089,7 +3089,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL 
DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3108,7 +3108,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3127,7 +3127,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3146,7 +3146,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3165,7 +3165,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3184,7 +3184,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3203,7 +3203,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3222,7 +3222,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3241,7 +3241,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3260,7 +3260,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3279,7 +3279,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3298,7 +3298,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY 
`c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3317,7 +3317,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3336,7 +3336,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3355,7 +3355,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3374,7 +3374,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3393,7 +3393,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3412,7 +3412,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3431,7 +3431,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3450,7 +3450,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3469,7 +3469,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3488,7 +3488,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3507,7 +3507,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3526,7 +3526,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` 
varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3545,7 +3545,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3564,7 +3564,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3583,7 +3583,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3602,7 +3602,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3621,7 +3621,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3640,7 +3640,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3659,7 +3659,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3678,7 +3678,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3697,7 +3697,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3716,7 +3716,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3735,7 +3735,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` 
(`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3754,7 +3754,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3773,7 +3773,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3792,7 +3792,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3811,7 +3811,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3830,7 +3830,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3849,7 +3849,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3868,7 +3868,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3887,7 +3887,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3906,7 +3906,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3925,7 +3925,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3944,7 +3944,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3963,7 +3963,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, 
`c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -3982,7 +3982,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4001,7 +4001,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4020,7 +4020,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4039,7 +4039,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4058,7 +4058,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4077,7 +4077,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4096,7 +4096,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4115,7 +4115,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4134,7 +4134,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4153,7 +4153,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4172,7 +4172,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY 
`c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4191,7 +4191,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4210,7 +4210,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4229,7 +4229,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4248,7 +4248,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4267,7 +4267,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4286,7 +4286,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4305,7 +4305,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4324,7 +4324,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4343,7 +4343,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4362,7 +4362,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4381,7 +4381,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4400,7 +4400,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, 
`c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4419,7 +4419,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4438,7 +4438,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4457,7 +4457,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4476,7 +4476,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4495,7 +4495,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4514,7 +4514,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4533,7 +4533,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4552,7 +4552,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4571,7 +4571,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4590,7 +4590,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4609,7 +4609,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4628,7 +4628,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4647,7 +4647,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4666,7 +4666,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4685,7 +4685,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4704,7 +4704,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4723,7 +4723,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4742,7 +4742,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4761,7 +4761,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4780,7 +4780,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4799,7 +4799,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4818,7 +4818,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4837,7 +4837,7 @@ m3 CREATE 
TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4856,7 +4856,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4875,7 +4875,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4894,7 +4894,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4913,7 +4913,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4932,7 +4932,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4951,7 +4951,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4970,7 +4970,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -4989,7 +4989,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5008,7 +5008,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5027,7 +5027,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5046,7 +5046,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL 
DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5065,7 +5065,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5084,7 +5084,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5103,7 +5103,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5122,7 +5122,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5141,7 +5141,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5160,7 +5160,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5179,7 +5179,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5198,7 +5198,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5217,7 +5217,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5236,7 +5236,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5255,7 +5255,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -5274,7 +5274,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5293,7 +5293,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5312,7 +5312,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5331,7 +5331,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5350,7 +5350,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5369,7 +5369,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5388,7 +5388,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5407,7 +5407,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5426,7 +5426,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5445,7 +5445,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5464,7 +5464,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5483,7 +5483,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, 
`c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5502,7 +5502,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5521,7 +5521,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5540,7 +5540,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5559,7 +5559,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5578,7 +5578,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5597,7 +5597,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5616,7 +5616,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5635,7 +5635,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5654,7 +5654,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5673,7 +5673,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5692,7 +5692,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + 
`c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5711,7 +5711,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5730,7 +5730,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5749,7 +5749,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5768,7 +5768,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5787,7 +5787,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5806,7 +5806,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5825,7 +5825,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5844,7 +5844,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5863,7 +5863,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5882,7 +5882,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5901,7 +5901,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5920,7 +5920,7 @@ m3 CREATE TABLE `m3` 
( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5939,7 +5939,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5958,7 +5958,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5977,7 +5977,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -5996,7 +5996,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6015,7 +6015,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6034,7 +6034,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6053,7 +6053,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6072,7 +6072,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6091,7 +6091,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6110,7 +6110,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6129,7 +6129,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, 
PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6148,7 +6148,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6167,7 +6167,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6186,7 +6186,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6205,7 +6205,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6224,7 +6224,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6243,7 +6243,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6262,7 +6262,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6281,7 +6281,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6300,7 +6300,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6319,7 +6319,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6338,7 +6338,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6357,7 +6357,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, 
`c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6376,7 +6376,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6395,7 +6395,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6414,7 +6414,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6433,7 +6433,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6452,7 +6452,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6471,7 +6471,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6490,7 +6490,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6509,7 +6509,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6528,7 +6528,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6547,7 +6547,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6566,7 +6566,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY 
`c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6585,7 +6585,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6604,7 +6604,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6623,7 +6623,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6642,7 +6642,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6661,7 +6661,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6680,7 +6680,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6699,7 +6699,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6718,7 +6718,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6737,7 +6737,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6756,7 +6756,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6775,7 +6775,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6794,7 +6794,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` 
timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6813,7 +6813,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6832,7 +6832,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6851,7 +6851,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6870,7 +6870,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6889,7 +6889,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6908,7 +6908,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6927,7 +6927,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6946,7 +6946,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6965,7 +6965,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -6984,7 +6984,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7003,7 +7003,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7022,7 +7022,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7041,7 +7041,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7060,7 +7060,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7079,7 +7079,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7098,7 +7098,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7117,7 +7117,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7136,7 +7136,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7155,7 +7155,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7174,7 +7174,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7193,7 +7193,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7212,7 +7212,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) 
ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7231,7 +7231,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7250,7 +7250,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7269,7 +7269,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7288,7 +7288,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7307,7 +7307,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7326,7 +7326,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7345,7 +7345,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7364,7 +7364,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7383,7 +7383,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7402,7 +7402,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7421,7 +7421,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7440,7 +7440,7 @@ m3 CREATE TABLE `m3` ( 
`c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7459,7 +7459,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7478,7 +7478,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7497,7 +7497,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7516,7 +7516,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7535,7 +7535,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7554,7 +7554,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7573,7 +7573,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7592,7 +7592,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7611,7 +7611,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7630,7 +7630,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7649,7 +7649,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` 
decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7668,7 +7668,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7687,7 +7687,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7706,7 +7706,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7725,7 +7725,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7744,7 +7744,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7763,7 +7763,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7782,7 +7782,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7801,7 +7801,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7820,7 +7820,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7839,7 +7839,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7858,7 +7858,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7877,7 +7877,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7896,7 +7896,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7915,7 +7915,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7934,7 +7934,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7953,7 +7953,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7972,7 +7972,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -7991,7 +7991,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8010,7 +8010,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8029,7 +8029,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8048,7 +8048,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8067,7 +8067,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8086,7 
+8086,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8105,7 +8105,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8124,7 +8124,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8143,7 +8143,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8162,7 +8162,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8181,7 +8181,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8200,7 +8200,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8219,7 +8219,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8238,7 +8238,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8257,7 +8257,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8276,7 +8276,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8295,7 +8295,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8314,7 +8314,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8333,7 +8333,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8352,7 +8352,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8371,7 +8371,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8390,7 +8390,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8409,7 +8409,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8428,7 +8428,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8447,7 +8447,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8466,7 +8466,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8485,7 +8485,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8504,7 +8504,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 @@ -8523,7 +8523,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8542,7 +8542,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8561,7 +8561,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8580,7 +8580,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8599,7 +8599,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8618,7 +8618,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8637,7 +8637,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8656,7 +8656,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8675,7 +8675,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8694,7 +8694,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8713,7 +8713,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8732,7 +8732,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) 
NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8751,7 +8751,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8770,7 +8770,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8789,7 +8789,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8808,7 +8808,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8827,7 +8827,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8846,7 +8846,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8865,7 +8865,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8884,7 +8884,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8903,7 +8903,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8922,7 +8922,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8941,7 +8941,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8960,7 +8960,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8979,7 +8979,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -8998,7 +8998,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9017,7 +9017,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9036,7 +9036,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9055,7 +9055,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9074,7 +9074,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9093,7 +9093,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9112,7 +9112,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9131,7 +9131,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9150,7 +9150,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9169,7 +9169,7 @@ m3 CREATE 
TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9188,7 +9188,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9207,7 +9207,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9226,7 +9226,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9245,7 +9245,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9264,7 +9264,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9283,7 +9283,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9302,7 +9302,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9321,7 +9321,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9340,7 +9340,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9359,7 +9359,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9378,7 +9378,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9397,7 +9397,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9416,7 +9416,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9435,7 +9435,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9454,7 +9454,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9473,7 +9473,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9492,7 +9492,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9511,7 +9511,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9530,7 +9530,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9549,7 +9549,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9568,7 +9568,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9587,7 +9587,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9606,7 +9606,7 @@ 
m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9625,7 +9625,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9644,7 +9644,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9663,7 +9663,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9682,7 +9682,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9701,7 +9701,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9720,7 +9720,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9739,7 +9739,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9758,7 +9758,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9777,7 +9777,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9796,7 +9796,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9815,7 +9815,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9834,7 +9834,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9853,7 +9853,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9872,7 +9872,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9891,7 +9891,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9910,7 +9910,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9929,7 +9929,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9948,7 +9948,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9967,7 +9967,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -9986,7 +9986,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10005,7 +10005,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10024,7 +10024,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -10043,7 +10043,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10062,7 +10062,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10081,7 +10081,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10100,7 +10100,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10119,7 +10119,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10138,7 +10138,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10157,7 +10157,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10176,7 +10176,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10195,7 +10195,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10214,7 +10214,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10233,7 +10233,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10252,7 +10252,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` 
char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10271,7 +10271,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10290,7 +10290,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10309,7 +10309,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10328,7 +10328,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10347,7 +10347,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10366,7 +10366,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10385,7 +10385,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10404,7 +10404,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10423,7 +10423,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10442,7 +10442,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10461,7 +10461,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE 
KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10480,7 +10480,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10499,7 +10499,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10518,7 +10518,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10537,7 +10537,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10556,7 +10556,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10575,7 +10575,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10594,7 +10594,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10613,7 +10613,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10632,7 +10632,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10651,7 +10651,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10670,7 +10670,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -10689,7 +10689,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` 
decimal(10,8) NOT NULL DEFAULT '3.14159200',
+ `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200,
 PRIMARY KEY (`c4`),
 UNIQUE KEY `c5` (`c5`)
 ) ENGINE=ENGINE DEFAULT CHARSET=latin1
[Hunks @@ -10708,7 +10708,7 @@ through @@ -15211,7 +15211,7 @@ of this result file repeat the identical one-line change, "- `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200'," becoming "+ `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200,", once per SHOW CREATE TABLE `m3` block. Only the surrounding context lines differ, cycling `c3` over tinyint(4), smallint(4), mediumint(4), int(4) and bigint(4); `c4` over char(15), varchar(15) and binary(15); and `c5` over bit(1), tinyint(4), smallint(6), mediumint(9), int(11), bigint(20), decimal(10,0), double, float, date, time, timestamp, datetime and year(4).]
@@ -15230,7 +15230,7 @@ m3 CREATE TABLE `m3` (
 `c3` int(4) DEFAULT NULL,
 `c4` binary(15)
NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15249,7 +15249,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15268,7 +15268,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15287,7 +15287,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15306,7 +15306,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15325,7 +15325,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15344,7 +15344,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15363,7 +15363,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15382,7 +15382,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15401,7 +15401,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15420,7 +15420,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15439,7 +15439,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY 
(`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15458,7 +15458,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15477,7 +15477,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15496,7 +15496,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15515,7 +15515,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15534,7 +15534,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15553,7 +15553,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15572,7 +15572,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15591,7 +15591,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15610,7 +15610,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15629,7 +15629,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15648,7 +15648,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15667,7 +15667,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT 
NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15686,7 +15686,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15705,7 +15705,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15724,7 +15724,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15743,7 +15743,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15762,7 +15762,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15781,7 +15781,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15800,7 +15800,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15819,7 +15819,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15838,7 +15838,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15857,7 +15857,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15876,7 +15876,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15895,7 +15895,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15914,7 +15914,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15933,7 +15933,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15952,7 +15952,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15971,7 +15971,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -15990,7 +15990,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16009,7 +16009,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16028,7 +16028,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16047,7 +16047,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16066,7 +16066,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16085,7 +16085,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 @@ -16104,7 +16104,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16123,7 +16123,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16142,7 +16142,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16161,7 +16161,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16180,7 +16180,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16199,7 +16199,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16218,7 +16218,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16237,7 +16237,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16256,7 +16256,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16275,7 +16275,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16294,7 +16294,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16313,7 +16313,7 @@ m3 CREATE TABLE 
`m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16332,7 +16332,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16351,7 +16351,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16370,7 +16370,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16389,7 +16389,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16408,7 +16408,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16427,7 +16427,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16446,7 +16446,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16465,7 +16465,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16484,7 +16484,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16503,7 +16503,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16522,7 +16522,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` 
decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16541,7 +16541,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16560,7 +16560,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16579,7 +16579,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16598,7 +16598,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16617,7 +16617,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16636,7 +16636,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16655,7 +16655,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16674,7 +16674,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16693,7 +16693,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16712,7 +16712,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16731,7 +16731,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16750,7 +16750,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16769,7 +16769,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16788,7 +16788,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16807,7 +16807,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16826,7 +16826,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16845,7 +16845,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16864,7 +16864,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16883,7 +16883,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16902,7 +16902,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16921,7 +16921,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16940,7 +16940,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16959,7 +16959,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16978,7 +16978,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -16997,7 +16997,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17016,7 +17016,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17035,7 +17035,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17054,7 +17054,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17073,7 +17073,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17092,7 +17092,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17111,7 +17111,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17130,7 +17130,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17149,7 +17149,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -17168,7 +17168,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17187,7 +17187,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17206,7 +17206,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17225,7 +17225,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17244,7 +17244,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17263,7 +17263,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17282,7 +17282,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17301,7 +17301,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17320,7 +17320,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17339,7 +17339,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17358,7 +17358,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17377,7 +17377,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` 
float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17396,7 +17396,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17415,7 +17415,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17434,7 +17434,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17453,7 +17453,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17472,7 +17472,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17491,7 +17491,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17510,7 +17510,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17529,7 +17529,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17548,7 +17548,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17567,7 +17567,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17586,7 +17586,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, 
PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17605,7 +17605,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17624,7 +17624,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17643,7 +17643,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17662,7 +17662,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17681,7 +17681,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17700,7 +17700,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17719,7 +17719,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17738,7 +17738,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17757,7 +17757,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17776,7 +17776,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17795,7 +17795,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17814,7 +17814,7 @@ m3 CREATE TABLE `m3` ( `c3` 
int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17833,7 +17833,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17852,7 +17852,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17871,7 +17871,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17890,7 +17890,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17909,7 +17909,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17928,7 +17928,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17947,7 +17947,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17966,7 +17966,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -17985,7 +17985,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18004,7 +18004,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18023,7 +18023,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT 
NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18042,7 +18042,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18061,7 +18061,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18080,7 +18080,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18099,7 +18099,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18118,7 +18118,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18137,7 +18137,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18156,7 +18156,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18175,7 +18175,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18194,7 +18194,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18213,7 +18213,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18232,7 +18232,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18251,7 +18251,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18270,7 +18270,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18289,7 +18289,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18308,7 +18308,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18327,7 +18327,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18346,7 +18346,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18365,7 +18365,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18384,7 +18384,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18403,7 +18403,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18422,7 +18422,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18441,7 +18441,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18460,7 +18460,7 @@ m3 
CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18479,7 +18479,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18498,7 +18498,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18517,7 +18517,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18536,7 +18536,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18555,7 +18555,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18574,7 +18574,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18593,7 +18593,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18612,7 +18612,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18631,7 +18631,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18650,7 +18650,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18669,7 +18669,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18688,7 +18688,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18707,7 +18707,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18726,7 +18726,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18745,7 +18745,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18764,7 +18764,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18783,7 +18783,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18802,7 +18802,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18821,7 +18821,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18840,7 +18840,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18859,7 +18859,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18878,7 +18878,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 @@ -18897,7 +18897,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18916,7 +18916,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18935,7 +18935,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18954,7 +18954,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18973,7 +18973,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -18992,7 +18992,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19011,7 +19011,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19030,7 +19030,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19049,7 +19049,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19068,7 +19068,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19087,7 +19087,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19106,7 +19106,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` 
char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19125,7 +19125,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19144,7 +19144,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19163,7 +19163,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19182,7 +19182,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19201,7 +19201,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19220,7 +19220,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19239,7 +19239,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19258,7 +19258,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19277,7 +19277,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19296,7 +19296,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19315,7 +19315,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19334,7 +19334,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19353,7 +19353,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19372,7 +19372,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19391,7 +19391,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19410,7 +19410,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19429,7 +19429,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19448,7 +19448,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19467,7 +19467,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19486,7 +19486,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19505,7 +19505,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19524,7 +19524,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19543,7 +19543,7 @@ m3 CREATE 
TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19562,7 +19562,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19581,7 +19581,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19600,7 +19600,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19619,7 +19619,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19638,7 +19638,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19657,7 +19657,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19676,7 +19676,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19695,7 +19695,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19714,7 +19714,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19733,7 +19733,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19752,7 +19752,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19771,7 +19771,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19790,7 +19790,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19809,7 +19809,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19828,7 +19828,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19847,7 +19847,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19866,7 +19866,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19885,7 +19885,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19904,7 +19904,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19923,7 +19923,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19942,7 +19942,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19961,7 +19961,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19980,7 +19980,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT 
NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -19999,7 +19999,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20018,7 +20018,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20037,7 +20037,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20056,7 +20056,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20075,7 +20075,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20094,7 +20094,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20113,7 +20113,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20132,7 +20132,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20151,7 +20151,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20170,7 +20170,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20189,7 +20189,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY 
(`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20208,7 +20208,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20227,7 +20227,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20246,7 +20246,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20265,7 +20265,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20284,7 +20284,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20303,7 +20303,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20322,7 +20322,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20341,7 +20341,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20360,7 +20360,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20379,7 +20379,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20398,7 +20398,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20417,7 +20417,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, 
`c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20436,7 +20436,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20455,7 +20455,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20474,7 +20474,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20493,7 +20493,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20512,7 +20512,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20531,7 +20531,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20550,7 +20550,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20569,7 +20569,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20588,7 +20588,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20607,7 +20607,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20626,7 +20626,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20645,7 +20645,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20664,7 +20664,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20683,7 +20683,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20702,7 +20702,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20721,7 +20721,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20740,7 +20740,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20759,7 +20759,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20778,7 +20778,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20797,7 +20797,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20816,7 +20816,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20835,7 +20835,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -20854,7 +20854,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20873,7 +20873,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20892,7 +20892,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20911,7 +20911,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20930,7 +20930,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20949,7 +20949,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20968,7 +20968,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -20987,7 +20987,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21006,7 +21006,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21025,7 +21025,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21044,7 +21044,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21063,7 +21063,7 @@ m3 CREATE TABLE `m3` ( `c3` 
smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21082,7 +21082,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21101,7 +21101,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21120,7 +21120,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21139,7 +21139,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21158,7 +21158,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21177,7 +21177,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21196,7 +21196,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21215,7 +21215,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21234,7 +21234,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21253,7 +21253,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21272,7 +21272,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` 
decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21291,7 +21291,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21310,7 +21310,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21329,7 +21329,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21348,7 +21348,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21367,7 +21367,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21386,7 +21386,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21405,7 +21405,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21424,7 +21424,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21443,7 +21443,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21462,7 +21462,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21481,7 +21481,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT 
NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21500,7 +21500,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21519,7 +21519,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21538,7 +21538,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21557,7 +21557,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21576,7 +21576,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21595,7 +21595,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21614,7 +21614,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21633,7 +21633,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21652,7 +21652,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21671,7 +21671,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21690,7 +21690,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -21709,7 +21709,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21728,7 +21728,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21747,7 +21747,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21766,7 +21766,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21785,7 +21785,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21804,7 +21804,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21823,7 +21823,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21842,7 +21842,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21861,7 +21861,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21880,7 +21880,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21899,7 +21899,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21918,7 +21918,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` 
decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21937,7 +21937,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21956,7 +21956,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21975,7 +21975,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -21994,7 +21994,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22013,7 +22013,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22032,7 +22032,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22051,7 +22051,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22070,7 +22070,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22089,7 +22089,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22108,7 +22108,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22127,7 +22127,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) 
ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22146,7 +22146,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22165,7 +22165,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22184,7 +22184,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22203,7 +22203,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22222,7 +22222,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22241,7 +22241,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22260,7 +22260,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22279,7 +22279,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22298,7 +22298,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22317,7 +22317,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22336,7 +22336,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22355,7 +22355,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - 
`c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22374,7 +22374,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22393,7 +22393,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22412,7 +22412,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22431,7 +22431,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22450,7 +22450,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22469,7 +22469,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22488,7 +22488,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22507,7 +22507,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22526,7 +22526,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22545,7 +22545,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22564,7 +22564,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, 
PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22583,7 +22583,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22602,7 +22602,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22621,7 +22621,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22640,7 +22640,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22659,7 +22659,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22678,7 +22678,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22697,7 +22697,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22716,7 +22716,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22735,7 +22735,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22754,7 +22754,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22773,7 +22773,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22792,7 +22792,7 
@@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22811,7 +22811,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22830,7 +22830,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22849,7 +22849,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22868,7 +22868,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22887,7 +22887,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22906,7 +22906,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22925,7 +22925,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22944,7 +22944,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22963,7 +22963,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -22982,7 +22982,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23001,7 +23001,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - 
`c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23020,7 +23020,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23039,7 +23039,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23058,7 +23058,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23077,7 +23077,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23096,7 +23096,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23115,7 +23115,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23134,7 +23134,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23153,7 +23153,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23172,7 +23172,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23191,7 +23191,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23210,7 +23210,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY 
(`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23229,7 +23229,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23248,7 +23248,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23267,7 +23267,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23286,7 +23286,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23305,7 +23305,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23324,7 +23324,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23343,7 +23343,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23362,7 +23362,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23381,7 +23381,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23400,7 +23400,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23419,7 +23419,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23438,7 +23438,7 @@ m3 
CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23457,7 +23457,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23476,7 +23476,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23495,7 +23495,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23514,7 +23514,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23533,7 +23533,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23552,7 +23552,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23571,7 +23571,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23590,7 +23590,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23609,7 +23609,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23628,7 +23628,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23647,7 +23647,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT 
NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23666,7 +23666,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23685,7 +23685,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23704,7 +23704,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23723,7 +23723,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23742,7 +23742,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23761,7 +23761,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23780,7 +23780,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23799,7 +23799,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23818,7 +23818,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23837,7 +23837,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23856,7 +23856,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23875,7 +23875,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23894,7 +23894,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23913,7 +23913,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23932,7 +23932,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23951,7 +23951,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23970,7 +23970,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -23989,7 +23989,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24008,7 +24008,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24027,7 +24027,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24046,7 +24046,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24065,7 +24065,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24084,7 +24084,7 @@ m3 
CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24103,7 +24103,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24122,7 +24122,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24141,7 +24141,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24160,7 +24160,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24179,7 +24179,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24198,7 +24198,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24217,7 +24217,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24236,7 +24236,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24255,7 +24255,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24274,7 +24274,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24293,7 +24293,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24312,7 +24312,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24331,7 +24331,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24350,7 +24350,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24369,7 +24369,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24388,7 +24388,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24407,7 +24407,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24426,7 +24426,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24445,7 +24445,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24464,7 +24464,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24483,7 +24483,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24502,7 +24502,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ 
-24521,7 +24521,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24540,7 +24540,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24559,7 +24559,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24578,7 +24578,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24597,7 +24597,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24616,7 +24616,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24635,7 +24635,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24654,7 +24654,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24673,7 +24673,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24692,7 +24692,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24711,7 +24711,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24730,7 +24730,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL 
DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24749,7 +24749,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24768,7 +24768,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24787,7 +24787,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24806,7 +24806,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24825,7 +24825,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24844,7 +24844,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24863,7 +24863,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24882,7 +24882,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24901,7 +24901,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24920,7 +24920,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24939,7 +24939,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) 
ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24958,7 +24958,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24977,7 +24977,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -24996,7 +24996,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25015,7 +25015,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25034,7 +25034,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25053,7 +25053,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25072,7 +25072,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25091,7 +25091,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25110,7 +25110,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25129,7 +25129,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25148,7 +25148,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25167,7 +25167,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) 
DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25186,7 +25186,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25205,7 +25205,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25224,7 +25224,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25243,7 +25243,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25262,7 +25262,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25281,7 +25281,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25300,7 +25300,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25319,7 +25319,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25338,7 +25338,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25357,7 +25357,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25376,7 +25376,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT 
NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25395,7 +25395,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25414,7 +25414,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25433,7 +25433,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25452,7 +25452,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25471,7 +25471,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25490,7 +25490,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25509,7 +25509,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25528,7 +25528,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25547,7 +25547,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25566,7 +25566,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25585,7 +25585,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL 
DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25604,7 +25604,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25623,7 +25623,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25642,7 +25642,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25661,7 +25661,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25680,7 +25680,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25699,7 +25699,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25718,7 +25718,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25737,7 +25737,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25756,7 +25756,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25775,7 +25775,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25794,7 +25794,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` 
(`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25813,7 +25813,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25832,7 +25832,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25851,7 +25851,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25870,7 +25870,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25889,7 +25889,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25908,7 +25908,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25927,7 +25927,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25946,7 +25946,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25965,7 +25965,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -25984,7 +25984,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26003,7 +26003,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26022,7 
+26022,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26041,7 +26041,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26060,7 +26060,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26079,7 +26079,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26098,7 +26098,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26117,7 +26117,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26136,7 +26136,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26155,7 +26155,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26174,7 +26174,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26193,7 +26193,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26212,7 +26212,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26231,7 +26231,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT 
NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26250,7 +26250,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26269,7 +26269,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26288,7 +26288,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26307,7 +26307,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26326,7 +26326,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26345,7 +26345,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26364,7 +26364,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26383,7 +26383,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26402,7 +26402,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26421,7 +26421,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26440,7 +26440,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26459,7 +26459,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26478,7 +26478,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26497,7 +26497,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26516,7 +26516,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26535,7 +26535,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26554,7 +26554,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26573,7 +26573,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26592,7 +26592,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26611,7 +26611,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26630,7 +26630,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26649,7 +26649,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 
@@ -26668,7 +26668,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26687,7 +26687,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26706,7 +26706,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26725,7 +26725,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26744,7 +26744,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26763,7 +26763,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26782,7 +26782,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26801,7 +26801,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26820,7 +26820,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26839,7 +26839,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26858,7 +26858,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26877,7 +26877,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - 
`c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26896,7 +26896,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26915,7 +26915,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26934,7 +26934,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26953,7 +26953,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26972,7 +26972,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -26991,7 +26991,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27010,7 +27010,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27029,7 +27029,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27048,7 +27048,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27067,7 +27067,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27086,7 +27086,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, 
PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27105,7 +27105,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27124,7 +27124,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27143,7 +27143,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27162,7 +27162,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27181,7 +27181,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27200,7 +27200,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27219,7 +27219,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27238,7 +27238,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27257,7 +27257,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27276,7 +27276,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27295,7 +27295,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27314,7 +27314,7 @@ 
m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27333,7 +27333,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27352,7 +27352,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27371,7 +27371,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27390,7 +27390,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27409,7 +27409,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27428,7 +27428,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27447,7 +27447,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27466,7 +27466,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27485,7 +27485,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27504,7 +27504,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27523,7 +27523,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT 
NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27542,7 +27542,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27561,7 +27561,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27580,7 +27580,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27599,7 +27599,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27618,7 +27618,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27637,7 +27637,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27656,7 +27656,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27675,7 +27675,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27694,7 +27694,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27713,7 +27713,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27732,7 +27732,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), 
UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27751,7 +27751,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27770,7 +27770,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27789,7 +27789,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27808,7 +27808,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27827,7 +27827,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27846,7 +27846,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27865,7 +27865,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27884,7 +27884,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27903,7 +27903,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27922,7 +27922,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27941,7 +27941,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27960,7 +27960,7 @@ m3 CREATE TABLE `m3` ( `c3` 
smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27979,7 +27979,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -27998,7 +27998,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28017,7 +28017,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28036,7 +28036,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28055,7 +28055,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28074,7 +28074,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28093,7 +28093,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28112,7 +28112,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28131,7 +28131,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28150,7 +28150,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28169,7 +28169,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', 
+ `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28188,7 +28188,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28207,7 +28207,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28226,7 +28226,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28245,7 +28245,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28264,7 +28264,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28283,7 +28283,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28302,7 +28302,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28321,7 +28321,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28340,7 +28340,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28359,7 +28359,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28378,7 +28378,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 @@ -28397,7 +28397,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28416,7 +28416,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28435,7 +28435,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28454,7 +28454,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28473,7 +28473,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28492,7 +28492,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28511,7 +28511,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28530,7 +28530,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28549,7 +28549,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28568,7 +28568,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28587,7 +28587,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28606,7 +28606,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT 
NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28625,7 +28625,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28644,7 +28644,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28663,7 +28663,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28682,7 +28682,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28701,7 +28701,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28720,7 +28720,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28739,7 +28739,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28758,7 +28758,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28777,7 +28777,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28796,7 +28796,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28815,7 +28815,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ 
-28834,7 +28834,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28853,7 +28853,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28872,7 +28872,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28891,7 +28891,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28910,7 +28910,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28929,7 +28929,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28948,7 +28948,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28967,7 +28967,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -28986,7 +28986,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29005,7 +29005,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29024,7 +29024,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29043,7 +29043,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL 
DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29062,7 +29062,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29081,7 +29081,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29100,7 +29100,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29119,7 +29119,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29138,7 +29138,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29157,7 +29157,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29176,7 +29176,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29195,7 +29195,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29214,7 +29214,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29233,7 +29233,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29252,7 +29252,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29271,7 +29271,7 
@@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29290,7 +29290,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29309,7 +29309,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29328,7 +29328,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29347,7 +29347,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29366,7 +29366,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29385,7 +29385,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29404,7 +29404,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29423,7 +29423,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29442,7 +29442,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29461,7 +29461,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29480,7 +29480,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29499,7 +29499,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29518,7 +29518,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29537,7 +29537,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29556,7 +29556,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29575,7 +29575,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29594,7 +29594,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29613,7 +29613,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29632,7 +29632,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` char(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29651,7 +29651,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29670,7 +29670,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29689,7 +29689,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -29708,7 +29708,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29727,7 +29727,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29746,7 +29746,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29765,7 +29765,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29784,7 +29784,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29803,7 +29803,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29822,7 +29822,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29841,7 +29841,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29860,7 +29860,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29879,7 +29879,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29898,7 +29898,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29917,7 +29917,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` 
varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29936,7 +29936,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29955,7 +29955,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29974,7 +29974,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -29993,7 +29993,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30012,7 +30012,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30031,7 +30031,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30050,7 +30050,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30069,7 +30069,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30088,7 +30088,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30107,7 +30107,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30126,7 +30126,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30145,7 +30145,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30164,7 +30164,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30183,7 +30183,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30202,7 +30202,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30221,7 +30221,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30240,7 +30240,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30259,7 +30259,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30278,7 +30278,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30297,7 +30297,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30316,7 +30316,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30335,7 +30335,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) 
) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30354,7 +30354,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30373,7 +30373,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30392,7 +30392,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30411,7 +30411,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30430,7 +30430,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30449,7 +30449,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30468,7 +30468,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30487,7 +30487,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30506,7 +30506,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30525,7 +30525,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30544,7 +30544,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30563,7 +30563,7 
@@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30582,7 +30582,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30601,7 +30601,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30620,7 +30620,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30639,7 +30639,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30658,7 +30658,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30677,7 +30677,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30696,7 +30696,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30715,7 +30715,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30734,7 +30734,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30753,7 +30753,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30772,7 +30772,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` 
varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30791,7 +30791,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30810,7 +30810,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30829,7 +30829,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30848,7 +30848,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30867,7 +30867,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30886,7 +30886,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30905,7 +30905,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30924,7 +30924,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30943,7 +30943,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30962,7 +30962,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -30981,7 +30981,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31000,7 +31000,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31019,7 +31019,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31038,7 +31038,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31057,7 +31057,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31076,7 +31076,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31095,7 +31095,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31114,7 +31114,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31133,7 +31133,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31152,7 +31152,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31171,7 +31171,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31190,7 +31190,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31209,7 +31209,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) 
DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31228,7 +31228,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31247,7 +31247,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31266,7 +31266,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31285,7 +31285,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31304,7 +31304,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31323,7 +31323,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31342,7 +31342,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31361,7 +31361,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31380,7 +31380,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31399,7 +31399,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31418,7 +31418,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT 
NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31437,7 +31437,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31456,7 +31456,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31475,7 +31475,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31494,7 +31494,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31513,7 +31513,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31532,7 +31532,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31551,7 +31551,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31570,7 +31570,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31589,7 +31589,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31608,7 +31608,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31627,7 +31627,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31646,7 
+31646,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31665,7 +31665,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31684,7 +31684,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31703,7 +31703,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31722,7 +31722,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31741,7 +31741,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31760,7 +31760,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31779,7 +31779,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31798,7 +31798,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31817,7 +31817,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31836,7 +31836,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31855,7 +31855,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` time NOT NULL, - `c6` 
decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31874,7 +31874,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31893,7 +31893,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31912,7 +31912,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varchar(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31931,7 +31931,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31950,7 +31950,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31969,7 +31969,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -31988,7 +31988,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32007,7 +32007,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32026,7 +32026,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32045,7 +32045,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32064,7 +32064,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 
3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32083,7 +32083,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32102,7 +32102,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32121,7 +32121,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32140,7 +32140,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32159,7 +32159,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32178,7 +32178,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32197,7 +32197,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32216,7 +32216,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32235,7 +32235,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32254,7 +32254,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32273,7 +32273,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ 
-32292,7 +32292,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32311,7 +32311,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32330,7 +32330,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32349,7 +32349,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32368,7 +32368,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32387,7 +32387,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32406,7 +32406,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32425,7 +32425,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32444,7 +32444,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32463,7 +32463,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32482,7 +32482,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32501,7 +32501,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, 
`c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32520,7 +32520,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32539,7 +32539,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32558,7 +32558,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32577,7 +32577,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32596,7 +32596,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32615,7 +32615,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32634,7 +32634,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32653,7 +32653,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32672,7 +32672,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32691,7 +32691,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32710,7 +32710,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT 
NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32729,7 +32729,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32748,7 +32748,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32767,7 +32767,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32786,7 +32786,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32805,7 +32805,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32824,7 +32824,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32843,7 +32843,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32862,7 +32862,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32881,7 +32881,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32900,7 +32900,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32919,7 +32919,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE 
DEFAULT CHARSET=latin1 @@ -32938,7 +32938,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32957,7 +32957,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32976,7 +32976,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -32995,7 +32995,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33014,7 +33014,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33033,7 +33033,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33052,7 +33052,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33071,7 +33071,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33090,7 +33090,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33109,7 +33109,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33128,7 +33128,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33147,7 +33147,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) 
NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33166,7 +33166,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33185,7 +33185,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33204,7 +33204,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33223,7 +33223,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33242,7 +33242,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33261,7 +33261,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33280,7 +33280,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33299,7 +33299,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33318,7 +33318,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33337,7 +33337,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33356,7 +33356,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY 
`c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33375,7 +33375,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33394,7 +33394,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33413,7 +33413,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33432,7 +33432,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33451,7 +33451,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33470,7 +33470,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33489,7 +33489,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33508,7 +33508,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33527,7 +33527,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33546,7 +33546,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33565,7 +33565,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33584,7 +33584,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT 
NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33603,7 +33603,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33622,7 +33622,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33641,7 +33641,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33660,7 +33660,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33679,7 +33679,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33698,7 +33698,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33717,7 +33717,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33736,7 +33736,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33755,7 +33755,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33774,7 +33774,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33793,7 +33793,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY 
(`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33812,7 +33812,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33831,7 +33831,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33850,7 +33850,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33869,7 +33869,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33888,7 +33888,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33907,7 +33907,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33926,7 +33926,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33945,7 +33945,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33964,7 +33964,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -33983,7 +33983,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34002,7 +34002,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34021,7 +34021,7 @@ m3 CREATE TABLE `m3` ( `c3` 
bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34040,7 +34040,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34059,7 +34059,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34078,7 +34078,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34097,7 +34097,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34116,7 +34116,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34135,7 +34135,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34154,7 +34154,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34173,7 +34173,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34192,7 +34192,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` binary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34211,7 +34211,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34230,7 +34230,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34249,7 +34249,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34268,7 +34268,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34287,7 +34287,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34306,7 +34306,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34325,7 +34325,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34344,7 +34344,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34363,7 +34363,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34382,7 +34382,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34401,7 +34401,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34420,7 +34420,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34439,7 +34439,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY 
(`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34458,7 +34458,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34477,7 +34477,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34496,7 +34496,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34515,7 +34515,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34534,7 +34534,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34553,7 +34553,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34572,7 +34572,7 @@ m3 CREATE TABLE `m3` ( `c3` tinyint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34591,7 +34591,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34610,7 +34610,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34629,7 +34629,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34648,7 +34648,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ 
-34667,7 +34667,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34686,7 +34686,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34705,7 +34705,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34724,7 +34724,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34743,7 +34743,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34762,7 +34762,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34781,7 +34781,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34800,7 +34800,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34819,7 +34819,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34838,7 +34838,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34857,7 +34857,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34876,7 +34876,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, 
`c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34895,7 +34895,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34914,7 +34914,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34933,7 +34933,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34952,7 +34952,7 @@ m3 CREATE TABLE `m3` ( `c3` smallint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34971,7 +34971,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -34990,7 +34990,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35009,7 +35009,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35028,7 +35028,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35047,7 +35047,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35066,7 +35066,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35085,7 +35085,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, 
- `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35104,7 +35104,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35123,7 +35123,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35142,7 +35142,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35161,7 +35161,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35180,7 +35180,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35199,7 +35199,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35218,7 +35218,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35237,7 +35237,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35256,7 +35256,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35275,7 +35275,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35294,7 +35294,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT 
'3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35313,7 +35313,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35332,7 +35332,7 @@ m3 CREATE TABLE `m3` ( `c3` mediumint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35351,7 +35351,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35370,7 +35370,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35389,7 +35389,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35408,7 +35408,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35427,7 +35427,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35446,7 +35446,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35465,7 +35465,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35484,7 +35484,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35503,7 +35503,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) 
ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35522,7 +35522,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35541,7 +35541,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35560,7 +35560,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35579,7 +35579,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35598,7 +35598,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35617,7 +35617,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35636,7 +35636,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35655,7 +35655,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35674,7 +35674,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35693,7 +35693,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35712,7 +35712,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35731,7 +35731,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` 
varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35750,7 +35750,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35769,7 +35769,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35788,7 +35788,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35807,7 +35807,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35826,7 +35826,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35845,7 +35845,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35864,7 +35864,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35883,7 +35883,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35902,7 +35902,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35921,7 +35921,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35940,7 +35940,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` 
decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35959,7 +35959,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35978,7 +35978,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -35997,7 +35997,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36016,7 +36016,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36035,7 +36035,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36054,7 +36054,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36073,7 +36073,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36092,7 +36092,7 @@ m3 CREATE TABLE `m3` ( `c3` int(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36111,7 +36111,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bit(1) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36130,7 +36130,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` tinyint(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36149,7 +36149,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` smallint(6) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT 
CHARSET=latin1 @@ -36168,7 +36168,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` mediumint(9) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36187,7 +36187,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36206,7 +36206,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` int(11) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36225,7 +36225,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` bigint(20) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36244,7 +36244,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36263,7 +36263,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36282,7 +36282,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36301,7 +36301,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` decimal(10,0) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36320,7 +36320,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36339,7 +36339,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36358,7 +36358,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` double NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36377,7 +36377,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, 
`c4` varbinary(15) NOT NULL, `c5` float NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36396,7 +36396,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` date NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36415,7 +36415,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` time NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36434,7 +36434,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36453,7 +36453,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` datetime NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 @@ -36472,7 +36472,7 @@ m3 CREATE TABLE `m3` ( `c3` bigint(4) DEFAULT NULL, `c4` varbinary(15) NOT NULL, `c5` year(4) NOT NULL, - `c6` decimal(10,8) NOT NULL DEFAULT '3.14159200', + `c6` decimal(10,8) NOT NULL DEFAULT 3.14159200, PRIMARY KEY (`c4`), UNIQUE KEY `c5` (`c5`) ) ENGINE=ENGINE DEFAULT CHARSET=latin1 diff --git a/mysql-test/suite/engines/rr_trx/r/init_innodb.result b/mysql-test/suite/engines/rr_trx/r/init_innodb.result index b4d11879521..516147976ce 100644 --- a/mysql-test/suite/engines/rr_trx/r/init_innodb.result +++ b/mysql-test/suite/engines/rr_trx/r/init_innodb.result @@ -42,12 +42,12 @@ t1 CREATE TABLE `t1` ( `int2` int(11) DEFAULT NULL, `int2_key` int(11) DEFAULT NULL, `int2_unique` int(11) DEFAULT NULL, - `for_update` tinyint(1) DEFAULT '0', + `for_update` tinyint(1) DEFAULT 0, `timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `connection_id` int(11) DEFAULT NULL, - `thread_id` int(11) DEFAULT '0', - `is_uncommitted` tinyint(1) DEFAULT '0', - `is_consistent` tinyint(1) DEFAULT '0', + `thread_id` int(11) DEFAULT 0, + `is_uncommitted` tinyint(1) DEFAULT 0, + `is_consistent` tinyint(1) DEFAULT 0, PRIMARY KEY (`pk`), UNIQUE KEY `int1_unique` (`int1_unique`), UNIQUE KEY `int2_unique` (`int2_unique`), diff --git a/mysql-test/suite/federated/assisted_discovery.result b/mysql-test/suite/federated/assisted_discovery.result index 76ac6e422cd..f79e47da8b4 100644 --- a/mysql-test/suite/federated/assisted_discovery.result +++ b/mysql-test/suite/federated/assisted_discovery.result @@ -19,8 +19,8 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `id` int(20) NOT NULL, - `group` int(11) NOT NULL DEFAULT '1', - `a\\b` int(11) NOT NULL DEFAULT '2', + `group` int(11) NOT NULL DEFAULT 1, + `a\\b` int(11) NOT NULL DEFAULT 2, `a\\` int(10) unsigned DEFAULT NULL, `name` varchar(32) DEFAULT 'name', PRIMARY KEY (`id`) diff --git a/mysql-test/suite/funcs_1/r/is_character_sets.result b/mysql-test/suite/funcs_1/r/is_character_sets.result index 2aaa1985020..caff08ae1e4 
100644 --- a/mysql-test/suite/funcs_1/r/is_character_sets.result +++ b/mysql-test/suite/funcs_1/r/is_character_sets.result @@ -38,7 +38,7 @@ CHARACTER_SETS CREATE TEMPORARY TABLE `CHARACTER_SETS` ( `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', `DEFAULT_COLLATE_NAME` varchar(32) NOT NULL DEFAULT '', `DESCRIPTION` varchar(60) NOT NULL DEFAULT '', - `MAXLEN` bigint(3) NOT NULL DEFAULT '0' + `MAXLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 SHOW COLUMNS FROM information_schema.CHARACTER_SETS; Field Type Null Key Default Extra diff --git a/mysql-test/suite/funcs_1/r/is_collations.result b/mysql-test/suite/funcs_1/r/is_collations.result index 234aeff8ff2..c017bbd4eaf 100644 --- a/mysql-test/suite/funcs_1/r/is_collations.result +++ b/mysql-test/suite/funcs_1/r/is_collations.result @@ -39,10 +39,10 @@ Table Create Table COLLATIONS CREATE TEMPORARY TABLE `COLLATIONS` ( `COLLATION_NAME` varchar(32) NOT NULL DEFAULT '', `CHARACTER_SET_NAME` varchar(32) NOT NULL DEFAULT '', - `ID` bigint(11) NOT NULL DEFAULT '0', + `ID` bigint(11) NOT NULL DEFAULT 0, `IS_DEFAULT` varchar(3) NOT NULL DEFAULT '', `IS_COMPILED` varchar(3) NOT NULL DEFAULT '', - `SORTLEN` bigint(3) NOT NULL DEFAULT '0' + `SORTLEN` bigint(3) NOT NULL DEFAULT 0 ) ENGINE=MEMORY DEFAULT CHARSET=utf8 SHOW COLUMNS FROM information_schema.COLLATIONS; Field Type Null Key Default Extra diff --git a/mysql-test/suite/funcs_1/r/is_columns.result b/mysql-test/suite/funcs_1/r/is_columns.result index b614b789c63..4b028e1da3d 100644 --- a/mysql-test/suite/funcs_1/r/is_columns.result +++ b/mysql-test/suite/funcs_1/r/is_columns.result @@ -55,7 +55,7 @@ COLUMNS CREATE TEMPORARY TABLE `COLUMNS` ( `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '', `TABLE_NAME` varchar(64) NOT NULL DEFAULT '', `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '', - `ORDINAL_POSITION` bigint(21) unsigned NOT NULL DEFAULT '0', + `ORDINAL_POSITION` bigint(21) unsigned NOT NULL DEFAULT 0, `COLUMN_DEFAULT` longtext DEFAULT NULL, `IS_NULLABLE` varchar(3) NOT NULL DEFAULT '', `DATA_TYPE` varchar(64) NOT NULL DEFAULT '', diff --git a/mysql-test/suite/funcs_1/r/is_events.result b/mysql-test/suite/funcs_1/r/is_events.result index 2add18062d9..3db6cc5e61d 100644 --- a/mysql-test/suite/funcs_1/r/is_events.result +++ b/mysql-test/suite/funcs_1/r/is_events.result @@ -75,7 +75,7 @@ EVENTS CREATE TEMPORARY TABLE `EVENTS` ( `LAST_ALTERED` datetime NOT NULL DEFAULT '0000-00-00 00:00:00', `LAST_EXECUTED` datetime DEFAULT NULL, `EVENT_COMMENT` varchar(64) NOT NULL DEFAULT '', - `ORIGINATOR` bigint(10) NOT NULL DEFAULT '0', + `ORIGINATOR` bigint(10) NOT NULL DEFAULT 0, `CHARACTER_SET_CLIENT` varchar(32) NOT NULL DEFAULT '', `COLLATION_CONNECTION` varchar(32) NOT NULL DEFAULT '', `DATABASE_COLLATION` varchar(32) NOT NULL DEFAULT '' diff --git a/mysql-test/suite/funcs_1/r/is_key_column_usage.result b/mysql-test/suite/funcs_1/r/is_key_column_usage.result index 79310904d0f..80dd2db5441 100644 --- a/mysql-test/suite/funcs_1/r/is_key_column_usage.result +++ b/mysql-test/suite/funcs_1/r/is_key_column_usage.result @@ -50,7 +50,7 @@ KEY_COLUMN_USAGE CREATE TEMPORARY TABLE `KEY_COLUMN_USAGE` ( `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '', `TABLE_NAME` varchar(64) NOT NULL DEFAULT '', `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '', - `ORDINAL_POSITION` bigint(10) NOT NULL DEFAULT '0', + `ORDINAL_POSITION` bigint(10) NOT NULL DEFAULT 0, `POSITION_IN_UNIQUE_CONSTRAINT` bigint(10) DEFAULT NULL, `REFERENCED_TABLE_SCHEMA` varchar(64) DEFAULT NULL, `REFERENCED_TABLE_NAME` varchar(64) 
DEFAULT NULL, diff --git a/mysql-test/suite/funcs_1/r/is_statistics.result b/mysql-test/suite/funcs_1/r/is_statistics.result index f58c3e56772..8197f043e48 100644 --- a/mysql-test/suite/funcs_1/r/is_statistics.result +++ b/mysql-test/suite/funcs_1/r/is_statistics.result @@ -50,10 +50,10 @@ STATISTICS CREATE TEMPORARY TABLE `STATISTICS` ( `TABLE_CATALOG` varchar(512) NOT NULL DEFAULT '', `TABLE_SCHEMA` varchar(64) NOT NULL DEFAULT '', `TABLE_NAME` varchar(64) NOT NULL DEFAULT '', - `NON_UNIQUE` bigint(1) NOT NULL DEFAULT '0', + `NON_UNIQUE` bigint(1) NOT NULL DEFAULT 0, `INDEX_SCHEMA` varchar(64) NOT NULL DEFAULT '', `INDEX_NAME` varchar(64) NOT NULL DEFAULT '', - `SEQ_IN_INDEX` bigint(2) NOT NULL DEFAULT '0', + `SEQ_IN_INDEX` bigint(2) NOT NULL DEFAULT 0, `COLUMN_NAME` varchar(64) NOT NULL DEFAULT '', `COLLATION` varchar(1) DEFAULT NULL, `CARDINALITY` bigint(21) DEFAULT NULL, diff --git a/mysql-test/suite/funcs_1/r/is_triggers.result b/mysql-test/suite/funcs_1/r/is_triggers.result index 6c218be565d..ac6372b1d84 100644 --- a/mysql-test/suite/funcs_1/r/is_triggers.result +++ b/mysql-test/suite/funcs_1/r/is_triggers.result @@ -61,7 +61,7 @@ TRIGGERS CREATE TEMPORARY TABLE `TRIGGERS` ( `EVENT_OBJECT_CATALOG` varchar(512) NOT NULL DEFAULT '', `EVENT_OBJECT_SCHEMA` varchar(64) NOT NULL DEFAULT '', `EVENT_OBJECT_TABLE` varchar(64) NOT NULL DEFAULT '', - `ACTION_ORDER` bigint(4) NOT NULL DEFAULT '0', + `ACTION_ORDER` bigint(4) NOT NULL DEFAULT 0, `ACTION_CONDITION` longtext DEFAULT NULL, `ACTION_STATEMENT` longtext NOT NULL DEFAULT '', `ACTION_ORIENTATION` varchar(9) NOT NULL DEFAULT '', diff --git a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result index 70001195e02..6680e045558 100644 --- a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result +++ b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result @@ -24,23 +24,23 @@ connection default; SHOW CREATE TABLE processlist; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 SHOW processlist; Id User Host db Command Time State Info Progress @@ -103,23 +103,23 @@ SHOW/SELECT shows only the processes (1) of the user. 
SHOW CREATE TABLE processlist; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 SHOW processlist; Id User Host db Command Time State Info Progress diff --git a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result index 69fa605f2bb..09f9d231144 100644 --- a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result +++ b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result @@ -24,23 +24,23 @@ connection default; SHOW CREATE TABLE processlist; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 SHOW processlist; Id User Host db Command Time State Info Progress @@ -103,23 +103,23 @@ SHOW/SELECT shows only the processes (1) of the user. 
SHOW CREATE TABLE processlist; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 SHOW processlist; Id User Host db Command Time State Info Progress diff --git a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result index 72e4b4a3379..f935f7770b8 100644 --- a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result +++ b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result @@ -12,23 +12,23 @@ USE test; SHOW CREATE TABLE INFORMATION_SCHEMA.PROCESSLIST; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 # Ensure that the information about the own connection is correct. 
#-------------------------------------------------------------------------- diff --git a/mysql-test/suite/funcs_1/r/processlist_val_ps.result b/mysql-test/suite/funcs_1/r/processlist_val_ps.result index a9f1297a507..48fcc0a5553 100644 --- a/mysql-test/suite/funcs_1/r/processlist_val_ps.result +++ b/mysql-test/suite/funcs_1/r/processlist_val_ps.result @@ -12,23 +12,23 @@ USE test; SHOW CREATE TABLE INFORMATION_SCHEMA.PROCESSLIST; Table Create Table PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` ( - `ID` bigint(4) NOT NULL DEFAULT '0', + `ID` bigint(4) NOT NULL DEFAULT 0, `USER` varchar(128) NOT NULL DEFAULT '', `HOST` varchar(64) NOT NULL DEFAULT '', `DB` varchar(64) DEFAULT NULL, `COMMAND` varchar(16) NOT NULL DEFAULT '', - `TIME` int(7) NOT NULL DEFAULT '0', + `TIME` int(7) NOT NULL DEFAULT 0, `STATE` varchar(64) DEFAULT NULL, `INFO` longtext DEFAULT NULL, - `TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000', - `STAGE` tinyint(2) NOT NULL DEFAULT '0', - `MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0', - `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000', - `MEMORY_USED` bigint(7) NOT NULL DEFAULT '0', - `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0', - `QUERY_ID` bigint(4) NOT NULL DEFAULT '0', + `TIME_MS` decimal(22,3) NOT NULL DEFAULT 0.000, + `STAGE` tinyint(2) NOT NULL DEFAULT 0, + `MAX_STAGE` tinyint(2) NOT NULL DEFAULT 0, + `PROGRESS` decimal(7,3) NOT NULL DEFAULT 0.000, + `MEMORY_USED` bigint(7) NOT NULL DEFAULT 0, + `EXAMINED_ROWS` int(7) NOT NULL DEFAULT 0, + `QUERY_ID` bigint(4) NOT NULL DEFAULT 0, `INFO_BINARY` blob DEFAULT NULL, - `TID` bigint(4) NOT NULL DEFAULT '0' + `TID` bigint(4) NOT NULL DEFAULT 0 ) DEFAULT CHARSET=utf8 # Ensure that the information about the own connection is correct. #-------------------------------------------------------------------------- diff --git a/mysql-test/suite/innodb/r/innodb-alter-table.result b/mysql-test/suite/innodb/r/innodb-alter-table.result index 514b8b7935f..4f4d628a1ee 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-table.result +++ b/mysql-test/suite/innodb/r/innodb-alter-table.result @@ -72,7 +72,7 @@ Level Code Message SHOW CREATE TABLE `w_findispmon05u`; Table Create Table w_findispmon05u CREATE TABLE `w_findispmon05u` ( - `f5atpkey` int(11) NOT NULL DEFAULT '0', + `f5atpkey` int(11) NOT NULL DEFAULT 0, `f5atzo05` int(11) DEFAULT NULL, `pos` bigint(21) DEFAULT NULL, `f5BnvB` int(9) DEFAULT NULL, diff --git a/mysql-test/suite/innodb/r/innodb-autoinc.result b/mysql-test/suite/innodb/r/innodb-autoinc.result index ddca4685e05..52c5bb6157b 100644 --- a/mysql-test/suite/innodb/r/innodb-autoinc.result +++ b/mysql-test/suite/innodb/r/innodb-autoinc.result @@ -1304,7 +1304,7 @@ Warning 1264 Out of range value for column 'c1' at row 1 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `c1` int(11) NOT NULL DEFAULT '0', + `c1` int(11) NOT NULL DEFAULT 0, `c2` varchar(10) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 @@ -1316,7 +1316,7 @@ c1 c2 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `c1` int(11) NOT NULL DEFAULT '0', + `c1` int(11) NOT NULL DEFAULT 0, `c2` varchar(10) DEFAULT NULL, PRIMARY KEY (`c1`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result index a76f715246d..1449fd7d85a 100644 --- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result +++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc.result @@ -302,7 +302,7 @@ MATCH a,b AGAINST ('support') ; SHOW CREATE TABLE 
t2; Table Create Table t2 CREATE TABLE `t2` ( - `id` int(10) unsigned NOT NULL DEFAULT '0' + `id` int(10) unsigned NOT NULL DEFAULT 0 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SELECT id FROM t2; id @@ -314,7 +314,7 @@ MATCH a,b AGAINST("+support +collections" IN BOOLEAN MODE); SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `id` int(10) unsigned NOT NULL DEFAULT '0' + `id` int(10) unsigned NOT NULL DEFAULT 0 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SELECT id FROM t2; id @@ -325,7 +325,7 @@ MATCH a,b AGAINST ('"proximity search"@10' IN BOOLEAN MODE); SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `id` int(10) unsigned NOT NULL DEFAULT '0' + `id` int(10) unsigned NOT NULL DEFAULT 0 ) ENGINE=InnoDB DEFAULT CHARSET=latin1 SELECT id FROM t2; id diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result index 34fcba6ae87..c55fab0f5cc 100644 --- a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result @@ -68,8 +68,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -560,8 +560,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1060,8 +1060,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1565,8 +1565,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2064,8 +2064,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2567,8 +2567,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3077,8 +3077,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3585,8 +3585,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4082,8 +4082,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4574,8 +4574,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5074,8 +5074,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5579,8 +5579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6078,8 +6078,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6581,8 +6581,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7091,8 +7091,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7599,8 +7599,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8097,8 +8097,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) 
DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8605,8 +8605,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9121,8 +9121,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9642,8 +9642,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10157,8 +10157,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10676,8 +10676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11202,8 +11202,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11726,8 +11726,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12239,8 +12239,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12747,8 +12747,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13263,8 +13263,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13784,8 +13784,8 @@ create_command SHOW CREATE 
TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14299,8 +14299,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14818,8 +14818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15344,8 +15344,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15868,8 +15868,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16382,8 +16382,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16875,8 +16875,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17376,8 +17376,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17882,8 +17882,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18382,8 +18382,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18886,8 +18886,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19397,8 +19397,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19906,8 +19906,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -20404,8 +20404,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -20897,8 +20897,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -21398,8 +21398,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -21904,8 +21904,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -22404,8 +22404,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -22908,8 +22908,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -23419,8 +23419,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL, @@ -23928,8 +23928,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -24426,8 +24426,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -24919,8 +24919,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -25420,8 +25420,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -25926,8 +25926,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -26426,8 +26426,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -26930,8 +26930,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -27441,8 +27441,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -27950,8 +27950,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result index 018bc4e9287..3d5548c340b 100644 --- 
a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result @@ -68,8 +68,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -585,8 +585,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1116,8 +1116,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1658,8 +1658,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2190,8 +2190,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2730,8 +2730,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3277,8 +3277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3822,8 +3822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4358,8 +4358,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4875,8 +4875,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5406,8 +5406,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5948,8 +5948,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6480,8 +6480,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7020,8 +7020,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7567,8 +7567,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8112,8 +8112,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result index c05abfbcaed..29aaa2713c1 100644 --- a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result @@ -386,8 +386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -878,8 +878,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1378,8 +1378,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1883,8 +1883,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, 
`f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2382,8 +2382,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2887,8 +2887,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3397,8 +3397,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3905,8 +3905,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4402,8 +4402,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4894,8 +4894,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5394,8 +5394,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5899,8 +5899,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6398,8 +6398,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6903,8 +6903,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT 
'0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7413,8 +7413,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7921,8 +7921,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8419,8 +8419,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8927,8 +8927,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9443,8 +9443,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9964,8 +9964,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10479,8 +10479,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11000,8 +11000,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11526,8 +11526,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12050,8 +12050,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL, @@ -12563,8 +12563,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13071,8 +13071,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13587,8 +13587,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14108,8 +14108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14623,8 +14623,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15144,8 +15144,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15670,8 +15670,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16194,8 +16194,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result index 514afd88a20..4f724d3c8b2 100644 --- a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result @@ -227,8 +227,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -744,8 +744,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1275,8 +1275,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -1817,8 +1817,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2349,8 +2349,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -2891,8 +2891,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3438,8 +3438,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -3983,8 +3983,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -4519,8 +4519,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5036,8 +5036,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -5567,8 +5567,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6109,8 +6109,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -6641,8 +6641,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` 
int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7183,8 +7183,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -7730,8 +7730,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8275,8 +8275,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result index 8ab37aafcbe..d52a3180124 100644 --- a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result @@ -67,8 +67,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -507,8 +507,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -955,8 +955,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1408,8 +1408,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1855,8 +1855,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2308,8 +2308,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL 
DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2766,8 +2766,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3222,8 +3222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3667,8 +3667,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4107,8 +4107,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4555,8 +4555,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5008,8 +5008,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5455,8 +5455,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5908,8 +5908,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6366,8 +6366,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6822,8 +6822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - 
`f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7267,8 +7267,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7723,8 +7723,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8187,8 +8187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8656,8 +8656,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9119,8 +9119,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9588,8 +9588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10062,8 +10062,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10534,8 +10534,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10999,8 +10999,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11439,8 +11439,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) 
DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11887,8 +11887,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12340,8 +12340,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12787,8 +12787,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13238,8 +13238,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13696,8 +13696,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14152,8 +14152,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14597,8 +14597,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15037,8 +15037,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15485,8 +15485,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15938,8 +15938,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + 
`f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16385,8 +16385,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16836,8 +16836,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17294,8 +17294,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17750,8 +17750,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18196,8 +18196,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18652,8 +18652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19116,8 +19116,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19585,8 +19585,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20048,8 +20048,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20515,8 +20515,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20989,8 +20989,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21461,8 +21461,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21922,8 +21922,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22378,8 +22378,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22842,8 +22842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23311,8 +23311,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23774,8 +23774,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24241,8 +24241,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24715,8 +24715,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25187,8 +25187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25649,8 +25649,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT 
'0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26089,8 +26089,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26537,8 +26537,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26990,8 +26990,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27437,8 +27437,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27888,8 +27888,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28346,8 +28346,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28802,8 +28802,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29247,8 +29247,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29687,8 +29687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30135,8 
+30135,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30588,8 +30588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31035,8 +31035,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31486,8 +31486,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31944,8 +31944,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32400,8 +32400,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32845,8 +32845,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33301,8 +33301,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33765,8 +33765,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34234,8 +34234,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34697,8 +34697,8 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35164,8 +35164,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35638,8 +35638,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36110,8 +36110,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result index d998e264522..6f9a2486fbe 100644 --- a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result @@ -66,8 +66,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -531,8 +531,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1010,8 +1010,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1500,8 +1500,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1980,8 +1980,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2470,8 +2470,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -2965,8 +2965,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3458,8 +3458,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3946,8 +3946,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4411,8 +4411,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4890,8 +4890,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5380,8 +5380,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5860,8 +5860,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6348,8 +6348,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6843,8 +6843,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7336,8 +7336,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7820,8 +7820,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8285,8 +8285,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8764,8 +8764,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9254,8 +9254,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9734,8 +9734,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10222,8 +10222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10717,8 +10717,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11210,8 +11210,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11694,8 +11694,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12159,8 +12159,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12638,8 +12638,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13128,8 +13128,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - 
`f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13608,8 +13608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14096,8 +14096,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14591,8 +14591,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15084,8 +15084,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result index efc81603b5a..06dd78b69dc 100644 --- a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -524,7 +524,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -988,7 +988,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1457,7 +1457,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1920,7 +1920,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2389,7 +2389,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2863,7 +2863,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) 
DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3335,7 +3335,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3797,7 +3797,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4291,7 +4291,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4793,7 +4793,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5300,7 +5300,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5801,7 +5801,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6308,7 +6308,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6820,7 +6820,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7330,7 +7330,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7829,7 +7829,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8323,7 +8323,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8825,7 +8825,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9332,7 +9332,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL 
DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9833,7 +9833,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10340,7 +10340,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10852,7 +10852,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11362,7 +11362,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` bigint(20) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11862,7 +11862,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12370,7 +12370,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12886,7 +12886,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13407,7 +13407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13922,7 +13922,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14443,7 +14443,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14969,7 +14969,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15493,7 +15493,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16006,7 +16006,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, 
`f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16514,7 +16514,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17030,7 +17030,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17551,7 +17551,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18066,7 +18066,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18587,7 +18587,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19113,7 +19113,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19637,7 +19637,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter2_1_maria.result b/mysql-test/suite/parts/r/partition_alter2_1_maria.result index 1182794ff05..1905c5c25ae 100644 --- a/mysql-test/suite/parts/r/partition_alter2_1_maria.result +++ b/mysql-test/suite/parts/r/partition_alter2_1_maria.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -533,7 +533,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1012,7 +1012,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1502,7 +1502,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1982,7 +1982,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, @@ -2472,7 +2472,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2967,7 +2967,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3460,7 +3460,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3945,7 +3945,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4462,7 +4462,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4993,7 +4993,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5535,7 +5535,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6067,7 +6067,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6609,7 +6609,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7156,7 +7156,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7701,7 +7701,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8237,7 +8237,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8754,7 +8754,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9285,7 +9285,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT 
'0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9827,7 +9827,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10359,7 +10359,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10901,7 +10901,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11448,7 +11448,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11993,7 +11993,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result index c6f68a5882e..3a31fa905e1 100644 --- a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -533,7 +533,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1012,7 +1012,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1502,7 +1502,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1982,7 +1982,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2472,7 +2472,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2967,7 +2967,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, @@ -3460,7 +3460,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3945,7 +3945,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4462,7 +4462,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4993,7 +4993,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5535,7 +5535,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6067,7 +6067,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6609,7 +6609,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7156,7 +7156,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7701,7 +7701,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8237,7 +8237,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8754,7 +8754,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9285,7 +9285,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9827,7 +9827,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10359,7 +10359,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10901,7 +10901,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11448,7 +11448,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11993,7 +11993,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` bigint(20) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result index 2c8ec37d844..1b5fbc437cc 100644 --- a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -526,7 +526,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -992,7 +992,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1463,7 +1463,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1926,7 +1926,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2397,7 +2397,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2871,7 +2871,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3345,7 +3345,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3808,7 +3808,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL 
DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4304,7 +4304,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4808,7 +4808,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5317,7 +5317,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5818,7 +5818,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6327,7 +6327,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6839,7 +6839,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7351,7 +7351,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7852,7 +7852,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8348,7 +8348,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8852,7 +8852,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9361,7 +9361,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9862,7 +9862,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10371,7 +10371,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE 
`t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10883,7 +10883,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11395,7 +11395,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, `f_int2` mediumint(9) NOT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11897,7 +11897,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12407,7 +12407,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12925,7 +12925,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13448,7 +13448,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -13963,7 +13963,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -14486,7 +14486,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15012,7 +15012,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -15538,7 +15538,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16053,7 +16053,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -16563,7 +16563,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17081,7 +17081,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + 
`f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -17604,7 +17604,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18119,7 +18119,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -18642,7 +18642,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19168,7 +19168,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -19694,7 +19694,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter2_2_maria.result b/mysql-test/suite/parts/r/partition_alter2_2_maria.result index 7eaffd200ce..9407be53e04 100644 --- a/mysql-test/suite/parts/r/partition_alter2_2_maria.result +++ b/mysql-test/suite/parts/r/partition_alter2_2_maria.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -535,7 +535,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1016,7 +1016,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1508,7 +1508,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1988,7 +1988,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2480,7 +2480,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2975,7 +2975,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3470,7 +3470,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3957,7 +3957,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4476,7 +4476,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5009,7 +5009,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5553,7 +5553,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6085,7 +6085,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6629,7 +6629,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7176,7 +7176,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7723,7 +7723,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8261,7 +8261,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8780,7 +8780,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9313,7 +9313,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9857,7 +9857,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10389,7 +10389,7 @@ create_command SHOW CREATE 
TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10933,7 +10933,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -11480,7 +11480,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12027,7 +12027,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result index 85ace90e88c..f2a450686f5 100644 --- a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result @@ -68,7 +68,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -535,7 +535,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1016,7 +1016,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1508,7 +1508,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -1988,7 +1988,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2480,7 +2480,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -2975,7 +2975,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3470,7 +3470,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -3957,7 +3957,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -4476,7 +4476,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5009,7 +5009,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -5553,7 +5553,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6085,7 +6085,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -6629,7 +6629,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7176,7 +7176,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -7723,7 +7723,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8261,7 +8261,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -8780,7 +8780,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9313,7 +9313,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -9857,7 +9857,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10389,7 +10389,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -10933,7 +10933,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, @@ -11480,7 +11480,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, @@ -12027,7 +12027,7 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, `f_int2` mediumint(9) DEFAULT NULL, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_alter3_innodb.result b/mysql-test/suite/parts/r/partition_alter3_innodb.result index a1718453c11..2bb1bcc2433 100644 --- a/mysql-test/suite/parts/r/partition_alter3_innodb.result +++ b/mysql-test/suite/parts/r/partition_alter3_innodb.result @@ -440,8 +440,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -463,8 +463,8 @@ ALTER TABLE t1 PARTITION BY KEY(f_int1); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -489,8 +489,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part7); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -515,8 +515,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part2); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -543,8 +543,8 @@ ALTER TABLE t1 ADD PARTITION PARTITIONS 4; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -586,8 +586,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -619,8 +619,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -650,8 +650,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` 
int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -679,8 +679,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -706,8 +706,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -731,8 +731,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -754,8 +754,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -779,8 +779,8 @@ ALTER TABLE t1 REMOVE PARTITIONING; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_alter3_myisam.result b/mysql-test/suite/parts/r/partition_alter3_myisam.result index 8bee02a3096..2a2ae74d8bd 100644 --- a/mysql-test/suite/parts/r/partition_alter3_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter3_myisam.result @@ -482,8 +482,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -506,8 +506,8 @@ ALTER TABLE t1 PARTITION BY KEY(f_int1); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -533,8 +533,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part7); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -562,8 +562,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part2); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) 
DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -594,8 +594,8 @@ ALTER TABLE t1 ADD PARTITION PARTITIONS 4; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -645,8 +645,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -685,8 +685,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -722,8 +722,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -756,8 +756,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -787,8 +787,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -815,8 +815,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -840,8 +840,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -866,8 +866,8 @@ ALTER TABLE t1 REMOVE PARTITIONING; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_alter4_innodb.result b/mysql-test/suite/parts/r/partition_alter4_innodb.result index f51d6e5cb02..5bdf4ac0ab1 100644 --- a/mysql-test/suite/parts/r/partition_alter4_innodb.result +++ 
b/mysql-test/suite/parts/r/partition_alter4_innodb.result @@ -69,8 +69,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -528,8 +528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -998,8 +998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1469,8 +1469,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1934,8 +1934,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2405,8 +2405,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2881,8 +2881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3355,8 +3355,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3819,8 +3819,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4278,8 +4278,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4748,8 +4748,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + 
`f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5219,8 +5219,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5684,8 +5684,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6155,8 +6155,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6631,8 +6631,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7105,8 +7105,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7569,8 +7569,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8028,8 +8028,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8498,8 +8498,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8969,8 +8969,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9434,8 +9434,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9905,8 +9905,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10381,8 +10381,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10855,8 +10855,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11319,8 +11319,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11778,8 +11778,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12248,8 +12248,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12719,8 +12719,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13184,8 +13184,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13655,8 +13655,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14131,8 +14131,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14605,8 +14605,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15069,8 +15069,8 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15528,8 +15528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15998,8 +15998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16469,8 +16469,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16934,8 +16934,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17405,8 +17405,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17881,8 +17881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18355,8 +18355,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18822,8 +18822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19281,8 +19281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19751,8 +19751,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT 
NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20222,8 +20222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20687,8 +20687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21158,8 +21158,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21634,8 +21634,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22108,8 +22108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22572,8 +22572,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23031,8 +23031,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23501,8 +23501,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23972,8 +23972,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24437,8 +24437,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24908,8 +24908,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) 
DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25384,8 +25384,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25858,8 +25858,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26322,8 +26322,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26781,8 +26781,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27251,8 +27251,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27722,8 +27722,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28187,8 +28187,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28658,8 +28658,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29134,8 +29134,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29608,8 +29608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30072,8 +30072,8 @@ create_command SHOW CREATE TABLE 
t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30531,8 +30531,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31001,8 +31001,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31472,8 +31472,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31937,8 +31937,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32408,8 +32408,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32884,8 +32884,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33358,8 +33358,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33822,8 +33822,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34281,8 +34281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34751,8 +34751,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35222,8 +35222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35687,8 +35687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36158,8 +36158,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36634,8 +36634,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37108,8 +37108,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37576,8 +37576,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38036,8 +38036,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38507,8 +38507,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38979,8 +38979,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39445,8 +39445,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39917,8 +39917,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) 
DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40394,8 +40394,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40869,8 +40869,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41334,8 +41334,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41794,8 +41794,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42265,8 +42265,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42737,8 +42737,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43203,8 +43203,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43675,8 +43675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44152,8 +44152,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44627,8 +44627,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45091,8 +45091,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 
CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45550,8 +45550,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46020,8 +46020,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46491,8 +46491,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46956,8 +46956,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47427,8 +47427,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47903,8 +47903,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48377,8 +48377,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48841,8 +48841,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49300,8 +49300,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49770,8 +49770,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -50241,8 +50241,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50706,8 +50706,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51177,8 +51177,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51653,8 +51653,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52127,8 +52127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52592,8 +52592,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53052,8 +53052,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53523,8 +53523,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53995,8 +53995,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54461,8 +54461,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54933,8 +54933,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` 
int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55410,8 +55410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55885,8 +55885,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56350,8 +56350,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56807,8 +56807,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57275,8 +57275,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57744,8 +57744,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58207,8 +58207,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58676,8 +58676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59150,8 +59150,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59622,8 +59622,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60084,8 +60084,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - 
`f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60541,8 +60541,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61009,8 +61009,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61478,8 +61478,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61941,8 +61941,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62410,8 +62410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62884,8 +62884,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63356,8 +63356,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64138,8 +64138,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64595,8 +64595,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65063,8 +65063,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ 
-65532,8 +65532,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65995,8 +65995,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66464,8 +66464,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66938,8 +66938,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67410,8 +67410,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67877,8 +67877,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68336,8 +68336,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68806,8 +68806,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69277,8 +69277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69742,8 +69742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70213,8 +70213,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70689,8 +70689,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71163,8 +71163,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71627,8 +71627,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72086,8 +72086,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72556,8 +72556,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73027,8 +73027,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73492,8 +73492,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73963,8 +73963,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74439,8 +74439,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74913,8 +74913,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75377,8 +75377,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - 
`f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75836,8 +75836,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76306,8 +76306,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76777,8 +76777,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77242,8 +77242,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77713,8 +77713,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78189,8 +78189,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78663,8 +78663,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79127,8 +79127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79586,8 +79586,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80056,8 +80056,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80527,8 +80527,8 @@ create_command 
SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80992,8 +80992,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81463,8 +81463,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81939,8 +81939,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82413,8 +82413,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82877,8 +82877,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83336,8 +83336,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83806,8 +83806,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84277,8 +84277,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84742,8 +84742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85213,8 +85213,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` 
char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85689,8 +85689,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86163,8 +86163,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86628,8 +86628,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87082,8 +87082,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87544,8 +87544,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88004,8 +88004,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88462,8 +88462,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88924,8 +88924,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -89386,8 +89386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -89844,8 +89844,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_alter4_myisam.result b/mysql-test/suite/parts/r/partition_alter4_myisam.result index c49b4229519..da99d6867ab 100644 --- 
a/mysql-test/suite/parts/r/partition_alter4_myisam.result +++ b/mysql-test/suite/parts/r/partition_alter4_myisam.result @@ -69,8 +69,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -537,8 +537,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1022,8 +1022,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1514,8 +1514,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1996,8 +1996,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2488,8 +2488,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2985,8 +2985,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3480,8 +3480,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3967,8 +3967,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4435,8 +4435,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4920,8 +4920,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - 
`f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5412,8 +5412,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5894,8 +5894,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6386,8 +6386,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6883,8 +6883,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7378,8 +7378,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7865,8 +7865,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8333,8 +8333,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -8818,8 +8818,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9310,8 +9310,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -9792,8 +9792,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10284,8 +10284,8 @@ create_command SHOW CREATE TABLE 
t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -10781,8 +10781,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11276,8 +11276,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -11763,8 +11763,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12231,8 +12231,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -12716,8 +12716,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13208,8 +13208,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -13690,8 +13690,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14182,8 +14182,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -14679,8 +14679,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15174,8 +15174,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT 
NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -15661,8 +15661,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16129,8 +16129,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -16614,8 +16614,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17106,8 +17106,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -17588,8 +17588,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18080,8 +18080,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -18577,8 +18577,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19072,8 +19072,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -19562,8 +19562,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20030,8 +20030,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -20515,8 +20515,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) 
DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21007,8 +21007,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21489,8 +21489,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -21981,8 +21981,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22478,8 +22478,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -22973,8 +22973,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23460,8 +23460,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -23928,8 +23928,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24413,8 +24413,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -24905,8 +24905,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25387,8 +25387,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -25879,8 +25879,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 
CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26376,8 +26376,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -26871,8 +26871,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27358,8 +27358,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -27826,8 +27826,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28311,8 +28311,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -28803,8 +28803,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29285,8 +29285,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -29777,8 +29777,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30274,8 +30274,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -30769,8 +30769,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -31256,8 +31256,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -31724,8 +31724,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32209,8 +32209,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -32701,8 +32701,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33183,8 +33183,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -33675,8 +33675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34172,8 +34172,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -34667,8 +34667,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35154,8 +35154,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -35622,8 +35622,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36107,8 +36107,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` 
int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -36599,8 +36599,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37081,8 +37081,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -37573,8 +37573,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38070,8 +38070,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -38565,8 +38565,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39055,8 +39055,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -39523,8 +39523,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40008,8 +40008,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40500,8 +40500,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -40982,8 +40982,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41474,8 +41474,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - 
`f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -41971,8 +41971,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42466,8 +42466,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -42953,8 +42953,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43421,8 +43421,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -43906,8 +43906,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44398,8 +44398,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -44880,8 +44880,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45372,8 +45372,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -45869,8 +45869,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -46364,8 +46364,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ 
-46851,8 +46851,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47319,8 +47319,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -47804,8 +47804,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48296,8 +48296,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -48778,8 +48778,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49270,8 +49270,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -49767,8 +49767,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50262,8 +50262,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -50749,8 +50749,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51217,8 +51217,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -51702,8 +51702,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` 
char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52194,8 +52194,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -52676,8 +52676,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53168,8 +53168,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -53665,8 +53665,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54160,8 +54160,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -54647,8 +54647,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55115,8 +55115,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -55600,8 +55600,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56092,8 +56092,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -56574,8 +56574,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57066,8 +57066,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - 
`f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -57563,8 +57563,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58058,8 +58058,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -58546,8 +58546,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59012,8 +59012,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59495,8 +59495,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -59985,8 +59985,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60465,8 +60465,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -60955,8 +60955,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61450,8 +61450,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -61943,8 +61943,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62428,8 +62428,8 @@ create_command 
SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -62894,8 +62894,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63377,8 +63377,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -63867,8 +63867,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64347,8 +64347,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -64837,8 +64837,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65332,8 +65332,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -65825,8 +65825,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -66630,8 +66630,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67096,8 +67096,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -67579,8 +67579,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` 
char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68069,8 +68069,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -68549,8 +68549,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69039,8 +69039,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -69534,8 +69534,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70027,8 +70027,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70517,8 +70517,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -70985,8 +70985,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71470,8 +71470,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -71962,8 +71962,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72444,8 +72444,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -72936,8 +72936,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + 
`f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73433,8 +73433,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -73928,8 +73928,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74415,8 +74415,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -74883,8 +74883,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75368,8 +75368,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -75860,8 +75860,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76342,8 +76342,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -76834,8 +76834,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77331,8 +77331,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -77826,8 +77826,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78313,8 +78313,8 @@ create_command SHOW CREATE TABLE t1; Table 
Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -78781,8 +78781,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79266,8 +79266,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -79758,8 +79758,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80240,8 +80240,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -80732,8 +80732,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81229,8 +81229,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -81724,8 +81724,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82211,8 +82211,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -82679,8 +82679,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -83164,8 +83164,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, 
`f_charbig` varchar(1000) DEFAULT NULL @@ -83656,8 +83656,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84138,8 +84138,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -84630,8 +84630,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85127,8 +85127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -85622,8 +85622,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86109,8 +86109,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -86577,8 +86577,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87062,8 +87062,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -87554,8 +87554,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88036,8 +88036,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -88528,8 +88528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 
0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -89025,8 +89025,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -89520,8 +89520,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -90008,8 +90008,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -90468,8 +90468,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -90936,8 +90936,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -91402,8 +91402,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -91866,8 +91866,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -92334,8 +92334,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -92802,8 +92802,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -93266,8 +93266,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_basic_innodb.result 
b/mysql-test/suite/parts/r/partition_basic_innodb.result index 1a726229403..5bfda948ca4 100644 --- a/mysql-test/suite/parts/r/partition_basic_innodb.result +++ b/mysql-test/suite/parts/r/partition_basic_innodb.result @@ -67,8 +67,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -524,8 +524,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -992,8 +992,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1468,8 +1468,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1936,8 +1936,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2412,8 +2412,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2897,8 +2897,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3376,8 +3376,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3846,8 +3846,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4303,8 +4303,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4771,8 +4771,8 @@ create_command SHOW 
CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5247,8 +5247,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5715,8 +5715,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6189,8 +6189,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6670,8 +6670,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7149,8 +7149,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7623,8 +7623,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8117,8 +8117,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8622,8 +8622,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9135,8 +9135,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9640,8 +9640,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - 
`f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10153,8 +10153,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10675,8 +10675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11191,8 +11191,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11697,8 +11697,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12191,8 +12191,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12696,8 +12696,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13209,8 +13209,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13714,8 +13714,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14227,8 +14227,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ 
-14749,8 +14749,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15265,8 +15265,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15771,8 +15771,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16281,8 +16281,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16802,8 +16802,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17331,8 +17331,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17852,8 +17852,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18381,8 +18381,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18919,8 +18919,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19451,8 +19451,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19978,8 +19978,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` 
int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -20472,8 +20472,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -20977,8 +20977,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -21490,8 +21490,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -21995,8 +21995,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -22506,8 +22506,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -23024,8 +23024,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -23540,8 +23540,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -24046,8 +24046,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -24540,8 +24540,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -25045,8 
+25045,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -25558,8 +25558,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -26063,8 +26063,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -26574,8 +26574,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -27092,8 +27092,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -27608,8 +27608,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) NOT NULL DEFAULT '0', - `f_int2` int(11) NOT NULL DEFAULT '0', + `f_int1` int(11) NOT NULL DEFAULT 0, + `f_int2` int(11) NOT NULL DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28114,8 +28114,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -28624,8 +28624,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29145,8 +29145,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -29674,8 +29674,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30195,8 +30195,8 
@@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -30722,8 +30722,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -31256,8 +31256,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -31788,8 +31788,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_basic_myisam.result b/mysql-test/suite/parts/r/partition_basic_myisam.result index bade6841047..3351201ea06 100644 --- a/mysql-test/suite/parts/r/partition_basic_myisam.result +++ b/mysql-test/suite/parts/r/partition_basic_myisam.result @@ -67,8 +67,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -527,8 +527,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1001,8 +1001,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1486,8 +1486,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1961,8 +1961,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2446,8 +2446,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -2940,8 +2940,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3428,8 +3428,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3908,8 +3908,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4368,8 +4368,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4842,8 +4842,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5327,8 +5327,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5802,8 +5802,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6285,8 +6285,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6775,8 +6775,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7263,8 +7263,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7747,8 +7747,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, 
`f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8260,8 +8260,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8787,8 +8787,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9325,8 +9325,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9853,8 +9853,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10391,8 +10391,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10938,8 +10938,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11479,8 +11479,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12016,8 +12016,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12529,8 +12529,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13056,8 +13056,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13594,8 +13594,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) 
DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14122,8 +14122,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14658,8 +14658,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15201,8 +15201,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15742,8 +15742,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result index 006820d3f18..1c1a758f985 100644 --- a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result +++ b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result @@ -77,8 +77,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -557,8 +557,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1053,8 +1053,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1566,8 +1566,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2061,8 +2061,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2566,8 +2566,8 @@ create_command SHOW CREATE 
TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3096,8 +3096,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3606,8 +3606,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4110,8 +4110,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4590,8 +4590,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5086,8 +5086,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -5599,8 +5599,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6094,8 +6094,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -6597,8 +6597,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7127,8 +7127,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -7637,8 +7637,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -8145,8 +8145,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -8678,8 +8678,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9227,8 +9227,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -9793,8 +9793,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10341,8 +10341,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -10899,8 +10899,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -11482,8 +11482,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12045,8 +12045,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -12606,8 +12606,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13139,8 +13139,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -13688,8 +13688,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + 
`f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14254,8 +14254,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -14802,8 +14802,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15358,8 +15358,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -15941,8 +15941,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -16504,8 +16504,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17061,8 +17061,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -17587,8 +17587,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18131,8 +18131,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -18684,8 +18684,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19222,8 +19222,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -19773,8 +19773,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE 
TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, @@ -20316,8 +20316,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL, diff --git a/mysql-test/suite/parts/r/partition_engine_innodb.result b/mysql-test/suite/parts/r/partition_engine_innodb.result index c3cb7cff083..cfa27c8e112 100644 --- a/mysql-test/suite/parts/r/partition_engine_innodb.result +++ b/mysql-test/suite/parts/r/partition_engine_innodb.result @@ -62,8 +62,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -519,8 +519,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -978,8 +978,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1503,8 +1503,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2004,8 +2004,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2466,8 +2466,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2928,8 +2928,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3387,8 +3387,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3852,8 +3852,8 @@ create_command SHOW CREATE TABLE 
t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4310,8 +4310,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4764,8 +4764,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_engine_myisam.result b/mysql-test/suite/parts/r/partition_engine_myisam.result index 02f8649692d..3d20dbb726a 100644 --- a/mysql-test/suite/parts/r/partition_engine_myisam.result +++ b/mysql-test/suite/parts/r/partition_engine_myisam.result @@ -62,8 +62,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -528,8 +528,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -996,8 +996,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1534,8 +1534,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2048,8 +2048,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2523,8 +2523,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2998,8 +2998,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3466,8 +3466,8 @@ 
create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3944,8 +3944,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4415,8 +4415,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4876,8 +4876,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_syntax_innodb.result b/mysql-test/suite/parts/r/partition_syntax_innodb.result index 3dba2873235..767f023d04e 100644 --- a/mysql-test/suite/parts/r/partition_syntax_innodb.result +++ b/mysql-test/suite/parts/r/partition_syntax_innodb.result @@ -652,8 +652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -679,8 +679,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -713,8 +713,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -735,8 +735,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -820,8 +820,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -856,8 +856,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL 
@@ -881,8 +881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -906,8 +906,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -931,8 +931,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1675,8 +1675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1704,8 +1704,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/parts/r/partition_syntax_myisam.result b/mysql-test/suite/parts/r/partition_syntax_myisam.result index 7da964a1f8c..97eabe7d2ce 100644 --- a/mysql-test/suite/parts/r/partition_syntax_myisam.result +++ b/mysql-test/suite/parts/r/partition_syntax_myisam.result @@ -652,8 +652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -687,8 +687,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -731,8 +731,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -759,8 +759,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -852,8 +852,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` 
varchar(1000) DEFAULT NULL @@ -904,8 +904,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -937,8 +937,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -974,8 +974,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1005,8 +1005,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1757,8 +1757,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1794,8 +1794,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result index 0ecc28a94ec..f4648160bbd 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_slave_innodb.result @@ -674,7 +674,7 @@ t16 CREATE TABLE `t16` ( `c3` text DEFAULT NULL, `c4` blob DEFAULT NULL, `c5` char(5) DEFAULT NULL, - `c6` int(11) DEFAULT '1', + `c6` int(11) DEFAULT 1, `c7` timestamp NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (`c1`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result index a2888377e39..f3863b27325 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_slave_myisam.result @@ -674,7 +674,7 @@ t16 CREATE TABLE `t16` ( `c3` text DEFAULT NULL, `c4` blob DEFAULT NULL, `c5` char(5) DEFAULT NULL, - `c6` int(11) DEFAULT '1', + `c6` int(11) DEFAULT 1, `c7` timestamp NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (`c1`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 diff --git a/mysql-test/suite/rpl/r/rpl_mixed_ddl_dml.result b/mysql-test/suite/rpl/r/rpl_mixed_ddl_dml.result index ceb4fdf39d5..e7b0d6f29c8 100644 --- a/mysql-test/suite/rpl/r/rpl_mixed_ddl_dml.result +++ b/mysql-test/suite/rpl/r/rpl_mixed_ddl_dml.result @@ -44,7 +44,7 @@ t3 CREATE TABLE `t3` ( show create table t5; Table Create Table t5 CREATE TABLE `t5` ( - `id` int(11) NOT NULL DEFAULT '0', + `id` 
int(11) NOT NULL DEFAULT 0, `created` datetime DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 connection master; diff --git a/mysql-test/suite/rpl/r/rpl_multi_engine.result b/mysql-test/suite/rpl/r/rpl_multi_engine.result index c39d9f5f374..075cbc14fe7 100644 --- a/mysql-test/suite/rpl/r/rpl_multi_engine.result +++ b/mysql-test/suite/rpl/r/rpl_multi_engine.result @@ -17,8 +17,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -34,8 +34,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -76,8 +76,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -118,8 +118,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -160,8 +160,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -177,8 +177,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -219,8 +219,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -261,8 +261,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ 
-303,8 +303,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -320,8 +320,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -362,8 +362,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, @@ -404,8 +404,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, diff --git a/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result b/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result index a88d81caaf0..37548cad4db 100644 --- a/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result +++ b/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result @@ -22,8 +22,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -43,8 +43,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -110,8 +110,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -131,8 +131,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -207,8 +207,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date 
DEFAULT NULL @@ -225,8 +225,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -289,8 +289,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -307,8 +307,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -378,8 +378,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -394,8 +394,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -455,8 +455,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -471,8 +471,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` text DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL @@ -540,8 +540,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL, @@ -557,8 +557,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned DEFAULT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL, @@ -620,8 +620,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) DEFAULT '0.0000', - `f` float DEFAULT '0', + `d` decimal(10,4) DEFAULT 0.0000, + `f` float DEFAULT 0, `total` bigint(20) unsigned NOT NULL, `y` year(4) DEFAULT NULL, `t` date DEFAULT NULL, @@ -637,8 +637,8 @@ t1 CREATE TABLE `t1` ( `b1` bit(8) DEFAULT NULL, `vc` varchar(255) DEFAULT NULL, `bc` char(255) DEFAULT NULL, - `d` decimal(10,4) 
DEFAULT '0.0000',
-  `f` float DEFAULT '0',
+  `d` decimal(10,4) DEFAULT 0.0000,
+  `f` float DEFAULT 0,
   `total` bigint(20) unsigned NOT NULL,
   `y` year(4) DEFAULT NULL,
   `t` date DEFAULT NULL,
@@ -700,8 +700,8 @@ t1 CREATE TABLE `t1` (
   `b1` bit(8) DEFAULT NULL,
   `vc` text DEFAULT NULL,
   `bc` char(255) DEFAULT NULL,
-  `d` decimal(10,4) DEFAULT '0.0000',
-  `f` float DEFAULT '0',
+  `d` decimal(10,4) DEFAULT 0.0000,
+  `f` float DEFAULT 0,
   `total` bigint(20) unsigned NOT NULL,
   `y` year(4) DEFAULT NULL,
   `t` date DEFAULT NULL,
@@ -717,8 +717,8 @@ t1 CREATE TABLE `t1` (
   `b1` bit(8) DEFAULT NULL,
   `vc` text DEFAULT NULL,
   `bc` char(255) DEFAULT NULL,
-  `d` decimal(10,4) DEFAULT '0.0000',
-  `f` float DEFAULT '0',
+  `d` decimal(10,4) DEFAULT 0.0000,
+  `f` float DEFAULT 0,
   `total` bigint(20) unsigned NOT NULL,
   `y` year(4) DEFAULT NULL,
   `t` date DEFAULT NULL,
diff --git a/plugin/query_response_time/mysql-test/query_response_time/basic.result b/plugin/query_response_time/mysql-test/query_response_time/basic.result
index 86fba87e056..673201266ec 100644
--- a/plugin/query_response_time/mysql-test/query_response_time/basic.result
+++ b/plugin/query_response_time/mysql-test/query_response_time/basic.result
@@ -7,7 +7,7 @@ SHOW CREATE TABLE INFORMATION_SCHEMA.QUERY_RESPONSE_TIME;
 Table Create Table
 QUERY_RESPONSE_TIME CREATE TEMPORARY TABLE `QUERY_RESPONSE_TIME` (
   `TIME` varchar(14) NOT NULL DEFAULT '',
-  `COUNT` int(11) unsigned NOT NULL DEFAULT '0',
+  `COUNT` int(11) unsigned NOT NULL DEFAULT 0,
   `TOTAL` varchar(14) NOT NULL DEFAULT ''
 ) ENGINE=MEMORY DEFAULT CHARSET=utf8
 SELECT PLUGIN_NAME, PLUGIN_VERSION, PLUGIN_TYPE, PLUGIN_AUTHOR, PLUGIN_DESCRIPTION, PLUGIN_LICENSE, PLUGIN_MATURITY FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME LIKE 'query_response_time%';;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 30c65e6e279..1c9d75d06eb 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1689,7 +1689,11 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value,
       quoted= 0;
     }
     else
+    {
       field->val_str(&type);
+      if (!field->str_needs_quotes())
+        quoted= 0;
+    }
     if (type.length())
     {
       String def_val;
diff --git a/storage/connect/mysql-test/connect/r/bin.result b/storage/connect/mysql-test/connect/r/bin.result
index 4ba353ac705..1baa18a1e4d 100644
--- a/storage/connect/mysql-test/connect/r/bin.result
+++ b/storage/connect/mysql-test/connect/r/bin.result
@@ -57,7 +57,7 @@ t1 CREATE TABLE `t1` (
   `name` char(10) NOT NULL,
   `birth` date NOT NULL,
   `id` char(5) NOT NULL `FIELD_FORMAT`='S',
-  `salary` double(9,2) NOT NULL DEFAULT '0.00' `FIELD_FORMAT`='F',
+  `salary` double(9,2) NOT NULL DEFAULT 0.00 `FIELD_FORMAT`='F',
   `dept` int(4) NOT NULL `FIELD_FORMAT`='S'
 ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=BIN `FILE_NAME`='Testbal.dat' `OPTION_LIST`='Endian=Little' `READONLY`=NO
 INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
@@ -76,7 +76,7 @@ t1 CREATE TABLE `t1` (
   `name` char(10) NOT NULL,
   `birth` date NOT NULL,
   `id` char(5) NOT NULL `FIELD_FORMAT`='S',
-  `salary` double(9,2) NOT NULL DEFAULT '0.00' `FIELD_FORMAT`='F',
+  `salary` double(9,2) NOT NULL DEFAULT 0.00 `FIELD_FORMAT`='F',
   `dept` int(4) NOT NULL `FIELD_FORMAT`='S'
 ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`=BIN `FILE_NAME`='Testbal.dat' `OPTION_LIST`='Endian=Little' `READONLY`=YES
 INSERT INTO t1 VALUES (7777,'BILL','1973-06-30',4444,5555.555,777);
diff --git a/storage/connect/mysql-test/connect/r/mysql_discovery.result b/storage/connect/mysql-test/connect/r/mysql_discovery.result
index
220df6f7b92..32bd4761f37 100644 --- a/storage/connect/mysql-test/connect/r/mysql_discovery.result +++ b/storage/connect/mysql-test/connect/r/mysql_discovery.result @@ -19,8 +19,8 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `id` int(20) NOT NULL, - `group` int(11) NOT NULL DEFAULT '1', - `a\\b` int(11) NOT NULL DEFAULT '2', + `group` int(11) NOT NULL DEFAULT 1, + `a\\b` int(11) NOT NULL DEFAULT 2, `a\\` int(10) unsigned DEFAULT NULL, `name` varchar(32) DEFAULT 'name' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/test/t1' `TABLE_TYPE`='MYSQL' diff --git a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result index 2e68d8be6b4..d2f0b52c446 100644 --- a/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result +++ b/storage/test_sql_discovery/mysql-test/sql_discovery/simple.result @@ -122,7 +122,7 @@ Handler_discover 15 show create table t1; Table Create Table t1 CREATE TABLE t1 ( - a int(11) NOT NULL DEFAULT '5', + a int(11) NOT NULL DEFAULT 5, b timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, c tinyblob DEFAULT NULL, d decimal(5,2) DEFAULT NULL, diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_extra_col_slave_tokudb.result b/storage/tokudb/mysql-test/rpl/r/rpl_extra_col_slave_tokudb.result index ce312f4682f..db89e478b90 100644 --- a/storage/tokudb/mysql-test/rpl/r/rpl_extra_col_slave_tokudb.result +++ b/storage/tokudb/mysql-test/rpl/r/rpl_extra_col_slave_tokudb.result @@ -674,7 +674,7 @@ t16 CREATE TABLE `t16` ( `c3` text DEFAULT NULL, `c4` blob DEFAULT NULL, `c5` char(5) DEFAULT NULL, - `c6` int(11) DEFAULT '1', + `c6` int(11) DEFAULT 1, `c7` timestamp NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY (`c1`) ) ENGINE=TokuDB DEFAULT CHARSET=latin1 diff --git a/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result index 5769ee74071..9d813eca8e9 100644 --- a/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result +++ b/storage/tokudb/mysql-test/tokudb/r/background_job_manager.result @@ -17,7 +17,7 @@ set global tokudb_debug_pause_background_job_manager=TRUE; show create table information_schema.tokudb_background_job_status; Table Create Table TokuDB_background_job_status CREATE TEMPORARY TABLE `TokuDB_background_job_status` ( - `id` bigint(0) NOT NULL DEFAULT '0', + `id` bigint(0) NOT NULL DEFAULT 0, `database_name` varchar(256) NOT NULL DEFAULT '', `table_name` varchar(256) NOT NULL DEFAULT '', `job_type` varchar(256) NOT NULL DEFAULT '', diff --git a/storage/tokudb/mysql-test/tokudb/r/change_column_int_default.result b/storage/tokudb/mysql-test/tokudb/r/change_column_int_default.result index 558d153711c..b119f1a0b61 100644 --- a/storage/tokudb/mysql-test/tokudb/r/change_column_int_default.result +++ b/storage/tokudb/mysql-test/tokudb/r/change_column_int_default.result @@ -6,31 +6,31 @@ ALTER TABLE t CHANGE COLUMN a a TINYINT DEFAULT 100; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` tinyint(4) DEFAULT '100' + `a` tinyint(4) DEFAULT 100 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a SMALLINT DEFAULT 200; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` smallint(6) DEFAULT '200' + `a` smallint(6) DEFAULT 200 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a MEDIUMINT DEFAULT 300; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` mediumint(9) 
DEFAULT '300' + `a` mediumint(9) DEFAULT 300 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a INT DEFAULT 400; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` int(11) DEFAULT '400' + `a` int(11) DEFAULT 400 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a BIGINT DEFAULT 500; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` bigint(20) DEFAULT '500' + `a` bigint(20) DEFAULT 500 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 DROP TABLE t; CREATE TABLE t (a TINYINT NOT NULL DEFAULT 1); @@ -38,31 +38,31 @@ ALTER TABLE t CHANGE COLUMN a a TINYINT NOT NULL DEFAULT 100; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` tinyint(4) NOT NULL DEFAULT '100' + `a` tinyint(4) NOT NULL DEFAULT 100 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a SMALLINT NOT NULL DEFAULT 200; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` smallint(6) NOT NULL DEFAULT '200' + `a` smallint(6) NOT NULL DEFAULT 200 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a MEDIUMINT NOT NULL DEFAULT 300; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` mediumint(9) NOT NULL DEFAULT '300' + `a` mediumint(9) NOT NULL DEFAULT 300 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a INT NOT NULL DEFAULT 400; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` int(11) NOT NULL DEFAULT '400' + `a` int(11) NOT NULL DEFAULT 400 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a BIGINT NOT NULL DEFAULT 500; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` bigint(20) NOT NULL DEFAULT '500' + `a` bigint(20) NOT NULL DEFAULT 500 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 DROP TABLE t; CREATE TABLE t (a TINYINT UNSIGNED DEFAULT 1); @@ -70,31 +70,31 @@ ALTER TABLE t CHANGE COLUMN a a TINYINT UNSIGNED DEFAULT 100; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` tinyint(3) unsigned DEFAULT '100' + `a` tinyint(3) unsigned DEFAULT 100 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a SMALLINT UNSIGNED DEFAULT 200; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` smallint(5) unsigned DEFAULT '200' + `a` smallint(5) unsigned DEFAULT 200 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a MEDIUMINT UNSIGNED DEFAULT 300; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` mediumint(8) unsigned DEFAULT '300' + `a` mediumint(8) unsigned DEFAULT 300 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a INT UNSIGNED DEFAULT 400; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` int(10) unsigned DEFAULT '400' + `a` int(10) unsigned DEFAULT 400 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a BIGINT UNSIGNED DEFAULT 500; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` bigint(20) unsigned DEFAULT '500' + `a` bigint(20) unsigned DEFAULT 500 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 DROP TABLE t; CREATE TABLE t (a TINYINT UNSIGNED NOT NULL DEFAULT 1); @@ -102,30 +102,30 @@ ALTER TABLE t CHANGE COLUMN a a TINYINT UNSIGNED NOT NULL DEFAULT 100; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` tinyint(3) unsigned NOT NULL DEFAULT '100' + `a` tinyint(3) unsigned NOT NULL DEFAULT 100 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a SMALLINT UNSIGNED NOT NULL DEFAULT 200; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` 
smallint(5) unsigned NOT NULL DEFAULT '200' + `a` smallint(5) unsigned NOT NULL DEFAULT 200 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a MEDIUMINT UNSIGNED NOT NULL DEFAULT 300; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` mediumint(8) unsigned NOT NULL DEFAULT '300' + `a` mediumint(8) unsigned NOT NULL DEFAULT 300 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a INT UNSIGNED NOT NULL DEFAULT 400; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` int(10) unsigned NOT NULL DEFAULT '400' + `a` int(10) unsigned NOT NULL DEFAULT 400 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE t CHANGE COLUMN a a BIGINT UNSIGNED NOT NULL DEFAULT 500; SHOW CREATE TABLE t; Table Create Table t CREATE TABLE `t` ( - `a` bigint(20) unsigned NOT NULL DEFAULT '500' + `a` bigint(20) unsigned NOT NULL DEFAULT 500 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 DROP TABLE t; diff --git a/storage/tokudb/mysql-test/tokudb/r/type_enum.result b/storage/tokudb/mysql-test/tokudb/r/type_enum.result index 3a1654ef287..a1e61df126b 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_enum.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_enum.result @@ -1670,7 +1670,7 @@ b ENUM('value',' show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT '1', + `a` int(11) DEFAULT 1, `b` enum('value','öäü_value','ÊÃÕ') NOT NULL ) ENGINE=ENGINE DEFAULT CHARSET=latin1 show columns from t1; diff --git a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result index 1d42fc5bb0b..9d8e09b32e2 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result @@ -976,8 +976,8 @@ f1 decimal (0,0) zerofill not null default 0); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `f0` decimal(30,30) unsigned zerofill NOT NULL DEFAULT '0.000000000000000000000000000000', - `f1` decimal(10,0) unsigned zerofill NOT NULL DEFAULT '0000000000' + `f0` decimal(30,30) unsigned zerofill NOT NULL DEFAULT 0.000000000000000000000000000000, + `f1` decimal(10,0) unsigned zerofill NOT NULL DEFAULT 0000000000 ) ENGINE=ENGINE DEFAULT CHARSET=latin1 drop table t1; drop procedure if exists wg2; diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/r/alter_column_default.result b/storage/tokudb/mysql-test/tokudb_alter_table/r/alter_column_default.result index 2c1390ad2a4..4c63047444d 100644 --- a/storage/tokudb/mysql-test/tokudb_alter_table/r/alter_column_default.result +++ b/storage/tokudb/mysql-test/tokudb_alter_table/r/alter_column_default.result @@ -5,14 +5,14 @@ CREATE TABLE foo (a INT NOT NULL DEFAULT 0, b INT DEFAULT NULL); SHOW CREATE TABLE foo; Table Create Table foo CREATE TABLE `foo` ( - `a` int(11) NOT NULL DEFAULT '0', + `a` int(11) NOT NULL DEFAULT 0, `b` int(11) DEFAULT NULL ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE foo ALTER COLUMN a SET DEFAULT 100; SHOW CREATE TABLE foo; Table Create Table foo CREATE TABLE `foo` ( - `a` int(11) NOT NULL DEFAULT '100', + `a` int(11) NOT NULL DEFAULT 100, `b` int(11) DEFAULT NULL ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE foo ALTER COLUMN a DROP DEFAULT; @@ -27,7 +27,7 @@ SHOW CREATE TABLE foo; Table Create Table foo CREATE TABLE `foo` ( `a` int(11) NOT NULL, - `b` int(11) DEFAULT '42' + `b` int(11) DEFAULT 42 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 ALTER TABLE foo ALTER COLUMN b DROP DEFAULT; SHOW CREATE TABLE foo; diff --git 
a/storage/tokudb/mysql-test/tokudb_mariadb/r/alter.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/alter.result index 600e34dfffd..95364221920 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/r/alter.result +++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/alter.result @@ -8,7 +8,7 @@ show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `i` int(11) DEFAULT NULL, - `j` int(11) DEFAULT '0' + `j` int(11) DEFAULT 0 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 alter table t1 modify i int default '1'; select * from t1; @@ -17,8 +17,8 @@ i j show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `i` int(11) DEFAULT '1', - `j` int(11) DEFAULT '0' + `i` int(11) DEFAULT 1, + `j` int(11) DEFAULT 0 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 alter table t1 modify j int default '2', rename t2; select * from t1; @@ -29,7 +29,7 @@ i j show create table t2; Table Create Table t2 CREATE TABLE `t2` ( - `i` int(11) DEFAULT '1', - `j` int(11) DEFAULT '2' + `i` int(11) DEFAULT 1, + `j` int(11) DEFAULT 2 ) ENGINE=TokuDB DEFAULT CHARSET=latin1 drop table t2; diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter3_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter3_tokudb.result index b596e84d630..e8129e75f3e 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter3_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter3_tokudb.result @@ -392,8 +392,8 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -414,8 +414,8 @@ ALTER TABLE t1 PARTITION BY KEY(f_int1); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -439,8 +439,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part7); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -462,8 +462,8 @@ ALTER TABLE t1 ADD PARTITION (PARTITION part2); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -486,8 +486,8 @@ ALTER TABLE t1 ADD PARTITION PARTITIONS 4; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -521,8 +521,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) 
DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -547,8 +547,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -572,8 +572,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -596,8 +596,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -619,8 +619,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -641,8 +641,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -662,8 +662,8 @@ ALTER TABLE t1 COALESCE PARTITION 1; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -686,8 +686,8 @@ ALTER TABLE t1 REMOVE PARTITIONING; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result index 7df4c93cd85..8837049119a 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result @@ -62,8 +62,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -519,8 +519,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -978,8 +978,8 @@ create_command SHOW 
CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1503,8 +1503,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2004,8 +2004,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2466,8 +2466,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -2928,8 +2928,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3387,8 +3387,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -3852,8 +3852,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4310,8 +4310,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -4764,8 +4764,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result index 340bbf07f35..13b8a133628 100644 --- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result +++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_syntax_tokudb.result @@ -652,8 +652,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` 
char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -679,8 +679,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -713,8 +713,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -735,8 +735,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -820,8 +820,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -856,8 +856,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -881,8 +881,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -906,8 +906,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -931,8 +931,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1675,8 +1675,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL @@ -1704,8 +1704,8 @@ create_command SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `f_int1` int(11) DEFAULT '0', - `f_int2` int(11) DEFAULT '0', + `f_int1` int(11) DEFAULT 0, + `f_int2` int(11) DEFAULT 0, `f_char1` char(20) DEFAULT NULL, `f_char2` char(20) DEFAULT NULL, `f_charbig` varchar(1000) DEFAULT NULL -- cgit v1.2.1 From 2013a7fc29bd304c575ea84fbb79b4e94cf90216 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 21 Jul 2016 14:03:49 +0200 Subject: fix: CURRENT_ROLE() inside SECURITY DEFINER views 
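
A minimal illustration of the intended behaviour (the names r1 and test.v_cr are hypothetical; the bundled test uses role2 and test.v2): current_role() evaluated inside a SQL SECURITY DEFINER view whose definer is a role should return that role, instead of the role name leaking into priv_user, e.g.

  create role r1;
  grant select on test.* to r1;
  create definer=r1 view test.v_cr as select current_role() as cr;
  select * from test.v_cr;  -- expected: 'r1'
  drop view test.v_cr; drop role r1;
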
--- mysql-test/suite/roles/definer.result | 21 +++++++++++---------- mysql-test/suite/roles/definer.test | 2 +- sql/sql_acl.cc | 7 +++---- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/roles/definer.result b/mysql-test/suite/roles/definer.result index 293e5e539bc..4810e597763 100644 --- a/mysql-test/suite/roles/definer.result +++ b/mysql-test/suite/roles/definer.result @@ -22,10 +22,10 @@ show create view test.v1; View Create View character_set_client collation_connection v1 CREATE ALGORITHM=UNDEFINED DEFINER=`role1` SQL SECURITY DEFINER VIEW `test`.`v1` AS select (`mysqltest1`.`t1`.`a` + `mysqltest1`.`t1`.`b`) AS `a+b`,`mysqltest1`.`t1`.`c` AS `c` from `mysqltest1`.`t1` latin1 latin1_swedish_ci set role none; -create definer=role2 view test.v2 as select a+b,c from t1; +create definer=role2 view test.v2 as select a+b,c,current_role() from t1; show create view test.v2; View Create View character_set_client collation_connection -v2 CREATE ALGORITHM=UNDEFINED DEFINER=`role2` SQL SECURITY DEFINER VIEW `test`.`v2` AS select (`mysqltest1`.`t1`.`a` + `mysqltest1`.`t1`.`b`) AS `a+b`,`mysqltest1`.`t1`.`c` AS `c` from `mysqltest1`.`t1` latin1 latin1_swedish_ci +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`role2` SQL SECURITY DEFINER VIEW `test`.`v2` AS select (`mysqltest1`.`t1`.`a` + `mysqltest1`.`t1`.`b`) AS `a+b`,`mysqltest1`.`t1`.`c` AS `c`,current_role() AS `current_role()` from `mysqltest1`.`t1` latin1 latin1_swedish_ci create definer=role3 view test.v3 as select a+b,c from t1; Warnings: Note 1449 The user specified as a definer ('role3'@'%') does not exist @@ -44,9 +44,9 @@ GRANT CREATE VIEW ON `mysqltest1`.* TO 'foo'@'localhost' select * from test.v1; ERROR HY000: View 'test.v1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them select * from test.v2; -a+b c -11 100 -22 200 +a+b c current_role() +11 100 role2 +22 200 role2 select * from test.v3; ERROR 28000: Access denied for user 'foo'@'localhost' (using password: NO) create definer=role4 view test.v4 as select a+b,c from t1; @@ -113,7 +113,7 @@ tr1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`role1` trigger tr insert t1 values (111, 222, 333) latin1 latin1_swedish_ci latin1_swedish_ci set role none; insert t2 values (11,22,33); -ERROR 42000: INSERT command denied to user 'role1'@'' for table 't1' +ERROR 42000: INSERT command denied to user ''@'' for table 't1' select * from t1; a b c 1 10 100 @@ -179,7 +179,7 @@ pr1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`role1` PROCEDURE insert t1 values (111, 222, 333) latin1 latin1_swedish_ci latin1_swedish_ci set role none; call pr1(); -ERROR 42000: INSERT command denied to user 'role1'@'' for table 't1' +ERROR 42000: INSERT command denied to user ''@'' for table 't1' select * from t1; a b c 1 10 100 @@ -222,7 +222,7 @@ fn1 NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION CREATE DEFINER=`role1` FUNCTION ` return (select sum(a+b) from t1) latin1 latin1_swedish_ci latin1_swedish_ci set role none; select fn1(); -ERROR 42000: SELECT command denied to user 'role1'@'' for column 'b' in table 't1' +ERROR 42000: SELECT command denied to user ''@'' for column 'b' in table 't1' select * from t1; a b c 1 10 100 @@ -289,7 +289,8 @@ SET @saved_cs_client = @@character_set_client; SET character_set_client = utf8; /*!50001 CREATE TABLE `v2` ( `a+b` tinyint NOT NULL, - `c` tinyint NOT NULL + `c` tinyint NOT NULL, + `current_role()` tinyint NOT NULL ) ENGINE=MyISAM */; SET character_set_client = 
@saved_cs_client; SET @saved_cs_client = @@character_set_client; @@ -553,7 +554,7 @@ USE `test`; /*!50001 SET character_set_client = latin1 */; /*!50001 SET character_set_results = latin1 */; /*!50001 SET collation_connection = latin1_swedish_ci */; -/*!50001 CREATE ALGORITHM=UNDEFINED DEFINER=`role2` SQL SECURITY DEFINER VIEW `v2` AS select (`mysqltest1`.`t1`.`a` + `mysqltest1`.`t1`.`b`) AS `a+b`,`mysqltest1`.`t1`.`c` AS `c` from `mysqltest1`.`t1` */; +/*!50001 CREATE ALGORITHM=UNDEFINED DEFINER=`role2` SQL SECURITY DEFINER VIEW `v2` AS select (`mysqltest1`.`t1`.`a` + `mysqltest1`.`t1`.`b`) AS `a+b`,`mysqltest1`.`t1`.`c` AS `c`,current_role() AS `current_role()` from `mysqltest1`.`t1` */; /*!50001 SET character_set_client = @saved_cs_client */; /*!50001 SET character_set_results = @saved_cs_results */; /*!50001 SET collation_connection = @saved_col_connection */; diff --git a/mysql-test/suite/roles/definer.test b/mysql-test/suite/roles/definer.test index 3de4a6922c2..090c60917c9 100644 --- a/mysql-test/suite/roles/definer.test +++ b/mysql-test/suite/roles/definer.test @@ -47,7 +47,7 @@ show create view test.v1; set role none; # definer=role_name, privileges ok -create definer=role2 view test.v2 as select a+b,c from t1; +create definer=role2 view test.v2 as select a+b,c,current_role() from t1; show create view test.v2; # definer=non_existent_role diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 37e6e769a89..fe8e8eea83f 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2009,8 +2009,7 @@ bool acl_getroot(Security_context *sctx, char *user, char *host, sctx->master_access= acl_role->access; if (acl_role->user.str) - strmake_buf(sctx->priv_user, user); - sctx->priv_host[0]= 0; + strmake_buf(sctx->priv_role, user); } } @@ -7162,7 +7161,7 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, GRANT_INFO *grant; const char *db_name; const char *table_name; - Security_context *sctx= MY_TEST(table_ref->security_ctx) ? + Security_context *sctx= table_ref->security_ctx ? 
table_ref->security_ctx : thd->security_ctx; if (table_ref->view || table_ref->field_translation) @@ -11078,7 +11077,7 @@ void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, /* global privileges */ grant->privilege= sctx->master_access; - if (!sctx->priv_user[0]) + if (!sctx->priv_user[0] && !sctx->priv_role[0]) { DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); DBUG_VOID_RETURN; // it is slave -- cgit v1.2.1 From b48555e9c9c526789b1e42ce084825c3037f0451 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 21 Jul 2016 17:46:41 +0200 Subject: fix: DEFAULT() in a view should be not updatable as in create table t1 (a int); create view v1 as select default(a) as NOT_UPDATABLE from t1; --- sql/item.h | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/item.h b/sql/item.h index e43b4d50e46..6988543e19d 100644 --- a/sql/item.h +++ b/sql/item.h @@ -4894,6 +4894,7 @@ public: bool send(Protocol *protocol, String *buffer); int save_in_field(Field *field_arg, bool no_conversions); table_map used_tables() const { return (table_map)0L; } + Item_field *field_for_view_update() { return 0; } bool walk(Item_processor processor, bool walk_subquery, void *args) { -- cgit v1.2.1 From ebf1e1d6014e34ceaa70bfd86d7a54508c2c26a9 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 19 Jul 2016 14:12:24 +0200 Subject: NULL pointer dereference in a case of a specially crafted invalid frm --- sql/table.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index 6fa77ef8fae..d1f4bae07e6 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2587,9 +2587,9 @@ static bool fix_vcol_expr(THD *thd, TABLE *table, Field *field, error= func_expr->walk(&Item::check_vcol_func_processor, 0, &res); if (error || (res.errors & VCOL_IMPOSSIBLE)) - { + { // this can only happen if the frm was corrupted my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, - "???", field->field_name); + "???", field ? 
field->field_name : "?????"); goto end; } vcol->flags= res.errors; -- cgit v1.2.1 From eb9bce519d632104c49108bdf4021124add46f77 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 19 Jul 2016 22:10:54 +0200 Subject: split fix_vcol_expr() into "fix" and "check" parts --- sql/table.cc | 88 +++++++++++++++++++++++++++++++----------------------------- 1 file changed, 45 insertions(+), 43 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index d1f4bae07e6..f72f0fd02d9 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2502,6 +2502,34 @@ void TABLE_SHARE::free_frm_image(const uchar *frm) } +static bool fix_vcol_expr(THD *thd, Virtual_column_info *vcol) +{ + DBUG_ENTER("fix_vcol_expr"); + + const enum enum_mark_columns save_mark_used_columns= thd->mark_used_columns; + thd->mark_used_columns= MARK_COLUMNS_NONE; + + const char *save_where= thd->where; + thd->where= "virtual column function"; + + thd->in_stored_expression= 1; + + int error= vcol->expr_item->fix_fields(thd, &vcol->expr_item); + + thd->in_stored_expression= 0; + thd->mark_used_columns= save_mark_used_columns; + thd->where= save_where; + + if (unlikely(error)) + { + my_error(ER_ERROR_EVALUATING_EXPRESSION, MYF(0), vcol->expr_str); + DBUG_RETURN(1); + } + + DBUG_RETURN(0); +} + + /* @brief Perform semantic analysis of the defining expression for a virtual column @@ -2529,53 +2557,38 @@ void TABLE_SHARE::free_frm_image(const uchar *frm) FALSE Otherwise */ -static bool fix_vcol_expr(THD *thd, TABLE *table, Field *field, - Virtual_column_info *vcol) +static bool fix_and_check_vcol_expr(THD *thd, TABLE *table, Field *field, + Virtual_column_info *vcol) { Item* func_expr= vcol->expr_item; - bool result= TRUE; - TABLE_LIST tables; - int error= 0; - const char *save_where; - enum_mark_columns save_mark_used_columns= thd->mark_used_columns; - DBUG_ENTER("fix_vcol_expr"); + DBUG_ENTER("fix_and_check_vcol_expr"); DBUG_PRINT("info", ("vcol: %p", vcol)); DBUG_ASSERT(func_expr); - thd->mark_used_columns= MARK_COLUMNS_NONE; + if (func_expr->fixed) + DBUG_RETURN(0); // nothing to do - save_where= thd->where; - thd->where= "virtual column function"; + if (fix_vcol_expr(thd, vcol)) + DBUG_RETURN(1); - /* Fix fields referenced to by the virtual column function */ - thd->in_stored_expression= 1; - if (!func_expr->fixed) - error= func_expr->fix_fields(thd, &vcol->expr_item); - thd->in_stored_expression= 0; + if (vcol->flags) + DBUG_RETURN(0); // already checked, no need to do it again - if (unlikely(error)) - { - DBUG_PRINT("info", - ("Field in virtual column expression does not belong to the table")); - my_error(ER_ERROR_EVALUATING_EXPRESSION, MYF(0), vcol->expr_str); - goto end; - } /* fix_fields could've changed the expression */ func_expr= vcol->expr_item; /* Number of columns will be checked later */ - thd->where= save_where; if (unlikely(func_expr->result_type() == ROW_RESULT)) { my_error(ER_ROW_EXPR_FOR_VCOL, MYF(0)); - goto end; + DBUG_RETURN(1); } /* Check that we are not refering to any not yet initialized fields */ if (field) { if (func_expr->walk(&Item::check_field_expression_processor, 0, field)) - goto end; + DBUG_RETURN(1); } /* @@ -2585,12 +2598,12 @@ static bool fix_vcol_expr(THD *thd, TABLE *table, Field *field, Item::vcol_func_processor_result res; res.errors= 0; - error= func_expr->walk(&Item::check_vcol_func_processor, 0, &res); + int error= func_expr->walk(&Item::check_vcol_func_processor, 0, &res); if (error || (res.errors & VCOL_IMPOSSIBLE)) { // this can only happen if the frm was corrupted 
my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, "???", field ? field->field_name : "?????"); - goto end; + DBUG_RETURN(1); } vcol->flags= res.errors; @@ -2600,16 +2613,10 @@ static bool fix_vcol_expr(THD *thd, TABLE *table, Field *field, if (vcol->stored_in_db && vcol->flags & VCOL_NON_DETERMINISTIC) table->s->non_determinstic_insert= 1; - result= FALSE; - -end: - - thd->mark_used_columns= save_mark_used_columns; - table->map= 0; //Restore old value - - DBUG_RETURN(result); + DBUG_RETURN(0); } + /* @brief Unpack the definition of a virtual column from its linear representation @@ -2629,7 +2636,7 @@ end: pointer to this item is placed into in a Virtual_column_info object that is created. After this the function performs semantic analysis of the item by calling the the function - fix_vcol_expr(). Since the defining expression is part of the table + fix_and_check_vcol_expr(). Since the defining expression is part of the table definition the item for it is created in table->memroot within the special arena TABLE::expr_arena or in the thd memroot for INSERT DELAYED @@ -2736,15 +2743,10 @@ Virtual_column_info *unpack_vcol_info_from_frm(THD *thd, if (error) goto err; - /* - mark if expression will be stored in the table. This is also used by - fix_vcol_expr() to mark if we are using non deterministic functions. - */ vcol_storage.vcol_info->stored_in_db= vcol->stored_in_db; vcol_storage.vcol_info->name= vcol->name; vcol_storage.vcol_info->utf8= vcol->utf8; - /* Validate the Item tree. */ - if (!fix_vcol_expr(thd, table, field, vcol_storage.vcol_info)) + if (!fix_and_check_vcol_expr(thd, table, field, vcol_storage.vcol_info)) { vcol_info= vcol_storage.vcol_info; // Expression ok goto end; -- cgit v1.2.1 From 73a220aac3842da0ab8d51fa9961a18b03c45001 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 25 Jul 2016 14:34:37 +0200 Subject: session-state dependent functions in DEFAULT/CHECK/vcols * revert part of the db7edfe that moved calculations from fix_fields to val_str for Item_func_sysconst and descendants * mark session state dependent functions in check_vcol_func_processor() * re-run fix_fields for all such functions for every statement * fix CURRENT_USER/CURRENT_ROLE not to use Name_resolution_context (that is allocated on the stack in unpack_vcol_info_from_frm()) Note that NOW(), CURDATE(), etc use lazy initialization and do *not* force fix_fields to be re-run. The rule is: * lazy initialization is *not* allowed, if it changes metadata (so, e.g. DAYNAME() cannot use it) * lazy initialization is *preferrable* if it has side effects (e.g. 
NOW() sets thd->time_zone_used=1, so it's better to do it when the value of NOW is actually needed, not when NOW is simply prepared) --- mysql-test/r/default.result | 33 -------- mysql-test/r/default_session.result | 94 ++++++++++++++++++++++ mysql-test/suite/vcol/r/not_supported.result | 6 -- .../suite/vcol/r/vcol_blocked_sql_funcs.result | 19 ----- .../suite/vcol/r/vcol_supported_sql_funcs.result | 75 +++++++++++++++++ mysql-test/suite/vcol/t/not_supported.test | 6 -- .../suite/vcol/t/vcol_blocked_sql_funcs_main.inc | 25 ------ .../suite/vcol/t/vcol_supported_sql_funcs_main.inc | 30 +++++++ mysql-test/t/default.test | 34 -------- mysql-test/t/default_session.test | 82 +++++++++++++++++++ sql/field.cc | 9 ++- sql/field.h | 9 ++- sql/item_func.cc | 2 +- sql/item_func.h | 2 +- sql/item_strfunc.cc | 88 +++++++------------- sql/item_strfunc.h | 54 ++++++++----- sql/item_timefunc.h | 6 +- sql/sql_base.cc | 45 ++++++++++- sql/table.cc | 23 +++++- sql/table.h | 2 + 20 files changed, 422 insertions(+), 222 deletions(-) create mode 100644 mysql-test/r/default_session.result create mode 100644 mysql-test/t/default_session.test diff --git a/mysql-test/r/default.result b/mysql-test/r/default.result index f7037c9df48..1f60e66e64e 100644 --- a/mysql-test/r/default.result +++ b/mysql-test/r/default.result @@ -1084,33 +1084,6 @@ a b 20010101102030 2001 DROP TABLE t1; # -# Miscelaneous SQL standard variants -# -CREATE TABLE t1 (a VARCHAR(30) DEFAULT CURRENT_USER); -ERROR HY000: Function or expression 'current_user()' cannot be used in the DEFAULT clause of `a` -CREATE TABLE t1 (a VARCHAR(30) DEFAULT CURRENT_ROLE); -ERROR HY000: Function or expression 'current_role()' cannot be used in the DEFAULT clause of `a` -# -# Other Item_func_sysconst derived functions -# -CREATE TABLE t1 (a VARCHAR(30) DEFAULT DATABASE()); -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` varchar(30) DEFAULT DATABASE() -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -INSERT INTO t1 VALUES (); -USE INFORMATION_SCHEMA; -INSERT INTO test.t1 VALUES (); -USE test; -INSERT INTO t1 VALUES (); -SELECT * FROM t1; -a -test -information_schema -test -DROP TABLE t1; -# # Check DEFAULT() function # CREATE TABLE `t1` (`a` int(11) DEFAULT (3+3),`b` int(11) DEFAULT '1000'); @@ -1400,8 +1373,6 @@ SELECT * FROM t1; a b c 01,5,2013 %d,%m,%Y 2013-05-01 DROP TABLE t1; -CREATE TABLE t1 (a VARCHAR(30), b VARCHAR(30) DEFAULT DATE_FORMAT(a,'%W %M %Y')); -ERROR HY000: Function or expression 'date_format()' cannot be used in the DEFAULT clause of `b` # Item_datefunc SET time_zone='-10:00'; SET timestamp=UNIX_TIMESTAMP('2001-01-01 23:59:59'); @@ -1835,10 +1806,6 @@ SELECT * FROM t1; a b 2008-04-01 2 DROP TABLE t1; -CREATE TABLE t1 (a DATE, b VARCHAR(30) DEFAULT DAYNAME(a)); -ERROR HY000: Function or expression 'dayname()' cannot be used in the DEFAULT clause of `b` -CREATE TABLE t1 (a DATE, b VARCHAR(30) DEFAULT MONTHNAME(a)); -ERROR HY000: Function or expression 'monthname()' cannot be used in the DEFAULT clause of `b` CREATE TABLE t1 (a DATE, b INT DEFAULT EXTRACT(YEAR FROM a)); SHOW CREATE TABLE t1; Table Create Table diff --git a/mysql-test/r/default_session.result b/mysql-test/r/default_session.result new file mode 100644 index 00000000000..b00bf27dcf3 --- /dev/null +++ b/mysql-test/r/default_session.result @@ -0,0 +1,94 @@ +create database mysqltest1; +create user ''@localhost; +create user foo@localhost; +create role bar; +grant select on *.* to ''@localhost; +grant select,insert on *.* to foo@localhost; +grant select,insert on *.* to 
bar; +grant bar to ''@localhost; +create table t1 (n varchar(100), +u varchar(100) default user(), +cu varchar(100) default current_user(), +cr varchar(100) default current_role(), +d varchar(100) default database()); +create definer=foo@localhost view mysqltest1.v1 as select * from t1; +create definer=bar view v2 as select * from t1; +create view v3 as select * from v2; +create definer=foo@localhost view mysqltest1.v4 as select default(n),default(u),default(cu),default(cr), default(d) from t1; +create definer=bar view v5 as select default(n),default(u),default(cu),default(cr), default(d) from t1; +create view v6 as select * from v5; +insert t1 (n) values ('t1'); +insert mysqltest1.v1 (n) values ('v1'); +insert v2 (n) values ('v2'); +insert v3 (n) values ('v3'); +select default(n),default(u),default(cu),default(cr), default(d) from t1 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL root@localhost root@localhost NULL test +select * from mysqltest1.v4 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL root@localhost foo@localhost NULL test +select * from v5 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL root@localhost @ bar test +select * from v6 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL root@localhost @ bar test +connect conn,localhost,conn,,mysqltest1; +set role bar; +insert test.t1 (n) values ('t1'); +insert v1 (n) values ('v1'); +insert test.v2 (n) values ('v2'); +insert test.v3 (n) values ('v3'); +select default(n),default(u),default(cu),default(cr), default(d) from test.t1 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL conn@localhost @localhost bar mysqltest1 +select * from v4 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL conn@localhost foo@localhost NULL mysqltest1 +select * from test.v5 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL conn@localhost @ bar mysqltest1 +select * from test.v6 limit 1; +default(n) default(u) default(cu) default(cr) default(d) +NULL conn@localhost @ bar mysqltest1 +connection default; +disconnect conn; +select * from t1; +n u cu cr d +t1 root@localhost root@localhost NULL test +v1 root@localhost foo@localhost NULL test +v2 root@localhost @ bar test +v3 root@localhost @ bar test +t1 conn@localhost @localhost bar mysqltest1 +v1 conn@localhost foo@localhost NULL mysqltest1 +v2 conn@localhost @ bar mysqltest1 +v3 conn@localhost @ bar mysqltest1 +drop database mysqltest1; +drop view v2, v3, v5, v6; +drop table t1; +drop user ''@localhost; +drop user foo@localhost; +drop role bar; +create table t1 (a date, +mn varchar(100) default monthname(a), +dn varchar(100) default dayname(a), +df varchar(100) default date_format(a, "%a, %b")); +insert t1 (a) values ('2010-12-2'); +set lc_time_names=de_DE; +insert t1 (a) values ('2010-12-2'); +set lc_time_names=default; +select * from t1; +a mn dn df +2010-12-02 December Thursday Thu, Dec +2010-12-02 Dezember Donnerstag Do, Dez +drop table t1; +create table t1 (a varchar(100) default @@sql_mode); +insert t1 () values (); +set sql_mode=ansi; +insert t1 () values (); +set sql_mode=default; +select * from t1; +a +NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION +REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,ANSI +drop table t1; diff --git a/mysql-test/suite/vcol/r/not_supported.result b/mysql-test/suite/vcol/r/not_supported.result index fb03e3fdae3..cecf5c27912 100644 --- a/mysql-test/suite/vcol/r/not_supported.result +++ 
b/mysql-test/suite/vcol/r/not_supported.result @@ -14,12 +14,6 @@ create table t4 (a int, b int, v int as (@a:=a)); ERROR HY000: Function or expression '@a' cannot be used in the GENERATED ALWAYS AS clause of `v` create table t4 (a int, b int, v int as (@a:=a) PERSISTENT); ERROR HY000: Function or expression '@a' cannot be used in the GENERATED ALWAYS AS clause of `v` -create table t5 (a int, b int, v varchar(100) as (monthname(a))); -ERROR HY000: Function or expression 'monthname()' cannot be used in the GENERATED ALWAYS AS clause of `v` -create table t6 (a int, b int, v varchar(100) as (dayname(a))); -ERROR HY000: Function or expression 'dayname()' cannot be used in the GENERATED ALWAYS AS clause of `v` -create table t7 (a int, b int, v varchar(100) as (date_format(a, '%W %a %M %b'))); -ERROR HY000: Function or expression 'date_format()' cannot be used in the GENERATED ALWAYS AS clause of `v` create table t8 (a int, b int, v varchar(100) as (from_unixtime(a))); insert t1 (a,b) values (1,2); insert t8 (a,b) values (1234567890,2); diff --git a/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result b/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result index 369f0f902fc..3389038dd6c 100644 --- a/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result +++ b/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result @@ -74,13 +74,6 @@ create or replace table t1 (a varchar(64), b varchar(64) as (collation(a)) PERSI create or replace table t1 (a int as (connection_id())); create or replace table t1 (a int as (connection_id()) PERSISTENT); ERROR HY000: Function or expression 'connection_id()' cannot be used in the GENERATED ALWAYS AS clause of `a` -# CURRENT_USER(), CURRENT_USER -create or replace table t1 (a varchar(32) as (current_user())); -ERROR HY000: Function or expression 'current_user()' cannot be used in the GENERATED ALWAYS AS clause of `a` -create or replace table t1 (a varchar(32) as (current_user()) PERSISTENT); -ERROR HY000: Function or expression 'current_user()' cannot be used in the GENERATED ALWAYS AS clause of `a` -create or replace table t1 (a varchar(32) as (current_user) PERSISTENT); -ERROR HY000: Function or expression 'current_user()' cannot be used in the GENERATED ALWAYS AS clause of `a` # DATABASE() create or replace table t1 (a varchar(32) as (database())); create or replace table t1 (a varchar(1024), b varchar(1024) as (database()) PERSISTENT); @@ -141,9 +134,6 @@ create or replace table t1 (a varchar(1024), b varchar(1024) as (version()) PERS ERROR HY000: Function or expression 'version()' cannot be used in the GENERATED ALWAYS AS clause of `b` # ENCRYPT() create or replace table t1 (a varchar(1024), b varchar(1024) as (encrypt(a)) PERSISTENT); -# DATE_FORMAT() -create or replace table t1 (a datetime, b varchar(64) as (date_format(a,'%W %M %D')); -ERROR HY000: Function or expression 'date_format()' cannot be used in the GENERATED ALWAYS AS clause of `b` # Stored procedures create procedure p1() begin @@ -217,15 +207,6 @@ ERROR HY000: Function or expression 'var_samp()' cannot be used in the GENERATED # VARIANCE() create or replace table t1 (a int, b int as (variance(a))); ERROR HY000: Function or expression 'variance()' cannot be used in the GENERATED ALWAYS AS clause of `b` -# DAYNAME() -create or replace table t1 (a int, b varchar(10) as (dayname(a))); -ERROR HY000: Function or expression 'dayname()' cannot be used in the GENERATED ALWAYS AS clause of `b` -create or replace table t1 (a int, b varchar(10) as (monthname(a))); -ERROR HY000: Function or expression 
'monthname()' cannot be used in the GENERATED ALWAYS AS clause of `b` -create or replace table t1 (a int, b varchar(10) as (date_format("1963-01-01","%d.%m.%Y"))); -ERROR HY000: Function or expression 'date_format()' cannot be used in the GENERATED ALWAYS AS clause of `b` -create or replace table t1 (a int, b varchar(10) as (time_format(now(),"%d.%m.%Y"))); -ERROR HY000: Function or expression 'time_format()' cannot be used in the GENERATED ALWAYS AS clause of `b` # # XML FUNCTIONS # diff --git a/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result b/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result index 7ce09a3a967..2e6dbc38b6f 100644 --- a/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result +++ b/mysql-test/suite/vcol/r/vcol_supported_sql_funcs.result @@ -2831,3 +2831,78 @@ a b a 30 drop table t1; set sql_warnings = 0; +# MONTHNAME() +set sql_warnings = 1; +create table t1 (a date, b varchar(100) as (monthname(a))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` date DEFAULT NULL, + `b` varchar(100) AS (monthname(a)) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 values ('2010-10-10',default); +select * from t1; +a b +2010-10-10 October +drop table t1; +set sql_warnings = 0; +# DAYNAME() +set sql_warnings = 1; +create table t1 (a date, b varchar(100) as (dayname(a))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` date DEFAULT NULL, + `b` varchar(100) AS (dayname(a)) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 values ('2011-11-11',default); +select * from t1; +a b +2011-11-11 Friday +drop table t1; +set sql_warnings = 0; +# DATE_FORMAT() +set sql_warnings = 1; +create table t1 (a date, b varchar(100) as (date_format(a, '%W %a %M %b'))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` date DEFAULT NULL, + `b` varchar(100) AS (date_format(a, '%W %a %M %b')) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 values ('2012-12-12',default); +select * from t1; +a b +2012-12-12 Wednesday Wed December Dec +drop table t1; +set sql_warnings = 0; +# CURRENT_USER() +set sql_warnings = 1; +create table t1 (a char, b varchar(32) as (current_user())); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(1) DEFAULT NULL, + `b` varchar(32) AS (current_user()) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 values ('a', default); +select * from t1; +a b +a root@localhost +drop table t1; +set sql_warnings = 0; +# TIME_FORMAT() +set sql_warnings = 1; +create table t1 (a datetime, b varchar(10) as (time_format(a,"%d.%m.%Y"))); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` datetime DEFAULT NULL, + `b` varchar(10) AS (time_format(a,"%d.%m.%Y")) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert into t1 values ('2001-01-01 02:02:02',default); +select * from t1; +a b +2001-01-01 02:02:02 01.01.2001 +drop table t1; +set sql_warnings = 0; diff --git a/mysql-test/suite/vcol/t/not_supported.test b/mysql-test/suite/vcol/t/not_supported.test index 4cfc97a1be1..ee6074870de 100644 --- a/mysql-test/suite/vcol/t/not_supported.test +++ b/mysql-test/suite/vcol/t/not_supported.test @@ -24,12 +24,6 @@ create table t3 (a int, b int, v int as (a+@@error_count) PERSISTENT); create table t4 (a int, b int, v int as (@a:=a)); --error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED create table t4 (a int, b int, v int as (@a:=a) PERSISTENT); ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create table t5 (a int, b int, v 
varchar(100) as (monthname(a))); ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create table t6 (a int, b int, v varchar(100) as (dayname(a))); ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create table t7 (a int, b int, v varchar(100) as (date_format(a, '%W %a %M %b'))); create table t8 (a int, b int, v varchar(100) as (from_unixtime(a))); insert t1 (a,b) values (1,2); diff --git a/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_main.inc b/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_main.inc index fad2f531ee5..492082af30c 100644 --- a/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_main.inc +++ b/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_main.inc @@ -116,14 +116,6 @@ create or replace table t1 (a int as (connection_id())); -- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED create or replace table t1 (a int as (connection_id()) PERSISTENT); ---echo # CURRENT_USER(), CURRENT_USER --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a varchar(32) as (current_user())); --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a varchar(32) as (current_user()) PERSISTENT); --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a varchar(32) as (current_user) PERSISTENT); - --echo # DATABASE() create or replace table t1 (a varchar(32) as (database())); -- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED @@ -204,10 +196,6 @@ create or replace table t1 (a varchar(1024), b varchar(1024) as (version()) PERS --echo # ENCRYPT() create or replace table t1 (a varchar(1024), b varchar(1024) as (encrypt(a)) PERSISTENT); ---echo # DATE_FORMAT() --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a datetime, b varchar(64) as (date_format(a,'%W %M %D')); - --echo # Stored procedures delimiter //; @@ -308,19 +296,6 @@ create or replace table t1 (a int, b int as (var_samp(a))); -- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED create or replace table t1 (a int, b int as (variance(a))); ---echo # DAYNAME() --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a int, b varchar(10) as (dayname(a))); - --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a int, b varchar(10) as (monthname(a))); - --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a int, b varchar(10) as (date_format("1963-01-01","%d.%m.%Y"))); - --- error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -create or replace table t1 (a int, b varchar(10) as (time_format(now(),"%d.%m.%Y"))); - --echo # --echo # XML FUNCTIONS --echo # diff --git a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc index cf3183d979d..4a95ea75534 100644 --- a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc +++ b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_main.inc @@ -1179,3 +1179,33 @@ let $values1 = 'a',default; let $rows = 1; --source suite/vcol/inc/vcol_supported_sql_funcs.inc +--echo # MONTHNAME() +let $cols = a date, b varchar(100) as (monthname(a)); +let $values1 = '2010-10-10',default; +let $rows = 1; +--source suite/vcol/inc/vcol_supported_sql_funcs.inc + +--echo # DAYNAME() +let $cols = a date, b varchar(100) as (dayname(a)); +let $values1 = '2011-11-11',default; +let $rows = 1; +--source suite/vcol/inc/vcol_supported_sql_funcs.inc + +--echo # DATE_FORMAT() +let $cols = a date, b varchar(100) as (date_format(a, '%W %a %M %b')); +let $values1 = '2012-12-12',default; +let $rows = 1; 
+--source suite/vcol/inc/vcol_supported_sql_funcs.inc + +--echo # CURRENT_USER() +let $cols = a char, b varchar(32) as (current_user()); +let $values1 = 'a', default; +let $rows = 1; +--source suite/vcol/inc/vcol_supported_sql_funcs.inc + +--echo # TIME_FORMAT() +let $cols = a datetime, b varchar(10) as (time_format(a,"%d.%m.%Y")); +let $values1 = '2001-01-01 02:02:02',default; +let $rows = 1; +--source suite/vcol/inc/vcol_supported_sql_funcs.inc + diff --git a/mysql-test/t/default.test b/mysql-test/t/default.test index 4981c2161e7..14eb16bacd4 100644 --- a/mysql-test/t/default.test +++ b/mysql-test/t/default.test @@ -817,29 +817,6 @@ INSERT INTO t1 VALUES (); SELECT * FROM t1; DROP TABLE t1; - ---echo # ---echo # Miscelaneous SQL standard variants ---echo # - ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -CREATE TABLE t1 (a VARCHAR(30) DEFAULT CURRENT_USER); ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -CREATE TABLE t1 (a VARCHAR(30) DEFAULT CURRENT_ROLE); - ---echo # ---echo # Other Item_func_sysconst derived functions ---echo # -CREATE TABLE t1 (a VARCHAR(30) DEFAULT DATABASE()); -SHOW CREATE TABLE t1; -INSERT INTO t1 VALUES (); -USE INFORMATION_SCHEMA; -INSERT INTO test.t1 VALUES (); -USE test; -INSERT INTO t1 VALUES (); -SELECT * FROM t1; -DROP TABLE t1; - --echo # --echo # Check DEFAULT() function --echo # @@ -1010,9 +987,6 @@ INSERT INTO t1 VALUES ('01,5,2013','%d,%m,%Y', DEFAULT); SELECT * FROM t1; DROP TABLE t1; ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -CREATE TABLE t1 (a VARCHAR(30), b VARCHAR(30) DEFAULT DATE_FORMAT(a,'%W %M %Y')); - --echo # Item_datefunc SET time_zone='-10:00'; @@ -1221,7 +1195,6 @@ INSERT INTO t1 (a) VALUES ('1987-01-01'); SELECT * FROM t1; DROP TABLE t1; -# QQ: this depends on @@default_week_format. 
CREATE TABLE t1 (a DATE, b INT DEFAULT WEEK(a)); SHOW CREATE TABLE t1; INSERT INTO t1 (a) VALUES ('1987-02-01'); @@ -1240,12 +1213,6 @@ INSERT INTO t1 (a) VALUES ('2008-04-01'); SELECT * FROM t1; DROP TABLE t1; -# QQ: this depends on @@lc_time_names ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -CREATE TABLE t1 (a DATE, b VARCHAR(30) DEFAULT DAYNAME(a)); ---error ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED -CREATE TABLE t1 (a DATE, b VARCHAR(30) DEFAULT MONTHNAME(a)); - CREATE TABLE t1 (a DATE, b INT DEFAULT EXTRACT(YEAR FROM a)); SHOW CREATE TABLE t1; INSERT INTO t1 (a) VALUES ('2009-07-02'); @@ -1596,7 +1563,6 @@ INSERT INTO t1 (a) VALUES (64); SELECT * FROM t1; DROP TABLE t1; -# QQ: this depends on @@lc_time_names CREATE TABLE t1 (a INT, b INT, c VARCHAR(30) DEFAULT FORMAT(a,b)); SHOW CREATE TABLE t1; INSERT INTO t1 (a,b) VALUES (10000,3); diff --git a/mysql-test/t/default_session.test b/mysql-test/t/default_session.test new file mode 100644 index 00000000000..7796354ffd4 --- /dev/null +++ b/mysql-test/t/default_session.test @@ -0,0 +1,82 @@ +# +# DEFAULT clause with functions that depend on the current session state +# + +source include/not_embedded.inc; + +create database mysqltest1; + +create user ''@localhost; +create user foo@localhost; +create role bar; +grant select on *.* to ''@localhost; +grant select,insert on *.* to foo@localhost; +grant select,insert on *.* to bar; +grant bar to ''@localhost; + +create table t1 (n varchar(100), + u varchar(100) default user(), + cu varchar(100) default current_user(), + cr varchar(100) default current_role(), + d varchar(100) default database()); + +create definer=foo@localhost view mysqltest1.v1 as select * from t1; +create definer=bar view v2 as select * from t1; +create view v3 as select * from v2; + +create definer=foo@localhost view mysqltest1.v4 as select default(n),default(u),default(cu),default(cr), default(d) from t1; +create definer=bar view v5 as select default(n),default(u),default(cu),default(cr), default(d) from t1; +create view v6 as select * from v5; + +insert t1 (n) values ('t1'); +insert mysqltest1.v1 (n) values ('v1'); +insert v2 (n) values ('v2'); +insert v3 (n) values ('v3'); + +select default(n),default(u),default(cu),default(cr), default(d) from t1 limit 1; +select * from mysqltest1.v4 limit 1; +select * from v5 limit 1; +select * from v6 limit 1; + +connect (conn,localhost,conn,,mysqltest1); +set role bar; +insert test.t1 (n) values ('t1'); +insert v1 (n) values ('v1'); +insert test.v2 (n) values ('v2'); +insert test.v3 (n) values ('v3'); + +select default(n),default(u),default(cu),default(cr), default(d) from test.t1 limit 1; +select * from v4 limit 1; +select * from test.v5 limit 1; +select * from test.v6 limit 1; +connection default; +disconnect conn; + +select * from t1; +drop database mysqltest1; +drop view v2, v3, v5, v6; +drop table t1; +drop user ''@localhost; +drop user foo@localhost; +drop role bar; + +create table t1 (a date, + mn varchar(100) default monthname(a), + dn varchar(100) default dayname(a), + df varchar(100) default date_format(a, "%a, %b")); + +insert t1 (a) values ('2010-12-2'); +set lc_time_names=de_DE; +insert t1 (a) values ('2010-12-2'); +set lc_time_names=default; + +select * from t1; +drop table t1; + +create table t1 (a varchar(100) default @@sql_mode); +insert t1 () values (); +set sql_mode=ansi; +insert t1 () values (); +set sql_mode=default; +select * from t1; +drop table t1; diff --git a/sql/field.cc b/sql/field.cc index bd7f7c3f689..b8816c59d9f 100644 --- a/sql/field.cc +++ 
b/sql/field.cc @@ -9801,10 +9801,11 @@ bool check_expression(Virtual_column_info *vcol, const char *type, ret= vcol->expr_item->walk(&Item::check_vcol_func_processor, 0, &res); vcol->flags= res.errors; - if (ret || - (res.errors & - (VCOL_IMPOSSIBLE | - (must_be_determinstic ? VCOL_NON_DETERMINISTIC | VCOL_TIME_FUNC: 0)))) + uint filter= VCOL_IMPOSSIBLE; + if (must_be_determinstic) + filter|= VCOL_NOT_STRICTLY_DETERMINISTIC; + + if (ret || (res.errors & filter)) { my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), res.name, type, name); diff --git a/sql/field.h b/sql/field.h index 13a0e914845..dc15cf9d10a 100644 --- a/sql/field.h +++ b/sql/field.h @@ -575,11 +575,14 @@ inline bool is_temporal_type_with_time(enum_field_types type) } /* Bits for type of vcol expression */ -#define VCOL_DETERMINISTIC 0 /* Normal (no bit set) */ #define VCOL_UNKNOWN 1 /* UDF used; Need fix_fields() to know */ #define VCOL_NON_DETERMINISTIC 2 -#define VCOL_TIME_FUNC 4 -#define VCOL_IMPOSSIBLE 8 +#define VCOL_SESSION_FUNC 4 /* uses session data, e.g. USER or DAYNAME */ +#define VCOL_TIME_FUNC 8 +#define VCOL_IMPOSSIBLE 16 + +#define VCOL_NOT_STRICTLY_DETERMINISTIC \ + (VCOL_NON_DETERMINISTIC | VCOL_TIME_FUNC | VCOL_SESSION_FUNC) /* Virtual_column_info is the class to contain additional diff --git a/sql/item_func.cc b/sql/item_func.cc index 7f8c89cc228..cc1494cc034 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -5854,7 +5854,7 @@ void Item_func_get_system_var::print(String *str, enum_query_type query_type) bool Item_func_get_system_var::check_vcol_func_processor(void *arg) { - return mark_unsupported_function("@@", var->name.str, arg, VCOL_NON_DETERMINISTIC); + return mark_unsupported_function("@@", var->name.str, arg, VCOL_SESSION_FUNC); } enum Item_result Item_func_get_system_var::result_type() const diff --git a/sql/item_func.h b/sql/item_func.h index a909d93dbaa..892dacea6ee 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -613,7 +613,7 @@ public: longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } bool check_vcol_func_processor(void *arg) { - return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC); + return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } }; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index bd38d644101..bff31ec7b26 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -2343,18 +2343,6 @@ void Item_func_decode::crypto_transform(String *res) sql_crypt.decode((char*) res->ptr(),res->length()); } -Item *Item_func_sysconst::safe_charset_converter(THD *thd, - CHARSET_INFO *tocs) -{ - /* - In default, virtual functions or constraint expressions, the value - of a sysconst is not constant - */ - if (thd->in_stored_expression) - return Item_str_func::safe_charset_converter(thd, tocs); - return const_charset_converter(thd, tocs, true, fully_qualified_func_name()); -} - String *Item_func_database::val_str(String *str) { @@ -2376,69 +2364,55 @@ String *Item_func_database::val_str(String *str) BUG#28086) binlog_format=MIXED, but is incorrectly replicated to '' if binlog_format=STATEMENT. 
*/ - -bool Item_func_user::init(THD *thd, const char *user, const char *host) +bool Item_func_user::init(const char *user, const char *host) { - DBUG_ENTER("Item_func_user::init"); DBUG_ASSERT(fixed == 1); - /* Check if we have already calculated the value for this thread */ - if (thd->query_id == last_query_id) - DBUG_RETURN(FALSE); - DBUG_PRINT("enter", ("user: '%s' host: '%s'", user,host)); - - last_query_id= thd->query_id; - null_value= 0; - // For system threads (e.g. replication SQL thread) user may be empty if (user) { - CHARSET_INFO *cs= system_charset_info; + CHARSET_INFO *cs= str_value.charset(); size_t res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen; - if (cached_value.alloc((uint) res_length)) + if (str_value.alloc((uint) res_length)) { null_value=1; - DBUG_RETURN(TRUE); + return TRUE; } - cached_value.set_charset(cs); - res_length=cs->cset->snprintf(cs, (char*)cached_value.ptr(), - (uint) res_length, + res_length=cs->cset->snprintf(cs, (char*)str_value.ptr(), (uint) res_length, "%s@%s", user, host); - cached_value.length((uint) res_length); - cached_value.mark_as_const(); + str_value.length((uint) res_length); + str_value.mark_as_const(); } - else - cached_value.set("", 0, system_charset_info); - DBUG_RETURN(FALSE); + return FALSE; } -String *Item_func_user::val_str(String *str) -{ - THD *thd= current_thd; - init(thd, thd->main_security_ctx.user, thd->main_security_ctx.host_or_ip); - return null_value ? 0 : &cached_value; -} -String *Item_func_current_user::val_str(String *str) +bool Item_func_user::fix_fields(THD *thd, Item **ref) { - THD *thd= current_thd; - Security_context *ctx= (context->security_ctx ? - context->security_ctx : thd->security_ctx); - init(thd, ctx->priv_user, ctx->priv_host); - return null_value ? 0 : &cached_value; + return (Item_func_sysconst::fix_fields(thd, ref) || + init(thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip)); } -bool Item_func_current_role::fix_fields(THD *thd, Item **ref) +bool Item_func_current_user::fix_fields(THD *thd, Item **ref) { - return Item_func_sysconst::fix_fields(thd,ref) || init(thd); + if (Item_func_sysconst::fix_fields(thd, ref)) + return TRUE; + + Security_context *ctx= context && context->security_ctx + ? context->security_ctx : thd->security_ctx; + return init(ctx->priv_user, ctx->priv_host); } -bool Item_func_current_role::init(THD *thd) +bool Item_func_current_role::fix_fields(THD *thd, Item **ref) { - Security_context *ctx= context->security_ctx + if (Item_func_sysconst::fix_fields(thd, ref)) + return 1; + + Security_context *ctx= context && context->security_ctx ? context->security_ctx : thd->security_ctx; if (ctx->priv_role[0]) @@ -2447,22 +2421,14 @@ bool Item_func_current_role::init(THD *thd) system_charset_info)) return 1; + null_value= maybe_null= 0; + str_value.mark_as_const(); return 0; } - null_value= 1; + null_value= maybe_null= 1; return 0; } -String *Item_func_current_role::val_str(String *) -{ - return (null_value ? 
0 : &str_value); -} - -int Item_func_current_role::save_in_field(Field *field, bool no_conversions) -{ - return save_str_value_in_field(field, &str_value); -} - void Item_func_soundex::fix_length_and_dec() { uint32 char_length= args[0]->max_char_length(); diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index b0f5064a190..fd63c571359 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -562,7 +562,10 @@ class Item_func_sysconst :public Item_str_func public: Item_func_sysconst(THD *thd): Item_str_func(thd) { collation.set(system_charset_info,DERIVATION_SYSCONST); } - Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs); + Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) + { + return const_charset_converter(thd, tocs, true, fully_qualified_func_name()); + } /* Used to create correct Item name in new converted item in safe_charset_converter, return string representation of this function @@ -572,7 +575,7 @@ public: bool check_vcol_func_processor(void *arg) { return mark_unsupported_function(fully_qualified_func_name(), arg, - VCOL_NON_DETERMINISTIC); + VCOL_SESSION_FUNC); } }; @@ -595,15 +598,19 @@ public: class Item_func_user :public Item_func_sysconst { protected: - query_id_t last_query_id; - String cached_value; - bool init(THD *thd, const char *user, const char *host); + bool init (const char *user, const char *host); public: - Item_func_user(THD *thd): Item_func_sysconst(thd), last_query_id(0) - {} - - String *val_str(String *); + Item_func_user(THD *thd): Item_func_sysconst(thd) + { + str_value.set("", 0, system_charset_info); + } + String *val_str(String *) + { + DBUG_ASSERT(fixed == 1); + return (null_value ? 0 : &str_value); + } + bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec() { max_length= (username_char_length + @@ -611,6 +618,10 @@ public: } const char *func_name() const { return "user"; } const char *fully_qualified_func_name() const { return "user()"; } + int save_in_field(Field *field, bool no_conversions) + { + return save_str_value_in_field(field, &str_value); + } }; @@ -621,14 +632,14 @@ class Item_func_current_user :public Item_func_user public: Item_func_current_user(THD *thd, Name_resolution_context *context_arg): Item_func_user(thd), context(context_arg) {} - String *val_str(String *); + bool fix_fields(THD *thd, Item **ref); const char *func_name() const { return "current_user"; } const char *fully_qualified_func_name() const { return "current_user()"; } - /* This is because of the stored Name_resolution_context */ bool check_vcol_func_processor(void *arg) { + context= 0; return mark_unsupported_function(fully_qualified_func_name(), arg, - VCOL_IMPOSSIBLE); + VCOL_SESSION_FUNC); } }; @@ -642,20 +653,21 @@ public: Item_func_sysconst(thd), context(context_arg) {} bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec() - { - max_length= username_char_length * SYSTEM_CHARSET_MBMAXLEN; - maybe_null=1; - } - bool init(THD *thd); - int save_in_field(Field *field, bool no_conversions); + { max_length= username_char_length * SYSTEM_CHARSET_MBMAXLEN; } + int save_in_field(Field *field, bool no_conversions) + { return save_str_value_in_field(field, &str_value); } const char *func_name() const { return "current_role"; } const char *fully_qualified_func_name() const { return "current_role()"; } - String *val_str(String *); - /* This is because of the stored Name_resolution_context */ + String *val_str(String *) + { + DBUG_ASSERT(fixed == 1); + return (null_value ? 
0 : &str_value); + } bool check_vcol_func_processor(void *arg) { + context= 0; return mark_unsupported_function(fully_qualified_func_name(), arg, - VCOL_IMPOSSIBLE); + VCOL_SESSION_FUNC); } }; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 80e7c1010d0..84f610b992d 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -196,7 +196,7 @@ public: } bool check_vcol_func_processor(void *arg) { - return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); + return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } }; @@ -411,7 +411,7 @@ class Item_func_dayname :public Item_func_weekday bool check_partition_func_processor(void *int_arg) {return TRUE;} bool check_vcol_func_processor(void *arg) { - return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); + return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } }; @@ -776,7 +776,7 @@ public: bool eq(const Item *item, bool binary_cmp) const; bool check_vcol_func_processor(void *arg) { - return mark_unsupported_function(func_name(), "()", arg, VCOL_IMPOSSIBLE); + return mark_unsupported_function(func_name(), "()", arg, VCOL_SESSION_FUNC); } }; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 5ef67ce6560..9d6f1dbed9e 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4687,6 +4687,45 @@ static void mark_real_tables_as_free_for_reuse(TABLE_LIST *table_list) } +static bool fix_all_session_vcol_exprs(THD *thd, TABLE_LIST *tables) +{ + Security_context *save_security_ctx= thd->security_ctx; + TABLE_LIST *first_not_own= thd->lex->first_not_own_table(); + DBUG_ENTER("fix_session_vcol_expr"); + + for (TABLE_LIST *table= tables; table && table != first_not_own; + table= table->next_global) + { + TABLE *t= table->table; + if (!table->placeholder() && t->s->vcols_need_refixing) + { + if (table->security_ctx) + thd->security_ctx= table->security_ctx; + + for (Field **vf= t->vfield; vf && *vf; vf++) + if (fix_session_vcol_expr(thd, (*vf)->vcol_info)) + goto err; + + for (Field **df= t->default_field; df && *df; df++) + if ((*df)->default_value && + fix_session_vcol_expr(thd, (*df)->default_value)) + goto err; + + if (table->lock_type >= TL_WRITE_ALLOW_WRITE) + for (Virtual_column_info **cc= t->check_constraints; cc && *cc; cc++) + if (fix_session_vcol_expr(thd, (*cc))) + goto err; + + thd->security_ctx= save_security_ctx; + } + } + DBUG_RETURN(0); +err: + thd->security_ctx= save_security_ctx; + DBUG_RETURN(1); +} + + /** Lock all tables in a list. 
@@ -4848,7 +4887,11 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, } } - DBUG_RETURN(thd->decide_logging_format(tables)); + bool res= fix_all_session_vcol_exprs(thd, tables); + if (!res) + res= thd->decide_logging_format(tables); + + DBUG_RETURN(res); } diff --git a/sql/table.cc b/sql/table.cc index f72f0fd02d9..e3a1f225358 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2530,6 +2530,18 @@ static bool fix_vcol_expr(THD *thd, Virtual_column_info *vcol) } +bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol) +{ + DBUG_ENTER("fix_session_vcol_expr"); + if (!(vcol->flags & (VCOL_TIME_FUNC|VCOL_SESSION_FUNC))) + DBUG_RETURN(0); + + vcol->expr_item->cleanup(); + DBUG_ASSERT(!vcol->expr_item->fixed); + DBUG_RETURN(fix_vcol_expr(thd, vcol)); +} + + /* @brief Perform semantic analysis of the defining expression for a virtual column @@ -2610,8 +2622,12 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table, Field *field, /* Mark what kind of default / virtual fields the table has */ - if (vcol->stored_in_db && vcol->flags & VCOL_NON_DETERMINISTIC) - table->s->non_determinstic_insert= 1; + if (vcol->stored_in_db && + vcol->flags & (VCOL_NON_DETERMINISTIC | VCOL_SESSION_FUNC)) + table->s->non_determinstic_insert= true; + + if (vcol->flags & VCOL_SESSION_FUNC) + table->s->vcols_need_refixing= true; DBUG_RETURN(0); } @@ -3030,8 +3046,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, goto err; } field->default_value= vcol; - if (is_create_table && - !(vcol->flags & (VCOL_UNKNOWN | VCOL_NON_DETERMINISTIC | VCOL_TIME_FUNC))) + if (is_create_table && !vcol->flags) { enum_check_fields old_count_cuted_fields= thd->count_cuted_fields; thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values diff --git a/sql/table.h b/sql/table.h index 651fab7c4cb..4a86fc455a2 100644 --- a/sql/table.h +++ b/sql/table.h @@ -681,6 +681,7 @@ struct TABLE_SHARE bool can_cmp_whole_record; bool table_creation_was_logged; bool non_determinstic_insert; + bool vcols_need_refixing; bool virtual_stored_fields; bool check_set_initialized; bool has_update_default_function; @@ -2631,6 +2632,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, uint db_stat, uint prgflag, uint ha_open_flags, TABLE *outparam, bool is_create_table); +bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol); Virtual_column_info *unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root, TABLE *table, Field *field, -- cgit v1.2.1 From 266563ad775fca38dbb5c76a5100fef49515ffda Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 22 Jul 2016 13:44:58 +0200 Subject: fix: CREATE TABLE (col TIMESTAMP(6) DEFAULT NOW(2)) That is, when the precision of DEFAULT NOW() is less than the precision of the column, do not convert it to unireg_check, use the new approach where DEFAULT is tryly an expression. 
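For illustration, a minimal sketch of the intended behaviour (mirroring the type_timestamp_hires test changes below; t1 and a are placeholder names only):

create or replace table t1 (a timestamp(5) default current_timestamp(2));
show create table t1;  # the lower-precision default survives as an expression: DEFAULT current_timestamp(2)
insert t1 () values ();
select * from t1;      # fractional seconds are truncated to 2 digits, shown with the column's 5 digits
drop table t1;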
--- mysql-test/r/default.result | 2 +- mysql-test/r/func_default.result | 4 +- mysql-test/r/type_timestamp_hires.result | 67 +++++++++++++++++++++++--------- mysql-test/t/type_timestamp_hires.test | 32 +++++++++------ sql/field.cc | 58 ++++++++++----------------- 5 files changed, 93 insertions(+), 70 deletions(-) diff --git a/mysql-test/r/default.result b/mysql-test/r/default.result index 1f60e66e64e..62dee045604 100644 --- a/mysql-test/r/default.result +++ b/mysql-test/r/default.result @@ -253,7 +253,7 @@ CREATE or replace TABLE t1 (event_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIM SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( - `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) + `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE CURRENT_TIMESTAMP(6) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 CREATE or replace TABLE t1 (event_time TIMESTAMP(6) NOT NULL DEFAULT SYSDATE(2) ON UPDATE CURRENT_TIMESTAMP); SHOW CREATE TABLE t1; diff --git a/mysql-test/r/func_default.result b/mysql-test/r/func_default.result index aac5f4942a7..535be10da86 100644 --- a/mysql-test/r/func_default.result +++ b/mysql-test/r/func_default.result @@ -29,6 +29,6 @@ insert t1 () values (); insert t1 (a) values (10); select default(a),default(b),default(c),default(d),default(e),default(f) from t1; default(a) default(b) default(c) default(d) default(e) default(f) -1 2 foo bar 2001-01-01 10:20:30 2001-01-01 10:20:30.123456 -1 11 foo bar 2001-01-01 10:20:30 2001-01-01 10:20:30.123456 +1 2 foo bar 2001-01-01 10:20:30 2001-01-01 10:20:30.120000 +1 11 foo bar 2001-01-01 10:20:30 2001-01-01 10:20:30.120000 drop table t1; diff --git a/mysql-test/r/type_timestamp_hires.result b/mysql-test/r/type_timestamp_hires.result index 89f6754ec9a..3b88454ba00 100644 --- a/mysql-test/r/type_timestamp_hires.result +++ b/mysql-test/r/type_timestamp_hires.result @@ -272,33 +272,64 @@ select * from t1; a 2011-01-01 01:01:01.12345 drop table t1; -create table t1 (a timestamp(5) default current_timestamp); +create or replace table t1 (a timestamp(5) default current_timestamp); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` timestamp(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 -drop table t1; -create table t1 (a timestamp(5) default current_timestamp()); -drop table t1; -create table t1 (a timestamp(5) default current_timestamp(2)); +create or replace table t1 (a timestamp(5) default current_timestamp()); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` timestamp(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 -drop table t1; -create table t1 (a timestamp(5) default current_timestamp(5)); -drop table t1; -create table t1 (a timestamp(5) default current_timestamp(6)); -drop table t1; -create table t1 (a timestamp(5) on update current_timestamp); -drop table t1; -create table t1 (a timestamp(5) on update current_timestamp()); -drop table t1; -create table t1 (a timestamp(5) on update current_timestamp(3)); +create or replace table t1 (a timestamp(5) default current_timestamp(2)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT current_timestamp(2) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +insert t1 () values (); +select * from t1; +a +2011-01-01 01:01:01.12000 +create or replace table t1 (a timestamp(5) default current_timestamp(5)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` 
timestamp(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create or replace table t1 (a timestamp(5) default current_timestamp(6)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create or replace table t1 (a timestamp(5) on update current_timestamp); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT '0000-00-00 00:00:00.00000' ON UPDATE CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create or replace table t1 (a timestamp(5) on update current_timestamp()); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT '0000-00-00 00:00:00.00000' ON UPDATE CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create or replace table t1 (a timestamp(5) on update current_timestamp(3)); ERROR HY000: Invalid ON UPDATE clause for 'a' column -create table t1 (a timestamp(5) on update current_timestamp(5)); -drop table t1; -create table t1 (a timestamp(5) on update current_timestamp(6)); +create or replace table t1 (a timestamp(5) on update current_timestamp(5)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT '0000-00-00 00:00:00.00000' ON UPDATE CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +create or replace table t1 (a timestamp(5) on update current_timestamp(6)); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp(5) NOT NULL DEFAULT '0000-00-00 00:00:00.00000' ON UPDATE CURRENT_TIMESTAMP(5) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; diff --git a/mysql-test/t/type_timestamp_hires.test b/mysql-test/t/type_timestamp_hires.test index c51fb2fb247..0b05f81ef42 100644 --- a/mysql-test/t/type_timestamp_hires.test +++ b/mysql-test/t/type_timestamp_hires.test @@ -17,18 +17,26 @@ drop table t1; # # MDEV-438 Microseconds: Precision is ignored in CURRENT_TIMESTAMP(N) when it is given as a default column value # -create table t1 (a timestamp(5) default current_timestamp); +create or replace table t1 (a timestamp(5) default current_timestamp); show create table t1; -drop table t1; -create table t1 (a timestamp(5) default current_timestamp()); drop table t1; -create table t1 (a timestamp(5) default current_timestamp(2)); +create or replace table t1 (a timestamp(5) default current_timestamp()); +show create table t1; +create or replace table t1 (a timestamp(5) default current_timestamp(2)); +show create table t1; +insert t1 () values (); +select * from t1; +create or replace table t1 (a timestamp(5) default current_timestamp(5)); +show create table t1; +create or replace table t1 (a timestamp(5) default current_timestamp(6)); +show create table t1; +create or replace table t1 (a timestamp(5) on update current_timestamp); +show create table t1; +create or replace table t1 (a timestamp(5) on update current_timestamp()); show create table t1; -drop table t1; -create table t1 (a timestamp(5) default current_timestamp(5)); drop table t1; -create table t1 (a timestamp(5) default current_timestamp(6)); drop table t1; -create table t1 (a timestamp(5) on update current_timestamp); drop table t1; -create table t1 (a timestamp(5) on update current_timestamp()); drop table t1; --error ER_INVALID_ON_UPDATE -create table t1 (a timestamp(5) on update current_timestamp(3)); -create table t1 (a timestamp(5) on update current_timestamp(5)); drop table 
t1; -create table t1 (a timestamp(5) on update current_timestamp(6)); drop table t1; +create or replace table t1 (a timestamp(5) on update current_timestamp(3)); +create or replace table t1 (a timestamp(5) on update current_timestamp(5)); +show create table t1; +create or replace table t1 (a timestamp(5) on update current_timestamp(6)); +show create table t1; +drop table t1; diff --git a/sql/field.cc b/sql/field.cc index b8816c59d9f..73e6b4edc3c 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -9863,59 +9863,43 @@ bool Column_definition::check(THD *thd) } } } + if (default_value && (flags & AUTO_INCREMENT_FLAG)) { my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } - if (default_value && !default_value->expr_item->basic_const_item()) + if (default_value && !default_value->expr_item->basic_const_item() && + mysql_type_to_time_type(sql_type) == MYSQL_TIMESTAMP_DATETIME && + default_value->expr_item->type() == Item::FUNC_ITEM) { - Item *def_expr= default_value->expr_item; - - unireg_check= Field::NONE; /* - NOW() for TIMESTAMP and DATETIME fields are handled as in MariaDB 10.1 - by marking them in unireg_check. + Special case: NOW() for TIMESTAMP and DATETIME fields are handled + as in MariaDB 10.1 by marking them in unireg_check. */ - if (def_expr->type() == Item::FUNC_ITEM && - (static_cast(def_expr)->functype() == - Item_func::NOW_FUNC && - (mysql_type_to_time_type(sql_type) == MYSQL_TIMESTAMP_DATETIME))) + Item_func *fn= static_cast(default_value->expr_item); + if (fn->functype() == Item_func::NOW_FUNC && + (fn->decimals == 0 || fn->decimals >= length)) { - /* - We are not checking the number of decimals for timestamps - to allow one to write (for historical reasons) - TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP - Instead we are going to use the number of decimals specifed by the - column. - */ default_value= 0; - unireg_check= (on_update ? - Field::TIMESTAMP_DNUN_FIELD : // for insertions and for updates. - Field::TIMESTAMP_DN_FIELD); // only for insertions. + unireg_check= Field::TIMESTAMP_DN_FIELD; } - else if (on_update) - unireg_check= Field::TIMESTAMP_UN_FIELD; // function default for updates - } - else - { - /* No function default for insertions. Either NULL or a constant. */ - if (on_update) - unireg_check= Field::TIMESTAMP_UN_FIELD; // function default for updates - else - unireg_check= ((flags & AUTO_INCREMENT_FLAG) ? - Field::NEXT_NUMBER : // Automatic increment. - Field::NONE); } - if (on_update && - (mysql_type_to_time_type(sql_type) != MYSQL_TIMESTAMP_DATETIME || - on_update->decimals < length)) + if (on_update) { - my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name); - DBUG_RETURN(TRUE); + if (mysql_type_to_time_type(sql_type) != MYSQL_TIMESTAMP_DATETIME || + on_update->decimals < length) + { + my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name); + DBUG_RETURN(TRUE); + } + unireg_check= unireg_check == Field::NONE ? Field::TIMESTAMP_UN_FIELD + : Field::TIMESTAMP_DNUN_FIELD; } + else if (flags & AUTO_INCREMENT_FLAG) + unireg_check= Field::NEXT_NUMBER; sign_len= flags & UNSIGNED_FLAG ? 
0 : 1; -- cgit v1.2.1 From 6e5048e85f606280d7aacc32df786b0de30f63fe Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 22 Jul 2016 13:48:21 +0200 Subject: clarify the error message for frm size overflow --- mysql-test/r/comment_column2.result | 4 ++-- mysql-test/suite/engines/funcs/r/comment_column2.result | 4 ++-- mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result | 2 +- sql/unireg.cc | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/comment_column2.result b/mysql-test/r/comment_column2.result index 6fa5d73b70a..99402e7f5b4 100644 --- a/mysql-test/r/comment_column2.result +++ b/mysql-test/r/comment_column2.result @@ -249,7 +249,7 @@ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij SELECT comment,index_comment,char_length(index_comment) FROM information_schema.statistics WHERE table_name='t1'; comment index_comment char_length(index_comment) ALTER TABLE t1 ADD COLUMN c64 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij'; -ERROR HY000: Too many columns +ERROR HY000: Table definition is too large SELECT table_comment,char_length(table_comment) FROM information_schema.tables WHERE table_name='t1'; table_comment char_length(table_comment) 0 @@ -388,7 +388,7 @@ c63 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghi c64 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij', c65 INTEGER COMMENT 
'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij') ; -ERROR HY000: Too many columns +ERROR HY000: Table definition is too large SELECT table_comment,char_length(table_comment) FROM information_schema.tables WHERE table_name='t1'; table_comment char_length(table_comment) SELECT column_comment,char_length(column_comment) FROM information_schema.columns WHERE table_name='t1'; diff --git a/mysql-test/suite/engines/funcs/r/comment_column2.result b/mysql-test/suite/engines/funcs/r/comment_column2.result index 6fa5d73b70a..99402e7f5b4 100644 --- a/mysql-test/suite/engines/funcs/r/comment_column2.result +++ b/mysql-test/suite/engines/funcs/r/comment_column2.result @@ -249,7 +249,7 @@ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij SELECT comment,index_comment,char_length(index_comment) FROM information_schema.statistics WHERE table_name='t1'; comment index_comment char_length(index_comment) ALTER TABLE t1 ADD COLUMN c64 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij'; -ERROR HY000: Too many columns +ERROR HY000: Table definition is too large SELECT table_comment,char_length(table_comment) FROM information_schema.tables WHERE table_name='t1'; table_comment char_length(table_comment) 0 @@ -388,7 +388,7 @@ c63 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghi c64 INTEGER COMMENT 
'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij', c65 INTEGER COMMENT 'abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij') ; -ERROR HY000: Too many columns +ERROR HY000: Table definition is too large SELECT table_comment,char_length(table_comment) FROM information_schema.tables WHERE table_name='t1'; table_comment char_length(table_comment) SELECT column_comment,char_length(column_comment) FROM information_schema.columns WHERE table_name='t1'; diff --git a/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result b/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result index 3389038dd6c..0e03d80014d 100644 --- a/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result +++ b/mysql-test/suite/vcol/r/vcol_blocked_sql_funcs.result @@ -242,7 +242,7 @@ drop function sub1; create or replace table t1 (a int, b varchar(300) as (concat(a,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))); drop table t1; create or replace table t1 (a int, b varchar(16384) as (concat(a,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))); -ERROR HY000: Too many columns +ERROR HY000: Table definition is too large # # Constant expression create or replace table t1 (a int as (PI()) PERSISTENT); diff --git a/sql/unireg.cc b/sql/unireg.cc index 19d03d23cc7..ff46e03ab3e 100644 --- 
a/sql/unireg.cc +++ b/sql/unireg.cc @@ -735,7 +735,7 @@ static bool pack_header(THD *thd, uchar *forminfo, n_length+int_length+com_length+expression_length > 65535L || int_count > 255) { - my_message(ER_TOO_MANY_FIELDS, ER_THD(thd, ER_TOO_MANY_FIELDS), MYF(0)); + my_message(ER_TOO_MANY_FIELDS, "Table definition is too large", MYF(0)); DBUG_RETURN(1); } -- cgit v1.2.1 From 39537435cbf1cb705e9d11c5711170e26ae30c08 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 22 Jul 2016 14:24:23 +0200 Subject: clarify the comment and use the same error message for CREATE and open table time --- sql/table.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/table.cc b/sql/table.cc index e3a1f225358..9be2ae31215 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2589,11 +2589,11 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table, Field *field, /* fix_fields could've changed the expression */ func_expr= vcol->expr_item; - /* Number of columns will be checked later */ + /* this was checked in check_expression(), but the frm could be mangled... */ if (unlikely(func_expr->result_type() == ROW_RESULT)) { - my_error(ER_ROW_EXPR_FOR_VCOL, MYF(0)); - DBUG_RETURN(1); + my_error(ER_OPERAND_COLUMNS, MYF(0), 1); + DBUG_RETURN(1); } /* Check that we are not refering to any not yet initialized fields */ -- cgit v1.2.1 From 159dc969ddd2ced669acbb669bae5fc8a4383b92 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 22 Jul 2016 15:36:31 +0200 Subject: cleanup: redundant variable --- sql/sql_class.cc | 4 ++-- sql/sql_class.h | 2 +- sql/table.cc | 5 ----- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 4ace2be3e28..d29dc0eff14 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -870,7 +870,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) is_fatal_sub_stmt_error(false), rand_used(0), time_zone_used(0), - in_lock_tables(0), in_stored_expression(0), + in_lock_tables(0), bootstrap(0), derived_tables_processing(FALSE), waiting_on_group_commit(FALSE), has_waiter(FALSE), @@ -2338,7 +2338,7 @@ bool THD::convert_string(LEX_STRING *to, CHARSET_INFO *to_cs, to->length= copy_and_convert((char*) to->str, new_length, to_cs, from, from_length, from_cs, &errors); to->str[to->length]= 0; // Safety - if (errors && in_stored_expression) + if (errors && lex->parse_vcol_expr) { my_error(ER_BAD_DATA, MYF(0), ErrConvString(from, from_length, from_cs).ptr(), diff --git a/sql/sql_class.h b/sql/sql_class.h index 55c0948d46e..6e71adf023a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2858,7 +2858,7 @@ public: bool query_start_sec_part_used; /* for IS NULL => = last_insert_id() fix in remove_eq_conds() */ bool substitute_null_with_insert_id; - bool in_lock_tables, in_stored_expression; + bool in_lock_tables; bool bootstrap, cleanup_done, free_connection_done; /** is set if some thread specific value(s) used in a statement. 
*/ diff --git a/sql/table.cc b/sql/table.cc index 9be2ae31215..ca6a5a1b5a6 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2512,11 +2512,8 @@ static bool fix_vcol_expr(THD *thd, Virtual_column_info *vcol) const char *save_where= thd->where; thd->where= "virtual column function"; - thd->in_stored_expression= 1; - int error= vcol->expr_item->fix_fields(thd, &vcol->expr_item); - thd->in_stored_expression= 0; thd->mark_used_columns= save_mark_used_columns; thd->where= save_where; @@ -2753,9 +2750,7 @@ Virtual_column_info *unpack_vcol_info_from_frm(THD *thd, thd->update_charset(&my_charset_utf8mb4_general_ci, table->s->table_charset); } - thd->in_stored_expression= 1; error= parse_sql(thd, &parser_state, NULL); - thd->in_stored_expression= 0; if (error) goto err; -- cgit v1.2.1 From 3aff76f3750cf1ce2a58f093cb46190c2417f3bd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 23 Jul 2016 16:26:24 +0200 Subject: vcol flag rename VCOL_UNKNOWN -> VCOL_FIELD_REF --- sql/field.h | 2 +- sql/item.h | 4 ++-- sql/item_func.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/field.h b/sql/field.h index dc15cf9d10a..f3328d560b8 100644 --- a/sql/field.h +++ b/sql/field.h @@ -575,7 +575,7 @@ inline bool is_temporal_type_with_time(enum_field_types type) } /* Bits for type of vcol expression */ -#define VCOL_UNKNOWN 1 /* UDF used; Need fix_fields() to know */ +#define VCOL_FIELD_REF 1 #define VCOL_NON_DETERMINISTIC 2 #define VCOL_SESSION_FUNC 4 /* uses session data, e.g. USER or DAYNAME */ #define VCOL_TIME_FUNC 8 diff --git a/sql/item.h b/sql/item.h index 6988543e19d..8fcbf82853d 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2516,8 +2516,8 @@ public: bool update_table_bitmaps_processor(void *arg); bool switch_to_nullable_fields_processor(void *arg); bool check_vcol_func_processor(void *arg) - { // may be, a special flag VCOL_FIELD ? - return mark_unsupported_function(field_name, arg, VCOL_UNKNOWN); + { + return mark_unsupported_function(field_name, arg, VCOL_FIELD_REF); } void cleanup(); Item_equal *get_item_equal() { return item_equal; } diff --git a/sql/item_func.h b/sql/item_func.h index 892dacea6ee..92bc798e044 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1470,7 +1470,7 @@ public: virtual void print(String *str, enum_query_type query_type); bool check_vcol_func_processor(void *arg) { - return mark_unsupported_function(func_name(), "()", arg, VCOL_UNKNOWN); + return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC); } }; -- cgit v1.2.1 From 4070d55735f1642e563b8d60fc2e9771f4963a3f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 24 Jul 2016 14:54:52 +0200 Subject: fix: CHECK and DEFAULT after CREATE ... 
SELECT expression defaults and check constraints should behave as constant default values - copied from fields, not copied from expressions --- mysql-test/r/default.result | 18 ++++++++++++++++++ mysql-test/t/default.test | 10 ++++++++++ sql/field.cc | 4 ++-- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/default.result b/mysql-test/r/default.result index 62dee045604..81568d6c654 100644 --- a/mysql-test/r/default.result +++ b/mysql-test/r/default.result @@ -3063,3 +3063,21 @@ t1 CREATE TABLE `t1` ( `c` int(11) DEFAULT (-a) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; +create table t1 (a int default 5 check (a>10), b int default (5+5), c int as (a+b)); +create table t2 as select a, b, c from t1; +create table t3 as select max(a), max(b), max(c) from t1; +show create table t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) DEFAULT 5 CHECK (a>10), + `b` int(11) DEFAULT (5+5), + `c` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +show create table t3; +Table Create Table +t3 CREATE TABLE `t3` ( + `max(a)` int(11) DEFAULT NULL, + `max(b)` int(11) DEFAULT NULL, + `max(c)` int(11) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1, t2, t3; diff --git a/mysql-test/t/default.test b/mysql-test/t/default.test index 14eb16bacd4..47de4a00a50 100644 --- a/mysql-test/t/default.test +++ b/mysql-test/t/default.test @@ -1827,3 +1827,13 @@ alter table t1 alter a set default (2+3), alter b set default 4, alter table t1 alter a set default 1+2; show create table t1; drop table t1; + +# +# CREATE ... SELECT +# +create table t1 (a int default 5 check (a>10), b int default (5+5), c int as (a+b)); +create table t2 as select a, b, c from t1; +create table t3 as select max(a), max(b), max(c) from t1; +show create table t2; +show create table t3; +drop table t1, t2, t3; diff --git a/sql/field.cc b/sql/field.cc index 73e6b4edc3c..b5d971d4ce2 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -10482,8 +10482,8 @@ Column_definition::Column_definition(THD *thd, Field *old_field, comment= old_field->comment; decimals= old_field->decimals(); vcol_info= old_field->vcol_info; - default_value= old_field->default_value; - check_constraint= old_field->check_constraint; + default_value= orig_field ? orig_field->default_value : 0; + check_constraint= orig_field ? orig_field->check_constraint : 0; option_list= old_field->option_list; switch (sql_type) { -- cgit v1.2.1 From 12d2c4fcd0bc3fbe74759e8285f2c93ad348e749 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 23 Jul 2016 16:55:52 +0200 Subject: optimize constant default expressions to be calculated at the CREATE TABLE time and stored in the default row image. 
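For illustration, a minimal sketch of the effect (following the default.test change below; t1 and its columns are placeholder names): a constant default expression is evaluated once at CREATE TABLE time and stored in the default row image, while a non-constant one is still evaluated for every row.

create table t1 (
  a int default (1+1),        # constant expression: folded at CREATE TABLE time into the default row image
  b int default (rand()*0+2)  # non-constant expression: kept as an expression, evaluated per INSERT
);
insert t1 () values ();
select * from t1;             # both columns read 2, but only b was computed at insert time
drop table t1;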
--- mysql-test/r/default.result | 3 +- mysql-test/t/default.test | 3 +- sql/field.h | 6 +- sql/item.cc | 6 +- sql/sql_table.cc | 190 ++++++++++++++++++++++++-------------------- sql/table.cc | 47 ++++++----- sql/unireg.cc | 4 +- 7 files changed, 141 insertions(+), 118 deletions(-) diff --git a/mysql-test/r/default.result b/mysql-test/r/default.result index 81568d6c654..386837a84bb 100644 --- a/mysql-test/r/default.result +++ b/mysql-test/r/default.result @@ -409,7 +409,6 @@ create or replace table t1 (a int not null, b int default (a+1)); create or replace table t1 (a int default a); ERROR 01000: Expression for field `a` is refering to uninitialized field `a` create or replace table t1 (a int default b, b int default (1+1)); -ERROR 01000: Expression for field `a` is refering to uninitialized field `b` create or replace table t1 (a int default 1, b int as (c), c int as (a+1)); ERROR 01000: Expression for field `b` is refering to uninitialized field `c` CREATE TABLE t1 (a INT DEFAULT (DEFAULT(a))); @@ -3035,7 +3034,7 @@ t1 CREATE TABLE `t1` ( `a` char(2) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci DEFAULT concat('A') ) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; -create table t1 (a int default 1, b int default (1+1), c int); +create table t1 (a int default 1, b int default (rand()*0+2), c int); insert t1 (c) values (a); insert t1 (c) values (b); select * from t1; diff --git a/mysql-test/t/default.test b/mysql-test/t/default.test index 47de4a00a50..6c871527212 100644 --- a/mysql-test/t/default.test +++ b/mysql-test/t/default.test @@ -295,7 +295,6 @@ create or replace table t1 (a int not null, b int default (a+1)); --error ER_EXPRESSION_REFERS_TO_UNINIT_FIELD create or replace table t1 (a int default a); ---error ER_EXPRESSION_REFERS_TO_UNINIT_FIELD create or replace table t1 (a int default b, b int default (1+1)); --error ER_EXPRESSION_REFERS_TO_UNINIT_FIELD create or replace table t1 (a int default 1, b int as (c), c int as (a+1)); @@ -1810,7 +1809,7 @@ DROP TABLE t1; # # Order of evaluation: # -create table t1 (a int default 1, b int default (1+1), c int); +create table t1 (a int default 1, b int default (rand()*0+2), c int); insert t1 (c) values (a); insert t1 (c) values (b); select * from t1; diff --git a/sql/field.h b/sql/field.h index f3328d560b8..45d2c3a7f00 100644 --- a/sql/field.h +++ b/sql/field.h @@ -574,7 +574,11 @@ inline bool is_temporal_type_with_time(enum_field_types type) } } -/* Bits for type of vcol expression */ +/* + Flags for Virtual_column_info. If none is set, the expression must be + a constant with no side-effects, so it's calculated at CREATE TABLE time, + stored in table->record[2], and not recalculated for every statement. +*/ #define VCOL_FIELD_REF 1 #define VCOL_NON_DETERMINISTIC 2 #define VCOL_SESSION_FUNC 4 /* uses session data, e.g. 
USER or DAYNAME */ diff --git a/sql/item.cc b/sql/item.cc index 2fb507cd720..420e0df71bd 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -956,8 +956,8 @@ bool Item_field::check_field_expression_processor(void *arg) { if (field->flags & NO_DEFAULT_VALUE_FLAG) return 0; - if ((field->default_value || field->has_insert_default_function() || - field->vcol_info)) + if ((field->default_value && field->default_value->flags) + || field->has_insert_default_function() || field->vcol_info) { Field *org_field= (Field*) arg; if (field == org_field || @@ -8232,7 +8232,7 @@ bool Item_default_value::fix_fields(THD *thd, Item **items) set_field(def_field); if (field->default_value) { - if (field->default_value->expr_item) // it's NULL during CREATE TABLE + if (thd->mark_used_columns != MARK_COLUMNS_NONE) field->default_value->expr_item->walk(&Item::register_field_in_read_map, 1, 0); IF_DBUG(def_field->is_stat_field=1,); // a hack to fool ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 3ca2783f773..364d8eda773 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3255,35 +3255,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, !(sql_field->charset= find_bin_collation(sql_field->charset))) DBUG_RETURN(TRUE); - /* - Convert the default value from client character - set into the column character set if necessary. - We can only do this for constants as we have not yet run fix_fields. - */ - if (sql_field->default_value && - sql_field->default_value->expr_item->basic_const_item() && - save_cs != sql_field->default_value->expr_item->collation.collation && - (sql_field->sql_type == MYSQL_TYPE_VAR_STRING || - sql_field->sql_type == MYSQL_TYPE_STRING || - sql_field->sql_type == MYSQL_TYPE_SET || - sql_field->sql_type == MYSQL_TYPE_TINY_BLOB || - sql_field->sql_type == MYSQL_TYPE_MEDIUM_BLOB || - sql_field->sql_type == MYSQL_TYPE_LONG_BLOB || - sql_field->sql_type == MYSQL_TYPE_BLOB || - sql_field->sql_type == MYSQL_TYPE_ENUM)) - { - Item *item; - if (!(item= sql_field->default_value->expr_item-> - safe_charset_converter(thd, save_cs))) - { - /* Could not convert */ - my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); - DBUG_RETURN(TRUE); - } - /* Fix for prepare statement */ - thd->change_item_tree(&sql_field->default_value->expr_item, item); - } - if (sql_field->sql_type == MYSQL_TYPE_SET || sql_field->sql_type == MYSQL_TYPE_ENUM) { @@ -3349,37 +3320,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if (sql_field->sql_type == MYSQL_TYPE_SET) { uint32 field_length; - if (sql_field->default_value && - sql_field->default_value->expr_item->basic_const_item()) - { - char *not_used; - uint not_used2; - bool not_found= 0; - String str, *def= sql_field->default_value->expr_item->val_str(&str); - if (def == NULL) /* SQL "NULL" maps to NULL */ - { - if ((sql_field->flags & NOT_NULL_FLAG) != 0) - { - my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); - DBUG_RETURN(TRUE); - } - - /* else, NULL is an allowed value */ - (void) find_set(interval, NULL, 0, - cs, ¬_used, ¬_used2, ¬_found); - } - else /* not NULL */ - { - (void) find_set(interval, def->ptr(), def->length(), - cs, ¬_used, ¬_used2, ¬_found); - } - - if (not_found) - { - my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); - DBUG_RETURN(TRUE); - } - } calculate_interval_lengths(cs, interval, &dummy, &field_length); sql_field->length= field_length + (interval->count - 1); } @@ -3387,30 +3327,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO 
*create_info, { uint32 field_length; DBUG_ASSERT(sql_field->sql_type == MYSQL_TYPE_ENUM); - if (sql_field->default_value && - sql_field->default_value->expr_item->basic_const_item()) - { - String str, *def= sql_field->default_value->expr_item->val_str(&str); - if (def == NULL) /* SQL "NULL" maps to NULL */ - { - if ((sql_field->flags & NOT_NULL_FLAG) != 0) - { - my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); - DBUG_RETURN(TRUE); - } - - /* else, the defaults yield the correct length for NULLs. */ - } - else /* not NULL */ - { - def->length(cs->cset->lengthsp(cs, def->ptr(), def->length())); - if (find_type2(interval, def->ptr(), def->length(), cs) == 0) /* not found */ - { - my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); - DBUG_RETURN(TRUE); - } - } - } calculate_interval_lengths(cs, interval, &field_length, &dummy); sql_field->length= field_length; } @@ -3430,6 +3346,112 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if (prepare_blob_field(thd, sql_field)) DBUG_RETURN(TRUE); + if (sql_field->default_value) + { + Virtual_column_info *def= sql_field->default_value; + + if (!sql_field->has_default_expression()) + def->expr_str= null_lex_str; + + if (!def->expr_item->basic_const_item() && !def->flags) + { + Item *expr= def->expr_item; + int err= !expr->fixed && // may be already fixed if ALTER TABLE + expr->fix_fields(thd, &expr); + if (!err) + { + if (expr->result_type() == REAL_RESULT) + { // don't convert floats to string and back, it can be lossy + double res= expr->val_real(); + if (expr->null_value) + expr= new (thd->mem_root) Item_null(thd); + else + expr= new (thd->mem_root) Item_float(thd, res, expr->decimals); + } + else + { + StringBuffer buf; + String *res= expr->val_str(&buf); + if (expr->null_value) + expr= new (thd->mem_root) Item_null(thd); + else + { + char *str= (char*) thd->strmake(res->ptr(), res->length()); + expr= new (thd->mem_root) Item_string(thd, str, res->length(), res->charset()); + } + } + thd->change_item_tree(&def->expr_item, expr); + } + } + } + + /* + Convert the default value from client character + set into the column character set if necessary. + We can only do this for constants as we have not yet run fix_fields. 
+ */ + if (sql_field->default_value && + sql_field->default_value->expr_item->basic_const_item() && + save_cs != sql_field->default_value->expr_item->collation.collation && + (sql_field->sql_type == MYSQL_TYPE_VAR_STRING || + sql_field->sql_type == MYSQL_TYPE_STRING || + sql_field->sql_type == MYSQL_TYPE_SET || + sql_field->sql_type == MYSQL_TYPE_TINY_BLOB || + sql_field->sql_type == MYSQL_TYPE_MEDIUM_BLOB || + sql_field->sql_type == MYSQL_TYPE_LONG_BLOB || + sql_field->sql_type == MYSQL_TYPE_BLOB || + sql_field->sql_type == MYSQL_TYPE_ENUM)) + { + Item *item; + if (!(item= sql_field->default_value->expr_item-> + safe_charset_converter(thd, save_cs))) + { + /* Could not convert */ + my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); + DBUG_RETURN(TRUE); + } + /* Fix for prepare statement */ + thd->change_item_tree(&sql_field->default_value->expr_item, item); + } + + if (sql_field->default_value && + sql_field->default_value->expr_item->basic_const_item() && + (sql_field->sql_type == MYSQL_TYPE_SET || + sql_field->sql_type == MYSQL_TYPE_ENUM)) + { + StringBuffer str; + String *def= sql_field->default_value->expr_item->val_str(&str); + bool not_found; + if (def == NULL) /* SQL "NULL" maps to NULL */ + { + not_found= sql_field->flags & NOT_NULL_FLAG; + } + else + { + not_found= false; + if (sql_field->sql_type == MYSQL_TYPE_SET) + { + char *not_used; + uint not_used2; + find_set(sql_field->interval, def->ptr(), def->length(), + sql_field->charset, ¬_used, ¬_used2, ¬_found); + } + else /* MYSQL_TYPE_ENUM */ + { + def->length(sql_field->charset->cset->lengthsp(sql_field->charset, + def->ptr(), def->length())); + not_found= !find_type2(sql_field->interval, def->ptr(), + def->length(), sql_field->charset); + } + } + + if (not_found) + { + my_error(ER_INVALID_DEFAULT, MYF(0), sql_field->field_name); + DBUG_RETURN(TRUE); + } + } + if (!(sql_field->flags & NOT_NULL_FLAG)) null_fields++; diff --git a/sql/table.cc b/sql/table.cc index ca6a5a1b5a6..77736430fa3 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2593,13 +2593,6 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table, Field *field, DBUG_RETURN(1); } - /* Check that we are not refering to any not yet initialized fields */ - if (field) - { - if (func_expr->walk(&Item::check_field_expression_processor, 0, field)) - DBUG_RETURN(1); - } - /* Walk through the Item tree checking if all items are valid to be part of the virtual column @@ -2777,6 +2770,14 @@ end: DBUG_RETURN(vcol_info); } +static bool check_vcol_forward_refs(Field *field, Virtual_column_info *vcol) +{ + bool res= vcol && + vcol->expr_item->walk(&Item::check_field_expression_processor, 0, + field); + return res; +} + /* Read data from a binary .frm file from MySQL 3.23 - 5.0 into TABLE_SHARE */ @@ -3041,22 +3042,6 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, goto err; } field->default_value= vcol; - if (is_create_table && !vcol->flags) - { - enum_check_fields old_count_cuted_fields= thd->count_cuted_fields; - thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values - my_ptrdiff_t off= share->default_values - outparam->record[0]; - field->move_field_offset(off); - int res= vcol->expr_item->save_in_field(field, 1); - field->move_field_offset(-off); - thd->count_cuted_fields= old_count_cuted_fields; - if (res != 0 && res != 3) - { - my_error(ER_INVALID_DEFAULT, MYF(0), field->field_name); - error= OPEN_FRM_CORRUPTED; - goto err; - } - } *(dfield_ptr++)= *field_ptr; } else @@ -3068,6 +3053,19 @@ enum open_frm_error 
open_table_from_share(THD *thd, TABLE_SHARE *share, *vfield_ptr= 0; // End marker *dfield_ptr= 0; // End marker + /* Check that expressions aren't refering to not yet initialized fields */ + for (field_ptr= outparam->field; *field_ptr; field_ptr++) + { + Field *field= *field_ptr; + if (check_vcol_forward_refs(field, field->vcol_info) || + check_vcol_forward_refs(field, field->check_constraint) || + check_vcol_forward_refs(field, field->default_value)) + { + error= OPEN_FRM_CORRUPTED; + goto err; + } + } + /* Update to use trigger fields */ switch_defaults_to_nullable_trigger_fields(outparam); @@ -7316,7 +7314,8 @@ int TABLE::update_default_fields(bool update_command, bool ignore_errors) { if (!update_command) { - if (field->default_value) + if (field->default_value && + (field->default_value->flags || field->flags & BLOB_FLAG)) res|= (field->default_value->expr_item->save_in_field(field, 0) < 0); else res|= field->evaluate_insert_default_function(); diff --git a/sql/unireg.cc b/sql/unireg.cc index ff46e03ab3e..add09411acb 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -634,7 +634,7 @@ static bool pack_header(THD *thd, uchar *forminfo, if (add_expr_length(thd, &field->vcol_info, &expression_length)) DBUG_RETURN(1); - if (field->has_default_expression()) + if (field->default_value && field->default_value->expr_str.length) if (add_expr_length(thd, &field->default_value, &expression_length)) DBUG_RETURN(1); if (add_expr_length(thd, &field->check_constraint, &expression_length)) @@ -983,7 +983,7 @@ static bool pack_fields(uchar **buff_arg, List &create_fields, if (field->vcol_info) pack_expression(&buff, field->vcol_info, field_nr, field->vcol_info->stored_in_db ? 1 : 0); - if (field->has_default_expression()) + if (field->default_value && field->default_value->expr_str.length) pack_expression(&buff, field->default_value, field_nr, 2); if (field->check_constraint) pack_expression(&buff, field->check_constraint, field_nr, 3); -- cgit v1.2.1 From cd51c7fb60e39ae113e6bcf0d029564dd1b391e3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 24 Jul 2016 15:12:54 +0200 Subject: move away from TIMESTAMP_DNUN_FIELD/TIMESTAMP_DN_FIELD code use the new approach with Field->default_value expressions. But keep the old TIMESTAMP_UN_FIELD for ON UPDATE NOW(). --- sql/field.cc | 56 ++++++++++++++------------------------- sql/field.h | 55 ++------------------------------------- sql/item.cc | 5 ++-- sql/sql_insert.cc | 4 +-- sql/sql_show.cc | 37 ++++++++------------------ sql/sql_table.cc | 3 +-- sql/table.cc | 78 ++++++++++++++++++++++++++++++------------------------- sql/table.h | 13 ---------- sql/unireg.cc | 5 +++- 9 files changed, 85 insertions(+), 171 deletions(-) diff --git a/sql/field.cc b/sql/field.cc index b5d971d4ce2..ba0ebb253f4 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -4920,12 +4920,12 @@ void Field_double::sql_type(String &res) const field has NOW() as default and is updated when row changes, else it is field which has 0 as default value and is not automatically updated. TIMESTAMP_DN_FIELD - field with NOW() as default but not set on update - automatically (TIMESTAMP DEFAULT NOW()) + automatically (TIMESTAMP DEFAULT NOW()), not used in Field since 10.2.2 TIMESTAMP_UN_FIELD - field which is set on update automatically but has not NOW() as default (but it may has 0 or some other const timestamp as default) (TIMESTAMP ON UPDATE NOW()). TIMESTAMP_DNUN_FIELD - field which has now() as default and is auto-set on - update. (TIMESTAMP DEFAULT NOW() ON UPDATE NOW()) + update. 
(TIMESTAMP DEFAULT NOW() ON UPDATE NOW()), not used in Field since 10.2.2 NONE - field which is not auto-set on update with some other than NOW() default value (TIMESTAMP DEFAULT 0). @@ -4956,8 +4956,8 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg, this field will be automaticly updated on insert. */ flags|= TIMESTAMP_FLAG; - if (unireg_check != TIMESTAMP_DN_FIELD) - flags|= ON_UPDATE_NOW_FLAG; + flags|= ON_UPDATE_NOW_FLAG; + DBUG_ASSERT(unireg_check == TIMESTAMP_UN_FIELD); } } @@ -10561,40 +10561,24 @@ Column_definition::Column_definition(THD *thd, Field *old_field, - The column didn't have a default expression */ if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) && - old_field->ptr != NULL && - orig_field != NULL && - !default_value) + old_field->ptr != NULL && orig_field != NULL) { - bool default_now= false; - if (real_type_with_now_as_default(sql_type)) - { - // The SQL type of the new field allows a function default: - default_now= orig_field->has_insert_default_function(); - bool update_now= orig_field->has_update_default_function(); - - if (default_now && update_now) - unireg_check= Field::TIMESTAMP_DNUN_FIELD; - else if (default_now) - unireg_check= Field::TIMESTAMP_DN_FIELD; - else if (update_now) - unireg_check= Field::TIMESTAMP_UN_FIELD; - } - if (!default_now) // Give a constant default + if (orig_field->has_update_default_function()) + unireg_check= Field::TIMESTAMP_UN_FIELD; + + /* Get the value from default_values */ + const uchar *dv= orig_field->table->s->default_values; + if (!default_value && !orig_field->is_null_in_record(dv)) { - /* Get the value from default_values */ - const uchar *dv= orig_field->table->s->default_values; - if (!orig_field->is_null_in_record(dv)) - { - StringBuffer tmp(charset); - String *res= orig_field->val_str(&tmp, orig_field->ptr_in_record(dv)); - char *pos= (char*) thd->strmake(res->ptr(), res->length()); - default_value= new (thd->mem_root) Virtual_column_info(); - default_value->expr_str.str= pos; - default_value->expr_str.length= res->length(); - default_value->expr_item= - new (thd->mem_root) Item_string(thd, pos, res->length(), charset); - default_value->utf8= 0; - } + StringBuffer tmp(charset); + String *res= orig_field->val_str(&tmp, orig_field->ptr_in_record(dv)); + char *pos= (char*) thd->strmake(res->ptr(), res->length()); + default_value= new (thd->mem_root) Virtual_column_info(); + default_value->expr_str.str= pos; + default_value->expr_str.length= res->length(); + default_value->expr_item= + new (thd->mem_root) Item_string(thd, pos, res->length(), charset); + default_value->utf8= 0; } } } diff --git a/sql/field.h b/sql/field.h index 45d2c3a7f00..f550dad1c6c 100644 --- a/sql/field.h +++ b/sql/field.h @@ -473,20 +473,6 @@ inline bool is_temporal_type_with_date(enum_field_types type) } -/** - Tests if a field real type can have "DEFAULT CURRENT_TIMESTAMP" - - @param type Field type, as returned by field->real_type(). - @retval true If field real type can have "DEFAULT CURRENT_TIMESTAMP". - @retval false If field real type can not have "DEFAULT CURRENT_TIMESTAMP". -*/ -inline bool real_type_with_now_as_default(enum_field_types type) -{ - return type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_TIMESTAMP2 || - type == MYSQL_TYPE_DATETIME || type == MYSQL_TYPE_DATETIME2; -} - - /** Recognizer for concrete data type (called real_type for some reason), returning true if it is one of the TIMESTAMP types. 
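To make the four unireg_check classes described in the comment above concrete, the column definitions below map onto them as follows. This is an illustrative SQL sketch only, not part of the patch; the table and column names are made up:

  CREATE TABLE ts_demo (
    a TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,  # was TIMESTAMP_DNUN_FIELD; since 10.2.2 a default_value expression plus TIMESTAMP_UN_FIELD
    b TIMESTAMP DEFAULT CURRENT_TIMESTAMP,                              # was TIMESTAMP_DN_FIELD; since 10.2.2 a default_value expression with unireg_check NONE
    c TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP,                  # still TIMESTAMP_UN_FIELD
    d TIMESTAMP DEFAULT 0                                               # NONE
  );

This mirrors the frm upgrade path added later in this patch (table.cc and unireg.cc), which rewrites TIMESTAMP_DNUN_FIELD to TIMESTAMP_UN_FIELD and TIMESTAMP_DN_FIELD to NONE while synthesizing a CURRENT_TIMESTAMP default_value expression for the column.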
@@ -928,16 +914,9 @@ public: } virtual void set_default(); - bool has_insert_default_function() const - { - return (unireg_check == TIMESTAMP_DN_FIELD || - unireg_check == TIMESTAMP_DNUN_FIELD); - } - bool has_update_default_function() const { - return (unireg_check == TIMESTAMP_UN_FIELD || - unireg_check == TIMESTAMP_DNUN_FIELD); + return unireg_check == TIMESTAMP_UN_FIELD; } /* @@ -2377,21 +2356,7 @@ public: void sql_type(String &str) const; bool zero_pack() const { return 0; } virtual int set_time(); - virtual void set_default() - { - if (has_insert_default_function()) - set_time(); - else - Field::set_default(); - } virtual void set_explicit_default(Item *value); - virtual int evaluate_insert_default_function() - { - int res= 0; - if (has_insert_default_function()) - res= set_time(); - return res; - } virtual int evaluate_update_default_function() { int res= 0; @@ -2821,20 +2786,6 @@ public: bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) { return Field_datetime::get_TIME(ltime, ptr, fuzzydate); } virtual int set_time(); - virtual void set_default() - { - if (has_insert_default_function()) - set_time(); - else - Field::set_default(); - } - virtual int evaluate_insert_default_function() - { - int res= 0; - if (has_insert_default_function()) - res= set_time(); - return res; - } virtual int evaluate_update_default_function() { int res= 0; @@ -3813,9 +3764,7 @@ public: bool has_default_function() const { - return (unireg_check == Field::TIMESTAMP_DN_FIELD || - unireg_check == Field::TIMESTAMP_DNUN_FIELD || - unireg_check == Field::TIMESTAMP_UN_FIELD || + return (unireg_check == Field::TIMESTAMP_UN_FIELD || unireg_check == Field::NEXT_NUMBER); } diff --git a/sql/item.cc b/sql/item.cc index 420e0df71bd..b955457cf32 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -956,8 +956,7 @@ bool Item_field::check_field_expression_processor(void *arg) { if (field->flags & NO_DEFAULT_VALUE_FLAG) return 0; - if ((field->default_value && field->default_value->flags) - || field->has_insert_default_function() || field->vcol_info) + if ((field->default_value && field->default_value->flags) || field->vcol_info) { Field *org_field= (Field*) arg; if (field == org_field || @@ -8258,7 +8257,7 @@ void Item_default_value::print(String *str, enum_query_type query_type) void Item_default_value::calculate() { - if (field->default_value || field->has_insert_default_function()) + if (field->default_value) field->set_default(); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 80287793175..13943ce8d3c 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2489,8 +2489,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) (*field)->default_value= vcol; *dfield_ptr++= *field; } - if ((*field)->has_insert_default_function() || - (*field)->has_update_default_function()) + else + if ((*field)->has_update_default_function()) *dfield_ptr++= *field; } if (vfield) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 1c9d75d06eb..a446e05d427 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -1634,20 +1634,11 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value, bool quoted) { bool has_default; - bool has_now_default; enum enum_field_types field_type= field->type(); - /* - We are using CURRENT_TIMESTAMP instead of NOW because it is - more standard - */ - has_now_default= field->has_insert_default_function(); - has_default= (field->default_value || (!(field->flags & NO_DEFAULT_VALUE_FLAG) && - field->unireg_check != Field::NEXT_NUMBER && - !((thd->variables.sql_mode & 
(MODE_MYSQL323 | MODE_MYSQL40)) - && has_now_default))); + field->unireg_check != Field::NEXT_NUMBER)); def_value->length(0); if (has_default) @@ -1662,17 +1653,14 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value, field->default_value->expr_str.length); def_value->append(')'); } + else if (field->unireg_check) + def_value->append(field->default_value->expr_str.str, + field->default_value->expr_str.length); else def_value->set(field->default_value->expr_str.str, field->default_value->expr_str.length, &my_charset_utf8mb4_general_ci); } - else if (has_now_default) - { - def_value->append(STRING_WITH_LEN("CURRENT_TIMESTAMP")); - if (field->decimals() > 0) - def_value->append_parenthesized(field->decimals()); - } else if (!field->is_null()) { // Not null by default char tmp[MAX_FIELD_WIDTH]; @@ -1704,13 +1692,13 @@ static bool get_field_default_value(THD *thd, Field *field, String *def_value, if (quoted) append_unescaped(def_value, def_val.ptr(), def_val.length()); else - def_value->append(def_val.ptr(), def_val.length()); + def_value->move(def_val); } else if (quoted) - def_value->append(STRING_WITH_LEN("''")); + def_value->set(STRING_WITH_LEN("''"), system_charset_info); } else if (field->maybe_null() && quoted) - def_value->append(STRING_WITH_LEN("NULL")); // Null as default + def_value->set(STRING_WITH_LEN("NULL"), system_charset_info); // Null as default else return 0; @@ -1797,8 +1785,8 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, List field_list; char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], def_value_buf[MAX_FIELD_WIDTH]; const char *alias; - String type(tmp, sizeof(tmp), system_charset_info); - String def_value(def_value_buf, sizeof(def_value_buf), system_charset_info); + String type; + String def_value; Field **ptr,*field; uint primary_key; KEY *key_info; @@ -1891,12 +1879,8 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" ")); append_identifier(thd,packet,field->field_name, strlen(field->field_name)); packet->append(' '); - // check for surprises from the previous call to Field::sql_type() - if (type.ptr() != tmp) - type.set(tmp, sizeof(tmp), system_charset_info); - else - type.set_charset(system_charset_info); + type.set(tmp, sizeof(tmp), system_charset_info); field->sql_type(type); packet->append(type.ptr(), type.length(), system_charset_info); @@ -1943,6 +1927,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" NULL")); } + def_value.set(def_value_buf, sizeof(def_value_buf), system_charset_info); if (get_field_default_value(thd, field, &def_value, 1)) { packet->append(STRING_WITH_LEN(" DEFAULT ")); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 364d8eda773..c9194bcb276 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -9670,8 +9670,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, Old fields keep their current values, and therefore should not be present in the set of autoupdate fields. 
*/ - if ((*ptr)->default_value || - ((*ptr)->has_insert_default_function())) + if ((*ptr)->default_value) { *(dfield_ptr++)= *ptr; ++to->s->default_fields; diff --git a/sql/table.cc b/sql/table.cc index 77736430fa3..cfa950f5f9c 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -66,7 +66,7 @@ LEX_STRING SLOW_LOG_NAME= {C_STRING_WITH_LEN("slow_log")}; Keyword added as a prefix when parsing the defining expression for a virtual column read from the column definition saved in the frm file */ -LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") }; +static LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") }; static int64 last_table_id; @@ -1551,6 +1551,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, LEX_STRING comment; Virtual_column_info *vcol_info= 0; uint gis_length, gis_decimals, srid= 0; + Field::utype unireg_check; if (new_frm_ver >= 3) { @@ -1766,22 +1767,36 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, swap_variables(uint, null_bit_pos, mysql57_vcol_null_bit_pos); } + /* Convert pre-10.2.2 timestamps to use Field::default_value */ + unireg_check= (Field::utype) MTYP_TYPENR(unireg_type); + if (unireg_check == Field::TIMESTAMP_DNUN_FIELD) + unireg_check= Field::TIMESTAMP_UN_FIELD; + if (unireg_check == Field::TIMESTAMP_DN_FIELD) + unireg_check= Field::NONE; + *field_ptr= reg_field= - make_field(share, &share->mem_root, record+recpos, - (uint32) field_length, - null_pos, null_bit_pos, - pack_flag, - field_type, - charset, - geom_type, srid, - (Field::utype) MTYP_TYPENR(unireg_type), - (interval_nr ? - share->intervals+interval_nr-1 : - (TYPELIB*) 0), + make_field(share, &share->mem_root, record+recpos, (uint32) field_length, + null_pos, null_bit_pos, pack_flag, field_type, charset, + geom_type, srid, unireg_check, + (interval_nr ? 
share->intervals+interval_nr-1 : NULL), share->fieldnames.type_names[i]); if (!reg_field) // Not supported field type goto err; + if (unireg_check != (Field::utype) MTYP_TYPENR(unireg_type)) + { + char buf[32]; + if (reg_field->decimals()) + my_snprintf(buf, sizeof(buf), "CURRENT_TIMESTAMP(%d)", reg_field->decimals()); + else + strmov(buf, "CURRENT_TIMESTAMP"); + + reg_field->default_value= new (&share->mem_root) Virtual_column_info(); + reg_field->default_value->stored_in_db= 1; + thd->make_lex_string(®_field->default_value->expr_str, buf, strlen(buf)); + share->default_expressions++; + } + reg_field->field_index= i; reg_field->comment=comment; reg_field->vcol_info= vcol_info; @@ -1821,13 +1836,12 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (share->stored_rec_length>=recpos) share->stored_rec_length= recpos-1; } - if (reg_field->has_insert_default_function()) - has_insert_default_function= 1; if (reg_field->has_update_default_function()) + { has_update_default_function= 1; - if (reg_field->has_insert_default_function() || - reg_field->has_update_default_function()) - share->default_fields++; + if (!reg_field->default_value) + share->default_fields++; + } } *field_ptr=0; // End marker /* Sanity checks: */ @@ -2213,16 +2227,19 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } case 1: // Generated stored field vcol_info->stored_in_db= 1; + DBUG_ASSERT(!reg_field->vcol_info); reg_field->vcol_info= vcol_info; share->virtual_fields++; share->virtual_stored_fields++; // For insert/load data break; case 2: // Default expression vcol_info->stored_in_db= 1; + DBUG_ASSERT(!reg_field->default_value); reg_field->default_value= vcol_info; share->default_expressions++; break; case 3: // Field check constraint + DBUG_ASSERT(!reg_field->check_constraint); reg_field->check_constraint= vcol_info; share->field_check_constraints++; break; @@ -2693,14 +2710,10 @@ Virtual_column_info *unpack_vcol_info_from_frm(THD *thd, vcol_expr->length + parse_vcol_keyword.length + 3))) DBUG_RETURN(0); - memcpy(vcol_expr_str, - (char*) parse_vcol_keyword.str, - parse_vcol_keyword.length); + memcpy(vcol_expr_str, parse_vcol_keyword.str, parse_vcol_keyword.length); str_len= parse_vcol_keyword.length; vcol_expr_str[str_len++]= '('; - memcpy(vcol_expr_str + str_len, - (char*) vcol_expr->str, - vcol_expr->length); + memcpy(vcol_expr_str + str_len, vcol_expr->str, vcol_expr->length); str_len+= vcol_expr->length; vcol_expr_str[str_len++]= ')'; vcol_expr_str[str_len++]= 0; @@ -3045,8 +3058,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, *(dfield_ptr++)= *field_ptr; } else - if ((field->has_insert_default_function() || - field->has_update_default_function())) + if (field->has_update_default_function()) *(dfield_ptr++)= *field_ptr; } @@ -6275,7 +6287,7 @@ void TABLE::mark_columns_needed_for_update() to compare records and detect data change. 
*/ if ((file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) && - default_field && has_default_function(true)) + default_field && s->has_update_default_function) bitmap_union(read_set, write_set); DBUG_VOID_RETURN; } @@ -6573,17 +6585,13 @@ void TABLE::mark_default_fields_for_write(bool is_insert) for (field_ptr= default_field; *field_ptr; field_ptr++) { field= (*field_ptr); - if (field->default_value) + if (is_insert && field->default_value) { - if (is_insert) - { - bitmap_set_bit(write_set, field->field_index); - field->default_value->expr_item-> - walk(&Item::register_field_in_read_map, 1, 0); - } + bitmap_set_bit(write_set, field->field_index); + field->default_value->expr_item-> + walk(&Item::register_field_in_read_map, 1, 0); } - else if ((is_insert && field->has_insert_default_function()) || - (!is_insert && field->has_update_default_function())) + else if (!is_insert && field->has_update_default_function()) bitmap_set_bit(write_set, field->field_index); } DBUG_VOID_RETURN; diff --git a/sql/table.h b/sql/table.h index 4a86fc455a2..af3990a6882 100644 --- a/sql/table.h +++ b/sql/table.h @@ -685,7 +685,6 @@ struct TABLE_SHARE bool virtual_stored_fields; bool check_set_initialized; bool has_update_default_function; - bool has_insert_default_function; ulong table_map_id; /* for row-based replication */ /* @@ -1311,18 +1310,6 @@ public: void mark_columns_used_by_check_constraints(void); void mark_check_constraint_columns_for_read(void); int verify_constraints(bool ignore_failure); - /** - Check if a table has a default function either for INSERT or UPDATE-like - operation - @retval true there is a default function - @retval false there is no default function - */ - inline bool has_default_function(bool is_update) - { - return (is_update ? - s->has_update_default_function : - s->has_insert_default_function); - } inline void column_bitmaps_set(MY_BITMAP *read_set_arg, MY_BITMAP *write_set_arg) { diff --git a/sql/unireg.cc b/sql/unireg.cc index add09411acb..d3a9b832aaf 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -1039,7 +1039,10 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, field->sql_type, field->charset, field->geom_type, field->srid, - field->unireg_check, + field->unireg_check == Field::TIMESTAMP_DNUN_FIELD + ? Field::TIMESTAMP_UN_FIELD + : field->unireg_check == Field::TIMESTAMP_DN_FIELD + ? Field::NONE : field->unireg_check, field->save_interval ? 
field->save_interval : field->interval, field->field_name); -- cgit v1.2.1 From 7450cb7f69db801c48f806748e666c393b8d6b81 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 25 Jul 2016 17:57:31 +0200 Subject: re-fix vcols on demand, not always for every SELECT --- sql/item.cc | 3 +++ sql/sql_base.cc | 4 ++-- sql/table.cc | 25 +++++++++++++++++++++++++ sql/table.h | 2 ++ 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index b955457cf32..5da95b05b2f 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5219,6 +5219,8 @@ bool Item_field::fix_fields(THD *thd, Item **reference) } #endif fixed= 1; + if (field->vcol_info) + fix_session_vcol_expr_for_read(thd, field, field->vcol_info); if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && !outer_fixed && !thd->lex->in_sum_func && thd->lex->current_select->cur_pos_in_select_list != UNDEF_POS && @@ -8231,6 +8233,7 @@ bool Item_default_value::fix_fields(THD *thd, Item **items) set_field(def_field); if (field->default_value) { + fix_session_vcol_expr_for_read(thd, field, field->default_value); if (thd->mark_used_columns != MARK_COLUMNS_NONE) field->default_value->expr_item->walk(&Item::register_field_in_read_map, 1, 0); IF_DBUG(def_field->is_stat_field=1,); // a hack to fool ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9d6f1dbed9e..d7812db53bd 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -4697,7 +4697,8 @@ static bool fix_all_session_vcol_exprs(THD *thd, TABLE_LIST *tables) table= table->next_global) { TABLE *t= table->table; - if (!table->placeholder() && t->s->vcols_need_refixing) + if (!table->placeholder() && t->s->vcols_need_refixing && + table->lock_type >= TL_WRITE_ALLOW_WRITE) { if (table->security_ctx) thd->security_ctx= table->security_ctx; @@ -4711,7 +4712,6 @@ static bool fix_all_session_vcol_exprs(THD *thd, TABLE_LIST *tables) fix_session_vcol_expr(thd, (*df)->default_value)) goto err; - if (table->lock_type >= TL_WRITE_ALLOW_WRITE) for (Virtual_column_info **cc= t->check_constraints; cc && *cc; cc++) if (fix_session_vcol_expr(thd, (*cc))) goto err; diff --git a/sql/table.cc b/sql/table.cc index cfa950f5f9c..ab9a1c07172 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2543,7 +2543,11 @@ static bool fix_vcol_expr(THD *thd, Virtual_column_info *vcol) DBUG_RETURN(0); } +/** rerun fix_fields for vcols that returns time- or session- dependent values + @note this is done for all vcols for INSERT/UPDATE/DELETE, + and only as needed for SELECTs. +*/ bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol) { DBUG_ENTER("fix_session_vcol_expr"); @@ -2556,6 +2560,27 @@ bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol) } +/** invoke fix_session_vcol_expr for a vcol + + @note this is called for generated column or a DEFAULT expression from + their corresponding fix_fields on SELECT. 
+*/ +bool fix_session_vcol_expr_for_read(THD *thd, Field *field, + Virtual_column_info *vcol) +{ + DBUG_ENTER("fix_session_vcol_expr_for_read"); + TABLE_LIST *tl= field->table->pos_in_table_list; + if (!tl || tl->lock_type >= TL_WRITE_ALLOW_WRITE) + DBUG_RETURN(0); + Security_context *save_security_ctx= thd->security_ctx; + if (tl->security_ctx) + thd->security_ctx= tl->security_ctx; + bool res= fix_session_vcol_expr(thd, vcol); + thd->security_ctx= save_security_ctx; + DBUG_RETURN(res); +} + + /* @brief Perform semantic analysis of the defining expression for a virtual column diff --git a/sql/table.h b/sql/table.h index af3990a6882..68e8cb9069d 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2620,6 +2620,8 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, uint ha_open_flags, TABLE *outparam, bool is_create_table); bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol); +bool fix_session_vcol_expr_for_read(THD *thd, Field *field, + Virtual_column_info *vcol); Virtual_column_info *unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root, TABLE *table, Field *field, -- cgit v1.2.1 From eac7e57529756a2a7f6c269775323323f7b2c706 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 25 Jul 2016 22:15:05 +0200 Subject: Feature_check_constraint status variable --- mysql-test/r/features.result | 7 +++++++ mysql-test/t/features.test | 8 ++++++++ sql/mysqld.cc | 3 ++- sql/sql_class.h | 3 +-- sql/table.cc | 1 + 5 files changed, 19 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/features.result b/mysql-test/r/features.result index 52650d118b3..615ab2368d9 100644 --- a/mysql-test/r/features.result +++ b/mysql-test/r/features.result @@ -3,6 +3,7 @@ set sql_mode=""; flush status; show status like "feature%"; Variable_name Value +Feature_check_constraint 0 Feature_delay_key_write 0 Feature_dynamic_columns 0 Feature_fulltext 0 @@ -158,3 +159,9 @@ drop table t1; show status like "feature_delay_key_write"; Variable_name Value Feature_delay_key_write 2 +create table t1 (a int check (a > 5)); +create table t2 (b int, constraint foo check (b < 10)); +drop table t1, t2; +show status like "feature_check_constraint"; +Variable_name Value +Feature_check_constraint 2 diff --git a/mysql-test/t/features.test b/mysql-test/t/features.test index 225ab40b361..63e923a772b 100644 --- a/mysql-test/t/features.test +++ b/mysql-test/t/features.test @@ -130,3 +130,11 @@ insert into t1 values(2); drop table t1; show status like "feature_delay_key_write"; + +# +# Feature CHECK CONSTRAINT +# +create table t1 (a int check (a > 5)); +create table t2 (b int, constraint foo check (b < 10)); +drop table t1, t2; +show status like "feature_check_constraint"; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index fa8f143335d..9b5fcbddd6c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -530,7 +530,7 @@ ulong extra_max_connections; uint max_digest_length= 0; ulong slave_retried_transactions; ulonglong slave_skipped_errors; -ulong feature_files_opened_with_delayed_keys; +ulong feature_files_opened_with_delayed_keys= 0, feature_check_constraint= 0; ulonglong denied_connections; my_decimal decimal_zero; @@ -8390,6 +8390,7 @@ SHOW_VAR status_vars[]= { {"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS}, {"Executed_events", (char*) &executed_events, SHOW_LONG_NOFLUSH }, {"Executed_triggers", (char*) offsetof(STATUS_VAR, executed_triggers), SHOW_LONG_STATUS}, + {"Feature_check_constraint", (char*) &feature_check_constraint, SHOW_LONG }, {"Feature_delay_key_write", (char*) 
&feature_files_opened_with_delayed_keys, SHOW_LONG }, {"Feature_dynamic_columns", (char*) offsetof(STATUS_VAR, feature_dynamic_columns), SHOW_LONG_STATUS}, {"Feature_fulltext", (char*) offsetof(STATUS_VAR, feature_fulltext), SHOW_LONG_STATUS}, diff --git a/sql/sql_class.h b/sql/sql_class.h index 6e71adf023a..b40af4a0937 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -823,8 +823,7 @@ typedef struct system_status_var Global status variables */ -extern ulong feature_files_opened_with_delayed_keys; - +extern ulong feature_files_opened_with_delayed_keys, feature_check_constraint; void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); diff --git a/sql/table.cc b/sql/table.cc index ab9a1c07172..9225b9d4f11 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2306,6 +2306,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, bitmap_count= 1; if (share->table_check_constraints) { + feature_check_constraint++; if (!(share->check_set= (MY_BITMAP*) alloc_root(&share->mem_root, sizeof(*share->check_set)))) goto err; -- cgit v1.2.1 From 7d115e73b8a0a9f5bc8160a0bcf966ea2283a829 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Sat, 27 Aug 2016 19:50:42 +0300 Subject: MDEV-10604 Create a list of unstable MTR tests to be disabled in distribution builds List of unstable tests for 10.1 --- mysql-test/unstable-tests | 113 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 107 insertions(+), 6 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 6a46602eb07..daba2ea8ae8 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,12 +23,14 @@ # ############################################################################## +main.analyze_stmt_slow_query_log : MDEV-7558 - wrong result main.bootstrap : Modified on 2016-06-18 (MDEV-9969) main.create_delayed : MDEV-10605 - failed with timeout main.create_or_replace : Modified on 2016-06-23 (MDEV-9728) main.ctype_recoding : Modified on 2016-06-10 (MDEV-10181) main.ctype_utf8 : Modified on 2016-06-21 (merge) main.ctype_utf8mb4 : Modified on 2016-06-21 (merge) +main.ctype_utf16le : MDEV-10675: timeout or extra warnings main.events_1 : Modified on 2016-06-21 (MDEV-9524) main.func_group : Modified on 2016-08-08 (MDEV-10468) main.func_in : Modified on 2016-06-20 (MDEV-10020) @@ -54,12 +56,14 @@ main.mysqldump : MDEV-10512 - sporadic assertion failure main.mysqltest : MDEV-9269 - fails on Alpha main.named_pipe : Modified on 2016-08-02 (MDEV-10383) main.openssl_1 : Modified on 2016-07-11 (MDEV-10211) +main.order_by_optimizer_innodb : MDEV-10683 - wrong execution plan main.parser : Modified on 2016-06-21 (merge) main.pool_of_threads : MDEV-10100 - sporadic error on detecting max connections main.ps_1general : Modified on 2016-07-12 (merge) main.range : Modified on 2016-08-10 (merge) main.range_mrr_icp : Modified on 2016-08-10 (merge) main.query_cache : MDEV-10611 - sporadic mutex problem +main.show_explain : MDEV-10674 - sporadic failure main.shutdown : MDEV-10612 - sporadic crashes main.sp-prelocking : Modified on 2016-08-10 (merge) main.sp-security : MDEV-10607 - sporadic "can't connect" @@ -68,11 +72,15 @@ main.ssl_ca : Modified on 2016-07-11 (MDEV-10211) main.ssl_compress : Modified on 2016-07-11 (MDEV-10211) main.ssl_timeout : Modified on 2016-07-11 (MDEV-10211) main.stat_tables_par_innodb : MDEV-10515 - sporadic wrong results +main.status : MDEV-8510 - sporadic wrong result main.status_user : Modified on 2016-06-20 (MDEV-8633) +main.subselect : Modified in 10.1.17 
main.subselect_innodb : MDEV-10614 - sporadic wrong results +main.subselect_sj_mat : Modified in 10.1.17 main.temp_table : Modified on 2016-06-18 (MDEV-8569) main.type_date : Modified on 2016-08-10 (merge) main.type_datetime : Modified on 2016-06-16 (MDEV-9374) +main.type_datetime_hires : MDEV-10687 - timeout main.view : Modified on 2016-08-10 (merge) main.xtradb_mrr : Modified on 2016-08-04 (MDEV-9946) @@ -97,6 +105,55 @@ connect.jdbc-postgresql : New test, added on 2016-07-13 #---------------------------------------------------------------- +galera.GAL-382 : New test, added in 10.1.17 +galera.MW-252 : New test, added in 10.1.17 +galera.MW-258 : New test, added in 10.1.17 +galera.MW-259 : New test, added in 10.1.17 +galera.MW-285 : New test, added in 10.1.17 +galera.MW-286 : New test, added in 10.1.17 +galera.MW-292 : New test, added in 10.1.17 +galera.MW-44 : New test, added in 10.1.17 +galera.galera#414 : New test, added in 10.1.17 +galera.galera_as_slave_autoinc : New test, added in 10.1.17 +galera.galera_as_slave_nonprim : Modified in 10.1.17 +galera.galera_bf_abort_flush_for_export : Modified in 10.1.17 +galera.galera_gcs_fc_limit : Modified in 10.1.17 +galera.galera_ist_recv_bind : New test, added in 10.1.17 +galera.galera_ist_restart_joiner : Modified in 10.1.17 +galera.galera_kill_ddl : Modified in 10.1.17 +galera.galera_parallel_simple : Modified in 10.1.17 +galera.galera_pc_ignore_sb : Modified in 10.1.17 +galera.galera_restart_nochanges : Modified in 10.1.17 +galera.galera_roles : Modified in 10.1.17 +galera.galera_rsu_wsrep_desync : Modified in 10.1.17 +galera.galera_split_brain : Modified in 10.1.17 +galera.galera_ssl_upgrade : Modified in 10.1.17 +galera.galera_suspend_slave : Modified in 10.1.17 +galera.galera_transaction_replay : Modified in 10.1.17 +galera.galera_var_dirty_reads : Modified in 10.1.17 +galera.galera_var_max_ws_rows : New test, added in 10.1.17 +galera.galera_var_max_ws_size : Modified in 10.1.17 +galera.mdev_10518 : New test, added in 10.1.17 +galera.mysql-wsrep#31 : Modified in 10.1.17 + +galera_3nodes.galera_certification_ccc : Modified in 10.1.17 +galera_3nodes.galera_innobackupex_backup : Modified in 10.1.17 +galera_3nodes.galera_ist_gcache_rollover : Modified in 10.1.17 +galera_3nodes.galera_pc_bootstrap : Modified in 10.1.17 +galera_3nodes.galera_pc_weight : Modified in 10.1.17 + +#---------------------------------------------------------------- + +encryption.create_or_replace : MDEV-9359 - Assertion failure +encryption.innodb-bad-key-shutdown : MDEV-9105 - valgrind warnings, assertion failures +encryption.innodb_encryption_discard_import : MDEV-9099 - warnings, errors, crash +encryption.innodb_encryption_filekeys : MDEV-9062 - timeouts +encryption.innodb_first_page : MDEV-10689 - crashes +encryption.innodb_onlinealter_encryption : MDEV-10099 - wrong results +encryption.innodb-page_encryption : MDEV-10641 - mutex problem + +#---------------------------------------------------------------- + federated.federatedx : MDEV-10617 - Wrong checksum, timeouts federated.federated_innodb : MDEV-10617, MDEV-10417 - Wrong checksum, timeouts, fails on Mips federated.federated_partition : MDEV-10417 - Fails on Mips @@ -106,18 +163,32 @@ federated.federated_transactions : MDEV-10617, MDEV-10417 - Wrong checksum, time funcs_1.processlist_priv_no_prot : Include file modified on 2016-07-12 (merge) funcs_1.processlist_priv_ps : Include file modified on 2016-07-12 (merge) +funcs_2.memory_charset : MDEV-10290 - timeout 
#---------------------------------------------------------------- innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.innodb-alter-table : MDEV-10619 - Testcase timeout innodb.innodb-alter-tempfile : Modified on 2016-08-09 (MDEV-10469) +innodb.innodb_blob_truncate : MDEV-10377 - Assertion failure +innodb.innodb-bug-14068765 : MDEV-9105 - valgrind warnings, assertion failures innodb.innodb_corrupt_bit : Modified on 2016-06-21 (merge) +innodb.innodb-bug-14084530 : MDEV-9105 - valgrind warnings, assertion failures innodb.innodb_bug30423 : MDEV-7311 - Wrong number of rows in the plan innodb.innodb-fk-warnings : Modified on 2016-07-18 (MDEV-8569) innodb.innodb-fkcheck : Modified on 2016-06-13 (MDEV-10083) -innodb.innodb-wl5522 : rdiff file modified on 2016-08-10 (merge) +innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem +innodb.innodb_stats : MDEV-10682 - wrong result +innodb.innodb_sys_semaphore_waits : MDEV-10331 - wrong result +innodb.innodb-wl5522 : MDEV-9105 - valgrind warnings, assertion failures +innodb.innodb-wl5522-1 : MDEV-9105 - valgrind warnings, assertion failures innodb.innodb-wl5522-debug-zip : MDEV-10427 - Warning: database page corruption +innodb.innodb-wl5522-zip : MDEV-9105 - valgrind warnings, assertion failures +innodb.xa_recovery : MDEV-10685 - warnings + +#---------------------------------------------------------------- + +innodb_zip.innodb_prefix_index_liftedlimit : MDEV-10686 - timeout #---------------------------------------------------------------- @@ -127,11 +198,15 @@ mroonga/storage.column_datetime_32bit_max : Wrong resul mroonga/storage.column_datetime_32bit_out_of_range : Wrong result on Alpha mroonga/storage.index_multiple_column_unique_date_32bit_equal : Wrong result on Alpha mroonga/storage.index_multiple_column_unique_date_order_32bit_desc : Wrong result on Alpha +mroonga/storage.index_multiple_column_unique_datetime_index_read : MDEV-8643 - valgrind warnings +mroonga/storage.repair_table_no_index_file : MDEV-9364 - wrong result #---------------------------------------------------------------- multi_source.gtid : MDEV-10620, MDEV-10417 - Timeout in wait condition, fails on Mips +multi_source.info_logs : MDEV-10042 - wrong result multi_source.multisource : MDEV-10417 - Fails on Mips +multi_source.reset_slave : MDEV-10690 - wrong result multi_source.simple : MDEV-4633 - Wrong slave status output multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_heartbeats @@ -146,6 +221,7 @@ perfschema.digest_table_full : Modified on 2016-06-21 (merge) perfschema.func_file_io : MDEV-5708 - fails for s390x perfschema.func_mutex : MDEV-5708 - fails for s390x perfschema.rpl_gtid_func : Modified on 2016-06-21 (merge) +perfschema.setup_actors : MDEV-10679 - rare crash perfschema.sizing_low : Modified on 2016-04-26 (5.6.30 merge) perfschema.socket_summary_by_event_name_func : MDEV-10622 - Socket summary tables do not match perfschema.start_server_low_digest : Modified on 2016-06-21 (merge) @@ -153,6 +229,7 @@ perfschema.statement_digest : Modified on 2016-06-21 (merge) perfschema.statement_digest_consumers : Modified on 2016-06-21 (merge) perfschema.statement_digest_long_query : Modified on 2016-06-21 (merge) perfschema.table_name : New test, added on 2016-04-26 (5.6.30 merge) +perfschema.threads_mysql : MDEV-10677 - sporadic wrong result #---------------------------------------------------------------- @@ -177,8 +254,11 @@ rpl.rpl_binlog_index : MDEV-9501 - Warning: failed registering rpl.rpl_checksum_cache : MDEV-10626 - 
Testcase timeout rpl.rpl_circular_for_4_hosts : MDEV-10627 - Testcase timeout rpl.rpl_ddl : MDEV-10417 - Fails on Mips +rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result +rpl.rpl_gtid_basic : MDEV-10681 - server startup problem rpl.rpl_gtid_crash : MDEV-9501 - Warning: failed registering on master rpl.rpl_gtid_master_promote : MDEV-10628 - Timeout in sync_with_master +rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown rpl.rpl_gtid_until : MDEV-10625 - warnings in error log rpl.rpl_ignore_table : Modified on 2016-06-22 @@ -189,10 +269,13 @@ rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips rpl.rpl_mdev6020 : MDEV-10630, MDEV-10417 - Timeouts, fails on Mips rpl.rpl_mdev6386 : MDEV-10631 - Wrong result on slave rpl.rpl_parallel : MDEV-10632, MDEV-10653 - Failures to sync, timeouts +rpl.rpl_parallel_optimistic : MDEV-10511 - timeout rpl.rpl_parallel_temptable : MDEV-10356 - Crash in close_thread_tables rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips rpl.rpl_row_drop_create_temp_table : MDEV-10626 - Testcase timeout +rpl.rpl_row_log_innodb : MDEV-10688 - Wrong result rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_show_slave_hosts : MDEV-10681 - server startup problem rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Wrong plugin status rpl.rpl_slave_grp_exec : MDEV-10514 - Unexpected deadlock rpl.rpl_switch_stm_row_mixed : MDEV-10611 - Wrong usage of mutex @@ -220,26 +303,40 @@ stress.ddl_innodb : MDEV-10635 - Testcase timeout sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu/s390x sys_vars.general_log_file_basic : Modified on 2016-08-09 (MDEV-10465) +sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout sys_vars.slow_query_log_file_basic : Modified on 2016-08-09 (MDEV-10465) +sys_vars.sysvars_innodb : MDEV-6958 - error-prone rdiffs +sys_vars.sysvars_server_embedded : MDEV-6958 - error-prone rdiffs sys_vars.innodb_buffer_pool_dump_pct_basic : MDEV-10651 - sporadic failure on file_exists +sys_vars.innodb_fatal_semaphore_wait_threshold : MDEV-10513 - crashes +sys_vars.sysvars_wsrep : Modified in 10.1.17 +sys_vars.wsrep_max_ws_size_basic : Modified in 10.1.17 #---------------------------------------------------------------- tokudb.background_job_manager : MDEV-10327 - Assertion failure on server shutdown +tokudb.cluster_filter : MDEV-10678 - Wrong execution plan +tokudb.cluster_filter_hidden : MDEV-10678 - Wrong execution plan tokudb.cluster_filter_unpack_varchar : MDEV-10636 - Wrong execution plan +tokudb.i_s_tokudb_lock_waits_released : Modified in 10.1.17 +tokudb.i_s_tokudb_locks_released : Modified in 10.1.17 tokudb.* : MDEV-9891 - massive crashes on shutdown + tokudb_alter_table.* : MDEV-9891 - massive crashes on shutdown + tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output tokudb_bugs.* : MDEV-9891 - massive crashes on shutdown + tokudb_parts.* : MDEV-9891 - massive crashes on shutdown + rpl-tokudb.* : MDEV-9891 - massive crashes on shutdown, also modified on 2016-06-10 (Merge) -tokudb/tokudb_add_index.* : MDEV-9891 - massive crashes on shutdown -tokudb/tokudb_backup.* : MDEV-9891 - massive crashes on shutdown -tokudb/tokudb_mariadb.* : MDEV-9891 - massive crashes on shutdown -tokudb/tokudb_sys_vars.* : MDEV-9891 - massive crashes on shutdown -tokudb/tokudb_rpl.* : MDEV-9891 - massive crashes on shutdown +tokudb_add_index.* : MDEV-9891 - massive crashes on shutdown +tokudb_backup.* : MDEV-9891 - massive 
crashes on shutdown +tokudb_mariadb.* : MDEV-9891 - massive crashes on shutdown +tokudb_sys_vars.* : MDEV-9891 - massive crashes on shutdown +tokudb_rpl.* : MDEV-9891 - massive crashes on shutdown #---------------------------------------------------------------- @@ -252,3 +349,7 @@ vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout #---------------------------------------------------------------- + +wsrep.* : MDEV-10041 - server crashes sporadically during bootstrap + + -- cgit v1.2.1 From 96e95b5465e2e3e629d532f24ebe166f979dd220 Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 29 Aug 2016 13:10:17 +0300 Subject: Better SHOW PROCESSLIST for replication - When waiting for events, start time is now counted from start of wait - Instead of having "Connect" as "Command" for all replication threads we now have: - Slave_IO for Slave thread reading relay log - Slave_SQL for slave executing SQL commands or distribution queries to Slave workers - Slave_worker for slave threads executin SQL commands in parallel replication --- include/mysql.h.pp | 5 ++++- include/mysql_com.h | 5 ++++- sql/rpl_parallel.cc | 13 ++++++++++++- sql/slave.cc | 2 ++ sql/sql_class.h | 8 +++++++- sql/sql_parse.cc | 6 +++--- 6 files changed, 32 insertions(+), 7 deletions(-) diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 0c06141df6c..b9018376876 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -10,7 +10,10 @@ enum enum_server_command COM_STMT_PREPARE, COM_STMT_EXECUTE, COM_STMT_SEND_LONG_DATA, COM_STMT_CLOSE, COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON, COM_MDB_GAP_BEG, - COM_MDB_GAP_END=253, + COM_MDB_GAP_END=250, + COM_SLAVE_WORKER, + COM_SLAVE_IO, + COM_SLAVE_SQL, COM_MULTI, COM_END }; diff --git a/include/mysql_com.h b/include/mysql_com.h index c65c5de7f66..8e7bf2337cc 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -105,7 +105,10 @@ enum enum_server_command COM_STMT_RESET, COM_SET_OPTION, COM_STMT_FETCH, COM_DAEMON, /* don't forget to update const char *command_name[] in sql_parse.cc */ COM_MDB_GAP_BEG, - COM_MDB_GAP_END=253, + COM_MDB_GAP_END=250, + COM_SLAVE_WORKER, + COM_SLAVE_IO, + COM_SLAVE_SQL, COM_MULTI, /* Must be last */ COM_END diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 18c83608cd3..a389dac1bba 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -307,6 +307,7 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco, &stage_waiting_for_prior_transaction_to_start_commit, old_stage); *did_enter_cond= true; + thd->set_time_for_next_stage(); do { if (thd->check_killed() && !rgi->worker_error) @@ -369,6 +370,7 @@ do_ftwrl_wait(rpl_group_info *rgi, thd->ENTER_COND(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry, &stage_waiting_for_ftwrl, old_stage); *did_enter_cond= true; + thd->set_time_for_next_stage(); do { if (entry->force_abort || rgi->worker_error) @@ -417,8 +419,11 @@ pool_mark_busy(rpl_parallel_thread_pool *pool, THD *thd) */ mysql_mutex_lock(&pool->LOCK_rpl_thread_pool); if (thd) + { thd->ENTER_COND(&pool->COND_rpl_thread_pool, &pool->LOCK_rpl_thread_pool, &stage_waiting_for_rpl_thread_pool, &old_stage); + thd->set_time_for_next_stage(); + } while (pool->busy) { if (thd && thd->check_killed()) @@ -534,6 +539,7 @@ rpl_pause_for_ftwrl(THD *thd) e->pause_sub_id= e->largest_started_sub_id; thd->ENTER_COND(&e->COND_parallel_entry, &e->LOCK_parallel_entry, &stage_waiting_for_ftwrl_threads_to_pause, &old_stage); + thd->set_time_for_next_stage(); while (e->pause_sub_id < 
(uint64)ULONGLONG_MAX && e->last_committed_sub_id < e->pause_sub_id && !err) @@ -995,7 +1001,6 @@ handle_rpl_parallel_thread(void *arg) */ thd->variables.tx_isolation= ISO_REPEATABLE_READ; - mysql_mutex_lock(&rpt->LOCK_rpl_thread); rpt->thd= thd; @@ -1005,8 +1010,10 @@ handle_rpl_parallel_thread(void *arg) rpt->running= true; mysql_cond_signal(&rpt->COND_rpl_thread); + thd->set_command(COM_SLAVE_WORKER); while (!rpt->stop) { + uint wait_count= 0; rpl_parallel_thread::queued_event *qev, *next_qev; thd->ENTER_COND(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread, @@ -1025,7 +1032,11 @@ handle_rpl_parallel_thread(void *arg) (rpt->current_owner && !in_event_group) || (rpt->current_owner && group_rgi->parallel_entry->force_abort) || rpt->stop)) + { + if (!wait_count++) + thd->set_time_for_next_stage(); mysql_cond_wait(&rpt->COND_rpl_thread, &rpt->LOCK_rpl_thread); + } rpt->dequeue1(events); thd->EXIT_COND(&old_stage); diff --git a/sql/slave.cc b/sql/slave.cc index 81597212c62..078307274b0 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4111,6 +4111,7 @@ connected: } DBUG_PRINT("info",("Starting reading binary log from master")); + thd->set_command(COM_SLAVE_IO); while (!io_slave_killed(mi)) { THD_STAGE_INFO(thd, stage_requesting_binlog_dump); @@ -4733,6 +4734,7 @@ pthread_handler_t handle_slave_sql(void *arg) /* Read queries from the IO/THREAD until this thread is killed */ + thd->set_command(COM_SLAVE_SQL); while (!sql_slave_killed(serial_rgi)) { THD_STAGE_INFO(thd, stage_reading_event_from_the_relay_log); diff --git a/sql/sql_class.h b/sql/sql_class.h index b40af4a0937..b8c9614a31f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3193,6 +3193,12 @@ public: } ulonglong current_utime() { return microsecond_interval_timer(); } + /* Tell SHOW PROCESSLIST to show time from this point */ + inline void set_time_for_next_stage() + { + utime_after_query= current_utime(); + } + /** Update server status after execution of a top level statement. 
Currently only checks if a query was slow, and assigns @@ -3202,7 +3208,7 @@ public: */ void update_server_status() { - utime_after_query= current_utime(); + set_time_for_next_stage(); if (utime_after_query > utime_after_lock + variables.long_query_time) server_status|= SERVER_QUERY_WAS_SLOW; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 5d698486737..c152984876e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -392,9 +392,9 @@ const LEX_STRING command_name[257]={ { 0, 0 }, //248 { 0, 0 }, //249 { 0, 0 }, //250 - { 0, 0 }, //251 - { 0, 0 }, //252 - { 0, 0 }, //253 + { C_STRING_WITH_LEN("Slave_worker") }, //251 + { C_STRING_WITH_LEN("Slave_IO") }, //252 + { C_STRING_WITH_LEN("Slave_SQL") }, //253 { C_STRING_WITH_LEN("Com_multi") }, //254 { C_STRING_WITH_LEN("Error") } // Last command number 255 }; -- cgit v1.2.1 From e139d971ecbffe00d9a5626fdcd69aa1b9ba0f2c Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 29 Aug 2016 13:11:34 +0300 Subject: Fixed compiler warning and failing test suite because system dependency --- mysql-test/suite/wsrep/t/mdev_10186.test | 1 + storage/maria/ma_extra.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/wsrep/t/mdev_10186.test b/mysql-test/suite/wsrep/t/mdev_10186.test index ec5e9462821..90665d3c97f 100644 --- a/mysql-test/suite/wsrep/t/mdev_10186.test +++ b/mysql-test/suite/wsrep/t/mdev_10186.test @@ -7,6 +7,7 @@ --echo # SELECT @@wsrep_on; +--replace_result /usr/lib64/ /usr/lib/ SELECT @@GLOBAL.wsrep_provider; SET @@GLOBAL.wsrep_cluster_address='gcomm://'; diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 50ad44f09a9..117a302b418 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -316,10 +316,10 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, /* Fall trough */ case HA_EXTRA_PREPARE_FOR_RENAME: { - DBUG_ASSERT(!share->temporary); my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP); my_bool save_global_changed; enum flush_type type; + DBUG_ASSERT(!share->temporary); /* This share, to have last_version=0, needs to save all its data/index blocks to disk if this is not for a DROP TABLE. Otherwise they would be -- cgit v1.2.1 From 00d84eada2806431fdd10f4dbcffdff450be2ee0 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 22 Jun 2016 11:17:44 +0200 Subject: MDEV-10045: Server crashes in Time_and_counter_tracker::incr_loops Do not set 'optimized' flag until whole optimization procedure is finished. --- sql/sql_select.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 96ac6f43c45..5dc50c92104 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1073,6 +1073,7 @@ JOIN::optimize_inner() DBUG_ENTER("JOIN::optimize"); do_send_rows = (unit->select_limit_cnt) ? 1 : 0; + DEBUG_SYNC(thd, "before_join_optimize"); THD_STAGE_INFO(thd, stage_optimizing); -- cgit v1.2.1 From a02642b66e06f95b80fa9ee592ba50eb61dc2f17 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 23 Jun 2016 17:50:07 +0200 Subject: MDEV-10017: Get unexpected `Empty Set` for correlated subquery with aggregate functions (part 1) Make aggregate function dependency visible. 
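For illustration, this is how the new dependency note surfaces in EXPLAIN EXTENDED; the query and the expected Note 1981 are taken verbatim from the updated subselect_mat result included below (table definitions t1, t2, t3 are those of that existing test and are not repeated here):

  explain extended
  select a from t1 group by a
  having a in (select c from t2
               where d >= some(select e from t3 where max(b)=e));
  # among the warnings, next to the existing Note 1276:
  # Note 1981 Aggregate function 'max()' of SELECT #3 belongs to SELECT #1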
--- mysql-test/r/func_group.result | 18 ++++++++++++++++++ mysql-test/r/subselect3.result | 1 + mysql-test/r/subselect3_jcl6.result | 1 + mysql-test/r/subselect_mat.result | 1 + mysql-test/r/subselect_sj_mat.result | 1 + mysql-test/t/func_group.test | 10 ++++++++++ sql/item_sum.cc | 10 ++++++++++ sql/share/errmsg-utf8.txt | 3 +++ 8 files changed, 45 insertions(+) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 318248459a5..dc3a0b60ad3 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -2379,5 +2379,23 @@ companynr AVG(fld1) avg1 avg2 37 9223372036854775805.0000 9223372036854775805 9223372036854775805 DROP TABLE t1; # +# case where aggregate resolved in the local SELECT +# but outer ones are checked +# +create table t10 (a int , b int, c int); +insert into t10 values (0,0,0),(1,1,1); +create table t11 as select * from t10; +create table t12 as select * from t10; +explain extended select a from t10 where c<3 or a in (select c from t12 union select max(t10.b) from t11 group by t11.c); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t10 ALL NULL NULL NULL NULL 2 100.00 Using where +2 DEPENDENT SUBQUERY t12 ALL NULL NULL NULL NULL 2 100.00 Using where +3 DEPENDENT UNION t11 ALL NULL NULL NULL NULL 2 100.00 Using temporary +NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL +Warnings: +Note 1276 Field or reference 'test.t10.b' of SELECT #3 was resolved in SELECT #1 +Note 1003 select `test`.`t10`.`a` AS `a` from `test`.`t10` where ((`test`.`t10`.`c` < 3) or <`test`.`t10`.`a`,`test`.`t10`.`b`>((`test`.`t10`.`a`,(select `test`.`t12`.`c` from `test`.`t12` where ((`test`.`t10`.`a`) = `test`.`t12`.`c`) union select max(`test`.`t10`.`b`) from `test`.`t11` group by `test`.`t11`.`c` having ((`test`.`t10`.`a`) = (max(`test`.`t10`.`b`))))))) +drop table t10,t11,t12; +# # End of 10.1 tests # diff --git a/mysql-test/r/subselect3.result b/mysql-test/r/subselect3.result index 0c03959a96a..31e0f66f961 100644 --- a/mysql-test/r/subselect3.result +++ b/mysql-test/r/subselect3.result @@ -890,6 +890,7 @@ ERROR 42S22: Unknown column 'c' in 'field list' SHOW WARNINGS; Level Code Message Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #2 +Note 1981 Aggregate function 'count()' of SELECT #3 belongs to SELECT #2 Note 1276 Field or reference 'test.t1.c' of SELECT #3 was resolved in SELECT #2 Error 1054 Unknown column 'c' in 'field list' DROP TABLE t1; diff --git a/mysql-test/r/subselect3_jcl6.result b/mysql-test/r/subselect3_jcl6.result index 415963af882..8d976f8ddf8 100644 --- a/mysql-test/r/subselect3_jcl6.result +++ b/mysql-test/r/subselect3_jcl6.result @@ -900,6 +900,7 @@ ERROR 42S22: Unknown column 'c' in 'field list' SHOW WARNINGS; Level Code Message Note 1276 Field or reference 'test.t1.a' of SELECT #3 was resolved in SELECT #2 +Note 1981 Aggregate function 'count()' of SELECT #3 belongs to SELECT #2 Note 1276 Field or reference 'test.t1.c' of SELECT #3 was resolved in SELECT #2 Error 1054 Unknown column 'c' in 'field list' DROP TABLE t1; diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index a34b7d28e2e..af3df9ec811 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -1139,6 +1139,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in 
SELECT #1 +Note 1981 Aggregate function 'max()' of SELECT #3 belongs to SELECT #1 Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <`test`.`t1`.`a`,`test`.`t1`.`b`,max(`test`.`t1`.`b`),max(`test`.`t1`.`b`)>((`test`.`t1`.`a`,(select `test`.`t2`.`c` from `test`.`t2` where ((<`test`.`t2`.`d`,`test`.`t1`.`b`,max(`test`.`t1`.`b`),max(`test`.`t1`.`b`)>((`test`.`t2`.`d`,(select `test`.`t3`.`e` from `test`.`t3` where (max(`test`.`t1`.`b`) = `test`.`t3`.`e`) having ((`test`.`t2`.`d`) >= (`test`.`t3`.`e`)))))) and ((`test`.`t1`.`a`) = `test`.`t2`.`c`))))) select a from t1 group by a having a in (select c from t2 where d >= some(select e from t3 where max(b)=e)); diff --git a/mysql-test/r/subselect_sj_mat.result b/mysql-test/r/subselect_sj_mat.result index 93fdcf1ccca..9aa223b83f2 100644 --- a/mysql-test/r/subselect_sj_mat.result +++ b/mysql-test/r/subselect_sj_mat.result @@ -1173,6 +1173,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 3 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 4 100.00 Using where Warnings: Note 1276 Field or reference 'test.t1.b' of SELECT #3 was resolved in SELECT #1 +Note 1981 Aggregate function 'max()' of SELECT #3 belongs to SELECT #1 Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` group by `test`.`t1`.`a` having <`test`.`t1`.`a`,`test`.`t1`.`b`,max(`test`.`t1`.`b`),max(`test`.`t1`.`b`)>((`test`.`t1`.`a`,(select `test`.`t2`.`c` from `test`.`t2` where ((<`test`.`t2`.`d`,`test`.`t1`.`b`,max(`test`.`t1`.`b`),max(`test`.`t1`.`b`)>((`test`.`t2`.`d`,(select `test`.`t3`.`e` from `test`.`t3` where (max(`test`.`t1`.`b`) = `test`.`t3`.`e`) having ((`test`.`t2`.`d`) >= (`test`.`t3`.`e`)))))) and ((`test`.`t1`.`a`) = `test`.`t2`.`c`))))) select a from t1 group by a having a in (select c from t2 where d >= some(select e from t3 where max(b)=e)); diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 07a99f1b827..69a4dc1fddc 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1647,6 +1647,16 @@ INSERT INTO t1 VALUES (3,0x7FFFFFFFFFFFFFFC,37); SELECT companynr, AVG(fld1), AVG(fld1)<<0 AS avg1, CAST(AVG(fld1) AS UNSIGNED)<<0 AS avg2 FROM t1 GROUP BY companynr; DROP TABLE t1; +--echo # +--echo # case where aggregate resolved in the local SELECT +--echo # but outer ones are checked +--echo # +create table t10 (a int , b int, c int); +insert into t10 values (0,0,0),(1,1,1); +create table t11 as select * from t10; +create table t12 as select * from t10; +explain extended select a from t10 where c<3 or a in (select c from t12 union select max(t10.b) from t11 group by t11.c); +drop table t10,t11,t12; --echo # --echo # End of 10.1 tests --echo # diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 5220521b95a..540eefcc79a 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -380,6 +380,16 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl->master_unit()->item->with_sum_func= 1; } thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); + + if ((thd->lex->describe & DESCRIBE_EXTENDED) && aggr_sel) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_WARN_AGGFUNC_DEPENDENCE, + ER_THD(thd, ER_WARN_AGGFUNC_DEPENDENCE), + func_name(), + thd->lex->current_select->select_number, + aggr_sel->select_number); + } return FALSE; } diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 709ac55b28c..a6eaf2b27bd 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7139,3 +7139,6 @@ 
ER_KILL_QUERY_DENIED_ERROR ER_NO_EIS_FOR_FIELD eng "Engine-independent statistics are not collected for column '%s'" ukr "Ðезалежна від типу таблиці ÑтатиÑтика не збираєтьÑÑ Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%s'" +ER_WARN_AGGFUNC_DEPENDENCE + eng "Aggregate function '%-.192s)' of SELECT #%d belongs to SELECT #%d" + ukr "Ðгрегатна Ñ„ÑƒÐ½ÐºÑ†Ñ–Ñ '%-.192s)' з SELECTу #%d належить до SELECTу #%d" -- cgit v1.2.1 From 1c91569274219ac612fcfdea4764311e2377977c Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 29 Aug 2016 20:28:06 +0300 Subject: Set server_id to 1 by default and disallow to set it to 0 This makes it easier to setup master as on only have to set --log-bin. Before this patch if one did set up the master with just --log-bin, slaves could not connect until server_id was set on the master, which could be both confusing and hard to do. --- mysql-test/r/mysqld--help.result | 4 ++-- mysql-test/r/variables.result | 6 ++++-- mysql-test/suite/sys_vars/r/server_id_basic.result | 18 +++++++++++------- .../suite/sys_vars/r/sysvars_server_notembedded.result | 8 ++++---- mysql-test/suite/sys_vars/t/server_id_basic.test | 2 +- sql/mysqld.cc | 14 -------------- sql/mysqld.h | 2 +- sql/sql_repl.cc | 12 ------------ sql/sql_repl.h | 1 - sql/sys_vars.cc | 3 +-- 10 files changed, 24 insertions(+), 46 deletions(-) diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index b0e89b7f02d..0d52a50eb56 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -1339,7 +1339,7 @@ performance-schema-max-rwlock-instances -1 performance-schema-max-socket-classes 10 performance-schema-max-socket-instances -1 performance-schema-max-stage-classes 150 -performance-schema-max-statement-classes 181 +performance-schema-max-statement-classes 184 performance-schema-max-table-handles -1 performance-schema-max-table-instances -1 performance-schema-max-thread-classes 50 @@ -1384,7 +1384,7 @@ rowid-merge-buff-size 8388608 safe-user-create FALSE secure-auth TRUE secure-file-priv (No default value) -server-id 0 +server-id 1 show-slave-auth-info FALSE silent-startup FALSE skip-grant-tables TRUE diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index d8f8c0c03f7..b78b3caa58d 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -1466,15 +1466,17 @@ SELECT @@GLOBAL.server_id; @@GLOBAL.server_id 4294967295 SET GLOBAL server_id = 0; +Warnings: +Warning 1292 Truncated incorrect server_id value: '0' SELECT @@GLOBAL.server_id; @@GLOBAL.server_id -0 +1 SET GLOBAL server_id = -1; Warnings: Warning 1292 Truncated incorrect server_id value: '-1' SELECT @@GLOBAL.server_id; @@GLOBAL.server_id -0 +1 SET GLOBAL server_id = @old_server_id; # # BUG#10206 - InnoDB: Transaction requiring Max_BinLog_Cache_size > 4GB always rollsback diff --git a/mysql-test/suite/sys_vars/r/server_id_basic.result b/mysql-test/suite/sys_vars/r/server_id_basic.result index 4d82d8f102a..a2b117c1332 100644 --- a/mysql-test/suite/sys_vars/r/server_id_basic.result +++ b/mysql-test/suite/sys_vars/r/server_id_basic.result @@ -8,17 +8,19 @@ SET @@global.server_id = 500000; SET @@global.server_id = DEFAULT; SELECT @@global.server_id; @@global.server_id -0 +1 '#--------------------FN_DYNVARS_144_02-------------------------#' SET @@global.server_id = DEFAULT; -SELECT @@global.server_id = 0; -@@global.server_id = 0 +SELECT @@global.server_id = 1; +@@global.server_id = 1 1 '#--------------------FN_DYNVARS_144_03-------------------------#' SET @@global.server_id = 0; +Warnings: +Warning 
1292 Truncated incorrect server_id value: '0' SELECT @@global.server_id; @@global.server_id -0 +1 SET @@global.server_id = 1; SELECT @@global.server_id; @@global.server_id @@ -62,13 +64,13 @@ Warnings: Warning 1292 Truncated incorrect server_id value: '-1' SELECT @@global.server_id; @@global.server_id -0 +1 SET @@global.server_id = -2147483648; Warnings: Warning 1292 Truncated incorrect server_id value: '-2147483648' SELECT @@global.server_id; @@global.server_id -0 +1 SET @@global.server_id = 2147483649*2; Warnings: Warning 1292 Truncated incorrect server_id value: '4294967298' @@ -102,9 +104,11 @@ SELECT @@global.server_id; @@global.server_id 1 SET @@global.server_id = FALSE; +Warnings: +Warning 1292 Truncated incorrect server_id value: '0' SELECT @@global.server_id; @@global.server_id -0 +1 '#---------------------FN_DYNVARS_001_09----------------------#' SET @@global.server_id = 512; SELECT @@server_id = @@global.server_id; diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 371fd9e3918..4e98bc9f102 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3007,9 +3007,9 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES SESSION_VALUE NULL -GLOBAL_VALUE 181 +GLOBAL_VALUE 184 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 181 +DEFAULT_VALUE 184 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Maximum number of statement instruments. @@ -3779,11 +3779,11 @@ VARIABLE_NAME SERVER_ID SESSION_VALUE 1 GLOBAL_VALUE 1 GLOBAL_VALUE_ORIGIN CONFIG -DEFAULT_VALUE 0 +DEFAULT_VALUE 1 VARIABLE_SCOPE SESSION VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners -NUMERIC_MIN_VALUE 0 +NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 4294967295 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL diff --git a/mysql-test/suite/sys_vars/t/server_id_basic.test b/mysql-test/suite/sys_vars/t/server_id_basic.test index 6940270c12e..c2dfa4c5f9d 100644 --- a/mysql-test/suite/sys_vars/t/server_id_basic.test +++ b/mysql-test/suite/sys_vars/t/server_id_basic.test @@ -54,7 +54,7 @@ SELECT @@global.server_id; ################################################################### SET @@global.server_id = DEFAULT; -SELECT @@global.server_id = 0; +SELECT @@global.server_id = 1; --echo '#--------------------FN_DYNVARS_144_03-------------------------#' ################################################################################## diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 9b5fcbddd6c..8fa8f01b894 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -400,7 +400,6 @@ bool opt_error_log= IF_WIN(1,0); bool opt_disable_networking=0, opt_skip_show_db=0; bool opt_skip_name_resolve=0; my_bool opt_character_set_client_handshake= 1; -bool server_id_supplied = 0; bool opt_endinfo, using_udf_functions; my_bool locked_in_memory; bool opt_using_transactions; @@ -5831,17 +5830,6 @@ int mysqld_main(int argc, char **argv) if (WSREP_ON && wsrep_check_opts()) global_system_variables.wsrep_on= 0; - if (opt_bin_log && !global_system_variables.server_id) - { - SYSVAR_AUTOSIZE(global_system_variables.server_id, ::server_id= 1); -#ifdef EXTRA_DEBUG - sql_print_warning("You have enabled the binary log, but you haven't set " - "server-id to a non-zero value: we force server id to 1; " - "updates will be logged to the binary log, but " - 
"connections from slaves will not be accepted."); -#endif - } - /* The subsequent calls may take a long time : e.g. innodb log read. Thus set the long running service control manager timeout @@ -8707,7 +8695,6 @@ static int mysql_init_variables(void) mqh_used= 0; kill_in_progress= 0; cleanup_done= 0; - server_id_supplied= 0; test_flags= select_errors= dropping_tables= ha_open_options=0; thread_count= thread_running= kill_cached_threads= wake_thread= 0; service_thread_count= 0; @@ -9176,7 +9163,6 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument) opt_noacl=opt_bootstrap=1; break; case OPT_SERVER_ID: - server_id_supplied = 1; ::server_id= global_system_variables.server_id; break; case OPT_LOWER_CASE_TABLE_NAMES: diff --git a/sql/mysqld.h b/sql/mysqld.h index 6783eeff810..846a01a9427 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -107,7 +107,7 @@ extern CHARSET_INFO *error_message_charset_info; extern CHARSET_INFO *character_set_filesystem; extern MY_BITMAP temp_pool; -extern bool opt_large_files, server_id_supplied; +extern bool opt_large_files; extern bool opt_update_log, opt_bin_log, opt_error_log; extern my_bool opt_log, opt_bootstrap; extern my_bool opt_backup_history_log; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 36f0cd84cbf..6ece9b793c9 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -2116,12 +2116,6 @@ static int init_binlog_sender(binlog_send_info *info, info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG; return 1; } - if (!server_id_supplied) - { - info->errmsg= "Misconfigured master - server id was not set"; - info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG; - return 1; - } char search_file_name[FN_REFLEN]; const char *name=search_file_name; @@ -3072,12 +3066,6 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) if (init_master_info(mi,master_info_file_tmp,relay_log_info_file_tmp, 0, thread_mask)) slave_errno=ER_MASTER_INFO; - else if (!server_id_supplied) - { - slave_errno= ER_BAD_SLAVE; net_report= 0; - my_message(slave_errno, "Misconfigured slave: server_id was not set; Fix in config file", - MYF(0)); - } else if (!*mi->host) { slave_errno= ER_BAD_SLAVE; net_report= 0; diff --git a/sql/sql_repl.h b/sql/sql_repl.h index e2000bbca73..4105bdddf4e 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -36,7 +36,6 @@ struct slave_connection_state; extern my_bool opt_show_slave_auth_info; extern char *master_host, *master_info_file; -extern bool server_id_supplied; extern int max_binlog_dump_events; extern my_bool opt_sporadic_binlog_dump_fail; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index db054a635af..4047b5d6781 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -2850,7 +2850,6 @@ static bool fix_server_id(sys_var *self, THD *thd, enum_var_type type) { if (type == OPT_GLOBAL) { - server_id_supplied = 1; thd->variables.server_id= global_system_variables.server_id; /* Historically, server_id was a global variable that is exported to @@ -2867,7 +2866,7 @@ static Sys_var_ulong Sys_server_id( "Uniquely identifies the server instance in the community of " "replication partners", SESSION_VAR(server_id), CMD_LINE(REQUIRED_ARG, OPT_SERVER_ID), - VALID_RANGE(0, UINT_MAX32), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD, + VALID_RANGE(1, UINT_MAX32), DEFAULT(1), BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_has_super), ON_UPDATE(fix_server_id)); static Sys_var_mybool Sys_slave_compressed_protocol( -- cgit v1.2.1 From bbfb5d7c749dc9879dd2d05e94891b8b4be7338f Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 29 Aug 2016 
20:33:25 +0300 Subject: Fixed failures for privilege_table_io and wsrep_cluster_address_basic - Sometimes privilege_table_io printed double rows of roles_mapping - Fixed by forcing restart of server when running test - Wsrep_cluster_address_basic failed in some combinations because wsrep_cluster_address was set to NULL - Fixed by ensuring it's never set to NULL, only empty string --- mysql-test/suite/perfschema/t/privilege_table_io.test | 3 +++ sql/wsrep_mysqld.cc | 4 ++-- sql/wsrep_var.cc | 12 ++++++------ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/mysql-test/suite/perfschema/t/privilege_table_io.test b/mysql-test/suite/perfschema/t/privilege_table_io.test index 833fe85e8bb..35c49bf33fb 100644 --- a/mysql-test/suite/perfschema/t/privilege_table_io.test +++ b/mysql-test/suite/perfschema/t/privilege_table_io.test @@ -1,7 +1,10 @@ # Tests for PERFORMANCE_SCHEMA table io +# This test makes a restart because without it, events_waits_history_long +# sometimes contains duplicated entries of roles_mapping. --source include/not_embedded.inc --source include/have_perfschema.inc +--source include/restart_mysqld.inc --source ../include/table_io_setup_helper.inc # Remove "deleted" records from mysql.db that may come from previous tests, diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index fe52ca2c49d..75e15db0b8d 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -805,7 +805,7 @@ void wsrep_init_startup (bool first) (wsrep_on_fun)wsrep_on); /* Skip replication start if no cluster address */ - if (!wsrep_cluster_address || strlen(wsrep_cluster_address) == 0) return; + if (!wsrep_cluster_address || wsrep_cluster_address[0] == 0) return; if (first) wsrep_sst_grab(); // do it so we can wait for SST below @@ -918,7 +918,7 @@ bool wsrep_start_replication() return true; } - if (!wsrep_cluster_address || strlen(wsrep_cluster_address)== 0) + if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0) { // if provider is non-trivial, but no address is specified, wait for address wsrep_ready_set(FALSE); diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index f5e236d3d4e..318b35158fe 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -359,11 +359,11 @@ bool wsrep_cluster_address_check (sys_var *self, THD* thd, set_var* var) (var->save_result.string_value.length > (FN_REFLEN - 1))) // safety goto err; - memcpy(addr_buf, var->save_result.string_value.str, - var->save_result.string_value.length); - addr_buf[var->save_result.string_value.length]= 0; + strmake(addr_buf, var->save_result.string_value.str, + sizeof(addr_buf)-1); - if (!wsrep_cluster_address_verify(addr_buf)) return 0; + if (!wsrep_cluster_address_verify(addr_buf)) + return 0; err: my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name.str, @@ -421,8 +421,8 @@ void wsrep_cluster_address_init (const char* value) (wsrep_cluster_address) ? wsrep_cluster_address : "null", (value) ? value : "null"); - if (wsrep_cluster_address) my_free ((void*)wsrep_cluster_address); - wsrep_cluster_address = (value) ? my_strdup(value, MYF(0)) : NULL; + my_free((void*) wsrep_cluster_address); + wsrep_cluster_address= my_strdup(value ? value : "", MYF(0)); } /* wsrep_cluster_name cannot be NULL or an empty string. */ -- cgit v1.2.1 From 9ac235ab7ddaefb2191a03d3e9cb025d584e3c36 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 29 Aug 2016 22:45:17 -0700 Subject: mdev-9864: cleanup, re-factoring. Added comments. Added reaction for exceeding maximum number of elements in with clause. Added a test case to check this reaction. 
Added a test case where the specification of a recursive table uses two non-recursive with tables. --- mysql-test/r/cte_nonrecursive.result | 3 + mysql-test/r/cte_recursive.result | 122 ++++++++++++++++++++++++++++------- mysql-test/t/cte_nonrecursive.test | 14 ++++ mysql-test/t/cte_recursive.test | 117 ++++++++++++++++++++++++++++----- sql/share/errmsg-utf8.txt | 2 + sql/sql_class.h | 3 + sql/sql_cte.cc | 32 +++++++++ sql/sql_cte.h | 12 +--- sql/sql_derived.cc | 90 +++++++++++++++++--------- sql/sql_union.cc | 25 ++++++- 10 files changed, 341 insertions(+), 79 deletions(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index 7481f26591f..c9552f662a1 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -797,6 +797,9 @@ select t1.b from t2,t1 where t1.a = t2.c; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t2 ALL NULL NULL NULL NULL 4 1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join) +# too many with elements in with clause +with s65 as (select * from t1), s64 as (select * from t1) , s63 as (select * from t1) , s62 as (select * from t1) , s61 as (select * from t1) , s60 as (select * from t1) , s59 as (select * from t1) , s58 as (select * from t1) , s57 as (select * from t1) , s56 as (select * from t1) , s55 as (select * from t1) , s54 as (select * from t1) , s53 as (select * from t1) , s52 as (select * from t1) , s51 as (select * from t1) , s50 as (select * from t1) , s49 as (select * from t1) , s48 as (select * from t1) , s47 as (select * from t1) , s46 as (select * from t1) , s45 as (select * from t1) , s44 as (select * from t1) , s43 as (select * from t1) , s42 as (select * from t1) , s41 as (select * from t1) , s40 as (select * from t1) , s39 as (select * from t1) , s38 as (select * from t1) , s37 as (select * from t1) , s36 as (select * from t1) , s35 as (select * from t1) , s34 as (select * from t1) , s33 as (select * from t1) , s32 as (select * from t1) , s31 as (select * from t1) , s30 as (select * from t1) , s29 as (select * from t1) , s28 as (select * from t1) , s27 as (select * from t1) , s26 as (select * from t1) , s25 as (select * from t1) , s24 as (select * from t1) , s23 as (select * from t1) , s22 as (select * from t1) , s21 as (select * from t1) , s20 as (select * from t1) , s19 as (select * from t1) , s18 as (select * from t1) , s17 as (select * from t1) , s16 as (select * from t1) , s15 as (select * from t1) , s14 as (select * from t1) , s13 as (select * from t1) , s12 as (select * from t1) , s11 as (select * from t1) , s10 as (select * from t1) , s9 as (select * from t1) , s8 as (select * from t1) , s7 as (select * from t1) , s6 as (select * from t1) , s5 as (select * from t1) , s4 as (select * from t1) , s3 as (select * from t1) , s2 as (select * from t1) , s1 as (select * from t1) select * from s65; +ERROR HY000: Too many WITH elements in WITH clause drop table t1,t2; # # Bug mdev-9937: View used in the specification of with table diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index f6ab5ee5dd7..5769948203e 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -160,6 +160,8 @@ insert into folks values (6, 'Grandgrandma Martha', '1923-05-17', null, null), (67, 'Cousin Eddie', '1992-02-28', 25, 27), (27, 'Auntie Melinda', '1971-03-29', null, null); +# simple recursion with one anchor and one recursive select +# the anchor is the first select in the 
specification with recursive ancestors as @@ -182,6 +184,8 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL +# simple recursion with one anchor and one recursive select +# the anchor is the last select in the specification with recursive ancestors as @@ -204,6 +208,8 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL +# simple recursion with one anchor and one recursive select +# the anchor is the first select in the specification with recursive ancestors as @@ -224,29 +230,7 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL -with recursive -ancestors -as -( -select * -from folks -where name = 'Me' or name='Sister Amy' - union -select p.* -from folks as p, ancestors as a -where p.id = a.father or p.id = a.mother -) -select * from ancestors; -id name dob father mother -100 Me 2000-01-01 20 30 -98 Sister Amy 2001-06-20 20 30 -20 Dad 1970-02-02 10 9 -30 Mom 1975-03-03 8 7 -10 Grandpa Bill 1940-04-05 NULL NULL -9 Grandma Ann 1941-10-15 NULL NULL -7 Grandma Sally 1943-08-23 NULL 6 -8 Grandpa Ben 1940-10-21 NULL NULL -6 Grandgrandma Martha 1923-05-17 NULL NULL +# two recursive definition, one uses another with recursive prev_gen as @@ -282,6 +266,50 @@ Grandma Ann 1941-10-15 Grandma Sally 1943-08-23 Grandpa Ben 1940-10-21 Grandgrandma Martha 1923-05-17 +# recursive definition with two attached non-recursive +with recursive +ancestors(id,name,dob) +as +( +with +father(child_id,id,name,dob) +as +( +select folks.id, f.id, f.name, f.dob +from folks, folks f +where folks.father=f.id +), +mother(child_id,id,name,dob) +as +( +select folks.id, m.id, m.name, m.dob +from folks, folks m +where folks.mother=m.id +) +select folks.id, folks.name, folks.dob +from folks +where name='Me' + union +select f.id, f.name, f.dob +from ancestors a, father f +where f.child_id=a.id +union +select m.id, m.name, m.dob +from ancestors a, mother m +where m.child_id=a.id +) +select ancestors.name, ancestors.dob from ancestors; +name dob +Me 2000-01-01 +Dad 1970-02-02 +Mom 1975-03-03 +Grandpa Bill 1940-04-05 +Grandpa Ben 1940-10-21 +Grandma Ann 1941-10-15 +Grandma Sally 1943-08-23 +Grandgrandma Martha 1923-05-17 +# simple recursion with one anchor and one recursive select +# the anchor is the first select in the specification with recursive descendants as @@ -300,6 +328,8 @@ id name dob father mother 20 Dad 1970-02-02 10 9 100 Me 2000-01-01 20 30 98 Sister Amy 2001-06-20 20 30 +# simple recursion with one anchor and one recursive select +# the anchor is the first select in the specification with recursive descendants as @@ -320,6 +350,7 @@ id name dob father mother 100 Me 2000-01-01 20 30 98 Sister Amy 2001-06-20 20 30 67 Cousin Eddie 1992-02-28 25 27 +# simple recursive table used three times in the main query with recursive ancestors as @@ -340,6 +371,7 @@ id name dob father mother id name dob father mother 20 Dad 1970-02-02 10 9 30 Mom 1975-03-03 8 7 10 Grandpa Bill 1940-04-05 NULL NULL 9 Grandma Ann 1941-10-15 NULL NULL 8 Grandpa Ben 1940-10-21 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 +# simple recursive table used three times in the main query with ancestor_couples(husband, h_dob, wife, w_dob) as @@ -366,6 +398,7 @@ husband h_dob wife w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma 
Sally 1943-08-23 +# simple recursion with two selects in recursive part with recursive ancestors as @@ -392,6 +425,7 @@ id name dob father mother 9 Grandma Ann 1941-10-15 NULL NULL 7 Grandma Sally 1943-08-23 NULL 6 6 Grandgrandma Martha 1923-05-17 NULL NULL +# mutual recursion with renaming with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -421,6 +455,7 @@ h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# mutual recursion with union all with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -450,6 +485,37 @@ h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# mutual recursion with renaming +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, +w_id, w_name, w_dob, w_father, w_mother) +as +( +select h.*, w.* +from folks h, folks w, coupled_ancestors a +where a.father = h.id AND a.mother = w.id +union +select h.*, w.* +from folks v, folks h, folks w +where v.name = 'Me' and +(v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( +select h_id, h_name, h_dob, h_father, h_mother +from ancestor_couples +union +select w_id, w_name, w_dob, w_father, w_mother +from ancestor_couples +) +select h_name, h_dob, w_name, w_dob +from ancestor_couples; +h_name h_dob w_name w_dob +Dad 1970-02-02 Mom 1975-03-03 +Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 +Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# mutual recursion with union all with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -478,6 +544,7 @@ h_name h_dob w_name w_dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# mutual recursion with one select in the first definition with recursive ancestor_couple_ids(h_id, w_id) as @@ -507,6 +574,7 @@ h_id w_id 20 30 10 9 8 7 +# join of a mutually recursive table with base tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -537,6 +605,7 @@ name dob name dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# join of two mutually recursive tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -607,6 +676,7 @@ NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL 2 UNCACHEABLE SUBQUERY ALL NULL NULL NULL NULL 12 100.00 Using where Warnings: Note 1003 with recursive ancestor_couple_ids as (select `a`.`father` AS `h_id`,`a`.`mother` AS `w_id` from `coupled_ancestors` `a` where ((`a`.`father` is not null) and (`a`.`mother` is not null)))coupled_ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where (`test`.`folks`.`name` = 'Me') union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestor_couple_ids` `fa` where (`test`.`p`.`id` = `fa`.`h_id`) union all select `test`.`p`.`id` AS `id`,`test`.`p`.`name` AS `name`,`test`.`p`.`dob` AS `dob`,`test`.`p`.`father` AS `father`,`test`.`p`.`mother` AS `mother` 
from `test`.`folks` `p` join `ancestor_couple_ids` `ma` where (`test`.`p`.`id` = `ma`.`w_id`)), select `h`.`name` AS `name`,`h`.`dob` AS `dob`,`w`.`name` AS `name`,`w`.`dob` AS `dob` from `ancestor_couple_ids` `c` join `coupled_ancestors` `h` join `coupled_ancestors` `w` where ((`h`.`id` = `c`.`h_id`) and (`w`.`id` = `c`.`w_id`)) +# simple mutual recursion with recursive ancestor_couple_ids(h_id, w_id) as @@ -640,6 +710,7 @@ NULL NULL NULL NULL NULL 6 NULL NULL +# join of two mutually recursive tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -669,6 +740,7 @@ name dob name dob Dad 1970-02-02 Mom 1975-03-03 Grandpa Bill 1940-04-05 Grandma Ann 1941-10-15 Grandpa Ben 1940-10-21 Grandma Sally 1943-08-23 +# execution of prepared query using a recursive table prepare stmt1 from " with recursive ancestors @@ -705,6 +777,7 @@ id name dob father mother 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL deallocate prepare stmt1; +# view using a recursive table create view v1 as with recursive ancestors @@ -786,6 +859,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra NULL UNION RESULT ALL NULL NULL NULL NULL NULL NULL Warnings: Note 1003 with recursive ancestors as (select `test`.`folks`.`id` AS `id`,`test`.`folks`.`name` AS `name`,`test`.`folks`.`dob` AS `dob`,`test`.`folks`.`father` AS `father`,`test`.`folks`.`mother` AS `mother` from `test`.`folks` where ((`test`.`folks`.`name` = 'Me') and (`test`.`folks`.`dob` = DATE'2000-01-01')) union select `p`.`id` AS `id`,`p`.`name` AS `name`,`p`.`dob` AS `dob`,`p`.`father` AS `father`,`p`.`mother` AS `mother` from `test`.`folks` `p` join `ancestors` `a` where ((`a`.`father` = `p`.`id`) or (`a`.`mother` = `p`.`id`)))select `ancestors`.`id` AS `id`,`ancestors`.`name` AS `name`,`ancestors`.`dob` AS `dob`,`ancestors`.`father` AS `father`,`ancestors`.`mother` AS `mother` from `ancestors` +# recursive spec with two anchor selects and two recursive ones with recursive ancestor_ids (id) as @@ -813,6 +887,7 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL +# recursive spec using union all with recursive ancestors as @@ -1115,6 +1190,7 @@ generation name 2 Grandma Ann 2 Grandma Sally 2 Grandpa Ben +# query with recursive tables using key access alter table folks add primary key (id); explain with recursive diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index eb6677e7f75..414b1c27687 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -469,6 +469,20 @@ explain with t as (select a, count(*) from t1 where b >= 'c' group by a) select t1.b from t2,t1 where t1.a = t2.c; +--echo # too many with elements in with clause +let $m= 65; +let $i= $m; +dec $i; +let $q= with s$m as (select * from t1); +while ($i) +{ + let $q= $q, s$i as (select * from t1) ; + dec $i; + } +let $q= $q select * from s$m; +--ERROR ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE +eval $q; + drop table t1,t2; --echo # diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index 98fe159e174..d795ea81b23 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -156,7 +156,8 @@ insert into folks values (67, 'Cousin Eddie', '1992-02-28', 25, 27), (27, 'Auntie Melinda', '1971-03-29', null, null); - +--echo # simple recursion with one anchor and one recursive select +--echo # the anchor is the first select in the specification with 
recursive ancestors as @@ -171,6 +172,8 @@ as ) select * from ancestors; +--echo # simple recursion with one anchor and one recursive select +--echo # the anchor is the last select in the specification with recursive ancestors as @@ -185,6 +188,8 @@ as ) select * from ancestors; +--echo # simple recursion with one anchor and one recursive select +--echo # the anchor is the first select in the specification with recursive ancestors as @@ -199,20 +204,8 @@ as ) select * from ancestors; -with recursive -ancestors -as -( - select * - from folks - where name = 'Me' or name='Sister Amy' - union - select p.* - from folks as p, ancestors as a - where p.id = a.father or p.id = a.mother -) -select * from ancestors; +--echo # two recursive definition, one uses another with recursive prev_gen as @@ -240,7 +233,45 @@ as ) select ancestors.name, ancestors.dob from ancestors; +--echo # recursive definition with two attached non-recursive +with recursive +ancestors(id,name,dob) +as +( + with + father(child_id,id,name,dob) + as + ( + select folks.id, f.id, f.name, f.dob + from folks, folks f + where folks.father=f.id + + ), + mother(child_id,id,name,dob) + as + ( + select folks.id, m.id, m.name, m.dob + from folks, folks m + where folks.mother=m.id + + ) + select folks.id, folks.name, folks.dob + from folks + where name='Me' + union + select f.id, f.name, f.dob + from ancestors a, father f + where f.child_id=a.id + union + select m.id, m.name, m.dob + from ancestors a, mother m + where m.child_id=a.id + +) +select ancestors.name, ancestors.dob from ancestors; +--echo # simple recursion with one anchor and one recursive select +--echo # the anchor is the first select in the specification with recursive descendants as @@ -255,6 +286,8 @@ as ) select * from descendants; +--echo # simple recursion with one anchor and one recursive select +--echo # the anchor is the first select in the specification with recursive descendants as @@ -270,6 +303,7 @@ as select * from descendants; +--echo # simple recursive table used three times in the main query with recursive ancestors as @@ -287,6 +321,8 @@ select * where exists (select * from ancestors a where a.father=t1.id AND a.mother=t2.id); + +--echo # simple recursive table used three times in the main query with ancestor_couples(husband, h_dob, wife, w_dob) as @@ -310,6 +346,8 @@ select t1.name, t1.dob, t2.name, t2.dob ) select * from ancestor_couples; + +--echo # simple recursion with two selects in recursive part with recursive ancestors as @@ -328,6 +366,8 @@ as ) select * from ancestors; + +--echo # mutual recursion with renaming with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -355,6 +395,7 @@ select h_name, h_dob, w_name, w_dob from ancestor_couples; +--echo # mutual recursion with union all with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -382,6 +423,35 @@ select h_name, h_dob, w_name, w_dob from ancestor_couples; +--echo # mutual recursion with renaming +with recursive +ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, + w_id, w_name, w_dob, w_father, w_mother) +as +( + select h.*, w.* + from folks h, folks w, coupled_ancestors a + where a.father = h.id AND a.mother = w.id + union + select h.*, w.* + from folks v, folks h, folks w + where v.name = 'Me' and + (v.father = h.id AND v.mother= w.id) +), +coupled_ancestors (id, name, dob, father, mother) +as +( + select h_id, h_name, h_dob, h_father, h_mother + from 
ancestor_couples + union + select w_id, w_name, w_dob, w_father, w_mother + from ancestor_couples +) +select h_name, h_dob, w_name, w_dob + from ancestor_couples; + + +--echo # mutual recursion with union all with recursive ancestor_couples(h_id, h_name, h_dob, h_father, h_mother, w_id, w_name, w_dob, w_father, w_mother) @@ -407,7 +477,7 @@ as select h_name, h_dob, w_name, w_dob from ancestor_couples; - +--echo # mutual recursion with one select in the first definition with recursive ancestor_couple_ids(h_id, w_id) as @@ -434,6 +504,8 @@ as select * from ancestor_couple_ids; + +--echo # join of a mutually recursive table with base tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -462,6 +534,7 @@ select h.name, h.dob, w.name, w.dob where c.h_id = h.id and c.w_id= w.id; +--echo # join of two mutually recursive tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -517,6 +590,8 @@ select h.name, h.dob, w.name, w.dob from ancestor_couple_ids c, coupled_ancestors h, coupled_ancestors w where c.h_id = h.id and c.w_id= w.id; + +--echo # simple mutual recursion with recursive ancestor_couple_ids(h_id, w_id) as @@ -543,6 +618,7 @@ select * from ancestor_couple_ids; +--echo # join of two mutually recursive tables with recursive ancestor_couple_ids(h_id, w_id) as @@ -570,6 +646,7 @@ select h.name, h.dob, w.name, w.dob where c.h_id = h.id and c.w_id= w.id; +--echo # execution of prepared query using a recursive table prepare stmt1 from " with recursive ancestors @@ -592,6 +669,7 @@ execute stmt1; deallocate prepare stmt1; +--echo # view using a recursive table create view v1 as with recursive ancestors @@ -636,6 +714,7 @@ select * from v2; drop view v1,v2; + explain extended with recursive ancestors @@ -652,6 +731,7 @@ as select * from ancestors; +--echo # recursive spec with two anchor selects and two recursive ones with recursive ancestor_ids (id) as @@ -672,6 +752,8 @@ as ) select * from ancestors; + +--echo # recursive spec using union all with recursive ancestors as @@ -691,8 +773,6 @@ as select * from ancestors; - - --ERROR ER_NOT_STANDARDS_COMPLIANT_RECURSIVE with recursive ancestor_ids (id, generation) @@ -937,6 +1017,8 @@ as ) select * from ancestors; +--echo # query with recursive tables using key access + alter table folks add primary key (id); explain @@ -958,6 +1040,7 @@ as ) select * from ancestors; + with recursive ancestors as diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index e1db12d2544..1dce05af943 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7150,6 +7150,8 @@ ER_BAD_COMMAND_IN_MULTI ukr "Команда '%s' не дозволена Ð´Ð»Ñ COM_MULTI" ER_WITH_COL_WRONG_LIST eng "WITH column list and SELECT field list have different column counts" +ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE + eng "Too many WITH elements in WITH clause" ER_DUP_QUERY_NAME eng "Duplicate query name in WITH clause" ER_RECURSIVE_WITHOUT_ANCHORS diff --git a/sql/sql_class.h b/sql/sql_class.h index be263a6b902..99a5403ff04 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -4681,8 +4681,11 @@ public: class select_union_recursive :public select_union { public: + /* The temporary table with the new records generated by one iterative step */ TABLE *incr_table; + /* One of tables from the list rec_tables (determined dynamically) */ TABLE *first_rec_table_to_update; + /* The temporary tables used for recursive table references */ List
rec_tables; select_union_recursive(THD *thd_arg): diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc index 82958333f65..fa18de0f49f 100644 --- a/sql/sql_cte.cc +++ b/sql/sql_cte.cc @@ -6,6 +6,38 @@ #include "sql_select.h" +/** + @brief + Add a new element to this with clause + + @param elem The with element to add to this with clause + + @details + The method adds the with element 'elem' to the elements + in this with clause. The method reports an error if + the number of the added element exceeds the value + of the constant max_number_of_elements_in_with_clause. + + @retval + true if an error is reported + false otherwise +*/ + +bool With_clause::add_with_element(With_element *elem) +{ + if (with_list.elements == max_number_of_elements_in_with_clause) + { + my_error(ER_TOO_MANY_DEFINITIONS_IN_WITH_CLAUSE, MYF(0)); + return true; + } + elem->owner= this; + elem->number= with_list.elements; + elem->spec->with_element= elem; + with_list.link_in_list(elem, &elem->next); + return false; +} + + /** @brief Check dependencies between tables defined in a list of with clauses diff --git a/sql/sql_cte.h b/sql/sql_cte.h index 20164174214..94fad561b0d 100644 --- a/sql/sql_cte.h +++ b/sql/sql_cte.h @@ -210,6 +210,8 @@ public: friend class With_clause; }; +const uint max_number_of_elements_in_with_clause= sizeof(table_map)*8; + /** @class With_clause @brief Set of with_elements @@ -270,15 +272,7 @@ public: with_recursive(recursive_fl) { } - /* Add a new element to the current with clause */ - bool add_with_element(With_element *elem) - { - elem->owner= this; - elem->number= with_list.elements; - elem->spec->with_element= elem; - with_list.link_in_list(elem, &elem->next); - return false; - } + bool add_with_element(With_element *elem); /* Add this with clause to the list of with clauses used in the statement */ void add_to_list(With_clause ** &last_next) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 33befdd4639..6297cc6eb84 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -644,16 +644,21 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) if (unit->prepared && derived->is_recursive_with_table() && !derived->table) { + /* + Here 'derived' is either a non-recursive table reference to a recursive + with table or a recursive table reference to a recursvive table whose + specification has been already prepared (a secondary recursive table + reference. 
+ */ if (!(derived->derived_result= new (thd->mem_root) select_union(thd))) DBUG_RETURN(TRUE); // out of memory thd->create_tmp_table_for_derived= TRUE; - if (!derived->table) - res= derived->derived_result->create_result_table( - thd, &unit->types, FALSE, - (first_select->options | - thd->variables.option_bits | - TMP_TABLE_ALL_COLUMNS), - derived->alias, FALSE, FALSE); + res= derived->derived_result->create_result_table( + thd, &unit->types, FALSE, + (first_select->options | + thd->variables.option_bits | + TMP_TABLE_ALL_COLUMNS), + derived->alias, FALSE, FALSE); thd->create_tmp_table_for_derived= FALSE; if (!res && !derived->table) @@ -662,6 +667,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) derived->table= derived->derived_result->table; if (derived->is_with_table_recursive_reference()) { + /* Here 'derived" is a secondary recursive table reference */ unit->with_element->rec_result->rec_tables.push_back(derived->table); } } @@ -685,7 +691,12 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) (!derived->with->with_anchor && !derived->with->is_with_prepared_anchor())) { - // Prepare underlying views/DT first. + /* + Prepare underlying views/DT first unless 'derived' is a recursive + table reference and either the anchors from the specification of + 'derived' has been already prepared or there no anchor in this + specification + */ if ((res= sl->handle_derived(lex, DT_PREPARE))) goto exit; } @@ -922,30 +933,41 @@ bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived) } +/** + @brief + Fill the recursive with table + + @param thd The thread handle + + @details + The method is called only for recursive with tables. + The method executes the recursive part of the specification + of this with table until no more rows are added to the table + or the number of the performed iteration reaches the allowed + maximum. + + @retval + false on success + true on failure +*/ + bool TABLE_LIST::fill_recursive(THD *thd) { bool rc= false; st_select_lex_unit *unit= get_unit(); - if (is_with_table_recursive_reference()) + rc= with->instantiate_tmp_tables(); + while (!rc && !with->all_are_stabilized()) { + if (with->level > thd->variables.max_recursive_iterations) + break; + with->prepare_for_next_iteration(); rc= unit->exec_recursive(); } - else + if (!rc) { - rc= with->instantiate_tmp_tables(); - while (!rc && !with->all_are_stabilized()) - { - if (with->level > thd->variables.max_recursive_iterations) - break; - with->prepare_for_next_iteration(); - rc= unit->exec_recursive(); - } - if (!rc) - { - TABLE *src= with->rec_result->table; - rc =src->insert_all_rows_into(thd, table, true); - } - } + TABLE *src= with->rec_result->table; + rc =src->insert_all_rows_into(thd, table, true); + } return rc; } @@ -960,9 +982,10 @@ bool TABLE_LIST::fill_recursive(THD *thd) @details Execute subquery of given 'derived' table/view and fill the result - table. After result table is filled, if this is not the EXPLAIN statement, - the entire unit / node is deleted. unit is deleted if UNION is used - for derived table and node is deleted is it is a simple SELECT. + table. After result table is filled, if this is not the EXPLAIN statement + and the table is not specified with a recursion the entire unit / node + is deleted. unit is deleted if UNION is used for derived table and node + is deleted is it is a simple SELECT. 'lex' is unused and 'thd' is passed as an argument to an underlying function. 
@note @@ -986,13 +1009,21 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_RETURN(FALSE); /*check that table creation passed without problems. */ DBUG_ASSERT(derived->table && derived->table->is_created()); - SELECT_LEX *first_select= unit->first_select(); select_union *derived_result= derived->derived_result; SELECT_LEX *save_current_select= lex->current_select; if (derived_is_recursive) { - res= derived->fill_recursive(thd); + if (derived->is_with_table_recursive_reference()) + { + /* Here only one iteration step is performed */ + res= unit->exec_recursive(); + } + else + { + /* In this case all iteration are performed */ + res= derived->fill_recursive(thd); + } } else if (unit->is_union()) { @@ -1001,6 +1032,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) } else { + SELECT_LEX *first_select= unit->first_select(); unit->set_limit(unit->global_parameters()); if (unit->select_limit_cnt == HA_POS_ERROR) first_select->options&= ~OPTION_FOUND_ROWS; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 185d79ec77a..40f01a4db38 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -100,6 +100,7 @@ int select_union::send_data(List &values) return 0; } + int select_union_recursive::send_data(List &values) { int rc= select_union::send_data(values); @@ -1166,7 +1167,29 @@ err: } -// One step of recursive execution +/** + @brief + Execute the union of the specification of a recursive with table + + @details + The method is performed only for the units that are specifications + if recursive with table T. If the specification contains an anchor + part then the first call of this method executes only this part + while the following calls execute the recursive part. If there are + no anchors each call executes the whole unit. + Before the excution the method cleans up the temporary table + to where the new rows of the recursive table are sent. + After the execution the unit these rows are copied to the + temporary tables created for recursive references of T. + If the specification if T is restricted (standards compliant) + then these temporary tables are cleaned up before new rows + are copied into them. + + @retval + false on success + true on failure +*/ + bool st_select_lex_unit::exec_recursive() { st_select_lex *lex_select_save= thd->lex->current_select; -- cgit v1.2.1 From 501fc1a9e292080aaa5c82e950839082a9672bc9 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 29 Aug 2016 22:58:01 -0700 Subject: Returned the test case that was removed by mistake. 
--- mysql-test/r/cte_recursive.result | 24 ++++++++++++++++++++++++ mysql-test/t/cte_recursive.test | 14 ++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/mysql-test/r/cte_recursive.result b/mysql-test/r/cte_recursive.result index 5769948203e..cb1c2439a13 100644 --- a/mysql-test/r/cte_recursive.result +++ b/mysql-test/r/cte_recursive.result @@ -230,6 +230,30 @@ id name dob father mother 7 Grandma Sally 1943-08-23 NULL 6 8 Grandpa Ben 1940-10-21 NULL NULL 6 Grandgrandma Martha 1923-05-17 NULL NULL +# simple recursion with or in anchor and or in recursive part +with recursive +ancestors +as +( +select * +from folks +where name = 'Me' or name='Sister Amy' + union +select p.* +from folks as p, ancestors as a +where p.id = a.father or p.id = a.mother +) +select * from ancestors; +id name dob father mother +100 Me 2000-01-01 20 30 +98 Sister Amy 2001-06-20 20 30 +20 Dad 1970-02-02 10 9 +30 Mom 1975-03-03 8 7 +10 Grandpa Bill 1940-04-05 NULL NULL +9 Grandma Ann 1941-10-15 NULL NULL +7 Grandma Sally 1943-08-23 NULL 6 +8 Grandpa Ben 1940-10-21 NULL NULL +6 Grandgrandma Martha 1923-05-17 NULL NULL # two recursive definition, one uses another with recursive prev_gen diff --git a/mysql-test/t/cte_recursive.test b/mysql-test/t/cte_recursive.test index d795ea81b23..4ca931f326c 100644 --- a/mysql-test/t/cte_recursive.test +++ b/mysql-test/t/cte_recursive.test @@ -204,6 +204,20 @@ as ) select * from ancestors; +--echo # simple recursion with or in anchor and or in recursive part +with recursive +ancestors +as +( + select * + from folks + where name = 'Me' or name='Sister Amy' + union + select p.* + from folks as p, ancestors as a + where p.id = a.father or p.id = a.mother +) +select * from ancestors; --echo # two recursive definition, one uses another with recursive -- cgit v1.2.1 From 4ca7b226c05ae883accec64b1afe6d60daf1b323 Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 30 Aug 2016 09:16:50 +0300 Subject: Safety fix to previous patch --- sql/wsrep_var.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 318b35158fe..f48f08852dc 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -356,11 +356,11 @@ bool wsrep_cluster_address_check (sys_var *self, THD* thd, set_var* var) char addr_buf[FN_REFLEN]; if ((! var->save_result.string_value.str) || - (var->save_result.string_value.length > (FN_REFLEN - 1))) // safety + (var->save_result.string_value.length >= sizeof(addr_buf))) // safety goto err; strmake(addr_buf, var->save_result.string_value.str, - sizeof(addr_buf)-1); + MY_MIN(sizeof(addr_buf)-1, var->save_result.string_value.length)); if (!wsrep_cluster_address_verify(addr_buf)) return 0; -- cgit v1.2.1 From 64fe3894dd031b37ea459682f34b172961dffe38 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Tue, 30 Aug 2016 10:32:37 -0400 Subject: bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index eda3118b090..d921401f495 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=1 -MYSQL_VERSION_PATCH=17 +MYSQL_VERSION_PATCH=18 -- cgit v1.2.1 From cb1e44219b9beae92e104cfe5e67aab3073dd8aa Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 30 Aug 2016 11:13:25 -0700 Subject: Adjusted test results. 
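The rdiff adjusted below is the 32-bit variant of the metadata for the new max_recursive_iterations session variable (INT UNSIGNED on 32-bit builds instead of BIGINT UNSIGNED). Going by the loop in TABLE_LIST::fill_recursive() earlier in this series, a recursive query that reaches this cap simply stops iterating and returns whatever rows were produced up to that point, with no error in this version. A minimal sketch of the observable effect; the seq CTE and the cap value 10 are invented for illustration:

set max_recursive_iterations = 10;
with recursive seq(n) as
(
  select 1
  union all
  select n+1 from seq where n < 100
)
select count(*) from seq;
-- with the cap at 10 the iteration loop exits early, so the count lands near
-- 10 rather than 100; with the default of 4294967295 all 100 rows appear
set max_recursive_iterations = default;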
--- .../suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff index cd3e1df9e50..07bb14eb75f 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff @@ -449,6 +449,15 @@ VARIABLE_COMMENT Maximum number of prepared statements in the server NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 1048576 +@@ -2171,7 +2171,7 @@ + GLOBAL_VALUE_ORIGIN COMPILE-TIME + DEFAULT_VALUE 4294967295 + VARIABLE_SCOPE SESSION +-VARIABLE_TYPE BIGINT UNSIGNED ++VARIABLE_TYPE INT UNSIGNED + VARIABLE_COMMENT Maximum number of iterations when executing recursive queries + NUMERIC_MIN_VALUE 0 + NUMERIC_MAX_VALUE 4294967295 @@ -2185,7 +2185,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 4294967295 -- cgit v1.2.1 From 468a6ad722778768eb4ee5003dd818945b363261 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Wed, 31 Aug 2016 11:48:51 +0400 Subject: Fixed package build failure README -> README.md --- debian/mariadb-client-10.2.docs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/mariadb-client-10.2.docs b/debian/mariadb-client-10.2.docs index 21446855f51..8117d689011 100644 --- a/debian/mariadb-client-10.2.docs +++ b/debian/mariadb-client-10.2.docs @@ -1,2 +1,2 @@ debian/additions/innotop/changelog.innotop -README +README.md -- cgit v1.2.1 From e7608a78ef45cc46f4e4d5abbda788ad54e80e71 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 15 Apr 2016 20:40:25 +0200 Subject: MDEV-8931: (server part of) session state tracking initial commit to test --- include/mysql.h.pp | 9 + include/mysql_com.h | 53 ++- libmysqld/CMakeLists.txt | 1 + libmysqld/lib_sql.cc | 3 +- mysql-test/r/mysqld--help.result | 7 + .../sys_vars/r/sysvars_server_embedded.result | 28 ++ .../sys_vars/r/sysvars_server_notembedded.result | 28 ++ sql-common/pack.c | 22 ++ sql/CMakeLists.txt | 4 +- sql/net_serv.cc | 1 - sql/protocol.cc | 138 ++++++-- sql/session_tracker.cc | 358 +++++++++++++++++++++ sql/session_tracker.h | 159 +++++++++ sql/set_var.cc | 32 +- sql/set_var.h | 2 +- sql/share/errmsg-utf8.txt | 5 +- sql/sp_head.cc | 10 + sql/sql_class.cc | 3 + sql/sql_class.h | 4 + sql/sql_db.cc | 14 +- sql/sql_parse.cc | 14 + sql/sql_plugin.cc | 2 +- sql/sql_plugin.h | 2 + sql/sql_prepare.cc | 6 + sql/sql_string.cc | 14 + sql/sql_string.h | 18 ++ sql/sql_table.cc | 4 + sql/sys_vars.cc | 33 ++ 28 files changed, 928 insertions(+), 46 deletions(-) create mode 100644 sql/session_tracker.cc create mode 100644 sql/session_tracker.h diff --git a/include/mysql.h.pp b/include/mysql.h.pp index b9018376876..57ce4f78d99 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -87,6 +87,15 @@ enum enum_mysql_set_option MYSQL_OPTION_MULTI_STATEMENTS_ON, MYSQL_OPTION_MULTI_STATEMENTS_OFF }; +enum enum_session_state_type +{ + SESSION_TRACK_SYSTEM_VARIABLES, + SESSION_TRACK_SCHEMA, + SESSION_TRACK_STATE_CHANGE, + SESSION_TRACK_GTIDS, + SESSION_TRACK_TRANSACTION_CHARACTERISTICS, + SESSION_TRACK_TRANSACTION_STATE +}; my_bool my_net_init(NET *net, Vio* vio, void *thd, unsigned int my_flags); void my_net_local_init(NET *net); void net_end(NET *net); diff --git a/include/mysql_com.h b/include/mysql_com.h index 8e7bf2337cc..9eb0e4f2d74 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -72,6 +72,14 @@ #define INDEX_COMMENT_MAXLEN 1024 #define 
TABLE_PARTITION_COMMENT_MAXLEN 1024 +/* + Maximum length of protocol packet. + OK packet length limit also restricted to this value as any length greater + than this value will have first byte of OK packet to be 254 thus does not + provide a means to identify if this is OK or EOF packet. +*/ +#define MAX_PACKET_LENGTH (256L*256L*256L-1) + /* USER_HOST_BUFF_SIZE -- length of string buffer, that is enough to contain username and hostname parts of the user identifier with trailing zero in @@ -221,6 +229,14 @@ enum enum_server_command /* Don't close the connection for a connection with expired password. */ #define CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS (1UL << 22) +/** + Capable of handling server state change information. Its a hint to the + server to include the state change information in Ok packet. +*/ +#define CLIENT_SESSION_TRACK (1UL << 23) +/* Client no longer needs EOF packet */ +#define CLIENT_DEPRECATE_EOF (1UL << 24) + #define CLIENT_PROGRESS_OBSOLETE (1UL << 29) #define CLIENT_SSL_VERIFY_SERVER_CERT (1UL << 30) /* @@ -272,10 +288,12 @@ enum enum_server_command CLIENT_MULTI_RESULTS | \ CLIENT_PS_MULTI_RESULTS | \ CLIENT_SSL_VERIFY_SERVER_CERT | \ - CLIENT_REMEMBER_OPTIONS | \ + CLIENT_REMEMBER_OPTIONS | \ MARIADB_CLIENT_PROGRESS | \ CLIENT_PLUGIN_AUTH | \ CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | \ + CLIENT_SESSION_TRACK |\ + CLIENT_DEPRECATE_EOF |\ CLIENT_CONNECT_ATTRS |\ MARIADB_CLIENT_COM_MULTI) @@ -340,6 +358,11 @@ enum enum_server_command */ #define SERVER_STATUS_IN_TRANS_READONLY 8192 +/** + This status flag, when on, implies that one of the state information has + changed on the server because of the execution of the last statement. +*/ +#define SERVER_SESSION_STATE_CHANGED (1UL << 14) /** Server status flags that must be cleared when starting @@ -356,7 +379,8 @@ enum enum_server_command SERVER_QUERY_WAS_SLOW |\ SERVER_STATUS_DB_DROPPED |\ SERVER_STATUS_CURSOR_EXISTS|\ - SERVER_STATUS_LAST_ROW_SENT) + SERVER_STATUS_LAST_ROW_SENT|\ + SERVER_SESSION_STATE_CHANGED) #define MYSQL_ERRMSG_SIZE 512 #define NET_READ_TIMEOUT 30 /* Timeout on read */ @@ -523,6 +547,30 @@ enum enum_mysql_set_option MYSQL_OPTION_MULTI_STATEMENTS_OFF }; +/* + Type of state change information that the server can include in the Ok + packet. + Note : 1) session_state_type shouldn't go past 255 (i.e. 1-byte boundary). + 2) Modify the definition of SESSION_TRACK_END when a new member is + added. 
+*/ +enum enum_session_state_type +{ + SESSION_TRACK_SYSTEM_VARIABLES, /* Session system variables */ + SESSION_TRACK_SCHEMA, /* Current schema */ + SESSION_TRACK_STATE_CHANGE, /* track session state changes */ + SESSION_TRACK_GTIDS, + SESSION_TRACK_TRANSACTION_CHARACTERISTICS, /* Transaction chistics */ + SESSION_TRACK_TRANSACTION_STATE /* Transaction state */ +}; + +#define SESSION_TRACK_BEGIN SESSION_TRACK_SYSTEM_VARIABLES + +#define SESSION_TRACK_END SESSION_TRACK_TRANSACTION_STATE + +#define IS_SESSION_STATE_TYPE(T) \ + (((int)(T) >= SESSION_TRACK_BEGIN) && ((T) <= SESSION_TRACK_END)) + #define net_new_transaction(net) ((net)->pkt_nr=0) #ifdef __cplusplus @@ -641,6 +689,7 @@ my_ulonglong net_field_length_ll(uchar **packet); my_ulonglong safe_net_field_length_ll(uchar **packet, size_t packet_len); uchar *net_store_length(uchar *pkg, ulonglong length); uchar *safe_net_store_length(uchar *pkg, size_t pkg_len, ulonglong length); +unsigned int net_length_size(ulonglong num); #endif #ifdef __cplusplus diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 368f30f8317..4518329a3dd 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -112,6 +112,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc ../sql/item_windowfunc.cc ../sql/sql_window.cc ../sql/sql_cte.cc ../sql/temporary_tables.cc + ../sql/session_tracker.cc ${GEN_SOURCES} ${MYSYS_LIBWRAP_SOURCE} ) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 4a84e63f222..8c01fc8b9b6 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -1172,7 +1172,8 @@ bool net_send_ok(THD *thd, uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong id, const char *message, - bool unused __attribute__((unused))) + bool unused1 __attribute__((unused)), + bool unused2 __attribute__((unused))) { DBUG_ENTER("emb_net_send_ok"); MYSQL_DATA *data; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 0d52a50eb56..8e82635ec91 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -903,6 +903,11 @@ The following options may be given as the first argument: files within specified directory --server-id=# Uniquely identifies the server instance in the community of replication partners + --session-track-schema + Track changes to the 'default schema'. + (Defaults to on; use --skip-session-track-schema to disable.) + --session-track-state-change + Track changes to the 'session state'. --show-slave-auth-info Show user and password in SHOW SLAVE HOSTS on this master. @@ -1385,6 +1390,8 @@ safe-user-create FALSE secure-auth TRUE secure-file-priv (No default value) server-id 1 +session-track-schema TRUE +session-track-state-change FALSE show-slave-auth-info FALSE silent-startup FALSE skip-grant-tables TRUE diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index eecebce80ad..13733c38a94 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -3327,6 +3327,34 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME SESSION_TRACK_SCHEMA +SESSION_VALUE ON +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Track changes to the 'default schema'. 
+NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME SESSION_TRACK_STATE_CHANGE +SESSION_VALUE OFF +GLOBAL_VALUE OFF +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE OFF +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Track changes to the 'session state'. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME SKIP_EXTERNAL_LOCKING SESSION_VALUE NULL GLOBAL_VALUE ON diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 4e98bc9f102..e7f953e5f4c 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3789,6 +3789,34 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME SESSION_TRACK_SCHEMA +SESSION_VALUE ON +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Track changes to the 'default schema'. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME SESSION_TRACK_STATE_CHANGE +SESSION_VALUE OFF +GLOBAL_VALUE OFF +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE OFF +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Track changes to the 'session state'. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME SKIP_EXTERNAL_LOCKING SESSION_VALUE NULL GLOBAL_VALUE ON diff --git a/sql-common/pack.c b/sql-common/pack.c index 4bb4a0b7a4e..5428feb623e 100644 --- a/sql-common/pack.c +++ b/sql-common/pack.c @@ -186,3 +186,25 @@ uchar *safe_net_store_length(uchar *packet, size_t packet_len, ulonglong length) return packet+8; } + +/** + The length of space required to store the resulting length-encoded integer + for the given number. This function can be used at places where one needs to + dynamically allocate the buffer for a given number to be stored as length- + encoded integer. + + @param num [IN] the input number + + @return length of buffer needed to store this number [1, 3, 4, 9]. 
+*/ + +uint net_length_size(ulonglong num) +{ + if (num < (ulonglong) 251LL) + return 1; + if (num < (ulonglong) 65536LL) + return 3; + if (num < (ulonglong) 16777216LL) + return 4; + return 9; +} diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 089d793b2b0..a18294e5ae3 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -95,7 +95,9 @@ SET (SQL_SOURCE ../sql-common/client_plugin.c opt_range.cc opt_range.h opt_sum.cc ../sql-common/pack.c parse_file.cc password.c procedure.cc - protocol.cc records.cc repl_failsafe.cc rpl_filter.cc set_var.cc + protocol.cc records.cc repl_failsafe.cc rpl_filter.cc + session_tracker.cc + set_var.cc slave.cc sp.cc sp_cache.cc sp_head.cc sp_pcontext.cc sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h diff --git a/sql/net_serv.cc b/sql/net_serv.cc index f0284462206..dc97d5e8e54 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -117,7 +117,6 @@ extern my_bool thd_net_is_killed(); #endif #define TEST_BLOCKING 8 -#define MAX_PACKET_LENGTH (256L*256L*256L-1) static my_bool net_write_buff(NET *, const uchar *, ulong); diff --git a/sql/protocol.cc b/sql/protocol.cc index 608ec553da0..e12c72dd988 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -35,7 +35,8 @@ static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; /* Declared non-static only because of the embedded library. */ bool net_send_error_packet(THD *, uint, const char *, const char *); /* Declared non-static only because of the embedded library. */ -bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *, bool); +bool net_send_ok(THD *, uint, uint, ulonglong, ulonglong, const char *, + bool, bool); /* Declared non-static only because of the embedded library. */ bool net_send_eof(THD *thd, uint server_status, uint statement_warn_count); #ifndef EMBEDDED_LIBRARY @@ -197,7 +198,8 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err, @param affected_rows Number of rows changed by statement @param id Auto_increment id for first row (if used) @param message Message to send to the client (Used by mysql_status) - + @param is_eof this called inted of old EOF packet + @return @retval FALSE The message was successfully sent @retval TRUE An error occurred and the messages wasn't sent properly @@ -209,10 +211,18 @@ bool net_send_ok(THD *thd, uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong id, const char *message, + bool is_eof, bool skip_flush) { NET *net= &thd->net; - uchar buff[MYSQL_ERRMSG_SIZE+10],*pos; + StringBuffer store; + + /* + To be used to manage the data storage in case session state change + information is present. + */ + bool state_changed= false; + bool error= FALSE; DBUG_ENTER("net_send_ok"); @@ -222,38 +232,82 @@ net_send_ok(THD *thd, DBUG_RETURN(FALSE); } - buff[0]=0; // No fields - pos=net_store_length(buff+1,affected_rows); - pos=net_store_length(pos, id); + /* + OK send instead of EOF still require 0xFE header, but OK packet content. 
+ */ + if (is_eof) + { + DBUG_ASSERT(thd->client_capabilities & CLIENT_DEPRECATE_EOF); + store.q_append((char)254); + } + else + store.q_append('\0'); + + /* affected rows */ + store.q_net_store_length(affected_rows); + + /* last insert id */ + store.q_net_store_length(id); + if (thd->client_capabilities & CLIENT_PROTOCOL_41) { DBUG_PRINT("info", ("affected_rows: %lu id: %lu status: %u warning_count: %u", - (ulong) affected_rows, + (ulong) affected_rows, (ulong) id, (uint) (server_status & 0xffff), (uint) statement_warn_count)); - int2store(pos, server_status); - pos+=2; + store.q_append2b(server_status); /* We can only return up to 65535 warnings in two bytes */ uint tmp= MY_MIN(statement_warn_count, 65535); - int2store(pos, tmp); - pos+= 2; + store.q_append2b(tmp); } else if (net->return_status) // For 4.0 protocol { - int2store(pos, server_status); - pos+=2; + store.q_append2b(server_status); } thd->get_stmt_da()->set_overwrite_status(true); - if (message && message[0]) - pos= net_store_data(pos, (uchar*) message, strlen(message)); - error= my_net_write(net, buff, (size_t) (pos-buff)); - if (!error && !skip_flush) + if ((thd->client_capabilities & CLIENT_SESSION_TRACK)) + { + if (server_status & SERVER_SESSION_STATE_CHANGED) + state_changed= true; + /* the info field */ + if (state_changed || (message && message[0])) + { + DBUG_ASSERT(strlen(message) <= MYSQL_ERRMSG_SIZE); + store.q_net_store_data((uchar*) message, message ? strlen(message) : 0); + } + + /* session state change information */ + if (unlikely(state_changed)) + { + store.set_charset(thd->variables.collation_database); + + thd->session_tracker.store(thd, &store); + } + } + else if (message && message[0]) + { + /* the info field, if there is a message to store */ + DBUG_ASSERT(strlen(message) <= MYSQL_ERRMSG_SIZE); + store.q_net_store_data((uchar*) message, strlen(message)); + } + + if (store.length() > MAX_PACKET_LENGTH) + { + net->error= 1; + net->last_errno= ER_NET_OK_PACKET_TOO_LARGE; + my_error(ER_NET_OK_PACKET_TOO_LARGE, MYF(0)); + DBUG_PRINT("info", ("OK packet too large")); + DBUG_RETURN(1); + } + error= my_net_write(net, (const unsigned char*)store.ptr(), store.length()); + if (!error && (!skip_flush || is_eof)) error= net_flush(net); + thd->server_status&= ~SERVER_SESSION_STATE_CHANGED; thd->get_stmt_da()->set_overwrite_status(false); DBUG_PRINT("info", ("OK sent, so no more error sending allowed")); @@ -261,6 +315,7 @@ net_send_ok(THD *thd, DBUG_RETURN(error); } + static uchar eof_buff[1]= { (uchar) 254 }; /* Marker for end of fields */ /** @@ -292,6 +347,22 @@ net_send_eof(THD *thd, uint server_status, uint statement_warn_count) NET *net= &thd->net; bool error= FALSE; DBUG_ENTER("net_send_eof"); + + /* + Check if client understand new format packets (OK instead of EOF) + + Normally end of statement reply is signaled by OK packet, but in case + of binlog dump request an EOF packet is sent instead. 
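For reference, the payload assembled in net_send_ok() above follows the
classic 4.1 OK packet with two optional trailing fields; a sketch of the
wire layout for a client that negotiated CLIENT_SESSION_TRACK (summarised
from the code above, not captured network output):

  header          1 byte      0x00, or 0xFE when the OK replaces an EOF
  affected_rows   lenenc-int
  last_insert_id  lenenc-int
  status_flags    2 bytes
  warning_count   2 bytes
  info            lenenc-str  written when there is a message or a state
                              change
  session_state   lenenc-str  only when SERVER_SESSION_STATE_CHANGED is
                              set: a lenenc total length followed by one
                              entry per changed tracker, each entry being
                              [type lenenc][entry length lenenc][payload]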
Also, old clients + expect EOF packet instead of OK + */ + if ((thd->client_capabilities & CLIENT_DEPRECATE_EOF) && + (thd->get_command() != COM_BINLOG_DUMP )) + { + error= net_send_ok(thd, server_status, statement_warn_count, 0, 0, NULL, + true, false); + DBUG_RETURN(error); + } + /* Set to TRUE if no active vio, to work well in case of --init-file */ if (net->vio != 0) { @@ -546,9 +617,9 @@ bool Protocol::send_ok(uint server_status, uint statement_warn_count, const char *message, bool skip_flush) { DBUG_ENTER("Protocol::send_ok"); - const bool retval= + const bool retval= net_send_ok(thd, server_status, statement_warn_count, - affected_rows, last_insert_id, message, skip_flush); + affected_rows, last_insert_id, message, false, skip_flush); DBUG_RETURN(retval); } @@ -562,7 +633,7 @@ bool Protocol::send_ok(uint server_status, uint statement_warn_count, bool Protocol::send_eof(uint server_status, uint statement_warn_count) { DBUG_ENTER("Protocol::send_eof"); - const bool retval= net_send_eof(thd, server_status, statement_warn_count); + bool retval= net_send_eof(thd, server_status, statement_warn_count); DBUG_RETURN(retval); } @@ -862,14 +933,19 @@ bool Protocol::send_result_set_metadata(List *list, uint flags) if (flags & SEND_EOF) { - /* - Mark the end of meta-data result set, and store thd->server_status, - to show that there is no cursor. - Send no warning information, as it will be sent at statement end. - */ - if (write_eof_packet(thd, &thd->net, thd->server_status, - thd->get_stmt_da()->current_statement_warn_count())) - DBUG_RETURN(1); + + /* if it is new client do not send EOF packet */ + if (!(thd->client_capabilities & CLIENT_DEPRECATE_EOF)) + { + /* + Mark the end of meta-data result set, and store thd->server_status, + to show that there is no cursor. + Send no warning information, as it will be sent at statement end. + */ + if (write_eof_packet(thd, &thd->net, thd->server_status, + thd->get_stmt_da()->current_statement_warn_count())) + DBUG_RETURN(1); + } } DBUG_RETURN(prepare_for_send(list->elements)); @@ -1505,6 +1581,7 @@ bool Protocol_binary::store_time(MYSQL_TIME *tm, int decimals) bool Protocol_binary::send_out_parameters(List *sp_params) { + bool ret; if (!(thd->client_capabilities & CLIENT_PS_MULTI_RESULTS)) { /* The client does not support OUT-parameters. */ @@ -1558,8 +1635,7 @@ bool Protocol_binary::send_out_parameters(List *sp_params) /* Restore THD::server_status. */ thd->server_status&= ~SERVER_PS_OUT_PARAMS; - /* Send EOF-packet. */ - net_send_eof(thd, thd->server_status, 0); + ret= net_send_eof(thd, thd->server_status, 0); /* Reset SERVER_MORE_RESULTS_EXISTS bit, because this is the last packet @@ -1567,5 +1643,5 @@ bool Protocol_binary::send_out_parameters(List *sp_params) */ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; - return FALSE; + return ret ? FALSE : TRUE; } diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc new file mode 100644 index 00000000000..ad9906d7159 --- /dev/null +++ b/sql/session_tracker.cc @@ -0,0 +1,358 @@ +/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2016, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + + +#include "session_tracker.h" + +#include "hash.h" +#include "table.h" +#include "rpl_gtid.h" +#include "sql_class.h" +#include "sql_show.h" +#include "sql_plugin.h" + +class Not_implemented_tracker : public State_tracker +{ +public: + bool enable(THD *thd) + { return false; } + bool check(THD *, set_var *) + { return false; } + bool update(THD *) + { return false; } + bool store(THD *, String *) + { return false; } + void mark_as_changed(THD *, LEX_CSTRING *tracked_item_name) + {} + +}; + + +/** + Current_schema_tracker, + + This is a tracker class that enables & manages the tracking of current + schema for a particular connection. +*/ + +class Current_schema_tracker : public State_tracker +{ +private: + bool schema_track_inited; + void reset(); + +public: + + Current_schema_tracker() + { + schema_track_inited= false; + } + + bool enable(THD *thd) + { return update(thd); } + bool check(THD *thd, set_var *var) + { return false; } + bool update(THD *thd); + bool store(THD *thd, String *buf); + void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); +}; + +/* + Session_state_change_tracker + + This is a boolean tracker class that will monitor any change that contributes + to a session state change. + Attributes that contribute to session state change include: + - Successful change to System variables + - User defined variables assignments + - temporary tables created, altered or deleted + - prepared statements added or removed + - change in current database + - change of current role +*/ + +class Session_state_change_tracker : public State_tracker +{ +private: + + void reset(); + +public: + Session_state_change_tracker(); + bool enable(THD *thd) + { return update(thd); }; + bool check(THD *thd, set_var *var) + { return false; } + bool update(THD *thd); + bool store(THD *thd, String *buf); + void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); + bool is_state_changed(THD*); + void ensure_enabled(THD *thd) + {} +}; + + +/* To be used in expanding the buffer. */ +static const unsigned int EXTRA_ALLOC= 1024; + +/////////////////////////////////////////////////////////////////////////////// + +/** + Enable/disable the tracker based on @@session_track_schema's value. + + @param thd [IN] The thd handle. + + @return + false (always) +*/ + +bool Current_schema_tracker::update(THD *thd) +{ + m_enabled= thd->variables.session_track_schema; + return false; +} + + +/** + Store the schema name as length-encoded string in the specified buffer. + + @param thd [IN] The thd handle. + @paran buf [INOUT] Buffer to store the information to. + + @reval false Success + @retval true Error +*/ + +bool Current_schema_tracker::store(THD *thd, String *buf) +{ + ulonglong db_length, length; + + /* + Protocol made (by unknown reasons) redundant: + It saves length of database name and name of database name + + length of saved length of database length. 
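A worked example (illustrative, assuming the current schema is "test"):
the tracker entry produced by the code below is

  01               SESSION_TRACK_SCHEMA
  05               length of the entity that follows (1 + 4)
  04 74 65 73 74   length-encoded schema name "test"

i.e. the entity length covers the schema name plus the extra length byte
stored in front of it.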
+ */ + length= db_length= thd->db_length; + length += net_length_size(length); + + compile_time_assert(SESSION_TRACK_SCHEMA < 251); + compile_time_assert(NAME_LEN < 251); + DBUG_ASSERT(net_length_size(length) < 251); + if (buf->prep_alloc(1 + 1 + length, EXTRA_ALLOC)) + return true; + + /* Session state type (SESSION_TRACK_SCHEMA) */ + buf->q_net_store_length((ulonglong)SESSION_TRACK_SCHEMA); + + /* Length of the overall entity. */ + buf->q_net_store_length(length); + + /* Length and current schema name */ + buf->q_net_store_data((const uchar *)thd->db, thd->db_length); + + reset(); + + return false; +} + + +/** + Mark the tracker as changed. +*/ + +void Current_schema_tracker::mark_as_changed(THD *thd, LEX_CSTRING *) +{ + m_changed= true; + thd->lex->safe_to_cache_query= 0; + thd->server_status|= SERVER_SESSION_STATE_CHANGED; +} + + +/** + Reset the m_changed flag for next statement. + + @return void +*/ + +void Current_schema_tracker::reset() +{ + m_changed= false; +} + + +/////////////////////////////////////////////////////////////////////////////// + +Session_state_change_tracker::Session_state_change_tracker() +{ + m_changed= false; +} + +/** + @Enable/disable the tracker based on @@session_track_state_change value. + + @param thd [IN] The thd handle. + @return false (always) + +**/ + +bool Session_state_change_tracker::update(THD *thd) +{ + m_enabled= thd->variables.session_track_state_change; + return false; +} + +/** + Store the '1' in the specified buffer when state is changed. + + @param thd [IN] The thd handle. + @paran buf [INOUT] Buffer to store the information to. + + @reval false Success + @retval true Error +**/ + +bool Session_state_change_tracker::store(THD *thd, String *buf) +{ + if (buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC)) + return true; + + compile_time_assert(SESSION_TRACK_STATE_CHANGE < 251); + /* Session state type (SESSION_TRACK_STATE_CHANGE) */ + buf->q_net_store_length((ulonglong)SESSION_TRACK_STATE_CHANGE); + + /* Length of the overall entity (1 byte) */ + buf->q_append('\1'); + + DBUG_ASSERT(is_state_changed(thd)); + buf->q_append('1'); + + reset(); + + return false; +} + +/** + Mark the tracker as changed and associated session + attributes accordingly. +*/ + +void Session_state_change_tracker::mark_as_changed(THD *thd, LEX_CSTRING *) +{ + m_changed= true; + thd->lex->safe_to_cache_query= 0; + thd->server_status|= SERVER_SESSION_STATE_CHANGED; +} + +/** + Reset the m_changed flag for next statement. +*/ + +void Session_state_change_tracker::reset() +{ + m_changed= false; +} + +/** + Find if there is a session state change. +*/ + +bool Session_state_change_tracker::is_state_changed(THD *) +{ + return m_changed; +} + +/////////////////////////////////////////////////////////////////////////////// + +/** + @brief Initialize session tracker objects. +*/ + +Session_tracker::Session_tracker() +{ + m_trackers[SESSION_SYSVARS_TRACKER]= + new (std::nothrow) Not_implemented_tracker; + m_trackers[CURRENT_SCHEMA_TRACKER]= + new (std::nothrow) Current_schema_tracker; + m_trackers[SESSION_STATE_CHANGE_TRACKER]= + new (std::nothrow) Session_state_change_tracker; + m_trackers[SESSION_GTIDS_TRACKER]= + new (std::nothrow) Not_implemented_tracker; + m_trackers[TRANSACTION_INFO_TRACKER]= + new (std::nothrow) Not_implemented_tracker; +} + +/** + @brief Enables the tracker objects. + + @param thd [IN] The thread handle. 
+ + @return void +*/ +void Session_tracker::enable(THD *thd) +{ + for (int i= 0; i <= SESSION_TRACKER_END; i ++) + m_trackers[i]->enable(thd); +} + + +/** + @brief Store all change information in the specified buffer. + + @param thd [IN] The thd handle. + @param buf [OUT] Reference to the string buffer to which the state + change data needs to be written. +*/ + +void Session_tracker::store(THD *thd, String *buf) +{ + /* Temporary buffer to store all the changes. */ + size_t start; + + /* + Probably most track result will fit in 251 byte so lets made it at + least efficient. We allocate 1 byte for length and then will move + string if there is more. + */ + buf->append('\0'); + start= buf->length(); + + /* Get total length. */ + for (int i= 0; i <= SESSION_TRACKER_END; i ++) + { + if (m_trackers[i]->is_changed() && + m_trackers[i]->store(thd, buf)) + { + buf->length(start); // it is safer to have 0-length block in case of error + return; + } + } + + size_t length= buf->length() - start; + uchar *data= (uchar *)(buf->ptr() + start); + uint size; + + if ((size= net_length_size(length)) != 1) + { + if (buf->prep_alloc(size - 1, EXTRA_ALLOC)) + { + buf->length(start); // it is safer to have 0-length block in case of error + return; + } + memmove(data + (size - 1), data, length); + } + + net_store_length(data - 1, length); +} diff --git a/sql/session_tracker.h b/sql/session_tracker.h new file mode 100644 index 00000000000..ec24d5a7a00 --- /dev/null +++ b/sql/session_tracker.h @@ -0,0 +1,159 @@ +#ifndef SESSION_TRACKER_INCLUDED +#define SESSION_TRACKER_INCLUDED + +/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + Copyright (c) 2016, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include "m_string.h" +#include "thr_lock.h" + +/* forward declarations */ +class THD; +class set_var; +class String; + + +enum enum_session_tracker +{ + SESSION_SYSVARS_TRACKER, /* Session system variables */ + CURRENT_SCHEMA_TRACKER, /* Current schema */ + SESSION_STATE_CHANGE_TRACKER, + SESSION_GTIDS_TRACKER, /* Tracks GTIDs */ + TRANSACTION_INFO_TRACKER /* Transaction state */ +}; + +#define SESSION_TRACKER_END TRANSACTION_INFO_TRACKER + + +/** + State_tracker + + An abstract class that defines the interface for any of the server's + 'session state change tracker'. A tracker, however, is a sub- class of + this class which takes care of tracking the change in value of a part- + icular session state type and thus defines various methods listed in this + interface. The change information is later serialized and transmitted to + the client through protocol's OK packet. + + Tracker system variables :- + A tracker is normally mapped to a system variable. So in order to enable, + disable or modify the sub-entities of a tracker, the user needs to modify + the respective system variable either through SET command or via command + line option. 
As required in system variable handling, this interface also + includes two functions to help in the verification of the supplied value + (ON_CHECK) and the updation (ON_UPDATE) of the tracker system variable, + namely - check() and update(). +*/ + +class State_tracker +{ +protected: + /** + Is tracking enabled for a particular session state type ? + + @note: It is cache to avoid virtual functions and checking thd + when we want mark tracker as changed. + */ + bool m_enabled; + + /** Has the session state type changed ? */ + bool m_changed; + +public: + /** Constructor */ + State_tracker() : m_enabled(false), m_changed(false) + {} + + /** Destructor */ + virtual ~State_tracker() + {} + + /** Getters */ + bool is_enabled() const + { return m_enabled; } + + bool is_changed() const + { return m_changed; } + + /** Called in the constructor of THD*/ + virtual bool enable(THD *thd)= 0; + + /** To be invoked when the tracker's system variable is checked (ON_CHECK). */ + virtual bool check(THD *thd, set_var *var)= 0; + + /** To be invoked when the tracker's system variable is updated (ON_UPDATE).*/ + virtual bool update(THD *thd)= 0; + + /** Store changed data into the given buffer. */ + virtual bool store(THD *thd, String *buf)= 0; + + /** Mark the entity as changed. */ + virtual void mark_as_changed(THD *thd, LEX_CSTRING *name)= 0; +}; + + +/** + Session_tracker + + This class holds an object each for all tracker classes and provides + methods necessary for systematic detection and generation of session + state change information. +*/ + +class Session_tracker +{ +private: + State_tracker *m_trackers[SESSION_TRACKER_END + 1]; + + /* The following two functions are private to disable copying. */ + Session_tracker(Session_tracker const &other) + { + DBUG_ASSERT(FALSE); + } + Session_tracker& operator= (Session_tracker const &rhs) + { + DBUG_ASSERT(FALSE); + return *this; + } + +public: + + Session_tracker(); + ~Session_tracker() + { + for (int i= 0; i <= SESSION_TRACKER_END; i ++) + delete m_trackers[i]; + } + void enable(THD *thd); + + /** Returns the pointer to the tracker object for the specified tracker. */ + inline State_tracker *get_tracker(enum_session_tracker tracker) const + { + return m_trackers[tracker]; + } + + inline void mark_as_changed(THD *thd, enum enum_session_tracker tracker, + LEX_CSTRING *data) + { + if (m_trackers[tracker]->is_enabled()) + m_trackers[tracker]->mark_as_changed(thd, data); + } + + + void store(THD *thd, String *main_buf); +}; + +#endif /* SESSION_TRACKER_INCLUDED */ diff --git a/sql/set_var.cc b/sql/set_var.cc index b178681e952..68d57abcdf6 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -204,8 +204,28 @@ bool sys_var::update(THD *thd, set_var *var) (on_update && on_update(this, thd, OPT_GLOBAL)); } else - return session_update(thd, var) || + { + bool ret= session_update(thd, var) || (on_update && on_update(this, thd, OPT_SESSION)); + + /* + Make sure we don't session-track variables that are not actually + part of the session. tx_isolation and and tx_read_only for example + exist as GLOBAL, SESSION, and one-shot ("for next transaction only"). 
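As a concrete illustration (assuming session_track_state_change has been
switched ON, it defaults to OFF, and the client negotiated
CLIENT_SESSION_TRACK): SET SESSION sql_mode='' takes the branch below, so
the following OK packet carries a state-change entry of the form 02 01 31
(type SESSION_TRACK_STATE_CHANGE, length 1, payload '1'), while
SET GLOBAL sql_mode='' leaves the tracker untouched and the OK packet
carries no session-state block.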
+ */ + if ((var->type == OPT_SESSION) && (!ret)) + { + /* + Here MySQL sends variable name to avoid reporting change of + the tracker itself, but we decided that it is not needed + */ + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); + + } + + return ret; + } } uchar *sys_var::session_value_ptr(THD *thd, const LEX_STRING *base) @@ -867,6 +887,8 @@ int set_var_user::update(THD *thd) MYF(0)); return -1; } + + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); return 0; } @@ -914,7 +936,11 @@ int set_var_role::check(THD *thd) int set_var_role::update(THD *thd) { #ifndef NO_EMBEDDED_ACCESS_CHECKS - return acl_setrole(thd, role.str, access); + int res= acl_setrole(thd, role.str, access); + if (!res) + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); + return res; #else return 0; #endif @@ -968,6 +994,8 @@ int set_var_collation_client::update(THD *thd) { thd->update_charset(character_set_client, collation_connection, character_set_results); + + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); thd->protocol_text.init(thd); thd->protocol_binary.init(thd); return 0; diff --git a/sql/set_var.h b/sql/set_var.h index 060a4e1a57c..6a650f2ec8a 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -385,7 +385,7 @@ extern SHOW_COMP_OPTION have_openssl; SHOW_VAR* enumerate_sys_vars(THD *thd, bool sorted, enum enum_var_type type); int fill_sysvars(THD *thd, TABLE_LIST *tables, COND *cond); -sys_var *find_sys_var(THD *thd, const char *str, uint length=0); +sys_var *find_sys_var(THD *thd, const char *str, size_t length=0); int sql_set_variables(THD *thd, List *var_list, bool free); #define SYSVAR_AUTOSIZE(VAR,VAL) \ diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 8dfa519ba2d..3bb1b3a6197 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7139,7 +7139,6 @@ ER_KILL_QUERY_DENIED_ERROR ER_NO_EIS_FOR_FIELD eng "Engine-independent statistics are not collected for column '%s'" ukr "Ðезалежна від типу таблиці ÑтатиÑтика не збираєтьÑÑ Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%s'" - # # Internal errors, not used # @@ -7151,6 +7150,10 @@ skip-to-error-number 3000 ER_MYSQL_57_TEST eng "5.7 test" +ER_NET_OK_PACKET_TOO_LARGE 08S01 + eng "OK packet too large" + ukr "Пакет OK надто великий" + # MariaDB extra error numbers starts from 4000 skip-to-error-number 4000 diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 6b048cec68b..8488e8dfd62 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2977,6 +2977,16 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, reinit_stmt_before_use(thd, m_lex); +#ifndef EMBEDDED_LIBRARY + /* + if there was instruction which changed tracking state before, result + can go with this command OK packet, so better do not cache the result. + */ + if ((thd->client_capabilities & CLIENT_SESSION_TRACK) && + (thd->server_status & SERVER_SESSION_STATE_CHANGED)) + thd->lex->safe_to_cache_query= 0; +#endif + if (open_tables) res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index d29dc0eff14..e91c80d3f36 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1465,6 +1465,9 @@ void THD::init(void) /* Initialize the Debug Sync Facility. See debug_sync.cc. 
*/ debug_sync_init_thread(this); #endif /* defined(ENABLED_DEBUG_SYNC) */ + + session_tracker.enable(this); + apc_target.init(&LOCK_thd_data); DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h b/sql/sql_class.h index b8c9614a31f..7a663bf7653 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -45,6 +45,7 @@ #include #include #include +#include "session_tracker.h" extern "C" void set_thd_stage_info(void *thd, @@ -688,6 +689,8 @@ typedef struct system_variables my_bool pseudo_slave_mode; + my_bool session_track_schema; + my_bool session_track_state_change; } SV; /** @@ -4054,6 +4057,7 @@ private: LEX_STRING invoker_host; public: + Session_tracker session_tracker; /* Flag, mutex and condition for a thread to wait for a signal from another thread. diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 1a0ee03ec34..128281c7686 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1035,7 +1035,10 @@ exit: it to 0. */ if (thd->db && cmp_db_names(thd->db, db) && !error) + { mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); + thd->session_tracker.mark_as_changed(thd, CURRENT_SCHEMA_TRACKER, NULL); + } my_dirend(dirp); DBUG_RETURN(error); } @@ -1459,7 +1462,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); - DBUG_RETURN(FALSE); + goto done; } else { @@ -1476,8 +1479,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) mysql_change_db_impl(thd, &INFORMATION_SCHEMA_NAME, SELECT_ACL, system_charset_info); - - DBUG_RETURN(FALSE); + goto done; } /* @@ -1564,8 +1566,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); /* The operation succeed. */ - - DBUG_RETURN(FALSE); + goto done; } else { @@ -1589,6 +1590,9 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) mysql_change_db_impl(thd, &new_db_file_name, db_access, db_default_cl); +done: + thd->session_tracker.mark_as_changed(thd, CURRENT_SCHEMA_TRACKER, NULL); + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); DBUG_RETURN(FALSE); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c152984876e..7cb97d156cb 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3852,6 +3852,12 @@ mysql_execute_command(THD *thd) /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ if (create_info.tmp_table()) thd->variables.option_bits|= OPTION_KEEP_LOG; + /* in case of create temp tables if @@session_track_state_change is + ON then send session state notification in OK packet */ + if(create_info.options & HA_LEX_CREATE_TMP_TABLE) + thd->session_tracker.mark_as_changed(thd, + SESSION_STATE_CHANGE_TRACKER, + NULL); my_ok(thd); } } @@ -4608,6 +4614,14 @@ end_with_restore_list: /* DDL and binlog write order are protected by metadata locks. 
*/ res= mysql_rm_table(thd, first_table, lex->if_exists(), lex->tmp_table()); + + /* when dropping temporary tables if @@session_track_state_change is ON then + send the boolean tracker in the OK packet */ + if(!res && (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) + { + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); + } break; } case SQLCOM_SHOW_PROCESSLIST: diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 848358e517a..98bc5d606af 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2781,7 +2781,7 @@ static void update_func_double(THD *thd, struct st_mysql_sys_var *var, ****************************************************************************/ -sys_var *find_sys_var(THD *thd, const char *str, uint length) +sys_var *find_sys_var(THD *thd, const char *str, size_t length) { sys_var *var; sys_var_pluginvar *pi= NULL; diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index efa48b22ce8..96f8411f8ed 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -120,6 +120,8 @@ struct st_plugin_int }; +extern mysql_mutex_t LOCK_plugin; + /* See intern_plugin_lock() for the explanation for the conditionally defined plugin_ref type diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index e8a7dce5771..cc41bd6284e 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2758,7 +2758,11 @@ void mysql_sql_stmt_prepare(THD *thd) thd->stmt_map.erase(stmt); } else + { + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); my_ok(thd, 0L, 0L, "Statement prepared"); + } DBUG_VOID_RETURN; } @@ -3208,6 +3212,8 @@ void mysql_sql_stmt_close(THD *thd) else { stmt->deallocate(); + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); my_ok(thd); } } diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 767154e019d..28e7b899133 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2000, 2013, Oracle and/or its affiliates. 
+ Copyright (c) 2016, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -1157,3 +1158,16 @@ uint convert_to_printable(char *to, size_t to_len, *t= '\0'; return t - to; } + +void String::q_net_store_length(ulonglong length) +{ + char *pos= (char *) net_store_length((uchar *)(Ptr + str_length), length); + str_length= pos - Ptr; +} + +void String::q_net_store_data(const uchar *from, size_t length) +{ + q_net_store_length(length); + bool res= append((const char *)from, length); + DBUG_ASSERT(!res); +} diff --git a/sql/sql_string.h b/sql/sql_string.h index 51a11c7a4ff..10f3c4aee43 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -495,6 +495,11 @@ public: { Ptr[str_length++] = c; } + void q_append2b(const uint32 n) + { + int2store(Ptr + str_length, n); + str_length += 2; + } void q_append(const uint32 n) { int4store(Ptr + str_length, n); @@ -559,6 +564,17 @@ public: return Ptr+ old_length; /* Area to use */ } + inline bool prep_alloc(uint32 arg_length, uint32 step_alloc) + { + uint32 new_length= arg_length + str_length; + if (new_length > Alloced_length) + { + if (realloc(new_length + step_alloc)) + return true; + } + return false; + } + inline bool append(const char *s, uint32 arg_length, uint32 step_alloc) { uint32 new_length= arg_length + str_length; @@ -623,6 +639,8 @@ public: { return !sortcmp(this, other, cs); } + void q_net_store_length(ulonglong length); + void q_net_store_data(const uchar *from, size_t length); }; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index c9194bcb276..bed116a2930 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -55,6 +55,7 @@ #include "transaction.h" #include "sql_audit.h" + #ifdef __WIN__ #include #endif @@ -9228,6 +9229,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, { goto err_new_table_cleanup; } + /* in case of alter temp table send the tracker in OK packet */ + thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, + NULL); } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 4047b5d6781..ea9f1d14eee 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5374,3 +5374,36 @@ static Sys_var_ulong Sys_log_tc_size( DEFAULT(my_getpagesize() * 6), BLOCK_SIZE(my_getpagesize())); #endif + + +static bool update_session_track_schema(sys_var *self, THD *thd, + enum_var_type type) +{ + DBUG_ENTER("update_session_track_schema"); + DBUG_RETURN(thd->session_tracker.get_tracker(CURRENT_SCHEMA_TRACKER)->update(thd)); +} + +static Sys_var_mybool Sys_session_track_schema( + "session_track_schema", + "Track changes to the 'default schema'.", + SESSION_VAR(session_track_schema), + CMD_LINE(OPT_ARG), DEFAULT(TRUE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, + ON_CHECK(0), + ON_UPDATE(update_session_track_schema)); + +static bool update_session_track_state_change(sys_var *self, THD *thd, + enum_var_type type) +{ + DBUG_ENTER("update_session_track_state_change"); + DBUG_RETURN(thd->session_tracker.get_tracker(SESSION_STATE_CHANGE_TRACKER)->update(thd)); +} + +static Sys_var_mybool Sys_session_track_state_change( + "session_track_state_change", + "Track changes to the 'session state'.", + SESSION_VAR(session_track_state_change), + CMD_LINE(OPT_ARG), DEFAULT(FALSE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, + ON_CHECK(0), + ON_UPDATE(update_session_track_state_change)); -- cgit v1.2.1 From c8948b0d0db4c182a744bc8bdbde7cbccff3d57d Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 15 Apr 2016 20:47:45 +0200 Subject: MDEV-8931: (server part 
of) session state tracking System variables tracking --- include/mysql_com.h | 8 +- mysql-test/r/mysqld--help.result | 3 + .../r/session_track_system_variables_basic.result | 162 +++++ .../sys_vars/r/sysvars_server_notembedded.result | 14 + .../t/session_track_system_variables_basic.test | 133 ++++ sql/mysqld.cc | 20 +- sql/mysqld.h | 1 + sql/protocol.cc | 1 - sql/session_tracker.cc | 765 ++++++++++++++++++++- sql/session_tracker.h | 20 +- sql/set_var.cc | 31 +- sql/set_var.h | 7 + sql/share/errmsg-utf8.txt | 30 +- sql/sql_class.cc | 4 + sql/sql_class.h | 2 + sql/sql_plugin.cc | 54 +- sql/sql_plugin.h | 2 + sql/sql_show.cc | 230 ++++--- sql/sql_show.h | 6 + sql/sql_string.h | 4 +- sql/sys_vars.cc | 10 + sql/sys_vars.ic | 129 +++- 22 files changed, 1483 insertions(+), 153 deletions(-) create mode 100644 mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result create mode 100644 mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test diff --git a/include/mysql_com.h b/include/mysql_com.h index 9eb0e4f2d74..7433411f29a 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -288,7 +288,7 @@ enum enum_server_command CLIENT_MULTI_RESULTS | \ CLIENT_PS_MULTI_RESULTS | \ CLIENT_SSL_VERIFY_SERVER_CERT | \ - CLIENT_REMEMBER_OPTIONS | \ + CLIENT_REMEMBER_OPTIONS | \ MARIADB_CLIENT_PROGRESS | \ CLIENT_PLUGIN_AUTH | \ CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | \ @@ -556,9 +556,9 @@ enum enum_mysql_set_option */ enum enum_session_state_type { - SESSION_TRACK_SYSTEM_VARIABLES, /* Session system variables */ - SESSION_TRACK_SCHEMA, /* Current schema */ - SESSION_TRACK_STATE_CHANGE, /* track session state changes */ + SESSION_TRACK_SYSTEM_VARIABLES, /* Session system variables */ + SESSION_TRACK_SCHEMA, /* Current schema */ + SESSION_TRACK_STATE_CHANGE, /* track session state changes */ SESSION_TRACK_GTIDS, SESSION_TRACK_TRANSACTION_CHARACTERISTICS, /* Transaction chistics */ SESSION_TRACK_TRANSACTION_STATE /* Transaction state */ diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 8e82635ec91..7fc9fedd456 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -908,6 +908,8 @@ The following options may be given as the first argument: (Defaults to on; use --skip-session-track-schema to disable.) --session-track-state-change Track changes to the 'session state'. + --session-track-system-variables=name + Track changes in registered system variables. --show-slave-auth-info Show user and password in SHOW SLAVE HOSTS on this master. 
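The new option accepts a comma-separated list of system-variable names; as
the tests added below exercise, "*" tracks every session variable, the
empty string disables tracking, duplicate names are collapsed and the
stored value is kept sorted. The default list is
autocommit,character_set_client,character_set_connection,character_set_results,time_zone,
and an invalid list aborts server startup via the server_boot_verify()
check added to mysqld.cc.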
@@ -1392,6 +1394,7 @@ secure-file-priv (No default value) server-id 1 session-track-schema TRUE session-track-state-change FALSE +session-track-system-variables autocommit,character_set_client,character_set_connection,character_set_results,time_zone show-slave-auth-info FALSE silent-startup FALSE skip-grant-tables TRUE diff --git a/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result new file mode 100644 index 00000000000..e451a22d322 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result @@ -0,0 +1,162 @@ +# +# Variable name : session_track_system_variables +# Scope : Global & Session +# +# Global - default +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone +# Session - default +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone + +# via INFORMATION_SCHEMA.GLOBAL_VARIABLES +SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; +VARIABLE_NAME VARIABLE_VALUE +SESSION_TRACK_SCHEMA ON +SESSION_TRACK_STATE_CHANGE OFF +SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +# via INFORMATION_SCHEMA.SESSION_VARIABLES +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; +VARIABLE_NAME VARIABLE_VALUE +SESSION_TRACK_SCHEMA ON +SESSION_TRACK_STATE_CHANGE OFF +SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SET @global_saved_tmp = @@global.session_track_system_variables; + +# Altering global variable's value +SET @@global.session_track_system_variables='autocommit'; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone + +# Altering session variable's value +SET @@session.session_track_system_variables='autocommit'; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit + +# Variables' values in a new session. +connect con1,"127.0.0.1",root,,test,$MASTER_MYPORT,; +# Global - expect "autocommit" +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit + +# Session - expect "autocommit" +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit + +# Switching to the default connection. +connection default; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit + +# Test if DEFAULT is working as expected. 
+SET @@global.session_track_system_variables = DEFAULT; +SET @@session.session_track_system_variables = DEFAULT; + +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone + +# Variables' values in a new session (con2). +connect con2,"127.0.0.1",root,,test,$MASTER_MYPORT,; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone + +# Altering session should not affect global. +SET @@session.session_track_system_variables = 'sql_mode'; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +sql_mode + +# Variables' values in a new session (con3). +connect con3,"127.0.0.1",root,,test,$MASTER_MYPORT,; +# Altering global should not affect session. +SET @@global.session_track_system_variables = 'sql_mode'; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +sql_mode +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +autocommit,character_set_client,character_set_connection,character_set_results,time_zone + +# Switching to the default connection. +connection default; +# Testing NULL +SET @@global.session_track_system_variables = NULL; +SET @@session.session_track_system_variables = NULL; +# Global - expect "" instead of NULL +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +NULL +# Session - expect "" instead of NULL +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables + +# testing with duplicate entries. 
+SET @@global.session_track_system_variables= "time_zone"; +SET @@session.session_track_system_variables= "time_zone"; +SET @@global.session_track_system_variables= "sql_mode,sql_mode"; +SET @@session.session_track_system_variables= "sql_mode,sql_mode"; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +sql_mode +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +sql_mode + +# testing ordering +SET @@global.session_track_system_variables= "time_zone,sql_mode"; +SET @@session.session_track_system_variables= "time_zone,sql_mode"; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +sql_mode,time_zone +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +sql_mode,time_zone + +# special values +SET @@global.session_track_system_variables= "*"; +SET @@session.session_track_system_variables= "*"; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables +* +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables +* +SET @@global.session_track_system_variables= ""; +SET @@session.session_track_system_variables= ""; +SELECT @@global.session_track_system_variables; +@@global.session_track_system_variables + +SELECT @@session.session_track_system_variables; +@@session.session_track_system_variables + + +# Restoring the original values. +SET @@global.session_track_system_variables = @global_saved_tmp; +# End of tests. diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index e7f953e5f4c..c0d07280253 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3817,6 +3817,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME SESSION_TRACK_SYSTEM_VARIABLES +SESSION_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone +GLOBAL_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone +VARIABLE_SCOPE SESSION +VARIABLE_TYPE VARCHAR +VARIABLE_COMMENT Track changes in registered system variables. 
+NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME SKIP_EXTERNAL_LOCKING SESSION_VALUE NULL GLOBAL_VALUE ON diff --git a/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test b/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test new file mode 100644 index 00000000000..bbb32bb67a3 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test @@ -0,0 +1,133 @@ +--source include/not_embedded.inc + +--echo # +--echo # Variable name : session_track_system_variables +--echo # Scope : Global & Session +--echo # + +--echo # Global - default +SELECT @@global.session_track_system_variables; +--echo # Session - default +SELECT @@session.session_track_system_variables; +--echo + +--echo # via INFORMATION_SCHEMA.GLOBAL_VARIABLES +--disable_warnings +SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; +--enable_warnings + +--echo # via INFORMATION_SCHEMA.SESSION_VARIABLES +--disable_warnings +SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; +--enable_warnings + +# Save the global value to be used to restore the original value. +SET @global_saved_tmp = @@global.session_track_system_variables; +--echo + +--echo # Altering global variable's value +SET @@global.session_track_system_variables='autocommit'; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Altering session variable's value +SET @@session.session_track_system_variables='autocommit'; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Variables' values in a new session. +connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,); + +--echo # Global - expect "autocommit" +SELECT @@global.session_track_system_variables; +--echo +--echo # Session - expect "autocommit" +SELECT @@session.session_track_system_variables; +--echo + +--echo # Switching to the default connection. +connection default; + +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Test if DEFAULT is working as expected. +SET @@global.session_track_system_variables = DEFAULT; +SET @@session.session_track_system_variables = DEFAULT; +--echo + +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Variables' values in a new session (con2). +connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,); + +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Altering session should not affect global. +SET @@session.session_track_system_variables = 'sql_mode'; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Variables' values in a new session (con3). +connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,); + +--echo # Altering global should not affect session. +SET @@global.session_track_system_variables = 'sql_mode'; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # Switching to the default connection. 
+connection default; + +--echo # Testing NULL +SET @@global.session_track_system_variables = NULL; +SET @@session.session_track_system_variables = NULL; + +--echo # Global - expect "" instead of NULL +SELECT @@global.session_track_system_variables; +--echo # Session - expect "" instead of NULL +SELECT @@session.session_track_system_variables; + +--echo # testing with duplicate entries. +# Lets first set it to some valid value. +SET @@global.session_track_system_variables= "time_zone"; +SET @@session.session_track_system_variables= "time_zone"; +# Now set with duplicate entries (must pass) +SET @@global.session_track_system_variables= "sql_mode,sql_mode"; +SET @@session.session_track_system_variables= "sql_mode,sql_mode"; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # testing ordering +SET @@global.session_track_system_variables= "time_zone,sql_mode"; +SET @@session.session_track_system_variables= "time_zone,sql_mode"; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + +--echo # special values +SET @@global.session_track_system_variables= "*"; +SET @@session.session_track_system_variables= "*"; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +SET @@global.session_track_system_variables= ""; +SET @@session.session_track_system_variables= ""; +SELECT @@global.session_track_system_variables; +SELECT @@session.session_track_system_variables; +--echo + + +--echo # Restoring the original values. +SET @@global.session_track_system_variables = @global_saved_tmp; + +--echo # End of tests. diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8fa8f01b894..b59c6c7048f 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -690,6 +690,14 @@ THD *next_global_thread(THD *thd) } struct system_variables global_system_variables; +/** + Following is just for options parsing, used with a difference against + global_system_variables. + + TODO: something should be done to get rid of following variables +*/ +const char *current_dbug_option=""; + struct system_variables max_system_variables; struct system_status_var global_status_var; @@ -1463,7 +1471,6 @@ my_bool plugins_are_initialized= FALSE; #ifndef DBUG_OFF static const char* default_dbug_option; #endif -const char *current_dbug_option=""; #ifdef HAVE_LIBWRAP const char *libwrapName= NULL; int allow_severity = LOG_INFO; @@ -5278,6 +5285,17 @@ static int init_server_components() } plugins_are_initialized= TRUE; /* Don't separate from init function */ + { + Session_tracker session_track_system_variables_check; + if (session_track_system_variables_check. 
+ server_boot_verify(system_charset_info)) + { + sql_print_error("The variable session_track_system_variables has " + "invalid values."); + unireg_abort(1); + } + } + /* we do want to exit if there are any other unknown options */ if (remaining_argc > 1) { diff --git a/sql/mysqld.h b/sql/mysqld.h index 846a01a9427..68eab815564 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -135,6 +135,7 @@ extern my_bool lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; extern my_bool opt_secure_auth; extern const char *current_dbug_option; +extern const char *current_session_track_system_variables; extern char* opt_secure_file_priv; extern char* opt_secure_backup_file_priv; extern size_t opt_secure_backup_file_priv_len; diff --git a/sql/protocol.cc b/sql/protocol.cc index e12c72dd988..77dedfbc7d2 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -276,7 +276,6 @@ net_send_ok(THD *thd, /* the info field */ if (state_changed || (message && message[0])) { - DBUG_ASSERT(strlen(message) <= MYSQL_ERRMSG_SIZE); store.q_net_store_data((uchar*) message, message ? strlen(message) : 0); } diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index ad9906d7159..cfbb1704318 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -15,6 +15,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "sql_plugin.h" #include "session_tracker.h" #include "hash.h" @@ -23,6 +24,7 @@ #include "sql_class.h" #include "sql_show.h" #include "sql_plugin.h" +#include "set_var.h" class Not_implemented_tracker : public State_tracker { @@ -40,6 +42,182 @@ public: }; +static my_bool name_array_filler(void *ptr, void *data_ptr); +/** + Session_sysvars_tracker + + This is a tracker class that enables & manages the tracking of session + system variables. It internally maintains a hash of user supplied variable + references and a boolean field to store if the variable was changed by the + last statement. +*/ + +class Session_sysvars_tracker : public State_tracker +{ +private: + + struct sysvar_node_st { + sys_var *m_svar; + bool *test_load; + bool m_changed; + }; + + class vars_list + { + private: + /** + Registered system variables. (@@session_track_system_variables) + A hash to store the name of all the system variables specified by the + user. + */ + HASH m_registered_sysvars; + /** Size of buffer for string representation */ + size_t buffer_length; + myf m_mem_flag; + /** + If TRUE then we want to check all session variable. + */ + bool track_all; + void init() + { + my_hash_init(&m_registered_sysvars, + &my_charset_bin, + 4, 0, 0, (my_hash_get_key) sysvars_get_key, + my_free, MYF(HASH_UNIQUE | + ((m_mem_flag & MY_THREAD_SPECIFIC) ? + HASH_THREAD_SPECIFIC : 0))); + } + void free_hash() + { + if (my_hash_inited(&m_registered_sysvars)) + { + my_hash_free(&m_registered_sysvars); + } + } + + uchar* search(const sys_var *svar) + { + return (my_hash_search(&m_registered_sysvars, (const uchar *)&svar, + sizeof(sys_var *))); + } + + public: + vars_list() : + buffer_length(0) + { + m_mem_flag= current_thd ? MY_THREAD_SPECIFIC : 0; + init(); + } + + size_t get_buffer_length() + { + DBUG_ASSERT(buffer_length != 0); // asked earlier then should + return buffer_length; + } + ~vars_list() + { + /* free the allocated hash. 
*/ + if (my_hash_inited(&m_registered_sysvars)) + { + my_hash_free(&m_registered_sysvars); + } + } + + uchar* search(sysvar_node_st *node, const sys_var *svar) + { + uchar *res; + res= search(svar); + if (!res) + { + if (track_all) + { + insert(node, svar, m_mem_flag); + return search(svar); + } + } + return res; + } + + uchar* operator[](ulong idx) + { + return my_hash_element(&m_registered_sysvars, idx); + } + bool insert(sysvar_node_st *node, const sys_var *svar, myf mem_flag); + void reset(); + void copy(vars_list* from, THD *thd); + bool parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error, + const CHARSET_INFO *char_set, bool session_created); + bool construct_var_list(char *buf, size_t buf_len); + }; + /** + Two objects of vars_list type are maintained to manage + various operations. + */ + vars_list *orig_list, *tool_list; + +public: + Session_sysvars_tracker() + { + orig_list= new (std::nothrow) vars_list(); + tool_list= new (std::nothrow) vars_list(); + } + + ~Session_sysvars_tracker() + { + if (orig_list) + delete orig_list; + if (tool_list) + delete tool_list; + } + + size_t get_buffer_length() + { + return orig_list->get_buffer_length(); + } + bool construct_var_list(char *buf, size_t buf_len) + { + return orig_list->construct_var_list(buf, buf_len); + } + + /** + Method used to check the validity of string provided + for session_track_system_variables during the server + startup. + */ + static bool server_init_check(THD *thd, const CHARSET_INFO *char_set, + LEX_STRING var_list) + { + vars_list dummy; + bool result; + result= dummy.parse_var_list(thd, var_list, false, char_set, false); + return result; + } + static bool server_init_process(THD *thd, const CHARSET_INFO *char_set, + LEX_STRING var_list) + { + vars_list dummy; + bool result; + result= dummy.parse_var_list(thd, var_list, false, char_set, false); + if (!result) + dummy.construct_var_list(var_list.str, var_list.length + 1); + return result; + } + + void reset(); + bool enable(THD *thd); + bool check(THD *thd, set_var *var); + bool check_str(THD *thd, LEX_STRING val); + bool update(THD *thd); + bool store(THD *thd, String *buf); + void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); + /* callback */ + static uchar *sysvars_get_key(const char *entry, size_t *length, + my_bool not_used __attribute__((unused))); + + friend my_bool name_array_filler(void *ptr, void *data_ptr); +}; + + /** Current_schema_tracker, @@ -108,6 +286,540 @@ public: /* To be used in expanding the buffer. */ static const unsigned int EXTRA_ALLOC= 1024; + +void Session_sysvars_tracker::vars_list::reset() +{ + buffer_length= 0; + track_all= 0; + if (m_registered_sysvars.records) + my_hash_reset(&m_registered_sysvars); +} + +/** + Copy the given list. + + @param from Source vars_list object. + @param thd THD handle to retrive the charset in use. + + @retval true there is something to track + @retval false nothing to track +*/ + +void Session_sysvars_tracker::vars_list::copy(vars_list* from, THD *thd) +{ + reset(); + track_all= from->track_all; + free_hash(); + buffer_length= from->buffer_length; + m_registered_sysvars= from->m_registered_sysvars; + from->init(); +} + +/** + Inserts the variable to be tracked into m_registered_sysvars hash. + + @param node Node to be inserted. 
+ @param svar address of the system variable + + @retval false success + @retval true error +*/ + +bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node, + const sys_var *svar, + myf mem_flag) +{ + if (!node) + { + if (!(node= (sysvar_node_st *) my_malloc(sizeof(sysvar_node_st), + MYF(MY_WME | mem_flag)))) + { + reset(); + return true; + } + } + + node->m_svar= (sys_var *)svar; + node->test_load= node->m_svar->test_load; + node->m_changed= false; + if (my_hash_insert(&m_registered_sysvars, (uchar *) node)) + { + my_free(node); + if (!search((sys_var *)svar)) + { + //EOF (error is already reported) + reset(); + return true; + } + } + return false; +} + +/** + Parse the specified system variables list. + + @Note In case of invalid entry a warning is raised per invalid entry. + This is done in order to handle 'potentially' valid system + variables from uninstalled plugins which might get installed in + future. + + + @param thd [IN] The thd handle. + @param var_list [IN] System variable list. + @param throw_error [IN] bool when set to true, returns an error + in case of invalid/duplicate values. + @param char_set [IN] charecter set information used for string + manipulations. + @param session_created [IN] bool variable which says if the parse is + already executed once. The mutex on variables + is not acquired if this variable is false. + + @return + true Error + false Success +*/ +bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, + LEX_STRING var_list, + bool throw_error, + const CHARSET_INFO *char_set, + bool session_created) +{ + const char separator= ','; + char *token, *lasts= NULL; + size_t rest= var_list.length; + + if (!var_list.str || var_list.length == 0) + { + buffer_length= 1; + return false; + } + + if(!strcmp(var_list.str,(const char *)"*")) + { + track_all= true; + buffer_length= 2; + return false; + } + + buffer_length= var_list.length + 1; + token= var_list.str; + + track_all= false; + /* + If Lock to the plugin mutex is not acquired here itself, it results + in having to acquire it multiple times in find_sys_var_ex for each + token value. Hence the mutex is handled here to avoid a performance + overhead. + */ + if (!thd || session_created) + mysql_mutex_lock(&LOCK_plugin); + for (;;) + { + sys_var *svar; + LEX_STRING var; + + lasts= (char *) memchr(token, separator, rest); + + var.str= token; + if (lasts) + { + var.length= (lasts - token); + rest-= var.length + 1; + } + else + var.length= rest; + + /* Remove leading/trailing whitespace. 
*/ + trim_whitespace(char_set, &var); + + if ((svar= find_sys_var_ex(thd, var.str, var.length, throw_error, true))) + { + if (insert(NULL, svar, m_mem_flag) == TRUE) + goto error; + } + else if (throw_error && session_created && thd) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "%.*s is not a valid system variable and will" + "be ignored.", (int)var.length, token); + } + else + goto error; + + if (lasts) + token= lasts + 1; + else + break; + } + if (!thd || session_created) + mysql_mutex_unlock(&LOCK_plugin); + + return false; + +error: + if (!thd || session_created) + mysql_mutex_unlock(&LOCK_plugin); + return true; +} + +struct name_array_filler_data +{ + LEX_CSTRING **names; + uint idx; + +}; + +/** Collects variable references into array */ +static my_bool name_array_filler(void *ptr, void *data_ptr) +{ + Session_sysvars_tracker::sysvar_node_st *node= + (Session_sysvars_tracker::sysvar_node_st *)ptr; + name_array_filler_data *data= (struct name_array_filler_data *)data_ptr; + if (*node->test_load) + data->names[data->idx++]= &node->m_svar->name; + return FALSE; +} + +/* Sorts variable references array */ +static int name_array_sorter(const void *a, const void *b) +{ + LEX_CSTRING **an= (LEX_CSTRING **)a, **bn=(LEX_CSTRING **)b; + size_t min= MY_MIN((*an)->length, (*bn)->length); + int res= strncmp((*an)->str, (*bn)->str, min); + if (res == 0) + res= ((int)(*bn)->length)- ((int)(*an)->length); + return res; +} + +/** + Construct variable list by internal hash with references +*/ + +bool Session_sysvars_tracker::vars_list::construct_var_list(char *buf, + size_t buf_len) +{ + struct name_array_filler_data data; + size_t left= buf_len; + size_t names_size= m_registered_sysvars.records * sizeof(LEX_CSTRING *); + const char separator= ','; + + if (unlikely(buf_len < 1)) + return true; + + if (unlikely(track_all)) + { + if (buf_len < 2) + return true; + buf[0]= '*'; + buf[1]= '\0'; + return false; + } + + if (m_registered_sysvars.records == 0) + { + buf[0]= '\0'; + return false; + } + + data.names= (LEX_CSTRING**)my_safe_alloca(names_size); + + if (unlikely(!data.names)) + return true; + + data.idx= 0; + + mysql_mutex_lock(&LOCK_plugin); + my_hash_iterate(&m_registered_sysvars, &name_array_filler, &data); + DBUG_ASSERT(data.idx <= m_registered_sysvars.records); + + + if (m_registered_sysvars.records == 0) + { + mysql_mutex_unlock(&LOCK_plugin); + buf[0]= '\0'; + return false; + } + + my_qsort(data.names, data.idx, sizeof(LEX_CSTRING *), + &name_array_sorter); + + for(uint i= 0; i < data.idx; i++) + { + LEX_CSTRING *nm= data.names[i]; + size_t ln= nm->length + 1; + if (ln > left) + { + mysql_mutex_unlock(&LOCK_plugin); + my_safe_afree(data.names, names_size); + return true; + } + memcpy(buf, nm->str, nm->length); + buf[nm->length]= separator; + buf+= ln; + left-= ln; + } + mysql_mutex_unlock(&LOCK_plugin); + + buf--; buf[0]= '\0'; + my_safe_afree(data.names, names_size); + + return false; +} + +/** + Enable session tracker by parsing global value of tracked variables. + + @param thd [IN] The thd handle. 
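To make the tokenising in vars_list::parse_var_list() above easier to follow, here is a standalone sketch of the same idea using only the standard library. The helper names (parse_tracked_list, trim) are illustrative and not part of the server; the real code additionally resolves every token through find_sys_var_ex() under LOCK_plugin.

    // Standalone sketch of the comma-separated list handling in
    // vars_list::parse_var_list(): "*" means "track everything",
    // an empty value means "track nothing".
    #include <iostream>
    #include <string>
    #include <vector>

    typedef std::string::size_type pos_t;

    static std::string trim(const std::string &s)
    {
      pos_t b= s.find_first_not_of(" \t");
      if (b == std::string::npos)
        return "";
      pos_t e= s.find_last_not_of(" \t");
      return s.substr(b, e - b + 1);
    }

    static bool parse_tracked_list(const std::string &val, bool *track_all,
                                   std::vector<std::string> *names)
    {
      *track_all= (val == "*");
      if (val.empty() || *track_all)
        return false;                        // success, nothing else to do
      pos_t start= 0;
      for (;;)
      {
        pos_t comma= val.find(',', start);
        pos_t end= (comma == std::string::npos) ? val.size() : comma;
        std::string tok= trim(val.substr(start, end - start));
        if (tok.empty())
          return true;                       // reject empty entries ("a,,b")
        names->push_back(tok);
        if (comma == std::string::npos)
          return false;                      // success
        start= comma + 1;
      }
    }

    int main()
    {
      bool all;
      std::vector<std::string> names;
      if (!parse_tracked_list(" autocommit , time_zone", &all, &names))
        for (const std::string &n : names)
          std::cout << n << '\n';            // autocommit, time_zone
    }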
+ + @retval true Error + @retval false Success +*/ + +bool Session_sysvars_tracker::enable(THD *thd) +{ + sys_var *svar; + + mysql_mutex_lock(&LOCK_plugin); + svar= find_sys_var_ex(thd, SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, + SESSION_TRACK_SYSTEM_VARIABLES_NAME.length, + false, true); + DBUG_ASSERT(svar); + + set_var tmp(thd, SHOW_OPT_GLOBAL, svar, &null_lex_str, NULL); + svar->session_save_default(thd, &tmp); + + if (tool_list->parse_var_list(thd, tmp.save_result.string_value, + true, thd->charset(), false) == true) + { + mysql_mutex_unlock(&LOCK_plugin); + return true; + } + mysql_mutex_unlock(&LOCK_plugin); + orig_list->copy(tool_list, thd); + m_enabled= true; + + return false; +} + + +/** + Check system variable name(s). + + @note This function is called from the ON_CHECK() function of the + session_track_system_variables' sys_var class. + + @param thd [IN] The thd handle. + @param var [IN] A pointer to set_var holding the specified list of + system variable names. + + @retval true Error + @retval false Success +*/ + +inline bool Session_sysvars_tracker::check(THD *thd, set_var *var) +{ + return check_str(thd, var->save_result.string_value); +} + +inline bool Session_sysvars_tracker::check_str(THD *thd, LEX_STRING val) +{ + tool_list->reset(); + return tool_list->parse_var_list(thd, val, true, + thd->charset(), true); +} + + +/** + Once the value of the @@session_track_system_variables has been + successfully updated, this function calls + Session_sysvars_tracker::vars_list::copy updating the hash in orig_list + which represents the system variables to be tracked. + + @note This function is called from the ON_UPDATE() function of the + session_track_system_variables' sys_var class. + + @param thd [IN] The thd handle. + + @retval true Error + @retval false Success +*/ + +bool Session_sysvars_tracker::update(THD *thd) +{ + orig_list->copy(tool_list, thd); + return false; +} + + +/** + Store the data for changed system variables in the specified buffer. + Once the data is stored, we reset the flags related to state-change + (see reset()). + + @param thd [IN] The thd handle. + @paran buf [INOUT] Buffer to store the information to. + + @retval true Error + @retval false Success +*/ + +bool Session_sysvars_tracker::store(THD *thd, String *buf) +{ + char val_buf[SHOW_VAR_FUNC_BUFF_SIZE]; + SHOW_VAR show; + const char *value; + sysvar_node_st *node; + const CHARSET_INFO *charset; + size_t val_length, length; + int idx= 0; + + /* As its always system variable. */ + show.type= SHOW_SYS; + + while ((node= (sysvar_node_st *) (*orig_list)[idx])) + { + if (node->m_changed) + { + mysql_mutex_lock(&LOCK_plugin); + if (!*node->test_load) + { + mysql_mutex_unlock(&LOCK_plugin); + continue; + } + sys_var *svar= node->m_svar; + show.name= svar->name.str; + show.value= (char *) svar; + + value= get_one_variable(thd, &show, OPT_SESSION, SHOW_SYS, NULL, + &charset, val_buf, &val_length); + mysql_mutex_unlock(&LOCK_plugin); + + length= net_length_size(svar->name.length) + + svar->name.length + + net_length_size(val_length) + + val_length; + + compile_time_assert(SESSION_TRACK_SYSTEM_VARIABLES < 251); + buf->prep_alloc(1 + net_length_size(length) + length, EXTRA_ALLOC); + + /* Session state type (SESSION_TRACK_SYSTEM_VARIABLES) */ + buf->q_net_store_length((ulonglong)SESSION_TRACK_SYSTEM_VARIABLES); + + /* Length of the overall entity. */ + buf->q_net_store_length((ulonglong)length); + + /* System variable's name (length-encoded string). 
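The store() code around this point serialises each tracked variable as a one-byte tracker type, a length-encoded total size, and then the name and value as length-encoded strings. Below is a minimal model of that layout, restricted to lengths under 251 so every length encoding fits in a single byte; the helper names are illustrative, not the server's.

    // Minimal model of the payload built in Session_sysvars_tracker::store():
    // <tracker type><total length><lenenc name><lenenc value>.
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    static void store_lenenc_str(std::vector<uint8_t> *out, const std::string &s)
    {
      out->push_back(static_cast<uint8_t>(s.size()));   // assumes s.size() < 251
      out->insert(out->end(), s.begin(), s.end());
    }

    static std::vector<uint8_t> encode_entity(uint8_t tracker_type,
                                              const std::string &name,
                                              const std::string &value)
    {
      std::vector<uint8_t> body, out;
      store_lenenc_str(&body, name);
      store_lenenc_str(&body, value);

      out.push_back(tracker_type);                      // 0 == system variables
      out.push_back(static_cast<uint8_t>(body.size())); // overall entity length
      out.insert(out.end(), body.begin(), body.end());
      return out;
    }

    int main()
    {
      std::vector<uint8_t> e= encode_entity(0, "autocommit", "ON");
      for (uint8_t b : e)
        std::printf("%02x ", b);
      std::printf("\n");       // 00 0e 0a 61 75 74 6f 63 6f 6d 6d 69 74 02 4f 4e
    }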
*/ + buf->q_net_store_data((const uchar*)svar->name.str, svar->name.length); + + /* System variable's value (length-encoded string). */ + buf->q_net_store_data((const uchar*)value, val_length); + } + ++ idx; + } + + reset(); + + return false; +} + + +/** + Mark the system variable as changed. + + @param [IN] pointer on a variable +*/ + +void Session_sysvars_tracker::mark_as_changed(THD *thd, + LEX_CSTRING *var) +{ + sysvar_node_st *node= NULL; + sys_var *svar= (sys_var *)var; + /* + Check if the specified system variable is being tracked, if so + mark it as changed and also set the class's m_changed flag. + */ + if ((node= (sysvar_node_st *) (orig_list->search(node, svar)))) + { + node->m_changed= true; + m_changed= true; + /* do not cache the statement when there is change in session state */ + thd->lex->safe_to_cache_query= 0; + thd->server_status|= SERVER_SESSION_STATE_CHANGED; + } +} + + +/** + Supply key to a hash. + + @param entry [IN] A single entry. + @param length [OUT] Length of the key. + @param not_used Unused. + + @return Pointer to the key buffer. +*/ + +uchar *Session_sysvars_tracker::sysvars_get_key(const char *entry, + size_t *length, + my_bool not_used __attribute__((unused))) +{ + *length= sizeof(sys_var *); + return (uchar *) &(((sysvar_node_st *) entry)->m_svar); +} + + +/** + Prepare/reset the m_registered_sysvars hash for next statement. +*/ + +void Session_sysvars_tracker::reset() +{ + sysvar_node_st *node; + int idx= 0; + + while ((node= (sysvar_node_st *) (*orig_list)[idx])) + { + node->m_changed= false; + ++ idx; + } + m_changed= false; +} + +static Session_sysvars_tracker* sysvar_tracker(THD *thd) +{ + return (Session_sysvars_tracker*) + thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER); +} + +bool sysvartrack_validate_value(THD *thd, const char *str, size_t len) +{ + LEX_STRING tmp= {(char *)str, len}; + if (thd && sysvar_tracker(thd)->is_enabled()) + return sysvar_tracker(thd)->check_str(thd, tmp); + return Session_sysvars_tracker::server_init_check(thd, system_charset_info, + tmp); +} +bool sysvartrack_reprint_value(THD *thd, char *str, size_t len) +{ + LEX_STRING tmp= {str, len}; + return Session_sysvars_tracker::server_init_process(thd, + system_charset_info, + tmp); +} +bool sysvartrack_update(THD *thd) +{ + return sysvar_tracker(thd)->update(thd); +} +size_t sysvartrack_value_len(THD *thd) +{ + return sysvar_tracker(thd)->get_buffer_length(); +} +bool sysvartrack_value_construct(THD *thd, char *val, size_t len) +{ + return sysvar_tracker(thd)->construct_var_list(val, len); +} + /////////////////////////////////////////////////////////////////////////////// /** @@ -282,8 +994,29 @@ bool Session_state_change_tracker::is_state_changed(THD *) Session_tracker::Session_tracker() { + for (int i= 0; i <= SESSION_TRACKER_END; i ++) + m_trackers[i]= NULL; +} + + +/** + @brief Enables the tracker objects. + + @param thd [IN] The thread handle. 
+ + @return void +*/ + +void Session_tracker::enable(THD *thd) +{ + /* + Originally and correctly this allocation was in the constructor and + deallocation in the destructor, but in this case memory counting + system works incorrectly (for example in INSERT DELAYED thread) + */ + deinit(); m_trackers[SESSION_SYSVARS_TRACKER]= - new (std::nothrow) Not_implemented_tracker; + new (std::nothrow) Session_sysvars_tracker(); m_trackers[CURRENT_SCHEMA_TRACKER]= new (std::nothrow) Current_schema_tracker; m_trackers[SESSION_STATE_CHANGE_TRACKER]= @@ -292,19 +1025,35 @@ Session_tracker::Session_tracker() new (std::nothrow) Not_implemented_tracker; m_trackers[TRANSACTION_INFO_TRACKER]= new (std::nothrow) Not_implemented_tracker; + + for (int i= 0; i <= SESSION_TRACKER_END; i ++) + m_trackers[i]->enable(thd); } -/** - @brief Enables the tracker objects. - @param thd [IN] The thread handle. +/** + Method called during the server startup to verify the contents + of @@session_track_system_variables. - @return void + @retval false Success + @retval true Failure */ -void Session_tracker::enable(THD *thd) + +bool Session_tracker::server_boot_verify(const CHARSET_INFO *char_set) { - for (int i= 0; i <= SESSION_TRACKER_END; i ++) - m_trackers[i]->enable(thd); + Session_sysvars_tracker *server_tracker; + bool result; + sys_var *svar= find_sys_var_ex(NULL, SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, + SESSION_TRACK_SYSTEM_VARIABLES_NAME.length, + false, true); + DBUG_ASSERT(svar); + set_var tmp(NULL, SHOW_OPT_GLOBAL, svar, &null_lex_str, NULL); + svar->session_save_default(NULL, &tmp); + server_tracker= new (std::nothrow) Session_sysvars_tracker(); + result= server_tracker->server_init_check(NULL, char_set, + tmp.save_result.string_value); + delete server_tracker; + return result; } diff --git a/sql/session_tracker.h b/sql/session_tracker.h index ec24d5a7a00..7025c34967d 100644 --- a/sql/session_tracker.h +++ b/sql/session_tracker.h @@ -104,6 +104,12 @@ public: virtual void mark_as_changed(THD *thd, LEX_CSTRING *name)= 0; }; +bool sysvartrack_validate_value(THD *thd, const char *str, size_t len); +bool sysvartrack_reprint_value(THD *thd, char *str, size_t len); +bool sysvartrack_update(THD *thd); +size_t sysvartrack_value_len(THD *thd); +bool sysvartrack_value_construct(THD *thd, char *val, size_t len); + /** Session_tracker @@ -133,11 +139,23 @@ public: Session_tracker(); ~Session_tracker() + { + deinit(); + } + + /* trick to make happy memory accounting system */ + void deinit() { for (int i= 0; i <= SESSION_TRACKER_END; i ++) - delete m_trackers[i]; + { + if (m_trackers[i]) + delete m_trackers[i]; + m_trackers[i]= NULL; + } } + void enable(THD *thd); + bool server_boot_verify(const CHARSET_INFO *char_set); /** Returns the pointer to the tracker object for the specified tracker. 
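The Session_tracker changes shown here move tracker allocation from the constructor into enable() and add a deinit() that THD::~THD can call early, so per-connection memory is released before the accounting check. A generic sketch of that slot-ownership pattern follows; the class names are placeholders, and it uses a COUNT sentinel with '<' purely for brevity where the patch iterates with an inclusive '<= SESSION_TRACKER_END' bound.

    // Sketch of the "allocate in enable(), free in deinit()" slot pattern:
    // deinit() can run before the destructor so the memory is already
    // released when the final accounting check happens.
    struct Tracker { virtual ~Tracker() {} virtual bool enable()= 0; };
    struct SchemaTracker : Tracker { bool enable() { return false; } };
    struct SysvarTracker : Tracker { bool enable() { return false; } };

    enum { SLOT_SYSVARS, SLOT_SCHEMA, SLOT_COUNT };

    class TrackerSet
    {
      Tracker *slots[SLOT_COUNT];
    public:
      TrackerSet() { for (int i= 0; i < SLOT_COUNT; i++) slots[i]= 0; }
      ~TrackerSet() { deinit(); }

      void enable()
      {
        deinit();                              // safe to call repeatedly
        slots[SLOT_SYSVARS]= new SysvarTracker();
        slots[SLOT_SCHEMA]=  new SchemaTracker();
        for (int i= 0; i < SLOT_COUNT; i++)
          slots[i]->enable();
      }

      void deinit()                            // explicit early teardown
      {
        for (int i= 0; i < SLOT_COUNT; i++)
        {
          delete slots[i];                     // delete of a null slot is a no-op
          slots[i]= 0;
        }
      }
    };

    int main()
    {
      TrackerSet t;
      t.enable();
      t.deinit();
    }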
*/ inline State_tracker *get_tracker(enum_session_tracker tracker) const diff --git a/sql/set_var.cc b/sql/set_var.cc index 68d57abcdf6..84ed7810650 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -115,6 +115,9 @@ void sys_var_end() DBUG_VOID_RETURN; } + +static bool static_test_load= TRUE; + /** sys_var constructor @@ -184,6 +187,8 @@ sys_var::sys_var(sys_var_chain *chain, const char *name_arg, else chain->first= this; chain->last= this; + + test_load= &static_test_load; } bool sys_var::update(THD *thd, set_var *var) @@ -215,13 +220,14 @@ bool sys_var::update(THD *thd, set_var *var) */ if ((var->type == OPT_SESSION) && (!ret)) { + thd->session_tracker.mark_as_changed(thd, SESSION_SYSVARS_TRACKER, + (LEX_CSTRING*)var->var); /* Here MySQL sends variable name to avoid reporting change of the tracker itself, but we decided that it is not needed */ thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); - } return ret; @@ -995,7 +1001,30 @@ int set_var_collation_client::update(THD *thd) thd->update_charset(character_set_client, collation_connection, character_set_results); + /* Mark client collation variables as changed */ + if (thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->is_enabled()) + { + sys_var *svar; + mysql_mutex_lock(&LOCK_plugin); + if ((svar= find_sys_var_ex(thd, "character_set_client", + sizeof("character_set_client") - 1, + false, true))) + thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)-> + mark_as_changed(thd, (LEX_CSTRING*)svar); + if ((svar= find_sys_var_ex(thd, "character_set_results", + sizeof("character_set_results") - 1, + false, true))) + thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)-> + mark_as_changed(thd, (LEX_CSTRING*)svar); + if ((svar= find_sys_var_ex(thd, "character_set_connection", + sizeof("character_set_connection") - 1, + false, true))) + thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)-> + mark_as_changed(thd, (LEX_CSTRING*)svar); + mysql_mutex_unlock(&LOCK_plugin); + } thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); + thd->protocol_text.init(thd); thd->protocol_binary.init(thd); return 0; diff --git a/sql/set_var.h b/sql/set_var.h index 6a650f2ec8a..16111ad7111 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -48,6 +48,9 @@ struct sys_var_chain int mysql_add_sys_var_chain(sys_var *chain); int mysql_del_sys_var_chain(sys_var *chain); + +extern const LEX_CSTRING SESSION_TRACK_SYSTEM_VARIABLES_NAME; + /** A class representing one system variable - that is something that can be accessed as @@global.variable_name or @@session.variable_name, @@ -60,6 +63,7 @@ class sys_var: protected Value_source // for double_from_string_with_check public: sys_var *next; LEX_CSTRING name; + bool *test_load; enum flag_enum { GLOBAL, SESSION, ONLY_SESSION, SCOPE_MASK=1023, READONLY=1024, ALLOCATED=2048, PARSE_EARLY=4096, NO_SET_STATEMENT=8192, AUTO_SET=16384}; @@ -240,6 +244,9 @@ protected: uchar *global_var_ptr() { return ((uchar*)&global_system_variables) + offset; } + + friend class Session_sysvars_tracker; + friend class Session_tracker; }; #include "sql_plugin.h" /* SHOW_HA_ROWS, SHOW_MY_BOOL */ diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 3bb1b3a6197..4e301a9df02 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -4064,21 +4064,21 @@ ER_LOCK_OR_ACTIVE_TRANSACTION swe "Kan inte utföra kommandot emedan du har en lÃ¥st tabell eller an aktiv transaktion" ukr "Ðе можу виконати подану команду тому, що Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ 
Ð·Ð°Ð±Ð»Ð¾ÐºÐ¾Ð²Ð°Ð½Ð° або виконуєтьÑÑ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ñ–Ñ" ER_UNKNOWN_SYSTEM_VARIABLE - cze "Neznámá systémová promÄ›nná '%-.64s'" - dan "Ukendt systemvariabel '%-.64s'" - nla "Onbekende systeem variabele '%-.64s'" - eng "Unknown system variable '%-.64s'" - est "Tundmatu süsteemne muutuja '%-.64s'" - fre "Variable système '%-.64s' inconnue" - ger "Unbekannte Systemvariable '%-.64s'" - ita "Variabile di sistema '%-.64s' sconosciuta" - jpn "'%-.64s' ã¯ä¸æ˜Žãªã‚·ã‚¹ãƒ†ãƒ å¤‰æ•°ã§ã™ã€‚" - por "Variável de sistema '%-.64s' desconhecida" - rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÑиÑÑ‚ÐµÐ¼Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s'" - serbian "Nepoznata sistemska promenljiva '%-.64s'" - spa "Desconocida variable de sistema '%-.64s'" - swe "Okänd systemvariabel: '%-.64s'" - ukr "Ðевідома ÑиÑтемна змінна '%-.64s'" + cze "Neznámá systémová promÄ›nná '%-.*s'" + dan "Ukendt systemvariabel '%-.*s'" + nla "Onbekende systeem variabele '%-.*s'" + eng "Unknown system variable '%-.*s'" + est "Tundmatu süsteemne muutuja '%-.*s'" + fre "Variable système '%-.*s' inconnue" + ger "Unbekannte Systemvariable '%-.*s'" + ita "Variabile di sistema '%-.*s' sconosciuta" + jpn "'%-.*s' ã¯ä¸æ˜Žãªã‚·ã‚¹ãƒ†ãƒ å¤‰æ•°ã§ã™ã€‚" + por "Variável de sistema '%-.*s' desconhecida" + rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÑиÑÑ‚ÐµÐ¼Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.*s'" + serbian "Nepoznata sistemska promenljiva '%-.*s'" + spa "Desconocida variable de sistema '%-.*s'" + swe "Okänd systemvariabel: '%-.*s'" + ukr "Ðевідома ÑиÑтемна змінна '%-.*s'" ER_CRASHED_ON_USAGE cze "Tabulka '%-.192s' je oznaÄena jako poruÅ¡ená a mÄ›la by být opravena" dan "Tabellen '%-.192s' er markeret med fejl og bør repareres" diff --git a/sql/sql_class.cc b/sql/sql_class.cc index e91c80d3f36..a99e375cfbd 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1766,6 +1766,10 @@ THD::~THD() lf_hash_put_pins(xid_hash_pins); /* Ensure everything is freed */ status_var.local_memory_used-= sizeof(THD); + + /* trick to make happy memory accounting system */ + session_tracker.deinit(); + if (status_var.local_memory_used != 0) { DBUG_PRINT("error", ("memory_used: %lld", status_var.local_memory_used)); diff --git a/sql/sql_class.h b/sql/sql_class.h index 7a663bf7653..a6af33f7c5a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -691,6 +691,8 @@ typedef struct system_variables my_bool session_track_schema; my_bool session_track_state_change; + + char *session_track_system_variables; } SV; /** diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 98bc5d606af..db6a4b9b15b 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -269,6 +269,7 @@ struct st_bookmark uint name_len; int offset; uint version; + bool loaded; char key[1]; }; @@ -322,6 +323,8 @@ static void unlock_variables(THD *thd, struct system_variables *vars); static void cleanup_variables(struct system_variables *vars); static void plugin_vars_free_values(sys_var *vars); static void restore_ptr_backup(uint n, st_ptr_backup *backup); +#define my_intern_plugin_lock(A,B) intern_plugin_lock(A,B) +#define my_intern_plugin_lock_ci(A,B) intern_plugin_lock(A,B) static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref plugin); static void intern_plugin_unlock(LEX *lex, plugin_ref plugin); static void reap_plugins(void); @@ -1175,6 +1178,13 @@ err: DBUG_RETURN(errs > 0 || oks + dupes == 0); } +static void plugin_variables_deinit(struct st_plugin_int *plugin) +{ + + for (sys_var *var= plugin->system_vars; var; var= var->next) + (*var->test_load)= FALSE; + mysql_del_sys_var_chain(plugin->system_vars); +} static void plugin_deinitialize(struct st_plugin_int 
*plugin, bool ref_check) { @@ -1226,8 +1236,7 @@ static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check) if (ref_check && plugin->ref_count) sql_print_error("Plugin '%s' has ref_count=%d after deinitialization.", plugin->name.str, plugin->ref_count); - - mysql_del_sys_var_chain(plugin->system_vars); + plugin_variables_deinit(plugin); } static void plugin_del(struct st_plugin_int *plugin) @@ -1447,7 +1456,7 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin, err: if (ret) - mysql_del_sys_var_chain(plugin->system_vars); + plugin_variables_deinit(plugin); mysql_mutex_lock(&LOCK_plugin); plugin->state= state; @@ -2780,22 +2789,24 @@ static void update_func_double(THD *thd, struct st_mysql_sys_var *var, System Variables support ****************************************************************************/ - -sys_var *find_sys_var(THD *thd, const char *str, size_t length) +sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length, + bool throw_error, bool locked) { sys_var *var; sys_var_pluginvar *pi= NULL; plugin_ref plugin; - DBUG_ENTER("find_sys_var"); + DBUG_ENTER("find_sys_var_ex"); + DBUG_PRINT("enter", ("var '%.*s'", (int)length, str)); - mysql_mutex_lock(&LOCK_plugin); + if (!locked) + mysql_mutex_lock(&LOCK_plugin); mysql_rwlock_rdlock(&LOCK_system_variables_hash); if ((var= intern_find_sys_var(str, length)) && (pi= var->cast_pluginvar())) { mysql_rwlock_unlock(&LOCK_system_variables_hash); LEX *lex= thd ? thd->lex : 0; - if (!(plugin= intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin)))) + if (!(plugin= my_intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin)))) var= NULL; /* failed to lock it, it must be uninstalling */ else if (!(plugin_state(plugin) & PLUGIN_IS_READY)) @@ -2807,14 +2818,20 @@ sys_var *find_sys_var(THD *thd, const char *str, size_t length) } else mysql_rwlock_unlock(&LOCK_system_variables_hash); - mysql_mutex_unlock(&LOCK_plugin); + if (!locked) + mysql_mutex_unlock(&LOCK_plugin); - if (!var) - my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str); + if (!throw_error && !var) + my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (int)length, (char*) str); DBUG_RETURN(var); } +sys_var *find_sys_var(THD *thd, const char *str, size_t length) +{ + return find_sys_var_ex(thd, str, length, false, false); +} + /* called by register_var, construct_options and test_plugin_options. Returns the 'bookmark' for the named variable. @@ -3940,6 +3957,14 @@ my_bool mark_changed(int, const struct my_option *opt, char *) return 0; } +/** + It is always false to mark global plugin variable unloaded just to be + safe because we have no way now to know truth about them. + + TODO: make correct mechanism for global plugin variables +*/ +static bool static_unload= FALSE; + /** Create and register system variables supplied from the plugin and assigns initial values from corresponding command line arguments. 
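The new sys_var::test_load member is a pointer to a flag owned elsewhere: built-in variables point at a static flag that stays TRUE, while plugin variables point at their bookmark's 'loaded' flag, which plugin_variables_deinit() clears on uninstall, letting the tracker skip variables whose plugin has gone away. A small model of that indirection (names are illustrative, not server identifiers):

    // Model of the sys_var::test_load indirection: the tracker keeps raw
    // pointers to variables but only reports those whose owning plugin is
    // still loaded.
    #include <iostream>
    #include <string>
    #include <vector>

    static bool static_test_load= true;        // built-ins count as always loaded

    struct Var
    {
      std::string name;
      bool *test_load;
      Var(const std::string &n, bool *flag= &static_test_load)
        : name(n), test_load(flag) {}
    };

    int main()
    {
      bool plugin_loaded= true;                // would live in the plugin bookmark

      std::vector<Var> tracked;
      tracked.push_back(Var("autocommit"));                       // built-in
      tracked.push_back(Var("example_plugin_var", &plugin_loaded));

      plugin_loaded= false;                    // the plugin gets uninstalled

      for (const Var &v : tracked)
        if (*v.test_load)                      // skip vars of unloaded plugins
          std::cout << v.name << '\n';         // prints only "autocommit"
    }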
@@ -4017,9 +4042,13 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, tmp_backup[tmp->nbackups++].save(&o->name); if ((var= find_bookmark(tmp->name.str, o->name, o->flags))) + { varname= var->key + 1; + var->loaded= TRUE; + } else { + var= NULL; len= tmp->name.length + strlen(o->name) + 2; varname= (char*) alloc_root(mem_root, len); strxmov(varname, tmp->name.str, "-", o->name, NullS); @@ -4027,6 +4056,9 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, convert_dash_to_underscore(varname, len-1); } v= new (mem_root) sys_var_pluginvar(&chain, varname, tmp, o); + v->test_load= (var ? &var->loaded : &static_unload); + DBUG_ASSERT(static_unload == FALSE); + if (!(o->flags & PLUGIN_VAR_NOCMDOPT)) { // update app_type, used for I_S.SYSTEM_VARIABLES diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index 96f8411f8ed..47c3af83bdf 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -192,4 +192,6 @@ extern bool plugin_foreach_with_mask(THD *thd, plugin_foreach_func *func, extern bool plugin_dl_foreach(THD *thd, const LEX_STRING *dl, plugin_foreach_func *func, void *arg); +sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length, + bool throw_error, bool locked); #endif diff --git a/sql/sql_show.cc b/sql/sql_show.cc index a446e05d427..25af6fe07cc 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3212,6 +3212,132 @@ void remove_status_vars(SHOW_VAR *list) } +/** + @brief Returns the value of a system or a status variable. + + @param thd [in] The handle of the current THD. + @param variable [in] Details of the variable. + @param value_type [in] Variable type. + @param show_type [in] Variable show type. + @param charset [out] Character set of the value. + @param buff [in,out] Buffer to store the value. + (Needs to have enough memory + to hold the value of variable.) + @param length [out] Length of the value. + + @return Pointer to the value buffer. +*/ + +const char* get_one_variable(THD *thd, + const SHOW_VAR *variable, + enum_var_type value_type, SHOW_TYPE show_type, + system_status_var *status_var, + const CHARSET_INFO **charset, char *buff, + size_t *length) +{ + void *value= variable->value; + const char *pos= buff; + const char *end= buff; + + + if (show_type == SHOW_SYS) + { + sys_var *var= (sys_var *) value; + show_type= var->show_type(); + value= var->value_ptr(thd, value_type, &null_lex_str); + *charset= var->charset(thd); + } + + /* + note that value may be == buff. All SHOW_xxx code below + should still work in this case + */ + switch (show_type) { + case SHOW_DOUBLE_STATUS: + value= ((char *) status_var + (intptr) value); + /* fall through */ + case SHOW_DOUBLE: + /* 6 is the default precision for '%f' in sprintf() */ + end= buff + my_fcvt(*(double *) value, 6, buff, NULL); + break; + case SHOW_LONG_STATUS: + value= ((char *) status_var + (intptr) value); + /* fall through */ + case SHOW_ULONG: + case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status() + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_LONGLONG_STATUS: + value= ((char *) status_var + (intptr) value); + /* fall through */ + case SHOW_ULONGLONG: + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; + case SHOW_HA_ROWS: + end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); + break; + case SHOW_BOOL: + end= strmov(buff, *(bool*) value ? "ON" : "OFF"); + break; + case SHOW_MY_BOOL: + end= strmov(buff, *(my_bool*) value ? 
"ON" : "OFF"); + break; + case SHOW_UINT: + end= int10_to_str((long) *(uint*) value, buff, 10); + break; + case SHOW_SINT: + end= int10_to_str((long) *(int*) value, buff, -10); + break; + case SHOW_SLONG: + end= int10_to_str(*(long*) value, buff, -10); + break; + case SHOW_SLONGLONG: + end= longlong10_to_str(*(longlong*) value, buff, -10); + break; + case SHOW_HAVE: + { + SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; + pos= show_comp_option_name[(int) tmp]; + end= strend(pos); + break; + } + case SHOW_CHAR: + { + if (!(pos= (char*)value)) + pos= ""; + end= strend(pos); + break; + } + case SHOW_CHAR_PTR: + { + if (!(pos= *(char**) value)) + pos= ""; + + end= strend(pos); + break; + } + case SHOW_LEX_STRING: + { + LEX_STRING *ls=(LEX_STRING*)value; + if (!(pos= ls->str)) + end= pos= ""; + else + end= pos + ls->length; + break; + } + case SHOW_UNDEF: + break; // Return empty string + case SHOW_SYS: // Cannot happen + default: + DBUG_ASSERT(0); + break; + } + + *length= (size_t) (end - pos); + return pos; +} + + static bool show_status_array(THD *thd, const char *wild, SHOW_VAR *variables, enum enum_var_type scope, @@ -3324,109 +3450,21 @@ static bool show_status_array(THD *thd, const char *wild, name_buffer, wild))) && (!cond || cond->val_int())) { - void *value=var->value; - const char *pos, *end; // We assign a lot of const's + const char *pos; // We assign a lot of const's + size_t length; if (show_type == SHOW_SYS) - { - sys_var *var= (sys_var *) value; - show_type= var->show_type(); mysql_mutex_lock(&LOCK_global_system_variables); - value= var->value_ptr(thd, scope, &null_lex_str); - charset= var->charset(thd); - } + pos= get_one_variable(thd, var, scope, show_type, status_var, + &charset, buff, &length); - pos= end= buff; - /* - note that value may be == buff. All SHOW_xxx code below - should still work in this case - */ - switch (show_type) { - case SHOW_DOUBLE_STATUS: - value= ((char *) status_var + (intptr) value); - /* fall through */ - case SHOW_DOUBLE: - /* 6 is the default precision for '%f' in sprintf() */ - end= buff + my_fcvt(*(double *) value, 6, buff, NULL); - break; - case SHOW_LONG_STATUS: - value= ((char *) status_var + (intptr) value); - /* fall through */ - case SHOW_ULONG: - case SHOW_LONG_NOFLUSH: // the difference lies in refresh_status() - end= int10_to_str(*(long*) value, buff, 10); - break; - case SHOW_LONGLONG_STATUS: - value= ((char *) status_var + (intptr) value); - /* fall through */ - case SHOW_ULONGLONG: - end= longlong10_to_str(*(longlong*) value, buff, 10); - break; - case SHOW_HA_ROWS: - end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); - break; - case SHOW_BOOL: - end= strmov(buff, *(bool*) value ? "ON" : "OFF"); - break; - case SHOW_MY_BOOL: - end= strmov(buff, *(my_bool*) value ? 
"ON" : "OFF"); - break; - case SHOW_UINT: - end= int10_to_str((long) *(uint*) value, buff, 10); - break; - case SHOW_SINT: - end= int10_to_str((long) *(int*) value, buff, -10); - break; - case SHOW_SLONG: - end= int10_to_str(*(long*) value, buff, -10); - break; - case SHOW_SLONGLONG: - end= longlong10_to_str(*(longlong*) value, buff, -10); - break; - case SHOW_HAVE: - { - SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; - pos= show_comp_option_name[(int) tmp]; - end= strend(pos); - break; - } - case SHOW_CHAR: - { - if (!(pos= (char*)value)) - pos= ""; - end= strend(pos); - break; - } - case SHOW_CHAR_PTR: - { - if (!(pos= *(char**) value)) - pos= ""; - - end= strend(pos); - break; - } - case SHOW_LEX_STRING: - { - LEX_STRING *ls=(LEX_STRING*)value; - if (!(pos= ls->str)) - end= pos= ""; - else - end= pos + ls->length; - break; - } - case SHOW_UNDEF: - break; // Return empty string - case SHOW_SYS: // Cannot happen - default: - DBUG_ASSERT(0); - break; - } - table->field[1]->store(pos, (uint32) (end - pos), charset); + table->field[1]->store(pos, (uint32) length, charset); + thd->count_cuted_fields= CHECK_FIELD_IGNORE; table->field[1]->set_notnull(); - - if (var->type == SHOW_SYS) + if (show_type == SHOW_SYS) mysql_mutex_unlock(&LOCK_global_system_variables); + if (schema_table_store_record(thd, table)) { res= TRUE; diff --git a/sql/sql_show.h b/sql/sql_show.h index dbae2a42b39..e93b855450c 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -131,6 +131,12 @@ bool get_schema_tables_result(JOIN *join, enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table); TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list); +const char* get_one_variable(THD *thd, const SHOW_VAR *variable, + enum_var_type value_type, SHOW_TYPE show_type, + system_status_var *status_var, + const CHARSET_INFO **charset, char *buff, + size_t *length); + /* These functions were under INNODB_COMPATIBILITY_HOOKS */ int get_quote_char_for_identifier(THD *thd, const char *name, uint length); THD *find_thread_by_id(longlong id, bool query_id= false); diff --git a/sql/sql_string.h b/sql/sql_string.h index 10f3c4aee43..feab8070cd2 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -359,7 +359,9 @@ public: if (ALIGN_SIZE(arg_length+1) < Alloced_length) { char *new_ptr; - if (!(new_ptr=(char*) my_realloc(Ptr,arg_length,MYF(0)))) + if (!(new_ptr=(char*) + my_realloc(Ptr, arg_length,MYF((thread_specific ? + MY_THREAD_SPECIFIC : 0))))) { Alloced_length = 0; real_alloc(arg_length); diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index ea9f1d14eee..68be0cdbdca 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5375,6 +5375,16 @@ static Sys_var_ulong Sys_log_tc_size( BLOCK_SIZE(my_getpagesize())); #endif +const LEX_CSTRING SESSION_TRACK_SYSTEM_VARIABLES_NAME= + {STRING_WITH_LEN("session_track_system_variables")}; + +static Sys_var_sesvartrack Sys_track_session_sys_vars( + SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, + "Track changes in registered system variables.", + CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET, + DEFAULT("autocommit,character_set_client,character_set_connection," + "character_set_results,time_zone"), + NO_MUTEX_GUARD); static bool update_session_track_schema(sys_var *self, THD *thd, enum_var_type type) diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic index ca6634849a1..dbe84d3efcc 100644 --- a/sql/sys_vars.ic +++ b/sql/sys_vars.ic @@ -438,10 +438,10 @@ public: does not destroy individual members of SV, there's no way to free allocated string variables for every thread. 
*/ -class Sys_var_charptr: public sys_var +class Sys_var_charptr_base: public sys_var { public: - Sys_var_charptr(const char *name_arg, + Sys_var_charptr_base(const char *name_arg, const char *comment, int flag_args, ptrdiff_t off, size_t size, CMD_LINE getopt, enum charset_enum is_os_charset_arg, @@ -463,8 +463,6 @@ public: */ option.var_type|= (flags & ALLOCATED) ? GET_STR_ALLOC : GET_STR; global_var(const char*)= def_val; - SYSVAR_ASSERT(scope() == GLOBAL); - SYSVAR_ASSERT(size == sizeof(char *)); } void cleanup() { @@ -503,31 +501,35 @@ public: } bool do_check(THD *thd, set_var *var) { return do_string_check(thd, var, charset(thd)); } - bool session_update(THD *thd, set_var *var) - { - DBUG_ASSERT(FALSE); - return true; - } - bool global_update(THD *thd, set_var *var) + bool session_update(THD *thd, set_var *var)= 0; + char *global_update_prepare(THD *thd, set_var *var) { char *new_val, *ptr= var->save_result.string_value.str; size_t len=var->save_result.string_value.length; if (ptr) { new_val= (char*)my_memdup(ptr, len+1, MYF(MY_WME)); - if (!new_val) return true; + if (!new_val) return 0; new_val[len]=0; } else new_val= 0; + return new_val; + } + void global_update_finish(char *new_val) + { if (flags & ALLOCATED) my_free(global_var(char*)); flags|= ALLOCATED; global_var(char*)= new_val; - return false; } - void session_save_default(THD *thd, set_var *var) - { DBUG_ASSERT(FALSE); } + bool global_update(THD *thd, set_var *var) + { + char *new_val= global_update_prepare(thd, var); + global_update_finish(new_val); + return (new_val == 0 && var->save_result.string_value.str != 0); + } + void session_save_default(THD *thd, set_var *var)= 0; void global_save_default(THD *thd, set_var *var) { char *ptr= (char*)(intptr)option.def_value; @@ -536,6 +538,105 @@ public: } }; +class Sys_var_charptr: public Sys_var_charptr_base +{ +public: + Sys_var_charptr(const char *name_arg, + const char *comment, int flag_args, ptrdiff_t off, size_t size, + CMD_LINE getopt, + enum charset_enum is_os_charset_arg, + const char *def_val, PolyLock *lock=0, + enum binlog_status_enum binlog_status_arg=VARIABLE_NOT_IN_BINLOG, + on_check_function on_check_func=0, + on_update_function on_update_func=0, + const char *substitute=0) : + Sys_var_charptr_base(name_arg, comment, flag_args, off, size, getopt, + is_os_charset_arg, def_val, lock, binlog_status_arg, + on_check_func, on_update_func, substitute) + { + SYSVAR_ASSERT(scope() == GLOBAL); + SYSVAR_ASSERT(size == sizeof(char *)); + } + + bool session_update(THD *thd, set_var *var) + { + DBUG_ASSERT(FALSE); + return true; + } + void session_save_default(THD *thd, set_var *var) + { DBUG_ASSERT(FALSE); } +}; + +class Sys_var_sesvartrack: public Sys_var_charptr_base +{ +public: + Sys_var_sesvartrack(const char *name_arg, + const char *comment, + CMD_LINE getopt, + enum charset_enum is_os_charset_arg, + const char *def_val, PolyLock *lock) : + Sys_var_charptr_base(name_arg, comment, + SESSION_VAR(session_track_system_variables), + getopt, is_os_charset_arg, def_val, lock, + VARIABLE_NOT_IN_BINLOG, 0, 0, 0) + {} + bool do_check(THD *thd, set_var *var) + { + if (Sys_var_charptr_base::do_check(thd, var) || + sysvartrack_validate_value(thd, var->save_result.string_value.str, + var->save_result.string_value.length)) + return TRUE; + return FALSE; + } + bool global_update(THD *thd, set_var *var) + { + char *new_val= global_update_prepare(thd, var); + if (new_val) + { + if (sysvartrack_reprint_value(thd, new_val, + var->save_result.string_value.length)) + new_val= 0; + } + 
global_update_finish(new_val); + return (new_val == 0 && var->save_result.string_value.str != 0); + } + bool session_update(THD *thd, set_var *var) + { + return sysvartrack_update(thd); + } + void session_save_default(THD *thd, set_var *var) + { + var->save_result.string_value.str= global_var(char*); + var->save_result.string_value.length= + strlen(var->save_result.string_value.str); + /* parse and feel list with default values */ + if (thd) + { + bool res= + sysvartrack_validate_value(thd, + var->save_result.string_value.str, + var->save_result.string_value.length); + DBUG_ASSERT(res == 0); + } + } + uchar *session_value_ptr(THD *thd, const LEX_STRING *base) + { + DBUG_ASSERT(thd != NULL); + size_t len= sysvartrack_value_len(thd); + char *res= 0; + char *buf= (char *)my_safe_alloca(len); + if (buf && !sysvartrack_value_construct(thd, buf, len)) + { + size_t len= strlen(buf) + 1; + res= (char*) thd->alloc(len + sizeof(char *)); + if (res) + memcpy((*((char**) res)= res + sizeof(char *)), buf, len); + my_safe_afree(buf, len); + } + return (uchar *)res; + } +}; + class Sys_var_proxy_user: public sys_var { -- cgit v1.2.1 From 0ee3e64c55664332e8e92eda55b43692159fe4fe Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 30 May 2016 21:22:50 +0200 Subject: MDEV-8931: (server part of) session state tracking Transaction tracker --- include/mysql.h.pp | 3 +- include/mysql_com.h | 10 +- libmysqld/lib_sql.cc | 4 +- mysql-test/r/mysqld--help.result | 14 +- .../r/session_track_system_variables_basic.result | 2 + .../sys_vars/r/sysvars_server_embedded.result | 28 - .../sys_vars/r/sysvars_server_notembedded.result | 18 +- .../t/session_track_system_variables_basic.test | 4 - sql/lock.cc | 37 + sql/mysqld.cc | 6 +- sql/mysqld.h | 1 - sql/protocol.cc | 46 +- sql/session_tracker.cc | 767 ++++++++++++++++++--- sql/session_tracker.h | 157 ++++- sql/set_var.cc | 11 +- sql/set_var.h | 2 - sql/share/errmsg-utf8.txt | 4 - sql/sp_head.cc | 5 + sql/sql_base.cc | 12 + sql/sql_cache.cc | 29 + sql/sql_class.cc | 4 + sql/sql_class.h | 7 +- sql/sql_db.cc | 6 +- sql/sql_parse.cc | 24 +- sql/sql_plugin.cc | 4 +- sql/sql_prepare.cc | 6 +- sql/sql_string.cc | 6 +- sql/sql_table.cc | 3 +- sql/sys_vars.cc | 58 +- sql/sys_vars.ic | 57 +- sql/transaction.cc | 94 ++- sql/transaction.h | 6 + 32 files changed, 1169 insertions(+), 266 deletions(-) diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 57ce4f78d99..0ef3403626c 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -94,7 +94,8 @@ enum enum_session_state_type SESSION_TRACK_STATE_CHANGE, SESSION_TRACK_GTIDS, SESSION_TRACK_TRANSACTION_CHARACTERISTICS, - SESSION_TRACK_TRANSACTION_STATE + SESSION_TRACK_TRANSACTION_STATE, + SESSION_TRACK_END }; my_bool my_net_init(NET *net, Vio* vio, void *thd, unsigned int my_flags); void my_net_local_init(NET *net); diff --git a/include/mysql_com.h b/include/mysql_com.h index 7433411f29a..16af9849759 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -550,9 +550,6 @@ enum enum_mysql_set_option /* Type of state change information that the server can include in the Ok packet. - Note : 1) session_state_type shouldn't go past 255 (i.e. 1-byte boundary). - 2) Modify the definition of SESSION_TRACK_END when a new member is - added. 
*/ enum enum_session_state_type { @@ -561,15 +558,14 @@ enum enum_session_state_type SESSION_TRACK_STATE_CHANGE, /* track session state changes */ SESSION_TRACK_GTIDS, SESSION_TRACK_TRANSACTION_CHARACTERISTICS, /* Transaction chistics */ - SESSION_TRACK_TRANSACTION_STATE /* Transaction state */ + SESSION_TRACK_TRANSACTION_STATE, /* Transaction state */ + SESSION_TRACK_END /* must be last */ }; #define SESSION_TRACK_BEGIN SESSION_TRACK_SYSTEM_VARIABLES -#define SESSION_TRACK_END SESSION_TRACK_TRANSACTION_STATE - #define IS_SESSION_STATE_TYPE(T) \ - (((int)(T) >= SESSION_TRACK_BEGIN) && ((T) <= SESSION_TRACK_END)) + (((int)(T) >= SESSION_TRACK_BEGIN) && ((T) < SESSION_TRACK_END)) #define net_new_transaction(net) ((net)->pkt_nr=0) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 8c01fc8b9b6..aeee99ebe4d 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -1172,8 +1172,8 @@ bool net_send_ok(THD *thd, uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong id, const char *message, - bool unused1 __attribute__((unused)), - bool unused2 __attribute__((unused))) + bool unused1, + bool unused2) { DBUG_ENTER("emb_net_send_ok"); MYSQL_DATA *data; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 7fc9fedd456..176e8186798 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -904,12 +904,21 @@ The following options may be given as the first argument: --server-id=# Uniquely identifies the server instance in the community of replication partners --session-track-schema - Track changes to the 'default schema'. + Track changes to the default schema. (Defaults to on; use --skip-session-track-schema to disable.) --session-track-state-change - Track changes to the 'session state'. + Track changes to the session state. --session-track-system-variables=name Track changes in registered system variables. + --session-track-transaction-info=name + Track changes to the transaction attributes. OFF to + disable; STATE to track just transaction state (Is there + an active transaction? Does it have any data? etc.); + CHARACTERISTICS to track transaction state and report all + statements needed to start a transaction withthe same + characteristics (isolation level, read only/read + write,snapshot - but not any work done / data modified + within the transaction). --show-slave-auth-info Show user and password in SHOW SLAVE HOSTS on this master. 
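The mysql_com.h change above turns SESSION_TRACK_END into a real enum sentinel instead of a macro aliasing the last member, so the validity check no longer needs editing whenever a tracker type is added. A sketch of the resulting range check; the enum values mirror the patch but the identifiers are renamed here for illustration.

    // With an END sentinel as the last enum member, a valid tracker type is
    // simply BEGIN <= t < END; adding a new member before END keeps the
    // range check correct without touching any macro.
    #include <cassert>

    enum session_state_type
    {
      TRACK_SYSTEM_VARIABLES,
      TRACK_SCHEMA,
      TRACK_STATE_CHANGE,
      TRACK_GTIDS,
      TRACK_TRANSACTION_CHARACTERISTICS,
      TRACK_TRANSACTION_STATE,
      TRACK_END                                // must stay last
    };

    static bool is_session_state_type(int t)
    {
      return t >= TRACK_SYSTEM_VARIABLES && t < TRACK_END;
    }

    int main()
    {
      assert(is_session_state_type(TRACK_SCHEMA));
      assert(!is_session_state_type(TRACK_END));   // the sentinel itself is invalid
      assert(!is_session_state_type(-1));
      return 0;
    }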
@@ -1395,6 +1404,7 @@ server-id 1 session-track-schema TRUE session-track-state-change FALSE session-track-system-variables autocommit,character_set_client,character_set_connection,character_set_results,time_zone +session-track-transaction-info OFF show-slave-auth-info FALSE silent-startup FALSE skip-grant-tables TRUE diff --git a/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result index e451a22d322..78ca8ca4ad1 100644 --- a/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result +++ b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result @@ -17,12 +17,14 @@ VARIABLE_NAME VARIABLE_VALUE SESSION_TRACK_SCHEMA ON SESSION_TRACK_STATE_CHANGE OFF SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SESSION_TRACK_TRANSACTION_INFO OFF # via INFORMATION_SCHEMA.SESSION_VARIABLES SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; VARIABLE_NAME VARIABLE_VALUE SESSION_TRACK_SCHEMA ON SESSION_TRACK_STATE_CHANGE OFF SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SESSION_TRACK_TRANSACTION_INFO OFF SET @global_saved_tmp = @@global.session_track_system_variables; # Altering global variable's value diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index 13733c38a94..eecebce80ad 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -3327,34 +3327,6 @@ NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED -VARIABLE_NAME SESSION_TRACK_SCHEMA -SESSION_VALUE ON -GLOBAL_VALUE ON -GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE ON -VARIABLE_SCOPE SESSION -VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Track changes to the 'default schema'. -NUMERIC_MIN_VALUE NULL -NUMERIC_MAX_VALUE NULL -NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST OFF,ON -READ_ONLY NO -COMMAND_LINE_ARGUMENT OPTIONAL -VARIABLE_NAME SESSION_TRACK_STATE_CHANGE -SESSION_VALUE OFF -GLOBAL_VALUE OFF -GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF -VARIABLE_SCOPE SESSION -VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Track changes to the 'session state'. -NUMERIC_MIN_VALUE NULL -NUMERIC_MAX_VALUE NULL -NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST OFF,ON -READ_ONLY NO -COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME SKIP_EXTERNAL_LOCKING SESSION_VALUE NULL GLOBAL_VALUE ON diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index c0d07280253..3a73ca07402 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3796,7 +3796,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE ON VARIABLE_SCOPE SESSION VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Track changes to the 'default schema'. +VARIABLE_COMMENT Track changes to the default schema. NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL @@ -3810,7 +3810,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE OFF VARIABLE_SCOPE SESSION VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT Track changes to the 'session state'. +VARIABLE_COMMENT Track changes to the session state. 
NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL @@ -3831,6 +3831,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME SESSION_TRACK_TRANSACTION_INFO +SESSION_VALUE OFF +GLOBAL_VALUE OFF +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE OFF +VARIABLE_SCOPE SESSION +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Track changes to the transaction attributes. OFF to disable; STATE to track just transaction state (Is there an active transaction? Does it have any data? etc.); CHARACTERISTICS to track transaction state and report all statements needed to start a transaction withthe same characteristics (isolation level, read only/read write,snapshot - but not any work done / data modified within the transaction). +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,STATE,CHARACTERISTICS +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME SKIP_EXTERNAL_LOCKING SESSION_VALUE NULL GLOBAL_VALUE ON diff --git a/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test b/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test index bbb32bb67a3..90e6052947c 100644 --- a/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test +++ b/mysql-test/suite/sys_vars/t/session_track_system_variables_basic.test @@ -12,14 +12,10 @@ SELECT @@session.session_track_system_variables; --echo --echo # via INFORMATION_SCHEMA.GLOBAL_VARIABLES ---disable_warnings SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; ---enable_warnings --echo # via INFORMATION_SCHEMA.SESSION_VARIABLES ---disable_warnings SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; ---enable_warnings # Save the global value to be used to restore the original value. SET @global_saved_tmp = @@global.session_track_system_variables; diff --git a/sql/lock.cc b/sql/lock.cc index 2e44786d6fe..07286324fc5 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -90,6 +90,7 @@ extern HASH open_cache; static int lock_external(THD *thd, TABLE **table,uint count); static int unlock_external(THD *thd, TABLE **table,uint count); + /* Map the return value of thr_lock to an error from errmsg.txt */ static int thr_lock_errno_to_mysql[]= { 0, ER_LOCK_ABORTED, ER_LOCK_WAIT_TIMEOUT, ER_LOCK_DEADLOCK }; @@ -243,6 +244,39 @@ void reset_lock_data(MYSQL_LOCK *sql_lock, bool unlock) } +/** + Scan array of tables for access types; update transaction tracker + accordingly. + + @param thd The current thread. + @param tables An array of pointers to the tables to lock. + @param count The number of tables to lock. +*/ + +#ifndef EMBEDDED_LIBRARY +static void track_table_access(THD *thd, TABLE **tables, size_t count) +{ + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + { + Transaction_state_tracker *tst= (Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER); + + while (count--) + { + TABLE *t= tables[count]; + + if (t) + tst->add_trx_state(thd, t->reginfo.lock_type, + t->file->has_transactions()); + } + } +} +#else +#define track_table_access(A,B,C) +#endif //EMBEDDED_LIBRARY + + + /** Lock tables. 
@@ -280,6 +314,9 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) my_free(sql_lock); sql_lock= 0; } + + track_table_access(thd, tables, count); + DBUG_RETURN(sql_lock); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b59c6c7048f..28e91e208e7 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5285,16 +5285,16 @@ static int init_server_components() } plugins_are_initialized= TRUE; /* Don't separate from init function */ +#ifndef EMBEDDED_LIBRARY { - Session_tracker session_track_system_variables_check; - if (session_track_system_variables_check. - server_boot_verify(system_charset_info)) + if (Session_tracker::server_boot_verify(system_charset_info)) { sql_print_error("The variable session_track_system_variables has " "invalid values."); unireg_abort(1); } } +#endif //EMBEDDED_LIBRARY /* we do want to exit if there are any other unknown options */ if (remaining_argc > 1) diff --git a/sql/mysqld.h b/sql/mysqld.h index 68eab815564..846a01a9427 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -135,7 +135,6 @@ extern my_bool lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; extern my_bool opt_secure_auth; extern const char *current_dbug_option; -extern const char *current_session_track_system_variables; extern char* opt_secure_file_priv; extern char* opt_secure_backup_file_priv; extern size_t opt_secure_backup_file_priv_len; diff --git a/sql/protocol.cc b/sql/protocol.cc index 77dedfbc7d2..9ad9269f3b5 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -198,7 +198,7 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err, @param affected_rows Number of rows changed by statement @param id Auto_increment id for first row (if used) @param message Message to send to the client (Used by mysql_status) - @param is_eof this called inted of old EOF packet + @param is_eof this called instead of old EOF packet @return @retval FALSE The message was successfully sent @@ -217,10 +217,6 @@ net_send_ok(THD *thd, NET *net= &thd->net; StringBuffer store; - /* - To be used to manage the data storage in case session state change - information is present. - */ bool state_changed= false; bool error= FALSE; @@ -269,39 +265,25 @@ net_send_ok(THD *thd, } thd->get_stmt_da()->set_overwrite_status(true); - if ((thd->client_capabilities & CLIENT_SESSION_TRACK)) - { - if (server_status & SERVER_SESSION_STATE_CHANGED) - state_changed= true; - /* the info field */ - if (state_changed || (message && message[0])) - { - store.q_net_store_data((uchar*) message, message ? 
strlen(message) : 0); - } - - /* session state change information */ - if (unlikely(state_changed)) - { - store.set_charset(thd->variables.collation_database); + state_changed= + (thd->client_capabilities & CLIENT_SESSION_TRACK) && + (server_status & SERVER_SESSION_STATE_CHANGED); - thd->session_tracker.store(thd, &store); - } - } - else if (message && message[0]) + if (state_changed || (message && message[0])) { - /* the info field, if there is a message to store */ - DBUG_ASSERT(strlen(message) <= MYSQL_ERRMSG_SIZE); - store.q_net_store_data((uchar*) message, strlen(message)); + DBUG_ASSERT(safe_strlen(message) <= MYSQL_ERRMSG_SIZE); + store.q_net_store_data((uchar*) safe_str(message), safe_strlen(message)); } - if (store.length() > MAX_PACKET_LENGTH) + if (unlikely(state_changed)) { - net->error= 1; - net->last_errno= ER_NET_OK_PACKET_TOO_LARGE; - my_error(ER_NET_OK_PACKET_TOO_LARGE, MYF(0)); - DBUG_PRINT("info", ("OK packet too large")); - DBUG_RETURN(1); + store.set_charset(thd->variables.collation_database); + + thd->session_tracker.store(thd, &store); } + + DBUG_ASSERT(store.length() <= MAX_PACKET_LENGTH); + error= my_net_write(net, (const unsigned char*)store.ptr(), store.length()); if (!error && (!skip_flush || is_eof)) error= net_flush(net); diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index cfbb1704318..bd641ab8d03 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -15,6 +15,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#ifndef EMBEDDED_LIBRARY #include "sql_plugin.h" #include "session_tracker.h" @@ -26,14 +27,20 @@ #include "sql_plugin.h" #include "set_var.h" +void State_tracker::mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name) +{ + m_changed= true; + thd->lex->safe_to_cache_query= 0; + thd->server_status|= SERVER_SESSION_STATE_CHANGED; +} + + class Not_implemented_tracker : public State_tracker { public: bool enable(THD *thd) { return false; } - bool check(THD *, set_var *) - { return false; } - bool update(THD *) + bool update(THD *, set_var *) { return false; } bool store(THD *, String *) { return false; } @@ -42,7 +49,6 @@ public: }; -static my_bool name_array_filler(void *ptr, void *data_ptr); /** Session_sysvars_tracker @@ -123,7 +129,7 @@ private: } } - uchar* search(sysvar_node_st *node, const sys_var *svar) + uchar* insert_or_search(sysvar_node_st *node, const sys_var *svar) { uchar *res; res= search(svar); @@ -146,7 +152,7 @@ private: void reset(); void copy(vars_list* from, THD *thd); bool parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error, - const CHARSET_INFO *char_set, bool session_created); + CHARSET_INFO *char_set, bool session_created); bool construct_var_list(char *buf, size_t buf_len); }; /** @@ -184,15 +190,13 @@ public: for session_track_system_variables during the server startup. 
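The net_send_ok() rewrite in protocol.cc above appends the info string and, when the client negotiated session tracking and SERVER_SESSION_STATE_CHANGED is set, the session-state blob built by the trackers; the explicit "packet too large" error is replaced by an assertion because each tracker now checks the limit while appending. A simplified model of that OK-packet tail follows, again limited to lengths below 251; the flag constants are local stand-ins for the usual capability/status bits.

    // Simplified model of the tail of an OK packet when session tracking is
    // in use: <lenenc info string> [<lenenc session-state blob>], the second
    // part only when the "state changed" status bit is set.
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    static const unsigned CAP_SESSION_TRACK=    1u << 23;  // stand-in capability
    static const unsigned STATUS_STATE_CHANGED= 1u << 14;  // stand-in status bit

    static void append_lenenc(std::vector<uint8_t> *out, const std::string &s)
    {
      out->push_back(static_cast<uint8_t>(s.size()));      // assumes size < 251
      out->insert(out->end(), s.begin(), s.end());
    }

    static std::vector<uint8_t> ok_packet_tail(unsigned client_caps,
                                               unsigned server_status,
                                               const std::string &info,
                                               const std::string &state_blob)
    {
      std::vector<uint8_t> tail;
      bool state_changed= (client_caps & CAP_SESSION_TRACK) &&
                          (server_status & STATUS_STATE_CHANGED);

      if (state_changed || !info.empty())
        append_lenenc(&tail, info);            // the (possibly empty) info field

      if (state_changed)
        append_lenenc(&tail, state_blob);      // payload built by the trackers

      return tail;
    }

    int main()
    {
      std::string blob= "<tracker payload>";   // placeholder for store() output
      std::vector<uint8_t> t= ok_packet_tail(CAP_SESSION_TRACK,
                                             STATUS_STATE_CHANGED, "", blob);
      std::printf("%u bytes of session-track tail\n", (unsigned) t.size());
    }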
*/ - static bool server_init_check(THD *thd, const CHARSET_INFO *char_set, + static bool server_init_check(THD *thd, CHARSET_INFO *char_set, LEX_STRING var_list) { - vars_list dummy; - bool result; - result= dummy.parse_var_list(thd, var_list, false, char_set, false); - return result; + return check_var_list(thd, var_list, false, char_set, false); } - static bool server_init_process(THD *thd, const CHARSET_INFO *char_set, + + static bool server_init_process(THD *thd, CHARSET_INFO *char_set, LEX_STRING var_list) { vars_list dummy; @@ -205,16 +209,17 @@ public: void reset(); bool enable(THD *thd); - bool check(THD *thd, set_var *var); - bool check_str(THD *thd, LEX_STRING val); - bool update(THD *thd); + bool check_str(THD *thd, LEX_STRING *val); + bool update(THD *thd, set_var *var); bool store(THD *thd, String *buf); void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); /* callback */ static uchar *sysvars_get_key(const char *entry, size_t *length, my_bool not_used __attribute__((unused))); - friend my_bool name_array_filler(void *ptr, void *data_ptr); + static my_bool name_array_filler(void *ptr, void *data_ptr); + static bool check_var_list(THD *thd, LEX_STRING var_list, bool throw_error, + CHARSET_INFO *char_set, bool session_created); }; @@ -240,12 +245,9 @@ public: } bool enable(THD *thd) - { return update(thd); } - bool check(THD *thd, set_var *var) - { return false; } - bool update(THD *thd); + { return update(thd, NULL); } + bool update(THD *thd, set_var *var); bool store(THD *thd, String *buf); - void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); }; /* @@ -271,15 +273,10 @@ private: public: Session_state_change_tracker(); bool enable(THD *thd) - { return update(thd); }; - bool check(THD *thd, set_var *var) - { return false; } - bool update(THD *thd); + { return update(thd, NULL); }; + bool update(THD *thd, set_var *var); bool store(THD *thd, String *buf); - void mark_as_changed(THD *thd, LEX_CSTRING *tracked_item_name); bool is_state_changed(THD*); - void ensure_enabled(THD *thd) - {} }; @@ -381,7 +378,7 @@ bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node, bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error, - const CHARSET_INFO *char_set, + CHARSET_INFO *char_set, bool session_created) { const char separator= ','; @@ -463,6 +460,80 @@ error: return true; } + +bool Session_sysvars_tracker::check_var_list(THD *thd, + LEX_STRING var_list, + bool throw_error, + CHARSET_INFO *char_set, + bool session_created) +{ + const char separator= ','; + char *token, *lasts= NULL; + size_t rest= var_list.length; + + if (!var_list.str || var_list.length == 0 || + !strcmp(var_list.str,(const char *)"*")) + { + return false; + } + + token= var_list.str; + + /* + If Lock to the plugin mutex is not acquired here itself, it results + in having to acquire it multiple times in find_sys_var_ex for each + token value. Hence the mutex is handled here to avoid a performance + overhead. + */ + if (!thd || session_created) + mysql_mutex_lock(&LOCK_plugin); + for (;;) + { + sys_var *svar; + LEX_STRING var; + + lasts= (char *) memchr(token, separator, rest); + + var.str= token; + if (lasts) + { + var.length= (lasts - token); + rest-= var.length + 1; + } + else + var.length= rest; + + /* Remove leading/trailing whitespace. 
*/ + trim_whitespace(char_set, &var); + + if (!(svar= find_sys_var_ex(thd, var.str, var.length, throw_error, true))) + { + if (throw_error && session_created && thd) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_VALUE_FOR_VAR, + "%.*s is not a valid system variable and will" + "be ignored.", (int)var.length, token); + } + else + { + if (!thd || session_created) + mysql_mutex_unlock(&LOCK_plugin); + return true; + } + } + + if (lasts) + token= lasts + 1; + else + break; + } + if (!thd || session_created) + mysql_mutex_unlock(&LOCK_plugin); + + return false; +} + struct name_array_filler_data { LEX_CSTRING **names; @@ -471,7 +542,8 @@ struct name_array_filler_data }; /** Collects variable references into array */ -static my_bool name_array_filler(void *ptr, void *data_ptr) +my_bool Session_sysvars_tracker::name_array_filler(void *ptr, + void *data_ptr) { Session_sysvars_tracker::sysvar_node_st *node= (Session_sysvars_tracker::sysvar_node_st *)ptr; @@ -578,18 +650,11 @@ bool Session_sysvars_tracker::vars_list::construct_var_list(char *buf, bool Session_sysvars_tracker::enable(THD *thd) { - sys_var *svar; - mysql_mutex_lock(&LOCK_plugin); - svar= find_sys_var_ex(thd, SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, - SESSION_TRACK_SYSTEM_VARIABLES_NAME.length, - false, true); - DBUG_ASSERT(svar); - - set_var tmp(thd, SHOW_OPT_GLOBAL, svar, &null_lex_str, NULL); - svar->session_save_default(thd, &tmp); - - if (tool_list->parse_var_list(thd, tmp.save_result.string_value, + LEX_STRING tmp; + tmp.str= global_system_variables.session_track_system_variables; + tmp.length= safe_strlen(tmp.str); + if (tool_list->parse_var_list(thd, tmp, true, thd->charset(), false) == true) { mysql_mutex_unlock(&LOCK_plugin); @@ -617,16 +682,10 @@ bool Session_sysvars_tracker::enable(THD *thd) @retval false Success */ -inline bool Session_sysvars_tracker::check(THD *thd, set_var *var) -{ - return check_str(thd, var->save_result.string_value); -} - -inline bool Session_sysvars_tracker::check_str(THD *thd, LEX_STRING val) +inline bool Session_sysvars_tracker::check_str(THD *thd, LEX_STRING *val) { - tool_list->reset(); - return tool_list->parse_var_list(thd, val, true, - thd->charset(), true); + return Session_sysvars_tracker::check_var_list(thd, *val, true, + thd->charset(), true); } @@ -645,8 +704,16 @@ inline bool Session_sysvars_tracker::check_str(THD *thd, LEX_STRING val) @retval false Success */ -bool Session_sysvars_tracker::update(THD *thd) +bool Session_sysvars_tracker::update(THD *thd, set_var *var) { + /* + We are doing via tool list because there possible errors with memory + in this case value will be unchanged. 
+ */ + tool_list->reset(); + if (tool_list->parse_var_list(thd, var->save_result.string_value, true, + thd->charset(), true)) + return true; orig_list->copy(tool_list, thd); return false; } @@ -670,7 +737,7 @@ bool Session_sysvars_tracker::store(THD *thd, String *buf) SHOW_VAR show; const char *value; sysvar_node_st *node; - const CHARSET_INFO *charset; + CHARSET_INFO *charset; size_t val_length, length; int idx= 0; @@ -701,10 +768,15 @@ bool Session_sysvars_tracker::store(THD *thd, String *buf) val_length; compile_time_assert(SESSION_TRACK_SYSTEM_VARIABLES < 251); - buf->prep_alloc(1 + net_length_size(length) + length, EXTRA_ALLOC); + if (unlikely((1 + net_length_size(length) + length + buf->length() >= + MAX_PACKET_LENGTH) || + buf->prep_alloc(1 + net_length_size(length) + length, + EXTRA_ALLOC))) + return true; + /* Session state type (SESSION_TRACK_SYSTEM_VARIABLES) */ - buf->q_net_store_length((ulonglong)SESSION_TRACK_SYSTEM_VARIABLES); + buf->q_append((char)SESSION_TRACK_SYSTEM_VARIABLES); /* Length of the overall entity. */ buf->q_net_store_length((ulonglong)length); @@ -739,13 +811,10 @@ void Session_sysvars_tracker::mark_as_changed(THD *thd, Check if the specified system variable is being tracked, if so mark it as changed and also set the class's m_changed flag. */ - if ((node= (sysvar_node_st *) (orig_list->search(node, svar)))) + if ((node= (sysvar_node_st *) (orig_list->insert_or_search(node, svar)))) { node->m_changed= true; - m_changed= true; - /* do not cache the statement when there is change in session state */ - thd->lex->safe_to_cache_query= 0; - thd->server_status|= SERVER_SESSION_STATE_CHANGED; + State_tracker::mark_as_changed(thd, var); } } @@ -795,8 +864,6 @@ static Session_sysvars_tracker* sysvar_tracker(THD *thd) bool sysvartrack_validate_value(THD *thd, const char *str, size_t len) { LEX_STRING tmp= {(char *)str, len}; - if (thd && sysvar_tracker(thd)->is_enabled()) - return sysvar_tracker(thd)->check_str(thd, tmp); return Session_sysvars_tracker::server_init_check(thd, system_charset_info, tmp); } @@ -807,9 +874,9 @@ bool sysvartrack_reprint_value(THD *thd, char *str, size_t len) system_charset_info, tmp); } -bool sysvartrack_update(THD *thd) +bool sysvartrack_update(THD *thd, set_var *var) { - return sysvar_tracker(thd)->update(thd); + return sysvar_tracker(thd)->update(thd, var); } size_t sysvartrack_value_len(THD *thd) { @@ -831,7 +898,7 @@ bool sysvartrack_value_construct(THD *thd, char *val, size_t len) false (always) */ -bool Current_schema_tracker::update(THD *thd) +bool Current_schema_tracker::update(THD *thd, set_var *) { m_enabled= thd->variables.session_track_schema; return false; @@ -862,12 +929,13 @@ bool Current_schema_tracker::store(THD *thd, String *buf) compile_time_assert(SESSION_TRACK_SCHEMA < 251); compile_time_assert(NAME_LEN < 251); - DBUG_ASSERT(net_length_size(length) < 251); - if (buf->prep_alloc(1 + 1 + length, EXTRA_ALLOC)) + DBUG_ASSERT(length < 251); + if (unlikely((1 + 1 + length + buf->length() >= MAX_PACKET_LENGTH) || + buf->prep_alloc(1 + 1 + length, EXTRA_ALLOC))) return true; /* Session state type (SESSION_TRACK_SCHEMA) */ - buf->q_net_store_length((ulonglong)SESSION_TRACK_SCHEMA); + buf->q_append((char)SESSION_TRACK_SCHEMA); /* Length of the overall entity. */ buf->q_net_store_length(length); @@ -882,26 +950,522 @@ bool Current_schema_tracker::store(THD *thd, String *buf) /** - Mark the tracker as changed. + Reset the m_changed flag for next statement. 
+ + @return void */ -void Current_schema_tracker::mark_as_changed(THD *thd, LEX_CSTRING *) +void Current_schema_tracker::reset() { - m_changed= true; - thd->lex->safe_to_cache_query= 0; - thd->server_status|= SERVER_SESSION_STATE_CHANGED; + m_changed= false; +} + + +/////////////////////////////////////////////////////////////////////////////// + + +Transaction_state_tracker::Transaction_state_tracker() +{ + m_enabled = false; + tx_changed = TX_CHG_NONE; + tx_curr_state = + tx_reported_state= TX_EMPTY; + tx_read_flags = TX_READ_INHERIT; + tx_isol_level = TX_ISOL_INHERIT; +} + +/** + Enable/disable the tracker based on @@session_track_transaction_info. + + @param thd [IN] The thd handle. + + @retval true if updating the tracking level failed + @retval false otherwise +*/ + +bool Transaction_state_tracker::update(THD *thd, set_var *) +{ + if (thd->variables.session_track_transaction_info != TX_TRACK_NONE) + { + /* + If we only just turned reporting on (rather than changing between + state and characteristics reporting), start from a defined state. + */ + if (!m_enabled) + { + tx_curr_state = + tx_reported_state = TX_EMPTY; + tx_changed |= TX_CHG_STATE; + m_enabled= true; + } + if (thd->variables.session_track_transaction_info == TX_TRACK_CHISTICS) + tx_changed |= TX_CHG_CHISTICS; + mark_as_changed(thd, NULL); + } + else + m_enabled= false; + + return false; +} + + +/** + Store the transaction state (and, optionally, characteristics) + as length-encoded string in the specified buffer. Once the data + is stored, we reset the flags related to state-change (see reset()). + + + @param thd [IN] The thd handle. + @paran buf [INOUT] Buffer to store the information to. + + @retval false Success + @retval true Error +*/ + +static LEX_CSTRING isol[]= { + { STRING_WITH_LEN("READ UNCOMMITTED") }, + { STRING_WITH_LEN("READ COMMITTED") }, + { STRING_WITH_LEN("REPEATABLE READ") }, + { STRING_WITH_LEN("SERIALIZABLE") } +}; + +bool Transaction_state_tracker::store(THD *thd, String *buf) +{ + /* STATE */ + if (tx_changed & TX_CHG_STATE) + { + uchar *to; + if (unlikely((11 + buf->length() >= MAX_PACKET_LENGTH) || + ((to= (uchar *) buf->prep_append(11, EXTRA_ALLOC)) == NULL))) + return true; + + *(to++)= (char)SESSION_TRACK_TRANSACTION_STATE; + + to= net_store_length((uchar *) to, (ulonglong) 9); + to= net_store_length((uchar *) to, (ulonglong) 8); + + *(to++)= (tx_curr_state & TX_EXPLICIT) ? 'T' : + ((tx_curr_state & TX_IMPLICIT) ? 'I' : '_'); + *(to++)= (tx_curr_state & TX_READ_UNSAFE) ? 'r' : '_'; + *(to++)= ((tx_curr_state & TX_READ_TRX) || + (tx_curr_state & TX_WITH_SNAPSHOT)) ? 'R' : '_'; + *(to++)= (tx_curr_state & TX_WRITE_UNSAFE) ? 'w' : '_'; + *(to++)= (tx_curr_state & TX_WRITE_TRX) ? 'W' : '_'; + *(to++)= (tx_curr_state & TX_STMT_UNSAFE) ? 's' : '_'; + *(to++)= (tx_curr_state & TX_RESULT_SET) ? 'S' : '_'; + *(to++)= (tx_curr_state & TX_LOCKED_TABLES) ? 
'L' : '_'; + } + + /* CHARACTERISTICS -- How to restart the transaction */ + + if ((thd->variables.session_track_transaction_info == TX_TRACK_CHISTICS) && + (tx_changed & TX_CHG_CHISTICS)) + { + bool is_xa= (thd->transaction.xid_state.xa_state != XA_NOTR); + size_t start; + + /* 2 length by 1 byte and code */ + if (unlikely((1 + 1 + 1 + 110 + buf->length() >= MAX_PACKET_LENGTH) || + buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC))) + return true; + + compile_time_assert(SESSION_TRACK_TRANSACTION_CHARACTERISTICS < 251); + /* Session state type (SESSION_TRACK_TRANSACTION_CHARACTERISTICS) */ + buf->q_append((char)SESSION_TRACK_TRANSACTION_CHARACTERISTICS); + + /* placeholders for lengths. will be filled in at the end */ + buf->q_append('\0'); + buf->q_append('\0'); + + start= buf->length(); + + { + /* + We have four basic replay scenarios: + + a) SET TRANSACTION was used, but before an actual transaction + was started, the load balancer moves the connection elsewhere. + In that case, the same one-shots should be set up in the + target session. (read-only/read-write; isolation-level) + + b) The initial transaction has begun; the relevant characteristics + are the session defaults, possibly overridden by previous + SET TRANSACTION statements, possibly overridden or extended + by options passed to the START TRANSACTION statement. + If the load balancer wishes to move this transaction, + it needs to be replayed with the correct characteristics. + (read-only/read-write from SET or START; + isolation-level from SET only, snapshot from START only) + + c) A subsequent transaction started with START TRANSACTION + (which is legal syntax in lieu of COMMIT AND CHAIN in MySQL) + may add/modify the current one-shots: + + - It may set up a read-only/read-write one-shot. + This one-shot will override the value used in the previous + transaction (whether that came from the default or a one-shot), + and, like all one-shots currently do, it will carry over into + any subsequent transactions that don't explicitly override them + in turn. This behavior is not guaranteed in the docs and may + change in the future, but the tracker item should correctly + reflect whatever behavior a given version of mysqld implements. + + - It may also set up a WITH CONSISTENT SNAPSHOT one-shot. + This one-shot does not currently carry over into subsequent + transactions (meaning that with "traditional syntax", WITH + CONSISTENT SNAPSHOT can only be requested for the first part + of a transaction chain). Again, the tracker item should reflect + mysqld behavior. + + d) A subsequent transaction started using COMMIT AND CHAIN + (or, for that matter, BEGIN WORK, which is currently + legal and equivalent syntax in MySQL, or START TRANSACTION + sans options) will re-use any one-shots set up so far + (with SET before the first transaction started, and with + all subsequent STARTs), except for WITH CONSISTANT SNAPSHOT, + which will never be chained and only applies when explicitly + given. + + It bears noting that if we switch sessions in a follow-up + transaction, SET TRANSACTION would be illegal in the old + session (as a transaction is active), whereas in the target + session which is being prepared, it should be legal, as no + transaction (chain) should have started yet. + + Therefore, we are free to generate SET TRANSACTION as a replay + statement even for a transaction that isn't the first in an + ongoing chain. 
Consider + + SET TRANSACTION ISOLATION LEVEL READ UNCOMMITED; + START TRANSACTION READ ONLY, WITH CONSISTENT SNAPSHOT; + # work + COMMIT AND CHAIN; + + If we switch away at this point, the replay in the new session + needs to be + + SET TRANSACTION ISOLATION LEVEL READ UNCOMMITED; + START TRANSACTION READ ONLY; + + When a transaction ends (COMMIT/ROLLBACK sans CHAIN), all + per-transaction characteristics are reset to the session's + defaults. + + This also holds for a transaction ended implicitly! (transaction.cc) + Once again, the aim is to have the tracker item reflect on a + given mysqld's actual behavior. + */ + + /* + "ISOLATION LEVEL" + Only legal in SET TRANSACTION, so will always be replayed as such. + */ + if (tx_isol_level != TX_ISOL_INHERIT) + { + /* + Unfortunately, we can't re-use tx_isolation_names / + tx_isolation_typelib as it hyphenates its items. + */ + buf->append(STRING_WITH_LEN("SET TRANSACTION ISOLATION LEVEL ")); + buf->append(isol[tx_isol_level - 1].str, isol[tx_isol_level - 1].length); + buf->append(STRING_WITH_LEN("; ")); + } + + /* + Start transaction will usually result in TX_EXPLICIT (transaction + started, but no data attached yet), except when WITH CONSISTENT + SNAPSHOT, in which case we may have data pending. + If it's an XA transaction, we don't go through here so we can + first print the trx access mode ("SET TRANSACTION READ ...") + separately before adding XA START (whereas with START TRANSACTION, + we can merge the access mode into the same statement). + */ + if ((tx_curr_state & TX_EXPLICIT) && !is_xa) + { + buf->append(STRING_WITH_LEN("START TRANSACTION")); + + /* + "WITH CONSISTENT SNAPSHOT" + Defaults to no, can only be enabled. + Only appears in START TRANSACTION. + */ + if (tx_curr_state & TX_WITH_SNAPSHOT) + { + buf->append(STRING_WITH_LEN(" WITH CONSISTENT SNAPSHOT")); + if (tx_read_flags != TX_READ_INHERIT) + buf->append(STRING_WITH_LEN(",")); + } + + /* + "READ WRITE / READ ONLY" can be set globally, per-session, + or just for one transaction. + + The latter case can take the form of + START TRANSACTION READ (WRITE|ONLY), or of + SET TRANSACTION READ (ONLY|WRITE). + (Both set thd->read_only for the upcoming transaction; + it will ultimately be re-set to the session default.) + + As the regular session-variable tracker does not monitor the one-shot, + we'll have to do it here. + + If READ is flagged as set explicitly (rather than just inherited + from the session's default), we'll get the actual bool from the THD. + */ + if (tx_read_flags != TX_READ_INHERIT) + { + if (tx_read_flags == TX_READ_ONLY) + buf->append(STRING_WITH_LEN(" READ ONLY")); + else + buf->append(STRING_WITH_LEN(" READ WRITE")); + } + buf->append(STRING_WITH_LEN("; ")); + } + else if (tx_read_flags != TX_READ_INHERIT) + { + /* + "READ ONLY" / "READ WRITE" + We could transform this to SET TRANSACTION even when it occurs + in START TRANSACTION, but for now, we'll resysynthesize the original + command as closely as possible. 
+ */ + buf->append(STRING_WITH_LEN("SET TRANSACTION ")); + if (tx_read_flags == TX_READ_ONLY) + buf->append(STRING_WITH_LEN("READ ONLY; ")); + else + buf->append(STRING_WITH_LEN("READ WRITE; ")); + } + + if ((tx_curr_state & TX_EXPLICIT) && is_xa) + { + XID *xid= &thd->transaction.xid_state.xid; + long glen, blen; + + buf->append(STRING_WITH_LEN("XA START")); + + if ((glen= xid->gtrid_length) > 0) + { + buf->append(STRING_WITH_LEN(" '")); + buf->append(xid->data, glen); + + if ((blen= xid->bqual_length) > 0) + { + buf->append(STRING_WITH_LEN("','")); + buf->append(xid->data + glen, blen); + } + buf->append(STRING_WITH_LEN("'")); + + if (xid->formatID != 1) + { + buf->append(STRING_WITH_LEN(",")); + buf->append_ulonglong(xid->formatID); + } + } + + buf->append(STRING_WITH_LEN("; ")); + } + + // discard trailing space + if (buf->length() > start) + buf->length(buf->length() - 1); + } + + { + ulonglong length= buf->length() - start; + uchar *place= (uchar *)(buf->ptr() + (start - 2)); + DBUG_ASSERT(length < 249); // in fact < 110 + DBUG_ASSERT(start >= 3); + + DBUG_ASSERT((place - 1)[0] == SESSION_TRACK_TRANSACTION_CHARACTERISTICS); + /* Length of the overall entity. */ + place[0]= length + 1; + /* Transaction characteristics (length-encoded string). */ + place[1]= length; + } + } + + reset(); + + return false; } /** Reset the m_changed flag for next statement. +*/ - @return void +void Transaction_state_tracker::reset() +{ + m_changed= false; + tx_reported_state= tx_curr_state; + tx_changed= TX_CHG_NONE; +} + + +/** + Helper function: turn table info into table access flag. + Accepts table lock type and engine type flag (transactional/ + non-transactional), and returns the corresponding access flag + out of TX_READ_TRX, TX_READ_UNSAFE, TX_WRITE_TRX, TX_WRITE_UNSAFE. + + @param thd [IN] The thd handle + @param set [IN] The table's access/lock type + @param set [IN] Whether the table's engine is transactional + + @return The table access flag */ -void Current_schema_tracker::reset() +enum_tx_state Transaction_state_tracker::calc_trx_state(THD *thd, + thr_lock_type l, + bool has_trx) { - m_changed= false; + enum_tx_state s; + bool read= (l <= TL_READ_NO_INSERT); + + if (read) + s= has_trx ? TX_READ_TRX : TX_READ_UNSAFE; + else + s= has_trx ? TX_WRITE_TRX : TX_WRITE_UNSAFE; + + return s; +} + + +/** + Register the end of an (implicit or explicit) transaction. + + @param thd [IN] The thd handle +*/ +void Transaction_state_tracker::end_trx(THD *thd) +{ + DBUG_ASSERT(thd->variables.session_track_transaction_info > TX_TRACK_NONE); + + if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL)) + return; + + if (tx_curr_state != TX_EMPTY) + { + if (tx_curr_state & TX_EXPLICIT) + tx_changed |= TX_CHG_CHISTICS; + tx_curr_state &= TX_LOCKED_TABLES; + } + update_change_flags(thd); +} + + +/** + Clear flags pertaining to the current statement or transaction. + May be called repeatedly within the same execution cycle. + + @param thd [IN] The thd handle. + @param set [IN] The flags to clear +*/ + +void Transaction_state_tracker::clear_trx_state(THD *thd, uint clear) +{ + if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL)) + return; + + tx_curr_state &= ~clear; + update_change_flags(thd); +} + + +/** + Add flags pertaining to the current statement or transaction. + May be called repeatedly within the same execution cycle, + e.g. to add access info for more tables. + + @param thd [IN] The thd handle. 
+ @param set [IN] The flags to add +*/ + +void Transaction_state_tracker::add_trx_state(THD *thd, uint add) +{ + if ((!m_enabled) || (thd->state_flags & Open_tables_state::BACKUPS_AVAIL)) + return; + + if (add == TX_EXPLICIT) + { + /* Always send characteristic item (if tracked), always replace state. */ + tx_changed |= TX_CHG_CHISTICS; + tx_curr_state = TX_EXPLICIT; + } + + /* + If we're not in an implicit or explicit transaction, but + autocommit==0 and tables are accessed, we flag "implicit transaction." + */ + else if (!(tx_curr_state & (TX_EXPLICIT|TX_IMPLICIT)) && + (thd->variables.option_bits & OPTION_NOT_AUTOCOMMIT) && + (add & + (TX_READ_TRX | TX_READ_UNSAFE | TX_WRITE_TRX | TX_WRITE_UNSAFE))) + tx_curr_state |= TX_IMPLICIT; + + /* + Only flag state when in transaction or LOCK TABLES is added. + */ + if ((tx_curr_state & (TX_EXPLICIT | TX_IMPLICIT)) || + (add & TX_LOCKED_TABLES)) + tx_curr_state |= add; + + update_change_flags(thd); +} + + +/** + Add "unsafe statement" flag if applicable. + + @param thd [IN] The thd handle. + @param set [IN] The flags to add +*/ + +void Transaction_state_tracker::add_trx_state_from_thd(THD *thd) +{ + if (m_enabled) + { + if (thd->lex->is_stmt_unsafe()) + add_trx_state(thd, TX_STMT_UNSAFE); + } +} + + +/** + Set read flags (read only/read write) pertaining to the next + transaction. + + @param thd [IN] The thd handle. + @param set [IN] The flags to set +*/ + +void Transaction_state_tracker::set_read_flags(THD *thd, + enum enum_tx_read_flags flags) +{ + if (m_enabled && (tx_read_flags != flags)) + { + tx_read_flags = flags; + tx_changed |= TX_CHG_CHISTICS; + mark_as_changed(thd, NULL); + } +} + + +/** + Set isolation level pertaining to the next transaction. + + @param thd [IN] The thd handle. + @param set [IN] The isolation level to set +*/ + +void Transaction_state_tracker::set_isol_level(THD *thd, + enum enum_tx_isol_level level) +{ + if (m_enabled && (tx_isol_level != level)) + { + tx_isol_level = level; + tx_changed |= TX_CHG_CHISTICS; + mark_as_changed(thd, NULL); + } } @@ -920,7 +1484,7 @@ Session_state_change_tracker::Session_state_change_tracker() **/ -bool Session_state_change_tracker::update(THD *thd) +bool Session_state_change_tracker::update(THD *thd, set_var *) { m_enabled= thd->variables.session_track_state_change; return false; @@ -938,12 +1502,13 @@ bool Session_state_change_tracker::update(THD *thd) bool Session_state_change_tracker::store(THD *thd, String *buf) { - if (buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC)) + if (unlikely((1 + 1 + 1 + buf->length() >= MAX_PACKET_LENGTH) || + buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC))) return true; compile_time_assert(SESSION_TRACK_STATE_CHANGE < 251); /* Session state type (SESSION_TRACK_STATE_CHANGE) */ - buf->q_net_store_length((ulonglong)SESSION_TRACK_STATE_CHANGE); + buf->q_append((char)SESSION_TRACK_STATE_CHANGE); /* Length of the overall entity (1 byte) */ buf->q_append('\1'); @@ -956,17 +1521,6 @@ bool Session_state_change_tracker::store(THD *thd, String *buf) return false; } -/** - Mark the tracker as changed and associated session - attributes accordingly. -*/ - -void Session_state_change_tracker::mark_as_changed(THD *thd, LEX_CSTRING *) -{ - m_changed= true; - thd->lex->safe_to_cache_query= 0; - thd->server_status|= SERVER_SESSION_STATE_CHANGED; -} /** Reset the m_changed flag for next statement. @@ -977,6 +1531,7 @@ void Session_state_change_tracker::reset() m_changed= false; } + /** Find if there is a session state change. 
*/ @@ -994,7 +1549,12 @@ bool Session_state_change_tracker::is_state_changed(THD *) Session_tracker::Session_tracker() { - for (int i= 0; i <= SESSION_TRACKER_END; i ++) + /* track data ID fit into one byte in net coding */ + compile_time_assert(SESSION_TRACK_END < 251); + /* one tracker could serv several tracking data */ + compile_time_assert((uint)SESSION_TRACK_END >= (uint)SESSION_TRACKER_END); + + for (int i= 0; i < SESSION_TRACKER_END; i++) m_trackers[i]= NULL; } @@ -1024,9 +1584,9 @@ void Session_tracker::enable(THD *thd) m_trackers[SESSION_GTIDS_TRACKER]= new (std::nothrow) Not_implemented_tracker; m_trackers[TRANSACTION_INFO_TRACKER]= - new (std::nothrow) Not_implemented_tracker; + new (std::nothrow) Transaction_state_tracker; - for (int i= 0; i <= SESSION_TRACKER_END; i ++) + for (int i= 0; i < SESSION_TRACKER_END; i++) m_trackers[i]->enable(thd); } @@ -1039,20 +1599,14 @@ void Session_tracker::enable(THD *thd) @retval true Failure */ -bool Session_tracker::server_boot_verify(const CHARSET_INFO *char_set) +bool Session_tracker::server_boot_verify(CHARSET_INFO *char_set) { - Session_sysvars_tracker *server_tracker; bool result; - sys_var *svar= find_sys_var_ex(NULL, SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, - SESSION_TRACK_SYSTEM_VARIABLES_NAME.length, - false, true); - DBUG_ASSERT(svar); - set_var tmp(NULL, SHOW_OPT_GLOBAL, svar, &null_lex_str, NULL); - svar->session_save_default(NULL, &tmp); - server_tracker= new (std::nothrow) Session_sysvars_tracker(); - result= server_tracker->server_init_check(NULL, char_set, - tmp.save_result.string_value); - delete server_tracker; + LEX_STRING tmp; + tmp.str= global_system_variables.session_track_system_variables; + tmp.length= safe_strlen(tmp.str); + result= + Session_sysvars_tracker::server_init_check(NULL, char_set, tmp); return result; } @@ -1067,7 +1621,6 @@ bool Session_tracker::server_boot_verify(const CHARSET_INFO *char_set) void Session_tracker::store(THD *thd, String *buf) { - /* Temporary buffer to store all the changes. */ size_t start; /* @@ -1079,7 +1632,7 @@ void Session_tracker::store(THD *thd, String *buf) start= buf->length(); /* Get total length. */ - for (int i= 0; i <= SESSION_TRACKER_END; i ++) + for (int i= 0; i < SESSION_TRACKER_END; i++) { if (m_trackers[i]->is_changed() && m_trackers[i]->store(thd, buf)) @@ -1105,3 +1658,5 @@ void Session_tracker::store(THD *thd, String *buf) net_store_length(data - 1, length); } + +#endif //EMBEDDED_LIBRARY diff --git a/sql/session_tracker.h b/sql/session_tracker.h index 7025c34967d..431726f03ed 100644 --- a/sql/session_tracker.h +++ b/sql/session_tracker.h @@ -20,6 +20,7 @@ #include "m_string.h" #include "thr_lock.h" +#ifndef EMBEDDED_LIBRARY /* forward declarations */ class THD; class set_var; @@ -32,12 +33,10 @@ enum enum_session_tracker CURRENT_SCHEMA_TRACKER, /* Current schema */ SESSION_STATE_CHANGE_TRACKER, SESSION_GTIDS_TRACKER, /* Tracks GTIDs */ - TRANSACTION_INFO_TRACKER /* Transaction state */ + TRANSACTION_INFO_TRACKER, /* Transaction state */ + SESSION_TRACKER_END /* must be the last */ }; -#define SESSION_TRACKER_END TRANSACTION_INFO_TRACKER - - /** State_tracker @@ -54,8 +53,7 @@ enum enum_session_tracker the respective system variable either through SET command or via command line option. As required in system variable handling, this interface also includes two functions to help in the verification of the supplied value - (ON_CHECK) and the updation (ON_UPDATE) of the tracker system variable, - namely - check() and update(). 
+ (ON_UPDATE) of the tracker system variable, namely - update(). */ class State_tracker @@ -91,22 +89,19 @@ public: /** Called in the constructor of THD*/ virtual bool enable(THD *thd)= 0; - /** To be invoked when the tracker's system variable is checked (ON_CHECK). */ - virtual bool check(THD *thd, set_var *var)= 0; - /** To be invoked when the tracker's system variable is updated (ON_UPDATE).*/ - virtual bool update(THD *thd)= 0; + virtual bool update(THD *thd, set_var *var)= 0; /** Store changed data into the given buffer. */ virtual bool store(THD *thd, String *buf)= 0; /** Mark the entity as changed. */ - virtual void mark_as_changed(THD *thd, LEX_CSTRING *name)= 0; + virtual void mark_as_changed(THD *thd, LEX_CSTRING *name); }; bool sysvartrack_validate_value(THD *thd, const char *str, size_t len); bool sysvartrack_reprint_value(THD *thd, char *str, size_t len); -bool sysvartrack_update(THD *thd); +bool sysvartrack_update(THD *thd, set_var *var); size_t sysvartrack_value_len(THD *thd); bool sysvartrack_value_construct(THD *thd, char *val, size_t len); @@ -122,7 +117,7 @@ bool sysvartrack_value_construct(THD *thd, char *val, size_t len); class Session_tracker { private: - State_tracker *m_trackers[SESSION_TRACKER_END + 1]; + State_tracker *m_trackers[SESSION_TRACKER_END]; /* The following two functions are private to disable copying. */ Session_tracker(Session_tracker const &other) @@ -146,7 +141,7 @@ public: /* trick to make happy memory accounting system */ void deinit() { - for (int i= 0; i <= SESSION_TRACKER_END; i ++) + for (int i= 0; i < SESSION_TRACKER_END; i++) { if (m_trackers[i]) delete m_trackers[i]; @@ -155,7 +150,7 @@ public: } void enable(THD *thd); - bool server_boot_verify(const CHARSET_INFO *char_set); + static bool server_boot_verify(CHARSET_INFO *char_set); /** Returns the pointer to the tracker object for the specified tracker. */ inline State_tracker *get_tracker(enum_session_tracker tracker) const @@ -174,4 +169,136 @@ public: void store(THD *thd, String *main_buf); }; + +/* + Transaction_state_tracker +*/ + +/** + Transaction state (no transaction, transaction active, work attached, etc.) 
+*/ +enum enum_tx_state { + TX_EMPTY = 0, ///< "none of the below" + TX_EXPLICIT = 1, ///< an explicit transaction is active + TX_IMPLICIT = 2, ///< an implicit transaction is active + TX_READ_TRX = 4, ///< transactional reads were done + TX_READ_UNSAFE = 8, ///< non-transaction reads were done + TX_WRITE_TRX = 16, ///< transactional writes were done + TX_WRITE_UNSAFE = 32, ///< non-transactional writes were done + TX_STMT_UNSAFE = 64, ///< "unsafe" (non-deterministic like UUID()) stmts + TX_RESULT_SET = 128, ///< result set was sent + TX_WITH_SNAPSHOT= 256, ///< WITH CONSISTENT SNAPSHOT was used + TX_LOCKED_TABLES= 512 ///< LOCK TABLES is active +}; + + +/** + Transaction access mode +*/ +enum enum_tx_read_flags { + TX_READ_INHERIT = 0, ///< not explicitly set, inherit session.tx_read_only + TX_READ_ONLY = 1, ///< START TRANSACTION READ ONLY, or tx_read_only=1 + TX_READ_WRITE = 2, ///< START TRANSACTION READ WRITE, or tx_read_only=0 +}; + + +/** + Transaction isolation level +*/ +enum enum_tx_isol_level { + TX_ISOL_INHERIT = 0, ///< not explicitly set, inherit session.tx_isolation + TX_ISOL_UNCOMMITTED = 1, + TX_ISOL_COMMITTED = 2, + TX_ISOL_REPEATABLE = 3, + TX_ISOL_SERIALIZABLE= 4 +}; + + +/** + Transaction tracking level +*/ +enum enum_session_track_transaction_info { + TX_TRACK_NONE = 0, ///< do not send tracker items on transaction info + TX_TRACK_STATE = 1, ///< track transaction status + TX_TRACK_CHISTICS = 2 ///< track status and characteristics +}; + + +/** + This is a tracker class that enables & manages the tracking of + current transaction info for a particular connection. +*/ + +class Transaction_state_tracker : public State_tracker +{ +private: + /** Helper function: turn table info into table access flag */ + enum_tx_state calc_trx_state(THD *thd, thr_lock_type l, bool has_trx); +public: + /** Constructor */ + Transaction_state_tracker(); + bool enable(THD *thd) + { return update(thd, NULL); } + bool update(THD *thd, set_var *var); + bool store(THD *thd, String *buf); + + /** Change transaction characteristics */ + void set_read_flags(THD *thd, enum enum_tx_read_flags flags); + void set_isol_level(THD *thd, enum enum_tx_isol_level level); + + /** Change transaction state */ + void clear_trx_state(THD *thd, uint clear); + void add_trx_state(THD *thd, uint add); + void inline add_trx_state(THD *thd, thr_lock_type l, bool has_trx) + { + add_trx_state(thd, calc_trx_state(thd, l, has_trx)); + } + void add_trx_state_from_thd(THD *thd); + void end_trx(THD *thd); + + +private: + enum enum_tx_changed { + TX_CHG_NONE = 0, ///< no changes from previous stmt + TX_CHG_STATE = 1, ///< state has changed from previous stmt + TX_CHG_CHISTICS = 2 ///< characteristics have changed from previous stmt + }; + + /** any trackable changes caused by this statement? */ + uint tx_changed; + + /** transaction state */ + uint tx_curr_state, tx_reported_state; + + /** r/w or r/o set? session default? */ + enum enum_tx_read_flags tx_read_flags; + + /** isolation level */ + enum enum_tx_isol_level tx_isol_level; + + void reset(); + + inline void update_change_flags(THD *thd) + { + tx_changed &= ~TX_CHG_STATE; + tx_changed |= (tx_curr_state != tx_reported_state) ? 
TX_CHG_STATE : 0; + if (tx_changed != TX_CHG_NONE) + mark_as_changed(thd, NULL); + } +}; + +#define TRANSACT_TRACKER(X) \ + do { if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) \ + {((Transaction_state_tracker *) \ + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER)) \ + ->X; } } while(0) +#define SESSION_TRACKER_CHANGED(A,B,C) \ + thd->session_tracker.mark_as_changed(A,B,C) +#else + +#define TRANSACT_TRACKER(X) do{}while(0) +#define SESSION_TRACKER_CHANGED(A,B,C) do{}while(0) + +#endif //EMBEDDED_LIBRARY + #endif /* SESSION_TRACKER_INCLUDED */ diff --git a/sql/set_var.cc b/sql/set_var.cc index 84ed7810650..26eb5127a0b 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -220,14 +220,13 @@ bool sys_var::update(THD *thd, set_var *var) */ if ((var->type == OPT_SESSION) && (!ret)) { - thd->session_tracker.mark_as_changed(thd, SESSION_SYSVARS_TRACKER, - (LEX_CSTRING*)var->var); + SESSION_TRACKER_CHANGED(thd, SESSION_SYSVARS_TRACKER, + (LEX_CSTRING*)var->var); /* Here MySQL sends variable name to avoid reporting change of the tracker itself, but we decided that it is not needed */ - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, - NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); } return ret; @@ -894,7 +893,7 @@ int set_var_user::update(THD *thd) return -1; } - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); return 0; } @@ -1002,6 +1001,7 @@ int set_var_collation_client::update(THD *thd) character_set_results); /* Mark client collation variables as changed */ +#ifndef EMBEDDED_LIBRARY if (thd->session_tracker.get_tracker(SESSION_SYSVARS_TRACKER)->is_enabled()) { sys_var *svar; @@ -1024,6 +1024,7 @@ int set_var_collation_client::update(THD *thd) mysql_mutex_unlock(&LOCK_plugin); } thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); +#endif //EMBEDDED_LIBRARY thd->protocol_text.init(thd); thd->protocol_binary.init(thd); diff --git a/sql/set_var.h b/sql/set_var.h index 16111ad7111..ba8027edc72 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -49,8 +49,6 @@ int mysql_add_sys_var_chain(sys_var *chain); int mysql_del_sys_var_chain(sys_var *chain); -extern const LEX_CSTRING SESSION_TRACK_SYSTEM_VARIABLES_NAME; - /** A class representing one system variable - that is something that can be accessed as @@global.variable_name or @@session.variable_name, diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 4e301a9df02..361d68ff76a 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -7150,10 +7150,6 @@ skip-to-error-number 3000 ER_MYSQL_57_TEST eng "5.7 test" -ER_NET_OK_PACKET_TOO_LARGE 08S01 - eng "OK packet too large" - ukr "Пакет OK надто великий" - # MariaDB extra error numbers starts from 4000 skip-to-error-number 4000 diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 8488e8dfd62..c344a6c6ed8 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2048,6 +2048,8 @@ sp_head::execute_procedure(THD *thd, List *args) break; } } + + TRANSACT_TRACKER(add_trx_state_from_thd(thd)); } /* @@ -3063,6 +3065,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, what is needed from the substatement gained */ thd->transaction.stmt.modified_non_trans_table |= parent_modified_non_trans_table; + + TRANSACT_TRACKER(add_trx_state_from_thd(thd)); + /* Unlike for PS we should not call Item's destructors for newly created items after execution of each instruction in 
stored routine. This is diff --git a/sql/sql_base.cc b/sql/sql_base.cc index d7812db53bd..7757068b265 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2093,6 +2093,9 @@ Locked_tables_list::init_locked_tables(THD *thd) return TRUE; } } + + TRANSACT_TRACKER(add_trx_state(thd, TX_LOCKED_TABLES)); + thd->enter_locked_tables_mode(LTM_LOCK_TABLES); return FALSE; @@ -2133,6 +2136,8 @@ Locked_tables_list::unlock_locked_tables(THD *thd) } thd->leave_locked_tables_mode(); + TRANSACT_TRACKER(clear_trx_state(thd, TX_LOCKED_TABLES)); + DBUG_ASSERT(thd->transaction.stmt.is_empty()); close_thread_tables(thd); @@ -4354,6 +4359,13 @@ static bool check_lock_and_start_stmt(THD *thd, table_list->table->file->print_error(error, MYF(0)); DBUG_RETURN(1); } + + /* + Record in transaction state tracking + */ + TRANSACT_TRACKER(add_trx_state(thd, lock_type, + table_list->table->file->has_transactions())); + DBUG_RETURN(0); } diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 06c5f992939..8ff4684f0ff 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1381,6 +1381,21 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) DBUG_VOID_RETURN; } + /* + Do not store queries while tracking transaction state. + The tracker already flags queries that actually have + transaction tracker items, but this will make behavior + more straight forward. + */ +#ifndef EMBEDDED_LIBRARY + if (thd->variables.session_track_transaction_info != TX_TRACK_NONE) + { + DBUG_PRINT("qcache", ("Do not work with transaction tracking")); + DBUG_VOID_RETURN; + } +#endif //EMBEDDED_LIBRARY + + /* The following assert fails if we haven't called send_result_to_client */ DBUG_ASSERT(thd->base_query.is_alloced() || thd->base_query.ptr() == thd->query()); @@ -1719,6 +1734,20 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length) goto err; } + /* + Don't allow serving from Query_cache while tracking transaction + state. This is a safeguard in case an otherwise matching query + was added to the cache before tracking was turned on. 
+ */ +#ifndef EMBEDDED_LIBRARY + if (thd->variables.session_track_transaction_info != TX_TRACK_NONE) + { + DBUG_PRINT("qcache", ("Do not work with transaction tracking")); + goto err; + } +#endif //EMBEDDED_LIBRARY + + thd->query_cache_is_applicable= 1; sql= org_sql; sql_end= sql + query_length; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index a99e375cfbd..54bc4b9959f 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1466,7 +1466,9 @@ void THD::init(void) debug_sync_init_thread(this); #endif /* defined(ENABLED_DEBUG_SYNC) */ +#ifndef EMBEDDED_LIBRARY session_tracker.enable(this); +#endif //EMBEDDED_LIBRARY apc_target.init(&LOCK_thd_data); DBUG_VOID_RETURN; @@ -1768,7 +1770,9 @@ THD::~THD() status_var.local_memory_used-= sizeof(THD); /* trick to make happy memory accounting system */ +#ifndef EMBEDDED_LIBRARY session_tracker.deinit(); +#endif //EMBEDDED_LIBRARY if (status_var.local_memory_used != 0) { diff --git a/sql/sql_class.h b/sql/sql_class.h index a6af33f7c5a..1af6d1d87d3 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -689,10 +689,11 @@ typedef struct system_variables my_bool pseudo_slave_mode; + char *session_track_system_variables; + ulong session_track_transaction_info; my_bool session_track_schema; my_bool session_track_state_change; - char *session_track_system_variables; } SV; /** @@ -4059,7 +4060,9 @@ private: LEX_STRING invoker_host; public: +#ifndef EMBEDDED_LIBRARY Session_tracker session_tracker; +#endif //EMBEDDED_LIBRARY /* Flag, mutex and condition for a thread to wait for a signal from another thread. @@ -4293,6 +4296,8 @@ my_eof(THD *thd) { thd->set_row_count_func(-1); thd->get_stmt_da()->set_eof_status(thd); + + TRANSACT_TRACKER(add_trx_state(thd, TX_RESULT_SET)); } #define tmp_disable_binlog(A) \ diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 128281c7686..20538fe1fb4 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1037,7 +1037,7 @@ exit: if (thd->db && cmp_db_names(thd->db, db) && !error) { mysql_change_db_impl(thd, NULL, 0, thd->variables.collation_server); - thd->session_tracker.mark_as_changed(thd, CURRENT_SCHEMA_TRACKER, NULL); + SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL); } my_dirend(dirp); DBUG_RETURN(error); @@ -1591,8 +1591,8 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) mysql_change_db_impl(thd, &new_db_file_name, db_access, db_default_cl); done: - thd->session_tracker.mark_as_changed(thd, CURRENT_SCHEMA_TRACKER, NULL); - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, NULL); + SESSION_TRACKER_CHANGED(thd, CURRENT_SCHEMA_TRACKER, NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); DBUG_RETURN(FALSE); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7cb97d156cb..8dc34c2dfe2 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3855,9 +3855,9 @@ mysql_execute_command(THD *thd) /* in case of create temp tables if @@session_track_state_change is ON then send session state notification in OK packet */ if(create_info.options & HA_LEX_CREATE_TMP_TABLE) - thd->session_tracker.mark_as_changed(thd, - SESSION_STATE_CHANGE_TRACKER, - NULL); + { + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); + } my_ok(thd); } } @@ -4619,8 +4619,7 @@ end_with_restore_list: send the boolean tracker in the OK packet */ if(!res && (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) { - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, - NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); } 
break; } @@ -5433,8 +5432,7 @@ end_with_restore_list: else { /* Reset the isolation level and access mode if no chaining transaction.*/ - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); } /* Disconnect the current client connection. */ if (tx_release) @@ -5481,8 +5479,7 @@ end_with_restore_list: else { /* Reset the isolation level and access mode if no chaining transaction.*/ - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); } /* Disconnect the current client connection. */ if (tx_release) @@ -5967,8 +5964,7 @@ end_with_restore_list: We've just done a commit, reset transaction isolation level and access mode to the session default. */ - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); my_ok(thd); break; } @@ -5986,8 +5982,7 @@ end_with_restore_list: We've just done a rollback, reset transaction isolation level and access mode to the session default. */ - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); my_ok(thd); break; } @@ -6205,6 +6200,9 @@ finish: { thd->mdl_context.release_statement_locks(); } + + TRANSACT_TRACKER(add_trx_state_from_thd(thd)); + WSREP_TO_ISOLATION_END; #ifdef WITH_WSREP diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index db6a4b9b15b..b715e33ae62 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -323,8 +323,6 @@ static void unlock_variables(THD *thd, struct system_variables *vars); static void cleanup_variables(struct system_variables *vars); static void plugin_vars_free_values(sys_var *vars); static void restore_ptr_backup(uint n, st_ptr_backup *backup); -#define my_intern_plugin_lock(A,B) intern_plugin_lock(A,B) -#define my_intern_plugin_lock_ci(A,B) intern_plugin_lock(A,B) static plugin_ref intern_plugin_lock(LEX *lex, plugin_ref plugin); static void intern_plugin_unlock(LEX *lex, plugin_ref plugin); static void reap_plugins(void); @@ -2806,7 +2804,7 @@ sys_var *find_sys_var_ex(THD *thd, const char *str, size_t length, { mysql_rwlock_unlock(&LOCK_system_variables_hash); LEX *lex= thd ? 
thd->lex : 0; - if (!(plugin= my_intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin)))) + if (!(plugin= intern_plugin_lock(lex, plugin_int_to_ref(pi->plugin)))) var= NULL; /* failed to lock it, it must be uninstalling */ else if (!(plugin_state(plugin) & PLUGIN_IS_READY)) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index cc41bd6284e..eab2863588d 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2759,8 +2759,7 @@ void mysql_sql_stmt_prepare(THD *thd) } else { - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, - NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); my_ok(thd, 0L, 0L, "Statement prepared"); } @@ -3212,8 +3211,7 @@ void mysql_sql_stmt_close(THD *thd) else { stmt->deallocate(); - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, - NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); my_ok(thd); } } diff --git a/sql/sql_string.cc b/sql/sql_string.cc index 28e7b899133..a5f266b2d2c 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -1161,13 +1161,15 @@ uint convert_to_printable(char *to, size_t to_len, void String::q_net_store_length(ulonglong length) { + DBUG_ASSERT(Alloced_length >= (str_length + net_length_size(length))); char *pos= (char *) net_store_length((uchar *)(Ptr + str_length), length); str_length= pos - Ptr; } void String::q_net_store_data(const uchar *from, size_t length) { + DBUG_ASSERT(Alloced_length >= (str_length + length + + net_length_size(length))); q_net_store_length(length); - bool res= append((const char *)from, length); - DBUG_ASSERT(!res); + q_append((const char *)from, length); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index bed116a2930..7784a2b188a 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -9230,8 +9230,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err_new_table_cleanup; } /* in case of alter temp table send the tracker in OK packet */ - thd->session_tracker.mark_as_changed(thd, SESSION_STATE_CHANGE_TRACKER, - NULL); + SESSION_TRACKER_CHANGED(thd, SESSION_STATE_CHANGE_TRACKER, NULL); } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 68be0cdbdca..462bfe52741 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -3363,6 +3363,20 @@ bool Sys_var_tx_read_only::session_update(THD *thd, set_var *var) { // @see Sys_var_tx_isolation::session_update() above for the rules. thd->tx_read_only= var->save_result.ulonglong_value; + +#ifndef EMBEDDED_LIBRARY + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + { + Transaction_state_tracker *tst= (Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER); + + if (var->type == OPT_DEFAULT) + tst->set_read_flags(thd, + thd->tx_read_only ? 
TX_READ_ONLY : TX_READ_WRITE); + else + tst->set_read_flags(thd, TX_READ_INHERIT); + } +#endif //EMBEDDED_LIBRARY } return false; } @@ -5375,11 +5389,10 @@ static Sys_var_ulong Sys_log_tc_size( BLOCK_SIZE(my_getpagesize())); #endif -const LEX_CSTRING SESSION_TRACK_SYSTEM_VARIABLES_NAME= - {STRING_WITH_LEN("session_track_system_variables")}; +#ifndef EMBEDDED_LIBRARY static Sys_var_sesvartrack Sys_track_session_sys_vars( - SESSION_TRACK_SYSTEM_VARIABLES_NAME.str, + "session_track_system_variables", "Track changes in registered system variables.", CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET, DEFAULT("autocommit,character_set_client,character_set_connection," @@ -5390,30 +5403,61 @@ static bool update_session_track_schema(sys_var *self, THD *thd, enum_var_type type) { DBUG_ENTER("update_session_track_schema"); - DBUG_RETURN(thd->session_tracker.get_tracker(CURRENT_SCHEMA_TRACKER)->update(thd)); + DBUG_RETURN(thd->session_tracker.get_tracker(CURRENT_SCHEMA_TRACKER)-> + update(thd, NULL)); } static Sys_var_mybool Sys_session_track_schema( "session_track_schema", - "Track changes to the 'default schema'.", + "Track changes to the default schema.", SESSION_VAR(session_track_schema), CMD_LINE(OPT_ARG), DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(update_session_track_schema)); + +static bool update_session_track_tx_info(sys_var *self, THD *thd, + enum_var_type type) +{ + DBUG_ENTER("update_session_track_tx_info"); + DBUG_RETURN(thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER)-> + update(thd, NULL)); +} + +static const char *session_track_transaction_info_names[]= + { "OFF", "STATE", "CHARACTERISTICS", NullS }; + +static Sys_var_enum Sys_session_track_transaction_info( + "session_track_transaction_info", + "Track changes to the transaction attributes. OFF to disable; " + "STATE to track just transaction state (Is there an active transaction? " + "Does it have any data? 
etc.); CHARACTERISTICS to track transaction " + "state and report all statements needed to start a transaction with" + "the same characteristics (isolation level, read only/read write," + "snapshot - but not any work done / data modified within the " + "transaction).", + SESSION_VAR(session_track_transaction_info), + CMD_LINE(REQUIRED_ARG), session_track_transaction_info_names, + DEFAULT(0), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(update_session_track_tx_info)); + + static bool update_session_track_state_change(sys_var *self, THD *thd, enum_var_type type) { DBUG_ENTER("update_session_track_state_change"); - DBUG_RETURN(thd->session_tracker.get_tracker(SESSION_STATE_CHANGE_TRACKER)->update(thd)); + DBUG_RETURN(thd->session_tracker.get_tracker(SESSION_STATE_CHANGE_TRACKER)-> + update(thd, NULL)); } static Sys_var_mybool Sys_session_track_state_change( "session_track_state_change", - "Track changes to the 'session state'.", + "Track changes to the session state.", SESSION_VAR(session_track_state_change), CMD_LINE(OPT_ARG), DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(update_session_track_state_change)); + +#endif //EMBEDDED_LIBRARY diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic index dbe84d3efcc..6f17e768d95 100644 --- a/sql/sys_vars.ic +++ b/sql/sys_vars.ic @@ -567,6 +567,7 @@ public: { DBUG_ASSERT(FALSE); } }; +#ifndef EMBEDDED_LIBRARY class Sys_var_sesvartrack: public Sys_var_charptr_base { public: @@ -602,7 +603,7 @@ public: } bool session_update(THD *thd, set_var *var) { - return sysvartrack_update(thd); + return sysvartrack_update(thd, var); } void session_save_default(THD *thd, set_var *var) { @@ -623,19 +624,17 @@ public: { DBUG_ASSERT(thd != NULL); size_t len= sysvartrack_value_len(thd); - char *res= 0; - char *buf= (char *)my_safe_alloca(len); - if (buf && !sysvartrack_value_construct(thd, buf, len)) + char *res= (char *)thd->alloc(len + sizeof(char *)); + if (res) { - size_t len= strlen(buf) + 1; - res= (char*) thd->alloc(len + sizeof(char *)); - if (res) - memcpy((*((char**) res)= res + sizeof(char *)), buf, len); - my_safe_afree(buf, len); + char *buf= res + sizeof(char *); + *((char**) res)= buf; + sysvartrack_value_construct(thd, buf, len); } return (uchar *)res; } }; +#endif //EMBEDDED_LIBRARY class Sys_var_proxy_user: public sys_var @@ -2079,7 +2078,47 @@ public: if (var->type == OPT_SESSION && Sys_var_enum::session_update(thd, var)) return TRUE; if (var->type == OPT_DEFAULT || !thd->in_active_multi_stmt_transaction()) + { +#ifndef EMBEDDED_LIBRARY + Transaction_state_tracker *tst= NULL; + + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + tst= (Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER); +#endif //EMBEDDED_LIBRARY + thd->tx_isolation= (enum_tx_isolation) var->save_result.ulonglong_value; + +#ifndef EMBEDDED_LIBRARY + if (var->type == OPT_DEFAULT) + { + enum enum_tx_isol_level l; + switch (thd->tx_isolation) { + case ISO_READ_UNCOMMITTED: + l= TX_ISOL_UNCOMMITTED; + break; + case ISO_READ_COMMITTED: + l= TX_ISOL_COMMITTED; + break; + case ISO_REPEATABLE_READ: + l= TX_ISOL_REPEATABLE; + break; + case ISO_SERIALIZABLE: + l= TX_ISOL_SERIALIZABLE; + break; + default: + DBUG_ASSERT(0); + return TRUE; + } + if (tst) + tst->set_isol_level(thd, l); + } + else if (tst) + { + tst->set_isol_level(thd, TX_ISOL_INHERIT); + } +#endif //EMBEDDED_LIBRARY + } return FALSE; } }; diff --git a/sql/transaction.cc b/sql/transaction.cc index 8b188709ce6..d728ea25b65 100644 --- 
a/sql/transaction.cc +++ b/sql/transaction.cc @@ -25,6 +25,43 @@ #include "debug_sync.h" // DEBUG_SYNC #include "sql_acl.h" + +#ifndef EMBEDDED_LIBRARY +/** + Helper: Tell tracker (if any) that transaction ended. +*/ +static void trans_track_end_trx(THD *thd) +{ + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + { + ((Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER))->end_trx(thd); + } +} + + +/** + Helper: transaction ended, SET TRANSACTION one-shot variables + revert to session values. Let the transaction state tracker know. +*/ +void trans_reset_one_shot_chistics(THD *thd) +{ + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + { + Transaction_state_tracker *tst= (Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER); + + tst->set_read_flags(thd, TX_READ_INHERIT); + tst->set_isol_level(thd, TX_ISOL_INHERIT); + } + + thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; + thd->tx_read_only= thd->variables.tx_read_only; +} +#else +#define trans_track_end_trx(A) do{}while(0) +#endif //EMBEDDED_LIBRARY + /* Conditions under which the transaction state must not change. */ static bool trans_check(THD *thd) { @@ -125,11 +162,20 @@ static bool xa_trans_force_rollback(THD *thd) bool trans_begin(THD *thd, uint flags) { int res= FALSE; +#ifndef EMBEDDED_LIBRARY + Transaction_state_tracker *tst= NULL; +#endif //EMBEDDED_LIBRARY DBUG_ENTER("trans_begin"); if (trans_check(thd)) DBUG_RETURN(TRUE); +#ifndef EMBEDDED_LIBRARY + if (thd->variables.session_track_transaction_info > TX_TRACK_NONE) + tst= (Transaction_state_tracker *) + thd->session_tracker.get_tracker(TRANSACTION_INFO_TRACKER); +#endif //EMBEDDED_LIBRARY + thd->locked_tables_list.unlock_locked_tables(thd); DBUG_ASSERT(!thd->locked_tables_mode); @@ -172,7 +218,13 @@ bool trans_begin(THD *thd, uint flags) DBUG_ASSERT(!((flags & MYSQL_START_TRANS_OPT_READ_ONLY) && (flags & MYSQL_START_TRANS_OPT_READ_WRITE))); if (flags & MYSQL_START_TRANS_OPT_READ_ONLY) + { thd->tx_read_only= true; +#ifndef EMBEDDED_LIBRARY + if (tst) + tst->set_read_flags(thd, TX_READ_ONLY); +#endif //EMBEDDED_LIBRARY + } else if (flags & MYSQL_START_TRANS_OPT_READ_WRITE) { /* @@ -189,6 +241,14 @@ bool trans_begin(THD *thd, uint flags) DBUG_RETURN(true); } thd->tx_read_only= false; + /* + This flags that tx_read_only was set explicitly, rather than + just from the session's default. + */ +#ifndef EMBEDDED_LIBRARY + if (tst) + tst->set_read_flags(thd, TX_READ_WRITE); +#endif //EMBEDDED_LIBRARY } #ifdef WITH_WSREP @@ -203,9 +263,20 @@ bool trans_begin(THD *thd, uint flags) thd->server_status|= SERVER_STATUS_IN_TRANS_READONLY; DBUG_PRINT("info", ("setting SERVER_STATUS_IN_TRANS")); +#ifndef EMBEDDED_LIBRARY + if (tst) + tst->add_trx_state(thd, TX_EXPLICIT); +#endif //EMBEDDED_LIBRARY + /* ha_start_consistent_snapshot() relies on OPTION_BEGIN flag set. */ if (flags & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT) + { +#ifndef EMBEDDED_LIBRARY + if (tst) + tst->add_trx_state(thd, TX_WITH_SNAPSHOT); +#endif //EMBEDDED_LIBRARY res= ha_start_consistent_snapshot(thd); + } DBUG_RETURN(MY_TEST(res)); } @@ -255,6 +326,8 @@ bool trans_commit(THD *thd) thd->transaction.all.m_unsafe_rollback_flags&= ~THD_TRANS::DID_WAIT; thd->lex->start_transaction_opt= 0; + trans_track_end_trx(thd); + DBUG_RETURN(MY_TEST(res)); } @@ -308,8 +381,9 @@ bool trans_commit_implicit(THD *thd) @@session.completion_type since it's documented to not have any effect on implicit commit. 
*/ - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); + + trans_track_end_trx(thd); DBUG_RETURN(res); } @@ -349,6 +423,8 @@ bool trans_rollback(THD *thd) thd->transaction.all.m_unsafe_rollback_flags&= ~THD_TRANS::DID_WAIT; thd->lex->start_transaction_opt= 0; + trans_track_end_trx(thd); + DBUG_RETURN(MY_TEST(res)); } @@ -396,6 +472,8 @@ bool trans_rollback_implicit(THD *thd) /* Rollback should clear transaction_rollback_request flag. */ DBUG_ASSERT(! thd->transaction_rollback_request); + trans_track_end_trx(thd); + DBUG_RETURN(MY_TEST(res)); } @@ -434,8 +512,7 @@ bool trans_commit_stmt(THD *thd) res= ha_commit_trans(thd, FALSE); if (! thd->in_active_multi_stmt_transaction()) { - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; + trans_reset_one_shot_chistics(thd); if (WSREP_ON) wsrep_post_commit(thd, FALSE); } @@ -487,10 +564,7 @@ bool trans_rollback_stmt(THD *thd) wsrep_register_hton(thd, FALSE); ha_rollback_trans(thd, FALSE); if (! thd->in_active_multi_stmt_transaction()) - { - thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation; - thd->tx_read_only= thd->variables.tx_read_only; - } + trans_reset_one_shot_chistics(thd); } (void) RUN_HOOK(transaction, after_rollback, (thd, FALSE)); @@ -912,6 +986,8 @@ bool trans_xa_commit(THD *thd) xid_cache_delete(thd, &thd->transaction.xid_state); thd->transaction.xid_state.xa_state= XA_NOTR; + trans_track_end_trx(thd); + DBUG_RETURN(res); } @@ -968,5 +1044,7 @@ bool trans_xa_rollback(THD *thd) xid_cache_delete(thd, &thd->transaction.xid_state); thd->transaction.xid_state.xa_state= XA_NOTR; + trans_track_end_trx(thd); + DBUG_RETURN(res); } diff --git a/sql/transaction.h b/sql/transaction.h index 54b25f1de2a..040f1a453cd 100644 --- a/sql/transaction.h +++ b/sql/transaction.h @@ -44,4 +44,10 @@ bool trans_xa_prepare(THD *thd); bool trans_xa_commit(THD *thd); bool trans_xa_rollback(THD *thd); +#ifndef EMBEDDED_LIBRARY +void trans_reset_one_shot_chistics(THD *thd); +#else +#define trans_reset_one_shot_chistics(A) do{}while(0) +#endif //EMBEDDED_LIBRARY + #endif /* TRANSACTION_H */ -- cgit v1.2.1 From 6dfa1d374e8c4d3a06e25ceb78cc6571418363ab Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 9 Aug 2016 15:49:30 +0200 Subject: MDEV-8931: (server part of) session state tracking Postreview fixes. New MySQL tests fixes. 
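
For context on how the new tracker items are meant to be consumed, a minimal
client-side sketch follows. It is not part of this patch (which covers only the
server side) and assumes the session-track getters of the C client API,
mysql_session_track_get_first()/mysql_session_track_get_next(), as shipped with
MySQL 5.7+ and MariaDB Connector/C 3.x; SESSION_TRACK_TRANSACTION_STATE and the
session_track_transaction_info variable are the ones introduced by this series.

    #include <stdio.h>
    #include <mysql.h>

    /* Assumes an already-connected MYSQL handle with CLIENT_SESSION_TRACK
       negotiated (the default for recent client libraries). */
    static void dump_trx_state(MYSQL *mysql)
    {
      const char *data;
      size_t length;

      /* Ask the server to report transaction state in OK packets. */
      if (mysql_query(mysql, "SET SESSION session_track_transaction_info='STATE'"))
        return;
      if (mysql_query(mysql, "START TRANSACTION"))
        return;

      /* Walk the tracker items attached to the last OK packet. */
      if (!mysql_session_track_get_first(mysql, SESSION_TRACK_TRANSACTION_STATE,
                                         &data, &length))
      {
        do
        {
          /* e.g. "T_______" right after START TRANSACTION */
          printf("trx state: %.*s\n", (int) length, data);
        } while (!mysql_session_track_get_next(mysql,
                                               SESSION_TRACK_TRANSACTION_STATE,
                                               &data, &length));
      }
    }

The reported value is the 8-character string built in
Transaction_state_tracker::store(): 'T'/'I'/'_' for explicit/implicit/no
transaction, followed by the read/write (un)safe, unsafe-statement, result-set
and LOCK TABLES flags.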
--- include/mysql.h.pp | 2 +- include/mysql_com.h | 4 +- libmysqld/lib_sql.cc | 3 +- mysql-test/r/mysqld--help.result | 8 +- .../r/session_track_system_variables_basic.result | 22 +- .../sys_vars/r/sysvars_server_notembedded.result | 8 +- sql/session_tracker.cc | 276 ++++++++++++--------- sql/session_tracker.h | 4 +- sql/sp_head.cc | 6 +- sql/sql_string.h | 10 - sql/sys_vars.cc | 8 +- 11 files changed, 198 insertions(+), 153 deletions(-) diff --git a/include/mysql.h.pp b/include/mysql.h.pp index 0ef3403626c..e1a0901cee8 100644 --- a/include/mysql.h.pp +++ b/include/mysql.h.pp @@ -95,7 +95,7 @@ enum enum_session_state_type SESSION_TRACK_GTIDS, SESSION_TRACK_TRANSACTION_CHARACTERISTICS, SESSION_TRACK_TRANSACTION_STATE, - SESSION_TRACK_END + SESSION_TRACK_always_at_the_end }; my_bool my_net_init(NET *net, Vio* vio, void *thd, unsigned int my_flags); void my_net_local_init(NET *net); diff --git a/include/mysql_com.h b/include/mysql_com.h index 16af9849759..82f3b9f62ba 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -559,13 +559,13 @@ enum enum_session_state_type SESSION_TRACK_GTIDS, SESSION_TRACK_TRANSACTION_CHARACTERISTICS, /* Transaction chistics */ SESSION_TRACK_TRANSACTION_STATE, /* Transaction state */ - SESSION_TRACK_END /* must be last */ + SESSION_TRACK_always_at_the_end /* must be last */ }; #define SESSION_TRACK_BEGIN SESSION_TRACK_SYSTEM_VARIABLES #define IS_SESSION_STATE_TYPE(T) \ - (((int)(T) >= SESSION_TRACK_BEGIN) && ((T) < SESSION_TRACK_END)) + (((int)(T) >= SESSION_TRACK_BEGIN) && ((T) < SESSION_TRACK_always_at_the_end)) #define net_new_transaction(net) ((net)->pkt_nr=0) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index aeee99ebe4d..097cde04b43 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -1172,8 +1172,7 @@ bool net_send_ok(THD *thd, uint server_status, uint statement_warn_count, ulonglong affected_rows, ulonglong id, const char *message, - bool unused1, - bool unused2) + bool, bool) { DBUG_ENTER("emb_net_send_ok"); MYSQL_DATA *data; diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 176e8186798..ad57cc6fe5e 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -909,7 +909,11 @@ The following options may be given as the first argument: --session-track-state-change Track changes to the session state. --session-track-system-variables=name - Track changes in registered system variables. + Track changes in registered system variables. For + compatibility with MySQL defaults this variable should be + set to "autocommit, character_set_client, + character_set_connection, character_set_results, + time_zone" --session-track-transaction-info=name Track changes to the transaction attributes. 
OFF to disable; STATE to track just transaction state (Is there @@ -1403,7 +1407,7 @@ secure-file-priv (No default value) server-id 1 session-track-schema TRUE session-track-state-change FALSE -session-track-system-variables autocommit,character_set_client,character_set_connection,character_set_results,time_zone +session-track-system-variables session-track-transaction-info OFF show-slave-auth-info FALSE silent-startup FALSE diff --git a/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result index 78ca8ca4ad1..7162e40ef6b 100644 --- a/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result +++ b/mysql-test/suite/sys_vars/r/session_track_system_variables_basic.result @@ -5,25 +5,25 @@ # Global - default SELECT @@global.session_track_system_variables; @@global.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # Session - default SELECT @@session.session_track_system_variables; @@session.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # via INFORMATION_SCHEMA.GLOBAL_VARIABLES SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; VARIABLE_NAME VARIABLE_VALUE SESSION_TRACK_SCHEMA ON SESSION_TRACK_STATE_CHANGE OFF -SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SESSION_TRACK_SYSTEM_VARIABLES SESSION_TRACK_TRANSACTION_INFO OFF # via INFORMATION_SCHEMA.SESSION_VARIABLES SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME LIKE 'session_track%' ORDER BY VARIABLE_NAME; VARIABLE_NAME VARIABLE_VALUE SESSION_TRACK_SCHEMA ON SESSION_TRACK_STATE_CHANGE OFF -SESSION_TRACK_SYSTEM_VARIABLES autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SESSION_TRACK_SYSTEM_VARIABLES SESSION_TRACK_TRANSACTION_INFO OFF SET @global_saved_tmp = @@global.session_track_system_variables; @@ -34,7 +34,7 @@ SELECT @@global.session_track_system_variables; autocommit SELECT @@session.session_track_system_variables; @@session.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # Altering session variable's value SET @@session.session_track_system_variables='autocommit'; @@ -72,25 +72,25 @@ SET @@session.session_track_system_variables = DEFAULT; SELECT @@global.session_track_system_variables; @@global.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + SELECT @@session.session_track_system_variables; @@session.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # Variables' values in a new session (con2). connect con2,"127.0.0.1",root,,test,$MASTER_MYPORT,; SELECT @@global.session_track_system_variables; @@global.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + SELECT @@session.session_track_system_variables; @@session.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # Altering session should not affect global. 
SET @@session.session_track_system_variables = 'sql_mode'; SELECT @@global.session_track_system_variables; @@global.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + SELECT @@session.session_track_system_variables; @@session.session_track_system_variables sql_mode @@ -104,7 +104,7 @@ SELECT @@global.session_track_system_variables; sql_mode SELECT @@session.session_track_system_variables; @@session.session_track_system_variables -autocommit,character_set_client,character_set_connection,character_set_results,time_zone + # Switching to the default connection. connection default; diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result index 3a73ca07402..e422be7287e 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result @@ -3818,13 +3818,13 @@ ENUM_VALUE_LIST OFF,ON READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME SESSION_TRACK_SYSTEM_VARIABLES -SESSION_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone -GLOBAL_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone +SESSION_VALUE +GLOBAL_VALUE GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE autocommit,character_set_client,character_set_connection,character_set_results,time_zone +DEFAULT_VALUE VARIABLE_SCOPE SESSION VARIABLE_TYPE VARCHAR -VARIABLE_COMMENT Track changes in registered system variables. +VARIABLE_COMMENT Track changes in registered system variables. For compatibility with MySQL defaults this variable should be set to "autocommit, character_set_client, character_set_connection, character_set_results, time_zone" NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index bd641ab8d03..3272d2a41f0 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -144,16 +144,18 @@ private: return res; } - uchar* operator[](ulong idx) - { - return my_hash_element(&m_registered_sysvars, idx); - } bool insert(sysvar_node_st *node, const sys_var *svar, myf mem_flag); + void reinit(); void reset(); + inline bool is_enabled() + { + return track_all || m_registered_sysvars.records; + } void copy(vars_list* from, THD *thd); bool parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error, - CHARSET_INFO *char_set, bool session_created); + CHARSET_INFO *char_set, bool take_mutex); bool construct_var_list(char *buf, size_t buf_len); + bool store(THD *thd, String *buf); }; /** Two objects of vars_list type are maintained to manage @@ -217,9 +219,13 @@ public: static uchar *sysvars_get_key(const char *entry, size_t *length, my_bool not_used __attribute__((unused))); + // hash iterators static my_bool name_array_filler(void *ptr, void *data_ptr); + static my_bool store_variable(void *ptr, void *data_ptr); + static my_bool reset_variable(void *ptr, void *data_ptr); + static bool check_var_list(THD *thd, LEX_STRING var_list, bool throw_error, - CHARSET_INFO *char_set, bool session_created); + CHARSET_INFO *char_set, bool take_mutex); }; @@ -284,7 +290,7 @@ public: static const unsigned int EXTRA_ALLOC= 1024; -void Session_sysvars_tracker::vars_list::reset() +void Session_sysvars_tracker::vars_list::reinit() { buffer_length= 0; track_all= 0; @@ -304,7 +310,7 @@ void Session_sysvars_tracker::vars_list::reset() void 
Session_sysvars_tracker::vars_list::copy(vars_list* from, THD *thd) { - reset(); + reinit(); track_all= from->track_all; free_hash(); buffer_length= from->buffer_length; @@ -331,7 +337,7 @@ bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node, if (!(node= (sysvar_node_st *) my_malloc(sizeof(sysvar_node_st), MYF(MY_WME | mem_flag)))) { - reset(); + reinit(); return true; } } @@ -345,7 +351,7 @@ bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node, if (!search((sys_var *)svar)) { //EOF (error is already reported) - reset(); + reinit(); return true; } } @@ -367,9 +373,7 @@ bool Session_sysvars_tracker::vars_list::insert(sysvar_node_st *node, in case of invalid/duplicate values. @param char_set [IN] charecter set information used for string manipulations. - @param session_created [IN] bool variable which says if the parse is - already executed once. The mutex on variables - is not acquired if this variable is false. + @param take_mutex [IN] take LOCK_plugin @return true Error @@ -379,11 +383,12 @@ bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, LEX_STRING var_list, bool throw_error, CHARSET_INFO *char_set, - bool session_created) + bool take_mutex) { const char separator= ','; char *token, *lasts= NULL; size_t rest= var_list.length; + reinit(); if (!var_list.str || var_list.length == 0) { @@ -408,7 +413,7 @@ bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, token value. Hence the mutex is handled here to avoid a performance overhead. */ - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_lock(&LOCK_plugin); for (;;) { @@ -429,12 +434,17 @@ bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, /* Remove leading/trailing whitespace. */ trim_whitespace(char_set, &var); - if ((svar= find_sys_var_ex(thd, var.str, var.length, throw_error, true))) + if(!strcmp(var.str,(const char *)"*")) + { + track_all= true; + } + else if ((svar= + find_sys_var_ex(thd, var.str, var.length, throw_error, true))) { if (insert(NULL, svar, m_mem_flag) == TRUE) goto error; } - else if (throw_error && session_created && thd) + else if (throw_error && thd) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_VAR, @@ -449,13 +459,13 @@ bool Session_sysvars_tracker::vars_list::parse_var_list(THD *thd, else break; } - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_unlock(&LOCK_plugin); return false; error: - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_unlock(&LOCK_plugin); return true; } @@ -465,7 +475,7 @@ bool Session_sysvars_tracker::check_var_list(THD *thd, LEX_STRING var_list, bool throw_error, CHARSET_INFO *char_set, - bool session_created) + bool take_mutex) { const char separator= ','; char *token, *lasts= NULL; @@ -485,11 +495,10 @@ bool Session_sysvars_tracker::check_var_list(THD *thd, token value. Hence the mutex is handled here to avoid a performance overhead. */ - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_lock(&LOCK_plugin); for (;;) { - sys_var *svar; LEX_STRING var; lasts= (char *) memchr(token, separator, rest); @@ -506,9 +515,10 @@ bool Session_sysvars_tracker::check_var_list(THD *thd, /* Remove leading/trailing whitespace. 
*/ trim_whitespace(char_set, &var); - if (!(svar= find_sys_var_ex(thd, var.str, var.length, throw_error, true))) + if(!strcmp(var.str,(const char *)"*") && + !find_sys_var_ex(thd, var.str, var.length, throw_error, true)) { - if (throw_error && session_created && thd) + if (throw_error && take_mutex && thd) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_VAR, @@ -517,7 +527,7 @@ bool Session_sysvars_tracker::check_var_list(THD *thd, } else { - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_unlock(&LOCK_plugin); return true; } @@ -528,7 +538,7 @@ bool Session_sysvars_tracker::check_var_list(THD *thd, else break; } - if (!thd || session_created) + if (!thd || take_mutex) mysql_mutex_unlock(&LOCK_plugin); return false; @@ -605,7 +615,10 @@ bool Session_sysvars_tracker::vars_list::construct_var_list(char *buf, my_hash_iterate(&m_registered_sysvars, &name_array_filler, &data); DBUG_ASSERT(data.idx <= m_registered_sysvars.records); - + /* + We check number of records again here because number of variables + could be reduced in case of plugin unload. + */ if (m_registered_sysvars.records == 0) { mysql_mutex_unlock(&LOCK_plugin); @@ -710,7 +723,7 @@ bool Session_sysvars_tracker::update(THD *thd, set_var *var) We are doing via tool list because there possible errors with memory in this case value will be unchanged. */ - tool_list->reset(); + tool_list->reinit(); if (tool_list->parse_var_list(thd, var->save_result.string_value, true, thd->charset(), true)) return true; @@ -719,76 +732,102 @@ bool Session_sysvars_tracker::update(THD *thd, set_var *var) } -/** - Store the data for changed system variables in the specified buffer. - Once the data is stored, we reset the flags related to state-change - (see reset()). - - @param thd [IN] The thd handle. - @paran buf [INOUT] Buffer to store the information to. - - @retval true Error - @retval false Success +/* + Function and structure to support storing variables from hash to the buffer. */ -bool Session_sysvars_tracker::store(THD *thd, String *buf) +struct st_store_variable_param { - char val_buf[SHOW_VAR_FUNC_BUFF_SIZE]; - SHOW_VAR show; - const char *value; - sysvar_node_st *node; - CHARSET_INFO *charset; - size_t val_length, length; - int idx= 0; - - /* As its always system variable. */ - show.type= SHOW_SYS; - - while ((node= (sysvar_node_st *) (*orig_list)[idx])) + THD *thd; + String *buf; +}; + +my_bool Session_sysvars_tracker::store_variable(void *ptr, void *data_ptr) +{ + Session_sysvars_tracker::sysvar_node_st *node= + (Session_sysvars_tracker::sysvar_node_st *)ptr; + if (node->m_changed) { - if (node->m_changed) + THD *thd= ((st_store_variable_param *)data_ptr)->thd; + String *buf= ((st_store_variable_param *)data_ptr)->buf; + char val_buf[SHOW_VAR_FUNC_BUFF_SIZE]; + SHOW_VAR show; + CHARSET_INFO *charset; + size_t val_length, length; + mysql_mutex_lock(&LOCK_plugin); + if (!*node->test_load) { - mysql_mutex_lock(&LOCK_plugin); - if (!*node->test_load) - { - mysql_mutex_unlock(&LOCK_plugin); - continue; - } - sys_var *svar= node->m_svar; - show.name= svar->name.str; - show.value= (char *) svar; + mysql_mutex_unlock(&LOCK_plugin); + return false; + } + sys_var *svar= node->m_svar; + bool is_plugin= svar->cast_pluginvar(); + if (!is_plugin) + mysql_mutex_unlock(&LOCK_plugin); - value= get_one_variable(thd, &show, OPT_SESSION, SHOW_SYS, NULL, - &charset, val_buf, &val_length); + /* As its always system variable. 
*/ + show.type= SHOW_SYS; + show.name= svar->name.str; + show.value= (char *) svar; + + const char *value= get_one_variable(thd, &show, OPT_SESSION, SHOW_SYS, NULL, + &charset, val_buf, &val_length); + if (is_plugin) mysql_mutex_unlock(&LOCK_plugin); - length= net_length_size(svar->name.length) + - svar->name.length + - net_length_size(val_length) + - val_length; + length= net_length_size(svar->name.length) + + svar->name.length + + net_length_size(val_length) + + val_length; - compile_time_assert(SESSION_TRACK_SYSTEM_VARIABLES < 251); - if (unlikely((1 + net_length_size(length) + length + buf->length() >= - MAX_PACKET_LENGTH) || - buf->prep_alloc(1 + net_length_size(length) + length, - EXTRA_ALLOC))) - return true; + compile_time_assert(SESSION_TRACK_SYSTEM_VARIABLES < 251); + if (unlikely((1 + net_length_size(length) + length + buf->length() >= + MAX_PACKET_LENGTH) || + buf->reserve(1 + net_length_size(length) + length, + EXTRA_ALLOC))) + return true; - /* Session state type (SESSION_TRACK_SYSTEM_VARIABLES) */ - buf->q_append((char)SESSION_TRACK_SYSTEM_VARIABLES); + /* Session state type (SESSION_TRACK_SYSTEM_VARIABLES) */ + buf->q_append((char)SESSION_TRACK_SYSTEM_VARIABLES); - /* Length of the overall entity. */ - buf->q_net_store_length((ulonglong)length); + /* Length of the overall entity. */ + buf->q_net_store_length((ulonglong)length); - /* System variable's name (length-encoded string). */ - buf->q_net_store_data((const uchar*)svar->name.str, svar->name.length); + /* System variable's name (length-encoded string). */ + buf->q_net_store_data((const uchar*)svar->name.str, svar->name.length); - /* System variable's value (length-encoded string). */ - buf->q_net_store_data((const uchar*)value, val_length); - } - ++ idx; + /* System variable's value (length-encoded string). */ + buf->q_net_store_data((const uchar*)value, val_length); } + return false; +} + +bool Session_sysvars_tracker::vars_list::store(THD *thd, String *buf) +{ + st_store_variable_param data= {thd, buf}; + return my_hash_iterate(&m_registered_sysvars, &store_variable, &data); +} + +/** + Store the data for changed system variables in the specified buffer. + Once the data is stored, we reset the flags related to state-change + (see reset()). + + @param thd [IN] The thd handle. + @paran buf [INOUT] Buffer to store the information to. + + @retval true Error + @retval false Success +*/ + +bool Session_sysvars_tracker::store(THD *thd, String *buf) +{ + if (!orig_list->is_enabled()) + return false; + + if (orig_list->store(thd, buf)) + return true; reset(); @@ -811,7 +850,8 @@ void Session_sysvars_tracker::mark_as_changed(THD *thd, Check if the specified system variable is being tracked, if so mark it as changed and also set the class's m_changed flag. */ - if ((node= (sysvar_node_st *) (orig_list->insert_or_search(node, svar)))) + if (orig_list->is_enabled() && + (node= (sysvar_node_st *) (orig_list->insert_or_search(node, svar)))) { node->m_changed= true; State_tracker::mark_as_changed(thd, var); @@ -838,20 +878,28 @@ uchar *Session_sysvars_tracker::sysvars_get_key(const char *entry, } +/* Function to support resetting hash nodes for the variables */ + +my_bool Session_sysvars_tracker::reset_variable(void *ptr, + void *data_ptr) +{ + ((Session_sysvars_tracker::sysvar_node_st *)ptr)->m_changed= false; + return false; +} + +void Session_sysvars_tracker::vars_list::reset() +{ + my_hash_iterate(&m_registered_sysvars, &reset_variable, NULL); +} + /** Prepare/reset the m_registered_sysvars hash for next statement. 
*/ void Session_sysvars_tracker::reset() { - sysvar_node_st *node; - int idx= 0; - while ((node= (sysvar_node_st *) (*orig_list)[idx])) - { - node->m_changed= false; - ++ idx; - } + orig_list->reset(); m_changed= false; } @@ -931,7 +979,7 @@ bool Current_schema_tracker::store(THD *thd, String *buf) compile_time_assert(NAME_LEN < 251); DBUG_ASSERT(length < 251); if (unlikely((1 + 1 + length + buf->length() >= MAX_PACKET_LENGTH) || - buf->prep_alloc(1 + 1 + length, EXTRA_ALLOC))) + buf->reserve(1 + 1 + length, EXTRA_ALLOC))) return true; /* Session state type (SESSION_TRACK_SCHEMA) */ @@ -1034,26 +1082,25 @@ bool Transaction_state_tracker::store(THD *thd, String *buf) /* STATE */ if (tx_changed & TX_CHG_STATE) { - uchar *to; if (unlikely((11 + buf->length() >= MAX_PACKET_LENGTH) || - ((to= (uchar *) buf->prep_append(11, EXTRA_ALLOC)) == NULL))) + buf->reserve(11, EXTRA_ALLOC))) return true; - *(to++)= (char)SESSION_TRACK_TRANSACTION_STATE; - - to= net_store_length((uchar *) to, (ulonglong) 9); - to= net_store_length((uchar *) to, (ulonglong) 8); - - *(to++)= (tx_curr_state & TX_EXPLICIT) ? 'T' : - ((tx_curr_state & TX_IMPLICIT) ? 'I' : '_'); - *(to++)= (tx_curr_state & TX_READ_UNSAFE) ? 'r' : '_'; - *(to++)= ((tx_curr_state & TX_READ_TRX) || - (tx_curr_state & TX_WITH_SNAPSHOT)) ? 'R' : '_'; - *(to++)= (tx_curr_state & TX_WRITE_UNSAFE) ? 'w' : '_'; - *(to++)= (tx_curr_state & TX_WRITE_TRX) ? 'W' : '_'; - *(to++)= (tx_curr_state & TX_STMT_UNSAFE) ? 's' : '_'; - *(to++)= (tx_curr_state & TX_RESULT_SET) ? 'S' : '_'; - *(to++)= (tx_curr_state & TX_LOCKED_TABLES) ? 'L' : '_'; + buf->q_append((char)SESSION_TRACK_TRANSACTION_STATE); + + buf->q_append((char)9); // whole packet length + buf->q_append((char)8); // results length + + buf->q_append((char)((tx_curr_state & TX_EXPLICIT) ? 'T' : + ((tx_curr_state & TX_IMPLICIT) ? 'I' : '_'))); + buf->q_append((char)((tx_curr_state & TX_READ_UNSAFE) ? 'r' : '_')); + buf->q_append((char)(((tx_curr_state & TX_READ_TRX) || + (tx_curr_state & TX_WITH_SNAPSHOT)) ? 'R' : '_')); + buf->q_append((char)((tx_curr_state & TX_WRITE_UNSAFE) ? 'w' : '_')); + buf->q_append((char)((tx_curr_state & TX_WRITE_TRX) ? 'W' : '_')); + buf->q_append((char)((tx_curr_state & TX_STMT_UNSAFE) ? 's' : '_')); + buf->q_append((char)((tx_curr_state & TX_RESULT_SET) ? 'S' : '_')); + buf->q_append((char)((tx_curr_state & TX_LOCKED_TABLES) ? 
'L' : '_')); } /* CHARACTERISTICS -- How to restart the transaction */ @@ -1066,7 +1113,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf) /* 2 length by 1 byte and code */ if (unlikely((1 + 1 + 1 + 110 + buf->length() >= MAX_PACKET_LENGTH) || - buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC))) + buf->reserve(1 + 1 + 1, EXTRA_ALLOC))) return true; compile_time_assert(SESSION_TRACK_TRANSACTION_CHARACTERISTICS < 251); @@ -1503,7 +1550,7 @@ bool Session_state_change_tracker::update(THD *thd, set_var *) bool Session_state_change_tracker::store(THD *thd, String *buf) { if (unlikely((1 + 1 + 1 + buf->length() >= MAX_PACKET_LENGTH) || - buf->prep_alloc(1 + 1 + 1, EXTRA_ALLOC))) + buf->reserve(1 + 1 + 1, EXTRA_ALLOC))) return true; compile_time_assert(SESSION_TRACK_STATE_CHANGE < 251); @@ -1550,9 +1597,10 @@ bool Session_state_change_tracker::is_state_changed(THD *) Session_tracker::Session_tracker() { /* track data ID fit into one byte in net coding */ - compile_time_assert(SESSION_TRACK_END < 251); + compile_time_assert(SESSION_TRACK_always_at_the_end < 251); /* one tracker could serv several tracking data */ - compile_time_assert((uint)SESSION_TRACK_END >= (uint)SESSION_TRACKER_END); + compile_time_assert((uint)SESSION_TRACK_always_at_the_end >= + (uint)SESSION_TRACKER_END); for (int i= 0; i < SESSION_TRACKER_END; i++) m_trackers[i]= NULL; @@ -1648,7 +1696,7 @@ void Session_tracker::store(THD *thd, String *buf) if ((size= net_length_size(length)) != 1) { - if (buf->prep_alloc(size - 1, EXTRA_ALLOC)) + if (buf->reserve(size - 1, EXTRA_ALLOC)) { buf->length(start); // it is safer to have 0-length block in case of error return; diff --git a/sql/session_tracker.h b/sql/session_tracker.h index 431726f03ed..3f73b5dc705 100644 --- a/sql/session_tracker.h +++ b/sql/session_tracker.h @@ -62,8 +62,8 @@ protected: /** Is tracking enabled for a particular session state type ? - @note: It is cache to avoid virtual functions and checking thd - when we want mark tracker as changed. + @note: it is a cache of the corresponding thd->variables.session_track_xxx + variable */ bool m_enabled; diff --git a/sql/sp_head.cc b/sql/sp_head.cc index c344a6c6ed8..41006f07a0a 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2981,8 +2981,10 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, #ifndef EMBEDDED_LIBRARY /* - if there was instruction which changed tracking state before, result - can go with this command OK packet, so better do not cache the result. + If there was instruction which changed tracking state, + the result of changed tracking state send to client in OK packed. + So it changes result sent to client and probably can be different + independent on query text. So we can't cache such results. 
*/ if ((thd->client_capabilities & CLIENT_SESSION_TRACK) && (thd->server_status & SERVER_SESSION_STATE_CHANGED)) diff --git a/sql/sql_string.h b/sql/sql_string.h index feab8070cd2..f53015fbd6b 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -566,16 +566,6 @@ public: return Ptr+ old_length; /* Area to use */ } - inline bool prep_alloc(uint32 arg_length, uint32 step_alloc) - { - uint32 new_length= arg_length + str_length; - if (new_length > Alloced_length) - { - if (realloc(new_length + step_alloc)) - return true; - } - return false; - } inline bool append(const char *s, uint32 arg_length, uint32 step_alloc) { diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 462bfe52741..a047823551b 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5393,10 +5393,12 @@ static Sys_var_ulong Sys_log_tc_size( static Sys_var_sesvartrack Sys_track_session_sys_vars( "session_track_system_variables", - "Track changes in registered system variables.", + "Track changes in registered system variables. " + "For compatibility with MySQL defaults this variable should be set to " + "\"autocommit, character_set_client, character_set_connection, " + "character_set_results, time_zone\"", CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET, - DEFAULT("autocommit,character_set_client,character_set_connection," - "character_set_results,time_zone"), + DEFAULT(""), NO_MUTEX_GUARD); static bool update_session_track_schema(sys_var *self, THD *thd, -- cgit v1.2.1 From 7b86fda0c9b6b9772c20005fb135c95277083f1d Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Sun, 28 Aug 2016 09:44:49 +0200 Subject: Fixed length of codding of COM_MULTI parts. --- sql/sql_parse.cc | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 8dc34c2dfe2..c76e22a2e57 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1476,13 +1476,16 @@ uint maria_multi_check(THD *thd, char *packet, uint packet_length) DBUG_ENTER("maria_multi_check"); while (packet_length) { + char *packet_start= packet; + size_t subpacket_length= net_field_length((uchar **)&packet_start); + uint length_length= packet_start - packet; // length of command + 3 bytes where that length was stored - uint subpacket_length= (uint3korr(packet) + 3); - DBUG_PRINT("info", ("sub-packet length: %d command: %x", - subpacket_length, packet[3])); + DBUG_PRINT("info", ("sub-packet length: %ld + %d command: %x", + (ulong)subpacket_length, length_length, + packet_start[3])); - if (subpacket_length == 3 || - subpacket_length > packet_length) + if (subpacket_length == 0 || + (subpacket_length + length_length) > packet_length) { my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR), MYF(0)); @@ -1490,8 +1493,8 @@ uint maria_multi_check(THD *thd, char *packet, uint packet_length) } counter++; - packet+= subpacket_length; - packet_length-= subpacket_length; + packet= packet_start + subpacket_length; + packet_length-= (subpacket_length + length_length); } DBUG_RETURN(counter); } @@ -2231,8 +2234,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; { + char *packet_start= packet; /* We have to store next length because it will be destroyed by '\0' */ - uint next_subpacket_length= uint3korr(packet); + size_t next_subpacket_length= net_field_length((uchar **)&packet_start); + uint next_length_length= packet_start - packet; unsigned char *readbuff= net->buff; if (net_allocate_new_packet(net, thd, MYF(0))) @@ -2246,13 +2251,19 @@ bool dispatch_command(enum enum_server_command 
command, THD *thd, while (packet_length) { current_com++; - uint subpacket_length= next_subpacket_length + 3; + size_t subpacket_length= next_subpacket_length + next_length_length; + uint length_length= next_length_length; if (subpacket_length < packet_length) - next_subpacket_length= uint3korr(packet + subpacket_length); + { + packet_start= packet + subpacket_length; + next_subpacket_length= net_field_length((uchar**)&packet_start); + next_length_length= packet_start - (packet + subpacket_length); + } /* safety like in do_command() */ packet[subpacket_length]= '\0'; - enum enum_server_command subcommand= fetch_command(thd, (packet + 3)); + enum enum_server_command subcommand= + fetch_command(thd, (packet + length_length)); if (server_command_flags[subcommand] & CF_NO_COM_MULTI) { @@ -2260,8 +2271,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, goto com_multi_end; } - if (dispatch_command(subcommand, thd, packet + (1 + 3), - subpacket_length - (1 + 3), TRUE, + if (dispatch_command(subcommand, thd, packet + (1 + length_length), + subpacket_length - (1 + length_length), TRUE, (current_com != counter))) { DBUG_ASSERT(thd->is_error()); -- cgit v1.2.1 From a322651b8aa702e58d473edfae26606f10a089fb Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Mon, 29 Aug 2016 16:44:46 +0200 Subject: MDEV-10017: Get unexpected `Empty Set` for correlated subquery with aggregate functions take into account all arguments of aggregate function --- mysql-test/r/func_group.result | 35 +++++++++++++++++++++++++++++++++++ mysql-test/t/func_group.test | 22 ++++++++++++++++++++++ sql/item.cc | 31 +++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index dc3a0b60ad3..10d50193352 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -2397,5 +2397,40 @@ Note 1276 Field or reference 'test.t10.b' of SELECT #3 was resolved in SELECT #1 Note 1003 select `test`.`t10`.`a` AS `a` from `test`.`t10` where ((`test`.`t10`.`c` < 3) or <`test`.`t10`.`a`,`test`.`t10`.`b`>((`test`.`t10`.`a`,(select `test`.`t12`.`c` from `test`.`t12` where ((`test`.`t10`.`a`) = `test`.`t12`.`c`) union select max(`test`.`t10`.`b`) from `test`.`t11` group by `test`.`t11`.`c` having ((`test`.`t10`.`a`) = (max(`test`.`t10`.`b`))))))) drop table t10,t11,t12; # +# MDEV-10017: Get unexpected `Empty Set` for correlated subquery +# with aggregate functions +# +create table t1(c1 int, c2 int, c3 int); +insert into t1 values(1,1,1),(2,2,2),(3,3,3); +select * from t1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +create table t2(c1 int, c2 int); +insert into t2 values(2,2); +select * from t2; +c1 c2 +2 2 +explain extended +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+c) from t2 tt)); +ERROR HY000: Invalid use of group function +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+c) from t2 tt)); +ERROR HY000: Invalid use of group function +explain extended +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+tt.c1) from t2 tt)); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 +2 DEPENDENT SUBQUERY t system NULL NULL NULL NULL 1 100.00 +3 DEPENDENT SUBQUERY tt system NULL NULL NULL NULL 1 100.00 +Warnings: +Note 1276 Field or reference 'test.t1.c1' of SELECT #3 was resolved in SELECT #1 +Note 1003 select `test`.`t1`.`c1` AS `c1` from `test`.`t1` having 
(`test`.`t1`.`c1` >= <`test`.`t1`.`c1`>((select 2 AS `c` from dual order by (select min((`test`.`t1`.`c1` + 2)) from dual)))) +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+tt.c1) from t2 tt)); +c1 +2 +3 +drop table t1,t2; +# # End of 10.1 tests # diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 69a4dc1fddc..7e342928ef8 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -1657,6 +1657,28 @@ create table t11 as select * from t10; create table t12 as select * from t10; explain extended select a from t10 where c<3 or a in (select c from t12 union select max(t10.b) from t11 group by t11.c); drop table t10,t11,t12; +--echo # +--echo # MDEV-10017: Get unexpected `Empty Set` for correlated subquery +--echo # with aggregate functions +--echo # + +create table t1(c1 int, c2 int, c3 int); +insert into t1 values(1,1,1),(2,2,2),(3,3,3); +select * from t1; +create table t2(c1 int, c2 int); +insert into t2 values(2,2); +select * from t2; +--error ER_INVALID_GROUP_FUNC_USE +explain extended +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+c) from t2 tt)); +--error ER_INVALID_GROUP_FUNC_USE +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+c) from t2 tt)); + +explain extended +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+tt.c1) from t2 tt)); +select c1 from t1 having c1 >= (select t.c1 as c from t2 t order by (select min(t1.c1+tt.c1) from t2 tt)); +drop table t1,t2; + --echo # --echo # End of 10.1 tests --echo # diff --git a/sql/item.cc b/sql/item.cc index 71df93f3ffc..a9c17ef620c 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -57,6 +57,17 @@ bool cmp_items(Item *a, Item *b) } +/** + Set max_sum_func_level if it is needed +*/ +inline void set_max_sum_func_level(THD *thd, SELECT_LEX *select) +{ + if (thd->lex->in_sum_func && + thd->lex->in_sum_func->nest_level >= select->nest_level) + set_if_bigger(thd->lex->in_sum_func->max_sum_func_level, + select->nest_level - 1); +} + /***************************************************************************** ** Item functions *****************************************************************************/ @@ -4885,6 +4896,11 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) if (rf->fix_fields(thd, reference) || rf->check_cols(1)) return -1; + /* + We can not "move" aggregate function in the place where + its arguments are not defined. + */ + set_max_sum_func_level(thd, select); mark_as_dependent(thd, last_checked_context->select_lex, context->select_lex, rf, rf); @@ -4893,6 +4909,11 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) } else { + /* + We can not "move" aggregate function in the place where + its arguments are not defined. + */ + set_max_sum_func_level(thd, select); mark_as_dependent(thd, last_checked_context->select_lex, context->select_lex, this, (Item_ident*)*reference); @@ -5024,6 +5045,11 @@ bool Item_field::fix_fields(THD *thd, Item **reference) return(1); } + /* + We can not "move" aggregate function in the place where + its arguments are not defined. + */ + set_max_sum_func_level(thd, thd->lex->current_select); set_field(new_field); return 0; } @@ -5048,6 +5074,11 @@ bool Item_field::fix_fields(THD *thd, Item **reference) select->parsing_place == IN_GROUP_BY && alias_name_used ? 
*rf->ref : rf); + /* + We can not "move" aggregate function in the place where + its arguments are not defined. + */ + set_max_sum_func_level(thd, thd->lex->current_select); return FALSE; } } -- cgit v1.2.1 From 1eb58ff3b8569d7dad1f5c180a5e55683e53d205 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 31 Aug 2016 20:33:28 +0300 Subject: Update mysql-test/r/mysqld--help,win.rdiff Recent commits has added new variables and changed the default for server-id, which caused the patch in rdiff to no longer apply. --- mysql-test/r/mysqld--help,win.rdiff | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/mysqld--help,win.rdiff b/mysql-test/r/mysqld--help,win.rdiff index ac10f37b5be..5dabf58208d 100644 --- a/mysql-test/r/mysqld--help,win.rdiff +++ b/mysql-test/r/mysqld--help,win.rdiff @@ -23,9 +23,9 @@ + --shared-memory Enable the shared memory + --shared-memory-base-name=name + Base name of shared memory - --show-slave-auth-info - Show user and password in SHOW SLAVE HOSTS on this - master. + --session-track-schema + Track changes to the default schema. + (Defaults to on; use --skip-session-track-schema to disable.) @@ -1015,6 +1018,10 @@ Log slow queries to given log file. Defaults logging to 'hostname'-slow.log. Must be enabled to activate other @@ -103,12 +103,12 @@ @@ -1387,6 +1381,8 @@ secure-auth TRUE secure-file-priv (No default value) - server-id 0 + server-id 1 +shared-memory FALSE +shared-memory-base-name MYSQL - show-slave-auth-info FALSE - silent-startup FALSE - skip-grant-tables TRUE + session-track-schema TRUE + session-track-state-change FALSE + session-track-system-variables @@ -1411,6 +1407,7 @@ slave-type-conversions slow-launch-time 2 -- cgit v1.2.1 From d8ad96eac0df9241ba200634717227a7153086e1 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 31 Aug 2016 20:50:19 +0300 Subject: Update sys_vars.sysvars_server_embedded after recent pushes --- mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff | 6 +++--- mysql-test/suite/sys_vars/r/sysvars_server_embedded.result | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff index 37f38ceb7be..4b8abc33a36 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff @@ -885,7 +885,7 @@ NUMERIC_MAX_VALUE 256 @@ -2815,7 +2815,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME - DEFAULT_VALUE 181 + DEFAULT_VALUE 184 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_TYPE INT UNSIGNED @@ -1078,12 +1078,12 @@ READ_ONLY NO @@ -3319,7 +3319,7 @@ GLOBAL_VALUE_ORIGIN CONFIG - DEFAULT_VALUE 0 + DEFAULT_VALUE 1 VARIABLE_SCOPE SESSION -VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_TYPE INT UNSIGNED VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners - NUMERIC_MIN_VALUE 0 + NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 4294967295 @@ -3417,7 +3417,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result index eecebce80ad..5a9d6a56cfa 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result +++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result @@ -2811,9 +2811,9 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES SESSION_VALUE NULL 
-GLOBAL_VALUE 181 +GLOBAL_VALUE 184 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 181 +DEFAULT_VALUE 184 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Maximum number of statement instruments. @@ -3317,11 +3317,11 @@ VARIABLE_NAME SERVER_ID SESSION_VALUE 1 GLOBAL_VALUE 1 GLOBAL_VALUE_ORIGIN CONFIG -DEFAULT_VALUE 0 +DEFAULT_VALUE 1 VARIABLE_SCOPE SESSION VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners -NUMERIC_MIN_VALUE 0 +NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 4294967295 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL -- cgit v1.2.1 From 670760d504bebdf542df890f4d40f4cd3ff7a2c8 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 31 Aug 2016 10:51:31 -0700 Subject: Adjusted test results after the previous merge. --- .../suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff index 7dea7ec3d23..112b9fd99d2 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff @@ -1270,6 +1270,15 @@ VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes NUMERIC_MIN_VALUE 0 -NUMERIC_MAX_VALUE 18446744073709551615 ++NUMERIC_MAX_VALUE 4294967295 + NUMERIC_BLOCK_SIZE 1 + ENUM_VALUE_LIST NULL + READ_ONLY NO +@@ -5056,7 +5056,7 @@ + VARIABLE_TYPE BIGINT UNSIGNED + VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes + NUMERIC_MIN_VALUE 0 +-NUMERIC_MAX_VALUE 18446744073709551615 +NUMERIC_MAX_VALUE 4294967295 NUMERIC_BLOCK_SIZE 1 ENUM_VALUE_LIST NULL -- cgit v1.2.1 From dd31e5c6ab0abc695099417bf015b6b661985b41 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Fri, 26 Aug 2016 16:39:32 +0400 Subject: MDEV-9593 - Print the real version in the error log --- sql/mysqld.cc | 50 ++++++++++++++++++++++++++++---------------------- sql/mysqld.h | 2 +- sql/signal_handler.cc | 1 + 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 28e91e208e7..ea7bb2b5dcf 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4340,11 +4340,23 @@ static int init_common_variables() if (get_options(&remaining_argc, &remaining_argv)) return 1; - set_server_version(); + if (IS_SYSVAR_AUTOSIZE(&server_version_ptr)) + set_server_version(server_version, sizeof(server_version)); if (!opt_abort) - sql_print_information("%s (mysqld %s) starting as process %lu ...", - my_progname, server_version, (ulong) getpid()); + { + if (IS_SYSVAR_AUTOSIZE(&server_version_ptr)) + sql_print_information("%s (mysqld %s) starting as process %lu ...", + my_progname, server_version, (ulong) getpid()); + else + { + char real_server_version[SERVER_VERSION_LENGTH]; + set_server_version(real_server_version, sizeof(real_server_version)); + sql_print_information("%s (mysqld %s as %s) starting as process %lu ...", + my_progname, real_server_version, server_version, + (ulong) getpid()); + } + } #ifndef EMBEDDED_LIBRARY if (opt_abort && !opt_verbose) @@ -8571,7 +8583,8 @@ static bool add_many_options(DYNAMIC_ARRAY *options, my_option *list, #ifndef EMBEDDED_LIBRARY static void print_version(void) { - set_server_version(); + if (IS_SYSVAR_AUTOSIZE(&server_version_ptr)) + set_server_version(server_version, sizeof(server_version)); printf("%s Ver %s for %s on %s (%s)\n",my_progname, 
server_version,SYSTEM_TYPE,MACHINE_TYPE, MYSQL_COMPILATION_COMMENT); @@ -9680,24 +9693,17 @@ static int get_options(int *argc_ptr, char ***argv_ptr) (MYSQL_SERVER_SUFFIX is set by the compilation environment) */ -void set_server_version(void) -{ - if (!IS_SYSVAR_AUTOSIZE(&server_version_ptr)) - return; - char *version_end= server_version+sizeof(server_version)-1; - char *end= strxnmov(server_version, sizeof(server_version)-1, - MYSQL_SERVER_VERSION, - MYSQL_SERVER_SUFFIX_STR, NullS); -#ifdef EMBEDDED_LIBRARY - end= strnmov(end, "-embedded", (version_end-end)); -#endif -#ifndef DBUG_OFF - if (!strstr(MYSQL_SERVER_SUFFIX_STR, "-debug")) - end= strnmov(end, "-debug", (version_end-end)); -#endif - if (opt_log || global_system_variables.sql_log_slow || opt_bin_log) - strnmov(end, "-log", (version_end-end)); // This may slow down system - *end= 0; +void set_server_version(char *buf, size_t size) +{ + bool is_log= opt_log || global_system_variables.sql_log_slow || opt_bin_log; + bool is_debug= IF_DBUG(!strstr(MYSQL_SERVER_SUFFIX_STR, "-debug"), 0); + strxnmov(buf, size - 1, + MYSQL_SERVER_VERSION, + MYSQL_SERVER_SUFFIX_STR, + IF_EMBEDDED("-embedded", ""), + is_debug ? "-debug" : "", + is_log ? "-log" : "", + NullS); } diff --git a/sql/mysqld.h b/sql/mysqld.h index 846a01a9427..602cc258943 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -774,7 +774,7 @@ inline void dec_thread_running() thread_safe_decrement32(&thread_running); } -extern void set_server_version(void); +extern void set_server_version(char *buf, size_t size); #if defined(MYSQL_DYNAMIC_PLUGIN) && defined(_WIN32) extern "C" THD *_current_thd_noinline(); diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index bbe714fc5b4..ae49e65bb1d 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -107,6 +107,7 @@ extern "C" sig_handler handle_fatal_signal(int sig) "diagnose the problem, but since we have already crashed, \n" "something is definitely wrong and this may fail.\n\n"); + set_server_version(server_version, sizeof(server_version)); my_safe_printf_stderr("Server version: %s\n", server_version); if (dflt_key_cache) -- cgit v1.2.1 From e19ca691919a0c371062868fa681fe6d209bb635 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 1 Sep 2016 19:52:04 +0300 Subject: Update test results, mysql-test/r/mysqld--help,win.rdiff --- mysql-test/r/mysqld--help,win.rdiff | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/mysql-test/r/mysqld--help,win.rdiff b/mysql-test/r/mysqld--help,win.rdiff index 5393af1dc99..829e12a82e7 100644 --- a/mysql-test/r/mysqld--help,win.rdiff +++ b/mysql-test/r/mysqld--help,win.rdiff @@ -17,15 +17,15 @@ Buffer length for TCP/IP and socket communication --net-read-timeout=# @@ -903,6 +903,9 @@ - files within specified directory - --server-id=# Uniquely identifies the server instance in the community - of replication partners + characteristics (isolation level, read only/read + write,snapshot - but not any work done / data modified + within the transaction). + --shared-memory Enable the shared memory + --shared-memory-base-name=name + Base name of shared memory - --session-track-schema - Track changes to the default schema. - (Defaults to on; use --skip-session-track-schema to disable.) + --show-slave-auth-info + Show user and password in SHOW SLAVE HOSTS on this + master. @@ -1015,6 +1018,10 @@ Log slow queries to given log file. Defaults logging to 'hostname'-slow.log. 
Must be enabled to activate other @@ -101,14 +101,14 @@ net-read-timeout 30 net-retry-count 10 @@ -1387,6 +1381,8 @@ - secure-auth TRUE - secure-file-priv (No default value) - server-id 1 -+shared-memory FALSE -+shared-memory-base-name MYSQL - session-track-schema TRUE session-track-state-change FALSE session-track-system-variables + session-track-transaction-info OFF ++shared-memory FALSE ++shared-memory-base-name MYSQL + show-slave-auth-info FALSE + silent-startup FALSE + skip-grant-tables TRUE @@ -1411,6 +1407,7 @@ slave-type-conversions slow-launch-time 2 -- cgit v1.2.1 From b6e436278f9a2b7b8767d979feff02ff16f40cf1 Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 1 Sep 2016 21:11:47 +0300 Subject: Split rpl_parallel into two tests to make it easier to know what goes wrong. rpl_parallel_conflicts now contains the tests that can cause row conflicts in replication. --- mysql-test/suite/rpl/r/parallel_conflicts.result | 312 +++++++++++++++++++ mysql-test/suite/rpl/r/rpl_parallel.result | 341 +-------------------- .../suite/rpl/r/rpl_parallel_conflicts.result | 333 ++++++++++++++++++++ mysql-test/suite/rpl/t/rpl_parallel.test | 267 +--------------- .../suite/rpl/t/rpl_parallel_conflicts-slave.opt | 1 + mysql-test/suite/rpl/t/rpl_parallel_conflicts.test | 261 ++++++++++++++++ 6 files changed, 913 insertions(+), 602 deletions(-) create mode 100644 mysql-test/suite/rpl/r/parallel_conflicts.result create mode 100644 mysql-test/suite/rpl/r/rpl_parallel_conflicts.result create mode 100644 mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt create mode 100644 mysql-test/suite/rpl/t/rpl_parallel_conflicts.test diff --git a/mysql-test/suite/rpl/r/parallel_conflicts.result b/mysql-test/suite/rpl/r/parallel_conflicts.result new file mode 100644 index 00000000000..f3f5bc4a8ee --- /dev/null +++ b/mysql-test/suite/rpl/r/parallel_conflicts.result @@ -0,0 +1,312 @@ +include/master-slave.inc +[connection master] +connection server_2; +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=10; +SET GLOBAL slave_parallel_mode='conservative'; +include/start_slave.inc +*** MDEV-7847: "Slave worker thread retried transaction 10 time(s) in vain, giving up", followed by replication hanging *** +*** MDEV-7882: Excessive transaction retry in parallel replication *** +connection server_1; +CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=40; +SELECT @old_retries:=@@GLOBAL.slave_transaction_retries; +@old_retries:=@@GLOBAL.slave_transaction_retries +10 +SET GLOBAL slave_transaction_retries= 5; +connection server_1; +INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET @old_dbug= @@SESSION.debug_dbug; +SET @commit_id= 42; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +INSERT INTO t8 VALUES (1,1); +INSERT INTO t8 VALUES (2,2); +INSERT INTO t8 VALUES (3,3); +INSERT INTO t8 VALUES (4,4); +INSERT INTO t8 VALUES (5,5); +INSERT INTO t8 VALUES (6,6); +INSERT INTO t8 VALUES (7,7); +INSERT INTO t8 VALUES (8,8); +UPDATE t7 SET b=9 WHERE a=3; +UPDATE t7 SET b=10 WHERE a=3; +UPDATE t7 SET b=11 WHERE a=3; +INSERT INTO t8 VALUES (12,12); +INSERT INTO t8 VALUES (13,13); +UPDATE t7 SET b=14 WHERE a=3; +UPDATE t7 SET b=15 WHERE a=3; +INSERT INTO t8 VALUES (16,16); +UPDATE t7 SET b=17 WHERE a=3; +INSERT INTO t8 VALUES (18,18); +INSERT INTO t8 VALUES (19,19); +UPDATE t7 SET b=20 WHERE a=3; +INSERT INTO t8 
VALUES (21,21); +UPDATE t7 SET b=22 WHERE a=3; +INSERT INTO t8 VALUES (23,24); +INSERT INTO t8 VALUES (24,24); +UPDATE t7 SET b=25 WHERE a=3; +INSERT INTO t8 VALUES (26,26); +UPDATE t7 SET b=27 WHERE a=3; +BEGIN; +INSERT INTO t8 VALUES (28,28); +INSERT INTO t8 VALUES (29,28), (30,28); +INSERT INTO t8 VALUES (31,28); +INSERT INTO t8 VALUES (32,28); +INSERT INTO t8 VALUES (33,28); +INSERT INTO t8 VALUES (34,28); +INSERT INTO t8 VALUES (35,28); +INSERT INTO t8 VALUES (36,28); +INSERT INTO t8 VALUES (37,28); +INSERT INTO t8 VALUES (38,28); +INSERT INTO t8 VALUES (39,28); +INSERT INTO t8 VALUES (40,28); +INSERT INTO t8 VALUES (41,28); +INSERT INTO t8 VALUES (42,28); +COMMIT; +SET @commit_id=43; +INSERT INTO t8 VALUES (43,43); +INSERT INTO t8 VALUES (44,44); +UPDATE t7 SET b=45 WHERE a=3; +INSERT INTO t8 VALUES (46,46); +INSERT INTO t8 VALUES (47,47); +UPDATE t7 SET b=48 WHERE a=3; +INSERT INTO t8 VALUES (49,49); +INSERT INTO t8 VALUES (50,50); +SET @commit_id=44; +INSERT INTO t8 VALUES (51,51); +INSERT INTO t8 VALUES (52,52); +UPDATE t7 SET b=53 WHERE a=3; +INSERT INTO t8 VALUES (54,54); +INSERT INTO t8 VALUES (55,55); +UPDATE t7 SET b=56 WHERE a=3; +INSERT INTO t8 VALUES (57,57); +UPDATE t7 SET b=58 WHERE a=3; +INSERT INTO t8 VALUES (58,58); +INSERT INTO t8 VALUES (59,59); +INSERT INTO t8 VALUES (60,60); +INSERT INTO t8 VALUES (61,61); +UPDATE t7 SET b=62 WHERE a=3; +INSERT INTO t8 VALUES (63,63); +INSERT INTO t8 VALUES (64,64); +INSERT INTO t8 VALUES (65,65); +INSERT INTO t8 VALUES (66,66); +UPDATE t7 SET b=67 WHERE a=3; +INSERT INTO t8 VALUES (68,68); +UPDATE t7 SET b=69 WHERE a=3; +UPDATE t7 SET b=70 WHERE a=3; +UPDATE t7 SET b=71 WHERE a=3; +INSERT INTO t8 VALUES (72,72); +UPDATE t7 SET b=73 WHERE a=3; +UPDATE t7 SET b=74 WHERE a=3; +UPDATE t7 SET b=75 WHERE a=3; +UPDATE t7 SET b=76 WHERE a=3; +INSERT INTO t8 VALUES (77,77); +UPDATE t7 SET b=78 WHERE a=3; +INSERT INTO t8 VALUES (79,79); +UPDATE t7 SET b=80 WHERE a=3; +INSERT INTO t8 VALUES (81,81); +UPDATE t7 SET b=82 WHERE a=3; +INSERT INTO t8 VALUES (83,83); +UPDATE t7 SET b=84 WHERE a=3; +SET @commit_id=45; +INSERT INTO t8 VALUES (85,85); +UPDATE t7 SET b=86 WHERE a=3; +INSERT INTO t8 VALUES (87,87); +SET @commit_id=46; +INSERT INTO t8 VALUES (88,88); +INSERT INTO t8 VALUES (89,89); +INSERT INTO t8 VALUES (90,90); +SET SESSION debug_dbug=@old_dbug; +INSERT INTO t8 VALUES (91,91); +INSERT INTO t8 VALUES (92,92); +INSERT INTO t8 VALUES (93,93); +INSERT INTO t8 VALUES (94,94); +INSERT INTO t8 VALUES (95,95); +INSERT INTO t8 VALUES (96,96); +INSERT INTO t8 VALUES (97,97); +INSERT INTO t8 VALUES (98,98); +INSERT INTO t8 VALUES (99,99); +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +SELECT * FROM t8 ORDER BY a; +a b +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +12 12 +13 13 +16 16 +18 18 +19 19 +21 21 +23 24 +24 24 +26 26 +28 28 +29 28 +30 28 +31 28 +32 28 +33 28 +34 28 +35 28 +36 28 +37 28 +38 28 +39 28 +40 28 +41 28 +42 28 +43 43 +44 44 +46 46 +47 47 +49 49 +50 50 +51 51 +52 52 +54 54 +55 55 +57 57 +58 58 +59 59 +60 60 +61 61 +63 63 +64 64 +65 65 +66 66 +68 68 +72 72 +77 77 +79 79 +81 81 +83 83 +85 85 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +SELECT * FROM t8 ORDER BY a; +a b +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +12 12 +13 13 +16 16 +18 18 +19 19 +21 21 +23 24 +24 24 +26 26 +28 28 +29 28 +30 28 +31 28 +32 28 
+33 28 +34 28 +35 28 +36 28 +37 28 +38 28 +39 28 +40 28 +41 28 +42 28 +43 43 +44 44 +46 46 +47 47 +49 49 +50 50 +51 51 +52 52 +54 54 +55 55 +57 57 +58 58 +59 59 +60 60 +61 61 +63 63 +64 64 +65 65 +66 66 +68 68 +72 72 +77 77 +79 79 +81 81 +83 83 +85 85 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +connection server_2; +include/stop_slave.inc +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +SET GLOBAL slave_transaction_retries=@old_retries; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE if exists t1,t2,t3,t4,t5,t6,t7,t8; +Warnings: +Note 1051 Unknown table 'test.t1' +Note 1051 Unknown table 'test.t2' +Note 1051 Unknown table 'test.t3' +Note 1051 Unknown table 'test.t4' +Note 1051 Unknown table 'test.t5' +Note 1051 Unknown table 'test.t6' +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result index 1b149fb170e..86154204666 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel.result +++ b/mysql-test/suite/rpl/r/rpl_parallel.result @@ -1504,303 +1504,11 @@ a 1044 1045 1046 -include/stop_slave.inc -SET GLOBAL slave_parallel_mode='conservative'; -include/start_slave.inc -*** MDEV-7847: "Slave worker thread retried transaction 10 time(s) in vain, giving up", followed by replication hanging *** -*** MDEV-7882: Excessive transaction retry in parallel replication *** -connection server_1; -CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; -CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; -connection server_2; -include/stop_slave.inc -SET GLOBAL slave_parallel_threads=40; -SELECT @old_retries:=@@GLOBAL.slave_transaction_retries; -@old_retries:=@@GLOBAL.slave_transaction_retries -10 -SET GLOBAL slave_transaction_retries= 5; -connection server_1; -INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); -SET @old_dbug= @@SESSION.debug_dbug; -SET @commit_id= 42; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -INSERT INTO t8 VALUES (1,1); -INSERT INTO t8 VALUES (2,2); -INSERT INTO t8 VALUES (3,3); -INSERT INTO t8 VALUES (4,4); -INSERT INTO t8 VALUES (5,5); -INSERT INTO t8 VALUES (6,6); -INSERT INTO t8 VALUES (7,7); -INSERT INTO t8 VALUES (8,8); -UPDATE t7 SET b=9 WHERE a=3; -UPDATE t7 SET b=10 WHERE a=3; -UPDATE t7 SET b=11 WHERE a=3; -INSERT INTO t8 VALUES (12,12); -INSERT INTO t8 VALUES (13,13); -UPDATE t7 SET b=14 WHERE a=3; -UPDATE t7 SET b=15 WHERE a=3; -INSERT INTO t8 VALUES (16,16); -UPDATE t7 SET b=17 WHERE a=3; -INSERT INTO t8 VALUES (18,18); -INSERT INTO t8 VALUES (19,19); -UPDATE t7 SET b=20 WHERE a=3; -INSERT INTO t8 VALUES (21,21); -UPDATE t7 SET b=22 WHERE a=3; -INSERT INTO t8 VALUES (23,24); -INSERT INTO t8 VALUES (24,24); -UPDATE t7 SET b=25 WHERE a=3; -INSERT INTO t8 VALUES (26,26); -UPDATE t7 SET b=27 WHERE a=3; -BEGIN; -INSERT INTO t8 VALUES (28,28); -INSERT INTO t8 VALUES (29,28), (30,28); -INSERT INTO t8 VALUES (31,28); -INSERT INTO t8 VALUES (32,28); -INSERT INTO t8 VALUES (33,28); -INSERT INTO t8 VALUES (34,28); -INSERT INTO t8 VALUES (35,28); -INSERT INTO t8 VALUES (36,28); -INSERT INTO t8 VALUES (37,28); -INSERT INTO t8 VALUES (38,28); -INSERT INTO t8 VALUES (39,28); -INSERT INTO t8 VALUES (40,28); -INSERT INTO t8 VALUES (41,28); -INSERT INTO t8 VALUES (42,28); -COMMIT; -SET @commit_id=43; -INSERT INTO t8 VALUES (43,43); -INSERT INTO t8 VALUES (44,44); -UPDATE t7 SET b=45 WHERE a=3; -INSERT INTO t8 VALUES (46,46); -INSERT INTO t8 VALUES (47,47); -UPDATE t7 SET b=48 WHERE a=3; 
-INSERT INTO t8 VALUES (49,49); -INSERT INTO t8 VALUES (50,50); -SET @commit_id=44; -INSERT INTO t8 VALUES (51,51); -INSERT INTO t8 VALUES (52,52); -UPDATE t7 SET b=53 WHERE a=3; -INSERT INTO t8 VALUES (54,54); -INSERT INTO t8 VALUES (55,55); -UPDATE t7 SET b=56 WHERE a=3; -INSERT INTO t8 VALUES (57,57); -UPDATE t7 SET b=58 WHERE a=3; -INSERT INTO t8 VALUES (58,58); -INSERT INTO t8 VALUES (59,59); -INSERT INTO t8 VALUES (60,60); -INSERT INTO t8 VALUES (61,61); -UPDATE t7 SET b=62 WHERE a=3; -INSERT INTO t8 VALUES (63,63); -INSERT INTO t8 VALUES (64,64); -INSERT INTO t8 VALUES (65,65); -INSERT INTO t8 VALUES (66,66); -UPDATE t7 SET b=67 WHERE a=3; -INSERT INTO t8 VALUES (68,68); -UPDATE t7 SET b=69 WHERE a=3; -UPDATE t7 SET b=70 WHERE a=3; -UPDATE t7 SET b=71 WHERE a=3; -INSERT INTO t8 VALUES (72,72); -UPDATE t7 SET b=73 WHERE a=3; -UPDATE t7 SET b=74 WHERE a=3; -UPDATE t7 SET b=75 WHERE a=3; -UPDATE t7 SET b=76 WHERE a=3; -INSERT INTO t8 VALUES (77,77); -UPDATE t7 SET b=78 WHERE a=3; -INSERT INTO t8 VALUES (79,79); -UPDATE t7 SET b=80 WHERE a=3; -INSERT INTO t8 VALUES (81,81); -UPDATE t7 SET b=82 WHERE a=3; -INSERT INTO t8 VALUES (83,83); -UPDATE t7 SET b=84 WHERE a=3; -SET @commit_id=45; -INSERT INTO t8 VALUES (85,85); -UPDATE t7 SET b=86 WHERE a=3; -INSERT INTO t8 VALUES (87,87); -SET @commit_id=46; -INSERT INTO t8 VALUES (88,88); -INSERT INTO t8 VALUES (89,89); -INSERT INTO t8 VALUES (90,90); -SET SESSION debug_dbug=@old_dbug; -INSERT INTO t8 VALUES (91,91); -INSERT INTO t8 VALUES (92,92); -INSERT INTO t8 VALUES (93,93); -INSERT INTO t8 VALUES (94,94); -INSERT INTO t8 VALUES (95,95); -INSERT INTO t8 VALUES (96,96); -INSERT INTO t8 VALUES (97,97); -INSERT INTO t8 VALUES (98,98); -INSERT INTO t8 VALUES (99,99); -SELECT * FROM t7 ORDER BY a; -a b -1 1 -2 2 -3 86 -4 4 -5 5 -SELECT * FROM t8 ORDER BY a; -a b -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -12 12 -13 13 -16 16 -18 18 -19 19 -21 21 -23 24 -24 24 -26 26 -28 28 -29 28 -30 28 -31 28 -32 28 -33 28 -34 28 -35 28 -36 28 -37 28 -38 28 -39 28 -40 28 -41 28 -42 28 -43 43 -44 44 -46 46 -47 47 -49 49 -50 50 -51 51 -52 52 -54 54 -55 55 -57 57 -58 58 -59 59 -60 60 -61 61 -63 63 -64 64 -65 65 -66 66 -68 68 -72 72 -77 77 -79 79 -81 81 -83 83 -85 85 -87 87 -88 88 -89 89 -90 90 -91 91 -92 92 -93 93 -94 94 -95 95 -96 96 -97 97 -98 98 -99 99 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t7 ORDER BY a; -a b -1 1 -2 2 -3 86 -4 4 -5 5 -SELECT * FROM t8 ORDER BY a; -a b -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -12 12 -13 13 -16 16 -18 18 -19 19 -21 21 -23 24 -24 24 -26 26 -28 28 -29 28 -30 28 -31 28 -32 28 -33 28 -34 28 -35 28 -36 28 -37 28 -38 28 -39 28 -40 28 -41 28 -42 28 -43 43 -44 44 -46 46 -47 47 -49 49 -50 50 -51 51 -52 52 -54 54 -55 55 -57 57 -58 58 -59 59 -60 60 -61 61 -63 63 -64 64 -65 65 -66 66 -68 68 -72 72 -77 77 -79 79 -81 81 -83 83 -85 85 -87 87 -88 88 -89 89 -90 90 -91 91 -92 92 -93 93 -94 94 -95 95 -96 96 -97 97 -98 98 -99 99 -include/stop_slave.inc -SET GLOBAL slave_transaction_retries= @old_retries; -SET GLOBAL slave_parallel_threads=10; -include/start_slave.inc *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** connection server_2; include/stop_slave.inc +SET GLOBAL slave_parallel_mode='conservative'; +SET GLOBAL slave_parallel_threads=10; SET @old_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; connection server_1; @@ -1949,49 +1657,6 @@ 
include/stop_slave.inc SET GLOBAL debug_dbug= @old_debg; SET GLOBAL max_relay_log_size= @old_max; include/start_slave.inc -*** MDEV-8302: Duplicate key with parallel replication *** -connection server_2; -include/stop_slave.inc -/* Inject a small sleep which makes the race easier to hit. */ -SET @old_dbug=@@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,inject_mdev8302"; -connection server_1; -INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5); -SET @old_dbug= @@SESSION.debug_dbug; -SET @commit_id= 20000; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -SET SESSION debug_dbug=@old_dbug; -SELECT * FROM t7 ORDER BY a; -a b -1 1 -2 2 -3 86 -4 4 -5 5 -100 5 -101 1 -102 2 -103 3 -104 4 -include/save_master_gtid.inc -connection server_2; -include/start_slave.inc -include/sync_with_master_gtid.inc -SELECT * FROM t7 ORDER BY a; -a b -1 1 -2 2 -3 86 -4 4 -5 5 -100 5 -101 1 -102 2 -103 3 -104 4 -include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; -include/start_slave.inc *** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** connection server_1; BEGIN; @@ -2019,6 +1684,6 @@ include/start_slave.inc SET DEBUG_SYNC= 'RESET'; connection server_1; DROP function foo; -DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result b/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result new file mode 100644 index 00000000000..b15de6fc215 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_parallel_conflicts.result @@ -0,0 +1,333 @@ +include/master-slave.inc +[connection master] +*** MDEV-7847: "Slave worker thread retried transaction 10 time(s) in vain, giving up", followed by replication hanging *** +*** MDEV-7882: Excessive transaction retry in parallel replication *** +connection server_1; +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +connection server_1; +INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET @old_dbug= @@SESSION.debug_dbug; +SET @commit_id= 42; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +INSERT INTO t8 VALUES (1,1); +INSERT INTO t8 VALUES (2,2); +INSERT INTO t8 VALUES (3,3); +INSERT INTO t8 VALUES (4,4); +INSERT INTO t8 VALUES (5,5); +INSERT INTO t8 VALUES (6,6); +INSERT INTO t8 VALUES (7,7); +INSERT INTO t8 VALUES (8,8); +UPDATE t7 SET b=9 WHERE a=3; +UPDATE t7 SET b=10 WHERE a=3; +UPDATE t7 SET b=11 WHERE a=3; +INSERT INTO t8 VALUES (12,12); +INSERT INTO t8 VALUES (13,13); +UPDATE t7 SET b=14 WHERE a=3; +UPDATE t7 SET b=15 WHERE a=3; +INSERT INTO t8 VALUES (16,16); +UPDATE t7 SET b=17 WHERE a=3; +INSERT INTO t8 VALUES (18,18); +INSERT INTO t8 VALUES (19,19); +UPDATE t7 SET b=20 WHERE a=3; +INSERT INTO t8 VALUES (21,21); +UPDATE t7 SET b=22 WHERE a=3; +INSERT INTO t8 VALUES (23,24); +INSERT INTO t8 VALUES (24,24); +UPDATE t7 SET b=25 WHERE a=3; +INSERT INTO t8 VALUES (26,26); +UPDATE t7 SET b=27 WHERE a=3; +BEGIN; +INSERT INTO t8 VALUES (28,28); +INSERT INTO t8 VALUES (29,28), (30,28); +INSERT INTO t8 VALUES (31,28); +INSERT INTO t8 VALUES (32,28); +INSERT INTO t8 VALUES (33,28); +INSERT INTO t8 VALUES (34,28); +INSERT INTO t8 VALUES (35,28); +INSERT INTO t8 VALUES (36,28); +INSERT INTO t8 VALUES (37,28); +INSERT INTO t8 VALUES (38,28); +INSERT INTO t8 VALUES (39,28); +INSERT INTO t8 VALUES (40,28); +INSERT INTO t8 VALUES (41,28); +INSERT INTO t8 VALUES 
(42,28); +COMMIT; +SET @commit_id=43; +INSERT INTO t8 VALUES (43,43); +INSERT INTO t8 VALUES (44,44); +UPDATE t7 SET b=45 WHERE a=3; +INSERT INTO t8 VALUES (46,46); +INSERT INTO t8 VALUES (47,47); +UPDATE t7 SET b=48 WHERE a=3; +INSERT INTO t8 VALUES (49,49); +INSERT INTO t8 VALUES (50,50); +SET @commit_id=44; +INSERT INTO t8 VALUES (51,51); +INSERT INTO t8 VALUES (52,52); +UPDATE t7 SET b=53 WHERE a=3; +INSERT INTO t8 VALUES (54,54); +INSERT INTO t8 VALUES (55,55); +UPDATE t7 SET b=56 WHERE a=3; +INSERT INTO t8 VALUES (57,57); +UPDATE t7 SET b=58 WHERE a=3; +INSERT INTO t8 VALUES (58,58); +INSERT INTO t8 VALUES (59,59); +INSERT INTO t8 VALUES (60,60); +INSERT INTO t8 VALUES (61,61); +UPDATE t7 SET b=62 WHERE a=3; +INSERT INTO t8 VALUES (63,63); +INSERT INTO t8 VALUES (64,64); +INSERT INTO t8 VALUES (65,65); +INSERT INTO t8 VALUES (66,66); +UPDATE t7 SET b=67 WHERE a=3; +INSERT INTO t8 VALUES (68,68); +UPDATE t7 SET b=69 WHERE a=3; +UPDATE t7 SET b=70 WHERE a=3; +UPDATE t7 SET b=71 WHERE a=3; +INSERT INTO t8 VALUES (72,72); +UPDATE t7 SET b=73 WHERE a=3; +UPDATE t7 SET b=74 WHERE a=3; +UPDATE t7 SET b=75 WHERE a=3; +UPDATE t7 SET b=76 WHERE a=3; +INSERT INTO t8 VALUES (77,77); +UPDATE t7 SET b=78 WHERE a=3; +INSERT INTO t8 VALUES (79,79); +UPDATE t7 SET b=80 WHERE a=3; +INSERT INTO t8 VALUES (81,81); +UPDATE t7 SET b=82 WHERE a=3; +INSERT INTO t8 VALUES (83,83); +UPDATE t7 SET b=84 WHERE a=3; +SET @commit_id=45; +INSERT INTO t8 VALUES (85,85); +UPDATE t7 SET b=86 WHERE a=3; +INSERT INTO t8 VALUES (87,87); +SET @commit_id=46; +INSERT INTO t8 VALUES (88,88); +INSERT INTO t8 VALUES (89,89); +INSERT INTO t8 VALUES (90,90); +SET SESSION debug_dbug=@old_dbug; +INSERT INTO t8 VALUES (91,91); +INSERT INTO t8 VALUES (92,92); +INSERT INTO t8 VALUES (93,93); +INSERT INTO t8 VALUES (94,94); +INSERT INTO t8 VALUES (95,95); +INSERT INTO t8 VALUES (96,96); +INSERT INTO t8 VALUES (97,97); +INSERT INTO t8 VALUES (98,98); +INSERT INTO t8 VALUES (99,99); +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +SELECT * FROM t8 ORDER BY a; +a b +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +12 12 +13 13 +16 16 +18 18 +19 19 +21 21 +23 24 +24 24 +26 26 +28 28 +29 28 +30 28 +31 28 +32 28 +33 28 +34 28 +35 28 +36 28 +37 28 +38 28 +39 28 +40 28 +41 28 +42 28 +43 43 +44 44 +46 46 +47 47 +49 49 +50 50 +51 51 +52 52 +54 54 +55 55 +57 57 +58 58 +59 59 +60 60 +61 61 +63 63 +64 64 +65 65 +66 66 +68 68 +72 72 +77 77 +79 79 +81 81 +83 83 +85 85 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +SELECT * FROM t8 ORDER BY a; +a b +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +12 12 +13 13 +16 16 +18 18 +19 19 +21 21 +23 24 +24 24 +26 26 +28 28 +29 28 +30 28 +31 28 +32 28 +33 28 +34 28 +35 28 +36 28 +37 28 +38 28 +39 28 +40 28 +41 28 +42 28 +43 43 +44 44 +46 46 +47 47 +49 49 +50 50 +51 51 +52 52 +54 54 +55 55 +57 57 +58 58 +59 59 +60 60 +61 61 +63 63 +64 64 +65 65 +66 66 +68 68 +72 72 +77 77 +79 79 +81 81 +83 83 +85 85 +87 87 +88 88 +89 89 +90 90 +91 91 +92 92 +93 93 +94 94 +95 95 +96 96 +97 97 +98 98 +99 99 +*** MDEV-8302: Duplicate key with parallel replication *** +connection server_2; +include/stop_slave.inc +/* Inject a small sleep which makes the race easier to hit. 
*/ +SET @old_dbug=@@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,inject_mdev8302"; +connection server_1; +INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5); +SET @old_dbug= @@SESSION.debug_dbug; +SET @commit_id= 20000; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +SET SESSION debug_dbug=@old_dbug; +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +100 5 +101 1 +102 2 +103 3 +104 4 +include/save_master_gtid.inc +connection server_2; +include/start_slave.inc +include/sync_with_master_gtid.inc +SELECT * FROM t7 ORDER BY a; +a b +1 1 +2 2 +3 86 +4 4 +5 5 +100 5 +101 1 +102 2 +103 3 +104 4 +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; +connection server_1; +DROP TABLE t7,t8; +SET DEBUG_SYNC= 'RESET'; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel.test b/mysql-test/suite/rpl/t/rpl_parallel.test index a7e5353a9fc..5d2232269cc 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel.test +++ b/mysql-test/suite/rpl/t/rpl_parallel.test @@ -1972,218 +1972,14 @@ SET GLOBAL slave_parallel_mode=minimal; --source include/start_slave.inc --source include/sync_with_master_gtid.inc SELECT * FROM t2 WHERE a >= 1040 ORDER BY a; ---source include/stop_slave.inc -SET GLOBAL slave_parallel_mode='conservative'; ---source include/start_slave.inc - - ---echo *** MDEV-7847: "Slave worker thread retried transaction 10 time(s) in vain, giving up", followed by replication hanging *** ---echo *** MDEV-7882: Excessive transaction retry in parallel replication *** - ---connection server_1 -CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; -CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; ---save_master_pos - ---connection server_2 ---sync_with_master ---source include/stop_slave.inc -SET GLOBAL slave_parallel_threads=40; -SELECT @old_retries:=@@GLOBAL.slave_transaction_retries; -SET GLOBAL slave_transaction_retries= 5; - - -# Using dbug error injection, we artificially create event groups with a lot of -# conflicting transactions in each event group. The bugs were originally seen -# "in the wild" with transactions that did not conflict on the master, and only -# conflicted very rarely on the slave (maybe some edge case with InnoDB btree -# page splits or something like that). The event groups here loosely reflect -# the structure of the original failure's group commits. 
- - ---connection server_1 -INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); -SET @old_dbug= @@SESSION.debug_dbug; -SET @commit_id= 42; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; -INSERT INTO t8 VALUES (1,1); -INSERT INTO t8 VALUES (2,2); -INSERT INTO t8 VALUES (3,3); -INSERT INTO t8 VALUES (4,4); -INSERT INTO t8 VALUES (5,5); -INSERT INTO t8 VALUES (6,6); -INSERT INTO t8 VALUES (7,7); -INSERT INTO t8 VALUES (8,8); - -UPDATE t7 SET b=9 WHERE a=3; -UPDATE t7 SET b=10 WHERE a=3; -UPDATE t7 SET b=11 WHERE a=3; - -INSERT INTO t8 VALUES (12,12); -INSERT INTO t8 VALUES (13,13); - -UPDATE t7 SET b=14 WHERE a=3; -UPDATE t7 SET b=15 WHERE a=3; - -INSERT INTO t8 VALUES (16,16); - -UPDATE t7 SET b=17 WHERE a=3; - -INSERT INTO t8 VALUES (18,18); -INSERT INTO t8 VALUES (19,19); - -UPDATE t7 SET b=20 WHERE a=3; - -INSERT INTO t8 VALUES (21,21); - -UPDATE t7 SET b=22 WHERE a=3; - -INSERT INTO t8 VALUES (23,24); -INSERT INTO t8 VALUES (24,24); - -UPDATE t7 SET b=25 WHERE a=3; - -INSERT INTO t8 VALUES (26,26); - -UPDATE t7 SET b=27 WHERE a=3; - -BEGIN; -INSERT INTO t8 VALUES (28,28); -INSERT INTO t8 VALUES (29,28), (30,28); -INSERT INTO t8 VALUES (31,28); -INSERT INTO t8 VALUES (32,28); -INSERT INTO t8 VALUES (33,28); -INSERT INTO t8 VALUES (34,28); -INSERT INTO t8 VALUES (35,28); -INSERT INTO t8 VALUES (36,28); -INSERT INTO t8 VALUES (37,28); -INSERT INTO t8 VALUES (38,28); -INSERT INTO t8 VALUES (39,28); -INSERT INTO t8 VALUES (40,28); -INSERT INTO t8 VALUES (41,28); -INSERT INTO t8 VALUES (42,28); -COMMIT; - - -SET @commit_id=43; -INSERT INTO t8 VALUES (43,43); -INSERT INTO t8 VALUES (44,44); - -UPDATE t7 SET b=45 WHERE a=3; - -INSERT INTO t8 VALUES (46,46); -INSERT INTO t8 VALUES (47,47); - -UPDATE t7 SET b=48 WHERE a=3; - -INSERT INTO t8 VALUES (49,49); -INSERT INTO t8 VALUES (50,50); - - -SET @commit_id=44; -INSERT INTO t8 VALUES (51,51); -INSERT INTO t8 VALUES (52,52); - -UPDATE t7 SET b=53 WHERE a=3; - -INSERT INTO t8 VALUES (54,54); -INSERT INTO t8 VALUES (55,55); - -UPDATE t7 SET b=56 WHERE a=3; - -INSERT INTO t8 VALUES (57,57); - -UPDATE t7 SET b=58 WHERE a=3; - -INSERT INTO t8 VALUES (58,58); -INSERT INTO t8 VALUES (59,59); -INSERT INTO t8 VALUES (60,60); -INSERT INTO t8 VALUES (61,61); - -UPDATE t7 SET b=62 WHERE a=3; - -INSERT INTO t8 VALUES (63,63); -INSERT INTO t8 VALUES (64,64); -INSERT INTO t8 VALUES (65,65); -INSERT INTO t8 VALUES (66,66); - -UPDATE t7 SET b=67 WHERE a=3; - -INSERT INTO t8 VALUES (68,68); - -UPDATE t7 SET b=69 WHERE a=3; -UPDATE t7 SET b=70 WHERE a=3; -UPDATE t7 SET b=71 WHERE a=3; - -INSERT INTO t8 VALUES (72,72); - -UPDATE t7 SET b=73 WHERE a=3; -UPDATE t7 SET b=74 WHERE a=3; -UPDATE t7 SET b=75 WHERE a=3; -UPDATE t7 SET b=76 WHERE a=3; - -INSERT INTO t8 VALUES (77,77); - -UPDATE t7 SET b=78 WHERE a=3; - -INSERT INTO t8 VALUES (79,79); - -UPDATE t7 SET b=80 WHERE a=3; - -INSERT INTO t8 VALUES (81,81); - -UPDATE t7 SET b=82 WHERE a=3; - -INSERT INTO t8 VALUES (83,83); - -UPDATE t7 SET b=84 WHERE a=3; - - -SET @commit_id=45; -INSERT INTO t8 VALUES (85,85); -UPDATE t7 SET b=86 WHERE a=3; -INSERT INTO t8 VALUES (87,87); - - -SET @commit_id=46; -INSERT INTO t8 VALUES (88,88); -INSERT INTO t8 VALUES (89,89); -INSERT INTO t8 VALUES (90,90); - -SET SESSION debug_dbug=@old_dbug; - -INSERT INTO t8 VALUES (91,91); -INSERT INTO t8 VALUES (92,92); -INSERT INTO t8 VALUES (93,93); -INSERT INTO t8 VALUES (94,94); -INSERT INTO t8 VALUES (95,95); -INSERT INTO t8 VALUES (96,96); -INSERT INTO t8 VALUES (97,97); -INSERT INTO t8 VALUES (98,98); -INSERT INTO t8 VALUES 
(99,99); - - -SELECT * FROM t7 ORDER BY a; -SELECT * FROM t8 ORDER BY a; ---source include/save_master_gtid.inc +--echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** --connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -SELECT * FROM t7 ORDER BY a; -SELECT * FROM t8 ORDER BY a; - --source include/stop_slave.inc -SET GLOBAL slave_transaction_retries= @old_retries; +SET GLOBAL slave_parallel_mode='conservative'; SET GLOBAL slave_parallel_threads=10; ---source include/start_slave.inc - ---echo *** MDEV-7888: ANALYZE TABLE does wakeup_subsequent_commits(), causing wrong binlog order and parallel replication hang *** - ---connection server_2 ---source include/stop_slave.inc SET @old_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug= '+d,inject_analyze_table_sleep'; @@ -2380,62 +2176,6 @@ SET GLOBAL debug_dbug= @old_debg; SET GLOBAL max_relay_log_size= @old_max; --source include/start_slave.inc - ---echo *** MDEV-8302: Duplicate key with parallel replication *** - ---connection server_2 ---source include/stop_slave.inc -/* Inject a small sleep which makes the race easier to hit. */ -SET @old_dbug=@@GLOBAL.debug_dbug; -SET GLOBAL debug_dbug="+d,inject_mdev8302"; - - ---connection server_1 -INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5); - -# Artificially create a bunch of group commits with conflicting transactions. -# The bug happened when T1 and T2 was in one group commit, and T3 was in the -# following group commit. T2 is a DELETE of a row with same primary key as a -# row that T3 inserts. T1 and T2 can conflict, causing T2 to be deadlock -# killed after starting to commit. The bug was that T2 could roll back before -# doing unmark_start_commit(); this could allow T3 to run before the retry -# of T2, causing duplicate key violation. - -SET @old_dbug= @@SESSION.debug_dbug; -SET @commit_id= 20000; -SET SESSION debug_dbug="+d,binlog_force_commit_id"; - ---let $n = 100 ---disable_query_log -while ($n) -{ - eval UPDATE t7 SET b=b+1 WHERE a=100+($n MOD 5); - eval DELETE FROM t7 WHERE a=100+($n MOD 5); - - SET @commit_id = @commit_id + 1; - eval INSERT INTO t7 VALUES (100+($n MOD 5), $n); - SET @commit_id = @commit_id + 1; - dec $n; -} ---enable_query_log -SET SESSION debug_dbug=@old_dbug; - - -SELECT * FROM t7 ORDER BY a; ---source include/save_master_gtid.inc - - ---connection server_2 ---source include/start_slave.inc ---source include/sync_with_master_gtid.inc -SELECT * FROM t7 ORDER BY a; - ---source include/stop_slave.inc -SET GLOBAL debug_dbug=@old_dbug; ---source include/start_slave.inc - - - --echo *** MDEV-8725: Assertion on ROLLBACK statement in the binary log *** --connection server_1 # Inject an event group terminated by ROLLBACK, by mixing MyISAM and InnoDB @@ -2457,7 +2197,6 @@ SELECT * FROM t2 WHERE a>=2000 ORDER BY a; SELECT * FROM t1 WHERE a>=2000 ORDER BY a; SELECT * FROM t2 WHERE a>=2000 ORDER BY a; - # Clean up. 
--connection server_2 --source include/stop_slave.inc @@ -2467,7 +2206,7 @@ SET DEBUG_SYNC= 'RESET'; --connection server_1 DROP function foo; -DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8; +DROP TABLE t1,t2,t3,t4,t5,t6; SET DEBUG_SYNC= 'RESET'; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt b/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt new file mode 100644 index 00000000000..af7bd138793 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_conflicts-slave.opt @@ -0,0 +1 @@ +--slave_parallel_threads=40 --slave_parallel_mode=conservative --slave_transaction_retries=5 diff --git a/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test b/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test new file mode 100644 index 00000000000..fc294f68197 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_parallel_conflicts.test @@ -0,0 +1,261 @@ +--source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/master-slave.inc + +--echo *** MDEV-7847: "Slave worker thread retried transaction 10 time(s) in vain, giving up", followed by replication hanging *** +--echo *** MDEV-7882: Excessive transaction retry in parallel replication *** + +--connection server_1 +ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; +CREATE TABLE t7 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +CREATE TABLE t8 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + + +# Using dbug error injection, we artificially create event groups with a lot of +# conflicting transactions in each event group. The bugs were originally seen +# "in the wild" with transactions that did not conflict on the master, and only +# conflicted very rarely on the slave (maybe some edge case with InnoDB btree +# page splits or something like that). The event groups here loosely reflect +# the structure of the original failure's group commits. 
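+#
+# (Illustration only -- not executed by this test.  With the
+# binlog_force_commit_id dbug point set, every transaction binlogged while
+# @commit_id keeps its current value gets that same commit id, so a
+# conservative-mode parallel slave treats them as one group commit and may
+# apply them concurrently:
+#
+#   SET @commit_id= 42;
+#   SET SESSION debug_dbug="+d,binlog_force_commit_id";
+#   INSERT INTO t8 VALUES (1,1);      # same group commit as ...
+#   UPDATE t7 SET b=9 WHERE a=3;      # ... this conflicting update
+#   SET @commit_id= 43;               # later transactions start a new group
+#
+# The statements below follow exactly this pattern.)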
+ + +--connection server_1 +INSERT INTO t7 VALUES (1,1), (2,2), (3,3), (4,4), (5,5); +SET @old_dbug= @@SESSION.debug_dbug; +SET @commit_id= 42; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; +INSERT INTO t8 VALUES (1,1); +INSERT INTO t8 VALUES (2,2); +INSERT INTO t8 VALUES (3,3); +INSERT INTO t8 VALUES (4,4); +INSERT INTO t8 VALUES (5,5); +INSERT INTO t8 VALUES (6,6); +INSERT INTO t8 VALUES (7,7); +INSERT INTO t8 VALUES (8,8); + +UPDATE t7 SET b=9 WHERE a=3; +UPDATE t7 SET b=10 WHERE a=3; +UPDATE t7 SET b=11 WHERE a=3; + +INSERT INTO t8 VALUES (12,12); +INSERT INTO t8 VALUES (13,13); + +UPDATE t7 SET b=14 WHERE a=3; +UPDATE t7 SET b=15 WHERE a=3; + +INSERT INTO t8 VALUES (16,16); + +UPDATE t7 SET b=17 WHERE a=3; + +INSERT INTO t8 VALUES (18,18); +INSERT INTO t8 VALUES (19,19); + +UPDATE t7 SET b=20 WHERE a=3; + +INSERT INTO t8 VALUES (21,21); + +UPDATE t7 SET b=22 WHERE a=3; + +INSERT INTO t8 VALUES (23,24); +INSERT INTO t8 VALUES (24,24); + +UPDATE t7 SET b=25 WHERE a=3; + +INSERT INTO t8 VALUES (26,26); + +UPDATE t7 SET b=27 WHERE a=3; + +BEGIN; +INSERT INTO t8 VALUES (28,28); +INSERT INTO t8 VALUES (29,28), (30,28); +INSERT INTO t8 VALUES (31,28); +INSERT INTO t8 VALUES (32,28); +INSERT INTO t8 VALUES (33,28); +INSERT INTO t8 VALUES (34,28); +INSERT INTO t8 VALUES (35,28); +INSERT INTO t8 VALUES (36,28); +INSERT INTO t8 VALUES (37,28); +INSERT INTO t8 VALUES (38,28); +INSERT INTO t8 VALUES (39,28); +INSERT INTO t8 VALUES (40,28); +INSERT INTO t8 VALUES (41,28); +INSERT INTO t8 VALUES (42,28); +COMMIT; + + +SET @commit_id=43; +INSERT INTO t8 VALUES (43,43); +INSERT INTO t8 VALUES (44,44); + +UPDATE t7 SET b=45 WHERE a=3; + +INSERT INTO t8 VALUES (46,46); +INSERT INTO t8 VALUES (47,47); + +UPDATE t7 SET b=48 WHERE a=3; + +INSERT INTO t8 VALUES (49,49); +INSERT INTO t8 VALUES (50,50); + + +SET @commit_id=44; +INSERT INTO t8 VALUES (51,51); +INSERT INTO t8 VALUES (52,52); + +UPDATE t7 SET b=53 WHERE a=3; + +INSERT INTO t8 VALUES (54,54); +INSERT INTO t8 VALUES (55,55); + +UPDATE t7 SET b=56 WHERE a=3; + +INSERT INTO t8 VALUES (57,57); + +UPDATE t7 SET b=58 WHERE a=3; + +INSERT INTO t8 VALUES (58,58); +INSERT INTO t8 VALUES (59,59); +INSERT INTO t8 VALUES (60,60); +INSERT INTO t8 VALUES (61,61); + +UPDATE t7 SET b=62 WHERE a=3; + +INSERT INTO t8 VALUES (63,63); +INSERT INTO t8 VALUES (64,64); +INSERT INTO t8 VALUES (65,65); +INSERT INTO t8 VALUES (66,66); + +UPDATE t7 SET b=67 WHERE a=3; + +INSERT INTO t8 VALUES (68,68); + +UPDATE t7 SET b=69 WHERE a=3; +UPDATE t7 SET b=70 WHERE a=3; +UPDATE t7 SET b=71 WHERE a=3; + +INSERT INTO t8 VALUES (72,72); + +UPDATE t7 SET b=73 WHERE a=3; +UPDATE t7 SET b=74 WHERE a=3; +UPDATE t7 SET b=75 WHERE a=3; +UPDATE t7 SET b=76 WHERE a=3; + +INSERT INTO t8 VALUES (77,77); + +UPDATE t7 SET b=78 WHERE a=3; + +INSERT INTO t8 VALUES (79,79); + +UPDATE t7 SET b=80 WHERE a=3; + +INSERT INTO t8 VALUES (81,81); + +UPDATE t7 SET b=82 WHERE a=3; + +INSERT INTO t8 VALUES (83,83); + +UPDATE t7 SET b=84 WHERE a=3; + + +SET @commit_id=45; +INSERT INTO t8 VALUES (85,85); +UPDATE t7 SET b=86 WHERE a=3; +INSERT INTO t8 VALUES (87,87); + + +SET @commit_id=46; +INSERT INTO t8 VALUES (88,88); +INSERT INTO t8 VALUES (89,89); +INSERT INTO t8 VALUES (90,90); + +SET SESSION debug_dbug=@old_dbug; + +INSERT INTO t8 VALUES (91,91); +INSERT INTO t8 VALUES (92,92); +INSERT INTO t8 VALUES (93,93); +INSERT INTO t8 VALUES (94,94); +INSERT INTO t8 VALUES (95,95); +INSERT INTO t8 VALUES (96,96); +INSERT INTO t8 VALUES (97,97); +INSERT INTO t8 VALUES (98,98); +INSERT INTO t8 VALUES 
(99,99); + + +SELECT * FROM t7 ORDER BY a; +SELECT * FROM t8 ORDER BY a; +--source include/save_master_gtid.inc + + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t7 ORDER BY a; +SELECT * FROM t8 ORDER BY a; + +--echo *** MDEV-8302: Duplicate key with parallel replication *** + +--connection server_2 +--source include/stop_slave.inc +/* Inject a small sleep which makes the race easier to hit. */ +SET @old_dbug=@@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,inject_mdev8302"; + + +--connection server_1 +INSERT INTO t7 VALUES (100,1), (101,2), (102,3), (103,4), (104,5); + +# Artificially create a bunch of group commits with conflicting transactions. +# The bug happened when T1 and T2 was in one group commit, and T3 was in the +# following group commit. T2 is a DELETE of a row with same primary key as a +# row that T3 inserts. T1 and T2 can conflict, causing T2 to be deadlock +# killed after starting to commit. The bug was that T2 could roll back before +# doing unmark_start_commit(); this could allow T3 to run before the retry +# of T2, causing duplicate key violation. + +SET @old_dbug= @@SESSION.debug_dbug; +SET @commit_id= 20000; +SET SESSION debug_dbug="+d,binlog_force_commit_id"; + +--let $n = 100 +--disable_query_log +while ($n) +{ + eval UPDATE t7 SET b=b+1 WHERE a=100+($n MOD 5); + eval DELETE FROM t7 WHERE a=100+($n MOD 5); + + SET @commit_id = @commit_id + 1; + eval INSERT INTO t7 VALUES (100+($n MOD 5), $n); + SET @commit_id = @commit_id + 1; + dec $n; +} +--enable_query_log +SET SESSION debug_dbug=@old_dbug; + + +SELECT * FROM t7 ORDER BY a; +--source include/save_master_gtid.inc + + +--connection server_2 +--source include/start_slave.inc +--source include/sync_with_master_gtid.inc +SELECT * FROM t7 ORDER BY a; + +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; + +# Clean up. 
+--source include/start_slave.inc +SET DEBUG_SYNC= 'RESET'; + +--connection server_1 +DROP TABLE t7,t8; +SET DEBUG_SYNC= 'RESET'; + +--source include/rpl_end.inc -- cgit v1.2.1 From addb38f4763faa0378cd369106372a0eb0f0ee75 Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 1 Sep 2016 21:15:17 +0300 Subject: More DBUG_PRINT's to make it easier to debug parallel replication --- storage/innobase/handler/ha_innodb.cc | 1 + storage/innobase/log/log0log.cc | 7 +++++++ storage/innobase/trx/trx0trx.cc | 6 +++++- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 604adfadae3..82c6289ebd8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4257,6 +4257,7 @@ innobase_commit( trx_t* trx; DBUG_ENTER("innobase_commit"); + DBUG_PRINT("enter", ("commit_trx: %d", commit_trx)); DBUG_ASSERT(hton == innodb_hton_ptr); DBUG_PRINT("trans", ("ending transaction")); diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 89b616aba01..ce5f75b4c61 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -1268,6 +1268,10 @@ log_group_file_header_flush( } #endif /* UNIV_DEBUG */ if (log_do_write) { + DBUG_PRINT("ib_log", ("write " LSN_PF + " group " ULINTPF + " file " ULINTPF " header", + start_lsn, group->id, nth_file)); log_sys->n_log_ios++; MONITOR_INC(MONITOR_LOG_IO); @@ -1539,6 +1543,9 @@ loop: return; } + DBUG_PRINT("ib_log", ("write " LSN_PF " to " LSN_PF, + log_sys->written_to_all_lsn, + log_sys->lsn)); #ifdef UNIV_DEBUG if (log_debug_writes) { fprintf(stderr, diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 1a99e159d41..a41c1807610 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1138,6 +1138,7 @@ trx_flush_log_if_needed_low( lsn_t lsn) /*!< in: lsn up to which logs are to be flushed. */ { + DBUG_ENTER("trx_flush_log_if_needed_low"); switch (srv_flush_log_at_trx_commit) { case 0: /* Do nothing */ @@ -1156,6 +1157,7 @@ trx_flush_log_if_needed_low( default: ut_error; } + DBUG_VOID_RETURN; } /**********************************************************************//** @@ -1690,15 +1692,17 @@ trx_commit_complete_for_mysql( trx_t* trx) /*!< in/out: transaction */ { ut_a(trx); + DBUG_ENTER("trx_commit_complete_for_mysql"); if (!trx->must_flush_log_later || (srv_flush_log_at_trx_commit == 1 && trx->active_commit_ordered)) { - return; + DBUG_VOID_RETURN; } trx_flush_log_if_needed(trx->commit_lsn, trx); trx->must_flush_log_later = FALSE; + DBUG_VOID_RETURN; } /**********************************************************************//** -- cgit v1.2.1 From 4de75afb454cc5e032c185e2257cdbb7a4010b0d Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 1 Sep 2016 23:37:27 -0700 Subject: Fixed the previous merge to be able to build mysqld. Adjusted one result file. 
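
The new Item processors from the condition pushdown patch were still declared
with uchar* arguments, while the other processor callbacks in sql/item.h take
void*; they are switched to void* here (together with the matching cast in
sql/table.cc), for example:

  virtual bool exclusive_dependence_on_table_processor(void *map);
  virtual bool exclusive_dependence_on_grouping_fields_processor(void *arg);

The adjusted result file is mysql-test/r/mysqld--help.result, which now lists
condition_pushdown_for_derived in optimizer_switch.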
--- mysql-test/r/mysqld--help.result | 5 +++-- sql/item.cc | 4 ++-- sql/item.h | 10 ++++------ sql/table.cc | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index 0d3a1f07225..c3ecd389f91 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -580,7 +580,8 @@ The following options may be given as the first argument: semijoin_with_cache, join_cache_incremental, join_cache_hashed, join_cache_bka, optimize_join_buffer_size, table_elimination, - extended_keys, exists_to_in, orderby_uses_equalities + extended_keys, exists_to_in, orderby_uses_equalities, + condition_pushdown_for_derived --optimizer-use-condition-selectivity=# Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial @@ -1328,7 +1329,7 @@ old-style-user-limits FALSE optimizer-prune-level 1 optimizer-search-depth 62 optimizer-selectivity-sampling-limit 100 -optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on +optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=off,condition_pushdown_for_derived=on optimizer-use-condition-selectivity 1 performance-schema FALSE performance-schema-accounts-size -1 diff --git a/sql/item.cc b/sql/item.cc index 6bce98b071c..f04ab32c734 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -10083,14 +10083,14 @@ const char *dbug_print_item(Item *item) #endif /*DBUG_OFF*/ -bool Item_field::exclusive_dependence_on_table_processor(uchar *map) +bool Item_field::exclusive_dependence_on_table_processor(void *map) { table_map tab_map= *((table_map *) map); return !((used_tables() == tab_map || (item_equal && item_equal->used_tables() & tab_map))); } -bool Item_field::exclusive_dependence_on_grouping_fields_processor(uchar *arg) +bool Item_field::exclusive_dependence_on_grouping_fields_processor(void *arg) { st_select_lex *sl= (st_select_lex *)arg; List_iterator li(sl->grouping_tmp_fields); diff --git a/sql/item.h b/sql/item.h index 34fd66710c2..7a189004531 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1481,12 +1481,10 @@ public: virtual bool exists2in_processor(void *opt_arg) { return 0; } virtual bool find_selective_predicates_list_processor(void *opt_arg) { return 0; } - virtual bool exclusive_dependence_on_table_processor(uchar *map) + virtual bool 
exclusive_dependence_on_table_processor(void *map) { return 0; } - virtual bool exclusive_dependence_on_grouping_fields_processor(uchar *arg) + virtual bool exclusive_dependence_on_grouping_fields_processor(void *arg) { return 0; } - //virtual Item *get_copy(THD *thd, MEM_ROOT *mem_root); - virtual Item *get_copy(THD *thd, MEM_ROOT *mem_root)=0; @@ -2583,8 +2581,8 @@ public: virtual Item *derived_field_transformer_for_having(THD *thd, uchar *arg); virtual Item *derived_field_transformer_for_where(THD *thd, uchar *arg); virtual void print(String *str, enum_query_type query_type); - bool exclusive_dependence_on_table_processor(uchar *map); - bool exclusive_dependence_on_grouping_fields_processor(uchar *arg); + bool exclusive_dependence_on_table_processor(void *map); + bool exclusive_dependence_on_grouping_fields_processor(void *arg); Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy(thd, mem_root, this); } bool is_outer_field() const diff --git a/sql/table.cc b/sql/table.cc index 1b2b7352046..42ed08233af 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -7950,7 +7950,7 @@ void TABLE_LIST::check_pushable_cond_for_table(Item *cond) } } else if (cond->walk(&Item::exclusive_dependence_on_table_processor, - 0, (uchar *) &tab_map)) + 0, (void *) &tab_map)) cond->set_extraction_flag(NO_EXTRACTION_FL); } -- cgit v1.2.1 From 102fc62990c4b133f9597c61293d93f91759cae2 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Thu, 1 Sep 2016 23:44:42 -0700 Subject: Fixed a failure with cte_recursive.test: Do not push conditions into recursive with tables. --- sql/sql_derived.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 314d5985a72..4d24993f4b0 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -1129,6 +1129,11 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived) { if (!cond) return false; + + /* Do not push conditions into recursive with tables */ + if (derived->is_recursive_with_table()) + return false; + /* Build the most restrictive condition extractable from 'cond' that can be pushed into the derived table 'derived'. -- cgit v1.2.1 From 2e814d4702d71a04388386a9f591d14a35980bfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Fri, 12 Aug 2016 11:17:45 +0300 Subject: Merge InnoDB 5.7 from mysql-5.7.9. Contains also MDEV-10547: Test multi_update_innodb fails with InnoDB 5.7 The failure happened because 5.7 has changed the signature of the bool handler::primary_key_is_clustered() const virtual function ("const" was added). InnoDB was using the old signature which caused the function not to be used. MDEV-10550: Parallel replication lock waits/deadlock handling does not work with InnoDB 5.7 Fixed mutexing problem on lock_trx_handle_wait. Note that rpl_parallel and rpl_optimistic_parallel tests still fail. 
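
The MDEV-10547 signature mismatch mentioned above is of this form (a sketch,
not the exact declarations from handler.h / ha_innodb.h):

  /* 5.7: "const" added to the base class virtual */
  virtual bool primary_key_is_clustered() const;

  /* old InnoDB signature: without const it no longer overrides the
     virtual, so the base class implementation gets used instead */
  bool primary_key_is_clustered();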
MDEV-10156 : Group commit tests fail on 10.2 InnoDB (branch bb-10.2-jan) Reason: incorrect merge MDEV-10550: Parallel replication can't sync with master in InnoDB 5.7 (branch bb-10.2-jan) Reason: incorrect merge --- BUILD/SETUP.sh | 2 +- configure.cmake | 2 +- extra/CMakeLists.txt | 2 + extra/innochecksum.cc | 2283 +-- include/dur_prop.h | 32 + include/my_icp.h | 48 + include/mysql/psi/psi_base.h | 147 + include/mysql/psi/psi_memory.h | 155 + mysql-test/disabled.def | 4 + mysql-test/include/expect_crash.inc | 5 + mysql-test/include/have_innodb_16k.inc | 2 + mysql-test/include/have_innodb_32k.inc | 6 +- mysql-test/include/have_innodb_4k.inc | 6 + mysql-test/include/have_innodb_64k.inc | 2 +- mysql-test/include/have_innodb_8k.inc | 6 + mysql-test/include/have_innodb_max_16k.inc | 4 + mysql-test/include/have_innodb_zip.inc | 4 + mysql-test/include/have_numa.inc | 18 + mysql-test/include/have_xtradb.combinations | 10 +- mysql-test/include/mtr_check.sql | 1 + mysql-test/mysql-test-run.pl | 9 +- mysql-test/r/alter_table.result | 4 +- mysql-test/r/group_min_max_innodb.result | 4 +- mysql-test/r/innodb_icp.result | 2 +- mysql-test/r/mysqlbinlog_row_minimal.result | 16 +- mysql-test/r/mysqld--help.result | 4 - mysql-test/r/order_by_optimizer_innodb.result | 4 +- mysql-test/r/partition_innodb.result | 12 +- mysql-test/r/partition_innodb_plugin.result | 8 +- mysql-test/r/range_vs_index_merge_innodb.result | 68 +- mysql-test/r/row-checksum-old.result | 2 +- mysql-test/r/row-checksum.result | 2 +- mysql-test/r/selectivity_innodb.result | 20 +- mysql-test/r/stat_tables_innodb.result | 4 +- mysql-test/r/type_bit_innodb.result | 2 +- mysql-test/suite/encryption/disabled.def | 1 + .../suite/encryption/r/debug_key_management.result | 8 +- .../encryption/r/innodb-bad-key-change.result | 7 +- .../encryption/r/innodb-bad-key-change3.result | 9 +- .../encryption/r/innodb-bad-key-change4.result | 8 +- .../encryption/r/innodb-bad-key-change5.result | 6 + .../encryption/r/innodb-discard-import.result | 11 +- .../encryption/r/innodb-encryption-alter.result | 4 + .../encryption/r/innodb-encryption-disable.result | 16 + .../suite/encryption/r/innodb-log-encrypt.result | 4 + .../encryption/r/innodb-page_encryption.result | 8 + .../r/innodb-page_encryption_compression.result | 8 + .../r/innodb-page_encryption_log_encryption.result | 8 + .../r/innodb_encryption_discard_import.result | 11 +- .../encryption/r/innodb_encryption_filekeys.result | 4 + .../suite/encryption/r/innodb_encryption_is.result | 4 + .../r/innodb_encryption_row_compressed.result | 159 + .../encryption/r/innodb_encryption_tables.result | 6 + .../r/innodb_onlinealter_encryption.result | 4 + .../r/innodb_page_encryption_key_change.result | 6 + .../suite/encryption/t/innodb-bad-key-change.test | 2 + .../suite/encryption/t/innodb-bad-key-change3.test | 4 +- .../suite/encryption/t/innodb-discard-import.test | 3 +- .../encryption/t/innodb-encryption-disable.test | 17 +- .../t/innodb_encryption_discard_import.test | 4 +- .../t/innodb_encryption_row_compressed.opt | 4 + .../t/innodb_encryption_row_compressed.test | 125 + mysql-test/suite/funcs_1/r/is_tables_innodb.result | 10 +- mysql-test/suite/funcs_1/r/is_tables_mysql.result | 4 +- mysql-test/suite/handler/disabled.def | 13 + mysql-test/suite/innodb/disabled.def | 4 + .../include/innodb_simulate_comp_failures.inc | 10 +- .../suite/innodb/r/group_commit_crash.result | 2 + .../r/group_commit_crash_no_optimize_thread.result | 2 + mysql-test/suite/innodb/r/help_url.result | 2 - 
mysql-test/suite/innodb/r/innodb-16k.result | 26 +- .../suite/innodb/r/innodb-alter-discard.result | 8 +- mysql-test/suite/innodb/r/innodb-blob.result | 42 +- .../suite/innodb/r/innodb-bug-14068765.result | 4 +- .../suite/innodb/r/innodb-bug-14084530.result | 4 +- .../innodb/r/innodb-change-buffer-recovery.result | 9 +- .../suite/innodb/r/innodb-fk-warnings.result | 2 +- mysql-test/suite/innodb/r/innodb-index.result | 14 +- mysql-test/suite/innodb/r/innodb-mdev-7408.result | 1 + mysql-test/suite/innodb/r/innodb-mdev-7513.result | 1 + .../innodb/r/innodb-page_compression_bzip2.result | 4 + .../innodb/r/innodb-page_compression_lz4.result | 4 + .../innodb/r/innodb-page_compression_lzma.result | 4 + .../innodb/r/innodb-page_compression_lzo.result | 4 + .../innodb/r/innodb-page_compression_snappy.result | 4 + .../innodb/r/innodb-page_compression_tables.result | 6 + .../innodb/r/innodb-page_compression_zip.result | 4 + .../suite/innodb/r/innodb-virtual-columns.result | 118 + mysql-test/suite/innodb/r/innodb-wl5522-1.result | 11 +- .../suite/innodb/r/innodb-wl5522-debug-zip.result | 29 +- mysql-test/suite/innodb/r/innodb-wl5522-zip.result | 18 +- mysql-test/suite/innodb/r/innodb-wl5522.result | 25 +- mysql-test/suite/innodb/r/innodb.result | 114 +- .../suite/innodb/r/innodb_blob_truncate.result | 4 + .../r/innodb_blob_unrecoverable_crash.result | 24 - .../suite/innodb/r/innodb_bug12400341.result | 1 + .../suite/innodb/r/innodb_bug12902967.result | 3 +- .../suite/innodb/r/innodb_bug14147491.result | 24 +- mysql-test/suite/innodb/r/innodb_bug30423.result | 12 +- mysql-test/suite/innodb/r/innodb_bug34053.result | 2 +- mysql-test/suite/innodb/r/innodb_bug34300.result | 19 +- mysql-test/suite/innodb/r/innodb_bug46000.result | 2 +- mysql-test/suite/innodb/r/innodb_bug47167.result | 12 +- mysql-test/suite/innodb/r/innodb_bug54044.result | 2 +- mysql-test/suite/innodb/r/innodb_bug60049.result | 2 - .../suite/innodb/r/innodb_corrupt_bit.result | 84 +- .../suite/innodb/r/innodb_file_format.result | 26 +- mysql-test/suite/innodb/r/innodb_gis.result | 2 +- .../innodb/r/innodb_information_schema.result | 10 +- .../r/innodb_information_schema_buffer.result | 1 + mysql-test/suite/innodb/r/innodb_monitor.result | 29 +- .../r/innodb_prefix_index_restart_server.result | 12 +- .../r/innodb_simulate_comp_failures_small.result | 13 +- .../r/innodb_stats_create_on_corrupted.result | 4 +- .../innodb/r/innodb_stats_fetch_corrupted.result | 4 +- mysql-test/suite/innodb/r/strict_mode.result | 3 +- mysql-test/suite/innodb/t/auto_increment_dup.opt | 1 + .../suite/innodb/t/create_isl_with_direct.test | 2 +- mysql-test/suite/innodb/t/innodb-16k.test | 21 +- .../suite/innodb/t/innodb-alter-discard.test | 6 + mysql-test/suite/innodb/t/innodb-blob.test | 54 +- .../t/innodb-change-buffer-recovery-master.opt | 1 + .../innodb/t/innodb-change-buffer-recovery.test | 24 +- mysql-test/suite/innodb/t/innodb-mdev-7408.test | 2 + .../suite/innodb/t/innodb-mdev-7513-master.opt | 1 + mysql-test/suite/innodb/t/innodb-mdev-7513.test | 1 + mysql-test/suite/innodb/t/innodb-mdev7046.test | 1 + .../suite/innodb/t/innodb-virtual-columns.test | 6 + mysql-test/suite/innodb/t/innodb-wl5522-1.test | 2 + .../suite/innodb/t/innodb-wl5522-debug-zip.test | 3 + mysql-test/suite/innodb/t/innodb-wl5522-zip.test | 2 + mysql-test/suite/innodb/t/innodb-wl5522.test | 2 + mysql-test/suite/innodb/t/innodb.test | 128 +- .../innodb/t/innodb_blob_unrecoverable_crash.test | 52 - mysql-test/suite/innodb/t/innodb_bug12400341.test | 4 +- 
mysql-test/suite/innodb/t/innodb_bug12902967.test | 9 +- .../suite/innodb/t/innodb_bug14147491-master.opt | 5 +- mysql-test/suite/innodb/t/innodb_bug14147491.test | 56 +- mysql-test/suite/innodb/t/innodb_bug30423.test | 2 + mysql-test/suite/innodb/t/innodb_bug34053.test | 20 +- mysql-test/suite/innodb/t/innodb_bug34300.test | 28 +- .../suite/innodb/t/innodb_bug60049-master.opt | 2 +- mysql-test/suite/innodb/t/innodb_bug60049.test | 17 +- mysql-test/suite/innodb/t/innodb_corrupt_bit.test | 22 +- mysql-test/suite/innodb/t/innodb_gis.test | 5 +- .../innodb/t/innodb_stats_create_on_corrupted.test | 4 +- .../innodb/t/innodb_stats_fetch_corrupted.test | 4 +- mysql-test/suite/innodb/t/strict_mode.test | 2 + .../suite/innodb_zip/include/innodb-wl6045.inc | 20 + .../innodb_zip/include/innodb_create_tab_indx.inc | 16 + .../suite/innodb_zip/include/innodb_dml_ops.inc | 82 + .../innodb_zip/include/innodb_fetch_records.inc | 7 + .../suite/innodb_zip/include/innodb_load_data.inc | 19 + .../innodb_zip/include/innodb_stats_comp_index.inc | 26 + .../innodb_zip/include/innodb_stats_restart.inc | 12 + .../innodb_zip/include/innodb_temp_table_dml.inc | 40 + .../include/innodb_wl6501_crash_stripped.inc | 144 + .../innodb_zip/include/innodb_wl6501_error.inc | 234 + .../innodb_zip/include/innodb_wl6501_scale.inc | 113 + mysql-test/suite/innodb_zip/r/16k.result | 745 + mysql-test/suite/innodb_zip/r/4k.result | 442 + mysql-test/suite/innodb_zip/r/8k.result | 473 + mysql-test/suite/innodb_zip/r/bug36169.result | 1 + mysql-test/suite/innodb_zip/r/bug36172.result | 1 + mysql-test/suite/innodb_zip/r/bug52745.result | 129 + mysql-test/suite/innodb_zip/r/bug53591.result | 13 + mysql-test/suite/innodb_zip/r/bug56680.result | 120 + .../suite/innodb_zip/r/cmp_drop_table.result | 13 + mysql-test/suite/innodb_zip/r/cmp_per_index.result | 94 + .../suite/innodb_zip/r/create_options.result | 839 ++ .../suite/innodb_zip/r/index_large_prefix.result | 534 + .../innodb_zip/r/index_large_prefix_4k.result | 404 + .../innodb_zip/r/index_large_prefix_8k.result | 442 + mysql-test/suite/innodb_zip/r/innochecksum.result | 82 + .../suite/innodb_zip/r/innochecksum_2.result | 140 + .../suite/innodb_zip/r/innochecksum_3.result | 184 + .../innodb_zip/r/innodb-create-options.result | 67 +- mysql-test/suite/innodb_zip/r/innodb-zip.result | 53 +- .../suite/innodb_zip/r/innodb_bug36169.result | 3 + .../suite/innodb_zip/r/innodb_bug52745.result | 6 +- .../suite/innodb_zip/r/innodb_bug53591.result | 8 +- .../suite/innodb_zip/r/innodb_bug56680.result | 4 + .../innodb_zip/r/innodb_cmp_drop_table.result | 4 + .../innodb_zip/r/innodb_index_large_prefix.result | 43 +- .../r/innodb_prefix_index_liftedlimit.result | 1396 -- mysql-test/suite/innodb_zip/r/large_blob.result | 83 + mysql-test/suite/innodb_zip/r/restart.result | 1236 ++ .../innodb_zip/r/wl6344_compress_level.result | 135 + .../innodb_zip/r/wl6347_comp_indx_stat.result | 8084 +++++++++++ mysql-test/suite/innodb_zip/r/wl6470_1.result | 598 + mysql-test/suite/innodb_zip/r/wl6470_2.result | 667 + mysql-test/suite/innodb_zip/r/wl6501_1.result | 1150 ++ .../suite/innodb_zip/r/wl6501_crash_3.result | 489 + .../suite/innodb_zip/r/wl6501_crash_4.result | 553 + .../suite/innodb_zip/r/wl6501_crash_5.result | 489 + .../suite/innodb_zip/r/wl6501_scale_1.result | 354 + mysql-test/suite/innodb_zip/r/wl6560.result | 418 + mysql-test/suite/innodb_zip/r/wl6915_1.result | 2060 +++ mysql-test/suite/innodb_zip/t/16k.test | 715 + mysql-test/suite/innodb_zip/t/4k.test | 440 + mysql-test/suite/innodb_zip/t/8k.test | 
468 + mysql-test/suite/innodb_zip/t/bug36169.test | 1162 ++ mysql-test/suite/innodb_zip/t/bug36172.test | 30 + mysql-test/suite/innodb_zip/t/bug52745.test | 105 + mysql-test/suite/innodb_zip/t/bug53591.test | 22 + mysql-test/suite/innodb_zip/t/bug56680.test | 140 + .../suite/innodb_zip/t/cmp_drop_table-master.opt | 1 + mysql-test/suite/innodb_zip/t/cmp_drop_table.test | 57 + mysql-test/suite/innodb_zip/t/cmp_per_index.test | 118 + mysql-test/suite/innodb_zip/t/create_options.test | 528 + mysql-test/suite/innodb_zip/t/disabled.def | 17 + .../suite/innodb_zip/t/index_large_prefix.test | 441 + .../suite/innodb_zip/t/index_large_prefix_4k.test | 400 + .../suite/innodb_zip/t/index_large_prefix_8k.test | 429 + mysql-test/suite/innodb_zip/t/innochecksum.test | 240 + mysql-test/suite/innodb_zip/t/innochecksum_2.test | 114 + mysql-test/suite/innodb_zip/t/innochecksum_3.test | 378 + mysql-test/suite/innodb_zip/t/innodb_bug36169.opt | 1 + mysql-test/suite/innodb_zip/t/innodb_bug36169.test | 2 + mysql-test/suite/innodb_zip/t/innodb_bug53591.test | 3 +- .../innodb_zip/t/innodb_index_large_prefix.test | 8 +- .../t/innodb_prefix_index_liftedlimit.test | 1371 -- .../suite/innodb_zip/t/large_blob-master.opt | 3 + mysql-test/suite/innodb_zip/t/large_blob.test | 122 + mysql-test/suite/innodb_zip/t/restart.test | 602 + .../suite/innodb_zip/t/wl6344_compress_level.test | 135 + .../suite/innodb_zip/t/wl6347_comp_indx_stat.test | 1337 ++ mysql-test/suite/innodb_zip/t/wl6470_1.test | 60 + mysql-test/suite/innodb_zip/t/wl6470_2.test | 468 + mysql-test/suite/innodb_zip/t/wl6501_1.test | 451 + mysql-test/suite/innodb_zip/t/wl6501_crash_3.test | 26 + mysql-test/suite/innodb_zip/t/wl6501_crash_4.test | 29 + mysql-test/suite/innodb_zip/t/wl6501_crash_5.test | 26 + mysql-test/suite/innodb_zip/t/wl6501_scale_1.test | 41 + mysql-test/suite/innodb_zip/t/wl6560.test | 423 + mysql-test/suite/innodb_zip/t/wl6915_1.test | 650 + mysql-test/suite/rpl/disabled.def | 3 + .../innodb_adaptive_hash_index_parts_basic.result | 48 + .../r/innodb_adaptive_max_sleep_delay_basic.result | 26 + .../r/innodb_additional_mem_pool_size_basic.result | 53 - .../r/innodb_api_bk_commit_interval_basic.result | 36 + .../r/innodb_autoextend_increment_basic.result | 6 + .../r/innodb_buffer_pool_chunk_size_basic.result | 48 + ...nnodb_buffer_pool_dump_at_shutdown_basic.result | 3 +- .../r/innodb_buffer_pool_dump_pct_basic.result | 41 +- .../r/innodb_buffer_pool_filename_basic.result | 7 - ...innodb_buffer_pool_load_at_startup_basic.result | 2 +- .../r/innodb_buffer_pool_size_basic.result | 7 +- .../r/innodb_checksum_algorithm_basic.result | 22 +- .../r/innodb_cmp_per_index_enabled_basic.result | 2 - .../r/innodb_commit_concurrency_basic.result | 20 + ..._compression_failure_threshold_pct_basic.result | 15 + .../r/innodb_compression_pad_pct_max_basic.result | 15 + .../r/innodb_concurrency_tickets_basic.result | 48 +- .../r/innodb_default_row_format_basic.result | 48 + ...b_disable_resize_buffer_pool_debug_basic.result | 60 + .../sys_vars/r/innodb_fast_shutdown_basic.result | 15 + .../sys_vars/r/innodb_file_format_basic.result | 20 +- .../sys_vars/r/innodb_file_format_max_basic.result | 20 +- .../sys_vars/r/innodb_fill_factor_basic.result | 42 + .../r/innodb_flush_log_at_timeout_basic.result | 15 + .../r/innodb_flush_log_at_trx_commit_basic.result | 26 + .../sys_vars/r/innodb_flush_sync_basic.result | 92 + .../r/innodb_flushing_avg_loops_basic.result | 10 + .../r/innodb_ft_result_cache_limit_basic.result | 10 +- 
.../r/innodb_ft_server_stopword_table_basic.result | 1 + .../r/innodb_ft_user_stopword_table_basic.result | 2 +- .../sys_vars/r/innodb_large_prefix_basic.result | 24 +- .../r/innodb_lock_wait_timeout_basic.result | 98 + .../r/innodb_log_checkpoint_now_basic.result | 81 +- .../sys_vars/r/innodb_log_checksums_basic.result | 42 + .../r/innodb_log_write_ahead_size_basic.result | 88 + .../r/innodb_max_dirty_pages_pct_basic.result | 108 +- .../r/innodb_max_dirty_pages_pct_func.result | 7 + .../r/innodb_max_dirty_pages_pct_lwm_basic.result | 32 +- .../sys_vars/r/innodb_max_purge_lag_basic.result | 50 +- .../r/innodb_max_undo_log_size_basic.result | 54 + ...nodb_merge_threshold_set_all_debug_basic.result | 28 + .../r/innodb_mirrored_log_groups_basic.result | 53 - .../sys_vars/r/innodb_monitor_disable_basic.result | 29 +- .../sys_vars/r/innodb_monitor_enable_basic.result | 29 +- .../r/innodb_monitor_reset_all_basic.result | 29 +- .../sys_vars/r/innodb_monitor_reset_basic.result | 29 +- .../sys_vars/r/innodb_numa_interleave_basic.result | 10 + .../sys_vars/r/innodb_old_blocks_pct_basic.result | 8 + .../sys_vars/r/innodb_page_cleaners_basic.result | 41 + .../suite/sys_vars/r/innodb_page_size_basic.result | 9 +- .../r/innodb_purge_batch_size_basic.result | 25 +- ...nodb_purge_rseg_truncate_frequency_basic.result | 113 + .../sys_vars/r/innodb_purge_threads_basic.result | 38 +- .../r/innodb_read_ahead_threshold_basic.result | 14 + .../r/innodb_replication_delay_basic.result | 44 + .../sys_vars/r/innodb_spin_wait_delay_basic.result | 44 + .../r/innodb_stats_persistent_basic.result | 2 +- ...nodb_stats_persistent_sample_pages_basic.result | 26 +- .../r/innodb_stats_sample_pages_basic.result | 22 +- ...nnodb_stats_transient_sample_pages_basic.result | 6 + .../sys_vars/r/innodb_strict_mode_basic.result | 16 +- .../sys_vars/r/innodb_sync_debug_basic.result | 11 + .../sys_vars/r/innodb_sync_spin_loops_basic.result | 56 +- .../sys_vars/r/innodb_table_locks_func.result | 7 + .../r/innodb_temp_data_file_path_basic.result | 53 + .../r/innodb_thread_concurrency_basic.result | 15 + .../sys_vars/r/innodb_undo_directory_basic.result | 16 +- .../r/innodb_undo_log_truncate_basic.result | 69 + .../r/innodb_undo_tablespaces_basic.result | 12 +- .../sys_vars/r/innodb_use_sys_malloc_basic.result | 22 - mysql-test/suite/sys_vars/r/sysvars_innodb.result | 318 +- .../t/innodb_adaptive_hash_index_parts_basic.test | 75 + .../t/innodb_adaptive_max_sleep_delay_basic.test | 18 + .../t/innodb_additional_mem_pool_size_basic.test | 102 - .../t/innodb_api_bk_commit_interval_basic.test | 27 + .../t/innodb_api_disable_rowlock_basic.test | 4 + .../sys_vars/t/innodb_api_enable_binlog_basic.test | 4 + .../sys_vars/t/innodb_api_enable_mdl_basic.test | 4 + .../sys_vars/t/innodb_api_trx_level_basic.test | 6 + .../t/innodb_autoextend_increment_basic.test | 7 + .../sys_vars/t/innodb_autoinc_lock_mode_basic.test | 4 + .../t/innodb_autoinc_lock_mode_func-master.opt | 2 +- .../t/innodb_buffer_pool_chunk_size_basic.test | 75 + .../innodb_buffer_pool_dump_at_shutdown_basic.test | 2 + .../t/innodb_buffer_pool_dump_pct_basic.test | 67 +- .../t/innodb_buffer_pool_filename_basic.test | 32 - .../t/innodb_buffer_pool_instances_basic.test | 4 + .../t/innodb_buffer_pool_load_now_basic.test | 9 +- .../t/innodb_buffer_pool_size_basic-master.opt | 1 + .../sys_vars/t/innodb_buffer_pool_size_basic.test | 34 +- .../t/innodb_change_buffer_max_size_basic.test | 8 + .../sys_vars/t/innodb_change_buffering_basic.test | 6 + 
.../t/innodb_change_buffering_debug_basic.test | 6 + .../t/innodb_checksum_algorithm_basic.test | 1 - .../suite/sys_vars/t/innodb_checksums_basic.test | 4 + .../t/innodb_cmp_per_index_enabled_basic.test | 3 - .../t/innodb_commit_concurrency_basic.test | 12 + ...db_compression_failure_threshold_pct_basic.test | 14 +- .../sys_vars/t/innodb_compression_level_basic.test | 8 + .../t/innodb_compression_pad_pct_max_basic.test | 14 +- .../t/innodb_concurrency_tickets_basic.test | 48 +- .../sys_vars/t/innodb_data_file_path_basic.test | 4 + .../sys_vars/t/innodb_data_home_dir_basic.test | 4 + .../t/innodb_default_row_format_basic.test | 41 + ...odb_disable_resize_buffer_pool_debug_basic.test | 72 + .../suite/sys_vars/t/innodb_doublewrite_basic.test | 4 + .../t/innodb_doublewrite_batch_size_basic.test | 2 + .../sys_vars/t/innodb_fast_shutdown_basic.test | 14 +- .../suite/sys_vars/t/innodb_file_format_basic.test | 6 + .../sys_vars/t/innodb_file_format_check_basic.test | 6 + .../sys_vars/t/innodb_file_format_max_basic.test | 7 +- .../sys_vars/t/innodb_file_io_threads_basic.test | 8 + .../t/innodb_file_per_table_basic-master.opt | 1 + .../sys_vars/t/innodb_file_per_table_basic.test | 4 + .../suite/sys_vars/t/innodb_fill_factor_basic.test | 41 + .../t/innodb_flush_log_at_timeout_basic.test | 13 + .../t/innodb_flush_log_at_trx_commit_basic.test | 22 + .../sys_vars/t/innodb_flush_method_basic.test | 4 + .../sys_vars/t/innodb_flush_neighbors_basic.test | 12 + .../suite/sys_vars/t/innodb_flush_sync_basic.test | 77 + .../t/innodb_flushing_avg_loops_basic.test | 10 + .../t/innodb_force_load_corrupted_basic.test | 4 + .../sys_vars/t/innodb_force_recovery_basic.test | 4 + .../t/innodb_force_recovery_crash_basic.test | 2 + .../sys_vars/t/innodb_ft_aux_table_basic.test | 2 + .../sys_vars/t/innodb_ft_cache_size_basic.test | 2 + .../t/innodb_ft_enable_diag_print_basic.test | 12 + .../t/innodb_ft_enable_stopword_basic.test | 12 + .../sys_vars/t/innodb_ft_max_token_size_basic.test | 2 + .../sys_vars/t/innodb_ft_min_token_size_basic.test | 2 + .../t/innodb_ft_num_word_optimize_basic.test | 6 + .../t/innodb_ft_result_cache_limit_basic.test | 12 +- .../t/innodb_ft_server_stopword_table_basic.test | 4 + .../t/innodb_ft_sort_pll_degree_basic.test | 2 + .../t/innodb_ft_total_cache_size_basic.test | 13 +- .../t/innodb_ft_user_stopword_table_basic.test | 5 +- .../sys_vars/t/innodb_large_prefix_basic.test | 12 + ...innodb_limit_optimistic_insert_debug_basic.test | 6 + .../sys_vars/t/innodb_lock_wait_timeout_basic.test | 71 + .../t/innodb_locks_unsafe_for_binlog_basic.test | 4 + .../sys_vars/t/innodb_log_buffer_size_basic.test | 4 + .../t/innodb_log_checkpoint_now_basic.test | 80 +- .../sys_vars/t/innodb_log_checksums_basic.test | 36 + .../t/innodb_log_compressed_pages_basic.test | 9 +- .../sys_vars/t/innodb_log_file_size_basic.test | 4 + .../t/innodb_log_files_in_group_basic.test | 4 + .../t/innodb_log_group_home_dir_basic.test | 4 + .../t/innodb_log_write_ahead_size_basic.test | 93 + .../sys_vars/t/innodb_lru_scan_depth_basic.test | 8 + .../t/innodb_max_dirty_pages_pct_basic.test | 93 +- .../t/innodb_max_dirty_pages_pct_func.test | 7 + .../t/innodb_max_dirty_pages_pct_lwm_basic.test | 22 +- .../sys_vars/t/innodb_max_purge_lag_basic.test | 52 +- .../t/innodb_max_purge_lag_delay_basic.test | 2 + .../sys_vars/t/innodb_max_undo_log_size_basic.test | 99 + ...innodb_merge_threshold_set_all_debug_basic.test | 30 + .../t/innodb_mirrored_log_groups_basic.test | 102 - .../sys_vars/t/innodb_monitor_disable_basic.test | 5 - 
.../sys_vars/t/innodb_monitor_enable_basic.test | 5 - .../sys_vars/t/innodb_monitor_reset_all_basic.test | 5 - .../sys_vars/t/innodb_monitor_reset_basic.test | 5 - .../sys_vars/t/innodb_numa_interleave_basic.test | 13 + .../sys_vars/t/innodb_old_blocks_pct_basic.test | 22 +- .../sys_vars/t/innodb_old_blocks_time_basic.test | 6 + .../t/innodb_online_alter_log_max_size_basic.test | 6 + .../suite/sys_vars/t/innodb_open_files_basic.test | 4 + .../t/innodb_optimize_fulltext_only_basic.test | 12 + .../sys_vars/t/innodb_page_cleaners_basic.test | 53 + .../sys_vars/t/innodb_page_hash_locks_basic.test | 2 + .../suite/sys_vars/t/innodb_page_size_basic.test | 17 +- .../t/innodb_print_all_deadlocks_basic.test | 12 + .../sys_vars/t/innodb_purge_batch_size_basic.test | 17 +- ...innodb_purge_rseg_truncate_frequency_basic.test | 161 + .../sys_vars/t/innodb_purge_threads_basic.test | 89 +- .../sys_vars/t/innodb_random_read_ahead_basic.test | 12 + .../t/innodb_read_ahead_threshold_basic.test | 19 + .../sys_vars/t/innodb_read_io_threads_basic.test | 2 + .../suite/sys_vars/t/innodb_read_only_basic.test | 2 + .../sys_vars/t/innodb_replication_delay_basic.test | 52 +- .../t/innodb_rollback_on_timeout_basic.test | 4 + .../sys_vars/t/innodb_rollback_segments_basic.test | 6 + .../sys_vars/t/innodb_sort_buffer_size_basic.test | 2 + .../sys_vars/t/innodb_spin_wait_delay_basic.test | 51 +- .../sys_vars/t/innodb_stats_method_basic.test | 8 + .../sys_vars/t/innodb_stats_on_metadata_basic.test | 12 + .../sys_vars/t/innodb_stats_persistent_basic.test | 2 +- ...innodb_stats_persistent_sample_pages_basic.test | 34 +- .../t/innodb_stats_sample_pages_basic.test | 28 +- .../innodb_stats_transient_sample_pages_basic.test | 14 + .../sys_vars/t/innodb_status_output_basic.test | 14 + .../t/innodb_status_output_locks_basic.test | 14 + .../suite/sys_vars/t/innodb_strict_mode_basic.test | 12 + .../suite/sys_vars/t/innodb_support_xa_basic.test | 10 +- .../sys_vars/t/innodb_sync_array_size_basic.test | 8 +- .../suite/sys_vars/t/innodb_sync_debug_basic.test | 16 + .../sys_vars/t/innodb_sync_spin_loops_basic.test | 50 +- .../suite/sys_vars/t/innodb_table_locks_basic.test | 12 +- .../suite/sys_vars/t/innodb_table_locks_func.test | 7 + .../t/innodb_temp_data_file_path_basic.test | 106 + .../t/innodb_thread_concurrency_basic.test | 15 +- ...odb_trx_purge_view_update_only_debug_basic.test | 6 + .../t/innodb_trx_rseg_n_slots_debug_basic.test | 6 + .../sys_vars/t/innodb_undo_directory_basic.test | 12 +- .../sys_vars/t/innodb_undo_log_truncate_basic.test | 113 + .../suite/sys_vars/t/innodb_undo_logs_basic.test | 2 + .../sys_vars/t/innodb_undo_tablespaces_basic.test | 13 +- .../sys_vars/t/innodb_use_native_aio_basic.test | 4 + .../sys_vars/t/innodb_use_sys_malloc_basic.test | 31 - .../suite/sys_vars/t/innodb_version_basic.test | 2 + .../sys_vars/t/innodb_write_io_threads_basic.test | 2 + mysql-test/t/ctype_utf8mb4_innodb-master.opt | 1 + mysql-test/t/mysqlbinlog_row_minimal.test | 2 +- mysql-test/t/partition_exchange-master.opt | 1 + mysql-test/t/partition_innodb-master.opt | 1 + mysql-test/t/row-checksum-master.opt | 1 + mysql-test/t/row-checksum-old-master.opt | 1 + mysql-test/thou_shalt_not_kill.pm | 26 + sql/handler.h | 38 +- storage/innobase/CMakeLists.txt | 36 +- storage/innobase/Doxyfile | 1419 -- storage/innobase/api/api0api.cc | 776 +- storage/innobase/api/api0misc.cc | 125 +- storage/innobase/btr/btr0btr.cc | 2710 ++-- storage/innobase/btr/btr0bulk.cc | 995 ++ storage/innobase/btr/btr0cur.cc | 4395 ++++-- 
storage/innobase/btr/btr0defragment.cc | 53 +- storage/innobase/btr/btr0pcur.cc | 255 +- storage/innobase/btr/btr0scrub.cc | 70 +- storage/innobase/btr/btr0sea.cc | 1325 +- storage/innobase/buf/buf0buddy.cc | 253 +- storage/innobase/buf/buf0buf.cc | 4398 ++++-- storage/innobase/buf/buf0checksum.cc | 70 +- storage/innobase/buf/buf0dblwr.cc | 781 +- storage/innobase/buf/buf0dump.cc | 346 +- storage/innobase/buf/buf0flu.cc | 2417 +++- storage/innobase/buf/buf0lru.cc | 996 +- storage/innobase/buf/buf0mtflu.cc | 38 +- storage/innobase/buf/buf0rea.cc | 579 +- storage/innobase/data/data0data.cc | 267 +- storage/innobase/data/data0type.cc | 38 +- storage/innobase/dict/dict0boot.cc | 144 +- storage/innobase/dict/dict0crea.cc | 1293 +- storage/innobase/dict/dict0dict.cc | 2880 ++-- storage/innobase/dict/dict0load.cc | 2354 ++-- storage/innobase/dict/dict0mem.cc | 393 +- storage/innobase/dict/dict0stats.cc | 744 +- storage/innobase/dict/dict0stats_bg.cc | 141 +- storage/innobase/dyn/dyn0dyn.cc | 66 - storage/innobase/eval/eval0eval.cc | 140 +- storage/innobase/eval/eval0proc.cc | 20 +- storage/innobase/fil/fil0crypt.cc | 378 +- storage/innobase/fil/fil0fil.cc | 8278 +++++------ storage/innobase/fil/fil0pagecompress.cc | 226 +- storage/innobase/fsp/fsp0file.cc | 1130 ++ storage/innobase/fsp/fsp0fsp.cc | 3144 ++--- storage/innobase/fsp/fsp0space.cc | 291 + storage/innobase/fsp/fsp0sysspace.cc | 1050 ++ storage/innobase/fts/fts0ast.cc | 153 +- storage/innobase/fts/fts0blex.cc | 21 +- storage/innobase/fts/fts0blex.l | 1 + storage/innobase/fts/fts0config.cc | 84 +- storage/innobase/fts/fts0fts.cc | 1973 ++- storage/innobase/fts/fts0opt.cc | 436 +- storage/innobase/fts/fts0pars.cc | 13 +- storage/innobase/fts/fts0pars.y | 17 +- storage/innobase/fts/fts0plugin.cc | 295 + storage/innobase/fts/fts0que.cc | 929 +- storage/innobase/fts/fts0sql.cc | 90 +- storage/innobase/fts/fts0tlex.cc | 389 +- storage/innobase/fts/fts0tlex.l | 1 + storage/innobase/fut/fut0lst.cc | 212 +- storage/innobase/gis/gis0geo.cc | 835 ++ storage/innobase/gis/gis0rtree.cc | 2016 +++ storage/innobase/gis/gis0sea.cc | 2012 +++ storage/innobase/ha/ha0ha.cc | 178 +- storage/innobase/ha/ha0storage.cc | 12 +- storage/innobase/ha/hash0hash.cc | 144 +- storage/innobase/ha_innodb.def | 4 - storage/innobase/handler/ha_innodb.cc | 14087 ++++++++++++------- storage/innobase/handler/ha_innodb.h | 724 +- storage/innobase/handler/ha_innopart.cc | 4438 ++++++ storage/innobase/handler/ha_innopart.h | 1330 ++ storage/innobase/handler/handler0alter.cc | 4399 ++++-- storage/innobase/handler/handler0alter_innopart.cc | 307 + storage/innobase/handler/i_s.cc | 1184 +- storage/innobase/handler/i_s.h | 15 +- storage/innobase/ibuf/ibuf0ibuf.cc | 1467 +- storage/innobase/include/api0api.h | 411 +- storage/innobase/include/api0misc.h | 8 +- storage/innobase/include/btr0btr.h | 439 +- storage/innobase/include/btr0btr.ic | 88 +- storage/innobase/include/btr0bulk.h | 392 + storage/innobase/include/btr0cur.h | 431 +- storage/innobase/include/btr0cur.ic | 58 +- storage/innobase/include/btr0pcur.h | 152 +- storage/innobase/include/btr0pcur.ic | 151 +- storage/innobase/include/btr0sea.h | 306 +- storage/innobase/include/btr0sea.ic | 155 +- storage/innobase/include/btr0types.h | 195 +- storage/innobase/include/buf0buddy.h | 20 +- storage/innobase/include/buf0buddy.ic | 10 +- storage/innobase/include/buf0buf.h | 1202 +- storage/innobase/include/buf0buf.ic | 631 +- storage/innobase/include/buf0checksum.h | 32 +- storage/innobase/include/buf0dblwr.h | 36 +- 
storage/innobase/include/buf0dump.h | 7 +- storage/innobase/include/buf0flu.h | 258 +- storage/innobase/include/buf0flu.ic | 96 +- storage/innobase/include/buf0lru.h | 48 +- storage/innobase/include/buf0rea.h | 112 +- storage/innobase/include/buf0types.h | 41 +- storage/innobase/include/data0data.h | 268 +- storage/innobase/include/data0data.ic | 234 +- storage/innobase/include/data0type.h | 121 +- storage/innobase/include/data0type.ic | 77 +- storage/innobase/include/db0err.h | 55 +- storage/innobase/include/dict0boot.h | 50 +- storage/innobase/include/dict0boot.ic | 10 +- storage/innobase/include/dict0crea.h | 349 +- storage/innobase/include/dict0crea.ic | 54 +- storage/innobase/include/dict0dict.h | 1032 +- storage/innobase/include/dict0dict.ic | 955 +- storage/innobase/include/dict0load.h | 247 +- storage/innobase/include/dict0mem.h | 1214 +- storage/innobase/include/dict0mem.ic | 33 +- storage/innobase/include/dict0priv.h | 8 +- storage/innobase/include/dict0priv.ic | 32 +- storage/innobase/include/dict0stats.h | 25 +- storage/innobase/include/dict0stats.ic | 13 +- storage/innobase/include/dict0stats_bg.h | 19 +- storage/innobase/include/dict0types.h | 16 +- storage/innobase/include/dyn0buf.h | 505 + storage/innobase/include/dyn0dyn.h | 199 - storage/innobase/include/dyn0dyn.ic | 306 - storage/innobase/include/dyn0types.h | 39 + storage/innobase/include/eval0eval.h | 10 +- storage/innobase/include/eval0eval.ic | 12 +- storage/innobase/include/eval0proc.h | 24 +- storage/innobase/include/eval0proc.ic | 6 +- storage/innobase/include/fil0crypt.h | 40 +- storage/innobase/include/fil0crypt.ic | 53 +- storage/innobase/include/fil0fil.h | 1732 ++- storage/innobase/include/fil0fil.ic | 10 +- storage/innobase/include/fil0pagecompress.h | 48 +- storage/innobase/include/fsp0file.h | 590 + storage/innobase/include/fsp0fsp.h | 538 +- storage/innobase/include/fsp0fsp.ic | 402 +- storage/innobase/include/fsp0pagecompress.h | 2 +- storage/innobase/include/fsp0pagecompress.ic | 16 +- storage/innobase/include/fsp0space.h | 247 + storage/innobase/include/fsp0sysspace.h | 326 + storage/innobase/include/fsp0types.h | 311 +- storage/innobase/include/fts0ast.h | 53 +- storage/innobase/include/fts0blex.h | 2 +- storage/innobase/include/fts0fts.h | 265 +- storage/innobase/include/fts0opt.h | 3 +- storage/innobase/include/fts0plugin.h | 50 + storage/innobase/include/fts0priv.h | 163 +- storage/innobase/include/fts0priv.ic | 28 +- storage/innobase/include/fts0tlex.h | 2 +- storage/innobase/include/fts0tokenize.h | 188 + storage/innobase/include/fts0types.h | 114 +- storage/innobase/include/fts0types.ic | 318 +- storage/innobase/include/fut0fut.h | 30 +- storage/innobase/include/fut0fut.ic | 40 +- storage/innobase/include/fut0lst.h | 40 +- storage/innobase/include/fut0lst.ic | 32 +- storage/innobase/include/gis0geo.h | 162 + storage/innobase/include/gis0rtree.h | 572 + storage/innobase/include/gis0rtree.ic | 274 + storage/innobase/include/gis0type.h | 168 + storage/innobase/include/ha0ha.h | 106 +- storage/innobase/include/ha0ha.ic | 46 +- storage/innobase/include/ha0storage.h | 31 +- storage/innobase/include/ha0storage.ic | 6 +- storage/innobase/include/ha_prototypes.h | 396 +- storage/innobase/include/handler0alter.h | 13 +- storage/innobase/include/hash0hash.h | 92 +- storage/innobase/include/hash0hash.ic | 76 +- storage/innobase/include/ib0mutex.h | 1166 ++ storage/innobase/include/ibuf0ibuf.h | 199 +- storage/innobase/include/ibuf0ibuf.ic | 131 +- storage/innobase/include/lock0iter.h | 6 +- 
storage/innobase/include/lock0lock.h | 389 +- storage/innobase/include/lock0lock.ic | 49 +- storage/innobase/include/lock0prdt.h | 224 + storage/innobase/include/lock0priv.h | 1057 +- storage/innobase/include/lock0priv.ic | 364 +- storage/innobase/include/lock0types.h | 35 +- storage/innobase/include/log0log.h | 700 +- storage/innobase/include/log0log.ic | 194 +- storage/innobase/include/log0recv.h | 258 +- storage/innobase/include/log0recv.ic | 25 +- storage/innobase/include/log0types.h | 50 + storage/innobase/include/mach0data.h | 141 +- storage/innobase/include/mach0data.ic | 399 +- storage/innobase/include/mem0dbg.h | 150 - storage/innobase/include/mem0dbg.ic | 109 - storage/innobase/include/mem0mem.h | 461 +- storage/innobase/include/mem0mem.ic | 408 +- storage/innobase/include/mem0pool.h | 121 - storage/innobase/include/mem0pool.ic | 24 - storage/innobase/include/mtr0log.h | 130 +- storage/innobase/include/mtr0log.ic | 204 +- storage/innobase/include/mtr0mtr.h | 953 +- storage/innobase/include/mtr0mtr.ic | 391 +- storage/innobase/include/mtr0types.h | 268 +- storage/innobase/include/os0atomic.h | 320 + storage/innobase/include/os0atomic.ic | 215 + storage/innobase/include/os0event.h | 135 + storage/innobase/include/os0file.h | 2323 +-- storage/innobase/include/os0file.ic | 521 +- storage/innobase/include/os0once.h | 4 +- storage/innobase/include/os0proc.h | 51 +- storage/innobase/include/os0sync.h | 914 -- storage/innobase/include/os0sync.ic | 266 - storage/innobase/include/os0thread.h | 51 +- storage/innobase/include/page0cur.h | 167 +- storage/innobase/include/page0cur.ic | 127 +- storage/innobase/include/page0page.h | 315 +- storage/innobase/include/page0page.ic | 291 +- storage/innobase/include/page0size.h | 202 + storage/innobase/include/page0types.h | 65 +- storage/innobase/include/page0zip.h | 224 +- storage/innobase/include/page0zip.ic | 52 +- storage/innobase/include/pars0opt.h | 7 +- storage/innobase/include/pars0pars.h | 133 +- storage/innobase/include/pars0sym.h | 25 +- storage/innobase/include/que0que.h | 99 +- storage/innobase/include/que0que.ic | 21 +- storage/innobase/include/read0read.h | 250 +- storage/innobase/include/read0read.ic | 148 - storage/innobase/include/read0types.h | 305 +- storage/innobase/include/rem0cmp.h | 353 +- storage/innobase/include/rem0cmp.ic | 216 +- storage/innobase/include/rem0rec.h | 322 +- storage/innobase/include/rem0rec.ic | 191 +- storage/innobase/include/row0ext.h | 9 +- storage/innobase/include/row0ftsort.h | 36 +- storage/innobase/include/row0import.h | 6 +- storage/innobase/include/row0ins.h | 61 +- storage/innobase/include/row0log.h | 89 +- storage/innobase/include/row0log.ic | 18 +- storage/innobase/include/row0merge.h | 167 +- storage/innobase/include/row0mysql.h | 380 +- storage/innobase/include/row0purge.h | 27 +- storage/innobase/include/row0quiesce.h | 3 - storage/innobase/include/row0row.h | 78 +- storage/innobase/include/row0row.ic | 11 +- storage/innobase/include/row0sel.h | 197 +- storage/innobase/include/row0sel.ic | 46 +- storage/innobase/include/row0trunc.h | 433 + storage/innobase/include/row0uins.h | 6 +- storage/innobase/include/row0umod.h | 3 +- storage/innobase/include/row0undo.h | 16 +- storage/innobase/include/row0upd.h | 210 +- storage/innobase/include/row0upd.ic | 67 +- storage/innobase/include/row0vers.h | 47 +- storage/innobase/include/sess0sess.h | 146 + storage/innobase/include/srv0conc.h | 27 +- storage/innobase/include/srv0mon.h | 138 +- storage/innobase/include/srv0mon.ic | 12 +- 
storage/innobase/include/srv0srv.h | 356 +- storage/innobase/include/srv0start.h | 96 +- storage/innobase/include/sync0arr.h | 67 +- storage/innobase/include/sync0arr.ic | 51 +- storage/innobase/include/sync0debug.h | 105 + storage/innobase/include/sync0policy.h | 550 + storage/innobase/include/sync0policy.ic | 100 + storage/innobase/include/sync0rw.h | 576 +- storage/innobase/include/sync0rw.ic | 492 +- storage/innobase/include/sync0sync.h | 794 +- storage/innobase/include/sync0sync.ic | 410 - storage/innobase/include/sync0types.h | 1232 +- storage/innobase/include/trx0i_s.h | 35 +- storage/innobase/include/trx0purge.h | 345 +- storage/innobase/include/trx0purge.ic | 27 +- storage/innobase/include/trx0rec.h | 128 +- storage/innobase/include/trx0rec.ic | 26 +- storage/innobase/include/trx0roll.h | 123 +- storage/innobase/include/trx0roll.ic | 46 +- storage/innobase/include/trx0rseg.h | 187 +- storage/innobase/include/trx0rseg.ic | 94 +- storage/innobase/include/trx0sys.h | 323 +- storage/innobase/include/trx0sys.ic | 214 +- storage/innobase/include/trx0trx.h | 1030 +- storage/innobase/include/trx0trx.ic | 264 +- storage/innobase/include/trx0types.h | 223 +- storage/innobase/include/trx0undo.h | 263 +- storage/innobase/include/trx0undo.ic | 63 +- storage/innobase/include/trx0xa.h | 20 +- storage/innobase/include/univ.i | 277 +- storage/innobase/include/usr0sess.h | 9 +- storage/innobase/include/ut0bh.h | 152 - storage/innobase/include/ut0bh.ic | 125 - storage/innobase/include/ut0byte.h | 16 +- storage/innobase/include/ut0byte.ic | 18 +- storage/innobase/include/ut0counter.h | 102 +- storage/innobase/include/ut0crc32.h | 25 +- storage/innobase/include/ut0dbg.h | 158 +- storage/innobase/include/ut0list.h | 23 +- storage/innobase/include/ut0list.ic | 6 +- storage/innobase/include/ut0lst.h | 585 +- storage/innobase/include/ut0mem.h | 144 +- storage/innobase/include/ut0mem.ic | 58 +- storage/innobase/include/ut0mutex.h | 225 + storage/innobase/include/ut0mutex.ic | 108 + storage/innobase/include/ut0new.h | 922 ++ storage/innobase/include/ut0pool.h | 366 + storage/innobase/include/ut0rbt.h | 59 +- storage/innobase/include/ut0rnd.h | 29 +- storage/innobase/include/ut0rnd.ic | 38 +- storage/innobase/include/ut0stage.h | 594 + storage/innobase/include/ut0ut.h | 466 +- storage/innobase/include/ut0ut.ic | 105 +- storage/innobase/include/ut0vec.h | 55 +- storage/innobase/include/ut0vec.ic | 97 +- storage/innobase/include/ut0wqueue.h | 41 +- storage/innobase/innodb.cmake | 285 + storage/innobase/lock/lock0iter.cc | 9 +- storage/innobase/lock/lock0lock.cc | 9443 +++++++------ storage/innobase/lock/lock0prdt.cc | 1056 ++ storage/innobase/lock/lock0wait.cc | 105 +- storage/innobase/log/log0crypt.cc | 56 +- storage/innobase/log/log0log.cc | 2951 ++-- storage/innobase/log/log0recv.cc | 3606 ++--- storage/innobase/mach/mach0data.cc | 102 +- storage/innobase/mem/mem0dbg.cc | 1050 -- storage/innobase/mem/mem0mem.cc | 231 +- storage/innobase/mem/mem0pool.cc | 727 - storage/innobase/mtr/mtr0log.cc | 148 +- storage/innobase/mtr/mtr0mtr.cc | 1188 +- storage/innobase/os/os0event.cc | 550 + storage/innobase/os/os0file.cc | 11470 +++++++++------ storage/innobase/os/os0proc.cc | 146 +- storage/innobase/os/os0thread.cc | 245 +- storage/innobase/page/page0cur.cc | 1149 +- storage/innobase/page/page0page.cc | 996 +- storage/innobase/page/page0zip.cc | 861 +- storage/innobase/pars/lexyy.cc | 152 +- storage/innobase/pars/make_bison.sh | 3 +- storage/innobase/pars/make_flex.sh | 1 - storage/innobase/pars/pars0grm.cc | 5 +- 
storage/innobase/pars/pars0grm.y | 5 +- storage/innobase/pars/pars0lex.l | 5 +- storage/innobase/pars/pars0opt.cc | 59 +- storage/innobase/pars/pars0pars.cc | 284 +- storage/innobase/pars/pars0sym.cc | 43 +- storage/innobase/que/que0que.cc | 259 +- storage/innobase/read/read0read.cc | 793 +- storage/innobase/rem/rem0cmp.cc | 1656 +-- storage/innobase/rem/rem0rec.cc | 590 +- storage/innobase/row/row0ext.cc | 32 +- storage/innobase/row/row0ftsort.cc | 514 +- storage/innobase/row/row0import.cc | 1015 +- storage/innobase/row/row0ins.cc | 1387 +- storage/innobase/row/row0log.cc | 861 +- storage/innobase/row/row0merge.cc | 2321 ++- storage/innobase/row/row0mysql.cc | 3986 +++--- storage/innobase/row/row0purge.cc | 269 +- storage/innobase/row/row0quiesce.cc | 98 +- storage/innobase/row/row0row.cc | 471 +- storage/innobase/row/row0sel.cc | 2084 ++- storage/innobase/row/row0trunc.cc | 3060 ++++ storage/innobase/row/row0uins.cc | 120 +- storage/innobase/row/row0umod.cc | 296 +- storage/innobase/row/row0undo.cc | 116 +- storage/innobase/row/row0upd.cc | 1283 +- storage/innobase/row/row0vers.cc | 728 +- storage/innobase/srv/srv0conc.cc | 442 +- storage/innobase/srv/srv0mon.cc | 230 +- storage/innobase/srv/srv0srv.cc | 925 +- storage/innobase/srv/srv0start.cc | 3088 ++-- storage/innobase/sync/sync0arr.cc | 1103 +- storage/innobase/sync/sync0debug.cc | 1808 +++ storage/innobase/sync/sync0rw.cc | 1056 +- storage/innobase/sync/sync0sync.cc | 1671 +-- storage/innobase/trx/trx0i_s.cc | 350 +- storage/innobase/trx/trx0purge.cc | 1119 +- storage/innobase/trx/trx0rec.cc | 1282 +- storage/innobase/trx/trx0roll.cc | 656 +- storage/innobase/trx/trx0rseg.cc | 276 +- storage/innobase/trx/trx0sys.cc | 683 +- storage/innobase/trx/trx0trx.cc | 2422 +++- storage/innobase/trx/trx0undo.cc | 813 +- storage/innobase/usr/usr0sess.cc | 14 +- storage/innobase/ut/ut0bh.cc | 159 - storage/innobase/ut/ut0crc32.cc | 706 +- storage/innobase/ut/ut0dbg.cc | 83 +- storage/innobase/ut/ut0list.cc | 23 +- storage/innobase/ut/ut0mem.cc | 424 +- storage/innobase/ut/ut0new.cc | 227 + storage/innobase/ut/ut0rbt.cc | 87 +- storage/innobase/ut/ut0rnd.cc | 7 +- storage/innobase/ut/ut0ut.cc | 477 +- storage/innobase/ut/ut0vec.cc | 4 +- storage/innobase/ut/ut0wqueue.cc | 35 +- .../mysql-test/tokudb/r/type_bit_innodb.result | 2 +- storage/xtradb/CMakeLists.txt | 2 +- storage/xtradb/handler/ha_innodb.cc | 2 +- storage/xtradb/row/row0upd.cc | 6 +- 838 files changed, 174048 insertions(+), 83754 deletions(-) create mode 100644 include/dur_prop.h create mode 100644 include/my_icp.h create mode 100644 include/mysql/psi/psi_base.h create mode 100644 include/mysql/psi/psi_memory.h create mode 100644 mysql-test/include/expect_crash.inc create mode 100644 mysql-test/include/have_innodb_4k.inc create mode 100644 mysql-test/include/have_innodb_8k.inc create mode 100644 mysql-test/include/have_innodb_max_16k.inc create mode 100644 mysql-test/include/have_innodb_zip.inc create mode 100644 mysql-test/include/have_numa.inc create mode 100644 mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result create mode 100644 mysql-test/suite/encryption/t/innodb_encryption_row_compressed.opt create mode 100644 mysql-test/suite/encryption/t/innodb_encryption_row_compressed.test create mode 100644 mysql-test/suite/handler/disabled.def delete mode 100644 mysql-test/suite/innodb/r/innodb_blob_unrecoverable_crash.result create mode 100644 mysql-test/suite/innodb/t/auto_increment_dup.opt create mode 100644 mysql-test/suite/innodb/t/innodb-mdev-7513-master.opt 
delete mode 100644 mysql-test/suite/innodb/t/innodb_blob_unrecoverable_crash.test create mode 100644 mysql-test/suite/innodb_zip/include/innodb-wl6045.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_create_tab_indx.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_dml_ops.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_fetch_records.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_load_data.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_stats_comp_index.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_stats_restart.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_temp_table_dml.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_wl6501_crash_stripped.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_wl6501_error.inc create mode 100644 mysql-test/suite/innodb_zip/include/innodb_wl6501_scale.inc create mode 100644 mysql-test/suite/innodb_zip/r/16k.result create mode 100644 mysql-test/suite/innodb_zip/r/4k.result create mode 100644 mysql-test/suite/innodb_zip/r/8k.result create mode 100644 mysql-test/suite/innodb_zip/r/bug36169.result create mode 100644 mysql-test/suite/innodb_zip/r/bug36172.result create mode 100644 mysql-test/suite/innodb_zip/r/bug52745.result create mode 100644 mysql-test/suite/innodb_zip/r/bug53591.result create mode 100644 mysql-test/suite/innodb_zip/r/bug56680.result create mode 100644 mysql-test/suite/innodb_zip/r/cmp_drop_table.result create mode 100644 mysql-test/suite/innodb_zip/r/cmp_per_index.result create mode 100644 mysql-test/suite/innodb_zip/r/create_options.result create mode 100644 mysql-test/suite/innodb_zip/r/index_large_prefix.result create mode 100644 mysql-test/suite/innodb_zip/r/index_large_prefix_4k.result create mode 100644 mysql-test/suite/innodb_zip/r/index_large_prefix_8k.result create mode 100644 mysql-test/suite/innodb_zip/r/innochecksum.result create mode 100644 mysql-test/suite/innodb_zip/r/innochecksum_2.result create mode 100644 mysql-test/suite/innodb_zip/r/innochecksum_3.result delete mode 100644 mysql-test/suite/innodb_zip/r/innodb_prefix_index_liftedlimit.result create mode 100644 mysql-test/suite/innodb_zip/r/large_blob.result create mode 100644 mysql-test/suite/innodb_zip/r/restart.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6344_compress_level.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6470_1.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6470_2.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6501_1.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6501_crash_3.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6501_crash_4.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6501_crash_5.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6501_scale_1.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6560.result create mode 100644 mysql-test/suite/innodb_zip/r/wl6915_1.result create mode 100644 mysql-test/suite/innodb_zip/t/16k.test create mode 100644 mysql-test/suite/innodb_zip/t/4k.test create mode 100644 mysql-test/suite/innodb_zip/t/8k.test create mode 100644 mysql-test/suite/innodb_zip/t/bug36169.test create mode 100644 mysql-test/suite/innodb_zip/t/bug36172.test create mode 100644 mysql-test/suite/innodb_zip/t/bug52745.test create mode 100644 mysql-test/suite/innodb_zip/t/bug53591.test create mode 100644 
mysql-test/suite/innodb_zip/t/bug56680.test create mode 100644 mysql-test/suite/innodb_zip/t/cmp_drop_table-master.opt create mode 100644 mysql-test/suite/innodb_zip/t/cmp_drop_table.test create mode 100644 mysql-test/suite/innodb_zip/t/cmp_per_index.test create mode 100644 mysql-test/suite/innodb_zip/t/create_options.test create mode 100644 mysql-test/suite/innodb_zip/t/disabled.def create mode 100644 mysql-test/suite/innodb_zip/t/index_large_prefix.test create mode 100644 mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test create mode 100644 mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test create mode 100644 mysql-test/suite/innodb_zip/t/innochecksum.test create mode 100644 mysql-test/suite/innodb_zip/t/innochecksum_2.test create mode 100644 mysql-test/suite/innodb_zip/t/innochecksum_3.test create mode 100644 mysql-test/suite/innodb_zip/t/innodb_bug36169.opt delete mode 100644 mysql-test/suite/innodb_zip/t/innodb_prefix_index_liftedlimit.test create mode 100644 mysql-test/suite/innodb_zip/t/large_blob-master.opt create mode 100644 mysql-test/suite/innodb_zip/t/large_blob.test create mode 100644 mysql-test/suite/innodb_zip/t/restart.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6344_compress_level.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6470_1.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6470_2.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6501_1.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6501_crash_3.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6501_crash_4.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6501_crash_5.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6501_scale_1.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6560.test create mode 100644 mysql-test/suite/innodb_zip/t/wl6915_1.test create mode 100644 mysql-test/suite/sys_vars/r/innodb_adaptive_hash_index_parts_basic.result delete mode 100644 mysql-test/suite/sys_vars/r/innodb_additional_mem_pool_size_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_buffer_pool_chunk_size_basic.result delete mode 100644 mysql-test/suite/sys_vars/r/innodb_buffer_pool_filename_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_default_row_format_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_disable_resize_buffer_pool_debug_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_fill_factor_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_flush_sync_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_log_checksums_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_log_write_ahead_size_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_max_undo_log_size_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_merge_threshold_set_all_debug_basic.result delete mode 100644 mysql-test/suite/sys_vars/r/innodb_mirrored_log_groups_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_numa_interleave_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_page_cleaners_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_purge_rseg_truncate_frequency_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_sync_debug_basic.result create mode 100644 mysql-test/suite/sys_vars/r/innodb_temp_data_file_path_basic.result create mode 100644 
mysql-test/suite/sys_vars/r/innodb_undo_log_truncate_basic.result delete mode 100644 mysql-test/suite/sys_vars/r/innodb_use_sys_malloc_basic.result create mode 100644 mysql-test/suite/sys_vars/t/innodb_adaptive_hash_index_parts_basic.test delete mode 100644 mysql-test/suite/sys_vars/t/innodb_additional_mem_pool_size_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_buffer_pool_chunk_size_basic.test delete mode 100644 mysql-test/suite/sys_vars/t/innodb_buffer_pool_filename_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic-master.opt create mode 100644 mysql-test/suite/sys_vars/t/innodb_default_row_format_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_disable_resize_buffer_pool_debug_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_file_per_table_basic-master.opt create mode 100644 mysql-test/suite/sys_vars/t/innodb_fill_factor_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_flush_sync_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_log_checksums_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_log_write_ahead_size_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_max_undo_log_size_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_merge_threshold_set_all_debug_basic.test delete mode 100644 mysql-test/suite/sys_vars/t/innodb_mirrored_log_groups_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_numa_interleave_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_page_cleaners_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_purge_rseg_truncate_frequency_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_sync_debug_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_temp_data_file_path_basic.test create mode 100644 mysql-test/suite/sys_vars/t/innodb_undo_log_truncate_basic.test delete mode 100644 mysql-test/suite/sys_vars/t/innodb_use_sys_malloc_basic.test create mode 100644 mysql-test/t/partition_exchange-master.opt create mode 100644 mysql-test/t/partition_innodb-master.opt create mode 100644 mysql-test/t/row-checksum-master.opt create mode 100755 mysql-test/thou_shalt_not_kill.pm delete mode 100644 storage/innobase/Doxyfile create mode 100644 storage/innobase/btr/btr0bulk.cc delete mode 100644 storage/innobase/dyn/dyn0dyn.cc create mode 100644 storage/innobase/fsp/fsp0file.cc create mode 100644 storage/innobase/fsp/fsp0space.cc create mode 100644 storage/innobase/fsp/fsp0sysspace.cc create mode 100644 storage/innobase/fts/fts0plugin.cc create mode 100644 storage/innobase/gis/gis0geo.cc create mode 100644 storage/innobase/gis/gis0rtree.cc create mode 100644 storage/innobase/gis/gis0sea.cc delete mode 100644 storage/innobase/ha_innodb.def create mode 100644 storage/innobase/handler/ha_innopart.cc create mode 100644 storage/innobase/handler/ha_innopart.h create mode 100644 storage/innobase/handler/handler0alter_innopart.cc create mode 100644 storage/innobase/include/btr0bulk.h create mode 100644 storage/innobase/include/dyn0buf.h delete mode 100644 storage/innobase/include/dyn0dyn.h delete mode 100644 storage/innobase/include/dyn0dyn.ic create mode 100644 storage/innobase/include/dyn0types.h create mode 100644 storage/innobase/include/fsp0file.h create mode 100644 storage/innobase/include/fsp0space.h create mode 100644 storage/innobase/include/fsp0sysspace.h create mode 100644 storage/innobase/include/fts0plugin.h create mode 100644 
storage/innobase/include/fts0tokenize.h create mode 100644 storage/innobase/include/gis0geo.h create mode 100644 storage/innobase/include/gis0rtree.h create mode 100644 storage/innobase/include/gis0rtree.ic create mode 100644 storage/innobase/include/gis0type.h create mode 100644 storage/innobase/include/ib0mutex.h create mode 100644 storage/innobase/include/lock0prdt.h create mode 100644 storage/innobase/include/log0types.h delete mode 100644 storage/innobase/include/mem0dbg.h delete mode 100644 storage/innobase/include/mem0dbg.ic delete mode 100644 storage/innobase/include/mem0pool.h delete mode 100644 storage/innobase/include/mem0pool.ic create mode 100644 storage/innobase/include/os0atomic.h create mode 100644 storage/innobase/include/os0atomic.ic create mode 100644 storage/innobase/include/os0event.h delete mode 100644 storage/innobase/include/os0sync.h delete mode 100644 storage/innobase/include/os0sync.ic create mode 100644 storage/innobase/include/page0size.h delete mode 100644 storage/innobase/include/read0read.ic create mode 100644 storage/innobase/include/row0trunc.h create mode 100644 storage/innobase/include/sess0sess.h create mode 100644 storage/innobase/include/sync0debug.h create mode 100644 storage/innobase/include/sync0policy.h create mode 100644 storage/innobase/include/sync0policy.ic delete mode 100644 storage/innobase/include/sync0sync.ic delete mode 100644 storage/innobase/include/ut0bh.h delete mode 100644 storage/innobase/include/ut0bh.ic create mode 100644 storage/innobase/include/ut0mutex.h create mode 100644 storage/innobase/include/ut0mutex.ic create mode 100644 storage/innobase/include/ut0new.h create mode 100644 storage/innobase/include/ut0pool.h create mode 100644 storage/innobase/include/ut0stage.h create mode 100644 storage/innobase/innodb.cmake create mode 100644 storage/innobase/lock/lock0prdt.cc delete mode 100644 storage/innobase/mem/mem0dbg.cc delete mode 100644 storage/innobase/mem/mem0pool.cc create mode 100644 storage/innobase/os/os0event.cc create mode 100644 storage/innobase/row/row0trunc.cc create mode 100644 storage/innobase/sync/sync0debug.cc delete mode 100644 storage/innobase/ut/ut0bh.cc create mode 100644 storage/innobase/ut/ut0new.cc diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 36072ebaa7e..01d654dba53 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -170,7 +170,7 @@ debug_cflags="-DEXTRA_DEBUG -DSAFE_MUTEX -DSAFEMALLOC" error_inject="--with-error-inject " # # Base C++ flags for all builds -base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti" +base_cxxflags="-felide-constructors -fexceptions -fno-rtti" # # Flags for optimizing builds. # Be as fast as we can be without losing our ability to backtrace. diff --git a/configure.cmake b/configure.cmake index 4470bee3223..8dffb7efd09 100644 --- a/configure.cmake +++ b/configure.cmake @@ -56,7 +56,7 @@ IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") # MySQL "canonical" GCC flags. At least -fno-rtti flag affects # ABI and cannot be simply removed. 
SET(CMAKE_CXX_FLAGS - "${CMAKE_CXX_FLAGS} -fno-exceptions -fno-rtti") + "${CMAKE_CXX_FLAGS} -fno-rtti") IF (CMAKE_EXE_LINKER_FLAGS MATCHES " -static " OR CMAKE_EXE_LINKER_FLAGS MATCHES " -static$") diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt index 24090db6135..12f3343cf69 100644 --- a/extra/CMakeLists.txt +++ b/extra/CMakeLists.txt @@ -77,7 +77,9 @@ IF(WITH_INNOBASE_STORAGE_ENGINE OR WITH_XTRADB_STORAGE_ENGINE) ../storage/innobase/buf/buf0checksum.cc ../storage/innobase/ut/ut0crc32.cc ../storage/innobase/ut/ut0ut.cc + ../storage/innobase/buf/buf0buf.cc ../storage/innobase/page/page0zip.cc + ../storage/innobase/os/os0file.cc ) IF(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le") diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc index ae6ffdb582e..dd97564c8a5 100644 --- a/extra/innochecksum.cc +++ b/extra/innochecksum.cc @@ -1,6 +1,5 @@ /* - Copyright (c) 2005, 2012, Oracle and/or its affiliates. - Copyright (c) 2014, 2015, MariaDB Corporation. + Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -26,43 +25,36 @@ Published with a permission. */ +#include #include #include #include #include #include #include -#ifndef __WIN__ +#ifdef HAVE_UNISTD_H # include #endif #include #include -#include /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ +#include /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ /* Only parts of these files are included from the InnoDB codebase. The parts not included are excluded by #ifndef UNIV_INNOCHECKSUM. */ -#include "univ.i" /* include all of this */ - -#define FLST_BASE_NODE_SIZE (4 + 2 * FIL_ADDR_SIZE) -#define FLST_NODE_SIZE (2 * FIL_ADDR_SIZE) -#define FSEG_PAGE_DATA FIL_PAGE_DATA -#define MLOG_1BYTE (1) - -#include "ut0ut.h" -#include "ut0byte.h" -#include "mach0data.h" -#include "fsp0types.h" -#include "rem0rec.h" -#include "buf0checksum.h" /* buf_calc_page_*() */ -#include "fil0fil.h" /* FIL_* */ -#include "page0page.h" /* PAGE_* */ -#include "page0zip.h" /* page_zip_*() */ -#include "trx0undo.h" /* TRX_* */ -#include "fsp0fsp.h" /* fsp_flags_get_page_size() & - fsp_flags_get_zip_size() */ -#include "ut0crc32.h" /* ut_crc32_init() */ -#include "fsp0pagecompress.h" /* fil_get_compression_alg_name */ +#include "univ.i" /* include all of this */ +#include "page0size.h" /* page_size_t */ +#include "page0zip.h" /* page_zip_calc_checksum() */ +#include "page0page.h" /* PAGE_* */ +#include "trx0undo.h" /* TRX_UNDO_* */ +#include "fut0lst.h" /* FLST_NODE_SIZE */ +#include "buf0checksum.h" /* buf_calc_page_*() */ +#include "fil0fil.h" /* FIL_* */ +#include "os0file.h" +#include "fsp0fsp.h" /* fsp_flags_get_page_size() & + fsp_flags_get_zip_size() */ +#include "mach0data.h" /* mach_read_from_4() */ +#include "ut0crc32.h" /* ut_crc32_init() */ #ifdef UNIV_NONINL # include "fsp0fsp.ic" @@ -71,148 +63,851 @@ The parts not included are excluded by #ifndef UNIV_INNOCHECKSUM. */ #endif /* Global variables */ -static my_bool verbose; -static my_bool debug; -static my_bool skip_corrupt; -static my_bool just_count; -static ulong start_page; -static ulong end_page; -static ulong do_page; -static my_bool use_end_page; -static my_bool do_one_page; -static my_bool per_page_details; -static my_bool do_leaf; -static ulong n_merge; -ulong srv_page_size; /* replaces declaration in srv0srv.c */ -static ulong physical_page_size; /* Page size in bytes on disk. */ -static ulong logical_page_size; /* Page size when uncompressed. 
*/ -static bool compressed= false; /* Is tablespace compressed */ - -int n_undo_state_active; -int n_undo_state_cached; -int n_undo_state_to_free; -int n_undo_state_to_purge; -int n_undo_state_prepared; -int n_undo_state_other; -int n_undo_insert, n_undo_update, n_undo_other; -int n_bad_checksum; -int n_fil_page_index; -int n_fil_page_undo_log; -int n_fil_page_inode; -int n_fil_page_ibuf_free_list; -int n_fil_page_allocated; -int n_fil_page_ibuf_bitmap; -int n_fil_page_type_sys; -int n_fil_page_type_trx_sys; -int n_fil_page_type_fsp_hdr; -int n_fil_page_type_allocated; -int n_fil_page_type_xdes; -int n_fil_page_type_blob; -int n_fil_page_type_zblob; -int n_fil_page_type_other; -int n_fil_page_type_page_compressed; -int n_fil_page_type_page_compressed_encrypted; - -int n_fil_page_max_index_id; - -#define SIZE_RANGES_FOR_PAGE 10 -#define NUM_RETRIES 3 -#define DEFAULT_RETRY_DELAY 1000000 - -struct per_page_stats { - ulint n_recs; - ulint data_size; - ulint left_page_no; - ulint right_page_no; - per_page_stats(ulint n, ulint data, ulint left, ulint right) : - n_recs(n), data_size(data), left_page_no(left), right_page_no(right) {} - per_page_stats() : n_recs(0), data_size(0), left_page_no(0), right_page_no(0) {} +static bool verbose; +static bool just_count; +static uintmax_t start_page; +static uintmax_t end_page; +static uintmax_t do_page; +static bool use_end_page; +static bool do_one_page; +/* replaces declaration in srv0srv.c */ +ulong srv_page_size; +page_size_t univ_page_size(0, 0, false); +extern ulong srv_checksum_algorithm; +/* Current page number (0 based). */ +uintmax_t cur_page_num; +/* Skip the checksum verification. */ +static bool no_check; +/* Enabled for strict checksum verification. */ +bool strict_verify = 0; +/* Enabled for rewrite checksum. */ +static bool do_write; +/* Mismatches count allowed (0 by default). */ +static uintmax_t allow_mismatches; +static bool page_type_summary; +static bool page_type_dump; +/* Store filename for page-type-dump option. */ +char* page_dump_filename = 0; +/* skip the checksum verification & rewrite if page is doublewrite buffer. */ +static bool skip_page = 0; +const char *dbug_setting = "FALSE"; +char* log_filename = NULL; +/* User defined filename for logging. */ +FILE* log_file = NULL; +/* Enabled for log write option. */ +static bool is_log_enabled = false; + +#ifndef _WIN32 +/* advisory lock for non-window system. */ +struct flock lk; +#endif /* _WIN32 */ + +/* Strict check algorithm name. */ +static ulong strict_check; +/* Rewrite checksum algorithm name. */ +static ulong write_check; + +/* Innodb page type. */ +struct innodb_page_type { + int n_undo_state_active; + int n_undo_state_cached; + int n_undo_state_to_free; + int n_undo_state_to_purge; + int n_undo_state_prepared; + int n_undo_state_other; + int n_undo_insert; + int n_undo_update; + int n_undo_other; + int n_fil_page_index; + int n_fil_page_undo_log; + int n_fil_page_inode; + int n_fil_page_ibuf_free_list; + int n_fil_page_ibuf_bitmap; + int n_fil_page_type_sys; + int n_fil_page_type_trx_sys; + int n_fil_page_type_fsp_hdr; + int n_fil_page_type_allocated; + int n_fil_page_type_xdes; + int n_fil_page_type_blob; + int n_fil_page_type_zblob; + int n_fil_page_type_other; + int n_fil_page_type_zblob2; +} page_type; + +/* Possible values for "--strict-check" for strictly verify checksum +and "--write" for rewrite checksum. 
*/ +static const char *innochecksum_algorithms[] = { + "crc32", + "crc32", + "innodb", + "innodb", + "none", + "none", + NullS }; -struct per_index_stats { - unsigned long long pages; - unsigned long long leaf_pages; - ulint first_leaf_page; - ulint count; - ulint free_pages; - ulint max_data_size; - unsigned long long total_n_recs; - unsigned long long total_data_bytes; - - /*!< first element for empty pages, - last element for pages with more than logical_page_size */ - unsigned long long pages_in_size_range[SIZE_RANGES_FOR_PAGE+2]; - - std::map leaves; - - per_index_stats():pages(0), leaf_pages(0), first_leaf_page(0), - count(0), free_pages(0), max_data_size(0), total_n_recs(0), - total_data_bytes(0) - { - memset(pages_in_size_range, 0, sizeof(pages_in_size_range)); - } +/* Used to define an enumerate type of the "innochecksum algorithm". */ +static TYPELIB innochecksum_algorithms_typelib = { + array_elements(innochecksum_algorithms)-1,"", + innochecksum_algorithms, NULL }; -std::map index_ids; +/** Get the page size of the filespace from the filespace header. +@param[in] buf buffer used to read the page. +@return page size */ +static +const page_size_t +get_page_size( + byte* buf) +{ + const ulint flags = mach_read_from_4(buf + FIL_PAGE_DATA + + FSP_SPACE_FLAGS); + + const ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); + + if (ssize == 0) { + srv_page_size = UNIV_PAGE_SIZE_ORIG; + } else { + srv_page_size = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); + } -bool encrypted = false; + univ_page_size.copy_from( + page_size_t(srv_page_size, srv_page_size, false)); -/* Get the page size of the filespace from the filespace header. */ + return(page_size_t(flags)); +} + +/** Decompress a page +@param[in,out] buf Page read from disk, uncompressed data will + also be copied to this page +@param[in, out] scratch Page to use for temporary decompress +@param[in] page_size scratch physical size +@return true if decompress succeeded */ static -my_bool -get_page_size( -/*==========*/ - FILE* f, /*!< in: file pointer, must be open - and set to start of file */ - byte* buf, /*!< in: buffer used to read the page */ - ulong* logical_page_size, /*!< out: Logical/Uncompressed page size */ - ulong* physical_page_size) /*!< out: Physical/Commpressed page size */ +bool page_decompress( + byte* buf, + byte* scratch, + page_size_t page_size) { - ulong flags; + dberr_t err=DB_SUCCESS; - int bytes= fread(buf, 1, UNIV_PAGE_SIZE_MIN, f); + /* Set the dblwr recover flag to false. */ + /* JAN: TODO: Decompress + err = os_file_decompress_page( + false, buf, scratch, page_size.physical()); + */ - if (ferror(f)) - { - perror("Error reading file header"); - return FALSE; - } + return(err == DB_SUCCESS); +} - if (bytes != UNIV_PAGE_SIZE_MIN) - { - fprintf(stderr, "Error; Was not able to read the minimum page size "); - fprintf(stderr, "of %d bytes. Bytes read was %d\n", UNIV_PAGE_SIZE_MIN, bytes); - return FALSE; - } +#ifdef _WIN32 +/***********************************************//* + @param [in] error error no. from the getLastError(). - rewind(f); + @retval error message corresponding to error no. 
+*/ +static +char* +error_message( + int error) +{ + static char err_msg[1024] = {'\0'}; + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPTSTR)err_msg, sizeof(err_msg), NULL ); - flags = mach_read_from_4(buf + FIL_PAGE_DATA + FSP_SPACE_FLAGS); + return (err_msg); +} +#endif /* _WIN32 */ - /* srv_page_size is used by InnoDB code as UNIV_PAGE_SIZE */ - srv_page_size = *logical_page_size = fsp_flags_get_page_size(flags); +/***********************************************//* + @param>>_______[in] name>_____name of file. + @retval file pointer; file pointer is NULL when error occured. +*/ - /* fsp_flags_get_zip_size() will return zero if not compressed. */ - *physical_page_size = fsp_flags_get_zip_size(flags); - if (*physical_page_size == 0) - { - *physical_page_size= *logical_page_size; - } - else - { - compressed= true; - } +FILE* +open_file( + const char* name) +{ + int fd; /* file descriptor. */ + FILE* fil_in; +#ifdef _WIN32 + HANDLE hFile; /* handle to open file. */ + DWORD access; /* define access control */ + int flags = 0; /* define the mode for file + descriptor */ + + if (do_write) { + access = GENERIC_READ | GENERIC_WRITE; + flags = _O_RDWR | _O_BINARY; + } else { + access = GENERIC_READ; + flags = _O_RDONLY | _O_BINARY; + } + /* CreateFile() also provide advisory lock with the usage of + access and share mode of the file.*/ + hFile = CreateFile( + (LPCTSTR) name, access, 0L, NULL, + OPEN_EXISTING, NULL, NULL); + + if (hFile == INVALID_HANDLE_VALUE) { + /* print the error message. */ + fprintf(stderr, "Filename::%s %s\n", + error_message(GetLastError())); + + return (NULL); + } + + /* get the file descriptor. */ + fd= _open_osfhandle((intptr_t)hFile, flags); +#else /* _WIN32 */ + + int create_flag; + /* define the advisory lock and open file mode. */ + if (do_write) { + create_flag = O_RDWR; + lk.l_type = F_WRLCK; + } + else { + create_flag = O_RDONLY; + lk.l_type = F_RDLCK; + } + + fd = open(name, create_flag); + + lk.l_whence = SEEK_SET; + lk.l_start = lk.l_len = 0; + + if (fcntl(fd, F_SETLK, &lk) == -1) { + fprintf(stderr, "Error: Unable to lock file::" + " %s\n", name); + perror("fcntl"); + return (NULL); + } +#endif /* _WIN32 */ + if (do_write) { + fil_in = fdopen(fd, "rb+"); + } else { + fil_in = fdopen(fd, "rb"); + } - return TRUE; + return (fil_in); } +/************************************************************//* + Read the content of file + + @param [in,out] buf read the file in buffer + @param [in] partial_page_read enable when to read the + remaining buffer for first page. + @param [in] physical_page_size Physical/Commpressed page size. + @param [in,out] fil_in file pointer created for the + tablespace. + @retval no. of bytes read. +*/ +ulong read_file( + byte* buf, + bool partial_page_read, + ulong physical_page_size, + FILE* fil_in) +{ + ulong bytes = 0; -/* command line argument to do page checks (that's it) */ -/* another argument to specify page ranges... seek to right spot and go from there */ + DBUG_ASSERT(physical_page_size >= UNIV_ZIP_SIZE_MIN); -static struct my_option innochecksum_options[] = + if (partial_page_read) { + buf += UNIV_ZIP_SIZE_MIN; + physical_page_size -= UNIV_ZIP_SIZE_MIN; + bytes = UNIV_ZIP_SIZE_MIN; + } + + bytes += ulong(fread(buf, 1, physical_page_size, fil_in)); + + return bytes; +} + +/** Check if page is corrupted or not. +@param[in] buf page frame +@param[in] page_size page size +@retval true if page is corrupted otherwise false. 
*/ +static +bool +is_page_corrupted( + const byte* buf, + const page_size_t& page_size) { + + /* enable if page is corrupted. */ + bool is_corrupted; + /* use to store LSN values. */ + ulint logseq; + ulint logseqfield; + + if (!page_size.is_compressed()) { + /* check the stored log sequence numbers + for uncompressed tablespace. */ + logseq = mach_read_from_4(buf + FIL_PAGE_LSN + 4); + logseqfield = mach_read_from_4( + buf + page_size.logical() - + FIL_PAGE_END_LSN_OLD_CHKSUM + 4); + + if (is_log_enabled) { + fprintf(log_file, + "page::%lu log sequence number:first = %lu;" + " second = %lu\n", + cur_page_num, logseq, logseqfield); + if (logseq != logseqfield) { + fprintf(log_file, + "Fail; page %lu invalid (fails log " + "sequence number check)\n", + cur_page_num); + } + } + } + + is_corrupted = buf_page_is_corrupted( + true, buf, page_size, false, cur_page_num, strict_verify, + is_log_enabled, log_file); + + return(is_corrupted); +} + +/********************************************//* + Check if page is doublewrite buffer or not. + @param [in] page buffer page + + @retval true if page is doublewrite buffer otherwise false. +*/ +static +bool +is_page_doublewritebuffer( + const byte* page) +{ + if ((cur_page_num >= FSP_EXTENT_SIZE) + && (cur_page_num < FSP_EXTENT_SIZE * 3)) { + /* page is doublewrite buffer. */ + return (true); + } + + return (false); +} + +/*******************************************************//* +Check if page is empty or not. + @param [in] page page to checked for empty. + @param [in] len size of page. + + @retval true if page is empty. + @retval false if page is not empty. +*/ +static +bool +is_page_empty( + const byte* page, + size_t len) +{ + while (len--) { + if (*page++) { + return (false); + } + } + return (true); +} + +/********************************************************************//** +Rewrite the checksum for the page. +@param [in/out] page page buffer +@param [in] physical_page_size page size in bytes on disk. +@param [in] iscompressed Is compressed/Uncompressed Page. + +@retval true : do rewrite +@retval false : skip the rewrite as checksum stored match with + calculated or page is doublwrite buffer. +*/ + +bool +update_checksum( + byte* page, + ulong physical_page_size, + bool iscompressed) +{ + ib_uint32_t checksum = 0; + byte stored1[4]; /* get FIL_PAGE_SPACE_OR_CHKSUM field checksum */ + byte stored2[4]; /* get FIL_PAGE_END_LSN_OLD_CHKSUM field checksum */ + + ut_ad(page); + /* If page is doublewrite buffer, skip the rewrite of checksum. */ + if (skip_page) { + return (false); + } + + memcpy(stored1, page + FIL_PAGE_SPACE_OR_CHKSUM, 4); + memcpy(stored2, page + physical_page_size - + FIL_PAGE_END_LSN_OLD_CHKSUM, 4); + + /* Check if page is empty, exclude the checksum field */ + if (is_page_empty(page + 4, physical_page_size - 12) + && is_page_empty(page + physical_page_size - 4, 4)) { + + memset(page + FIL_PAGE_SPACE_OR_CHKSUM, 0, 4); + memset(page + physical_page_size - + FIL_PAGE_END_LSN_OLD_CHKSUM, 0, 4); + + goto func_exit; + } + + if (iscompressed) { + /* page is compressed */ + checksum = page_zip_calc_checksum( + page, physical_page_size, + static_cast(write_check)); + + mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); + if (is_log_enabled) { + fprintf(log_file, "page::%lu; Updated checksum =" + " %u\n", cur_page_num, checksum); + } + + } else { + /* page is uncompressed. 
*/ + + /* Store the new formula checksum */ + switch ((srv_checksum_algorithm_t) write_check) { + + case SRV_CHECKSUM_ALGORITHM_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: + checksum = buf_calc_page_crc32(page); + break; + + case SRV_CHECKSUM_ALGORITHM_INNODB: + case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: + checksum = (ib_uint32_t) + buf_calc_page_new_checksum(page); + break; + + case SRV_CHECKSUM_ALGORITHM_NONE: + case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: + checksum = BUF_NO_CHECKSUM_MAGIC; + break; + /* no default so the compiler will emit a warning if new + enum is added and not handled here */ + } + + mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); + if (is_log_enabled) { + fprintf(log_file, "page::%lu; Updated checksum field1" + " = %u\n", cur_page_num, checksum); + } + + if (write_check == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB + || write_check == SRV_CHECKSUM_ALGORITHM_INNODB) { + checksum = (ib_uint32_t) + buf_calc_page_old_checksum(page); + } + + mach_write_to_4(page + physical_page_size - + FIL_PAGE_END_LSN_OLD_CHKSUM,checksum); + + if (is_log_enabled) { + fprintf(log_file, "page::%lu ; Updated checksum " + "field2 = %u\n", cur_page_num, checksum); + } + + } + + func_exit: + /* The following code is to check the stored checksum with the + calculated checksum. If it matches, then return FALSE to skip + the rewrite of checksum, otherwise return TRUE. */ + if (iscompressed) { + if (!memcmp(stored1, page + FIL_PAGE_SPACE_OR_CHKSUM, 4)) { + return (false); + } + return (true); + } + + if (!memcmp(stored1, page + FIL_PAGE_SPACE_OR_CHKSUM, 4) + && !memcmp(stored2, page + physical_page_size - + FIL_PAGE_END_LSN_OLD_CHKSUM, 4)) { + return (false); + + } + + return (true); +} + +/** + Write the content to the file +@param[in] filename name of the file. +@param[in,out] file file pointer where content + have to be written +@param[in] buf file buffer read +@param[in] compressed Enabled if tablespace is + compressed. +@param[in,out] pos current file position. +@param[in] page_size page size in bytes on disk. + +@retval true if successfully written +@retval false if a non-recoverable error occurred +*/ +static +bool +write_file( + const char* filename, + FILE* file, + byte* buf, + bool compressed, + fpos_t* pos, + ulong page_size) +{ + bool do_update; + + do_update = update_checksum(buf, page_size, compressed); + + if (file != stdin) { + if (do_update) { + /* Set the previous file pointer position + saved in pos to current file position. */ + if (0 != fsetpos(file, pos)) { + perror("fsetpos"); + return(false); + } + } else { + /* Store the current file position in pos */ + if (0 != fgetpos(file, pos)) { + perror("fgetpos"); + return(false); + } + return(true); + } + } + + if (page_size + != fwrite(buf, 1, page_size, file == stdin ? stdout : file)) { + fprintf(stderr, "Failed to write page %lu to %s: %s\n", + cur_page_num, filename, strerror(errno)); + + return(false); + } + if (file != stdin) { + fflush(file); + /* Store the current file position in pos */ + if (0 != fgetpos(file, pos)) { + perror("fgetpos"); + return(false); + } + } + + return(true); +} + +/* +Parse the page and collect/dump the information about page type +@param [in] page buffer page +@param [in] file file for diagnosis. +*/ +void +parse_page( + const byte* page, + FILE* file) +{ + unsigned long long id; + ulint undo_page_type; + char str[20]={'\0'}; + + /* Check whether page is doublewrite buffer. 
*/ + if(skip_page) { + strcpy(str, "Double_write_buffer"); + } else { + strcpy(str, "-"); + } + + switch (mach_read_from_2(page + FIL_PAGE_TYPE)) { + + case FIL_PAGE_INDEX: + page_type.n_fil_page_index++; + id = mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID); + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tIndex page\t\t\t|" + "\tindex id=%llu,", cur_page_num, id); + + fprintf(file, + " page level=%lu, No. of records=%lu," + " garbage=%lu, %s\n", + page_header_get_field(page, PAGE_LEVEL), + page_header_get_field(page, PAGE_N_RECS), + page_header_get_field(page, PAGE_GARBAGE), str); + } + break; + + case FIL_PAGE_UNDO_LOG: + page_type.n_fil_page_undo_log++; + undo_page_type = mach_read_from_2(page + + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE); + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tUndo log page\t\t\t|", + cur_page_num); + } + if (undo_page_type == TRX_UNDO_INSERT) { + page_type.n_undo_insert++; + if (page_type_dump) { + fprintf(file, "\t%s", + "Insert Undo log page"); + } + + } else if (undo_page_type == TRX_UNDO_UPDATE) { + page_type.n_undo_update++; + if (page_type_dump) { + fprintf(file, "\t%s", + "Update undo log page"); + } + } + + undo_page_type = mach_read_from_2(page + TRX_UNDO_SEG_HDR + + TRX_UNDO_STATE); + switch (undo_page_type) { + case TRX_UNDO_ACTIVE: + page_type.n_undo_state_active++; + if (page_type_dump) { + fprintf(file, ", %s", "Undo log of " + "an active transaction"); + } + break; + + case TRX_UNDO_CACHED: + page_type.n_undo_state_cached++; + if (page_type_dump) { + fprintf(file, ", %s", "Page is " + "cached for quick reuse"); + } + break; + + case TRX_UNDO_TO_FREE: + page_type.n_undo_state_to_free++; + if (page_type_dump) { + fprintf(file, ", %s", "Insert undo " + "segment that can be freed"); + } + break; + + case TRX_UNDO_TO_PURGE: + page_type.n_undo_state_to_purge++; + if (page_type_dump) { + fprintf(file, ", %s", "Will be " + "freed in purge when all undo" + "data in it is removed"); + } + break; + + case TRX_UNDO_PREPARED: + page_type.n_undo_state_prepared++; + if (page_type_dump) { + fprintf(file, ", %s", "Undo log of " + "an prepared transaction"); + } + break; + + default: + page_type.n_undo_state_other++; + break; + } + if(page_type_dump) { + fprintf(file, ", %s\n", str); + } + break; + + case FIL_PAGE_INODE: + page_type.n_fil_page_inode++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tInode page\t\t\t|" + "\t%s\n",cur_page_num, str); + } + break; + + case FIL_PAGE_IBUF_FREE_LIST: + page_type.n_fil_page_ibuf_free_list++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tInsert buffer free list" + " page\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_ALLOCATED: + page_type.n_fil_page_type_allocated++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tFreshly allocated " + "page\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_IBUF_BITMAP: + page_type.n_fil_page_ibuf_bitmap++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tInsert Buffer " + "Bitmap\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_SYS: + page_type.n_fil_page_type_sys++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tSystem page\t\t\t|" + "\t%s\n",cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_TRX_SYS: + page_type.n_fil_page_type_trx_sys++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tTransaction system " + "page\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_FSP_HDR: + page_type.n_fil_page_type_fsp_hdr++; + if 
(page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tFile Space " + "Header\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_XDES: + page_type.n_fil_page_type_xdes++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tExtent descriptor " + "page\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_BLOB: + page_type.n_fil_page_type_blob++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tBLOB page\t\t\t|\t%s\n", + cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_ZBLOB: + page_type.n_fil_page_type_zblob++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tCompressed BLOB " + "page\t\t|\t%s\n", cur_page_num, str); + } + break; + + case FIL_PAGE_TYPE_ZBLOB2: + page_type.n_fil_page_type_zblob2++; + if (page_type_dump) { + fprintf(file, "#::%lu \t\t|\t\tSubsequent Compressed " + "BLOB page\t|\t%s\n", cur_page_num, str); + } + break; + + default: + page_type.n_fil_page_type_other++; + break; + } +} +/** +@param [in/out] file_name name of the file to create + +@retval FILE pointer if successfully created, else NULL when an error occurred. +*/ +FILE* +create_file( + char* file_name) +{ + FILE* file = NULL; + +#ifndef _WIN32 + file = fopen(file_name, "wb"); + if (file == NULL) { + fprintf(stderr, "Failed to create file: %s: %s\n", + file_name, strerror(errno)); + return(NULL); + } +#else + HANDLE hFile; /* handle to open file. */ + int fd = 0; + hFile = CreateFile((LPCTSTR) file_name, + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_DELETE, + NULL, CREATE_NEW, NULL, NULL); + + if (hFile == INVALID_HANDLE_VALUE) { + /* print the error message. */ + fprintf(stderr, "Filename::%s %s\n", + file_name, + error_message(GetLastError())); + + return(NULL); + } + + /* get the file descriptor. */ + fd= _open_osfhandle((intptr_t)hFile, _O_RDWR | _O_BINARY); + file = fdopen(fd, "wb"); +#endif /* _WIN32 */ + + return(file); +} + +/* + Print the page type count of a tablespace. + @param [in] fil_out stream where the output goes.
+*/ +void +print_summary( + FILE* fil_out) +{ + fprintf(fil_out, "\n================PAGE TYPE SUMMARY==============\n"); + fprintf(fil_out, "#PAGE_COUNT\tPAGE_TYPE"); + fprintf(fil_out, "\n===============================================\n"); + fprintf(fil_out, "%8d\tIndex page\n", + page_type.n_fil_page_index); + fprintf(fil_out, "%8d\tUndo log page\n", + page_type.n_fil_page_undo_log); + fprintf(fil_out, "%8d\tInode page\n", + page_type.n_fil_page_inode); + fprintf(fil_out, "%8d\tInsert buffer free list page\n", + page_type.n_fil_page_ibuf_free_list); + fprintf(fil_out, "%8d\tFreshly allocated page\n", + page_type.n_fil_page_type_allocated); + fprintf(fil_out, "%8d\tInsert buffer bitmap\n", + page_type.n_fil_page_ibuf_bitmap); + fprintf(fil_out, "%8d\tSystem page\n", + page_type.n_fil_page_type_sys); + fprintf(fil_out, "%8d\tTransaction system page\n", + page_type.n_fil_page_type_trx_sys); + fprintf(fil_out, "%8d\tFile Space Header\n", + page_type.n_fil_page_type_fsp_hdr); + fprintf(fil_out, "%8d\tExtent descriptor page\n", + page_type.n_fil_page_type_xdes); + fprintf(fil_out, "%8d\tBLOB page\n", + page_type.n_fil_page_type_blob); + fprintf(fil_out, "%8d\tCompressed BLOB page\n", + page_type.n_fil_page_type_zblob); + fprintf(fil_out, "%8d\tOther type of page", + page_type.n_fil_page_type_other); + fprintf(fil_out, "\n===============================================\n"); + fprintf(fil_out, "Additional information:\n"); + fprintf(fil_out, "Undo page type: %d insert, %d update, %d other\n", + page_type.n_undo_insert, + page_type.n_undo_update, + page_type.n_undo_other); + fprintf(fil_out, "Undo page state: %d active, %d cached, %d to_free, %d" + " to_purge, %d prepared, %d other\n", + page_type.n_undo_state_active, + page_type.n_undo_state_cached, + page_type.n_undo_state_to_free, + page_type.n_undo_state_to_purge, + page_type.n_undo_state_prepared, + page_type.n_undo_state_other); +} + +/* command line argument for innochecksum tool. */ +static struct my_option innochecksum_options[] = { {"help", '?', "Displays this help and exits.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"info", 'I', "Synonym for --help.", @@ -221,779 +916,599 @@ static struct my_option innochecksum_options[] = 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Verbose (prints progress every 5 seconds).", &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"debug", 'd', "Debug mode (prints checksums for each page, implies verbose).", - &debug, &debug, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"skip_corrupt", 'u', "Skip corrupt pages.", - &skip_corrupt, &skip_corrupt, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"count", 'c', "Print the count of pages in the file.", +#ifndef DBUG_OFF + {"debug", '#', "Output debug log. 
See " REFMAN "dbug-package.html", + &dbug_setting, &dbug_setting, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#endif /* !DBUG_OFF */ + {"count", 'c', "Print the count of pages in the file and exits.", &just_count, &just_count, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"start_page", 's', "Start on this page number (0 based).", - &start_page, &start_page, 0, GET_ULONG, REQUIRED_ARG, - 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0}, + &start_page, &start_page, 0, GET_ULL, REQUIRED_ARG, + 0, 0, ULLONG_MAX, 0, 1, 0}, {"end_page", 'e', "End at this page number (0 based).", - &end_page, &end_page, 0, GET_ULONG, REQUIRED_ARG, - 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0}, + &end_page, &end_page, 0, GET_ULL, REQUIRED_ARG, + 0, 0, ULLONG_MAX, 0, 1, 0}, {"page", 'p', "Check only this page (0 based).", - &do_page, &do_page, 0, GET_ULONG, REQUIRED_ARG, - 0, 0, (longlong) 2L*1024L*1024L*1024L, 0, 1, 0}, - {"per_page_details", 'i', "Print out per-page detail information.", - &per_page_details, &per_page_details, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0} - , - {"leaf", 'l', "Examine leaf index pages", - &do_leaf, &do_leaf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"merge", 'm', "leaf page count if merge given number of consecutive pages", - &n_merge, &n_merge, 0, GET_ULONG, REQUIRED_ARG, - 0, 0, (longlong)10L, 0, 1, 0}, + &do_page, &do_page, 0, GET_ULL, REQUIRED_ARG, + 0, 0, ULLONG_MAX, 0, 1, 0}, + {"strict-check", 'C', "Specify the strict checksum algorithm by the user.", + &strict_check, &strict_check, &innochecksum_algorithms_typelib, + GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"no-check", 'n', "Ignore the checksum verification.", + &no_check, &no_check, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"allow-mismatches", 'a', "Maximum checksum mismatch allowed.", + &allow_mismatches, &allow_mismatches, 0, + GET_ULL, REQUIRED_ARG, 0, 0, ULLONG_MAX, 0, 1, 0}, + {"write", 'w', "Rewrite the checksum algorithm by the user.", + &write_check, &write_check, &innochecksum_algorithms_typelib, + GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"page-type-summary", 'S', "Display a count of each page type " + "in a tablespace.", &page_type_summary, &page_type_summary, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"page-type-dump", 'D', "Dump the page type info for each page in a " + "tablespace.", &page_dump_filename, &page_dump_filename, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"log", 'l', "log output.", + &log_filename, &log_filename, 0, + GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; +/* Print out the Innodb version and machine information. 
*/ static void print_version(void) { - printf("%s Ver %s, for %s (%s)\n", - my_progname, INNODB_VERSION_STR, - SYSTEM_TYPE, MACHINE_TYPE); +#ifdef DBUG_OFF + printf("%s Ver %s, for %s (%s)\n", + my_progname, INNODB_VERSION_STR, + SYSTEM_TYPE, MACHINE_TYPE); +#else + printf("%s-debug Ver %s, for %s (%s)\n", + my_progname, INNODB_VERSION_STR, + SYSTEM_TYPE, MACHINE_TYPE); +#endif /* DBUG_OFF */ } static void usage(void) { - print_version(); - puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); - printf("InnoDB offline file checksum utility.\n"); - printf("Usage: %s [-c] [-s ] [-e ] [-p ] [-v] [-d] \n", my_progname); - my_print_help(innochecksum_options); - my_print_variables(innochecksum_options); + print_version(); + puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); + printf("InnoDB offline file checksum utility.\n"); + printf("Usage: %s [-c] [-s ] [-e ] " + "[-p ] [-v] [-a ] [-n] " + "[-C ] [-w ] [-S] [-D ] " + "[-l ] \n", my_progname); + printf("See " REFMAN "innochecksum.html for usage hints.\n"); + my_print_help(innochecksum_options); + my_print_variables(innochecksum_options); } extern "C" my_bool innochecksum_get_one_option( -/*========================*/ - int optid, - const struct my_option *opt __attribute__((unused)), - char *argument __attribute__((unused))) + int optid, + const struct my_option *opt __attribute__((unused)), + char *argument __attribute__((unused))) { - switch (optid) { - case 'd': - verbose=1; /* debug implies verbose... */ - break; - case 'e': - use_end_page= 1; - break; - case 'p': - end_page= start_page= do_page; - use_end_page= 1; - do_one_page= 1; - break; - case 'V': - print_version(); - exit(0); - break; - case 'I': - case '?': - usage(); - exit(0); - break; - } - return 0; -} + switch (optid) { +#ifndef DBUG_OFF + case '#': + dbug_setting = argument + ? argument + : IF_WIN("d:O,innochecksum.trace", + "d:o,/tmp/innochecksum.trace"); + DBUG_PUSH(dbug_setting); + break; +#endif /* !DBUG_OFF */ + case 'e': + use_end_page = true; + break; + case 'p': + end_page = start_page = do_page; + use_end_page = true; + do_one_page = true; + break; + case 'V': + print_version(); + exit(EXIT_SUCCESS); + break; + case 'C': + strict_verify = true; + switch ((srv_checksum_algorithm_t) strict_check) { + + case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: + case SRV_CHECKSUM_ALGORITHM_CRC32: + srv_checksum_algorithm = + SRV_CHECKSUM_ALGORITHM_STRICT_CRC32; + break; + + case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: + case SRV_CHECKSUM_ALGORITHM_INNODB: + srv_checksum_algorithm = + SRV_CHECKSUM_ALGORITHM_STRICT_INNODB; + break; + + case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: + case SRV_CHECKSUM_ALGORITHM_NONE: + srv_checksum_algorithm = + SRV_CHECKSUM_ALGORITHM_STRICT_NONE; + break; + default: + return(true); + } + break; + case 'n': + no_check = true; + break; + case 'a': + case 'S': + break; + case 'w': + do_write = true; + break; + case 'D': + page_type_dump = true; + break; + case 'l': + is_log_enabled = true; + break; + case 'I': + case '?': + usage(); + exit(EXIT_SUCCESS); + break; + } -static int get_options( -/*===================*/ - int *argc, - char ***argv) -{ - int ho_error; - - if ((ho_error=handle_options(argc, argv, innochecksum_options, innochecksum_get_one_option))) - exit(ho_error); - - /* The next arg must be the filename */ - if (!*argc) - { - usage(); - return 1; - } - return 0; -} /* get_options */ - -/*********************************************************************//** -Gets the file page type. 
-@return type; NOTE that if the type has not been written to page, the -return value not defined */ -ulint -fil_page_get_type( -/*==============*/ - uchar* page) /*!< in: file page */ -{ - return(mach_read_from_2(page + FIL_PAGE_TYPE)); + return(false); } -/**************************************************************//** -Gets the index id field of a page. -@return index id */ -ib_uint64_t -btr_page_get_index_id( -/*==================*/ - uchar* page) /*!< in: index page */ +static +bool +get_options( + int *argc, + char ***argv) { - return(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)); -} + if (handle_options(argc, argv, innochecksum_options, + innochecksum_get_one_option)) + exit(true); + + /* The next arg must be the filename */ + if (!*argc) { + usage(); + return (true); + } -/********************************************************//** -Gets the next index page number. -@return next page number */ -ulint -btr_page_get_next( -/*==============*/ - const page_t* page) /*!< in: index page */ -{ - return(mach_read_from_4(page + FIL_PAGE_NEXT)); + return (false); } -/********************************************************//** -Gets the previous index page number. -@return prev page number */ -ulint -btr_page_get_prev( -/*==============*/ - const page_t* page) /*!< in: index page */ +int main( + int argc, + char **argv) { - return(mach_read_from_4(page + FIL_PAGE_PREV)); -} + /* our input file. */ + FILE* fil_in = NULL; + /* our input filename. */ + char* filename; + /* Buffer to store pages read. */ + byte* buf = NULL; + /* bytes read count */ + ulong bytes; + /* Buffer to decompress page.*/ + byte* tbuf = NULL; + /* current time */ + time_t now; + /* last time */ + time_t lastt; + /* stat, to get file size. */ +#ifdef _WIN32 + struct _stat64 st; +#else + struct stat st; +#endif /* _WIN32 */ + + /* size of file (has to be 64 bits) */ + unsigned long long int size = 0; + /* number of pages in file */ + ulint pages; + + off_t offset = 0; + /* count the no. of page corrupted. */ + ulint mismatch_count = 0; + /* Variable to ack the page is corrupted or not. */ + bool is_corrupted = false; + + bool partial_page_read = false; + /* Enabled when read from stdin is done. */ + bool read_from_stdin = false; + FILE* fil_page_type = NULL; + fpos_t pos; + + /* Use to check the space id of given file. If space_id is zero, + then check whether page is doublewrite buffer.*/ + ulint space_id = 0UL; + /* enable when space_id of given file is zero. 
*/ + bool is_system_tablespace = false; + + ut_crc32_init(); + MY_INIT(argv[0]); + DBUG_ENTER("main"); + DBUG_PROCESS(argv[0]); + + if (get_options(&argc,&argv)) { + DBUG_RETURN(1); + } -void -parse_page( -/*=======*/ - uchar* page, /* in: buffer page */ - uchar* xdes) /* in: extend descriptor page */ -{ - ib_uint64_t id; - ulint x; - ulint n_recs; - ulint page_no; - ulint left_page_no; - ulint right_page_no; - ulint data_bytes; - int is_leaf; - int size_range_id; - - switch (fil_page_get_type(page)) { - case FIL_PAGE_INDEX: - n_fil_page_index++; - id = btr_page_get_index_id(page); - n_recs = page_get_n_recs(page); - page_no = page_get_page_no(page); - left_page_no = btr_page_get_prev(page); - right_page_no = btr_page_get_next(page); - data_bytes = page_get_data_size(page); - is_leaf = page_is_leaf(page); - size_range_id = (data_bytes * SIZE_RANGES_FOR_PAGE - + logical_page_size - 1) / - logical_page_size; - if (size_range_id > SIZE_RANGES_FOR_PAGE + 1) { - /* data_bytes is bigger than logical_page_size */ - size_range_id = SIZE_RANGES_FOR_PAGE + 1; - } - if (per_page_details) { - printf("index %lu page %lu leaf %u n_recs %lu data_bytes %lu" - "\n", (ulong) id, (ulong) page_no, is_leaf, n_recs, data_bytes); - } - /* update per-index statistics */ - { - if (index_ids.count(id) == 0) { - index_ids[id] = per_index_stats(); - } - std::map::iterator it; - it = index_ids.find(id); - per_index_stats &index = (it->second); - uchar* des = xdes + XDES_ARR_OFFSET - + XDES_SIZE * ((page_no & (physical_page_size - 1)) - / FSP_EXTENT_SIZE); - if (xdes_get_bit(des, XDES_FREE_BIT, - page_no % FSP_EXTENT_SIZE)) { - index.free_pages++; - return; - } - index.pages++; - if (is_leaf) { - index.leaf_pages++; - if (data_bytes > index.max_data_size) { - index.max_data_size = data_bytes; - } - struct per_page_stats pp(n_recs, data_bytes, - left_page_no, right_page_no); - - index.leaves[page_no] = pp; - - if (left_page_no == ULINT32_UNDEFINED) { - index.first_leaf_page = page_no; - index.count++; - } - } - index.total_n_recs += n_recs; - index.total_data_bytes += data_bytes; - index.pages_in_size_range[size_range_id] ++; - } - - break; - case FIL_PAGE_UNDO_LOG: - if (per_page_details) { - printf("FIL_PAGE_UNDO_LOG\n"); - } - n_fil_page_undo_log++; - x = mach_read_from_2(page + TRX_UNDO_PAGE_HDR + - TRX_UNDO_PAGE_TYPE); - if (x == TRX_UNDO_INSERT) - n_undo_insert++; - else if (x == TRX_UNDO_UPDATE) - n_undo_update++; - else - n_undo_other++; - - x = mach_read_from_2(page + TRX_UNDO_SEG_HDR + TRX_UNDO_STATE); - switch (x) { - case TRX_UNDO_ACTIVE: n_undo_state_active++; break; - case TRX_UNDO_CACHED: n_undo_state_cached++; break; - case TRX_UNDO_TO_FREE: n_undo_state_to_free++; break; - case TRX_UNDO_TO_PURGE: n_undo_state_to_purge++; break; - case TRX_UNDO_PREPARED: n_undo_state_prepared++; break; - default: n_undo_state_other++; break; - } - break; - case FIL_PAGE_INODE: - if (per_page_details) { - printf("FIL_PAGE_INODE\n"); - } - n_fil_page_inode++; - break; - case FIL_PAGE_IBUF_FREE_LIST: - if (per_page_details) { - printf("FIL_PAGE_IBUF_FREE_LIST\n"); - } - n_fil_page_ibuf_free_list++; - break; - case FIL_PAGE_TYPE_ALLOCATED: - if (per_page_details) { - printf("FIL_PAGE_TYPE_ALLOCATED\n"); - } - n_fil_page_type_allocated++; - break; - case FIL_PAGE_IBUF_BITMAP: - if (per_page_details) { - printf("FIL_PAGE_IBUF_BITMAP\n"); - } - n_fil_page_ibuf_bitmap++; - break; - case FIL_PAGE_TYPE_SYS: - if (per_page_details) { - printf("FIL_PAGE_TYPE_SYS\n"); - } - n_fil_page_type_sys++; - break; - case 
FIL_PAGE_TYPE_TRX_SYS: - if (per_page_details) { - printf("FIL_PAGE_TYPE_TRX_SYS\n"); - } - n_fil_page_type_trx_sys++; - break; - case FIL_PAGE_TYPE_FSP_HDR: - if (per_page_details) { - printf("FIL_PAGE_TYPE_FSP_HDR\n"); - } - memcpy(xdes, page, physical_page_size); - n_fil_page_type_fsp_hdr++; - break; - case FIL_PAGE_TYPE_XDES: - if (per_page_details) { - printf("FIL_PAGE_TYPE_XDES\n"); - } - memcpy(xdes, page, physical_page_size); - n_fil_page_type_xdes++; - break; - case FIL_PAGE_TYPE_BLOB: - if (per_page_details) { - printf("FIL_PAGE_TYPE_BLOB\n"); - } - n_fil_page_type_blob++; - break; - case FIL_PAGE_TYPE_ZBLOB: - case FIL_PAGE_TYPE_ZBLOB2: - if (per_page_details) { - printf("FIL_PAGE_TYPE_ZBLOB/2\n"); - } - n_fil_page_type_zblob++; - break; - case FIL_PAGE_PAGE_COMPRESSED: - if (per_page_details) { - printf("FIL_PAGE_PAGE_COMPRESSED\n"); - } - n_fil_page_type_page_compressed++; - break; - case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED: - if (per_page_details) { - printf("FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED\n"); - } - n_fil_page_type_page_compressed_encrypted++; - break; - default: - if (per_page_details) { - printf("FIL_PAGE_TYPE_OTHER\n"); - } - n_fil_page_type_other++; - } -} + if (strict_verify && no_check) { + fprintf(stderr, "Error: --strict-check option cannot be used " + "together with --no-check option.\n"); + DBUG_RETURN(1); + } -void print_index_leaf_stats(unsigned long long id, const per_index_stats& index) -{ - ulint page_no = index.first_leaf_page; - std::map::const_iterator it_page = index.leaves.find(page_no); - printf("\nindex: %llu leaf page stats: n_pages = %llu\n", - id, index.leaf_pages); - printf("page_no\tdata_size\tn_recs\n"); - while (it_page != index.leaves.end()) { - const per_page_stats& stat = it_page->second; - printf("%lu\t%lu\t%lu\n", it_page->first, stat.data_size, stat.n_recs); - page_no = stat.right_page_no; - it_page = index.leaves.find(page_no); - } -} + if (no_check && !do_write) { + fprintf(stderr, "Error: --no-check must be associated with " + "--write option.\n"); + DBUG_RETURN(1); + } -void defrag_analysis(unsigned long long id, const per_index_stats& index) -{ - // TODO: make it work for compressed pages too - std::map::const_iterator it = index.leaves.find(index.first_leaf_page); - ulint n_pages = 0; - ulint n_leaf_pages = 0; - while (it != index.leaves.end()) { - ulint data_size_total = 0; - for (ulong i = 0; i < n_merge; i++) { - const per_page_stats& stat = it->second; - n_leaf_pages ++; - data_size_total += stat.data_size; - it = index.leaves.find(stat.right_page_no); - if (it == index.leaves.end()) { - break; - } - } - if (index.max_data_size) { - n_pages += data_size_total / index.max_data_size; - if (data_size_total % index.max_data_size != 0) { - n_pages += 1; - } - } - } - if (index.leaf_pages) - printf("count = %lu free = %lu\n", index.count, index.free_pages); - printf("%llu\t\t%llu\t\t%lu\t\t%lu\t\t%lu\t\t%.2f\t%lu\n", - id, index.leaf_pages, n_leaf_pages, n_merge, n_pages, - 1.0 - (double)n_pages / (double)n_leaf_pages, index.max_data_size); -} + if (page_type_dump) { + fil_page_type = create_file(page_dump_filename); + if (!fil_page_type) { + DBUG_RETURN(1); + } + } -void print_leaf_stats() -{ - printf("\n**************************************************\n"); - printf("index_id\t#leaf_pages\t#actual_leaf_pages\tn_merge\t" - "#leaf_after_merge\tdefrag\n"); - for (std::map::const_iterator it = index_ids.begin(); it != index_ids.end(); it++) { - const per_index_stats& index = it->second; - if (verbose) { - 
print_index_leaf_stats(it->first, index); - } - if (n_merge) { - defrag_analysis(it->first, index); - } - } -} + if (is_log_enabled) { + log_file = create_file(log_filename); + if (!log_file) { + DBUG_RETURN(1); + } + fprintf(log_file, "InnoDB File Checksum Utility.\n"); + } -void -print_stats() -/*========*/ -{ - unsigned long long i; - - printf("%d\tbad checksum\n", n_bad_checksum); - printf("%d\tFIL_PAGE_INDEX\n", n_fil_page_index); - printf("%d\tFIL_PAGE_UNDO_LOG\n", n_fil_page_undo_log); - printf("%d\tFIL_PAGE_INODE\n", n_fil_page_inode); - printf("%d\tFIL_PAGE_IBUF_FREE_LIST\n", n_fil_page_ibuf_free_list); - printf("%d\tFIL_PAGE_TYPE_ALLOCATED\n", n_fil_page_type_allocated); - printf("%d\tFIL_PAGE_IBUF_BITMAP\n", n_fil_page_ibuf_bitmap); - printf("%d\tFIL_PAGE_TYPE_SYS\n", n_fil_page_type_sys); - printf("%d\tFIL_PAGE_TYPE_TRX_SYS\n", n_fil_page_type_trx_sys); - printf("%d\tFIL_PAGE_TYPE_FSP_HDR\n", n_fil_page_type_fsp_hdr); - printf("%d\tFIL_PAGE_TYPE_XDES\n", n_fil_page_type_xdes); - printf("%d\tFIL_PAGE_TYPE_BLOB\n", n_fil_page_type_blob); - printf("%d\tFIL_PAGE_TYPE_ZBLOB\n", n_fil_page_type_zblob); - printf("%d\tFIL_PAGE_PAGE_COMPRESSED\n", n_fil_page_type_page_compressed); - printf("%d\tFIL_PAGE_PAGE_COMPRESSED_ENCRYPTED\n", n_fil_page_type_page_compressed_encrypted); - printf("%d\tother\n", n_fil_page_type_other); - printf("%d\tmax index_id\n", n_fil_page_max_index_id); - printf("undo type: %d insert, %d update, %d other\n", - n_undo_insert, n_undo_update, n_undo_other); - printf("undo state: %d active, %d cached, %d to_free, %d to_purge," - " %d prepared, %d other\n", n_undo_state_active, - n_undo_state_cached, n_undo_state_to_free, - n_undo_state_to_purge, n_undo_state_prepared, - n_undo_state_other); - - printf("index_id\t#pages\t\t#leaf_pages\t#recs_per_page" - "\t#bytes_per_page\n"); - for (std::map::const_iterator it = index_ids.begin(); it != index_ids.end(); it++) { - const per_index_stats& index = it->second; - printf("%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", - it->first, index.pages, index.leaf_pages, - index.total_n_recs / index.pages, - index.total_data_bytes / index.pages); - } - printf("\n"); - printf("index_id\tpage_data_bytes_histgram(empty,...,oversized)\n"); - for (std::map::const_iterator it = index_ids.begin(); it != index_ids.end(); it++) { - printf("%lld\t", it->first); - const per_index_stats& index = it->second; - for (i = 0; i < SIZE_RANGES_FOR_PAGE+2; i++) { - printf("\t%lld", index.pages_in_size_range[i]); - } - printf("\n"); - } - if (do_leaf) { - print_leaf_stats(); - } -} + if (verbose) { + /* JAN: TODO: MySQL 5.7 + my_print_variables_ex(innochecksum_options, stderr); + */ + my_print_variables(innochecksum_options); + } -int main(int argc, char **argv) -{ - FILE* f; /* our input file */ - char* filename; /* our input filename. 
*/ - unsigned char *big_buf= 0, *buf; - unsigned char *big_xdes= 0, *xdes; - ulong bytes; /* bytes read count */ - ulint ct; /* current page number (0 based) */ - time_t now; /* current time */ - time_t lastt; /* last time */ - ulint oldcsum, oldcsumfield, csum, csumfield, crc32, logseq, logseqfield; - /* ulints for checksum storage */ - struct stat st; /* for stat, if you couldn't guess */ - unsigned long long int size; /* size of file (has to be 64 bits) */ - ulint pages; /* number of pages in file */ - off_t offset= 0; - int fd; - - printf("InnoDB offline file checksum utility.\n"); - - ut_crc32_init(); - - MY_INIT(argv[0]); - - if (get_options(&argc,&argv)) - exit(1); - - if (verbose) - my_print_variables(innochecksum_options); - - /* The file name is not optional */ - filename = *argv; - if (*filename == '\0') - { - fprintf(stderr, "Error; File name missing\n"); - goto error_out; - } - - /* stat the file to get size and page count */ - if (stat(filename, &st)) - { - fprintf(stderr, "Error; %s cannot be found\n", filename); - goto error_out; - } - size= st.st_size; - - /* Open the file for reading */ - f= fopen(filename, "rb"); - if (f == NULL) - { - fprintf(stderr, "Error; %s cannot be opened", filename); - perror(" "); - goto error_out; - } - - big_buf = (unsigned char *)malloc(2 * UNIV_PAGE_SIZE_MAX); - if (big_buf == NULL) - { - fprintf(stderr, "Error; failed to allocate memory\n"); - perror(""); - goto error_f; - } - - /* Make sure the page is aligned */ - buf = (unsigned char*)ut_align_down(big_buf - + UNIV_PAGE_SIZE_MAX, UNIV_PAGE_SIZE_MAX); - - big_xdes = (unsigned char *)malloc(2 * UNIV_PAGE_SIZE_MAX); - if (big_xdes == NULL) - { - fprintf(stderr, "Error; failed to allocate memory\n"); - perror(""); - goto error_big_buf; - } - - /* Make sure the page is aligned */ - xdes = (unsigned char*)ut_align_down(big_xdes - + UNIV_PAGE_SIZE_MAX, UNIV_PAGE_SIZE_MAX); - - - if (!get_page_size(f, buf, &logical_page_size, &physical_page_size)) - goto error; - - if (compressed) - { - printf("Table is compressed\n"); - printf("Key block size is %lu\n", physical_page_size); - } - else - { - printf("Table is uncompressed\n"); - printf("Page size is %lu\n", physical_page_size); - } - - pages= (ulint) (size / physical_page_size); - - if (just_count) - { - if (verbose) - printf("Number of pages: "); - printf("%lu\n", pages); - goto ok; - } - else if (verbose) - { - printf("file %s = %llu bytes (%lu pages)...\n", filename, size, pages); - if (do_one_page) - printf("InnoChecksum; checking page %lu\n", do_page); - else - printf("InnoChecksum; checking pages in range %lu to %lu\n", start_page, use_end_page ? 
end_page : (pages - 1)); - } - -#ifdef UNIV_LINUX - if (posix_fadvise(fileno(f), 0, 0, POSIX_FADV_SEQUENTIAL) || - posix_fadvise(fileno(f), 0, 0, POSIX_FADV_NOREUSE)) - { - perror("posix_fadvise failed"); - } -#endif - /* seek to the necessary position */ - if (start_page) - { - fd= fileno(f); - if (!fd) - { - perror("Error; Unable to obtain file descriptor number"); - goto error; - } - - offset= (off_t)start_page * (off_t)physical_page_size; - - if (lseek(fd, offset, SEEK_SET) != offset) - { - perror("Error; Unable to seek to necessary offset"); - goto error; - } - } - - /* main checksumming loop */ - ct= start_page; - lastt= 0; - while (!feof(f)) - { - int page_ok = 1; - - bytes= fread(buf, 1, physical_page_size, f); - - if (!bytes && feof(f)) - goto ok; - - if (ferror(f)) - { - fprintf(stderr, "Error reading %lu bytes", physical_page_size); - perror(" "); - goto error; - } - - ulint page_type = mach_read_from_2(buf+FIL_PAGE_TYPE); - ulint key_version = mach_read_from_4(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - - if (key_version && page_type != FIL_PAGE_PAGE_COMPRESSED) { - encrypted = true; - } else { - encrypted = false; - } - - ulint comp_method = 0; - - if (encrypted) { - comp_method = mach_read_from_2(buf+FIL_PAGE_DATA+FIL_PAGE_COMPRESSED_SIZE); - } else { - comp_method = mach_read_from_8(buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - } - - ulint comp_size = mach_read_from_2(buf+FIL_PAGE_DATA); - ib_uint32_t encryption_checksum = mach_read_from_4(buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4); - - - if (page_type == FIL_PAGE_PAGE_COMPRESSED) { - /* Page compressed tables do not have any checksum */ - if (debug) - fprintf(stderr, "Page %lu page compressed with method %s real_size %lu\n", ct, - fil_get_compression_alg_name(comp_method), comp_size); - page_ok = 1; - } else if (compressed) { - /* compressed pages */ - ulint crccsum = page_zip_calc_checksum(buf, physical_page_size, SRV_CHECKSUM_ALGORITHM_CRC32); - ulint icsum = page_zip_calc_checksum(buf, physical_page_size, SRV_CHECKSUM_ALGORITHM_INNODB); - - if (debug) { - if (key_version != 0) { - fprintf(stderr, - "Page %lu encrypted key_version %lu calculated = %lu; crc32 = %lu; recorded = %u\n", - ct, key_version, icsum, crccsum, encryption_checksum); - } - } + buf = (byte*) malloc(UNIV_PAGE_SIZE_MAX * 2); + tbuf = buf + UNIV_PAGE_SIZE_MAX; + + /* The file name is not optional. */ + for (int i = 0; i < argc; ++i) { + /* Reset parameters for each file. */ + filename = argv[i]; + memset(&page_type, 0, sizeof(innodb_page_type)); + is_corrupted = false; + partial_page_read = false; + skip_page = false; + + if (is_log_enabled) { + fprintf(log_file, "Filename = %s\n", filename); + } + + if (*filename == '-') { + /* read from stdin. */ + fil_in = stdin; + read_from_stdin = true; + + } + + /* stat the file to get size and page count. */ + if (!read_from_stdin && +#ifdef _WIN32 + _stat64(filename, &st)) { +#else + stat(filename, &st)) { +#endif /* _WIN32 */ + fprintf(stderr, "Error: %s cannot be found\n", + filename); + + DBUG_RETURN(1); + } + + if (!read_from_stdin) { + size = st.st_size; + fil_in = open_file(filename); + /*If fil_in is NULL, terminate as some error encountered */ + if(fil_in == NULL) { + DBUG_RETURN(1); + } + /* Save the current file pointer in pos variable.*/ + if (0 != fgetpos(fil_in, &pos)) { + perror("fgetpos"); + DBUG_RETURN(1); + } + } + + /* Testing for lock mechanism. The innochecksum + acquire lock on given file. So other tools accessing the same + file for processsing must fail. 
*/ +#ifdef _WIN32 + DBUG_EXECUTE_IF("innochecksum_cause_mysqld_crash", + ut_ad(page_dump_filename); + while((_access( page_dump_filename, 0)) == 0) { + sleep(1); + } + DBUG_RETURN(0); ); +#else + DBUG_EXECUTE_IF("innochecksum_cause_mysqld_crash", + ut_ad(page_dump_filename); + struct stat status_buf; + while(stat(page_dump_filename, &status_buf) == 0) { + sleep(1); + } + DBUG_RETURN(0); ); +#endif /* _WIN32 */ + + /* Read the minimum page size. */ + bytes = ulong(fread(buf, 1, UNIV_ZIP_SIZE_MIN, fil_in)); + partial_page_read = true; + + if (bytes != UNIV_ZIP_SIZE_MIN) { + fprintf(stderr, "Error: Was not able to read the " + "minimum page size "); + fprintf(stderr, "of %d bytes. Bytes read was %lu\n", + UNIV_ZIP_SIZE_MIN, bytes); + + free(buf); + DBUG_RETURN(1); + } + + /* enable variable is_system_tablespace when space_id of given + file is zero. Use to skip the checksum verification and rewrite + for doublewrite pages. */ + is_system_tablespace = (!memcmp(&space_id, buf + + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 4)) + ? true : false; + + const page_size_t& page_size = get_page_size(buf); + + pages = (ulint) (size / page_size.physical()); + + if (just_count) { + if (read_from_stdin) { + fprintf(stderr, "Number of pages:%lu\n", pages); + } else { + printf("Number of pages:%lu\n", pages); + } + continue; + } else if (verbose && !read_from_stdin) { + if (is_log_enabled) { + fprintf(log_file, "file %s = %llu bytes " + "(%lu pages)\n", filename, size, pages); + if (do_one_page) { + fprintf(log_file, "Innochecksum: " + "checking page %lu \n", + do_page); + } + } + } else { + if (is_log_enabled) { + fprintf(log_file, "Innochecksum: checking " + "pages in range %lu to %lu\n", + start_page, use_end_page ? + end_page : (pages - 1)); + } + } + + /* seek to the necessary position */ + if (start_page) { + if (!read_from_stdin) { + /* If read is not from stdin, we can use + fseeko() to position the file pointer to + the desired page. */ + partial_page_read = false; + + offset = (off_t) start_page + * (off_t) page_size.physical(); +#ifdef _WIN32 + if (_fseeki64(fil_in, offset, SEEK_SET)) { +#else + if (fseeko(fil_in, offset, SEEK_SET)) { +#endif /* _WIN32 */ + perror("Error: Unable to seek to " + "necessary offset"); + + free(buf); + DBUG_RETURN(1); + } + /* Save the current file pointer in + pos variable. */ + if (0 != fgetpos(fil_in, &pos)) { + perror("fgetpos"); + + free(buf); + DBUG_RETURN(1); + } + } else { + + ulong count = 0; + + while (!feof(fil_in)) { + if (start_page == count) { + break; + } + /* We read a part of page to find the + minimum page size. We cannot reset + the file pointer to the beginning of + the page if we are reading from stdin + (fseeko() on stdin doesn't work). So + read only the remaining part of page, + if partial_page_read is enable. 
*/ + bytes = read_file(buf, + partial_page_read, + static_cast( + page_size.physical()), + fil_in); + + partial_page_read = false; + count++; + + if (!bytes || feof(fil_in)) { + fprintf(stderr, "Error: Unable " + "to seek to necessary " + "offset"); + + free(buf); + DBUG_RETURN(1); + } + } + } + } + + if (page_type_dump) { + fprintf(fil_page_type, + "\n\nFilename::%s\n", filename); + fprintf(fil_page_type, + "========================================" + "======================================\n"); + fprintf(fil_page_type, + "\tPAGE_NO\t\t|\t\tPAGE_TYPE\t\t" + "\t|\tEXTRA INFO\n"); + fprintf(fil_page_type, + "========================================" + "======================================\n"); + } + + /* main checksumming loop */ + cur_page_num = start_page; + lastt = 0; + while (!feof(fil_in)) { + + bytes = read_file(buf, partial_page_read, + static_cast( + page_size.physical()), fil_in); + partial_page_read = false; + + if (!bytes && feof(fil_in)) { + break; + } + + if (ferror(fil_in)) { + fprintf(stderr, "Error reading %lu bytes", + page_size.physical()); + perror(" "); + + free(buf); + DBUG_RETURN(1); + } + + if (bytes != page_size.physical()) { + fprintf(stderr, "Error: bytes read (%lu) " + "doesn't match page size (%lu)\n", + bytes, page_size.physical()); + free(buf); + DBUG_RETURN(1); + } + + if (is_system_tablespace) { + /* enable when page is double write buffer.*/ + skip_page = is_page_doublewritebuffer(buf); + } else { + skip_page = false; + + if (!page_decompress(buf, tbuf, page_size)) { + + fprintf(stderr, + "Page decompress failed"); + + free(buf); + DBUG_RETURN(1); + } + } + + /* If no-check is enabled, skip the + checksum verification.*/ + if (!no_check) { + /* Checksum verification */ + if (!skip_page) { + is_corrupted = is_page_corrupted( + buf, page_size); + + if (is_corrupted) { + fprintf(stderr, "Fail: page " + "%lu invalid\n", + cur_page_num); + + mismatch_count++; + + if(mismatch_count > allow_mismatches) { + fprintf(stderr, + "Exceeded the " + "maximum allowed " + "checksum mismatch " + "count::%lu\n", + allow_mismatches); + + free(buf); + DBUG_RETURN(1); + } + } + } + } + + /* Rewrite checksum */ + if (do_write + && !write_file(filename, fil_in, buf, + page_size.is_compressed(), &pos, + static_cast(page_size.physical()))) { + + free(buf); + DBUG_RETURN(1); + } + + /* end if this was the last page we were supposed to check */ + if (use_end_page && (cur_page_num >= end_page)) { + break; + } + + if (page_type_summary || page_type_dump) { + parse_page(buf, fil_page_type); + } + + /* do counter increase and progress printing */ + cur_page_num++; + if (verbose && !read_from_stdin) { + if ((cur_page_num % 64) == 0) { + now = time(0); + if (!lastt) { + lastt= now; + } + if (now - lastt >= 1 + && is_log_enabled) { + fprintf(log_file, "page %lu " + "okay: %.3f%% done\n", + (cur_page_num - 1), + (float) cur_page_num / pages * 100); + lastt = now; + } + } + } + } + + if (!read_from_stdin) { + /* flcose() will flush the data and release the lock if + any acquired. */ + fclose(fil_in); + } + + /* Enabled for page type summary. 
*/ + if (page_type_summary) { + if (!read_from_stdin) { + fprintf(stdout, "\nFile::%s",filename); + print_summary(stdout); + } else { + print_summary(stderr); + } + } + } - if (encrypted) { - if (encryption_checksum != 0 && crccsum != encryption_checksum && icsum != encryption_checksum) { - if (debug) - fprintf(stderr, "page %lu: compressed: calculated = %lu; crc32 = %lu; recorded = %u\n", - ct, icsum, crccsum, encryption_checksum); - fprintf(stderr, "Fail; page %lu invalid (fails compressed page checksum).\n", ct); - } - } else { - if (!page_zip_verify_checksum(buf, physical_page_size)) { - fprintf(stderr, "Fail; page %lu invalid (fails compressed page checksum).\n", ct); - if (!skip_corrupt) - goto error; - page_ok = 0; - } + if (is_log_enabled) { + fclose(log_file); } - } else { - if (key_version != 0) { - /* Encrypted page */ - if (debug) { - if (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) { - fprintf(stderr, - "Page %lu page compressed with method %s real_size %lu and encrypted key_version %lu checksum %u\n", - ct, fil_get_compression_alg_name(comp_method), comp_size, key_version, encryption_checksum); - } else { - fprintf(stderr, - "Page %lu encrypted key_version %lu checksum %u\n", - ct, key_version, encryption_checksum); - } - } - } - - /* Page compressed tables do not contain FIL tailer */ - if (page_type != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED && page_type != FIL_PAGE_PAGE_COMPRESSED) { - /* check the "stored log sequence numbers" */ - logseq= mach_read_from_4(buf + FIL_PAGE_LSN + 4); - logseqfield= mach_read_from_4(buf + logical_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM + 4); - if (debug) - printf("page %lu: log sequence number: first = %lu; second = %lu\n", ct, logseq, logseqfield); - if (logseq != logseqfield) - { - fprintf(stderr, "Fail; page %lu invalid (fails log sequence number check)\n", ct); - if (!skip_corrupt) - goto error; - page_ok = 0; - } - /* check old method of checksumming */ - oldcsum= buf_calc_page_old_checksum(buf); - oldcsumfield= mach_read_from_4(buf + logical_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM); - if (debug) - printf("page %lu: old style: calculated = %lu; recorded = %lu\n", ct, oldcsum, oldcsumfield); - if (oldcsumfield != mach_read_from_4(buf + FIL_PAGE_LSN) && oldcsumfield != oldcsum) - { - fprintf(stderr, "Fail; page %lu invalid (fails old style checksum)\n", ct); - if (!skip_corrupt) - goto error; - page_ok = 0; - } - } - - /* now check the new method */ - csum= buf_calc_page_new_checksum(buf); - crc32= buf_calc_page_crc32(buf); - csumfield= mach_read_from_4(buf + FIL_PAGE_SPACE_OR_CHKSUM); - - if (key_version) - csumfield = encryption_checksum; - - if (debug) - printf("page %lu: new style: calculated = %lu; crc32 = %lu; recorded = %lu\n", - ct, csum, crc32, csumfield); - if (csumfield != 0 && crc32 != csumfield && csum != csumfield) - { - fprintf(stderr, "Fail; page %lu invalid (fails innodb and crc32 checksum)\n", ct); - if (!skip_corrupt) - goto error; - page_ok = 0; - } - } - /* end if this was the last page we were supposed to check */ - if (use_end_page && (ct >= end_page)) - goto ok; - - if (per_page_details) - { - printf("page %ld ", ct); - } - - /* do counter increase and progress printing */ - ct++; - - if (!page_ok) - { - if (per_page_details) - { - printf("BAD_CHECKSUM\n"); - } - n_bad_checksum++; - continue; - } - - /* Can't parse compressed or/and encrypted pages */ - if (page_type != FIL_PAGE_PAGE_COMPRESSED && !encrypted) { - parse_page(buf, xdes); - } - - if (verbose) - { - if (ct % 64 == 0) - { - now= time(0); - if (!lastt) 
lastt= now; - if (now - lastt >= 1) - { - printf("page %lu okay: %.3f%% done\n", (ct - 1), (float) ct / pages * 100); - lastt= now; - } - } - } - } - -ok: - if (!just_count) - print_stats(); - free(big_xdes); - free(big_buf); - fclose(f); - my_end(0); - exit(0); - -error: - free(big_xdes); -error_big_buf: - free(big_buf); -error_f: - fclose(f); -error_out: - my_end(0); - exit(1); + free(buf); + DBUG_RETURN(0); } diff --git a/include/dur_prop.h b/include/dur_prop.h new file mode 100644 index 00000000000..558ce5acc01 --- /dev/null +++ b/include/dur_prop.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#ifndef _my_dur_prop_h +#define _my_dur_prop_h + +enum durability_properties +{ + /* + Preserves the durability properties defined by the engine + */ + HA_REGULAR_DURABILITY= 0, + /* + Ignore the durability properties defined by the engine and + write only in-memory entries. + */ + HA_IGNORE_DURABILITY= 1 +}; + +#endif /* _my_dur_prop_h */ diff --git a/include/my_icp.h b/include/my_icp.h new file mode 100644 index 00000000000..8b77b7cc759 --- /dev/null +++ b/include/my_icp.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ + +#ifndef _my_icp_h +#define _my_icp_h + +#ifdef __cplusplus +extern "C" { +#endif + +/** + Values returned by index_cond_func_xxx functions. +*/ + +typedef enum icp_result { + /** Index tuple doesn't satisfy the pushed index condition (the engine + should discard the tuple and go to the next one) */ + ICP_NO_MATCH, + + /** Index tuple satisfies the pushed index condition (the engine should + fetch and return the record) */ + ICP_MATCH, + + /** Index tuple is out of the range that we're scanning, e.g. if we're + scanning "t.key BETWEEN 10 AND 20" and got a "t.key=21" tuple (the engine + should stop scanning and return HA_ERR_END_OF_FILE right away). */ + ICP_OUT_OF_RANGE + +} ICP_RESULT; + + +#ifdef __cplusplus +} +#endif + +#endif /* _my_icp_h */ diff --git a/include/mysql/psi/psi_base.h b/include/mysql/psi/psi_base.h new file mode 100644 index 00000000000..10593c4dab4 --- /dev/null +++ b/include/mysql/psi/psi_base.h @@ -0,0 +1,147 @@ +/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. 
All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ + +#ifndef MYSQL_PSI_BASE_H +#define MYSQL_PSI_BASE_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** + @file mysql/psi/psi_base.h + Performance schema instrumentation interface. + + @defgroup Instrumentation_interface Instrumentation Interface + @ingroup Performance_schema + @{ +*/ + +#define PSI_INSTRUMENT_ME 0 + +#define PSI_NOT_INSTRUMENTED 0 + +/** + Global flag. + This flag indicates that an instrumentation point is a global variable, + or a singleton. +*/ +#define PSI_FLAG_GLOBAL (1 << 0) + +/** + Mutable flag. + This flag indicates that an instrumentation point is a general placeholder, + that can mutate into a more specific instrumentation point. +*/ +#define PSI_FLAG_MUTABLE (1 << 1) + +#define PSI_FLAG_THREAD (1 << 2) + +/** + Stage progress flag. + This flag applies to the stage instruments only. + It indicates the instrumentation provides progress data. +*/ +#define PSI_FLAG_STAGE_PROGRESS (1 << 3) + +/** + Shared Exclusive flag. + Indicates that the rwlock supports the shared exclusive state. +*/ +#define PSI_RWLOCK_FLAG_SX (1 << 4) + +/** + Transferable flag. + This flag indicates that an instrumented object can + be created by a thread and destroyed by another thread. +*/ +#define PSI_FLAG_TRANSFER (1 << 5) + +#ifdef HAVE_PSI_INTERFACE + +/** + @def PSI_VERSION_1 + Performance Schema Interface number for version 1. + This version is supported. +*/ +#define PSI_VERSION_1 1 + +/** + @def PSI_VERSION_2 + Performance Schema Interface number for version 2. + This version is not implemented, it's a placeholder. +*/ +#define PSI_VERSION_2 2 + +/** + @def PSI_CURRENT_VERSION + Performance Schema Interface number for the most recent version. + The most current version is @c PSI_VERSION_1. +*/ +#define PSI_CURRENT_VERSION 1 + +/** + @def USE_PSI_1 + Define USE_PSI_1 to use the interface version 1. +*/ + +/** + @def USE_PSI_2 + Define USE_PSI_2 to use the interface version 2. +*/ + +/** + @def HAVE_PSI_1 + Define HAVE_PSI_1 if the interface version 1 needs to be compiled in. +*/ + +/** + @def HAVE_PSI_2 + Define HAVE_PSI_2 if the interface version 2 needs to be compiled in. +*/ + +#ifndef USE_PSI_2 +#ifndef USE_PSI_1 +#define USE_PSI_1 +#endif +#endif + +#ifdef USE_PSI_1 +#define HAVE_PSI_1 +#endif + +#ifdef USE_PSI_2 +#define HAVE_PSI_2 +#endif + +/* + Allow overriding PSI_XXX_CALL at compile time + with more efficient implementations, if available. + If nothing better is available, + make a dynamic call using the PSI_server function pointer.
+*/ + +#define PSI_DYNAMIC_CALL(M) PSI_server->M + +#endif /* HAVE_PSI_INTERFACE */ + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* MYSQL_PSI_BASE_H */ + diff --git a/include/mysql/psi/psi_memory.h b/include/mysql/psi/psi_memory.h new file mode 100644 index 00000000000..725b3ed77d0 --- /dev/null +++ b/include/mysql/psi/psi_memory.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software Foundation, + 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ + +#ifndef MYSQL_PSI_MEMORY_H +#define MYSQL_PSI_MEMORY_H + +#include "psi_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + @file mysql/psi/psi_memory.h + Performance schema instrumentation interface. + + @defgroup Instrumentation_interface Instrumentation Interface + @ingroup Performance_schema + @{ +*/ + +#ifdef HAVE_PSI_INTERFACE +#ifndef DISABLE_ALL_PSI +#ifndef DISABLE_PSI_MEMORY +#define HAVE_PSI_MEMORY_INTERFACE +#endif /* DISABLE_PSI_MEMORY */ +#endif /* DISABLE_ALL_PSI */ +#endif /* HAVE_PSI_INTERFACE */ + +struct PSI_thread; + +/** + Instrumented memory key. + To instrument memory, a memory key must be obtained using @c register_memory. + Using a zero key always disables the instrumentation. +*/ +typedef unsigned int PSI_memory_key; + +#ifdef HAVE_PSI_1 + +/** + @defgroup Group_PSI_v1 Application Binary Interface, version 1 + @ingroup Instrumentation_interface + @{ +*/ + +/** + Memory instrument information. + @since PSI_VERSION_1 + This structure is used to register instrumented memory. +*/ +struct PSI_memory_info_v1 +{ + /** Pointer to the key assigned to the registered memory. */ + PSI_memory_key *m_key; + /** The name of the memory instrument to register. */ + const char *m_name; + /** + The flags of the memory instrument to register. + @sa PSI_FLAG_GLOBAL + */ + int m_flags; +}; +typedef struct PSI_memory_info_v1 PSI_memory_info_v1; + +/** + Memory registration API. + @param category a category name (typically a plugin name) + @param info an array of memory info to register + @param count the size of the info array +*/ +typedef void (*register_memory_v1_t) + (const char *category, struct PSI_memory_info_v1 *info, int count); + +/** + Instrument memory allocation. + @param key the memory instrument key + @param size the size of memory allocated + @param[out] owner the memory owner + @return the effective memory instrument key +*/ +typedef PSI_memory_key (*memory_alloc_v1_t) + (PSI_memory_key key, size_t size, struct PSI_thread ** owner); + +/** + Instrument memory reallocation. + @param key the memory instrument key + @param old_size the size of memory previously allocated + @param new_size the size of memory reallocated + @param[in, out] owner the memory owner + @return the effective memory instrument key +*/ +typedef PSI_memory_key (*memory_realloc_v1_t) + (PSI_memory_key key, size_t old_size, size_t new_size, struct PSI_thread ** owner); + +/** + Instrument memory claim.
+ @param key the memory instrument key + @param size the size of memory allocated + @param[in, out] owner the memory owner + @return the effective memory instrument key +*/ +typedef PSI_memory_key (*memory_claim_v1_t) + (PSI_memory_key key, size_t size, struct PSI_thread ** owner); + +/** + Instrument memory free. + @param key the memory instrument key + @param size the size of memory allocated + @param owner the memory owner +*/ +typedef void (*memory_free_v1_t) + (PSI_memory_key key, size_t size, struct PSI_thread * owner); + +/** @} (end of group Group_PSI_v1) */ + +#endif /* HAVE_PSI_1 */ + +#ifdef HAVE_PSI_2 +struct PSI_memory_info_v2 +{ + int placeholder; +}; + +#endif /* HAVE_PSI_2 */ + +#ifdef USE_PSI_1 +typedef struct PSI_memory_info_v1 PSI_memory_info; +#endif + +#ifdef USE_PSI_2 +typedef struct PSI_memory_info_v2 PSI_memory_info; +#endif + +/** @} (end of group Instrumentation_interface) */ + +#ifdef __cplusplus +} +#endif + + +#endif /* MYSQL_PSI_MEMORY_H */ + diff --git a/mysql-test/disabled.def b/mysql-test/disabled.def index e82ddcf92d1..75704c7571e 100644 --- a/mysql-test/disabled.def +++ b/mysql-test/disabled.def @@ -22,3 +22,7 @@ innodb-wl5522-debug-zip : broken upstream innodb_bug12902967 : broken upstream file_contents : MDEV-6526 these files are not installed anymore max_statement_time : cannot possibly work, depends on timing +implicit_commit : MDEV-10549 +lock_sync : MDEV-10548 +innodb_mysql_sync : MDEV-10548 +partition_debug_sync : MDEV-10548 diff --git a/mysql-test/include/expect_crash.inc b/mysql-test/include/expect_crash.inc new file mode 100644 index 00000000000..af8b0908104 --- /dev/null +++ b/mysql-test/include/expect_crash.inc @@ -0,0 +1,5 @@ +--let $_server_id= `SELECT @@server_id` +--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect + +# There should be a debug crash after using this .inc file +--exec echo "wait" > $_expect_file_name diff --git a/mysql-test/include/have_innodb_16k.inc b/mysql-test/include/have_innodb_16k.inc index f0198ca5ee7..0d5f021c606 100644 --- a/mysql-test/include/have_innodb_16k.inc +++ b/mysql-test/include/have_innodb_16k.inc @@ -1,4 +1,6 @@ +--disable_warnings if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value = 16384`) { --skip Test requires InnoDB with 16k Page size. } +--enable_warnings diff --git a/mysql-test/include/have_innodb_32k.inc b/mysql-test/include/have_innodb_32k.inc index 76a8d2d59a3..4f7f5454e87 100644 --- a/mysql-test/include/have_innodb_32k.inc +++ b/mysql-test/include/have_innodb_32k.inc @@ -1,6 +1,4 @@ if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value = 32768`) { - - --skip Test requires InnoDB with 32k Page size. - -} \ No newline at end of file + --skip Test requires InnoDB with 32k page size. +} diff --git a/mysql-test/include/have_innodb_4k.inc b/mysql-test/include/have_innodb_4k.inc new file mode 100644 index 00000000000..f51b8bf66b9 --- /dev/null +++ b/mysql-test/include/have_innodb_4k.inc @@ -0,0 +1,6 @@ +--disable_warnings +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value = 4096`) +{ + --skip Test requires InnoDB with 4k Page size. 
+} +--enable_warnings diff --git a/mysql-test/include/have_innodb_64k.inc b/mysql-test/include/have_innodb_64k.inc index bcb76c4f54e..6f0fadc25ac 100644 --- a/mysql-test/include/have_innodb_64k.inc +++ b/mysql-test/include/have_innodb_64k.inc @@ -1,4 +1,4 @@ if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value = 65536`) { - --skip Test requires InnoDB with 64k Page size. + --skip Test requires InnoDB with 64k page size. } diff --git a/mysql-test/include/have_innodb_8k.inc b/mysql-test/include/have_innodb_8k.inc new file mode 100644 index 00000000000..125a4e4d6af --- /dev/null +++ b/mysql-test/include/have_innodb_8k.inc @@ -0,0 +1,6 @@ +--disable_warnings +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value = 8192`) +{ + --skip Test requires InnoDB with 8k Page size. +} +--enable_warnings diff --git a/mysql-test/include/have_innodb_max_16k.inc b/mysql-test/include/have_innodb_max_16k.inc new file mode 100644 index 00000000000..f8346666299 --- /dev/null +++ b/mysql-test/include/have_innodb_max_16k.inc @@ -0,0 +1,4 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value <= 16384`) +{ + --skip Test requires InnoDB with page size not greater than 16k. +} diff --git a/mysql-test/include/have_innodb_zip.inc b/mysql-test/include/have_innodb_zip.inc new file mode 100644 index 00000000000..6af83d51304 --- /dev/null +++ b/mysql-test/include/have_innodb_zip.inc @@ -0,0 +1,4 @@ +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value <= 16384`) +{ + --skip Test with InnoDB zip requires page size not greater than 16k. 
+} diff --git a/mysql-test/include/have_numa.inc b/mysql-test/include/have_numa.inc new file mode 100644 index 00000000000..bd1c7d7a085 --- /dev/null +++ b/mysql-test/include/have_numa.inc @@ -0,0 +1,18 @@ +let $numa_support = `SELECT COUNT(VARIABLE_VALUE) = 1 FROM + INFORMATION_SCHEMA.GLOBAL_VARIABLES + WHERE VARIABLE_NAME='innodb_numa_interleave'`; + +if ( $numa_support == 0 ) +{ + --skip Test requires: NUMA must be enabled +} + +if ( $numa_support == 1 ) +{ + let $numa = `SELECT @@GLOBAL.innodb_numa_interleave = 'OFF'`; + if ( $numa == 1 ) + { + --skip Test requires: NUMA must be enabled + } +} + diff --git a/mysql-test/include/have_xtradb.combinations b/mysql-test/include/have_xtradb.combinations index 0419dc91171..3454f83cb4d 100644 --- a/mysql-test/include/have_xtradb.combinations +++ b/mysql-test/include/have_xtradb.combinations @@ -6,8 +6,8 @@ innodb-cmpmem innodb-trx innodb-sys-indexes -[xtradb] -innodb -innodb-cmpmem -innodb-trx -innodb-sys-indexes +#[xtradb] +#innodb +#innodb-cmpmem +#innodb-trx +#innodb-sys-indexes diff --git a/mysql-test/include/mtr_check.sql b/mysql-test/include/mtr_check.sql index 6b25c75276c..d47e7d322b5 100644 --- a/mysql-test/include/mtr_check.sql +++ b/mysql-test/include/mtr_check.sql @@ -32,6 +32,7 @@ BEGIN AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' + AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 0cddff81960..afad786358b 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -4456,7 +4456,14 @@ sub extract_warning_lines ($$) { qr|nnoDB: fix the corruption by dumping, dropping, and reimporting|, qr|InnoDB: the corrupt table. 
You can use CHECK|, qr|InnoDB: TABLE to scan your table for corruption|, - qr/InnoDB: See also */ + qr/InnoDB: See also */, + qr/InnoDB: Cannot open .*ib_buffer_pool.* for reading: No such file or directory*/, + qr/InnoDB: Upgrading redo log:*/, + qr|InnoDB: Starting to delete and rewrite log files.|, + qr/InnoDB: New log files created, LSN=*/, + qr|InnoDB: Creating foreign key constraint system tables.|, + qr/InnoDB: Table .*mysql.*innodb_table_stats.* not found./, + qr/InnoDB: User stopword table .* does not exist./ ); diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index 3461038f85e..d8e17419342 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -1860,8 +1860,8 @@ ALTER TABLE tm1 MODIFY COLUMN c INT NULL; affected rows: 2 info: Records: 2 Duplicates: 0 Warnings: 0 ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30); -affected rows: 2 -info: Records: 2 Duplicates: 0 Warnings: 0 +affected rows: 0 +info: Records: 0 Duplicates: 0 Warnings: 0 ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30); affected rows: 2 info: Records: 2 Duplicates: 0 Warnings: 0 diff --git a/mysql-test/r/group_min_max_innodb.result b/mysql-test/r/group_min_max_innodb.result index 77c74fbc041..9d8f8e7a26c 100644 --- a/mysql-test/r/group_min_max_innodb.result +++ b/mysql-test/r/group_min_max_innodb.result @@ -194,7 +194,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2 WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) GROUP BY c1,i1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range k2 k2 5 NULL 59 Using where; Using index +1 SIMPLE t2 range k2 k2 5 NULL 60 Using where; Using index SELECT c1, i1, max(i2) FROM t2 WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ) GROUP BY c1,i1; @@ -205,7 +205,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2 WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) GROUP BY c1,i1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range k2 k2 5 NULL 58 Using where; Using index +1 SIMPLE t2 range k2 k2 5 NULL 60 Using where; Using index SELECT c1, i1, max(i2) FROM t2 WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )) GROUP BY c1,i1; diff --git a/mysql-test/r/innodb_icp.result b/mysql-test/r/innodb_icp.result index bc1c24276ed..8a519d6415d 100644 --- a/mysql-test/r/innodb_icp.result +++ b/mysql-test/r/innodb_icp.result @@ -409,7 +409,7 @@ WHERE (pk BETWEEN 4 AND 5 OR pk < 2) AND c1 < 240 ORDER BY c1 LIMIT 1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range PRIMARY,k1 k1 5 NULL 3 Using where; Using index +1 SIMPLE t1 range PRIMARY,k1 k1 5 NULL 4 Using where; Using index DROP TABLE t1; # # diff --git a/mysql-test/r/mysqlbinlog_row_minimal.result b/mysql-test/r/mysqlbinlog_row_minimal.result index 84c0e668981..2737d61eca4 100644 --- a/mysql-test/r/mysqlbinlog_row_minimal.result +++ b/mysql-test/r/mysqlbinlog_row_minimal.result @@ -54,7 +54,7 @@ CREATE TABLE t2 (pk INT PRIMARY KEY, f1 INT, f2 INT, f3 INT, f4 INT, f5 MEDIUMIN BEGIN /*!*/; # at 809 -# server id 1 end_log_pos 865 CRC32 XXX Table_map: `test`.`t1` mapped to number 30 +# server id 1 end_log_pos 865 CRC32 XXX Table_map: `test`.`t1` mapped to number num # at 865 # server id 1 end_log_pos 934 CRC32 XXX Write_rows: table id 30 flags: STMT_END_F ### INSERT INTO `test`.`t1` @@ -79,7 +79,7 @@ COMMIT BEGIN /*!*/; # at 1049 -# server id 1 end_log_pos 1105 CRC32 XXX Table_map: `test`.`t1` mapped to number 30 +# server id 1 end_log_pos 1105 
CRC32 XXX Table_map: `test`.`t1` mapped to number num # at 1105 # server id 1 end_log_pos 1173 CRC32 XXX Write_rows: table id 30 flags: STMT_END_F ### INSERT INTO `test`.`t1` @@ -104,7 +104,7 @@ COMMIT BEGIN /*!*/; # at 1288 -# server id 1 end_log_pos 1344 CRC32 XXX Table_map: `test`.`t1` mapped to number 30 +# server id 1 end_log_pos 1344 CRC32 XXX Table_map: `test`.`t1` mapped to number num # at 1344 # server id 1 end_log_pos 1411 CRC32 XXX Write_rows: table id 30 flags: STMT_END_F ### INSERT INTO `test`.`t1` @@ -129,7 +129,7 @@ COMMIT BEGIN /*!*/; # at 1526 -# server id 1 end_log_pos 1582 CRC32 XXX Table_map: `test`.`t1` mapped to number 30 +# server id 1 end_log_pos 1582 CRC32 XXX Table_map: `test`.`t1` mapped to number num # at 1582 # server id 1 end_log_pos 1652 CRC32 XXX Write_rows: table id 30 flags: STMT_END_F ### INSERT INTO `test`.`t1` @@ -154,7 +154,7 @@ COMMIT BEGIN /*!*/; # at 1767 -# server id 1 end_log_pos 1823 CRC32 XXX Table_map: `test`.`t2` mapped to number 31 +# server id 1 end_log_pos 1823 CRC32 XXX Table_map: `test`.`t2` mapped to number num # at 1823 # server id 1 end_log_pos 1990 CRC32 XXX Write_rows: table id 31 flags: STMT_END_F ### INSERT INTO `test`.`t2` @@ -212,7 +212,7 @@ COMMIT BEGIN /*!*/; # at 2105 -# server id 1 end_log_pos 2161 CRC32 XXX Table_map: `test`.`t2` mapped to number 31 +# server id 1 end_log_pos 2161 CRC32 XXX Table_map: `test`.`t2` mapped to number num # at 2161 # server id 1 end_log_pos 2235 CRC32 XXX Update_rows: table id 31 flags: STMT_END_F ### UPDATE `test`.`t2` @@ -244,7 +244,7 @@ COMMIT BEGIN /*!*/; # at 2350 -# server id 1 end_log_pos 2406 CRC32 XXX Table_map: `test`.`t1` mapped to number 30 +# server id 1 end_log_pos 2406 CRC32 XXX Table_map: `test`.`t1` mapped to number num # at 2406 # server id 1 end_log_pos 2460 CRC32 XXX Delete_rows: table id 30 flags: STMT_END_F ### DELETE FROM `test`.`t1` @@ -270,7 +270,7 @@ COMMIT BEGIN /*!*/; # at 2575 -# server id 1 end_log_pos 2631 CRC32 XXX Table_map: `test`.`t2` mapped to number 31 +# server id 1 end_log_pos 2631 CRC32 XXX Table_map: `test`.`t2` mapped to number num # at 2631 # server id 1 end_log_pos 2685 CRC32 XXX Delete_rows: table id 31 flags: STMT_END_F ### DELETE FROM `test`.`t2` diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result index b0e89b7f02d..818316fb45c 100644 --- a/mysql-test/r/mysqld--help.result +++ b/mysql-test/r/mysqld--help.result @@ -108,9 +108,6 @@ The following options may be given as the first argument: --bulk-insert-buffer-size=# Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread! - --changed-page-bitmaps[=name] - Enable or disable CHANGED_PAGE_BITMAPS plugin. One of: - ON, OFF, FORCE (don't start if the plugin fails to load). --character-set-client-handshake Don't ignore client side character set value sent during handshake. 
@@ -1153,7 +1150,6 @@ binlog-row-event-max-size 1024 binlog-row-image FULL binlog-stmt-cache-size 32768 bulk-insert-buffer-size 8388608 -changed-page-bitmaps ON character-set-client-handshake TRUE character-set-filesystem binary character-sets-dir MYSQL_CHARSETSDIR/ diff --git a/mysql-test/r/order_by_optimizer_innodb.result b/mysql-test/r/order_by_optimizer_innodb.result index f3167db4b9a..0b62ba997d8 100644 --- a/mysql-test/r/order_by_optimizer_innodb.result +++ b/mysql-test/r/order_by_optimizer_innodb.result @@ -40,11 +40,11 @@ pk1 count(*) # The following should use range(ux_pk1_fd5), two key parts (key_len=5+8=13) EXPLAIN SELECT * FROM t2 USE INDEX(ux_pk1_fd5) WHERE pk1=9 AND fd5 < 500 ORDER BY fd5 DESC LIMIT 10; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range ux_pk1_fd5 ux_pk1_fd5 13 NULL 137 Using where +1 SIMPLE t2 range ux_pk1_fd5 ux_pk1_fd5 13 NULL 138 Using where # This also must use range, not ref. key_len must be 13 EXPLAIN SELECT * FROM t2 WHERE pk1=9 AND fd5 < 500 ORDER BY fd5 DESC LIMIT 10; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range PRIMARY,ux_pk1_fd5 ux_pk1_fd5 13 NULL 137 Using where +1 SIMPLE t2 range PRIMARY,ux_pk1_fd5 ux_pk1_fd5 13 NULL 138 Using where drop table t0,t1, t2; # # MDEV-6814: Server crashes in calculate_key_len on query with ORDER BY diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result index c9568f7b07f..d1ec12efe31 100644 --- a/mysql-test/r/partition_innodb.result +++ b/mysql-test/r/partition_innodb.result @@ -385,33 +385,33 @@ DROP TABLE t1; create table t1 (a int) engine=innodb partition by hash(a) ; show table status like 't1'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 2 8192 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 2 8192 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned drop table t1; create table t1 (a int) engine = innodb partition by key (a); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 2 8192 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 2 8192 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned insert into t1 values (0), (1), (2), (3); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 4 4096 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 4 4096 16384 0 0 # NULL # NULL NULL latin1_swedish_ci NULL partitioned drop table t1; create table t1 (a int auto_increment primary key) engine = innodb partition by key (a); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 2 8192 16384 0 0 # 1 # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 2 8192 16384 0 0 # 1 # NULL NULL latin1_swedish_ci NULL partitioned insert into t1 values (NULL), (NULL), (NULL), 
(NULL); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 4 4096 16384 0 0 # 5 # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 4 4096 16384 0 0 # 5 # NULL NULL latin1_swedish_ci NULL partitioned insert into t1 values (NULL), (NULL), (NULL), (NULL); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 InnoDB 10 Compact 8 2048 16384 0 0 # 9 # NULL NULL latin1_swedish_ci NULL partitioned +t1 InnoDB 10 Dynamic 8 2048 16384 0 0 # 9 # NULL NULL latin1_swedish_ci NULL partitioned drop table t1; create table t1 (a int) partition by key (a) diff --git a/mysql-test/r/partition_innodb_plugin.result b/mysql-test/r/partition_innodb_plugin.result index d53d2edb581..35b1e3142b4 100644 --- a/mysql-test/r/partition_innodb_plugin.result +++ b/mysql-test/r/partition_innodb_plugin.result @@ -42,6 +42,8 @@ SET @old_innodb_strict_mode = @@global.innodb_strict_mode; SET @@global.innodb_file_format = Barracuda, @@global.innodb_file_per_table = ON, @@global.innodb_strict_mode = ON; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html connect con1,localhost,root,,; CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, @@ -104,6 +106,8 @@ disconnect con2; connection default; SET @@global.innodb_strict_mode = @old_innodb_strict_mode; SET @@global.innodb_file_format = @old_innodb_file_format; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET @@global.innodb_file_per_table = @old_innodb_file_per_table; SET NAMES utf8; CREATE TABLE `t``\""e` (a INT, PRIMARY KEY (a)) @@ -143,12 +147,12 @@ ERROR 40001: Deadlock found when trying to get lock; try restarting transaction # First table reported in 'SHOW ENGINE InnoDB STATUS' SHOW ENGINE InnoDB STATUS; Type Name Status -InnoDB index `PRIMARY` of table `test`.`t``\""e` /* Partition `p0``\""e`, Subpartition `sp0``\""e` */ +InnoDB index PRIMARY of table `test`.`t``\""e` /* Partition `p0``\""e`, Subpartition `sp0``\""e` */ set @old_sql_mode = @@sql_mode; set sql_mode = 'ANSI_QUOTES'; SHOW ENGINE InnoDB STATUS; Type Name Status -InnoDB index `PRIMARY` of table `test`.`t``\""e` /* Partition `p0``\""e`, Subpartition `sp0``\""e` */ +InnoDB index PRIMARY of table `test`.`t``\""e` /* Partition `p0``\""e`, Subpartition `sp0``\""e` */ set @@sql_mode = @old_sql_mode; connection con1; ROLLBACK; diff --git a/mysql-test/r/range_vs_index_merge_innodb.result b/mysql-test/r/range_vs_index_merge_innodb.result index 8428936d25f..19feae46575 100644 --- a/mysql-test/r/range_vs_index_merge_innodb.result +++ b/mysql-test/r/range_vs_index_merge_innodb.result @@ -57,7 +57,7 @@ WHERE Population < 200000 AND Name LIKE 'P%' AND (Population > 300000 OR Name LIKE 'T%') AND (Population < 100000 OR Name LIKE 'Pa%'); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population,Name Name 35 NULL 235 Using index condition; Using where +1 SIMPLE City range Population,Name Name 35 NULL 236 Using index condition; Using where EXPLAIN SELECT * FROM City WHERE Population > 100000 AND Name LIKE 'Aba%' OR @@ -65,34 +65,34 @@ Country IN ('CAN', 'ARG') AND ID < 3800 OR Country < 'U' AND Name LIKE 'Zhu%' OR ID BETWEEN 3800 AND 3810; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,PRIMARY 35,7,4 NULL 123 Using sort_union(Name,Country,PRIMARY); Using where +1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,PRIMARY 35,7,4 NULL 125 Using sort_union(Name,Country,PRIMARY); Using where EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 115000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 458 Using index condition +1 SIMPLE City range Population Population 4 NULL 459 Using index condition EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 102000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 38 Using index condition +1 SIMPLE City range Population Population 4 NULL 39 Using index condition EXPLAIN SELECT * FROM City WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'F')); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Country,Name Name,Country 35,3 NULL 213 Using sort_union(Name,Country); Using where +1 SIMPLE City index_merge Country,Name Name,Country 35,3 NULL 215 Using sort_union(Name,Country); Using where EXPLAIN SELECT * FROM City WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'F')) AND (Population > 101000 AND Population < 115000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Population,Country,Name Name,Country 35,3 NULL 213 Using sort_union(Name,Country); Using where +1 SIMPLE City index_merge 
Population,Country,Name Name,Country 35,3 NULL 215 Using sort_union(Name,Country); Using where EXPLAIN SELECT * FROM City WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'F')) AND (Population > 101000 AND Population < 102000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population,Country,Name Population 4 NULL 38 Using index condition; Using where +1 SIMPLE City range Population,Country,Name Population 4 NULL 39 Using index condition; Using where SELECT * FROM City USE INDEX () WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'F')) AND (Population > 101000 AND Population < 115000); @@ -176,11 +176,11 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE (Name < 'Bb'); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 373 Using index condition +1 SIMPLE City range Name Name 35 NULL 374 Using index condition EXPLAIN SELECT * FROM City WHERE (Country > 'A' AND Country < 'B'); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Country Country 3 NULL 106 Using index condition +1 SIMPLE City range Country Country 3 NULL 107 Using index condition EXPLAIN SELECT * FROM City WHERE (Name BETWEEN 'P' AND 'Pb'); id select_type table type possible_keys key key_len ref rows Extra @@ -188,15 +188,15 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE (Name BETWEEN 'P' AND 'S'); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 384 Using index condition +1 SIMPLE City range Name Name 35 NULL 385 Using index condition EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 110000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 327 Using index condition +1 SIMPLE City range Population Population 4 NULL 328 Using index condition EXPLAIN SELECT * FROM City WHERE (Population > 103000 AND Population < 104000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 36 Using index condition +1 SIMPLE City range Population Population 4 NULL 37 Using index condition EXPLAIN SELECT * FROM City WHERE (Name < 'Ac' AND (Country > 'A' AND Country < 'B')) OR @@ -208,19 +208,19 @@ SELECT * FROM City WHERE (Name < 'Ac' AND (Country > 'A' AND Country < 'B')) OR (Name BETWEEN 'P' AND 'S' AND (Population > 103000 AND Population < 104000)); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL 59 Using sort_union(Name,Population); Using where +1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL 60 Using sort_union(Name,Population); Using where EXPLAIN SELECT * FROM City WHERE (Name < 'Bb' AND (Country > 'A' AND Country < 'B')) OR (Name BETWEEN 'P' AND 'Pb' AND (Population > 101000 AND Population < 110000)); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Population,Country,Name Country,Name 3,35 NULL 177 Using sort_union(Country,Name); Using where +1 SIMPLE City index_merge Population,Country,Name Country,Name 3,35 NULL 178 Using sort_union(Country,Name); Using where EXPLAIN SELECT * FROM City WHERE (Name < 'Bb' AND (Country > 'A' AND Country < 'B')) OR (Name BETWEEN 'P' AND 'S' AND (Population > 
103000 AND Population < 104000)); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Population,Country,Name Country,Population 3,4 NULL 142 Using sort_union(Country,Population); Using where +1 SIMPLE City index_merge Population,Country,Name Country,Population 3,4 NULL 144 Using sort_union(Country,Population); Using where SELECT * FROM City USE INDEX () WHERE (Name < 'Ac' AND (Country > 'A' AND Country < 'B')) OR (Name BETWEEN 'P' AND 'Pb' AND (Population > 101000 AND Population < 110000)); @@ -336,15 +336,15 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE (ID < 600) OR (ID BETWEEN 900 AND 1500); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range PRIMARY PRIMARY 4 NULL 1198 Using where +1 SIMPLE City range PRIMARY PRIMARY 4 NULL 1200 Using where EXPLAIN SELECT * FROM City WHERE Country > 'A' AND Country < 'ARG'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Country Country 3 NULL 19 Using index condition +1 SIMPLE City range Country Country 3 NULL 20 Using index condition EXPLAIN SELECT * FROM City WHERE Name LIKE 'H%' OR Name LIKE 'P%' ; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 394 Using index condition; Using where +1 SIMPLE City range Name Name 35 NULL 395 Using index condition; Using where EXPLAIN SELECT * FROM City WHERE Name LIKE 'Ha%' OR Name LIKE 'Pa%' ; id select_type table type possible_keys key key_len ref rows Extra @@ -362,7 +362,7 @@ WHERE ((ID < 800) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG'))) OR ((ID BETWEEN 900 AND 1500) AND (Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000))); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,PRIMARY 39,3,4 NULL 680 Using sort_union(Name,Country,PRIMARY); Using where +1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,PRIMARY 39,3,4 NULL 683 Using sort_union(Name,Country,PRIMARY); Using where EXPLAIN SELECT * FROM City WHERE ((ID < 200) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG'))) @@ -577,11 +577,11 @@ ID Name Country Population EXPLAIN SELECT * FROM City WHERE Population > 101000 AND Population < 102000; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 38 Using index condition +1 SIMPLE City range Population Population 4 NULL 39 Using index condition EXPLAIN SELECT * FROM City WHERE Population > 101000 AND Population < 110000; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 327 Using index condition +1 SIMPLE City range Population Population 4 NULL 328 Using index condition EXPLAIN SELECT * FROM City WHERE Country < 'C'; id select_type table type possible_keys key key_len ref rows Extra @@ -593,7 +593,7 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE Name BETWEEN 'P' AND 'S'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 384 Using index condition +1 SIMPLE City range Name Name 35 NULL 385 Using index condition EXPLAIN SELECT * FROM City WHERE Name BETWEEN 'P' AND 'Pb'; id select_type table type possible_keys key key_len ref rows Extra @@ -601,7 +601,7 @@ id select_type table type possible_keys key 
key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE ID BETWEEN 3400 AND 3800; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range PRIMARY PRIMARY 4 NULL 400 Using where +1 SIMPLE City range PRIMARY PRIMARY 4 NULL 401 Using where EXPLAIN SELECT * FROM City WHERE ID BETWEEN 3790 AND 3800; id select_type table type possible_keys key key_len ref rows Extra @@ -609,7 +609,7 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE Name LIKE 'P%'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 235 Using index condition +1 SIMPLE City range Name Name 35 NULL 236 Using index condition EXPLAIN SELECT * FROM City WHERE ((Population > 101000 AND Population < 102000) AND @@ -617,7 +617,7 @@ WHERE ((Population > 101000 AND Population < 102000) AND ((ID BETWEEN 3400 AND 3800) AND (Country < 'AGO' OR Name LIKE 'Pa%')); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge PRIMARY,Population,Country,Name Population,PRIMARY 4,4 NULL 438 Using sort_union(Population,PRIMARY); Using where +1 SIMPLE City index_merge PRIMARY,Population,Country,Name Population,PRIMARY 4,4 NULL 440 Using sort_union(Population,PRIMARY); Using where EXPLAIN SELECT * FROM City WHERE ((Population > 101000 AND Population < 110000) AND @@ -684,11 +684,11 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE Name LIKE 'P%'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Name Name 35 NULL 235 Using index condition +1 SIMPLE City range Name Name 35 NULL 236 Using index condition EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 103000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 80 Using index condition +1 SIMPLE City range Population Population 4 NULL 81 Using index condition EXPLAIN SELECT * FROM City WHERE Country='USA'; id select_type table type possible_keys key key_len ref rows Extra @@ -702,7 +702,7 @@ SELECT * FROM City WHERE ((Population > 101000 AND Population < 103000) OR Name LIKE 'Pas%') AND Country='USA'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge Population,Country,Name,CountryPopulation CountryPopulation,Name 7,35 NULL 17 Using sort_union(CountryPopulation,Name); Using where +1 SIMPLE City index_merge Population,Country,Name,CountryPopulation CountryPopulation,Name 7,35 NULL 18 Using sort_union(CountryPopulation,Name); Using where EXPLAIN SELECT * FROM City WHERE ((Population > 101000 AND Population < 103000) OR Name LIKE 'P%') @@ -777,7 +777,7 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE ID BETWEEN 3500 AND 3800; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range PRIMARY PRIMARY 4 NULL 300 Using where +1 SIMPLE City range PRIMARY PRIMARY 4 NULL 301 Using where EXPLAIN SELECT * FROM City WHERE ID BETWEEN 4000 AND 4300; id select_type table type possible_keys key key_len ref rows Extra @@ -789,11 +789,11 @@ id select_type table type possible_keys key key_len ref rows Extra EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 102000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 38 Using index condition +1 SIMPLE City range 
Population Population 4 NULL 39 Using index condition EXPLAIN SELECT * FROM City WHERE (Population > 101000 AND Population < 103000); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City range Population Population 4 NULL 80 Using index condition +1 SIMPLE City range Population Population 4 NULL 81 Using index condition EXPLAIN SELECT * FROM City WHERE Name LIKE 'Pa%'; id select_type table type possible_keys key key_len ref rows Extra @@ -806,7 +806,7 @@ WHERE ((Population > 101000 AND Population < 102000) OR ID BETWEEN 3790 AND 3800) AND Country='USA' AND (Name LIKE 'Pa%' OR ID BETWEEN 4025 AND 4035); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryPopulation,PRIMARY 7,4 NULL 13 Using sort_union(CountryPopulation,PRIMARY); Using where +1 SIMPLE City index_merge PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryPopulation,PRIMARY 7,4 NULL 14 Using sort_union(CountryPopulation,PRIMARY); Using where EXPLAIN SELECT * FROM City WHERE ((Population > 101000 AND Population < 103000) OR @@ -869,7 +869,7 @@ WHERE ((Population > 101000 and Population < 102000) OR ID BETWEEN 3790 AND 3800) AND Country='USA' OR (Name LIKE 'Pa%' OR ID BETWEEN 250 AND 260) AND Country='BRA'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE City index_merge PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryPopulation,CountryName,PRIMARY 7,38,4 NULL 35 Using sort_union(CountryPopulation,CountryName,PRIMARY); Using where +1 SIMPLE City index_merge PRIMARY,Population,Country,Name,CountryPopulation,CountryName CountryPopulation,CountryName,PRIMARY 7,38,4 NULL 36 Using sort_union(CountryPopulation,CountryName,PRIMARY); Using where SELECT * FROM City USE INDEX () WHERE ((Population > 101000 and Population < 102000) OR ID BETWEEN 3790 AND 3800) AND Country='USA' @@ -1445,7 +1445,7 @@ explain select * from t1 where (home_state = 'ia' or work_state='ia') and account_id = 1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index_merge account_id,user_home_state_indx,user_work_state_indx user_home_state_indx,user_work_state_indx 3,3 NULL 10 Using union(user_home_state_indx,user_work_state_indx); Using where +1 SIMPLE t1 index_merge account_id,user_home_state_indx,user_work_state_indx user_home_state_indx,user_work_state_indx 3,3 NULL 12 Using union(user_home_state_indx,user_work_state_indx); Using where drop table t1; CREATE TABLE t1 ( c1 int(11) NOT NULL auto_increment, diff --git a/mysql-test/r/row-checksum-old.result b/mysql-test/r/row-checksum-old.result index ef523463860..5789fc64c68 100644 --- a/mysql-test/r/row-checksum-old.result +++ b/mysql-test/r/row-checksum-old.result @@ -73,7 +73,7 @@ test.t1 4108368782 drop table if exists t1; create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed; Warnings: -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. 
insert into t1 values(null, null), (1, "hello"); checksum table t1; Table Checksum diff --git a/mysql-test/r/row-checksum.result b/mysql-test/r/row-checksum.result index fb8a1260a1d..0fbebba073b 100644 --- a/mysql-test/r/row-checksum.result +++ b/mysql-test/r/row-checksum.result @@ -73,7 +73,7 @@ test.t1 3885665021 drop table if exists t1; create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed; Warnings: -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. insert into t1 values(null, null), (1, "hello"); checksum table t1; Table Checksum diff --git a/mysql-test/r/selectivity_innodb.result b/mysql-test/r/selectivity_innodb.result index daf28073cf1..2c1913f0929 100644 --- a/mysql-test/r/selectivity_innodb.result +++ b/mysql-test/r/selectivity_innodb.result @@ -144,9 +144,9 @@ order by s_suppkey; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY supplier index PRIMARY PRIMARY 4 NULL 10 100.00 1 PRIMARY ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where -3 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 228 100.00 Using where; Using temporary; Using filesort -2 SUBQUERY ALL NULL NULL NULL NULL 228 100.00 -4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 228 100.00 Using where; Using temporary; Using filesort +3 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort +2 SUBQUERY ALL NULL NULL NULL NULL 229 100.00 +4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort Warnings: Note 1003 select `dbt3_s001`.`supplier`.`s_suppkey` AS `s_suppkey`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`revenue0`.`total_revenue` AS `total_revenue` from `dbt3_s001`.`supplier` join `dbt3_s001`.`revenue0` where ((`revenue0`.`supplier_no` = `dbt3_s001`.`supplier`.`s_suppkey`) and (`revenue0`.`total_revenue` = (select max(`revenue0`.`total_revenue`) from `dbt3_s001`.`revenue0`))) order by `dbt3_s001`.`supplier`.`s_suppkey` select s_suppkey, s_name, s_address, s_phone, total_revenue @@ -165,9 +165,9 @@ order by s_suppkey; id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY supplier index PRIMARY PRIMARY 4 NULL 10 100.00 1 PRIMARY ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where -3 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 228 100.00 Using where; Using temporary; Using filesort -2 SUBQUERY ALL NULL NULL NULL NULL 227 100.00 -4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 228 100.00 Using where; Using temporary; Using filesort +3 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort +2 SUBQUERY ALL NULL NULL NULL NULL 228 100.00 +4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 229 100.00 Using where; Using temporary; Using filesort Warnings: Note 1003 select `dbt3_s001`.`supplier`.`s_suppkey` AS `s_suppkey`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`revenue0`.`total_revenue` AS `total_revenue` from `dbt3_s001`.`supplier` join `dbt3_s001`.`revenue0` where ((`revenue0`.`supplier_no` = `dbt3_s001`.`supplier`.`s_suppkey`) and (`revenue0`.`total_revenue` = (select max(`revenue0`.`total_revenue`) from `dbt3_s001`.`revenue0`))) 
order by `dbt3_s001`.`supplier`.`s_suppkey` select s_suppkey, s_name, s_address, s_phone, total_revenue @@ -544,7 +544,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 4.17 Using where 2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where -4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.37 Using where +4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where Warnings: Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2 Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2 @@ -600,7 +600,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.03 Using where 2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where -4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.37 Using where +4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where Warnings: Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2 Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2 @@ -656,7 +656,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.81 Using where 2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where -4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.37 Using where +4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where Warnings: Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2 Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2 @@ -712,7 +712,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 PRIMARY eq_ref distinct_key distinct_key 4 func 1 100.00 2 MATERIALIZED part ALL PRIMARY NULL NULL NULL 200 7.81 Using where 2 MATERIALIZED partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey PRIMARY 4 dbt3_s001.part.p_partkey 3 100.00 Using where -4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.37 Using where +4 DEPENDENT SUBQUERY lineitem ref i_l_shipdate,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey 
i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.partsupp.ps_suppkey 8 14.40 Using where Warnings: Note 1276 Field or reference 'dbt3_s001.partsupp.ps_partkey' of SELECT #4 was resolved in SELECT #2 Note 1276 Field or reference 'dbt3_s001.partsupp.ps_suppkey' of SELECT #4 was resolved in SELECT #2 diff --git a/mysql-test/r/stat_tables_innodb.result b/mysql-test/r/stat_tables_innodb.result index 0e866755532..42443bfcb72 100644 --- a/mysql-test/r/stat_tables_innodb.result +++ b/mysql-test/r/stat_tables_innodb.result @@ -67,7 +67,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01' group by n_name order by revenue desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 211 Using where; Using temporary; Using filesort +1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 213 Using where; Using temporary; Using filesort 1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where 1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1 1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index @@ -198,7 +198,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01' group by n_name order by revenue desc; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 211 Using where; Using temporary; Using filesort +1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 213 Using where; Using temporary; Using filesort 1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where 1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1 1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index diff --git a/mysql-test/r/type_bit_innodb.result b/mysql-test/r/type_bit_innodb.result index 80fc942e77c..acb3c311cf5 100644 --- a/mysql-test/r/type_bit_innodb.result +++ b/mysql-test/r/type_bit_innodb.result @@ -256,7 +256,7 @@ a+0 b+0 127 403 explain select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range a a 2 NULL 8 Using where; Using index; Using filesort +1 SIMPLE t1 range a a 2 NULL 9 Using where; Using index; Using filesort select a+0, b+0 from t1 where a > 40 and a < 70 order by 2; a+0 b+0 57 135 diff --git a/mysql-test/suite/encryption/disabled.def b/mysql-test/suite/encryption/disabled.def index 8c0d47983fd..ae9359f4a1b 100644 --- a/mysql-test/suite/encryption/disabled.def +++ b/mysql-test/suite/encryption/disabled.def @@ -14,4 +14,5 @@ innodb_scrub : MDEV-8139 innodb_scrub_compressed : MDEV-8139 innodb_scrub_background : MDEV-8139 innodb_encryption-page-compression : Fails with lost connection at line 156 +innochecksum : Waiting for merge diff --git a/mysql-test/suite/encryption/r/debug_key_management.result b/mysql-test/suite/encryption/r/debug_key_management.result index 8793e6ba363..e185740aa25 100644 --- a/mysql-test/suite/encryption/r/debug_key_management.result +++ b/mysql-test/suite/encryption/r/debug_key_management.result @@ -9,13 +9,13 @@ innodb_encryption_threads 4 select space,name,current_key_version from information_schema.innodb_tablespaces_encryption order by space; space name current_key_version 0 NULL 1 -1 
mysql/innodb_table_stats 1 -2 mysql/innodb_index_stats 1 +2 mysql/innodb_table_stats 1 +3 mysql/innodb_index_stats 1 set global debug_key_management_version=10; select space,name,current_key_version from information_schema.innodb_tablespaces_encryption order by space; space name current_key_version 0 NULL 10 -1 mysql/innodb_table_stats 10 -2 mysql/innodb_index_stats 10 +2 mysql/innodb_table_stats 10 +3 mysql/innodb_index_stats 10 set global innodb_encrypt_tables=OFF; set global debug_key_management_version=1; diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change.result b/mysql-test/suite/encryption/r/innodb-bad-key-change.result index cf9791887cc..84c8c2f6773 100644 --- a/mysql-test/suite/encryption/r/innodb-bad-key-change.result +++ b/mysql-test/suite/encryption/r/innodb-bad-key-change.result @@ -8,9 +8,12 @@ call mtr.add_suppression(".*InnoDB: Cannot open table test/.* from the internal call mtr.add_suppression("InnoDB: .ibd file is missing for table test/.*"); call mtr.add_suppression("mysqld: File .*"); call mtr.add_suppression("InnoDB: Tablespace id .* is encrypted but encryption service or used key_id .* is not available. Can't continue opening tablespace."); +call mtr.add_suppression("InnoDB: InnoDB: Page may be an index page where index id is .*"); # Start server with keys2.txt SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=2; INSERT INTO t1 VALUES ('foobar'); @@ -36,10 +39,12 @@ SELECT * FROM t1; ERROR HY000: Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB SHOW WARNINGS; Level Code Message -Warning 1812 Tablespace is missing for table 'test/t1' +Warning 192 Table test/t1 in tablespace 8 is encrypted but encryption service or used key_id is not available. Can't continue reading table. Warning 192 Table test/t1 is encrypted but encryption service or used key_id 2 is not available. Can't continue reading table. Error 1296 Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB DROP TABLE t1; +Warnings: +Warning 192 Table in tablespace 8 encrypted.However key management plugin or used key_id 1 is not found or used encryption algorithm or method does not match. Can't continue opening the table. # Start server with keys.txt CREATE TABLE t2 (c VARCHAR(8), id int not null primary key, b int, key(b)) ENGINE=InnoDB ENCRYPTED=YES; INSERT INTO t2 VALUES ('foobar',1,2); diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result index 68d8552a0a3..3449e63acb9 100644 --- a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result +++ b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result @@ -1,5 +1,8 @@ -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded"); +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. 
Please refer to .* for how to resolve the issue."); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 1; CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(255)) ENGINE=InnoDB PAGE_COMPRESSED=1 ENCRYPTED=YES ENCRYPTION_KEY_ID=4; @@ -24,6 +27,8 @@ UNLOCK TABLES; NOT FOUND /foobar/ in t1.ibd ALTER TABLE t1 DISCARD TABLESPACE; SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; # List after t1 DISCARD t1.frm @@ -42,3 +47,5 @@ ERROR HY000: Tablespace has been discarded for table 't1' # t1 yes on expecting NOT FOUND NOT FOUND /foobar/ in t1.ibd DROP TABLE t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result index 531ba4063a4..3ced393f38b 100644 --- a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result +++ b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result @@ -5,16 +5,22 @@ call mtr.add_suppression(".*InnoDB: Cannot open table test/.* from the internal call mtr.add_suppression("InnoDB: .ibd file is missing for table test/.*"); call mtr.add_suppression("Couldn't load plugins from 'file_key_management*"); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB ENCRYPTION_KEY_ID=4; INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CHECK TABLE t1; Table Op Msg_type Msg_text -test.t1 check Warning Table test/t1 in tablespace 4 is encrypted but encryption service or used key_id is not available. Can't continue reading table. +test.t1 check Warning Table test/t1 in tablespace 7 is encrypted but encryption service or used key_id is not available. Can't continue reading table. test.t1 check Warning Table test/t1 is encrypted but encryption service or used key_id is not available. Can't continue checking table. test.t1 check error Corrupt SHOW WARNINGS; Level Code Message DROP TABLE t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change5.result b/mysql-test/suite/encryption/r/innodb-bad-key-change5.result index 11130a7a20b..9d8b1ddd23b 100644 --- a/mysql-test/suite/encryption/r/innodb-bad-key-change5.result +++ b/mysql-test/suite/encryption/r/innodb-bad-key-change5.result @@ -5,10 +5,14 @@ call mtr.add_suppression(".*InnoDB: Cannot open table test/.* from the internal call mtr.add_suppression("InnoDB: .ibd file is missing for table test/.*"); call mtr.add_suppression("Couldn't load plugins from 'file_key_management*"); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB ENCRYPTION_KEY_ID=4; INSERT INTO t1 VALUES (1,'foo'),(2,'bar'); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; OPTIMIZE TABLE t1; Table Op Msg_type Msg_text @@ -26,3 +30,5 @@ Level Code Message Warning 192 Table test/t1 is encrypted but encryption service or used key_id is not available. Can't continue reading table. Error 1296 Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB DROP TABLE t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-discard-import.result b/mysql-test/suite/encryption/r/innodb-discard-import.result index 195b82f7488..dc55c0a77a6 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import.result @@ -1,5 +1,8 @@ -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded"); +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; SET GLOBAL innodb_compression_algorithm = 1; create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes encryption_key_id=4; @@ -72,6 +75,8 @@ ALTER TABLE t2 DISCARD TABLESPACE; ALTER TABLE t3 DISCARD TABLESPACE; ALTER TABLE t4 DISCARD TABLESPACE; SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; SET GLOBAL innodb_compression_algorithm = 1; # List after t1 DISCARD @@ -129,6 +134,8 @@ COUNT(*) 2000 flush data to disk SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; SET GLOBAL innodb_compression_algorithm = 1; # tables should be still either encrypted and/or compressed @@ -142,3 +149,5 @@ NOT FOUND /tmpres/ in t3.ibd NOT FOUND /mysql/ in t4.ibd DROP PROCEDURE innodb_insert_proc; DROP TABLE t1,t2,t3,t4; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-encryption-alter.result b/mysql-test/suite/encryption/r/innodb-encryption-alter.result index 5869c5d7000..2003df0f4f4 100644 --- a/mysql-test/suite/encryption/r/innodb-encryption-alter.result +++ b/mysql-test/suite/encryption/r/innodb-encryption-alter.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; SET GLOBAL innodb_encrypt_tables = ON; SET GLOBAL innodb_encryption_threads = 4; @@ -51,3 +53,5 @@ Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB set innodb_default_encryption_key_id = 1; drop table t1,t2; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-encryption-disable.result b/mysql-test/suite/encryption/r/innodb-encryption-disable.result index 63ff1dcda71..62b233c1c93 100644 --- a/mysql-test/suite/encryption/r/innodb-encryption-disable.result +++ b/mysql-test/suite/encryption/r/innodb-encryption-disable.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; call mtr.add_suppression("InnoDB: Block in space_id .* in file test/.* encrypted"); call mtr.add_suppression("InnoDB: However key management plugin or used key_id 1 is not found or used encryption algorithm or method does not match."); @@ -23,10 +25,24 @@ CREATE TABLE `t1` ( `charcol3` varchar(128) DEFAULT NULL ) ENGINE=InnoDB; insert into t1 values (1,2,'maria','db','encryption'); +select * from t1; +intcol1 intcol2 charcol1 charcol2 charcol3 +1 2 maria db encryption +select * from t5; +intcol1 intcol2 charcol1 charcol2 charcol3 +1 2 maria db encryption alter table t1 encrypted='yes' `encryption_key_id`=1; select * from t1; +intcol1 intcol2 charcol1 charcol2 charcol3 +1 2 maria db encryption +select * from t5; +intcol1 intcol2 charcol1 charcol2 charcol3 +1 2 maria db encryption +select * from t1; ERROR HY000: Got error 192 'Table encrypted but decryption failed. 
This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB select * from t5; ERROR HY000: Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB drop table t1; drop table t5; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-log-encrypt.result b/mysql-test/suite/encryption/r/innodb-log-encrypt.result index 655e3023f7a..fb62292e1f3 100644 --- a/mysql-test/suite/encryption/r/innodb-log-encrypt.result +++ b/mysql-test/suite/encryption/r/innodb-log-encrypt.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table t1(c1 bigint not null, b char(200), c varchar(200)) engine=innodb encrypted=yes encryption_key_id=1; show warnings; @@ -53,3 +55,5 @@ FOUND /publicmessage/ in ib_logfile0 NOT FOUND /publicmessage/ in ib_logfile1 drop procedure innodb_insert_proc; drop table t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption.result b/mysql-test/suite/encryption/r/innodb-page_encryption.result index c4814983af4..051fd602db5 100644 --- a/mysql-test/suite/encryption/r/innodb-page_encryption.result +++ b/mysql-test/suite/encryption/r/innodb-page_encryption.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; show warnings; @@ -121,6 +123,8 @@ SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_ variable_value >= 0 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; update innodb_normal set c1 = c1 +1; update innodb_compact set c1 = c1 + 1; @@ -198,6 +202,8 @@ innodb_redundant CREATE TABLE `innodb_redundant` ( `b` char(200) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; show create table innodb_compact; Table Create Table @@ -275,3 +281,5 @@ drop table innodb_compressed; drop table innodb_dynamic; drop table innodb_redundant; drop table innodb_defkey; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result b/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result index f7ffc77fd66..6efefb23b87 100644 --- a/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result +++ b/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 1; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb page_compressed=1; @@ -75,6 +77,8 @@ variable_value >= 0 SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_decompressed'; variable_value >= 0 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 1; update innodb_normal set c1 = c1 + 1; @@ -129,6 +133,8 @@ innodb_dynamic CREATE TABLE `innodb_dynamic` ( `b` char(200) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; show create table innodb_normal; Table Create Table @@ -182,3 +188,5 @@ drop procedure innodb_insert_proc; drop table innodb_normal; drop table innodb_compact; drop table innodb_dynamic; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result b/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result index 92130da19e9..672202de774 100644 --- a/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result +++ b/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result @@ -2,6 +2,8 @@ call mtr.add_suppression("KeyID 0 not found or with error. Check the key and the call mtr.add_suppression("Disabling redo log encryp*"); call mtr.add_suppression("InnoDB: Redo log crypto: Can't initialize to key version*"); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; show warnings; @@ -100,6 +102,8 @@ SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_ variable_value >= 0 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; update innodb_normal set c1 = c1 +1; update innodb_compact set c1 = c1 + 1; @@ -169,6 +173,8 @@ innodb_redundant CREATE TABLE `innodb_redundant` ( `b` char(200) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; show create table innodb_compact; Table Create Table @@ -247,3 +253,5 @@ pk 1 2 DROP TABLE t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result index f8c59b7bcc3..5bb3b2bc41e 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result +++ b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result @@ -1,5 +1,8 @@ -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded."); +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB encrypted=yes; CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB; @@ -50,6 +53,8 @@ t2.ibd t3.frm t3.ibd SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; ALTER TABLE t1 DISCARD TABLESPACE; ALTER TABLE t2 DISCARD TABLESPACE; @@ -61,6 +66,8 @@ t3.frm # Restarting server # Done restarting server SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; # Tablespaces should be still encrypted # t1 yes on expecting NOT FOUND @@ -147,3 +154,5 @@ NOT FOUND /temp/ in t2.ibd NOT FOUND /barfoo/ in t3.ibd DROP PROCEDURE innodb_insert_proc; DROP TABLE t1, t2, t3; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_encryption_filekeys.result b/mysql-test/suite/encryption/r/innodb_encryption_filekeys.result index ab958004eab..576b44fe897 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption_filekeys.result +++ b/mysql-test/suite/encryption/r/innodb_encryption_filekeys.result @@ -1,5 +1,7 @@ call mtr.add_suppression("trying to do an operation on a dropped tablespace .*"); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; SET GLOBAL innodb_encrypt_tables = OFF; SET GLOBAL innodb_encryption_threads = 4; @@ -63,5 +65,7 @@ COUNT(1) SELECT COUNT(1) FROM t5; COUNT(1) 400 +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html drop table t1,t2,t3,t4, t5; set GLOBAL innodb_default_encryption_key_id=1; diff --git a/mysql-test/suite/encryption/r/innodb_encryption_is.result b/mysql-test/suite/encryption/r/innodb_encryption_is.result index 5bbcbbe6bb6..591c5a84ccc 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption_is.result +++ b/mysql-test/suite/encryption/r/innodb_encryption_is.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=1; CREATE TABLE t2 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=2; @@ -12,3 +14,5 @@ NAME ENCRYPTION_SCHEME MIN_KEY_VERSION CURRENT_KEY_VERSION CURRENT_KEY_ID test/t1 1 1 1 1 test/t2 1 1 1 2 DROP TABLE t1, t2; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result b/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result new file mode 100644 index 00000000000..e49e38a8f3f --- /dev/null +++ b/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result @@ -0,0 +1,159 @@ +SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET GLOBAL innodb_file_per_table = ON; +create table innodb_compressed1(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed encrypted=yes; +create table innodb_compressed2(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=1 encrypted=yes; +create table innodb_compressed3(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=2 encrypted=yes; +create table innodb_compressed4(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=4 encrypted=yes; +insert into innodb_compressed1 values (1, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (2, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (3, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (4, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (5, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (6, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (7, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (8, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (9, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (10, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed2 select * from innodb_compressed1; +insert into innodb_compressed3 select * from innodb_compressed1; +insert into innodb_compressed4 select * from innodb_compressed1; +# t1 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed1.ibd +# t2 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed2.ibd +# t3 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed3.ibd +# t4 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed4.ibd +SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET GLOBAL innodb_file_per_table = ON; +select * from innodb_compressed1 where d = 20; +c1 d a b +1 20 private evenmoreprivate +2 20 private evenmoreprivate +8 20 private evenmoreprivate +9 20 private evenmoreprivate +10 20 private evenmoreprivate +select * from innodb_compressed1 where d = 30; +c1 d a b +3 30 private evenmoreprivate +4 30 private evenmoreprivate +5 30 private evenmoreprivate +6 30 private evenmoreprivate +7 30 private evenmoreprivate +select * from innodb_compressed2 where d = 20; +c1 d a b +1 20 private evenmoreprivate +2 20 private evenmoreprivate +8 20 private evenmoreprivate +9 20 private evenmoreprivate +10 20 private evenmoreprivate +select * from innodb_compressed2 where d = 30; +c1 d a b +3 30 private evenmoreprivate +4 30 private evenmoreprivate +5 30 private evenmoreprivate +6 30 private evenmoreprivate +7 30 private evenmoreprivate +select * from innodb_compressed3 where d = 20; +c1 d a b +1 20 private evenmoreprivate +2 20 private evenmoreprivate +8 20 private evenmoreprivate +9 20 private evenmoreprivate +10 20 private evenmoreprivate +select * from innodb_compressed3 where d = 30; +c1 d a b +3 30 private evenmoreprivate +4 30 private evenmoreprivate +5 30 private evenmoreprivate +6 30 private evenmoreprivate +7 30 private evenmoreprivate +select * from innodb_compressed4 where d = 20; +c1 d a b +1 20 private evenmoreprivate +2 20 private evenmoreprivate +8 20 private evenmoreprivate +9 20 private evenmoreprivate +10 20 private evenmoreprivate +select * from innodb_compressed4 where d = 30; +c1 d a b +3 30 private evenmoreprivate +4 30 private evenmoreprivate +5 30 private evenmoreprivate +6 30 private evenmoreprivate +7 30 private evenmoreprivate +update innodb_compressed1 set d = d + 10 where d = 30; +update innodb_compressed2 set d = d + 10 where d = 30; +update innodb_compressed3 set d = d + 10 where d = 30; +update innodb_compressed4 set d = d + 10 where d = 30; +insert into innodb_compressed1 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed2 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed3 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed4 values (20, 60, 'newprivate', 'newevenmoreprivate'); +# t1 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed1.ibd +# t2 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed2.ibd +# t3 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed3.ibd +# t4 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed4.ibd +select * from innodb_compressed1 where d = 40; +c1 d a b +3 40 private evenmoreprivate +4 40 private evenmoreprivate +5 40 private evenmoreprivate +6 40 private evenmoreprivate +7 40 private evenmoreprivate +select * from innodb_compressed1 where d = 60; +c1 d a b +20 60 newprivate newevenmoreprivate +select * from innodb_compressed2 where d = 40; +c1 d a b +3 40 private evenmoreprivate +4 40 private evenmoreprivate +5 40 private evenmoreprivate +6 40 private evenmoreprivate +7 40 private evenmoreprivate +select * from innodb_compressed2 where d = 60; +c1 d a b +20 60 newprivate newevenmoreprivate +select * from innodb_compressed3 where d = 40; +c1 d a b +3 40 private evenmoreprivate +4 40 private evenmoreprivate +5 40 private evenmoreprivate +6 40 private evenmoreprivate +7 40 private evenmoreprivate +select * from innodb_compressed3 where d = 60; +c1 d a b +20 60 newprivate 
newevenmoreprivate +select * from innodb_compressed4 where d = 40; +c1 d a b +3 40 private evenmoreprivate +4 40 private evenmoreprivate +5 40 private evenmoreprivate +6 40 private evenmoreprivate +7 40 private evenmoreprivate +select * from innodb_compressed4 where d = 60; +c1 d a b +20 60 newprivate newevenmoreprivate +# t1 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed1.ibd +# t2 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed2.ibd +# t3 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed3.ibd +# t4 yes on expecting NOT FOUND +NOT FOUND /private/ in innodb_compressed4.ibd +drop table innodb_compressed1; +drop table innodb_compressed2; +drop table innodb_compressed3; +drop table innodb_compressed4; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_encryption_tables.result b/mysql-test/suite/encryption/r/innodb_encryption_tables.result index 640e2be87a2..da62c0a2f0e 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption_tables.result +++ b/mysql-test/suite/encryption/r/innodb_encryption_tables.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact; @@ -104,6 +106,8 @@ SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_ variable_value >= 0 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; update innodb_normal set c1 = c1 + 1; update innodb_compact set c1 = c1 + 1; @@ -159,3 +163,5 @@ drop table innodb_compact; drop table innodb_dynamic; drop table innodb_compressed; drop table innodb_redundant; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result index f5dba1548df..69ab0d0bca4 100644 --- a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result +++ b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB encrypted=yes; CREATE TABLE t2 (id INT NOT NULL PRIMARY KEY, a VARCHAR(255)) ENGINE=InnoDB; @@ -174,3 +176,5 @@ NOT FOUND /mangled/ in t6.ibd NOT FOUND /mysql/ in t7.ibd DROP PROCEDURE innodb_insert_proc; DROP TABLE t1, t2, t3, t4, t5, t6, t7; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result b/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result index 43fb7368654..22038c0e933 100644 --- a/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result +++ b/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result @@ -1,5 +1,7 @@ # Restart mysqld --loose-file-key-management-filename=keys2.txt SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; show warnings; @@ -105,6 +107,8 @@ SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_ variable_value >= 0 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; alter table innodb_compact engine=innodb encryption_key_id = 2; alter table innodb_compressed engine=innodb encryption_key_id = 3; @@ -151,3 +155,5 @@ drop table innodb_compact; drop table innodb_compressed; drop table innodb_dynamic; drop table innodb_redundant; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/encryption/t/innodb-bad-key-change.test b/mysql-test/suite/encryption/t/innodb-bad-key-change.test index 9180fb12085..9fd6ac3a011 100644 --- a/mysql-test/suite/encryption/t/innodb-bad-key-change.test +++ b/mysql-test/suite/encryption/t/innodb-bad-key-change.test @@ -22,6 +22,8 @@ call mtr.add_suppression(".*InnoDB: Cannot open table test/.* from the internal call mtr.add_suppression("InnoDB: .ibd file is missing for table test/.*"); call mtr.add_suppression("mysqld: File .*"); call mtr.add_suppression("InnoDB: Tablespace id .* is encrypted but encryption service or used key_id .* is not available. Can't continue opening tablespace."); +call mtr.add_suppression("InnoDB: InnoDB: Page may be an index page where index id is .*"); + --echo --echo # Start server with keys2.txt -- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt diff --git a/mysql-test/suite/encryption/t/innodb-bad-key-change3.test b/mysql-test/suite/encryption/t/innodb-bad-key-change3.test index 20d63b10649..d0480a6b424 100644 --- a/mysql-test/suite/encryption/t/innodb-bad-key-change3.test +++ b/mysql-test/suite/encryption/t/innodb-bad-key-change3.test @@ -8,7 +8,9 @@ # # MDEV-8772: Assertion failure in file ha_innodb.cc line 20027 when importing page compressed and encrypted tablespace using incorrect keys # -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded"); + +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. 
Please refer to .* for how to resolve the issue."); --disable_query_log let $innodb_file_format_orig = `SELECT @@innodb_file_format`; diff --git a/mysql-test/suite/encryption/t/innodb-discard-import.test b/mysql-test/suite/encryption/t/innodb-discard-import.test index 6d9f6c5dbb3..3bcb8d39862 100644 --- a/mysql-test/suite/encryption/t/innodb-discard-import.test +++ b/mysql-test/suite/encryption/t/innodb-discard-import.test @@ -10,7 +10,8 @@ # MDEV-8770: Incorrect error message when importing page compressed tablespace # -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded"); +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); --disable_query_log let $innodb_file_format_orig = `SELECT @@innodb_file_format`; diff --git a/mysql-test/suite/encryption/t/innodb-encryption-disable.test b/mysql-test/suite/encryption/t/innodb-encryption-disable.test index 42d8008d1aa..e8e2ba02402 100644 --- a/mysql-test/suite/encryption/t/innodb-encryption-disable.test +++ b/mysql-test/suite/encryption/t/innodb-encryption-disable.test @@ -30,12 +30,14 @@ call mtr.add_suppression("InnoDB: Tablespace id.* is encrypted but encryption se --shutdown_server --source include/wait_until_disconnected.inc ---write_file $MYSQLTEST_VARDIR/keys1.txt +--error 0,1,2 +--remove_file $MYSQLTEST_VARDIR/encryption-disable-keys1.txt +--write_file $MYSQLTEST_VARDIR/encryption-disable-keys1.txt 1;770A8A65DA156D24EE2A093277530142 4;770A8A65DA156D24EE2A093277530143 EOF ---exec echo "restart:--innodb-encrypt-tables --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/keys1.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--exec echo "restart:--innodb-encrypt-tables --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/encryption-disable-keys1.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --enable_reconnect --source include/wait_until_connected_again.inc @@ -58,8 +60,15 @@ CREATE TABLE `t1` ( ) ENGINE=InnoDB; insert into t1 values (1,2,'maria','db','encryption'); + +select * from t1; +select * from t5; + alter table t1 encrypted='yes' `encryption_key_id`=1; +select * from t1; +select * from t5; + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --shutdown_server --source include/wait_until_disconnected.inc @@ -77,7 +86,7 @@ select * from t5; --shutdown_server --source include/wait_until_disconnected.inc ---exec echo "restart:--innodb-encrypt-tables --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/keys1.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--exec echo "restart:--innodb-encrypt-tables --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=$MYSQLTEST_VARDIR/encryption-disable-keys1.txt" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --enable_reconnect --source include/wait_until_connected_again.inc @@ -89,4 +98,4 @@ EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig; EVAL SET GLOBAL innodb_file_format = $innodb_file_format_orig; --enable_query_log ---remove_file $MYSQLTEST_VARDIR/keys1.txt +--remove_file $MYSQLTEST_VARDIR/encryption-disable-keys1.txt diff --git a/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test 
b/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test index def3665eeff..0361fddecff 100644 --- a/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test +++ b/mysql-test/suite/encryption/t/innodb_encryption_discard_import.test @@ -4,7 +4,8 @@ -- source include/not_embedded.inc -- source include/not_windows.inc -call mtr.add_suppression("InnoDB: Table .* tablespace is set as discarded."); +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); --let $MYSQLD_TMPDIR = `SELECT @@tmpdir` --let $MYSQLD_DATADIR = `SELECT @@datadir` @@ -107,6 +108,7 @@ ALTER TABLE t3 DISCARD TABLESPACE; --echo # List after t1 DISCARD --list_files $MYSQLD_DATADIR/test +--disable_result_log --error 0,1,2 --remove_file $MYSQLD_DATADIR/test/t1.cfg --error 0,1,2 diff --git a/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.opt b/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.opt new file mode 100644 index 00000000000..7ebf81a07f3 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.opt @@ -0,0 +1,4 @@ +--innodb-encrypt-tables=ON +--innodb-encryption-rotate-key-age=15 +--innodb-encryption-threads=4 +--innodb-tablespaces-encryption diff --git a/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.test b/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.test new file mode 100644 index 00000000000..0a28c1690a2 --- /dev/null +++ b/mysql-test/suite/encryption/t/innodb_encryption_row_compressed.test @@ -0,0 +1,125 @@ +-- source include/have_innodb.inc +-- source include/have_file_key_management_plugin.inc +-- source include/not_embedded.inc + +--disable_query_log +let $innodb_file_format_orig = `SELECT @@innodb_file_format`; +let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`; +--enable_query_log + +SET GLOBAL innodb_file_format = `Barracuda`; +SET GLOBAL innodb_file_per_table = ON; + +create table innodb_compressed1(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed encrypted=yes; +create table innodb_compressed2(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=1 encrypted=yes; +create table innodb_compressed3(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=2 encrypted=yes; +create table innodb_compressed4(c1 bigint not null primary key, d int, a varchar(20), b char(200)) engine=innodb row_format=compressed key_block_size=4 encrypted=yes; + +insert into innodb_compressed1 values (1, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (2, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (3, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (4, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (5, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (6, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (7, 30, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (8, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (9, 20, 'private', 'evenmoreprivate'); +insert into innodb_compressed1 values (10, 20, 'private', 'evenmoreprivate'); + +insert into 
innodb_compressed2 select * from innodb_compressed1; +insert into innodb_compressed3 select * from innodb_compressed1; +insert into innodb_compressed4 select * from innodb_compressed1; + +--source include/restart_mysqld.inc + +--let $MYSQLD_DATADIR=`select @@datadir` +--let t1_IBD = $MYSQLD_DATADIR/test/innodb_compressed1.ibd +--let t2_IBD = $MYSQLD_DATADIR/test/innodb_compressed2.ibd +--let t3_IBD = $MYSQLD_DATADIR/test/innodb_compressed3.ibd +--let t4_IBD = $MYSQLD_DATADIR/test/innodb_compressed4.ibd +--let SEARCH_RANGE = 10000000 +--let SEARCH_PATTERN=private +--echo # t1 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--echo # t2 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t2_IBD +-- source include/search_pattern_in_file.inc +--echo # t3 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t3_IBD +-- source include/search_pattern_in_file.inc +--echo # t4 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t4_IBD +-- source include/search_pattern_in_file.inc + +SET GLOBAL innodb_file_format = `Barracuda`; +SET GLOBAL innodb_file_per_table = ON; + +select * from innodb_compressed1 where d = 20; +select * from innodb_compressed1 where d = 30; +select * from innodb_compressed2 where d = 20; +select * from innodb_compressed2 where d = 30; +select * from innodb_compressed3 where d = 20; +select * from innodb_compressed3 where d = 30; +select * from innodb_compressed4 where d = 20; +select * from innodb_compressed4 where d = 30; + +update innodb_compressed1 set d = d + 10 where d = 30; +update innodb_compressed2 set d = d + 10 where d = 30; +update innodb_compressed3 set d = d + 10 where d = 30; +update innodb_compressed4 set d = d + 10 where d = 30; + +insert into innodb_compressed1 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed2 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed3 values (20, 60, 'newprivate', 'newevenmoreprivate'); +insert into innodb_compressed4 values (20, 60, 'newprivate', 'newevenmoreprivate'); + +--let SEARCH_PATTERN=private +--echo # t1 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--echo # t2 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t2_IBD +-- source include/search_pattern_in_file.inc +--echo # t3 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t3_IBD +-- source include/search_pattern_in_file.inc +--echo # t4 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t4_IBD +-- source include/search_pattern_in_file.inc + +--source include/restart_mysqld.inc + +select * from innodb_compressed1 where d = 40; +select * from innodb_compressed1 where d = 60; +select * from innodb_compressed2 where d = 40; +select * from innodb_compressed2 where d = 60; +select * from innodb_compressed3 where d = 40; +select * from innodb_compressed3 where d = 60; +select * from innodb_compressed4 where d = 40; +select * from innodb_compressed4 where d = 60; + +--let SEARCH_PATTERN=private +--echo # t1 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t1_IBD +-- source include/search_pattern_in_file.inc +--echo # t2 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t2_IBD +-- source include/search_pattern_in_file.inc +--echo # t3 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t3_IBD +-- source include/search_pattern_in_file.inc +--echo # t4 yes on expecting NOT FOUND +-- let SEARCH_FILE=$t4_IBD +-- source include/search_pattern_in_file.inc + +drop table innodb_compressed1; +drop table innodb_compressed2; +drop table 
innodb_compressed3; +drop table innodb_compressed4; + +# reset system +--disable_query_log +EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig; +EVAL SET GLOBAL innodb_file_format = $innodb_file_format_orig; +--enable_query_log diff --git a/mysql-test/suite/funcs_1/r/is_tables_innodb.result b/mysql-test/suite/funcs_1/r/is_tables_innodb.result index 23e6ad77309..0e2389771b2 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_innodb.result +++ b/mysql-test/suite/funcs_1/r/is_tables_innodb.result @@ -21,7 +21,7 @@ TABLE_NAME t1 TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -44,7 +44,7 @@ TABLE_NAME t2 TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -67,7 +67,7 @@ TABLE_NAME t1 TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -111,7 +111,7 @@ TABLE_NAME t1 TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -134,7 +134,7 @@ TABLE_NAME t2 TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql.result b/mysql-test/suite/funcs_1/r/is_tables_mysql.result index c9c86b4e96e..8e0c9b64dab 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_mysql.result +++ b/mysql-test/suite/funcs_1/r/is_tables_mysql.result @@ -315,7 +315,7 @@ TABLE_NAME innodb_index_stats TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# @@ -338,7 +338,7 @@ TABLE_NAME innodb_table_stats TABLE_TYPE BASE TABLE ENGINE InnoDB VERSION 10 -ROW_FORMAT Compact +ROW_FORMAT DYNAMIC_OR_PAGE TABLE_ROWS #TBLR# AVG_ROW_LENGTH #ARL# DATA_LENGTH #DL# diff --git a/mysql-test/suite/handler/disabled.def b/mysql-test/suite/handler/disabled.def new file mode 100644 index 00000000000..ef63577b0cb --- /dev/null +++ b/mysql-test/suite/handler/disabled.def @@ -0,0 +1,13 @@ +############################################################################## +# +# List the test cases that are to be disabled temporarily. +# +# Separate the test case name and the comment with ':'. +# +# : BUG# +# +# Do not use any TAB characters for whitespace. 
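# For illustration only (the test name and ticket number below are
# hypothetical, not entries from this patch): an entry following the
# "<test case name> : <ticket / reason>" convention described in the
# comment above would look like:
#
#   example_test : MDEV-00000 wait for fix of sporadic failure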
+# +############################################################################## + +innodb : MDEV-10549 \ No newline at end of file diff --git a/mysql-test/suite/innodb/disabled.def b/mysql-test/suite/innodb/disabled.def index 8cae44a3607..778ce482db5 100644 --- a/mysql-test/suite/innodb/disabled.def +++ b/mysql-test/suite/innodb/disabled.def @@ -10,3 +10,7 @@ # ############################################################################## +innodb.auto_increment_dup : MDEV-10548 +innodb_skip_innodb_is_tables : MDEV-10200 +innodb.innodb_bug13510739: MDEV-10549 +innodb.defrag_mdl-9155 : MDEV-10551 diff --git a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc index bc64937669a..75cab775528 100644 --- a/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc +++ b/mysql-test/suite/innodb/include/innodb_simulate_comp_failures.inc @@ -23,6 +23,8 @@ call mtr.add_suppression(".*"); # create the table with compressed pages of size 8K. CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; + # percentage of compressions that will be forced to fail SET GLOBAL innodb_simulate_comp_failures = 25; @@ -35,16 +37,16 @@ let $commit_iterations=50; while ($num_inserts_ind) { let $repeat = `select floor(rand() * 10)`; - eval -INSERT INTO t1(id, msg) -VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat)); + eval INSERT INTO t1(id, msg) + VALUES ($num_inserts_ind, REPEAT('abcdefghijklmnopqrstuvwxyz', $repeat)); dec $num_inserts_ind; } --enable_query_log --enable_result_log -SELECT COUNT(*) FROM t1; +COMMIT; +SELECT COUNT(id) FROM t1; --disable_query_log --disable_result_log diff --git a/mysql-test/suite/innodb/r/group_commit_crash.result b/mysql-test/suite/innodb/r/group_commit_crash.result index 5d5dffab33e..80a780ba2c5 100644 --- a/mysql-test/suite/innodb/r/group_commit_crash.result +++ b/mysql-test/suite/innodb/r/group_commit_crash.result @@ -124,3 +124,5 @@ delete from t1; DROP TABLE t1; DROP TABLE t2; DROP PROCEDURE setcrash; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result index 542ce9d496e..06fdeaef6a7 100644 --- a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result +++ b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result @@ -124,3 +124,5 @@ delete from t1; DROP TABLE t1; DROP TABLE t2; DROP PROCEDURE setcrash; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/help_url.result b/mysql-test/suite/innodb/r/help_url.result index 10affe78f0c..87530bc2f7d 100644 --- a/mysql-test/suite/innodb/r/help_url.result +++ b/mysql-test/suite/innodb/r/help_url.result @@ -1,4 +1,2 @@ create table innodb_table_monitor (a int) engine=InnoDB; -Warnings: -Warning 131 Using the table name innodb_table_monitor to enable diagnostic output is deprecated and may be removed in future releases. Use INFORMATION_SCHEMA or PERFORMANCE_SCHEMA tables or SET GLOBAL innodb_status_output=ON. 
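# Sketch only, not part of the recorded result above: the warning text removed
# from help_url.result itself names the replacement interface. Assuming a
# server where the deprecated monitor tables are no longer wanted, roughly the
# same diagnostic output is obtained without creating innodb_table_monitor:
SET GLOBAL innodb_status_output = ON;
SHOW ENGINE INNODB STATUS;
SET GLOBAL innodb_status_output = OFF;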
drop table innodb_table_monitor; diff --git a/mysql-test/suite/innodb/r/innodb-16k.result b/mysql-test/suite/innodb/r/innodb-16k.result index 0537315122c..adfbc97ea66 100644 --- a/mysql-test/suite/innodb/r/innodb-16k.result +++ b/mysql-test/suite/innodb/r/innodb-16k.result @@ -1,5 +1,11 @@ +call mtr.add_suppression("InnoDB: Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_large_prefix = OFF; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html # Test 1) Show the page size from Information Schema SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_page_size'; @@ -224,6 +230,8 @@ table_name row_format create_options t1 Compressed row_format=COMPRESSED DROP TABLE t1; SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html CREATE TABLE t2(d varchar(17) PRIMARY KEY) ENGINE=innodb DEFAULT CHARSET=utf8; CREATE TABLE t3(a int PRIMARY KEY) ENGINE=innodb; INSERT INTO t3 VALUES (22),(44),(33),(55),(66); @@ -368,11 +376,6 @@ UPDATE t1 SET s=@e; CREATE INDEX t1t ON t1 (t(767)); UPDATE t1 SET t=@e; ERROR HY000: Undo log record is too big. -CREATE INDEX t1u ON t1 (u(767)); -ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs -CREATE INDEX t1ut ON t1 (u(767), t(767)); -ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs -CREATE INDEX t1st ON t1 (s(767), t(767)); SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -416,11 +419,12 @@ t1 CREATE TABLE `t1` ( KEY `t1q` (`q`(767)), KEY `t1r` (`r`(767)), KEY `t1s` (`s`(767)), - KEY `t1t` (`t`(767)), - KEY `t1st` (`s`(767),`t`(767)) + KEY `t1t` (`t`(767)) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DROP TABLE t1; SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; Test an assertion failure on purge. CREATE TABLE t1_purge ( @@ -464,6 +468,8 @@ DELETE FROM t3_purge; DELETE FROM t4_purge; SET GLOBAL innodb_file_per_table=on; SET GLOBAL innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET @r=REPEAT('a',500); CREATE TABLE tlong(a int, v1 varchar(500), v2 varchar(500), v3 varchar(500), @@ -552,7 +558,7 @@ SHOW WARNINGS; Level Code Message Error 1713 Undo log record is too big. 
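# Sketch only, under assumptions: a 16k-page server with
# innodb_large_prefix = OFF as set earlier in innodb-16k.result; the table and
# column names are hypothetical. It shows the 767-byte index-prefix limit that
# this configuration enforces: with innodb_strict_mode enabled the statement
# should be rejected, and without strict mode the prefix should be truncated
# to 767 bytes with a warning instead.
SET SESSION innodb_strict_mode = ON;
CREATE TABLE prefix_demo (c VARCHAR(1024), KEY k (c(768)))
ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
DROP TABLE IF EXISTS prefix_demo;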
DROP TABLE bug12547647; -SET SESSION innodb_strict_mode = off; +SET SESSION innodb_strict_mode = on; CREATE TABLE t1( c text NOT NULL, d text NOT NULL, PRIMARY KEY (c(767),d(767))) @@ -977,3 +983,7 @@ COL196 TEXT, COL197 TEXT) row_format=compact,ENGINE=INNODB; ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-alter-discard.result b/mysql-test/suite/innodb/r/innodb-alter-discard.result index 29712868239..bd60d2d0099 100644 --- a/mysql-test/suite/innodb/r/innodb-alter-discard.result +++ b/mysql-test/suite/innodb/r/innodb-alter-discard.result @@ -1,5 +1,11 @@ SET GLOBAL innodb_file_per_table=1; CREATE TABLE t(a INT)ENGINE=InnoDB; +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation."); +call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified."); +call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them."); +call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: './test/t.ibd' OS error: .*"); +call mtr.add_suppression("InnoDB: Ignoring tablespace `test/t` because it could not be opened."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .*"); call mtr.add_suppression("InnoDB: Error: trying to open a table, but could not$"); call mtr.add_suppression("MySQL is trying to open a table handle but the \.ibd file for$"); call mtr.add_suppression("InnoDB: Table 'test/t'$"); @@ -17,5 +23,5 @@ ERROR 42S02: Table 'test.t1' doesn't exist ALTER TABLE t DISCARD TABLESPACE; Warnings: Warning 1812 Tablespace is missing for table 'test/t' -Warning 1812 Tablespace is missing for table 't' +Warning 1812 Tablespace is missing for table 'test/t' DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/innodb-blob.result b/mysql-test/suite/innodb/r/innodb-blob.result index ec5a4a8b0ac..fe4b1908fcb 100644 --- a/mysql-test/suite/innodb/r/innodb-blob.result +++ b/mysql-test/suite/innodb/r/innodb-blob.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("InnoDB: The log sequence numbers [0-9]+ and [0-9]+ in ibdata files do not match the log sequence number [0-9]+ in the ib_logfiles!"); CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT) ENGINE=InnoDB; CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t3 (a INT PRIMARY KEY, b TEXT, c TEXT) ENGINE=InnoDB; @@ -18,7 +19,9 @@ a RIGHT(b,20) 1 aaaaaaaaaaaaaaaaaaaa 2 bbbbbbbbbbbbbbbbbbbb connection default; -SET DEBUG_DBUG='+d,row_ins_extern_checkpoint'; +SET DEBUG='+d,row_ins_extern_checkpoint'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. 
Please use '@@debug_dbug' instead SET DEBUG_SYNC='before_row_ins_extern_latch SIGNAL rec_not_blob WAIT_FOR crash'; ROLLBACK; BEGIN; @@ -38,7 +41,9 @@ a 1 2 3 -SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead INSERT INTO t2 VALUES (42); ERROR HY000: Lost connection to MySQL server during query disconnect con1; @@ -51,18 +56,25 @@ test.t1 check status OK INSERT INTO t3 VALUES (1,REPEAT('d',7000),REPEAT('e',100)), (2,REPEAT('g',7000),REPEAT('h',100)); -SET DEBUG_SYNC='before_row_upd_extern SIGNAL have_latch WAIT_FOR go'; +SET DEBUG_SYNC='blob_write_middle SIGNAL go_sel WAIT_FOR go_upd'; UPDATE t3 SET c=REPEAT('f',3000) WHERE a=1; +# Connection con1: connect con1,localhost,root,,; -SET DEBUG_SYNC='now WAIT_FOR have_latch'; +SET DEBUG_SYNC='now WAIT_FOR go_sel'; SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT @@tx_isolation; @@tx_isolation READ-UNCOMMITTED SELECT a, RIGHT(b,20), RIGHT(c,20) FROM t3; -connect con2,localhost,root,,; -SET DEBUG_SYNC='now SIGNAL go'; +a RIGHT(b,20) RIGHT(c,20) +2 gggggggggggggggggggg hhhhhhhhhhhhhhhhhhhh +set debug_sync='now SIGNAL go_upd'; +# Connection default: +connection default; +# reap UPDATE t3 SET c=REPEAT('f',3000) WHERE a=1; +# Connection con1: connection con1; +SELECT a, RIGHT(b,20), RIGHT(c,20) FROM t3; a RIGHT(b,20) RIGHT(c,20) 1 dddddddddddddddddddd ffffffffffffffffffff 2 gggggggggggggggggggg hhhhhhhhhhhhhhhhhhhh @@ -73,11 +85,13 @@ Table Op Msg_type Msg_text test.t1 check status OK test.t2 check status OK test.t3 check status OK -connection con2; +connect con2,localhost,root,,; BEGIN; INSERT INTO t2 VALUES (347); connection default; -SET DEBUG_DBUG='+d,row_upd_extern_checkpoint'; +SET DEBUG='+d,row_upd_extern_checkpoint'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SET DEBUG_SYNC='before_row_upd_extern SIGNAL have_latch WAIT_FOR crash'; UPDATE t3 SET c=REPEAT('i',3000) WHERE a=2; connection con2; @@ -86,7 +100,9 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; info UPDATE t3 SET c=REPEAT('i',3000) WHERE a=2 -SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead COMMIT; ERROR HY000: Lost connection to MySQL server during query disconnect con2; @@ -109,7 +125,9 @@ connect con2,localhost,root,,; BEGIN; INSERT INTO t2 VALUES (33101); connection default; -SET DEBUG_DBUG='+d,row_upd_extern_checkpoint'; +SET DEBUG='+d,row_upd_extern_checkpoint'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SET DEBUG_SYNC='after_row_upd_extern SIGNAL have_latch WAIT_FOR crash'; UPDATE t3 SET c=REPEAT('j',3000) WHERE a=2; connection con2; @@ -118,7 +136,9 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; info UPDATE t3 SET c=REPEAT('j',3000) WHERE a=2 -SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. 
Please use '@@debug_dbug' instead COMMIT; ERROR HY000: Lost connection to MySQL server during query disconnect con2; diff --git a/mysql-test/suite/innodb/r/innodb-bug-14068765.result b/mysql-test/suite/innodb/r/innodb-bug-14068765.result index 7a8f959b995..f6d37b23114 100644 --- a/mysql-test/suite/innodb/r/innodb-bug-14068765.result +++ b/mysql-test/suite/innodb/r/innodb-bug-14068765.result @@ -38,5 +38,7 @@ COUNT(*) 2 DROP TABLE testdb_wl5522.t1; DROP DATABASE testdb_wl5522; -SET GLOBAL INNODB_FILE_FORMAT=Antelope; +SET GLOBAL INNODB_FILE_FORMAT=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL INNODB_FILE_PER_TABLE=1; diff --git a/mysql-test/suite/innodb/r/innodb-bug-14084530.result b/mysql-test/suite/innodb/r/innodb-bug-14084530.result index 4b4f201300c..3ba8e0e8440 100644 --- a/mysql-test/suite/innodb/r/innodb-bug-14084530.result +++ b/mysql-test/suite/innodb/r/innodb-bug-14084530.result @@ -27,5 +27,7 @@ c1 SET AUTOCOMMIT = 1; DROP TABLE testdb_wl5522.t1; DROP DATABASE testdb_wl5522; -SET GLOBAL INNODB_FILE_FORMAT=Antelope; +SET GLOBAL INNODB_FILE_FORMAT=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL INNODB_FILE_PER_TABLE=1; diff --git a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result index 508d578193f..5abfb8cffa7 100644 --- a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result +++ b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result @@ -2,13 +2,13 @@ # Bug#69122 - INNODB DOESN'T REDO-LOG INSERT BUFFER MERGE # OPERATION IF IT IS DONE IN-PLACE # -SET GLOBAL innodb_change_buffering_debug = 1; CREATE TABLE t1( a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(1), c INT, INDEX(b)) -ENGINE=InnoDB; +ENGINE=InnoDB STATS_PERSISTENT=0; +SET GLOBAL innodb_change_buffering_debug = 1; INSERT INTO t1 VALUES(0,'x',1); INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; @@ -23,7 +23,6 @@ INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; -INSERT INTO t1 SELECT 0,b,c FROM t1; BEGIN; SELECT b FROM t1 LIMIT 3; b @@ -35,7 +34,9 @@ connection con1; BEGIN; DELETE FROM t1 WHERE a=1; INSERT INTO t1 VALUES(1,'X',1); -SET DEBUG_DBUG='+d,crash_after_log_ibuf_upd_inplace'; +SET DEBUG='+d,crash_after_log_ibuf_upd_inplace'; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead SELECT b FROM t1 LIMIT 3; ERROR HY000: Lost connection to MySQL server during query FOUND /Wrote log record for ibuf update in place operation/ in my_restart.err diff --git a/mysql-test/suite/innodb/r/innodb-fk-warnings.result b/mysql-test/suite/innodb/r/innodb-fk-warnings.result index eddedfc3620..6c203cca207 100644 --- a/mysql-test/suite/innodb/r/innodb-fk-warnings.result +++ b/mysql-test/suite/innodb/r/innodb-fk-warnings.result @@ -16,7 +16,7 @@ CONSTRAINT test FOREIGN KEY (b) REFERENCES t2 (id) ERROR HY000: Can't create table `test`.`t2` (errno: 121 "Duplicate key on write or update") show warnings; Level Code Message -Warning 121 Create or Alter table `test`.`t2` with foreign key constraint failed. 
Foreign key constraint `test/test` already exists on data dictionary. Foreign key constraint names need to be unique in database. Error in foreign key definition: CONSTRAINT `test` FOREIGN KEY (`b`) REFERENCES `test`.`t2` (`id`). +Warning 121 Create or Alter table `test`.`t2` with foreign key constraint failed. Foreign key constraint `test`.`test` already exists on data dictionary. Foreign key constraint names need to be unique in database. Error in foreign key definition: CONSTRAINT `test` FOREIGN KEY (`b`) REFERENCES `test`.`t2` (`id`). Error 1005 Can't create table `test`.`t2` (errno: 121 "Duplicate key on write or update") Warning 1022 Can't write; duplicate key in table 't2' drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result index e6ee836ae13..fa490274652 100644 --- a/mysql-test/suite/innodb/r/innodb-index.result +++ b/mysql-test/suite/innodb/r/innodb-index.result @@ -1,5 +1,7 @@ set global innodb_file_per_table=on; set global innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS; create table t1(a varchar(2) primary key) engine=innodb; insert into t1 values(''); @@ -857,8 +859,12 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index drop table t1; set global innodb_file_per_table=1; -set global innodb_file_format=Antelope; -set global innodb_file_format_max=Antelope; +set global innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_format_max=Barracuda; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0; SET FOREIGN_KEY_CHECKS=0; CREATE TABLE t1( @@ -1189,3 +1195,7 @@ t2c CREATE TABLE `t2c` ( KEY `t2a` (`a`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE t1,t2,t2c,t2i; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-mdev-7408.result b/mysql-test/suite/innodb/r/innodb-mdev-7408.result index 8f6ad139192..80b46d3425c 100644 --- a/mysql-test/suite/innodb/r/innodb-mdev-7408.result +++ b/mysql-test/suite/innodb/r/innodb-mdev-7408.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("InnoDB: User stopword table .* does not exist."); select @@global.innodb_ft_server_stopword_table; @@global.innodb_ft_server_stopword_table NULL diff --git a/mysql-test/suite/innodb/r/innodb-mdev-7513.result b/mysql-test/suite/innodb/r/innodb-mdev-7513.result index bb3531e3f90..55b4d3462b0 100644 --- a/mysql-test/suite/innodb/r/innodb-mdev-7513.result +++ b/mysql-test/suite/innodb/r/innodb-mdev-7513.result @@ -1,3 +1,4 @@ +call mtr.add_suppression("InnoDB: Cannot add field `.* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); call mtr.add_suppression("Row size too large (> 8126)*"); CREATE TABLE t1 ( text1 TEXT, text2 TEXT, diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result b/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result index 8d3bc063a71..a566c94bd3b 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result @@ -1,4 +1,6 @@ set global innodb_file_format = `barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table = on; set global innodb_compression_algorithm = 5; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -435,3 +437,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result b/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result index eeab2622cb6..76cd5b16f28 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result @@ -1,4 +1,6 @@ set global innodb_file_format = `barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table = on; set global innodb_compression_algorithm = 2; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -436,3 +438,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result b/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result index d340801b656..cceff820ee0 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result @@ -1,4 +1,6 @@ set global innodb_file_format = `barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table = on; set global innodb_compression_algorithm = 4; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -435,3 +437,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result b/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result index fdbc99f60d9..1a9235fab62 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result @@ -1,4 +1,6 @@ set global innodb_file_format = `barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table = on; set global innodb_compression_algorithm = 3; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -349,3 +351,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result index a0b2f947fd3..e1d46b04a7f 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result @@ -1,5 +1,7 @@ call mtr.add_suppression("Compression failed for space*"); set global innodb_file_format = `barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table = on; set global innodb_compression_algorithm = 6; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -436,3 +438,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_tables.result b/mysql-test/suite/innodb/r/innodb-page_compression_tables.result index 98de5db3c12..a0ac8986b9e 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_tables.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_tables.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 1; create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb; @@ -91,6 +93,8 @@ select count(*) from innodb_dynamic where c1 < 1500000; count(*) 5000 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 0; alter table innodb_compact engine=innodb page_compressed=DEFAULT; @@ -119,3 +123,5 @@ drop procedure innodb_insert_proc; drop table innodb_normal; drop table innodb_compact; drop table innodb_dynamic; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_zip.result b/mysql-test/suite/innodb/r/innodb-page_compression_zip.result index 4c3ab273b2e..a8078c02e13 100644 --- a/mysql-test/suite/innodb/r/innodb-page_compression_zip.result +++ b/mysql-test/suite/innodb/r/innodb-page_compression_zip.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; set global innodb_compression_algorithm = 1; create table innodb_compressed(c1 int, b char(20)) engine=innodb row_format=compressed key_block_size=8; @@ -349,3 +351,5 @@ drop table innodb_page_compressed6; drop table innodb_page_compressed7; drop table innodb_page_compressed8; drop table innodb_page_compressed9; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
diff --git a/mysql-test/suite/innodb/r/innodb-virtual-columns.result b/mysql-test/suite/innodb/r/innodb-virtual-columns.result
index e613f76d5bf..9837f567954 100644
--- a/mysql-test/suite/innodb/r/innodb-virtual-columns.result
+++ b/mysql-test/suite/innodb/r/innodb-virtual-columns.result
@@ -23,6 +23,22 @@ deg_start_term char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginn
deg_as_of_term char(4) NOT NULL COMMENT 'In most cases also end term',
CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+SHOW CREATE TABLE grad_degree;
+Table Create Table
+grad_degree CREATE TABLE `grad_degree` (
+ `student_id` int(8) unsigned NOT NULL,
+ `plan` varchar(10) NOT NULL,
+ `admit_term` char(4) NOT NULL,
+ `wdraw_rsn` varchar(4) NOT NULL DEFAULT '',
+ `ofis_deg_status` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed'
+ ELSE 'Not Completed'
+ END) VIRTUAL,
+ `deg_start_term` char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginning of the data',
+ `deg_as_of_term` char(4) NOT NULL COMMENT 'In most cases also end term',
+ PRIMARY KEY (`student_id`,`plan`,`admit_term`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn);
CREATE INDEX grad_degree_as_of_term_ndx ON grad_degree (deg_as_of_term);
INSERT IGNORE grad_degree (
@@ -117,6 +133,57 @@ deg_start_term char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginn
deg_as_of_term char(4) NOT NULL COMMENT 'In most cases also end term',
CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+SHOW CREATE TABLE grad_degree;
+Table Create Table
+grad_degree CREATE TABLE `grad_degree` (
+ `student_id` int(8) unsigned NOT NULL,
+ `plan` varchar(10) NOT NULL,
+ `admit_term` char(4) NOT NULL,
+ `wdraw_rsn` varchar(4) NOT NULL DEFAULT '',
+ `ofis_deg_status` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed'
+ ELSE 'Not Completed'
+ END) VIRTUAL,
+ `ofis_deg_status2` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress2'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed2'
+ ELSE 'Not Completed2'
+ END) VIRTUAL,
+ `ofis_deg_status3` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress3'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed3'
+ ELSE 'Not Completed3'
+ END) VIRTUAL,
+ `ofis_deg_status4` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress4'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed4'
+ ELSE 'Not Completed4'
+ END) VIRTUAL,
+ `ofis_deg_status5` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress5'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed5'
+ ELSE 'Not Completed5'
+ END) VIRTUAL,
+ `ofis_deg_status6` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress6'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed6'
+ ELSE 'Not Completed6'
+ END) VIRTUAL,
+ `ofis_deg_status7` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress7'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed7'
+ ELSE 'Not Completed7'
+ END) VIRTUAL,
+ `ofis_deg_status8` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress8'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed8'
+ ELSE 'Not Completed8'
+ END) VIRTUAL,
+ `deg_start_term` char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginning of the data',
+ `deg_as_of_term` char(4) NOT NULL COMMENT 'In most cases also end term',
+ PRIMARY KEY (`student_id`,`plan`,`admit_term`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn);
CREATE INDEX grad_degree_as_of_term_ndx ON grad_degree (deg_as_of_term);
INSERT IGNORE grad_degree (
@@ -264,6 +331,57 @@ deg_start_term char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginn
deg_as_of_term char(4) NOT NULL COMMENT 'In most cases also end term',
CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+SHOW CREATE TABLE grad_degree;
+Table Create Table
+grad_degree CREATE TABLE `grad_degree` (
+ `student_id` int(8) unsigned NOT NULL,
+ `plan` varchar(10) NOT NULL,
+ `admit_term` char(4) NOT NULL,
+ `wdraw_rsn` varchar(4) NOT NULL DEFAULT '',
+ `ofis_deg_status` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed'
+ ELSE 'Not Completed'
+ END) VIRTUAL,
+ `ofis_deg_status2` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress2'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed2'
+ ELSE 'Not Completed2'
+ END) VIRTUAL,
+ `ofis_deg_status3` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress3'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed3'
+ ELSE 'Not Completed3'
+ END) VIRTUAL,
+ `ofis_deg_status4` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress4'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed4'
+ ELSE 'Not Completed4'
+ END) VIRTUAL,
+ `ofis_deg_status5` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress5'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed5'
+ ELSE 'Not Completed5'
+ END) VIRTUAL,
+ `ofis_deg_status6` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress6'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed6'
+ ELSE 'Not Completed6'
+ END) VIRTUAL,
+ `ofis_deg_status7` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress7'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed7'
+ ELSE 'Not Completed7'
+ END) VIRTUAL,
+ `ofis_deg_status8` varchar(15) AS (CASE
+WHEN wdraw_rsn = '' THEN 'In progress8'
+ WHEN wdraw_rsn = 'DCMP' OR wdraw_rsn = 'TRDC' THEN 'Completed8'
+ ELSE 'Not Completed8'
+ END) VIRTUAL,
+ `deg_start_term` char(4) NOT NULL DEFAULT '' COMMENT 'Educated guess at the beginning of the data',
+ `deg_as_of_term` char(4) NOT NULL COMMENT 'In most cases also end term',
+ PRIMARY KEY (`student_id`,`plan`,`admit_term`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn);
ALTER TABLE grad_degree DROP COLUMN ofis_deg_status2, DROP COLUMN ofis_deg_status3,
DROP COLUMN ofis_deg_status4, DROP COLUMN ofis_deg_status5, DROP COLUMN ofis_deg_status6,
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-1.result b/mysql-test/suite/innodb/r/innodb-wl5522-1.result
index 060840859a7..ec28ead12b7 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522-1.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522-1.result
@@ -1,9 +1,12 @@
+call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists.
Please DISCARD the tablespace before IMPORT.");
DROP TABLE IF EXISTS t1;
SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
@@innodb_file_per_table
1
SET GLOBAL innodb_file_format = `Barracuda`;
+Warnings:
+Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
SELECT @@innodb_file_format;
@@innodb_file_format
Barracuda
@@ -126,6 +129,8 @@ COUNT(*)
2
DROP TABLE testdb_wl5522.t1;
SET GLOBAL innodb_file_format='Barracuda';
+Warnings:
+Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
CREATE TABLE testdb_wl5522.t1 (
col_1_varbinary VARBINARY (4000) ,
col_2_varchar VARCHAR (4000),
@@ -410,7 +415,7 @@ ALTER TABLE testdb_wl5522.t1 DISCARD TABLESPACE;
restore: t1 .ibd and .cfg files
ALTER TABLE testdb_wl5522.t1 IMPORT TABLESPACE;
ALTER TABLE testdb_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Tablespace for table 't1' exists. Please DISCARD the tablespace before IMPORT.
+ERROR HY000: Tablespace for table 'testdb_wl5522/t1' exists. Please DISCARD the tablespace before IMPORT.
SELECT * FROM testdb_wl5522.t1 ORDER BY i;
i
100
@@ -807,5 +812,7 @@ DROP DATABASE testdb_wl5522;
call mtr.add_suppression("Got error -1 when reading table '.*'");
call mtr.add_suppression("InnoDB: Error: tablespace id and flags in file '.*'.*");
call mtr.add_suppression("InnoDB: The table .* doesn't have a corresponding tablespace, it was discarded");
-SET GLOBAL INNODB_FILE_FORMAT=Antelope;
+SET GLOBAL INNODB_FILE_FORMAT=Barracuda;
+Warnings:
+Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
SET GLOBAL INNODB_FILE_PER_TABLE=1;
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result b/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
index 0e863f5849e..ae4e96dcd48 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522-debug-zip.result
@@ -1,8 +1,13 @@
+call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
+call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue.");
+call mtr.add_suppression("InnoDB: Error: Tablespace flags .* corrupted unused .*");
SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
@@innodb_file_per_table
1
SET GLOBAL innodb_file_format = `Barracuda`;
+Warnings:
+Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
SELECT @@innodb_file_format;
@@innodb_file_format
Barracuda
@@ -86,7 +93,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_internal_error";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Internal error: While updating the of index "GEN_CLUST_INDEX" - Generic error
+ERROR HY000: Internal error: While updating the of index GEN_CLUST_INDEX - Generic error
SET SESSION debug_dbug="-d,ib_import_internal_error";
restore: t1 .ibd and .cfg files
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
@@ -99,7 +106,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_reset_space_and_lsn_failure";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Internal error: Cannot reset LSNs in table '"test_wl5522"."t1"' : Too many concurrent transactions
+ERROR HY000: Internal error: Cannot reset LSNs in table "test_wl5522"."t1" : Too many concurrent transactions
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="-d,ib_import_reset_space_and_lsn_failure";
SET SESSION debug_dbug="+d,ib_import_open_tablespace_failure";
@@ -442,7 +449,7 @@ t1 CREATE TABLE `t1` (
KEY `idx1` (`c2`),
KEY `idx2` (`c3`(512)),
KEY `idx3` (`c4`(512))
-) ENGINE=InnoDB AUTO_INCREMENT=185 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
+) ENGINE=InnoDB AUTO_INCREMENT=248 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
DROP TABLE test_wl5522.t1;
CREATE TABLE test_wl5522.t1 (c1 INT, c2 VARCHAR(1024), c3 BLOB) ENGINE = Innodb
ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
@@ -473,7 +480,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_trigger_corruption_1";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Internal error: Cannot reset LSNs in table '"test_wl5522"."t1"' : Data structure corruption
+ERROR HY000: Internal error: Cannot reset LSNs in table "test_wl5522"."t1" : Data structure corruption
SET SESSION debug_dbug="-d,ib_import_trigger_corruption_1";
DROP TABLE test_wl5522.t1;
unlink: t1.ibd
@@ -498,7 +505,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,ib_import_trigger_corruption_2";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Index corrupt: Externally stored column(5) has a reference length of 19 in the cluster index "GEN_CLUST_INDEX"
+ERROR HY000: Index corrupt: Externally stored column(5) has a reference length of 19 in the cluster index GEN_CLUST_INDEX
SET SESSION debug_dbug="-d,ib_import_trigger_corruption_2";
DROP TABLE test_wl5522.t1;
unlink: t1.ibd
@@ -563,7 +570,7 @@ ERROR HY000: Tablespace has been discarded for table 't1'
restore: t1 .ibd and .cfg files
SET SESSION debug_dbug="+d,fsp_flags_is_valid_failure";
ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Internal error: Cannot reset LSNs in table '"test_wl5522"."t1"' : Unsupported
+ERROR HY000: Internal error: Cannot reset LSNs in table "test_wl5522"."t1" : Unsupported
SET SESSION debug_dbug="-d,fsp_flags_is_valid_failure";
DROP TABLE test_wl5522.t1;
unlink: t1.ibd
@@ -575,6 +582,12 @@ set global innodb_monitor_enable = default;
set global innodb_monitor_disable = default;
set global innodb_monitor_reset = default;
set global innodb_monitor_reset_all = default;
+Warnings:
+Error 145 Table './mtr/test_suppressions' is
marked as crashed and should be repaired +Error 1194 Table 'test_suppressions' is marked as crashed and should be repaired +Error 1034 1 client is using or hasn't closed the table properly SET GLOBAL INNODB_FILE_PER_TABLE=1; -SET GLOBAL INNODB_FILE_FORMAT=Antelope; -SET SESSION innodb_strict_mode=0; +SET GLOBAL INNODB_FILE_FORMAT=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET SESSION innodb_strict_mode=1; diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-zip.result b/mysql-test/suite/innodb/r/innodb-wl5522-zip.result index 47413b18ce9..562e9f84cd4 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5522-zip.result +++ b/mysql-test/suite/innodb/r/innodb-wl5522-zip.result @@ -1,9 +1,12 @@ +call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT."); DROP TABLE IF EXISTS t1; SET GLOBAL innodb_file_per_table = 1; SELECT @@innodb_file_per_table; @@innodb_file_per_table 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@innodb_file_format; @@innodb_file_format Barracuda @@ -74,9 +77,6 @@ ALTER TABLE t1 DISCARD TABLESPACE; t1.frm ALTER TABLE t1 IMPORT TABLESPACE; ALTER TABLE t1 ENGINE InnoDB; -Warnings: -Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. SELECT COUNT(*) FROM t1; COUNT(*) 640 @@ -112,6 +112,8 @@ SELECT @@innodb_file_per_table; @@innodb_file_per_table 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@innodb_file_format; @@innodb_file_format Barracuda @@ -124,7 +126,7 @@ c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 INT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; INSERT INTO t1(c2) VALUES(1); ALTER TABLE t1 IMPORT TABLESPACE; -ERROR HY000: Tablespace for table 't1' exists. Please DISCARD the tablespace before IMPORT. +ERROR HY000: Tablespace for table 'test/t1' exists. Please DISCARD the tablespace before IMPORT. SELECT * FROM t1; c1 c2 1 1 @@ -459,7 +461,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED SELECT * FROM t1; c1 c2 1 1 @@ -498,6 +500,8 @@ DROP TABLE t1; call mtr.add_suppression("Got error -1 when reading table '.*'"); call mtr.add_suppression("InnoDB: Error: tablespace id and flags in file '.*'.*"); call mtr.add_suppression("InnoDB: The table .* doesn't have a corresponding tablespace, it was discarded"); -SET GLOBAL INNODB_FILE_FORMAT=Antelope; +SET GLOBAL INNODB_FILE_FORMAT=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL INNODB_FILE_PER_TABLE=1; -SET SESSION innodb_strict_mode=0; +SET SESSION innodb_strict_mode=1; diff --git a/mysql-test/suite/innodb/r/innodb-wl5522.result b/mysql-test/suite/innodb/r/innodb-wl5522.result index fb4ac37b9fd..84a67edab1a 100644 --- a/mysql-test/suite/innodb/r/innodb-wl5522.result +++ b/mysql-test/suite/innodb/r/innodb-wl5522.result @@ -1,9 +1,12 @@ +call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT."); DROP TABLE IF EXISTS t1; SET GLOBAL innodb_file_per_table = 1; SELECT @@innodb_file_per_table; @@innodb_file_per_table 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@innodb_file_format; @@innodb_file_format Barracuda @@ -105,6 +108,8 @@ SELECT @@innodb_file_per_table; @@innodb_file_per_table 1 SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@innodb_file_format; @@innodb_file_format Barracuda @@ -113,7 +118,7 @@ c1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, c2 INT) ENGINE=InnoDB; INSERT INTO t1(c2) VALUES(1); ALTER TABLE t1 IMPORT TABLESPACE; -ERROR HY000: Tablespace for table 't1' exists. Please DISCARD the tablespace before IMPORT. +ERROR HY000: Tablespace for table 'test/t1' exists. Please DISCARD the tablespace before IMPORT. SELECT * FROM t1; c1 c2 1 1 @@ -426,7 +431,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 SELECT * FROM t1; c1 c2 1 1 @@ -536,7 +541,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT SELECT * FROM t1; c1 c2 1 1 @@ -612,7 +617,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT SELECT * FROM t1; c1 c2 1 1 @@ -722,7 +727,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT SELECT * FROM t1; c1 c2 1 1 @@ -801,7 +806,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT SELECT * FROM t1; c1 c2 1 1 @@ -911,7 +916,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC SELECT * FROM t1; c1 c2 1 1 @@ -990,7 +995,7 @@ t1 CREATE TABLE `t1` ( `c2` int(11) DEFAULT NULL, PRIMARY KEY (`c1`), KEY `idx` (`c2`) -) 
ENGINE=InnoDB AUTO_INCREMENT=44 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +) ENGINE=InnoDB AUTO_INCREMENT=59 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC SELECT * FROM t1; c1 c2 1 1 @@ -1029,5 +1034,7 @@ DROP TABLE t1; call mtr.add_suppression("Got error -1 when reading table '.*'"); call mtr.add_suppression("InnoDB: Error: tablespace id and flags in file '.*'.*"); call mtr.add_suppression("InnoDB: The table .* doesn't have a corresponding tablespace, it was discarded"); -SET GLOBAL INNODB_FILE_FORMAT=Antelope; +SET GLOBAL INNODB_FILE_FORMAT=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL INNODB_FILE_PER_TABLE=1; diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result index 4c890ddf0c0..d278feeee11 100644 --- a/mysql-test/suite/innodb/r/innodb.result +++ b/mysql-test/suite/innodb/r/innodb.result @@ -947,6 +947,7 @@ desc t1; Field Type Null Key Default Extra t int(11) NO MUL 1 drop table t1; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; CREATE TABLE t1 ( number bigint(20) NOT NULL default '0', cname char(15) NOT NULL default '', @@ -1009,6 +1010,7 @@ select * from t2; number cname carrier_id privacy last_mod_date last_mod_id last_app_date last_app_id version assigned_scps status 333 tubs 99 2 2002-01-09 11:34:53 501 2002-01-09 11:34:53 500 3 10 0 drop table t1,t2; +SET sql_mode = default; create table t1 (id int unsigned not null auto_increment, code tinyint unsigned not null, name char(20) not null, primary key (id), key (code), unique (name)) engine=innodb; BEGIN; SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; @@ -1316,16 +1318,80 @@ INSERT INTO t2 VALUES (10, 'old'), (20, 'other'); UPDATE t1 SET c1 = 'other' WHERE c1 = 'old'; ERROR 23000: Foreign key constraint for table 't1', record 'other-somevalu' would lead to a duplicate entry in table 't2', key 'c1' DROP TABLE t2,t1; +call mtr.add_suppression("Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 255. 
Please drop excessive foreign constraints and try again"); create table t1( id int primary key, pid int, index(pid), foreign key(pid) references t1(id) on delete cascade) engine=innodb; -insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), -(8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13),(15,14); +insert into t1 values +( 0, 0), ( 1, 0), ( 2, 1), ( 3, 2), +( 4, 3), ( 5, 4), ( 6, 5), ( 7, 6), +( 8, 7), ( 9, 8), ( 10, 9), ( 11, 10), +( 12, 11), ( 13, 12), ( 14, 13), ( 15, 14), +( 16, 15), ( 17, 16), ( 18, 17), ( 19, 18), +( 20, 19), ( 21, 20), ( 22, 21), ( 23, 22), +( 24, 23), ( 25, 24), ( 26, 25), ( 27, 26), +( 28, 27), ( 29, 28), ( 30, 29), ( 31, 30), +( 32, 31), ( 33, 32), ( 34, 33), ( 35, 34), +( 36, 35), ( 37, 36), ( 38, 37), ( 39, 38), +( 40, 39), ( 41, 40), ( 42, 41), ( 43, 42), +( 44, 43), ( 45, 44), ( 46, 45), ( 47, 46), +( 48, 47), ( 49, 48), ( 50, 49), ( 51, 50), +( 52, 51), ( 53, 52), ( 54, 53), ( 55, 54), +( 56, 55), ( 57, 56), ( 58, 57), ( 59, 58), +( 60, 59), ( 61, 60), ( 62, 61), ( 63, 62), +( 64, 63), ( 65, 64), ( 66, 65), ( 67, 66), +( 68, 67), ( 69, 68), ( 70, 69), ( 71, 70), +( 72, 71), ( 73, 72), ( 74, 73), ( 75, 74), +( 76, 75), ( 77, 76), ( 78, 77), ( 79, 78), +( 80, 79), ( 81, 80), ( 82, 81), ( 83, 82), +( 84, 83), ( 85, 84), ( 86, 85), ( 87, 86), +( 88, 87), ( 89, 88), ( 90, 89), ( 91, 90), +( 92, 91), ( 93, 92), ( 94, 93), ( 95, 94), +( 96, 95), ( 97, 96), ( 98, 97), ( 99, 98), +(100, 99), (101, 100), (102, 101), (103, 102), +(104, 103), (105, 104), (106, 105), (107, 106), +(108, 107), (109, 108), (110, 109), (111, 110), +(112, 111), (113, 112), (114, 113), (115, 114), +(116, 115), (117, 116), (118, 117), (119, 118), +(120, 119), (121, 120), (122, 121), (123, 122), +(124, 123), (125, 124), (126, 125), (127, 126), +(128, 127), (129, 128), (130, 129), (131, 130), +(132, 131), (133, 132), (134, 133), (135, 134), +(136, 135), (137, 136), (138, 137), (139, 138), +(140, 139), (141, 140), (142, 141), (143, 142), +(144, 143), (145, 144), (146, 145), (147, 146), +(148, 147), (149, 148), (150, 149), (151, 150), +(152, 151), (153, 152), (154, 153), (155, 154), +(156, 155), (157, 156), (158, 157), (159, 158), +(160, 159), (161, 160), (162, 161), (163, 162), +(164, 163), (165, 164), (166, 165), (167, 166), +(168, 167), (169, 168), (170, 169), (171, 170), +(172, 171), (173, 172), (174, 173), (175, 174), +(176, 175), (177, 176), (178, 177), (179, 178), +(180, 179), (181, 180), (182, 181), (183, 182), +(184, 183), (185, 184), (186, 185), (187, 186), +(188, 187), (189, 188), (190, 189), (191, 190), +(192, 191), (193, 192), (194, 193), (195, 194), +(196, 195), (197, 196), (198, 197), (199, 198), +(200, 199), (201, 200), (202, 201), (203, 202), +(204, 203), (205, 204), (206, 205), (207, 206), +(208, 207), (209, 208), (210, 209), (211, 210), +(212, 211), (213, 212), (214, 213), (215, 214), +(216, 215), (217, 216), (218, 217), (219, 218), +(220, 219), (221, 220), (222, 221), (223, 222), +(224, 223), (225, 224), (226, 225), (227, 226), +(228, 227), (229, 228), (230, 229), (231, 230), +(232, 231), (233, 232), (234, 233), (235, 234), +(236, 235), (237, 236), (238, 237), (239, 238), +(240, 239), (241, 240), (242, 241), (243, 242), +(244, 243), (245, 244), (246, 245), (247, 246), +(248, 247), (249, 248), (250, 249), (251, 250), +(252, 251), (253, 252), (254, 253), (255, 254); delete from t1 where id=0; -ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pid`) REFERENCES `t1` (`id`) ON DELETE 
CASCADE) -delete from t1 where id=15; +Got one of the listed errors +delete from t1 where id=255; delete from t1 where id=0; drop table t1; CREATE TABLE t1 (col1 int(1))ENGINE=InnoDB; @@ -1628,6 +1694,7 @@ a drop table t1; create table t1 (a int not null, b int not null, c blob not null, d int not null, e int, primary key (a,b,c(255),d)) engine=innodb; insert into t1 values (2,2,"b",2,2),(1,1,"a",1,1),(3,3,"ab",3,3); +analyze table t1; select * from t1 order by a,b,c,d; a b c d e 1 1 a 1 1 @@ -1689,10 +1756,10 @@ variable_value 16384 SELECT variable_value - @innodb_rows_deleted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_deleted'; variable_value - @innodb_rows_deleted_orig -71 +311 SELECT variable_value - @innodb_rows_inserted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted'; variable_value - @innodb_rows_inserted_orig -964 +1204 SELECT variable_value - @innodb_rows_updated_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_updated'; variable_value - @innodb_rows_updated_orig 866 @@ -2259,7 +2326,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (v varchar(10), c char(10)) row_format=fixed; Warnings: -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2287,9 +2354,16 @@ select * from t1 where a=20 and b is null; a b 20 NULL drop table t1; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +SET GLOBAL innodb_large_prefix=OFF; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t1 (v varchar(65530), key(v)); Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes +SET GLOBAL innodb_large_prefix=default; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html drop table t1; create table t1 (v varchar(65536)); Warnings: @@ -2309,7 +2383,8 @@ t1 CREATE TABLE `t1` ( `v` mediumtext CHARACTER SET utf8 DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 drop table t1; -set storage_engine=MyISAM; +SET sql_mode = default; +set default_storage_engine=MyISAM; create table t1 (v varchar(16384)) engine=innodb; drop table t1; create table t1 (a char(1), b char(1), key(a, b)) engine=innodb; @@ -2434,6 +2509,10 @@ t9 CREATE TABLE `t9` ( KEY `col1` (`col1`,`col2`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 drop table t1, t2, t3, t4, t5, t6, t7, t8, t9; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +SET GLOBAL innodb_large_prefix=OFF; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t1 (col1 varchar(768), index(col1)) character set = latin1 engine = innodb; Warnings: @@ -2450,6 +2529,9 @@ create table t4 (col1 blob, index(col1(768))) character set = latin1 engine = innodb; Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes +SET GLOBAL innodb_large_prefix=default; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2457,6 +2539,9 @@ t1 CREATE TABLE `t1` ( KEY `col1` (`col1`(767)) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 drop table t1, t2, t3, t4; +set global innodb_large_prefix=OFF; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t1 (col1 varchar(768) primary key) character set = latin1 engine = innodb; ERROR 42000: Specified key was too long; max key length is 767 bytes @@ -2469,6 +2554,10 @@ ERROR 42000: Specified key was too long; max key length is 767 bytes create table t4 (col1 blob, primary key(col1(768))) character set = latin1 engine = innodb; ERROR 42000: Specified key was too long; max key length is 767 bytes +SET sql_mode = default; +set global innodb_large_prefix=default; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html CREATE TABLE t1 ( id INT PRIMARY KEY @@ -2485,7 +2574,7 @@ INSERT INTO t2 VALUES(1); DELETE FROM t1 WHERE id = 1; ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `c1` FOREIGN KEY (`v`) REFERENCES `t1` (`id`)) DROP TABLE t1; -ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `c1` FOREIGN KEY (`v`) REFERENCES `t1` (`id`)) +Got one of the listed errors SET FOREIGN_KEY_CHECKS=0; DROP TABLE t1; SET FOREIGN_KEY_CHECKS=1; @@ -2608,6 +2697,7 @@ d varchar(255) character set utf8, e varchar(255) character set utf8, key (a,b,c,d,e)) engine=innodb; ERROR 42000: Specified key was too long; max key length is 3072 bytes +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; create table t1 (s1 varbinary(2),primary key (s1)) engine=innodb; create table t2 (s1 binary(2),primary key (s1)) engine=innodb; create table t3 (s1 varchar(2) binary,primary key (s1)) engine=innodb; @@ -2723,6 +2813,7 @@ t2 CREATE TABLE `t2` ( KEY `t2_ibfk_0` (`a`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE t2,t1; +SET sql_mode = default; CREATE TABLE t1 ( field1 varchar(8) NOT NULL DEFAULT '', field2 varchar(8) NOT NULL DEFAULT '', @@ -3095,7 +3186,7 @@ t1 CREATE TABLE `t1` ( CONSTRAINT `t1_t2` FOREIGN KEY (`id`) REFERENCES `t2` (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=349 DEFAULT CHARSET=latin1 DROP TABLE t1,t2; -set innodb_strict_mode=on; +SET innodb_strict_mode=ON; CREATE TABLE t1 ( c01 CHAR(255), c02 CHAR(255), c03 CHAR(255), c04 CHAR(255), c05 CHAR(255), c06 CHAR(255), c07 CHAR(255), c08 CHAR(255), @@ -3106,7 +3197,8 @@ c21 CHAR(255), c22 CHAR(255), c23 CHAR(255), c24 CHAR(255), c25 CHAR(255), c26 CHAR(255), c27 CHAR(255), c28 CHAR(255), c29 CHAR(255), c30 CHAR(255), c31 CHAR(255), c32 CHAR(255) ) ENGINE = InnoDB; -ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +ERROR 42000: Row size too large (> {checked_valid}). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+SET innodb_strict_mode=OFF; DROP TABLE IF EXISTS t1; Warnings: Note 1051 Unknown table 'test.t1' diff --git a/mysql-test/suite/innodb/r/innodb_blob_truncate.result b/mysql-test/suite/innodb/r/innodb_blob_truncate.result index 569e6b03452..a71dd7678c0 100644 --- a/mysql-test/suite/innodb/r/innodb_blob_truncate.result +++ b/mysql-test/suite/innodb/r/innodb_blob_truncate.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table = ON; create table t1(a blob) engine=innodb key_block_size=8; create function generate_blob() @@ -17,3 +19,5 @@ truncate t1; insert into t1 select generate_blob(); drop table t1; drop function generate_blob; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb_blob_unrecoverable_crash.result b/mysql-test/suite/innodb/r/innodb_blob_unrecoverable_crash.result deleted file mode 100644 index c467193bca7..00000000000 --- a/mysql-test/suite/innodb/r/innodb_blob_unrecoverable_crash.result +++ /dev/null @@ -1,24 +0,0 @@ -call mtr.add_suppression("InnoDB: The total blob data length"); -SET GLOBAL max_allowed_packet = 100*1024*1024; -connect big_packets,localhost,root,,; -connection big_packets; -CREATE TABLE t1 (a BIGINT PRIMARY KEY, b LONGBLOB) ENGINE=InnoDB; -INSERT INTO t1 (a, b) VALUES (1, '1'); -INSERT INTO t1 (a, b) VALUES (2, '2'); -INSERT INTO t1 (a, b) VALUES (3, '3'); -INSERT INTO t1 (a, b) VALUES (4, '4'); -INSERT INTO t1 (a, b) VALUES (5, '5'); -start transaction; -INSERT INTO t1 (a, b) VALUES (6, REPEAT('a', 20*1024*1024)); -ERROR 42000: The size of BLOB/TEXT data inserted in one transaction is greater than 10% of redo log size. Increase the redo log size using innodb_log_file_size. -connection default; -# Quick shutdown and restart server -connection default; -SELECT a FROM t1; -a -1 -2 -3 -4 -5 -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug12400341.result b/mysql-test/suite/innodb/r/innodb_bug12400341.result index 31a064e624d..3bb786c4654 100644 --- a/mysql-test/suite/innodb/r/innodb_bug12400341.result +++ b/mysql-test/suite/innodb/r/innodb_bug12400341.result @@ -1,4 +1,5 @@ call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too*"); +call mtr.add_suppression("\\[Warning\\] InnoDB: Cannot find a free slot for an undo log. 
Do you have too"); set @old_innodb_undo_logs = @@innodb_undo_logs; set global innodb_undo_logs=1; show variables like "max_connections"; diff --git a/mysql-test/suite/innodb/r/innodb_bug12902967.result b/mysql-test/suite/innodb/r/innodb_bug12902967.result index 5958a8dce31..e784c6b306a 100644 --- a/mysql-test/suite/innodb/r/innodb_bug12902967.result +++ b/mysql-test/suite/innodb/r/innodb_bug12902967.result @@ -1,6 +1,5 @@ +call mtr.add_suppression("In ALTER TABLE .* has or is referenced in foreign key constraints which are not compatible with the new table definition."); create table t1 (f1 integer primary key) engine innodb; alter table t1 add constraint c1 foreign key (f1) references t1(f1); ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed") -InnoDB: has or is referenced in foreign key constraints -InnoDB: which are not compatible with the new table definition. drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug14147491.result b/mysql-test/suite/innodb/r/innodb_bug14147491.result index bd3c388fae1..cf960e3a6ee 100644 --- a/mysql-test/suite/innodb/r/innodb_bug14147491.result +++ b/mysql-test/suite/innodb/r/innodb_bug14147491.result @@ -1,31 +1,10 @@ -call mtr.add_suppression("InnoDB: Database page corruption on disk or a failed.*"); -CALL mtr.add_suppression("InnoDB: Error: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); -CALL mtr.add_suppression("InnoDB: Warning: database page corruption or a failed"); -CALL mtr.add_suppression("InnoDB: Database page corruption on disk or a failed"); -CALL mtr.add_suppression("InnoDB: Space .* file test/t1 read of page .*"); -CALL mtr.add_suppression("InnoDB: You may have to recover from a backup."); -CALL mtr.add_suppression("InnoDB: It is also possible that your operatingsystem has corrupted its own file cache."); -CALL mtr.add_suppression("InnoDB: and rebooting your computer removes the error."); -CALL mtr.add_suppression("InnoDB: If the corrupt page is an index page you can also try to"); -CALL mtr.add_suppression("InnoDB: fix the corruption by dumping, dropping, and reimporting"); -CALL mtr.add_suppression("InnoDB: the corrupt table. You can use CHECK"); -CALL mtr.add_suppression("InnoDB: TABLE to scan your table for corruption."); -CALL mtr.add_suppression("InnoDB: See also .* about forcing recovery."); -# Create and populate the table to be corrupted -CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB; +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=InnoDB; INSERT INTO t1 (b) VALUES ('corrupt me'); INSERT INTO t1 (b) VALUES ('corrupt me'); -# Write file to make mysql-test-run.pl expect the "crash", but don't -# start it until it's told to -# We give 30 seconds to do a clean shutdown because we do not want -# to redo apply the pages of t1.ibd at the time of recovery. -# We want SQL to initiate the first access to t1.ibd. -# Wait until disconnected. # Backup the t1.ibd before corrupting # Corrupt the table Munged a string. Munged a string. 
-# Write file to make mysql-test-run.pl start up the server again SET DEBUG_DBUG = '+d,innodb_page_corruption_retries'; # Write file to make mysql-test-run.pl expect the "crash", but don't # start it until it's told to @@ -34,6 +13,5 @@ SET DEBUG_DBUG = '+d,innodb_page_corruption_retries'; SELECT * FROM t1; ERROR HY000: Lost connection to MySQL server during query # Restore the original t1.ibd -# Write file to make mysql-test-run.pl start up the server again # Cleanup DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug30423.result b/mysql-test/suite/innodb/r/innodb_bug30423.result index d7b72b1ec2a..c7f823a06ae 100644 --- a/mysql-test/suite/innodb/r/innodb_bug30423.result +++ b/mysql-test/suite/innodb/r/innodb_bug30423.result @@ -48,9 +48,9 @@ ON orgs.org_id=sa_opportunities.org_id LEFT JOIN bug30243_2 contacts ON orgs.org_id=contacts.org_id ; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE orgs index NULL org_id 4 NULL 128 Using index -1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id 1 Using index -1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id 1 Using index +1 SIMPLE orgs index NULL org_id 4 NULL ROWS Using index +1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id ROWS Using index +1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id ROWS Using index select @@innodb_stats_method; @@innodb_stats_method nulls_ignored @@ -74,9 +74,9 @@ ON orgs.org_id=sa_opportunities.org_id LEFT JOIN bug30243_2 contacts ON orgs.org_id=contacts.org_id; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE orgs index NULL org_id 4 NULL 128 Using index -1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id 1 Using index -1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id 1 Using index +1 SIMPLE orgs index NULL org_id 4 NULL ROWS Using index +1 SIMPLE sa_opportunities ref org_id org_id 5 test.orgs.org_id ROWS Using index +1 SIMPLE contacts ref contacts$org_id contacts$org_id 5 test.orgs.org_id ROWS Using index SELECT COUNT(*) FROM table_bug30423 WHERE org_id IS NULL; COUNT(*) 1024 diff --git a/mysql-test/suite/innodb/r/innodb_bug34053.result b/mysql-test/suite/innodb/r/innodb_bug34053.result index 195775f74c8..23c5b0cc2f7 100644 --- a/mysql-test/suite/innodb/r/innodb_bug34053.result +++ b/mysql-test/suite/innodb/r/innodb_bug34053.result @@ -1 +1 @@ -SET storage_engine=InnoDB; +SET default_storage_engine=InnoDB; diff --git a/mysql-test/suite/innodb/r/innodb_bug34300.result b/mysql-test/suite/innodb/r/innodb_bug34300.result index 09fc0b44579..b168c7782bb 100644 --- a/mysql-test/suite/innodb/r/innodb_bug34300.result +++ b/mysql-test/suite/innodb/r/innodb_bug34300.result @@ -1,3 +1,20 @@ -ERROR 42000: The size of BLOB/TEXT data inserted in one transaction is greater than 10% of redo log size. Increase the redo log size using innodb_log_file_size. 
+# +# Bug#34300 Tinyblob & tinytext fields currupted after export/import and alter in 5.1 +# +SET @@global.max_allowed_packet=16777216; +connect newconn, localhost, root,,; +CREATE TABLE bug34300 ( +f4 TINYTEXT, +f6 MEDIUMTEXT, +f8 TINYBLOB +) ENGINE=InnoDB; +INSERT INTO bug34300 VALUES ('xxx', repeat('a', 8459264), 'zzz'); +SELECT f4, f8 FROM bug34300; f4 f8 +xxx zzz +ALTER TABLE bug34300 ADD COLUMN (f10 INT); +SELECT f4, f8 FROM bug34300; f4 f8 +xxx zzz +# Cleanup +DROP TABLE bug34300; diff --git a/mysql-test/suite/innodb/r/innodb_bug46000.result b/mysql-test/suite/innodb/r/innodb_bug46000.result index 0e3f0ef59ae..7c5ef13f3dc 100644 --- a/mysql-test/suite/innodb/r/innodb_bug46000.result +++ b/mysql-test/suite/innodb/r/innodb_bug46000.result @@ -6,7 +6,7 @@ show warnings; Level Code Message Warning 1280 Cannot Create Index with name 'GEN_CLUST_INDEX'. The name is reserved for the system default primary index. Error 1280 Incorrect index name 'GEN_CLUST_INDEX' -Warning 1030 Got error -1 "Internal error < 0 (Not system error)" from storage engine InnoDB +Warning 1030 Got error 124 "Wrong index given to function" from storage engine InnoDB create table bug46000(id int) engine=innodb; create index GEN_CLUST_INDEX on bug46000(id); ERROR 42000: Incorrect index name 'GEN_CLUST_INDEX' diff --git a/mysql-test/suite/innodb/r/innodb_bug47167.result b/mysql-test/suite/innodb/r/innodb_bug47167.result index 656a4846a52..b678046e308 100644 --- a/mysql-test/suite/innodb/r/innodb_bug47167.result +++ b/mysql-test/suite/innodb/r/innodb_bug47167.result @@ -1,19 +1,25 @@ set @old_innodb_file_format_max=@@innodb_file_format_max; select @old_innodb_file_format_max; @old_innodb_file_format_max -Antelope +Barracuda set global innodb_file_format_max = Barracuda; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format_max; @@innodb_file_format_max Barracuda set global innodb_file_format_max = DEFAULT; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format_max; @@innodb_file_format_max Antelope set global innodb_file_format_max = @old_innodb_file_format_max; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format_max; @@innodb_file_format_max -Antelope +Barracuda set global innodb_file_format_max = cheetah; ERROR 42000: Variable 'innodb_file_format_max' can't be set to the value of 'cheetah' set global innodb_file_format_max = Bear; @@ -22,3 +28,5 @@ set global innodb_file_format_max = on; ERROR 42000: Variable 'innodb_file_format_max' can't be set to the value of 'ON' set global innodb_file_format_max = off; ERROR 42000: Variable 'innodb_file_format_max' can't be set to the value of 'off' +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb_bug54044.result b/mysql-test/suite/innodb/r/innodb_bug54044.result index d80c451c841..1c34ea9de1d 100644 --- a/mysql-test/suite/innodb/r/innodb_bug54044.result +++ b/mysql-test/suite/innodb/r/innodb_bug54044.result @@ -19,5 +19,5 @@ CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB; INSERT INTO t1 VALUES ('foo'),('bar'); FLUSH TABLES; CREATE TEMPORARY TABLE tmp ENGINE=InnoDB AS SELECT VALUES(a) FROM t1; -ERROR HY000: Can't create table `test`.`tmp` (errno: -1 "Internal error < 0 (Not system error)") +ERROR HY000: Can't create table `test`.`tmp` (errno: 168 "Unknown (generic) error from engine") DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb_bug60049.result b/mysql-test/suite/innodb/r/innodb_bug60049.result index 8e3be130e48..f34ebc0a955 100644 --- a/mysql-test/suite/innodb/r/innodb_bug60049.result +++ b/mysql-test/suite/innodb/r/innodb_bug60049.result @@ -1,5 +1,3 @@ -call mtr.add_suppression('InnoDB: Error: Table "mysql"."innodb_(table|index)_stats" not found'); -call mtr.add_suppression('InnoDB: Error: Fetch of persistent statistics requested'); CREATE TABLE t(a INT)ENGINE=InnoDB STATS_PERSISTENT=0; RENAME TABLE t TO u; DROP TABLE u; diff --git a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result index 0ef6f65d0ff..d68879d1c8e 100644 --- a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result +++ b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result @@ -1,48 +1,74 @@ +set names utf8; +CREATE TABLE corrupt_bit_test_Ä( +a INT AUTO_INCREMENT PRIMARY KEY, +b CHAR(100), +c INT, +z INT, +INDEX idx(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; +INSERT INTO corrupt_bit_test_Ä VALUES(0,'x',1, 1); +CREATE UNIQUE INDEX idxÄ ON corrupt_bit_test_Ä(c, b); +CREATE UNIQUE INDEX idxÄ“ ON corrupt_bit_test_Ä(z, b); +SELECT * FROM corrupt_bit_test_Ä; a b c z 1 x 1 1 +INSERT INTO corrupt_bit_test_Ä SELECT 0,b,c+1,z+1 FROM corrupt_bit_test_Ä; +select count(*) from corrupt_bit_test_Ä; count(*) 2 +SET SESSION debug="+d,dict_set_index_corrupted"; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead +check table corrupt_bit_test_Ä; Table Op Msg_type Msg_text -test.corrupt_bit_test_Ä check Warning InnoDB: Index "idx" is marked as corrupted -test.corrupt_bit_test_Ä check Warning InnoDB: Index "idxÄ" is marked as corrupted -test.corrupt_bit_test_Ä check Warning InnoDB: Index "idxÄ“" is marked as corrupted +test.corrupt_bit_test_Ä check Warning InnoDB: Index idx is marked as corrupted +test.corrupt_bit_test_Ä check Warning InnoDB: Index idxÄ is marked as corrupted +test.corrupt_bit_test_Ä check Warning InnoDB: Index idxÄ“ is marked as corrupted test.corrupt_bit_test_Ä check error Corrupt -ERROR HY000: Index "idx" is corrupted -ERROR HY000: Index "idx" is corrupted +SET SESSION debug="-d,dict_set_index_corrupted"; +Warnings: +Warning 1287 '@@debug' is deprecated and will be removed in a future release. 
Please use '@@debug_dbug' instead +CREATE INDEX idx3 ON corrupt_bit_test_Ä(b, c); +ERROR HY000: Index idx is corrupted +CREATE INDEX idx4 ON corrupt_bit_test_Ä(b, z); +ERROR HY000: Index idx is corrupted +select c from corrupt_bit_test_Ä; ERROR HY000: Index corrupt_bit_test_Ä is corrupted +select z from corrupt_bit_test_Ä; ERROR HY000: Index corrupt_bit_test_Ä is corrupted +show warnings; Level Code Message -Warning 180 InnoDB: Index "idxÄ“" for table "test"."corrupt_bit_test_Ä" is marked as corrupted +Warning 180 InnoDB: Index idxÄ“ for table "test"."corrupt_bit_test_Ä" is marked as corrupted Error 1712 Index corrupt_bit_test_Ä is corrupted +insert into corrupt_bit_test_Ä values (10001, "a", 20001, 20001); +select * from corrupt_bit_test_Ä use index(primary) where a = 10001; a b c z 10001 a 20001 20001 +begin; +insert into corrupt_bit_test_Ä values (10002, "a", 20002, 20002); +delete from corrupt_bit_test_Ä where a = 10001; +insert into corrupt_bit_test_Ä values (10001, "a", 20001, 20001); +rollback; +drop index idxÄ on corrupt_bit_test_Ä; +check table corrupt_bit_test_Ä; Table Op Msg_type Msg_text -test.corrupt_bit_test_Ä check Warning InnoDB: Index "idx" is marked as corrupted -test.corrupt_bit_test_Ä check Warning InnoDB: Index "idxÄ“" is marked as corrupted +test.corrupt_bit_test_Ä check Warning InnoDB: Index idx is marked as corrupted +test.corrupt_bit_test_Ä check Warning InnoDB: Index idxÄ“ is marked as corrupted test.corrupt_bit_test_Ä check error Corrupt +set names utf8; +select z from corrupt_bit_test_Ä; ERROR HY000: Index corrupt_bit_test_Ä is corrupted -Table Create Table -corrupt_bit_test_Ä CREATE TABLE `corrupt_bit_test_Ä` ( - `a` int(11) NOT NULL AUTO_INCREMENT, - `b` char(100) DEFAULT NULL, - `c` int(11) DEFAULT NULL, - `z` int(11) DEFAULT NULL, - PRIMARY KEY (`a`), - UNIQUE KEY `idxÄ“` (`z`,`b`), - KEY `idx` (`b`) -) ENGINE=InnoDB AUTO_INCREMENT=10003 DEFAULT CHARSET=latin1 -ERROR HY000: Index "idx" is corrupted -ERROR HY000: Index "idx" is corrupted -Table Create Table -corrupt_bit_test_Ä CREATE TABLE `corrupt_bit_test_Ä` ( - `a` int(11) NOT NULL AUTO_INCREMENT, - `b` char(100) DEFAULT NULL, - `c` int(11) DEFAULT NULL, - `z` int(11) DEFAULT NULL, - PRIMARY KEY (`a`), - KEY `idx` (`b`) -) ENGINE=InnoDB AUTO_INCREMENT=10003 DEFAULT CHARSET=latin1 +drop index idxÄ“ on corrupt_bit_test_Ä; +CREATE INDEX idx3 ON corrupt_bit_test_Ä(b, c); +ERROR HY000: Index idx is corrupted +CREATE INDEX idx4 ON corrupt_bit_test_Ä(b, z); +ERROR HY000: Index idx is corrupted +drop index idx on corrupt_bit_test_Ä; +CREATE INDEX idx3 ON corrupt_bit_test_Ä(b, c); +CREATE INDEX idx4 ON corrupt_bit_test_Ä(b, z); +select z from corrupt_bit_test_Ä limit 10; z 20001 1 2 +drop table corrupt_bit_test_Ä; diff --git a/mysql-test/suite/innodb/r/innodb_file_format.result b/mysql-test/suite/innodb/r/innodb_file_format.result index 77328a360a9..e489911afb5 100644 --- a/mysql-test/suite/innodb/r/innodb_file_format.result +++ b/mysql-test/suite/innodb/r/innodb_file_format.result @@ -1,38 +1,50 @@ select @@innodb_file_format; @@innodb_file_format -Antelope +Barracuda select @@innodb_file_format_check; @@innodb_file_format_check 1 select @@innodb_file_format_max; @@innodb_file_format_max -Antelope +Barracuda set global innodb_file_format=antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format=barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format=cheetah; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of 'cheetah' select @@innodb_file_format; @@innodb_file_format Barracuda set global innodb_file_format=default; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format; @@innodb_file_format -Antelope +Barracuda set global innodb_file_format=on; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of 'ON' set global innodb_file_format=off; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of 'off' select @@innodb_file_format; @@innodb_file_format -Antelope +Barracuda set global innodb_file_format_max=antelope; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format_max=barracuda; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format_max=cheetah; ERROR 42000: Variable 'innodb_file_format_max' can't be set to the value of 'cheetah' select @@innodb_file_format_max; @@innodb_file_format_max Barracuda set global innodb_file_format_max=default; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format_max; @@innodb_file_format_max Antelope @@ -44,5 +56,11 @@ select @@innodb_file_format_max; @@innodb_file_format_max Antelope set global innodb_file_format_max=antelope; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format_check=off; ERROR HY000: Variable 'innodb_file_format_check' is a read only variable +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb_gis.result b/mysql-test/suite/innodb/r/innodb_gis.result index fc13ba1ab2c..5a978ea303c 100644 --- a/mysql-test/suite/innodb/r/innodb_gis.result +++ b/mysql-test/suite/innodb/r/innodb_gis.result @@ -598,4 +598,4 @@ create table t1 (a int not null, b linestring not null, unique key b (b(12))); create unique index a on t1(a); drop table t1; create table t1 (g geometry not null, spatial gk(g)) engine=innodb; -ERROR HY000: The storage engine InnoDB doesn't support SPATIAL indexes +drop table t1; diff --git a/mysql-test/suite/innodb/r/innodb_information_schema.result b/mysql-test/suite/innodb/r/innodb_information_schema.result index 1b83bc29493..33dc50df745 100644 --- a/mysql-test/suite/innodb/r/innodb_information_schema.result +++ b/mysql-test/suite/innodb/r/innodb_information_schema.result @@ -7,10 +7,10 @@ X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\' X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0' X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0' -X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0 -X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0 -X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615 -X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615 +X RECORD `test`.`t_max` PRIMARY 2 127.000000, 140517642401116, 32767.000000, 140517642401147, 8388607.000000, 140517642401180, 2147483647.000000, 140517642401216, 9223372036854775808.000000, 140517642401261 +X RECORD `test`.`t_max` PRIMARY 2 127.000000, 140517642401116, 32767.000000, 140517642401147, 8388607.000000, 140517642401180, 2147483647.000000, 140517642401216, 9223372036854775808.000000, 140517642401261 +X RECORD `test`.`t_min` PRIMARY 2 18446744073709551616.000000, 140517642401133, 18446744073709518848.000000, 140517642401179, 18446744073701163008.000000, 140517642401225, 18446744071562067968.000000, 140517642401271, 9223372036854775808.000000, 140517642401316 +X RECORD `test`.`t_min` PRIMARY 2 18446744073709551616.000000, 140517642401133, 18446744073709518848.000000, 140517642401179, 18446744073701163008.000000, 140517642401225, 18446744071562067968.000000, 140517642401271, 9223372036854775808.000000, 140517642401316 X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record lock_table COUNT(*) @@ -47,7 +47,7 @@ trx_adaptive_hash_timeout bigint(21) unsigned NO 0 trx_is_read_only int(1) NO 0 trx_autocommit_non_locking int(1) NO 0 trx_state trx_weight trx_tables_in_use trx_tables_locked trx_rows_locked trx_rows_modified trx_concurrency_tickets trx_isolation_level trx_unique_checks trx_foreign_key_checks -RUNNING 4 0 0 7 1 0 REPEATABLE READ 1 1 +RUNNING 4 0 1 7 1 0 REPEATABLE READ 1 1 trx_isolation_level trx_unique_checks trx_foreign_key_checks SERIALIZABLE 0 0 trx_state trx_isolation_level trx_last_foreign_key_error diff --git a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result 
b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result index dcdf3082067..6328458d46e 100644 --- a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result +++ b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result @@ -24,6 +24,7 @@ WHERE TABLE_NAME like "%infoschema_buffer_test%" and PAGE_STATE="file_page" and INDEX_NAME = "idx" and PAGE_TYPE="index"; TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE PAGE_STATE PAGE_TYPE `test`.`infoschema_buffer_test` idx 2 32 FILE_PAGE INDEX +`test`.`infoschema_buffer_test` idx 2 32 FILE_PAGE INDEX DROP TABLE infoschema_buffer_test; SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE diff --git a/mysql-test/suite/innodb/r/innodb_monitor.result b/mysql-test/suite/innodb/r/innodb_monitor.result index 8c580348e1a..bda1462ed33 100644 --- a/mysql-test/suite/innodb/r/innodb_monitor.result +++ b/mysql-test/suite/innodb/r/innodb_monitor.result @@ -4,7 +4,6 @@ name status metadata_table_handles_opened disabled metadata_table_handles_closed disabled metadata_table_reference_count disabled -metadata_mem_pool_size disabled lock_deadlocks disabled lock_timeouts disabled lock_rec_lock_waits disabled @@ -47,7 +46,6 @@ buffer_data_written disabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled -buffer_flush_batch_rescan disabled buffer_flush_batch_total_pages disabled buffer_flush_batches disabled buffer_flush_batch_pages disabled @@ -55,6 +53,19 @@ buffer_flush_neighbor_total_pages disabled buffer_flush_neighbor disabled buffer_flush_neighbor_pages disabled buffer_flush_n_to_flush_requested disabled +buffer_flush_n_to_flush_by_age disabled +buffer_flush_adaptive_avg_time_slot disabled +buffer_LRU_batch_flush_avg_time_slot disabled +buffer_flush_adaptive_avg_time_thread disabled +buffer_LRU_batch_flush_avg_time_thread disabled +buffer_flush_adaptive_avg_time_est disabled +buffer_LRU_batch_flush_avg_time_est disabled +buffer_flush_avg_time disabled +buffer_flush_adaptive_avg_pass disabled +buffer_LRU_batch_flush_avg_pass disabled +buffer_flush_avg_pass disabled +buffer_LRU_get_free_loops disabled +buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled @@ -157,12 +168,13 @@ log_lsn_checkpoint_age disabled log_lsn_buf_pool_oldest disabled log_max_modified_age_async disabled log_max_modified_age_sync disabled -log_pending_log_writes disabled +log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled log_waits disabled log_write_requests disabled log_writes disabled +log_padded disabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -223,10 +235,13 @@ innodb_dblwr_pages_written disabled innodb_page_size disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled dml_reads disabled dml_inserts disabled dml_deletes disabled @@ -239,6 +254,8 @@ ddl_background_drop_indexes disabled ddl_background_drop_tables disabled ddl_online_create_index disabled ddl_pending_alter_table disabled +ddl_sort_file_alter_table disabled +ddl_log_file_alter_table disabled 
icp_attempts disabled icp_no_match disabled icp_out_of_range disabled @@ -280,10 +297,13 @@ lock_row_lock_waits disabled lock_row_lock_time_avg disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled set global innodb_monitor_enable = "%lock*"; ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*' set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%"; @@ -408,7 +428,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 2 NULL 2 enabled metadata_table_handles_closed 1 NULL 1 1 NULL 1 enabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_disable = module_metadata; set global innodb_monitor_reset = module_metadata; select name, max_count, min_count, count, @@ -419,7 +438,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 NULL NULL 0 disabled metadata_table_handles_closed 1 NULL 1 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_reset_all = module_metadata; select name, max_count, min_count, count, max_count_reset, min_count_reset, count_reset, status @@ -429,7 +447,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled metadata_table_handles_closed NULL NULL 0 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_enable = module_trx; begin; insert into monitor_test values(9); diff --git a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result index cf5d77a679c..5ae138477b9 100644 --- a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result +++ b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result @@ -1,6 +1,10 @@ set global innodb_file_format="Barracuda"; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table=1; set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html DROP TABLE IF EXISTS worklog5743; CREATE TABLE worklog5743 ( col_1_text TEXT(4000) , col_2_text TEXT(4000) , @@ -88,6 +92,10 @@ worklog5743; col_1_text = REPEAT("a", 3500) col_2_text = REPEAT("o", 3500) 1 1 DROP TABLE worklog5743; -SET GLOBAL innodb_file_format=Antelope; +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=1; -SET GLOBAL innodb_large_prefix=0; +SET GLOBAL innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result index 92ae1637ad5..099c673bca7 100644 --- a/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result +++ b/mysql-test/suite/innodb/r/innodb_simulate_comp_failures_small.result @@ -2,7 +2,16 @@ # Testing robustness against random compression failures # CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255), KEY msg_i(msg)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `msg` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `msg_i` (`msg`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8 SET GLOBAL innodb_simulate_comp_failures = 25; -SELECT COUNT(*) FROM t1; -COUNT(*) +COMMIT; +SELECT COUNT(id) FROM t1; +COUNT(id) 1000 diff --git a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result index c351b222496..b9429046b36 100644 --- a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result +++ b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result @@ -1,5 +1,5 @@ -call mtr.add_suppression("InnoDB: Error: Table \"mysql\".\"innodb_index_stats\" not found"); -call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table"); +call mtr.add_suppression("InnoDB: Table .*innodb_index_stats.* not found"); +call mtr.add_suppression("InnoDB: Fetch of persistent statistics requested for table .*"); ALTER TABLE mysql.innodb_index_stats RENAME TO mysql.innodb_index_stats_; CREATE TABLE test_ps_create_on_corrupted (a INT, PRIMARY KEY (a)) diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result index f6cce754527..42862fb0e3a 100644 --- a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result +++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result @@ -1,5 +1,5 @@ -call mtr.add_suppression("InnoDB: Error: Table \"mysql\".\"innodb_index_stats\" not found"); -call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table"); +call mtr.add_suppression("InnoDB: Table \"mysql\".\"innodb_index_stats\" not found"); +call mtr.add_suppression("InnoDB: Fetch of persistent statistics requested for table.*"); CREATE TABLE test_ps_fetch_corrupted (a INT, PRIMARY KEY (a)) ENGINE=INNODB STATS_PERSISTENT=1; diff --git a/mysql-test/suite/innodb/r/strict_mode.result b/mysql-test/suite/innodb/r/strict_mode.result index d6a621212c3..2f120afbc09 100644 --- a/mysql-test/suite/innodb/r/strict_mode.result +++ b/mysql-test/suite/innodb/r/strict_mode.result @@ -2,6 +2,7 @@ # Bug #17852083 PRINT A WARNING WHEN DDL HAS AN ERROR IN # INNODB_STRICT_MODE = 1 # +call mtr.add_suppression("InnoDB: Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); set innodb_strict_mode = 0; create table t1 (id int 
auto_increment primary key, v varchar(32), @@ -235,7 +236,7 @@ col227 text, col228 text ) ENGINE=InnoDB; Warnings: -Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. set innodb_strict_mode = 1; alter table t1 engine=InnoDB; ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs diff --git a/mysql-test/suite/innodb/t/auto_increment_dup.opt b/mysql-test/suite/innodb/t/auto_increment_dup.opt new file mode 100644 index 00000000000..8235b0a811a --- /dev/null +++ b/mysql-test/suite/innodb/t/auto_increment_dup.opt @@ -0,0 +1 @@ +--innodb_autoinc_lock_mode=1 diff --git a/mysql-test/suite/innodb/t/create_isl_with_direct.test b/mysql-test/suite/innodb/t/create_isl_with_direct.test index 34ea9c77703..83c7507e025 100644 --- a/mysql-test/suite/innodb/t/create_isl_with_direct.test +++ b/mysql-test/suite/innodb/t/create_isl_with_direct.test @@ -3,7 +3,7 @@ --source include/not_windows.inc --disable_query_log -CALL mtr.add_suppression("\\[Warning\\] InnoDB: Failed to set O_DIRECT on file ./ibdata1: OPEN: Invalid argument, continuing anyway. O_DIRECT is known to result in 'Invalid argument' on Linux on tmpfs, see MySQL Bug#26662."); +CALL mtr.add_suppression(".*Failed to set O_DIRECT on file.*"); # The below mtr suppression to avoid failure in solaris platform. CALL mtr.add_suppression("\\[ERROR\\] InnoDB: Failed to set DIRECTIO_ON on file.*"); diff --git a/mysql-test/suite/innodb/t/innodb-16k.test b/mysql-test/suite/innodb/t/innodb-16k.test index 3cd90a00d55..ad09666442d 100644 --- a/mysql-test/suite/innodb/t/innodb-16k.test +++ b/mysql-test/suite/innodb/t/innodb-16k.test @@ -3,16 +3,20 @@ --source include/have_innodb.inc --source include/have_innodb_16k.inc +call mtr.add_suppression("InnoDB: Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); + --disable_query_log let $MYSQLD_DATADIR= `select @@datadir`; # These values can change during the test let $innodb_file_format_orig = `SELECT @@innodb_file_format`; let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`; let $innodb_strict_mode_orig = `SELECT @@session.innodb_strict_mode`; +let $innodb_large_prefix_orig = `SELECT @@innodb_large_prefix`; --enable_query_log SET GLOBAL innodb_file_format = `Barracuda`; SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_large_prefix = OFF; --echo # Test 1) Show the page size from Information Schema @@ -374,16 +378,6 @@ CREATE INDEX t1t ON t1 (t(767)); --error 1713 UPDATE t1 SET t=@e; -# The function dict_index_too_big_for_undo() prevents us from adding -# one more index. But it is too late. The record is already too big. - ---error ER_TOO_BIG_ROWSIZE -CREATE INDEX t1u ON t1 (u(767)); - ---error ER_TOO_BIG_ROWSIZE -CREATE INDEX t1ut ON t1 (u(767), t(767)); -CREATE INDEX t1st ON t1 (s(767), t(767)); - SHOW CREATE TABLE t1; DROP TABLE t1; @@ -520,11 +514,7 @@ UPDATE bug12547647 SET c = REPEAT('b',16928); SHOW WARNINGS; DROP TABLE bug12547647; - - -# The following should fail in non-strict mode too. 
- -SET SESSION innodb_strict_mode = off; +SET SESSION innodb_strict_mode = on; --error ER_TOO_BIG_ROWSIZE CREATE TABLE t1( c text NOT NULL, d text NOT NULL, @@ -974,4 +964,5 @@ row_format=compact,ENGINE=INNODB; EVAL SET GLOBAL innodb_file_format = $innodb_file_format_orig; EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig; EVAL SET SESSION innodb_strict_mode = $innodb_strict_mode_orig; +EVAL SET GLOBAL innodb_large_prefix = $innodb_large_prefix_orig; --enable_query_log diff --git a/mysql-test/suite/innodb/t/innodb-alter-discard.test b/mysql-test/suite/innodb/t/innodb-alter-discard.test index 80678cef0a6..792882b51a5 100644 --- a/mysql-test/suite/innodb/t/innodb-alter-discard.test +++ b/mysql-test/suite/innodb/t/innodb-alter-discard.test @@ -23,6 +23,12 @@ EOF -- enable_reconnect -- source include/wait_until_connected_again.inc +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation."); +call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified."); +call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them."); +call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: './test/t.ibd' OS error: .*"); +call mtr.add_suppression("InnoDB: Ignoring tablespace `test/t` because it could not be opened."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .*"); call mtr.add_suppression("InnoDB: Error: trying to open a table, but could not$"); call mtr.add_suppression("MySQL is trying to open a table handle but the \.ibd file for$"); call mtr.add_suppression("InnoDB: Table 'test/t'$"); diff --git a/mysql-test/suite/innodb/t/innodb-blob.test b/mysql-test/suite/innodb/t/innodb-blob.test index 8aa113fc3ca..c1f9ee5992f 100644 --- a/mysql-test/suite/innodb/t/innodb-blob.test +++ b/mysql-test/suite/innodb/t/innodb-blob.test @@ -3,6 +3,7 @@ # columns are stored off-page. --source include/have_innodb.inc +# The 7000 in this test is a bit less than half the innodb_page_size. --source include/have_innodb_16k.inc # DEBUG_SYNC must be compiled in. @@ -15,6 +16,8 @@ # Avoid CrashReporter popup on Mac --source include/not_crashrep.inc +call mtr.add_suppression("InnoDB: The log sequence numbers [0-9]+ and [0-9]+ in ibdata files do not match the log sequence number [0-9]+ in the ib_logfiles!"); + CREATE TABLE t1 (a INT PRIMARY KEY, b TEXT) ENGINE=InnoDB; CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB; CREATE TABLE t3 (a INT PRIMARY KEY, b TEXT, c TEXT) ENGINE=InnoDB; @@ -41,7 +44,7 @@ connect (con2,localhost,root,,); # Check that the above SELECT is blocked let $wait_condition= select count(*) = 1 from information_schema.processlist - where state = 'Sending data' and + where state in ('Sending data', 'Opening tables') and info = 'SELECT a, RIGHT(b,20) FROM t1'; --source include/wait_condition.inc @@ -51,7 +54,7 @@ connection con1; reap; connection default; reap; -SET DEBUG_DBUG='+d,row_ins_extern_checkpoint'; +SET DEBUG='+d,row_ins_extern_checkpoint'; SET DEBUG_SYNC='before_row_ins_extern_latch SIGNAL rec_not_blob WAIT_FOR crash'; ROLLBACK; BEGIN; @@ -68,7 +71,8 @@ SELECT a, RIGHT(b,20) FROM t1; SELECT a FROM t1; # Request a crash, and restart the server. 
-SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +# Write file to make mysql-test-run.pl restart the server --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --error 2013 INSERT INTO t2 VALUES (42); @@ -80,7 +84,6 @@ connection default; --error 2013 reap; -# Write file to make mysql-test-run.pl restart the server --enable_reconnect --source include/wait_until_connected_again.inc --disable_reconnect @@ -90,41 +93,34 @@ CHECK TABLE t1; INSERT INTO t3 VALUES (1,REPEAT('d',7000),REPEAT('e',100)), (2,REPEAT('g',7000),REPEAT('h',100)); -SET DEBUG_SYNC='before_row_upd_extern SIGNAL have_latch WAIT_FOR go'; +SET DEBUG_SYNC='blob_write_middle SIGNAL go_sel WAIT_FOR go_upd'; # This should move column b off-page. --send UPDATE t3 SET c=REPEAT('f',3000) WHERE a=1; +--echo # Connection con1: connect (con1,localhost,root,,); -SET DEBUG_SYNC='now WAIT_FOR have_latch'; +SET DEBUG_SYNC='now WAIT_FOR go_sel'; SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; SELECT @@tx_isolation; - -# this one should block --- send SELECT a, RIGHT(b,20), RIGHT(c,20) FROM t3; +set debug_sync='now SIGNAL go_upd'; -connect (con2,localhost,root,,); - -# Check that the above SELECT is blocked -let $wait_condition= - select count(*) = 1 from information_schema.processlist - where state = 'Sending data' and - info = 'SELECT a, RIGHT(b,20), RIGHT(c,20) FROM t3'; ---source include/wait_condition.inc - -SET DEBUG_SYNC='now SIGNAL go'; +--echo # Connection default: +connection default; +--echo # reap UPDATE t3 SET c=REPEAT('f',3000) WHERE a=1; +reap; +--echo # Connection con1: connection con1; -reap; +SELECT a, RIGHT(b,20), RIGHT(c,20) FROM t3; + disconnect con1; connection default; -reap; - CHECK TABLE t1,t2,t3; -connection con2; +connect (con2,localhost,root,,); BEGIN; INSERT INTO t2 VALUES (347); connection default; @@ -134,7 +130,7 @@ connection default; # remain open while we are writing the off-page columns and are # stuck in the DEBUG_SYNC. A checkpoint involves a flush, which # would wait for the buffer-fix to cease. -SET DEBUG_DBUG='+d,row_upd_extern_checkpoint'; +SET DEBUG='+d,row_upd_extern_checkpoint'; SET DEBUG_SYNC='before_row_upd_extern SIGNAL have_latch WAIT_FOR crash'; # This should move column b off-page. --send @@ -148,7 +144,8 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: before_row_upd_extern'; # Request a crash, and restart the server. -SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +# Write file to make mysql-test-run.pl restart the server --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --error 2013 COMMIT; @@ -159,7 +156,6 @@ connection default; --error 2013 reap; -# Write file to make mysql-test-run.pl restart the server --enable_reconnect --source include/wait_until_connected_again.inc --disable_reconnect @@ -178,7 +174,7 @@ connection default; # remain open while we are writing the off-page columns and are # stuck in the DEBUG_SYNC. A checkpoint involves a flush, which # would wait for the buffer-fix to cease. -SET DEBUG_DBUG='+d,row_upd_extern_checkpoint'; +SET DEBUG='+d,row_upd_extern_checkpoint'; SET DEBUG_SYNC='after_row_upd_extern SIGNAL have_latch WAIT_FOR crash'; # This should move column b off-page. --send @@ -192,7 +188,8 @@ SELECT info FROM information_schema.processlist WHERE state = 'debug sync point: after_row_upd_extern'; # Request a crash, and restart the server. 
-SET DEBUG_DBUG='+d,crash_commit_before'; +SET DEBUG='+d,crash_commit_before'; +# Write file to make mysql-test-run.pl restart the server --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --error 2013 COMMIT; @@ -203,7 +200,6 @@ connection default; --error 2013 reap; -# Write file to make mysql-test-run.pl restart the server --enable_reconnect --source include/wait_until_connected_again.inc --disable_reconnect diff --git a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery-master.opt b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery-master.opt index 33e2b863684..97b259ee047 100644 --- a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery-master.opt +++ b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery-master.opt @@ -1 +1,2 @@ --log-error=$MYSQLTEST_VARDIR/tmp/my_restart.err +--innodb_buffer_pool_size=24M diff --git a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test index 79f7999d115..cbf2d0c9805 100644 --- a/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test +++ b/mysql-test/suite/innodb/t/innodb-change-buffer-recovery.test @@ -1,8 +1,3 @@ -if (`select plugin_auth_version < "5.6.17" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB before 5.6.17 -} - --echo # --echo # Bug#69122 - INNODB DOESN'T REDO-LOG INSERT BUFFER MERGE --echo # OPERATION IF IT IS DONE IN-PLACE @@ -14,8 +9,13 @@ if (`select plugin_auth_version < "5.6.17" from information_schema.plugins where --source include/not_embedded.inc # DBUG_SUICIDE() hangs under valgrind --source include/not_valgrind.inc -# No windows, need perl ---source include/not_windows.inc + +CREATE TABLE t1( + a INT AUTO_INCREMENT PRIMARY KEY, + b CHAR(1), + c INT, + INDEX(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; # The flag innodb_change_buffering_debug is only available in debug builds. # It instructs InnoDB to try to evict pages from the buffer pool when @@ -24,13 +24,6 @@ if (`select plugin_auth_version < "5.6.17" from information_schema.plugins where SET GLOBAL innodb_change_buffering_debug = 1; let SEARCH_FILE = $MYSQLTEST_VARDIR/tmp/my_restart.err; -CREATE TABLE t1( - a INT AUTO_INCREMENT PRIMARY KEY, - b CHAR(1), - c INT, - INDEX(b)) -ENGINE=InnoDB; - # Create enough rows for the table, so that the change buffer will be # used for modifying the secondary index page. There must be multiple # index pages, because changes to the root page are never buffered. @@ -48,7 +41,6 @@ INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; INSERT INTO t1 SELECT 0,b,c FROM t1; -INSERT INTO t1 SELECT 0,b,c FROM t1; BEGIN; SELECT b FROM t1 LIMIT 3; @@ -60,7 +52,7 @@ DELETE FROM t1 WHERE a=1; # This should be buffered, if innodb_change_buffering_debug = 1 is in effect. 
INSERT INTO t1 VALUES(1,'X',1); -SET DEBUG_DBUG='+d,crash_after_log_ibuf_upd_inplace'; +SET DEBUG='+d,crash_after_log_ibuf_upd_inplace'; --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --error 2013 # This should force a change buffer merge diff --git a/mysql-test/suite/innodb/t/innodb-mdev-7408.test b/mysql-test/suite/innodb/t/innodb-mdev-7408.test index d1cd1879bb1..46f1afca27b 100644 --- a/mysql-test/suite/innodb/t/innodb-mdev-7408.test +++ b/mysql-test/suite/innodb/t/innodb-mdev-7408.test @@ -1,5 +1,7 @@ --source include/have_innodb.inc +call mtr.add_suppression("InnoDB: User stopword table .* does not exist."); + select @@global.innodb_ft_server_stopword_table; CREATE TABLE `stop_it-IT` ENGINE = InnoDB SELECT * FROM information_schema.INNODB_FT_DEFAULT_STOPWORD; --error 1231 diff --git a/mysql-test/suite/innodb/t/innodb-mdev-7513-master.opt b/mysql-test/suite/innodb/t/innodb-mdev-7513-master.opt new file mode 100644 index 00000000000..a2a7d5f6adf --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-mdev-7513-master.opt @@ -0,0 +1 @@ +--innodb-strict-mode=0 diff --git a/mysql-test/suite/innodb/t/innodb-mdev-7513.test b/mysql-test/suite/innodb/t/innodb-mdev-7513.test index b929ea14781..88f941ef70d 100644 --- a/mysql-test/suite/innodb/t/innodb-mdev-7513.test +++ b/mysql-test/suite/innodb/t/innodb-mdev-7513.test @@ -3,6 +3,7 @@ # MDEV-7513: ib_warn_row_too_big dereferences null thd +call mtr.add_suppression("InnoDB: Cannot add field `.* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); call mtr.add_suppression("Row size too large (> 8126)*"); --disable_warnings diff --git a/mysql-test/suite/innodb/t/innodb-mdev7046.test b/mysql-test/suite/innodb/t/innodb-mdev7046.test index b4085228e02..208dcd52f35 100644 --- a/mysql-test/suite/innodb/t/innodb-mdev7046.test +++ b/mysql-test/suite/innodb/t/innodb-mdev7046.test @@ -12,6 +12,7 @@ call mtr.add_suppression("InnoDB: File ./test/t1*"); call mtr.add_suppression("InnoDB: Error number*"); call mtr.add_suppression("InnoDB: File ./test/t1#p#p1#sp#p1sp0.ibd: 'rename' returned OS error*"); +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation."); # MDEV-7046: MySQL#74480 - Failing assertion: os_file_status(newpath, &exists, &type) # after Operating system error number 36 in a file operation diff --git a/mysql-test/suite/innodb/t/innodb-virtual-columns.test b/mysql-test/suite/innodb/t/innodb-virtual-columns.test index 368c6fc8cb1..0e0d6dbb2f5 100644 --- a/mysql-test/suite/innodb/t/innodb-virtual-columns.test +++ b/mysql-test/suite/innodb/t/innodb-virtual-columns.test @@ -33,6 +33,8 @@ CREATE TABLE IF NOT EXISTS grad_degree ( CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +SHOW CREATE TABLE grad_degree; + CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn); CREATE INDEX grad_degree_as_of_term_ndx ON grad_degree (deg_as_of_term); @@ -137,6 +139,8 @@ CREATE TABLE IF NOT EXISTS grad_degree ( CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +SHOW CREATE TABLE grad_degree; + CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn); CREATE INDEX grad_degree_as_of_term_ndx ON grad_degree (deg_as_of_term); @@ -251,6 +255,8 @@ CREATE TABLE IF NOT EXISTS grad_degree ( CONSTRAINT grad_degree_stu_plan_admit_pky PRIMARY KEY (student_id, plan, admit_term) ) 
ENGINE=InnoDB DEFAULT CHARSET=utf8; +SHOW CREATE TABLE grad_degree; + CREATE INDEX grad_degree_wdraw_rsn_ndx ON grad_degree (wdraw_rsn); ALTER TABLE grad_degree DROP COLUMN ofis_deg_status2, DROP COLUMN ofis_deg_status3, diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-1.test b/mysql-test/suite/innodb/t/innodb-wl5522-1.test index b1db34976a9..6c2607effe8 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522-1.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522-1.test @@ -3,6 +3,8 @@ -- source include/have_innodb.inc +call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT."); + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test index 4b03ac008d2..c61d10e8593 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug-zip.test @@ -17,6 +17,9 @@ # allow test to run only when innodb-page-size=16 --source include/have_innodb_16k.inc +call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded."); +call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue."); +call mtr.add_suppression("InnoDB: Error: Tablespace flags .* corrupted unused .*"); let MYSQLD_DATADIR =`SELECT @@datadir`; let $innodb_file_per_table = `SELECT @@innodb_file_per_table`; diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-zip.test b/mysql-test/suite/innodb/t/innodb-wl5522-zip.test index d139e0b700d..395e4def85d 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522-zip.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522-zip.test @@ -7,6 +7,8 @@ # allow test to run only when innodb-page-size=16 --source include/have_innodb_16k.inc +call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT."); + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/mysql-test/suite/innodb/t/innodb-wl5522.test b/mysql-test/suite/innodb/t/innodb-wl5522.test index c9e7748cb47..b04c726b74a 100644 --- a/mysql-test/suite/innodb/t/innodb-wl5522.test +++ b/mysql-test/suite/innodb/t/innodb-wl5522.test @@ -3,6 +3,8 @@ -- source include/have_innodb.inc +call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT."); + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test index 2e7306c8e29..79a00c27686 100644 --- a/mysql-test/suite/innodb/t/innodb.test +++ b/mysql-test/suite/innodb/t/innodb.test @@ -31,8 +31,15 @@ set optimizer_switch = 'mrr=on,mrr_sort_keys=on,index_condition_pushdown=on'; # the test to be run multiple times without restarting the mysqld server. 
# See Bug#43309 Test main.innodb can't be run twice -- disable_query_log +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +call mtr.add_suppression("\\[ERROR\\] InnoDB: in ALTER TABLE `test`.`t1`"); +call mtr.add_suppression("\\[ERROR\\] InnoDB: in RENAME TABLE table `test`.`t1`"); + +SET @innodb_thread_sleep_delay_orig = @@innodb_thread_sleep_delay; + SET @innodb_thread_concurrency_orig = @@innodb_thread_concurrency; +--disable_warnings SET @innodb_rows_deleted_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_deleted'); SET @innodb_rows_inserted_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted'); SET @innodb_rows_updated_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_updated'); @@ -41,6 +48,7 @@ SET @innodb_row_lock_current_waits_orig = (SELECT variable_value FROM informatio SET @innodb_row_lock_time_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_row_lock_time'); SET @innodb_row_lock_time_max_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_row_lock_time_max'); SET @innodb_row_lock_time_avg_orig = (SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_row_lock_time_avg'); +--enable_warnings -- enable_query_log --disable_warnings @@ -677,6 +685,8 @@ drop table t1; # Test of multi-table-delete # +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; + CREATE TABLE t1 ( number bigint(20) NOT NULL default '0', cname char(15) NOT NULL default '', @@ -720,6 +730,7 @@ select * from t1; select * from t2; select * from t2; drop table t1,t2; +SET sql_mode = default; # # A simple test with some isolation levels @@ -1056,18 +1067,84 @@ UPDATE t1 SET c1 = 'other' WHERE c1 = 'old'; DROP TABLE t2,t1; # -# test for recursion depth limit +# test for FK cascade depth limit # +call mtr.add_suppression("Cannot delete/update rows with cascading foreign key constraints that exceed max depth of 255. 
Please drop excessive foreign constraints and try again"); + create table t1( id int primary key, pid int, index(pid), foreign key(pid) references t1(id) on delete cascade) engine=innodb; -insert into t1 values(0,0),(1,0),(2,1),(3,2),(4,3),(5,4),(6,5),(7,6), - (8,7),(9,8),(10,9),(11,10),(12,11),(13,12),(14,13),(15,14); --- error 1451 +insert into t1 values +( 0, 0), ( 1, 0), ( 2, 1), ( 3, 2), +( 4, 3), ( 5, 4), ( 6, 5), ( 7, 6), +( 8, 7), ( 9, 8), ( 10, 9), ( 11, 10), +( 12, 11), ( 13, 12), ( 14, 13), ( 15, 14), +( 16, 15), ( 17, 16), ( 18, 17), ( 19, 18), +( 20, 19), ( 21, 20), ( 22, 21), ( 23, 22), +( 24, 23), ( 25, 24), ( 26, 25), ( 27, 26), +( 28, 27), ( 29, 28), ( 30, 29), ( 31, 30), +( 32, 31), ( 33, 32), ( 34, 33), ( 35, 34), +( 36, 35), ( 37, 36), ( 38, 37), ( 39, 38), +( 40, 39), ( 41, 40), ( 42, 41), ( 43, 42), +( 44, 43), ( 45, 44), ( 46, 45), ( 47, 46), +( 48, 47), ( 49, 48), ( 50, 49), ( 51, 50), +( 52, 51), ( 53, 52), ( 54, 53), ( 55, 54), +( 56, 55), ( 57, 56), ( 58, 57), ( 59, 58), +( 60, 59), ( 61, 60), ( 62, 61), ( 63, 62), +( 64, 63), ( 65, 64), ( 66, 65), ( 67, 66), +( 68, 67), ( 69, 68), ( 70, 69), ( 71, 70), +( 72, 71), ( 73, 72), ( 74, 73), ( 75, 74), +( 76, 75), ( 77, 76), ( 78, 77), ( 79, 78), +( 80, 79), ( 81, 80), ( 82, 81), ( 83, 82), +( 84, 83), ( 85, 84), ( 86, 85), ( 87, 86), +( 88, 87), ( 89, 88), ( 90, 89), ( 91, 90), +( 92, 91), ( 93, 92), ( 94, 93), ( 95, 94), +( 96, 95), ( 97, 96), ( 98, 97), ( 99, 98), +(100, 99), (101, 100), (102, 101), (103, 102), +(104, 103), (105, 104), (106, 105), (107, 106), +(108, 107), (109, 108), (110, 109), (111, 110), +(112, 111), (113, 112), (114, 113), (115, 114), +(116, 115), (117, 116), (118, 117), (119, 118), +(120, 119), (121, 120), (122, 121), (123, 122), +(124, 123), (125, 124), (126, 125), (127, 126), +(128, 127), (129, 128), (130, 129), (131, 130), +(132, 131), (133, 132), (134, 133), (135, 134), +(136, 135), (137, 136), (138, 137), (139, 138), +(140, 139), (141, 140), (142, 141), (143, 142), +(144, 143), (145, 144), (146, 145), (147, 146), +(148, 147), (149, 148), (150, 149), (151, 150), +(152, 151), (153, 152), (154, 153), (155, 154), +(156, 155), (157, 156), (158, 157), (159, 158), +(160, 159), (161, 160), (162, 161), (163, 162), +(164, 163), (165, 164), (166, 165), (167, 166), +(168, 167), (169, 168), (170, 169), (171, 170), +(172, 171), (173, 172), (174, 173), (175, 174), +(176, 175), (177, 176), (178, 177), (179, 178), +(180, 179), (181, 180), (182, 181), (183, 182), +(184, 183), (185, 184), (186, 185), (187, 186), +(188, 187), (189, 188), (190, 189), (191, 190), +(192, 191), (193, 192), (194, 193), (195, 194), +(196, 195), (197, 196), (198, 197), (199, 198), +(200, 199), (201, 200), (202, 201), (203, 202), +(204, 203), (205, 204), (206, 205), (207, 206), +(208, 207), (209, 208), (210, 209), (211, 210), +(212, 211), (213, 212), (214, 213), (215, 214), +(216, 215), (217, 216), (218, 217), (219, 218), +(220, 219), (221, 220), (222, 221), (223, 222), +(224, 223), (225, 224), (226, 225), (227, 226), +(228, 227), (229, 228), (230, 229), (231, 230), +(232, 231), (233, 232), (234, 233), (235, 234), +(236, 235), (237, 236), (238, 237), (239, 238), +(240, 239), (241, 240), (242, 241), (243, 242), +(244, 243), (245, 244), (246, 245), (247, 246), +(248, 247), (249, 248), (250, 249), (251, 250), +(252, 251), (253, 252), (254, 253), (255, 254); +--error 1296,1451 delete from t1 where id=0; -delete from t1 where id=15; +delete from t1 where id=255; +--error 0,1451 delete from t1 where id=0; drop table t1; @@ -1279,6 +1356,9 
@@ drop table t1; create table t1 (a int not null, b int not null, c blob not null, d int not null, e int, primary key (a,b,c(255),d)) engine=innodb; insert into t1 values (2,2,"b",2,2),(1,1,"a",1,1),(3,3,"ab",3,3); +-- disable_result_log +analyze table t1; +-- enable_result_log select * from t1 order by a,b,c,d; explain select * from t1 order by a,b,c,d; drop table t1; @@ -1342,10 +1422,12 @@ source include/varchar.inc; # # Some errors/warnings on create # - +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; # Embedded server doesn't chdir to data directory --replace_result $MYSQLTEST_VARDIR . master-data/ '' +SET GLOBAL innodb_large_prefix=OFF; create table t1 (v varchar(65530), key(v)); +SET GLOBAL innodb_large_prefix=default; drop table t1; create table t1 (v varchar(65536)); show create table t1; @@ -1353,8 +1435,8 @@ drop table t1; create table t1 (v varchar(65530) character set utf8); show create table t1; drop table t1; - -eval set storage_engine=$default; +SET sql_mode = default; +eval set default_storage_engine=$default; # InnoDB specific varchar tests create table t1 (v varchar(16384)) engine=innodb; @@ -1459,7 +1541,7 @@ CREATE TABLE t1 id INT PRIMARY KEY ) ENGINE=InnoDB; ---error 1005,1005 +--error ER_CANNOT_ADD_FOREIGN,1005 CREATE TEMPORARY TABLE t2 ( id INT NOT NULL PRIMARY KEY, @@ -1500,6 +1582,8 @@ show create table t9; drop table t1, t2, t3, t4, t5, t6, t7, t8, t9; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +SET GLOBAL innodb_large_prefix=OFF; # these should have their index length trimmed create table t1 (col1 varchar(768), index(col1)) character set = latin1 engine = innodb; @@ -1509,25 +1593,28 @@ create table t3 (col1 text, index(col1(768))) character set = latin1 engine = innodb; create table t4 (col1 blob, index(col1(768))) character set = latin1 engine = innodb; +SET GLOBAL innodb_large_prefix=default; show create table t1; drop table t1, t2, t3, t4; # these should be refused ---error 1071 +set global innodb_large_prefix=OFF; +--error ER_TOO_LONG_KEY create table t1 (col1 varchar(768) primary key) character set = latin1 engine = innodb; ---error 1071 +--error ER_TOO_LONG_KEY create table t2 (col1 varbinary(768) primary key) character set = latin1 engine = innodb; ---error 1071 +--error ER_TOO_LONG_KEY create table t3 (col1 text, primary key(col1(768))) character set = latin1 engine = innodb; ---error 1071 +--error ER_TOO_LONG_KEY create table t4 (col1 blob, primary key(col1(768))) character set = latin1 engine = innodb; - +SET sql_mode = default; +set global innodb_large_prefix=default; # # Test improved foreign key error messages (bug #3443) # @@ -1552,7 +1639,7 @@ INSERT INTO t2 VALUES(1); --error 1451 DELETE FROM t1 WHERE id = 1; ---error 1451 +--error 1451, 1217 DROP TABLE t1; SET FOREIGN_KEY_CHECKS=0; @@ -1719,7 +1806,7 @@ create table t1 (a varchar(255) character set utf8, # test the padding of BINARY types and collations (Bug #14189) - +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; create table t1 (s1 varbinary(2),primary key (s1)) engine=innodb; create table t2 (s1 binary(2),primary key (s1)) engine=innodb; create table t3 (s1 varchar(2) binary,primary key (s1)) engine=innodb; @@ -1803,7 +1890,7 @@ ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a); ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0; SHOW CREATE TABLE t2; DROP TABLE t2,t1; - +SET sql_mode = default; # # Test case for bug #16229: MySQL/InnoDB uses full explicit table locks in trigger processing # @@ -2344,8 +2431,9 @@ DROP TABLE t1,t2; # # Bug #21101 (Prints wrong error message if max row size 
is too large) # -set innodb_strict_mode=on; ---error 1118 +SET innodb_strict_mode=ON; +--replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid} +--error ER_TOO_BIG_ROWSIZE CREATE TABLE t1 ( c01 CHAR(255), c02 CHAR(255), c03 CHAR(255), c04 CHAR(255), c05 CHAR(255), c06 CHAR(255), c07 CHAR(255), c08 CHAR(255), @@ -2356,6 +2444,7 @@ CREATE TABLE t1 ( c25 CHAR(255), c26 CHAR(255), c27 CHAR(255), c28 CHAR(255), c29 CHAR(255), c30 CHAR(255), c31 CHAR(255), c32 CHAR(255) ) ENGINE = InnoDB; +SET innodb_strict_mode=OFF; # # Bug #31860 InnoDB assumes AUTOINC values can only be positive. @@ -2512,6 +2601,7 @@ DROP TABLE bug35537; DISCONNECT c1; CONNECTION default; +SET GLOBAL innodb_thread_sleep_delay = @innodb_thread_sleep_delay_orig; SET GLOBAL innodb_thread_concurrency = @innodb_thread_concurrency_orig; -- enable_query_log diff --git a/mysql-test/suite/innodb/t/innodb_blob_unrecoverable_crash.test b/mysql-test/suite/innodb/t/innodb_blob_unrecoverable_crash.test deleted file mode 100644 index 8553d97bd9e..00000000000 --- a/mysql-test/suite/innodb/t/innodb_blob_unrecoverable_crash.test +++ /dev/null @@ -1,52 +0,0 @@ ---source include/not_embedded.inc ---source include/not_crashrep.inc ---source include/have_innodb.inc - -call mtr.add_suppression("InnoDB: The total blob data length"); - -let $old_max_allowed_packet = `select @@max_allowed_packet`; -SET GLOBAL max_allowed_packet = 100*1024*1024; - -connect(big_packets,localhost,root,,); -connection big_packets; - -CREATE TABLE t1 (a BIGINT PRIMARY KEY, b LONGBLOB) ENGINE=InnoDB; - -# Insert a few rows (it doesn't really matter how many). These transactions -# are committed once they are acked, so they should not be lost. -INSERT INTO t1 (a, b) VALUES (1, '1'); -INSERT INTO t1 (a, b) VALUES (2, '2'); -INSERT INTO t1 (a, b) VALUES (3, '3'); -INSERT INTO t1 (a, b) VALUES (4, '4'); -INSERT INTO t1 (a, b) VALUES (5, '5'); - -# The BLOB insert will fail, and should disappear. However all data committed -# up to this point should not be lost. -start transaction; ---replace_regex /\(> [0-9]*\)/(> ####)/ ---error ER_TOO_BIG_ROWSIZE -INSERT INTO t1 (a, b) VALUES (6, REPEAT('a', 20*1024*1024)); - -connection default; - -# We expect a restart. ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect - ---echo # Quick shutdown and restart server ---shutdown_server 0 - -# Wait for the server to come back up, and reconnect. ---enable_reconnect ---source include/wait_until_connected_again.inc - -connection default; - -# We should see (1,2,3,4,5) here. -SELECT a FROM t1; - -# Clean up. -DROP TABLE t1; - ---disable_query_log -eval set global max_allowed_packet = $old_max_allowed_packet; ---enable_query_log diff --git a/mysql-test/suite/innodb/t/innodb_bug12400341.test b/mysql-test/suite/innodb/t/innodb_bug12400341.test index 235ece04a8c..9a96f29fc3b 100644 --- a/mysql-test/suite/innodb/t/innodb_bug12400341.test +++ b/mysql-test/suite/innodb/t/innodb_bug12400341.test @@ -14,10 +14,8 @@ if (`select count(*)=0 from information_schema.global_variables where variable_n # undo slots of the previous test might exist still --source include/not_windows.inc -# Previous undo slots cause unnecessary failures ---source include/not_windows.inc - call mtr.add_suppression("InnoDB: Warning: cannot find a free slot for an undo log. Do you have too*"); +call mtr.add_suppression("\\[Warning\\] InnoDB: Cannot find a free slot for an undo log. 
Do you have too"); --disable_query_log set @old_innodb_trx_rseg_n_slots_debug = @@innodb_trx_rseg_n_slots_debug; diff --git a/mysql-test/suite/innodb/t/innodb_bug12902967.test b/mysql-test/suite/innodb/t/innodb_bug12902967.test index 8e1b8199524..1b5df7fa165 100644 --- a/mysql-test/suite/innodb/t/innodb_bug12902967.test +++ b/mysql-test/suite/innodb/t/innodb_bug12902967.test @@ -9,6 +9,8 @@ --source include/have_innodb.inc --source include/not_embedded.inc +call mtr.add_suppression("In ALTER TABLE .* has or is referenced in foreign key constraints which are not compatible with the new table definition."); + let error_log= $MYSQLTEST_VARDIR/log/mysqld.1.err; --source include/restart_mysqld.inc @@ -20,11 +22,4 @@ create table t1 (f1 integer primary key) engine innodb; --replace_regex /'\.\/test\/#sql-[0-9a-f_]*'/'#sql-temporary'/ --error ER_ERROR_ON_RENAME alter table t1 add constraint c1 foreign key (f1) references t1(f1); ---source include/restart_mysqld.inc -perl; -$file = $ENV{error_log}; -open (FILE, '<', $file) or die "can't open(< $file): $!\n"; -print ((grep { /^InnoDB:/ and not /aio/i and not /io_setup\(\) attempt [0-9]+ failed/ } )[-2..-1]); -EOF - drop table t1; diff --git a/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt b/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt index 6b82baca147..410738202bd 100644 --- a/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt +++ b/mysql-test/suite/innodb/t/innodb_bug14147491-master.opt @@ -1 +1,4 @@ ---innodb_file_per_table=1 --skip-stack-trace --skip-core-file +--innodb_file_per_table=1 +--skip-stack-trace +--skip-core-file +--loose-innodb_buffer_pool_load_at_startup=OFF diff --git a/mysql-test/suite/innodb/t/innodb_bug14147491.test b/mysql-test/suite/innodb/t/innodb_bug14147491.test index 16e88826c85..c73571af6dd 100644 --- a/mysql-test/suite/innodb/t/innodb_bug14147491.test +++ b/mysql-test/suite/innodb/t/innodb_bug14147491.test @@ -1,41 +1,29 @@ # # Test opening a corrupted table. 
# - --- source include/not_encrypted.inc - -call mtr.add_suppression("InnoDB: Database page corruption on disk or a failed.*"); - -# Don't test under valgrind, memory leaks will occur +# Valgrind can hang or return spurious messages on DBUG_SUICIDE source include/not_valgrind.inc; # Avoid CrashReporter popup on Mac source include/not_crashrep.inc; -# Don't test under embedded +# Restarting is not supported under embedded source include/not_embedded.inc; # Require InnoDB source include/have_innodb.inc; # Require Debug for SET DEBUG source include/have_debug.inc; +# Not encrypted tables +source include/not_encrypted.inc; # Test could open crash reporter on Windows # if compiler set up source include/not_windows.inc; -CALL mtr.add_suppression("InnoDB: Error: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); -CALL mtr.add_suppression("InnoDB: Warning: database page corruption or a failed"); -CALL mtr.add_suppression("InnoDB: Database page corruption on disk or a failed"); -CALL mtr.add_suppression("InnoDB: Space .* file test/t1 read of page .*"); -CALL mtr.add_suppression("InnoDB: You may have to recover from a backup."); -CALL mtr.add_suppression("InnoDB: It is also possible that your operatingsystem has corrupted its own file cache."); -CALL mtr.add_suppression("InnoDB: and rebooting your computer removes the error."); -CALL mtr.add_suppression("InnoDB: If the corrupt page is an index page you can also try to"); -CALL mtr.add_suppression("InnoDB: fix the corruption by dumping, dropping, and reimporting"); -CALL mtr.add_suppression("InnoDB: the corrupt table. You can use CHECK"); -CALL mtr.add_suppression("InnoDB: TABLE to scan your table for corruption."); -CALL mtr.add_suppression("InnoDB: See also .* about forcing recovery."); - - ---echo # Create and populate the table to be corrupted -CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB; +--disable_query_log +CALL mtr.add_suppression("\\[ERROR\\] \\[FATAL\\] InnoDB: Unable to read page \\[page id: space=.*, page number=.*\\] into the buffer pool after 100 attempts"); +CALL mtr.add_suppression("\\[ERROR\\] InnoDB: Database page corruption on disk or a failed"); +--enable_query_log + + +CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=InnoDB; INSERT INTO t1 (b) VALUES ('corrupt me'); --disable_query_log --let $i = 10 @@ -50,17 +38,7 @@ INSERT INTO t1 (b) VALUES ('corrupt me'); let $MYSQLD_DATADIR=`select @@datadir`; let t1_IBD = $MYSQLD_DATADIR/test/t1.ibd; ---echo # Write file to make mysql-test-run.pl expect the "crash", but don't ---echo # start it until it's told to ---exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect - ---echo # We give 30 seconds to do a clean shutdown because we do not want ---echo # to redo apply the pages of t1.ibd at the time of recovery. ---echo # We want SQL to initiate the first access to t1.ibd. -shutdown_server 30; - ---echo # Wait until disconnected. 
---source include/wait_until_disconnected.inc +--source include/shutdown_mysqld.inc --echo # Backup the t1.ibd before corrupting --copy_file $t1_IBD $MYSQLD_DATADIR/test/t1.ibd.backup @@ -92,10 +70,7 @@ while ($len = sysread IBD_FILE, $chunk, 1024) close IBD_FILE; EOF ---echo # Write file to make mysql-test-run.pl start up the server again ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect ---enable_reconnect ---source include/wait_until_connected_again.inc +--source include/start_mysqld.inc SET DEBUG_DBUG = '+d,innodb_page_corruption_retries'; @@ -117,10 +92,7 @@ SLEEP 1; --remove_file $MYSQLD_DATADIR/test/t1.ibd --move_file $MYSQLD_DATADIR/test/t1.ibd.backup $MYSQLD_DATADIR/test/t1.ibd ---echo # Write file to make mysql-test-run.pl start up the server again ---exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect ---enable_reconnect ---source include/wait_until_connected_again.inc +--source include/start_mysqld.inc # Note SET DEBUG = '-d,innodb_page_corruption_retries' is not required # because the session information is lost after server restart diff --git a/mysql-test/suite/innodb/t/innodb_bug30423.test b/mysql-test/suite/innodb/t/innodb_bug30423.test index bbb6f1155ff..de53a935156 100644 --- a/mysql-test/suite/innodb/t/innodb_bug30423.test +++ b/mysql-test/suite/innodb/t/innodb_bug30423.test @@ -139,6 +139,7 @@ analyze table bug30243_3; # Following query plan shows that we get the correct rows per # unique value (should be approximately 1 row per value) +-- replace_column 9 ROWS explain SELECT COUNT(*), 0 FROM bug30243_1 orgs LEFT JOIN bug30243_3 sa_opportunities @@ -159,6 +160,7 @@ analyze table bug30243_3; # Following query plan shows that we get the correct rows per # unique value (~1) +-- replace_column 9 ROWS explain SELECT COUNT(*), 0 FROM bug30243_1 orgs LEFT JOIN bug30243_3 sa_opportunities diff --git a/mysql-test/suite/innodb/t/innodb_bug34053.test b/mysql-test/suite/innodb/t/innodb_bug34053.test index 56c26acb632..d93d5100d81 100644 --- a/mysql-test/suite/innodb/t/innodb_bug34053.test +++ b/mysql-test/suite/innodb/t/innodb_bug34053.test @@ -5,7 +5,7 @@ -- source include/not_embedded.inc -- source include/have_innodb.inc -SET storage_engine=InnoDB; +SET default_storage_engine=InnoDB; # we do not really care about what gets printed, we are only # interested in getting success or failure according to our @@ -20,8 +20,12 @@ FLUSH PRIVILEGES; -- connection con1 -- error ER_SPECIFIC_ACCESS_DENIED_ERROR +SET GLOBAL innodb_status_output=ON; +-- error ER_SPECIFIC_ACCESS_DENIED_ERROR +SET GLOBAL innodb_status_output_locks=ON; + CREATE TABLE innodb_monitor (a INT) ENGINE=INNODB; -# this should only fail with UNIV_MEM_DEBUG +DROP TABLE innodb_monitor; CREATE TABLE innodb_mem_validate (a INT) ENGINE=INNODB; DROP TABLE innodb_mem_validate; CREATE TABLE innodb_sql (a INT) ENGINE=INNODB; @@ -36,16 +40,18 @@ CREATE TABLE nnodb_monitor (a INT) ENGINE=INNODB; DROP TABLE nnodb_monitor; -- connection default -CREATE TABLE innodb_monitor (a INT) ENGINE=INNODB; -CREATE TABLE innodb_mem_validate (a INT) ENGINE=INNODB; +SET GLOBAL innodb_status_output=ON; +SET GLOBAL innodb_status_output_locks=ON; -- connection con1 -- error ER_SPECIFIC_ACCESS_DENIED_ERROR -DROP TABLE innodb_monitor; -DROP TABLE innodb_mem_validate; +SET GLOBAL innodb_status_output=OFF; +-- error ER_SPECIFIC_ACCESS_DENIED_ERROR +SET GLOBAL innodb_status_output_locks=OFF; -- connection default -DROP TABLE innodb_monitor; +SET GLOBAL innodb_status_output=OFF; +SET GLOBAL innodb_status_output_locks=OFF; DROP USER 
'shane'@'localhost'; -- disconnect con1 diff --git a/mysql-test/suite/innodb/t/innodb_bug34300.test b/mysql-test/suite/innodb/t/innodb_bug34300.test index 13c708b48d6..8c73af13bc1 100644 --- a/mysql-test/suite/innodb/t/innodb_bug34300.test +++ b/mysql-test/suite/innodb/t/innodb_bug34300.test @@ -1,35 +1,26 @@ -# -# Bug#34300 Tinyblob & tinytext fields currupted after export/import and alter in 5.1 -# http://bugs.mysql.com/34300 -# +--echo # +--echo # Bug#34300 Tinyblob & tinytext fields currupted after export/import and alter in 5.1 +--echo # -- source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.22-MariaDB-72.0" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in XtraDB as of 5.6.22-MariaDB-72.0 or earlier -} - -- disable_query_log --- disable_result_log - -call mtr.add_suppression("InnoDB: The total blob data length"); +call mtr.add_suppression("InnoDB: Warning: a long semaphore wait:"); +call mtr.add_suppression("The age of the last checkpoint is"); # set packet size and reconnect let $max_packet=`select @@global.max_allowed_packet`; +-- enable_query_log + SET @@global.max_allowed_packet=16777216; --connect (newconn, localhost, root,,) ---enable_result_log - CREATE TABLE bug34300 ( f4 TINYTEXT, f6 MEDIUMTEXT, f8 TINYBLOB ) ENGINE=InnoDB; ---replace_regex /\(> [0-9]*\)/(> ####)/ ---error ER_TOO_BIG_ROWSIZE INSERT INTO bug34300 VALUES ('xxx', repeat('a', 8459264), 'zzz'); SELECT f4, f8 FROM bug34300; @@ -38,5 +29,10 @@ ALTER TABLE bug34300 ADD COLUMN (f10 INT); SELECT f4, f8 FROM bug34300; +--echo # Cleanup DROP TABLE bug34300; + +-- disable_query_log EVAL SET @@global.max_allowed_packet=$max_packet; +-- enable_query_log + diff --git a/mysql-test/suite/innodb/t/innodb_bug60049-master.opt b/mysql-test/suite/innodb/t/innodb_bug60049-master.opt index 741d8685459..22a5d4ed221 100644 --- a/mysql-test/suite/innodb/t/innodb_bug60049-master.opt +++ b/mysql-test/suite/innodb/t/innodb_bug60049-master.opt @@ -1 +1 @@ ---loose-innodb-fast-shutdown=0 +--innodb_fast_shutdown=0 diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test index b1d56d16a5e..cb05ca297ea 100644 --- a/mysql-test/suite/innodb/t/innodb_bug60049.test +++ b/mysql-test/suite/innodb/t/innodb_bug60049.test @@ -5,12 +5,11 @@ -- source include/not_embedded.inc -- source include/have_innodb.inc -- source include/have_innodb_16k.inc --- source include/not_encrypted.inc - -call mtr.add_suppression('InnoDB: Error: Table "mysql"."innodb_(table|index)_stats" not found'); -call mtr.add_suppression('InnoDB: Error: Fetch of persistent statistics requested'); -- disable_query_log +call mtr.add_suppression('\\[ERROR\\] InnoDB: Table `mysql`.`innodb_(table|index)_stats` not found'); +call mtr.add_suppression('\\[ERROR\\] InnoDB: Fetch of persistent statistics requested for table `mysql`.`gtid_executed`'); + let $create1 = query_get_value(SHOW CREATE TABLE mysql.innodb_table_stats, Create Table, 1); let $create2 = query_get_value(SHOW CREATE TABLE mysql.innodb_index_stats, Create Table, 1); DROP TABLE mysql.innodb_index_stats; @@ -23,10 +22,7 @@ DROP TABLE u; SELECT @@innodb_fast_shutdown; let $MYSQLD_DATADIR=`select @@datadir`; -# Shut down the server --- exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --- shutdown_server 30 --- source include/wait_until_disconnected.inc +--source include/shutdown_mysqld.inc # Check the tail of ID_IND (SYS_TABLES.ID) let IBDATA1=$MYSQLD_DATADIR/ibdata1; @@ -45,10 +41,7 @@ close(FILE); print 
unpack("H*", $_), "\n"; EOF -# Restart the server. --- exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect --- enable_reconnect --- source include/wait_until_connected_again.inc +--source include/start_mysqld.inc -- disable_query_log USE mysql; diff --git a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test index ee04e8d66fc..52a318f0fa8 100644 --- a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test +++ b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test @@ -2,32 +2,28 @@ # Test for persistent corrupt bit for corrupted index and table # -- source include/have_innodb.inc -#-- source include/have_innodb_16k.inc --- source include/not_embedded.inc +--source include/not_embedded.inc # This test needs debug server -- source include/have_debug.inc --- disable_query_log -call mtr.add_suppression("Flagged corruption of idx.*in"); +--disable_query_log +call mtr.add_suppression("Flagged corruption of.* in table .* in .*"); +--enable_query_log set names utf8; -SET UNIQUE_CHECKS=0; - CREATE TABLE corrupt_bit_test_Ä( a INT AUTO_INCREMENT PRIMARY KEY, b CHAR(100), c INT, z INT, INDEX idx(b)) -ENGINE=InnoDB; +ENGINE=InnoDB STATS_PERSISTENT=0; INSERT INTO corrupt_bit_test_Ä VALUES(0,'x',1, 1); -# This is the first unique index we intend to corrupt CREATE UNIQUE INDEX idxÄ ON corrupt_bit_test_Ä(c, b); -# This is the second unique index we intend to corrupt CREATE UNIQUE INDEX idxÄ“ ON corrupt_bit_test_Ä(z, b); SELECT * FROM corrupt_bit_test_Ä; @@ -37,9 +33,9 @@ INSERT INTO corrupt_bit_test_Ä SELECT 0,b,c+1,z+1 FROM corrupt_bit_test_Ä; select count(*) from corrupt_bit_test_Ä; # This will flag all secondary indexes corrupted -SET SESSION debug_dbug="+d,dict_set_index_corrupted"; +SET SESSION debug="+d,dict_set_index_corrupted"; check table corrupt_bit_test_Ä; -SET SESSION debug_dbug=""; +SET SESSION debug="-d,dict_set_index_corrupted"; # Cannot create new indexes while corrupted indexes exist --error ER_INDEX_CORRUPT @@ -79,8 +75,6 @@ set names utf8; -- error ER_INDEX_CORRUPT select z from corrupt_bit_test_Ä; -show create table corrupt_bit_test_Ä; - # Drop the corrupted index drop index idxÄ“ on corrupt_bit_test_Ä; @@ -90,13 +84,13 @@ CREATE INDEX idx3 ON corrupt_bit_test_Ä(b, c); --error ER_INDEX_CORRUPT CREATE INDEX idx4 ON corrupt_bit_test_Ä(b, z); -show create table corrupt_bit_test_Ä; drop index idx on corrupt_bit_test_Ä; # Now that there exist no corrupted indexes, we can create new indexes. 
CREATE INDEX idx3 ON corrupt_bit_test_Ä(b, c); CREATE INDEX idx4 ON corrupt_bit_test_Ä(b, z); + # Now select back to normal select z from corrupt_bit_test_Ä limit 10; diff --git a/mysql-test/suite/innodb/t/innodb_gis.test b/mysql-test/suite/innodb/t/innodb_gis.test index 1adb14ea482..45d66d95002 100644 --- a/mysql-test/suite/innodb/t/innodb_gis.test +++ b/mysql-test/suite/innodb/t/innodb_gis.test @@ -6,5 +6,8 @@ SET storage_engine=innodb; # # Bug #15680 (SPATIAL key in innodb) # ---error ER_TABLE_CANT_HANDLE_SPKEYS +# MySQL 5.7 Introduces SPATIAL keys for InnoDB +#--error ER_TABLE_CANT_HANDLE_SPKEYS create table t1 (g geometry not null, spatial gk(g)) engine=innodb; +drop table t1; + diff --git a/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test b/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test index c932e45591d..5d36cfdcbb9 100644 --- a/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test +++ b/mysql-test/suite/innodb/t/innodb_stats_create_on_corrupted.test @@ -10,8 +10,8 @@ -- source include/have_innodb_16k.inc -- source include/not_embedded.inc -call mtr.add_suppression("InnoDB: Error: Table \"mysql\".\"innodb_index_stats\" not found"); -call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table"); +call mtr.add_suppression("InnoDB: Table .*innodb_index_stats.* not found"); +call mtr.add_suppression("InnoDB: Fetch of persistent statistics requested for table .*"); -- vertical_results diff --git a/mysql-test/suite/innodb/t/innodb_stats_fetch_corrupted.test b/mysql-test/suite/innodb/t/innodb_stats_fetch_corrupted.test index 4a3f7527c09..81fd52c72f8 100644 --- a/mysql-test/suite/innodb/t/innodb_stats_fetch_corrupted.test +++ b/mysql-test/suite/innodb/t/innodb_stats_fetch_corrupted.test @@ -8,8 +8,8 @@ # test with 16k page size. 
-- source include/have_innodb_16k.inc -call mtr.add_suppression("InnoDB: Error: Table \"mysql\".\"innodb_index_stats\" not found"); -call mtr.add_suppression("InnoDB: Error: Fetch of persistent statistics requested for table"); +call mtr.add_suppression("InnoDB: Table \"mysql\".\"innodb_index_stats\" not found"); +call mtr.add_suppression("InnoDB: Fetch of persistent statistics requested for table.*"); -- vertical_results diff --git a/mysql-test/suite/innodb/t/strict_mode.test b/mysql-test/suite/innodb/t/strict_mode.test index 86b56a09c0e..48fc1ef7881 100644 --- a/mysql-test/suite/innodb/t/strict_mode.test +++ b/mysql-test/suite/innodb/t/strict_mode.test @@ -5,6 +5,8 @@ --echo # INNODB_STRICT_MODE = 1 --echo # +call mtr.add_suppression("InnoDB: Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); + set innodb_strict_mode = 0; create table t1 (id int auto_increment primary key, diff --git a/mysql-test/suite/innodb_zip/include/innodb-wl6045.inc b/mysql-test/suite/innodb_zip/include/innodb-wl6045.inc new file mode 100644 index 00000000000..26ce7e72983 --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb-wl6045.inc @@ -0,0 +1,20 @@ +--echo ===> Testing size=$size +--disable_warnings +--eval CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=$size +--enable_warnings + +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); + +--source include/shutdown_mysqld.inc + +--exec $INNOCHECKSUM $MYSQLD_DATADIR/test/t1.ibd +--exec $INNOCHECKSUM --write=crc32 $MYSQLD_DATADIR/test/t1.ibd +--exec $INNOCHECKSUM --strict-check=crc32 $MYSQLD_DATADIR/test/t1.ibd +--exec $INNOCHECKSUM --write=none $MYSQLD_DATADIR/test/t1.ibd +--exec $INNOCHECKSUM --strict-check=none $MYSQLD_DATADIR/test/t1.ibd + +--source include/start_mysqld.inc +select * from t1; +drop table t1; diff --git a/mysql-test/suite/innodb_zip/include/innodb_create_tab_indx.inc b/mysql-test/suite/innodb_zip/include/innodb_create_tab_indx.inc new file mode 100644 index 00000000000..413a026265e --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_create_tab_indx.inc @@ -0,0 +1,16 @@ +--echo # Create table & Index + +eval CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=$block_size; + + +let $i = 9; +while ($i) { + +eval CREATE INDEX idx$i ON tab5(col_$i(10)); +dec $i; +} + + diff --git a/mysql-test/suite/innodb_zip/include/innodb_dml_ops.inc b/mysql-test/suite/innodb_zip/include/innodb_dml_ops.inc new file mode 100644 index 00000000000..4908dfb6bee --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_dml_ops.inc @@ -0,0 +1,82 @@ +delimiter |; +create procedure populate_t1() +begin + declare i int default 1; + while (i <= 200) do + insert into t1 values (i, 'a', 'b'); + set i = i + 1; + end while; +end| +create procedure populate_t1_small() +begin + declare i int default 1; + while (i <= 20) do + insert into t1 values (i, 'c', 'd'); + set i = i + 1; + end while; +end| +create procedure populate_t1_small2() +begin + declare i int default 30; + while (i <= 50) do + insert into t1 values (i, 'e', 'f'); + set i = i + 1; + end while; +end| +delimiter ;| +# +begin; +select count(*) from t1; +call populate_t1(); +select count(*) from t1; +select * from 
t1 limit 10; +rollback; +select count(*) from t1; +# +begin; +call populate_t1(); +select count(*) from t1; +commit; +select count(*) from t1; +# +truncate table t1; +select count(*) from t1; +# +call populate_t1_small(); +select count(*) from t1; +rollback; +select count(*) from t1; +truncate table t1; +# +call populate_t1(); +select count(*) from t1; +delete from t1 where keyc <= 60; +select count(*) from t1; +call populate_t1_small(); +select count(*) from t1; +select * from t1 limit 10; +begin; +call populate_t1_small2(); +select count(*) from t1; +select * from t1 where keyc > 30 limit 10; +rollback; +select count(*) from t1; +select * from t1 where keyc > 30 limit 10; +# +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +rollback; +begin; +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +rollback; +select * from t1 limit 10; +commit; +select * from t1 limit 10; +# +insert into t2 select * from t1 where keyc < 2101; +select count(*) from t2; +# +drop procedure populate_t1; +drop procedure populate_t1_small; +drop procedure populate_t1_small2; diff --git a/mysql-test/suite/innodb_zip/include/innodb_fetch_records.inc b/mysql-test/suite/innodb_zip/include/innodb_fetch_records.inc new file mode 100644 index 00000000000..5e55293c18c --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_fetch_records.inc @@ -0,0 +1,7 @@ +--echo =============== +--echo Fetch Records +--echo =============== + +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; diff --git a/mysql-test/suite/innodb_zip/include/innodb_load_data.inc b/mysql-test/suite/innodb_zip/include/innodb_load_data.inc new file mode 100644 index 00000000000..1bcb30131ac --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_load_data.inc @@ -0,0 +1,19 @@ +--echo # Load the data + +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); + +while ($i) { + +eval INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +dec $i; +} +commit; diff --git a/mysql-test/suite/innodb_zip/include/innodb_stats_comp_index.inc b/mysql-test/suite/innodb_zip/include/innodb_stats_comp_index.inc new file mode 100644 index 00000000000..d2fe05e5d13 --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_stats_comp_index.inc @@ -0,0 +1,26 @@ +--echo # Check the stats of the table +--echo # Check the size of the ibd file + +-- echo # testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; + +perl; + my $dir = $ENV{'MYSQLD_DATADIR'}."test"; + my $size; + opendir(DIR, $dir) or die $!; + while (my $file = readdir(DIR)) + { + + next unless ($file =~ m/\.ibd$/); + $size = -s "$dir/$file"; + print "The size of the tab5.ibd file: $size\n"; + } + close(DIR); + exit(0) +EOF diff --git a/mysql-test/suite/innodb_zip/include/innodb_stats_restart.inc b/mysql-test/suite/innodb_zip/include/innodb_stats_restart.inc new file mode 100644 index 00000000000..13952459847 --- /dev/null +++ 
b/mysql-test/suite/innodb_zip/include/innodb_stats_restart.inc @@ -0,0 +1,12 @@ +--echo =============== +--echo After Restart Chekc the stats of the table +--echo =============== + +-- echo # testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @comp_val AND 1000 +AND uncompress_ops BETWEEN @uncomp_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; diff --git a/mysql-test/suite/innodb_zip/include/innodb_temp_table_dml.inc b/mysql-test/suite/innodb_zip/include/innodb_temp_table_dml.inc new file mode 100644 index 00000000000..42e0908f810 --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_temp_table_dml.inc @@ -0,0 +1,40 @@ +# insert test +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; +select * from t1 where i = 98; +select * from t1 where i < 100; +# +# add index using alter table table +explain select * from t1 where f > 1.29999; +alter table t1 add index sec_index(f); +explain select * from t1 where f > 1.29999; +select * from t1 where f > 1.29999; +# +explain select * from t1 where i = 100; +alter table t1 add unique index pri_index(i); +explain select * from t1 where i = 100; +select * from t1 where i = 100; +# +# delete test +delete from t1 where i < 97; +select * from t1; +insert into t1 values (96, 1.5, 'kolkata'); +select * from t1; +# +# update test +update t1 set f = 1.44 where c = 'delhi'; +select * from t1; +# +# truncate table +truncate table t1; +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; + diff --git a/mysql-test/suite/innodb_zip/include/innodb_wl6501_crash_stripped.inc b/mysql-test/suite/innodb_zip/include/innodb_wl6501_crash_stripped.inc new file mode 100644 index 00000000000..fcefd0cdf7e --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_wl6501_crash_stripped.inc @@ -0,0 +1,144 @@ +# +# WL#6501: make truncate table atomic +# + +--source include/have_innodb.inc +--source include/have_debug.inc + +# Valgrind would complain about memory leaks when we crash on purpose. +--source include/not_valgrind.inc +# Embedded server does not support crashing +--source include/not_embedded.inc +# Avoid CrashReporter popup on Mac +--source include/not_crashrep.inc + +# suppress expected warnings. +call mtr.add_suppression("The file '.*' already exists though the corresponding table did not exist in the InnoDB data dictionary"); +call mtr.add_suppression("Cannot create file '.*'"); +call mtr.add_suppression("InnoDB: Error number 17 means 'File exists'"); + +################################################################################ +# +# Will test following scenarios: +# 1. Hit crash point on completing drop of all indexes before creation of index +# is commenced. +# 2. Hit crash point after data is updated to system-table and in-memory dict. +# 3. Post truncate recovery, abruptly shutdown the server. +# On restart ensure table state is maintained. 
+# +################################################################################ + +#----------------------------------------------------------------------------- +# +# create test-bed +# + +let $WL6501_TMP_DIR = `select @@tmpdir`; +let $WL6501_DATA_DIR = `select @@datadir`; +let SEARCH_FILE = $MYSQLTEST_VARDIR/log/my_restart.err; + +#----------------------------------------------------------------------------- +# +# 1. Hit crash point on completing drop of all indexes before creation of index +# is commenced. +# +--echo "1. Hit crash point on completing drop of all indexes before creation" +--echo " of index is commenced." +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +set innodb_strict_mode=off; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +check table t; +# +set session debug = "+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +--source include/expect_crash.inc +--error 2013 +truncate table t; +# +--source include/start_mysqld.inc + +check table t; +select * from t; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +select * from t where f < 2.5; +drop table t; + +#----------------------------------------------------------------------------- +# +# 2. Hit crash point after data is updated to system-table and in-memory dict. +# +--echo "2. Hit crash point after data is updated to system-table and" +--echo " in-memory dict." +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +set innodb_strict_mode=off; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +check table t; +# +set session debug = "+d,ib_trunc_crash_on_updating_dict_sys_info"; +--source include/expect_crash.inc +--error 2013 +truncate table t; +# +--source include/start_mysqld.inc +check table t; +select * from t; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +select * from t where f < 2.5; +drop table t; + +#----------------------------------------------------------------------------- +# +# 3. Post truncate recovery, abruptly shutdown the server. +# On restart ensure table state is maintained. +# +--echo "3. Post truncate recovery, abruptly shutdown the server." +--echo " On restart ensure table state is maintained." 
+eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +set innodb_strict_mode=off; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +check table t; +# +set session debug = "+d,ib_trunc_crash_after_redo_log_write_complete"; +--source include/expect_crash.inc +--error 2013 +truncate table t; +# +--source include/start_mysqld.inc +check table t; +select * from t; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +# +--source include/kill_and_restart_mysqld.inc +# +check table t; +select * from t; +select * from t where f < 2.5; +drop table t; diff --git a/mysql-test/suite/innodb_zip/include/innodb_wl6501_error.inc b/mysql-test/suite/innodb_zip/include/innodb_wl6501_error.inc new file mode 100644 index 00000000000..0939d452dae --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_wl6501_error.inc @@ -0,0 +1,234 @@ +# +# WL#6501: make truncate table atomic +# + +--source include/have_innodb.inc +--source include/have_debug.inc + +--disable_query_log +# suppress expected warnings +call mtr.add_suppression("Unable to truncate FTS index for table"); +call mtr.add_suppression("Unable to assign a new identifier to table " + "`.*`\.`.*` after truncating it"); +call mtr.add_suppression("Flagged corruption of .* in table " + "`.*`\.`.*` in TRUNCATE TABLE"); +call mtr.add_suppression("Parent table of FTS auxiliary table " + ".*\/.* not found"); +--enable_query_log +################################################################################ +# +# Will test following scenarios: +# 1. Error in assigning undo logs for truncate action. +# 2. Error while preparing for truncate. +# 3. Error while dropping/creating indexes. +# 4. Error while completing truncate of table involving FTS. +# 5. Error while updating sys-tables. +# +################################################################################ + +#----------------------------------------------------------------------------- +# +# create test-bed +# +let $per_table = `select @@innodb_file_per_table`; +let $format = `select @@innodb_file_format`; + +eval set global innodb_file_per_table = on; +let $WL6501_TMP_DIR = `select @@tmpdir`; +let $WL6501_DATA_DIR = `select @@datadir`; +set innodb_strict_mode=off; + +#----------------------------------------------------------------------------- +# +# 1. Error in assigning undo logs for truncate action. +# +--echo "1. Error in assigning undo logs for truncate action." +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_assigning_undo_log"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_assigning_undo_log"; +# +#check table t; +select * from t; +drop table t; + +#----------------------------------------------------------------------------- +# +# 2. 
Error while preparing for truncate. +# +--echo "2. Error while preparing for truncate." +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_preparing_for_truncate"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_preparing_for_truncate"; +# +#check table t; +select * from t; +drop table t; + +#----------------------------------------------------------------------------- +# +# 3. Error while dropping/creating indexes +# +--echo "3. Error while dropping/creating indexes" +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_drop_index"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_drop_index"; +# +#check table t; +--error ER_TABLE_CORRUPT, 1030 +select * from t; +drop table t; +# +# +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_create_index"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_create_index"; +# +#check table t; +--error ER_TABLE_CORRUPT, 1030 +select * from t; +drop table t; +# +# +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create temporary table t ( + i int, f float, c char, + primary key pk(i), unique findex(f), index ck(c)) + engine = innodb row_format = $wl6501_row_fmt + key_block_size = $wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_temp_recreate_index"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_temp_recreate_index"; +# +#check table t; +--error ER_TABLE_CORRUPT, 1030 +select * from t; +drop table t; + +#----------------------------------------------------------------------------- +# +# 4. Error while completing truncate of table involving FTS. +# +--echo "4. Error while completing truncate of table involving FTS." 
+eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t (i int, f float, c char(100), + primary key pk(i), index fk(f), fulltext index ck(c)) + engine=innodb row_format=$wl6501_row_fmt + key_block_size=$wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'mysql is now oracle company'), + (2, 2.2, 'innodb is part of mysql'), + (3, 3.3, 'innodb is default storage engine of mysql'); +select * from t; +#check table t; +# +set session debug = "+d,ib_err_trunc_during_fts_trunc"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_during_fts_trunc"; +# +#check table t; +--error ER_TABLE_CORRUPT, 1030 +select * from t; +drop table t; + +#----------------------------------------------------------------------------- +# +# 5. Error while updating sys-tables. +# +--echo "5. Error while updating sys-tables." +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--disable_warnings +eval create $wl6501_temp table t (i int, f float, c char(100), + primary key pk(i), index fk(f), fulltext index ck(c)) + engine=innodb row_format=$wl6501_row_fmt + key_block_size=$wl6501_kbs; +--enable_warnings +insert into t values (1, 1.1, 'mysql is now oracle company'), + (2, 2.2, 'innodb is part of mysql'), + (3, 3.3, 'innodb is default storage engine of mysql'); +select * from t order by i; +#check table t; +# +set session debug = "+d,ib_err_trunc_during_sys_table_update"; +--error ER_GET_ERRNO +truncate table t; +set session debug = "-d,ib_err_trunc_during_sys_table_update"; +# +#check table t; +--error ER_TABLE_CORRUPT, 1030 +select * from t order by i; +drop table t; + +#----------------------------------------------------------------------------- +# +# remove test-bed +# +eval set global innodb_file_format = $format; +eval set global innodb_file_per_table = $per_table; diff --git a/mysql-test/suite/innodb_zip/include/innodb_wl6501_scale.inc b/mysql-test/suite/innodb_zip/include/innodb_wl6501_scale.inc new file mode 100644 index 00000000000..67569d3dae9 --- /dev/null +++ b/mysql-test/suite/innodb_zip/include/innodb_wl6501_scale.inc @@ -0,0 +1,113 @@ +# +# load tables with some significant amount of data and then truncate it. +# + +#----------------------------------------------------------------------------- +# +# create test-bed +# +let $per_table = `select @@innodb_file_per_table`; +let $format = `select @@innodb_file_format`; + +let $WL6501_TMP_DIR = `select @@tmpdir`; +let $WL6501_DATA_DIR = `select @@datadir`; +set innodb_strict_mode=OFF; + +#----------------------------------------------------------------------------- +# +# create procedure to load data +# +delimiter |; +create procedure populate() +begin + declare i int default 1; + while (i <= 5000) do + insert into t1 values (i, 'a', 'b'); + insert into t2 values (i, 'a', 'b'); + insert into t3 values (i, 'a', 'b'); + set i = i + 1; + end while; +end| +create procedure populate_small() +begin + declare i int default 10001; + while (i <= 12000) do + insert into t1 values (i, 'c', 'd'); + insert into t2 values (i, 'a', 'b'); + insert into t3 values (i, 'a', 'b'); + set i = i + 1; + end while; +end| +delimiter ;| + +#----------------------------------------------------------------------------- +# +# create and load the tables. 
+# +eval set global innodb_file_per_table = $wl6501_file_per_table; +eval set global innodb_file_format = $wl6501_file_format; +--replace_regex /[0-9]+/NUMBER/ +eval create table t1 + (i int, c1 char(100), c2 char(100), + index c1_idx(c1)) + engine=innodb row_format=$wl6501_row_fmt + key_block_size=$wl6501_kbs; +eval create table t2 + (i int, c1 char(100), c2 char(100), + index c1_idx(c1)) + engine=innodb row_format=$wl6501_row_fmt + key_block_size=$wl6501_kbs; +eval create temporary table t3 + (i int, c1 char(100), c2 char(100), + index c1_idx(c1)) + engine=innodb row_format=$wl6501_row_fmt + key_block_size=$wl6501_kbs; +# +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +begin; +call populate(); +commit; +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +# +truncate table t1; +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +# +call populate_small(); +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +# +truncate table t2; +truncate table t3; +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +# +call populate_small(); +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +# +drop table t1; +drop table t2; +drop table t3; + +#----------------------------------------------------------------------------- +# +# drop the procedure +# +drop procedure populate; +drop procedure populate_small; + +#----------------------------------------------------------------------------- +# +# remove test-bed +# +eval set global innodb_file_format = $format; +eval set global innodb_file_per_table = $per_table; diff --git a/mysql-test/suite/innodb_zip/r/16k.result b/mysql-test/suite/innodb_zip/r/16k.result new file mode 100644 index 00000000000..3d9f39529e2 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/16k.result @@ -0,0 +1,745 @@ +SET default_storage_engine=InnoDB; +# Test 1) Show the page size from Information Schema +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_page_size'; +variable_value +16384 +# Test 2) The number of buffer pool pages is dependent upon the page size. +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +variable_value +{checked_valid} +# Test 3) Query some information_shema tables that are dependent upon +# the page size. 
+SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +mysql/engine_cost 9 33 PRIMARY 3 3 3 50 +mysql/gtid_executed 6 33 PRIMARY 3 3 2 50 +mysql/help_category 7 33 PRIMARY 3 3 1 50 +mysql/help_category 7 33 name 4 2 1 50 +mysql/help_keyword 5 33 PRIMARY 3 3 1 50 +mysql/help_keyword 5 33 name 4 2 1 50 +mysql/help_relation 5 33 PRIMARY 3 3 2 50 +mysql/help_topic 9 33 PRIMARY 3 3 1 50 +mysql/help_topic 9 33 name 4 2 1 50 +mysql/innodb_index_stats 11 33 PRIMARY 3 3 4 50 +mysql/innodb_table_stats 9 33 PRIMARY 3 3 2 50 +mysql/plugin 5 33 PRIMARY 3 3 1 50 +mysql/servers 12 33 PRIMARY 3 3 1 50 +mysql/server_cost 7 33 PRIMARY 3 3 1 50 +mysql/slave_master_info 28 33 PRIMARY 3 3 1 50 +mysql/slave_relay_log_info 12 33 PRIMARY 3 3 1 50 +mysql/slave_worker_info 16 33 PRIMARY 3 3 2 50 +mysql/time_zone 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_leap_second 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_name 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_transition 6 33 PRIMARY 3 3 2 50 +mysql/time_zone_transition_type 8 33 PRIMARY 3 3 2 50 +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; +SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +test/t1 5 0 PRIMARY 3 3 1 50 +test/t2 5 1 PRIMARY 3 3 1 50 +test/t3 5 41 PRIMARY 3 3 1 50 +test/t4 5 33 PRIMARY 3 3 1 50 +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t1 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t1.ibd +test/t2 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t2.ibd +test/t3 Single DEFAULT 8192 Compressed MYSQLD_DATADIR/test/t3.ibd +test/t4 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t1.ibd +test/t2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t2.ibd +test/t3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t3.ibd +test/t4 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4.ibd +DROP TABLE t1, t2, t3, t4; +# Test 4) The maximum row size is dependent upon the page size. +# Redundant: 8123, Compact: 8126. +# Compressed: 8126, Dynamic: 8126. +# Each row format has its own amount of overhead that +# varies depending on number of fields and other overhead. 
+SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(211) +) ROW_FORMAT=redundant; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(212) +) ROW_FORMAT=redundant; +ERROR 42000: Row size too large (> 8123). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(246) +) ROW_FORMAT=compact; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(247) +) ROW_FORMAT=compact; +ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. 
+CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(157) +) ROW_FORMAT=compressed; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(160) +) ROW_FORMAT=compressed; +ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(246) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(247) +) ROW_FORMAT=dynamic; +ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
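The 8123/8126-byte ceilings quoted in the errors above come from InnoDB keeping at least two records on every 16KB index page, so the maximum row length is a little under half a page once page and record headers are deducted; REDUNDANT rows carry slightly more per-field overhead than COMPACT, DYNAMIC or COMPRESSED rows, which is why their limit is three bytes lower. The differing "BLOB prefix" wording in the messages reflects the same split: REDUNDANT and COMPACT keep a 768-byte prefix of each off-page column inline, while DYNAMIC and COMPRESSED store only a pointer. A minimal sketch for confirming the page size behind these limits before experimenting (the probe table name is illustrative and not part of the test suite):

SELECT variable_value AS innodb_page_size
  FROM information_schema.global_status
 WHERE LOWER(variable_name) = 'innodb_page_size';
SET SESSION innodb_strict_mode = ON;   -- report row-size overflow as an error, not a warning
CREATE TABLE row_size_probe (c1 CHAR(200), c2 CHAR(200))
  ENGINE=InnoDB ROW_FORMAT=DYNAMIC;    -- widen or add columns until ER_TOO_BIG_ROWSIZE is hit
DROP TABLE row_size_probe;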
+CREATE TABLE t1 (a varchar(255) character set utf8, +b varchar(255) character set utf8, +c varchar(255) character set utf8, +d varchar(255) character set utf8, +e varchar(4) character set utf8, +PRIMARY KEY (a,b,c,d,e)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(255) character set utf8, +b varchar(255) character set utf8, +c varchar(255) character set utf8, +d varchar(255) character set utf8, +e varchar(5) character set utf8, +PRIMARY KEY (a,b,c,d,e)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 3072 bytes +CREATE TABLE t1 (a varchar(255) character set utf8, +b varchar(255) character set utf8, +c varchar(255) character set utf8, +d varchar(255) character set utf8, +e varchar(255) character set utf8, +f varchar(4) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e,f)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(255) character set utf8, +b varchar(255) character set utf8, +c varchar(255) character set utf8, +d varchar(255) character set utf8, +e varchar(255) character set utf8, +f varchar(5) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e,f)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 3072 bytes +# Test 5) Make sure that KEY_BLOCK_SIZE=16, 8, 4, 2 & 1 +# are all accepted. +SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16 +ALTER TABLE t1 KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8 +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16 +ALTER TABLE t1 KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED 
KEY_BLOCK_SIZE=8 +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +# Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16 +# are rejected when innodb_file_per_table=OFF +SET SESSION innodb_strict_mode = ON; +SET GLOBAL innodb_file_per_table = OFF; +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value +innodb_file_per_table OFF +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_file_format = `Antelope`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +# Test 7) This series of tests were moved from innodb-index to here +# because the second alter table t1 assumes a 16k page size. +# Moving the test allows the rest of innodb-index to be run on all +# page sizes. The previously disabled portions of this test were +# moved as well. 
+CREATE TABLE t2(d varchar(17) PRIMARY KEY) ENGINE=innodb DEFAULT CHARSET=utf8; +CREATE TABLE t3(a int PRIMARY KEY) ENGINE=innodb; +INSERT INTO t3 VALUES (22),(44),(33),(55),(66); +INSERT INTO t2 VALUES ('jejdkrun87'),('adfd72nh9k'), +('adfdpplkeock'),('adfdijnmnb78k'),('adfdijn0loKNHJik'); +CREATE TABLE t1(a int, b blob, c text, d text NOT NULL) +ENGINE=innodb DEFAULT CHARSET=utf8 STATS_PERSISTENT=0; +INSERT INTO t1 +SELECT a,LEFT(REPEAT(d,100*a),65535),REPEAT(d,20*a),d FROM t2,t3 order by a, d; +DROP TABLE t2, t3; +SELECT COUNT(*) FROM t1 WHERE a=44; +COUNT(*) +5 +SELECT a, +LENGTH(b),b=LEFT(REPEAT(d,100*a),65535),LENGTH(c),c=REPEAT(d,20*a),d FROM t1 +ORDER BY 1, 2, 3, 4, 5, 6; +a LENGTH(b) b=LEFT(REPEAT(d,100*a),65535) LENGTH(c) c=REPEAT(d,20*a) d +22 22000 1 4400 1 adfd72nh9k +22 22000 1 4400 1 jejdkrun87 +22 26400 1 5280 1 adfdpplkeock +22 28600 1 5720 1 adfdijnmnb78k +22 35200 1 7040 1 adfdijn0loKNHJik +33 33000 1 6600 1 adfd72nh9k +33 33000 1 6600 1 jejdkrun87 +33 39600 1 7920 1 adfdpplkeock +33 42900 1 8580 1 adfdijnmnb78k +33 52800 1 10560 1 adfdijn0loKNHJik +44 44000 1 8800 1 adfd72nh9k +44 44000 1 8800 1 jejdkrun87 +44 52800 1 10560 1 adfdpplkeock +44 57200 1 11440 1 adfdijnmnb78k +44 65535 1 14080 1 adfdijn0loKNHJik +55 55000 1 11000 1 adfd72nh9k +55 55000 1 11000 1 jejdkrun87 +55 65535 1 13200 1 adfdpplkeock +55 65535 1 14300 1 adfdijnmnb78k +55 65535 1 17600 1 adfdijn0loKNHJik +66 65535 1 13200 1 adfd72nh9k +66 65535 1 13200 1 jejdkrun87 +66 65535 1 15840 1 adfdpplkeock +66 65535 1 17160 1 adfdijnmnb78k +66 65535 1 21120 1 adfdijn0loKNHJik +ALTER TABLE t1 ADD PRIMARY KEY (a), ADD KEY (b(20)); +ERROR 23000: Duplicate entry '22' for key 'PRIMARY' +DELETE FROM t1 WHERE d='null'; +ALTER TABLE t1 ADD PRIMARY KEY (a), ADD KEY (b(20)); +ERROR 23000: Duplicate entry '22' for key 'PRIMARY' +DELETE FROM t1 WHERE a%2; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +ALTER TABLE t1 ADD PRIMARY KEY (a,b(255),c(255)), ADD KEY (b(767)); +SELECT COUNT(*) FROM t1 WHERE a=44; +COUNT(*) +5 +SELECT a, +LENGTH(b), b=LEFT(REPEAT(d,100*a), 65535),LENGTH(c), c=REPEAT(d,20*a), d FROM t1; +a LENGTH(b) b=LEFT(REPEAT(d,100*a), 65535) LENGTH(c) c=REPEAT(d,20*a) d +22 22000 1 4400 1 adfd72nh9k +22 35200 1 7040 1 adfdijn0loKNHJik +22 28600 1 5720 1 adfdijnmnb78k +22 26400 1 5280 1 adfdpplkeock +22 22000 1 4400 1 jejdkrun87 +44 44000 1 8800 1 adfd72nh9k +44 65535 1 14080 1 adfdijn0loKNHJik +44 57200 1 11440 1 adfdijnmnb78k +44 52800 1 10560 1 adfdpplkeock +44 44000 1 8800 1 jejdkrun87 +66 65535 1 13200 1 adfd72nh9k +66 65535 1 21120 1 adfdijn0loKNHJik +66 65535 1 17160 1 adfdijnmnb78k +66 65535 1 15840 1 adfdpplkeock +66 65535 1 13200 1 jejdkrun87 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` blob NOT NULL, + `c` text NOT NULL, + `d` text NOT NULL, + PRIMARY KEY (`a`,`b`(255),`c`(255)), + KEY `b` (`b`(767)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +EXPLAIN SELECT * FROM t1 WHERE b LIKE 'adfd%'; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL range b b 769 NULL 12 100.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c`,`test`.`t1`.`d` AS `d` from `test`.`t1` where (`test`.`t1`.`b` like 'adfd%') +DROP TABLE t1; +# Test 8) Test creating a table that could lead to undo log overflow. 
+CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob, +h blob,i blob,j blob,k blob,l blob,m blob,n blob, +o blob,p blob,q blob,r blob,s blob,t blob,u blob) +ENGINE=InnoDB ROW_FORMAT=dynamic; +SET @a = repeat('a', 767); +SET @b = repeat('b', 767); +SET @c = repeat('c', 767); +SET @d = repeat('d', 767); +SET @e = repeat('e', 767); +INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a); +UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b, +k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b; +CREATE INDEX t1a ON t1 (a(767)); +CREATE INDEX t1b ON t1 (b(767)); +CREATE INDEX t1c ON t1 (c(767)); +CREATE INDEX t1d ON t1 (d(767)); +CREATE INDEX t1e ON t1 (e(767)); +UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c, +k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c; +CREATE INDEX t1f ON t1 (f(767)); +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d, +k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +ERROR HY000: Undo log record is too big. +BEGIN; +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d; +UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d, +n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +COMMIT; +CREATE INDEX t1g ON t1 (g(767)); +UPDATE t1 SET g=@e; +CREATE INDEX t1h ON t1 (h(767)); +UPDATE t1 SET h=@e; +CREATE INDEX t1i ON t1 (i(767)); +UPDATE t1 SET i=@e; +CREATE INDEX t1j ON t1 (j(767)); +UPDATE t1 SET j=@e; +CREATE INDEX t1k ON t1 (k(767)); +UPDATE t1 SET k=@e; +CREATE INDEX t1l ON t1 (l(767)); +UPDATE t1 SET l=@e; +CREATE INDEX t1m ON t1 (m(767)); +UPDATE t1 SET m=@e; +CREATE INDEX t1n ON t1 (n(767)); +UPDATE t1 SET n=@e; +CREATE INDEX t1o ON t1 (o(767)); +UPDATE t1 SET o=@e; +CREATE INDEX t1p ON t1 (p(767)); +UPDATE t1 SET p=@e; +CREATE INDEX t1q ON t1 (q(767)); +UPDATE t1 SET q=@e; +CREATE INDEX t1r ON t1 (r(767)); +UPDATE t1 SET r=@e; +CREATE INDEX t1s ON t1 (s(767)); +UPDATE t1 SET s=@e; +CREATE INDEX t1t ON t1 (t(767)); +UPDATE t1 SET t=@e; +ERROR HY000: Undo log record is too big. +CREATE INDEX t1u ON t1 (u(767)); +CREATE INDEX t1ut ON t1 (u(767), t(767)); +CREATE INDEX t1st ON t1 (s(767), t(767)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` blob, + `b` blob, + `c` blob, + `d` blob, + `e` blob, + `f` blob, + `g` blob, + `h` blob, + `i` blob, + `j` blob, + `k` blob, + `l` blob, + `m` blob, + `n` blob, + `o` blob, + `p` blob, + `q` blob, + `r` blob, + `s` blob, + `t` blob, + `u` blob, + KEY `t1a` (`a`(767)), + KEY `t1b` (`b`(767)), + KEY `t1c` (`c`(767)), + KEY `t1d` (`d`(767)), + KEY `t1e` (`e`(767)), + KEY `t1f` (`f`(767)), + KEY `t1g` (`g`(767)), + KEY `t1h` (`h`(767)), + KEY `t1i` (`i`(767)), + KEY `t1j` (`j`(767)), + KEY `t1k` (`k`(767)), + KEY `t1l` (`l`(767)), + KEY `t1m` (`m`(767)), + KEY `t1n` (`n`(767)), + KEY `t1o` (`o`(767)), + KEY `t1p` (`p`(767)), + KEY `t1q` (`q`(767)), + KEY `t1r` (`r`(767)), + KEY `t1s` (`s`(767)), + KEY `t1t` (`t`(767)), + KEY `t1u` (`u`(767)), + KEY `t1ut` (`u`(767),`t`(767)), + KEY `t1st` (`s`(767),`t`(767)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; +# Bug #12429576 - Test an assertion failure on purge. 
+CREATE TABLE t1_purge ( +A int, +B blob, C blob, D blob, E blob, +F blob, G blob, H blob, +PRIMARY KEY (B(767), C(767), D(767), E(767), A), +INDEX (A) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO t1_purge VALUES (1, +REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766), +REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766)); +CREATE TABLE t2_purge ( +A int PRIMARY KEY, +B blob, C blob, D blob, E blob, +F blob, G blob, H blob, I blob, +J blob, K blob, L blob, +INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO t2_purge VALUES (1, +REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766), +REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766), REPEAT('i', 766), +REPEAT('j', 766), REPEAT('k', 766), REPEAT('l', 766)); +CREATE TABLE t3_purge ( +A int, +B varchar(800), C varchar(800), D varchar(800), E varchar(800), +F varchar(800), G varchar(800), H varchar(800), +PRIMARY KEY (B(767), C(767), D(767), E(767), A), +INDEX (A) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO t3_purge SELECT * FROM t1_purge; +CREATE TABLE t4_purge ( +A int PRIMARY KEY, +B varchar(800), C varchar(800), D varchar(800), E varchar(800), +F varchar(800), G varchar(800), H varchar(800), I varchar(800), +J varchar(800), K varchar(800), L varchar(800), +INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO t4_purge SELECT * FROM t2_purge; +DELETE FROM t1_purge; +DELETE FROM t2_purge; +DELETE FROM t3_purge; +DELETE FROM t4_purge; +SET @r=REPEAT('a',500); +CREATE TABLE t12637786(a int, +v1 varchar(500), v2 varchar(500), v3 varchar(500), +v4 varchar(500), v5 varchar(500), v6 varchar(500), +v7 varchar(500), v8 varchar(500), v9 varchar(500), +v10 varchar(500), v11 varchar(500), v12 varchar(500), +v13 varchar(500), v14 varchar(500), v15 varchar(500), +v16 varchar(500), v17 varchar(500), v18 varchar(500) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +CREATE INDEX idx1 ON t12637786(a,v1); +INSERT INTO t12637786 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +UPDATE t12637786 SET a=1000; +DELETE FROM t12637786; +# Bug#12963823 - Test that the purge thread does not crash when +CREATE TABLE t12963823(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob, +i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob) +ENGINE=innodb ROW_FORMAT=dynamic; +SET @r = REPEAT('a', 767); +INSERT INTO t12963823 VALUES (@r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r); +CREATE INDEX ndx_a ON t12963823 (a(500)); +CREATE INDEX ndx_b ON t12963823 (b(500)); +CREATE INDEX ndx_c ON t12963823 (c(500)); +CREATE INDEX ndx_d ON t12963823 (d(500)); +CREATE INDEX ndx_e ON t12963823 (e(500)); +CREATE INDEX ndx_f ON t12963823 (f(500)); +CREATE INDEX ndx_k ON t12963823 (k(500)); +CREATE INDEX ndx_l ON t12963823 (l(500)); +SET @r = REPEAT('b', 500); +UPDATE t12963823 set a=@r,b=@r,c=@r,d=@r; +UPDATE t12963823 set e=@r,f=@r,g=@r,h=@r; +UPDATE t12963823 set i=@r,j=@r,k=@r,l=@r; +UPDATE t12963823 set m=@r,n=@r,o=@r,p=@r; +ALTER TABLE t12963823 DROP INDEX ndx_a; +ALTER TABLE t12963823 DROP INDEX ndx_b; +CREATE INDEX ndx_g ON t12963823 (g(500)); +CREATE INDEX ndx_h ON t12963823 (h(500)); +CREATE INDEX ndx_i ON t12963823 (i(500)); +CREATE INDEX ndx_j ON t12963823 (j(500)); +CREATE INDEX ndx_m ON t12963823 (m(500)); +CREATE INDEX ndx_n ON t12963823 (n(500)); +CREATE INDEX ndx_o ON t12963823 (o(500)); +CREATE INDEX ndx_p ON t12963823 (p(500)); +SHOW CREATE TABLE t12963823; +Table Create Table +t12963823 CREATE TABLE `t12963823` ( + `a` blob, + `b` blob, + `c` blob, + `d` blob, + `e` blob, + `f` blob, + 
`g` blob, + `h` blob, + `i` blob, + `j` blob, + `k` blob, + `l` blob, + `m` blob, + `n` blob, + `o` blob, + `p` blob, + KEY `ndx_c` (`c`(500)), + KEY `ndx_d` (`d`(500)), + KEY `ndx_e` (`e`(500)), + KEY `ndx_f` (`f`(500)), + KEY `ndx_k` (`k`(500)), + KEY `ndx_l` (`l`(500)), + KEY `ndx_g` (`g`(500)), + KEY `ndx_h` (`h`(500)), + KEY `ndx_i` (`i`(500)), + KEY `ndx_j` (`j`(500)), + KEY `ndx_m` (`m`(500)), + KEY `ndx_n` (`n`(500)), + KEY `ndx_o` (`o`(500)), + KEY `ndx_p` (`p`(500)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +# Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE +SET SESSION innodb_strict_mode = ON; +CREATE TABLE bug12547647( +a int NOT NULL, b blob NOT NULL, c text, +PRIMARY KEY (b(10), a), INDEX (c(767)), INDEX(b(767)) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO bug12547647 VALUES (5,REPEAT('khdfo5AlOq',1900),REPEAT('g',7751)); +COMMIT; +UPDATE bug12547647 SET c = REPEAT('b',16928); +ERROR HY000: Undo log record is too big. +SHOW WARNINGS; +Level Code Message +Error 1713 Undo log record is too big. +DROP TABLE bug12547647; +SET SESSION innodb_strict_mode = off; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +DROP TABLE t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +DROP TABLE t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; +# +# Bug#56862 Execution of a query that uses index merge returns a wrong result +# +CREATE TABLE t1 ( +pk int NOT NULL AUTO_INCREMENT PRIMARY KEY, +a int, +b int, +INDEX idx(a)) +ENGINE=INNODB; +INSERT INTO t1(a,b) VALUES +(11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500), +(3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800), +(6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700), +(13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000); +INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1; +INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1; +INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1 VALUES (1000000, 0, 0); +set @optimizer_switch_saved=@@optimizer_switch; +SET SESSION optimizer_switch='derived_merge=off'; +SET SESSION sort_buffer_size = 1024*36; +EXPLAIN +SELECT COUNT(*) FROM +(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY) +WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 PRIMARY NULL ALL NULL NULL NULL NULL 1537 100.00 NULL +2 DERIVED t1 NULL index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1537 100.00 Using sort_union(idx,PRIMARY); Using where +Warnings: +Note 1003 /* select#1 */ select count(0) AS `COUNT(*)` from (/* select#2 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` FORCE INDEX (PRIMARY) FORCE INDEX (`idx`) where ((`test`.`t1`.`a` between 2 and 7) or (`test`.`t1`.`pk` = 1000000))) `t` +SELECT COUNT(*) FROM +(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY) +WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t; +COUNT(*) +1537 +set @@optimizer_switch=@optimizer_switch_saved; +SET SESSION sort_buffer_size = DEFAULT; +DROP TABLE t1; +DROP TABLE t1_purge, t2_purge, t3_purge, t4_purge; +DROP TABLE t12637786; +DROP TABLE t12963823; diff --git a/mysql-test/suite/innodb_zip/r/4k.result b/mysql-test/suite/innodb_zip/r/4k.result new file mode 100644 index 00000000000..721943e7f5a --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/4k.result @@ -0,0 +1,442 @@ +SET default_storage_engine=InnoDB; +# Test 1) Show the page size from Information Schema +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_page_size'; +variable_value +4096 +# Test 2) The number of buffer pool pages is dependent upon the page size. +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +variable_value +{checked_valid} +# Test 3) Query some information_shema tables that are dependent upon +# the page size. 
+SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +mysql/engine_cost 9 33 PRIMARY 3 3 3 50 +mysql/gtid_executed 6 33 PRIMARY 3 3 2 50 +mysql/help_category 7 33 PRIMARY 3 3 1 50 +mysql/help_category 7 33 name 4 2 1 50 +mysql/help_keyword 5 33 PRIMARY 3 3 1 50 +mysql/help_keyword 5 33 name 4 2 1 50 +mysql/help_relation 5 33 PRIMARY 3 3 2 50 +mysql/help_topic 9 33 PRIMARY 3 3 1 50 +mysql/help_topic 9 33 name 4 2 1 50 +mysql/innodb_index_stats 11 33 PRIMARY 3 3 4 50 +mysql/innodb_table_stats 9 33 PRIMARY 3 3 2 50 +mysql/plugin 5 33 PRIMARY 3 3 1 50 +mysql/servers 12 33 PRIMARY 3 3 1 50 +mysql/server_cost 7 33 PRIMARY 3 3 1 50 +mysql/slave_master_info 28 33 PRIMARY 3 3 1 50 +mysql/slave_relay_log_info 12 33 PRIMARY 3 3 1 50 +mysql/slave_worker_info 16 33 PRIMARY 3 3 2 50 +mysql/time_zone 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_leap_second 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_name 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_transition 6 33 PRIMARY 3 3 2 50 +mysql/time_zone_transition_type 8 33 PRIMARY 3 3 2 50 +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; +SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +test/t1 5 0 PRIMARY 3 3 1 50 +test/t2 5 1 PRIMARY 3 3 1 50 +test/t3 5 37 PRIMARY 3 3 1 50 +test/t4 5 33 PRIMARY 3 3 1 50 +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t1 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t1.ibd +test/t2 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t2.ibd +test/t3 Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3.ibd +test/t4 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t1.ibd +test/t2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t2.ibd +test/t3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t3.ibd +test/t4 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4.ibd +DROP TABLE t1, t2, t3, t4; +# Test 4) The maximum row size is dependent upon the page size. +# Redundant: 1979, Compact: 1982. +# Compressed: 1982, Dynamic: 1982. +# Each row format has its own amount of overhead that +# varies depending on number of fields and other overhead. 
+SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(127) +) ROW_FORMAT=redundant; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(128) +) ROW_FORMAT=redundant; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(155) +) ROW_FORMAT=compact; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(156) +) ROW_FORMAT=compact; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(76) +) ROW_FORMAT=compressed; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(79) +) ROW_FORMAT=compressed; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(155) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(156) +) ROW_FORMAT=dynamic; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+CREATE TABLE t1 (a varchar(64) character set utf8, +b varchar(64) character set utf8, +c varchar(64) character set utf8, +d varchar(64) character set utf8, +PRIMARY KEY (a,b,c,d)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(64) character set utf8, +b varchar(64) character set utf8, +c varchar(64) character set utf8, +d varchar(65) character set utf8, +PRIMARY KEY (a,b,c,d)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 768 bytes +CREATE TABLE t1 (a varchar(64) character set utf8, +b varchar(64) character set utf8, +c varchar(64) character set utf8, +d varchar(64) character set utf8, +e varchar(64) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(64) character set utf8, +b varchar(64) character set utf8, +c varchar(64) character set utf8, +d varchar(64) character set utf8, +e varchar(65) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 768 bytes +# Test 5) Make sure that KEY_BLOCK_SIZE=4, 2 & 1 are all +# accepted and that KEY_BLOCK_SIZE=16 & 8 are rejected +# in strict mode and converted to 4 in non-strict mode. +SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't1' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 4. +Error 1031 Table storage engine for 't1' doesn't have this option +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't1' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=8 cannot be larger than 4. +Error 1031 Table storage engine for 't1' doesn't have this option +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=8. 
+SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=8. +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +# Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16 +# are both rejected when innodb_file_per_table=OFF +SET SESSION innodb_strict_mode = ON; +SET GLOBAL innodb_file_per_table = OFF; +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value +innodb_file_per_table OFF +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=8 cannot be larger than 4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_file_format = `Antelope`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=8 cannot be larger than 4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 4. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. 
+Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +# Test 7) Not included here; 16k only +# Test 8) Test creating a table that could lead to undo log overflow. +CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob, +h blob,i blob,j blob,k blob,l blob,m blob,n blob, +o blob,p blob,q blob,r blob,s blob,t blob,u blob) +ENGINE=InnoDB ROW_FORMAT=dynamic; +SET @a = repeat('a', 767); +SET @b = repeat('b', 767); +SET @c = repeat('c', 767); +SET @d = repeat('d', 767); +SET @e = repeat('e', 767); +INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a); +UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b, +k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b; +CREATE INDEX t1a ON t1 (a(767)); +UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c, +k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c; +CREATE INDEX t1b ON t1 (b(767)); +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d, +k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +ERROR HY000: Undo log record is too big. +BEGIN; +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d; +UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d, +n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +COMMIT; +CREATE INDEX t1c ON t1 (c(767)); +UPDATE t1 SET c=@e; +CREATE INDEX t1d ON t1 (d(767)); +UPDATE t1 SET d=@e; +ERROR HY000: Undo log record is too big. +CREATE INDEX t1e ON t1 (e(767)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` blob, + `b` blob, + `c` blob, + `d` blob, + `e` blob, + `f` blob, + `g` blob, + `h` blob, + `i` blob, + `j` blob, + `k` blob, + `l` blob, + `m` blob, + `n` blob, + `o` blob, + `p` blob, + `q` blob, + `r` blob, + `s` blob, + `t` blob, + `u` blob, + KEY `t1a` (`a`(767)), + KEY `t1b` (`b`(767)), + KEY `t1c` (`c`(767)), + KEY `t1d` (`d`(767)), + KEY `t1e` (`e`(767)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1( +pk01 varchar(48), pk02 varchar(48), pk03 varchar(48), pk04 varchar(48), +pk05 varchar(48), pk06 varchar(48), pk07 varchar(48), pk08 varchar(48), +pk09 varchar(48), pk10 varchar(48), pk11 varchar(48), pk12 varchar(48), +pk13 varchar(48), pk14 varchar(48), pk15 varchar(48), pk16 varchar(48), +sk01 varchar(48), sk02 varchar(48), sk03 varchar(48), sk04 varchar(48), +sk05 varchar(48), sk06 varchar(48), sk07 varchar(48), sk08 varchar(48), +sk09 varchar(48), sk10 varchar(48), sk11 varchar(48), sk12 varchar(48), +sk13 varchar(48), sk14 varchar(48), sk15 varchar(48), sk16 varchar(48), +PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, +pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), +KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, +sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) +ROW_FORMAT=Redundant ENGINE=InnoDB; +SET @r = repeat('a', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 48); +INSERT INTO t1 
VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; +CREATE TABLE t1( +pk01 varchar(48), pk02 varchar(48), pk03 varchar(48), pk04 varchar(48), +pk05 varchar(48), pk06 varchar(48), pk07 varchar(48), pk08 varchar(48), +pk09 varchar(48), pk10 varchar(48), pk11 varchar(48), pk12 varchar(48), +pk13 varchar(48), pk14 varchar(48), pk15 varchar(48), pk16 varchar(48), +sk01 varchar(48), sk02 varchar(48), sk03 varchar(48), sk04 varchar(48), +sk05 varchar(48), sk06 varchar(48), sk07 varchar(48), sk08 varchar(48), +sk09 varchar(48), sk10 varchar(48), sk11 varchar(48), sk12 varchar(48), +sk13 varchar(48), sk14 varchar(48), sk15 varchar(48), sk16 varchar(48), +PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, +pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), +KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, +sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) +ROW_FORMAT=Compressed KEY_BLOCK_SIZE=4 ENGINE=InnoDB; +SET @r = repeat('a', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; +SET SESSION innodb_strict_mode = off; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +DROP TABLE t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_zip/r/8k.result b/mysql-test/suite/innodb_zip/r/8k.result new file mode 100644 index 00000000000..dc2b5ca1363 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/8k.result @@ -0,0 +1,473 @@ +SET default_storage_engine=InnoDB; +# Test 1) Show the page size from Information Schema +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_page_size'; +variable_value +8192 +# Test 2) The number of buffer pool pages is dependent upon the page size. +SELECT variable_value FROM information_schema.global_status +WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +variable_value +{checked_valid} +# Test 3) Query some information_shema tables that are dependent upon +# the page size. +SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +mysql/engine_cost 9 33 PRIMARY 3 3 3 50 +mysql/gtid_executed 6 33 PRIMARY 3 3 2 50 +mysql/help_category 7 33 PRIMARY 3 3 1 50 +mysql/help_category 7 33 name 4 2 1 50 +mysql/help_keyword 5 33 PRIMARY 3 3 1 50 +mysql/help_keyword 5 33 name 4 2 1 50 +mysql/help_relation 5 33 PRIMARY 3 3 2 50 +mysql/help_topic 9 33 PRIMARY 3 3 1 50 +mysql/help_topic 9 33 name 4 2 1 50 +mysql/innodb_index_stats 11 33 PRIMARY 3 3 4 50 +mysql/innodb_table_stats 9 33 PRIMARY 3 3 2 50 +mysql/plugin 5 33 PRIMARY 3 3 1 50 +mysql/servers 12 33 PRIMARY 3 3 1 50 +mysql/server_cost 7 33 PRIMARY 3 3 1 50 +mysql/slave_master_info 28 33 PRIMARY 3 3 1 50 +mysql/slave_relay_log_info 12 33 PRIMARY 3 3 1 50 +mysql/slave_worker_info 16 33 PRIMARY 3 3 2 50 +mysql/time_zone 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_leap_second 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_name 5 33 PRIMARY 3 3 1 50 +mysql/time_zone_transition 6 33 PRIMARY 3 3 2 50 +mysql/time_zone_transition_type 8 33 PRIMARY 3 3 2 50 +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; +SELECT t.name table_name, t.n_cols, t.flag table_flags, +i.name index_name, i.page_no root_page, i.type, +i.n_fields, i.merge_threshold +FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, +INFORMATION_SCHEMA.INNODB_SYS_INDEXES i +WHERE t.table_id = i.table_id +AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +table_name n_cols table_flags index_name root_page type n_fields merge_threshold +test/t1 5 0 PRIMARY 3 3 1 50 +test/t2 5 1 PRIMARY 3 3 1 50 +test/t3 5 39 PRIMARY 3 3 1 50 +test/t4 5 33 PRIMARY 3 3 1 50 +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t1 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t1.ibd +test/t2 Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t2.ibd +test/t3 Single DEFAULT 4096 Compressed MYSQLD_DATADIR/test/t3.ibd +test/t4 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4.ibd 
+=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t1.ibd +test/t2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t2.ibd +test/t3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t3.ibd +test/t4 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4.ibd +DROP TABLE t1, t2, t3, t4; +# Test 4) The maximum row size is dependent upon the page size. +# Redundant: 4027, Compact: 4030. +# Compressed: 4030, Dynamic: 4030. +# Each row format has its own amount of overhead that +# varies depending on number of fields and other overhead. +SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(155) +) ROW_FORMAT=redundant; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(156) +) ROW_FORMAT=redundant; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(202) +) ROW_FORMAT=compact; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(203) +) ROW_FORMAT=compact; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline. +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(103) +) ROW_FORMAT=compressed; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(106) +) ROW_FORMAT=compressed; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(202) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(203) +) ROW_FORMAT=dynamic; +ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +CREATE TABLE t1 (a varchar(128) character set utf8, +b varchar(128) character set utf8, +c varchar(128) character set utf8, +d varchar(128) character set utf8, +PRIMARY KEY (a,b,c,d)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(128) character set utf8, +b varchar(128) character set utf8, +c varchar(128) character set utf8, +d varchar(129) character set utf8, +PRIMARY KEY (a,b,c,d)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 1536 bytes +CREATE TABLE t1 (a varchar(128) character set utf8, +b varchar(128) character set utf8, +c varchar(128) character set utf8, +d varchar(128) character set utf8, +e varchar(128) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e)) +ENGINE=innodb; +DROP TABLE t1; +CREATE TABLE t1 (a varchar(128) character set utf8, +b varchar(128) character set utf8, +c varchar(128) character set utf8, +d varchar(128) character set utf8, +e varchar(129) character set utf8, +PRIMARY KEY (a), KEY (b,c,d,e)) +ENGINE=innodb; +ERROR 42000: Specified key was too long; max key length is 1536 bytes +# Test 5) Make sure that KEY_BLOCK_SIZE=8, 4, 2 & 1 are all +# accepted and that KEY_BLOCK_SIZE=16 is rejected in +# strict mode and converted to 8 in non-strict mode. +SET SESSION innodb_strict_mode = ON; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't1' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 8. 
+Error 1031 Table storage engine for 't1' doesn't have this option +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8 +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4 +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1 +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT table_name, row_format, create_options +FROM information_schema.tables WHERE table_name = 't1'; +table_name row_format create_options +t1 Compressed row_format=COMPRESSED +DROP TABLE t1; +# Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16 +# are rejected when innodb_file_per_table=OFF +SET SESSION innodb_strict_mode = ON; +SET GLOBAL innodb_file_per_table = OFF; +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value 
+innodb_file_per_table OFF +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 8. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_file_format = `Antelope`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +ERROR HY000: Table storage engine for 't4' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1031 Table storage engine for 't4' doesn't have this option +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +ERROR HY000: Table storage engine for 't5' doesn't have this option +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE=16 cannot be larger than 8. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1031 Table storage engine for 't5' doesn't have this option +SET GLOBAL innodb_file_format = `Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +# Test 7) Not included here; 16k only +# Test 8) Test creating a table that could lead to undo log overflow. +CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob, +h blob,i blob,j blob,k blob,l blob,m blob,n blob, +o blob,p blob,q blob,r blob,s blob,t blob,u blob) +ENGINE=InnoDB ROW_FORMAT=dynamic; +SET @a = repeat('a', 767); +SET @b = repeat('b', 767); +SET @c = repeat('c', 767); +SET @d = repeat('d', 767); +SET @e = repeat('e', 767); +INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a); +UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b, +k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b; +CREATE INDEX t1a ON t1 (a(767)); +CREATE INDEX t1b ON t1 (b(767)); +UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c, +k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c; +CREATE INDEX t1c ON t1 (c(767)); +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d, +k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +ERROR HY000: Undo log record is too big. 
+BEGIN; +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d; +UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d, +n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +COMMIT; +CREATE INDEX t1d ON t1 (d(767)); +UPDATE t1 SET d=@e; +CREATE INDEX t1e ON t1 (e(767)); +UPDATE t1 SET e=@e; +CREATE INDEX t1f ON t1 (f(767)); +UPDATE t1 SET f=@e; +CREATE INDEX t1g ON t1 (g(767)); +UPDATE t1 SET g=@e; +CREATE INDEX t1h ON t1 (h(767)); +UPDATE t1 SET h=@e; +CREATE INDEX t1i ON t1 (i(767)); +UPDATE t1 SET i=@e; +CREATE INDEX t1k ON t1 (j(767)); +CREATE INDEX t1j ON t1 (j(500)); +UPDATE t1 SET j=@e; +ERROR HY000: Undo log record is too big. +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` blob, + `b` blob, + `c` blob, + `d` blob, + `e` blob, + `f` blob, + `g` blob, + `h` blob, + `i` blob, + `j` blob, + `k` blob, + `l` blob, + `m` blob, + `n` blob, + `o` blob, + `p` blob, + `q` blob, + `r` blob, + `s` blob, + `t` blob, + `u` blob, + KEY `t1a` (`a`(767)), + KEY `t1b` (`b`(767)), + KEY `t1c` (`c`(767)), + KEY `t1d` (`d`(767)), + KEY `t1e` (`e`(767)), + KEY `t1f` (`f`(767)), + KEY `t1g` (`g`(767)), + KEY `t1h` (`h`(767)), + KEY `t1i` (`i`(767)), + KEY `t1k` (`j`(767)), + KEY `t1j` (`j`(500)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +DROP TABLE t1; +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1( +pk01 varchar(96), pk02 varchar(96), pk03 varchar(96), pk04 varchar(96), +pk05 varchar(96), pk06 varchar(96), pk07 varchar(96), pk08 varchar(96), +pk09 varchar(96), pk10 varchar(96), pk11 varchar(96), pk12 varchar(96), +pk13 varchar(96), pk14 varchar(96), pk15 varchar(96), pk16 varchar(96), +sk01 varchar(96), sk02 varchar(96), sk03 varchar(96), sk04 varchar(96), +sk05 varchar(96), sk06 varchar(96), sk07 varchar(96), sk08 varchar(96), +sk09 varchar(96), sk10 varchar(96), sk11 varchar(96), sk12 varchar(96), +sk13 varchar(96), sk14 varchar(96), sk15 varchar(96), sk16 varchar(96), +PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, +pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), +KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, +sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) +ROW_FORMAT=Redundant ENGINE=InnoDB; +SET @r = repeat('a', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; +CREATE TABLE t1( +pk01 varchar(96), pk02 varchar(96), pk03 varchar(96), pk04 varchar(96), +pk05 varchar(96), pk06 varchar(96), pk07 varchar(96), pk08 varchar(96), +pk09 varchar(96), pk10 varchar(96), pk11 varchar(96), pk12 varchar(96), +pk13 varchar(96), pk14 varchar(96), pk15 varchar(96), pk16 varchar(96), +sk01 varchar(96), sk02 varchar(96), sk03 varchar(96), sk04 varchar(96), +sk05 varchar(96), sk06 varchar(96), sk07 varchar(96), sk08 varchar(96), +sk09 varchar(96), sk10 varchar(96), sk11 varchar(96), sk12 varchar(96), +sk13 varchar(96), sk14 varchar(96), sk15 varchar(96), sk16 varchar(96), +PRIMARY KEY 
pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, +pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), +KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, +sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) +ROW_FORMAT=Compressed KEY_BLOCK_SIZE=8 ENGINE=InnoDB; +SET @r = repeat('a', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, +@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; +SET SESSION innodb_strict_mode = off; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +DROP TABLE t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. +DROP TABLE t1; +CREATE TABLE t1( +c text NOT NULL, d text NOT NULL, +PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +Warnings: +Warning 139 Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
+DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_zip/r/bug36169.result b/mysql-test/suite/innodb_zip/r/bug36169.result new file mode 100644 index 00000000000..7e165e0f7d4 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/bug36169.result @@ -0,0 +1 @@ +SET GLOBAL innodb_file_per_table=ON; diff --git a/mysql-test/suite/innodb_zip/r/bug36172.result b/mysql-test/suite/innodb_zip/r/bug36172.result new file mode 100644 index 00000000000..23c5b0cc2f7 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/bug36172.result @@ -0,0 +1 @@ +SET default_storage_engine=InnoDB; diff --git a/mysql-test/suite/innodb_zip/r/bug52745.result b/mysql-test/suite/innodb_zip/r/bug52745.result new file mode 100644 index 00000000000..20605eb274d --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/bug52745.result @@ -0,0 +1,129 @@ +SET GLOBAL innodb_file_per_table=on; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +CREATE TABLE bug52745 ( +a2 int(10) unsigned DEFAULT NULL, +col37 time DEFAULT NULL, +col38 char(229) CHARACTER SET utf8 DEFAULT NULL, +col39 text, +col40 timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +col41 int(10) unsigned DEFAULT NULL, +col42 varchar(248) CHARACTER SET utf8 DEFAULT NULL, +col43 smallint(5) unsigned zerofill DEFAULT NULL, +col44 varchar(150) CHARACTER SET utf8 DEFAULT NULL, +col45 float unsigned zerofill DEFAULT NULL, +col46 binary(1) DEFAULT NULL, +col47 tinyint(4) DEFAULT NULL, +col48 tinyint(1) DEFAULT NULL, +col49 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', +col50 binary(1) DEFAULT NULL, +col51 double unsigned zerofill DEFAULT NULL, +col52 int(10) unsigned DEFAULT NULL, +col53 time DEFAULT NULL, +col54 double unsigned DEFAULT NULL, +col55 time DEFAULT NULL, +col56 mediumtext CHARACTER SET latin2, +col57 blob, +col58 decimal(52,16) unsigned zerofill NOT NULL DEFAULT '000000000000000000000000000000000000.0000000000000000', +col59 binary(1) DEFAULT NULL, +col60 longblob, +col61 time DEFAULT NULL, +col62 longtext CHARACTER SET utf8 COLLATE utf8_persian_ci, +col63 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', +col64 int(10) unsigned DEFAULT NULL, +col65 date DEFAULT NULL, +col66 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', +col67 binary(1) DEFAULT NULL, +col68 tinyblob, +col69 date DEFAULT NULL, +col70 tinyint(3) unsigned zerofill DEFAULT NULL, +col71 varchar(44) CHARACTER SET utf8 DEFAULT NULL, +col72 datetime DEFAULT NULL, +col73 smallint(5) unsigned zerofill DEFAULT NULL, +col74 longblob, +col75 bit(34) DEFAULT NULL, +col76 float unsigned zerofill DEFAULT NULL, +col77 year(4) DEFAULT NULL, +col78 tinyint(3) unsigned DEFAULT NULL, +col79 set('msfheowh','tbpxbgf','by','wahnrjw','myqfasxz','rsokyumrt') CHARACTER SET latin2 DEFAULT NULL, +col80 datetime DEFAULT NULL, +col81 smallint(6) DEFAULT NULL, +col82 enum('xtaurnqfqz','rifrse','kuzwpbvb','niisabk','zxavro','rbvasv','','uulrfaove','','') DEFAULT NULL, +col83 bigint(20) unsigned zerofill DEFAULT NULL, +col84 float unsigned zerofill DEFAULT NULL, +col85 double DEFAULT NULL, +col86 enum('ylannv','','vlkhycqc','snke','cxifustp','xiaxaswzp','oxl') CHARACTER SET latin1 COLLATE latin1_german2_ci DEFAULT NULL, +col87 varbinary(221) DEFAULT NULL, +col88 double unsigned DEFAULT NULL, +col89 float unsigned zerofill DEFAULT NULL, +col90 tinyblob +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; 
+Warnings: +Note 1291 Column 'col82' has duplicated value '' in ENUM +Note 1291 Column 'col82' has duplicated value '' in ENUM +SET sql_mode = default; +INSERT IGNORE INTO bug52745 SET +col40='0000-00-00 00:00:00', +col51=16547, +col53='7711484', +col54=-28604, +col55='7112612', +col56='wakefulness\'', +col57=repeat('absorbefacient\'',106), +col58=11027, +col59='AM09gW7', +col60=repeat('Noelani\'',16), +col61='2520576', +col62='substitutiv', +col63='19950106155112', +col64=-12038, +col65='86238806', +col66='19600719080256', +col68=repeat('Sagittarius\'',54), +col69='38943902', +col70=1232, +col71='Elora\'', +col74=repeat('zipp',11), +col75='0', +col76=23254, +col78=13247, +col79='56219', +col80='20500609035724', +col81=11632, +col82=7, +col84=-23863, +col85=6341, +col87='HZdkf.4 s7t,5Rmq 8so fmr,ruGLUG25TrtI.yQ 2SuHq0ML7rw7.4 b2yf2E5TJxOtBBZImezDnzpj,uPYfznnEUDN1e9aQoO 2DsplB7TFWy oQJ br HLF :F,eQ p4i1oWsr lL3PG,hjCz6hYqN h1QTjLCjrv:QCdSzpYBibJAtZCxLOk3l6Blsh.W', +col88=16894, +col89=6161, +col90=repeat('gale',48); +Warnings: +Warning 1265 Data truncated for column 'col53' at row 1 +Warning 1264 Out of range value for column 'col54' at row 1 +Warning 1265 Data truncated for column 'col59' at row 1 +Warning 1265 Data truncated for column 'col61' at row 1 +Warning 1264 Out of range value for column 'col64' at row 1 +Warning 1265 Data truncated for column 'col65' at row 1 +Warning 1264 Out of range value for column 'col66' at row 1 +Warning 1265 Data truncated for column 'col68' at row 1 +Warning 1265 Data truncated for column 'col69' at row 1 +Warning 1264 Out of range value for column 'col70' at row 1 +Warning 1264 Out of range value for column 'col78' at row 1 +Warning 1265 Data truncated for column 'col79' at row 1 +Warning 1264 Out of range value for column 'col84' at row 1 +SHOW WARNINGS; +Level Code Message +Warning 1265 Data truncated for column 'col53' at row 1 +Warning 1264 Out of range value for column 'col54' at row 1 +Warning 1265 Data truncated for column 'col59' at row 1 +Warning 1265 Data truncated for column 'col61' at row 1 +Warning 1264 Out of range value for column 'col64' at row 1 +Warning 1265 Data truncated for column 'col65' at row 1 +Warning 1264 Out of range value for column 'col66' at row 1 +Warning 1265 Data truncated for column 'col68' at row 1 +Warning 1265 Data truncated for column 'col69' at row 1 +Warning 1264 Out of range value for column 'col70' at row 1 +Warning 1264 Out of range value for column 'col78' at row 1 +Warning 1265 Data truncated for column 'col79' at row 1 +Warning 1264 Out of range value for column 'col84' at row 1 +DROP TABLE bug52745; +SET GLOBAL innodb_file_per_table=1; diff --git a/mysql-test/suite/innodb_zip/r/bug53591.result b/mysql-test/suite/innodb_zip/r/bug53591.result new file mode 100644 index 00000000000..e14a1942750 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/bug53591.result @@ -0,0 +1,13 @@ +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_strict_mode=on; +set old_alter_table=0; +CREATE TABLE bug53591(a text charset utf8 not null) +ENGINE=InnoDB KEY_BLOCK_SIZE=1; +ALTER TABLE bug53591 ADD PRIMARY KEY(a(220)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +SHOW WARNINGS; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. 
This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +DROP TABLE bug53591; +SET GLOBAL innodb_file_per_table=1; +SET GLOBAL innodb_strict_mode=DEFAULT; diff --git a/mysql-test/suite/innodb_zip/r/bug56680.result b/mysql-test/suite/innodb_zip/r/bug56680.result new file mode 100644 index 00000000000..40660f435fb --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/bug56680.result @@ -0,0 +1,120 @@ +SET GLOBAL tx_isolation='REPEATABLE-READ'; +SET GLOBAL innodb_file_per_table=on; +CREATE TABLE bug56680( +a INT AUTO_INCREMENT PRIMARY KEY, +b CHAR(1), +c INT, +INDEX(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; +INSERT INTO bug56680 VALUES(0,'x',1); +BEGIN; +SELECT b FROM bug56680; +b +x +connect con1,localhost,root,,; +connection con1; +BEGIN; +UPDATE bug56680 SET b='X'; +connection default; +SELECT b FROM bug56680; +b +x +SELECT * FROM bug56680; +a b c +1 x 1 +connection con1; +ROLLBACK; +disconnect con1; +connection default; +SELECT b FROM bug56680; +b +x +SET GLOBAL tx_isolation='READ-UNCOMMITTED'; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +BEGIN; +SELECT b FROM bug56680 LIMIT 2; +b +x +x +connect con1,localhost,root,,; +connection con1; +BEGIN; +DELETE FROM bug56680 WHERE a=1; +INSERT INTO bug56680 VALUES(1,'X',1); +SELECT b FROM bug56680 LIMIT 3; +b +X +x +x +connection default; +SELECT b FROM bug56680 LIMIT 2; +b +x +x +CHECK TABLE bug56680; +Table Op Msg_type Msg_text +test.bug56680 check status OK +connection con1; +ROLLBACK; +SELECT b FROM bug56680 LIMIT 2; +b +x +x +CHECK TABLE bug56680; +Table Op Msg_type Msg_text +test.bug56680 check status OK +connection default; +disconnect con1; +SELECT b FROM bug56680 LIMIT 2; +b +x +x +CREATE TABLE bug56680_2( +a INT AUTO_INCREMENT PRIMARY KEY, +b VARCHAR(2) CHARSET latin1 COLLATE latin1_german2_ci, +c INT, +INDEX(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; +INSERT INTO bug56680_2 SELECT 0,_latin1 0xdf,c FROM bug56680; +BEGIN; +SELECT HEX(b) FROM bug56680_2 LIMIT 2; +HEX(b) +DF +DF +DELETE FROM bug56680_2 WHERE a=1; +INSERT INTO bug56680_2 VALUES(1,'SS',1); +SELECT HEX(b) FROM bug56680_2 LIMIT 3; +HEX(b) +5353 +DF +DF +CHECK TABLE bug56680_2; +Table Op Msg_type Msg_text +test.bug56680_2 check status OK +ALTER TABLE bug56680_2 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SELECT HEX(b) FROM bug56680_2 LIMIT 2; +HEX(b) +5353 +DF +DELETE FROM bug56680_2 WHERE a=1; +INSERT INTO bug56680_2 VALUES(1,_latin1 0xdf,1); +SELECT HEX(b) FROM bug56680_2 LIMIT 3; +HEX(b) +DF +DF +DF +CHECK TABLE bug56680_2; +Table Op Msg_type Msg_text +test.bug56680_2 check status OK +DROP TABLE bug56680_2; +DROP TABLE bug56680; diff --git a/mysql-test/suite/innodb_zip/r/cmp_drop_table.result b/mysql-test/suite/innodb_zip/r/cmp_drop_table.result new file mode 100644 index 00000000000..c1743cac2e1 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/cmp_drop_table.result @@ -0,0 +1,13 @@ +set global innodb_file_per_table=on; +create table t1(a text) engine=innodb key_block_size=8; +SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 
0; +page_size +8192 +drop table t1; +SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0; +page_size +8192 +create table t2(a text) engine=innodb; +SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0; +page_size +drop table t2; diff --git a/mysql-test/suite/innodb_zip/r/cmp_per_index.result b/mysql-test/suite/innodb_zip/r/cmp_per_index.result new file mode 100644 index 00000000000..5b899e9ff71 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/cmp_per_index.result @@ -0,0 +1,94 @@ +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SELECT * FROM information_schema.innodb_cmp_per_index; +CREATE TABLE t ( +a INT, +b VARCHAR(512), +c VARCHAR(16), +PRIMARY KEY (a), +INDEX (b(512)), +INDEX (c(16)) +) ENGINE=INNODB KEY_BLOCK_SIZE=2; +SELECT +database_name, +table_name, +index_name, +compress_ops, +compress_ops_ok, +uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; +database_name test +table_name t +index_name b +compress_ops 1 +compress_ops_ok 1 +uncompress_ops 0 +database_name test +table_name t +index_name c +compress_ops 1 +compress_ops_ok 1 +uncompress_ops 0 +database_name test +table_name t +index_name PRIMARY +compress_ops 1 +compress_ops_ok 1 +uncompress_ops 0 +BEGIN; +COMMIT; +ALTER TABLE t DROP INDEX c; +GRANT USAGE ON *.* TO 'tuser01'@'localhost' IDENTIFIED BY 'cDJvI9s_Uq'; +Warnings: +Level Warning +Code 1287 +Message Using GRANT for creating new user is deprecated and will be removed in future release. Create new user with CREATE USER statement. +FLUSH PRIVILEGES; +SELECT * FROM information_schema.innodb_cmp_per_index; +ERROR 42000: Access denied; you need (at least one of) the PROCESS privilege(s) for this operation +DROP USER 'tuser01'@'localhost'; +SELECT +database_name, +table_name, +index_name, +CASE WHEN compress_ops=47 and @@innodb_compression_level IN (4,8,9) THEN 65 +ELSE compress_ops END as compress_ops, +CASE WHEN compress_ops_ok=47 and @@innodb_compression_level IN (4,8,9) THEN 65 +ELSE compress_ops_ok END as compress_ops_ok, +uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; +database_name test +table_name t +index_name b +compress_ops 43 +compress_ops_ok 43 +uncompress_ops 0 +database_name test +table_name t +index_name PRIMARY +compress_ops 65 +compress_ops_ok 65 +uncompress_ops 0 +# restart +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SELECT COUNT(*) FROM t; +COUNT(*) 128 +SELECT +database_name, +table_name, +index_name, +compress_ops, +compress_ops_ok, +CASE WHEN uncompress_ops=6 and @@innodb_compression_level IN (4,8,9) THEN 9 +ELSE uncompress_ops END as uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; +database_name test +table_name t +index_name PRIMARY +compress_ops 0 +compress_ops_ok 0 +uncompress_ops 9 +DROP TABLE t; +SET GLOBAL innodb_cmp_per_index_enabled=default; diff --git a/mysql-test/suite/innodb_zip/r/create_options.result b/mysql-test/suite/innodb_zip/r/create_options.result new file mode 100644 index 00000000000..2d80894c8cd --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/create_options.result @@ -0,0 +1,839 @@ +SET default_storage_engine=InnoDB; +SET GLOBAL innodb_file_per_table=ON; +SET SESSION innodb_strict_mode = ON; +# Test 1) StrictMode=ON, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0 +# KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified' +# 'FIXED' is sent to InnoDB since it is used by MyISAM. 
+# But it is an invalid mode in InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: invalid ROW_FORMAT specifier. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: invalid ROW_FORMAT specifier. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +# Test 2) StrictMode=ON, CREATE with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE +# KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE. 
+Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +# Test 3) StrictMode=ON, ALTER with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: invalid ROW_FORMAT specifier. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE. 
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +# Test 4) StrictMode=ON, CREATE with ROW_FORMAT=COMPACT, ALTER with a valid non-zero KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE. 
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +# Test 5) StrictMode=ON, CREATE with a valid KEY_BLOCK_SIZE +# ALTER with each ROW_FORMAT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=2; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `f1` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2 +ALTER TABLE t1 ROW_FORMAT=COMPACT; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +# Test 6) StrictMode=ON, CREATE with an invalid KEY_BLOCK_SIZE. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. 
Valid values are [1, 2, 4, 8, 16] +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +# Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and +# and a valid non-zero KEY_BLOCK_SIZE are rejected with Antelope +# and that they can be set to default values during strict mode. +SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f1 INT; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. 
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `f1` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +# Test 8) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and +# and a valid non-zero KEY_BLOCK_SIZE are rejected with +# innodb_file_per_table=OFF and that they can be set to default +# values during strict mode. +SET GLOBAL innodb_file_per_table=OFF; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED; +Got one of the listed errors +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. +Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options") +Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options") +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE' +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. 
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +ALTER TABLE t1 ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +SET GLOBAL innodb_file_per_table=ON; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SET GLOBAL innodb_file_per_table=ON; +################################################## +SET SESSION innodb_strict_mode = OFF; +# Test 9) StrictMode=OFF, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0 +# KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified' +# 'FIXED' is sent to InnoDB since it is used by MyISAM. +# It is an invalid mode in InnoDB, use COMPACT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED; +Warnings: +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=FIXED +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; +Warnings: +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. 
+SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=FIXED +# Test 10) StrictMode=OFF, CREATE with each ROW_FORMAT & a valid KEY_BLOCK_SIZE +# KEY_BLOCK_SIZE is ignored with COMPACT, REDUNDANT, & DYNAMIC +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT key_block_size=1 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC key_block_size=4 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +# Test 11) StrictMode=OFF, ALTER with each ROW_FORMAT & a valid KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. 
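The contrast these results capture — the same ROW_FORMAT/KEY_BLOCK_SIZE combination warning under innodb_strict_mode=OFF but failing under strict mode — can be reproduced by hand. A minimal sketch, assuming innodb_file_per_table=ON and a Barracuda file format as the test sets up; the table name is hypothetical and not part of the test suite:

    SET SESSION innodb_strict_mode = OFF;
    CREATE TABLE kbs_demo (i INT) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
    -- Accepted; InnoDB only warns that KEY_BLOCK_SIZE is ignored unless ROW_FORMAT=COMPRESSED.
    SHOW WARNINGS;
    DROP TABLE kbs_demo;
    SET SESSION innodb_strict_mode = ON;
    CREATE TABLE kbs_demo (i INT) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
    -- Rejected; the same combination now fails with errno 140 "Wrong create options".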
+SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=FIXED key_block_size=1 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC key_block_size=4 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +# Test 12) StrictMode=OFF, CREATE with ROW_FORMAT=COMPACT, ALTER with a valid KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT key_block_size=2 +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT key_block_size=2 +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. 
+SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC key_block_size=2 +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=4 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed key_block_size=2 +# Test 13) StrictMode=OFF, CREATE with a valid KEY_BLOCK_SIZE +# ALTER with each ROW_FORMAT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `f1` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1 +ALTER TABLE t1 ROW_FORMAT=COMPACT; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Redundant row_format=REDUNDANT key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. 
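A quick way to see which settings actually took effect, mirroring the information_schema queries used throughout this result file (hypothetical table name, innodb_strict_mode=OFF assumed):

    CREATE TABLE ro_demo (i INT) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
    -- Only warns in non-strict mode; the table is created as COMPACT.
    SELECT TABLE_NAME, ROW_FORMAT, CREATE_OPTIONS
    FROM information_schema.tables WHERE TABLE_NAME = 'ro_demo';
    -- ROW_FORMAT reports the format InnoDB actually used (Compact), while
    -- CREATE_OPTIONS keeps the requested row_format=COMPACT key_block_size=2.
    DROP TABLE ro_demo;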
+SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compact row_format=COMPACT +# Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE, +# it defaults to half of the page size. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=15; +Warnings: +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic key_block_size=15 +# Test 15) StrictMode=OFF, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and a +valid KEY_BLOCK_SIZE are remembered but not used when ROW_FORMAT +is reverted to Antelope and then used again when ROW_FORMAT=Barracuda. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f1 INT; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1. +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SHOW WARNINGS; +Level Code Message +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1. +Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=COMPRESSED key_block_size=1 +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=1 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +# Test 16) StrictMode=OFF, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and a +valid KEY_BLOCK_SIZE are remembered but not used when innodb_file_per_table=OFF +and then used again when innodb_file_per_table=ON. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +SET GLOBAL innodb_file_per_table=ON; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Compressed row_format=COMPRESSED key_block_size=2 +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +SET GLOBAL innodb_file_per_table=ON; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +Level Code Message +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +TABLE_NAME ROW_FORMAT CREATE_OPTIONS +t1 Dynamic row_format=DYNAMIC +# Cleanup +DROP TABLE t1; diff --git 
a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result new file mode 100644 index 00000000000..7285f4dfad7 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result @@ -0,0 +1,534 @@ +SET default_storage_engine=InnoDB; +set global innodb_file_per_table=1; +### Test 1 ### +create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +insert into worklog5743 values(repeat("a", 20000)); +update worklog5743 set a = (repeat("b", 16000)); +create index idx on worklog5743(a(2000)); +show warnings; +Level Code Message +begin; +update worklog5743 set a = (repeat("x", 17000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +connect con1,localhost,root,,; +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +0 +select a = repeat("b", 16000) from worklog5743; +a = repeat("b", 16000) +1 +connect con2,localhost,root,,; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +1 +connection default; +rollback; +drop table worklog5743; +### Test 2 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +create index idx on worklog5743(a1, a2(2000)); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 1000; +connection con1; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743 ref idx idx 5 const 1 +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +connection default; +rollback; +drop table worklog5743; +### Test 3 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +create index idx on worklog5743(a1, a2(50)); +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 1000; +connection con1; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743 ref idx idx 5 const 1 +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +connection default; +rollback; +drop table worklog5743; +### Test 4 ### +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; +create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8; +create table worklog5743_16(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=16; +set 
global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_1(a2(4000)); +Got one of the listed errors +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_1(a2(4000)); +Got one of the listed errors +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx3 on worklog5743_1(a2(436)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_1(a2(434)); +show warnings; +Level Code Message +create index idx5 on worklog5743_1(a1, a2(430)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET sql_mode= ''; +create index idx1 on worklog5743_2(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_2(a2(4000)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. 
This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx3 on worklog5743_2(a2(948)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_2(a2(946)); +show warnings; +Level Code Message +create index idx5 on worklog5743_2(a1, a2(942)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_2(a1, a2(940)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_4(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_4(a2(4000)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx3 on worklog5743_4(a2(1972)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_4(a2(1970)); +show warnings; +Level Code Message +create index idx5 on worklog5743_4(a1, a2(1966)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. 
You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_4(a1, a2(1964)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_8(a2(1000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_8(a2(3073)); +Warnings: +Warning 1071 Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +create index idx3 on worklog5743_8(a2(3072)); +Warnings: +Note 1831 Duplicate index 'idx3' defined on the table 'test.worklog5743_8'. This is deprecated and will be disallowed in a future release. +show warnings; +Level Code Message +Note 1831 Duplicate index 'idx3' defined on the table 'test.worklog5743_8'. This is deprecated and will be disallowed in a future release. +create index idx4 on worklog5743_8(a1, a2(3069)); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx5 on worklog5743_8(a1, a2(3068)); +show warnings; +Level Code Message +create index idx6 on worklog5743_8(a1, a2(2000), a3(1069)); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx7 on worklog5743_8(a1, a2(2000), a3(1068)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_16(a2(1000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_16(a2(3073)); +Warnings: +Warning 1071 Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +create index idx3 on worklog5743_16(a2(3072)); +Warnings: +Note 1831 Duplicate index 'idx3' defined on the table 'test.worklog5743_16'. This is deprecated and will be disallowed in a future release. +show warnings; +Level Code Message +Note 1831 Duplicate index 'idx3' defined on the table 'test.worklog5743_16'. This is deprecated and will be disallowed in a future release. 
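The prefix limits being probed above can be summarised with a small sketch, assuming a 16KB page size, innodb_file_per_table=ON and ROW_FORMAT=DYNAMIC; the table name is hypothetical:

    CREATE TABLE prefix_demo (a TEXT NOT NULL) ROW_FORMAT=DYNAMIC;
    SET GLOBAL innodb_large_prefix=0;
    CREATE INDEX i1 ON prefix_demo(a(768));
    -- Fails under a strict sql_mode: without large prefixes the key limit is 767 bytes
    -- (with sql_mode='' the prefix is truncated to 767 and only a warning is raised).
    SET GLOBAL innodb_large_prefix=1;
    CREATE INDEX i2 ON prefix_demo(a(3072));
    -- Accepted: DYNAMIC and COMPRESSED tables may index column prefixes up to 3072 bytes.
    DROP TABLE prefix_demo;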
+create index idx4 on worklog5743_16(a1, a2(3069)); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx5 on worklog5743_16(a1, a2(3068)); +show warnings; +Level Code Message +create index idx6 on worklog5743_16(a1, a2(2000), a3(1069)); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx7 on worklog5743_16(a1, a2(2000), a3(1068)); +show warnings; +Level Code Message +set sql_mode= default; +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); +insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000)); +insert into worklog5743_16 values(9, repeat("a", 10000), repeat("a", 10000)); +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000)); +insert into worklog5743_16 values(2, repeat("b", 10000), repeat("b", 10000)); +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_8; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_16; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +begin; +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +update worklog5743_8 set a1 = 1000; +update worklog5743_16 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_8; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_16; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +connection con1; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743_1 ref idx6 idx6 5 const 1 +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743_2 ref idx6 idx6 5 const 
1 +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743_4 ref idx6 idx6 5 const 1 +explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743_8 ref idx5,idx7 idx5 5 const 1 +explain select a1, left(a2, 20) from worklog5743_16 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743_16 ref idx5,idx7 idx5 5 const 1 +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_16 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_16 where a1 = 9; +a1 left(a2, 20) +connection default; +rollback; +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; +drop table worklog5743_8; +drop table worklog5743_16; +### Test 5 ### +create table worklog5743(a1 int, +a2 varchar(20000), +a3 varchar(3073), +a4 varchar(3072), +a5 varchar(3069), +a6 varchar(3068)) +ROW_FORMAT=DYNAMIC; +SET sql_mode=''; +create index idx1 on worklog5743(a2); +Warnings: +Warning 1071 Specified key was too long; max key length is 3072 bytes +create index idx2 on worklog5743(a3); +Warnings: +Warning 1071 Specified key was too long; max key length is 3072 bytes +create index idx3 on worklog5743(a4); +show warnings; +Level Code Message +SET sql_mode= default; +create index idx4 on worklog5743(a1, a2); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 3072 bytes +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx5 on worklog5743(a1, a5); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +create index idx6 on worklog5743(a1, a6); +show warnings; +Level Code Message +show create table worklog5743; +Table Create Table +worklog5743 CREATE TABLE `worklog5743` ( + `a1` int(11) DEFAULT NULL, + `a2` varchar(20000) DEFAULT NULL, + `a3` varchar(3073) DEFAULT NULL, + `a4` varchar(3072) DEFAULT NULL, + `a5` varchar(3069) DEFAULT NULL, + `a6` varchar(3068) DEFAULT NULL, + KEY `idx1` (`a2`(3072)), + KEY `idx2` (`a3`(3072)), + KEY `idx3` (`a4`), + KEY `idx6` (`a1`,`a6`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +insert into worklog5743 values(9, +repeat("a", 20000), repeat("a", 3073), +repeat("a", 3072), repeat("a", 3069), +repeat("a", 3068)); +begin; +update worklog5743 set a1 = 1000; +connection con1; +select @@session.tx_isolation; 
+@@session.tx_isolation +REPEATABLE-READ +explain select a1 from worklog5743 where a1 = 9; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE worklog5743 ref idx6 idx6 5 const 1 Using index +select a1 from worklog5743 where a1 = 9; +a1 +9 +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1 from worklog5743 where a1 = 9; +a1 +connection default; +rollback; +drop table worklog5743; +### Test 6 ### +create table worklog5743(a TEXT not null, primary key (a(1000))) +row_format=compact; +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create table worklog5743(a TEXT) +row_format=compact; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx on worklog5743(a(767)); +insert into worklog5743 values(repeat("a", 20000)); +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +connection con1; +select a = repeat("a", 20000) from worklog5743; +a = repeat("a", 20000) +1 +disconnect con1; +connection con2; +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 25000) from worklog5743; +a = repeat("x", 25000) +1 +1 +disconnect con2; +connection default; +rollback; +drop table worklog5743; +### Test 7 ### +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; +SET sql_mode=''; +create index idx1 on worklog5743(a(3073)); +Warnings: +Warning 1071 Specified key was too long; max key length is 3072 bytes +create index idx2 on worklog5743(a(3072)); +Warnings: +Note 1831 Duplicate index 'idx2' defined on the table 'test.worklog5743'. This is deprecated and will be disallowed in a future release. +show create table worklog5743; +Table Create Table +worklog5743 CREATE TABLE `worklog5743` ( + `a` text NOT NULL, + KEY `idx1` (`a`(3072)), + KEY `idx2` (`a`(3072)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +drop table worklog5743; +SET sql_mode= default; +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +SET GLOBAL innodb_file_per_table=1; diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix_4k.result b/mysql-test/suite/innodb_zip/r/index_large_prefix_4k.result new file mode 100644 index 00000000000..f010c522614 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/index_large_prefix_4k.result @@ -0,0 +1,404 @@ +SET default_storage_engine=InnoDB; +set global innodb_file_per_table=1; +### Test 1 ### +create table worklog5743(a TEXT not null, primary key (a(768))) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +insert into worklog5743 values(repeat("a", 20000)); +update worklog5743 set a = (repeat("b", 16000)); +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. 
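For the REDUNDANT and COMPACT cases shown a little earlier, the cap is on the indexed column prefix itself and does not grow with innodb_large_prefix; a minimal sketch with a hypothetical table name:

    CREATE TABLE col_demo (a TEXT NOT NULL) ROW_FORMAT=COMPACT;
    CREATE INDEX i1 ON col_demo(a(768));
    -- Fails: "Index column size too large. The maximum column size is 767 bytes."
    CREATE INDEX i2 ON col_demo(a(767));
    -- Accepted: 767 bytes is the ceiling for COMPACT and REDUNDANT row formats.
    DROP TABLE col_demo;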
+create index idx on worklog5743(a(900)); +Warnings: +Warning 1071 Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 768 bytes +SET sql_mode= default; +begin; +update worklog5743 set a = (repeat("x", 17000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +0 +select a = repeat("b", 16000) from worklog5743; +a = repeat("b", 16000) +1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +1 +rollback; +drop table worklog5743; +### Test 2 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +create index idx on worklog5743(a1, a2(750)); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 1111; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx idx 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1`,(`test`.`worklog5743`.`a2` = repeat('a',10000)) AS `a2 = repeat("a", 10000)` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +rollback; +drop table worklog5743; +### Test 3 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +create index idx on worklog5743(a1, a2(50)); +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 2222; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx idx 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1`,(`test`.`worklog5743`.`a2` = repeat('a',10000)) AS `a2 = repeat("a", 10000)` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +rollback; +drop table worklog5743; +### Test 4 ### +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_1(a2(4000)); +ERROR 42000: Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 767 bytes +create index idx3 on worklog5743_1(a2(436)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 1982. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 1982. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_1(a2(434)); +show warnings; +Level Code Message +create index idx5 on worklog5743_1(a1, a2(430)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 1982. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 1982. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; +Level Code Message +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. +create index idx1 on worklog5743_2(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 768 bytes +show create table worklog5743_2; +Table Create Table +worklog5743_2 CREATE TABLE `worklog5743_2` ( + `a1` int(11) DEFAULT NULL, + `a2` text NOT NULL, + KEY `idx1` (`a2`(768)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2 +create index idx3 on worklog5743_2(a2(769)); +Warnings: +Warning 1071 Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 768 bytes +create index idx4 on worklog5743_2(a2(768)); +Warnings: +Warning 1831 Duplicate index 'idx4' defined on the table 'test.worklog5743_2'. This is deprecated and will be disallowed in a future release. +show warnings; +Level Code Message +Warning 1831 Duplicate index 'idx4' defined on the table 'test.worklog5743_2'. This is deprecated and will be disallowed in a future release. +create index idx5 on worklog5743_2(a1, a2(765)); +ERROR 42000: Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 768 bytes +create index idx6 on worklog5743_2(a1, a2(764)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_4(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +show create table worklog5743_4; +Table Create Table +worklog5743_4 CREATE TABLE `worklog5743_4` ( + `a1` int(11) DEFAULT NULL, + `a2` text NOT NULL, + KEY `idx1` (`a2`(767)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=4 +create index idx3 on worklog5743_4(a2(769)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +create index idx4 on worklog5743_4(a2(768)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +create index idx5 on worklog5743_4(a1, a2(765)); +ERROR 42000: Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 768 bytes +create index idx6 on worklog5743_4(a1, a2(764)); +show warnings; +Level Code Message +SET sql_mode= default; +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +begin; +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_1 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_1`.`a1` AS `a1`,left(`test`.`worklog5743_1`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_1` where (`test`.`worklog5743_1`.`a1` = 9) +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_2 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_2`.`a1` AS `a1`,left(`test`.`worklog5743_2`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_2` where (`test`.`worklog5743_2`.`a1` = 9) +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_4 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_4`.`a1` AS `a1`,left(`test`.`worklog5743_4`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_4` where (`test`.`worklog5743_4`.`a1` = 9) +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +rollback; +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; +### Test 5 ### +create table worklog5743(a1 int, a2 varchar(20000)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 3072 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(3072)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +ERROR 42000: Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 
768 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(769)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +ERROR 42000: Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 768 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(768)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 768)); +update worklog5743 set a1 = 3333; +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(765)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +ERROR 42000: Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 768 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(764)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 764)); +begin; +update worklog5743 set a1 = 4444; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1 from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx1 idx1 5 const 1 100.00 Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1 from worklog5743 where a1 = 9; +a1 +9 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1 from worklog5743 where a1 = 9; +a1 +rollback; +drop table worklog5743; +### Test 6 ### +create table worklog5743(a TEXT not null, primary key (a(1000))); +ERROR 42000: Specified key was too long; max key length is 768 bytes +create table worklog5743(a TEXT) ROW_FORMAT=COMPACT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx on worklog5743(a(767)); +insert into worklog5743 values(repeat("a", 20000)); +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +select a = repeat("a", 20000) from worklog5743; +a = repeat("a", 20000) +1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 25000) from worklog5743; +a = repeat("x", 25000) +1 +1 +rollback; +drop table worklog5743; +### Test 7 ### +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. +create index idx1 on worklog5743(a(769)); +Warnings: +Warning 1071 Specified key was too long; max key length is 768 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 768 bytes +SET sql_mode= default; +create index idx2 on worklog5743(a(768)); +Warnings: +Warning 1831 Duplicate index 'idx2' defined on the table 'test.worklog5743'. This is deprecated and will be disallowed in a future release. 
+show warnings; +Level Code Message +Warning 1831 Duplicate index 'idx2' defined on the table 'test.worklog5743'. This is deprecated and will be disallowed in a future release. +show create table worklog5743; +Table Create Table +worklog5743 CREATE TABLE `worklog5743` ( + `a` text NOT NULL, + KEY `idx1` (`a`(768)), + KEY `idx2` (`a`(768)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +insert into worklog5743 values(repeat("a", 768)); +drop table worklog5743; +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +SET GLOBAL innodb_file_per_table=1; diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix_8k.result b/mysql-test/suite/innodb_zip/r/index_large_prefix_8k.result new file mode 100644 index 00000000000..3176ffe2204 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/index_large_prefix_8k.result @@ -0,0 +1,442 @@ +SET default_storage_engine=InnoDB; +set global innodb_file_per_table=1; +### Test 1 ### +create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +insert into worklog5743 values(repeat("a", 20000)); +update worklog5743 set a = (repeat("b", 16000)); +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. +create index idx on worklog5743(a(2000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 1536 bytes +SET sql_mode= default; +begin; +update worklog5743 set a = (repeat("x", 17000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +0 +select a = repeat("b", 16000) from worklog5743; +a = repeat("b", 16000) +1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 17000) from worklog5743; +a = repeat("x", 17000) +1 +rollback; +drop table worklog5743; +### Test 2 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +Level Code Message +create index idx on worklog5743(a1, a2(1250)); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 1000; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx idx 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1`,(`test`.`worklog5743`.`a2` = repeat('a',10000)) AS `a2 = repeat("a", 10000)` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select 
@@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +rollback; +drop table worklog5743; +### Test 3 ### +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +create index idx on worklog5743(a1, a2(50)); +insert into worklog5743 values(9, repeat("a", 10000)); +begin; +update worklog5743 set a1 = 1000; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx idx 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1`,(`test`.`worklog5743`.`a2` = repeat('a',10000)) AS `a2 = repeat("a", 10000)` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +9 1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +a1 a2 = repeat("a", 10000) +rollback; +drop table worklog5743; +### Test 4 ### +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; +create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8; +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_1(a2(4000)); +ERROR 42000: Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_1(a2(4000)); +ERROR 42000: Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 1536 bytes +create index idx3 on worklog5743_1(a2(436)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_1(a2(434)); +show warnings; +Level Code Message +create index idx5 on worklog5743_1(a1, a2(430)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. 
You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. +create index idx1 on worklog5743_2(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_2(a2(4000)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 1536 bytes +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx3 on worklog5743_2(a2(948)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx4 on worklog5743_2(a2(946)); +show warnings; +Level Code Message +create index idx5 on worklog5743_2(a1, a2(942)); +ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +show warnings; +Level Code Message +Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 4030. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs +create index idx6 on worklog5743_2(a1, a2(940)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_4(a2(4000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx3 on worklog5743_4(a2(1537)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 1536 bytes +create index idx4 on worklog5743_4(a2(1536)); +Warnings: +Warning 1831 Duplicate index 'idx4' defined on the table 'test.worklog5743_4'. This is deprecated and will be disallowed in a future release. +show warnings; +Level Code Message +Warning 1831 Duplicate index 'idx4' defined on the table 'test.worklog5743_4'. This is deprecated and will be disallowed in a future release. +create index idx5 on worklog5743_4(a1, a2(1533)); +ERROR 42000: Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 1536 bytes +create index idx6 on worklog5743_4(a1, a2(1532)); +show warnings; +Level Code Message +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx1 on worklog5743_8(a2(1000)); +Warnings: +Warning 1071 Specified key was too long; max key length is 767 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 767 bytes +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create index idx2 on worklog5743_8(a2(3073)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 1536 bytes +create index idx3 on worklog5743_8(a2(3072)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Warning 1071 Specified key was too long; max key length is 1536 bytes +create index idx4 on worklog5743_8(a1, a2(1533)); +ERROR 42000: Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 1536 bytes +create index idx5 on worklog5743_8(a1, a2(1532)); +show warnings; +Level Code Message +SET sql_mode= default; +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); +insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000)); +set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000)); +set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_8; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +2 bbbbbbbbbbbbbbbbbbbb +begin; +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +update worklog5743_8 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_2; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_4; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select a1, left(a2, 20) from worklog5743_8; +a1 left(a2, 20) +1000 aaaaaaaaaaaaaaaaaaaa +1000 bbbbbbbbbbbbbbbbbbbb +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_1 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_1`.`a1` AS `a1`,left(`test`.`worklog5743_1`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_1` where (`test`.`worklog5743_1`.`a1` = 9) +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_2 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_2`.`a1` AS `a1`,left(`test`.`worklog5743_2`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_2` where (`test`.`worklog5743_2`.`a1` = 9) +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_4 NULL ref idx6 idx6 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_4`.`a1` AS `a1`,left(`test`.`worklog5743_4`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_4` where (`test`.`worklog5743_4`.`a1` = 9) +explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743_8 NULL ref idx5 idx5 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743_8`.`a1` AS `a1`,left(`test`.`worklog5743_8`.`a2`,20) AS `left(a2, 20)` from `test`.`worklog5743_8` where (`test`.`worklog5743_8`.`a1` = 9) +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +a1 left(a2, 20) +9 aaaaaaaaaaaaaaaaaaaa +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +a1 
left(a2, 20) +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +a1 left(a2, 20) +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +a1 left(a2, 20) +rollback; +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; +drop table worklog5743_8; +### Test 5 ### +create table worklog5743(a1 int, a2 varchar(20000)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +ERROR 42000: Specified key was too long; max key length is 3072 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(1537)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +ERROR 42000: Specified key was too long; max key length is 1536 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(1536)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 1536)); +update worklog5743 set a1 = 1000; +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(1533)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +ERROR 42000: Specified key was too long; max key length is 1536 bytes +show warnings; +Level Code Message +Error 1071 Specified key was too long; max key length is 1536 bytes +drop table worklog5743; +create table worklog5743(a1 int, a2 varchar(1532)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +show warnings; +Level Code Message +insert into worklog5743 values(9, repeat("a", 1532)); +update worklog5743 set a1 = 1000; +begin; +update worklog5743 set a1 = 1000; +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +explain select a1 from worklog5743 where a1 = 9; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE worklog5743 NULL ref idx1 idx1 5 const 1 100.00 Using index +Warnings: +Note 1003 /* select#1 */ select `test`.`worklog5743`.`a1` AS `a1` from `test`.`worklog5743` where (`test`.`worklog5743`.`a1` = 9) +select a1 from worklog5743 where a1 = 9; +a1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a1 from worklog5743 where a1 = 9; +a1 +rollback; +drop table worklog5743; +### Test 6 ### +create table worklog5743(a TEXT not null, primary key (a(1000))) +row_format=compact; +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create table worklog5743(a TEXT) row_format=compact; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx on worklog5743(a(767)); +insert into worklog5743 values(repeat("a", 20000)); +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); +select @@session.tx_isolation; +@@session.tx_isolation +REPEATABLE-READ +select a = repeat("a", 20000) from worklog5743; +a = repeat("a", 20000) +1 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +@@session.tx_isolation +READ-UNCOMMITTED +select a = repeat("x", 25000) from worklog5743; +a = repeat("x", 25000) +1 +1 +rollback; +drop table worklog5743; +### Test 7 ### +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; +SET sql_mode= ''; +Warnings: +Warning 3090 Changing sql mode 'NO_AUTO_CREATE_USER' is deprecated. It will be removed in a future release. 
+create index idx1 on worklog5743(a(3073)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +create index idx2 on worklog5743(a(3072)); +Warnings: +Warning 1071 Specified key was too long; max key length is 1536 bytes +SET sql_mode= default; +show create table worklog5743; +Table Create Table +worklog5743 CREATE TABLE `worklog5743` ( + `a` text NOT NULL, + KEY `idx1` (`a`(1536)), + KEY `idx2` (`a`(1536)) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +drop table worklog5743; +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +create index idx on worklog5743(a(768)); +ERROR HY000: Index column size too large. The maximum column size is 767 bytes. +create index idx2 on worklog5743(a(767)); +drop table worklog5743; +SET GLOBAL innodb_file_per_table=1; diff --git a/mysql-test/suite/innodb_zip/r/innochecksum.result b/mysql-test/suite/innodb_zip/r/innochecksum.result new file mode 100644 index 00000000000..694de4d9c83 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/innochecksum.result @@ -0,0 +1,82 @@ +# Set the environmental variables +call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); +call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed"); +SET GLOBAL innodb_file_per_table=on; +CREATE TABLE tab1(c1 INT PRIMARY KEY,c2 VARCHAR(20)) ENGINE=InnoDB; +CREATE INDEX idx1 ON tab1(c2(10)); +INSERT INTO tab1 VALUES(1, 'Innochecksum InnoDB1'); +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +insert into t1 values(1,"i"); +insert into t1 values(2,"am"); +insert into t1 values(3,"compressed table"); +# Shutdown the Server +# Server Default checksum = innodb +[1a]: check the innochecksum when file doesn't exists +[1b]: check the innochecksum without --strict-check +[2]: check the innochecksum with full form --strict-check=crc32 +[3]: check the innochecksum with short form -C crc32 +[4]: check the innochecksum with --no-check ignores algorithm check, warning is expected +[5]: check the innochecksum with short form --no-check ignores algorithm check, warning is expected +[6]: check the innochecksum with full form strict-check & no-check , an error is expected +[7]: check the innochecksum with short form strict-check & no-check , an error is expected +[8]: check the innochecksum with short & full form combination +# strict-check & no-check, an error is expected +[9]: check the innochecksum with full form --strict-check=innodb +[10]: check the innochecksum with full form --strict-check=none +# when server Default checksum=crc32 +[11]: check the innochecksum with short form -C innodb +# when server Default checksum=crc32 +[12]: check the innochecksum with short form -C none +# when server Default checksum=crc32 +[13]: check strict-check with invalid values +[14a]: when server default checksum=crc32 rewrite new checksum=crc32 with innochecksum +# Also check the long form of write option. +[14b]: when server default checksum=crc32 rewrite new checksum=innodb with innochecksum +# Also check the long form of write option. 
+# start the server with innodb_checksum_algorithm=InnoDB +# restart +INSERT INTO tab1 VALUES(2, 'Innochecksum CRC32'); +SELECT c1,c2 FROM tab1 order by c1,c2; +c1 c2 +1 Innochecksum InnoDB1 +2 Innochecksum CRC32 +# Stop the server +[15]: when server default checksum=crc32 rewrite new checksum=none with innochecksum +# Also check the short form of write option. +# Start the server with checksum algorithm=none +# restart +INSERT INTO tab1 VALUES(3, 'Innochecksum None'); +SELECT c1,c2 FROM tab1 order by c1,c2; +c1 c2 +1 Innochecksum InnoDB1 +2 Innochecksum CRC32 +3 Innochecksum None +DROP TABLE t1; +# Stop the server +[16]: rewrite into new checksum=crc32 with innochecksum +# Restart the DB server with innodb_checksum_algorithm=crc32 +# restart +SELECT * FROM tab1; +c1 c2 +1 Innochecksum InnoDB1 +2 Innochecksum CRC32 +3 Innochecksum None +DELETE FROM tab1 where c1=3; +SELECT c1,c2 FROM tab1 order by c1,c2; +c1 c2 +1 Innochecksum InnoDB1 +2 Innochecksum CRC32 +# Stop server +[17]: rewrite into new checksum=InnoDB +# Restart the DB server with innodb_checksum_algorithm=InnoDB +# restart +DELETE FROM tab1 where c1=2; +SELECT * FROM tab1; +c1 c2 +1 Innochecksum InnoDB1 +# Stop server +[18]:check Innochecksum with invalid write options +# Restart the server +# restart +DROP TABLE tab1; +SET GLOBAL innodb_file_per_table=default; diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_2.result b/mysql-test/suite/innodb_zip/r/innochecksum_2.result new file mode 100644 index 00000000000..0b6adaa33a2 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/innochecksum_2.result @@ -0,0 +1,140 @@ +SET GLOBAL innodb_compression_level=0; +SELECT @@innodb_compression_level; +@@innodb_compression_level +0 +CREATE TABLE t1 (j LONGBLOB) ENGINE = InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +INSERT INTO t1 VALUES (repeat('abcdefghijklmnopqrstuvwxyz',200)); +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +INSERT INTO t1 SELECT * from t1; +# stop the server +[1]:# check the both short and long options for "help" +[2]:# Run the innochecksum when file isn't provided. +# It will print the innochecksum usage similar to --help option. +innochecksum Ver #.#.# +Copyright (c) YEAR, YEAR , Oracle and/or its affiliates. All rights reserved. + +Oracle is a registered trademark of Oracle Corporation and/or its +affiliates. Other names may be trademarks of their respective +owners. + +InnoDB offline file checksum utility. +Usage: innochecksum [-c] [-s <start page>] [-e <end page>] [-p <page>] [-v] [-a <allow mismatches>] [-n] [-C <strict-check>] [-w <write>] [-S] [-D <page type dump>] [-l <log>] + -?, --help Displays this help and exits. + -I, --info Synonym for --help. + -V, --version Displays version information and exits. + -v, --verbose Verbose (prints progress every 5 seconds). + -c, --count Print the count of pages in the file and exits. + -s, --start-page=# Start on this page number (0 based). + -e, --end-page=# End at this page number (0 based). + -p, --page=# Check only this page (0 based). + -C, --strict-check=name + Specify the strict checksum algorithm by the user. + -n, --no-check Ignore the checksum verification. + -a, --allow-mismatches=# + Maximum checksum mismatch allowed. + -w, --write=name Rewrite the checksum algorithm by the user. + -S, --page-type-summary + Display a count of each page type in a tablespace. 
+ -D, --page-type-dump=name + Dump the page type info for each page in a tablespace. + -l, --log=name log output. + +Variables (--variable-name=value) +and boolean options {FALSE|TRUE} Value (after reading options) +--------------------------------- ---------------------------------------- +verbose FALSE +count FALSE +start-page 0 +end-page 0 +page 0 +strict-check crc32 +no-check FALSE +allow-mismatches 0 +write crc32 +page-type-summary FALSE +page-type-dump (No default value) +log (No default value) +[3]:# check the both short and long options for "count" and exit +Number of pages:# +Number of pages:# +[4]:# Print the version of innochecksum and exit +innochecksum Ver #.#.## Restart the DB server +# restart +DROP TABLE t1; +[5]:# Check the innochecksum for compressed table t1 with different key_block_size +# Test for KEY_BLOCK_SIZE=1 +===> Testing size=1 +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); +# restart +select * from t1; +id msg +1 I +2 AM +3 COMPRESSED +drop table t1; +# Test for KEY_BLOCK_SIZE=2 +===> Testing size=2 +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); +# restart +select * from t1; +id msg +1 I +2 AM +3 COMPRESSED +drop table t1; +# Test for for KEY_BLOCK_SIZE=4 +===> Testing size=4 +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); +# restart +select * from t1; +id msg +1 I +2 AM +3 COMPRESSED +drop table t1; +set innodb_strict_mode=off; +# Test for for KEY_BLOCK_SIZE=8 +===> Testing size=8 +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); +# restart +select * from t1; +id msg +1 I +2 AM +3 COMPRESSED +drop table t1; +set innodb_strict_mode=off; +# Test for KEY_BLOCK_SIZE=16 +===> Testing size=16 +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +insert into t1 values(1,"I"); +insert into t1 values(2,"AM"); +insert into t1 values(3,"COMPRESSED"); +# restart +select * from t1; +id msg +1 I +2 AM +3 COMPRESSED +drop table t1; +# Test[5] completed diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result new file mode 100644 index 00000000000..85058c41e04 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result @@ -0,0 +1,184 @@ +# Set the environmental variables +call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); +SET GLOBAL innodb_file_per_table=on; +[1]: Further Test are for rewrite checksum (innodb|crc32|none) for all ibd file & start the server. 
+CREATE TABLE tab1 (pk INTEGER NOT NULL PRIMARY KEY, +linestring_key GEOMETRY NOT NULL, +linestring_nokey GEOMETRY NOT NULL) +ENGINE=InnoDB ; +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (1, ST_GeomFromText('POINT(10 10) '), ST_GeomFromText('POINT(10 10) ')); +CREATE INDEX linestring_index ON tab1(linestring_nokey(5)); +ALTER TABLE tab1 ADD KEY (linestring_key(5)); +# create a compressed table +CREATE TABLE tab2(col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 int ) +engine = innodb row_format=compressed key_block_size=4; +CREATE INDEX idx1 ON tab2(col_3(10)); +CREATE INDEX idx2 ON tab2(col_4(10)); +CREATE INDEX idx3 ON tab2(col_5(10)); +SET @col_1 = repeat('a', 5); +SET @col_2 = repeat('b', 20); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,5); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,4); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,3); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,2); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,1); +SELECT * FROM tab2 ORDER BY col_7; +# stop the server +[1(a)]: Rewrite into new checksum=InnoDB for all *.ibd file and ibdata1 +: start the server with innodb_checksum_algorithm=strict_innodb +# restart +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (2, ST_GeomFromText('LINESTRING(10 10,20 20,30 30)'), ST_GeomFromText('LINESTRING(10 10,20 20,30 30)')); +SET @col_1 = repeat('a', 5); +SET @col_2 = repeat('b', 20); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,6); +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; +SELECT * FROM tab2 ORDER BY col_7; +# stop the server +[1(b)]: Rewrite into new checksum=crc32 for all *.ibd file and ibdata1 +# start the server with innodb_checksum_algorithm=strict_crc32 +# restart +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (3, ST_GeomFromText('POLYGON((0 0,5 5,10 10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))'), +ST_GeomFromText('POLYGON((0 0,5 5,10 10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))')); +SET @col_1 = repeat('g', 5); +SET @col_2 = repeat('h', 20); +SET @col_3 = repeat('i', 100); +SET @col_4 = repeat('j', 100); +SET @col_5 = repeat('k', 100); +SET @col_6 = repeat('l', 100); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,7); +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; +SELECT * FROM tab2 ORDER BY col_7; +# stop the server +[1(c)]: Rewrite into new checksum=none for all *.ibd file and ibdata1 +# restart: --innodb_checksum_algorithm=strict_none --default_storage_engine=InnoDB +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (4, ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) '), ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) ')); +SET @col_1 = repeat('m', 5); +SET @col_2 = repeat('n', 20); +SET 
@col_3 = repeat('o', 100); +SET @col_4 = repeat('p', 100); +SET @col_5 = repeat('q', 100); +SET @col_6 = repeat('r', 100); +INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,8); +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; +SELECT * FROM tab2 ORDER BY col_7; +# stop the server +[2]: Check the page type summary with shortform for tab1.ibd + +File::tab#.ibd +================PAGE TYPE SUMMARY============== +#PAGE_COUNT PAGE_TYPE +=============================================== + # Index page + # Undo log page + # Inode page + # Insert buffer free list page + # Freshly allocated page + # Insert buffer bitmap + # System page + # Transaction system page + # File Space Header + # Extent descriptor page + # BLOB page + # Compressed BLOB page + # Other type of page +=============================================== +Additional information: +Undo page type: # insert, # update, # other +Undo page state: # active, # cached, # to_free, # to_purge, # prepared, # other +[3]: Check the page type summary with longform for tab1.ibd + +File::tab#.ibd +================PAGE TYPE SUMMARY============== +#PAGE_COUNT PAGE_TYPE +=============================================== + # Index page + # Undo log page + # Inode page + # Insert buffer free list page + # Freshly allocated page + # Insert buffer bitmap + # System page + # Transaction system page + # File Space Header + # Extent descriptor page + # BLOB page + # Compressed BLOB page + # Other type of page +=============================================== +Additional information: +Undo page type: # insert, # update, # other +Undo page state: # active, # cached, # to_free, # to_purge, # prepared, # other +[4]: Page type dump for with longform for tab1.ibd +# Print the contents stored in dump.txt + + +Filename::tab#.ibd +============================================================================== + PAGE_NO | PAGE_TYPE | EXTRA INFO +============================================================================== +#:: # | File Space Header | - +#:: # | Insert Buffer Bitmap | - +#:: # | Inode page | - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Freshly allocated page | - +[5]: Page type dump for with shortform for tab1.ibd + + +Filename::tab#.ibd +============================================================================== + PAGE_NO | PAGE_TYPE | EXTRA INFO +============================================================================== +#:: # | File Space Header | - +#:: # | Insert Buffer Bitmap | - +#:: # | Inode page | - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Index page | index id=#, page level=#, No. of records=#, garbage=#, - +#:: # | Freshly allocated page | - +[6]: check the valid lower bound values for option +# allow-mismatches,page,start-page,end-page +[7]: check the negative values for option +# allow-mismatches,page,start-page,end-page. +# They will reset to zero for negative values. 
+# check the invalid lower bound values +[8]: check the valid upper bound values for +# both short and long options "allow-mismatches" and "end-page" +[9]: check the both short and long options "page" and "start-page" when +# seek value is larger than file size. +[34]: check the invalid upper bound values for options, allow-mismatches, end-page, start-page and page. +# innochecksum will fail with error code: 1 +# Restart the server +# restart: --innodb_checksum_algorithm=strict_none --default_storage_engine=InnoDB +DROP TABLE tab1; +DROP TABLE tab2; +SET GLOBAL innodb_file_per_table=default; diff --git a/mysql-test/suite/innodb_zip/r/innodb-create-options.result b/mysql-test/suite/innodb_zip/r/innodb-create-options.result index fe3d799229d..1b92eb71fba 100644 --- a/mysql-test/suite/innodb_zip/r/innodb-create-options.result +++ b/mysql-test/suite/innodb_zip/r/innodb-create-options.result @@ -1,5 +1,7 @@ SET default_storage_engine=InnoDB; SET GLOBAL innodb_file_format=`Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=ON; SET SESSION innodb_strict_mode = ON; # Test 1) StrictMode=ON, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0 @@ -45,7 +47,7 @@ SHOW WARNINGS; Level Code Message SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' SHOW WARNINGS; @@ -54,7 +56,7 @@ Warning 1478 InnoDB: invalid ROW_FORMAT specifier. Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE' SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic # Test 2) StrictMode=ON, CREATE with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE # KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC DROP TABLE IF EXISTS t1; @@ -244,7 +246,7 @@ SHOW WARNINGS; Level Code Message SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic ALTER TABLE t1 ROW_FORMAT=COMPACT; SHOW WARNINGS; Level Code Message @@ -264,6 +266,8 @@ Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB # and a valid non-zero KEY_BLOCK_SIZE are rejected with Antelope # and that they can be set to default values during strict mode. SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html DROP TABLE IF EXISTS t1; Warnings: Note 1051 Unknown table 'test.t1' @@ -324,15 +328,19 @@ Level Code Message Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope. Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT' SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html ALTER TABLE t1 ADD COLUMN f1 INT; Warnings: Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -348,6 +356,8 @@ ALTER TABLE t1 ADD COLUMN f2 INT; SHOW WARNINGS; Level Code Message SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html # Test 8) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and # and a valid non-zero KEY_BLOCK_SIZE are rejected with # innodb_file_per_table=OFF and that they can be set to default @@ -427,7 +437,7 @@ SHOW WARNINGS; Level Code Message SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic SET GLOBAL innodb_file_per_table=ON; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; @@ -451,13 +461,13 @@ SET SESSION innodb_strict_mode = OFF; DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED; Warnings: -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SHOW WARNINGS; Level Code Message -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact row_format=FIXED +t1 Dynamic row_format=FIXED DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0; SHOW WARNINGS; @@ -488,16 +498,16 @@ SHOW WARNINGS; Level Code Message SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; Warnings: -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SHOW WARNINGS; Level Code Message -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact row_format=FIXED +t1 Dynamic row_format=FIXED # Test 10) StrictMode=OFF, CREATE with each ROW_FORMAT & a valid KEY_BLOCK_SIZE # KEY_BLOCK_SIZE is ignored with COMPACT, REDUNDANT, & DYNAMIC DROP TABLE IF EXISTS t1; @@ -562,14 +572,14 @@ CREATE TABLE t1 ( i INT ); ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1; Warnings: Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. 
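Most of the Compact -> Dynamic changes in this result file follow from DYNAMIC being the default InnoDB row format in 5.7: when strict mode is off and a requested ROW_FORMAT or KEY_BLOCK_SIZE cannot be honoured, InnoDB now falls back to DYNAMIC where it used to fall back to COMPACT. A minimal sketch of that behaviour (the table name is illustrative):

SET SESSION innodb_strict_mode = OFF;
# FIXED is not supported by InnoDB, so the format is silently replaced.
CREATE TABLE fallback_demo (i INT) ENGINE=InnoDB ROW_FORMAT=FIXED;
# Expected: Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC.
SHOW WARNINGS;
SELECT TABLE_NAME, ROW_FORMAT, CREATE_OPTIONS
FROM information_schema.tables WHERE TABLE_NAME = 'fallback_demo';
DROP TABLE fallback_demo;
SET SESSION innodb_strict_mode = DEFAULT;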
SHOW WARNINGS; Level Code Message Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact row_format=FIXED key_block_size=1 +t1 Dynamic row_format=FIXED key_block_size=1 DROP TABLE IF EXISTS t1; CREATE TABLE t1 ( i INT ); ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2; @@ -728,7 +738,7 @@ SHOW WARNINGS; Level Code Message SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact +t1 Dynamic ALTER TABLE t1 ROW_FORMAT=COMPACT; SHOW WARNINGS; Level Code Message @@ -746,7 +756,7 @@ Level Code Message Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact key_block_size=15 +t1 Dynamic key_block_size=15 # Test 15) StrictMode=OFF, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and a valid KEY_BLOCK_SIZE are remembered but not used when ROW_FORMAT is reverted to Antelope and then used again when ROW_FORMAT=Barracuda. @@ -758,22 +768,26 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME ROW_FORMAT CREATE_OPTIONS t1 Compressed row_format=COMPRESSED key_block_size=1 SET GLOBAL innodb_file_format=Antelope; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html ALTER TABLE t1 ADD COLUMN f1 INT; Warnings: Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1. Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SHOW WARNINGS; Level Code Message Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1. Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact row_format=COMPRESSED key_block_size=1 +t1 Dynamic row_format=COMPRESSED key_block_size=1 SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html ALTER TABLE t1 ADD COLUMN f2 INT; SHOW WARNINGS; Level Code Message @@ -788,18 +802,17 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME ROW_FORMAT CREATE_OPTIONS t1 Dynamic row_format=DYNAMIC SET GLOBAL innodb_file_format=Antelope; -ALTER TABLE t1 ADD COLUMN f1 INT; Warnings: -Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +ALTER TABLE t1 ADD COLUMN f1 INT; SHOW WARNINGS; Level Code Message -Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; TABLE_NAME ROW_FORMAT CREATE_OPTIONS -t1 Compact row_format=DYNAMIC +t1 Dynamic row_format=DYNAMIC SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html ALTER TABLE t1 ADD COLUMN f2 INT; SHOW WARNINGS; Level Code Message @@ -853,3 +866,5 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS t1 Dynamic row_format=DYNAMIC # Cleanup DROP TABLE IF EXISTS t1; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb_zip/r/innodb-zip.result b/mysql-test/suite/innodb_zip/r/innodb-zip.result index 318f65d49c3..47215a1e077 100644 --- a/mysql-test/suite/innodb_zip/r/innodb-zip.result +++ b/mysql-test/suite/innodb_zip/r/innodb-zip.result @@ -9,11 +9,13 @@ SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata; set session innodb_strict_mode=0; set global innodb_file_per_table=off; set global innodb_file_format=`0`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET @@global.innodb_stats_on_metadata=ON; create table t0(a int primary key) engine=innodb row_format=compressed; Warnings: Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. create table t00(a int primary key) engine=innodb key_block_size=4 row_format=compressed; Warnings: @@ -21,11 +23,8 @@ Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4. Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. create table t1(a int primary key) engine=innodb row_format=dynamic; -Warnings: -Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. create table t2(a int primary key) engine=innodb row_format=redundant; create table t3(a int primary key) engine=innodb row_format=compact; create table t4(a int primary key) engine=innodb key_block_size=9; @@ -46,6 +45,8 @@ Warnings: Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1. set global innodb_file_format=`1`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t7(a int primary key) engine=innodb key_block_size=1 row_format=redundant; Warnings: @@ -54,7 +55,7 @@ create table t8(a int primary key) engine=innodb key_block_size=1 row_format=fixed; Warnings: Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED. -Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT. +Warning 1478 InnoDB: assuming ROW_FORMAT=DYNAMIC. create table t9(a int primary key) engine=innodb key_block_size=1 row_format=compact; Warnings: @@ -74,21 +75,21 @@ Warnings: Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=9. SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql'; table_schema table_name row_format data_length index_length -mysqltest_innodb_zip t0 Compact {valid} 0 -mysqltest_innodb_zip t00 Compact {valid} 0 -mysqltest_innodb_zip t1 Compact {valid} 0 +mysqltest_innodb_zip t0 Dynamic {valid} 0 +mysqltest_innodb_zip t00 Dynamic {valid} 0 +mysqltest_innodb_zip t1 Dynamic {valid} 0 mysqltest_innodb_zip t10 Dynamic {valid} 0 mysqltest_innodb_zip t11 Compressed 1024 0 mysqltest_innodb_zip t12 Compressed 1024 0 mysqltest_innodb_zip t13 Compressed {valid} 0 -mysqltest_innodb_zip t14 Compact {valid} 0 +mysqltest_innodb_zip t14 Dynamic {valid} 0 mysqltest_innodb_zip t2 Redundant {valid} 0 mysqltest_innodb_zip t3 Compact {valid} 0 -mysqltest_innodb_zip t4 Compact {valid} 0 +mysqltest_innodb_zip t4 Dynamic {valid} 0 mysqltest_innodb_zip t5 Redundant {valid} 0 mysqltest_innodb_zip t6 Redundant {valid} 0 mysqltest_innodb_zip t7 Redundant {valid} 0 -mysqltest_innodb_zip t8 Compact {valid} 0 +mysqltest_innodb_zip t8 Dynamic {valid} 0 mysqltest_innodb_zip t9 Compact {valid} 0 drop table t0,t00,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14; alter table t1 key_block_size=0; @@ -138,7 +139,7 @@ mysqltest_innodb_zip.t2 analyze status OK SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql'; table_schema table_name row_format data_length index_length mysqltest_innodb_zip t1 Compressed 2048 1024 -mysqltest_innodb_zip t2 Compact {valid} 0 +mysqltest_innodb_zip t2 Dynamic {valid} 0 drop table t1,t2; create table t1( c1 int not null, c2 blob, c3 blob, c4 blob, primary key(c1, c2(22), c3(22))) @@ -160,10 +161,14 @@ count(*) update t1 set c3 = repeat('E', 20000) where c1 = 1; drop table t1; set global innodb_file_format=`0`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format; @@innodb_file_format Antelope set global innodb_file_format=`1`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format; @@innodb_file_format Barracuda @@ -172,7 +177,11 @@ ERROR 42000: Variable 'innodb_file_format' can't be set to the value of '2' set global innodb_file_format=`-1`; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of '-1' set global innodb_file_format=`Antelope`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format=`Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format=`Cheetah`; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of 'Cheetah' set global innodb_file_format=`abc`; @@ -183,6 +192,8 @@ set global innodb_file_format=``; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of '' set global innodb_file_per_table = on; set global innodb_file_format = `1`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set innodb_strict_mode = off; create table t1 (id int primary key) engine = innodb key_block_size = 0; drop table t1; @@ -204,7 +215,7 @@ create table t10(id int primary key) engine = innodb row_format = compact; create table t11(id int primary key) engine = innodb row_format = redundant; SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql'; table_schema table_name row_format data_length index_length -mysqltest_innodb_zip t1 Compact {valid} 0 +mysqltest_innodb_zip t1 Dynamic {valid} 0 mysqltest_innodb_zip t10 Compact {valid} 0 mysqltest_innodb_zip t11 Redundant {valid} 0 mysqltest_innodb_zip t3 Compressed 1024 0 @@ -320,6 +331,8 @@ mysqltest_innodb_zip t9 Redundant {valid} 0 drop table t8, t9; set global innodb_file_per_table = on; set global innodb_file_format = `0`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t1 (id int primary key) engine = innodb key_block_size = 1; ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options") show warnings; @@ -363,16 +376,22 @@ mysqltest_innodb_zip t8 Compact {valid} 0 mysqltest_innodb_zip t9 Redundant {valid} 0 drop table t8, t9; set global innodb_file_per_table=1; -set global innodb_file_format=Antelope; +set global innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table=on; set global innodb_file_format=`Barracuda`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_format_max=`Antelope`; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table normal_table ( c1 int ) engine = innodb; select @@innodb_file_format_max; @@innodb_file_format_max -Antelope +Barracuda create table zip_table ( c1 int ) engine = innodb key_block_size = 4; @@ -380,6 +399,8 @@ select @@innodb_file_format_max; @@innodb_file_format_max Barracuda set global innodb_file_format_max=`Antelope`; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@innodb_file_format_max; @@innodb_file_format_max Antelope diff --git a/mysql-test/suite/innodb_zip/r/innodb_bug36169.result b/mysql-test/suite/innodb_zip/r/innodb_bug36169.result index aa80e4d7aa4..161cef10ad5 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_bug36169.result +++ b/mysql-test/suite/innodb_zip/r/innodb_bug36169.result @@ -1,2 +1,5 @@ +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size .* for a record on index leaf page."); SET GLOBAL innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=ON; diff --git a/mysql-test/suite/innodb_zip/r/innodb_bug52745.result b/mysql-test/suite/innodb_zip/r/innodb_bug52745.result index f4393e8fae0..5ba26753ea6 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_bug52745.result +++ b/mysql-test/suite/innodb_zip/r/innodb_bug52745.result @@ -1,4 +1,6 @@ SET GLOBAL innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=on; CREATE TABLE bug52745 ( a2 int(10) unsigned DEFAULT NULL, @@ -126,5 +128,7 @@ Warning 1264 Out of range value for column 'col78' at row 1 Warning 1265 Data truncated for column 'col79' at row 1 Warning 1264 Out of range value for column 'col84' at row 1 DROP TABLE bug52745; -SET GLOBAL innodb_file_format=Antelope; +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=1; diff --git a/mysql-test/suite/innodb_zip/r/innodb_bug53591.result b/mysql-test/suite/innodb_zip/r/innodb_bug53591.result index dbebb9d2d33..3b10942c2de 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_bug53591.result +++ b/mysql-test/suite/innodb_zip/r/innodb_bug53591.result @@ -1,5 +1,8 @@ SET GLOBAL innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_strict_mode=on; set old_alter_table=0; CREATE TABLE bug53591(a text charset utf8 not null) ENGINE=InnoDB KEY_BLOCK_SIZE=1; @@ -9,5 +12,8 @@ SHOW WARNINGS; Level Code Message Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs DROP TABLE bug53591; -SET GLOBAL innodb_file_format=Antelope; +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=1; +SET GLOBAL innodb_strict_mode=DEFAULT; diff --git a/mysql-test/suite/innodb_zip/r/innodb_bug56680.result b/mysql-test/suite/innodb_zip/r/innodb_bug56680.result index 40c39d21243..92b589c6b7e 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_bug56680.result +++ b/mysql-test/suite/innodb_zip/r/innodb_bug56680.result @@ -1,5 +1,7 @@ SET GLOBAL tx_isolation='REPEATABLE-READ'; SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=on; CREATE TABLE bug56680( a INT AUTO_INCREMENT PRIMARY KEY, @@ -119,3 +121,5 @@ Table Op Msg_type Msg_text test.bug56680_2 check status OK DROP TABLE bug56680_2; DROP TABLE bug56680; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb_zip/r/innodb_cmp_drop_table.result b/mysql-test/suite/innodb_zip/r/innodb_cmp_drop_table.result index 1f6d6948756..11e90b9e7d6 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_cmp_drop_table.result +++ b/mysql-test/suite/innodb_zip/r/innodb_cmp_drop_table.result @@ -1,5 +1,7 @@ set global innodb_file_per_table=on; set global innodb_file_format=`1`; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create table t1(a text) engine=innodb key_block_size=8; SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0; page_size @@ -12,3 +14,5 @@ create table t2(a text) engine=innodb; SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0; page_size drop table t2; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html diff --git a/mysql-test/suite/innodb_zip/r/innodb_index_large_prefix.result b/mysql-test/suite/innodb_zip/r/innodb_index_large_prefix.result index f11988034ad..f69fa5c7659 100644 --- a/mysql-test/suite/innodb_zip/r/innodb_index_large_prefix.result +++ b/mysql-test/suite/innodb_zip/r/innodb_index_large_prefix.result @@ -1,7 +1,13 @@ SET default_storage_engine=InnoDB; +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); set global innodb_file_format="Barracuda"; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html set global innodb_file_per_table=1; set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_strict_mode=1; ### Test 1 ### create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC; show warnings; @@ -97,6 +103,8 @@ create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8; create table worklog5743_16(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=16; set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx1 on worklog5743_1(a2(4000)); ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs show warnings; @@ -104,6 +112,8 @@ Level Code Message Warning 1071 Specified key was too long; max key length is 767 bytes Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx2 on worklog5743_1(a2(4000)); ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs show warnings; @@ -127,6 +137,8 @@ create index idx6 on worklog5743_1(a1, a2(428)); show warnings; Level Code Message set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx1 on worklog5743_2(a2(4000)); Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes @@ -134,6 +146,8 @@ show warnings; Level Code Message Warning 1071 Specified key was too long; max key length is 767 bytes set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx2 on worklog5743_2(a2(4000)); ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs show warnings; @@ -157,6 +171,8 @@ create index idx6 on worklog5743_2(a1, a2(940)); show warnings; Level Code Message set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx1 on worklog5743_4(a2(4000)); Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes @@ -164,6 +180,8 @@ show warnings; Level Code Message Warning 1071 Specified key was too long; max key length is 767 bytes set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx2 on worklog5743_4(a2(4000)); ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs show warnings; @@ -187,6 +205,8 @@ create index idx6 on worklog5743_4(a1, a2(1964)); show warnings; Level Code Message set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx1 on worklog5743_8(a2(1000)); Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes @@ -194,6 +214,8 @@ show warnings; Level Code Message Warning 1071 Specified key was too long; max key length is 767 bytes set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx2 on worklog5743_8(a2(3073)); Warnings: Warning 1071 Specified key was too long; max key length is 3072 bytes @@ -223,6 +245,8 @@ create index idx7 on worklog5743_8(a1, a2(2000), a3(1068)); show warnings; Level Code Message set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx1 on worklog5743_16(a2(1000)); Warnings: Warning 1071 Specified key was too long; max key length is 767 bytes @@ -230,6 +254,8 @@ show warnings; Level Code Message Warning 1071 Specified key was too long; max key length is 767 bytes set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html create index idx2 on worklog5743_16(a2(3073)); Warnings: Warning 1071 Specified key was too long; max key length is 3072 bytes @@ -264,12 +290,16 @@ insert into worklog5743_4 values(9, repeat("a", 10000)); insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000)); insert into worklog5743_16 values(9, repeat("a", 10000), repeat("a", 10000)); set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html insert into worklog5743_1 values(2, repeat("b", 10000)); insert into worklog5743_2 values(2, repeat("b", 10000)); insert into worklog5743_4 values(2, repeat("b", 10000)); insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000)); insert into worklog5743_16 values(2, repeat("b", 10000), repeat("b", 10000)); set global innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select a1, left(a2, 20) from worklog5743_1; a1 left(a2, 20) 9 aaaaaaaaaaaaaaaaaaaa @@ -444,9 +474,9 @@ connection default; rollback; drop table worklog5743; ### Test 6 ### -create table worklog5743(a TEXT not null, primary key (a(1000))); +create table worklog5743(a TEXT not null, primary key (a(1000))) row_format=COMPACT; ERROR HY000: Index column size too large. 
The maximum column size is 767 bytes. -create table worklog5743(a TEXT); +create table worklog5743(a TEXT) row_format=COMPACT; create index idx on worklog5743(a(768)); ERROR HY000: Index column size too large. The maximum column size is 767 bytes. create index idx on worklog5743(a(767)); @@ -499,9 +529,14 @@ create index idx on worklog5743(a(768)); ERROR HY000: Index column size too large. The maximum column size is 767 bytes. create index idx2 on worklog5743(a(767)); drop table worklog5743; -SET GLOBAL innodb_file_format=Antelope; +SET GLOBAL innodb_file_format=Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SET GLOBAL innodb_file_per_table=1; -SET GLOBAL innodb_large_prefix=0; +SET GLOBAL innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET GLOBAL innodb_strict_mode = DEFAULT; connection con1; disconnect con1; connection con2; diff --git a/mysql-test/suite/innodb_zip/r/innodb_prefix_index_liftedlimit.result b/mysql-test/suite/innodb_zip/r/innodb_prefix_index_liftedlimit.result deleted file mode 100644 index 7d52ab135b3..00000000000 --- a/mysql-test/suite/innodb_zip/r/innodb_prefix_index_liftedlimit.result +++ /dev/null @@ -1,1396 +0,0 @@ -set global innodb_file_format="Barracuda"; -set global innodb_file_per_table=1; -set global innodb_large_prefix=1; -DROP TABLE IF EXISTS worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -COLUMN_NAME INDEX_NAME SUB_PART INDEX_TYPE -col_1_varchar PRIMARY 3072 BTREE -col_1_varchar prefix_idx 3072 BTREE -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -col_1_varchar = REPEAT("c", 4000) -0 -1 -ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT; -ERROR HY000: Index column size too large. The maximum column size is 767 bytes. -ALTER TABLE worklog5743 ROW_FORMAT=COMPACT; -ERROR HY000: Index column size too large. The maximum column size is 767 bytes. 
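The row_format=COMPACT clauses added in Test 6 above, and the prefix-index coverage removed here, both revolve around the same limit: REDUNDANT and COMPACT tables cap an index column prefix at 767 bytes, while DYNAMIC and COMPRESSED tables accept prefixes up to 3072 bytes when innodb_large_prefix is enabled. A small sketch of the contrast, assuming innodb_strict_mode=ON and innodb_large_prefix=ON (table and index names are illustrative):

CREATE TABLE prefix_compact (a TEXT NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT;
# 768-byte prefix exceeds the COMPACT limit of 767 bytes.
--error ER_INDEX_COLUMN_TOO_LONG
CREATE INDEX idx_768 ON prefix_compact (a(768));
CREATE INDEX idx_767 ON prefix_compact (a(767));
CREATE TABLE prefix_dynamic (a TEXT NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
# Accepted with innodb_large_prefix=ON: up to a 3072-byte prefix.
CREATE INDEX idx_3072 ON prefix_dynamic (a(3072));
DROP TABLE prefix_compact, prefix_dynamic;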
-ALTER TABLE worklog5743 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (3072)); -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -COLUMN_NAME INDEX_NAME SUB_PART INDEX_TYPE -col_1_text PRIMARY 3072 BTREE -col_1_text prefix_idx 3072 BTREE -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743; -col_1_text = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_mediumtext MEDIUMTEXT , col_2_mediumtext MEDIUMTEXT , -PRIMARY KEY (col_1_mediumtext(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_mediumtext (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_mediumtext = REPEAT("a", 4000),col_2_mediumtext = REPEAT("o", 4000) -FROM worklog5743; -col_1_mediumtext = REPEAT("a", 4000) col_2_mediumtext = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_mediumtext = REPEAT("c", 4000) -WHERE col_1_mediumtext = REPEAT("a", 4000) -AND col_2_mediumtext = REPEAT("o", 4000); -SELECT col_1_mediumtext = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_mediumtext = REPEAT("c", 4000) -AND col_2_mediumtext = REPEAT("o", 4000); -col_1_mediumtext = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_mediumtext = REPEAT("b", 4000); -SELECT col_1_mediumtext = REPEAT("c", 4000) FROM worklog5743; -col_1_mediumtext = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_longtext LONGTEXT , col_2_longtext LONGTEXT , -PRIMARY KEY (col_1_longtext(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_longtext (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_longtext = REPEAT("a", 4000) , col_2_longtext = REPEAT("o", 4000) -FROM worklog5743; -col_1_longtext = REPEAT("a", 4000) col_2_longtext = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_longtext = REPEAT("c", 4000) -WHERE col_1_longtext = REPEAT("a", 4000) -AND col_2_longtext = REPEAT("o", 4000); -SELECT col_1_longtext = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_longtext = REPEAT("c", 4000) -AND col_2_longtext = REPEAT("o", 4000); -col_1_longtext = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_longtext = 
REPEAT("b", 4000); -SELECT col_1_longtext = REPEAT("c", 4000) FROM worklog5743; -col_1_longtext = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_blob (3072)); -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -COLUMN_NAME INDEX_NAME SUB_PART INDEX_TYPE -col_1_blob PRIMARY 3072 BTREE -col_1_blob prefix_idx 3072 BTREE -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743; -col_1_blob = REPEAT("a", 4000) col_2_blob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -col_1_blob = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743; -col_1_blob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_mediumblob MEDIUMBLOB , col_2_mediumblob MEDIUMBLOB , -PRIMARY KEY (col_1_mediumblob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_mediumblob (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_mediumblob = REPEAT("a", 4000),col_2_mediumblob = REPEAT("o", 4000) -FROM worklog5743; -col_1_mediumblob = REPEAT("a", 4000) col_2_mediumblob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_mediumblob = REPEAT("c", 4000) -WHERE col_1_mediumblob = REPEAT("a", 4000) -AND col_2_mediumblob = REPEAT("o", 4000); -SELECT col_1_mediumblob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_mediumblob = REPEAT("c", 4000) -AND col_2_mediumblob = REPEAT("o", 4000); -col_1_mediumblob = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_mediumblob = REPEAT("b", 4000); -SELECT col_1_mediumblob = REPEAT("c", 4000) FROM worklog5743; -col_1_mediumblob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_longblob LONGBLOB , col_2_longblob LONGBLOB , -PRIMARY KEY (col_1_longblob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_longblob (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_longblob = REPEAT("a", 4000) , col_2_longblob = REPEAT("o", 4000) -FROM worklog5743; -col_1_longblob = REPEAT("a", 4000) col_2_longblob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_longblob = REPEAT("c", 4000) -WHERE col_1_longblob = REPEAT("a", 4000) -AND col_2_longblob = REPEAT("o", 4000); -SELECT col_1_longblob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_longblob = REPEAT("c", 4000) -AND col_2_longblob = REPEAT("o", 4000); -col_1_longblob = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); 
-DELETE FROM worklog5743 WHERE col_1_longblob = REPEAT("b", 4000); -SELECT col_1_longblob = REPEAT("c", 4000) FROM worklog5743; -col_1_longblob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varbinary VARBINARY (4000) , -PRIMARY KEY (col_1_varbinary(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varbinary = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varbinary = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 (col_1_char CHAR (255) , col_2_char CHAR (255), -col_3_char CHAR (255), col_4_char CHAR (255),col_5_char CHAR (255), -col_6_char CHAR (255), col_7_char CHAR (255),col_8_char CHAR (255), -col_9_char CHAR (255), col_10_char CHAR (255),col_11_char CHAR (255), -col_12_char CHAR (255), col_13_char CHAR (255),col_14_char CHAR (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -CREATE INDEX prefix_idx ON worklog5743(col_1_char(250),col_2_char(250), -col_3_char(250),col_4_char(250),col_5_char(250),col_6_char(250), -col_7_char(250),col_8_char(250),col_9_char(250),col_10_char(250), -col_11_char(250),col_12_char(250),col_13_char(72) -); -INSERT INTO worklog5743 VALUES(REPEAT("b", 255) , REPEAT("p", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -SELECT col_1_char = REPEAT("a", 255) , col_2_char = REPEAT("o", 255) FROM worklog5743; -col_1_char = REPEAT("a", 255) col_2_char = REPEAT("o", 255) -1 1 -0 0 -UPDATE worklog5743 SET col_1_char = REPEAT("c", 255) -WHERE col_1_char = REPEAT("a", 255) AND col_2_char = REPEAT("o", 255); -SELECT col_1_char = REPEAT("c", 255) FROM worklog5743 -WHERE col_1_char = REPEAT("c", 255) AND col_2_char = REPEAT("o", 255); -col_1_char = REPEAT("c", 255) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_char = REPEAT("b", 255); -SELECT col_1_char = REPEAT("c", 255) FROM worklog5743; -col_1_char = REPEAT("c", 255) -1 -0 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 
(col_1_binary BINARY (255) , col_2_binary BINARY (255), -col_3_binary BINARY(255),col_4_binary BINARY (255),col_5_binary BINARY (255), -col_6_binary BINARY(255),col_7_binary BINARY (255),col_8_binary BINARY (255), -col_9_binary BINARY(255),col_10_binary BINARY (255),col_11_binary BINARY (255), -col_12_binary BINARY(255),col_13_binary BINARY (255),col_14_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -CREATE INDEX prefix_idx ON worklog5743(col_1_binary (250),col_2_binary (250), -col_3_binary (250),col_4_binary (250),col_5_binary (250), -col_6_binary (250),col_7_binary (250),col_8_binary (250), -col_9_binary (250),col_10_binary (250),col_11_binary (250), -col_12_binary (250),col_13_binary (72) -); -INSERT INTO worklog5743 VALUES(REPEAT("b", 255) , REPEAT("p", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -SELECT col_1_binary = REPEAT("a", 255) , col_2_binary = REPEAT("o", 255) FROM worklog5743; -col_1_binary = REPEAT("a", 255) col_2_binary = REPEAT("o", 255) -1 1 -0 0 -UPDATE worklog5743 SET col_1_binary = REPEAT("c", 255) -WHERE col_1_binary = REPEAT("a", 255) -AND col_2_binary = REPEAT("o", 255); -SELECT col_1_binary = REPEAT("c", 255) FROM worklog5743 -WHERE col_1_binary = REPEAT("c", 255) -AND col_2_binary = REPEAT("o", 255); -col_1_binary = REPEAT("c", 255) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_binary = REPEAT("b", 255); -SELECT col_1_binary = REPEAT("c", 255) FROM worklog5743; -col_1_binary = REPEAT("c", 255) -1 -0 -DROP TABLE worklog5743; -CREATE TABLE worklog5743_key2 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key2; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key2 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key2 -WHERE col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key2; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key2; -CREATE TABLE worklog5743_key4 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT 
INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key4; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key4 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key4; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key4; -CREATE TABLE worklog5743_key8 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key8; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key8 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key8; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key8; -CREATE TABLE worklog5743_key2 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743_key2; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key2 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key2 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key2; -col_1_text = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key2; -CREATE TABLE worklog5743_key4 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM 
worklog5743_key4; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key4 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key4; -col_1_text = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key4; -CREATE TABLE worklog5743_key8 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743_key8; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key8 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key8; -col_1_text = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key8; -CREATE TABLE worklog5743_key2 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key2; -col_1_blob = REPEAT("a", 4000) col_2_blob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key2 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key2 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -col_1_blob = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key2; -col_1_blob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key2; -CREATE TABLE worklog5743_key4 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key4; -col_1_blob = REPEAT("a", 4000) col_2_blob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key4 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_blob = REPEAT("c", 4000) AND 
col_2_blob = REPEAT("o", 4000); -col_1_blob = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key4; -col_1_blob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key4; -CREATE TABLE worklog5743_key8 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key8; -col_1_blob = REPEAT("a", 4000) col_2_blob = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743_key8 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -col_1_blob = REPEAT("b", 3500) -0 -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key8; -col_1_blob = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743_key8; -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) , -col_3_text TEXT (4000), col_4_blob BLOB (4000), col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072)); -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("c", 4000) -1 -0 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072)); -CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072)); -CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072)); -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -ERROR HY000: Undo log record is too big. 
-SHOW WARNINGS; -Level Code Message -Error 1713 Undo log record is too big. -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) , -col_3_text TEXT (4000), col_4_blob BLOB (4000),col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072)); -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072)); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072)); -CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072)); -CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072)); -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -ROLLBACK; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -COMMIT; -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -ROLLBACK; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -ERROR HY000: Undo log record is too big. -SHOW WARNINGS; -Level Code Message -Error 1713 Undo log record is too big. 
-SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("c", 4000) -0 -0 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) CHARACTER SET 'utf8', -col_2_text TEXT (4000) CHARACTER SET 'utf8', -PRIMARY KEY (col_1_text(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1024)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) FROM worklog5743; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743; -col_1_text = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 (col_1_varchar VARCHAR (4000) CHARACTER SET 'utf8', -col_2_varchar VARCHAR (4000) CHARACTER SET 'utf8' , -PRIMARY KEY (col_1_varchar(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ERROR 42000: Row size too large (> max_row_size). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline. 
-CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , -col_2_varchar VARCHAR (4000) CHARACTER SET 'utf8', -col_3_text TEXT (4000) CHARACTER SET 'utf8', -col_4_blob BLOB (4000),col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (500)); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (500)); -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -ROLLBACK; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -COMMIT; -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -ROLLBACK; -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("c", 4000) -0 -0 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) CHARACTER SET 'utf8', -col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("स", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1024)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("स", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743; -col_1_text = REPEAT("स", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_text = REPEAT("क", 4000) -WHERE col_1_text = REPEAT("स", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("क", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("क", 4000) -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("क", 4000) FROM worklog5743; -col_1_text = REPEAT("क", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -connect con1,localhost,root,,; -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) 
col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -connect con2,localhost,root,,; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -0 1 -connection con1; -select @@session.tx_isolation; -@@session.tx_isolation -REPEATABLE-READ -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -0 1 -SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -select @@session.tx_isolation; -@@session.tx_isolation -READ-UNCOMMITTED -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -0 1 -1 1 -SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; -START TRANSACTION; -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -connection con2; -COMMIT; -connection con1; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -0 1 -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -COMMIT; -connection default; -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -connection con1; -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -START TRANSACTION; -connection con2; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("a", 200); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -0 1 -COMMIT; -connection con1; -SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -select @@session.tx_isolation; -@@session.tx_isolation -READ-UNCOMMITTED -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -1 1 -SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -COMMIT; -connection default; -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) 
col_2_text = REPEAT("o", 200) -1 1 -connection con1; -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -1 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -START TRANSACTION; -connection con2; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("a", 200); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("a", 200) col_2_text = REPEAT("o", 200) -0 1 -ROLLBACK; -connection con1; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -col_1_text = REPEAT("b", 200) col_2_text = REPEAT("o", 200) -0 1 -SELECT COUNT(*) FROM worklog5743; -COUNT(*) -1 -COMMIT; -disconnect con1; -connection con2; -disconnect con2; -connection default; -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -col_1_varchar = REPEAT("c", 4000) -0 -0 -1 -SELECT tbl1.col_1_varchar = tbl2.col_1_varchar -FROM worklog5743 tbl1 , worklog5743 tbl2 -WHERE tbl1.col_1_varchar = tbl2.col_1_varchar ; -tbl1.col_1_varchar = tbl2.col_1_varchar -1 -1 -1 -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) ; -tbl1.col_1_varchar = REPEAT("c", 4000) -0 -0 -1 -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar NOT IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) ; -tbl1.col_1_varchar = REPEAT("c", 4000) -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 WHERE -col_1_varchar IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) -AND col_1_varchar = REPEAT("c", 4000); -tbl1.col_1_varchar = REPEAT("c", 4000) -1 -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar in ( -SELECT tbl2.col_1_varchar FROM worklog5743 tbl2 -WHERE tbl1.col_1_varchar != tbl2.col_1_varchar -) ; -tbl1.col_1_varchar = REPEAT("c", 4000) -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar in ( -SELECT tbl2.col_1_varchar FROM worklog5743 tbl2 -WHERE tbl1.col_1_varchar = tbl2.col_1_varchar -) ; -tbl1.col_1_varchar = REPEAT("c", 4000) -0 -0 -1 -SELECT -REVERSE(col_1_varchar) = REPEAT("c", 4000) , -REVERSE(REVERSE(col_1_varchar)) = REPEAT("c", 4000) -FROM worklog5743; -REVERSE(col_1_varchar) = REPEAT("c", 4000) REVERSE(REVERSE(col_1_varchar)) = REPEAT("c", 4000) -0 0 -0 0 -1 1 -SELECT -UPPER(col_1_varchar) = REPEAT("c", 4000) , 
-UPPER(col_1_varchar) = REPEAT("C", 4000) , -LOWER(UPPER(col_1_varchar)) = REPEAT("c", 4000) -FROM worklog5743; -UPPER(col_1_varchar) = REPEAT("c", 4000) UPPER(col_1_varchar) = REPEAT("C", 4000) LOWER(UPPER(col_1_varchar)) = REPEAT("c", 4000) -0 0 0 -0 0 0 -1 1 1 -SELECT -col_1_varchar = REPEAT("c", 4000) -FROM worklog5743 WHERE col_1_varchar like '%c__%'; -col_1_varchar = REPEAT("c", 4000) -1 -SELECT SUBSTRING(INSERT(col_1_varchar, 1, 4, 'kkkk'),1,10) FROM worklog5743 ; -SUBSTRING(INSERT(col_1_varchar, 1, 4, 'kkkk'),1,10) -kkkkaaaaaa -kkkkbbbbbb -kkkkcccccc -SELECT CONCAT(SUBSTRING(col_1_varchar,-5,3),'append') FROM worklog5743 ; -CONCAT(SUBSTRING(col_1_varchar,-5,3),'append') -aaaappend -bbbappend -cccappend -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , -col_2_varchar VARCHAR (4000) , -UNIQUE INDEX (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 1000),REPEAT("c", 1000)), REPEAT("o", 4000)); -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 2000)), REPEAT("o", 4000)); -INSERT INTO worklog5743 VALUES(NULL,NULL); -INSERT INTO worklog5743 VALUES(NULL,NULL); -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE -FROM INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -COLUMN_NAME INDEX_NAME SUB_PART INDEX_TYPE -col_1_varchar col_1_varchar 3072 BTREE -SELECT col_1_varchar FROM worklog5743 WHERE col_1_varchar IS NULL; -col_1_varchar -NULL -NULL -SELECT col_1_varchar = concat(REPEAT("a", 2000),REPEAT("b", 2000)) -FROM worklog5743 WHERE col_1_varchar IS NOT NULL ORDER BY 1; -col_1_varchar = concat(REPEAT("a", 2000),REPEAT("b", 2000)) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072))) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY `prefix_primary` (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = 
REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -ERROR 23000: Duplicate entry 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' for key 'PRIMARY' -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY `prefix_primary` (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -col_1_varchar = REPEAT("a", 4000) col_2_varchar = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -col_1_varchar = REPEAT("c", 4000) -1 -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -ERROR 23000: Duplicate entry 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' for key 'PRIMARY' -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -col_1_varchar = REPEAT("c", 4000) -0 -1 -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR(4000) , col_2_varchar VARCHAR(4000) , -PRIMARY KEY (col_1_varchar (3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("c", 3500) , REPEAT("o", 3500)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -connect con1,localhost,root,,; -connection con1; -SELECT col_1_varchar = REPEAT("c", 3500) , col_2_varchar = REPEAT("o", 3500) -FROM worklog5743; -col_1_varchar = REPEAT("c", 3500) col_2_varchar = REPEAT("o", 3500) -1 1 -connection default; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 3500) , REPEAT("o", 3500)); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743 -WHERE col_2_varchar = REPEAT("o", 3500); -col_1_varchar = REPEAT("b", 3500) -0 -0 -COMMIT; -connection con1; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("k", 3500),REPEAT("p", 3500)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -UPDATE worklog5743 SET col_1_varchar = REPEAT("b", 3500) -WHERE col_1_varchar = REPEAT("a", 3500) -AND col_2_varchar = REPEAT("o", 3500); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743 -WHERE col_2_varchar = 
REPEAT("o", 3500); -col_1_varchar = REPEAT("b", 3500) -1 -0 -connection default; -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 3500); -SELECT col_1_varchar = REPEAT("a", 3500) FROM worklog5743 -WHERE col_2_varchar = REPEAT("p", 3500); -col_1_varchar = REPEAT("a", 3500) -0 -connection con1; -COMMIT; -connection default; -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varbinary VARBINARY (4000) , -PRIMARY KEY (col_1_varbinary(3072))) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varbinary = REPEAT("o", 4000) -FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) col_2_varbinary = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -1 -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_varbinary = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("b", 4000) -AND col_2_varbinary = REPEAT("p", 4000); -col_1_varbinary = REPEAT("b", 4000) -1 -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (2000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) -1 -0 -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -DROP INDEX prefix_idx ON worklog5743; -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000)); -Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743; -col_1_varbinary = REPEAT("a", 4000) -1 -0 -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -col_1_varbinary = REPEAT("c", 4000) -DROP TABLE worklog5743; -CREATE TABLE worklog5743 (col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(500)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text 
= REPEAT("o", 4000) -FROM worklog5743; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -1 -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_text = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("b", 4000) AND col_2_text = REPEAT("p", 4000); -col_1_text = REPEAT("b", 4000) -1 -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -col_1_text = REPEAT("a", 4000) -1 -0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -DROP INDEX prefix_idx ON worklog5743; -CREATE INDEX prefix_idx ON worklog5743(col_1_text (4000)); -Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -col_1_text = REPEAT("a", 4000) -1 -0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) FROM worklog5743; -col_1_text = REPEAT("a", 4000) col_2_text = REPEAT("o", 4000) -1 1 -0 0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -1 -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -ALTER TABLE worklog5743 DROP PRIMARY KEY; -SELECT col_1_text = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("b", 4000) -AND col_2_text = REPEAT("p", 4000); -col_1_text = REPEAT("b", 4000) -1 -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (700)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -col_1_text = 
REPEAT("a", 4000) -1 -0 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (950)); -ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -col_1_text = REPEAT("a", 4000) -0 -1 -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -col_1_text = REPEAT("c", 4000) -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar (900)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar (3073)); -ERROR 42000: Specified key was too long; max key length is 3072 bytes -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_BLOB BLOB (4000) , PRIMARY KEY (col_1_BLOB(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_BLOB (500)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_BLOB (3073)); -ERROR 42000: Specified key was too long; max key length is 3072 bytes -DROP TABLE worklog5743; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 1000),REPEAT("c", 1000)), -REPEAT("o", 4000)); -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 2000)), REPEAT("o", 4000)); -ALTER TABLE worklog5743 ADD PRIMARY KEY `pk_idx` (col_1_varchar(3000)); -ERROR 23000: Duplicate entry 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' for key 'PRIMARY' -DROP TABLE worklog5743; -set global innodb_large_prefix=0; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ERROR 42000: Specified key was too long; max key length is 767 bytes -set global innodb_large_prefix=0; -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(767)) -) engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT('a',4000),REPEAT('b',4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1000)); -affected rows: 0 -info: Records: 0 Duplicates: 0 Warnings: 1 -Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes -ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT; -affected rows: 0 -info: Records: 0 Duplicates: 0 Warnings: 0 -SHOW CREATE TABLE worklog5743; -Table Create 
Table
-worklog5743 CREATE TABLE `worklog5743` (
- `col_1_varchar` varchar(4000) NOT NULL,
- `col_2_varchar` varchar(4000) DEFAULT NULL,
- PRIMARY KEY (`col_1_varchar`(767)),
- KEY `prefix_idx` (`col_1_varchar`(767))
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
-DROP TABLE worklog5743;
-connection default;
diff --git a/mysql-test/suite/innodb_zip/r/large_blob.result b/mysql-test/suite/innodb_zip/r/large_blob.result
new file mode 100644
index 00000000000..7070d610f58
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/large_blob.result
@@ -0,0 +1,83 @@
+#
+# This tests the use of large blobs in InnoDB.
+#
+call mtr.add_suppression("InnoDB: Warning: a long semaphore wait");
+SET GLOBAL innodb_file_per_table = OFF;
+#
+# System tablespace, Row Format = Redundant
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+#
+# System tablespace, Row Format = Compact
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+SET GLOBAL innodb_file_per_table = ON;
+#
+# Separate tablespace, Row Format = Redundant
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+#
+# Separate tablespace, Row Format = Compact
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+#
+# Separate tablespace, Row Format = Compressed, Key Block Size = 2k
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+#
+# Separate tablespace, Row Format = Compressed, Key Block Size = 1k
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
+#
+# Separate tablespace, Row Format = Dynamic
+#
+CREATE TABLE t1 (
+c1 INT DEFAULT NULL,
+c2 LONGBLOB NOT NULL,
+KEY k2 (c2(250), c1)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (1, '');
+UPDATE t1 SET c2=@longblob;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb_zip/r/restart.result b/mysql-test/suite/innodb_zip/r/restart.result
new file mode 100644
index 00000000000..5645b1ee310
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/restart.result
@@ -0,0 +1,1236 @@
+SET default_storage_engine=InnoDB;
+#
+# A series of tests to make sure tables are opened after restart.
+# Bug#13357607 Compressed file-per-table tablespaces fail to open
+#
+set global innodb_file_per_table=on;
+#
+# Create and insert records into a REDUNDANT row formatted table.
+#
+CREATE TABLE t1_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=REDUNDANT ENGINE=InnoDB;
+INSERT INTO t1_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart);
+INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart);
+INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart);
+INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart);
+SHOW CREATE TABLE t1_restart;
+Table Create Table
+t1_restart CREATE TABLE `t1_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=1000000027 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
+SELECT count(*) FROM t1_restart;
+count(*)
+16
+#
+# Create and insert records into a COMPACT row formatted table.
+#
+CREATE TABLE t2_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=COMPACT ENGINE=InnoDB;
+INSERT INTO t2_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+SHOW CREATE TABLE t2_restart;
+Table Create Table
+t2_restart CREATE TABLE `t2_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=1000000027 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
+SELECT count(*) FROM t2_restart;
+count(*)
+16
+#
+# Create and insert records into a COMPRESSED row formatted table.
+#
+CREATE TABLE t3_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 ENGINE=InnoDB;
+INSERT INTO t3_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart);
+INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart);
+INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart);
+INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart);
+SHOW CREATE TABLE t3_restart;
+Table Create Table
+t3_restart CREATE TABLE `t3_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=1000000027 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2
+SELECT count(*) FROM t3_restart;
+count(*)
+16
+#
+# Create and insert records into a DYNAMIC row formatted table.
+#
+CREATE TABLE t4_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=DYNAMIC ENGINE=InnoDB;
+INSERT INTO t4_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+SHOW CREATE TABLE t4_restart;
+Table Create Table
+t4_restart CREATE TABLE `t4_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=1000000027 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+SELECT count(*) FROM t4_restart;
+count(*)
+16
+#
+# Create and insert records into a table that uses a remote DATA DIRECTORY.
+#
+CREATE TABLE t5_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=DYNAMIC ENGINE=InnoDB DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir';
+INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+SHOW CREATE TABLE t5_restart;
+Table Create Table
+t5_restart CREATE TABLE `t5_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=1000000027 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/'
+SELECT count(*) FROM t5_restart;
+count(*)
+16
+#
+# Create and insert records into a partitioned table that uses
+# a remote DATA DIRECTORY for each partition.
+#
+CREATE TABLE t6_restart(
+c1 INT AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 ENGINE=InnoDB
+PARTITION BY HASH(c1) (
+PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir',
+PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir',
+PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir');
+INSERT INTO t6_restart VALUES (0, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart);
+INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart);
+INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart);
+INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart);
+SHOW CREATE TABLE t6_restart;
+Table Create Table
+t6_restart CREATE TABLE `t6_restart` (
+ `c1` int(11) NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=17 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2
+/*!50100 PARTITION BY HASH (c1)
+(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB,
+ PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB,
+ PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB) */
+SELECT count(*) FROM t6_restart;
+count(*)
+16
+#
+# Create and insert records into a subpartitioned table that uses
+# a remote DATA DIRECTORY for each subpartition.
+#
+CREATE TABLE t7_restart(
+c1 INT AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=DYNAMIC ENGINE=InnoDB
+PARTITION BY RANGE(c1) SUBPARTITION BY HASH(c1) (
+PARTITION p0 VALUES LESS THAN (10) (
+SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir',
+SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir'),
+PARTITION p1 VALUES LESS THAN MAXVALUE (
+SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir',
+SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir'));
+INSERT INTO t7_restart VALUES (0, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart);
+INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart);
+INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart);
+INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart);
+SHOW CREATE TABLE t7_restart;
+Table Create Table
+t7_restart CREATE TABLE `t7_restart` (
+ `c1` int(11) NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) ENGINE=InnoDB AUTO_INCREMENT=17 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+/*!50100 PARTITION BY RANGE (c1)
+SUBPARTITION BY HASH (c1)
+(PARTITION p0 VALUES LESS THAN (10)
+ (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB,
+ SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB),
+ PARTITION p1 VALUES LESS THAN MAXVALUE
+ (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB,
+ SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir/' ENGINE = InnoDB)) */
+SELECT count(*) FROM t7_restart;
+count(*)
+16
+#
+# Create and insert records into a table that uses a general tablespace.
+#
+CREATE TABLESPACE s1_restart ADD DATAFILE 's1_restart.ibd';
+CREATE TABLE t8_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=COMPACT ENGINE=InnoDB TABLESPACE=s1_restart;
+INSERT INTO t8_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+SHOW CREATE TABLE t8_restart;
+Table Create Table
+t8_restart CREATE TABLE `t8_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) /*!50100 TABLESPACE `s1_restart` */ ENGINE=InnoDB AUTO_INCREMENT=1000000125 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
+SELECT count(*) FROM t8_restart;
+count(*)
+65
+CREATE TABLE t9_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT)
+ROW_FORMAT=DYNAMIC ENGINE=InnoDB TABLESPACE=s1_restart;
+INSERT INTO t9_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart);
+SHOW CREATE TABLE t9_restart;
+Table Create Table
+t9_restart CREATE TABLE `t9_restart` (
+ `c1` double NOT NULL AUTO_INCREMENT,
+ `c2` char(10) DEFAULT NULL,
+ `c3` varchar(100) DEFAULT NULL,
+ `c4` date DEFAULT NULL,
+ `c5` text,
+ PRIMARY KEY (`c1`)
+) /*!50100 TABLESPACE `s1_restart` */ ENGINE=InnoDB AUTO_INCREMENT=1000000125 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+SELECT count(*) FROM t9_restart;
+count(*)
+65
+#
+# Show these tables in information_schema.
+# +=== information_schema.innodb_sys_tables and innodb_sys_tablespaces === +Table Name Tablespace Table Flags Columns Row Format Zip Size Space Type +test/t1_restart test/t1_restart 0 8 Redundant 0 Single +test/t2_restart test/t2_restart 1 8 Compact 0 Single +test/t3_restart test/t3_restart 37 8 Compressed 2048 Single +test/t4_restart test/t4_restart 33 8 Dynamic 0 Single +test/t5_restart test/t5_restart 97 8 Dynamic 0 Single +test/t6_restart#p#p0 test/t6_restart#p#p0 101 8 Compressed 2048 Single +test/t6_restart#p#p1 test/t6_restart#p#p1 101 8 Compressed 2048 Single +test/t6_restart#p#p2 test/t6_restart#p#p2 101 8 Compressed 2048 Single +test/t7_restart#p#p0#sp#s0 test/t7_restart#p#p0#sp#s0 97 8 Dynamic 0 Single +test/t7_restart#p#p0#sp#s1 test/t7_restart#p#p0#sp#s1 97 8 Dynamic 0 Single +test/t7_restart#p#p1#sp#s2 test/t7_restart#p#p1#sp#s2 97 8 Dynamic 0 Single +test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single +test/t8_restart s1_restart 129 8 Compact 0 General +test/t9_restart s1_restart 161 8 Dynamic 0 General +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t1_restart Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd +test/t2_restart Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd +test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +s1_restart General DEFAULT 0 Any MYSQLD_DATADIR/s1_restart.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t1_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t1_restart.ibd +test/t2_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t2_restart.ibd +test/t3_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t3_restart.ibd +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 TABLESPACE InnoDB 
NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +s1_restart TABLESPACE InnoDB NORMAL s1_restart MYSQLD_DATADIR/s1_restart.ibd +# +# Shutdown the server and list the tablespace OS files +# +---- MYSQL_DATA_DIR/test +t1_restart.frm +t1_restart.ibd +t2_restart.frm +t2_restart.ibd +t3_restart.frm +t3_restart.ibd +t4_restart.frm +t4_restart.ibd +t5_restart.frm +t5_restart.isl +t6_restart#p#p0.isl +t6_restart#p#p1.isl +t6_restart#p#p2.isl +t6_restart.frm +t7_restart#p#p0#sp#s0.isl +t7_restart#p#p0#sp#s1.isl +t7_restart#p#p1#sp#s2.isl +t7_restart#p#p1#sp#s3.isl +t7_restart.frm +t8_restart.frm +t9_restart.frm +---- MYSQL_TMP_DIR/alt_dir +test +---- MYSQL_TMP_DIR/alt_dir/test +t5_restart.ibd +t6_restart#p#p0.ibd +t6_restart#p#p1.ibd +t6_restart#p#p2.ibd +t7_restart#p#p0#sp#s0.ibd +t7_restart#p#p0#sp#s1.ibd +t7_restart#p#p1#sp#s2.ibd +t7_restart#p#p1#sp#s3.ibd +# +# Start the server and show that tables are still visible and accessible. +# +# restart +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value +innodb_file_per_table ON +SHOW CREATE TABLE t1_restart; +Table Create Table +t1_restart CREATE TABLE `t1_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000020 DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT +SHOW CREATE TABLE t2_restart; +Table Create Table +t2_restart CREATE TABLE `t2_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000020 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT +SHOW CREATE TABLE t3_restart; +Table Create Table +t3_restart CREATE TABLE `t3_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000020 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +SHOW CREATE TABLE t4_restart; +Table Create Table +t4_restart CREATE TABLE `t4_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000020 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +SHOW CREATE TABLE t5_restart; +Table Create Table +t5_restart CREATE TABLE `t5_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000020 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' +SHOW CREATE TABLE t6_restart; +Table Create Table +t6_restart CREATE TABLE `t6_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=17 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = 
InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB) */ +SHOW CREATE TABLE t7_restart; +Table Create Table +t7_restart CREATE TABLE `t7_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=17 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) */ +SHOW CREATE TABLE t8_restart; +Table Create Table +t8_restart CREATE TABLE `t8_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) /*!50100 TABLESPACE `s1_restart` */ ENGINE=InnoDB AUTO_INCREMENT=1000000110 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT +SHOW CREATE TABLE t9_restart; +Table Create Table +t9_restart CREATE TABLE `t9_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) /*!50100 TABLESPACE `s1_restart` */ ENGINE=InnoDB AUTO_INCREMENT=1000000110 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t8_restart); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t9_restart); +SELECT count(*) FROM t1_restart; +count(*) +32 +SELECT count(*) FROM t2_restart; +count(*) +32 +SELECT count(*) FROM t3_restart; +count(*) +32 +SELECT count(*) FROM t4_restart; +count(*) +32 +SELECT count(*) FROM t5_restart; +count(*) +32 +SELECT count(*) FROM t6_restart; +count(*) +32 +SELECT count(*) FROM t7_restart; +count(*) +32 +SELECT count(*) FROM t8_restart; +count(*) +130 +SELECT count(*) FROM t9_restart; +count(*) +130 +# +# Show these tables in information_schema. 
+# +=== information_schema.innodb_sys_tables and innodb_sys_tablespaces === +Table Name Tablespace Table Flags Columns Row Format Zip Size Space Type +test/t1_restart test/t1_restart 0 8 Redundant 0 Single +test/t2_restart test/t2_restart 1 8 Compact 0 Single +test/t3_restart test/t3_restart 37 8 Compressed 2048 Single +test/t4_restart test/t4_restart 33 8 Dynamic 0 Single +test/t5_restart test/t5_restart 97 8 Dynamic 0 Single +test/t6_restart#p#p0 test/t6_restart#p#p0 101 8 Compressed 2048 Single +test/t6_restart#p#p1 test/t6_restart#p#p1 101 8 Compressed 2048 Single +test/t6_restart#p#p2 test/t6_restart#p#p2 101 8 Compressed 2048 Single +test/t7_restart#p#p0#sp#s0 test/t7_restart#p#p0#sp#s0 97 8 Dynamic 0 Single +test/t7_restart#p#p0#sp#s1 test/t7_restart#p#p0#sp#s1 97 8 Dynamic 0 Single +test/t7_restart#p#p1#sp#s2 test/t7_restart#p#p1#sp#s2 97 8 Dynamic 0 Single +test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single +test/t8_restart s1_restart 129 8 Compact 0 General +test/t9_restart s1_restart 161 8 Dynamic 0 General +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t1_restart Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd +test/t2_restart Single DEFAULT 0 Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd +test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +s1_restart General DEFAULT 0 Any MYSQLD_DATADIR/s1_restart.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t1_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t1_restart.ibd +test/t2_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t2_restart.ibd +test/t3_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t3_restart.ibd +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 TABLESPACE InnoDB 
NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +s1_restart TABLESPACE InnoDB NORMAL s1_restart MYSQLD_DATADIR/s1_restart.ibd +DROP TABLE t1_restart; +DROP TABLE t2_restart; +DROP TABLE t3_restart; +DROP TABLE t8_restart; +DROP TABLE t9_restart; +DROP TABLESPACE s1_restart; +# +# Truncate the remote tablespaces. +# +TRUNCATE TABLE t5_restart; +ALTER TABLE t6_restart TRUNCATE PARTITION p2; +ALTER TABLE t7_restart TRUNCATE PARTITION p1; +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +SELECT count(*) FROM t5_restart; +count(*) +8 +SHOW CREATE TABLE t5_restart; +Table Create Table +t5_restart CREATE TABLE `t5_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY 
(`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000012 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' +SELECT count(*) FROM t6_restart; +count(*) +21 +SHOW CREATE TABLE t6_restart; +Table Create Table +t6_restart CREATE TABLE `t6_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=32 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB) */ +SELECT count(*) FROM t7_restart; +count(*) +9 +SHOW CREATE TABLE t7_restart; +Table Create Table +t7_restart CREATE TABLE `t7_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) */ +# +# Shutdown the server and make a backup of a tablespace +# +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.ibd +t5_restart.frm +t5_restart.frm.bak +t5_restart.isl +t5_restart.isl.bak +t6_restart#p#p0.isl +t6_restart#p#p1.isl +t6_restart#p#p2.isl +t6_restart.frm +t7_restart#p#p0#sp#s0.isl +t7_restart#p#p0#sp#s1.isl +t7_restart#p#p1#sp#s2.isl +t7_restart#p#p1#sp#s3.isl +t7_restart.frm +---- MYSQL_TMP_DIR/alt_dir/test +t5_restart.ibd +t5_restart.ibd.bak +t6_restart#p#p0.ibd +t6_restart#p#p1.ibd +t6_restart#p#p2.ibd +t7_restart#p#p0#sp#s0.ibd +t7_restart#p#p0#sp#s1.ibd +t7_restart#p#p1#sp#s2.ibd +t7_restart#p#p1#sp#s3.ibd +# +# Start the server and show the tablespaces. 
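In the listing above, each remote table is represented under the datadir only by its .frm and a .isl file, while the .ibd data files live under alt_dir. As a minimal cross-check sketch (assuming the INNODB_SYS_TABLES and INNODB_SYS_DATAFILES views that this result already exercises, with their 5.6/5.7 column names), the registered data-file path per tablespace can also be read back from SQL:

# Illustrative sketch only, not part of the recorded result:
# list the registered data-file path per *_restart tablespace;
# for the remote tables the PATH should point into alt_dir.
SELECT t.NAME, d.PATH
  FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t
  JOIN INFORMATION_SCHEMA.INNODB_SYS_DATAFILES d USING (SPACE)
 WHERE t.NAME LIKE 'test/t%restart%';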
+# +# restart +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value +innodb_file_per_table ON +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t5_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd +test/t6_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd +test/t6_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd +test/t6_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd +test/t7_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd +test/t7_restart#p#p0#sp#s1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd +test/t7_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd +test/t7_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd +SELECT count(*) FROM t5_restart; +count(*) +8 +SHOW CREATE TABLE t5_restart; +Table Create Table +t5_restart CREATE TABLE `t5_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000009 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' +SELECT count(*) FROM t6_restart; +count(*) +21 +SHOW CREATE TABLE t6_restart; +Table Create Table +t6_restart CREATE TABLE `t6_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=32 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB) */ +SELECT count(*) FROM t7_restart; +count(*) +9 +SHOW CREATE TABLE t7_restart; +Table Create Table +t7_restart CREATE TABLE `t7_restart` ( + `c1` int(11) NOT 
NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=10 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) */ +# +# Try to rename a tablespace to a file that already exists +# +RENAME TABLE t5_restart TO t55_restart; +ERROR 42S01: Table 't55_restart' already exists +RENAME TABLE t5_restart TO t55_restart; +ERROR HY000: Error on rename of './test/t5_restart' to './test/t55_restart' (errno: 184 - Tablespace already exists) +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.ibd +t5_restart.frm +t5_restart.isl +t6_restart#p#p0.isl +t6_restart#p#p1.isl +t6_restart#p#p2.isl +t6_restart.frm +t7_restart#p#p0#sp#s0.isl +t7_restart#p#p0#sp#s1.isl +t7_restart#p#p1#sp#s2.isl +t7_restart#p#p1#sp#s3.isl +t7_restart.frm +---- MYSQL_TMP_DIR/alt_dir/test +t5_restart.ibd +t6_restart#p#p0.ibd +t6_restart#p#p1.ibd +t6_restart#p#p2.ibd +t7_restart#p#p0#sp#s0.ibd +t7_restart#p#p0#sp#s1.ibd +t7_restart#p#p1#sp#s2.ibd +t7_restart#p#p1#sp#s3.ibd +# +# Rename file table and tablespace +# +RENAME TABLE t5_restart TO t55_restart; +RENAME TABLE t6_restart TO t66_restart; +RENAME TABLE t7_restart TO t77_restart; +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +test/t66_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +test/t66_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 TABLESPACE InnoDB 
NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +count(*) +16 +SHOW CREATE TABLE t55_restart; +Table Create Table +t55_restart CREATE TABLE `t55_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000024 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +count(*) +42 +SHOW CREATE TABLE t66_restart; +Table Create Table +t66_restart CREATE TABLE `t66_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=53 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB) */ +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +count(*) +18 +SHOW CREATE TABLE t77_restart; +Table Create Table +t77_restart CREATE TABLE `t77_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=19 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) */ +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.ibd +t55_restart.frm +t55_restart.isl +t66_restart#p#p0.isl +t66_restart#p#p1.isl +t66_restart#p#p2.isl +t66_restart.frm +t77_restart#p#p0#sp#s0.isl +t77_restart#p#p0#sp#s1.isl +t77_restart#p#p1#sp#s2.isl +t77_restart#p#p1#sp#s3.isl +t77_restart.frm +---- MYSQL_TMP_DIR/alt_dir/test +t55_restart.ibd +t66_restart#p#p0.ibd +t66_restart#p#p1.ibd +t66_restart#p#p2.ibd +t77_restart#p#p0#sp#s0.ibd +t77_restart#p#p0#sp#s1.ibd +t77_restart#p#p1#sp#s2.ibd +t77_restart#p#p1#sp#s3.ibd +# +# Restart the server +# +# restart +SHOW VARIABLES LIKE 'innodb_file_per_table'; +Variable_name Value +innodb_file_per_table ON +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +test/t66_restart#p#p0 Single DEFAULT 2048 Compressed 
MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +test/t66_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +count(*) +32 +SHOW CREATE TABLE t55_restart; +Table Create Table +t55_restart CREATE TABLE `t55_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000048 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/alt_dir/' +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +count(*) +84 +SHOW CREATE TABLE t66_restart; +Table Create Table +t66_restart CREATE TABLE `t66_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=95 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB) */ +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +count(*) +36 +SHOW CREATE TABLE t77_restart; +Table Create Table +t77_restart CREATE TABLE `t77_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=37 DEFAULT CHARSET=latin1 
ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/alt_dir' ENGINE = InnoDB)) */ +# +# Shutdown the server +# +# +# Move the remote tablespaces to a new location and change the ISL files +# +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.ibd +t55_restart.frm +t55_restart.isl +t66_restart#p#p0.isl +t66_restart#p#p1.isl +t66_restart#p#p2.isl +t66_restart.frm +t77_restart#p#p0#sp#s0.isl +t77_restart#p#p0#sp#s1.isl +t77_restart#p#p1#sp#s2.isl +t77_restart#p#p1#sp#s3.isl +t77_restart.frm +---- MYSQL_TMP_DIR/alt_dir/test +t55_restart.ibd +t66_restart#p#p0.ibd +t66_restart#p#p1.ibd +t66_restart#p#p2.ibd +t77_restart#p#p0#sp#s0.ibd +t77_restart#p#p0#sp#s1.ibd +t77_restart#p#p1#sp#s2.ibd +t77_restart#p#p1#sp#s3.ibd +---- MYSQL_TMP_DIR/new_dir/test +# Moving tablespace 't4_restart' from MYSQL_DATA_DIR to MYSQL_TMP_DIR/new_dir +# Moving tablespace 't55_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +# Moving tablespace 't66_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +# Moving tablespace 't77_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.isl +t55_restart.frm +t55_restart.isl +t66_restart#p#p0.isl +t66_restart#p#p1.isl +t66_restart#p#p2.isl +t66_restart.frm +t77_restart#p#p0#sp#s0.isl +t77_restart#p#p0#sp#s1.isl +t77_restart#p#p1#sp#s2.isl +t77_restart#p#p1#sp#s3.isl +t77_restart.frm +---- MYSQL_TMP_DIR/alt_dir/test +---- MYSQL_TMP_DIR/new_dir/test +t4_restart.ibd +t55_restart.ibd +t66_restart#p#p0.ibd +t66_restart#p#p1.ibd +t66_restart#p#p2.ibd +t77_restart#p#p0#sp#s0.ibd +t77_restart#p#p0#sp#s1.ibd +t77_restart#p#p1#sp#s2.ibd +t77_restart#p#p1#sp#s3.ibd +# +# Start the server and check tablespaces. 
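Before this restart the .ibd files have been moved to new_dir and each .isl file rewritten to carry the new location. A quick way to confirm the server resolved the new paths after coming back up (a sketch, assuming INFORMATION_SCHEMA.FILES is populated for InnoDB tablespaces on this branch, as the result output above suggests):

# Illustrative sketch only, not part of the recorded result:
# every *_restart data file should now resolve under new_dir.
SELECT TABLESPACE_NAME, FILE_NAME
  FROM INFORMATION_SCHEMA.FILES
 WHERE ENGINE = 'InnoDB'
   AND FILE_NAME LIKE '%restart%';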
+# +# restart +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd +test/t55_restart Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd +test/t66_restart#p#p0 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 Single DEFAULT 2048 Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd +test/t55_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd +test/t66_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +SELECT count(*) FROM t4_restart; +count(*) +64 +SHOW CREATE TABLE t4_restart; +Table Create Table +t4_restart CREATE TABLE `t4_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000099 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +count(*) +64 +SHOW CREATE TABLE t55_restart; +Table Create Table +t55_restart CREATE TABLE `t55_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000096 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC DATA DIRECTORY='MYSQL_TMP_DIR/new_dir/' +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +count(*) +168 +SHOW CREATE TABLE t66_restart; +Table Create Table +t66_restart CREATE TABLE `t66_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date 
DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=179 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 PARTITION BY HASH (c1) +(PARTITION p0 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB, + PARTITION p1 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB, + PARTITION p2 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB) */ +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +count(*) +72 +SHOW CREATE TABLE t77_restart; +Table Create Table +t77_restart CREATE TABLE `t77_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=73 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB, + SUBPARTITION s1 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB, + SUBPARTITION s3 DATA DIRECTORY = 'MYSQL_TMP_DIR/new_dir' ENGINE = InnoDB)) */ +# +# Shutdown the server +# +# +# Move the remote tablespaces back to the default datadir and delete the ISL file. +# +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.isl +t55_restart.frm +t55_restart.isl +t66_restart#p#p0.isl +t66_restart#p#p1.isl +t66_restart#p#p2.isl +t66_restart.frm +t77_restart#p#p0#sp#s0.isl +t77_restart#p#p0#sp#s1.isl +t77_restart#p#p1#sp#s2.isl +t77_restart#p#p1#sp#s3.isl +t77_restart.frm +---- MYSQL_TMP_DIR/new_dir/test +t4_restart.ibd +t55_restart.ibd +t66_restart#p#p0.ibd +t66_restart#p#p1.ibd +t66_restart#p#p2.ibd +t77_restart#p#p0#sp#s0.ibd +t77_restart#p#p0#sp#s1.ibd +t77_restart#p#p1#sp#s2.ibd +t77_restart#p#p1#sp#s3.ibd +# Moving 't4_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +# Moving 't55_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +# Moving 't66_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +# Moving 't77_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +---- MYSQL_DATA_DIR/test +t4_restart.frm +t4_restart.ibd +t55_restart.frm +t55_restart.ibd +t66_restart#p#p0.ibd +t66_restart#p#p1.ibd +t66_restart#p#p2.ibd +t66_restart.frm +t77_restart#p#p0#sp#s0.ibd +t77_restart#p#p0#sp#s1.ibd +t77_restart#p#p1#sp#s2.ibd +t77_restart#p#p1#sp#s3.ibd +t77_restart.frm +---- MYSQL_TMP_DIR/new_dir/test +# +# Start the server and check tablespaces. 
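With the .isl files removed and the .ibd files moved back under the datadir, the server should register every *_restart tablespace at its default location again; accordingly, the DATA DIRECTORY clauses drop out of the SHOW CREATE TABLE output further down. A sketch of a negative check, under the same INFORMATION_SCHEMA.FILES assumption as the earlier query:

# Illustrative sketch only, not part of the recorded result:
# no *_restart data file should still resolve outside the datadir.
SELECT COUNT(*) AS remote_files
  FROM INFORMATION_SCHEMA.FILES
 WHERE ENGINE = 'InnoDB'
   AND FILE_NAME LIKE '%restart%'
   AND (FILE_NAME LIKE '%alt_dir%' OR FILE_NAME LIKE '%new_dir%');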
+# +# restart +=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles === +Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path +test/t4_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t55_restart.ibd +test/t66_restart#p#p0 Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 Single DEFAULT 0 Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd +=== information_schema.files === +Space_Name File_Type Engine Status Tablespace_Name Path +test/t4_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t4_restart.ibd +test/t55_restart TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t55_restart.ibd +test/t66_restart#p#p0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t66_restart#p#p0.ibd +test/t66_restart#p#p1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t66_restart#p#p1.ibd +test/t66_restart#p#p2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t66_restart#p#p2.ibd +test/t77_restart#p#p0#sp#s0 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd +test/t77_restart#p#p0#sp#s1 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd +test/t77_restart#p#p1#sp#s2 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd +test/t77_restart#p#p1#sp#s3 TABLESPACE InnoDB NORMAL innodb_file_per_table.## MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +SELECT count(*) FROM t4_restart; +count(*) +128 +SHOW CREATE TABLE t4_restart; +Table Create Table +t4_restart CREATE TABLE `t4_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000195 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +count(*) +128 +SHOW CREATE TABLE t55_restart; +Table Create Table +t55_restart CREATE TABLE `t55_restart` ( + `c1` double NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=1000000192 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +count(*) +336 +SHOW CREATE TABLE t66_restart; +Table Create Table +t66_restart CREATE TABLE `t66_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=347 DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 +/*!50100 
PARTITION BY HASH (c1) +(PARTITION p0 ENGINE = InnoDB, + PARTITION p1 ENGINE = InnoDB, + PARTITION p2 ENGINE = InnoDB) */ +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +count(*) +144 +SHOW CREATE TABLE t77_restart; +Table Create Table +t77_restart CREATE TABLE `t77_restart` ( + `c1` int(11) NOT NULL AUTO_INCREMENT, + `c2` char(10) DEFAULT NULL, + `c3` varchar(100) DEFAULT NULL, + `c4` date DEFAULT NULL, + `c5` text, + PRIMARY KEY (`c1`) +) ENGINE=InnoDB AUTO_INCREMENT=145 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC +/*!50100 PARTITION BY RANGE (c1) +SUBPARTITION BY HASH (c1) +(PARTITION p0 VALUES LESS THAN (10) + (SUBPARTITION s0 ENGINE = InnoDB, + SUBPARTITION s1 ENGINE = InnoDB), + PARTITION p1 VALUES LESS THAN MAXVALUE + (SUBPARTITION s2 ENGINE = InnoDB, + SUBPARTITION s3 ENGINE = InnoDB)) */ +# +# Cleanup +# +DROP TABLE t4_restart; +DROP TABLE t55_restart; +DROP TABLE t66_restart; +DROP TABLE t77_restart; diff --git a/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result new file mode 100644 index 00000000000..5cdfe162b6a --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6344_compress_level.result @@ -0,0 +1,135 @@ +USE test; +DROP TABLE IF EXISTS tab5; +Warnings: +Note 1051 Unknown table 'test.tab5' +DROP TABLE IF EXISTS tab6; +Warnings: +Note 1051 Unknown table 'test.tab6' +#set the other madatory flags before test starts +SET GLOBAL Innodb_file_per_table=on; +#set the compression level=0 (No compress) +SET global innodb_compression_level=0; +#check the compression level and the compressed_pages is default +SELECT @@innodb_compression_level; +@@innodb_compression_level +0 +SELECT @@Innodb_file_per_table; +@@Innodb_file_per_table +1 +#create table with 1K block size +CREATE TABLE tab5 (col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 longtext , +col_8 longtext ,col_9 longtext , +col_10 longtext ,col_11 int auto_increment primary key) +ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; +#create indexes +CREATE INDEX idx1 ON tab5(col_4(10)); +CREATE INDEX idx2 ON tab5(col_5(10)); +CREATE INDEX idx3 ON tab5(col_6(10)); +CREATE INDEX idx4 ON tab5(col_7(10)); +CREATE INDEX idx5 ON tab5(col_8(10)); +CREATE INDEX idx6 ON tab5(col_11); +#load the with repeat function +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +SET @col_10 = repeat('j', 100); +#insert 10 records +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT 
INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +#set the compression level=9 (High compress) +SET global innodb_compression_level=9; +#create table with 1K block size +CREATE TABLE tab6 (col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 longtext , +col_8 longtext ,col_9 longtext , +col_10 longtext ,col_11 int auto_increment primary key) +ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; +#create indexes +CREATE INDEX idx1 ON tab6(col_4(10)); +CREATE INDEX idx2 ON tab6(col_5(10)); +CREATE INDEX idx3 ON tab6(col_6(10)); +CREATE INDEX idx4 ON tab6(col_7(10)); +CREATE INDEX idx5 ON tab6(col_8(10)); +CREATE INDEX idx6 ON tab6(col_11); +#load the with repeat function +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +SET @col_10 = repeat('j', 100); +#insert 10 records +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +#diff the sizes of the No compressed table and high compressed table +SET @size=(SELECT +(SELECT 
(SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) +FROM INFORMATION_SCHEMA.TABLES +WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test') +- +(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 +FROM INFORMATION_SCHEMA.TABLES +WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test') +FROM DUAL); +#check the size of the table, it should not be Negative value +#The results of this query Test pass = 1 and fail=0 +SELECT @size >= 0; +@size >= 0 +1 +# +# Cleanup +# +DROP TABLE tab5; +DROP TABLE tab6; +#reset back the compression_level to default. diff --git a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result new file mode 100644 index 00000000000..3b98527250b --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result @@ -0,0 +1,8084 @@ +#****************************************************************** +# Test 1: Test the interaction between stats and compression level +# and logging of compressed pages configuration +# This testcase is to verify the table/idex level compression stats +# When the flags are set as follows +# innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 +# page size 1K,2K,4K,8K,16K +# check the size and compression stats of the table tab5 +#****************************************************************** +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=0; +#****************************************************************** +# Test 1-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 1K +#****************************************************************** +# create a table with page size=1K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for deterministic reasons simple data should be inserted. 
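The pass/fail probe above only reports whether the counters fall inside the expected band; the raw per-index numbers behind it can be inspected directly with a query along these lines (a sketch using the information_schema.innodb_cmp_per_index column names as in 5.6 and later):

# Illustrative sketch only, not part of the recorded result:
# raw compression counters for each index of tab5.
SELECT database_name, table_name, index_name,
       compress_ops, compress_ops_ok, uncompress_ops
  FROM information_schema.innodb_cmp_per_index
 WHERE database_name = 'test'
   AND table_name = 'tab5'
 ORDER BY index_name;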
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 5242880 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 5242880 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 5242880 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=0; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table +# testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +=============== +After Restart Chekc the stats of the table +=============== +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @comp_val AND 1000 +AND uncompress_ops BETWEEN @uncomp_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +DROP TABLE tab5; +#****************************************************************** +# Test 1-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 2K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +# create a table with page size=2K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +CREATE INDEX idx9 ON tab5(col_9(10)); 
+CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. +# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); 
+INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); 
+INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND 
table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 2097152 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 2097152 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 2097152 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=0; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. 
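# Note on the restart: the INNODB_CMP_PER_INDEX counters are kept only in
# memory, so a server restart (like the OFF/ON toggle of
# innodb_cmp_per_index_enabled that the test uses to reset the stat table)
# starts them again from zero; that is why the post-restart figures may
# differ from run to run. A minimal sketch of how the counters could be
# inspected by hand at this point, assuming the companion
# INNODB_CMP_PER_INDEX_RESET view is also available (the recorded test does
# not use it):
SELECT database_name, table_name, index_name,
compress_ops, compress_ops_ok, uncompress_ops
FROM information_schema.innodb_cmp_per_index
WHERE database_name = 'test' AND table_name = 'tab5'
ORDER BY index_name;
# Selecting from the _RESET view reports the same counters and then zeroes
# them, which gives per-interval figures without a restart or a flag toggle:
SELECT database_name, table_name, index_name, compress_ops, compress_ops_ok
FROM information_schema.innodb_cmp_per_index_reset
WHERE database_name = 'test' AND table_name = 'tab5';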
+=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table +# testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=2; +=============== +After Restart Chekc the stats of the table +=============== +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @comp_val AND 1000 +AND uncompress_ops BETWEEN @uncomp_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +DROP TABLE tab5; +#****************************************************************** +# Test 1-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 4K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +# create a table with page size=4K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +DROP TABLE tab5; +#****************************************************************** +# Test 1-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 8K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +# create a table with page size=8K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 122880 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 212992 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 212992 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 212992 +DROP TABLE tab5; +#****************************************************************** +# Test 1-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 16K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +# create a table with page size=16K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 245760 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +DROP TABLE tab5; +#****************************************************************** +# Test 2: test the interaction between wl6347 & wl6344 (2.2) +# This testcase is to verify the table/idex level compression stats +# When the flags are set as follows +# innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 +# page size 1K,2K,4K,8K,16K +# check the size and compression stats of the table tab5 +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; +#****************************************************************** +# Test 2-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 1K +#****************************************************************** +# create a table with page size=1K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 65536 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND 
index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. +# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 2097152 +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 
iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. 
+===============
+Fetch Records
+===============
+SELECT col_7,col_8,col_9 FROM tab5
+WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+LIMIT 1;
+col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg
+col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh
+col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+# check the stats of the table
+# testcase : pass = 1 fail = 0
+SET @comp_val=0;
+SET @uncomp_val=1;
+===============
+After Restart Check the stats of the table
+===============
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @comp_val AND 1000
+AND uncompress_ops BETWEEN @uncomp_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+DROP TABLE tab5;
+#******************************************************************
+# Test 2-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 2K
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# create a table with page size=2K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+# The size of the file with 0 compress = 65536
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 2097152 +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. 
+===============
+Fetch Records
+===============
+SELECT col_7,col_8,col_9 FROM tab5
+WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+LIMIT 1;
+col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg
+col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh
+col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+# check the stats of the table
+# testcase : pass = 1 fail = 0
+SET @comp_val=0;
+SET @uncomp_val=1;
+===============
+After Restart Check the stats of the table
+===============
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @comp_val AND 1000
+AND uncompress_ops BETWEEN @uncomp_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+DROP TABLE tab5;
+#******************************************************************
+# Test 2-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 4K
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# create a table with page size=4K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+# The size of the file with 0 compress = 65536
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 159744 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +DROP TABLE tab5; +#****************************************************************** +# Test 2-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 8K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=8K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 122880 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 122880 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +# The size of the file with 0 compress = 212992 +SET @inl_val=1; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +DROP TABLE tab5; +#****************************************************************** +# Test 2-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 16K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=16K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 245760 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 245760 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +# The size of the file with 0 compress = 344064 +SET @inl_val=1; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +DROP TABLE tab5; +#****************************************************************** +# Test 3: test the interaction between wl6347 & wl6344 (2.3) +# This testcase is to verify the table/idex level compression stats +# When the flags are set as follows +# innodb_cmp_per_index_enabled=ON and +# innodb_compression_level=6 (default) +# page size 1K,2K,4K,8K,16K +# check the size and compression stats of the table tab5 +#****************************************************************** +#****************************************************************** +# Test 3-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 1K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=default; +# create a table with page size=1K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 65536 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND 
database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. +# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 65536 +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 
iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +DROP TABLE tab5; +#****************************************************************** +# Test 3-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 2K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=default; +# create a table with page size=2K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 65536 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 86016 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +DROP TABLE tab5; +#****************************************************************** +# Test 3-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 4K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=default; +# create a table with page size=4K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 65536 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 86016 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of 
the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +DROP TABLE tab5; +#****************************************************************** +# Test 3-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 8K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=default; +# create a table with page size=8K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +# The size of the file with 0 compress = 122880 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 122880 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 172032 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of 
the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 163840
+DROP TABLE tab5;
+#******************************************************************
+# Test 3-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 16K
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SET GLOBAL innodb_compression_level=default;
+# create a table with page size=16K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+# The size of the file with 0 compress = 245760
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 245760
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +# The size of the file with 0 compress = 344064 +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of 
the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 327680
+DROP TABLE tab5;
+#******************************************************************
+# Test 4: test the interaction between wl6347 & wl6344 (2.5 & 2.6)
+# This testcase is to verify the table/index level compression stats
+# When the flags are set as follows
+# innodb_cmp_per_index_enabled=ON and
+# Innodb_compression_failure_threshold_pct=0
+# page size 1K,2K,4K,8K,16K
+# check the size and compression stats of the table tab5
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# set the flags
+SET GLOBAL innodb_compression_failure_threshold_pct=0;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# check the flags
+SELECT @@innodb_cmp_per_index_enabled;
+@@innodb_cmp_per_index_enabled 1
+SELECT @@innodb_compression_failure_threshold_pct;
+@@innodb_compression_failure_threshold_pct 0
+SELECT @@innodb_file_per_table;
+@@innodb_file_per_table 1
+SELECT @@innodb_compression_level;
+@@innodb_compression_level 6
+#******************************************************************
+# Test 4-1K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 1K
+#******************************************************************
+# create a table with page size=1K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_compression_failure_threshold_pct=0; +SET GLOBAL innodb_file_per_table=on; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. 
+===============
+Fetch Records
+===============
+SELECT col_7,col_8,col_9 FROM tab5
+WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+LIMIT 1;
+col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg
+col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh
+col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+# check the stats of the table
+# testcase : pass = 1 fail = 0
+SET @comp_val=0;
+SET @uncomp_val=1;
+===============
+After Restart Check the stats of the table
+===============
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @comp_val AND 1000
+AND uncompress_ops BETWEEN @uncomp_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+DROP TABLE tab5;
+#******************************************************************
+# Test 4-2K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 2K
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# create a table with page size=2K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# fetch the compressed same page once again and check the stats
+# the stat figures should be same as above query
+===============
+Fetch Records
+===============
+SELECT col_7,col_8,col_9 FROM tab5
+WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+LIMIT 1;
+col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg
+col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh
+col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+# check the stats of the table & size of the table
+SET @inl_val=2;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+DROP TABLE tab5;
+#******************************************************************
+# Test 4-4K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 4K
+#******************************************************************
+# reset the stat table before starting next testcase
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+# create a table with page size=4K
+# create indexes on each column.(total 9 indexes)
+# Create table & Index
+CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB,
+col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT,
+col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255))
+ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
+CREATE INDEX idx9 ON tab5(col_9(10));
+CREATE INDEX idx8 ON tab5(col_8(10));
+CREATE INDEX idx7 ON tab5(col_7(10));
+CREATE INDEX idx6 ON tab5(col_6(10));
+CREATE INDEX idx5 ON tab5(col_5(10));
+CREATE INDEX idx4 ON tab5(col_4(10));
+CREATE INDEX idx3 ON tab5(col_3(10));
+CREATE INDEX idx2 ON tab5(col_2(10));
+CREATE INDEX idx1 ON tab5(col_1(10));
+# check the stats of the table & size of the table
+SET @inl_val=1;
+# Check the stats of the table
+# Check the size of the ibd file
+# testcase : pass = 1 fail = 0
+SELECT count(*) > 0 as "compress_stat"
+FROM information_schema.innodb_cmp_per_index
+WHERE
+compress_ops_ok BETWEEN @inl_val AND 1000
+AND compress_ops BETWEEN @inl_val AND 1000
+AND table_name='tab5' AND database_name='test'
+AND index_name like 'idx%' ;
+compress_stat 1
+The size of the tab5.ibd file: 65536
+# for deterministic reasons simple data should be inserted.
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +DROP TABLE tab5; +#****************************************************************** +# Test 4-8K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 8K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=8K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 122880 +# for determintic resons simple data should be inserted. 
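Note on the recurring probe above: it only asserts that each idx% index of test.tab5 recorded at least @inl_val successful compressions (and at least @inl_val attempts). When a run fails, a more descriptive query over the same table can be easier to read than the boolean check. This is an illustrative sketch only, limited to the INFORMATION_SCHEMA columns that already appear in these results; it is not part of the recorded test.

-- Per-index compression counters for tab5; failures = attempts - successes.
SELECT database_name, table_name, index_name,
       compress_ops,
       compress_ops_ok,
       compress_ops - compress_ops_ok AS compress_failures,
       uncompress_ops
FROM information_schema.innodb_cmp_per_index
WHERE database_name = 'test'
  AND table_name = 'tab5'
ORDER BY index_name;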
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +DROP TABLE tab5; +#****************************************************************** +# Test 4-16K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 16K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=16K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 245760 +# for determintic resons simple data should be inserted. 
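Each page-size sub-test (1K through 16K) repeats the same schema and workload, varying only KEY_BLOCK_SIZE. A minimal sketch of that variant pattern follows, using the settings the surrounding results already establish; the table and index names (tab_demo, idx_demo) and the reduced column list are hypothetical and only illustrate the shape of the per-page-size runs.

-- Prerequisites set earlier in these results, then one compressed variant.
SET GLOBAL innodb_file_per_table = ON;
SET GLOBAL innodb_cmp_per_index_enabled = ON;
-- Only the KEY_BLOCK_SIZE value differs between the 1K..16K sub-tests.
CREATE TABLE tab_demo (c1 TINYBLOB, c2 TINYTEXT, c9 VARCHAR(255))
  ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
CREATE INDEX idx_demo ON tab_demo (c9(10));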
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +DROP TABLE tab5; +#****************************************************************** +# Test 5: test the interaction between wl6347 & wl6344 (2.7) +# This testcase is to verify the table/idex level compression stats +# When the flags are set as follows +# innodb_cmp_per_index_enabled=ON and +# Innodb_compression_failure_threshold_pct=10 +# page size 1K,2K,4K,8K,16K +# check the size and compression stats of the table tab5 +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; +SET GLOBAL innodb_compression_level=Default; +# check the flags +SELECT @@innodb_cmp_per_index_enabled; +@@innodb_cmp_per_index_enabled 1 +SELECT @@innodb_compression_failure_threshold_pct; +@@innodb_compression_failure_threshold_pct 10 +SELECT @@innodb_file_per_table; +@@innodb_file_per_table 1 +SELECT @@innodb_compression_level; +@@innodb_compression_level 6 +#****************************************************************** +# Test 5-1K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 1K +#****************************************************************** +# create a table with page size=1K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON 
tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. +# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE 
col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# restart +# set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# set the flags +SET GLOBAL innodb_compression_failure_threshold_pct=10; +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_failure_threshold_pct=10; +# fetch the compressed page and check the stats +# The stats figure may be different/same for each restart. 
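On the restart step above: the INNODB_CMP_PER_INDEX counters are not expected to carry over, which is presumably why the post-restart check no longer compares against the pre-restart figures and instead only requires that reading the table back decompressed at least one page per index (compress_ops_ok may be 0 again, uncompress_ops must be >= 1). A compact restatement of that post-restart probe, using only what appears in these results:

-- After restart: successes may restart from 0, but the fetch must have
-- forced at least one page decompression per idx% index of test.tab5.
SET @comp_val = 0;
SET @uncomp_val = 1;
SELECT COUNT(*) > 0 AS compress_stat
FROM information_schema.innodb_cmp_per_index
WHERE compress_ops_ok BETWEEN @comp_val AND 1000
  AND uncompress_ops  BETWEEN @uncomp_val AND 1000
  AND table_name = 'tab5' AND database_name = 'test'
  AND index_name LIKE 'idx%';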
+=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table +# testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +=============== +After Restart Chekc the stats of the table +=============== +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @comp_val AND 1000 +AND uncompress_ops BETWEEN @uncomp_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +DROP TABLE tab5; +#****************************************************************** +# Test 5-2K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 2K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; +# create a table with page size=2K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. 
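Test 5 reruns the same workload with innodb_compression_failure_threshold_pct raised from 0 to 10; my understanding of that variable is that once a table's compression failure rate passes the threshold, InnoDB starts padding compressed pages to curb further failures, which is what these sub-tests exercise. A sketch of the flag setup and the check mirrored from these results (the SELECT returns 1, 10, 1, 6 in the recorded output, 6 being the default compression level):

-- Configure and verify the Test 5 settings.
SET GLOBAL innodb_compression_failure_threshold_pct = 10;
SELECT @@innodb_cmp_per_index_enabled,
       @@innodb_compression_failure_threshold_pct,
       @@innodb_file_per_table,
       @@innodb_compression_level;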
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=2; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +DROP TABLE tab5; +#****************************************************************** +# Test 5-4K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 4K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=4K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 65536 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 81920 +DROP TABLE tab5; +#****************************************************************** +# Test 5-8K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 8K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; +# create a table with page size=8K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 122880 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 163840 +DROP TABLE tab5; +#****************************************************************** +# Test 5-16K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 16K +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +# create a table with page size=16K +# create indexes on each column.(total 9 indexes) +# Create table & Index +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +CREATE INDEX idx9 ON tab5(col_9(10)); +CREATE INDEX idx8 ON tab5(col_8(10)); +CREATE INDEX idx7 ON tab5(col_7(10)); +CREATE INDEX idx6 ON tab5(col_6(10)); +CREATE INDEX idx5 ON tab5(col_5(10)); +CREATE INDEX idx4 ON tab5(col_4(10)); +CREATE INDEX idx3 ON tab5(col_3(10)); +CREATE INDEX idx2 ON tab5(col_2(10)); +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 245760 +# for determintic resons simple data should be inserted. 
+# insert some 100 records +# Load the data +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +INSERT INTO tab5 +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9); +commit; +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed page and check the stats +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# 
testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +# fetch the compressed same page once again and check the stats +# the stat figures should be same as above query +=============== +Fetch Records +=============== +SELECT col_7,col_8,col_9 FROM tab5 +WHERE col_2='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' +LIMIT 1; +col_7 gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg +col_8 hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh +col_9 iiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii +# check the stats of the table & size of the table +SET @inl_val=1; +# Check the stats of the table +# Check the size of the ibd file +# testcase : pass = 1 fail = 0 +SELECT count(*) > 0 as "compress_stat" +FROM information_schema.innodb_cmp_per_index +WHERE +compress_ops_ok BETWEEN @inl_val AND 1000 +AND compress_ops BETWEEN @inl_val AND 1000 +AND table_name='tab5' AND database_name='test' +AND index_name like 'idx%' ; +compress_stat 1 +The size of the tab5.ibd file: 327680 +DROP TABLE tab5; +#****************************************************************** +# Test 6: Create multiple tables & indexes having same name in 2 diff DB's +# Check the stats of the table. (1.1) +#****************************************************************** +# reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; +SET GLOBAL innodb_file_per_table=ON; +SET GLOBAL innodb_compression_level=default; +SET GLOBAL innodb_compression_failure_threshold_pct=default; +# create a table page size=1K +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx1 ON tab5(col_1(10)); +# check the stats of the table +SELECT database_name,table_name,index_name,compress_ops,compress_ops_ok +FROM information_schema.innodb_cmp_per_index +WHERE database_name='test' and table_name='tab5' +ORDER BY index_name,table_name,database_name ; +database_name test +table_name tab5 +index_name GEN_CLUST_INDEX +compress_ops 1 +compress_ops_ok 1 +database_name test +table_name tab5 +index_name idx1 +compress_ops 1 +compress_ops_ok 1 +CREATE DATABASE sb; +USE sb; +# create a table page size=1K (testcase-1) +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +CREATE INDEX idx1 ON tab5(col_1(10)); +SELECT database_name,table_name,index_name,compress_ops,compress_ops_ok +FROM information_schema.innodb_cmp_per_index +WHERE database_name='sb' and table_name='tab5' +ORDER BY index_name,table_name,database_name ; +database_name sb +table_name tab5 +index_name GEN_CLUST_INDEX +compress_ops 1 +compress_ops_ok 1 +database_name sb +table_name tab5 +index_name idx1 +compress_ops 1 +compress_ops_ok 1 +DROP TABLE tab5, test.tab5; +DROP DATABASE sb; +# reset the flags 
+SET GLOBAL innodb_file_per_table=default; +SET GLOBAL innodb_cmp_per_index_enabled=default; +SET GLOBAL innodb_compression_failure_threshold_pct=default; diff --git a/mysql-test/suite/innodb_zip/r/wl6470_1.result b/mysql-test/suite/innodb_zip/r/wl6470_1.result new file mode 100644 index 00000000000..ea1866d69eb --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6470_1.result @@ -0,0 +1,598 @@ +create temporary table t1 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb; +create temporary table t2 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb; +create procedure populate_t1() +begin +declare i int default 1; +while (i <= 200) do +insert into t1 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small() +begin +declare i int default 1; +while (i <= 20) do +insert into t1 values (i, 'c', 'd'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small2() +begin +declare i int default 30; +while (i <= 50) do +insert into t1 values (i, 'e', 'f'); +set i = i + 1; +end while; +end| +begin; +select count(*) from t1; +count(*) +0 +call populate_t1(); +select count(*) from t1; +count(*) +200 +select * from t1 limit 10; +keyc c1 c2 +1 a b +2 a b +3 a b +4 a b +5 a b +6 a b +7 a b +8 a b +9 a b +10 a b +rollback; +select count(*) from t1; +count(*) +0 +begin; +call populate_t1(); +select count(*) from t1; +count(*) +200 +commit; +select count(*) from t1; +count(*) +200 +truncate table t1; +select count(*) from t1; +count(*) +0 +call populate_t1_small(); +select count(*) from t1; +count(*) +20 +rollback; +select count(*) from t1; +count(*) +20 +truncate table t1; +call populate_t1(); +select count(*) from t1; +count(*) +200 +delete from t1 where keyc <= 60; +select count(*) from t1; +count(*) +140 +call populate_t1_small(); +select count(*) from t1; +count(*) +160 +select * from t1 limit 10; +keyc c1 c2 +1 c d +2 c d +3 c d +4 c d +5 c d +6 c d +7 c d +8 c d +9 c d +10 c d +begin; +call populate_t1_small2(); +select count(*) from t1; +count(*) +181 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +31 e f +32 e f +33 e f +34 e f +35 e f +36 e f +37 e f +38 e f +39 e f +40 e f +rollback; +select count(*) from t1; +count(*) +160 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +61 a b +62 a b +63 a b +64 a b +65 a b +66 a b +67 a b +68 a b +69 a b +70 a b +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +rollback; +begin; +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +4001 c d +4002 c d +4003 c d +4004 c d +4005 c d +4006 c d +4007 c d +4008 c d +4009 c d +4010 c d +rollback; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +commit; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +insert into t2 select * from t1 where keyc < 2101; +select count(*) from t2; +count(*) +60 +drop procedure populate_t1; +drop procedure populate_t1_small; +drop procedure populate_t1_small2; +drop table t1; +drop table t2; +create temporary table t1 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb key_block_size = 4; +set innodb_strict_mode=off; +create temporary table t2 +(keyc int, c1 
char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb key_block_size = 8; +set innodb_strict_mode=default; +create procedure populate_t1() +begin +declare i int default 1; +while (i <= 200) do +insert into t1 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small() +begin +declare i int default 1; +while (i <= 20) do +insert into t1 values (i, 'c', 'd'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small2() +begin +declare i int default 30; +while (i <= 50) do +insert into t1 values (i, 'e', 'f'); +set i = i + 1; +end while; +end| +begin; +select count(*) from t1; +count(*) +0 +call populate_t1(); +select count(*) from t1; +count(*) +200 +select * from t1 limit 10; +keyc c1 c2 +1 a b +2 a b +3 a b +4 a b +5 a b +6 a b +7 a b +8 a b +9 a b +10 a b +rollback; +select count(*) from t1; +count(*) +0 +begin; +call populate_t1(); +select count(*) from t1; +count(*) +200 +commit; +select count(*) from t1; +count(*) +200 +truncate table t1; +select count(*) from t1; +count(*) +0 +call populate_t1_small(); +select count(*) from t1; +count(*) +20 +rollback; +select count(*) from t1; +count(*) +20 +truncate table t1; +call populate_t1(); +select count(*) from t1; +count(*) +200 +delete from t1 where keyc <= 60; +select count(*) from t1; +count(*) +140 +call populate_t1_small(); +select count(*) from t1; +count(*) +160 +select * from t1 limit 10; +keyc c1 c2 +1 c d +2 c d +3 c d +4 c d +5 c d +6 c d +7 c d +8 c d +9 c d +10 c d +begin; +call populate_t1_small2(); +select count(*) from t1; +count(*) +181 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +31 e f +32 e f +33 e f +34 e f +35 e f +36 e f +37 e f +38 e f +39 e f +40 e f +rollback; +select count(*) from t1; +count(*) +160 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +61 a b +62 a b +63 a b +64 a b +65 a b +66 a b +67 a b +68 a b +69 a b +70 a b +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +rollback; +begin; +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +4001 c d +4002 c d +4003 c d +4004 c d +4005 c d +4006 c d +4007 c d +4008 c d +4009 c d +4010 c d +rollback; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +commit; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +insert into t2 select * from t1 where keyc < 2101; +select count(*) from t2; +count(*) +60 +drop procedure populate_t1; +drop procedure populate_t1_small; +drop procedure populate_t1_small2; +drop table t1; +drop table t2; +set global innodb_file_per_table = 0; +create temporary table t1 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb; +create temporary table t2 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc), index sec_index(c1) +) engine = innodb; +create procedure populate_t1() +begin +declare i int default 1; +while (i <= 200) do +insert into t1 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small() +begin +declare i int default 1; +while (i <= 20) do +insert into t1 values (i, 'c', 'd'); +set i = i + 1; +end while; +end| +create procedure populate_t1_small2() +begin +declare i int default 30; +while (i <= 50) do +insert into t1 values (i, 
'e', 'f'); +set i = i + 1; +end while; +end| +begin; +select count(*) from t1; +count(*) +0 +call populate_t1(); +select count(*) from t1; +count(*) +200 +select * from t1 limit 10; +keyc c1 c2 +1 a b +2 a b +3 a b +4 a b +5 a b +6 a b +7 a b +8 a b +9 a b +10 a b +rollback; +select count(*) from t1; +count(*) +0 +begin; +call populate_t1(); +select count(*) from t1; +count(*) +200 +commit; +select count(*) from t1; +count(*) +200 +truncate table t1; +select count(*) from t1; +count(*) +0 +call populate_t1_small(); +select count(*) from t1; +count(*) +20 +rollback; +select count(*) from t1; +count(*) +20 +truncate table t1; +call populate_t1(); +select count(*) from t1; +count(*) +200 +delete from t1 where keyc <= 60; +select count(*) from t1; +count(*) +140 +call populate_t1_small(); +select count(*) from t1; +count(*) +160 +select * from t1 limit 10; +keyc c1 c2 +1 c d +2 c d +3 c d +4 c d +5 c d +6 c d +7 c d +8 c d +9 c d +10 c d +begin; +call populate_t1_small2(); +select count(*) from t1; +count(*) +181 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +31 e f +32 e f +33 e f +34 e f +35 e f +36 e f +37 e f +38 e f +39 e f +40 e f +rollback; +select count(*) from t1; +count(*) +160 +select * from t1 where keyc > 30 limit 10; +keyc c1 c2 +61 a b +62 a b +63 a b +64 a b +65 a b +66 a b +67 a b +68 a b +69 a b +70 a b +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +rollback; +begin; +update t1 set keyc = keyc + 2000; +select * from t1 limit 10; +keyc c1 c2 +4001 c d +4002 c d +4003 c d +4004 c d +4005 c d +4006 c d +4007 c d +4008 c d +4009 c d +4010 c d +rollback; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +commit; +select * from t1 limit 10; +keyc c1 c2 +2001 c d +2002 c d +2003 c d +2004 c d +2005 c d +2006 c d +2007 c d +2008 c d +2009 c d +2010 c d +insert into t2 select * from t1 where keyc < 2101; +select count(*) from t2; +count(*) +60 +drop procedure populate_t1; +drop procedure populate_t1_small; +drop procedure populate_t1_small2; +drop table t1; +drop table t2; +set global innodb_file_per_table = 1; diff --git a/mysql-test/suite/innodb_zip/r/wl6470_2.result b/mysql-test/suite/innodb_zip/r/wl6470_2.result new file mode 100644 index 00000000000..b001cd73882 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6470_2.result @@ -0,0 +1,667 @@ +create procedure populate_tables() +begin +declare n int default 20; +declare inner_loop int default 100; +set global innodb_file_per_table=on; +drop table if exists t1,t2,t3,t4; +create temporary table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not null, +c8 datetime, +c9 decimal(6,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create temporary table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not null, +c8 datetime, +c9 decimal(6,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create temporary table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not null, +c8 datetime, +c9 
decimal(6,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create temporary table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not null, +c8 datetime, +c9 decimal(6,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not null, +c8 datetime, +c9 decimal(6,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create temporary table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150),(190); +while (n > 0) do +start transaction; +insert into t1 values(n,n,repeat(concat(' tc3_',n),30), +repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), +repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), +now(),(100.55+n)); +insert into t2 values(n,n,repeat(concat(' tc3_',n),30), +repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), +repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), +now(),(100.55+n)); +insert into t3 values(n,n,repeat(concat(' tc3_',n),30), +repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), +repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), +now(),(100.55+n)); +insert into t4 values(n,n,repeat(concat(' tc3_',n),30), +repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), +repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), +now(),(100.55+n)); +insert into t5 values(n,n,repeat(concat(' tc3_',n),30), +repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), +repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), +now(),(100.55+n)); +if (n > 10) then +commit; +else +delete from t1 where c1 > 10 ; +delete from t2 where c1 > 10 ; +delete from t3 where c1 > 10 ; +delete from t4 where c1 > 10 ; +delete from t5 where c1 > 10 ; +rollback; +start transaction; +update t1 set c1 = c1 + 1000 where c1 > 10; +update t2 set c1 = c1 + 1000 where c1 > 10; +update t3 set c1 = c1 + 1000 where c1 > 10; +update t4 set c1 = c1 + 1000 where c1 > 10; +update t5 set c1 = c1 + 1000 where c1 > 10; +rollback; +end if; +start transaction; +insert into t1 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t2 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t3 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t4 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t5 
values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +delete from t1 where c1 between 100 and 110; +delete from t2 where c1 between 100 and 110; +delete from t3 where c1 between 100 and 110; +delete from t4 where c1 between 100 and 110; +delete from t5 where c1 between 100 and 110; +update t1 set c1 = c1+1 where c1>110; +update t2 set c1 = c1+1 where c1>110; +update t3 set c1 = c1+1 where c1>110; +update t4 set c1 = c1+1 where c1>110; +update t5 set c1 = c1+1 where c1>110; +savepoint a; +insert into t1 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t2 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t3 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t4 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t5 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +savepoint b; +insert into t1 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t2 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t3 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t4 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +insert into t5 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), +repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), +repeat(concat(' 
tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), +now(),(100.55+n+inner_loop)); +savepoint c; +rollback to b; +rollback to a; +commit; +commit; +rollback; +set n = n - 1; +end while; +end| +connect con1,localhost,root,,; +connect con2,localhost,root,,; +#---client 1 : dml operation ---" +connection con1; +#---client 2 : dml operation ---" +connection con2; +# In connection 1 +connection con1; +select count(*) from t1; +count(*) +20 +select count(*) from t2; +count(*) +20 +select count(*) from t3; +count(*) +20 +select count(*) from t4; +count(*) +20 +select count(*) from t5; +count(*) +20 +select c1 from t1; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t2; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t3; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t4; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t5; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +# In connection 2 +connection con2; +select count(*) from t1; +count(*) +20 +select count(*) from t2; +count(*) +20 +select count(*) from t3; +count(*) +20 +select count(*) from t4; +count(*) +20 +select count(*) from t5; +count(*) +20 +select c1 from t1; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t2; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t3; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t4; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +select c1 from t5; +c1 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +# In connection 1 +connection con1; +set autocommit = 0; +insert into t1 values (20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t1 values (20,1,'a','a','a','a','a',now(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +insert into t2 values (20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t2 values (20,1,'a','a','a','a','a',now(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +insert into t3 values (20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t3 values (20,1,'a','a','a','a','a',now(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +insert into t4 values (20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t4 values (20,1,'a','a','a','a','a',now(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +insert into t5 values (20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t5 values (20,1,'a','a','a','a','a',now(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +insert into t1 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert into t2 values (1,1,'a','a','a','a','a',now(),100.55), 
+(20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert into t3 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert into t4 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert into t5 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +set autocommit = 1; +select c1,c2 from t1 where c1 in (20,1); +c1 c2 +20 20 +select c1,c2 from t2 where c1 in (20,1); +c1 c2 +20 20 +select c1,c2 from t3 where c1 in (20,1); +c1 c2 +20 20 +select c1,c2 from t4 where c1 in (20,1); +c1 c2 +20 20 +select c1,c2 from t5 where c1 in (20,1); +c1 c2 +20 20 +replace into t1 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t2 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t3 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t4 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t5 values (20,1,'a','a','a','a','a',now(),100.55); +select c1,c2,c3,c4,c5,c6,c7,c9 from t1 where c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +select c1,c2,c3,c4,c5,c6,c7,c9 from t2 where c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +select c1,c2,c3,c4,c5,c6,c7,c9 from t3 where c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +select c1,c2,c3,c4,c5,c6,c7,c9 from t4 where c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +select c1,c2,c3,c4,c5,c6,c7,c9 from t5 where c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +update ignore t1 set c1 = 20 where c1 = 140 ; +update ignore t2 set c1 = 20 where c1 = 140 ; +update ignore t3 set c1 = 20 where c1 = 140 ; +update ignore t4 set c1 = 20 where c1 = 140 ; +update ignore t5 set c1 = 20 where c1 = 140 ; +select count(*) from t1 where c1 = 140; +count(*) +1 +select count(*) from t2 where c1 = 140; +count(*) +1 +select count(*) from t3 where c1 = 140; +count(*) +1 +select count(*) from t4 where c1 = 140; +count(*) +1 +select count(*) from t5 where c1 = 140; +count(*) +1 +"running select * into outfile from t1 ; +create temporary table temp_1 engine = innodb as select * from t1 where 1=2; +select count(*) from temp_1; +count(*) +0 +"running load data infile into temp_1 ; +select count(*) from temp_1; +count(*) +20 +alter table temp_1 add column c10 int default 99 , +add column c11 varchar(100) default 'test'; +alter table temp_1 add primary key (c1); +insert into temp_1 (c1,c2,c3,c4,c5,c6,c7,c8,c9) values (-1,-1,'a','a','a','a','a',now(),100.55); +select c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 from temp_1 where c1 < 0; +c1 c2 c3 c4 c5 c6 c7 c9 c10 c11 +-1 -1 a a a a a 100.550 99 test +select count(*) from temp_1 where c10 = 99 and c11 like 'test'; +count(*) +21 +insert into temp_1 (c1,c2,c3,c4,c5,c6,c7,c8,c9) values (-1,-1,'a','a','a','a','a',now(),100.55) +on duplicate key update c1=-2,c2=-2; +select c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 from temp_1 where c1 < 0; +c1 c2 c3 c4 c5 c6 c7 c9 c10 c11 +-2 -2 a a a a a 100.550 99 test +drop table t1 ,t2 ,t3,t4,t5,t6,temp_1; +disconnect con1; +connection con2; +drop table t1 ,t2 ,t3,t4,t5,t6; +disconnect con2; +connection default; +drop procedure populate_tables; +create temporary table prep_1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(6000) not null, +c5 blob(6000) not null, +c6 varchar(2000) not null, +c7 varchar(2000) not 
null, +c8 datetime, +c9 decimal(6,3), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +PREPARE stm FROM "insert into prep_1 values(?,?,repeat(concat(' tc3_',?),30),repeat(concat(' tc4_',?),800),repeat(concat(' tc_',?),800),repeat(concat(' tc6_',?),245),repeat(concat(' tc7_',?),245),now(),(100.55+?))"; +set @var = 5; +set @var_static = 5; +EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; +EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; +set @var = @var - 1; +EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; +EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; +set @var = @var - 1; +EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; +EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; +set @var = @var - 1; +EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; +EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; +set @var = @var - 1; +EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; +EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; +set @var = @var - 1; +select c1,left(c3,15) from prep_1 order by c1 ; +c1 left(c3,15) +1 tc3_1 tc3_1 tc +2 tc3_2 tc3_2 tc +3 tc3_3 tc3_3 tc +4 tc3_4 tc3_4 tc +5 tc3_5 tc3_5 tc +5 tc3_5 tc3_5 tc +5 tc3_5 tc3_5 tc +5 tc3_5 tc3_5 tc +5 tc3_5 tc3_5 tc +5 tc3_5 tc3_5 tc +select count(*) from prep_1; +count(*) +10 +PREPARE stm_1 FROM "UPDATE prep_1 SET c1 = c1 + 1"; +EXECUTE stm_1; +EXECUTE stm_1; +select c1,left(c3,15) from prep_1 order by c1 ; +c1 left(c3,15) +3 tc3_1 tc3_1 tc +4 tc3_2 tc3_2 tc +5 tc3_3 tc3_3 tc +6 tc3_4 tc3_4 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +select count(*) from prep_1; +count(*) +10 +PREPARE stm_2 FROM "DELETE FROM prep_1 ORDER BY c1 LIMIT 1"; +EXECUTE stm_2; +EXECUTE stm_2; +select c1,left(c3,15) from prep_1 order by c1 ; +c1 left(c3,15) +5 tc3_3 tc3_3 tc +6 tc3_4 tc3_4 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +7 tc3_5 tc3_5 tc +select count(*) from prep_1; +count(*) +8 +drop prepare stm; +drop prepare stm_1; +drop prepare stm_2; +drop table prep_1; diff --git a/mysql-test/suite/innodb_zip/r/wl6501_1.result b/mysql-test/suite/innodb_zip/r/wl6501_1.result new file mode 100644 index 00000000000..4337275d50b --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6501_1.result @@ -0,0 +1,1150 @@ +set global innodb_file_per_table=on; +# Verify that 'TRUNCATE TABLE' statement works fine and the size +# of .ibd file is equal to the initial size after truncation. 
+drop table if exists t1,t2,t3,t4,t6; +Warnings: +Note 1051 Unknown table 'test.t1' +Note 1051 Unknown table 'test.t2' +Note 1051 Unknown table 'test.t3' +Note 1051 Unknown table 'test.t4' +Note 1051 Unknown table 'test.t6' +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_during_drop_index_temp_table"; +"---debug ib_trunc_crash_during_drop_index_temp_table point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t5; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; +"---debug ib_trunc_crash_on_drop_of_sec_index point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; +"---debug ib_trunc_crash_on_drop_of_sec_index point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart 
the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; +"---debug ib_trunc_crash_on_drop_of_sec_index point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; +"---debug ib_trunc_crash_on_drop_of_sec_index point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +"---debug ib_trunc_crash_drop_reinit_done_create_to_start---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query 
+truncate table t5; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; +"---debug ib_trunc_crash_on_create_of_sec_index---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; +"---debug ib_trunc_crash_on_create_of_sec_index---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; +"---debug ib_trunc_crash_on_create_of_sec_index---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; +"---debug ib_trunc_crash_on_create_of_sec_index---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key 
(c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_before_log_removal"; +"---debug ib_trunc_crash_before_log_removal point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_before_log_removal"; +"---debug ib_trunc_crash_before_log_removal point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_before_log_removal"; +"---debug ib_trunc_crash_before_log_removal point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_before_log_removal"; +"---debug ib_trunc_crash_before_log_removal point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 
decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl 
expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug 
ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_truncate_done"; +"---debug ib_trunc_crash_after_truncate_done point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; +create table t1(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=redundant; +create table t2(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compact; +create table t3(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=compressed key_block_size=4; +create table t4(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb row_format=dynamic; +create temporary table t5(c1 int not null, +c2 int not null, +c3 char(255) not null, +c4 text(500) not null, +c5 blob(500) not null, +c6 varchar(500) not null, +c7 varchar(500) not null, +c8 datetime, +c9 decimal(5,3), +primary key (c1), +index (c3,c4(50),c5(50)), +index (c2)) +engine=innodb; +create table t6 ( a int ) engine = innodb; +insert into t6 values (50),(100),(150); +select count(*) from t1; +count(*) +3 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +count(*) +3 +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; +"---debug ib_trunc_crash_after_redo_log_write_complete point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t1; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +3 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from 
t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; +"---debug ib_trunc_crash_after_redo_log_write_complete point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t2; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +3 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; +"---debug ib_trunc_crash_after_redo_log_write_complete point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t3; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +3 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; +"---debug ib_trunc_crash_after_redo_log_write_complete point---" +# Write file to make mysql-test-run.pl expect crash and restart +# Run the crashing query +truncate table t4; +ERROR HY000: Lost connection to MySQL server during query +# Restart the MySQL server +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +select count(*) from t4; +count(*) +0 +select count(*) from t5; +ERROR 42S02: Table 'test.t5' doesn't exist +select count(*) from t6; +count(*) +3 +drop table t1, t2, t3, t4, t6; diff --git a/mysql-test/suite/innodb_zip/r/wl6501_crash_3.result b/mysql-test/suite/innodb_zip/r/wl6501_crash_3.result new file mode 100644 index 00000000000..23acb33adca --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6501_crash_3.result @@ -0,0 +1,489 @@ +call mtr.add_suppression("The file '.*' already exists though the corresponding table did not exist in the InnoDB data dictionary"); +call mtr.add_suppression("Cannot create file '.*'"); +call mtr.add_suppression("InnoDB: Error number 17 means 'File exists'"); +set global innodb_file_per_table = on; +"1. Hit crash point while writing redo log." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine=innodb row_format=compressed +key_block_size=16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_while_writing_redo_log"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"2. Hit crash point on completion of redo log write." 
+use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_redo_log_write_complete"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"3. Hit crash point while dropping indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"4. Hit crash point on completing drop of all indexes before creation" +" of index is commenced." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"5. Hit crash point while creating indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"6. Hit crash point after data is updated to system-table and" +" in-memory dict." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_updating_dict_sys_info"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"7. Hit crash point before/after log checkpoint is done." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_before_log_removal"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 16; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_truncate_done"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; diff --git a/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result b/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result new file mode 100644 index 00000000000..cb8a4d5a157 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6501_crash_4.result @@ -0,0 +1,553 @@ +call mtr.add_suppression("The file '.*' already exists though the corresponding table did not exist in the InnoDB data dictionary"); +call mtr.add_suppression("Cannot create file '.*'"); +call mtr.add_suppression("InnoDB: Error number 17 means 'File exists'"); +set global innodb_file_per_table = on; +"1. Hit crash point while writing redo log." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine=innodb row_format=compressed +key_block_size=4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_while_writing_redo_log"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"2. Hit crash point on completion of redo log write." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_redo_log_write_complete"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"3. Hit crash point while dropping indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"4. Hit crash point on completing drop of all indexes before creation" +" of index is commenced." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"5. Hit crash point while creating indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"6. Hit crash point after data is updated to system-table and" +" in-memory dict." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_updating_dict_sys_info"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"7. Hit crash point before/after log checkpoint is done." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_before_log_removal"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_truncate_done"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; +call mtr.add_suppression("does not exist in the InnoDB internal"); +set global innodb_file_per_table = on; +"1. Hit crash point on completing drop of all indexes before creation" +" of index is commenced." 
+set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set innodb_strict_mode=off; +create temporary table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check Error Table 'test.t' doesn't exist +test.t check status Operation failed +"2. Hit crash point after data is updated to system-table and" +" in-memory dict." +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set innodb_strict_mode=off; +create temporary table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 4; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_updating_dict_sys_info"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check Error Table 'test.t' doesn't exist +test.t check status Operation failed +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; diff --git a/mysql-test/suite/innodb_zip/r/wl6501_crash_5.result b/mysql-test/suite/innodb_zip/r/wl6501_crash_5.result new file mode 100644 index 00000000000..74f1e9dd1ad --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6501_crash_5.result @@ -0,0 +1,489 @@ +call mtr.add_suppression("The file '.*' already exists though the corresponding table did not exist in the InnoDB data dictionary"); +call mtr.add_suppression("Cannot create file '.*'"); +call mtr.add_suppression("InnoDB: Error number 17 means 'File exists'"); +set global innodb_file_per_table = on; +"1. Hit crash point while writing redo log." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine=innodb row_format=compressed +key_block_size=8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_while_writing_redo_log"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"2. Hit crash point on completion of redo log write." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_redo_log_write_complete"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"3. Hit crash point while dropping indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_drop_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"4. Hit crash point on completing drop of all indexes before creation" +" of index is commenced." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_drop_reinit_done_create_to_start"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"5. Hit crash point while creating indexes." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_clust_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_uniq_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_create_of_sec_index"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"6. Hit crash point after data is updated to system-table and" +" in-memory dict." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_on_updating_dict_sys_info"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +"7. Hit crash point before/after log checkpoint is done." +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_before_log_removal"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +use test; +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +SET innodb_strict_mode=OFF; +create table t ( +i int, f float, c char, +primary key pk(i), unique findex(f), index ck(c)) +engine = innodb row_format = compressed +key_block_size = 8; +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +check table t; +Table Op Msg_type Msg_text +test.t check status OK +set session debug = "+d,ib_trunc_crash_after_truncate_done"; +truncate table t; +ERROR HY000: Lost connection to MySQL server during query +# restart +check table t; +Table Op Msg_type Msg_text +test.t check status OK +select * from t; +i f c +insert into t values (1, 1.1, 'a'), (2, 2.2, 'b'), (3, 3.3, 'c'); +select * from t; +i f c +1 1.1 a +2 2.2 b +3 3.3 c +select * from t where f < 2.5; +i f c +1 1.1 a +2 2.2 b +drop table t; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; diff --git a/mysql-test/suite/innodb_zip/r/wl6501_scale_1.result b/mysql-test/suite/innodb_zip/r/wl6501_scale_1.result new file mode 100644 index 00000000000..9c197737137 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6501_scale_1.result @@ -0,0 +1,354 @@ +set innodb_strict_mode=OFF; +create procedure populate() +begin +declare i int default 1; +while (i <= 5000) do +insert into t1 values (i, 'a', 'b'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_small() +begin +declare i int default 10001; +while (i <= 12000) do +insert into t1 values (i, 'c', 'd'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Antelope'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create table tNUMBER +(i int, cNUMBER char(NUMBER), cNUMBER char(NUMBER), +index cNUMBER_idx(cNUMBER)) +engine=innodb row_format=compact +key_block_size=NUMBER; +Warnings: +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER. +create table t2 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compact +key_block_size=16; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +create temporary table t3 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compact +key_block_size=16; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +begin; +call populate(); +commit; +select count(*) from t1; +count(*) +5000 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +truncate table t1; +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +call populate_small(); +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +7000 +select count(*) from t3; +count(*) +7000 +truncate table t2; +truncate table t3; +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +call populate_small(); +select count(*) from t1; +count(*) +4000 +select count(*) from t2; +count(*) +2000 +select count(*) from t3; +count(*) +2000 +drop table t1; +drop table t2; +drop table t3; +drop procedure populate; +drop procedure populate_small; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; +set innodb_strict_mode=OFF; +create procedure populate() +begin +declare i int default 1; +while (i <= 5000) do +insert into t1 values (i, 'a', 'b'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_small() +begin +declare i int default 10001; +while (i <= 12000) do +insert into t1 values (i, 'c', 'd'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +set global innodb_file_per_table = 1; +set global innodb_file_format = 'Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create table tNUMBER +(i int, cNUMBER char(NUMBER), cNUMBER char(NUMBER), +index cNUMBER_idx(cNUMBER)) +engine=innodb row_format=compressed +key_block_size=NUMBER; +create table t2 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compressed +key_block_size=16; +create temporary table t3 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compressed +key_block_size=16; +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +begin; +call populate(); +commit; +select count(*) from t1; +count(*) +5000 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +truncate table t1; +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +call populate_small(); +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +7000 +select count(*) from t3; +count(*) +7000 +truncate table t2; +truncate table t3; +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +call populate_small(); +select count(*) from t1; +count(*) +4000 +select count(*) from t2; +count(*) +2000 +select count(*) from t3; +count(*) +2000 +drop table t1; +drop table t2; +drop table t3; +drop procedure populate; +drop procedure populate_small; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; +set innodb_strict_mode=OFF; +create procedure populate() +begin +declare i int default 1; +while (i <= 5000) do +insert into t1 values (i, 'a', 'b'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +create procedure populate_small() +begin +declare i int default 10001; +while (i <= 12000) do +insert into t1 values (i, 'c', 'd'); +insert into t2 values (i, 'a', 'b'); +insert into t3 values (i, 'a', 'b'); +set i = i + 1; +end while; +end| +set global innodb_file_per_table = 0; +set global innodb_file_format = 'Antelope'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +create table tNUMBER +(i int, cNUMBER char(NUMBER), cNUMBER char(NUMBER), +index cNUMBER_idx(cNUMBER)) +engine=innodb row_format=compact +key_block_size=NUMBER; +Warnings: +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER. +create table t2 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compact +key_block_size=16; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +create temporary table t3 +(i int, c1 char(100), c2 char(100), +index c1_idx(c1)) +engine=innodb row_format=compact +key_block_size=16; +Warnings: +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16. +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +begin; +call populate(); +commit; +select count(*) from t1; +count(*) +5000 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +truncate table t1; +select count(*) from t1; +count(*) +0 +select count(*) from t2; +count(*) +5000 +select count(*) from t3; +count(*) +5000 +call populate_small(); +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +7000 +select count(*) from t3; +count(*) +7000 +truncate table t2; +truncate table t3; +select count(*) from t1; +count(*) +2000 +select count(*) from t2; +count(*) +0 +select count(*) from t3; +count(*) +0 +call populate_small(); +select count(*) from t1; +count(*) +4000 +select count(*) from t2; +count(*) +2000 +select count(*) from t3; +count(*) +2000 +drop table t1; +drop table t2; +drop table t3; +drop procedure populate; +drop procedure populate_small; +set global innodb_file_format = Barracuda; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html +set global innodb_file_per_table = 1; diff --git a/mysql-test/suite/innodb_zip/r/wl6560.result b/mysql-test/suite/innodb_zip/r/wl6560.result new file mode 100644 index 00000000000..bf46d8a41a0 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6560.result @@ -0,0 +1,418 @@ +set global innodb_file_per_table = off; +# files in MYSQL_DATA_DIR +ibtmp1 +select @@global.innodb_file_per_table; +@@global.innodb_file_per_table +0 +create temporary table t1 (i int, f float, c char(100)) engine=innodb; +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +select * from t1 where i = 98; +i f c +98 1.3 jaipur +select * from t1 where i < 100; +i f c +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +explain select * from t1 where f > 1.29999; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 33.33 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999) +alter table t1 add index sec_index(f); +explain select * from t1 where f > 1.29999; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL sec_index NULL NULL NULL 5 60.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999) +select * from t1 where f > 1.29999; +i f c +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +explain select * from t1 where i = 100; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 20.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`i` = 100) +alter table t1 add unique index pri_index(i); +explain select * from t1 where i = 100; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL const pri_index pri_index 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select '100' AS `i`,'1.1' AS `f`,'pune' AS `c` from `test`.`t1` where 1 +select * from t1 where i = 100; +i f c +100 1.1 pune +delete from t1 where i < 97; +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +insert into t1 values (96, 1.5, 'kolkata'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 kolkata +update t1 set f = 1.44 where c = 'delhi'; +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.44 delhi +96 1.5 kolkata +truncate table t1; +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +alter table t1 discard tablespace; +ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table +alter table t1 import tablespace; 
+ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table +drop table t1; +#files in MYSQL_TMP_DIR +set global innodb_file_per_table = 1; +select @@global.innodb_file_per_table; +@@global.innodb_file_per_table +1 +create temporary table t1 +(i int, f float, c char(100)) engine = innodb key_block_size = 4; +show create table t1; +Table Create Table +t1 CREATE TEMPORARY TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `f` float DEFAULT NULL, + `c` char(100) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=4 +#files in MYSQL_TMP_DIR +#sql.ibd +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +select * from t1 where i = 98; +i f c +98 1.3 jaipur +select * from t1 where i < 100; +i f c +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +explain select * from t1 where f > 1.29999; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 33.33 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999) +alter table t1 add index sec_index(f); +explain select * from t1 where f > 1.29999; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL sec_index NULL NULL NULL 5 60.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999) +select * from t1 where f > 1.29999; +i f c +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +explain select * from t1 where i = 100; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 20.00 Using where +Warnings: +Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`i` = 100) +alter table t1 add unique index pri_index(i); +explain select * from t1 where i = 100; +id select_type table partitions type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 NULL const pri_index pri_index 5 const 1 100.00 NULL +Warnings: +Note 1003 /* select#1 */ select '100' AS `i`,'1.1' AS `f`,'pune' AS `c` from `test`.`t1` where 1 +select * from t1 where i = 100; +i f c +100 1.1 pune +delete from t1 where i < 97; +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +insert into t1 values (96, 1.5, 'kolkata'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 kolkata +update t1 set f = 1.44 where c = 'delhi'; +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.44 delhi +96 1.5 kolkata +truncate table t1; +insert into t1 values (100, 1.1, 'pune'); +insert into t1 values (99, 1.2, 'mumbai'); +insert into t1 values (98, 1.3, 'jaipur'); +insert into t1 values (97, 1.4, 'delhi'); +insert into t1 values (96, 1.5, 'ahmedabad'); +select * from t1; +i f c +100 1.1 pune +99 1.2 mumbai +98 1.3 jaipur +97 1.4 delhi +96 1.5 ahmedabad +alter table t1 discard tablespace; +ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table +drop table t1; +set 
global innodb_file_per_table = off; +create temporary table t1 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc)) engine = innodb; +CREATE PROCEDURE populate_t1() +BEGIN +DECLARE i INT DEFAULT 1; +while (i <= 20000) DO +insert into t1 values (i, 'a', 'b'); +SET i = i + 1; +END WHILE; +END| +set autocommit=0; +select count(*) from t1; +count(*) +0 +call populate_t1(); +select count(*) from t1; +count(*) +20000 +select * from t1 limit 10; +keyc c1 c2 +1 a b +2 a b +3 a b +4 a b +5 a b +6 a b +7 a b +8 a b +9 a b +10 a b +set autocommit=1; +truncate table t1; +select count(*) from t1; +count(*) +0 +drop procedure populate_t1; +drop table t1; +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; +insert into t1 values (1, 'c', 'b'); +select * from t1; +keyc c1 c2 +1 c b +# restart +# files in MYSQL_DATA_DIR +ibtmp1 +use test; +select * from t1; +ERROR 42S02: Table 'test.t1' doesn't exist +"testing temp-table creation in --innodb_read_only mode" +# restart: --innodb-read-only +use test; +show tables; +Tables_in_test +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; +ERROR HY000: InnoDB is in read only mode. +"testing system and temp tablespace name conflict" +"restarting server in normal mode" +# restart +show tables; +Tables_in_test +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; +drop table t1; +# test condition of full-temp-tablespace +# restart: --innodb_temp_data_file_path=ibtmp1:12M +create temporary table t1 +(keyc int, c1 char(100), c2 char(100), +primary key(keyc)) engine = innodb; +CREATE PROCEDURE populate_t1() +BEGIN +DECLARE i INT DEFAULT 1; +while (i <= 20000) DO +insert into t1 values (i, 'a', 'b'); +SET i = i + 1; +END WHILE; +END| +set autocommit=0; +select count(*) from t1; +count(*) +0 +call populate_t1(); +ERROR HY000: The table 't1' is full +drop procedure populate_t1; +drop table t1; +set innodb_strict_mode = off; +set global innodb_file_per_table = 0; +set global innodb_file_format = 'Antelope'; +create temporary table t ( +i int) +engine = innodb row_format = compressed; +show warnings; +Level Code Message +Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. +Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC. +drop table t; +create temporary table t ( +i int) +engine = innodb row_format = compressed key_block_size = 8; +show warnings; +Level Code Message +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table. +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER. +Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table. +Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC. +drop table t; +set global innodb_file_per_table = 1; +create temporary table t ( +i int) +engine = innodb row_format = compressed key_block_size = 8; +show warnings; +Level Code Message +Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope. +Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER. +Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope. +Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC. 
+drop table t; +create temporary table t ( +i int) +engine = innodb row_format = dynamic; +show warnings; +Level Code Message +#files in MYSQL_TMP_DIR +drop table t; +set innodb_strict_mode = on; +create temporary table t ( +i int) +engine = innodb row_format = dynamic; +drop table t; +set global innodb_file_format = 'Barracuda'; +set innodb_strict_mode = off; +create temporary table t ( +i int) +engine = innodb row_format = compressed key_block_size = 8; +set innodb_strict_mode = default; +#files in MYSQL_TMP_DIR +#sql.ibd +drop table t; +create temporary table t ( +i int) +engine = innodb row_format = compressed; +show warnings; +Level Code Message +#files in MYSQL_TMP_DIR +#sql.ibd +drop table t; +create temporary table t ( +i int) +engine = innodb row_format = dynamic; +show warnings; +Level Code Message +#files in MYSQL_TMP_DIR +drop table t; +set innodb_strict_mode = on; +create temporary table t ( +i int) +engine = innodb row_format = dynamic; +show warnings; +Level Code Message +drop table t; +set innodb_strict_mode = off; +#files in MYSQL_TMP_DIR +create temporary table t ( +i int) +engine = innodb row_format = dynamic key_block_size = 4; +show warnings; +Level Code Message +Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER unless ROW_FORMAT=COMPRESSED. +#files in MYSQL_TMP_DIR +#sql.ibd +drop table t; +create temporary table t ( +i int) +engine = innodb row_format = compact; +show warnings; +Level Code Message +#files in MYSQL_TMP_DIR +drop table t; +create temporary table t ( +i int) +engine = innodb key_block_size = 4; +show warnings; +Level Code Message +#files in MYSQL_TMP_DIR +#sql.ibd +drop table t; +"testing temp tablespace non-support for raw device" +"testing temp tablespace non-support for raw device" +# restart +show tables; +Tables_in_test +create temporary table t1 ( +keyc int, c1 char(100), c2 char(100) +) engine = innodb; +drop table t1; +"try starting server with temp-tablespace size < min. threshold" +"try starting server with sys-tablespace size < min. 
threshold" +# restart +show tables; +Tables_in_test +create temporary table t1 ( +keyc int, c1 char(100), c2 char(100) +) engine = innodb; +drop table t1; +"try starting server with no file specified for temp-tablespace" +# restart +show tables; +Tables_in_test +create temporary table t1 ( +keyc int, c1 char(100), c2 char(100) +) engine = innodb; +drop table t1; diff --git a/mysql-test/suite/innodb_zip/r/wl6915_1.result b/mysql-test/suite/innodb_zip/r/wl6915_1.result new file mode 100644 index 00000000000..0ffc2f43265 --- /dev/null +++ b/mysql-test/suite/innodb_zip/r/wl6915_1.result @@ -0,0 +1,2060 @@ +call mtr.ADD_suppression(".*Resizing redo log.*"); +call mtr.ADD_suppression(".*Starting to delete and rewrite log files.*"); +call mtr.ADD_suppression(".*New log files created.*"); +SELECT @@global.innodb_undo_tablespaces; +@@global.innodb_undo_tablespaces +0 +CREATE PROCEDURE populate_tables(IN id VARCHAR(10)) +begin +declare n int default 20; +set global innodb_file_per_table=on; +DROP TABLE IF EXISTS t1,t2,t3,t4; +CREATE TEMPORARY TABLE t1_temp(c1 int NOT NULL, +c2 int NOT NULL, +c3 char(255) NOT NULL, +c4 text(600) NOT NULL, +c5 blob(600) NOT NULL, +c6 varchar(600) NOT NULL, +c7 varchar(600) NOT NULL, +c8 datetime, +c9 decimal(6,3), +PRIMARY KEY (c1), +INDEX (c3,c4(50),c5(50)), +INDEX (c2)) +ENGINE=InnoDB ROW_FORMAT=redundant; +set @s = concat("CREATE TABLE t1",id," ( c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=redundant;"); +PREPARE createTable FROM @s; +EXECUTE createTable; +DEALLOCATE PREPARE createTable; +CREATE TEMPORARY TABLE t2_temp(c1 int NOT NULL, +c2 int NOT NULL, +c3 char(255) NOT NULL, +c4 text(600) NOT NULL, +c5 blob(600) NOT NULL, +c6 varchar(600) NOT NULL, +c7 varchar(600) NOT NULL, +c8 datetime, +c9 decimal(6,3), +PRIMARY KEY (c1), +INDEX (c3,c4(50),c5(50)), +INDEX (c2)) +ENGINE=InnoDB ROW_FORMAT=compact; +set @s = concat("CREATE TABLE t2",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=compact;"); +PREPARE createTable FROM @s; +EXECUTE createTable; +DEALLOCATE PREPARE createTable; +CREATE TEMPORARY TABLE t3_temp(c1 int NOT NULL, +c2 int NOT NULL, +c3 char(255) NOT NULL, +c4 text(600) NOT NULL, +c5 blob(600) NOT NULL, +c6 varchar(600) NOT NULL, +c7 varchar(600) NOT NULL, +c8 datetime, +c9 decimal(6,3), +PRIMARY KEY (c1), +INDEX (c3,c4(50),c5(50)), +INDEX (c2)) +ENGINE=InnoDB ROW_FORMAT=compressed key_block_size=4; +set @s = concat("CREATE TABLE t3",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=compressed key_block_size=4;"); +PREPARE createTable FROM @s; +EXECUTE createTable; +DEALLOCATE PREPARE createTable; +CREATE TEMPORARY TABLE t4_temp(c1 int NOT NULL, +c2 int NOT NULL, +c3 char(255) NOT NULL, +c4 text(600) NOT NULL, +c5 blob(600) NOT NULL, +c6 varchar(600) NOT NULL, +c7 varchar(600) NOT NULL, +c8 datetime, +c9 decimal(6,3), +PRIMARY KEY (c1), +INDEX (c3,c4(50),c5(50)), +INDEX (c2)) +ENGINE=InnoDB 
ROW_FORMAT=dynamic; +set @s = concat("CREATE TABLE t4",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=dynamic;"); +PREPARE createTable FROM @s; +EXECUTE createTable; +DEALLOCATE PREPARE createTable; +while (n > 0) do +START TRANSACTION; +set @s = concat("INSERT INTO t1",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t1_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), +REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), +REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), +NOW(),(100.55+n)); +set @s = concat("INSERT INTO t2",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t2_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), +REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), +REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), +NOW(),(100.55+n)); +savepoint a; +set @s = concat("INSERT INTO t3",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t3_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), +REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), +REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), +NOW(),(100.55+n)); +savepoint b; +set @s = concat("INSERT INTO t4",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t4_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), +REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), +REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), +NOW(),(100.55+n)); +if (n > 10) then +if (n > 10 and n <=12) then +ROLLBACK TO SAVEPOINT a; +COMMIT; +end if; +if (n > 12 and n < 15) then +ROLLBACK TO SAVEPOINT b; +COMMIT; +end if; +if (n > 15) then +COMMIT; +end if; +else +if (n > 5) then +START TRANSACTION; +DELETE FROM t1_temp WHERE c1 > 10 ; +DELETE FROM t2_temp WHERE c1 > 10 ; +DELETE FROM t3_temp WHERE c1 > 10 ; +DELETE FROM t4_temp WHERE c1 > 10 ; +rollback; +START TRANSACTION; +update t1_temp set c1 = c1 + 1000 WHERE c1 > 10; +update t2_temp set c1 = c1 + 1000 WHERE c1 > 10; +update t3_temp set c1 = c1 + 1000 WHERE c1 > 10; +update t4_temp set c1 = c1 + 1000 WHERE c1 > 10; +rollback; +end if; +end if; +if (n < 5) then +rollback; +end if; +FLUSH logs; +ALTER TABLE t1_temp DROP PRIMARY KEY; +ALTER TABLE t1_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); +ALTER TABLE t2_temp DROP PRIMARY KEY; +ALTER TABLE t2_temp ADD PRIMARY 
KEY (c1,c3(10),c4(10)); +ALTER TABLE t3_temp DROP PRIMARY KEY; +ALTER TABLE t3_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); +ALTER TABLE t4_temp DROP PRIMARY KEY; +ALTER TABLE t4_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); +FLUSH tables; +START TRANSACTION; +set @s = concat("INSERT INTO t1",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t1_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t2",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t2_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t3",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t3_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t4",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t4_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +DELETE FROM t1_temp WHERE c1 between 100 and 110; +DELETE FROM t2_temp WHERE c1 between 100 and 110; +DELETE FROM t3_temp WHERE c1 between 100 and 110; +DELETE FROM t4_temp WHERE c1 between 100 and 110; +update t1_temp set c1 = c1+1 WHERE c1>110; +update t2_temp set c1 = c1+1 WHERE c1>110; +update t3_temp set c1 = c1+1 WHERE c1>110; +update t4_temp set c1 = c1+1 WHERE c1>110; +savepoint a; +set @s = concat("INSERT INTO t1",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t1_temp VALUES(300+n+100,n+100,REPEAT(concat(' 
tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t2",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t2_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t3",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t3_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t4",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t4_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +savepoint b; +set @s = concat("INSERT INTO t1",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t1_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t2",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t2_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t3",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), 
NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t3_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +set @s = concat("INSERT INTO t4",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); +PREPARE insertIntoTable FROM @s; +EXECUTE insertIntoTable; +DEALLOCATE PREPARE insertIntoTable; +INSERT INTO t4_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), +REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), +REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), +NOW(),(100.55+n+100)); +savepoint c; +rollback to b; +rollback to a; +COMMIT; +COMMIT; +rollback; +set n = n - 1; +end while; +end| +#---client 1 : dml operation ---" +#---client 2 : dml operation ---" +# In connection 1 +SELECT count(*) FROM t1_1; +count(*) +36 +SELECT count(*) FROM t2_1; +count(*) +36 +SELECT count(*) FROM t3_1; +count(*) +34 +SELECT count(*) FROM t4_1; +count(*) +32 +SELECT c1 FROM t1_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_1; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_1; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +# In connection 2 +SELECT count(*) FROM t1_2; +count(*) +36 +SELECT count(*) FROM t2_2; +count(*) +36 +SELECT count(*) FROM t3_2; +count(*) +34 +SELECT count(*) FROM t4_2; +count(*) +32 +SELECT c1 FROM t1_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_2; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_2; +c1 +5 +6 +7 +8 +9 +10 
+15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +# In connection 1 +set AUTOCOMMIT = 0; +ALTER TABLE t1_temp DROP PRIMARY KEY; +ALTER TABLE t1_temp ADD PRIMARY KEY (c1); +ALTER TABLE t2_temp DROP PRIMARY KEY; +ALTER TABLE t2_temp ADD PRIMARY KEY (c1); +ALTER TABLE t3_temp DROP PRIMARY KEY; +ALTER TABLE t3_temp ADD PRIMARY KEY (c1); +ALTER TABLE t4_temp DROP PRIMARY KEY; +ALTER TABLE t4_temp ADD PRIMARY KEY (c1); +INSERT INTO t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +insert ignore into t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t1_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t2_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t3_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +INSERT INTO t4_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +ERROR 23000: Duplicate entry '20' for key 'PRIMARY' +set AUTOCOMMIT = 1; +SELECT c1,c2 FROM t1_temp WHERE c1 in (20,1); +c1 c2 +20 20 +SELECT c1,c2 FROM t2_temp WHERE c1 in (20,1); +c1 c2 +20 20 +SELECT c1,c2 FROM t3_temp WHERE c1 in (20,1); +c1 c2 +20 20 +SELECT c1,c2 FROM t4_temp WHERE c1 in (20,1); +c1 c2 +20 20 +REPLACE INTO t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t1_temp WHERE c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 
+SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t2_temp WHERE c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t3_temp WHERE c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t4_temp WHERE c1 = 20; +c1 c2 c3 c4 c5 c6 c7 c9 +20 1 a a a a a 100.550 +update ignore t1_temp set c1 = 20 WHERE c1 = 140 ; +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +update ignore t2_temp set c1 = 20 WHERE c1 = 140 ; +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +update ignore t3_temp set c1 = 20 WHERE c1 = 140 ; +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +update ignore t4_temp set c1 = 20 WHERE c1 = 140 ; +Warnings: +Warning 1062 Duplicate entry '20' for key 'PRIMARY' +SELECT count(*) FROM t1_temp WHERE c1 = 140; +count(*) +1 +SELECT count(*) FROM t2_temp WHERE c1 = 140; +count(*) +1 +SELECT count(*) FROM t3_temp WHERE c1 = 140; +count(*) +1 +SELECT count(*) FROM t4_temp WHERE c1 = 140; +count(*) +1 +ALTER TABLE t1_temp ADD COLUMN c10 int default 99 , +ADD COLUMN c11 varchar(100) default 'test'; +ALTER TABLE t1_temp DROP PRIMARY KEY; +ALTER TABLE t1_temp ADD PRIMARY KEY (c1); +INSERT INTO t1_temp (c1,c2,c3,c4,c5,c6,c7,c8,c9) VALUES (-1,-1,'a','a','a','a','a',NOW(),100.55); +SELECT c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 FROM t1_temp WHERE c1 < 0; +c1 c2 c3 c4 c5 c6 c7 c9 c10 c11 +-1 -1 a a a a a 100.550 99 test +SELECT count(*) FROM t1_temp WHERE c10 = 99 and c11 like 'test'; +count(*) +27 +INSERT INTO t1_temp (c1,c2,c3,c4,c5,c6,c7,c8,c9) VALUES (-1,-1,'a','a','a','a','a',NOW(),100.55) +ON DUPLICATE KEY UPDATE c1=-2,c2=-2; +SELECT c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 FROM t1_temp WHERE c1 < 0; +c1 c2 c3 c4 c5 c6 c7 c9 c10 c11 +-2 -2 a a a a a 100.550 99 test +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +# restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=20 --innodb_undo_logs=20 --innodb_log_files_in_group=4 +call populate_tables('_1');; +call populate_tables('_2');; +"#connection 1 - verify tables" +SELECT count(*) FROM t1_1; +count(*) +36 +SELECT count(*) FROM t2_1; +count(*) +36 +SELECT count(*) FROM t3_1; +count(*) +34 +SELECT count(*) FROM t4_1; +count(*) +32 +SELECT c1 FROM t1_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_1; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_1; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 
FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +"#connection 2 - verify tables" +SELECT count(*) FROM t1_2; +count(*) +36 +SELECT count(*) FROM t2_2; +count(*) +36 +SELECT count(*) FROM t3_2; +count(*) +34 +SELECT count(*) FROM t4_2; +count(*) +32 +SELECT c1 FROM t1_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_2; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_2; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +# restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=30 --innodb_undo_logs=20 --innodb_log_files_in_group=4 +call populate_tables('_1');; +call populate_tables('_2');; +"#connection 1 - verify tables" +SELECT count(*) FROM t1_1; +count(*) +36 +SELECT count(*) FROM t2_1; +count(*) +36 +SELECT count(*) FROM t3_1; +count(*) +34 +SELECT count(*) FROM t4_1; +count(*) +32 +SELECT c1 FROM t1_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_1; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_1; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_1; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 
+19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +"#connection 2 - verify tables" +SELECT count(*) FROM t1_2; +count(*) +36 +SELECT count(*) FROM t2_2; +count(*) +36 +SELECT count(*) FROM t3_2; +count(*) +34 +SELECT count(*) FROM t4_2; +count(*) +32 +SELECT c1 FROM t1_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t2_2; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t3_2; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT c1 FROM t4_2; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +SELECT count(*) FROM t1_temp; +count(*) +26 +SELECT count(*) FROM t2_temp; +count(*) +26 +SELECT count(*) FROM t3_temp; +count(*) +24 +SELECT count(*) FROM t4_temp; +count(*) +22 +SELECT c1 FROM t1_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t2_temp; +c1 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t3_temp; +c1 +5 +6 +7 +8 +9 +10 +13 +14 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +SELECT c1 FROM t4_temp; +c1 +5 +6 +7 +8 +9 +10 +15 +16 +17 +18 +19 +20 +122 +124 +126 +128 +130 +132 +134 +136 +138 +140 +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +DROP PROCEDURE populate_tables; diff --git a/mysql-test/suite/innodb_zip/t/16k.test b/mysql-test/suite/innodb_zip/t/16k.test new file mode 100644 index 00000000000..274b0b8e1bb --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/16k.test @@ -0,0 +1,715 @@ +# Tests for setting innodb-page-size=16k; default value +--source include/big_test.inc +--source include/have_innodb.inc +--source include/have_innodb_16k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +let $MYSQLD_DATADIR = `select @@datadir`; +let $INNODB_PAGE_SIZE = `select @@innodb_page_size`; + +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +# These values can change during the test +--enable_query_log + +--echo # Test 1) Show the page size from Information Schema +--disable_warnings +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_page_size'; +--enable_warnings + +--echo # Test 2) The number of buffer pool pages is dependent upon the page size. +--disable_warnings +--replace_result 1535 {checked_valid} 1536 {checked_valid} +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +--enable_warnings + +--echo # Test 3) Query some information_shema tables that are dependent upon +--echo # the page size. +# Show the metadata for tables in schema 'mysql'. +# Pulled from innodb-system-table-view.test +# The IDs of mysql.innodb_table_stats and mysql.innodb_index_stats are +# unpredictable. They depend on whether mtr has created the database for +# this test from scratch or is using a previously created database where +# those tables have been dropped and recreated. 
Since we cannot force mtr +# to use a freshly created database for this test we do not return the +# table or index IDs. We can return the space IS of mysql schema tables +# since they are created consistently during bootstrap. +SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; + +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; + +# Show the metadata for tables in schema 'test'. +# Do not return the space ID since this tablespace may have existed before +# this test runs. The root page number of each index should be consistent +# within a file-per-table tablespace. +SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +--source suite/innodb/include/show_i_s_tablespaces.inc +DROP TABLE t1, t2, t3, t4; + +--echo # Test 4) The maximum row size is dependent upon the page size. +--echo # Redundant: 8123, Compact: 8126. +--echo # Compressed: 8126, Dynamic: 8126. +--echo # Each row format has its own amount of overhead that +--echo # varies depending on number of fields and other overhead. + +SET SESSION innodb_strict_mode = ON; + +# Redundant table; 8011 bytes with 40 char fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(211) +) ROW_FORMAT=redundant; +DROP TABLE t1; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(212) +) ROW_FORMAT=redundant; + +# Compact table; 8096 bytes with 40 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 
char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(246) +) ROW_FORMAT=compact; +DROP TABLE t1; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(247) +) ROW_FORMAT=compact; + +# Compressed table; 7959 bytes with 40 CHAR fields +# Bug#13391353 Limit is 7957 on 32-Linux only +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(157) +) ROW_FORMAT=compressed; +DROP TABLE t1; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(200), c40 char(160) +) ROW_FORMAT=compressed; + +# Dynamic table; 8096 bytes with 40 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(246) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 
char(200), +c21 char(200), c22 char(200), c23 char(200), c24 char(200), c25 char(200), +c26 char(200), c27 char(200), c28 char(200), c29 char(200), c30 char(200), +c31 char(200), c32 char(200), c33 char(200), c34 char(200), c35 char(200), +c36 char(200), c37 char(200), c38 char(200), c39 char(250), c40 char(247) +) ROW_FORMAT=dynamic; + +# +# Test the maximum key length +# Moved from innodb-index.test since each page size has its own max key length. +# Max Key Length is 3072 for 16k pages. +# Max key Part length is 767 +# InnoDB assumes 3 bytes for each UTF8 character. +# +CREATE TABLE t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + e varchar(4) character set utf8, + PRIMARY KEY (a,b,c,d,e)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + e varchar(5) character set utf8, + PRIMARY KEY (a,b,c,d,e)) + ENGINE=innodb; +CREATE TABLE t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + e varchar(255) character set utf8, + f varchar(4) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e,f)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(255) character set utf8, + b varchar(255) character set utf8, + c varchar(255) character set utf8, + d varchar(255) character set utf8, + e varchar(255) character set utf8, + f varchar(5) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e,f)) + ENGINE=innodb; + +--echo # Test 5) Make sure that KEY_BLOCK_SIZE=16, 8, 4, 2 & 1 +--echo # are all accepted. 
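A side note on the utf8 key-length boundaries exercised above: InnoDB charges 3 bytes per utf8 character, and the documented limit for 16k pages is 3072 bytes, so the pass/fail pairs are plain arithmetic. A quick check of that arithmetic, offered only as an illustration:

# four varchar(255) parts plus varchar(4), at 3 bytes per utf8 character
SELECT (4*255 + 4) * 3 AS key_bytes_at_limit;   # 3072, exactly the 16k maximum
# widening the last part to varchar(5) tips it over, hence ER_TOO_LONG_KEY
SELECT (4*255 + 5) * 3 AS key_bytes_too_long;   # 3075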
+ +SET SESSION innodb_strict_mode = ON; + +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +SET SESSION innodb_strict_mode = OFF; + +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + + +--echo # Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16 +--echo # are rejected when innodb_file_per_table=OFF +# Moved from innodb-zip.test +SET SESSION innodb_strict_mode = ON; +SET GLOBAL innodb_file_per_table = OFF; +SHOW VARIABLES LIKE 'innodb_file_per_table'; +--error ER_ILLEGAL_HA +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +--error ER_ILLEGAL_HA +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_file_format = `Antelope`; +--error ER_ILLEGAL_HA +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +--error ER_ILLEGAL_HA +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SET GLOBAL innodb_file_format = `Barracuda`; + + +--echo # Test 7) This series of tests were moved from innodb-index to here +--echo # because the second alter table t1 assumes a 16k page size. +--echo # Moving the test allows the rest of innodb-index to be run on all +--echo # page sizes. The previously disabled portions of this test were +--echo # moved as well. 
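One plausible reading of the 16k assumption (an inference, not something the patch states): with utf8 counted at 3 bytes per character, the composite primary key added in the second ALTER below comes to roughly 4 + 3*255 + 3*255 bytes, which fits under the 3072-byte limit of 16k pages but is well past the 768-byte ceiling documented for 4k pages later in this patch. A rough check, for illustration only:

# int column plus two utf8 TEXT prefixes of 255 characters each
SELECT 4 + 3*255 + 3*255 AS approx_pk_bytes;   # 1534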
+ +CREATE TABLE t2(d varchar(17) PRIMARY KEY) ENGINE=innodb DEFAULT CHARSET=utf8; +CREATE TABLE t3(a int PRIMARY KEY) ENGINE=innodb; + +INSERT INTO t3 VALUES (22),(44),(33),(55),(66); + +INSERT INTO t2 VALUES ('jejdkrun87'),('adfd72nh9k'), +('adfdpplkeock'),('adfdijnmnb78k'),('adfdijn0loKNHJik'); + +CREATE TABLE t1(a int, b blob, c text, d text NOT NULL) +ENGINE=innodb DEFAULT CHARSET=utf8 STATS_PERSISTENT=0; + +INSERT INTO t1 +SELECT a,LEFT(REPEAT(d,100*a),65535),REPEAT(d,20*a),d FROM t2,t3 order by a, d; +DROP TABLE t2, t3; +SELECT COUNT(*) FROM t1 WHERE a=44; +SELECT a, +LENGTH(b),b=LEFT(REPEAT(d,100*a),65535),LENGTH(c),c=REPEAT(d,20*a),d FROM t1 +ORDER BY 1, 2, 3, 4, 5, 6; +# in-place alter table should trigger ER_PRIMARY_CANT_HAVE_NULL +--error ER_DUP_ENTRY +ALTER TABLE t1 ADD PRIMARY KEY (a), ADD KEY (b(20)); +DELETE FROM t1 WHERE d='null'; +--error ER_DUP_ENTRY +ALTER TABLE t1 ADD PRIMARY KEY (a), ADD KEY (b(20)); +DELETE FROM t1 WHERE a%2; +CHECK TABLE t1; +# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on. +# And adding a PRIMARY KEY will also add NOT NULL implicitly! +ALTER TABLE t1 ADD PRIMARY KEY (a,b(255),c(255)), ADD KEY (b(767)); +SELECT COUNT(*) FROM t1 WHERE a=44; +SELECT a, +LENGTH(b), b=LEFT(REPEAT(d,100*a), 65535),LENGTH(c), c=REPEAT(d,20*a), d FROM t1; +SHOW CREATE TABLE t1; +CHECK TABLE t1; +EXPLAIN SELECT * FROM t1 WHERE b LIKE 'adfd%'; + +# The following tests are disabled because of the introduced timeouts for +# metadata locks at the MySQL level as part of the fix for +# Bug#45225 Locking: hang if drop table with no timeout +# The following commands now play with MySQL metadata locks instead of +# InnoDB locks +# start disabled45225_1 +## +## Test locking +## +# +#CREATE TABLE t2(a int, b varchar(255), PRIMARY KEY(a,b)) ENGINE=innodb; +#INSERT INTO t2 SELECT a,LEFT(b,255) FROM t1; +#DROP TABLE t1; +#RENAME TABLE t2 to t1; +# +#connect (a,localhost,root,,); +#connect (b,localhost,root,,); +#connection a; +#SET innodb_lock_wait_timeout=1; +#begin; +## Obtain an IX lock on the table +#SELECT a FROM t1 limit 1 FOR UPDATE; +#connection b; +#SET innodb_lock_wait_timeout=1; +## This would require an S lock on the table, conflicting with the IX lock. +#--error ER_LOCK_WAIT_TIMEOUT +#CREATE INDEX t1ba ON t1 (b,a); +#connection a; +#commit; +#begin; +## Obtain an IS lock on the table +#SELECT a FROM t1 limit 1 lock in share mode; +#connection b; +## This will require an S lock on the table. No conflict with the IS lock. +#CREATE INDEX t1ba ON t1 (b,a); +## This would require an X lock on the table, conflicting with the IS lock. +#--error ER_LOCK_WAIT_TIMEOUT +#DROP INDEX t1ba ON t1; +#connection a; +#commit; +#EXPLAIN SELECT a FROM t1 ORDER BY b; +#--send +#SELECT a,sleep(2+a/100) FROM t1 ORDER BY b limit 3; +# +## The following DROP INDEX will succeed, altough the SELECT above has +## opened a read view. However, during the execution of the SELECT, +## MySQL should hold a table lock that should block the execution +## of the DROP INDEX below. +# +#connection b; +#SELECT sleep(1); +#DROP INDEX t1ba ON t1; +# +## After the index was dropped, subsequent SELECTs will use the same +## read view, but they should not be accessing the dropped index any more. +# +#connection a; +#reap; +#EXPLAIN SELECT a FROM t1 ORDER BY b; +#SELECT a FROM t1 ORDER BY b limit 3; +#commit; +# +#connection default; +#disconnect a; +#disconnect b; +# +# end disabled45225_1 +DROP TABLE t1; + +--echo # Test 8) Test creating a table that could lead to undo log overflow. 
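The mechanism behind Test 8, stated briefly: the undo record for an UPDATE must fit in a single undo page, and it has to carry old values (up to a 767-byte prefix) for every updated column that participates in some index, so each added index inflates the undo record of a full-row update until ER_UNDO_RECORD_TOO_BIG is reached. The escape hatch the test leans on is splitting one wide UPDATE into several statements inside a single transaction; a minimal sketch of that pattern on a hypothetical two-column table (t_demo is illustrative, not part of the patch):

CREATE TABLE t_demo (a BLOB, b BLOB) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
INSERT INTO t_demo VALUES (REPEAT('a',767), REPEAT('b',767));
BEGIN;
UPDATE t_demo SET a = REPEAT('x',767);   # each statement writes its own, smaller undo record
UPDATE t_demo SET b = REPEAT('y',767);
COMMIT;
DROP TABLE t_demo;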
+CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob, + h blob,i blob,j blob,k blob,l blob,m blob,n blob, + o blob,p blob,q blob,r blob,s blob,t blob,u blob) + ENGINE=InnoDB ROW_FORMAT=dynamic; +SET @a = repeat('a', 767); +SET @b = repeat('b', 767); +SET @c = repeat('c', 767); +SET @d = repeat('d', 767); +SET @e = repeat('e', 767); + +# With no indexes defined, we can update all columns to max key part length. +INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a); +UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b, + k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b; + +# With this many indexes defined, we can still update all fields. +CREATE INDEX t1a ON t1 (a(767)); +CREATE INDEX t1b ON t1 (b(767)); +CREATE INDEX t1c ON t1 (c(767)); +CREATE INDEX t1d ON t1 (d(767)); +CREATE INDEX t1e ON t1 (e(767)); +UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c, + k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c; + +# Add one more index and the UNDO record becomes too big to update all columns. +# But a single transaction can update the columns in separate statements. +# because the UNDO records will be smaller. +CREATE INDEX t1f ON t1 (f(767)); +--error ER_UNDO_RECORD_TOO_BIG +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d, + k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +BEGIN; +UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d; +UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d, + n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d; +COMMIT; + +# More indexes can still be added and a single field can still be updated +CREATE INDEX t1g ON t1 (g(767)); +UPDATE t1 SET g=@e; +CREATE INDEX t1h ON t1 (h(767)); +UPDATE t1 SET h=@e; +CREATE INDEX t1i ON t1 (i(767)); +UPDATE t1 SET i=@e; +CREATE INDEX t1j ON t1 (j(767)); +UPDATE t1 SET j=@e; +CREATE INDEX t1k ON t1 (k(767)); +UPDATE t1 SET k=@e; +CREATE INDEX t1l ON t1 (l(767)); +UPDATE t1 SET l=@e; +CREATE INDEX t1m ON t1 (m(767)); +UPDATE t1 SET m=@e; +CREATE INDEX t1n ON t1 (n(767)); +UPDATE t1 SET n=@e; +CREATE INDEX t1o ON t1 (o(767)); +UPDATE t1 SET o=@e; +CREATE INDEX t1p ON t1 (p(767)); +UPDATE t1 SET p=@e; +CREATE INDEX t1q ON t1 (q(767)); +UPDATE t1 SET q=@e; +CREATE INDEX t1r ON t1 (r(767)); +UPDATE t1 SET r=@e; +CREATE INDEX t1s ON t1 (s(767)); +UPDATE t1 SET s=@e; + +# Add one more index and we cannot update a column to its defined index length. +# This is a problem. It means that the DDL is allowed to create a table +# that CANNOT be updated. See bug#12953735. +CREATE INDEX t1t ON t1 (t(767)); +--error ER_UNDO_RECORD_TOO_BIG +UPDATE t1 SET t=@e; + +CREATE INDEX t1u ON t1 (u(767)); +CREATE INDEX t1ut ON t1 (u(767), t(767)); +CREATE INDEX t1st ON t1 (s(767), t(767)); + +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # Bug #12429576 - Test an assertion failure on purge. +# This test is not in innodb_8k or innodb_4k since the bug is not about +# page size. It just tests the condition that caused the assertion. 
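The shape of t1_purge below is deliberate: the primary key stacks four 767-byte prefixes on top of an INT, landing exactly on the 3072-byte key-length ceiling for 16k pages, which is presumably why such wide keys were needed to reproduce the purge assertion. The arithmetic, as a quick illustration:

SELECT 4*767 + 4 AS t1_purge_pk_bytes;   # 3072, the 16k maximum key length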
+CREATE TABLE t1_purge ( +A int, +B blob, C blob, D blob, E blob, +F blob, G blob, H blob, +PRIMARY KEY (B(767), C(767), D(767), E(767), A), +INDEX (A) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; + +INSERT INTO t1_purge VALUES (1, +REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766), +REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766)); + +CREATE TABLE t2_purge ( +A int PRIMARY KEY, +B blob, C blob, D blob, E blob, +F blob, G blob, H blob, I blob, +J blob, K blob, L blob, +INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; + +INSERT INTO t2_purge VALUES (1, +REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766), +REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766), REPEAT('i', 766), +REPEAT('j', 766), REPEAT('k', 766), REPEAT('l', 766)); + +CREATE TABLE t3_purge ( +A int, +B varchar(800), C varchar(800), D varchar(800), E varchar(800), +F varchar(800), G varchar(800), H varchar(800), +PRIMARY KEY (B(767), C(767), D(767), E(767), A), +INDEX (A) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; + +INSERT INTO t3_purge SELECT * FROM t1_purge; + +CREATE TABLE t4_purge ( +A int PRIMARY KEY, +B varchar(800), C varchar(800), D varchar(800), E varchar(800), +F varchar(800), G varchar(800), H varchar(800), I varchar(800), +J varchar(800), K varchar(800), L varchar(800), +INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; + +INSERT INTO t4_purge SELECT * FROM t2_purge; + +# This would trigger the failure (Bug #12429576) +# if purge gets a chance to run before DROP TABLE t1_purge, .... +DELETE FROM t1_purge; +DELETE FROM t2_purge; +DELETE FROM t3_purge; +DELETE FROM t4_purge; +# We need to activate the purge thread. +# Instead of doing a --sleep 10 now, do it once at the end. + +# Bug#12637786 - Assertion hit; ut_ad(dict_index_is_clust(index)); +# A secondary index tuple is found to be too long to fit into a page. +# This test is not in innodb_8k or innodb_4k since the bug is not about +# page size. It just tests the condition that caused the assertion. +SET @r=REPEAT('a',500); +CREATE TABLE t12637786(a int, + v1 varchar(500), v2 varchar(500), v3 varchar(500), + v4 varchar(500), v5 varchar(500), v6 varchar(500), + v7 varchar(500), v8 varchar(500), v9 varchar(500), + v10 varchar(500), v11 varchar(500), v12 varchar(500), + v13 varchar(500), v14 varchar(500), v15 varchar(500), + v16 varchar(500), v17 varchar(500), v18 varchar(500) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +CREATE INDEX idx1 ON t12637786(a,v1); +INSERT INTO t12637786 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +UPDATE t12637786 SET a=1000; +DELETE FROM t12637786; +# We need to activate the purge thread to make sure it does not assert and +# is able to clean up the old versions of secondary index entries. +# Instead of doing a --sleep 10 now for each test, do it once at the end. + +--echo # Bug#12963823 - Test that the purge thread does not crash when +# the number of indexes has changed since the UNDO record was logged. +# This test is not in innodb_8k or innodb_4k since the bug is not about +# page size. It just tests the condition that caused the crash. 
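The crash condition being probed here is a mismatch between the index set that existed when an undo record was written and the index set purge finds when it finally runs: the UPDATEs below are logged while the first batch of indexes exists, and before purge catches up some of those indexes are dropped and new ones are created. A compressed sketch of the same shape, with hypothetical names and offered only as an illustration:

CREATE TABLE t_p (a BLOB, b BLOB) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
CREATE INDEX ia ON t_p (a(500));          # index exists when the undo record is written
INSERT INTO t_p VALUES (REPEAT('a',767), REPEAT('b',767));
UPDATE t_p SET a = REPEAT('c',500);
ALTER TABLE t_p DROP INDEX ia;            # index set shrinks before purge processes the record
CREATE INDEX ib ON t_p (b(500));          # ...and then grows again with a different index
DROP TABLE t_p;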
+CREATE TABLE t12963823(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob, + i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob) + ENGINE=innodb ROW_FORMAT=dynamic; +SET @r = REPEAT('a', 767); +INSERT INTO t12963823 VALUES (@r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r); +CREATE INDEX ndx_a ON t12963823 (a(500)); +CREATE INDEX ndx_b ON t12963823 (b(500)); +CREATE INDEX ndx_c ON t12963823 (c(500)); +CREATE INDEX ndx_d ON t12963823 (d(500)); +CREATE INDEX ndx_e ON t12963823 (e(500)); +CREATE INDEX ndx_f ON t12963823 (f(500)); +CREATE INDEX ndx_k ON t12963823 (k(500)); +CREATE INDEX ndx_l ON t12963823 (l(500)); + +SET @r = REPEAT('b', 500); +UPDATE t12963823 set a=@r,b=@r,c=@r,d=@r; +UPDATE t12963823 set e=@r,f=@r,g=@r,h=@r; +UPDATE t12963823 set i=@r,j=@r,k=@r,l=@r; +UPDATE t12963823 set m=@r,n=@r,o=@r,p=@r; +ALTER TABLE t12963823 DROP INDEX ndx_a; +ALTER TABLE t12963823 DROP INDEX ndx_b; +CREATE INDEX ndx_g ON t12963823 (g(500)); +CREATE INDEX ndx_h ON t12963823 (h(500)); +CREATE INDEX ndx_i ON t12963823 (i(500)); +CREATE INDEX ndx_j ON t12963823 (j(500)); +CREATE INDEX ndx_m ON t12963823 (m(500)); +CREATE INDEX ndx_n ON t12963823 (n(500)); +CREATE INDEX ndx_o ON t12963823 (o(500)); +CREATE INDEX ndx_p ON t12963823 (p(500)); +SHOW CREATE TABLE t12963823; +# We need to activate the purge thread at this point to see if it crashes. +# Instead of doing a --sleep 10 now for each test, do it once at the end. + +--echo # Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE +# InnoDB cannot know that this undo record would be too big for the undo +# page. Too much of text field is stored in the clustered record in this +# DYNAMIC row formatted record. +# This test is not in innodb_8k or innodb_4k since the bug is not about +# page size. It just tests the condition that caused the hang. + +SET SESSION innodb_strict_mode = ON; +CREATE TABLE bug12547647( +a int NOT NULL, b blob NOT NULL, c text, +PRIMARY KEY (b(10), a), INDEX (c(767)), INDEX(b(767)) +) ENGINE=InnoDB ROW_FORMAT=DYNAMIC; +INSERT INTO bug12547647 VALUES (5,REPEAT('khdfo5AlOq',1900),REPEAT('g',7751)); +COMMIT; +# The following used to cause a hang while doing infinite undo log allocation. +--error ER_UNDO_RECORD_TOO_BIG +UPDATE bug12547647 SET c = REPEAT('b',16928); +SHOW WARNINGS; +DROP TABLE bug12547647; + +# The following should fail in non-strict mode too. +# (The fix of Bug #50945 only affects REDUNDANT and COMPACT tables.) +SET SESSION innodb_strict_mode = off; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; + + +--echo # +--echo # Bug#56862 Execution of a query that uses index merge returns a wrong result +--echo # + +# Moved to here from innodb_mysql.test. Some PB3 systems sporadically +# had timeouts doing this with smaller page sizes. 
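For context, the predicate below pairs a range over the secondary index idx(a) with a point lookup on the primary key, and FORCE INDEX (idx,PRIMARY) steers the optimizer toward an index-merge union of the two; the small sort_buffer_size set alongside it is presumably there to make the row-id union spill rather than complete in memory, which is where the original wrong-result bug lived, and derived_merge=off keeps the outer subquery materialized. The core of the query, shown on its own purely as an illustration (t1 as created below):

EXPLAIN SELECT COUNT(*) FROM t1 FORCE INDEX (idx, PRIMARY)
WHERE a BETWEEN 2 AND 7 OR pk = 1000000;   # expected to show an index_merge over idx and PRIMARY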
+ +CREATE TABLE t1 ( + pk int NOT NULL AUTO_INCREMENT PRIMARY KEY, + a int, + b int, + INDEX idx(a)) +ENGINE=INNODB; + +INSERT INTO t1(a,b) VALUES + (11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500), + (3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800), + (6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700), + (13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000); +INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1; +INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1; +INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1(a,b) SELECT a,b FROM t1; +INSERT INTO t1 VALUES (1000000, 0, 0); + +set @optimizer_switch_saved=@@optimizer_switch; +SET SESSION optimizer_switch='derived_merge=off'; +SET SESSION sort_buffer_size = 1024*36; + +EXPLAIN +SELECT COUNT(*) FROM + (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY) + WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t; + +SELECT COUNT(*) FROM + (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY) + WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t; + +set @@optimizer_switch=@optimizer_switch_saved; +SET SESSION sort_buffer_size = DEFAULT; + +DROP TABLE t1; + + +# The tests that uses these tables required the purge thread to run. +# Just in case it has not by now, provide a 10 second wait. +--sleep 10 +DROP TABLE t1_purge, t2_purge, t3_purge, t4_purge; +DROP TABLE t12637786; +DROP TABLE t12963823; diff --git a/mysql-test/suite/innodb_zip/t/4k.test b/mysql-test/suite/innodb_zip/t/4k.test new file mode 100644 index 00000000000..6226c4abcee --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/4k.test @@ -0,0 +1,440 @@ +# Tests for setting innodb-page-size=4k + +--source include/have_innodb.inc +--source include/have_innodb_4k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +let $MYSQLD_DATADIR = `select @@datadir`; +let $INNODB_PAGE_SIZE = `select @@innodb_page_size`; + +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +--enable_query_log + +--echo # Test 1) Show the page size from Information Schema +--disable_warnings +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_page_size'; +--enable_warnings + +--echo # Test 2) The number of buffer pool pages is dependent upon the page size. +--disable_warnings +--replace_result 6144 {checked_valid} +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +--enable_warnings + +--echo # Test 3) Query some information_shema tables that are dependent upon +--echo # the page size. +# Show the metadata for tables in schema 'mysql'. +# Pulled from innodb-system-table-view.test +# The IDs of mysql.innodb_table_stats and mysql.innodb_index_stats are +# unpredictable. They depend on whether mtr has created the database for +# this test from scratch or is using a previously created database where +# those tables have been dropped and recreated. Since we cannot force mtr +# to use a freshly created database for this test we do not return the +# table or index IDs. We can return the space IS of mysql schema tables +# since they are created consistently during bootstrap. 
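A side note on Test 2 above: the masked page count scales inversely with the page size. The 16k flavour of this test expects about 1536 buffer pool pages and this 4k flavour expects 6144, and both work out to the same pool size, which suggests the test server runs with roughly a 24MB buffer pool. The arithmetic, as a quick illustration:

SELECT 1536 * 16 AS kb_at_16k, 6144 * 4 AS kb_at_4k;   # both 24576 KB, i.e. 24MB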
+SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; + +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; + +# Show the metadata for tables in schema 'test'. +# Do not return the space ID since this tablespace may have existed before +# this test runs. The root page number of each index should be consistent +# within a file-per-table tablespace. +SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +--source suite/innodb/include/show_i_s_tablespaces.inc +DROP TABLE t1, t2, t3, t4; + +--echo # Test 4) The maximum row size is dependent upon the page size. +--echo # Redundant: 1979, Compact: 1982. +--echo # Compressed: 1982, Dynamic: 1982. +--echo # Each row format has its own amount of overhead that +--echo # varies depending on number of fields and other overhead. + +SET SESSION innodb_strict_mode = ON; + +# Redundant table; 1927 bytes with 10 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(127) +) ROW_FORMAT=redundant; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(128) +) ROW_FORMAT=redundant; + +# Compact table; 1955 bytes with 10 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(155) +) ROW_FORMAT=compact; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(156) +) ROW_FORMAT=compact; + +# Compressed table; 1878 bytes with 10 CHAR fields +# Bug#13391353 Limit is 1876 on 32-Linux only +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(76) +) ROW_FORMAT=compressed; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(79) +) ROW_FORMAT=compressed; + +# Dynamic table; 1955 bytes with 10 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(155) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE 
t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(156) +) ROW_FORMAT=dynamic; + +# +# Test the maximum key length +# Moved from innodb-index.test since each page size has its own max key length. +# Max Key Length is 768 for 4k pages. +# +# InnoDB assumes 3 bytes for each UTF8 character. +# +CREATE TABLE t1 (a varchar(64) character set utf8, + b varchar(64) character set utf8, + c varchar(64) character set utf8, + d varchar(64) character set utf8, + PRIMARY KEY (a,b,c,d)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(64) character set utf8, + b varchar(64) character set utf8, + c varchar(64) character set utf8, + d varchar(65) character set utf8, + PRIMARY KEY (a,b,c,d)) + ENGINE=innodb; +CREATE TABLE t1 (a varchar(64) character set utf8, + b varchar(64) character set utf8, + c varchar(64) character set utf8, + d varchar(64) character set utf8, + e varchar(64) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(64) character set utf8, + b varchar(64) character set utf8, + c varchar(64) character set utf8, + d varchar(64) character set utf8, + e varchar(65) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e)) + ENGINE=innodb; + +--echo # Test 5) Make sure that KEY_BLOCK_SIZE=4, 2 & 1 are all +--echo # accepted and that KEY_BLOCK_SIZE=16 & 8 are rejected +--echo # in strict mode and converted to 4 in non-strict mode. + +SET SESSION innodb_strict_mode = ON; + +--error ER_ILLEGAL_HA +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; + +--error ER_ILLEGAL_HA +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +SET SESSION innodb_strict_mode = OFF; + +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + 
  FROM information_schema.tables WHERE table_name = 't1';
+DROP TABLE t1;
+
+
+--echo # Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16
+--echo # are both rejected when innodb_file_per_table=OFF
+# Moved from innodb-zip.test
+SET SESSION innodb_strict_mode = ON;
+SET GLOBAL innodb_file_per_table = OFF;
+SHOW VARIABLES LIKE 'innodb_file_per_table';
+--error ER_ILLEGAL_HA
+CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
+SHOW WARNINGS;
+--error ER_ILLEGAL_HA
+CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
+SHOW WARNINGS;
+SET GLOBAL innodb_file_per_table = ON;
+SET GLOBAL innodb_file_format = `Antelope`;
+--error ER_ILLEGAL_HA
+CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
+SHOW WARNINGS;
+--error ER_ILLEGAL_HA
+CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
+SHOW WARNINGS;
+SET GLOBAL innodb_file_format = `Barracuda`;
+
+
+--echo # Test 7) Not included here; 16k only
+
+
+--echo # Test 8) Test creating a table that could lead to undo log overflow.
+CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,
+ h blob,i blob,j blob,k blob,l blob,m blob,n blob,
+ o blob,p blob,q blob,r blob,s blob,t blob,u blob)
+ ENGINE=InnoDB ROW_FORMAT=dynamic;
+SET @a = repeat('a', 767);
+SET @b = repeat('b', 767);
+SET @c = repeat('c', 767);
+SET @d = repeat('d', 767);
+SET @e = repeat('e', 767);
+
+# With no indexes defined, we can update all columns to max key part length.
+INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a);
+UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b,
+ k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b;
+
+# With one index defined, we can still update all fields.
+CREATE INDEX t1a ON t1 (a(767));
+UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c,
+ k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c;
+
+# Add one more index and the UNDO record becomes too big to update all columns.
+# But a single transaction can update the columns in separate statements,
+# because the UNDO records will be smaller.
+CREATE INDEX t1b ON t1 (b(767));
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d,
+ k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d;
+BEGIN;
+UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d;
+UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d,
+ n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d;
+COMMIT;
+
+# Another index can still be added and a single field can still be updated.
+CREATE INDEX t1c ON t1 (c(767));
+UPDATE t1 SET c=@e;
+
+# Add one more index and we cannot update a column to its defined index length.
+# This is a problem. It means that the DDL is allowed to create a table
+# that CANNOT be updated. See bug#12953735.
+CREATE INDEX t1d ON t1 (d(767));
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE t1 SET d=@e;
+
+--replace_regex /> [0-9]*/> max_row_size/
+CREATE INDEX t1e ON t1 (e(767));
+
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+#
+# Bug #13336585 - INNODB: CHANGE BUFFERING WITH 4K PAGES CAN ASSERT
+# IF SECONDARY KEY IS NEAR MAX
+# If the secondary index tuple is close to half the page size,
+# ibuf_insert_low() could return DB_TOO_BIG_RECORD, which is not expected
+# in ibuf_insert(). In order to ensure this does not happen, WL5756
+# imposes a maximum key length of 768 for 4k pages and 1536 for 8k pages.
+# The existing max key size for 16k pages is 3072.
+#
+
+#-- disable_query_log
+# The flag innodb_change_buffering_debug is only available in debug builds.
+# It instructs InnoDB to try to evict pages from the buffer pool when +# change buffering is possible, so that the change buffer will be used +# whenever possible. +# This flag is not used currently since it exposes valgrind error in ibuf +# code with the following SQL +#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +#SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug; +#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +#SET GLOBAL innodb_change_buffering_debug = 1; +#-- enable_query_log + +# make sure the largest possible key entry can be added to the insert buffer. +# Make enough records so that the root page is not a leaf page. +SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1( + pk01 varchar(48), pk02 varchar(48), pk03 varchar(48), pk04 varchar(48), + pk05 varchar(48), pk06 varchar(48), pk07 varchar(48), pk08 varchar(48), + pk09 varchar(48), pk10 varchar(48), pk11 varchar(48), pk12 varchar(48), + pk13 varchar(48), pk14 varchar(48), pk15 varchar(48), pk16 varchar(48), + sk01 varchar(48), sk02 varchar(48), sk03 varchar(48), sk04 varchar(48), + sk05 varchar(48), sk06 varchar(48), sk07 varchar(48), sk08 varchar(48), + sk09 varchar(48), sk10 varchar(48), sk11 varchar(48), sk12 varchar(48), + sk13 varchar(48), sk14 varchar(48), sk15 varchar(48), sk16 varchar(48), + PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, + pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), + KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, + sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) + ROW_FORMAT=Redundant ENGINE=InnoDB; +SET @r = repeat('a', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; + +# Compressed tables do not compress parent pages. So the whole uncompressed +# secondary tuple including the primary key must be able to fit in half the +# compressed page size. This record length is enforced at index creation. +# So the only way to get an ibuf tuple too big is to make the KEY_BLOCK_SIZE +# the same as the page size. 
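+#
+# Rough arithmetic for the compressed table below (illustrative only; the
+# exact ceiling also depends on per-column length bytes and record headers,
+# which are not spelled out here): a secondary index entry carries the 16
+# varchar(48) key columns plus the 16 varchar(48) primary key columns, so
+# it is at least 32 * 48 = 1536 bytes, while half of a KEY_BLOCK_SIZE=4
+# page is 4096 / 2 = 2048 bytes.  The entries built below therefore sit
+# close to, but still under, that limit.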
+CREATE TABLE t1( + pk01 varchar(48), pk02 varchar(48), pk03 varchar(48), pk04 varchar(48), + pk05 varchar(48), pk06 varchar(48), pk07 varchar(48), pk08 varchar(48), + pk09 varchar(48), pk10 varchar(48), pk11 varchar(48), pk12 varchar(48), + pk13 varchar(48), pk14 varchar(48), pk15 varchar(48), pk16 varchar(48), + sk01 varchar(48), sk02 varchar(48), sk03 varchar(48), sk04 varchar(48), + sk05 varchar(48), sk06 varchar(48), sk07 varchar(48), sk08 varchar(48), + sk09 varchar(48), sk10 varchar(48), sk11 varchar(48), sk12 varchar(48), + sk13 varchar(48), sk14 varchar(48), sk15 varchar(48), sk16 varchar(48), + PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, + pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), + KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, + sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) + ROW_FORMAT=Compressed KEY_BLOCK_SIZE=4 ENGINE=InnoDB; +SET @r = repeat('a', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 48); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; + +#-- disable_query_log +#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +#SET GLOBAL innodb_change_buffering_debug = 0; +#-- enable_query_log + +# The following should fail in non-strict mode too. +# (The fix of Bug #50945 only affects REDUNDANT and COMPACT tables.) 
+SET SESSION innodb_strict_mode = off; +--replace_regex /> [0-9]*/> max_row_size/ +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +drop table t1; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +--replace_regex /> [0-9]*/> max_row_size/ +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_zip/t/8k.test b/mysql-test/suite/innodb_zip/t/8k.test new file mode 100644 index 00000000000..3a2e8755f57 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/8k.test @@ -0,0 +1,468 @@ +# Tests for setting innodb-page-size=8k + +--source include/have_innodb.inc +--source include/have_innodb_8k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +let $MYSQLD_DATADIR = `select @@datadir`; +let $INNODB_PAGE_SIZE = `select @@innodb_page_size`; + +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +--enable_query_log + +--echo # Test 1) Show the page size from Information Schema +--disable_warnings +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_page_size'; +--enable_warnings + +--echo # Test 2) The number of buffer pool pages is dependent upon the page size. +--disable_warnings +--replace_result 3071 {checked_valid} 3072 {checked_valid} +SELECT variable_value FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total'; +--enable_warnings + +--echo # Test 3) Query some information_shema tables that are dependent upon +--echo # the page size. +# Show the metadata for tables in schema 'mysql'. +# Pulled from innodb-system-table-view.test +# The IDs of mysql.innodb_table_stats and mysql.innodb_index_stats are +# unpredictable. They depend on whether mtr has created the database for +# this test from scratch or is using a previously created database where +# those tables have been dropped and recreated. Since we cannot force mtr +# to use a freshly created database for this test we do not return the +# table or index IDs. We can return the space IS of mysql schema tables +# since they are created consistently during bootstrap. +SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'mysql%' + ORDER BY t.name, i.index_id; + +CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb; +CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb; +CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb; +CREATE TABLE t4 (a INT KEY, b TEXT) ROW_FORMAT=DYNAMIC ENGINE=innodb; + +# Show the metadata for tables in schema 'test'. +# Do not return the space ID since this tablespace may have existed before +# this test runs. 
The root page number of each index should be consistent +# within a file-per-table tablespace. +SELECT t.name table_name, t.n_cols, t.flag table_flags, + i.name index_name, i.page_no root_page, i.type, + i.n_fields, i.merge_threshold + FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES t, + INFORMATION_SCHEMA.INNODB_SYS_INDEXES i + WHERE t.table_id = i.table_id + AND t.name LIKE 'test%' + ORDER BY t.name, i.name; +--source suite/innodb/include/show_i_s_tablespaces.inc +DROP TABLE t1, t2, t3, t4; + +--echo # Test 4) The maximum row size is dependent upon the page size. +--echo # Redundant: 4027, Compact: 4030. +--echo # Compressed: 4030, Dynamic: 4030. +--echo # Each row format has its own amount of overhead that +--echo # varies depending on number of fields and other overhead. + +SET SESSION innodb_strict_mode = ON; + +# Redundant table; 3955 bytes with 20 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(155) +) ROW_FORMAT=redundant; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(156) +) ROW_FORMAT=redundant; + +# Compact table; 4002 bytes with 20 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(202) +) ROW_FORMAT=compact; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(203) +) ROW_FORMAT=compact; + +# Compressed table; 3905 bytes with 20 CHAR fields +# Bug#13391353 Limit is 3903 on 32-Linux only +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(103) +) ROW_FORMAT=compressed; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(106) +) ROW_FORMAT=compressed; + +# Dynamic table; 4002 bytes with 20 CHAR fields +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 
char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(202) +) ROW_FORMAT=dynamic; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE t1 ( +c01 char(200), c02 char(200), c03 char(200), c04 char(200), c05 char(200), +c06 char(200), c07 char(200), c08 char(200), c09 char(200), c10 char(200), +c11 char(200), c12 char(200), c13 char(200), c14 char(200), c15 char(200), +c16 char(200), c17 char(200), c18 char(200), c19 char(200), c20 char(203) +) ROW_FORMAT=dynamic; + +# +# Test the maximum key length +# Moved from innodb-index.test since each page size has its own max key length. +# Max Key Length is 1536 for 8k pages. +# +# InnoDB assumes 3 bytes for each UTF8 character. +# +CREATE TABLE t1 (a varchar(128) character set utf8, + b varchar(128) character set utf8, + c varchar(128) character set utf8, + d varchar(128) character set utf8, + PRIMARY KEY (a,b,c,d)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(128) character set utf8, + b varchar(128) character set utf8, + c varchar(128) character set utf8, + d varchar(129) character set utf8, + PRIMARY KEY (a,b,c,d)) + ENGINE=innodb; +CREATE TABLE t1 (a varchar(128) character set utf8, + b varchar(128) character set utf8, + c varchar(128) character set utf8, + d varchar(128) character set utf8, + e varchar(128) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e)) + ENGINE=innodb; +DROP TABLE t1; +--error ER_TOO_LONG_KEY +CREATE TABLE t1 (a varchar(128) character set utf8, + b varchar(128) character set utf8, + c varchar(128) character set utf8, + d varchar(128) character set utf8, + e varchar(129) character set utf8, + PRIMARY KEY (a), KEY (b,c,d,e)) + ENGINE=innodb; + +--echo # Test 5) Make sure that KEY_BLOCK_SIZE=8, 4, 2 & 1 are all +--echo # accepted and that KEY_BLOCK_SIZE=16 is rejected in +--echo # strict mode and converted to 8 in non-strict mode. 
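+# A KEY_BLOCK_SIZE can only be used when it does not exceed the page size,
+# which is why 16 is invalid on an 8k server: strict mode rejects the
+# CREATE with ER_ILLEGAL_HA, while non-strict mode falls back (with a
+# warning) to the largest value that fits, i.e. 8.  The page size driving
+# these checks can be read back at run time; query and result logging are
+# switched off around this illustrative probe so the recorded output of
+# the test is unchanged.
+--disable_query_log
+--disable_result_log
+SELECT @@innodb_page_size;
+--enable_result_log
+--enable_query_log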
+ +SET SESSION innodb_strict_mode = ON; + +--error ER_ILLEGAL_HA +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +SET SESSION innodb_strict_mode = OFF; + +CREATE TABLE t1 (i int) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; + +ALTER TABLE t1 KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT table_name, row_format, create_options + FROM information_schema.tables WHERE table_name = 't1'; +DROP TABLE t1; + + +--echo # Test 6) Make sure that KEY_BLOCK_SIZE = 8 and 16 +--echo # are rejected when innodb_file_per_table=OFF +# Moved from innodb-zip.test +SET SESSION innodb_strict_mode = ON; +SET GLOBAL innodb_file_per_table = OFF; +SHOW VARIABLES LIKE 'innodb_file_per_table'; +--error ER_ILLEGAL_HA +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +--error ER_ILLEGAL_HA +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SET GLOBAL innodb_file_per_table = ON; +SET GLOBAL innodb_file_format = `Antelope`; +--error ER_ILLEGAL_HA +CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8; +SHOW WARNINGS; +--error ER_ILLEGAL_HA +CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16; +SHOW WARNINGS; +SET GLOBAL innodb_file_format = `Barracuda`; + + +--echo # Test 7) Not included here; 16k only + + +--echo # Test 8) Test creating a table that could lead to undo log overflow. +CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob, + h blob,i blob,j blob,k blob,l blob,m blob,n blob, + o blob,p blob,q blob,r blob,s blob,t blob,u blob) + ENGINE=InnoDB ROW_FORMAT=dynamic; +SET @a = repeat('a', 767); +SET @b = repeat('b', 767); +SET @c = repeat('c', 767); +SET @d = repeat('d', 767); +SET @e = repeat('e', 767); + +# With no indexes defined, we can update all columns to max key part length. 
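+# (Roughly speaking, the undo record written for an UPDATE must carry the
+# old values of the updated columns that are covered by indexes, and the
+# whole record has to fit in a single undo page.  Each additional 767-byte
+# index prefix therefore makes a wide multi-column UPDATE bigger to undo,
+# until it fails with ER_UNDO_RECORD_TOO_BIG, while narrow single-column
+# updates keep working.  This is the behaviour exercised below.)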
+INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,@a);
+UPDATE t1 SET a=@b,b=@b,c=@b,d=@b,e=@b,f=@b,g=@b,h=@b,i=@b,j=@b,
+ k=@b,l=@b,m=@b,n=@b,o=@b,p=@b,q=@b,r=@b,s=@b,t=@b,u=@b;
+
+# With this many indexes defined, we can still update all fields.
+CREATE INDEX t1a ON t1 (a(767));
+CREATE INDEX t1b ON t1 (b(767));
+UPDATE t1 SET a=@c,b=@c,c=@c,d=@c,e=@c,f=@c,g=@c,h=@c,i=@c,j=@c,
+ k=@c,l=@c,m=@c,n=@c,o=@c,p=@c,q=@c,r=@c,s=@c,t=@c,u=@c;
+
+# Add one more index and the UNDO record becomes too big to update all columns.
+# But a single transaction can update the columns in separate statements,
+# because the UNDO records will be smaller.
+CREATE INDEX t1c ON t1 (c(767));
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d,f=@d,g=@d,h=@d,i=@d,j=@d,
+ k=@d,l=@d,m=@d,n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d;
+BEGIN;
+UPDATE t1 SET a=@d,b=@d,c=@d,d=@d,e=@d;
+UPDATE t1 SET f=@d,g=@d,h=@d,i=@d,j=@d,k=@d,l=@d,m=@d,
+ n=@d,o=@d,p=@d,q=@d,r=@d,s=@d,t=@d,u=@d;
+COMMIT;
+
+# More indexes can still be added and a single field can still be updated.
+CREATE INDEX t1d ON t1 (d(767));
+UPDATE t1 SET d=@e;
+CREATE INDEX t1e ON t1 (e(767));
+UPDATE t1 SET e=@e;
+CREATE INDEX t1f ON t1 (f(767));
+UPDATE t1 SET f=@e;
+CREATE INDEX t1g ON t1 (g(767));
+UPDATE t1 SET g=@e;
+CREATE INDEX t1h ON t1 (h(767));
+UPDATE t1 SET h=@e;
+CREATE INDEX t1i ON t1 (i(767));
+UPDATE t1 SET i=@e;
+
+--replace_regex /> [0-9]*/> max_row_size/
+CREATE INDEX t1k ON t1 (j(767));
+
+# But it does allow a 500-byte index. And with this, we cannot
+# update the record. This is a problem. It means that the DDL is
+# allowed to create a table and a record that CANNOT be updated.
+# See bug#12953735.
+--replace_regex /> [0-9]*/> max_row_size/
+CREATE INDEX t1j ON t1 (j(500));
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE t1 SET j=@e;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+#
+# Bug #13336585 - INNODB: CHANGE BUFFERING WITH 4K PAGES CAN ASSERT
+# IF SECONDARY KEY IS NEAR MAX
+# If the secondary index tuple is close to half the page size,
+# ibuf_insert_low() could return DB_TOO_BIG_RECORD, which is not expected
+# in ibuf_insert(). In order to ensure this does not happen, WL5756
+# imposes a maximum key length of 768 for 4k pages and 1536 for 8k pages.
+# The existing max key size for 16k pages is 3072.
+#
+
+#-- disable_query_log
+# The flag innodb_change_buffering_debug is only available in debug builds.
+# It instructs InnoDB to try to evict pages from the buffer pool when
+# change buffering is possible, so that the change buffer will be used
+# whenever possible.
+#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
+#SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug;
+#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE
+#SET GLOBAL innodb_change_buffering_debug = 1;
+#-- enable_query_log
+
+# Make sure the largest possible key entry can be added to the insert buffer.
+# Make enough records so that the root page is not a leaf page.
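+# (Sizing note, following the 1536-byte key length limit quoted above: the
+# secondary key below is 16 varchar(96) columns, i.e. 16 * 96 = 1536 bytes,
+# so its entries are as large as an 8k server will accept.)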
+SET SESSION innodb_strict_mode = OFF; +CREATE TABLE t1( + pk01 varchar(96), pk02 varchar(96), pk03 varchar(96), pk04 varchar(96), + pk05 varchar(96), pk06 varchar(96), pk07 varchar(96), pk08 varchar(96), + pk09 varchar(96), pk10 varchar(96), pk11 varchar(96), pk12 varchar(96), + pk13 varchar(96), pk14 varchar(96), pk15 varchar(96), pk16 varchar(96), + sk01 varchar(96), sk02 varchar(96), sk03 varchar(96), sk04 varchar(96), + sk05 varchar(96), sk06 varchar(96), sk07 varchar(96), sk08 varchar(96), + sk09 varchar(96), sk10 varchar(96), sk11 varchar(96), sk12 varchar(96), + sk13 varchar(96), sk14 varchar(96), sk15 varchar(96), sk16 varchar(96), + PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, + pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), + KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, + sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) + ROW_FORMAT=Redundant ENGINE=InnoDB; +SET @r = repeat('a', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; + +# Compressed tables do not compress parent pages. So the whole uncompressed +# secondary tuple including the primary key must be able to fit in half the +# compressed page size. This record length is enforced at index creation. +# So the only way to get an ibuf tuple too big is to make the KEY_BLOCK_SIZE +# the same as the page size. 
+CREATE TABLE t1( + pk01 varchar(96), pk02 varchar(96), pk03 varchar(96), pk04 varchar(96), + pk05 varchar(96), pk06 varchar(96), pk07 varchar(96), pk08 varchar(96), + pk09 varchar(96), pk10 varchar(96), pk11 varchar(96), pk12 varchar(96), + pk13 varchar(96), pk14 varchar(96), pk15 varchar(96), pk16 varchar(96), + sk01 varchar(96), sk02 varchar(96), sk03 varchar(96), sk04 varchar(96), + sk05 varchar(96), sk06 varchar(96), sk07 varchar(96), sk08 varchar(96), + sk09 varchar(96), sk10 varchar(96), sk11 varchar(96), sk12 varchar(96), + sk13 varchar(96), sk14 varchar(96), sk15 varchar(96), sk16 varchar(96), + PRIMARY KEY pk(pk01,pk02,pk03,pk04,pk05,pk06,pk07,pk08, + pk09,pk10,pk11,pk12,pk13,pk14,pk15,pk16), + KEY pk(sk01,sk02,sk03,sk04,sk05,sk06,sk07,sk08, + sk09,sk10,sk11,sk12,sk13,sk14,sk15,sk16)) + ROW_FORMAT=Compressed KEY_BLOCK_SIZE=8 ENGINE=InnoDB; +SET @r = repeat('a', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('b', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('c', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('d', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +SET @r = repeat('e', 96); +INSERT INTO t1 VALUES(@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r, + @r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r); +DELETE from t1; +DROP TABLE t1; + +#-- disable_query_log +#-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +#SET GLOBAL innodb_change_buffering_debug = 0; +#-- enable_query_log + +# The following should fail in non-strict mode too. +# (The fix of Bug #50945 only affects REDUNDANT and COMPACT tables.) 
+SET SESSION innodb_strict_mode = off; +--replace_regex /> [0-9]*/> max_row_size/ +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +--replace_regex /> [0-9]*/> max_row_size/ +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1( + c text NOT NULL, d text NOT NULL, + PRIMARY KEY (c(767),d(767))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII; +drop table t1; +--replace_regex /> [0-9]*/> max_row_size/ +CREATE TABLE t1(c text, PRIMARY KEY (c(440))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +DROP TABLE t1; +CREATE TABLE t1(c text, PRIMARY KEY (c(438))) +ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII; +INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512)); +DROP TABLE t1; diff --git a/mysql-test/suite/innodb_zip/t/bug36169.test b/mysql-test/suite/innodb_zip/t/bug36169.test new file mode 100644 index 00000000000..5452c929b92 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/bug36169.test @@ -0,0 +1,1162 @@ +# +# Bug#36169 create innodb compressed table with too large row size crashed +# http://bugs.mysql.com/36169 +# + +-- source include/have_innodb.inc +-- source include/have_innodb_zip.inc + +let $file_per_table=`select @@innodb_file_per_table`; +SET GLOBAL innodb_file_per_table=ON; + +# +# The following is copied from http://bugs.mysql.com/36169 +# (http://bugs.mysql.com/file.php?id=9121) +# Probably it can be simplified but that is not obvious. +# + +# we care only that the following SQL commands do produce errors +# as expected and do not crash the server +-- disable_query_log +-- disable_result_log +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +# Generating 10 tables +# Creating a table with 94 columns and 24 indexes +DROP TABLE IF EXISTS `table0`; +set innodb_strict_mode=on; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table0` +(`col0` BOOL, +`col1` BOOL, +`col2` TINYINT, +`col3` DATE, +`col4` TIME, +`col5` SET ('test1','test2','test3'), +`col6` TIME, +`col7` TEXT, +`col8` DECIMAL, +`col9` SET ('test1','test2','test3'), +`col10` FLOAT, +`col11` DOUBLE PRECISION, +`col12` ENUM ('test1','test2','test3'), +`col13` TINYBLOB, +`col14` YEAR, +`col15` SET ('test1','test2','test3'), +`col16` NUMERIC, +`col17` NUMERIC, +`col18` BLOB, +`col19` DATETIME, +`col20` DOUBLE PRECISION, +`col21` DECIMAL, +`col22` DATETIME, +`col23` NUMERIC, +`col24` NUMERIC, +`col25` LONGTEXT, +`col26` TINYBLOB, +`col27` TIME, +`col28` TINYBLOB, +`col29` ENUM ('test1','test2','test3'), +`col30` SMALLINT, +`col31` REAL, +`col32` FLOAT, +`col33` CHAR (175), +`col34` TINYTEXT, +`col35` TINYTEXT, +`col36` TINYBLOB, +`col37` TINYBLOB, +`col38` TINYTEXT, +`col39` MEDIUMBLOB, +`col40` TIMESTAMP, +`col41` DOUBLE, +`col42` SMALLINT, +`col43` LONGBLOB, +`col44` VARCHAR (80), +`col45` MEDIUMTEXT, +`col46` NUMERIC, +`col47` BIGINT, +`col48` DATE, +`col49` TINYBLOB, +`col50` DATE, +`col51` BOOL, +`col52` MEDIUMINT, +`col53` FLOAT, +`col54` TINYBLOB, +`col55` LONGTEXT, +`col56` SMALLINT, +`col57` ENUM ('test1','test2','test3'), +`col58` DATETIME, +`col59` MEDIUMTEXT, +`col60` VARCHAR (232), +`col61` NUMERIC, +`col62` YEAR, +`col63` SMALLINT, +`col64` TIMESTAMP, +`col65` BLOB, +`col66` LONGBLOB, +`col67` INT, 
+`col68` LONGTEXT, +`col69` ENUM ('test1','test2','test3'), +`col70` INT, +`col71` TIME, +`col72` TIMESTAMP, +`col73` TIMESTAMP, +`col74` VARCHAR (170), +`col75` SET ('test1','test2','test3'), +`col76` TINYBLOB, +`col77` BIGINT, +`col78` NUMERIC, +`col79` DATETIME, +`col80` YEAR, +`col81` NUMERIC, +`col82` LONGBLOB, +`col83` TEXT, +`col84` CHAR (83), +`col85` DECIMAL, +`col86` FLOAT, +`col87` INT, +`col88` VARCHAR (145), +`col89` DATE, +`col90` DECIMAL, +`col91` DECIMAL, +`col92` MEDIUMBLOB, +`col93` TIME, +KEY `idx0` (`col69`,`col90`,`col8`), +KEY `idx1` (`col60`), +KEY `idx2` (`col60`,`col70`,`col74`), +KEY `idx3` (`col22`,`col32`,`col72`,`col30`), +KEY `idx4` (`col29`), +KEY `idx5` (`col19`,`col45`(143)), +KEY `idx6` (`col46`,`col48`,`col5`,`col39`(118)), +KEY `idx7` (`col48`,`col61`), +KEY `idx8` (`col93`), +KEY `idx9` (`col31`), +KEY `idx10` (`col30`,`col21`), +KEY `idx11` (`col67`), +KEY `idx12` (`col44`,`col6`,`col8`,`col38`(226)), +KEY `idx13` (`col71`,`col41`,`col15`,`col49`(88)), +KEY `idx14` (`col78`), +KEY `idx15` (`col63`,`col67`,`col64`), +KEY `idx16` (`col17`,`col86`), +KEY `idx17` (`col77`,`col56`,`col10`,`col55`(24)), +KEY `idx18` (`col62`), +KEY `idx19` (`col31`,`col57`,`col56`,`col53`), +KEY `idx20` (`col46`), +KEY `idx21` (`col83`(54)), +KEY `idx22` (`col51`,`col7`(120)), +KEY `idx23` (`col7`(163),`col31`,`col71`,`col14`) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SET sql_mode = default; + +# Creating a table with 10 columns and 32 indexes +DROP TABLE IF EXISTS `table1`; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table1` +(`col0` CHAR (113), +`col1` FLOAT, +`col2` BIGINT, +`col3` DECIMAL, +`col4` BLOB, +`col5` LONGTEXT, +`col6` SET ('test1','test2','test3'), +`col7` BIGINT, +`col8` BIGINT, +`col9` TINYBLOB, +KEY `idx0` (`col5`(101),`col7`,`col8`), +KEY `idx1` (`col8`), +KEY `idx2` (`col4`(177),`col9`(126),`col6`,`col3`), +KEY `idx3` (`col5`(160)), +KEY `idx4` (`col9`(242)), +KEY `idx5` (`col4`(139),`col2`,`col3`), +KEY `idx6` (`col7`), +KEY `idx7` (`col6`,`col2`,`col0`,`col3`), +KEY `idx8` (`col9`(66)), +KEY `idx9` (`col5`(253)), +KEY `idx10` (`col1`,`col7`,`col2`), +KEY `idx11` (`col9`(242),`col0`,`col8`,`col5`(163)), +KEY `idx12` (`col8`), +KEY `idx13` (`col0`,`col9`(37)), +KEY `idx14` (`col0`), +KEY `idx15` (`col5`(111)), +KEY `idx16` (`col8`,`col0`,`col5`(13)), +KEY `idx17` (`col4`(139)), +KEY `idx18` (`col5`(189),`col2`,`col3`,`col9`(136)), +KEY `idx19` (`col0`,`col3`,`col1`,`col8`), +KEY `idx20` (`col8`), +KEY `idx21` (`col0`,`col7`,`col9`(227),`col3`), +KEY `idx22` (`col0`), +KEY `idx23` (`col2`), +KEY `idx24` (`col3`), +KEY `idx25` (`col2`,`col3`), +KEY `idx26` (`col0`), +KEY `idx27` (`col5`(254)), +KEY `idx28` (`col3`), +KEY `idx29` (`col3`), +KEY `idx30` (`col7`,`col3`,`col0`,`col4`(220)), +KEY `idx31` (`col4`(1),`col0`) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +# Creating a table with 141 columns and 18 indexes +DROP TABLE IF EXISTS `table2`; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table2` +(`col0` BOOL, +`col1` MEDIUMINT, +`col2` VARCHAR (209), +`col3` MEDIUMBLOB, +`col4` CHAR (13), +`col5` DOUBLE, +`col6` TINYTEXT, +`col7` REAL, +`col8` SMALLINT, +`col9` BLOB, +`col10` TINYINT, +`col11` DECIMAL, +`col12` BLOB, +`col13` DECIMAL, +`col14` LONGBLOB, +`col15` SMALLINT, +`col16` LONGBLOB, +`col17` TINYTEXT, +`col18` FLOAT, +`col19` CHAR (78), +`col20` MEDIUMTEXT, +`col21` SET ('test1','test2','test3'), +`col22` MEDIUMINT, +`col23` INT, +`col24` MEDIUMBLOB, 
+`col25` ENUM ('test1','test2','test3'), +`col26` TINYBLOB, +`col27` VARCHAR (116), +`col28` TIMESTAMP, +`col29` BLOB, +`col30` SMALLINT, +`col31` DOUBLE PRECISION, +`col32` DECIMAL, +`col33` DECIMAL, +`col34` TEXT, +`col35` MEDIUMINT, +`col36` MEDIUMINT, +`col37` BIGINT, +`col38` VARCHAR (253), +`col39` TINYBLOB, +`col40` MEDIUMBLOB, +`col41` BIGINT, +`col42` DOUBLE, +`col43` TEXT, +`col44` BLOB, +`col45` TIME, +`col46` MEDIUMINT, +`col47` DOUBLE PRECISION, +`col48` SET ('test1','test2','test3'), +`col49` DOUBLE PRECISION, +`col50` VARCHAR (97), +`col51` TEXT, +`col52` NUMERIC, +`col53` ENUM ('test1','test2','test3'), +`col54` MEDIUMTEXT, +`col55` MEDIUMINT, +`col56` DATETIME, +`col57` DATETIME, +`col58` MEDIUMTEXT, +`col59` CHAR (244), +`col60` LONGBLOB, +`col61` MEDIUMBLOB, +`col62` DOUBLE, +`col63` SMALLINT, +`col64` BOOL, +`col65` SMALLINT, +`col66` VARCHAR (212), +`col67` TIME, +`col68` REAL, +`col69` BOOL, +`col70` BIGINT, +`col71` DATE, +`col72` TINYINT, +`col73` ENUM ('test1','test2','test3'), +`col74` DATE, +`col75` TIME, +`col76` DATETIME, +`col77` BOOL, +`col78` TINYTEXT, +`col79` MEDIUMINT, +`col80` NUMERIC, +`col81` LONGTEXT, +`col82` SET ('test1','test2','test3'), +`col83` DOUBLE PRECISION, +`col84` NUMERIC, +`col85` VARCHAR (184), +`col86` DOUBLE PRECISION, +`col87` MEDIUMTEXT, +`col88` MEDIUMBLOB, +`col89` BOOL, +`col90` SMALLINT, +`col91` TINYINT, +`col92` ENUM ('test1','test2','test3'), +`col93` BOOL, +`col94` TIMESTAMP, +`col95` BOOL, +`col96` MEDIUMTEXT, +`col97` DECIMAL, +`col98` BOOL, +`col99` DECIMAL, +`col100` MEDIUMINT, +`col101` DOUBLE PRECISION, +`col102` TINYINT, +`col103` BOOL, +`col104` MEDIUMINT, +`col105` DECIMAL, +`col106` NUMERIC, +`col107` TIMESTAMP, +`col108` MEDIUMBLOB, +`col109` TINYBLOB, +`col110` SET ('test1','test2','test3'), +`col111` YEAR, +`col112` TIMESTAMP, +`col113` CHAR (201), +`col114` BOOL, +`col115` TINYINT, +`col116` DOUBLE, +`col117` TINYINT, +`col118` TIMESTAMP, +`col119` SET ('test1','test2','test3'), +`col120` SMALLINT, +`col121` TINYBLOB, +`col122` TIMESTAMP, +`col123` BLOB, +`col124` DATE, +`col125` SMALLINT, +`col126` ENUM ('test1','test2','test3'), +`col127` MEDIUMBLOB, +`col128` DOUBLE PRECISION, +`col129` REAL, +`col130` VARCHAR (159), +`col131` MEDIUMBLOB, +`col132` BIGINT, +`col133` INT, +`col134` SET ('test1','test2','test3'), +`col135` CHAR (198), +`col136` SET ('test1','test2','test3'), +`col137` MEDIUMTEXT, +`col138` SMALLINT, +`col139` BLOB, +`col140` LONGBLOB, +KEY `idx0` (`col14`(139),`col24`(208),`col38`,`col35`), +KEY `idx1` (`col48`,`col118`,`col29`(131),`col100`), +KEY `idx2` (`col86`,`col67`,`col43`(175)), +KEY `idx3` (`col19`), +KEY `idx4` (`col40`(220),`col67`), +KEY `idx5` (`col99`,`col56`), +KEY `idx6` (`col68`,`col28`,`col137`(157)), +KEY `idx7` (`col51`(160),`col99`,`col45`,`col39`(9)), +KEY `idx8` (`col15`,`col52`,`col90`,`col94`), +KEY `idx9` (`col24`(3),`col139`(248),`col108`(118),`col41`), +KEY `idx10` (`col36`,`col92`,`col114`), +KEY `idx11` (`col115`,`col9`(116)), +KEY `idx12` (`col130`,`col93`,`col134`), +KEY `idx13` (`col123`(65)), +KEY `idx14` (`col44`(90),`col86`,`col119`), +KEY `idx15` (`col69`), +KEY `idx16` (`col132`,`col81`(118),`col18`), +KEY `idx17` (`col24`(250),`col7`,`col92`,`col45`) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +# Creating a table with 199 columns and 1 indexes +DROP TABLE IF EXISTS `table3`; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table3` +(`col0` SMALLINT, +`col1` SET ('test1','test2','test3'), +`col2` TINYTEXT, +`col3` DOUBLE, +`col4` 
NUMERIC, +`col5` DATE, +`col6` BIGINT, +`col7` DOUBLE, +`col8` TEXT, +`col9` INT, +`col10` REAL, +`col11` TINYINT, +`col12` NUMERIC, +`col13` NUMERIC, +`col14` TIME, +`col15` DOUBLE, +`col16` REAL, +`col17` MEDIUMBLOB, +`col18` YEAR, +`col19` TINYTEXT, +`col20` YEAR, +`col21` CHAR (250), +`col22` TINYINT, +`col23` TINYINT, +`col24` SMALLINT, +`col25` DATETIME, +`col26` MEDIUMINT, +`col27` LONGBLOB, +`col28` VARCHAR (106), +`col29` FLOAT, +`col30` MEDIUMTEXT, +`col31` TINYBLOB, +`col32` BIGINT, +`col33` YEAR, +`col34` REAL, +`col35` MEDIUMBLOB, +`col36` LONGTEXT, +`col37` LONGBLOB, +`col38` BIGINT, +`col39` FLOAT, +`col40` TIME, +`col41` DATETIME, +`col42` BOOL, +`col43` BIGINT, +`col44` SMALLINT, +`col45` TIME, +`col46` DOUBLE PRECISION, +`col47` TIME, +`col48` TINYTEXT, +`col49` DOUBLE PRECISION, +`col50` BIGINT, +`col51` NUMERIC, +`col52` TINYBLOB, +`col53` DATE, +`col54` DECIMAL, +`col55` SMALLINT, +`col56` TINYTEXT, +`col57` ENUM ('test1','test2','test3'), +`col58` YEAR, +`col59` TIME, +`col60` TINYINT, +`col61` DECIMAL, +`col62` DOUBLE, +`col63` DATE, +`col64` LONGTEXT, +`col65` DOUBLE, +`col66` VARCHAR (88), +`col67` MEDIUMTEXT, +`col68` DATE, +`col69` MEDIUMINT, +`col70` DECIMAL, +`col71` MEDIUMTEXT, +`col72` LONGTEXT, +`col73` REAL, +`col74` DOUBLE, +`col75` TIME, +`col76` DATE, +`col77` DECIMAL, +`col78` MEDIUMBLOB, +`col79` NUMERIC, +`col80` BIGINT, +`col81` YEAR, +`col82` SMALLINT, +`col83` MEDIUMINT, +`col84` TINYINT, +`col85` MEDIUMBLOB, +`col86` TIME, +`col87` MEDIUMBLOB, +`col88` LONGTEXT, +`col89` BOOL, +`col90` BLOB, +`col91` LONGBLOB, +`col92` YEAR, +`col93` BLOB, +`col94` INT, +`col95` TINYTEXT, +`col96` TINYINT, +`col97` DECIMAL, +`col98` ENUM ('test1','test2','test3'), +`col99` MEDIUMINT, +`col100` TINYINT, +`col101` MEDIUMBLOB, +`col102` TINYINT, +`col103` SET ('test1','test2','test3'), +`col104` TIMESTAMP, +`col105` TEXT, +`col106` DATETIME, +`col107` MEDIUMTEXT, +`col108` CHAR (220), +`col109` TIME, +`col110` VARCHAR (131), +`col111` DECIMAL, +`col112` FLOAT, +`col113` SMALLINT, +`col114` BIGINT, +`col115` LONGBLOB, +`col116` SET ('test1','test2','test3'), +`col117` ENUM ('test1','test2','test3'), +`col118` BLOB, +`col119` MEDIUMTEXT, +`col120` SET ('test1','test2','test3'), +`col121` DATETIME, +`col122` FLOAT, +`col123` VARCHAR (242), +`col124` YEAR, +`col125` MEDIUMBLOB, +`col126` TIME, +`col127` BOOL, +`col128` TINYBLOB, +`col129` DOUBLE, +`col130` TINYINT, +`col131` BIGINT, +`col132` SMALLINT, +`col133` INT, +`col134` DOUBLE PRECISION, +`col135` MEDIUMBLOB, +`col136` SET ('test1','test2','test3'), +`col137` TINYTEXT, +`col138` DOUBLE PRECISION, +`col139` NUMERIC, +`col140` BLOB, +`col141` SET ('test1','test2','test3'), +`col142` INT, +`col143` VARCHAR (26), +`col144` BLOB, +`col145` REAL, +`col146` SET ('test1','test2','test3'), +`col147` LONGBLOB, +`col148` TEXT, +`col149` BLOB, +`col150` CHAR (189), +`col151` LONGTEXT, +`col152` INT, +`col153` FLOAT, +`col154` LONGTEXT, +`col155` DATE, +`col156` LONGBLOB, +`col157` TINYBLOB, +`col158` REAL, +`col159` DATE, +`col160` TIME, +`col161` YEAR, +`col162` DOUBLE, +`col163` VARCHAR (90), +`col164` FLOAT, +`col165` NUMERIC, +`col166` ENUM ('test1','test2','test3'), +`col167` DOUBLE PRECISION, +`col168` DOUBLE PRECISION, +`col169` TINYBLOB, +`col170` TIME, +`col171` SMALLINT, +`col172` TINYTEXT, +`col173` SMALLINT, +`col174` DOUBLE, +`col175` VARCHAR (14), +`col176` VARCHAR (90), +`col177` REAL, +`col178` MEDIUMINT, +`col179` TINYBLOB, +`col180` FLOAT, +`col181` TIMESTAMP, +`col182` REAL, +`col183` DOUBLE PRECISION, 
+`col184` BIGINT, +`col185` INT, +`col186` MEDIUMTEXT, +`col187` TIME, +`col188` FLOAT, +`col189` TIME, +`col190` INT, +`col191` FLOAT, +`col192` MEDIUMINT, +`col193` TINYINT, +`col194` MEDIUMTEXT, +`col195` DATE, +`col196` TIME, +`col197` YEAR, +`col198` CHAR (206), +KEY `idx0` (`col39`,`col23`) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +# Creating a table with 133 columns and 16 indexes +DROP TABLE IF EXISTS `table4`; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table4` +(`col0` VARCHAR (60), +`col1` NUMERIC, +`col2` LONGTEXT, +`col3` MEDIUMTEXT, +`col4` LONGTEXT, +`col5` LONGBLOB, +`col6` LONGBLOB, +`col7` DATETIME, +`col8` TINYTEXT, +`col9` BLOB, +`col10` BOOL, +`col11` BIGINT, +`col12` TEXT, +`col13` VARCHAR (213), +`col14` TINYBLOB, +`col15` BOOL, +`col16` MEDIUMTEXT, +`col17` DOUBLE, +`col18` TEXT, +`col19` BLOB, +`col20` SET ('test1','test2','test3'), +`col21` TINYINT, +`col22` DATETIME, +`col23` TINYINT, +`col24` ENUM ('test1','test2','test3'), +`col25` REAL, +`col26` BOOL, +`col27` FLOAT, +`col28` LONGBLOB, +`col29` DATETIME, +`col30` FLOAT, +`col31` SET ('test1','test2','test3'), +`col32` LONGBLOB, +`col33` NUMERIC, +`col34` YEAR, +`col35` VARCHAR (146), +`col36` BIGINT, +`col37` DATETIME, +`col38` DATE, +`col39` SET ('test1','test2','test3'), +`col40` CHAR (112), +`col41` FLOAT, +`col42` YEAR, +`col43` TIME, +`col44` DOUBLE, +`col45` NUMERIC, +`col46` FLOAT, +`col47` DECIMAL, +`col48` BIGINT, +`col49` DECIMAL, +`col50` YEAR, +`col51` MEDIUMTEXT, +`col52` LONGBLOB, +`col53` SET ('test1','test2','test3'), +`col54` BLOB, +`col55` FLOAT, +`col56` REAL, +`col57` REAL, +`col58` TEXT, +`col59` MEDIUMBLOB, +`col60` INT, +`col61` INT, +`col62` DATE, +`col63` TEXT, +`col64` DATE, +`col65` ENUM ('test1','test2','test3'), +`col66` DOUBLE PRECISION, +`col67` TINYTEXT, +`col68` TINYBLOB, +`col69` FLOAT, +`col70` BLOB, +`col71` DATETIME, +`col72` DOUBLE, +`col73` LONGTEXT, +`col74` TIME, +`col75` DATETIME, +`col76` VARCHAR (122), +`col77` MEDIUMTEXT, +`col78` MEDIUMTEXT, +`col79` BOOL, +`col80` LONGTEXT, +`col81` TINYTEXT, +`col82` NUMERIC, +`col83` DOUBLE PRECISION, +`col84` DATE, +`col85` YEAR, +`col86` BLOB, +`col87` TINYTEXT, +`col88` DOUBLE PRECISION, +`col89` MEDIUMINT, +`col90` MEDIUMTEXT, +`col91` NUMERIC, +`col92` DATETIME, +`col93` NUMERIC, +`col94` SET ('test1','test2','test3'), +`col95` TINYTEXT, +`col96` SET ('test1','test2','test3'), +`col97` YEAR, +`col98` MEDIUMINT, +`col99` TEXT, +`col100` TEXT, +`col101` TIME, +`col102` VARCHAR (225), +`col103` TINYTEXT, +`col104` TEXT, +`col105` MEDIUMTEXT, +`col106` TINYINT, +`col107` TEXT, +`col108` LONGBLOB, +`col109` LONGTEXT, +`col110` TINYTEXT, +`col111` CHAR (56), +`col112` YEAR, +`col113` ENUM ('test1','test2','test3'), +`col114` TINYBLOB, +`col115` DATETIME, +`col116` DATE, +`col117` TIME, +`col118` MEDIUMTEXT, +`col119` DOUBLE PRECISION, +`col120` FLOAT, +`col121` TIMESTAMP, +`col122` MEDIUMINT, +`col123` YEAR, +`col124` DATE, +`col125` TEXT, +`col126` FLOAT, +`col127` TINYTEXT, +`col128` BOOL, +`col129` NUMERIC, +`col130` TIMESTAMP, +`col131` INT, +`col132` MEDIUMBLOB, +KEY `idx0` (`col130`), +KEY `idx1` (`col30`,`col55`,`col19`(31)), +KEY `idx2` (`col104`(186)), +KEY `idx3` (`col131`), +KEY `idx4` (`col64`,`col93`,`col2`(11)), +KEY `idx5` (`col34`,`col121`,`col22`), +KEY `idx6` (`col33`,`col55`,`col83`), +KEY `idx7` (`col17`,`col87`(245),`col99`(17)), +KEY `idx8` (`col65`,`col120`), +KEY `idx9` (`col82`), +KEY `idx10` (`col9`(72)), +KEY `idx11` (`col88`), +KEY `idx12` 
(`col128`,`col9`(200),`col71`,`col66`), +KEY `idx13` (`col77`(126)), +KEY `idx14` (`col105`(26),`col13`,`col117`), +KEY `idx15` (`col4`(246),`col130`,`col115`,`col3`(141)) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +# Creating a table with 176 columns and 13 indexes +DROP TABLE IF EXISTS `table5`; +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table5` +(`col0` MEDIUMTEXT, +`col1` VARCHAR (90), +`col2` TINYTEXT, +`col3` TIME, +`col4` BOOL, +`col5` TINYTEXT, +`col6` BOOL, +`col7` TIMESTAMP, +`col8` TINYBLOB, +`col9` TINYINT, +`col10` YEAR, +`col11` SET ('test1','test2','test3'), +`col12` TEXT, +`col13` CHAR (248), +`col14` BIGINT, +`col15` TEXT, +`col16` TINYINT, +`col17` NUMERIC, +`col18` SET ('test1','test2','test3'), +`col19` LONGBLOB, +`col20` FLOAT, +`col21` INT, +`col22` TEXT, +`col23` BOOL, +`col24` DECIMAL, +`col25` DOUBLE PRECISION, +`col26` FLOAT, +`col27` TINYBLOB, +`col28` NUMERIC, +`col29` MEDIUMBLOB, +`col30` DATE, +`col31` LONGTEXT, +`col32` DATE, +`col33` FLOAT, +`col34` BIGINT, +`col35` TINYTEXT, +`col36` MEDIUMTEXT, +`col37` TIME, +`col38` INT, +`col39` TINYINT, +`col40` SET ('test1','test2','test3'), +`col41` CHAR (130), +`col42` SMALLINT, +`col43` INT, +`col44` MEDIUMTEXT, +`col45` VARCHAR (126), +`col46` INT, +`col47` DOUBLE PRECISION, +`col48` BIGINT, +`col49` MEDIUMTEXT, +`col50` TINYBLOB, +`col51` MEDIUMINT, +`col52` TEXT, +`col53` VARCHAR (208), +`col54` VARCHAR (207), +`col55` NUMERIC, +`col56` DATETIME, +`col57` ENUM ('test1','test2','test3'), +`col58` NUMERIC, +`col59` TINYBLOB, +`col60` VARCHAR (73), +`col61` MEDIUMTEXT, +`col62` TINYBLOB, +`col63` DATETIME, +`col64` NUMERIC, +`col65` MEDIUMINT, +`col66` DATETIME, +`col67` NUMERIC, +`col68` TINYINT, +`col69` VARCHAR (58), +`col70` DECIMAL, +`col71` MEDIUMTEXT, +`col72` DATE, +`col73` TIME, +`col74` DOUBLE PRECISION, +`col75` DECIMAL, +`col76` MEDIUMBLOB, +`col77` REAL, +`col78` YEAR, +`col79` YEAR, +`col80` LONGBLOB, +`col81` BLOB, +`col82` BIGINT, +`col83` ENUM ('test1','test2','test3'), +`col84` NUMERIC, +`col85` SET ('test1','test2','test3'), +`col86` MEDIUMTEXT, +`col87` LONGBLOB, +`col88` TIME, +`col89` ENUM ('test1','test2','test3'), +`col90` DECIMAL, +`col91` FLOAT, +`col92` DATETIME, +`col93` TINYTEXT, +`col94` TIMESTAMP, +`col95` TIMESTAMP, +`col96` TEXT, +`col97` REAL, +`col98` VARCHAR (198), +`col99` TIME, +`col100` TINYINT, +`col101` BIGINT, +`col102` LONGBLOB, +`col103` LONGBLOB, +`col104` MEDIUMINT, +`col105` MEDIUMTEXT, +`col106` TIMESTAMP, +`col107` SMALLINT, +`col108` NUMERIC, +`col109` DECIMAL, +`col110` FLOAT, +`col111` DECIMAL, +`col112` REAL, +`col113` TINYTEXT, +`col114` FLOAT, +`col115` VARCHAR (7), +`col116` LONGTEXT, +`col117` DATE, +`col118` BIGINT, +`col119` TEXT, +`col120` BIGINT, +`col121` BLOB, +`col122` CHAR (110), +`col123` NUMERIC, +`col124` MEDIUMBLOB, +`col125` NUMERIC, +`col126` NUMERIC, +`col127` BOOL, +`col128` TIME, +`col129` TINYBLOB, +`col130` TINYBLOB, +`col131` DATE, +`col132` INT, +`col133` VARCHAR (123), +`col134` CHAR (238), +`col135` VARCHAR (225), +`col136` LONGTEXT, +`col137` LONGBLOB, +`col138` REAL, +`col139` TINYBLOB, +`col140` DATETIME, +`col141` TINYTEXT, +`col142` LONGBLOB, +`col143` BIGINT, +`col144` VARCHAR (236), +`col145` TEXT, +`col146` YEAR, +`col147` DECIMAL, +`col148` TEXT, +`col149` MEDIUMBLOB, +`col150` TINYINT, +`col151` BOOL, +`col152` VARCHAR (72), +`col153` INT, +`col154` VARCHAR (165), +`col155` TINYINT, +`col156` MEDIUMTEXT, +`col157` DOUBLE PRECISION, +`col158` TIME, +`col159` MEDIUMBLOB, +`col160` LONGBLOB, +`col161` 
DATETIME, +`col162` DOUBLE PRECISION, +`col163` BLOB, +`col164` ENUM ('test1','test2','test3'), +`col165` TIMESTAMP, +`col166` DATE, +`col167` TINYBLOB, +`col168` TINYBLOB, +`col169` LONGBLOB, +`col170` DATETIME, +`col171` BIGINT, +`col172` VARCHAR (30), +`col173` LONGTEXT, +`col174` TIME, +`col175` FLOAT, +KEY `idx0` (`col16`,`col156`(139),`col97`,`col120`), +KEY `idx1` (`col24`,`col0`(108)), +KEY `idx2` (`col117`,`col173`(34),`col132`,`col82`), +KEY `idx3` (`col2`(86)), +KEY `idx4` (`col2`(43)), +KEY `idx5` (`col83`,`col35`(87),`col111`), +KEY `idx6` (`col6`,`col134`,`col92`), +KEY `idx7` (`col56`), +KEY `idx8` (`col30`,`col53`,`col129`(66)), +KEY `idx9` (`col53`,`col113`(211),`col32`,`col15`(75)), +KEY `idx10` (`col34`), +KEY `idx11` (`col126`), +KEY `idx12` (`col24`) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +# Creating a table with 179 columns and 46 indexes +DROP TABLE IF EXISTS `table6`; +-- error ER_TOO_BIG_ROWSIZE +--error ER_TOO_BIG_ROWSIZE +CREATE TABLE IF NOT EXISTS `table6` +(`col0` ENUM ('test1','test2','test3'), +`col1` MEDIUMBLOB, +`col2` MEDIUMBLOB, +`col3` DATETIME, +`col4` DATE, +`col5` YEAR, +`col6` REAL, +`col7` NUMERIC, +`col8` MEDIUMBLOB, +`col9` TEXT, +`col10` TIMESTAMP, +`col11` DOUBLE, +`col12` DOUBLE, +`col13` SMALLINT, +`col14` TIMESTAMP, +`col15` DECIMAL, +`col16` DATE, +`col17` TEXT, +`col18` LONGBLOB, +`col19` BIGINT, +`col20` FLOAT, +`col21` DATETIME, +`col22` TINYINT, +`col23` MEDIUMBLOB, +`col24` SET ('test1','test2','test3'), +`col25` TIME, +`col26` TEXT, +`col27` LONGTEXT, +`col28` BIGINT, +`col29` REAL, +`col30` YEAR, +`col31` MEDIUMBLOB, +`col32` MEDIUMINT, +`col33` FLOAT, +`col34` TEXT, +`col35` DATE, +`col36` TIMESTAMP, +`col37` REAL, +`col38` BLOB, +`col39` BLOB, +`col40` BLOB, +`col41` TINYBLOB, +`col42` INT, +`col43` TINYINT, +`col44` REAL, +`col45` BIGINT, +`col46` TIMESTAMP, +`col47` BLOB, +`col48` ENUM ('test1','test2','test3'), +`col49` BOOL, +`col50` CHAR (109), +`col51` DOUBLE, +`col52` DOUBLE PRECISION, +`col53` ENUM ('test1','test2','test3'), +`col54` FLOAT, +`col55` DOUBLE PRECISION, +`col56` CHAR (166), +`col57` TEXT, +`col58` TIME, +`col59` DECIMAL, +`col60` TEXT, +`col61` ENUM ('test1','test2','test3'), +`col62` LONGTEXT, +`col63` YEAR, +`col64` DOUBLE, +`col65` CHAR (87), +`col66` DATE, +`col67` BOOL, +`col68` MEDIUMBLOB, +`col69` DATETIME, +`col70` DECIMAL, +`col71` TIME, +`col72` REAL, +`col73` LONGTEXT, +`col74` BLOB, +`col75` REAL, +`col76` INT, +`col77` INT, +`col78` FLOAT, +`col79` DOUBLE, +`col80` MEDIUMINT, +`col81` ENUM ('test1','test2','test3'), +`col82` VARCHAR (221), +`col83` BIGINT, +`col84` TINYINT, +`col85` BIGINT, +`col86` FLOAT, +`col87` MEDIUMBLOB, +`col88` CHAR (126), +`col89` MEDIUMBLOB, +`col90` DATETIME, +`col91` TINYINT, +`col92` DOUBLE, +`col93` NUMERIC, +`col94` DATE, +`col95` BLOB, +`col96` DATETIME, +`col97` TIME, +`col98` LONGBLOB, +`col99` INT, +`col100` SET ('test1','test2','test3'), +`col101` TINYBLOB, +`col102` INT, +`col103` MEDIUMBLOB, +`col104` MEDIUMTEXT, +`col105` FLOAT, +`col106` TINYBLOB, +`col107` VARCHAR (26), +`col108` TINYINT, +`col109` TIME, +`col110` TINYBLOB, +`col111` LONGBLOB, +`col112` TINYTEXT, +`col113` FLOAT, +`col114` TINYINT, +`col115` NUMERIC, +`col116` TIME, +`col117` SET ('test1','test2','test3'), +`col118` DATE, +`col119` SMALLINT, +`col120` BLOB, +`col121` TINYTEXT, +`col122` REAL, +`col123` YEAR, +`col124` REAL, +`col125` BOOL, +`col126` BLOB, +`col127` REAL, +`col128` MEDIUMBLOB, +`col129` TIMESTAMP, +`col130` LONGBLOB, +`col131` MEDIUMBLOB, +`col132` YEAR, 
+`col133` YEAR, +`col134` INT, +`col135` MEDIUMINT, +`col136` MEDIUMINT, +`col137` TINYTEXT, +`col138` TINYBLOB, +`col139` BLOB, +`col140` SET ('test1','test2','test3'), +`col141` ENUM ('test1','test2','test3'), +`col142` ENUM ('test1','test2','test3'), +`col143` TINYTEXT, +`col144` DATETIME, +`col145` TEXT, +`col146` DOUBLE PRECISION, +`col147` DECIMAL, +`col148` MEDIUMTEXT, +`col149` TINYTEXT, +`col150` SET ('test1','test2','test3'), +`col151` MEDIUMTEXT, +`col152` CHAR (126), +`col153` DOUBLE, +`col154` CHAR (243), +`col155` SET ('test1','test2','test3'), +`col156` SET ('test1','test2','test3'), +`col157` DATETIME, +`col158` DOUBLE, +`col159` NUMERIC, +`col160` DECIMAL, +`col161` FLOAT, +`col162` LONGBLOB, +`col163` LONGTEXT, +`col164` INT, +`col165` TIME, +`col166` CHAR (27), +`col167` VARCHAR (63), +`col168` TEXT, +`col169` TINYBLOB, +`col170` TINYBLOB, +`col171` ENUM ('test1','test2','test3'), +`col172` INT, +`col173` TIME, +`col174` DECIMAL, +`col175` DOUBLE, +`col176` MEDIUMBLOB, +`col177` LONGBLOB, +`col178` CHAR (43), +KEY `idx0` (`col131`(219)), +KEY `idx1` (`col67`,`col122`,`col59`,`col87`(33)), +KEY `idx2` (`col83`,`col42`,`col57`(152)), +KEY `idx3` (`col106`(124)), +KEY `idx4` (`col173`,`col80`,`col165`,`col89`(78)), +KEY `idx5` (`col174`,`col145`(108),`col23`(228),`col141`), +KEY `idx6` (`col157`,`col140`), +KEY `idx7` (`col130`(188),`col15`), +KEY `idx8` (`col52`), +KEY `idx9` (`col144`), +KEY `idx10` (`col155`), +KEY `idx11` (`col62`(230),`col1`(109)), +KEY `idx12` (`col151`(24),`col95`(85)), +KEY `idx13` (`col114`), +KEY `idx14` (`col42`,`col98`(56),`col146`), +KEY `idx15` (`col147`,`col39`(254),`col35`), +KEY `idx16` (`col79`), +KEY `idx17` (`col65`), +KEY `idx18` (`col149`(165),`col168`(119),`col32`,`col117`), +KEY `idx19` (`col64`), +KEY `idx20` (`col93`), +KEY `idx21` (`col64`,`col113`,`col104`(182)), +KEY `idx22` (`col52`,`col111`(189)), +KEY `idx23` (`col45`), +KEY `idx24` (`col154`,`col107`,`col110`(159)), +KEY `idx25` (`col149`(1),`col87`(131)), +KEY `idx26` (`col58`,`col115`,`col63`), +KEY `idx27` (`col95`(9),`col0`,`col87`(113)), +KEY `idx28` (`col92`,`col130`(1)), +KEY `idx29` (`col151`(129),`col137`(254),`col13`), +KEY `idx30` (`col49`), +KEY `idx31` (`col28`), +KEY `idx32` (`col83`,`col146`), +KEY `idx33` (`col155`,`col90`,`col17`(245)), +KEY `idx34` (`col174`,`col169`(44),`col107`), +KEY `idx35` (`col113`), +KEY `idx36` (`col52`), +KEY `idx37` (`col16`,`col120`(190)), +KEY `idx38` (`col28`), +KEY `idx39` (`col131`(165)), +KEY `idx40` (`col135`,`col26`(86)), +KEY `idx41` (`col69`,`col94`), +KEY `idx42` (`col105`,`col151`(38),`col97`), +KEY `idx43` (`col88`), +KEY `idx44` (`col176`(100),`col42`,`col73`(189),`col94`), +KEY `idx45` (`col2`(27),`col27`(116)) +)engine=innodb ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +DROP TABLE IF EXISTS table0; +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; +DROP TABLE IF EXISTS table3; +DROP TABLE IF EXISTS table4; +DROP TABLE IF EXISTS table5; +DROP TABLE IF EXISTS table6; + +EVAL SET GLOBAL innodb_file_per_table=$file_per_table; +SET sql_mode = default; diff --git a/mysql-test/suite/innodb_zip/t/bug36172.test b/mysql-test/suite/innodb_zip/t/bug36172.test new file mode 100644 index 00000000000..49590f40192 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/bug36172.test @@ -0,0 +1,30 @@ +# +# Test case for bug 36172 +# + +-- source include/not_embedded.inc +-- source include/have_innodb.inc +-- source include/have_innodb_16k.inc + +SET default_storage_engine=InnoDB; + +# we do not really care about what gets 
printed, we are only +# interested in getting success or failure according to our +# expectations + +-- disable_query_log +-- disable_result_log + +let $file_per_table=`select @@innodb_file_per_table`; +SET GLOBAL innodb_file_per_table=on; + +DROP TABLE IF EXISTS `table0`; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +CREATE TABLE `table0` ( `col0` tinyint(1) DEFAULT NULL, `col1` tinyint(1) DEFAULT NULL, `col2` tinyint(4) DEFAULT NULL, `col3` date DEFAULT NULL, `col4` time DEFAULT NULL, `col5` set('test1','test2','test3') DEFAULT NULL, `col6` time DEFAULT NULL, `col7` text, `col8` decimal(10,0) DEFAULT NULL, `col9` set('test1','test2','test3') DEFAULT NULL, `col10` float DEFAULT NULL, `col11` double DEFAULT NULL, `col12` enum('test1','test2','test3') DEFAULT NULL, `col13` tinyblob, `col14` year(4) DEFAULT NULL, `col15` set('test1','test2','test3') DEFAULT NULL, `col16` decimal(10,0) DEFAULT NULL, `col17` decimal(10,0) DEFAULT NULL, `col18` blob, `col19` datetime DEFAULT NULL, `col20` double DEFAULT NULL, `col21` decimal(10,0) DEFAULT NULL, `col22` datetime DEFAULT NULL, `col23` decimal(10,0) DEFAULT NULL, `col24` decimal(10,0) DEFAULT NULL, `col25` longtext, `col26` tinyblob, `col27` time DEFAULT NULL, `col28` tinyblob, `col29` enum('test1','test2','test3') DEFAULT NULL, `col30` smallint(6) DEFAULT NULL, `col31` double DEFAULT NULL, `col32` float DEFAULT NULL, `col33` char(175) DEFAULT NULL, `col34` tinytext, `col35` tinytext, `col36` tinyblob, `col37` tinyblob, `col38` tinytext, `col39` mediumblob, `col40` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `col41` double DEFAULT NULL, `col42` smallint(6) DEFAULT NULL, `col43` longblob, `col44` varchar(80) DEFAULT NULL, `col45` mediumtext, `col46` decimal(10,0) DEFAULT NULL, `col47` bigint(20) DEFAULT NULL, `col48` date DEFAULT NULL, `col49` tinyblob, `col50` date DEFAULT NULL, `col51` tinyint(1) DEFAULT NULL, `col52` mediumint(9) DEFAULT NULL, `col53` float DEFAULT NULL, `col54` tinyblob, `col55` longtext, `col56` smallint(6) DEFAULT NULL, `col57` enum('test1','test2','test3') DEFAULT NULL, `col58` datetime DEFAULT NULL, `col59` mediumtext, `col60` varchar(232) DEFAULT NULL, `col61` decimal(10,0) DEFAULT NULL, `col62` year(4) DEFAULT NULL, `col63` smallint(6) DEFAULT NULL, `col64` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', `col65` blob, `col66` longblob, `col67` int(11) DEFAULT NULL, `col68` longtext, `col69` enum('test1','test2','test3') DEFAULT NULL, `col70` int(11) DEFAULT NULL, `col71` time DEFAULT NULL, `col72` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', `col73` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', `col74` varchar(170) DEFAULT NULL, `col75` set('test1','test2','test3') DEFAULT NULL, `col76` tinyblob, `col77` bigint(20) DEFAULT NULL, `col78` decimal(10,0) DEFAULT NULL, `col79` datetime DEFAULT NULL, `col80` year(4) DEFAULT NULL, `col81` decimal(10,0) DEFAULT NULL, `col82` longblob, `col83` text, `col84` char(83) DEFAULT NULL, `col85` decimal(10,0) DEFAULT NULL, `col86` float DEFAULT NULL, `col87` int(11) DEFAULT NULL, `col88` varchar(145) DEFAULT NULL, `col89` date DEFAULT NULL, `col90` decimal(10,0) DEFAULT NULL, `col91` decimal(10,0) DEFAULT NULL, `col92` mediumblob, `col93` time DEFAULT NULL, KEY `idx0` (`col69`,`col90`,`col8`), KEY `idx1` (`col60`), KEY `idx2` (`col60`,`col70`,`col74`), KEY `idx3` (`col22`,`col32`,`col72`,`col30`), KEY `idx4` (`col29`), KEY `idx5` (`col19`,`col45`(143)), KEY `idx6` (`col46`,`col48`,`col5`,`col39`(118)), KEY `idx7` (`col48`,`col61`), KEY `idx8` 
(`col93`), KEY `idx9` (`col31`), KEY `idx10` (`col30`,`col21`), KEY `idx11` (`col67`), KEY `idx12` (`col44`,`col6`,`col8`,`col38`(226)), KEY `idx13` (`col71`,`col41`,`col15`,`col49`(88)), KEY `idx14` (`col78`), KEY `idx15` (`col63`,`col67`,`col64`), KEY `idx16` (`col17`,`col86`), KEY `idx17` (`col77`,`col56`,`col10`,`col55`(24)), KEY `idx18` (`col62`), KEY `idx19` (`col31`,`col57`,`col56`,`col53`), KEY `idx20` (`col46`), KEY `idx21` (`col83`(54)), KEY `idx22` (`col51`,`col7`(120)), KEY `idx23` (`col7`(163),`col31`,`col71`,`col14`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SET sql_mode = default; +insert ignore into `table0` set `col23` = 7887371.5084383683, `col24` = 4293854615.6906948000, `col25` = 'vitalist', `col26` = 'widespread', `col27` = '3570490', `col28` = 'habitual', `col30` = -5471, `col31` = 4286985783.6771750000, `col32` = 6354540.9826654866, `col33` = 'defoliation', `col34` = 'logarithms', `col35` = 'tegument\'s', `col36` = 'scouting\'s', `col37` = 'intermittency', `col38` = 'elongates', `col39` = 'prophecies', `col40` = '20560103035939', `col41` = 4292809130.0544143000, `col42` = 22057, `col43` = 'Hess\'s', `col44` = 'bandstand', `col45` = 'phenylketonuria', `col46` = 6338767.4018677324, `col47` = 5310247, `col48` = '12592418', `col49` = 'churchman\'s', `col50` = '32226125', `col51` = -58, `col52` = -6207968, `col53` = 1244839.3255104220, `col54` = 'robotized', `col55` = 'monotonous', `col56` = -26909, `col58` = '20720107023550', `col59` = 'suggestiveness\'s', `col60` = 'gemology', `col61` = 4287800670.2229986000, `col62` = '1944', `col63` = -16827, `col64` = '20700107212324', `col65` = 'Nicolais', `col66` = 'apteryx', `col67` = 6935317, `col68` = 'stroganoff', `col70` = 3316430, `col71` = '3277608', `col72` = '19300511045918', `col73` = '20421201003327', `col74` = 'attenuant', `col75` = '15173', `col76` = 'upstroke\'s', `col77` = 8118987, `col78` = 6791516.2735374002, `col79` = '20780701144624', `col80` = '2134', `col81` = 4290682351.3127537000, `col82` = 'unexplainably', `col83` = 'Storm', `col84` = 'Greyso\'s', `col85` = 4289119212.4306774000, `col86` = 7617575.8796655172, `col87` = -6325335, `col88` = 'fondue\'s', `col89` = '40608940', `col90` = 1659421.8093508712, `col91` = 8346904.6584368423, `col92` = 'reloads', `col93` = '5188366'; +CHECK TABLE table0 EXTENDED; +INSERT IGNORE INTO `table0` SET `col19` = '19940127002709', `col20` = 2383927.9055146948, `col21` = 4293243420.5621204000, `col22` = '20511211123705', `col23` = 4289899778.6573381000, `col24` = 4293449279.0540481000, `col25` = 'emphysemic', `col26` = 'dentally', `col27` = '2347406', `col28` = 'eruct', `col30` = 1222, `col31` = 4294372994.9941406000, `col32` = 4291385574.1173744000, `col33` = 'borrowing\'s', `col34` = 'septics', `col35` = 'ratter\'s', `col36` = 'Kaye', `col37` = 'Florentia', `col38` = 'allium', `col39` = 'barkeep', `col40` = '19510407003441', `col41` = 4293559200.4215522000, `col42` = 22482, `col43` = 'decussate', `col44` = 'Brom\'s', `col45` = 'violated', `col46` = 4925506.4635456400, `col47` = 930549, `col48` = '51296066', `col49` = 'voluminously', `col50` = '29306676', `col51` = -88, `col52` = -2153690, `col53` = 4290250202.1464887000, `col54` = 'expropriation', `col55` = 'Aberdeen\'s', `col56` = 20343, `col58` = '19640415171532', `col59` = 'extern', `col60` = 'Ubana', `col61` = 4290487961.8539081000, `col62` = '2147', `col63` = -24271, `col64` = '20750801194548', `col65` = 'Cunaxa\'s', `col66` = 'pasticcio', `col67` = 2795817, `col68` = 'Indore\'s', 
`col70` = 6864127, `col71` = '1817832', `col72` = '20540506114211', `col73` = '20040101012300', `col74` = 'rationalized', `col75` = '45522', `col76` = 'indene', `col77` = -6964559, `col78` = 4247535.5266884370, `col79` = '20720416124357', `col80` = '2143', `col81` = 4292060102.4466386000, `col82` = 'striving', `col83` = 'boneblack\'s', `col84` = 'redolent', `col85` = 6489697.9009369183, `col86` = 4287473465.9731131000, `col87` = 7726015, `col88` = 'perplexed', `col89` = '17153791', `col90` = 5478587.1108127078, `col91` = 4287091404.7004304000, `col92` = 'Boulez\'s', `col93` = '2931278'; +CHECK TABLE table0 EXTENDED; +DROP TABLE table0; +EVAL SET GLOBAL innodb_file_per_table=$file_per_table; diff --git a/mysql-test/suite/innodb_zip/t/bug52745.test b/mysql-test/suite/innodb_zip/t/bug52745.test new file mode 100644 index 00000000000..a3de7323efe --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/bug52745.test @@ -0,0 +1,105 @@ +-- source include/have_innodb.inc +-- source include/have_innodb_zip.inc + +let $file_per_table=`select @@innodb_file_per_table`; +SET GLOBAL innodb_file_per_table=on; +SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; +CREATE TABLE bug52745 ( + a2 int(10) unsigned DEFAULT NULL, + col37 time DEFAULT NULL, + col38 char(229) CHARACTER SET utf8 DEFAULT NULL, + col39 text, + col40 timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + col41 int(10) unsigned DEFAULT NULL, + col42 varchar(248) CHARACTER SET utf8 DEFAULT NULL, + col43 smallint(5) unsigned zerofill DEFAULT NULL, + col44 varchar(150) CHARACTER SET utf8 DEFAULT NULL, + col45 float unsigned zerofill DEFAULT NULL, + col46 binary(1) DEFAULT NULL, + col47 tinyint(4) DEFAULT NULL, + col48 tinyint(1) DEFAULT NULL, + col49 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + col50 binary(1) DEFAULT NULL, + col51 double unsigned zerofill DEFAULT NULL, + col52 int(10) unsigned DEFAULT NULL, + col53 time DEFAULT NULL, + col54 double unsigned DEFAULT NULL, + col55 time DEFAULT NULL, + col56 mediumtext CHARACTER SET latin2, + col57 blob, + col58 decimal(52,16) unsigned zerofill NOT NULL DEFAULT '000000000000000000000000000000000000.0000000000000000', + col59 binary(1) DEFAULT NULL, + col60 longblob, + col61 time DEFAULT NULL, + col62 longtext CHARACTER SET utf8 COLLATE utf8_persian_ci, + col63 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + col64 int(10) unsigned DEFAULT NULL, + col65 date DEFAULT NULL, + col66 timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', + col67 binary(1) DEFAULT NULL, + col68 tinyblob, + col69 date DEFAULT NULL, + col70 tinyint(3) unsigned zerofill DEFAULT NULL, + col71 varchar(44) CHARACTER SET utf8 DEFAULT NULL, + col72 datetime DEFAULT NULL, + col73 smallint(5) unsigned zerofill DEFAULT NULL, + col74 longblob, + col75 bit(34) DEFAULT NULL, + col76 float unsigned zerofill DEFAULT NULL, + col77 year(4) DEFAULT NULL, + col78 tinyint(3) unsigned DEFAULT NULL, + col79 set('msfheowh','tbpxbgf','by','wahnrjw','myqfasxz','rsokyumrt') CHARACTER SET latin2 DEFAULT NULL, + col80 datetime DEFAULT NULL, + col81 smallint(6) DEFAULT NULL, + col82 enum('xtaurnqfqz','rifrse','kuzwpbvb','niisabk','zxavro','rbvasv','','uulrfaove','','') DEFAULT NULL, + col83 bigint(20) unsigned zerofill DEFAULT NULL, + col84 float unsigned zerofill DEFAULT NULL, + col85 double DEFAULT NULL, + col86 enum('ylannv','','vlkhycqc','snke','cxifustp','xiaxaswzp','oxl') CHARACTER SET latin1 COLLATE latin1_german2_ci DEFAULT NULL, + col87 varbinary(221) DEFAULT NULL, + col88 double unsigned DEFAULT NULL, + col89 float unsigned 
zerofill DEFAULT NULL, + col90 tinyblob +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SET sql_mode = default; +INSERT IGNORE INTO bug52745 SET +col40='0000-00-00 00:00:00', +col51=16547, +col53='7711484', +col54=-28604, +col55='7112612', +col56='wakefulness\'', +col57=repeat('absorbefacient\'',106), +col58=11027, +col59='AM09gW7', +col60=repeat('Noelani\'',16), +col61='2520576', +col62='substitutiv', +col63='19950106155112', +col64=-12038, +col65='86238806', +col66='19600719080256', +col68=repeat('Sagittarius\'',54), +col69='38943902', +col70=1232, +col71='Elora\'', +col74=repeat('zipp',11), +col75='0', +col76=23254, +col78=13247, +col79='56219', +col80='20500609035724', +col81=11632, +col82=7, +col84=-23863, +col85=6341, +col87='HZdkf.4 s7t,5Rmq 8so fmr,ruGLUG25TrtI.yQ 2SuHq0ML7rw7.4 b2yf2E5TJxOtBBZImezDnzpj,uPYfznnEUDN1e9aQoO 2DsplB7TFWy oQJ br HLF :F,eQ p4i1oWsr lL3PG,hjCz6hYqN h1QTjLCjrv:QCdSzpYBibJAtZCxLOk3l6Blsh.W', +col88=16894, +col89=6161, +col90=repeat('gale',48); + +SHOW WARNINGS; + +DROP TABLE bug52745; + +EVAL SET GLOBAL innodb_file_per_table=$file_per_table; diff --git a/mysql-test/suite/innodb_zip/t/bug53591.test b/mysql-test/suite/innodb_zip/t/bug53591.test new file mode 100644 index 00000000000..1943c59fe17 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/bug53591.test @@ -0,0 +1,22 @@ +-- source include/have_innodb.inc +-- source include/have_innodb_zip.inc + +let $file_per_table=`select @@innodb_file_per_table`; + +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_strict_mode=on; + +set old_alter_table=0; + +CREATE TABLE bug53591(a text charset utf8 not null) +ENGINE=InnoDB KEY_BLOCK_SIZE=1; +-- replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid} +-- error ER_TOO_BIG_ROWSIZE +ALTER TABLE bug53591 ADD PRIMARY KEY(a(220)); +-- replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid} +SHOW WARNINGS; + +DROP TABLE bug53591; + +EVAL SET GLOBAL innodb_file_per_table=$file_per_table; +SET GLOBAL innodb_strict_mode=DEFAULT; diff --git a/mysql-test/suite/innodb_zip/t/bug56680.test b/mysql-test/suite/innodb_zip/t/bug56680.test new file mode 100644 index 00000000000..694c5ffac59 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/bug56680.test @@ -0,0 +1,140 @@ +#Want to skip this test from daily Valgrind execution +--source include/no_valgrind_without_big.inc +# +# Bug #56680 InnoDB may return wrong results from a case-insensitive index +# +-- source include/have_innodb.inc +-- source include/have_innodb_zip.inc + +-- disable_query_log +SET @tx_isolation_orig = @@tx_isolation; +SET @innodb_file_per_table_orig = @@innodb_file_per_table; +# The flag innodb_change_buffering_debug is only available in debug builds. +# It instructs InnoDB to try to evict pages from the buffer pool when +# change buffering is possible, so that the change buffer will be used +# whenever possible. 
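+# Since the variable only exists in debug builds, the statements below are
+# wrapped in "-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE" so that the test also
+# runs on release builds where the variable is unknown.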
+-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +SET @innodb_change_buffering_debug_orig = @@innodb_change_buffering_debug; +-- error 0,ER_UNKNOWN_SYSTEM_VARIABLE +SET GLOBAL innodb_change_buffering_debug = 1; +-- enable_query_log +SET GLOBAL tx_isolation='REPEATABLE-READ'; +SET GLOBAL innodb_file_per_table=on; + +CREATE TABLE bug56680( + a INT AUTO_INCREMENT PRIMARY KEY, + b CHAR(1), + c INT, + INDEX(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; + +INSERT INTO bug56680 VALUES(0,'x',1); +BEGIN; +SELECT b FROM bug56680; + +connect (con1,localhost,root,,); +connection con1; +BEGIN; +UPDATE bug56680 SET b='X'; + +connection default; +# This should return the last committed value 'x', but would return 'X' +# due to a bug in row_search_for_mysql(). +SELECT b FROM bug56680; +# This would always return the last committed value 'x'. +SELECT * FROM bug56680; + +connection con1; +ROLLBACK; +disconnect con1; + +connection default; + +SELECT b FROM bug56680; + +# For the rest of this test, use the READ UNCOMMITTED isolation level +# to see what exists in the secondary index. +SET GLOBAL tx_isolation='READ-UNCOMMITTED'; + +# Create enough rows for the table, so that the insert buffer will be +# used for modifying the secondary index page. There must be multiple +# index pages, because changes to the root page are never buffered. + +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; +INSERT INTO bug56680 SELECT 0,b,c FROM bug56680; + +BEGIN; +SELECT b FROM bug56680 LIMIT 2; + +connect (con1,localhost,root,,); +connection con1; +BEGIN; +DELETE FROM bug56680 WHERE a=1; +# This should be buffered, if innodb_change_buffering_debug = 1 is in effect. +INSERT INTO bug56680 VALUES(1,'X',1); + +# This should force an insert buffer merge, and return 'X' in the first row. +SELECT b FROM bug56680 LIMIT 3; + +connection default; +SELECT b FROM bug56680 LIMIT 2; +CHECK TABLE bug56680; + +connection con1; +ROLLBACK; +SELECT b FROM bug56680 LIMIT 2; +CHECK TABLE bug56680; + +connection default; +disconnect con1; + +SELECT b FROM bug56680 LIMIT 2; + +CREATE TABLE bug56680_2( + a INT AUTO_INCREMENT PRIMARY KEY, + b VARCHAR(2) CHARSET latin1 COLLATE latin1_german2_ci, + c INT, + INDEX(b)) +ENGINE=InnoDB STATS_PERSISTENT=0; + +INSERT INTO bug56680_2 SELECT 0,_latin1 0xdf,c FROM bug56680; + +BEGIN; +SELECT HEX(b) FROM bug56680_2 LIMIT 2; +DELETE FROM bug56680_2 WHERE a=1; +# This should be buffered, if innodb_change_buffering_debug = 1 is in effect. +INSERT INTO bug56680_2 VALUES(1,'SS',1); + +# This should force an insert buffer merge, and return 'SS' in the first row. +SELECT HEX(b) FROM bug56680_2 LIMIT 3; +CHECK TABLE bug56680_2; + +# Test this with compressed tables. +ALTER TABLE bug56680_2 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +SELECT HEX(b) FROM bug56680_2 LIMIT 2; +DELETE FROM bug56680_2 WHERE a=1; +# This should be buffered, if innodb_change_buffering_debug = 1 is in effect. +INSERT INTO bug56680_2 VALUES(1,_latin1 0xdf,1); + +# This should force an insert buffer merge, and return 0xdf in the first row. 
+SELECT HEX(b) FROM bug56680_2 LIMIT 3; +CHECK TABLE bug56680_2; + +DROP TABLE bug56680_2; +DROP TABLE bug56680; + +-- disable_query_log +SET GLOBAL tx_isolation = @tx_isolation_orig; +SET GLOBAL innodb_file_per_table = @innodb_file_per_table_orig; +-- error 0, ER_UNKNOWN_SYSTEM_VARIABLE +SET GLOBAL innodb_change_buffering_debug = @innodb_change_buffering_debug_orig; diff --git a/mysql-test/suite/innodb_zip/t/cmp_drop_table-master.opt b/mysql-test/suite/innodb_zip/t/cmp_drop_table-master.opt new file mode 100644 index 00000000000..a9a3d8c3db8 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/cmp_drop_table-master.opt @@ -0,0 +1 @@ +--innodb-buffer-pool-size=8M diff --git a/mysql-test/suite/innodb_zip/t/cmp_drop_table.test b/mysql-test/suite/innodb_zip/t/cmp_drop_table.test new file mode 100644 index 00000000000..145f55bb160 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/cmp_drop_table.test @@ -0,0 +1,57 @@ +-- source include/have_innodb.inc +-- source include/have_innodb_16k.inc + +let $per_table=`select @@innodb_file_per_table`; + +-- let $query_i_s = SELECT page_size FROM information_schema.innodb_cmpmem WHERE pages_used > 0 + +set global innodb_file_per_table=on; + +create table t1(a text) engine=innodb key_block_size=8; + +-- disable_query_log + +# insert some rows so we are using compressed pages +-- let $i = 10 +while ($i) +{ + insert into t1 values(repeat('abcdefghijklmnopqrstuvwxyz',100)); + dec $i; +} +-- enable_query_log + +# we should be using some 8K pages +-- eval $query_i_s + +drop table t1; + +# because of lazy eviction at drop table there should still be some +# used 8K pages +-- eval $query_i_s + +# create a non-compressed table and insert enough into it to evict +# compressed pages +create table t2(a text) engine=innodb; + +-- disable_query_log + +-- let $i = 500 +while ($i) +{ + insert into t2 values(repeat('abcdefghijklmnopqrstuvwxyz',1000)); + dec $i; +} + +-- enable_query_log + +# now there should be no 8K pages in the buffer pool +-- eval $query_i_s + +drop table t2; + +# +# restore environment to the state it was before this test execution +# + +-- disable_query_log +eval set global innodb_file_per_table=$per_table; diff --git a/mysql-test/suite/innodb_zip/t/cmp_per_index.test b/mysql-test/suite/innodb_zip/t/cmp_per_index.test new file mode 100644 index 00000000000..58b7855219b --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/cmp_per_index.test @@ -0,0 +1,118 @@ +# +# Test information_schema.innodb_cmp_per_index +# + +-- source include/have_innodb.inc + +# Using innodb_log_compressed=0 leads to a larger number of page +# compressions, because page_cur_insert_rec_zip() will reorganize the +# page before attempting an insert followed by page compression and +# page_zip_compress_write_log_no_data(). 
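+# The guard below skips the test when innodb_log_compressed_pages=0, because
+# the compression counters checked later assume the default setting in which
+# compressed page images are written to the redo log.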
+ +if (`SELECT @@innodb_log_compressed_pages = 0`) +{ + --skip Needs innodb_log_compressed_pages +} + +# numbers read in this test depend on the page size +-- source include/have_innodb_16k.inc +# include/restart_mysqld.inc does not work in embedded mode +-- source include/not_embedded.inc + +-- vertical_results + +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +# reset any leftover stats from previous tests +-- disable_query_log +-- disable_result_log +SELECT * FROM information_schema.innodb_cmp_per_index_reset; +-- enable_result_log +-- enable_query_log + +# see that the table is empty +SELECT * FROM information_schema.innodb_cmp_per_index; + +# create a table that uses compression +CREATE TABLE t ( + a INT, + b VARCHAR(512), + c VARCHAR(16), + PRIMARY KEY (a), + INDEX (b(512)), + INDEX (c(16)) +) ENGINE=INNODB KEY_BLOCK_SIZE=2; + +SELECT +database_name, +table_name, +index_name, +compress_ops, +compress_ops_ok, +uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; + +# insert some data into it +BEGIN; +-- disable_query_log +let $i=128; +while ($i) +{ + -- eval INSERT INTO t VALUES ($i, REPEAT('x', 512), NULL); + dec $i; +} +-- enable_query_log +COMMIT; + +ALTER TABLE t DROP INDEX c; + +GRANT USAGE ON *.* TO 'tuser01'@'localhost' IDENTIFIED BY 'cDJvI9s_Uq'; +FLUSH PRIVILEGES; + +-- connect (con1,localhost,tuser01,cDJvI9s_Uq,) +-- connection con1 + +-- error ER_SPECIFIC_ACCESS_DENIED_ERROR +SELECT * FROM information_schema.innodb_cmp_per_index; + +-- connection default +-- disconnect con1 + +DROP USER 'tuser01'@'localhost'; + +SELECT +database_name, +table_name, +index_name, +CASE WHEN compress_ops=47 and @@innodb_compression_level IN (4,8,9) THEN 65 +ELSE compress_ops END as compress_ops, +CASE WHEN compress_ops_ok=47 and @@innodb_compression_level IN (4,8,9) THEN 65 +ELSE compress_ops_ok END as compress_ops_ok, +uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; + +# restart mysqld and see that uncompress ops also gets increased when +# selecting from the table again + +-- source include/restart_mysqld.inc + +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SELECT COUNT(*) FROM t; + +SELECT +database_name, +table_name, +index_name, +compress_ops, +compress_ops_ok, +CASE WHEN uncompress_ops=6 and @@innodb_compression_level IN (4,8,9) THEN 9 +ELSE uncompress_ops END as uncompress_ops +FROM information_schema.innodb_cmp_per_index +ORDER BY 1, 2, 3; + +DROP TABLE t; + +SET GLOBAL innodb_cmp_per_index_enabled=default; diff --git a/mysql-test/suite/innodb_zip/t/create_options.test b/mysql-test/suite/innodb_zip/t/create_options.test new file mode 100644 index 00000000000..1a3dbdff90a --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/create_options.test @@ -0,0 +1,528 @@ +# Tests for various combinations of ROW_FORMAT and KEY_BLOCK_SIZE +# Related bugs; +# Bug#54679: ALTER TABLE causes compressed row_format to revert to compact +# Bug#56628: ALTER TABLE .. KEY_BLOCK_SIZE=0 produces untrue warning or unnecessary error +# Bug#56632: ALTER TABLE implicitly changes ROW_FORMAT to COMPRESSED +# Rules for interpreting CREATE_OPTIONS +# 1) Create options on an ALTER are added to the options on the +# previous CREATE or ALTER statements. +# 2) KEY_BLOCK_SIZE=0 is considered a unspecified value. +# If the current ROW_FORMAT has explicitly been set to COMPRESSED, +# InnoDB will use a default value of 8. Otherwise KEY_BLOCK_SIZE +# will not be used. +# 3) ROW_FORMAT=DEFAULT allows InnoDB to choose its own default, COMPACT. 
+# 4) ROW_FORMAT=DEFAULT and KEY_BLOCK_SIZE=0 can be used at any time to +# unset or erase the values persisted in the MySQL dictionary and +# by SHOW CTREATE TABLE. +# 5) When incompatible values for ROW_FORMAT and KEY_BLOCK_SIZE are +# both explicitly given, the ROW_FORMAT is always used in non-strict +# mode. +# 6) InnoDB will automatically convert a table to COMPRESSED only if a +# valid non-zero KEY_BLOCK_SIZE has been given and ROW_FORMAT=DEFAULT +# or has not been used on a previous CREATE TABLE or ALTER TABLE. +# 7) InnoDB strict mode is designed to prevent incompatible create +# options from being used together. +# 8) The non-strict behavior is intended to permit you to import a +# mysqldump file into a database that does not support compressed +# tables, even if the source database contained compressed tables. +# All invalid values and/or incompatible combinations of ROW_FORMAT +# and KEY_BLOCK_SIZE are automatically corrected +# +# *** innodb_strict_mode=ON *** +# 1) Valid ROW_FORMATs are COMPRESSED, COMPACT, DEFAULT, DYNAMIC +# & REDUNDANT. All others are rejected. +# 2) Valid KEY_BLOCK_SIZEs are 0,1,2,4,8,16. All others are rejected. +# 3) KEY_BLOCK_SIZE=0 can be used to set it to 'unspecified'. +# 4) KEY_BLOCK_SIZE=1,2,4,8 & 16 are incompatible with COMPACT, DYNAMIC & +# REDUNDANT. +# 5) KEY_BLOCK_SIZE=1,2,4,8 & 16 as well as ROW_FORMAT=COMPRESSED +# are incompatible with innodb_file_format=Antelope +# and innodb_file_per_table=OFF +# 6) KEY_BLOCK_SIZE on an ALTER must occur with ROW_FORMAT=COMPRESSED +# or ROW_FORMAT=DEFAULT if the ROW_FORMAT was previously specified +# as COMPACT, DYNAMIC or REDUNDANT. +# 7) KEY_BLOCK_SIZE on an ALTER can occur without a ROW_FORMAT if the +# previous ROW_FORMAT was DEFAULT, COMPRESSED, or unspecified. +# +# *** innodb_strict_mode=OFF *** +# 1. Ignore a bad KEY_BLOCK_SIZE, defaulting it to 8. +# 2. Ignore a bad ROW_FORMAT, defaulting to COMPACT. +# 3. Ignore a valid KEY_BLOCK_SIZE when an incompatible but valid +# ROW_FORMAT is specified. +# 4. If innodb_file_format=Antelope or innodb_file_per_table=OFF +# it will ignore ROW_FORMAT=COMPRESSED and non-zero KEY_BLOCK_SIZEs. +# +# See InnoDB documentation page "SQL Compression Syntax Warnings and Errors" +# This test case does not try to create tables with KEY_BLOCK_SIZE > 4 +# since they are rejected for InnoDB page sizes of 8k and 16k. +# See innodb_16k and innodb_8k for those tests. + +-- source include/have_innodb.inc +-- source include/have_innodb_zip.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +# These values can change during the test +LET $innodb_file_per_table_orig=`select @@innodb_file_per_table`; +LET $innodb_strict_mode_orig=`select @@session.innodb_strict_mode`; +--enable_query_log + +SET GLOBAL innodb_file_per_table=ON; + +# The first half of these tests are with strict mode ON. +SET SESSION innodb_strict_mode = ON; + +--echo # Test 1) StrictMode=ON, CREATE and ALTER with each ROW_FORMAT & KEY_BLOCK_SIZE=0 +--echo # KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified' +--echo # 'FIXED' is sent to InnoDB since it is used by MyISAM. 
+--echo # But it is an invalid mode in InnoDB +--error ER_ILLEGAL_HA, 1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED; +SHOW WARNINGS; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + + +--echo # Test 2) StrictMode=ON, CREATE with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE +--echo # KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC +DROP TABLE t1; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + +--echo # Test 3) StrictMode=ON, ALTER with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + +--echo # Test 4) StrictMode=ON, CREATE with ROW_FORMAT=COMPACT, ALTER with a valid non-zero KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM 
information_schema.tables WHERE TABLE_NAME = 't1'; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 5) StrictMode=ON, CREATE with a valid KEY_BLOCK_SIZE +--echo # ALTER with each ROW_FORMAT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=2; +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW CREATE TABLE t1; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 6) StrictMode=ON, CREATE with an invalid KEY_BLOCK_SIZE. +DROP TABLE t1; +--error ER_ILLEGAL_HA, 1005 +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9; +SHOW WARNINGS; + +--echo # Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and +--echo # and a valid non-zero KEY_BLOCK_SIZE are rejected with Antelope +--echo # and that they can be set to default values during strict mode. 
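+# The Antelope file format only supports the REDUNDANT and COMPACT row
+# formats, which is why COMPRESSED, DYNAMIC and non-zero KEY_BLOCK_SIZE
+# are rejected while it is in effect.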
+SET GLOBAL innodb_file_format=Antelope; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +--error 1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +--error 1478 +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SET GLOBAL innodb_file_format=Barracuda; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SET GLOBAL innodb_file_format=Antelope; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW CREATE TABLE t1; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SET GLOBAL innodb_file_format=Barracuda; + +--echo # Test 8) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and +--echo # and a valid non-zero KEY_BLOCK_SIZE are rejected with +--echo # innodb_file_per_table=OFF and that they can be set to default +--echo # values during strict mode. +SET GLOBAL innodb_file_per_table=OFF; +DROP TABLE t1; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +--error ER_ILLEGAL_HA,1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +--error 1005 +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +--error ER_ILLEGAL_HA_CREATE_OPTION +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +--error 1478 +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DEFAULT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_per_table=ON; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SET GLOBAL innodb_file_per_table=ON; + +--echo ################################################## +SET SESSION innodb_strict_mode = OFF; + +--echo # Test 9) StrictMode=OFF, CREATE and ALTER with each ROW_FORMAT & 
KEY_BLOCK_SIZE=0 +--echo # KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified' +--echo # 'FIXED' is sent to InnoDB since it is used by MyISAM. +--echo # It is an invalid mode in InnoDB, use COMPACT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 10) StrictMode=OFF, CREATE with each ROW_FORMAT & a valid KEY_BLOCK_SIZE +--echo # KEY_BLOCK_SIZE is ignored with COMPACT, REDUNDANT, & DYNAMIC +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + +--echo # Test 11) StrictMode=OFF, ALTER with each ROW_FORMAT & a valid KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT 
TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ); +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + +--echo # Test 12) StrictMode=OFF, CREATE with ROW_FORMAT=COMPACT, ALTER with a valid KEY_BLOCK_SIZE +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 KEY_BLOCK_SIZE=4; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 13) StrictMode=OFF, CREATE with a valid KEY_BLOCK_SIZE +--echo # ALTER with each ROW_FORMAT +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SHOW CREATE TABLE t1; +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=REDUNDANT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPRESSED; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +ALTER TABLE t1 ROW_FORMAT=COMPACT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE, +--echo # it defaults to half of the page size. 
+DROP TABLE t1; +CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=15; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 15) StrictMode=OFF, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and a +--echo valid KEY_BLOCK_SIZE are remembered but not used when ROW_FORMAT +--echo is reverted to Antelope and then used again when ROW_FORMAT=Barracuda. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_format=Antelope; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_format=Barracuda; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_format=Antelope; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_format=Barracuda; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + +--echo # Test 16) StrictMode=OFF, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and a +--echo valid KEY_BLOCK_SIZE are remembered but not used when innodb_file_per_table=OFF +--echo and then used again when innodb_file_per_table=ON. +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_per_table=ON; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +DROP TABLE t1; +CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_per_table=OFF; +ALTER TABLE t1 ADD COLUMN f1 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; +SET GLOBAL innodb_file_per_table=ON; +ALTER TABLE t1 ADD COLUMN f2 INT; +SHOW WARNINGS; +SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1'; + + +--echo # Cleanup +DROP TABLE t1; + +--disable_query_log +EVAL SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig; +EVAL SET SESSION innodb_strict_mode=$innodb_strict_mode_orig; +--enable_query_log + diff --git a/mysql-test/suite/innodb_zip/t/disabled.def b/mysql-test/suite/innodb_zip/t/disabled.def new file mode 100644 index 00000000000..d3799d0e2c9 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/disabled.def @@ -0,0 +1,17 @@ +############################################################################## +# +# List the test cases that are to be disabled temporarily. 
+# +# Separate the test case name and the comment with ':'. +# +# : BUG# +# +# Do not use any TAB characters for whitespace. +# +############################################################################## + +restart : Not supported by MariaDB 10.2 2/9/2016 jplindst +innochecksum : MDEV-10727 2/9/2016 jplindst +innochecksum_2 : MDEV-10727 2/9/2016 jplindst +innochecksum_3 : MDEV-10727 2/9/2016 jplindst + diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix.test b/mysql-test/suite/innodb_zip/t/index_large_prefix.test new file mode 100644 index 00000000000..d61cce8d484 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/index_large_prefix.test @@ -0,0 +1,441 @@ +# Testcase for worklog #5743: Lift the limit of index key prefixes + +--source include/have_innodb.inc +--source include/have_innodb_16k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +--enable_query_log + +let $innodb_file_per_table_orig=`select @@innodb_file_per_table`; + +set global innodb_file_per_table=1; + +-- echo ### Test 1 ### +# Create a table of DYNAMIC format, with a primary index of 1000 bytes in +# size +create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC; +show warnings; + +# Do some insertion and update to excercise the external cache +# code path +insert into worklog5743 values(repeat("a", 20000)); + +# default session, update the table +update worklog5743 set a = (repeat("b", 16000)); + +# Create a secondary index +create index idx on worklog5743(a(2000)); +show warnings; + +# Start a few sessions to do selections on table being updated in default +# session, so it would rebuild the previous version from undo log. +# 1) Default session: Initiate an update on the externally stored column +# 2) Session con1: Select from table with repeated read +# 3) Session con2: Select from table with read uncommitted +# 4) Default session: rollback updates + +begin; +update worklog5743 set a = (repeat("x", 17000)); + +# Start a new session to select the column to force it build +# an earlier version of the clustered index through undo log. 
So it should +# just see the result of repeat("b", 16000) +select @@session.tx_isolation; +--connect (con1,localhost,root,,) +select a = repeat("x", 17000) from worklog5743; +select a = repeat("b", 16000) from worklog5743; + +# Start another session doing "read uncommitted" query, it +# should see the uncommitted update +--connect (con2,localhost,root,,) +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 17000) from worklog5743; + +# Roll back the transaction +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 2 ### +# Create a table with only a secondary index has large prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +create index idx on worklog5743(a1, a2(2000)); +show warnings; + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 3 ### +# Create a table with a secondary index has small (50 bytes) prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; + +create index idx on worklog5743(a1, a2(50)); + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 4 ### +# Create compressed tables with each KEY_BLOCK_SIZE. +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; +create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8; +create table worklog5743_16(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=16; + +# The maximum overall index record (not prefix) length of a +# compressed table is dependent on innodb-page-size (IPS), +# key_block_size (KBS) and the number of fields (NF). +# "Too big row" error (HA_ERR_TOO_BIG_ROW) will be returned if this +# limit is exceeded. +# See page_zip_empty_size() and Bug #47495 for more detail. 
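+# The failing/passing prefix pairs used below (for example 436/434 with
+# KEY_BLOCK_SIZE=1) sit just above and just below that limit.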
+ +# Test edge cases for indexes using key_block_size=1 +set global innodb_large_prefix=0; +-- error ER_TOO_LONG_KEY,1118 +create index idx1 on worklog5743_1(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +-- error ER_TOO_LONG_KEY,1118 +create index idx2 on worklog5743_1(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_1(a2(436)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_1(a2(434)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_1(a1, a2(430)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; + +# Test edge cases for indexes using key_block_size=2 +set global innodb_large_prefix=0; +# Check index creation behavior without STRICT mode +SET sql_mode= ''; +create index idx1 on worklog5743_2(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +-- error ER_TOO_BIG_ROWSIZE +create index idx2 on worklog5743_2(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_2(a2(948)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_2(a2(946)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_2(a1, a2(942)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_2(a1, a2(940)); +show warnings; + +# Test edge cases for indexes using key_block_size=4 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_4(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +-- error ER_TOO_BIG_ROWSIZE +create index idx2 on worklog5743_4(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_4(a2(1972)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_4(a2(1970)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_4(a1, a2(1966)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_4(a1, a2(1964)); +show warnings; + +# Test edge cases for indexes using key_block_size=8 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_8(a2(1000)); +show warnings; +set global innodb_large_prefix=1; +create index idx2 on worklog5743_8(a2(3073)); +show warnings; +create index idx3 on worklog5743_8(a2(3072)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx4 on worklog5743_8(a1, a2(3069)); +show warnings; +create index idx5 on worklog5743_8(a1, a2(3068)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx6 on worklog5743_8(a1, a2(2000), a3(1069)); +show warnings; +create index idx7 on worklog5743_8(a1, a2(2000), a3(1068)); +show warnings; + +# Test edge cases for indexes using key_block_size=16 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_16(a2(1000)); +show warnings; +set global innodb_large_prefix=1; +create index idx2 on worklog5743_16(a2(3073)); +show warnings; +create index idx3 on worklog5743_16(a2(3072)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx4 on worklog5743_16(a1, a2(3069)); +show warnings; +create index idx5 on worklog5743_16(a1, a2(3068)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx6 on worklog5743_16(a1, a2(2000), a3(1069)); +show warnings; +create index idx7 on worklog5743_16(a1, 
a2(2000), a3(1068)); +show warnings; +set sql_mode= default; + +# Insert a large record into each of these tables. +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); +insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000)); +insert into worklog5743_16 values(9, repeat("a", 10000), repeat("a", 10000)); + +# Now if we change the global innodb_large_prefix back to 767, +# updates to these indexes should still be allowed. +set global innodb_large_prefix=0; +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000)); +insert into worklog5743_16 values(2, repeat("b", 10000), repeat("b", 10000)); +set global innodb_large_prefix=1; + +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; +select a1, left(a2, 20) from worklog5743_8; +select a1, left(a2, 20) from worklog5743_16; + +begin; + +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +update worklog5743_8 set a1 = 1000; +update worklog5743_16 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; +select a1, left(a2, 20) from worklog5743_8; +select a1, left(a2, 20) from worklog5743_16; + + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_16 where a1 = 9; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +select a1, left(a2, 20) from worklog5743_16 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +select a1, left(a2, 20) from worklog5743_16 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; +drop table worklog5743_8; +drop table worklog5743_16; + +-- echo ### Test 5 ### +# Create a table with large varchar columns and create indexes +# directly on these large columns to show that prefix limit is +# automatically applied and to show that limit. 
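+# (The VARCHAR lengths below bracket the 3072-byte Barracuda prefix limit
+# noted later in this test: 3073/3072 and 3069/3068.)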
+create table worklog5743(a1 int, + a2 varchar(20000), + a3 varchar(3073), + a4 varchar(3072), + a5 varchar(3069), + a6 varchar(3068)) + ROW_FORMAT=DYNAMIC; +# Check index creation behavior without STRICT mode +SET sql_mode=''; +create index idx1 on worklog5743(a2); +create index idx2 on worklog5743(a3); +create index idx3 on worklog5743(a4); +show warnings; +SET sql_mode= default; +-- error ER_TOO_LONG_KEY +create index idx4 on worklog5743(a1, a2); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx5 on worklog5743(a1, a5); +show warnings; +create index idx6 on worklog5743(a1, a6); +show warnings; +show create table worklog5743; + +insert into worklog5743 values(9, + repeat("a", 20000), repeat("a", 3073), + repeat("a", 3072), repeat("a", 3069), + repeat("a", 3068)); + +begin; + +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1 from worklog5743 where a1 = 9; +select a1 from worklog5743 where a1 = 9; + +# Do read uncommitted, it would show there is no row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1 from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 6 ### +# Create a table with old format, and the limit is 768 bytes. +-- error ER_INDEX_COLUMN_TOO_LONG +create table worklog5743(a TEXT not null, primary key (a(1000))) +row_format=compact; + +create table worklog5743(a TEXT) +row_format=compact; + +# Excercise the column length check in ha_innobase::add_index() +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); + +# This should be successful +create index idx on worklog5743(a(767)); + +# Perform some DMLs +insert into worklog5743 values(repeat("a", 20000)); + +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); + +# Start a new session to select the table to force it build +# an earlier version of the cluster index through undo log +select @@session.tx_isolation; +--connection con1 +select a = repeat("a", 20000) from worklog5743; +--disconnect con1 + +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 25000) from worklog5743; +--disconnect con2 + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 7 ### +# Some border line tests on the column length. 
+# We have a limit of 3072 bytes for Barracuda table +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; + +# Length exceeds maximum supported key length +# It will be auto-truncated to 3072 +# Check index creation behavior without STRICT mode +SET sql_mode=''; +create index idx1 on worklog5743(a(3073)); +create index idx2 on worklog5743(a(3072)); +show create table worklog5743; +drop table worklog5743; +SET sql_mode= default; + +# We have a limit of 767 bytes for Antelope tables +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + + +eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig; diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test b/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test new file mode 100644 index 00000000000..a0229abc4f8 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/index_large_prefix_4k.test @@ -0,0 +1,400 @@ +# Testcase for worklog #5743: Lift the limit of index key prefixes + +--source include/have_innodb.inc +--source include/have_innodb_4k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +--enable_query_log + +let $innodb_file_per_table_orig=`select @@innodb_file_per_table`; + +set global innodb_file_per_table=1; + +-- echo ### Test 1 ### +# Create a table of DYNAMIC format, with a primary index of 768 bytes in +# size +create table worklog5743(a TEXT not null, primary key (a(768))) ROW_FORMAT=DYNAMIC; +show warnings; + +# Do some insertion and update to excercise the external cache +# code path +insert into worklog5743 values(repeat("a", 20000)); + +# default session, update the table +update worklog5743 set a = (repeat("b", 16000)); + +# Create a secondary index +SET sql_mode= ''; +create index idx on worklog5743(a(900)); +show warnings; +SET sql_mode= default; +# Start a few sessions to do selections on table being updated in default +# session, so it would rebuild the previous version from undo log. +# 1) Default session: Initiate an update on the externally stored column +# 2) Session con1: Select from table with repeated read +# 3) Session con2: Select from table with read uncommitted +# 4) Default session: rollback updates + +begin; +update worklog5743 set a = (repeat("x", 17000)); + +# Start a new session to select the column to force it build +# an earlier version of the clustered index through undo log. 
So it should +# just see the result of repeat("b", 16000) +select @@session.tx_isolation; +--connect (con1,localhost,root,,) +select a = repeat("x", 17000) from worklog5743; +select a = repeat("b", 16000) from worklog5743; + +# Start another session doing "read uncommitted" query, it +# should see the uncommitted update +--connect (con2,localhost,root,,) +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 17000) from worklog5743; + +# Roll back the transaction +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 2 ### +# Create a table with only a secondary index has large prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +create index idx on worklog5743(a1, a2(750)); +show warnings; + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 1111; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 3 ### +# Create a table with a secondary index has small (50 bytes) prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; + +create index idx on worklog5743(a1, a2(50)); + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 2222; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 4 ### +# Create compressed tables with each KEY_BLOCK_SIZE. +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; + +# The maximum overall index record (not prefix) length of a +# compressed table is dependent on innodb-page-size (IPS), +# key_block_size (KBS) and the number of fields (NF). +# "Too big row" error (HA_ERR_TOO_BIG_ROW) will be returned if this +# limit is exceeded. +# See page_zip_empty_size() and Bug #47495 for more detail. 
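#
# Rough intuition for the edge values probed below (this file requires a
# 4k page server): KEY_BLOCK_SIZE=1 gives a 1024-byte compressed page, so
# after the fixed page overhead only a few hundred bytes remain for a
# single index record.  That is why the boundary sits around 434/436 bytes
# for the lone prefix column and a few bytes lower once the int column
# joins the key.  The exact cutoff comes from page_zip_empty_size(), so
# treat these numbers as observed limits rather than a documented formula.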
+ +# Test edge cases for indexes using key_block_size=1 +set global innodb_large_prefix=0; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743_1(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_1(a2(436)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_1(a2(434)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_1(a1, a2(430)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; + +# Test edge cases for indexes using key_block_size=2 +set global innodb_large_prefix=1; +SET sql_mode= ''; +create index idx1 on worklog5743_2(a2(4000)); +show warnings; +show create table worklog5743_2; +create index idx3 on worklog5743_2(a2(769)); +show warnings; +create index idx4 on worklog5743_2(a2(768)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx5 on worklog5743_2(a1, a2(765)); +show warnings; +create index idx6 on worklog5743_2(a1, a2(764)); +show warnings; +# Test edge cases for indexes using key_block_size=4 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_4(a2(4000)); +show warnings; +show create table worklog5743_4; +create index idx3 on worklog5743_4(a2(769)); +show warnings; +create index idx4 on worklog5743_4(a2(768)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx5 on worklog5743_4(a1, a2(765)); +show warnings; +create index idx6 on worklog5743_4(a1, a2(764)); +show warnings; +SET sql_mode= default; +# Insert a large record into each of these tables. +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); + +# Now if we change the global innodb_large_prefix back to 767, +# updates to these indexes should still be allowed. 
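#
# Note on the comment above: innodb_large_prefix is a boolean, so "back to
# 767" means switching it OFF, which restores the old 767-byte ceiling for
# *new* index prefixes only.  Indexes already created with longer prefixes
# remain intact and writable, which is exactly what the inserts and
# updates below verify.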
+set global innodb_large_prefix=0; +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +set global innodb_large_prefix=1; + +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; + +begin; + +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; + +-- echo ### Test 5 ### +# Create a table with large varchar columns and create indexes +# directly on these large columns to show that prefix limit is +# automatically applied and to show that limit. 
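#
# On a 4k page server the index-prefix ceiling with large prefixes enabled
# drops from 3072 to 768 bytes, and a composite key loses a few more bytes
# to the leading int column, so the probes below sit exactly at the
# 769/768 and 765/764 boundaries.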
+ +# This commented form of the test causes an unlimited page split +# on update of the int field - Bug 12636590 - INNODB; UPDATE OF +# LARGE RECORD CAUSES UNLIMITED PAGE SPLITS IN 8K PAGE SIZE +#create table worklog5743(a1 int, +# a2 varchar(20000), +# a3 varchar(3073), +# a4 varchar(3072), +# a5 varchar(3069), +# a6 varchar(3068)) +# ROW_FORMAT=DYNAMIC; +#create index idx1 on worklog5743(a2); +#create index idx2 on worklog5743(a3); +#create index idx3 on worklog5743(a4); +#show warnings; +#-- error ER_TOO_LONG_KEY +#create index idx4 on worklog5743(a1, a2); +#show warnings; +#-- error ER_TOO_LONG_KEY +#create index idx5 on worklog5743(a1, a5); +#show warnings; +#create index idx6 on worklog5743(a1, a6); +#show warnings; +#show create table worklog5743; +# +#insert into worklog5743 values(9, +# repeat("a", 20000), repeat("a", 3073), +# repeat("a", 3072), repeat("a", 3069), +# repeat("a", 3068)); +# + +create table worklog5743(a1 int, a2 varchar(20000)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a2); +show warnings; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(3072)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a2); +show warnings; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(769)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a2); +show warnings; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(768)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +show warnings; +insert into worklog5743 values(9, repeat("a", 768)); +update worklog5743 set a1 = 3333; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(765)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a1, a2); +show warnings; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(764)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +show warnings; +insert into worklog5743 values(9, repeat("a", 764)); + +begin; +update worklog5743 set a1 = 4444; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1 from worklog5743 where a1 = 9; +select a1 from worklog5743 where a1 = 9; + +# Do read uncommitted, it would show there is no row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1 from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 6 ### +# Create a table with old format, and the limit is 768 bytes. 
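#
# To be precise about "the limit is 768 bytes": with the old (Antelope)
# row formats the largest prefix that can actually be created is 767
# bytes, so the 768-byte attempts below are expected to fail with
# ER_INDEX_COLUMN_TOO_LONG while the 767-byte ones succeed.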
+-- error ER_TOO_LONG_KEY +create table worklog5743(a TEXT not null, primary key (a(1000))); + +create table worklog5743(a TEXT) ROW_FORMAT=COMPACT; + +# Excercise the column length check in ha_innobase::add_index() +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); + +# This should be successful +create index idx on worklog5743(a(767)); + +# Perform some DMLs +insert into worklog5743 values(repeat("a", 20000)); + +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); + +# Start a new session to select the table to force it build +# an earlier version of the cluster index through undo log +select @@session.tx_isolation; +--connection con1 +select a = repeat("a", 20000) from worklog5743; +--disconnect con1 + +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 25000) from worklog5743; +--disconnect con2 + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 7 ### +# Some border line tests on the column length. +# We have a limit of 3072 bytes for Barracuda table +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; + +# Length exceeds maximum supported key length +# It will be auto-truncated to 3072 if the page size were not 4k. +# With this page size, the prefix length is less. +SET sql_mode= ''; +create index idx1 on worklog5743(a(769)); +show warnings; +SET sql_mode= default; +create index idx2 on worklog5743(a(768)); +show warnings; +show create table worklog5743; +insert into worklog5743 values(repeat("a", 768)); +drop table worklog5743; + +# We have a limit of 767 bytes for Antelope tables +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + + +eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig; diff --git a/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test b/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test new file mode 100644 index 00000000000..00a97249a95 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/index_large_prefix_8k.test @@ -0,0 +1,429 @@ +# Testcase for worklog #5743: Lift the limit of index key prefixes + +--source include/have_innodb.inc +--source include/have_innodb_8k.inc +SET default_storage_engine=InnoDB; + +--disable_query_log +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is"); +--enable_query_log + +let $innodb_file_per_table_orig=`select @@innodb_file_per_table`; + +set global innodb_file_per_table=1; + +-- echo ### Test 1 ### +# Create a table of DYNAMIC format, with a primary index of 1000 bytes in +# size +create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC; +show warnings; + +# Do some insertion and update to excercise the external cache +# code path +insert into worklog5743 values(repeat("a", 20000)); + +# default session, update the table +update worklog5743 set a = (repeat("b", 16000)); + +# Create a secondary index +SET sql_mode= ''; +create index idx on worklog5743(a(2000)); +show warnings; +SET sql_mode= default; + +# Start a few sessions to do selections on table being updated in 
default +# session, so it would rebuild the previous version from undo log. +# 1) Default session: Initiate an update on the externally stored column +# 2) Session con1: Select from table with repeated read +# 3) Session con2: Select from table with read uncommitted +# 4) Default session: rollback updates + +begin; +update worklog5743 set a = (repeat("x", 17000)); + +# Start a new session to select the column to force it build +# an earlier version of the clustered index through undo log. So it should +# just see the result of repeat("b", 16000) +select @@session.tx_isolation; +--connect (con1,localhost,root,,) +select a = repeat("x", 17000) from worklog5743; +select a = repeat("b", 16000) from worklog5743; + +# Start another session doing "read uncommitted" query, it +# should see the uncommitted update +--connect (con2,localhost,root,,) +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 17000) from worklog5743; + +# Roll back the transaction +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 2 ### +# Create a table with only a secondary index has large prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; +show warnings; +create index idx on worklog5743(a1, a2(1250)); +show warnings; + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 3 ### +# Create a table with a secondary index has small (50 bytes) prefix column +create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC; + +create index idx on worklog5743(a1, a2(50)); + +insert into worklog5743 values(9, repeat("a", 10000)); + +begin; + +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 4 ### +# Create compressed tables with each KEY_BLOCK_SIZE. +create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1; +create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2; +create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4; +create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8; + +# The maximum overall index record (not prefix) length of a +# compressed table is dependent on innodb-page-size (IPS), +# key_block_size (KBS) and the number of fields (NF). 
+# "Too big row" error (HA_ERR_TOO_BIG_ROW) will be returned if this +# limit is exceeded. +# See page_zip_empty_size() and Bug #47495 for more detail. + +# Test edge cases for indexes using key_block_size=1 +set global innodb_large_prefix=0; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743_1(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +-- error ER_TOO_LONG_KEY +create index idx2 on worklog5743_1(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_1(a2(436)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_1(a2(434)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_1(a1, a2(430)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_1(a1, a2(428)); +show warnings; + +# Test edge cases for indexes using key_block_size=2 +set global innodb_large_prefix=0; +SET sql_mode= ''; +create index idx1 on worklog5743_2(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +-- error ER_TOO_BIG_ROWSIZE +create index idx2 on worklog5743_2(a2(4000)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx3 on worklog5743_2(a2(948)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx4 on worklog5743_2(a2(946)); +show warnings; +-- error ER_TOO_BIG_ROWSIZE +create index idx5 on worklog5743_2(a1, a2(942)); +show warnings; +# Bug#13391353 Limit is one byte less on on 32bit-Linux only +create index idx6 on worklog5743_2(a1, a2(940)); +show warnings; + +# Test edge cases for indexes using key_block_size=4 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_4(a2(4000)); +show warnings; +set global innodb_large_prefix=1; +create index idx3 on worklog5743_4(a2(1537)); +show warnings; +create index idx4 on worklog5743_4(a2(1536)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx5 on worklog5743_4(a1, a2(1533)); +show warnings; +create index idx6 on worklog5743_4(a1, a2(1532)); +show warnings; + +# Test edge cases for indexes using key_block_size=8 +set global innodb_large_prefix=0; +create index idx1 on worklog5743_8(a2(1000)); +show warnings; +set global innodb_large_prefix=1; +create index idx2 on worklog5743_8(a2(3073)); +show warnings; +create index idx3 on worklog5743_8(a2(3072)); +show warnings; +-- error ER_TOO_LONG_KEY +create index idx4 on worklog5743_8(a1, a2(1533)); +show warnings; +create index idx5 on worklog5743_8(a1, a2(1532)); +show warnings; +SET sql_mode= default; + +# Insert a large record into each of these tables. +insert into worklog5743_1 values(9, repeat("a", 10000)); +insert into worklog5743_2 values(9, repeat("a", 10000)); +insert into worklog5743_4 values(9, repeat("a", 10000)); +insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000)); + +# Now if we change the global innodb_large_prefix back to 767, +# updates to these indexes should still be allowed. 
+set global innodb_large_prefix=0; +insert into worklog5743_1 values(2, repeat("b", 10000)); +insert into worklog5743_2 values(2, repeat("b", 10000)); +insert into worklog5743_4 values(2, repeat("b", 10000)); +insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000)); +set global innodb_large_prefix=1; + +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; +select a1, left(a2, 20) from worklog5743_8; + +begin; + +update worklog5743_1 set a1 = 1000; +update worklog5743_2 set a1 = 1000; +update worklog5743_4 set a1 = 1000; +update worklog5743_8 set a1 = 1000; +select a1, left(a2, 20) from worklog5743_1; +select a1, left(a2, 20) from worklog5743_2; +select a1, left(a2, 20) from worklog5743_4; +select a1, left(a2, 20) from worklog5743_8; + + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; + +# Do read uncommitted in another session, it would show there is no +# row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1, left(a2, 20) from worklog5743_1 where a1 = 9; +select a1, left(a2, 20) from worklog5743_2 where a1 = 9; +select a1, left(a2, 20) from worklog5743_4 where a1 = 9; +select a1, left(a2, 20) from worklog5743_8 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743_1; +drop table worklog5743_2; +drop table worklog5743_4; +drop table worklog5743_8; + +-- echo ### Test 5 ### +# Create a table with large varchar columns and create indexes +# directly on these large columns to show that prefix limit is +# automatically applied and to show that limit. 
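#
# Same idea as the 4k variant, scaled up: with 8k pages the large-prefix
# ceiling is 1536 bytes, so the single-column probes below sit at the
# 1537/1536 boundary and the composite ones at 1533/1532.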
+ +# This commented form of the test causes an unlimited page split +# on update of the int field - Bug 12636590 - INNODB; UPDATE OF +# LARGE RECORD CAUSES UNLIMITED PAGE SPLITS IN 8K PAGE SIZE +#create table worklog5743(a1 int, +# a2 varchar(20000), +# a3 varchar(3073), +# a4 varchar(3072), +# a5 varchar(3069), +# a6 varchar(3068)) +# ROW_FORMAT=DYNAMIC; +#create index idx1 on worklog5743(a2); +#create index idx2 on worklog5743(a3); +#create index idx3 on worklog5743(a4); +#show warnings; +#-- error ER_TOO_LONG_KEY +#create index idx4 on worklog5743(a1, a2); +#show warnings; +#-- error ER_TOO_LONG_KEY +#create index idx5 on worklog5743(a1, a5); +#show warnings; +#create index idx6 on worklog5743(a1, a6); +#show warnings; +#show create table worklog5743; +# +#insert into worklog5743 values(9, +# repeat("a", 20000), repeat("a", 3073), +# repeat("a", 3072), repeat("a", 3069), +# repeat("a", 3068)); +# + +create table worklog5743(a1 int, a2 varchar(20000)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a2); +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(1537)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a2); +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(1536)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a2); +show warnings; +insert into worklog5743 values(9, repeat("a", 1536)); +update worklog5743 set a1 = 1000; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(1533)) ROW_FORMAT=DYNAMIC; +-- error ER_TOO_LONG_KEY +create index idx1 on worklog5743(a1, a2); +show warnings; +drop table worklog5743; + +create table worklog5743(a1 int, a2 varchar(1532)) ROW_FORMAT=DYNAMIC; +create index idx1 on worklog5743(a1, a2); +show warnings; +insert into worklog5743 values(9, repeat("a", 1532)); +update worklog5743 set a1 = 1000; + +begin; +update worklog5743 set a1 = 1000; + +# Do a select from another connection that would use the secondary index +--connection con1 +select @@session.tx_isolation; +explain select a1 from worklog5743 where a1 = 9; +select a1 from worklog5743 where a1 = 9; + +# Do read uncommitted, it would show there is no row with a1 = 9 +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a1 from worklog5743 where a1 = 9; + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 6 ### +# Create a table with old format, and the limit is 768 bytes. 
+-- error ER_INDEX_COLUMN_TOO_LONG +create table worklog5743(a TEXT not null, primary key (a(1000))) +row_format=compact; + +create table worklog5743(a TEXT) row_format=compact; + +# Excercise the column length check in ha_innobase::add_index() +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); + +# This should be successful +create index idx on worklog5743(a(767)); + +# Perform some DMLs +insert into worklog5743 values(repeat("a", 20000)); + +begin; +insert into worklog5743 values(repeat("b", 20000)); +update worklog5743 set a = (repeat("x", 25000)); + +# Start a new session to select the table to force it build +# an earlier version of the cluster index through undo log +select @@session.tx_isolation; +--connection con1 +select a = repeat("a", 20000) from worklog5743; +--disconnect con1 + +--connection con2 +SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +select @@session.tx_isolation; +select a = repeat("x", 25000) from worklog5743; +--disconnect con2 + +--connection default +rollback; + +drop table worklog5743; + +-- echo ### Test 7 ### +# Some border line tests on the column length. +# We have a limit of 3072 bytes for Barracuda table +create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC; + +# Length exceeds maximum supported key length +# It will be auto-truncated to 3072 +SET sql_mode= ''; +create index idx1 on worklog5743(a(3073)); +create index idx2 on worklog5743(a(3072)); +SET sql_mode= default; +show create table worklog5743; +drop table worklog5743; + +# We have a limit of 767 bytes for Antelope tables +create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + +create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT; +-- error ER_INDEX_COLUMN_TOO_LONG +create index idx on worklog5743(a(768)); +create index idx2 on worklog5743(a(767)); +drop table worklog5743; + + +eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig; diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.test b/mysql-test/suite/innodb_zip/t/innochecksum.test new file mode 100644 index 00000000000..fd64e6d0d0c --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/innochecksum.test @@ -0,0 +1,240 @@ +#************************************************************ +# WL6045:Improve Innochecksum +#************************************************************ +--source include/have_innodb.inc +--source include/have_innodb_zip.inc +--source include/no_valgrind_without_big.inc +# Embedded server does not support crashing. +--source include/not_embedded.inc + +# Avoid CrashReporter popup on Mac. 
+--source include/not_crashrep.inc + +--echo # Set the environmental variables +let MYSQLD_BASEDIR= `SELECT @@basedir`; +let MYSQLD_DATADIR= `SELECT @@datadir`; +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err; +call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); +call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed"); + +SET GLOBAL innodb_file_per_table=on; + +CREATE TABLE tab1(c1 INT PRIMARY KEY,c2 VARCHAR(20)) ENGINE=InnoDB; +CREATE INDEX idx1 ON tab1(c2(10)); +INSERT INTO tab1 VALUES(1, 'Innochecksum InnoDB1'); +CREATE TABLE t1(id INT AUTO_INCREMENT PRIMARY KEY, msg VARCHAR(255)) ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4; +insert into t1 values(1,"i"); +insert into t1 values(2,"am"); +insert into t1 values(3,"compressed table"); + +--echo # Shutdown the Server +--source include/shutdown_mysqld.inc +--echo # Server Default checksum = innodb + +--echo [1a]: check the innochecksum when file doesn't exists +--error 1 +--exec $INNOCHECKSUM $MYSQLD_DATADIR/test/aa.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: $MYSQLD_DATADIR/test/aa.ibd cannot be found; +--source include/search_pattern_in_file.inc + +--echo [1b]: check the innochecksum without --strict-check +--exec $INNOCHECKSUM $MYSQLD_DATADIR/test/tab1.ibd + +--echo [2]: check the innochecksum with full form --strict-check=crc32 +--exec $INNOCHECKSUM --strict-check=crc32 $MYSQLD_DATADIR/test/tab1.ibd + +--echo [3]: check the innochecksum with short form -C crc32 +--exec $INNOCHECKSUM -C crc32 $MYSQLD_DATADIR/test/tab1.ibd + +--echo [4]: check the innochecksum with --no-check ignores algorithm check, warning is expected +--error 1 +--exec $INNOCHECKSUM --no-check $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: --no-check must be associated with --write option.; +--source include/search_pattern_in_file.inc + +--echo [5]: check the innochecksum with short form --no-check ignores algorithm check, warning is expected +--error 1 +--exec $INNOCHECKSUM -n $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: --no-check must be associated with --write option.; +--source include/search_pattern_in_file.inc + +--echo [6]: check the innochecksum with full form strict-check & no-check , an error is expected +--error 1 +--exec $INNOCHECKSUM --strict-check=innodb --no-check $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: --strict-check option cannot be used together with --no-check option.; +--source include/search_pattern_in_file.inc + +--echo [7]: check the innochecksum with short form strict-check & no-check , an error is expected +--error 1 +--exec $INNOCHECKSUM -C innodb -n $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: --strict-check option cannot be used together with --no-check option.; +--source include/search_pattern_in_file.inc + +--echo [8]: check the innochecksum with short & full form combination +--echo # strict-check & no-check, an error is expected +--error 1 +--exec $INNOCHECKSUM --strict-check=innodb -n $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: --strict-check option cannot be used together with --no-check option.; +--source include/search_pattern_in_file.inc + +--echo [9]: check the innochecksum with full form --strict-check=innodb +# Server Default checksum = crc32 +--exec $INNOCHECKSUM --strict-check=innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE + +--echo [10]: check the innochecksum with 
full form --strict-check=none +--echo # when server Default checksum=crc32 +--exec $INNOCHECKSUM --strict-check=none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE + +--echo [11]: check the innochecksum with short form -C innodb +--echo # when server Default checksum=crc32 +--exec $INNOCHECKSUM -C innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE + +--echo [12]: check the innochecksum with short form -C none +--echo # when server Default checksum=crc32 +--exec $INNOCHECKSUM -C none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE + +--echo [13]: check strict-check with invalid values +--error 1 +--exec $INNOCHECKSUM --strict-check=strict_innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_innodb\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -C strict_innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_innodb\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --strict-check=strict_crc32 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_crc32\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -C strict_crc32 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_crc32\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --strict-check=strict_none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_none\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -C strict_none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'strict_none\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --strict-check=InnoBD $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'InnoBD\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -C InnoBD $MYSQLD_DATADIR/test/tab1.ibd 2>$SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'InnoBD\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --strict-check=crc $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'crc\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --strict-check=no $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error while setting value \'no\' to \'strict-check\'; +--source include/search_pattern_in_file.inc + +--echo [14a]: when server default checksum=crc32 rewrite new checksum=crc32 with innochecksum +--echo # Also check the long form of write option. +--exec $INNOCHECKSUM --strict-check=crc32 --write=crc32 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --strict-check=crc32 --write=crc32 $MYSQLD_DATADIR/test/t1.ibd +# Rewrite done, verify with --strict-check=crc32 +--exec $INNOCHECKSUM --strict-check=crc32 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --strict-check=crc32 $MYSQLD_DATADIR/test/t1.ibd + +--echo [14b]: when server default checksum=crc32 rewrite new checksum=innodb with innochecksum +--echo # Also check the long form of write option. 
+--exec $INNOCHECKSUM --no-check --write=innodb $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --strict-check=crc32 --write=innodb $MYSQLD_DATADIR/test/t1.ibd +# Rewrite done, verify with --strict-check=innodb +--exec $INNOCHECKSUM --strict-check=innodb $MYSQLD_DATADIR/test/tab1.ibd + +--echo # start the server with innodb_checksum_algorithm=InnoDB +--let restart_options= : --innodb_checksum_algorithm=innodb +--source include/start_mysqld.inc + +INSERT INTO tab1 VALUES(2, 'Innochecksum CRC32'); +SELECT c1,c2 FROM tab1 order by c1,c2; + +--echo # Stop the server +--source include/shutdown_mysqld.inc + +--echo [15]: when server default checksum=crc32 rewrite new checksum=none with innochecksum +--echo # Also check the short form of write option. +--exec $INNOCHECKSUM --no-check -w none $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --no-check -w none $MYSQLD_DATADIR/test/t1.ibd +# Rewrite done, verify with --strict-check=none +--exec $INNOCHECKSUM --strict-check=none $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --strict-check=none $MYSQLD_DATADIR/test/t1.ibd + +--echo # Start the server with checksum algorithm=none +--let restart_options= : --innodb_checksum_algorithm=none +--source include/start_mysqld.inc + +INSERT INTO tab1 VALUES(3, 'Innochecksum None'); +SELECT c1,c2 FROM tab1 order by c1,c2; +DROP TABLE t1; + +--echo # Stop the server +--source include/shutdown_mysqld.inc + +--echo [16]: rewrite into new checksum=crc32 with innochecksum +--exec $INNOCHECKSUM --no-check --write=crc32 $MYSQLD_DATADIR/test/tab1.ibd + +--echo # Restart the DB server with innodb_checksum_algorithm=crc32 +--let restart_options= : --innodb_checksum_algorithm=crc32 --innodb_file_per_table=on +--source include/start_mysqld.inc + +SELECT * FROM tab1; +DELETE FROM tab1 where c1=3; +SELECT c1,c2 FROM tab1 order by c1,c2; + +--echo # Stop server +--source include/shutdown_mysqld.inc + +--echo [17]: rewrite into new checksum=InnoDB +--exec $INNOCHECKSUM --no-check --write=InnoDB $MYSQLD_DATADIR/test/tab1.ibd + +--echo # Restart the DB server with innodb_checksum_algorithm=InnoDB +--let restart_options= : --innodb_checksum_algorithm=innodb --innodb_file_per_table=on +--source include/start_mysqld.inc + +DELETE FROM tab1 where c1=2; +SELECT * FROM tab1; + +--echo # Stop server +--source include/shutdown_mysqld.inc + +--echo [18]:check Innochecksum with invalid write options +--error 1 +--exec $INNOCHECKSUM --no-check --write=strict_crc32 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN=Error while setting value \'strict_crc32\' to \'write\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --no-check --write=strict_innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN=Error while setting value \'strict_innodb\' to \'write\'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --no-check --write=crc23 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN=Error while setting value \'crc23\' to \'write\'; +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +# Cleanup +--echo # Restart the server +--source include/start_mysqld.inc + +DROP TABLE tab1; +SET GLOBAL innodb_file_per_table=default; diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test new file mode 100644 index 00000000000..decec8e0f0a --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test @@ -0,0 +1,114 @@ 
+#************************************************************ +# WL6045:Improve Innochecksum +#************************************************************ +--source include/have_innodb.inc +--source include/have_innodb_zip.inc + +--source include/not_embedded.inc +-- source include/big_test.inc + +--disable_query_log +# This warning occurs due to small buffer pool size(i.e. 8MB). It doesn't occur +# with --mysqld=--innodb_buffer_pool_size=10MB +call mtr.add_suppression("\\[Warning\\] InnoDB: Difficult to find free blocks in the buffer pool.*"); +--enable_query_log +let MYSQLD_BASEDIR= `SELECT @@basedir`; +let MYSQLD_DATADIR= `SELECT @@datadir`; +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err; + +SET GLOBAL innodb_compression_level=0; +SELECT @@innodb_compression_level; + +CREATE TABLE t1 (j LONGBLOB) ENGINE = InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; +INSERT INTO t1 VALUES (repeat('abcdefghijklmnopqrstuvwxyz',200)); +let $i=10; +while ($i > 0) { + INSERT INTO t1 SELECT * from t1; + dec $i; +} + +--echo # stop the server +--source include/shutdown_mysqld.inc + +# Page_type_dump for t1 +--exec $INNOCHECKSUM -v --page-type-dump $MYSQLTEST_VARDIR/tmp/dump.txt $MYSQLD_DATADIR/test/t1.ibd +--file_exists $MYSQLTEST_VARDIR/tmp/dump.txt +--remove_file $MYSQLTEST_VARDIR/tmp/dump.txt + +--echo [1]:# check the both short and long options for "help" +--exec $INNOCHECKSUM --help $MYSQLD_DATADIR/test/t1.ibd > $MYSQLTEST_VARDIR/tmp/help_output_long.txt +--exec $INNOCHECKSUM -I $MYSQLD_DATADIR/test/t1.ibd > $MYSQLTEST_VARDIR/tmp/help_output_short.txt +--diff_files $MYSQLTEST_VARDIR/tmp/help_output_long.txt $MYSQLTEST_VARDIR/tmp/help_output_short.txt + +--echo [2]:# Run the innochecksum when file isn't provided. +--echo # It will print the innochecksum usage similar to --help option. +--error 1 +--exec $INNOCHECKSUM > $MYSQLTEST_VARDIR/tmp/usage.txt +--diff_files $MYSQLTEST_VARDIR/tmp/help_output_long.txt $MYSQLTEST_VARDIR/tmp/usage.txt +--remove_file $MYSQLTEST_VARDIR/tmp/usage.txt + +perl; +use strict; +use warnings; +use File::Copy; +my $dir = $ENV{'MYSQLTEST_VARDIR'}; +my $file= 'help_output_long.txt'; +# open file in write mode +open IN_FILE,"<", "$dir/tmp/$file" or die $!; +open OUT_FILE, ">", "$dir/tmp/tmpfile" or die $!; +while() { + unless ($_=~ /^debug.*$/ || $_=~ /\-#, \-\-debug.*$/ || $_=~ /http:.*html/) { + $_=~ s/^\S*innochecksum.+Ver.+[0-9]*\.[0-9]*\.[0-9]*.+$/innochecksum Ver #.#.#/g; + $_=~ s/(Copyright\s\(c\))\s([0-9]*),\s([0-9]*)(.*)/$1 YEAR, YEAR $4/g; + $_=~ s/Usage:.*\[-c/Usage: innochecksum [-c/g; + print OUT_FILE $_; + } +} +close(IN_FILE); +close(OUT_FILE); +# move the new content from tmp file to the orginal file. 
+move ("$dir/tmp/tmpfile", "$dir/tmp/$file"); +EOF + +--cat_file $MYSQLTEST_VARDIR/tmp/help_output_long.txt +--remove_file $MYSQLTEST_VARDIR/tmp/help_output_long.txt +--remove_file $MYSQLTEST_VARDIR/tmp/help_output_short.txt + +--echo [3]:# check the both short and long options for "count" and exit +--replace_regex /[0-9]+/#/ +--exec $INNOCHECKSUM --count $MYSQLD_DATADIR/test/t1.ibd +--replace_regex /[0-9]+/#/ +--exec $INNOCHECKSUM -c $MYSQLD_DATADIR/test/t1.ibd + +--echo [4]:# Print the version of innochecksum and exit +--replace_regex /.*innochecksum.*Ver.*[0-9]*.[0-9]*.[0-9]*.*/innochecksum Ver #.#.#/ +--exec $INNOCHECKSUM -V $MYSQLD_DATADIR/test/t1.ibd + +--echo # Restart the DB server +--source include/start_mysqld.inc + +DROP TABLE t1; + +--echo [5]:# Check the innochecksum for compressed table t1 with different key_block_size +--echo # Test for KEY_BLOCK_SIZE=1 +--let $size=1 +--source ../include/innodb-wl6045.inc + +--echo # Test for KEY_BLOCK_SIZE=2 +--let $size=2 +--source ../include/innodb-wl6045.inc + +--echo # Test for for KEY_BLOCK_SIZE=4 +--let $size=4 +--source ../include/innodb-wl6045.inc + +set innodb_strict_mode=off; +--echo # Test for for KEY_BLOCK_SIZE=8 +--let $size=8 +--source ../include/innodb-wl6045.inc + +set innodb_strict_mode=off; +--echo # Test for KEY_BLOCK_SIZE=16 +--let $size=16 +--source ../include/innodb-wl6045.inc +--echo # Test[5] completed diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_3.test b/mysql-test/suite/innodb_zip/t/innochecksum_3.test new file mode 100644 index 00000000000..30e98aa25f5 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/innochecksum_3.test @@ -0,0 +1,378 @@ +#************************************************************ +# WL6045:Improve Innochecksum +#************************************************************ +--source include/have_innodb.inc +--source include/have_innodb_zip.inc + +# Valgrind would complain about memory leaks when we crash on purpose. +--source include/not_valgrind.inc + +# Embedded server does not support crashing. +--source include/not_embedded.inc + +# Avoid CrashReporter popup on Mac. +--source include/not_crashrep.inc + +--echo # Set the environmental variables +let MYSQLD_BASEDIR= `SELECT @@basedir`; +let MYSQLD_DATADIR= `SELECT @@datadir`; +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err; +call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts"); + +SET GLOBAL innodb_file_per_table=on; + +--echo [1]: Further Test are for rewrite checksum (innodb|crc32|none) for all ibd file & start the server. 
+ +CREATE TABLE tab1 (pk INTEGER NOT NULL PRIMARY KEY, +linestring_key GEOMETRY NOT NULL, +linestring_nokey GEOMETRY NOT NULL) +ENGINE=InnoDB ; + +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (1, ST_GeomFromText('POINT(10 10) '), ST_GeomFromText('POINT(10 10) ')); + +CREATE INDEX linestring_index ON tab1(linestring_nokey(5)); +ALTER TABLE tab1 ADD KEY (linestring_key(5)); + +--echo # create a compressed table +CREATE TABLE tab2(col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 int ) +engine = innodb row_format=compressed key_block_size=4; + +CREATE INDEX idx1 ON tab2(col_3(10)); +CREATE INDEX idx2 ON tab2(col_4(10)); +CREATE INDEX idx3 ON tab2(col_5(10)); + +# load the with repeat function +SET @col_1 = repeat('a', 5); +SET @col_2 = repeat('b', 20); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); + +# insert 5 records +let $i = 5; +while ($i) { + eval INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) + VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,$i); + dec $i; +} + +--disable_result_log +SELECT * FROM tab2 ORDER BY col_7; + +--echo # stop the server +--source include/shutdown_mysqld.inc + +--echo [1(a)]: Rewrite into new checksum=InnoDB for all *.ibd file and ibdata1 +--exec $INNOCHECKSUM --write=InnoDB $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --write=InnoDB $MYSQLD_DATADIR/test/tab2.ibd +--exec $INNOCHECKSUM --write=InnoDB $MYSQLD_DATADIR/ibdata1 +perl; +foreach (glob("$ENV{MYSQLD_DATADIR}/*/*.ibd")) { + system("$ENV{INNOCHECKSUM} --no-check --write=InnoDB $_") +} +EOF + +--echo : start the server with innodb_checksum_algorithm=strict_innodb +--let restart_options= : --innodb_checksum_algorithm=strict_innodb --default_storage_engine=InnoDB +--source include/start_mysqld.inc + +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (2, ST_GeomFromText('LINESTRING(10 10,20 20,30 30)'), ST_GeomFromText('LINESTRING(10 10,20 20,30 30)')); + +# load the with repeat function +SET @col_1 = repeat('a', 5); +SET @col_2 = repeat('b', 20); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); + +# check the table status is GOOD with DML +let $i = 6; +eval INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,$i); + +-- disable_result_log +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; + +-- disable_result_log +SELECT * FROM tab2 ORDER BY col_7; + +--echo # stop the server +--source include/shutdown_mysqld.inc + +--echo [1(b)]: Rewrite into new checksum=crc32 for all *.ibd file and ibdata1 +--exec $INNOCHECKSUM --write=CRC32 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --write=CRC32 $MYSQLD_DATADIR/test/tab2.ibd +--exec $INNOCHECKSUM --write=CRC32 $MYSQLD_DATADIR/ibdata1 +perl; +foreach (glob("$ENV{MYSQLD_DATADIR}/*/*.ibd")) { + system("$ENV{INNOCHECKSUM} --no-check --write=crc32 $_") +} +EOF + +--echo # start the server with innodb_checksum_algorithm=strict_crc32 +--let restart_options= : --innodb_checksum_algorithm=strict_crc32 --default_storage_engine=InnoDB +--source include/start_mysqld.inc + +# check the table status is GOOD with DML +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (3, ST_GeomFromText('POLYGON((0 0,5 5,10 10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))'), +ST_GeomFromText('POLYGON((0 0,5 5,10 
10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))')); + +# load the with repeat function +SET @col_1 = repeat('g', 5); +SET @col_2 = repeat('h', 20); +SET @col_3 = repeat('i', 100); +SET @col_4 = repeat('j', 100); +SET @col_5 = repeat('k', 100); +SET @col_6 = repeat('l', 100); + +# check the table status is GOOD with DML +let $i = 7; +eval INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,$i); + +# check the records from table +-- disable_result_log +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; + +-- disable_result_log +SELECT * FROM tab2 ORDER BY col_7; + +--echo # stop the server +--source include/shutdown_mysqld.inc + +--echo [1(c)]: Rewrite into new checksum=none for all *.ibd file and ibdata1 +--exec $INNOCHECKSUM --write=none $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --write=none $MYSQLD_DATADIR/test/tab2.ibd +--exec $INNOCHECKSUM --write=none $MYSQLD_DATADIR/ibdata1 +perl; +foreach (glob("$ENV{MYSQLD_DATADIR}/undo*")) { + system("$ENV{INNOCHECKSUM} --no-check --write=NONE $_") +} +foreach (glob("$ENV{MYSQLD_DATADIR}/*/*.ibd")) { + system("$ENV{INNOCHECKSUM} --no-check --write=NONE $_") +} +EOF + +let $restart_parameters = restart: --innodb_checksum_algorithm=strict_none --default_storage_engine=InnoDB; +--source include/start_mysqld.inc + +# check the table status is GOOD with DML +INSERT INTO tab1 (pk, linestring_key, linestring_nokey) +VALUES (4, ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) '), ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) ')); + +# load the with repeat function +SET @col_1 = repeat('m', 5); +SET @col_2 = repeat('n', 20); +SET @col_3 = repeat('o', 100); +SET @col_4 = repeat('p', 100); +SET @col_5 = repeat('q', 100); +SET @col_6 = repeat('r', 100); + +# check the table status is GOOD with DML +let $i = 8; +eval INSERT INTO tab2(col_1,col_2,col_3,col_4,col_5,col_6,col_7) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,$i); + +# check the records from table +-- disable_result_log +SELECT pk,ST_AsText(linestring_key),ST_AsText(linestring_nokey) +FROM tab1 ORDER BY pk; + +--disable_result_log +SELECT * FROM tab2 ORDER BY col_7; +--enable_result_log + +--echo # stop the server +--source include/shutdown_mysqld.inc + +--echo [2]: Check the page type summary with shortform for tab1.ibd +--replace_regex /File.*.ibd/File::tab1.ibd/ /[0-9]+/#/ +--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/tab1.ibd 2>$MYSQLTEST_VARDIR/tmp/page_summary_short.txt + +--echo [3]: Check the page type summary with longform for tab1.ibd +--replace_regex /File.*.ibd/File::tab1.ibd/ /[0-9]+/#/ +--exec $INNOCHECKSUM --page-type-summary $MYSQLD_DATADIR/test/tab1.ibd 2>$MYSQLTEST_VARDIR/tmp/page_summary_long.txt + +--remove_file $MYSQLTEST_VARDIR/tmp/page_summary_short.txt +--remove_file $MYSQLTEST_VARDIR/tmp/page_summary_long.txt +--echo [4]: Page type dump for with longform for tab1.ibd +--exec $INNOCHECKSUM --page-type-dump $MYSQLTEST_VARDIR/tmp/dump.txt $MYSQLD_DATADIR/test/tab1.ibd + +perl; +use strict; +use warnings; +use File::Copy; +my $dir = $ENV{'MYSQLTEST_VARDIR'}; +opendir(DIR, $dir) or die $!; +my $file= 'dump.txt'; +# open file in write mode +open IN_FILE,"<", "$dir/tmp/$file" or die $!; +open OUT_FILE, ">", "$dir/tmp/tmpfile" or die $!; +while() +{ + # Replace the intergers to # and complete file patht to file name only. 
+ $_=~ s/Filename.+/Filename::tab1.ibd/g; + $_=~ s/\d+/#/g; + print OUT_FILE $_; +} +close(IN_FILE); +close(OUT_FILE); +# move the new content from tmp file to the orginal file. +move ("$dir/tmp/tmpfile", "$dir/tmp/$file"); +closedir(DIR); +EOF + +--echo # Print the contents stored in dump.txt +cat_file $MYSQLTEST_VARDIR/tmp/dump.txt; +--remove_file $MYSQLTEST_VARDIR/tmp/dump.txt + +# Page type dump for ibdata1 +--exec $INNOCHECKSUM -v --page-type-dump $MYSQLTEST_VARDIR/tmp/dump.txt $MYSQLD_DATADIR/ibdata1 +--file_exists $MYSQLTEST_VARDIR/tmp/dump.txt +--remove_file $MYSQLTEST_VARDIR/tmp/dump.txt + +--echo [5]: Page type dump for with shortform for tab1.ibd +--exec $INNOCHECKSUM -D $MYSQLTEST_VARDIR/tmp/dump.txt $MYSQLD_DATADIR/test/tab1.ibd + +perl; +use strict; +use warnings; +use File::Copy; +my $dir = $ENV{'MYSQLTEST_VARDIR'}; +opendir(DIR, $dir) or die $!; +my $file= 'dump.txt'; +# open file in write mode +open IN_FILE,"<", "$dir/tmp/$file" or die $!; +open OUT_FILE, ">", "$dir/tmp/tmpfile" or die $!; +while() +{ + # Replace teh intergers to # and complete file patht to file name only. + $_=~ s/Filename.+/Filename::tab1.ibd/g; + $_=~ s/\d+/#/g; + print OUT_FILE $_; +} +close(IN_FILE); +close(OUT_FILE); +# move the new content from tmp file to the orginal file. +move ("$dir/tmp/tmpfile", "$dir/tmp/$file"); +closedir(DIR); +EOF + +# Print the contents stored in dump.txt +cat_file $MYSQLTEST_VARDIR/tmp/dump.txt; +--remove_file $MYSQLTEST_VARDIR/tmp/dump.txt + +--echo [6]: check the valid lower bound values for option +--echo # allow-mismatches,page,start-page,end-page +--exec $INNOCHECKSUM --allow-mismatches=0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -a 0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --page=0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -p 0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --start-page=0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -s 0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --end-page=0 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -e 0 $MYSQLD_DATADIR/test/tab1.ibd + +--echo [7]: check the negative values for option +--echo # allow-mismatches,page,start-page,end-page. +--echo # They will reset to zero for negative values. +--echo # check the invalid lower bound values +--exec $INNOCHECKSUM --allow-mismatches=-1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -a -1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --page=-1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -p -1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --start-page=-1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -s -1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --end-page=-1 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -e -1 $MYSQLD_DATADIR/test/tab1.ibd + +--echo [8]: check the valid upper bound values for +--echo # both short and long options "allow-mismatches" and "end-page" + +--exec $INNOCHECKSUM --allow-mismatches=18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -a 18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM --end-page=18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd +--exec $INNOCHECKSUM -e 18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd + +--echo [9]: check the both short and long options "page" and "start-page" when +--echo # seek value is larger than file size. 
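#
# Why [9] differs from [8]: allow-mismatches and end-page simply accept a
# value up to the 64-bit maximum, but page/start-page make innochecksum
# seek to the requested page first, so a page number this large yields an
# offset the seek call rejects and the tool stops with the "Unable to
# seek" error matched below (an observed behaviour in this test rather
# than a documented limit).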
+--error 1 +--exec $INNOCHECKSUM --page=18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: Unable to seek to necessary offset: Invalid argument; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -p 18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: Unable to seek to necessary offset: Invalid argument; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --start-page=18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: Unable to seek to necessary offset: Invalid argument; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -s 18446744073709551615 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Error: Unable to seek to necessary offset: Invalid argument; +--source include/search_pattern_in_file.inc + +--echo [34]: check the invalid upper bound values for options, allow-mismatches, end-page, start-page and page. +--echo # innochecksum will fail with error code: 1 +--error 1 +--exec $INNOCHECKSUM --allow-mismatches=18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -a 18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --end-page=18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -e 18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --page=18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -p 18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM --start-page=18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc + +--error 1 +--exec $INNOCHECKSUM -s 18446744073709551616 $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE +let SEARCH_PATTERN= Incorrect unsigned integer value: '18446744073709551616'; +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +# Cleanup +--echo # Restart the server +--source include/start_mysqld.inc + +DROP TABLE tab1; +DROP TABLE tab2; +SET GLOBAL innodb_file_per_table=default; diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36169.opt b/mysql-test/suite/innodb_zip/t/innodb_bug36169.opt new file mode 100644 index 00000000000..3a4e594f382 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/innodb_bug36169.opt @@ -0,0 +1 @@ +--innodb_large_prefix=ON \ No newline at end of file diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test 
b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test index 6426bd683ae..1d82b95a602 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug36169.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug36169.test @@ -4,6 +4,8 @@ # http://bugs.mysql.com/36169 # +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size .* for a record on index leaf page."); + let $file_format=`select @@innodb_file_format`; let $file_per_table=`select @@innodb_file_per_table`; SET GLOBAL innodb_file_format='Barracuda'; diff --git a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test index 8bc461719b8..6c80165f6eb 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_bug53591.test +++ b/mysql-test/suite/innodb_zip/t/innodb_bug53591.test @@ -5,7 +5,7 @@ let $file_per_table=`select @@innodb_file_per_table`; SET GLOBAL innodb_file_format='Barracuda'; SET GLOBAL innodb_file_per_table=on; - +SET GLOBAL innodb_strict_mode=on; set old_alter_table=0; CREATE TABLE bug53591(a text charset utf8 not null) @@ -20,3 +20,4 @@ DROP TABLE bug53591; EVAL SET GLOBAL innodb_file_format=$file_format; EVAL SET GLOBAL innodb_file_per_table=$file_per_table; +SET GLOBAL innodb_strict_mode=DEFAULT; diff --git a/mysql-test/suite/innodb_zip/t/innodb_index_large_prefix.test b/mysql-test/suite/innodb_zip/t/innodb_index_large_prefix.test index 17f82f88fef..8af2bc7ffc7 100644 --- a/mysql-test/suite/innodb_zip/t/innodb_index_large_prefix.test +++ b/mysql-test/suite/innodb_zip/t/innodb_index_large_prefix.test @@ -4,6 +4,8 @@ --source include/have_innodb_16k.inc SET default_storage_engine=InnoDB; +call mtr.add_suppression("Cannot add field .* in table .* because after adding it, the row size is .* which is greater than maximum allowed size (.*) for a record on index leaf page."); + let $innodb_file_format_orig=`select @@innodb_file_format`; let $innodb_file_per_table_orig=`select @@innodb_file_per_table`; let $innodb_large_prefix_orig=`select @@innodb_large_prefix`; @@ -11,6 +13,7 @@ let $innodb_large_prefix_orig=`select @@innodb_large_prefix`; set global innodb_file_format="Barracuda"; set global innodb_file_per_table=1; set global innodb_large_prefix=1; +set global innodb_strict_mode=1; -- echo ### Test 1 ### # Create a table of DYNAMIC format, with a primary index of 1000 bytes in @@ -365,9 +368,9 @@ drop table worklog5743; -- echo ### Test 6 ### # Create a table with old format, and the limit is 768 bytes. 
-- error ER_INDEX_COLUMN_TOO_LONG -create table worklog5743(a TEXT not null, primary key (a(1000))); +create table worklog5743(a TEXT not null, primary key (a(1000))) row_format=COMPACT; -create table worklog5743(a TEXT); +create table worklog5743(a TEXT) row_format=COMPACT; # Excercise the column length check in ha_innobase::add_index() -- error ER_INDEX_COLUMN_TOO_LONG @@ -428,6 +431,7 @@ drop table worklog5743; eval SET GLOBAL innodb_file_format=$innodb_file_format_orig; eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig; eval SET GLOBAL innodb_large_prefix=$innodb_large_prefix_orig; +SET GLOBAL innodb_strict_mode = DEFAULT; --connection con1 --disconnect con1 --source include/wait_until_disconnected.inc diff --git a/mysql-test/suite/innodb_zip/t/innodb_prefix_index_liftedlimit.test b/mysql-test/suite/innodb_zip/t/innodb_prefix_index_liftedlimit.test deleted file mode 100644 index 1c02cafa47e..00000000000 --- a/mysql-test/suite/innodb_zip/t/innodb_prefix_index_liftedlimit.test +++ /dev/null @@ -1,1371 +0,0 @@ -######## suite/innodb/t/innodb_prefix_index_liftedlimit.test ########## -# # -# Testcase for worklog WL#5743: Lift the limit of index key prefixes # -# Accorrding to WL#5743 - prefix index limit is increased from 767 # -# to 3072 for innodb. This change is applicable with Barracuda file # -# format. # -# All sub-test in this file focus on prefix index along with other # -# operations # -# # -# # -# Creation: # -# 2011-05-19 Implemented this test as part of WL#5743 # -# # -###################################################################### - ---source include/have_innodb.inc ---source include/have_innodb_16k.inc - -# Save innodb variables ---disable_query_log -let $innodb_file_format_orig = `select @@innodb_file_format`; -let $innodb_file_per_table_orig = `select @@innodb_file_per_table`; -let $innodb_large_prefix_orig = `select @@innodb_large_prefix`; ---enable_query_log - -# Set Innodb file format as feature works for Barracuda file format -set global innodb_file_format="Barracuda"; -set global innodb_file_per_table=1; -set global innodb_large_prefix=1; - --- disable_warnings -DROP TABLE IF EXISTS worklog5743; --- enable_warnings -#------------------------------------------------------------------------------ -# Prefix index with VARCHAR data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -# check IS -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; ---error ER_INDEX_COLUMN_TOO_LONG -ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT; ---error ER_INDEX_COLUMN_TOO_LONG -ALTER 
TABLE worklog5743 ROW_FORMAT=COMPACT; -ALTER TABLE worklog5743 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with TEXT data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (3072)); -# check IS -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with MEDIUMTEXT data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_mediumtext MEDIUMTEXT , col_2_mediumtext MEDIUMTEXT , -PRIMARY KEY (col_1_mediumtext(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_mediumtext (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_mediumtext = REPEAT("a", 4000),col_2_mediumtext = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_mediumtext = REPEAT("c", 4000) -WHERE col_1_mediumtext = REPEAT("a", 4000) -AND col_2_mediumtext = REPEAT("o", 4000); -SELECT col_1_mediumtext = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_mediumtext = REPEAT("c", 4000) -AND col_2_mediumtext = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_mediumtext = REPEAT("b", 4000); -SELECT col_1_mediumtext = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with LONGTEXT data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_longtext LONGTEXT , col_2_longtext LONGTEXT , -PRIMARY KEY (col_1_longtext(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_longtext (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_longtext = REPEAT("a", 4000) , col_2_longtext = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_longtext = REPEAT("c", 4000) -WHERE col_1_longtext = REPEAT("a", 4000) -AND col_2_longtext = REPEAT("o", 4000); -SELECT col_1_longtext = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_longtext = REPEAT("c", 4000) -AND col_2_longtext = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_longtext = REPEAT("b", 
4000); -SELECT col_1_longtext = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with BLOB data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_blob (3072)); -# check IS -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE FROM -INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with MEDIUMBLOB data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_mediumblob MEDIUMBLOB , col_2_mediumblob MEDIUMBLOB , -PRIMARY KEY (col_1_mediumblob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_mediumblob (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_mediumblob = REPEAT("a", 4000),col_2_mediumblob = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_mediumblob = REPEAT("c", 4000) -WHERE col_1_mediumblob = REPEAT("a", 4000) -AND col_2_mediumblob = REPEAT("o", 4000); -SELECT col_1_mediumblob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_mediumblob = REPEAT("c", 4000) -AND col_2_mediumblob = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_mediumblob = REPEAT("b", 4000); -SELECT col_1_mediumblob = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with LONGBLOB data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_longblob LONGBLOB , col_2_longblob LONGBLOB , -PRIMARY KEY (col_1_longblob(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_longblob (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_longblob = REPEAT("a", 4000) , col_2_longblob = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_longblob = REPEAT("c", 4000) -WHERE col_1_longblob = REPEAT("a", 4000) -AND col_2_longblob = REPEAT("o", 4000); -SELECT col_1_longblob = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_longblob = REPEAT("c", 4000) -AND col_2_longblob = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_longblob = REPEAT("b", 4000); -SELECT col_1_longblob = 
REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with VARBINARY data type , primary/secondary index and DML ops -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varbinary VARBINARY (4000) , -PRIMARY KEY (col_1_varbinary(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varbinary = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with CHAR data type , composite index and DML ops -CREATE TABLE worklog5743 (col_1_char CHAR (255) , col_2_char CHAR (255), -col_3_char CHAR (255), col_4_char CHAR (255),col_5_char CHAR (255), -col_6_char CHAR (255), col_7_char CHAR (255),col_8_char CHAR (255), -col_9_char CHAR (255), col_10_char CHAR (255),col_11_char CHAR (255), -col_12_char CHAR (255), col_13_char CHAR (255),col_14_char CHAR (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -# Create index with total prefix index length = 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_char(250),col_2_char(250), -col_3_char(250),col_4_char(250),col_5_char(250),col_6_char(250), -col_7_char(250),col_8_char(250),col_9_char(250),col_10_char(250), -col_11_char(250),col_12_char(250),col_13_char(72) -); -INSERT INTO worklog5743 VALUES(REPEAT("b", 255) , REPEAT("p", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -SELECT col_1_char = REPEAT("a", 255) , col_2_char = REPEAT("o", 255) FROM worklog5743; -UPDATE worklog5743 SET col_1_char = REPEAT("c", 255) -WHERE col_1_char = REPEAT("a", 255) AND col_2_char = REPEAT("o", 255); -SELECT col_1_char = REPEAT("c", 255) FROM worklog5743 -WHERE col_1_char = REPEAT("c", 255) AND col_2_char = REPEAT("o", 255); -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255),REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_char = REPEAT("b", 255); -SELECT col_1_char = REPEAT("c", 255) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with BINARY 
data type , composite index and DML ops -CREATE TABLE worklog5743 (col_1_binary BINARY (255) , col_2_binary BINARY (255), -col_3_binary BINARY(255),col_4_binary BINARY (255),col_5_binary BINARY (255), -col_6_binary BINARY(255),col_7_binary BINARY (255),col_8_binary BINARY (255), -col_9_binary BINARY(255),col_10_binary BINARY (255),col_11_binary BINARY (255), -col_12_binary BINARY(255),col_13_binary BINARY (255),col_14_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -# Create index with total prefix index length = 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_binary (250),col_2_binary (250), -col_3_binary (250),col_4_binary (250),col_5_binary (250), -col_6_binary (250),col_7_binary (250),col_8_binary (250), -col_9_binary (250),col_10_binary (250),col_11_binary (250), -col_12_binary (250),col_13_binary (72) -); -INSERT INTO worklog5743 VALUES(REPEAT("b", 255) , REPEAT("p", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -SELECT col_1_binary = REPEAT("a", 255) , col_2_binary = REPEAT("o", 255) FROM worklog5743; -UPDATE worklog5743 SET col_1_binary = REPEAT("c", 255) -WHERE col_1_binary = REPEAT("a", 255) -AND col_2_binary = REPEAT("o", 255); -SELECT col_1_binary = REPEAT("c", 255) FROM worklog5743 -WHERE col_1_binary = REPEAT("c", 255) -AND col_2_binary = REPEAT("o", 255); -INSERT INTO worklog5743 VALUES(REPEAT("a", 255) , REPEAT("o", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255), -REPEAT("a", 255) , REPEAT("o", 255), REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_binary = REPEAT("b", 255); -SELECT col_1_binary = REPEAT("c", 255) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with VARCHAR data type , primary/seconday index , DML ops -# and COMPRESSED row format. KEY_BLOCK_SIZE is varied as 2 , 4 , 8. 
- -# With KEY_BLOCK_SIZE = 2,prefix index limit comes around ~948 for following -CREATE TABLE worklog5743_key2 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key2 (col_1_varchar (767)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key2; -UPDATE worklog5743_key2 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key2 -WHERE col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key2; -DROP TABLE worklog5743_key2; - -# With KEY_BLOCK_SIZE = 4,prefix index limit comes around ~1964 for following -CREATE TABLE worklog5743_key4 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key4 (col_1_varchar (767)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key4; -UPDATE worklog5743_key4 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key4; -DROP TABLE worklog5743_key4; - -# With KEY_BLOCK_SIZE = 8,prefix index limit comes around ~3072 for following -CREATE TABLE worklog5743_key8 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key8 (col_1_varchar (767)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743_key8; -UPDATE worklog5743_key8 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743_key8; -DROP TABLE worklog5743_key8; - -# Prefix index with TEXT data type , primary/seconday index , DML ops -# and COMPRESSED row format. KEY_BLOCK_SIZE is varied as 2 , 4 , 8. 
- -# With KEY_BLOCK_SIZE = 2,prefix index limit comes around ~948 for following -CREATE TABLE worklog5743_key2 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key2 (col_1_text (767)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743_key2; -UPDATE worklog5743_key2 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key2 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key2; -DROP TABLE worklog5743_key2; - -# With KEY_BLOCK_SIZE = 4,prefix index limit comes around ~1964 for following -CREATE TABLE worklog5743_key4 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key4 (col_1_text (767)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743_key4; -UPDATE worklog5743_key4 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key4; -DROP TABLE worklog5743_key4; - -# With KEY_BLOCK_SIZE = 8,prefix index limit comes around ~3072 for following -CREATE TABLE worklog5743_key8 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key8 (col_1_text (767)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743_key8; -UPDATE worklog5743_key8 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743_key8; -DROP TABLE worklog5743_key8; - -# Prefix index with BLOB data type , primary/seconday index , DML ops -# and COMPRESSED row format. KEY_BLOCK_SIZE is varied as 2 , 4 , 8. 
- -# With KEY_BLOCK_SIZE = 2,prefix index limit comes around ~948 for following -CREATE TABLE worklog5743_key2 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key2 (col_1_blob (767)); -INSERT INTO worklog5743_key2 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key2; -UPDATE worklog5743_key2 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key2 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -INSERT INTO worklog5743_key2 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key2 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key2; -DROP TABLE worklog5743_key2; - -# With KEY_BLOCK_SIZE = 4,prefix index limit comes around ~1964 for following -CREATE TABLE worklog5743_key4 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(1964)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb; -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key4 (col_1_blob (767)); -INSERT INTO worklog5743_key4 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key4; -UPDATE worklog5743_key4 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key4 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -INSERT INTO worklog5743_key4 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key4 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key4; -DROP TABLE worklog5743_key4; - -# With KEY_BLOCK_SIZE = 8,prefix index limit comes around ~3072 for following -CREATE TABLE worklog5743_key8 ( -col_1_blob BLOB (4000) , col_2_blob BLOB (4000) , -PRIMARY KEY (col_1_blob(3072)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8, engine = innodb; -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743_key8 (col_1_blob (767)); -INSERT INTO worklog5743_key8 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_blob = REPEAT("a", 4000) , col_2_blob = REPEAT("o", 4000) -FROM worklog5743_key8; -UPDATE worklog5743_key8 SET col_1_blob = REPEAT("c", 4000) -WHERE col_1_blob = REPEAT("a", 4000) AND col_2_blob = REPEAT("o", 4000); -SELECT col_1_blob = REPEAT("b", 3500) FROM worklog5743_key8 -WHERE col_1_blob = REPEAT("c", 4000) AND col_2_blob = REPEAT("o", 4000); -INSERT INTO worklog5743_key8 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743_key8 WHERE col_1_blob = REPEAT("b", 4000); -SELECT col_1_blob = REPEAT("c", 4000) FROM worklog5743_key8; -DROP TABLE worklog5743_key8; - - -#------------------------------------------------------------------------------ -# Create mutiple prefix index. We can not create prefix index length > 16K -# as index is written in undo log page which of 16K size. 
-# So we can create max 2 prefix index of length 3072 on table -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) , -col_3_text TEXT (4000), col_4_blob BLOB (4000), col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); - -# Update reports ER_UNDO_RECORD_TOO_BIG if we create more than 2 indexes. -# Bug#12547647 - UPDATE LOGGING COULD EXCEED LOG PAGE SIZE -CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072)); -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072)); - -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); - -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; - -# Add 3 more indexes. -# Update used to hang but now ER_UNDO_RECORD_TOO_BIG is reported; -# Bug#12547647 - UPDATE LOGGING COULD EXCEED UNDO LOG PAGE SIZE -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072)); -CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072)); -CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072)); ---error ER_UNDO_RECORD_TOO_BIG -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SHOW WARNINGS; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Create mutiple prefix index. We can not create prefix index length > 16K as -# we write in undo log page which of 16K size. -# so we can create max 5 prefix index of length 3072 on table. -# Similar to above case but with transactions -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) , -col_3_text TEXT (4000), col_4_blob BLOB (4000),col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; - - -# Update used to hang if we create following 5 indexes. 
Fixed in; -# Bug#12547647 - UPDATE LOGGING COULD EXCEED UNDO LOG PAGE SIZE -CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072)); -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072)); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072)); -CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072)); -CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072)); - -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -ROLLBACK; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -COMMIT; -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; - -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -ROLLBACK; -# Bug#12547647 - UPDATE LOGGING COULD EXCEED LOG PAGE SIZE -# Instead of this error, it would hang before this fix. ---error ER_UNDO_RECORD_TOO_BIG -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SHOW WARNINGS; -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with utf8 charset -# utf8 charcter takes 3 bytes in mysql so prefix index limit is 3072/3 = 1024 -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) CHARACTER SET 'utf8', -col_2_text TEXT (4000) CHARACTER SET 'utf8', -PRIMARY KEY (col_1_text(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1024)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -# Prefix index with utf8 charset + varchar. -# utf8 charcter takes 3 bytes in mysql so prefix index limit is 3072/3 = 1024 -# This is a case where dict_index_too_big_for_undo() is too conservative. -# If it did not return error 1118, to commented code would work. -# See bug#12953735. 
---replace_regex /> [0-9]*/> max_row_size/ --- error ER_TOO_BIG_ROWSIZE -CREATE TABLE worklog5743 (col_1_varchar VARCHAR (4000) CHARACTER SET 'utf8', -col_2_varchar VARCHAR (4000) CHARACTER SET 'utf8' , -PRIMARY KEY (col_1_varchar(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -#INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -#CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1024)); -#INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -#SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) FROM worklog5743; -#UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -#WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -#SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -#WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -#INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -#DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -#SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -#DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# prefinx index on utf8 charset with transaction -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , -col_2_varchar VARCHAR (4000) CHARACTER SET 'utf8', -col_3_text TEXT (4000) CHARACTER SET 'utf8', -col_4_blob BLOB (4000),col_5_text TEXT (4000), -col_6_varchar VARCHAR (4000), col_7_binary BINARY (255) -) ROW_FORMAT=DYNAMIC, engine = innodb; - - -CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (500)); -CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (500)); - -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -ROLLBACK; -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -COMMIT; -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; - -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -ROLLBACK; -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000), -REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000), -REPEAT("a", 4000) , REPEAT("a", 255) -); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with utf8 charset on TEXT data type with actual utf8 character -# like "स" and "क" -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) CHARACTER SET 'utf8', -col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(1024)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("स", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1024)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("स", 4000) , col_2_text = 
REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("क", 4000) -WHERE col_1_text = REPEAT("स", 4000) AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("क", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("b", 4000); -SELECT col_1_text = REPEAT("क", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with transaction when 2 client are ruuning there transaction -# in different sessions.With ISOLATION LEVEL as REPEATABLE READ and -# READ UNCOMMITTED. -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; - ---connect (con1,localhost,root,,) -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; - - ---connect (con2,localhost,root,,) -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -# Uncomment after Bug#12552164 - TRANSACTION CAN NOT SEE OLD VERSION ROWS THAT -# BEING UPDATED -#UPDATE worklog5743 SET col_1_varchar = REPEAT("d", 200) WHERE col_1_varchar = -#REPEAT("a", 200) AND col_2_varchar = REPEAT("o", 200); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; - - ---connection con1 -select @@session.tx_isolation; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -select @@session.tx_isolation; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; - -START TRANSACTION; - -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; - ---connection con2 -COMMIT; -# Wait for commit -let $wait_condition=SELECT COUNT(*)=0 FROM information_schema.processlist -WHERE info='COMMIT'; ---source include/wait_condition.inc - ---connection con1 -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; -COMMIT; - ---connection default -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Prefix index with transaction when 2 client are ruuning there transaction -# in different sessions.With ISOLATION LEVEL as REPEATABLE READ and -# READ UNCOMMITTED. 
Same as above case but con2 starts tnx before con1 - -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; - ---connection con1 -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; -START TRANSACTION; - - ---connection con2 -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("a", 200); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -COMMIT; -# Wait for commit -let $wait_condition=SELECT COUNT(*)=0 FROM information_schema.processlist -WHERE info='COMMIT'; ---source include/wait_condition.inc - - ---connection con1 -SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; -select @@session.tx_isolation; -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ; - -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; -COMMIT; - ---connection default -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ - -# Prefix index with transaction when 2 client are ruuning there transaction -# in different sessions.With ISOLATION LEVEL as REPEATABLE READ and -# READ UNCOMMITTED. Same as above cases but with ROLLBACK - -CREATE TABLE worklog5743 ( -col_1_text TEXT(4000) , col_2_text TEXT(4000) , -PRIMARY KEY (col_1_text(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 200) , REPEAT("o", 200)); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; - ---connection con1 -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; -START TRANSACTION; - - ---connection con2 -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("b", 200) , REPEAT("o", 200)); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("a", 200); -SELECT col_1_text = REPEAT("a", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -ROLLBACK; -# Wait for rollback -let $wait_condition=SELECT COUNT(*)=0 FROM information_schema.processlist -WHERE info='COMMIT'; ---source include/wait_condition.inc - - ---connection con1 -SELECT col_1_text = REPEAT("b", 200) , col_2_text = REPEAT("o", 200) FROM -worklog5743; -SELECT COUNT(*) FROM worklog5743; -COMMIT; - ---disconnect con1 ---source include/wait_until_disconnected.inc ---connection con2 ---disconnect con2 ---source include/wait_until_disconnected.inc - ---connection default -DROP TABLE worklog5743; - - -#------------------------------------------------------------------------------ -# Select queries on prefix index column as index will be used in queries. -# Use few select functions , join condition , subqueries. 
- -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; - -# Select with Join -SELECT tbl1.col_1_varchar = tbl2.col_1_varchar -FROM worklog5743 tbl1 , worklog5743 tbl2 -WHERE tbl1.col_1_varchar = tbl2.col_1_varchar ; - -# Select in subquey -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) ; -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar NOT IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) ; -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 WHERE -col_1_varchar IN (SELECT tbl2.col_1_varchar FROM worklog5743 tbl2) -AND col_1_varchar = REPEAT("c", 4000); -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar in ( -SELECT tbl2.col_1_varchar FROM worklog5743 tbl2 -WHERE tbl1.col_1_varchar != tbl2.col_1_varchar -) ; -SELECT tbl1.col_1_varchar = REPEAT("c", 4000) FROM worklog5743 tbl1 -WHERE col_1_varchar in ( -SELECT tbl2.col_1_varchar FROM worklog5743 tbl2 -WHERE tbl1.col_1_varchar = tbl2.col_1_varchar -) ; - -# function -SELECT -REVERSE(col_1_varchar) = REPEAT("c", 4000) , -REVERSE(REVERSE(col_1_varchar)) = REPEAT("c", 4000) -FROM worklog5743; -SELECT -UPPER(col_1_varchar) = REPEAT("c", 4000) , -UPPER(col_1_varchar) = REPEAT("C", 4000) , -LOWER(UPPER(col_1_varchar)) = REPEAT("c", 4000) -FROM worklog5743; -SELECT -col_1_varchar = REPEAT("c", 4000) -FROM worklog5743 WHERE col_1_varchar like '%c__%'; -SELECT SUBSTRING(INSERT(col_1_varchar, 1, 4, 'kkkk'),1,10) FROM worklog5743 ; -SELECT CONCAT(SUBSTRING(col_1_varchar,-5,3),'append') FROM worklog5743 ; - - -DROP TABLE worklog5743; - -#------------------------------------------------------------------------------ -# Prefix index with NULL values -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , -col_2_varchar VARCHAR (4000) , -UNIQUE INDEX (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 1000),REPEAT("c", 1000)), REPEAT("o", 4000)); -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 2000)), REPEAT("o", 4000)); -INSERT INTO worklog5743 VALUES(NULL,NULL); -INSERT INTO worklog5743 VALUES(NULL,NULL); -# check IS -SELECT COLUMN_NAME,INDEX_NAME,SUB_PART,INDEX_TYPE -FROM INFORMATION_SCHEMA.STATISTICS WHERE table_name = 'worklog5743' ; -SELECT col_1_varchar FROM worklog5743 WHERE col_1_varchar IS NULL; -SELECT col_1_varchar = concat(REPEAT("a", 2000),REPEAT("b", 2000)) -FROM worklog5743 WHERE col_1_varchar IS NOT NULL ORDER BY 1; - - -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- -# Try 
drop and add secondary prefix index -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072))) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -# Drop index -DROP INDEX prefix_idx ON worklog5743; - -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000); -# Again add index -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- - -# Try drop and add primary prefix index -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY `prefix_primary` (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -# Drop index -ALTER TABLE worklog5743 DROP PRIMARY KEY; - -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) -AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) -AND col_2_varchar = REPEAT("o", 4000); -# Again add index -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072)); - -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); ---error ER_DUP_ENTRY -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -# ----------------------------------------------------------------------------- - -# Try drop and add both (primary/secondary) prefix index -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY `prefix_primary` (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -# Drop primary index -ALTER TABLE worklog5743 DROP PRIMARY KEY; -# Drop secondary index -DROP INDEX prefix_idx ON worklog5743; - -SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000) -WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar 
= REPEAT("o", 4000); -# Again add index -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); - -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); ---error ER_DUP_ENTRY -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000); -SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743; -DROP TABLE worklog5743; - - -# ----------------------------------------------------------------------------- -# Drop index from differnt session -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR(4000) , col_2_varchar VARCHAR(4000) , -PRIMARY KEY (col_1_varchar (3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("c", 3500) , REPEAT("o", 3500)); -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072)); - ---connect (con1,localhost,root,,) - - ---connection con1 -SELECT col_1_varchar = REPEAT("c", 3500) , col_2_varchar = REPEAT("o", 3500) -FROM worklog5743; - ---connection default -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("a", 3500) , REPEAT("o", 3500)); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743 -WHERE col_2_varchar = REPEAT("o", 3500); -COMMIT; - ---connection con1 -START TRANSACTION; -INSERT INTO worklog5743 VALUES(REPEAT("k", 3500),REPEAT("p", 3500)); -# Drop primary index -ALTER TABLE worklog5743 DROP PRIMARY KEY; -UPDATE worklog5743 SET col_1_varchar = REPEAT("b", 3500) -WHERE col_1_varchar = REPEAT("a", 3500) -AND col_2_varchar = REPEAT("o", 3500); -SELECT col_1_varchar = REPEAT("b", 3500) FROM worklog5743 -WHERE col_2_varchar = REPEAT("o", 3500); - ---connection default -DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 3500); -SELECT col_1_varchar = REPEAT("a", 3500) FROM worklog5743 -WHERE col_2_varchar = REPEAT("p", 3500); - ---connection con1 -COMMIT; - ---connection default -DROP TABLE worklog5743; - - - -# ----------------------------------------------------------------------------- -# Create prefix index with length < 3072 , length = 3072 , length > 3072 -# - varbinary data type + secondary index -CREATE TABLE worklog5743 ( -col_1_varbinary VARBINARY (4000) , col_2_varbinary VARBINARY (4000) , -PRIMARY KEY (col_1_varbinary(3072))) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index of 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) , col_2_varbinary = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -# Drop index -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_varbinary = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("b", 4000) -AND col_2_varbinary = REPEAT("p", 4000); - - -# Again add index length < 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (2000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 
4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -# Drop index -DROP INDEX prefix_idx ON worklog5743; - -# Again add index length > 3072. -# If "innodb_large_prefix" is turned on, than the index prefix larger than 3072 -# will be truncated to 3072. If the table is REDUNDANT and COMPACT, which does -# not support prefix > 767, the create index will be rejected. -CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000) -WHERE col_1_varbinary = REPEAT("a", 4000) -AND col_2_varbinary = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("c", 4000); -SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_varbinary = REPEAT("c", 4000) -AND col_2_varbinary = REPEAT("o", 4000); - - -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- -# Create prefix index with length < 3072 , length = 3072 , length > 3072 -# text data type + secondary index -CREATE TABLE worklog5743 (col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(500)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index of 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_text (3072)); -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) -FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -# Drop index -DROP INDEX prefix_idx ON worklog5743; -SELECT col_1_text = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("b", 4000) AND col_2_text = REPEAT("p", 4000); - -# Again add index length < 3072 -CREATE INDEX prefix_idx ON worklog5743(col_1_text (1000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); -# Drop index -DROP INDEX prefix_idx ON worklog5743; - -# Again add index length > 3072. Expect error.Length exceeds maximum supported -# key length -# Again add index length > 3072. -# If "innodb_large_prefix" is turned on, than the index prefix larger than 3072 -# will be truncated to 3072. 
If the table is REDUNDANT and COMPACT, which does -# not support prefix > 767, the create index will be rejected. -CREATE INDEX prefix_idx ON worklog5743(col_1_text (4000)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) AND col_2_text = REPEAT("o", 4000); - -DROP TABLE worklog5743; - - -# ----------------------------------------------------------------------------- -# Create prefix index with length < 948 , length = 948 , length > 948 -# For compressed row type + primary key -CREATE TABLE worklog5743 ( -col_1_text TEXT (4000) , col_2_text TEXT (4000) , -PRIMARY KEY (col_1_text(948)) -) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000)); -# Create index of 767 -INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000)); -SELECT col_1_text = REPEAT("a", 4000) , col_2_text = REPEAT("o", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -# Drop index -#DROP INDEX prefix_idx ON worklog5743; -ALTER TABLE worklog5743 DROP PRIMARY KEY; -SELECT col_1_text = REPEAT("b", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("b", 4000) -AND col_2_text = REPEAT("p", 4000); - -# Again add index length < 767 -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (700)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); -# Drop index -ALTER TABLE worklog5743 DROP PRIMARY KEY; - -# Again add index length > 948. Expect error 'to big row ' due to exceed -# in key length. 
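The surrounding hunk is probing the record-size ceiling of a 2K compressed page: a 948-byte primary-key prefix is accepted, while 950 bytes is expected to fail with ER_TOO_BIG_ROWSIZE just below. A minimal sketch of that boundary, assuming innodb_file_per_table and innodb_file_format=Barracuda are set as earlier in this test (the table name row_limit_demo is made up for illustration, not part of the patch):

# 948 bytes still fits the per-record limit of a KEY_BLOCK_SIZE=2 page.
CREATE TABLE row_limit_demo (c1 TEXT, c2 TEXT,
PRIMARY KEY (c1(948))
) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 ENGINE=InnoDB;
ALTER TABLE row_limit_demo DROP PRIMARY KEY;
# A slightly longer prefix no longer fits the compressed page and is rejected.
--error ER_TOO_BIG_ROWSIZE
ALTER TABLE row_limit_demo ADD PRIMARY KEY (c1(950));
DROP TABLE row_limit_demo;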
--- error ER_TOO_BIG_ROWSIZE -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (950)); -INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); -SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743; -UPDATE worklog5743 SET col_1_text = REPEAT("c", 4000) -WHERE col_1_text = REPEAT("a", 4000) -AND col_2_text = REPEAT("o", 4000); -DELETE FROM worklog5743 WHERE col_1_text = REPEAT("c", 4000); -SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743 -WHERE col_1_text = REPEAT("c", 4000) -AND col_2_text = REPEAT("o", 4000); - -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- -# Create prefix index with length < 3072 , length = 3072 , length > 3072 -# data types VARCHAR -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar (900)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -# Again add index length > 3072. Expect error.Length exceeds maximum supported -# key length -# Again add index length > 3072. -# If "innodb_large_prefix" is turned on, than the index prefix larger than 3072 -# will be truncated to 3072. If the table is REDUNDANT and COMPACT, which does -# not support prefix > 767, the create index will be rejected. -# Index length is truncated only for 'create index' , but error if we add -# prefix index with length > 3072 ---error ER_TOO_LONG_KEY -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar (3073)); -DROP TABLE worklog5743; - - -CREATE TABLE worklog5743 ( -col_1_BLOB BLOB (4000) , PRIMARY KEY (col_1_BLOB(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; -ALTER TABLE worklog5743 DROP PRIMARY KEY; -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_BLOB (500)); -ALTER TABLE worklog5743 DROP PRIMARY KEY; -# Negative case -# Again add index length > 3072. Expect error.Length exceeds maximum supported -# key length -# Index length is truncated only for 'create index' , but error if we add -# prefix index with length > 3072 ---error ER_TOO_LONG_KEY -ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_BLOB (3073)); - -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- -# Error on adding larger prefix if violates unique index. 
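The comments in this hunk describe an asymmetry that is easy to miss: with innodb_large_prefix enabled and a Barracuda row format, a CREATE INDEX prefix longer than 3072 bytes is truncated to 3072 (with a warning), whereas the same over-long prefix given to ALTER TABLE ... ADD PRIMARY KEY is rejected with ER_TOO_LONG_KEY. A short sketch under those assumptions (long_prefix_demo is an illustrative name, not part of the patch):

CREATE TABLE long_prefix_demo (c1 VARCHAR(4000), c2 VARCHAR(4000))
ROW_FORMAT=DYNAMIC ENGINE=InnoDB;
# Requested 4000 bytes; created as a 3072-byte prefix per the comments above.
CREATE INDEX long_idx ON long_prefix_demo (c1(4000));
DROP INDEX long_idx ON long_prefix_demo;
# The same length through ALTER TABLE is an error rather than a truncation.
--error ER_TOO_LONG_KEY
ALTER TABLE long_prefix_demo ADD PRIMARY KEY (c1(3073));
DROP TABLE long_prefix_demo;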
-CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) -) ROW_FORMAT=DYNAMIC, engine = innodb; -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 1000),REPEAT("c", 1000)), -REPEAT("o", 4000)); -INSERT INTO worklog5743 -VALUES(concat(REPEAT("a", 2000),REPEAT("b", 2000)), REPEAT("o", 4000)); ---error ER_DUP_ENTRY -ALTER TABLE worklog5743 ADD PRIMARY KEY `pk_idx` (col_1_varchar(3000)); -DROP TABLE worklog5743; - -# ----------------------------------------------------------------------------- -set global innodb_large_prefix=0; -# Prefix index > 767 is allowed if innodb_large_prefix is set to 1 ---error ER_TOO_LONG_KEY -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(3072)) -) ROW_FORMAT=DYNAMIC, engine = innodb; - - -# ----------------------------------------------------------------------------- -set global innodb_large_prefix=0; -# Backward compatibility test - Index lenghth > 767 is truncated for REDUNDANT -# and COMPACT -CREATE TABLE worklog5743 ( -col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) , -PRIMARY KEY (col_1_varchar(767)) -) engine = innodb; -INSERT INTO worklog5743 VALUES(REPEAT('a',4000),REPEAT('b',4000)); -# Prefix index > 767 is truncated with REDUNDANT and COMPACT ---enable_info -CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1000)); -ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT; ---disable_info -SHOW CREATE TABLE worklog5743; -DROP TABLE worklog5743; -#------------------------------------------------------------------------------ - ---disable_query_log -eval set global innodb_file_format = $innodb_file_format_orig; -eval set global innodb_file_per_table = $innodb_file_per_table_orig; -eval set global innodb_large_prefix = $innodb_large_prefix_orig; ---connection con1 ---disconnect con1 ---source include/wait_until_disconnected.inc ---enable_query_log ---connection default diff --git a/mysql-test/suite/innodb_zip/t/large_blob-master.opt b/mysql-test/suite/innodb_zip/t/large_blob-master.opt new file mode 100644 index 00000000000..90a3f0db9d4 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/large_blob-master.opt @@ -0,0 +1,3 @@ +--max_allowed_packet=200M +--innodb_buffer_pool_size=10M +--innodb_log_buffer_size=10M diff --git a/mysql-test/suite/innodb_zip/t/large_blob.test b/mysql-test/suite/innodb_zip/t/large_blob.test new file mode 100644 index 00000000000..dc1dc2eba29 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/large_blob.test @@ -0,0 +1,122 @@ +--echo # +--echo # This tests the use of large blobs in InnoDB. +--echo # + +--source include/have_innodb.inc +--source include/have_nodebug.inc +--source include/big_test.inc + +--disable_query_log +# These values can change during the test +let $innodb_file_per_table_orig = `SELECT @@innodb_file_per_table`; + +# Create a 20MB blob that does not compress easily. +# 1000 Random characters is enough to keep compression low. +set @alphabet="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; +set @r=abs(rand()*52) + 1; +set @blob=substr(@alphabet,@r,1); +let $1=1000; +while ($1 > 1) +{ + set @r=abs(rand()*52) + 1; + set @letter=substr(@alphabet,@r,1); + set @blob=concat(@blob,@letter); + dec $1; +} +# The loop above is extremely slow compared to repeat(). 
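To make the sizes explicit: the loop above assembles a 1000-character pseudo-random string, and the repeat() call on the next line expands it 200000 times, i.e. 200,000,000 characters, roughly 190 MiB. That is why the accompanying large_blob-master.opt raises max_allowed_packet to 200M. A trivial sanity check of the arithmetic, not part of the test:

SELECT 1000 * 200000 / 1024 / 1024 AS approx_longblob_mib;

After the assignment below, SELECT LENGTH(@longblob) would confirm the same figure.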
+set @longblob=repeat(@blob,200000); +--enable_query_log + +call mtr.add_suppression("InnoDB: Warning: a long semaphore wait"); + +SET GLOBAL innodb_file_per_table = OFF; + +--echo # +--echo # System tablespace, Row Format = Redundant +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--echo # +--echo # System tablespace, Row Format = Compact +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +SET GLOBAL innodb_file_per_table = ON; + +--echo # +--echo # Separate tablespace, Row Format = Redundant +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--echo # +--echo # Separate tablespace, Row Format = Compact +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--echo # +--echo # Separate tablespace, Row Format = Compressed, Key Block Size = 2k +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--echo # +--echo # Separate tablespace, Row Format = Compressed, Key Block Size = 1k +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--echo # +--echo # Separate tablespace, Row Format = Dynamic +--echo # +CREATE TABLE t1 ( + c1 INT DEFAULT NULL, + c2 LONGBLOB NOT NULL, + KEY k2 (c2(250), c1) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC; +INSERT INTO t1 VALUES (1, ''); +UPDATE t1 SET c2=@longblob; +DROP TABLE t1; + +--disable_query_log +EVAL SET GLOBAL innodb_file_per_table = $innodb_file_per_table_orig; +--enable_query_log diff --git a/mysql-test/suite/innodb_zip/t/restart.test b/mysql-test/suite/innodb_zip/t/restart.test new file mode 100644 index 00000000000..354e63a69f7 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/restart.test @@ -0,0 +1,602 @@ +# +# These test make sure that tables are visible after rebooting +# + +--source include/have_innodb.inc +--source include/have_innodb_zip.inc +--source include/have_partition.inc +--source include/not_embedded.inc +SET default_storage_engine=InnoDB; +LET $MYSQLD_DATADIR = `select @@datadir`; +LET $INNODB_PAGE_SIZE = `select @@innodb_page_size`; + +--disable_query_log +# This error is expected in the error log for this test. 
+call mtr.add_suppression("\\[ERROR\\] InnoDB: Error number 17 means 'File exists'"); +call mtr.add_suppression("\\[ERROR\\] InnoDB: Operating system error number (17|80) in a file operation."); +call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot create file .*t55_restart.isl"); +call mtr.add_suppression("\\[ERROR\\] InnoDB: The link file: .* already exists."); +call mtr.add_suppression("\\[ERROR\\] InnoDB: Cannot open datafile for read-only:"); +call mtr.add_suppression("\\[ERROR\\] InnoDB: Operating system error number 2 in a file operation."); +call mtr.add_suppression("\\[ERROR\\] InnoDB: The error means the system cannot find the path specified."); +call mtr.add_suppression("\\[ERROR\\] InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them."); +--enable_query_log + +--echo # +--echo # A series of tests to make sure tables are opened after restart. +--echo # Bug#13357607 Compressed file-per-table tablespaces fail to open +--echo # +# This bug was introduced without a regression test failing since +# there were no tests showing that tablespaces could be created and +# then read after reboot. +# + +--disable_query_log +let $MYSQL_DATA_DIR= `select @@datadir`; +let $data_directory = DATA DIRECTORY='$MYSQL_TMP_DIR/alt_dir'; + +let $innodb_file_per_table_orig=`select @@innodb_file_per_table`; +--enable_query_log + +set global innodb_file_per_table=on; + +--echo # +--echo # Create and insert records into a REDUNDANT row formatted table. +--echo # +CREATE TABLE t1_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=REDUNDANT ENGINE=InnoDB; +INSERT INTO t1_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +SHOW CREATE TABLE t1_restart; +SELECT count(*) FROM t1_restart; + +--echo # +--echo # Create and insert records into a COMPACT row formatted table. +--echo # +CREATE TABLE t2_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=COMPACT ENGINE=InnoDB; +INSERT INTO t2_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +SHOW CREATE TABLE t2_restart; +SELECT count(*) FROM t2_restart; + +--echo # +--echo # Create and insert records into a COMPRESSED row formatted table. 
+--echo # +CREATE TABLE t3_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 ENGINE=InnoDB; +INSERT INTO t3_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +SHOW CREATE TABLE t3_restart; +SELECT count(*) FROM t3_restart; + +--echo # +--echo # Create and insert records into a DYNAMIC row formatted table. +--echo # +CREATE TABLE t4_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=DYNAMIC ENGINE=InnoDB; +INSERT INTO t4_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +SHOW CREATE TABLE t4_restart; +SELECT count(*) FROM t4_restart; + +--echo # +--echo # Create and insert records into a table that uses a remote DATA DIRECTORY. +--echo # +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval CREATE TABLE t5_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=DYNAMIC ENGINE=InnoDB $data_directory; +INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t5_restart; +SELECT count(*) FROM t5_restart; + +--echo # +--echo # Create and insert records into a partitioned table that uses +--echo # a remote DATA DIRECTORY for each partition. +--echo # +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval CREATE TABLE t6_restart( + c1 INT AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 ENGINE=InnoDB + PARTITION BY HASH(c1) ( + PARTITION p0 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir', + PARTITION p1 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir', + PARTITION p2 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir'); +INSERT INTO t6_restart VALUES (0, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t6_restart; +SELECT count(*) FROM t6_restart; + +--echo # +--echo # Create and insert records into a subpartitioned table that uses +--echo # a remote DATA DIRECTORY for each subpartition. 
+--echo # +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +eval CREATE TABLE t7_restart( + c1 INT AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=DYNAMIC ENGINE=InnoDB + PARTITION BY RANGE(c1) SUBPARTITION BY HASH(c1) ( + PARTITION p0 VALUES LESS THAN (10) ( + SUBPARTITION s0 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir', + SUBPARTITION s1 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir'), + PARTITION p1 VALUES LESS THAN MAXVALUE ( + SUBPARTITION s2 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir', + SUBPARTITION s3 DATA DIRECTORY = '$MYSQL_TMP_DIR/alt_dir')); +INSERT INTO t7_restart VALUES (0, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t7_restart; +SELECT count(*) FROM t7_restart; + +--echo # +--echo # Create and insert records into a table that uses a general tablespace. +--echo # +CREATE TABLESPACE s1_restart ADD DATAFILE 's1_restart.ibd'; +CREATE TABLE t8_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=COMPACT ENGINE=InnoDB TABLESPACE=s1_restart; +INSERT INTO t8_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +SHOW CREATE TABLE t8_restart; +SELECT count(*) FROM t8_restart; +CREATE TABLE t9_restart(c1 DOUBLE AUTO_INCREMENT KEY, c2 CHAR(10), c3 VARCHAR(100), c4 DATE, c5 TEXT) + ROW_FORMAT=DYNAMIC ENGINE=InnoDB TABLESPACE=s1_restart; +INSERT INTO t9_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +SHOW CREATE TABLE t9_restart; +SELECT count(*) FROM t9_restart; + +--echo # +--echo # Show these tables in information_schema. +--echo # +--source suite/innodb/include/show_i_s_tables.inc +--source suite/innodb/include/show_i_s_tablespaces.inc + +--echo # +--echo # Shutdown the server and list the tablespace OS files +--echo # +--source include/shutdown_mysqld.inc + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir +--list_files $MYSQL_TMP_DIR/alt_dir +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test + +--echo # +--echo # Start the server and show that tables are still visible and accessible. 
+--echo # +--source include/start_mysqld.inc + +SHOW VARIABLES LIKE 'innodb_file_per_table'; +SHOW CREATE TABLE t1_restart; +SHOW CREATE TABLE t2_restart; +SHOW CREATE TABLE t3_restart; +SHOW CREATE TABLE t4_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t5_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t6_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t7_restart; +SHOW CREATE TABLE t8_restart; +SHOW CREATE TABLE t9_restart; + +INSERT INTO t1_restart (SELECT 0, c2, c3, c4, c5 FROM t1_restart); +INSERT INTO t2_restart (SELECT 0, c2, c3, c4, c5 FROM t2_restart); +INSERT INTO t3_restart (SELECT 0, c2, c3, c4, c5 FROM t3_restart); +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t6_restart (SELECT 0, c2, c3, c4, c5 FROM t6_restart); +INSERT INTO t7_restart (SELECT 0, c2, c3, c4, c5 FROM t7_restart); +INSERT INTO t8_restart (SELECT 0, c2, c3, c4, c5 FROM t8_restart); +INSERT INTO t9_restart (SELECT 0, c2, c3, c4, c5 FROM t9_restart); + +SELECT count(*) FROM t1_restart; +SELECT count(*) FROM t2_restart; +SELECT count(*) FROM t3_restart; +SELECT count(*) FROM t4_restart; +SELECT count(*) FROM t5_restart; +SELECT count(*) FROM t6_restart; +SELECT count(*) FROM t7_restart; +SELECT count(*) FROM t8_restart; +SELECT count(*) FROM t9_restart; + +--echo # +--echo # Show these tables in information_schema. +--echo # +--source suite/innodb/include/show_i_s_tables.inc +--source suite/innodb/include/show_i_s_tablespaces.inc + +DROP TABLE t1_restart; +DROP TABLE t2_restart; +DROP TABLE t3_restart; +# Tablespace for t4_restart will be moved later from default directory to a new directory +# and an ISL file will be created not using InnoDB. +# Table t5_restart will be expanded. +# Tables t6_restart and t7_restart will be truncated. +DROP TABLE t8_restart; +DROP TABLE t9_restart; +DROP TABLESPACE s1_restart; + +--echo # +--echo # Truncate the remote tablespaces. 
+--echo # +TRUNCATE TABLE t5_restart; +ALTER TABLE t6_restart TRUNCATE PARTITION p2; +ALTER TABLE t7_restart TRUNCATE PARTITION p1; + +--source suite/innodb/include/show_i_s_tablespaces.inc + +INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot'); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); +INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart); + +SELECT count(*) FROM t5_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t5_restart; + +SELECT count(*) FROM t6_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t6_restart; + +SELECT count(*) FROM t7_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t7_restart; + +--echo # +--echo # Shutdown the server and make a backup of a tablespace +--echo # +--source include/shutdown_mysqld.inc + +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd $MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd.bak +--copy_file $MYSQL_DATA_DIR/test/t5_restart.isl $MYSQL_DATA_DIR/test/t5_restart.isl.bak +--copy_file $MYSQL_DATA_DIR/test/t5_restart.frm $MYSQL_DATA_DIR/test/t5_restart.frm.bak + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test + +--echo # +--echo # Start the server and show the tablespaces. +--echo # +--source include/start_mysqld.inc + +SHOW VARIABLES LIKE 'innodb_file_per_table'; + +--source suite/innodb/include/show_i_s_tablespaces.inc + +SELECT count(*) FROM t5_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t5_restart; + +SELECT count(*) FROM t6_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t6_restart; + +SELECT count(*) FROM t7_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t7_restart; + +--echo # +--echo # Try to rename a tablespace to a file that already exists +--echo # + +--copy_file $MYSQL_DATA_DIR/test/t5_restart.frm.bak $MYSQL_DATA_DIR/test/t55_restart.frm +--error ER_TABLE_EXISTS_ERROR +RENAME TABLE t5_restart TO t55_restart; +--remove_file $MYSQL_DATA_DIR/test/t55_restart.frm +--remove_file $MYSQL_DATA_DIR/test/t5_restart.frm.bak + +--copy_file $MYSQL_DATA_DIR/test/t5_restart.isl.bak $MYSQL_DATA_DIR/test/t55_restart.isl +--error ER_ERROR_ON_RENAME +RENAME TABLE t5_restart TO t55_restart; +--remove_file $MYSQL_DATA_DIR/test/t55_restart.isl +--remove_file $MYSQL_DATA_DIR/test/t5_restart.isl.bak + +#--copy_file $MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd.bak $MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +# This RENAME TABLE works of Linux but gets ER_ERROR_ON_RENAME on Windows +#--error ER_ERROR_ON_RENAME +#RENAME TABLE t5_restart TO t55_restart; +#--remove_file $MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd.bak + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test + +--echo # +--echo # Rename file table and tablespace +--echo # + +RENAME TABLE t5_restart TO t55_restart; +RENAME TABLE t6_restart TO t66_restart; +RENAME TABLE t7_restart TO t77_restart; + +--source suite/innodb/include/show_i_s_tablespaces.inc + +INSERT INTO t55_restart (SELECT 0, c2, c3, 
c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t55_restart; + +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t66_restart; + +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t77_restart; + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test + +--echo # +--echo # Restart the server +--echo # +--source include/restart_mysqld.inc +SHOW VARIABLES LIKE 'innodb_file_per_table'; + +--source suite/innodb/include/show_i_s_tablespaces.inc + +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t55_restart; + +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t66_restart; + +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t77_restart; + +--echo # +--echo # Shutdown the server +--echo # +--source include/shutdown_mysqld.inc + +--echo # +--echo # Move the remote tablespaces to a new location and change the ISL files +--echo # +--mkdir $MYSQL_TMP_DIR/new_dir +--mkdir $MYSQL_TMP_DIR/new_dir/test +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test +--echo ---- MYSQL_TMP_DIR/new_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/new_dir/test + +--echo # Moving tablespace 't4_restart' from MYSQL_DATA_DIR to MYSQL_TMP_DIR/new_dir +--copy_file $MYSQL_DATA_DIR/test/t4_restart.ibd $MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd +--remove_file $MYSQL_DATA_DIR/test/t4_restart.ibd +--exec echo $MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd > $MYSQL_DATA_DIR/test/t4_restart.isl + +--echo # Moving tablespace 't55_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd $MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd +--remove_file $MYSQL_DATA_DIR/test/t55_restart.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd > $MYSQL_DATA_DIR/test/t55_restart.isl + +--echo # Moving tablespace 't66_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p0.ibd $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p0.ibd +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p1.ibd $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p1.ibd +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p2.ibd $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p2.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p0.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p1.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t66_restart#P#p2.ibd +--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p0.isl +--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p1.isl 
+--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p2.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p0.ibd > $MYSQL_DATA_DIR/test/t66_restart#P#p0.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p1.ibd > $MYSQL_DATA_DIR/test/t66_restart#P#p1.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p2.ibd > $MYSQL_DATA_DIR/test/t66_restart#P#p2.isl + +--echo # Moving tablespace 't77_restart' from MYSQL_TMP_DIR/alt_dir to MYSQL_TMP_DIR/new_dir +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p0#SP#s0.ibd $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s0.ibd +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p0#SP#s1.ibd $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s1.ibd +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p1#SP#s2.ibd $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s2.ibd +--copy_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p1#SP#s3.ibd $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s3.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p0#SP#s0.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p0#SP#s1.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p1#SP#s2.ibd +--remove_file $MYSQL_TMP_DIR/alt_dir/test/t77_restart#P#p1#SP#s3.ibd +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s0.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s1.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s2.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s3.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s0.ibd > $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s0.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s1.ibd > $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s1.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s2.ibd > $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s2.isl +--exec echo $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s3.ibd > $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s3.isl + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/alt_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/alt_dir/test +--echo ---- MYSQL_TMP_DIR/new_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/new_dir/test + +--echo # +--echo # Start the server and check tablespaces. +--echo # +--source include/start_mysqld.inc + +--source suite/innodb/include/show_i_s_tablespaces.inc + +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +SELECT count(*) FROM t4_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t4_restart; + +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t55_restart; + +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t66_restart; + +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +SHOW CREATE TABLE t77_restart; + + +--echo # +--echo # Shutdown the server +--echo # +--source include/shutdown_mysqld.inc + +--echo # +--echo # Move the remote tablespaces back to the default datadir and delete the ISL file. 
+--echo # + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/new_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/new_dir/test + +--echo # Moving 't4_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +--copy_file $MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd $MYSQL_DATA_DIR/test/t4_restart.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd +--remove_file $MYSQL_DATA_DIR/test/t4_restart.isl + +--echo # Moving 't55_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +--copy_file $MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd $MYSQL_DATA_DIR/test/t55_restart.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd +--remove_file $MYSQL_DATA_DIR/test/t55_restart.isl + +--echo # Moving 't66_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +--copy_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p0.ibd $MYSQL_DATA_DIR/test/t66_restart#P#p0.ibd +--copy_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p1.ibd $MYSQL_DATA_DIR/test/t66_restart#P#p1.ibd +--copy_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p2.ibd $MYSQL_DATA_DIR/test/t66_restart#P#p2.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p0.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p1.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t66_restart#P#p2.ibd +--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p0.isl +--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p1.isl +--remove_file $MYSQL_DATA_DIR/test/t66_restart#P#p2.isl + +--echo # Moving 't77_restart' from MYSQL_TMP_DIR/new_dir to MYSQL_DATA_DIR +--copy_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s0.ibd $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s0.ibd +--copy_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s1.ibd $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s1.ibd +--copy_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s2.ibd $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s2.ibd +--copy_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s3.ibd $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s3.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s0.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p0#SP#s1.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s2.ibd +--remove_file $MYSQL_TMP_DIR/new_dir/test/t77_restart#P#p1#SP#s3.ibd +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s0.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p0#SP#s1.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s2.isl +--remove_file $MYSQL_DATA_DIR/test/t77_restart#P#p1#SP#s3.isl + +--echo ---- MYSQL_DATA_DIR/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_DATA_DIR/test +--echo ---- MYSQL_TMP_DIR/new_dir/test +--replace_result #P# #p# #SP# #sp# +--list_files $MYSQL_TMP_DIR/new_dir/test + +--echo # +--echo # Start the server and check tablespaces. 
+--echo # +-- source include/start_mysqld.inc + +--source suite/innodb/include/show_i_s_tablespaces.inc + +INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart); +SELECT count(*) FROM t4_restart; +SHOW CREATE TABLE t4_restart; + +INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart); +SELECT count(*) FROM t55_restart; +SHOW CREATE TABLE t55_restart; + +INSERT INTO t66_restart (SELECT 0, c2, c3, c4, c5 FROM t66_restart); +SELECT count(*) FROM t66_restart; +SHOW CREATE TABLE t66_restart; + +INSERT INTO t77_restart (SELECT 0, c2, c3, c4, c5 FROM t77_restart); +SELECT count(*) FROM t77_restart; +SHOW CREATE TABLE t77_restart; + + +--echo # +--echo # Cleanup +--echo # + +DROP TABLE t4_restart; +DROP TABLE t55_restart; +DROP TABLE t66_restart; +DROP TABLE t77_restart; + +--rmdir $MYSQL_TMP_DIR/alt_dir/test +--rmdir $MYSQL_TMP_DIR/alt_dir +--rmdir $MYSQL_TMP_DIR/new_dir/test +--rmdir $MYSQL_TMP_DIR/new_dir + +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +-- enable_query_log + diff --git a/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test new file mode 100644 index 00000000000..df4e66967f7 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6344_compress_level.test @@ -0,0 +1,135 @@ +#******************************************************************* +# This testcase is to test the funcitionality of wl#6344 +# When the innodb_compression_level=0 create a table with page size +# 1K and load data +# When the innodb_compression_level=9 create a table with page size +# 1K and load data +# compare the size of the both tables. +# The size of the table when compression level=0 should be +# greater than the +# the size of the table when compression level=9 +#******************************************************************* +--source include/have_innodb.inc +--source include/have_innodb_zip.inc + +USE test; +DROP TABLE IF EXISTS tab5; +DROP TABLE IF EXISTS tab6; + +--echo #set the other madatory flags before test starts +SET GLOBAL Innodb_file_per_table=on; +let $innodb_compression_level = `SELECT @@global.innodb_compression_level`; + +--echo #set the compression level=0 (No compress) +SET global innodb_compression_level=0; + +-- echo #check the compression level and the compressed_pages is default +SELECT @@innodb_compression_level; +SELECT @@Innodb_file_per_table; + +-- echo #create table with 1K block size +CREATE TABLE tab5 (col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 longtext , +col_8 longtext ,col_9 longtext , +col_10 longtext ,col_11 int auto_increment primary key) +ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; + +-- echo #create indexes +CREATE INDEX idx1 ON tab5(col_4(10)); +CREATE INDEX idx2 ON tab5(col_5(10)); +CREATE INDEX idx3 ON tab5(col_6(10)); +CREATE INDEX idx4 ON tab5(col_7(10)); +CREATE INDEX idx5 ON tab5(col_8(10)); +CREATE INDEX idx6 ON tab5(col_11); + +--echo #load the with repeat function +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +SET @col_10 = repeat('j', 100); + +--echo #insert 10 records +let $i = 10; +while ($i) { + +eval INSERT INTO tab5(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES 
(@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +dec $i; + +} + +--echo #set the compression level=9 (High compress) +SET global innodb_compression_level=9; + +-- echo #create table with 1K block size +CREATE TABLE tab6 (col_1 CHAR (255) , +col_2 VARCHAR (255), col_3 longtext, +col_4 longtext,col_5 longtext, +col_6 longtext , col_7 longtext , +col_8 longtext ,col_9 longtext , +col_10 longtext ,col_11 int auto_increment primary key) +ENGINE = innodb ROW_FORMAT=compressed key_block_size=1; + +-- echo #create indexes +CREATE INDEX idx1 ON tab6(col_4(10)); +CREATE INDEX idx2 ON tab6(col_5(10)); +CREATE INDEX idx3 ON tab6(col_6(10)); +CREATE INDEX idx4 ON tab6(col_7(10)); +CREATE INDEX idx5 ON tab6(col_8(10)); +CREATE INDEX idx6 ON tab6(col_11); + +--echo #load the with repeat function +SET @col_1 = repeat('a', 100); +SET @col_2 = repeat('b', 100); +SET @col_3 = repeat('c', 100); +SET @col_4 = repeat('d', 100); +SET @col_5 = repeat('e', 100); +SET @col_6 = repeat('f', 100); +SET @col_7 = repeat('g', 100); +SET @col_8 = repeat('h', 100); +SET @col_9 = repeat('i', 100); +SET @col_10 = repeat('j', 100); + +--echo #insert 10 records +let $i = 10; +while ($i) { + +eval INSERT INTO tab6(col_1,col_2,col_3,col_4,col_5,col_6,col_7,col_8,col_9,col_10) +VALUES (@col_1,@col_2,@col_3,@col_4,@cl_5,@col_6,@col_7,@col_8,@col_9,@col_10); +dec $i; +} + +-- echo #diff the sizes of the No compressed table and high compressed table +SET @size=(SELECT +(SELECT (SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024) +FROM INFORMATION_SCHEMA.TABLES +WHERE table_name='tab5' AND ENGINE='InnoDB' AND table_schema='test') +- +(SELECT SUM(DATA_LENGTH+INDEX_LENGTH)/1024/1024 +FROM INFORMATION_SCHEMA.TABLES +WHERE table_name='tab6' AND ENGINE='InnoDB' AND table_schema='test') +FROM DUAL); + +--echo #check the size of the table, it should not be Negative value +--echo #The results of this query Test pass = 1 and fail=0 +SELECT @size >= 0; + + +--echo # +--echo # Cleanup +--echo # +DROP TABLE tab5; +DROP TABLE tab6; + +--echo #reset back the compression_level to default. +--disable_query_log +eval SET GLOBAL innodb_compression_level=$innodb_compression_level; +--enable_query_log diff --git a/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test b/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test new file mode 100644 index 00000000000..445fd812183 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6347_comp_indx_stat.test @@ -0,0 +1,1337 @@ +--echo #****************************************************************** +--echo # Test 1: Test the interaction between stats and compression level +--echo # and logging of compressed pages configuration +--echo # This testcase is to verify the table/idex level compression stats +--echo # When the flags are set as follows +--echo # innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 +--echo # page size 1K,2K,4K,8K,16K +--echo # check the size and compression stats of the table tab5 +--echo #****************************************************************** + +# This test case needs InnoDB. 
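The test below relies on the per-index compression counters that innodb_cmp_per_index_enabled turns on; the sourced innodb_stats_comp_index.inc presumably reads them from INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX. For orientation, a stand-alone query of that view for the tab5 table created by the test would look roughly like this (column names per the standard 5.6/5.7 definition of the view):

SELECT database_name, table_name, index_name,
compress_ops, compress_ops_ok, uncompress_ops
FROM INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX
WHERE database_name = 'test' AND table_name = 'tab5'
ORDER BY index_name;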
+-- source include/have_innodb.inc +-- source include/not_embedded.inc +-- source include/have_innodb_16k.inc +-- source include/big_test.inc + +-- vertical_results + +let MYSQLD_DATADIR=`SELECT @@datadir`; +let $innodb_compression_level = `SELECT @@global.innodb_compression_level`; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_level=0; + +--echo #****************************************************************** +--echo # Test 1-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 1K +--echo #****************************************************************** + +--echo # create a table with page size=1K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=1; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for deterministic reasons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=0; + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 1-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 2K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +--echo # create a table with page size=2K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=2; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=0; + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=2; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 1-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 4K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +--echo # create a table with page size=4K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=4; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 1-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 8K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +--echo # create a table with page size=8K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=8; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 1-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=0 with page size 16K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +--echo # create a table with page size=16K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=16; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 2: test the interaction between wl6347 & wl6344 (2.2) +--echo # This testcase is to verify the table/idex level compression stats +--echo # When the flags are set as follows +--echo # innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 +--echo # page size 1K,2K,4K,8K,16K +--echo # check the size and compression stats of the table tab5 +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; + + +--echo #****************************************************************** +--echo # Test 2-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 1K +--echo #****************************************************************** + +--echo # create a table with page size=1K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=1; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 2097152 +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; + + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 2-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 2K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=2K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=2; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 2097152 +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_level=9; + + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 2-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 4K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=4K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=4; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 159744 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 2-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 8K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=8K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=8; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 122880 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 212992 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 2-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=9 with page size 16K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=16K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=16; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 245760 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 344064 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 3: test the interaction between wl6347 & wl6344 (2.3) +--echo # This testcase is to verify the table/idex level compression stats +--echo # When the flags are set as follows +--echo # innodb_cmp_per_index_enabled=ON and +--echo # innodb_compression_level=6 (default) +--echo # page size 1K,2K,4K,8K,16K +--echo # check the size and compression stats of the table tab5 +--echo #****************************************************************** + +--echo #****************************************************************** +--echo # Test 3-1K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 1K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SET GLOBAL innodb_compression_level=default; + +--echo # create a table with page size=1K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=1; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
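The 100 rows referred to throughout are loaded by suite/innodb_zip/include/innodb_load_data.inc, driven by $i; its body is not shown in this hunk. A minimal sketch of the kind of loop it presumably runs is given below; the REPEAT() values are purely illustrative assumptions chosen to fit tab5's BLOB/TEXT columns:

  while ($i)
  {
    INSERT INTO tab5 VALUES (REPEAT('a',100), REPEAT('b',100), REPEAT('c',100),
                             REPEAT('d',100), REPEAT('e',100), REPEAT('f',100),
                             REPEAT('g',100), REPEAT('h',100), REPEAT('i',100));
    dec $i;
  }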
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 3-2K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 2K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SET GLOBAL innodb_compression_level=default; + +--echo # create a table with page size=2K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=2; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 86016 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 3-4K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 4K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SET GLOBAL innodb_compression_level=default; + +--echo # create a table with page size=4K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=4; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 65536 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 86016 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 3-8K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 8K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SET GLOBAL innodb_compression_level=default; + +--echo # create a table with page size=8K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=8; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 122880 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 172032 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 3-16K: innodb_cmp_per_index_enabled=ON and innodb_compression_level=Def with page size 16K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +SET GLOBAL innodb_compression_level=default; + +--echo # create a table with page size=16K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=16; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 245760 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
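The "fetch the compressed page" steps source suite/innodb_zip/include/innodb_fetch_records.inc, which is not shown here either. Presumably it simply reads tab5 so that compressed pages are decompressed and the per-index counters move; the exact statement is an assumption, but something as simple as the following would have that effect:

  SELECT COUNT(*) FROM tab5;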
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +--echo # The size of the file with 0 compress = 344064 +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 4: test the interaction between wl6347 & wl6344 (2.5 & 2.6) +--echo # This testcase is to verify the table/idex level compression stats +--echo # When the flags are set as follows +--echo # innodb_cmp_per_index_enabled=ON and +--echo # Innodb_compression_failure_threshold_pct=0 +--echo # page size 1K,2K,4K,8K,16K +--echo # check the size and compression stats of the table tab5 +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_compression_failure_threshold_pct=0; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # check the flags +SELECT @@innodb_cmp_per_index_enabled; +SELECT @@innodb_compression_failure_threshold_pct; +SELECT @@innodb_file_per_table; +SELECT @@innodb_compression_level; + +--echo #****************************************************************** +--echo # Test 4-1K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 1K +--echo #****************************************************************** + +--echo # create a table with page size=1K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=1; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_compression_failure_threshold_pct=0; +SET GLOBAL innodb_file_per_table=on; + + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. 
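After the restart, suite/innodb_zip/include/innodb_stats_restart.inc is sourced with @comp_val and @uncomp_val to print the "pass = 1 fail = 0" verdict. Its body is not shown; a plausible shape, assuming it verifies that the in-memory counters were reset by the restart and that only the single page fetch done after the restart has been counted since, would be the following (the column comparison is a guess, not the include's actual logic):

  SELECT compress_ops = @comp_val AND uncompress_ops >= @uncomp_val AS testcase
  FROM information_schema.innodb_cmp_per_index
  WHERE database_name = 'test' AND table_name = 'tab5'
  LIMIT 1;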
+--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 4-2K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 2K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=2K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=2; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 4-4K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 4K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=4K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=4; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 4-8K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 8K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=8K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=8; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 4-16K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=0 with page size 16K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=16K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=16; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 5: test the interaction between wl6347 & wl6344 (2.7) +--echo # This testcase is to verify the table/idex level compression stats +--echo # When the flags are set as follows +--echo # innodb_cmp_per_index_enabled=ON and +--echo # Innodb_compression_failure_threshold_pct=10 +--echo # page size 1K,2K,4K,8K,16K +--echo # check the size and compression stats of the table tab5 +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; +SET GLOBAL innodb_compression_level=Default; + + +--echo # check the flags +SELECT @@innodb_cmp_per_index_enabled; +SELECT @@innodb_compression_failure_threshold_pct; +SELECT @@innodb_file_per_table; +SELECT @@innodb_compression_level; + +--echo #****************************************************************** +--echo # Test 5-1K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 1K +--echo #****************************************************************** + +--echo # create a table with page size=1K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=1; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +-- source include/restart_mysqld.inc + +--echo # set the flag on (default off) +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # set the flags +SET GLOBAL innodb_compression_failure_threshold_pct=10; +SET GLOBAL innodb_file_per_table=on; +SET GLOBAL innodb_compression_failure_threshold_pct=10; + + +--echo # fetch the compressed page and check the stats +--echo # The stats figure may be different/same for each restart. +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table +-- echo # testcase : pass = 1 fail = 0 +SET @comp_val=0; +SET @uncomp_val=1; +--source suite/innodb_zip/include/innodb_stats_restart.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 5-2K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 2K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; + +--echo # create a table with page size=2K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=2; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=2; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 5-4K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 4K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=4K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=4; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 5-8K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 8K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; +SET GLOBAL innodb_compression_failure_threshold_pct=10; + +--echo # create a table with page size=8K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=8; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. 
+--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 5-16K: innodb_cmp_per_index_enabled=ON and Innodb_compression_failure_threshold_pct=10 with page size 16K +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=OFF; +SET GLOBAL innodb_cmp_per_index_enabled=ON; + +--echo # create a table with page size=16K +--echo # create indexes on each column.(total 9 indexes) +let $block_size=16; +--source suite/innodb_zip/include/innodb_create_tab_indx.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # for determintic resons simple data should be inserted. +--echo # insert some 100 records +let $i = 100; +--source suite/innodb_zip/include/innodb_load_data.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed page and check the stats +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +--echo # fetch the compressed same page once again and check the stats +--echo # the stat figures should be same as above query +--source suite/innodb_zip/include/innodb_fetch_records.inc + +--echo # check the stats of the table & size of the table +SET @inl_val=1; +--source suite/innodb_zip/include/innodb_stats_comp_index.inc + +DROP TABLE tab5; + +--echo #****************************************************************** +--echo # Test 6: Create multiple tables & indexes having same name in 2 diff DB's +--echo # Check the stats of the table. 
(1.1) +--echo #****************************************************************** + +--echo # reset the stat table before starting next testcase +SET GLOBAL innodb_cmp_per_index_enabled=0; +SET GLOBAL innodb_cmp_per_index_enabled=1; + +SET GLOBAL innodb_file_per_table=ON; +SET GLOBAL innodb_compression_level=default; +SET GLOBAL innodb_compression_failure_threshold_pct=default; + + +--echo # create a table page size=1K +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +CREATE INDEX idx1 ON tab5(col_1(10)); + +--echo # check the stats of the table +SELECT database_name,table_name,index_name,compress_ops,compress_ops_ok +FROM information_schema.innodb_cmp_per_index +WHERE database_name='test' and table_name='tab5' +ORDER BY index_name,table_name,database_name ; + +CREATE DATABASE sb; +USE sb; + +--echo # create a table page size=1K (testcase-1) +CREATE TABLE tab5(col_1 TINYBLOB, col_2 TINYTEXT,col_3 BLOB, +col_4 TEXT,col_5 MEDIUMBLOB,col_6 MEDIUMTEXT, +col_7 LONGBLOB,col_8 LONGTEXT,col_9 VARCHAR(255)) +ENGINE=INNODB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1; + +CREATE INDEX idx1 ON tab5(col_1(10)); + +SELECT database_name,table_name,index_name,compress_ops,compress_ops_ok +FROM information_schema.innodb_cmp_per_index +WHERE database_name='sb' and table_name='tab5' +ORDER BY index_name,table_name,database_name ; + +DROP TABLE tab5, test.tab5; +DROP DATABASE sb; + +--echo # reset the flags +eval SET GLOBAL innodb_file_per_table=default; +eval SET GLOBAL innodb_cmp_per_index_enabled=default; +--disable_query_log +eval SET GLOBAL innodb_compression_level=$innodb_compression_level; +--enable_query_log +eval SET GLOBAL innodb_compression_failure_threshold_pct=default; diff --git a/mysql-test/suite/innodb_zip/t/wl6470_1.test b/mysql-test/suite/innodb_zip/t/wl6470_1.test new file mode 100644 index 00000000000..ecf6b601d3d --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6470_1.test @@ -0,0 +1,60 @@ +--source include/have_innodb.inc +--source include/have_innodb_zip.inc +--source include/big_test.inc + +#################################################################### +# TC to test temp-table DML optimization changes for correctness # +# Sceanrio covered: # +# 1. bulk-insert with rollback + commit: this will ensure btree # +# node split with rollback and commit. # +#################################################################### + +#------------------------------------------------------------------- +# +# 1. bulk-insert with rollback + commit: this will ensure btree # +# node split with rollback and commit. 
#
+#
+create temporary table t1
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb;
+create temporary table t2
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb;
+--source suite/innodb_zip/include/innodb_dml_ops.inc
+drop table t1;
+drop table t2;
+#
+--disable_warnings
+create temporary table t1
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb key_block_size = 4;
+set innodb_strict_mode=off;
+create temporary table t2
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb key_block_size = 8;
+set innodb_strict_mode=default;
+--enable_warnings
+--source suite/innodb_zip/include/innodb_dml_ops.inc
+drop table t1;
+drop table t2;
+#
+let $file_per_table = `select @@innodb_file_per_table`;
+set global innodb_file_per_table = 0;
+create temporary table t1
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb;
+create temporary table t2
+ (keyc int, c1 char(100), c2 char(100),
+ primary key(keyc), index sec_index(c1)
+ ) engine = innodb;
+--source suite/innodb_zip/include/innodb_dml_ops.inc
+drop table t1;
+drop table t2;
+eval set global innodb_file_per_table = $file_per_table;
+#
+
diff --git a/mysql-test/suite/innodb_zip/t/wl6470_2.test b/mysql-test/suite/innodb_zip/t/wl6470_2.test
new file mode 100644
index 00000000000..020c27b97b9
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/wl6470_2.test
@@ -0,0 +1,468 @@
+--source include/have_innodb.inc
+--source include/have_innodb_zip.inc
+--source include/no_valgrind_without_big.inc
+
+####################################################################
+# TC to test temp-table DML optimization changes for correctness #
+# Scenario covered in single testcase: #
+# - Tables with row format (redundant, compressed, dynamic, compact) #
+# - Table with primary,composite,prefix,secondary index #
+# - Insert/delete/update with transactions #
+# - Transaction with commit,rollback,savepoint statements #
+# - Concurrency by execution of two clients creating tables with #
+# same names #
+# - Inserting data using #
+# - Insert into .. , Load data infile..,insert ignore #
+# - Insert into .. on duplicate update #
+# - Check basic delete and update [ignore] #
+# - Check constraints like duplicate key,default value #
+# - Alter add column , add primary key #
+# - with prepare and execute statement #
+####################################################################
+
+# run for page size >= 8k
+--disable_warnings
+if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value >= 8192`)
+{
+  --skip Test requires InnoDB with page size >= 8k.
+}
+--enable_warnings
+
+
+# Save initial values of server variable
+--disable_query_log
+let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
+SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
+--enable_query_log
+
+# Create procedure to perform
+# 1. Create temp table with row types, index, sufficient data types
+# 2.
Perform DML with transaction +delimiter |; +create procedure populate_tables() + begin + declare n int default 20; + declare inner_loop int default 100; + set global innodb_file_per_table=on; + drop table if exists t1,t2,t3,t4; + + create temporary table t1(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=redundant; + + create temporary table t2(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=compact; + + create temporary table t3(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=compressed key_block_size=4; + + create temporary table t4(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=dynamic; + + create temporary table t5(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb; + + create temporary table t6 ( a int ) engine = innodb; + insert into t6 values (50),(100),(150),(190); + + while (n > 0) do + start transaction; + insert into t1 values(n,n,repeat(concat(' tc3_',n),30), + repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), + repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), + now(),(100.55+n)); + insert into t2 values(n,n,repeat(concat(' tc3_',n),30), + repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), + repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), + now(),(100.55+n)); + insert into t3 values(n,n,repeat(concat(' tc3_',n),30), + repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), + repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), + now(),(100.55+n)); + insert into t4 values(n,n,repeat(concat(' tc3_',n),30), + repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), + repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), + now(),(100.55+n)); + insert into t5 values(n,n,repeat(concat(' tc3_',n),30), + repeat(concat(' tc4_',n),800),repeat(concat(' tc_',n),800), + repeat(concat(' tc6_',n),800),repeat(concat(' tc7_',n),800), + now(),(100.55+n)); + + if (n > 10) then + commit; + else + delete from t1 where c1 > 10 ; + delete from t2 where c1 > 10 ; + delete from t3 where c1 > 10 ; + delete from t4 where c1 > 10 ; + delete from t5 where c1 > 10 ; + + rollback; + start transaction; + update t1 set c1 = c1 + 1000 where c1 > 10; + update t2 set c1 = c1 + 1000 where c1 > 10; + update t3 set c1 = c1 + 1000 where c1 > 10; + update t4 set c1 = c1 + 1000 where c1 > 10; + update t5 set c1 = c1 + 1000 where c1 > 10; + rollback; + end if; + 
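# Second batch within the same loop iteration: insert rows keyed by
# n+inner_loop, delete the 100-110 key range and shift keys above 110, then
# take savepoints a, b and c around two further rounds of inserts. Rolling
# back to b and then to a discards the 300+ and 400+ rows, so the commit that
# follows keeps only the work done before savepoint a.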
+ start transaction; + insert into t1 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t2 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t3 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t4 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t5 values(n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + + delete from t1 where c1 between 100 and 110; + delete from t2 where c1 between 100 and 110; + delete from t3 where c1 between 100 and 110; + delete from t4 where c1 between 100 and 110; + delete from t5 where c1 between 100 and 110; + + update t1 set c1 = c1+1 where c1>110; + update t2 set c1 = c1+1 where c1>110; + update t3 set c1 = c1+1 where c1>110; + update t4 set c1 = c1+1 where c1>110; + update t5 set c1 = c1+1 where c1>110; + + savepoint a; + + insert into t1 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t2 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t3 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t4 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t5 values(300+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + savepoint b; + + insert into t1 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' 
tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t2 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t3 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t4 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + insert into t5 values(400+n+inner_loop,n+inner_loop,repeat(concat(' tc3_',n+inner_loop),30), + repeat(concat(' tc4_',n+inner_loop),800),repeat(concat(' tc_',n+inner_loop),800), + repeat(concat(' tc6_',n+inner_loop),245),repeat(concat(' tc7_',n+inner_loop),245), + now(),(100.55+n+inner_loop)); + savepoint c; + rollback to b; + rollback to a; + commit; + commit; + rollback; + set n = n - 1; + end while; +end| +delimiter ;| + +# Create two client for concurrent execution +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +--echo #---client 1 : dml operation ---" +connection con1; +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +-- enable_query_log +-- disable_query_log +# call procedure +--send call populate_tables(); +-- enable_query_log + +--echo #---client 2 : dml operation ---" +connection con2; +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +-- enable_query_log +-- disable_query_log +# call procedure +--send call populate_tables(); +-- enable_query_log + +# check data of client connection 1 +--echo # In connection 1 +connection con1; +--reap +# 20 rows exepceted in 5 tables +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select c1 from t1; +select c1 from t2; +select c1 from t3; +select c1 from t4; +select c1 from t5; +# check data of client connection 2 +--echo # In connection 2 +connection con2; +--reap +# 20 rows exepceted in 5 tables +select count(*) from t1; +select count(*) from t2; +select count(*) from t3; +select count(*) from t4; +select count(*) from t5; +select c1 from t1; +select c1 from t2; +select c1 from t3; +select c1 from t4; +select c1 from t5; + +--echo # In connection 1 +connection con1; + +set autocommit = 0; +# Check duplicate key constraint + insert ignore +--error ER_DUP_ENTRY +insert into t1 values (20,1,'a','a','a','a','a',now(),100.55); +insert ignore into t1 values (20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t2 values (20,1,'a','a','a','a','a',now(),100.55); +insert ignore into t2 values (20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t3 values (20,1,'a','a','a','a','a',now(),100.55); +insert ignore into t3 values (20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t4 values (20,1,'a','a','a','a','a',now(),100.55); +insert ignore into t4 
values (20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t5 values (20,1,'a','a','a','a','a',now(),100.55); +insert ignore into t5 values (20,1,'a','a','a','a','a',now(),100.55); + +# check rollback due to duplicate value in second record of insert +--error ER_DUP_ENTRY +insert into t1 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t2 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t3 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t4 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); +--error ER_DUP_ENTRY +insert into t5 values (1,1,'a','a','a','a','a',now(),100.55), +(20,1,'a','a','a','a','a',now(),100.55); + +set autocommit = 1; + +select c1,c2 from t1 where c1 in (20,1); +select c1,c2 from t2 where c1 in (20,1); +select c1,c2 from t3 where c1 in (20,1); +select c1,c2 from t4 where c1 in (20,1); +select c1,c2 from t5 where c1 in (20,1); + +#replace statement +replace into t1 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t2 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t3 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t4 values (20,1,'a','a','a','a','a',now(),100.55); +replace into t5 values (20,1,'a','a','a','a','a',now(),100.55); +# verify row is replaced from (20,20) to (20,1) +select c1,c2,c3,c4,c5,c6,c7,c9 from t1 where c1 = 20; +select c1,c2,c3,c4,c5,c6,c7,c9 from t2 where c1 = 20; +select c1,c2,c3,c4,c5,c6,c7,c9 from t3 where c1 = 20; +select c1,c2,c3,c4,c5,c6,c7,c9 from t4 where c1 = 20; +select c1,c2,c3,c4,c5,c6,c7,c9 from t5 where c1 = 20; + +# Update ignore. 
statement is gonored as 20 value exits +update ignore t1 set c1 = 20 where c1 = 140 ; +update ignore t2 set c1 = 20 where c1 = 140 ; +update ignore t3 set c1 = 20 where c1 = 140 ; +update ignore t4 set c1 = 20 where c1 = 140 ; +update ignore t5 set c1 = 20 where c1 = 140 ; +# see record 140 is present as last update ignored +select count(*) from t1 where c1 = 140; +select count(*) from t2 where c1 = 140; +select count(*) from t3 where c1 = 140; +select count(*) from t4 where c1 = 140; +select count(*) from t5 where c1 = 140; + +# Load data infile +--echo "running select * into outfile from t1 ; +--disable_query_log +eval select * into outfile "$MYSQLTEST_VARDIR/tmp/t1.outfile" from t1; +--enable_query_log +# Create table as select +create temporary table temp_1 engine = innodb as select * from t1 where 1=2; +select count(*) from temp_1; +--echo "running load data infile into temp_1 ; +--disable_query_log +eval load data infile '$MYSQLTEST_VARDIR/tmp/t1.outfile' into table temp_1; +--enable_query_log +select count(*) from temp_1; + +# Alter table to add column and primary key +alter table temp_1 add column c10 int default 99 , +add column c11 varchar(100) default 'test'; +alter table temp_1 add primary key (c1); +insert into temp_1 (c1,c2,c3,c4,c5,c6,c7,c8,c9) values (-1,-1,'a','a','a','a','a',now(),100.55); +select c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 from temp_1 where c1 < 0; +select count(*) from temp_1 where c10 = 99 and c11 like 'test'; +# insert on duplicate key update +insert into temp_1 (c1,c2,c3,c4,c5,c6,c7,c8,c9) values (-1,-1,'a','a','a','a','a',now(),100.55) +on duplicate key update c1=-2,c2=-2; +select c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 from temp_1 where c1 < 0; + +#cleanup +drop table t1 ,t2 ,t3,t4,t5,t6,temp_1; +disconnect con1; + +connection con2; +drop table t1 ,t2 ,t3,t4,t5,t6; +disconnect con2; + +connection default; +drop procedure populate_tables; + + +# case 2 - with prepare and execute +let $prep_loop= 5; +create temporary table prep_1(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(6000) not null, + c5 blob(6000) not null, + c6 varchar(2000) not null, + c7 varchar(2000) not null, + c8 datetime, + c9 decimal(6,3), + index (c3,c4(50),c5(50)), + index (c2)) +engine=innodb; +PREPARE stm FROM "insert into prep_1 values(?,?,repeat(concat(' tc3_',?),30),repeat(concat(' tc4_',?),800),repeat(concat(' tc_',?),800),repeat(concat(' tc6_',?),245),repeat(concat(' tc7_',?),245),now(),(100.55+?))"; +set @var = 5; +set @var_static = 5; +while ($prep_loop>0) +{ + eval EXECUTE stm USING @var,@var,@var,@var,@var,@var,@var,@var; + eval EXECUTE stm USING @var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static,@var_static; + dec $prep_loop; + set @var = @var - 1; +} +select c1,left(c3,15) from prep_1 order by c1 ; +select count(*) from prep_1; + +PREPARE stm_1 FROM "UPDATE prep_1 SET c1 = c1 + 1"; +EXECUTE stm_1; +EXECUTE stm_1; +select c1,left(c3,15) from prep_1 order by c1 ; +select count(*) from prep_1; + +PREPARE stm_2 FROM "DELETE FROM prep_1 ORDER BY c1 LIMIT 1"; +EXECUTE stm_2; +EXECUTE stm_2; +select c1,left(c3,15) from prep_1 order by c1 ; +select count(*) from prep_1; + +drop prepare stm; +drop prepare stm_1; +drop prepare stm_2; +drop table prep_1; + +--remove_file $MYSQLTEST_VARDIR/tmp/t1.outfile + +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +SET sql_mode = default; +-- enable_query_log + diff --git a/mysql-test/suite/innodb_zip/t/wl6501_1.test b/mysql-test/suite/innodb_zip/t/wl6501_1.test 
new file mode 100644
index 00000000000..dd8b5f65b31
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/wl6501_1.test
@@ -0,0 +1,451 @@
+
+####################################################################
+# TC to check truncate table statement atomicity for single #
+# tablespace #
+# Scenario covered: #
+# 1. Debug points added for worklog #
+# 2. Table with different row types #
+# 3. Transactional statement. #
+####################################################################
+
+
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/big_test.inc
+--source include/have_innodb_16k.inc
+
+# Valgrind would result in a "long semaphore wait" inside InnoDB
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
+
+#-----------------------------------------------------------------------
+--disable_query_log
+let $MYSQL_DATA_DIR= `select @@datadir`;
+let $data_directory = data directory='$MYSQL_TMP_DIR/alt_dir';
+let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
+
+call mtr.add_suppression("InnoDB.*table did not exist in the InnoDB data dictionary.*");
+call mtr.add_suppression("InnoDB: A page in the doublewrite buffer is not within space bounds.*");
+call mtr.add_suppression("InnoDB: Cannot create file.*");
+call mtr.add_suppression("InnoDB: Error number 17 means 'File exists'.*");
+call mtr.add_suppression("InnoDB: A page in the doublewrite buffer is not within space bounds");
+call mtr.add_suppression("InnoDB: Error: table .* does not exist in the InnoDB internal");
+--enable_query_log
+
+#-----------------------------------------------------------------------
+set global innodb_file_per_table=on;
+--echo # Verify that 'TRUNCATE TABLE' statement works fine and the size
+--echo # of .ibd file is equal to the initial size after truncation.
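The size comparison itself (checking the .ibd file before and after TRUNCATE) is handled by the test body and the suite's helpers rather than in this hunk. As a rough, purely illustrative sanity check of the same idea, one could compare the clustered-index size reported by INFORMATION_SCHEMA before and after truncation; the table name tsize and the DATA_LENGTH comparison below are assumptions, and I_S sizes are only an approximation of the on-disk .ibd size:

  CREATE TABLE tsize (c1 INT PRIMARY KEY, c2 CHAR(255)) ENGINE=InnoDB;
  SELECT data_length INTO @empty_size FROM information_schema.tables
  WHERE table_schema = DATABASE() AND table_name = 'tsize';
  INSERT INTO tsize VALUES (1, REPEAT('x', 255)), (2, REPEAT('y', 255));
  TRUNCATE TABLE tsize;
  SELECT data_length = @empty_size AS back_to_initial_size
  FROM information_schema.tables
  WHERE table_schema = DATABASE() AND table_name = 'tsize';
  DROP TABLE tsize;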
+ +#----------------------------------------------------------------------- +drop table if exists t1,t2,t3,t4,t6; +let $cnt = 6; +while ($cnt) { + + # table with basic data type + primary ,secondary,composite,prefix index + create table t1(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(500) not null, + c5 blob(500) not null, + c6 varchar(500) not null, + c7 varchar(500) not null, + c8 datetime, + c9 decimal(5,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=redundant; + + + create table t2(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(500) not null, + c5 blob(500) not null, + c6 varchar(500) not null, + c7 varchar(500) not null, + c8 datetime, + c9 decimal(5,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=compact; + + + # with row type , key block size = 4K + create table t3(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(500) not null, + c5 blob(500) not null, + c6 varchar(500) not null, + c7 varchar(500) not null, + c8 datetime, + c9 decimal(5,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=compressed key_block_size=4; + + + create table t4(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(500) not null, + c5 blob(500) not null, + c6 varchar(500) not null, + c7 varchar(500) not null, + c8 datetime, + c9 decimal(5,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb row_format=dynamic; + + + create temporary table t5(c1 int not null, + c2 int not null, + c3 char(255) not null, + c4 text(500) not null, + c5 blob(500) not null, + c6 varchar(500) not null, + c7 varchar(500) not null, + c8 datetime, + c9 decimal(5,3), + primary key (c1), + index (c3,c4(50),c5(50)), + index (c2)) + engine=innodb; + + create table t6 ( a int ) engine = innodb; + insert into t6 values (50),(100),(150); + + --disable_query_log + --disable_result_log + let $n=5; + + # load created tables. + while ($n) + { + start transaction; + + eval insert ignore into t1 values( + $n, $n, + repeat(concat(' tc3_',$n), 42), + repeat(concat(' tc4_',$n), 300), + repeat(concat(' tc5_',$n), 300), + repeat(concat(' tc6_',$n), 300), + repeat(concat(' tc7_',$n), 300), + now(), (100.55+$n)); + + eval insert ignore into t2 values( + $n, $n, + repeat(concat(' tc3_',$n), 42), + repeat(concat(' tc4_',$n), 300), + repeat(concat(' tc5_',$n), 300), + repeat(concat(' tc6_',$n), 300), + repeat(concat(' tc7_',$n), 300), + now(), (100.55+$n)); + + eval insert ignore into t3 values( + $n, $n, + repeat(concat(' tc3_',$n), 42), + repeat(concat(' tc4_',$n), 300), + repeat(concat(' tc5_',$n), 300), + repeat(concat(' tc6_',$n), 300), + repeat(concat(' tc7_',$n), 300), + now(), (100.55+$n)); + + eval insert ignore into t4 values( + $n, $n, + repeat(concat(' tc3_',$n), 42), + repeat(concat(' tc4_',$n), 300), + repeat(concat(' tc5_',$n), 300), + repeat(concat(' tc6_',$n), 300), + repeat(concat(' tc7_',$n), 300), + now(), (100.55+$n)); + + eval insert ignore into t5 values( + $n, $n, + repeat(concat(' tc3_',$n), 42), + repeat(concat(' tc4_',$n), 300), + repeat(concat(' tc5_',$n), 300), + repeat(concat(' tc6_',$n), 300), + repeat(concat(' tc7_',$n), 300), + now(), (100.55+$n)); + + if ($n <= 3) + { + commit; + } + + if ($n > 3) + { + rollback; + } + + dec $n; + } + + # validate loading of the tables. 
+ --enable_result_log + --enable_query_log + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + select count(*) from t5; + select count(*) from t6; + + # set the debug crash point and exercise them. + if ($cnt == 6) + { + set session debug="+d,ib_trunc_crash_during_drop_index_temp_table"; + --echo "---debug ib_trunc_crash_during_drop_index_temp_table point---" + } + if ($cnt == 5) + { + set session debug="+d,ib_trunc_crash_drop_reinit_done_create_to_start"; + --echo "---debug ib_trunc_crash_drop_reinit_done_create_to_start---" + } + + if ($cnt >= 5) { + --echo # Write file to make mysql-test-run.pl expect crash and restart + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --echo # Run the crashing query + --error 2013 + truncate table t5; + --source include/wait_until_disconnected.inc + --enable_reconnect + --echo # Restart the MySQL server + --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --source include/wait_until_connected_again.inc + --disable_reconnect + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + --error ER_NO_SUCH_TABLE + select count(*) from t5; + select count(*) from t6; + } + + # set the debug crash point and exercise them. + if ($cnt == 6) + { + set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; + --echo "---debug ib_trunc_crash_on_drop_of_sec_index point---" + } + if ($cnt == 5) + { + set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; + --echo "---debug ib_trunc_crash_on_create_of_sec_index---" + } + if ($cnt == 4) + { + set session debug="+d,ib_trunc_crash_before_log_removal"; + --echo "---debug ib_trunc_crash_before_log_removal point---" + } + if ($cnt == 3) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 2) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 1) + { + set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; + --echo "---debug ib_trunc_crash_after_redo_log_write_complete point---" + } + + --echo # Write file to make mysql-test-run.pl expect crash and restart + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --echo # Run the crashing query + --error 2013 + truncate table t1; + --source include/wait_until_disconnected.inc + --enable_reconnect + --echo # Restart the MySQL server + --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --source include/wait_until_connected_again.inc + --disable_reconnect + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + --error ER_NO_SUCH_TABLE + select count(*) from t5; + select count(*) from t6; + + if ($cnt == 6) + { + set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; + --echo "---debug ib_trunc_crash_on_drop_of_sec_index point---" + } + if ($cnt == 5) + { + set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; + --echo "---debug ib_trunc_crash_on_create_of_sec_index---" + } + if ($cnt == 4) + { + set session debug="+d,ib_trunc_crash_before_log_removal"; + --echo "---debug ib_trunc_crash_before_log_removal point---" + } + if ($cnt == 3) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 2) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo 
"---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 1) + { + set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; + --echo "---debug ib_trunc_crash_after_redo_log_write_complete point---" + } + + + --echo # Write file to make mysql-test-run.pl expect crash and restart + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --echo # Run the crashing query + --error 2013 + truncate table t2; + --source include/wait_until_disconnected.inc + --enable_reconnect + --echo # Restart the MySQL server + --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --source include/wait_until_connected_again.inc + --disable_reconnect + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + --error ER_NO_SUCH_TABLE + select count(*) from t5; + select count(*) from t6; + + if ($cnt == 6) + { + set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; + --echo "---debug ib_trunc_crash_on_drop_of_sec_index point---" + } + if ($cnt == 5) + { + set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; + --echo "---debug ib_trunc_crash_on_create_of_sec_index---" + } + if ($cnt == 4) + { + set session debug="+d,ib_trunc_crash_before_log_removal"; + --echo "---debug ib_trunc_crash_before_log_removal point---" + } + if ($cnt == 3) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 2) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 1) + { + set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; + --echo "---debug ib_trunc_crash_after_redo_log_write_complete point---" + } + + + --echo # Write file to make mysql-test-run.pl expect crash and restart + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --echo # Run the crashing query + --error 2013 + truncate table t3; + --source include/wait_until_disconnected.inc + --enable_reconnect + --echo # Restart the MySQL server + --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --source include/wait_until_connected_again.inc + --disable_reconnect + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + --error ER_NO_SUCH_TABLE + select count(*) from t5; + select count(*) from t6; + + + if ($cnt == 6) + { + set session debug="+d,ib_trunc_crash_on_drop_of_sec_index"; + --echo "---debug ib_trunc_crash_on_drop_of_sec_index point---" + } + if ($cnt == 5) + { + set session debug="+d,ib_trunc_crash_on_create_of_sec_index"; + --echo "---debug ib_trunc_crash_on_create_of_sec_index---" + } + if ($cnt == 4) + { + set session debug="+d,ib_trunc_crash_before_log_removal"; + --echo "---debug ib_trunc_crash_before_log_removal point---" + } + if ($cnt == 3) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 2) + { + set session debug="+d,ib_trunc_crash_after_truncate_done"; + --echo "---debug ib_trunc_crash_after_truncate_done point---" + } + if ($cnt == 1) + { + set session debug="+d,ib_trunc_crash_after_redo_log_write_complete"; + --echo "---debug ib_trunc_crash_after_redo_log_write_complete point---" + } + + --echo # Write file to make mysql-test-run.pl expect crash and restart + --exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --echo # Run the crashing query + --error 2013 + truncate table t4; 
+ --source include/wait_until_disconnected.inc + --enable_reconnect + --echo # Restart the MySQL server + --exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect + --source include/wait_until_connected_again.inc + --disable_reconnect + select count(*) from t1; + select count(*) from t2; + select count(*) from t3; + select count(*) from t4; + --error ER_NO_SUCH_TABLE + select count(*) from t5; + select count(*) from t6; + + drop table t1, t2, t3, t4, t6; + + dec $cnt; + + --disable_query_log + eval set global innodb_file_per_table=$innodb_file_per_table_orig; + --enable_query_log +} + + + diff --git a/mysql-test/suite/innodb_zip/t/wl6501_crash_3.test b/mysql-test/suite/innodb_zip/t/wl6501_crash_3.test new file mode 100644 index 00000000000..eb4c23aa66e --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6501_crash_3.test @@ -0,0 +1,26 @@ +# +# WL#6501: make truncate table atomic +# + +# TC tries to hit crash point during truncate of +# compressed non-temp table residing in single tablespace +# with page-size=16k + +--source include/have_innodb.inc +--source include/have_innodb_16k.inc +--source include/have_debug.inc +--source include/big_test.inc + +# Valgrind would complain about memory leaks when we crash on purpose. +--source include/not_valgrind.inc +# Embedded server does not support crashing +--source include/not_embedded.inc +# Avoid CrashReporter popup on Mac +--source include/not_crashrep.inc + +let $wl6501_file_per_table = 1; +let $wl6501_row_fmt = compressed; +let $wl6501_kbs = 16; +let $wl6501_file_format = 'Barracuda'; +--source suite/innodb/include/innodb_wl6501_crash.inc + diff --git a/mysql-test/suite/innodb_zip/t/wl6501_crash_4.test b/mysql-test/suite/innodb_zip/t/wl6501_crash_4.test new file mode 100644 index 00000000000..870af3dfc94 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6501_crash_4.test @@ -0,0 +1,29 @@ +# +# WL#6501: make truncate table atomic +# + +# TC tries to hit crash point during truncate of +# compressed non-temp table residing in single tablespace. +# with page-size=4k + +--source include/have_innodb.inc +--source include/have_innodb_4k.inc +--source include/have_debug.inc +--source include/big_test.inc + +# Valgrind would complain about memory leaks when we crash on purpose. +--source include/not_valgrind.inc +# Embedded server does not support crashing +--source include/not_embedded.inc +# Avoid CrashReporter popup on Mac +--source include/not_crashrep.inc + +let $wl6501_file_per_table = 1; +let $wl6501_row_fmt = compressed; +let $wl6501_kbs = 4; +let $wl6501_file_format = 'Barracuda'; +--source suite/innodb/include/innodb_wl6501_crash.inc + +let $wl6501_temp = temporary; +--source suite/innodb/include/innodb_wl6501_crash_temp.inc + diff --git a/mysql-test/suite/innodb_zip/t/wl6501_crash_5.test b/mysql-test/suite/innodb_zip/t/wl6501_crash_5.test new file mode 100644 index 00000000000..3432a5a5c76 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6501_crash_5.test @@ -0,0 +1,26 @@ +# +# WL#6501: make truncate table atomic +# + +# TC tries to hit crash point during truncate of +# compressed non-temp table residing in single tablespace. +# with page-size=8k + +--source include/have_innodb.inc +--source include/have_innodb_8k.inc +--source include/have_debug.inc +--source include/big_test.inc + +# Valgrind would complain about memory leaks when we crash on purpose. 
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
+
+let $wl6501_file_per_table = 1;
+let $wl6501_row_fmt = compressed;
+let $wl6501_kbs = 8;
+let $wl6501_file_format = 'Barracuda';
+--source suite/innodb/include/innodb_wl6501_crash.inc
+
diff --git a/mysql-test/suite/innodb_zip/t/wl6501_scale_1.test b/mysql-test/suite/innodb_zip/t/wl6501_scale_1.test
new file mode 100644
index 00000000000..8c746fe8abf
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/wl6501_scale_1.test
@@ -0,0 +1,41 @@
+#
+# WL#6501: make truncate table atomic
+#
+
+# load table with some significant amount of data
+# and then try truncate
+
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/big_test.inc
+--source include/have_innodb_16k.inc
+
+# Valgrind would complain about memory leaks when we crash on purpose.
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
+
+
+# Single-Tablespace/Non-Compressed
+let $wl6501_file_per_table = 1;
+let $wl6501_row_fmt = compact;
+let $wl6501_kbs = 16;
+let $wl6501_file_format = 'Antelope';
+--source suite/innodb_zip/include/innodb_wl6501_scale.inc
+
+# Single-Tablespace/Compressed
+let $wl6501_file_per_table = 1;
+let $wl6501_row_fmt = compressed;
+let $wl6501_kbs = 16;
+let $wl6501_file_format = 'Barracuda';
+--source suite/innodb_zip/include/innodb_wl6501_scale.inc
+
+# System-Tablespace/Non-Compressed
+let $wl6501_file_per_table = 0;
+let $wl6501_row_fmt = compact;
+let $wl6501_kbs = 16;
+let $wl6501_file_format = 'Antelope';
+--source suite/innodb_zip/include/innodb_wl6501_scale.inc
+
diff --git a/mysql-test/suite/innodb_zip/t/wl6560.test b/mysql-test/suite/innodb_zip/t/wl6560.test
new file mode 100644
index 00000000000..041dd453c72
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/wl6560.test
@@ -0,0 +1,423 @@
+#
+# WL#6560: InnoDB: separate tablespace for innodb-temp-tables.
+#
+
+--source include/have_innodb.inc
+--source include/have_innodb_zip.inc
+# Embedded server does not support restarting
+--source include/not_embedded.inc
+-- source include/big_test.inc
+
+--disable_query_log
+call mtr.add_suppression("Tablespace innodb_temporary ran out of space. Please add another file or use 'autoextend' for the last file in setting innodb_temp_data_file_path.");
+call mtr.add_suppression("The table 't1' is full");
+--enable_query_log
+
+################################################################################
+#
+# Will test following scenarios:
+# 1. creation of shared temp-tablespace.
+# 2. ddl + dml operation involving temp-tablespace.
+#    insert/delete/update/select
+#    create/drop/alter/truncate/import-discard (though blocked).
+# 3. ddl + dml operation on compressed table.
+#    (table doesn't reside in shared temp-tablespace).
+# 4. Test bulk-loading that result in auto-extension of temp-tablespace.
+# 5. re-creation of temp-tablespace on re-start.
+#    also to ensure non-existence of existing temp-table.
+# 6. restart server in innodb-read-only mode. this will also
+#    block creation of temp-tables.
+# 7. try starting server with shared and temp-tablespace filename same.
+# 8. try re-starting server with param so that temp-tablespace can't be
+#    expanded and insert enough data to make it full.
+# 9.
tests for different row format types and key block sizes for +# compressed tables. +# 10. try restarting server with raw device specified for temp-tablespace. +# 11. try restarting server with temp-tablespace less than min. threshold +# 12. no file specified for temp-tablespace. +################################################################################ + +#----------------------------------------------------------------------------- +# +# create test-bed +# +let $per_table = `select @@innodb_file_per_table`; + +set global innodb_file_per_table = off; +let $MYSQL_TMP_DIR = `select @@tmpdir`; +let $MYSQL_DATA_DIR = `select @@datadir`; +let SEARCH_FILE = $MYSQLTEST_VARDIR/log/my_restart.err; +let $args = --loose-console --core-file > $SEARCH_FILE 2>&1; +let crash = --loose-console > $SEARCH_FILE 2>&1 --innodb-force-recovery-crash; +let readonly = $args --innodb_read_only; +let nameconflicts = $args --innodb_data_file_path="ibdata1:12M:autoextend:max:134217728" --innodb_temp_data_file_path="ibdata1:12M:autoextend"; +let rawdevice1 = $args --innodb_temp_data_file_path="/dev/hdd1:3Gnewraw;/dev/hdd2:2Gnewraw"; +let rawdevice2 = $args --innodb_temp_data_file_path="/dev/hdd1:3Graw;/dev/hdd2:2Graw"; +let sizeoftempfile1 = $args --innodb_temp_data_file_path="ibtmp1:2M:autoextend"; +let sizeoftempfile2 = $args --innodb_data_file_path="ibdata1:2M:autoextend"; +let notemptablespacefile = $args --innodb_temp_data_file_path=""; + +#----------------------------------------------------------------------------- +# +# 1. creation of shared temp-tablespace. +# +--echo # files in MYSQL_DATA_DIR +--list_files $MYSQL_DATA_DIR/ ibtmp* + + +#----------------------------------------------------------------------------- +# +# 2. ddl + dml operation involving temp-tablespace. +# insert/delete/update/select +# create/drop/alter/truncate/import-discard (though blocked). +# +select @@global.innodb_file_per_table; +create temporary table t1 (i int, f float, c char(100)) engine=innodb; +# +--source suite/innodb_zip/include/innodb_temp_table_dml.inc +# +# alter table +--error ER_CANNOT_DISCARD_TEMPORARY_TABLE +alter table t1 discard tablespace; +--error ER_CANNOT_DISCARD_TEMPORARY_TABLE +alter table t1 import tablespace; +# +# drop table +drop table t1; + +#----------------------------------------------------------------------------- +# +# 3. ddl + dml operation on compressed table. +# (table doesn't reside in shared temp-tablespace). +# +--echo #files in MYSQL_TMP_DIR +--list_files $MYSQL_TMP_DIR/ *.ibd +set global innodb_file_per_table = 1; +select @@global.innodb_file_per_table; +create temporary table t1 + (i int, f float, c char(100)) engine = innodb key_block_size = 4; +show create table t1; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +# +--source suite/innodb_zip/include/innodb_temp_table_dml.inc +# +# alter table +--error ER_CANNOT_DISCARD_TEMPORARY_TABLE +alter table t1 discard tablespace; +# +# drop table +drop table t1; +set global innodb_file_per_table = off; + +#----------------------------------------------------------------------------- +# +# 4. Test bulk-loading that result in auto-extension of temp-tablespace. 
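+#
+# Note: the temp tablespace is sized by innodb_temp_data_file_path.  Assuming
+# the usual default (ibtmp1:12M:autoextend), the 20000-row load below simply
+# grows ibtmp1 on demand; section 8 later restarts with the autoextend clause
+# removed (ibtmp1:12M), so the same load is expected to fail with
+# ER_RECORD_FILE_FULL.  A quick way to see the current setting:
+#
+#   SELECT @@global.innodb_temp_data_file_path;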
+# +create temporary table t1 + (keyc int, c1 char(100), c2 char(100), + primary key(keyc)) engine = innodb; +delimiter |; +CREATE PROCEDURE populate_t1() +BEGIN + DECLARE i INT DEFAULT 1; + while (i <= 20000) DO + insert into t1 values (i, 'a', 'b'); + SET i = i + 1; + END WHILE; +END| +delimiter ;| +set autocommit=0; +select count(*) from t1; +call populate_t1(); +select count(*) from t1; +select * from t1 limit 10; +set autocommit=1; +truncate table t1; +select count(*) from t1; +# +drop procedure populate_t1; +drop table t1; + +#----------------------------------------------------------------------------- +# +# 5. re-creation of temp-tablespace on re-start. +# also to ensure non-existence of existing temp-table. +# +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; +insert into t1 values (1, 'c', 'b'); +select * from t1; +# +--source include/restart_mysqld.inc +# +--echo # files in MYSQL_DATA_DIR +--list_files $MYSQL_DATA_DIR/ ibtmp* +use test; +--error ER_NO_SUCH_TABLE +select * from t1; + +#----------------------------------------------------------------------------- +# +# 6. restart server in innodb-read-only mode. this will also +# block creation of temp-tables. +# +# +--echo "testing temp-table creation in --innodb_read_only mode" +let $restart_parameters = restart: --innodb-read-only; +--source include/restart_mysqld.inc +# +use test; +show tables; +--error ER_INNODB_READ_ONLY +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; + +#----------------------------------------------------------------------------- +# +# 7. try starting server with shared and temp-tablespace filename same. +# +--source include/shutdown_mysqld.inc +--echo "testing system and temp tablespace name conflict" +--error 1 +--exec $MYSQLD_CMD $nameconflicts +let SEARCH_PATTERN = innodb_temporary and innodb_system file names seem to be the same; +--source ./include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE +--echo "restarting server in normal mode" +--enable_reconnect +let $restart_parameters = restart; +--source include/start_mysqld.inc +# +show tables; +create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb; +drop table t1; + +#----------------------------------------------------------------------------- +# +# 8. try re-starting server with param so that temp-tablespace can't be expanded +# and insert enough data to make it full. +# +--echo # test condition of full-temp-tablespace +let $restart_parameters = restart: --innodb_temp_data_file_path=ibtmp1:12M; +--source include/restart_mysqld.inc +# +create temporary table t1 + (keyc int, c1 char(100), c2 char(100), + primary key(keyc)) engine = innodb; +delimiter |; +CREATE PROCEDURE populate_t1() +BEGIN + DECLARE i INT DEFAULT 1; + while (i <= 20000) DO + insert into t1 values (i, 'a', 'b'); + SET i = i + 1; + END WHILE; +END| +delimiter ;| +set autocommit=0; +select count(*) from t1; +--error ER_RECORD_FILE_FULL +call populate_t1(); +# +drop procedure populate_t1; +drop table t1; + +#----------------------------------------------------------------------------- +# +# 9. tests for different row format types and key block sizes for +# compressed tables. 
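+#
+# What these combinations exercise (a summary, not an exhaustive spec):
+# COMPRESSED / KEY_BLOCK_SIZE on a temporary table only takes effect with
+# innodb_file_per_table=ON and innodb_file_format='Barracuda', in which case
+# the table gets its own .ibd under @@tmpdir (hence the --list_files checks);
+# otherwise the attribute is ignored with a warning (exact text varies) and
+# the table stays in the shared temp tablespace, e.g.:
+#
+#   set global innodb_file_format = 'Antelope';
+#   create temporary table t (i int) engine = innodb row_format = compressed;
+#   show warnings;   # expect a "ROW_FORMAT / KEY_BLOCK_SIZE ignored" warning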
+# +set innodb_strict_mode = off; +--disable_warnings +set global innodb_file_per_table = 0; +set global innodb_file_format = 'Antelope'; +create temporary table t ( + i int) + engine = innodb row_format = compressed; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +drop table t; +# +create temporary table t ( + i int) + engine = innodb row_format = compressed key_block_size = 8; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +# +drop table t; +set global innodb_file_per_table = 1; +create temporary table t ( + i int) + engine = innodb row_format = compressed key_block_size = 8; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +drop table t; +# +create temporary table t ( + i int) + engine = innodb row_format = dynamic; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# +set innodb_strict_mode = on; +create temporary table t ( + i int) + engine = innodb row_format = dynamic; +--replace_regex /[0-9]+/NUMBER/ +drop table t; +# +set global innodb_file_format = 'Barracuda'; +set innodb_strict_mode = off; +create temporary table t ( + i int) + engine = innodb row_format = compressed key_block_size = 8; +--replace_regex /[0-9]+/NUMBER/ +# explicitly disabling it else it will generate warning of ignoring +# key_block_size when suite is run with innodb-page-size=4k +#show warnings; +set innodb_strict_mode = default; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +# +drop table t; +create temporary table t ( + i int) + engine = innodb row_format = compressed; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# +create temporary table t ( + i int) + engine = innodb row_format = dynamic; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# +set innodb_strict_mode = on; +create temporary table t ( + i int) + engine = innodb row_format = dynamic; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +drop table t; +set innodb_strict_mode = off; +# +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +create temporary table t ( + i int) + engine = innodb row_format = dynamic key_block_size = 4; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# +create temporary table t ( + i int) + engine = innodb row_format = compact; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# +create temporary table t ( + i int) + engine = innodb key_block_size = 4; +--replace_regex /[0-9]+/NUMBER/ +show warnings; +--echo #files in MYSQL_TMP_DIR +--replace_regex /#sql[0-9a-f_]*/#sql/ +--list_files $MYSQL_TMP_DIR/ *.ibd +drop table t; +# + +#----------------------------------------------------------------------------- +# +# 10. try restarting server with raw device specified for temp-tablespace. 
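+#
+# Sections 10-12 all use the same negative-startup pattern: shut the server
+# down, launch mysqld by hand with a deliberately bad option string, expect
+# exit status 1, then grep the output captured in $SEARCH_FILE for the
+# expected message, e.g.:
+#
+#   --error 1
+#   --exec $MYSQLD_CMD $rawdevice1
+#   let SEARCH_PATTERN = support raw device;
+#   --source include/search_pattern_in_file.inc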
+# +--source include/shutdown_mysqld.inc +--echo "testing temp tablespace non-support for raw device" +--error 1 +--exec $MYSQLD_CMD $rawdevice1 +let SEARCH_PATTERN = support raw device; +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE +--echo "testing temp tablespace non-support for raw device" +--error 1 +--exec $MYSQLD_CMD $rawdevice2 +let SEARCH_PATTERN = support raw device; +--source include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +let $restart_parameters = restart; +--source include/start_mysqld.inc + +show tables; +create temporary table t1 ( + keyc int, c1 char(100), c2 char(100) + ) engine = innodb; +drop table t1; + +#----------------------------------------------------------------------------- +# +# 11. try restarting server with temp-tablespace less than min. threshold +# +--source include/shutdown_mysqld.inc +--echo "try starting server with temp-tablespace size < min. threshold" +--error 1 +--exec $MYSQLD_CMD $sizeoftempfile1 +let SEARCH_PATTERN = Tablespace size must be at least; +--source ./include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE +--echo "try starting server with sys-tablespace size < min. threshold" +--error 1 +--exec $MYSQLD_CMD $sizeoftempfile2 +let SEARCH_PATTERN = Tablespace size must be at least; +--source ./include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +--source include/start_mysqld.inc + +show tables; +create temporary table t1 ( + keyc int, c1 char(100), c2 char(100) + ) engine = innodb; +drop table t1; + +#----------------------------------------------------------------------------- +# +# 12. no file specified for temp-tablespace. +# +--source include/shutdown_mysqld.inc + +--echo "try starting server with no file specified for temp-tablespace" +--error 1 +--exec $MYSQLD_CMD $notemptablespacefile +let SEARCH_PATTERN = init function returned error; +--source ./include/search_pattern_in_file.inc +--remove_file $SEARCH_FILE + +--source include/start_mysqld.inc + +show tables; +create temporary table t1 ( + keyc int, c1 char(100), c2 char(100) + ) engine = innodb; +drop table t1; diff --git a/mysql-test/suite/innodb_zip/t/wl6915_1.test b/mysql-test/suite/innodb_zip/t/wl6915_1.test new file mode 100644 index 00000000000..625c8a36db2 --- /dev/null +++ b/mysql-test/suite/innodb_zip/t/wl6915_1.test @@ -0,0 +1,650 @@ +--source include/have_innodb.inc +--source include/have_innodb_zip.inc +--source include/have_no_undo_tablespaces.inc +--source include/big_test.inc + +# Embedded server does not support restarting +--source include/not_embedded.inc +# Avoid CrashReporter popup on Mac +--source include/not_crashrep.inc + +#################################################################### +# TC to test temp-table undolog changes correctness # +# Sceanrio covered in single testcase : # +# - Tables with row format(redundant,compressed,dynamic,compact # +# - Table with primary,composite,prefix,secondary INDEX # +# - Insert/delete/update with transactioons # +# - Transaction with COMMIT,rollback,savepoint statements # +# - Transaction having temporary table and normal table # +# - Concurrency by execution of two clients creating tables with # +# same names # +# - Inserting data using # +# - Insert into .. , Load data infile..,insert ignore # +# - Insert into .. 
on duplicate update # +# - Check basic delete and upadte [ignore] # +# - Check constraints like duplicate key,default value # +# - Alter ADD COLUMN , ADD PRIMARY KEY # +# - Flush Tables, logs command # +# - Vary innodb_undo_tablespaces=0,innodb_undo_logs # +# innodb_log_files_in_group # +# - Verify rseg message from server log # +#################################################################### + +# run for page size >= 8k +--disable_warnings +if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE LOWER(variable_name) = 'innodb_page_size' AND variable_value >= 8192`) +{ + --skip Test requires InnoDB with page size >= 8k. +} +--enable_warnings + +call mtr.ADD_suppression(".*Resizing redo log.*"); +call mtr.ADD_suppression(".*Starting to delete and rewrite log files.*"); +call mtr.ADD_suppression(".*New log files created.*"); +# Save initial VALUES of server variable +--disable_query_log +let $innodb_file_per_table_orig=`SELECT @@innodb_file_per_table`; +--enable_query_log + +SELECT @@global.innodb_undo_tablespaces; + +# Create procedure to perform +# 1. Create temp table with row types , INDEX , sufficent data types +# 2. Perform DML with transaction +delimiter |; +CREATE PROCEDURE populate_tables(IN id VARCHAR(10)) + begin + declare n int default 20; + set global innodb_file_per_table=on; + DROP TABLE IF EXISTS t1,t2,t3,t4; + + CREATE TEMPORARY TABLE t1_temp(c1 int NOT NULL, + c2 int NOT NULL, + c3 char(255) NOT NULL, + c4 text(600) NOT NULL, + c5 blob(600) NOT NULL, + c6 varchar(600) NOT NULL, + c7 varchar(600) NOT NULL, + c8 datetime, + c9 decimal(6,3), + PRIMARY KEY (c1), + INDEX (c3,c4(50),c5(50)), + INDEX (c2)) + ENGINE=InnoDB ROW_FORMAT=redundant; + + set @s = concat("CREATE TABLE t1",id," ( c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=redundant;"); + PREPARE createTable FROM @s; + EXECUTE createTable; + DEALLOCATE PREPARE createTable; + + + CREATE TEMPORARY TABLE t2_temp(c1 int NOT NULL, + c2 int NOT NULL, + c3 char(255) NOT NULL, + c4 text(600) NOT NULL, + c5 blob(600) NOT NULL, + c6 varchar(600) NOT NULL, + c7 varchar(600) NOT NULL, + c8 datetime, + c9 decimal(6,3), + PRIMARY KEY (c1), + INDEX (c3,c4(50),c5(50)), + INDEX (c2)) + ENGINE=InnoDB ROW_FORMAT=compact; + + set @s = concat("CREATE TABLE t2",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=compact;"); + PREPARE createTable FROM @s; + EXECUTE createTable; + DEALLOCATE PREPARE createTable; + + CREATE TEMPORARY TABLE t3_temp(c1 int NOT NULL, + c2 int NOT NULL, + c3 char(255) NOT NULL, + c4 text(600) NOT NULL, + c5 blob(600) NOT NULL, + c6 varchar(600) NOT NULL, + c7 varchar(600) NOT NULL, + c8 datetime, + c9 decimal(6,3), + PRIMARY KEY (c1), + INDEX (c3,c4(50),c5(50)), + INDEX (c2)) + ENGINE=InnoDB ROW_FORMAT=compressed key_block_size=4; + + set @s = concat("CREATE TABLE t3",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=compressed key_block_size=4;"); 
+ PREPARE createTable FROM @s; + EXECUTE createTable; + DEALLOCATE PREPARE createTable; + + CREATE TEMPORARY TABLE t4_temp(c1 int NOT NULL, + c2 int NOT NULL, + c3 char(255) NOT NULL, + c4 text(600) NOT NULL, + c5 blob(600) NOT NULL, + c6 varchar(600) NOT NULL, + c7 varchar(600) NOT NULL, + c8 datetime, + c9 decimal(6,3), + PRIMARY KEY (c1), + INDEX (c3,c4(50),c5(50)), + INDEX (c2)) + ENGINE=InnoDB ROW_FORMAT=dynamic; + + set @s = concat("CREATE TABLE t4",id," (c1 int NOT NULL, c2 int NOT NULL, c3 char(255) NOT NULL, c4 text(600) NOT NULL, c5 blob(600) NOT NULL, c6 varchar(600) NOT NULL, c7 varchar(600) NOT NULL, c8 datetime, c9 decimal(6,3), PRIMARY KEY (c1), INDEX (c3,c4(50),c5(50)), INDEX (c2)) ENGINE=InnoDB ROW_FORMAT=dynamic;"); + PREPARE createTable FROM @s; + EXECUTE createTable; + DEALLOCATE PREPARE createTable; + + while (n > 0) do + START TRANSACTION; + set @s = concat("INSERT INTO t1",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t1_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), + REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), + REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), + NOW(),(100.55+n)); + + set @s = concat("INSERT INTO t2",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + + INSERT INTO t2_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), + REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), + REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), + NOW(),(100.55+n)); + + savepoint a; + + set @s = concat("INSERT INTO t3",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + + INSERT INTO t3_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), + REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), + REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), + NOW(),(100.55+n)); + + savepoint b; + + set @s = concat("INSERT INTO t4",id," VALUES(",n,",",n,",REPEAT(concat(' tc3_',",n,"),30), REPEAT(concat(' tc4_',",n,"),70),REPEAT(concat(' tc_',",n,"),70), REPEAT(concat(' tc6_',",n,"),70),REPEAT(concat(' tc7_',",n,"),70), NOW(),(100.55+",n,"));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + + INSERT INTO t4_temp VALUES(n,n,REPEAT(concat(' tc3_',n),30), + REPEAT(concat(' tc4_',n),70),REPEAT(concat(' tc_',n),70), + REPEAT(concat(' tc6_',n),70),REPEAT(concat(' tc7_',n),70), + NOW(),(100.55+n)); + + + if (n > 10) then + if (n > 10 and n <=12) then + ROLLBACK TO SAVEPOINT a; + COMMIT; + end if; + if (n > 12 and n < 15) then + ROLLBACK TO SAVEPOINT b; + COMMIT; + end if; + if (n > 15) then + COMMIT; + end if; + + else + if (n > 5) then + START TRANSACTION; + DELETE FROM t1_temp WHERE c1 > 10 ; + DELETE FROM t2_temp WHERE c1 > 10 ; + DELETE FROM t3_temp WHERE c1 > 10 ; + DELETE FROM t4_temp WHERE 
c1 > 10 ; + + rollback; + START TRANSACTION; + update t1_temp set c1 = c1 + 1000 WHERE c1 > 10; + update t2_temp set c1 = c1 + 1000 WHERE c1 > 10; + update t3_temp set c1 = c1 + 1000 WHERE c1 > 10; + update t4_temp set c1 = c1 + 1000 WHERE c1 > 10; + rollback; + end if; + end if; + + if (n < 5) then + rollback; + end if; + + FLUSH logs; + ALTER TABLE t1_temp DROP PRIMARY KEY; + ALTER TABLE t1_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); + ALTER TABLE t2_temp DROP PRIMARY KEY; + ALTER TABLE t2_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); + ALTER TABLE t3_temp DROP PRIMARY KEY; + ALTER TABLE t3_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); + ALTER TABLE t4_temp DROP PRIMARY KEY; + ALTER TABLE t4_temp ADD PRIMARY KEY (c1,c3(10),c4(10)); + FLUSH tables; + + START TRANSACTION; + set @s = concat("INSERT INTO t1",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t1_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t2",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t2_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t3",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t3_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t4",id," VALUES(",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t4_temp VALUES(n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + + + DELETE FROM t1_temp WHERE c1 between 100 and 110; + DELETE FROM t2_temp WHERE c1 between 100 and 110; + DELETE FROM t3_temp WHERE c1 between 100 and 110; + DELETE FROM t4_temp WHERE c1 between 100 and 110; + + update t1_temp set c1 = c1+1 WHERE c1>110; + update t2_temp set c1 = c1+1 WHERE 
c1>110; + update t3_temp set c1 = c1+1 WHERE c1>110; + update t4_temp set c1 = c1+1 WHERE c1>110; + + savepoint a; + + set @s = concat("INSERT INTO t1",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t1_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t2",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t2_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t3",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t3_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t4",id," VALUES(300+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t4_temp VALUES(300+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + savepoint b; + + set @s = concat("INSERT INTO t1",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t1_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t2",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), 
NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t2_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t3",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t3_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + set @s = concat("INSERT INTO t4",id," VALUES(400+",n,"+100,",n,"+100,REPEAT(concat(' tc3_',",n,"+100),30), REPEAT(concat(' tc4_',",n,"+100),70),REPEAT(concat(' tc_',",n,"+100),70), REPEAT(concat(' tc6_',",n,"+100),60),REPEAT(concat(' tc7_',",n,"+100),60), NOW(),(100.55+",n,"+100));"); + PREPARE insertIntoTable FROM @s; + EXECUTE insertIntoTable; + DEALLOCATE PREPARE insertIntoTable; + INSERT INTO t4_temp VALUES(400+n+100,n+100,REPEAT(concat(' tc3_',n+100),30), + REPEAT(concat(' tc4_',n+100),70),REPEAT(concat(' tc_',n+100),70), + REPEAT(concat(' tc6_',n+100),60),REPEAT(concat(' tc7_',n+100),60), + NOW(),(100.55+n+100)); + savepoint c; + rollback to b; + rollback to a; + COMMIT; + COMMIT; + rollback; + set n = n - 1; + end while; +end| +delimiter ;| + +# Create two client for concurrent execution +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +--echo #---client 1 : dml operation ---" +connection con1; +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; + +-- enable_query_log +-- disable_query_log +# call procedure +--send call populate_tables('_1'); +-- enable_query_log + +--echo #---client 2 : dml operation ---" +connection con2; +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +-- enable_query_log +-- disable_query_log +# call procedure +--send call populate_tables('_2'); + +-- enable_query_log + +# check data of client connection 1 +--echo # In connection 1 +connection con1; +--reap +# 20 rows exepceted in 5 tables +SELECT count(*) FROM t1_1; +SELECT count(*) FROM t2_1; +SELECT count(*) FROM t3_1; +SELECT count(*) FROM t4_1; +SELECT c1 FROM t1_1; +SELECT c1 FROM t2_1; +SELECT c1 FROM t3_1; +SELECT c1 FROM t4_1; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; +# check data of client connection 2 +--echo # In connection 2 +connection con2; +--reap +# 20 rows exepceted in 5 tables +SELECT count(*) FROM t1_2; +SELECT count(*) FROM t2_2; +SELECT count(*) FROM t3_2; +SELECT count(*) FROM t4_2; +SELECT c1 FROM t1_2; +SELECT c1 FROM t2_2; +SELECT c1 FROM t3_2; +SELECT c1 FROM t4_2; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; + +--echo # In connection 1 +connection 
con1; + +set AUTOCOMMIT = 0; +ALTER TABLE t1_temp DROP PRIMARY KEY; +ALTER TABLE t1_temp ADD PRIMARY KEY (c1); +ALTER TABLE t2_temp DROP PRIMARY KEY; +ALTER TABLE t2_temp ADD PRIMARY KEY (c1); +ALTER TABLE t3_temp DROP PRIMARY KEY; +ALTER TABLE t3_temp ADD PRIMARY KEY (c1); +ALTER TABLE t4_temp DROP PRIMARY KEY; +ALTER TABLE t4_temp ADD PRIMARY KEY (c1); +# Check duplicate key constraint + insert ignore +--error ER_DUP_ENTRY +INSERT INTO t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +insert ignore into t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +insert ignore into t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +insert ignore into t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +insert ignore into t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); + +# check rollback due to duplicate value in second record of insert +--error ER_DUP_ENTRY +INSERT INTO t1_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t2_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t3_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); +--error ER_DUP_ENTRY +INSERT INTO t4_temp VALUES (1,1,'a','a','a','a','a',NOW(),100.55), +(20,1,'a','a','a','a','a',NOW(),100.55); + +set AUTOCOMMIT = 1; + +SELECT c1,c2 FROM t1_temp WHERE c1 in (20,1); +SELECT c1,c2 FROM t2_temp WHERE c1 in (20,1); +SELECT c1,c2 FROM t3_temp WHERE c1 in (20,1); +SELECT c1,c2 FROM t4_temp WHERE c1 in (20,1); + +#replace statement +REPLACE INTO t1_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t2_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t3_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +REPLACE INTO t4_temp VALUES (20,1,'a','a','a','a','a',NOW(),100.55); +# verify row is replaced FROM (20,20) to (20,1) +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t1_temp WHERE c1 = 20; +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t2_temp WHERE c1 = 20; +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t3_temp WHERE c1 = 20; +SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t4_temp WHERE c1 = 20; + +# Update ignore. 
statement is gonored as 20 value exits +update ignore t1_temp set c1 = 20 WHERE c1 = 140 ; +update ignore t2_temp set c1 = 20 WHERE c1 = 140 ; +update ignore t3_temp set c1 = 20 WHERE c1 = 140 ; +update ignore t4_temp set c1 = 20 WHERE c1 = 140 ; +# see record 140 is present as last update ignored +SELECT count(*) FROM t1_temp WHERE c1 = 140; +SELECT count(*) FROM t2_temp WHERE c1 = 140; +SELECT count(*) FROM t3_temp WHERE c1 = 140; +SELECT count(*) FROM t4_temp WHERE c1 = 140; + +# Alter table to ADD COLUMN and PRIMARY KEY +ALTER TABLE t1_temp ADD COLUMN c10 int default 99 , +ADD COLUMN c11 varchar(100) default 'test'; +ALTER TABLE t1_temp DROP PRIMARY KEY; +ALTER TABLE t1_temp ADD PRIMARY KEY (c1); +INSERT INTO t1_temp (c1,c2,c3,c4,c5,c6,c7,c8,c9) VALUES (-1,-1,'a','a','a','a','a',NOW(),100.55); +SELECT c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 FROM t1_temp WHERE c1 < 0; +SELECT count(*) FROM t1_temp WHERE c10 = 99 and c11 like 'test'; +# insert on duplicate key update +INSERT INTO t1_temp (c1,c2,c3,c4,c5,c6,c7,c8,c9) VALUES (-1,-1,'a','a','a','a','a',NOW(),100.55) +ON DUPLICATE KEY UPDATE c1=-2,c2=-2; +SELECT c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 FROM t1_temp WHERE c1 < 0; + +# + +#cleanup +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +disconnect con1; + +connection con2; +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +disconnect con2; + + +connection default; +# +## trying with VALUES innodb_undo_tablespaces, innodb_undo_logs ,innodb_log_files_in_group +## +let $restart_parameters = restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=20 --innodb_undo_logs=20 --innodb_log_files_in_group=4; +--source include/restart_mysqld.inc + +# Create two client for concurrent execution +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +# +# +connection con1; +--send call populate_tables('_1'); +connection con2; +--send call populate_tables('_2'); +--echo "#connection 1 - verify tables" +connection con1; +--reap +SELECT count(*) FROM t1_1; +SELECT count(*) FROM t2_1; +SELECT count(*) FROM t3_1; +SELECT count(*) FROM t4_1; +SELECT c1 FROM t1_1; +SELECT c1 FROM t2_1; +SELECT c1 FROM t3_1; +SELECT c1 FROM t4_1; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +disconnect con1; +--echo "#connection 2 - verify tables" +connection con2; +--reap +SELECT count(*) FROM t1_2; +SELECT count(*) FROM t2_2; +SELECT count(*) FROM t3_2; +SELECT count(*) FROM t4_2; +SELECT c1 FROM t1_2; +SELECT c1 FROM t2_2; +SELECT c1 FROM t3_2; +SELECT c1 FROM t4_2; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +disconnect con2; + +connection default; +# innodb_undo_logs > non redo rsegment +let $restart_parameters = restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=30 --innodb_undo_logs=20 --innodb_log_files_in_group=4; +--source include/restart_mysqld.inc + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +--send call populate_tables('_1'); +connection con2; +--send call populate_tables('_2'); +--echo "#connection 1 - verify tables" +connection con1; +--reap +SELECT count(*) FROM t1_1; +SELECT count(*) FROM t2_1; +SELECT count(*) FROM t3_1; +SELECT count(*) FROM t4_1; 
+SELECT c1 FROM t1_1; +SELECT c1 FROM t2_1; +SELECT c1 FROM t3_1; +SELECT c1 FROM t4_1; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; +DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1; +disconnect con1; +--echo "#connection 2 - verify tables" +connection con2; +--reap +SELECT count(*) FROM t1_2; +SELECT count(*) FROM t2_2; +SELECT count(*) FROM t3_2; +SELECT count(*) FROM t4_2; +SELECT c1 FROM t1_2; +SELECT c1 FROM t2_2; +SELECT c1 FROM t3_2; +SELECT c1 FROM t4_2; +SELECT count(*) FROM t1_temp; +SELECT count(*) FROM t2_temp; +SELECT count(*) FROM t3_temp; +SELECT count(*) FROM t4_temp; +SELECT c1 FROM t1_temp; +SELECT c1 FROM t2_temp; +SELECT c1 FROM t3_temp; +SELECT c1 FROM t4_temp; +DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2; +disconnect con2; + +# + +connection default; +DROP PROCEDURE populate_tables; + +# check message in log +let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err; +let SEARCH_FILE= $error_log; +# We get depending on the platform either "./ibdata1" or ".\ibdata1". +let SEARCH_PATTERN=redo rollback segment.*found.*redo rollback segment.*active +--source include/search_pattern_in_file.inc +let SEARCH_PATTERN=non-redo rollback.*active +--source include/search_pattern_in_file.inc + + +SHOW TABLES; + +-- disable_query_log +eval set global innodb_file_per_table=$innodb_file_per_table_orig; +-- enable_query_log + diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def index de3091a56e5..c8799474e21 100644 --- a/mysql-test/suite/rpl/disabled.def +++ b/mysql-test/suite/rpl/disabled.def @@ -13,3 +13,6 @@ rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fails on PB2 hpux rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table + + + diff --git a/mysql-test/suite/sys_vars/r/innodb_adaptive_hash_index_parts_basic.result b/mysql-test/suite/sys_vars/r/innodb_adaptive_hash_index_parts_basic.result new file mode 100644 index 00000000000..965e2efedf3 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_adaptive_hash_index_parts_basic.result @@ -0,0 +1,48 @@ +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts) +1 +1 Expected +SET @@GLOBAL.innodb_adaptive_hash_index_parts=1; +ERROR HY000: Variable 'innodb_adaptive_hash_index_parts' is a read only variable +Expected error 'Read only variable' +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts) +1 +1 Expected +SELECT @@GLOBAL.innodb_adaptive_hash_index_parts = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_adaptive_hash_index_parts'; +@@GLOBAL.innodb_adaptive_hash_index_parts = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_adaptive_hash_index_parts'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +SELECT @@innodb_adaptive_hash_index_parts = @@GLOBAL.innodb_adaptive_hash_index_parts; +@@innodb_adaptive_hash_index_parts = @@GLOBAL.innodb_adaptive_hash_index_parts +1 +1 Expected +SELECT COUNT(@@innodb_adaptive_hash_index_parts); 
+COUNT(@@innodb_adaptive_hash_index_parts) +1 +1 Expected +SELECT COUNT(@@local.innodb_adaptive_hash_index_parts); +ERROR HY000: Variable 'innodb_adaptive_hash_index_parts' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_adaptive_hash_index_parts); +ERROR HY000: Variable 'innodb_adaptive_hash_index_parts' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts) +1 +1 Expected +SELECT innodb_adaptive_hash_index_parts = @@SESSION.innodb_adaptive_hash_index_parts; +ERROR 42S22: Unknown column 'innodb_adaptive_hash_index_parts' in 'field list' +Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_adaptive_max_sleep_delay_basic.result b/mysql-test/suite/sys_vars/r/innodb_adaptive_max_sleep_delay_basic.result index 54b1c1e78b4..b3ba28a4411 100644 --- a/mysql-test/suite/sys_vars/r/innodb_adaptive_max_sleep_delay_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_adaptive_max_sleep_delay_basic.result @@ -3,6 +3,18 @@ SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; 150000 150000 Expected SET @@GLOBAL.innodb_adaptive_max_sleep_delay=100; +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=0; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +@@GLOBAL.innodb_adaptive_max_sleep_delay +0 +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=100000; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +@@GLOBAL.innodb_adaptive_max_sleep_delay +100000 +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=1000000; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +@@GLOBAL.innodb_adaptive_max_sleep_delay +1000000 SET @@GLOBAL.innodb_adaptive_max_sleep_delay=1000001; Warnings: Warning 1292 Truncated incorrect innodb_adaptive_max_sleep_delay value: '1000001' @@ -10,6 +22,13 @@ SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; @@GLOBAL.innodb_adaptive_max_sleep_delay 1000000 1000000 Expected +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=4294967295; +Warnings: +Warning 1292 Truncated incorrect innodb_adaptive_max_sleep_delay value: '4294967295' +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +@@GLOBAL.innodb_adaptive_max_sleep_delay +1000000 +1000000 Expected SET @@GLOBAL.innodb_adaptive_max_sleep_delay=-1; Warnings: Warning 1292 Truncated incorrect innodb_adaptive_max_sleep_delay value: '-1' @@ -17,6 +36,13 @@ SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; @@GLOBAL.innodb_adaptive_max_sleep_delay 0 0 Expected +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_adaptive_max_sleep_delay value: '-1024' +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +@@GLOBAL.innodb_adaptive_max_sleep_delay +0 +0 Expected SELECT COUNT(@@GLOBAL.innodb_adaptive_max_sleep_delay); COUNT(@@GLOBAL.innodb_adaptive_max_sleep_delay) 1 diff --git a/mysql-test/suite/sys_vars/r/innodb_additional_mem_pool_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_additional_mem_pool_size_basic.result deleted file mode 100644 index fb062d62bc6..00000000000 --- a/mysql-test/suite/sys_vars/r/innodb_additional_mem_pool_size_basic.result +++ /dev/null @@ -1,53 +0,0 @@ -'#---------------------BS_STVARS_020_01----------------------#' -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); -COUNT(@@GLOBAL.innodb_additional_mem_pool_size) -1 -1 Expected -'#---------------------BS_STVARS_020_02----------------------#' -SET @@GLOBAL.innodb_additional_mem_pool_size=1; -ERROR HY000: Variable 
'innodb_additional_mem_pool_size' is a read only variable -Expected error 'Read only variable' -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); -COUNT(@@GLOBAL.innodb_additional_mem_pool_size) -1 -1 Expected -'#---------------------BS_STVARS_020_03----------------------#' -SELECT @@GLOBAL.innodb_additional_mem_pool_size = VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_additional_mem_pool_size'; -@@GLOBAL.innodb_additional_mem_pool_size = VARIABLE_VALUE -1 -1 Expected -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); -COUNT(@@GLOBAL.innodb_additional_mem_pool_size) -1 -1 Expected -SELECT COUNT(VARIABLE_VALUE) -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_additional_mem_pool_size'; -COUNT(VARIABLE_VALUE) -1 -1 Expected -'#---------------------BS_STVARS_020_04----------------------#' -SELECT @@innodb_additional_mem_pool_size = @@GLOBAL.innodb_additional_mem_pool_size; -@@innodb_additional_mem_pool_size = @@GLOBAL.innodb_additional_mem_pool_size -1 -1 Expected -'#---------------------BS_STVARS_020_05----------------------#' -SELECT COUNT(@@innodb_additional_mem_pool_size); -COUNT(@@innodb_additional_mem_pool_size) -1 -1 Expected -SELECT COUNT(@@local.innodb_additional_mem_pool_size); -ERROR HY000: Variable 'innodb_additional_mem_pool_size' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@SESSION.innodb_additional_mem_pool_size); -ERROR HY000: Variable 'innodb_additional_mem_pool_size' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); -COUNT(@@GLOBAL.innodb_additional_mem_pool_size) -1 -1 Expected -SELECT innodb_additional_mem_pool_size = @@SESSION.innodb_additional_mem_pool_size; -ERROR 42S22: Unknown column 'innodb_additional_mem_pool_size' in 'field list' -Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result index d2773b7da69..0bc17e10b06 100644 --- a/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result @@ -42,6 +42,42 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_API_BK_COMMIT_INTERVAL 100 SET session innodb_api_bk_commit_interval=1; ERROR HY000: Variable 'innodb_api_bk_commit_interval' is a GLOBAL variable and should be set with SET GLOBAL +SET global innodb_api_bk_commit_interval=1; +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1 +SET global innodb_api_bk_commit_interval=100000; +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +100000 +SET global innodb_api_bk_commit_interval=1073741824; +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1073741824 +SET global innodb_api_bk_commit_interval=0; +Warnings: +Warning 1292 Truncated incorrect innodb_api_bk_commit_interval value: '0' +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1 +SET global innodb_api_bk_commit_interval=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_api_bk_commit_interval value: '-1024' +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1 +SET global innodb_api_bk_commit_interval=1073741825; +Warnings: +Warning 1292 Truncated incorrect innodb_api_bk_commit_interval value: '1073741825' +SELECT 
@@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1073741824 +SET global innodb_api_bk_commit_interval=4294967295; +Warnings: +Warning 1292 Truncated incorrect innodb_api_bk_commit_interval value: '4294967295' +SELECT @@global.innodb_api_bk_commit_interval; +@@global.innodb_api_bk_commit_interval +1073741824 SET global innodb_api_bk_commit_interval=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_api_bk_commit_interval' SET global innodb_api_bk_commit_interval=1e1; diff --git a/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result b/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result index 900f0167261..6bcca056278 100644 --- a/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result @@ -53,6 +53,12 @@ Warning 1292 Truncated incorrect innodb_autoextend_increment value: '1001' SELECT @@global.innodb_autoextend_increment; @@global.innodb_autoextend_increment 1000 +SET @@global.innodb_autoextend_increment = 2000 ; +Warnings: +Warning 1292 Truncated incorrect innodb_autoextend_increment value: '2000' +SELECT @@global.innodb_autoextend_increment; +@@global.innodb_autoextend_increment +1000 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_autoextend_increment = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_chunk_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_chunk_size_basic.result new file mode 100644 index 00000000000..e9fd5519066 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_chunk_size_basic.result @@ -0,0 +1,48 @@ +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size) +1 +1 Expected +SET @@GLOBAL.innodb_buffer_pool_chunk_size=1; +ERROR HY000: Variable 'innodb_buffer_pool_chunk_size' is a read only variable +Expected error 'Read only variable' +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size) +1 +1 Expected +SELECT @@GLOBAL.innodb_buffer_pool_chunk_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_buffer_pool_chunk_size'; +@@GLOBAL.innodb_buffer_pool_chunk_size = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_buffer_pool_chunk_size'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +SELECT @@innodb_buffer_pool_chunk_size = @@GLOBAL.innodb_buffer_pool_chunk_size; +@@innodb_buffer_pool_chunk_size = @@GLOBAL.innodb_buffer_pool_chunk_size +1 +1 Expected +SELECT COUNT(@@innodb_buffer_pool_chunk_size); +COUNT(@@innodb_buffer_pool_chunk_size) +1 +1 Expected +SELECT COUNT(@@local.innodb_buffer_pool_chunk_size); +ERROR HY000: Variable 'innodb_buffer_pool_chunk_size' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_buffer_pool_chunk_size); +ERROR HY000: Variable 'innodb_buffer_pool_chunk_size' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size) +1 +1 Expected +SELECT innodb_buffer_pool_chunk_size = @@SESSION.innodb_buffer_pool_chunk_size; +ERROR 42S22: Unknown column 'innodb_buffer_pool_chunk_size' in 
'field list' +Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result index 26bb44b4587..ad329cd336f 100644 --- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result @@ -1,7 +1,7 @@ SET @orig = @@global.innodb_buffer_pool_dump_at_shutdown; SELECT @orig; @orig -0 +1 SET GLOBAL innodb_buffer_pool_dump_at_shutdown = OFF; SELECT @@global.innodb_buffer_pool_dump_at_shutdown; @@global.innodb_buffer_pool_dump_at_shutdown @@ -16,3 +16,4 @@ SET GLOBAL innodb_buffer_pool_dump_at_shutdown = "string"; Got one of the listed errors SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 5; Got one of the listed errors +SET GLOBAL innodb_buffer_pool_dump_at_shutdown = default; diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_pct_basic.result index 51c72cfe791..b2cc55ce71e 100644 --- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_pct_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_pct_basic.result @@ -1,20 +1,35 @@ -SET @orig = @@global.innodb_buffer_pool_dump_pct; -SELECT @orig; -@orig -100 -SET GLOBAL innodb_buffer_pool_dump_pct=3, GLOBAL innodb_buffer_pool_dump_now = ON; -SET GLOBAL innodb_buffer_pool_dump_pct=0; +SELECT @@global.innodb_buffer_pool_dump_pct; +@@global.innodb_buffer_pool_dump_pct +25 +SET GLOBAL innodb_buffer_pool_dump_pct=20; +SELECT @@global.innodb_buffer_pool_dump_pct; +@@global.innodb_buffer_pool_dump_pct +20 +SET GLOBAL innodb_buffer_pool_dump_pct=1; SELECT @@global.innodb_buffer_pool_dump_pct; @@global.innodb_buffer_pool_dump_pct 1 -SHOW WARNINGS; -Level Code Message -Warning 1292 Truncated incorrect innodb_buffer_pool_dump_pct value: '0' -SET GLOBAL innodb_buffer_pool_dump_pct=101; +SET GLOBAL innodb_buffer_pool_dump_pct=100; SELECT @@global.innodb_buffer_pool_dump_pct; @@global.innodb_buffer_pool_dump_pct 100 -SHOW WARNINGS; -Level Code Message +SET GLOBAL innodb_buffer_pool_dump_pct=101; +Warnings: Warning 1292 Truncated incorrect innodb_buffer_pool_dump_pct value: '101' -SET GLOBAL innodb_buffer_pool_dump_pct=@orig; +SELECT @@global.innodb_buffer_pool_dump_pct; +@@global.innodb_buffer_pool_dump_pct +100 +SET GLOBAL innodb_buffer_pool_dump_pct=-1; +Warnings: +Warning 1292 Truncated incorrect innodb_buffer_pool_dump_pct value: '-1' +SELECT @@global.innodb_buffer_pool_dump_pct; +@@global.innodb_buffer_pool_dump_pct +1 +SET GLOBAL innodb_buffer_pool_dump_pct=Default; +SELECT @@global.innodb_buffer_pool_dump_pct; +@@global.innodb_buffer_pool_dump_pct +25 +SET GLOBAL innodb_buffer_pool_dump_pct='foo'; +ERROR 42000: Incorrect argument type to variable 'innodb_buffer_pool_dump_pct' +SET innodb_buffer_pool_dump_pct=50; +ERROR HY000: Variable 'innodb_buffer_pool_dump_pct' is a GLOBAL variable and should be set with SET GLOBAL diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_filename_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_filename_basic.result deleted file mode 100644 index 5e50a715307..00000000000 --- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_filename_basic.result +++ /dev/null @@ -1,7 +0,0 @@ -SET @orig = @@global.innodb_buffer_pool_filename; -SELECT @orig; -@orig -ib_buffer_pool -SET GLOBAL innodb_buffer_pool_filename = 'innodb_foobar_dump'; -SET GLOBAL innodb_buffer_pool_dump_now = 
ON; -SET GLOBAL innodb_buffer_pool_filename = @orig; diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_load_at_startup_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_load_at_startup_basic.result index 1dad72baefd..cd4c924b425 100644 --- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_load_at_startup_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_load_at_startup_basic.result @@ -1,7 +1,7 @@ SET @orig = @@global.innodb_buffer_pool_load_at_startup; SELECT @orig; @orig -0 +1 SET GLOBAL innodb_buffer_pool_load_at_startup = OFF; ERROR HY000: Variable 'innodb_buffer_pool_load_at_startup' is a read only variable SET GLOBAL innodb_buffer_pool_load_at_startup = ON; diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_size_basic.result index 27e6cae41ef..e6210165436 100644 --- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_size_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_size_basic.result @@ -1,12 +1,12 @@ +SET @start_buffer_pool_size = @@GLOBAL.innodb_buffer_pool_size; '#---------------------BS_STVARS_022_01----------------------#' SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); COUNT(@@GLOBAL.innodb_buffer_pool_size) 1 1 Expected '#---------------------BS_STVARS_022_02----------------------#' -SET @@GLOBAL.innodb_buffer_pool_size=1; -ERROR HY000: Variable 'innodb_buffer_pool_size' is a read only variable -Expected error 'Read only variable' +SET @@GLOBAL.innodb_buffer_pool_size=10485760; +Expected succeeded SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); COUNT(@@GLOBAL.innodb_buffer_pool_size) 1 @@ -51,3 +51,4 @@ COUNT(@@GLOBAL.innodb_buffer_pool_size) SELECT innodb_buffer_pool_size = @@SESSION.innodb_buffer_pool_size; ERROR 42S22: Unknown column 'innodb_buffer_pool_size' in 'field list' Expected error 'Readonly variable' +SET @@GLOBAL.innodb_buffer_pool_size = @start_buffer_pool_size; diff --git a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result index 8e05db129dc..9c2e95b3c7c 100644 --- a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result @@ -1,47 +1,47 @@ SET @orig = @@global.innodb_checksum_algorithm; SELECT @orig; @orig -INNODB +crc32 SET GLOBAL innodb_checksum_algorithm = 'crc32'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -CRC32 +crc32 SET GLOBAL innodb_checksum_algorithm = 'strict_crc32'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_CRC32 +strict_crc32 SET GLOBAL innodb_checksum_algorithm = 'innodb'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -INNODB +innodb SET GLOBAL innodb_checksum_algorithm = 'strict_innodb'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_INNODB +strict_innodb SET GLOBAL innodb_checksum_algorithm = 'none'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -NONE +none SET GLOBAL innodb_checksum_algorithm = 'strict_none'; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_NONE +strict_none SET GLOBAL innodb_checksum_algorithm = ''; ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of '' SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_NONE +strict_none SET GLOBAL innodb_checksum_algorithm 
= 'foobar'; ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of 'foobar' SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_NONE +strict_none SET GLOBAL innodb_checksum_algorithm = 123; ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of '123' SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -STRICT_NONE +strict_none SET GLOBAL innodb_checksum_algorithm = @orig; SELECT @@global.innodb_checksum_algorithm; @@global.innodb_checksum_algorithm -INNODB +crc32 diff --git a/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result b/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result index 3ee9448bdab..31bc11de717 100644 --- a/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result @@ -21,7 +21,6 @@ SET GLOBAL innodb_cmp_per_index_enabled=OFF; SELECT @@global.innodb_cmp_per_index_enabled; @@global.innodb_cmp_per_index_enabled 0 -SET GLOBAL innodb_file_format=Barracuda; SET GLOBAL innodb_cmp_per_index_enabled=ON; CREATE TABLE t (a INT) ENGINE=INNODB KEY_BLOCK_SIZE=8; INSERT INTO t VALUES (1); @@ -61,5 +60,4 @@ compress_time 0 uncompress_ops 0 uncompress_time 0 DROP TABLE t; -SET GLOBAL innodb_file_format=default; SET GLOBAL innodb_cmp_per_index_enabled=default; diff --git a/mysql-test/suite/sys_vars/r/innodb_commit_concurrency_basic.result b/mysql-test/suite/sys_vars/r/innodb_commit_concurrency_basic.result index 85a4b008ff0..474818829c5 100644 --- a/mysql-test/suite/sys_vars/r/innodb_commit_concurrency_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_commit_concurrency_basic.result @@ -28,14 +28,34 @@ SELECT @@global.innodb_commit_concurrency; '#--------------------FN_DYNVARS_046_04-------------------------#' SET @@global.innodb_commit_concurrency = 1; ERROR 42000: Variable 'innodb_commit_concurrency' can't be set to the value of '1' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 SET @@global.innodb_commit_concurrency = -1; ERROR 42000: Variable 'innodb_commit_concurrency' can't be set to the value of '-1' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 SET @@global.innodb_commit_concurrency = "T"; ERROR 42000: Incorrect argument type to variable 'innodb_commit_concurrency' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 SET @@global.innodb_commit_concurrency = "Y"; ERROR 42000: Incorrect argument type to variable 'innodb_commit_concurrency' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 +SET @@global.innodb_commit_concurrency = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_commit_concurrency' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 SET @@global.innodb_commit_concurrency = 1001; ERROR 42000: Variable 'innodb_commit_concurrency' can't be set to the value of '1001' +SELECT @@global.innodb_commit_concurrency; +@@global.innodb_commit_concurrency +0 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_commit_concurrency = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result index 9f85eccdb7a..ae556ceedb2 100644 --- 
a/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result @@ -45,6 +45,11 @@ ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_thr SELECT @@global.innodb_compression_failure_threshold_pct; @@global.innodb_compression_failure_threshold_pct 0 +SET @@global.innodb_compression_failure_threshold_pct = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct' +SELECT @@global.innodb_compression_failure_threshold_pct; +@@global.innodb_compression_failure_threshold_pct +0 SET @@global.innodb_compression_failure_threshold_pct = "Y"; ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct' SELECT @@global.innodb_compression_failure_threshold_pct; @@ -56,6 +61,16 @@ Warning 1292 Truncated incorrect innodb_compression_failure_thres value: '101' SELECT @@global.innodb_compression_failure_threshold_pct; @@global.innodb_compression_failure_threshold_pct 100 +SET @@global.innodb_compression_failure_threshold_pct = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct' +SELECT @@global.innodb_compression_failure_threshold_pct; +@@global.innodb_compression_failure_threshold_pct +100 +SET @@global.innodb_compression_failure_threshold_pct = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct' +SELECT @@global.innodb_compression_failure_threshold_pct; +@@global.innodb_compression_failure_threshold_pct +100 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_compression_failure_threshold_pct = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result b/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result index 628993ef873..6735d877c6b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result @@ -40,12 +40,27 @@ ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max SELECT @@global.innodb_compression_pad_pct_max; @@global.innodb_compression_pad_pct_max 0 +SET @@global.innodb_compression_pad_pct_max = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max' +SELECT @@global.innodb_compression_pad_pct_max; +@@global.innodb_compression_pad_pct_max +0 SET @@global.innodb_compression_pad_pct_max = 76; Warnings: Warning 1292 Truncated incorrect innodb_compression_pad_pct_max value: '76' SELECT @@global.innodb_compression_pad_pct_max; @@global.innodb_compression_pad_pct_max 75 +SET @@global.innodb_compression_pad_pct_max = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max' +SELECT @@global.innodb_compression_pad_pct_max; +@@global.innodb_compression_pad_pct_max +75 +SET @@global.innodb_compression_pad_pct_max = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max' +SELECT @@global.innodb_compression_pad_pct_max; +@@global.innodb_compression_pad_pct_max +75 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_compression_pad_pct_max = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result 
b/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result index 0b790fb3557..dd4488a97b3 100644 --- a/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result @@ -38,12 +38,31 @@ SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets 4294967295 '#--------------------FN_DYNVARS_046_04-------------------------#' +SET @@global.innodb_concurrency_tickets = 4294967296; +SELECT @@global.innodb_concurrency_tickets IN (4294967296,4294967295); +@@global.innodb_concurrency_tickets IN (4294967296,4294967295) +1 +SET @@global.innodb_concurrency_tickets = 12345678901; +SELECT @@global.innodb_concurrency_tickets IN (12345678901,4294967295); +@@global.innodb_concurrency_tickets IN (12345678901,4294967295) +1 +SET @@global.innodb_concurrency_tickets = 18446744073709551615; +SELECT @@global.innodb_concurrency_tickets IN (18446744073709551615,4294967295); +@@global.innodb_concurrency_tickets IN (18446744073709551615,4294967295) +1 +'#--------------------FN_DYNVARS_046_05-------------------------#' SET @@global.innodb_concurrency_tickets = -1; Warnings: Warning 1292 Truncated incorrect innodb_concurrency_tickets value: '-1' SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets 1 +SET @@global.innodb_concurrency_tickets = -1024; +Warnings: +Warning 1292 Truncated incorrect innodb_concurrency_tickets value: '-1024' +SELECT @@global.innodb_concurrency_tickets; +@@global.innodb_concurrency_tickets +1 SET @@global.innodb_concurrency_tickets = "T"; ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' SELECT @@global.innodb_concurrency_tickets; @@ -54,11 +73,22 @@ ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets 1 -SET @@global.innodb_concurrency_tickets = 1001; +SET @@global.innodb_concurrency_tickets = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets -1001 -'#----------------------FN_DYNVARS_046_05------------------------#' +1 +SET @@global.innodb_concurrency_tickets = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' +SELECT @@global.innodb_concurrency_tickets; +@@global.innodb_concurrency_tickets +1 +SET @@global.innodb_concurrency_tickets = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' +SELECT @@global.innodb_concurrency_tickets; +@@global.innodb_concurrency_tickets +1 +'#----------------------FN_DYNVARS_046_06------------------------#' SELECT @@global.innodb_concurrency_tickets = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_concurrency_tickets'; @@ -67,23 +97,23 @@ VARIABLE_VALUE 1 SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets -1001 +1 SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_concurrency_tickets'; VARIABLE_VALUE -1001 -'#---------------------FN_DYNVARS_046_06-------------------------#' +1 +'#---------------------FN_DYNVARS_046_07-------------------------#' SET @@global.innodb_concurrency_tickets = OFF; ERROR 42000: Incorrect argument type to variable 'innodb_concurrency_tickets' SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets -1001 +1 SET @@global.innodb_concurrency_tickets = ON; ERROR 42000: 
Incorrect argument type to variable 'innodb_concurrency_tickets' SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets -1001 -'#---------------------FN_DYNVARS_046_07----------------------#' +1 +'#---------------------FN_DYNVARS_046_08----------------------#' SET @@global.innodb_concurrency_tickets = TRUE; SELECT @@global.innodb_concurrency_tickets; @@global.innodb_concurrency_tickets diff --git a/mysql-test/suite/sys_vars/r/innodb_default_row_format_basic.result b/mysql-test/suite/sys_vars/r/innodb_default_row_format_basic.result new file mode 100644 index 00000000000..9710c3ef364 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_default_row_format_basic.result @@ -0,0 +1,48 @@ +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +dynamic +SET GLOBAL innodb_default_row_format = 'redundant'; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +redundant +SET GLOBAL innodb_default_row_format = 'dynamic'; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +dynamic +SET GLOBAL innodb_default_row_format = 'compact'; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +compact +SET GLOBAL innodb_default_row_format = 'compressed'; +ERROR 42000: Variable 'innodb_default_row_format' can't be set to the value of 'compressed' +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +compact +SET GLOBAL innodb_default_row_format = 'foobar'; +ERROR 42000: Variable 'innodb_default_row_format' can't be set to the value of 'foobar' +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +compact +SET GLOBAL innodb_default_row_format = 0; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +redundant +SET GLOBAL innodb_default_row_format = 1; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +compact +SET GLOBAL innodb_default_row_format = 2; +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +dynamic +SET GLOBAL innodb_default_row_format = 3; +ERROR 42000: Variable 'innodb_default_row_format' can't be set to the value of '3' +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +dynamic +SET GLOBAL innodb_default_row_format = 123; +ERROR 42000: Variable 'innodb_default_row_format' can't be set to the value of '123' +SELECT @@global.innodb_default_row_format; +@@global.innodb_default_row_format +dynamic +SET GLOBAL innodb_default_row_format = default; diff --git a/mysql-test/suite/sys_vars/r/innodb_disable_resize_buffer_pool_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_disable_resize_buffer_pool_debug_basic.result new file mode 100644 index 00000000000..bbac9a4cde6 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_disable_resize_buffer_pool_debug_basic.result @@ -0,0 +1,60 @@ +# +# Basic test for innodb_disable_resize_buffer_pool_debug +# +SET @start_global_value = @@global.innodb_disable_resize_buffer_pool_debug; +SET @@global.innodb_disable_resize_buffer_pool_debug = 0; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; +@@global.innodb_disable_resize_buffer_pool_debug +0 +SET @@global.innodb_disable_resize_buffer_pool_debug ='On' ; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; +@@global.innodb_disable_resize_buffer_pool_debug +1 +SET @@global.innodb_disable_resize_buffer_pool_debug ='Off' ; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; 
+@@global.innodb_disable_resize_buffer_pool_debug +0 +SET @@global.innodb_disable_resize_buffer_pool_debug = 1; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; +@@global.innodb_disable_resize_buffer_pool_debug +1 +SELECT IF(@@GLOBAL.innodb_disable_resize_buffer_pool_debug,'ON','OFF') = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_disable_resize_buffer_pool_debug'; +IF(@@GLOBAL.innodb_disable_resize_buffer_pool_debug,'ON','OFF') = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug); +COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_disable_resize_buffer_pool_debug'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +SELECT @@innodb_disable_resize_buffer_pool_debug = @@GLOBAL.innodb_disable_resize_buffer_pool_debug; +@@innodb_disable_resize_buffer_pool_debug = @@GLOBAL.innodb_disable_resize_buffer_pool_debug +1 +1 Expected +SELECT COUNT(@@innodb_disable_resize_buffer_pool_debug); +COUNT(@@innodb_disable_resize_buffer_pool_debug) +1 +1 Expected +SELECT COUNT(@@local.innodb_disable_resize_buffer_pool_debug); +ERROR HY000: Variable 'innodb_disable_resize_buffer_pool_debug' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_disable_resize_buffer_pool_debug); +ERROR HY000: Variable 'innodb_disable_resize_buffer_pool_debug' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug); +COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug) +1 +1 Expected +SELECT innodb_disable_resize_buffer_pool_debug = @@SESSION.innodb_disable_resize_buffer_pool_debug; +ERROR 42S22: Unknown column 'innodb_disable_resize_buffer_pool_debug' in 'field list' +SET @@global.innodb_disable_resize_buffer_pool_debug = @start_global_value; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; +@@global.innodb_disable_resize_buffer_pool_debug +1 diff --git a/mysql-test/suite/sys_vars/r/innodb_fast_shutdown_basic.result b/mysql-test/suite/sys_vars/r/innodb_fast_shutdown_basic.result index fe09a652700..38d5365b3f3 100644 --- a/mysql-test/suite/sys_vars/r/innodb_fast_shutdown_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_fast_shutdown_basic.result @@ -85,6 +85,21 @@ ERROR 42000: Incorrect argument type to variable 'innodb_fast_shutdown' SELECT @@global.innodb_fast_shutdown; @@global.innodb_fast_shutdown 0 +SET @@global.innodb_fast_shutdown = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_fast_shutdown' +SELECT @@global.innodb_fast_shutdown; +@@global.innodb_fast_shutdown +0 +SET @@global.innodb_fast_shutdown = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_fast_shutdown' +SELECT @@global.innodb_fast_shutdown; +@@global.innodb_fast_shutdown +0 +SET @@global.innodb_fast_shutdown = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_fast_shutdown' +SELECT @@global.innodb_fast_shutdown; +@@global.innodb_fast_shutdown +0 '#-------------------FN_DYNVARS_042_05----------------------------#' SET @@session.innodb_fast_shutdown = 0; ERROR HY000: Variable 'innodb_fast_shutdown' is a GLOBAL variable and should be set with SET GLOBAL diff --git a/mysql-test/suite/sys_vars/r/innodb_file_format_basic.result b/mysql-test/suite/sys_vars/r/innodb_file_format_basic.result index 58e009ea705..c330bbf5c16 100644 --- 
a/mysql-test/suite/sys_vars/r/innodb_file_format_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_file_format_basic.result @@ -1,29 +1,31 @@ SET @start_global_value = @@global.innodb_file_format; SELECT @start_global_value; @start_global_value -Antelope +Barracuda Valid values are 'Antelope' and 'Barracuda' select @@global.innodb_file_format in ('Antelope', 'Barracuda'); @@global.innodb_file_format in ('Antelope', 'Barracuda') 1 select @@global.innodb_file_format; @@global.innodb_file_format -Antelope +Barracuda select @@session.innodb_file_format; ERROR HY000: Variable 'innodb_file_format' is a GLOBAL variable show global variables like 'innodb_file_format'; Variable_name Value -innodb_file_format Antelope +innodb_file_format Barracuda show session variables like 'innodb_file_format'; Variable_name Value -innodb_file_format Antelope +innodb_file_format Barracuda select * from information_schema.global_variables where variable_name='innodb_file_format'; VARIABLE_NAME VARIABLE_VALUE -INNODB_FILE_FORMAT Antelope +INNODB_FILE_FORMAT Barracuda select * from information_schema.session_variables where variable_name='innodb_file_format'; VARIABLE_NAME VARIABLE_VALUE -INNODB_FILE_FORMAT Antelope +INNODB_FILE_FORMAT Barracuda set global innodb_file_format='Antelope'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_file_format; @@global.innodb_file_format Antelope @@ -34,6 +36,8 @@ select * from information_schema.session_variables where variable_name='innodb_f VARIABLE_NAME VARIABLE_VALUE INNODB_FILE_FORMAT Antelope set @@global.innodb_file_format='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_file_format; @@global.innodb_file_format Barracuda @@ -54,6 +58,8 @@ ERROR 42000: Incorrect argument type to variable 'innodb_file_format' set global innodb_file_format='Salmon'; ERROR 42000: Variable 'innodb_file_format' can't be set to the value of 'Salmon' SET @@global.innodb_file_format = @start_global_value; +Warnings: +Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@global.innodb_file_format; @@global.innodb_file_format -Antelope +Barracuda diff --git a/mysql-test/suite/sys_vars/r/innodb_file_format_max_basic.result b/mysql-test/suite/sys_vars/r/innodb_file_format_max_basic.result index 32b2262c091..5402e16a424 100644 --- a/mysql-test/suite/sys_vars/r/innodb_file_format_max_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_file_format_max_basic.result @@ -1,31 +1,33 @@ SET @start_global_value = @@global.innodb_file_format_max; SELECT @start_global_value; @start_global_value -Antelope +Barracuda Valid values are 'Antelope' and 'Barracuda' SELECT @@global.innodb_file_format_max in ('Antelope', 'Barracuda'); @@global.innodb_file_format_max in ('Antelope', 'Barracuda') 1 SELECT @@global.innodb_file_format_max; @@global.innodb_file_format_max -Antelope +Barracuda SELECT @@session.innodb_file_format_max; ERROR HY000: Variable 'innodb_file_format_max' is a GLOBAL variable SHOW global variables LIKE 'innodb_file_format_max'; Variable_name Value -innodb_file_format_max Antelope +innodb_file_format_max Barracuda SHOW session variables LIKE 'innodb_file_format_max'; Variable_name Value -innodb_file_format_max Antelope +innodb_file_format_max Barracuda SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_file_format_max'; VARIABLE_NAME VARIABLE_VALUE -INNODB_FILE_FORMAT_MAX Antelope +INNODB_FILE_FORMAT_MAX Barracuda SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_file_format_max'; VARIABLE_NAME VARIABLE_VALUE -INNODB_FILE_FORMAT_MAX Antelope +INNODB_FILE_FORMAT_MAX Barracuda SET global innodb_file_format_max='Antelope'; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@global.innodb_file_format_max; @@global.innodb_file_format_max Antelope @@ -38,6 +40,8 @@ WHERE variable_name='innodb_file_format_max'; VARIABLE_NAME VARIABLE_VALUE INNODB_FILE_FORMAT_MAX Antelope SET @@global.innodb_file_format_max='Barracuda'; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@global.innodb_file_format_max; @@global.innodb_file_format_max Barracuda @@ -60,6 +64,8 @@ ERROR 42000: Incorrect argument type to variable 'innodb_file_format_max' SET global innodb_file_format_max='Salmon'; ERROR 42000: Variable 'innodb_file_format_max' can't be set to the value of 'Salmon' SET @@global.innodb_file_format_max = @start_global_value; +Warnings: +Warning 131 Using innodb_file_format_max is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@global.innodb_file_format_max; @@global.innodb_file_format_max -Antelope +Barracuda diff --git a/mysql-test/suite/sys_vars/r/innodb_fill_factor_basic.result b/mysql-test/suite/sys_vars/r/innodb_fill_factor_basic.result new file mode 100644 index 00000000000..7a4cef2906f --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_fill_factor_basic.result @@ -0,0 +1,42 @@ +select @@global.innodb_fill_factor; +@@global.innodb_fill_factor +100 +select @@session.innodb_fill_factor; +ERROR HY000: Variable 'innodb_fill_factor' is a GLOBAL variable +show global variables like 'innodb_fill_factor'; +Variable_name Value +innodb_fill_factor 100 +show session variables like 'innodb_fill_factor'; +Variable_name Value +innodb_fill_factor 100 +select * from information_schema.global_variables where variable_name='innodb_fill_factor'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FILL_FACTOR 100 +select * from information_schema.session_variables where variable_name='innodb_fill_factor'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FILL_FACTOR 100 +set global innodb_fill_factor=9; +Warnings: +Warning 1292 Truncated incorrect innodb_fill_factor value: '9' +select @@innodb_fill_factor; +@@innodb_fill_factor +10 +set global innodb_fill_factor=10; +select @@innodb_fill_factor; +@@innodb_fill_factor +10 +set global innodb_fill_factor=75; +select @@innodb_fill_factor; +@@innodb_fill_factor +75 +set global innodb_fill_factor=100; +select @@innodb_fill_factor; +@@innodb_fill_factor +100 +set global innodb_fill_factor=101; +Warnings: +Warning 1292 Truncated incorrect innodb_fill_factor value: '101' +select @@innodb_fill_factor; +@@innodb_fill_factor +100 +set global innodb_fill_factor=100; diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result index 60a4081849f..79455e22b53 100644 --- a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result @@ -56,6 +56,21 @@ Warning 1292 Truncated incorrect innodb_flush_log_at_timeout value: '2701' SELECT @@global.innodb_flush_log_at_timeout; @@global.innodb_flush_log_at_timeout 2700 +SET @@global.innodb_flush_log_at_timeout = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout' +SELECT @@global.innodb_flush_log_at_timeout; +@@global.innodb_flush_log_at_timeout +2700 +SET @@global.innodb_flush_log_at_timeout = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout' +SELECT @@global.innodb_flush_log_at_timeout; +@@global.innodb_flush_log_at_timeout +2700 +SET @@global.innodb_flush_log_at_timeout = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout' +SELECT @@global.innodb_flush_log_at_timeout; +@@global.innodb_flush_log_at_timeout +2700 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_flush_log_at_timeout = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result index 268d40c1be3..0d990d746a9 100644 --- a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result @@ -9,6 +9,11 @@ SELECT @@global.innodb_flush_log_at_trx_commit; 
@@global.innodb_flush_log_at_trx_commit 1 '#---------------------FN_DYNVARS_046_02-------------------------#' +SET innodb_flush_log_at_trx_commit = 1; +ERROR HY000: Variable 'innodb_flush_log_at_trx_commit' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@innodb_flush_log_at_trx_commit; +@@innodb_flush_log_at_trx_commit +1 SELECT local.innodb_flush_log_at_trx_commit; ERROR 42S02: Unknown table 'local' in field list SET global innodb_flush_log_at_trx_commit = 0; @@ -51,6 +56,27 @@ Warning 1292 Truncated incorrect innodb_flush_log_at_trx_commit value: '1001' SELECT @@global.innodb_flush_log_at_trx_commit; @@global.innodb_flush_log_at_trx_commit 3 +SET @@global.innodb_flush_log_at_trx_commit = 100156787; +Warnings: +Warning 1292 Truncated incorrect innodb_flush_log_at_trx_commit value: '100156787' +SELECT @@global.innodb_flush_log_at_trx_commit; +@@global.innodb_flush_log_at_trx_commit +3 +SET @@global.innodb_flush_log_at_trx_commit = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_trx_commit' +SELECT @@global.innodb_flush_log_at_trx_commit; +@@global.innodb_flush_log_at_trx_commit +3 +SET @@global.innodb_flush_log_at_trx_commit = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_trx_commit' +SELECT @@global.innodb_flush_log_at_trx_commit; +@@global.innodb_flush_log_at_trx_commit +3 +SET @@global.innodb_flush_log_at_trx_commit = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_trx_commit' +SELECT @@global.innodb_flush_log_at_trx_commit; +@@global.innodb_flush_log_at_trx_commit +3 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_flush_log_at_trx_commit = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_sync_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_sync_basic.result new file mode 100644 index 00000000000..9e3f7d95eb9 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_flush_sync_basic.result @@ -0,0 +1,92 @@ +SET @start_global_value = @@global.innodb_flush_sync; +SELECT @start_global_value; +@start_global_value +1 +Valid values are 'ON' and 'OFF' +select @@global.innodb_flush_sync in (0, 1); +@@global.innodb_flush_sync in (0, 1) +1 +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +1 +select @@session.innodb_flush_sync; +ERROR HY000: Variable 'innodb_flush_sync' is a GLOBAL variable +show global variables like 'innodb_flush_sync'; +Variable_name Value +innodb_flush_sync ON +show session variables like 'innodb_flush_sync'; +Variable_name Value +innodb_flush_sync ON +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +set global innodb_flush_sync='OFF'; +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +0 +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC OFF +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC OFF +set @@global.innodb_flush_sync=1; +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +1 +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE 
+INNODB_FLUSH_SYNC ON +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +set global innodb_flush_sync=0; +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +0 +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC OFF +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC OFF +set @@global.innodb_flush_sync='ON'; +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +1 +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +set session innodb_flush_sync='OFF'; +ERROR HY000: Variable 'innodb_flush_sync' is a GLOBAL variable and should be set with SET GLOBAL +set @@session.innodb_flush_sync='ON'; +ERROR HY000: Variable 'innodb_flush_sync' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_flush_sync=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_sync' +set global innodb_flush_sync=1e1; +ERROR 42000: Incorrect argument type to variable 'innodb_flush_sync' +set global innodb_flush_sync=2; +ERROR 42000: Variable 'innodb_flush_sync' can't be set to the value of '2' +set global innodb_flush_sync=-3; +ERROR 42000: Variable 'innodb_flush_sync' can't be set to the value of '-3' +select @@global.innodb_flush_sync; +@@global.innodb_flush_sync +1 +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_FLUSH_SYNC ON +set global innodb_flush_sync='AUTO'; +ERROR 42000: Variable 'innodb_flush_sync' can't be set to the value of 'AUTO' +SET @@global.innodb_flush_sync = @start_global_value; +SELECT @@global.innodb_flush_sync; +@@global.innodb_flush_sync +1 diff --git a/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result b/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result index caa41bd64ed..e024581955a 100644 --- a/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result @@ -56,6 +56,16 @@ Warning 1292 Truncated incorrect innodb_flushing_avg_loops value: '1001' SELECT @@global.innodb_flushing_avg_loops; @@global.innodb_flushing_avg_loops 1000 +SET @@global.innodb_flushing_avg_loops = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops' +SELECT @@global.innodb_flushing_avg_loops; +@@global.innodb_flushing_avg_loops +1000 +SET @@global.innodb_flushing_avg_loops = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops' +SELECT @@global.innodb_flushing_avg_loops; +@@global.innodb_flushing_avg_loops +1000 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_flushing_avg_loops = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit_basic.result b/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit_basic.result index 0aefabd48f7..59f6431a65e 100644 --- 
a/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_ft_result_cache_limit_basic.result @@ -25,8 +25,14 @@ set global innodb_ft_result_cache_limit=1000000; select @@innodb_ft_result_cache_limit; @@innodb_ft_result_cache_limit 1000000 -set global innodb_ft_result_cache_limit=4000000000; +set global innodb_ft_result_cache_limit=4294967295; select @@innodb_ft_result_cache_limit; @@innodb_ft_result_cache_limit -4000000000 +4294967295 +set global innodb_ft_result_cache_limit=4*1024*1024*1024; +Warnings: +Warning 1292 Truncated incorrect innodb_ft_result_cache_limit value: '4294967296' +select @@innodb_ft_result_cache_limit; +@@innodb_ft_result_cache_limit +4294967295 set global innodb_ft_result_cache_limit=2000000000; diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_server_stopword_table_basic.result b/mysql-test/suite/sys_vars/r/innodb_ft_server_stopword_table_basic.result index 044e8f80951..1851c078e5c 100644 --- a/mysql-test/suite/sys_vars/r/innodb_ft_server_stopword_table_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_ft_server_stopword_table_basic.result @@ -16,6 +16,7 @@ INNODB_FT_SERVER_STOPWORD_TABLE select * from information_schema.session_variables where variable_name='innodb_ft_server_stopword_table'; VARIABLE_NAME VARIABLE_VALUE INNODB_FT_SERVER_STOPWORD_TABLE +call mtr.add_suppression("\\[ERROR\\] InnoDB: user stopword table Salmon does not exist."); set session innodb_ft_server_stopword_table='Salmon'; ERROR HY000: Variable 'innodb_ft_server_stopword_table' is a GLOBAL variable and should be set with SET GLOBAL set @@session.innodb_ft_server_stopword_table='Salmon'; diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_user_stopword_table_basic.result b/mysql-test/suite/sys_vars/r/innodb_ft_user_stopword_table_basic.result index 66298481693..ca54f5f7521 100644 --- a/mysql-test/suite/sys_vars/r/innodb_ft_user_stopword_table_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_ft_user_stopword_table_basic.result @@ -17,6 +17,7 @@ INNODB_FT_USER_STOPWORD_TABLE select * from information_schema.session_variables where variable_name='innodb_ft_user_stopword_table'; VARIABLE_NAME VARIABLE_VALUE INNODB_FT_USER_STOPWORD_TABLE +call mtr.add_suppression("\\[ERROR\\] InnoDB: user stopword table Salmon does not exist."); set session innodb_ft_user_stopword_table='Salmon'; ERROR 42000: Variable 'innodb_ft_user_stopword_table' can't be set to the value of 'Salmon' set @@session.innodb_ft_user_stopword_table='Salmon'; @@ -27,4 +28,3 @@ set global innodb_ft_user_stopword_table=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_ft_user_stopword_table' set global innodb_ft_user_stopword_table='Salmon'; ERROR 42000: Variable 'innodb_ft_user_stopword_table' can't be set to the value of 'Salmon' -SET @@session.innodb_ft_user_stopword_table=@start_global_value; diff --git a/mysql-test/suite/sys_vars/r/innodb_large_prefix_basic.result b/mysql-test/suite/sys_vars/r/innodb_large_prefix_basic.result index 3877988bbee..c6e803ffef8 100644 --- a/mysql-test/suite/sys_vars/r/innodb_large_prefix_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_large_prefix_basic.result @@ -1,29 +1,31 @@ SET @start_global_value = @@global.innodb_large_prefix; SELECT @start_global_value; @start_global_value -0 +1 Valid values are 'ON' and 'OFF' select @@global.innodb_large_prefix in (0, 1); @@global.innodb_large_prefix in (0, 1) 1 select @@global.innodb_large_prefix; @@global.innodb_large_prefix -0 +1 select @@session.innodb_large_prefix; ERROR 
HY000: Variable 'innodb_large_prefix' is a GLOBAL variable show global variables like 'innodb_large_prefix'; Variable_name Value -innodb_large_prefix OFF +innodb_large_prefix ON show session variables like 'innodb_large_prefix'; Variable_name Value -innodb_large_prefix OFF +innodb_large_prefix ON select * from information_schema.global_variables where variable_name='innodb_large_prefix'; VARIABLE_NAME VARIABLE_VALUE -INNODB_LARGE_PREFIX OFF +INNODB_LARGE_PREFIX ON select * from information_schema.session_variables where variable_name='innodb_large_prefix'; VARIABLE_NAME VARIABLE_VALUE -INNODB_LARGE_PREFIX OFF +INNODB_LARGE_PREFIX ON set global innodb_large_prefix='OFF'; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_large_prefix; @@global.innodb_large_prefix 0 @@ -34,6 +36,8 @@ select * from information_schema.session_variables where variable_name='innodb_l VARIABLE_NAME VARIABLE_VALUE INNODB_LARGE_PREFIX OFF set @@global.innodb_large_prefix=1; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_large_prefix; @@global.innodb_large_prefix 1 @@ -44,6 +48,8 @@ select * from information_schema.session_variables where variable_name='innodb_l VARIABLE_NAME VARIABLE_VALUE INNODB_LARGE_PREFIX ON set global innodb_large_prefix=0; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_large_prefix; @@global.innodb_large_prefix 0 @@ -54,6 +60,8 @@ select * from information_schema.session_variables where variable_name='innodb_l VARIABLE_NAME VARIABLE_VALUE INNODB_LARGE_PREFIX OFF set @@global.innodb_large_prefix='ON'; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html select @@global.innodb_large_prefix; @@global.innodb_large_prefix 1 @@ -87,6 +95,8 @@ INNODB_LARGE_PREFIX ON set global innodb_large_prefix='AUTO'; ERROR 42000: Variable 'innodb_large_prefix' can't be set to the value of 'AUTO' SET @@global.innodb_large_prefix = @start_global_value; +Warnings: +Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html SELECT @@global.innodb_large_prefix; @@global.innodb_large_prefix -0 +1 diff --git a/mysql-test/suite/sys_vars/r/innodb_lock_wait_timeout_basic.result b/mysql-test/suite/sys_vars/r/innodb_lock_wait_timeout_basic.result index 1dcc2d554ce..74b1d21d475 100644 --- a/mysql-test/suite/sys_vars/r/innodb_lock_wait_timeout_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_lock_wait_timeout_basic.result @@ -55,6 +55,104 @@ COUNT(@@GLOBAL.innodb_lock_wait_timeout) 1 Expected SELECT innodb_lock_wait_timeout = @@SESSION.innodb_lock_wait_timeout; ERROR 42S22: Unknown column 'innodb_lock_wait_timeout' in 'field list' +set @@global.innodb_lock_wait_timeout=100; +set @@global.innodb_lock_wait_timeout=DEFAULT; +select @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +50 +set @@session.innodb_lock_wait_timeout=100; +set @@session.innodb_lock_wait_timeout=DEFAULT; +select @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +50 +SET @@global.innodb_lock_wait_timeout=1; +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1 +SET @@global.innodb_lock_wait_timeout=1024; +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1024 +SET @@global.innodb_lock_wait_timeout=1073741824; +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout=1; +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1 +SET @@session.innodb_lock_wait_timeout=1024; +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1024 +SET @@session.innodb_lock_wait_timeout=1073741824; +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 +SET @@global.innodb_lock_wait_timeout="t"; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@global.innodb_lock_wait_timeout=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_lock_wait_timeout value: '-1024' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1 +SET @@global.innodb_lock_wait_timeout=1073741825; +Warnings: +Warning 1292 Truncated incorrect innodb_lock_wait_timeout value: '1073741825' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@global.innodb_lock_wait_timeout=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@global.innodb_lock_wait_timeout=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@global.innodb_lock_wait_timeout=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@global.innodb_lock_wait_timeout; +@@global.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout="T"; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_lock_wait_timeout value: '-1024' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1 +SET 
@@session.innodb_lock_wait_timeout=1073999999; +Warnings: +Warning 1292 Truncated incorrect innodb_lock_wait_timeout value: '1073999999' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 +SET @@session.innodb_lock_wait_timeout=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_lock_wait_timeout' +SELECT @@session.innodb_lock_wait_timeout; +@@session.innodb_lock_wait_timeout +1073741824 SET @@global.innodb_lock_wait_timeout = @start_global_value; SELECT @@global.innodb_lock_wait_timeout; @@global.innodb_lock_wait_timeout diff --git a/mysql-test/suite/sys_vars/r/innodb_log_checkpoint_now_basic.result b/mysql-test/suite/sys_vars/r/innodb_log_checkpoint_now_basic.result index d9d067c2cf9..4774c2fe1d7 100644 --- a/mysql-test/suite/sys_vars/r/innodb_log_checkpoint_now_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_log_checkpoint_now_basic.result @@ -1 +1,80 @@ -XtraDB extension +SET @start_global_value = @@global.innodb_log_checkpoint_now; +SELECT @start_global_value; +@start_global_value +0 +select @@global.innodb_log_checkpoint_now in (0, 1); +@@global.innodb_log_checkpoint_now in (0, 1) +1 +select @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +select @@session.innodb_log_checkpoint_now; +ERROR HY000: Variable 'innodb_log_checkpoint_now' is a GLOBAL variable +show global variables like 'innodb_log_checkpoint_now'; +Variable_name Value +innodb_log_checkpoint_now OFF +show session variables like 'innodb_log_checkpoint_now'; +Variable_name Value +innodb_log_checkpoint_now OFF +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +set global innodb_log_checkpoint_now=1; +select @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +set @@global.innodb_log_checkpoint_now=0; +select @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +set global innodb_log_checkpoint_now=ON; +select @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +select * from 
information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +set global innodb_log_checkpoint_now=OFF; +select @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_CHECKPOINT_NOW OFF +set session innodb_log_checkpoint_now='some'; +ERROR HY000: Variable 'innodb_log_checkpoint_now' is a GLOBAL variable and should be set with SET GLOBAL +set @@session.innodb_log_checkpoint_now='some'; +ERROR HY000: Variable 'innodb_log_checkpoint_now' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_log_checkpoint_now=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_log_checkpoint_now' +set global innodb_log_checkpoint_now='foo'; +ERROR 42000: Variable 'innodb_log_checkpoint_now' can't be set to the value of 'foo' +set global innodb_log_checkpoint_now=-2; +ERROR 42000: Variable 'innodb_log_checkpoint_now' can't be set to the value of '-2' +set global innodb_log_checkpoint_now=1e1; +ERROR 42000: Incorrect argument type to variable 'innodb_log_checkpoint_now' +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; +@@global.innodb_log_checkpoint_now +0 diff --git a/mysql-test/suite/sys_vars/r/innodb_log_checksums_basic.result b/mysql-test/suite/sys_vars/r/innodb_log_checksums_basic.result new file mode 100644 index 00000000000..6679ca87249 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_log_checksums_basic.result @@ -0,0 +1,42 @@ +SET @orig = @@global.innodb_log_checksums; +SELECT @orig; +@orig +1 +SET GLOBAL innodb_log_checksums = 'crc32'; +ERROR 42000: Variable 'innodb_log_checksums' can't be set to the value of 'crc32' +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET GLOBAL innodb_log_checksums = 2; +ERROR 42000: Variable 'innodb_log_checksums' can't be set to the value of '2' +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET GLOBAL innodb_log_checksums = 1e2; +ERROR 42000: Incorrect argument type to variable 'innodb_log_checksums' +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET GLOBAL innodb_log_checksums = 1.0; +ERROR 42000: Incorrect argument type to variable 'innodb_log_checksums' +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET innodb_log_checksums = OFF; +ERROR HY000: Variable 'innodb_log_checksums' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET GLOBAL innodb_log_checksums = OFF; +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +0 +SET GLOBAL innodb_log_checksums = default; +SET GLOBAL innodb_log_checksums = ON; +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 +SET GLOBAL innodb_log_checksums = @orig; +SELECT @@global.innodb_log_checksums; +@@global.innodb_log_checksums +1 diff --git a/mysql-test/suite/sys_vars/r/innodb_log_write_ahead_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_log_write_ahead_size_basic.result new file mode 100644 index 00000000000..5c9eb69de50 --- /dev/null +++ 
b/mysql-test/suite/sys_vars/r/innodb_log_write_ahead_size_basic.result @@ -0,0 +1,88 @@ +SET @start_global_value = @@global.innodb_log_write_ahead_size; +SET global innodb_log_write_ahead_size=4096; +Valid values are positive number +SELECT @@global.innodb_log_write_ahead_size >= 512; +@@global.innodb_log_write_ahead_size >= 512 +1 +SELECT @@global.innodb_log_write_ahead_size <= 16*1024; +@@global.innodb_log_write_ahead_size <= 16*1024 +1 +SELECT @@session.innodb_log_write_ahead_size; +ERROR HY000: Variable 'innodb_log_write_ahead_size' is a GLOBAL variable +SHOW global variables LIKE 'innodb_log_write_ahead_size'; +Variable_name Value +innodb_log_write_ahead_size 4096 +SHOW session variables LIKE 'innodb_log_write_ahead_size'; +Variable_name Value +innodb_log_write_ahead_size 4096 +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_WRITE_AHEAD_SIZE 4096 +SELECT * FROM information_schema.session_variables +WHERE variable_name='innodb_log_write_ahead_size'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_WRITE_AHEAD_SIZE 4096 +SET global innodb_log_write_ahead_size=1024; +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +1024 +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_WRITE_AHEAD_SIZE 1024 +SELECT * FROM information_schema.session_variables +WHERE variable_name='innodb_log_write_ahead_size'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_WRITE_AHEAD_SIZE 1024 +SET session innodb_log_write_ahead_size=2048; +ERROR HY000: Variable 'innodb_log_write_ahead_size' is a GLOBAL variable and should be set with SET GLOBAL +SET global innodb_log_write_ahead_size=512; +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +512 +SET global innodb_log_write_ahead_size=2048; +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +2048 +SET global innodb_log_write_ahead_size=4096; +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +4096 +SET global innodb_log_write_ahead_size=0; +Warnings: +Warning 1292 Truncated incorrect innodb_log_write_ahead_size value: '0' +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +512 +SET global innodb_log_write_ahead_size=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_log_write_ahead_size value: '-1024' +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +512 +SET global innodb_log_write_ahead_size=3000; +Warnings: +Warning 1292 Truncated incorrect innodb_log_write_ahead_size value: '3000' +Warning 1210 innodb_log_write_ahead_size should be set 2^n value and larger than 512. 
+Warning 1210 Setting innodb_log_write_ahead_size to 4096 +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +4096 +SET global innodb_log_write_ahead_size=1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_log_write_ahead_size' +SET global innodb_log_write_ahead_size=1e1; +ERROR 42000: Incorrect argument type to variable 'innodb_log_write_ahead_size' +SET global innodb_log_write_ahead_size="foo"; +ERROR 42000: Incorrect argument type to variable 'innodb_log_write_ahead_size' +SET global innodb_log_write_ahead_size=-7; +Warnings: +Warning 1292 Truncated incorrect innodb_log_write_ahead_size value: '-7' +SELECT @@global.innodb_log_write_ahead_size; +@@global.innodb_log_write_ahead_size +512 +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_LOG_WRITE_AHEAD_SIZE 512 +SET @@global.innodb_log_write_ahead_size = @start_global_value; diff --git a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_basic.result index d705624eb53..13ae9821752 100644 --- a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_basic.result @@ -2,11 +2,17 @@ SET @global_start_value = @@global.innodb_max_dirty_pages_pct; SELECT @global_start_value; @global_start_value 75 +SET @global_start_max_dirty_lwm_value = @@global.innodb_max_dirty_pages_pct_lwm; +SELECT @global_start_max_dirty_lwm_value; +@global_start_max_dirty_lwm_value +0 +SET @@global.innodb_max_dirty_pages_pct_lwm = 0; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +0.000000 '#--------------------FN_DYNVARS_046_01------------------------#' SET @@global.innodb_max_dirty_pages_pct = 0; -Warnings: -Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '0' -SET @@global.innodb_max_dirty_pages_pct = @global_start_value; +SET @@global.innodb_max_dirty_pages_pct = DEFAULT; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 75.000000 @@ -19,18 +25,14 @@ SELECT @@innodb_max_dirty_pages_pct; SELECT local.innodb_max_dirty_pages_pct; ERROR 42S02: Unknown table 'local' in field list SET global innodb_max_dirty_pages_pct = 0; -Warnings: -Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '0' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 '#--------------------FN_DYNVARS_046_03------------------------#' -SET @@global.innodb_max_dirty_pages_pct = 0; -Warnings: -Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '0' +SET @@global.innodb_max_dirty_pages_pct = 0.0; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 SET @@global.innodb_max_dirty_pages_pct = 1; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct @@ -39,30 +41,94 @@ SET @@global.innodb_max_dirty_pages_pct = 99; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 99.000000 -'#--------------------FN_DYNVARS_046_04-------------------------#' +'#--------------------FN_DYNVARS_046_04------------------------#' +SET @@global.innodb_max_dirty_pages_pct_lwm = @global_start_value - 1; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +74.000000 +SET @@global.innodb_max_dirty_pages_pct = @global_start_value - 2; +Warnings: +Warning 1210 
innodb_max_dirty_pages_pct cannot be set lower than innodb_max_dirty_pages_pct_lwm. +Warning 1210 Lowering innodb_max_dirty_page_pct_lwm to 73.000000 +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +73.000000 +'#--------------------FN_DYNVARS_046_05-------------------------#' SET @@global.innodb_max_dirty_pages_pct = -1; Warnings: Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '-1' +Warning 1210 innodb_max_dirty_pages_pct cannot be set lower than innodb_max_dirty_pages_pct_lwm. +Warning 1210 Lowering innodb_max_dirty_page_pct_lwm to 0.000000 +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +0.000000 +SET @@global.innodb_max_dirty_pages_pct = -1024; +Warnings: +Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '-1024' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 SET @@global.innodb_max_dirty_pages_pct = "T"; ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 SET @@global.innodb_max_dirty_pages_pct = "Y"; ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 +SET @@global.innodb_max_dirty_pages_pct = 100; +Warnings: +Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '100' +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 SET @@global.innodb_max_dirty_pages_pct = 1001; Warnings: Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '1001' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 99.999000 -'#----------------------FN_DYNVARS_046_05------------------------#' +SET @@global.innodb_max_dirty_pages_pct = 100000; +Warnings: +Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '100000' +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 +SET @@global.innodb_max_dirty_pages_pct = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 +SET @@global.innodb_max_dirty_pages_pct = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 +SET @@global.innodb_max_dirty_pages_pct = 1.1; +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +1.100000 +set global innodb_max_dirty_pages_pct = 0.1; +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +0.100000 +set global innodb_max_dirty_pages_pct = 31.34; +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +31.340000 +set global innodb_max_dirty_pages_pct = 100; +Warnings: +Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '100' +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 +set global innodb_max_dirty_pages_pct = 99.999; +SELECT @@global.innodb_max_dirty_pages_pct; +@@global.innodb_max_dirty_pages_pct +99.999000 +'#----------------------FN_DYNVARS_046_06------------------------#' SELECT @@global.innodb_max_dirty_pages_pct = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE 
VARIABLE_NAME='innodb_max_dirty_pages_pct'; @@ -76,7 +142,7 @@ SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct'; VARIABLE_VALUE 99.999000 -'#---------------------FN_DYNVARS_046_06-------------------------#' +'#---------------------FN_DYNVARS_046_07-------------------------#' SET @@global.innodb_max_dirty_pages_pct = OFF; ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' SELECT @@global.innodb_max_dirty_pages_pct; @@ -87,18 +153,20 @@ ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 99.999000 -'#---------------------FN_DYNVARS_046_07----------------------#' +'#---------------------FN_DYNVARS_046_08----------------------#' SET @@global.innodb_max_dirty_pages_pct = TRUE; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 1.000000 SET @@global.innodb_max_dirty_pages_pct = FALSE; -Warnings: -Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct value: '0' SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct -0.001000 +0.000000 SET @@global.innodb_max_dirty_pages_pct = @global_start_value; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 75.000000 +SET @@global.innodb_max_dirty_pages_pct_lwm = @global_start_max_dirty_lwm_value; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +0.000000 diff --git a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_func.result b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_func.result index eb0de047df1..8b68f182789 100644 --- a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_func.result +++ b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_func.result @@ -1,19 +1,26 @@ SET @innodb_max_dirty_pages_pct = @@global.innodb_max_dirty_pages_pct; '#--------------------FN_DYNVARS_044_02-------------------------#' SET @@global.innodb_max_dirty_pages_pct = 80; +'connect (con1,localhost,root,,,,)' connect con1,localhost,root,,,,; +'connection con1' connection con1; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 80.000000 SET @@global.innodb_max_dirty_pages_pct = 70; +'connect (con2,localhost,root,,,,)' connect con2,localhost,root,,,,; +'connection con2' connection con2; SELECT @@global.innodb_max_dirty_pages_pct; @@global.innodb_max_dirty_pages_pct 70.000000 +'connection default' connection default; +'disconnect con2' disconnect con2; +'disconnect con1' disconnect con1; SET @@global.innodb_max_dirty_pages_pct = @innodb_max_dirty_pages_pct; '#--------------------FN_DYNVARS_044_02-------------------------#' diff --git a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result index 676ec103664..b6394d03b46 100644 --- a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result @@ -1,23 +1,23 @@ SET @pct_lwm_start_value = @@global.innodb_max_dirty_pages_pct_lwm; SELECT @pct_lwm_start_value; @pct_lwm_start_value -0.001 +0 SET @pct_start_value = @@global.innodb_max_dirty_pages_pct; SELECT @pct_start_value; @pct_start_value 75 '#--------------------FN_DYNVARS_046_01------------------------#' SET @@global.innodb_max_dirty_pages_pct_lwm = 0; -SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_lwm_start_value; +SET 
@@global.innodb_max_dirty_pages_pct_lwm = DEFAULT; SELECT @@global.innodb_max_dirty_pages_pct_lwm; @@global.innodb_max_dirty_pages_pct_lwm -0.001000 +0.000000 '#---------------------FN_DYNVARS_046_02-------------------------#' SET innodb_max_dirty_pages_pct_lwm = 1; ERROR HY000: Variable 'innodb_max_dirty_pages_pct_lwm' is a GLOBAL variable and should be set with SET GLOBAL SELECT @@innodb_max_dirty_pages_pct_lwm; @@innodb_max_dirty_pages_pct_lwm -0.001000 +0.000000 SELECT local.innodb_max_dirty_pages_pct_lwm; ERROR 42S02: Unknown table 'local' in field list SET global innodb_max_dirty_pages_pct_lwm = 0; @@ -57,6 +57,18 @@ Warning 1210 Setting innodb_max_dirty_page_pct_lwm to 75.000000 SELECT @@global.innodb_max_dirty_pages_pct_lwm; @@global.innodb_max_dirty_pages_pct_lwm 75.000000 +SET @@global.innodb_max_dirty_pages_pct_lwm = 0.0; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +0.000000 +SET @@global.innodb_max_dirty_pages_pct_lwm = 1.1; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +1.100000 +SET @@global.innodb_max_dirty_pages_pct_lwm = 51.12; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +51.120000 SET @@global.innodb_max_dirty_pages_pct_lwm = 100; Warnings: Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct_lwm value: '100' @@ -65,6 +77,16 @@ Warning 1210 Setting innodb_max_dirty_page_pct_lwm to 75.000000 SELECT @@global.innodb_max_dirty_pages_pct_lwm; @@global.innodb_max_dirty_pages_pct_lwm 75.000000 +SET @@global.innodb_max_dirty_pages_pct_lwm = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm' +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +75.000000 +SET @@global.innodb_max_dirty_pages_pct_lwm = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm' +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +@@global.innodb_max_dirty_pages_pct_lwm +75.000000 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_max_dirty_pages_pct_lwm = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES @@ -106,4 +128,4 @@ SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_lwm_start_value; SELECT @@global.innodb_max_dirty_pages_pct_lwm; @@global.innodb_max_dirty_pages_pct_lwm -0.001000 +0.000000 diff --git a/mysql-test/suite/sys_vars/r/innodb_max_purge_lag_basic.result b/mysql-test/suite/sys_vars/r/innodb_max_purge_lag_basic.result index a01d2f2dd0c..bf526fc1c3d 100644 --- a/mysql-test/suite/sys_vars/r/innodb_max_purge_lag_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_max_purge_lag_basic.result @@ -33,13 +33,32 @@ SET @@global.innodb_max_purge_lag = 4294967295; SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag 4294967295 -'#--------------------FN_DYNVARS_046_04-------------------------#' +'#--------------------FN_DYNVARS_046_04------------------------#' +SET @@global.innodb_max_purge_lag = 4294967296; +SELECT @@global.innodb_max_purge_lag IN (4294967296,4294967295); +@@global.innodb_max_purge_lag IN (4294967296,4294967295) +1 +SET @@global.innodb_max_purge_lag = 12345678901; +SELECT @@global.innodb_max_purge_lag IN (12345678901,4294967295); +@@global.innodb_max_purge_lag IN (12345678901,4294967295) +1 +SET @@global.innodb_max_purge_lag = 18446744073709551615; +SELECT @@global.innodb_max_purge_lag IN (18446744073709551615,4294967295); 
+@@global.innodb_max_purge_lag IN (18446744073709551615,4294967295) +1 +'#--------------------FN_DYNVARS_046_05-------------------------#' SET @@global.innodb_max_purge_lag = -1; Warnings: Warning 1292 Truncated incorrect innodb_max_purge_lag value: '-1' SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag 0 +SET @@global.innodb_max_purge_lag = -1024; +Warnings: +Warning 1292 Truncated incorrect innodb_max_purge_lag value: '-1024' +SELECT @@global.innodb_max_purge_lag; +@@global.innodb_max_purge_lag +0 SET @@global.innodb_max_purge_lag = "T"; ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' SELECT @@global.innodb_max_purge_lag; @@ -50,11 +69,22 @@ ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag 0 -SET @@global.innodb_max_purge_lag = 1001; +SET @@global.innodb_max_purge_lag = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag -1001 -'#----------------------FN_DYNVARS_046_05------------------------#' +0 +SET @@global.innodb_max_purge_lag = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' +SELECT @@global.innodb_max_purge_lag; +@@global.innodb_max_purge_lag +0 +SET @@global.innodb_max_purge_lag = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' +SELECT @@global.innodb_max_purge_lag; +@@global.innodb_max_purge_lag +0 +'#----------------------FN_DYNVARS_046_06------------------------#' SELECT @@global.innodb_max_purge_lag = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_purge_lag'; @@ -63,23 +93,23 @@ VARIABLE_VALUE 1 SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag -1001 +0 SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_purge_lag'; VARIABLE_VALUE -1001 -'#---------------------FN_DYNVARS_046_06-------------------------#' +0 +'#---------------------FN_DYNVARS_046_07-------------------------#' SET @@global.innodb_max_purge_lag = OFF; ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag -1001 +0 SET @@global.innodb_max_purge_lag = ON; ERROR 42000: Incorrect argument type to variable 'innodb_max_purge_lag' SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag -1001 -'#---------------------FN_DYNVARS_046_07----------------------#' +0 +'#---------------------FN_DYNVARS_046_08----------------------#' SET @@global.innodb_max_purge_lag = TRUE; SELECT @@global.innodb_max_purge_lag; @@global.innodb_max_purge_lag diff --git a/mysql-test/suite/sys_vars/r/innodb_max_undo_log_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_max_undo_log_size_basic.result new file mode 100644 index 00000000000..3854060b33b --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_max_undo_log_size_basic.result @@ -0,0 +1,54 @@ +'#---------------------BS_STVARS_035_01----------------------#' +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +COUNT(@@GLOBAL.innodb_max_undo_log_size) +1 +1 Expected +'#---------------------BS_STVARS_035_02----------------------#' +SET @@GLOBAL.innodb_max_undo_log_size=1073741824; +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +COUNT(@@GLOBAL.innodb_max_undo_log_size) +1 +1 Expected +SET @@GLOBAL.innodb_max_undo_log_size=18446744073709551615; +SELECT @@GLOBAL.innodb_max_undo_log_size; 
+@@GLOBAL.innodb_max_undo_log_size +18446744073709551615 +18446744073709551615 Expected +SET @@GLOBAL.innodb_max_undo_log_size=1073741824; +'#---------------------BS_STVARS_035_03----------------------#' +SELECT @@GLOBAL.innodb_max_undo_log_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_max_undo_log_size'; +@@GLOBAL.innodb_max_undo_log_size = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +COUNT(@@GLOBAL.innodb_max_undo_log_size) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_max_undo_log_size'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +'#---------------------BS_STVARS_035_04----------------------#' +SELECT @@innodb_max_undo_log_size = @@GLOBAL.innodb_max_undo_log_size; +@@innodb_max_undo_log_size = @@GLOBAL.innodb_max_undo_log_size +1 +1 Expected +'#---------------------BS_STVARS_035_05----------------------#' +SELECT COUNT(@@innodb_max_undo_log_size); +COUNT(@@innodb_max_undo_log_size) +1 +1 Expected +SELECT COUNT(@@local.innodb_max_undo_log_size); +ERROR HY000: Variable 'innodb_max_undo_log_size' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_max_undo_log_size); +ERROR HY000: Variable 'innodb_max_undo_log_size' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +COUNT(@@GLOBAL.innodb_max_undo_log_size) +1 +1 Expected diff --git a/mysql-test/suite/sys_vars/r/innodb_merge_threshold_set_all_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_merge_threshold_set_all_debug_basic.result new file mode 100644 index 00000000000..6e325d0be38 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_merge_threshold_set_all_debug_basic.result @@ -0,0 +1,28 @@ +# +# Basic test for innodb_merge_threshold_set_all_debug +# +SELECT @@global.innodb_merge_threshold_set_all_debug; +@@global.innodb_merge_threshold_set_all_debug +50 +set global innodb_merge_threshold_set_all_debug = 1; +SELECT @@global.innodb_merge_threshold_set_all_debug; +@@global.innodb_merge_threshold_set_all_debug +1 +set global innodb_merge_threshold_set_all_debug = 51; +Warnings: +Warning 1292 Truncated incorrect innodb_merge_threshold_set_all_d value: '51' +SELECT @@global.innodb_merge_threshold_set_all_debug; +@@global.innodb_merge_threshold_set_all_debug +50 +set global innodb_merge_threshold_set_all_debug = 0; +Warnings: +Warning 1292 Truncated incorrect innodb_merge_threshold_set_all_d value: '0' +SELECT @@global.innodb_merge_threshold_set_all_debug; +@@global.innodb_merge_threshold_set_all_debug +1 +set innodb_merge_threshold_set_all_debug = 50; +ERROR HY000: Variable 'innodb_merge_threshold_set_all_debug' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_merge_threshold_set_all_debug = 50; +SELECT @@global.innodb_merge_threshold_set_all_debug; +@@global.innodb_merge_threshold_set_all_debug +50 diff --git a/mysql-test/suite/sys_vars/r/innodb_mirrored_log_groups_basic.result b/mysql-test/suite/sys_vars/r/innodb_mirrored_log_groups_basic.result deleted file mode 100644 index 1645d8163ae..00000000000 --- a/mysql-test/suite/sys_vars/r/innodb_mirrored_log_groups_basic.result +++ /dev/null @@ -1,53 +0,0 @@ -'#---------------------BS_STVARS_037_01----------------------#' -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); -COUNT(@@GLOBAL.innodb_mirrored_log_groups) -1 -1 Expected -'#---------------------BS_STVARS_037_02----------------------#' -SET 
@@GLOBAL.innodb_mirrored_log_groups=1; -ERROR HY000: Variable 'innodb_mirrored_log_groups' is a read only variable -Expected error 'Read only variable' -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); -COUNT(@@GLOBAL.innodb_mirrored_log_groups) -1 -1 Expected -'#---------------------BS_STVARS_037_03----------------------#' -SELECT @@GLOBAL.innodb_mirrored_log_groups = VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_mirrored_log_groups'; -@@GLOBAL.innodb_mirrored_log_groups = VARIABLE_VALUE -1 -1 Expected -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); -COUNT(@@GLOBAL.innodb_mirrored_log_groups) -1 -1 Expected -SELECT COUNT(VARIABLE_VALUE) -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_mirrored_log_groups'; -COUNT(VARIABLE_VALUE) -1 -1 Expected -'#---------------------BS_STVARS_037_04----------------------#' -SELECT @@innodb_mirrored_log_groups = @@GLOBAL.innodb_mirrored_log_groups; -@@innodb_mirrored_log_groups = @@GLOBAL.innodb_mirrored_log_groups -1 -1 Expected -'#---------------------BS_STVARS_037_05----------------------#' -SELECT COUNT(@@innodb_mirrored_log_groups); -COUNT(@@innodb_mirrored_log_groups) -1 -1 Expected -SELECT COUNT(@@local.innodb_mirrored_log_groups); -ERROR HY000: Variable 'innodb_mirrored_log_groups' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@SESSION.innodb_mirrored_log_groups); -ERROR HY000: Variable 'innodb_mirrored_log_groups' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); -COUNT(@@GLOBAL.innodb_mirrored_log_groups) -1 -1 Expected -SELECT innodb_mirrored_log_groups = @@SESSION.innodb_mirrored_log_groups; -ERROR 42S22: Unknown column 'innodb_mirrored_log_groups' in 'field list' -Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result index 6c7051dc3d0..3764b00688b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result @@ -4,7 +4,6 @@ name status metadata_table_handles_opened disabled metadata_table_handles_closed disabled metadata_table_reference_count disabled -metadata_mem_pool_size disabled lock_deadlocks disabled lock_timeouts disabled lock_rec_lock_waits disabled @@ -47,7 +46,6 @@ buffer_data_written disabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled -buffer_flush_batch_rescan disabled buffer_flush_batch_total_pages disabled buffer_flush_batches disabled buffer_flush_batch_pages disabled @@ -55,6 +53,19 @@ buffer_flush_neighbor_total_pages disabled buffer_flush_neighbor disabled buffer_flush_neighbor_pages disabled buffer_flush_n_to_flush_requested disabled +buffer_flush_n_to_flush_by_age disabled +buffer_flush_adaptive_avg_time_slot disabled +buffer_LRU_batch_flush_avg_time_slot disabled +buffer_flush_adaptive_avg_time_thread disabled +buffer_LRU_batch_flush_avg_time_thread disabled +buffer_flush_adaptive_avg_time_est disabled +buffer_LRU_batch_flush_avg_time_est disabled +buffer_flush_avg_time disabled +buffer_flush_adaptive_avg_pass disabled +buffer_LRU_batch_flush_avg_pass disabled +buffer_flush_avg_pass disabled +buffer_LRU_get_free_loops disabled +buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled 
@@ -157,12 +168,13 @@ log_lsn_checkpoint_age disabled log_lsn_buf_pool_oldest disabled log_max_modified_age_async disabled log_max_modified_age_sync disabled -log_pending_log_writes disabled +log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled log_waits disabled log_write_requests disabled log_writes disabled +log_padded disabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -223,10 +235,13 @@ innodb_dblwr_pages_written disabled innodb_page_size disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled dml_reads disabled dml_inserts disabled dml_deletes disabled @@ -239,6 +254,8 @@ ddl_background_drop_indexes disabled ddl_background_drop_tables disabled ddl_online_create_index disabled ddl_pending_alter_table disabled +ddl_sort_file_alter_table disabled +ddl_log_file_alter_table disabled icp_attempts disabled icp_no_match disabled icp_out_of_range disabled @@ -280,10 +297,13 @@ lock_row_lock_waits disabled lock_row_lock_time_avg disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled set global innodb_monitor_enable = "%lock*"; ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*' set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%"; @@ -408,7 +428,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 2 NULL 2 enabled metadata_table_handles_closed 1 NULL 1 1 NULL 1 enabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_disable = module_metadata; set global innodb_monitor_reset = module_metadata; select name, max_count, min_count, count, @@ -419,7 +438,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 NULL NULL 0 disabled metadata_table_handles_closed 1 NULL 1 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_reset_all = module_metadata; select name, max_count, min_count, count, max_count_reset, min_count_reset, count_reset, status @@ -429,7 +447,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled metadata_table_handles_closed NULL NULL 0 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_enable = module_trx; begin; insert into monitor_test values(9); diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result index 6c7051dc3d0..3764b00688b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result +++ 
b/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result @@ -4,7 +4,6 @@ name status metadata_table_handles_opened disabled metadata_table_handles_closed disabled metadata_table_reference_count disabled -metadata_mem_pool_size disabled lock_deadlocks disabled lock_timeouts disabled lock_rec_lock_waits disabled @@ -47,7 +46,6 @@ buffer_data_written disabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled -buffer_flush_batch_rescan disabled buffer_flush_batch_total_pages disabled buffer_flush_batches disabled buffer_flush_batch_pages disabled @@ -55,6 +53,19 @@ buffer_flush_neighbor_total_pages disabled buffer_flush_neighbor disabled buffer_flush_neighbor_pages disabled buffer_flush_n_to_flush_requested disabled +buffer_flush_n_to_flush_by_age disabled +buffer_flush_adaptive_avg_time_slot disabled +buffer_LRU_batch_flush_avg_time_slot disabled +buffer_flush_adaptive_avg_time_thread disabled +buffer_LRU_batch_flush_avg_time_thread disabled +buffer_flush_adaptive_avg_time_est disabled +buffer_LRU_batch_flush_avg_time_est disabled +buffer_flush_avg_time disabled +buffer_flush_adaptive_avg_pass disabled +buffer_LRU_batch_flush_avg_pass disabled +buffer_flush_avg_pass disabled +buffer_LRU_get_free_loops disabled +buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled @@ -157,12 +168,13 @@ log_lsn_checkpoint_age disabled log_lsn_buf_pool_oldest disabled log_max_modified_age_async disabled log_max_modified_age_sync disabled -log_pending_log_writes disabled +log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled log_waits disabled log_write_requests disabled log_writes disabled +log_padded disabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -223,10 +235,13 @@ innodb_dblwr_pages_written disabled innodb_page_size disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled dml_reads disabled dml_inserts disabled dml_deletes disabled @@ -239,6 +254,8 @@ ddl_background_drop_indexes disabled ddl_background_drop_tables disabled ddl_online_create_index disabled ddl_pending_alter_table disabled +ddl_sort_file_alter_table disabled +ddl_log_file_alter_table disabled icp_attempts disabled icp_no_match disabled icp_out_of_range disabled @@ -280,10 +297,13 @@ lock_row_lock_waits disabled lock_row_lock_time_avg disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled set global innodb_monitor_enable = "%lock*"; ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*' set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%"; @@ -408,7 +428,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 2 NULL 2 enabled metadata_table_handles_closed 1 NULL 1 1 NULL 1 enabled metadata_table_reference_count NULL 
NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_disable = module_metadata; set global innodb_monitor_reset = module_metadata; select name, max_count, min_count, count, @@ -419,7 +438,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 NULL NULL 0 disabled metadata_table_handles_closed 1 NULL 1 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_reset_all = module_metadata; select name, max_count, min_count, count, max_count_reset, min_count_reset, count_reset, status @@ -429,7 +447,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled metadata_table_handles_closed NULL NULL 0 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_enable = module_trx; begin; insert into monitor_test values(9); diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result index 6c7051dc3d0..3764b00688b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result @@ -4,7 +4,6 @@ name status metadata_table_handles_opened disabled metadata_table_handles_closed disabled metadata_table_reference_count disabled -metadata_mem_pool_size disabled lock_deadlocks disabled lock_timeouts disabled lock_rec_lock_waits disabled @@ -47,7 +46,6 @@ buffer_data_written disabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled -buffer_flush_batch_rescan disabled buffer_flush_batch_total_pages disabled buffer_flush_batches disabled buffer_flush_batch_pages disabled @@ -55,6 +53,19 @@ buffer_flush_neighbor_total_pages disabled buffer_flush_neighbor disabled buffer_flush_neighbor_pages disabled buffer_flush_n_to_flush_requested disabled +buffer_flush_n_to_flush_by_age disabled +buffer_flush_adaptive_avg_time_slot disabled +buffer_LRU_batch_flush_avg_time_slot disabled +buffer_flush_adaptive_avg_time_thread disabled +buffer_LRU_batch_flush_avg_time_thread disabled +buffer_flush_adaptive_avg_time_est disabled +buffer_LRU_batch_flush_avg_time_est disabled +buffer_flush_avg_time disabled +buffer_flush_adaptive_avg_pass disabled +buffer_LRU_batch_flush_avg_pass disabled +buffer_flush_avg_pass disabled +buffer_LRU_get_free_loops disabled +buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled @@ -157,12 +168,13 @@ log_lsn_checkpoint_age disabled log_lsn_buf_pool_oldest disabled log_max_modified_age_async disabled log_max_modified_age_sync disabled -log_pending_log_writes disabled +log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled log_waits disabled log_write_requests disabled log_writes disabled +log_padded disabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -223,10 +235,13 @@ innodb_dblwr_pages_written disabled innodb_page_size disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled 
innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled dml_reads disabled dml_inserts disabled dml_deletes disabled @@ -239,6 +254,8 @@ ddl_background_drop_indexes disabled ddl_background_drop_tables disabled ddl_online_create_index disabled ddl_pending_alter_table disabled +ddl_sort_file_alter_table disabled +ddl_log_file_alter_table disabled icp_attempts disabled icp_no_match disabled icp_out_of_range disabled @@ -280,10 +297,13 @@ lock_row_lock_waits disabled lock_row_lock_time_avg disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled set global innodb_monitor_enable = "%lock*"; ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*' set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%"; @@ -408,7 +428,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 2 NULL 2 enabled metadata_table_handles_closed 1 NULL 1 1 NULL 1 enabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_disable = module_metadata; set global innodb_monitor_reset = module_metadata; select name, max_count, min_count, count, @@ -419,7 +438,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 NULL NULL 0 disabled metadata_table_handles_closed 1 NULL 1 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_reset_all = module_metadata; select name, max_count, min_count, count, max_count_reset, min_count_reset, count_reset, status @@ -429,7 +447,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled metadata_table_handles_closed NULL NULL 0 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_enable = module_trx; begin; insert into monitor_test values(9); diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result index 6c7051dc3d0..3764b00688b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result @@ -4,7 +4,6 @@ name status metadata_table_handles_opened disabled metadata_table_handles_closed disabled metadata_table_reference_count disabled -metadata_mem_pool_size disabled lock_deadlocks disabled lock_timeouts disabled lock_rec_lock_waits disabled @@ -47,7 +46,6 @@ buffer_data_written disabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled -buffer_flush_batch_rescan disabled buffer_flush_batch_total_pages disabled buffer_flush_batches disabled buffer_flush_batch_pages disabled @@ -55,6 +53,19 @@ buffer_flush_neighbor_total_pages disabled buffer_flush_neighbor 
disabled buffer_flush_neighbor_pages disabled buffer_flush_n_to_flush_requested disabled +buffer_flush_n_to_flush_by_age disabled +buffer_flush_adaptive_avg_time_slot disabled +buffer_LRU_batch_flush_avg_time_slot disabled +buffer_flush_adaptive_avg_time_thread disabled +buffer_LRU_batch_flush_avg_time_thread disabled +buffer_flush_adaptive_avg_time_est disabled +buffer_LRU_batch_flush_avg_time_est disabled +buffer_flush_avg_time disabled +buffer_flush_adaptive_avg_pass disabled +buffer_LRU_batch_flush_avg_pass disabled +buffer_flush_avg_pass disabled +buffer_LRU_get_free_loops disabled +buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled @@ -157,12 +168,13 @@ log_lsn_checkpoint_age disabled log_lsn_buf_pool_oldest disabled log_max_modified_age_async disabled log_max_modified_age_sync disabled -log_pending_log_writes disabled +log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled log_waits disabled log_write_requests disabled log_writes disabled +log_padded disabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -223,10 +235,13 @@ innodb_dblwr_pages_written disabled innodb_page_size disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled dml_reads disabled dml_inserts disabled dml_deletes disabled @@ -239,6 +254,8 @@ ddl_background_drop_indexes disabled ddl_background_drop_tables disabled ddl_online_create_index disabled ddl_pending_alter_table disabled +ddl_sort_file_alter_table disabled +ddl_log_file_alter_table disabled icp_attempts disabled icp_no_match disabled icp_out_of_range disabled @@ -280,10 +297,13 @@ lock_row_lock_waits disabled lock_row_lock_time_avg disabled innodb_rwlock_s_spin_waits disabled innodb_rwlock_x_spin_waits disabled +innodb_rwlock_sx_spin_waits disabled innodb_rwlock_s_spin_rounds disabled innodb_rwlock_x_spin_rounds disabled +innodb_rwlock_sx_spin_rounds disabled innodb_rwlock_s_os_waits disabled innodb_rwlock_x_os_waits disabled +innodb_rwlock_sx_os_waits disabled set global innodb_monitor_enable = "%lock*"; ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*' set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%"; @@ -408,7 +428,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 2 NULL 2 enabled metadata_table_handles_closed 1 NULL 1 1 NULL 1 enabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_disable = module_metadata; set global innodb_monitor_reset = module_metadata; select name, max_count, min_count, count, @@ -419,7 +438,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened 2 NULL 2 NULL NULL 0 disabled metadata_table_handles_closed 1 NULL 1 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_reset_all = module_metadata; select name, max_count, min_count, count, max_count_reset, min_count_reset, 
count_reset, status @@ -429,7 +447,6 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled metadata_table_handles_closed NULL NULL 0 NULL NULL 0 disabled metadata_table_reference_count NULL NULL 0 NULL NULL 0 disabled -metadata_mem_pool_size NULL NULL 0 NULL NULL 0 disabled set global innodb_monitor_enable = module_trx; begin; insert into monitor_test values(9); diff --git a/mysql-test/suite/sys_vars/r/innodb_numa_interleave_basic.result b/mysql-test/suite/sys_vars/r/innodb_numa_interleave_basic.result new file mode 100644 index 00000000000..6f6fb359b3d --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_numa_interleave_basic.result @@ -0,0 +1,10 @@ +SELECT @@GLOBAL.innodb_numa_interleave; +@@GLOBAL.innodb_numa_interleave +1 +SET @@GLOBAL.innodb_numa_interleave=off; +ERROR HY000: Variable 'innodb_numa_interleave' is a read only variable +SELECT @@GLOBAL.innodb_use_native_aio; +@@GLOBAL.innodb_use_native_aio +0 +SELECT @@SESSION.innodb_use_native_aio; +ERROR HY000: Variable 'innodb_use_native_aio' is a GLOBAL variable diff --git a/mysql-test/suite/sys_vars/r/innodb_old_blocks_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_old_blocks_pct_basic.result index bbcc2dabb22..6309ffc8cb0 100644 --- a/mysql-test/suite/sys_vars/r/innodb_old_blocks_pct_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_old_blocks_pct_basic.result @@ -35,12 +35,20 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_OLD_BLOCKS_PCT 10 set session innodb_old_blocks_pct=1; ERROR HY000: Variable 'innodb_old_blocks_pct' is a GLOBAL variable and should be set with SET GLOBAL +set @@global.innodb_old_blocks_pct=DEFAULT; +select @@global.innodb_old_blocks_pct; +@@global.innodb_old_blocks_pct +37 set global innodb_old_blocks_pct=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_old_blocks_pct' set global innodb_old_blocks_pct=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_old_blocks_pct' set global innodb_old_blocks_pct="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_old_blocks_pct' +set global innodb_old_blocks_pct=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_old_blocks_pct' +set global innodb_old_blocks_pct=''; +ERROR 42000: Incorrect argument type to variable 'innodb_old_blocks_pct' set global innodb_old_blocks_pct=4; Warnings: Warning 1292 Truncated incorrect innodb_old_blocks_pct value: '4' diff --git a/mysql-test/suite/sys_vars/r/innodb_page_cleaners_basic.result b/mysql-test/suite/sys_vars/r/innodb_page_cleaners_basic.result new file mode 100644 index 00000000000..5a89af5ca88 --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_page_cleaners_basic.result @@ -0,0 +1,41 @@ +SELECT COUNT(@@GLOBAL.innodb_page_cleaners); +COUNT(@@GLOBAL.innodb_page_cleaners) +1 +1 Expected +SELECT COUNT(@@innodb_page_cleaners); +COUNT(@@innodb_page_cleaners) +1 +1 Expected +SET @@GLOBAL.innodb_page_cleaners=1; +ERROR HY000: Variable 'innodb_page_cleaners' is a read only variable +Expected error 'Read-only variable' +SELECT innodb_page_cleaners = @@SESSION.innodb_page_cleaners; +ERROR 42S22: Unknown column 'innodb_page_cleaners' in 'field list' +Expected error 'Read-only variable' +SELECT @@GLOBAL.innodb_page_cleaners = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_page_cleaners'; +@@GLOBAL.innodb_page_cleaners = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE 
VARIABLE_NAME='innodb_page_cleaners'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +SELECT @@innodb_page_cleaners = @@GLOBAL.innodb_page_cleaners; +@@innodb_page_cleaners = @@GLOBAL.innodb_page_cleaners +1 +1 Expected +SELECT COUNT(@@local.innodb_page_cleaners); +ERROR HY000: Variable 'innodb_page_cleaners' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_page_cleaners); +ERROR HY000: Variable 'innodb_page_cleaners' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT VARIABLE_NAME, VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME = 'innodb_page_cleaners'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_PAGE_CLEANERS 1 diff --git a/mysql-test/suite/sys_vars/r/innodb_page_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_page_size_basic.result index d9d067c2cf9..f1a90f0d561 100644 --- a/mysql-test/suite/sys_vars/r/innodb_page_size_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_page_size_basic.result @@ -1 +1,8 @@ -XtraDB extension +SET @orig = @@global.innodb_page_size; +SELECT @orig; +@orig +{valid_page_size} +SET GLOBAL innodb_page_size = 4k; +ERROR HY000: Variable 'innodb_page_size' is a read only variable +SET GLOBAL innodb_page_size = 8k; +ERROR HY000: Variable 'innodb_page_size' is a read only variable diff --git a/mysql-test/suite/sys_vars/r/innodb_purge_batch_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_purge_batch_size_basic.result index 8f81df74d5b..6279cd143cf 100644 --- a/mysql-test/suite/sys_vars/r/innodb_purge_batch_size_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_purge_batch_size_basic.result @@ -29,10 +29,12 @@ SET @@global.innodb_purge_batch_size = 5000; SELECT @@global.innodb_purge_batch_size; @@global.innodb_purge_batch_size 5000 -SET @@global.innodb_purge_batch_size = 1000; +SET @@global.innodb_purge_batch_size = 4294967295; +Warnings: +Warning 1292 Truncated incorrect innodb_purge_batch_size value: '4294967295' SELECT @@global.innodb_purge_batch_size; @@global.innodb_purge_batch_size -1000 +5000 '#--------------------FN_DYNVARS_046_04-------------------------#' SET @@global.innodb_purge_batch_size = 0; Warnings: @@ -50,9 +52,24 @@ ERROR 42000: Incorrect argument type to variable 'innodb_purge_batch_size' SELECT @@global.innodb_purge_batch_size; @@global.innodb_purge_batch_size 1 -SET @@global.innodb_purge_batch_size = 5001; +SET @@global.innodb_purge_batch_size = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_batch_size' +SELECT @@global.innodb_purge_batch_size; +@@global.innodb_purge_batch_size +1 +SET @@global.innodb_purge_batch_size = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_batch_size' +SELECT @@global.innodb_purge_batch_size; +@@global.innodb_purge_batch_size +1 +SET @@global.innodb_purge_batch_size = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_batch_size' +SELECT @@global.innodb_purge_batch_size; +@@global.innodb_purge_batch_size +1 +SET @@global.innodb_purge_batch_size = 4294967297; Warnings: -Warning 1292 Truncated incorrect innodb_purge_batch_size value: '5001' +Warning 1292 Truncated incorrect innodb_purge_batch_size value: '4294967297' SELECT @@global.innodb_purge_batch_size; @@global.innodb_purge_batch_size 5000 diff --git a/mysql-test/suite/sys_vars/r/innodb_purge_rseg_truncate_frequency_basic.result b/mysql-test/suite/sys_vars/r/innodb_purge_rseg_truncate_frequency_basic.result new file mode 100644 index 00000000000..79eb0743dfa --- /dev/null +++ 
b/mysql-test/suite/sys_vars/r/innodb_purge_rseg_truncate_frequency_basic.result @@ -0,0 +1,113 @@ +SET @global_start_value = @@global.innodb_purge_rseg_truncate_frequency; +SELECT @global_start_value; +@global_start_value +128 +'#--------------------FN_DYNVARS_046_01------------------------#' +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SET @@global.innodb_purge_rseg_truncate_frequency = DEFAULT; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +128 +'#---------------------FN_DYNVARS_046_02-------------------------#' +SET innodb_purge_rseg_truncate_frequency = 1; +ERROR HY000: Variable 'innodb_purge_rseg_truncate_frequency' is a GLOBAL variable and should be set with SET GLOBAL +SELECT @@innodb_purge_rseg_truncate_frequency; +@@innodb_purge_rseg_truncate_frequency +128 +SELECT local.innodb_purge_rseg_truncate_frequency; +ERROR 42S02: Unknown table 'local' in field list +SET global innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +'#--------------------FN_DYNVARS_046_03------------------------#' +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = 128; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +128 +'#--------------------FN_DYNVARS_046_05-------------------------#' +SET @@global.innodb_purge_rseg_truncate_frequency = -1; +Warnings: +Warning 1292 Truncated incorrect innodb_purge_rseg_truncate_frequ value: '-1' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = -1024; +Warnings: +Warning 1292 Truncated incorrect innodb_purge_rseg_truncate_frequ value: '-1024' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = "T"; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = "Y"; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 
+'#----------------------FN_DYNVARS_046_06------------------------#' +SELECT @@global.innodb_purge_rseg_truncate_frequency = +VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_purge_rseg_truncate_frequency'; +@@global.innodb_purge_rseg_truncate_frequency = +VARIABLE_VALUE +1 +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_purge_rseg_truncate_frequency'; +VARIABLE_VALUE +1 +'#---------------------FN_DYNVARS_046_07-------------------------#' +SET @@global.innodb_purge_rseg_truncate_frequency = OFF; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = ON; +ERROR 42000: Incorrect argument type to variable 'innodb_purge_rseg_truncate_frequency' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +'#---------------------FN_DYNVARS_046_08----------------------#' +SET @@global.innodb_purge_rseg_truncate_frequency = TRUE; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = FALSE; +Warnings: +Warning 1292 Truncated incorrect innodb_purge_rseg_truncate_frequ value: '0' +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +1 +SET @@global.innodb_purge_rseg_truncate_frequency = @global_start_value; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +@@global.innodb_purge_rseg_truncate_frequency +128 diff --git a/mysql-test/suite/sys_vars/r/innodb_purge_threads_basic.result b/mysql-test/suite/sys_vars/r/innodb_purge_threads_basic.result index e3358a14ea2..2cb697acb6d 100644 --- a/mysql-test/suite/sys_vars/r/innodb_purge_threads_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_purge_threads_basic.result @@ -1,53 +1,41 @@ -'#---------------------BS_STVARS_035_01----------------------#' SELECT COUNT(@@GLOBAL.innodb_purge_threads); COUNT(@@GLOBAL.innodb_purge_threads) 1 1 Expected -'#---------------------BS_STVARS_035_02----------------------#' -SET @@GLOBAL.innodb_purge_threads=1; -ERROR HY000: Variable 'innodb_purge_threads' is a read only variable -Expected error 'Read only variable' -SELECT COUNT(@@GLOBAL.innodb_purge_threads); -COUNT(@@GLOBAL.innodb_purge_threads) +SELECT COUNT(@@innodb_purge_threads); +COUNT(@@innodb_purge_threads) 1 1 Expected -'#---------------------BS_STVARS_035_03----------------------#' +SET @@GLOBAL.innodb_purge_threads=1; +ERROR HY000: Variable 'innodb_purge_threads' is a read only variable +Expected error 'Read-only variable' +SELECT innodb_purge_threads = @@SESSION.innodb_purge_threads; +ERROR 42S22: Unknown column 'innodb_purge_threads' in 'field list' +Expected error 'Read-only variable' SELECT @@GLOBAL.innodb_purge_threads = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_threads'; @@GLOBAL.innodb_purge_threads = VARIABLE_VALUE 1 1 Expected -SELECT COUNT(@@GLOBAL.innodb_purge_threads); -COUNT(@@GLOBAL.innodb_purge_threads) -1 -1 Expected SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_threads'; COUNT(VARIABLE_VALUE) 1 1 Expected -'#---------------------BS_STVARS_035_04----------------------#' 
SELECT @@innodb_purge_threads = @@GLOBAL.innodb_purge_threads; @@innodb_purge_threads = @@GLOBAL.innodb_purge_threads 1 1 Expected -'#---------------------BS_STVARS_035_05----------------------#' -SELECT COUNT(@@innodb_purge_threads); -COUNT(@@innodb_purge_threads) -1 -1 Expected SELECT COUNT(@@local.innodb_purge_threads); ERROR HY000: Variable 'innodb_purge_threads' is a GLOBAL variable Expected error 'Variable is a GLOBAL variable' SELECT COUNT(@@SESSION.innodb_purge_threads); ERROR HY000: Variable 'innodb_purge_threads' is a GLOBAL variable Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@GLOBAL.innodb_purge_threads); -COUNT(@@GLOBAL.innodb_purge_threads) -1 -1 Expected -SELECT innodb_purge_threads = @@SESSION.innodb_purge_threads; -ERROR 42S22: Unknown column 'innodb_purge_threads' in 'field list' -Expected error 'Readonly variable' +SELECT VARIABLE_NAME, VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME = 'innodb_purge_threads'; +VARIABLE_NAME VARIABLE_VALUE +INNODB_PURGE_THREADS 4 diff --git a/mysql-test/suite/sys_vars/r/innodb_read_ahead_threshold_basic.result b/mysql-test/suite/sys_vars/r/innodb_read_ahead_threshold_basic.result index 65a1a8e319f..8ca5862ac09 100644 --- a/mysql-test/suite/sys_vars/r/innodb_read_ahead_threshold_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_read_ahead_threshold_basic.result @@ -35,12 +35,26 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_READ_AHEAD_THRESHOLD 10 set session innodb_read_ahead_threshold=1; ERROR HY000: Variable 'innodb_read_ahead_threshold' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_read_ahead_threshold=DEFAULT; +select @@global.innodb_read_ahead_threshold; +@@global.innodb_read_ahead_threshold +56 set global innodb_read_ahead_threshold=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_read_ahead_threshold' set global innodb_read_ahead_threshold=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_read_ahead_threshold' set global innodb_read_ahead_threshold="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_read_ahead_threshold' +set global innodb_read_ahead_threshold=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_read_ahead_threshold' +select @@global.innodb_read_ahead_threshold; +@@global.innodb_read_ahead_threshold +56 +set global innodb_read_ahead_threshold=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_read_ahead_threshold' +select @@global.innodb_read_ahead_threshold; +@@global.innodb_read_ahead_threshold +56 set global innodb_read_ahead_threshold=-7; Warnings: Warning 1292 Truncated incorrect innodb_read_ahead_threshold value: '-7' diff --git a/mysql-test/suite/sys_vars/r/innodb_replication_delay_basic.result b/mysql-test/suite/sys_vars/r/innodb_replication_delay_basic.result index fa00baa218e..5e0fb425f6b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_replication_delay_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_replication_delay_basic.result @@ -35,21 +35,65 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_REPLICATION_DELAY 10 set session innodb_replication_delay=1; ERROR HY000: Variable 'innodb_replication_delay' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_replication_delay=DEFAULT; +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +0 +set global innodb_replication_delay=0; +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +0 +set global innodb_replication_delay=65535; +select 
@@global.innodb_replication_delay; +@@global.innodb_replication_delay +65535 +set global innodb_replication_delay=4294967295; +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +4294967295 set global innodb_replication_delay=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_replication_delay' set global innodb_replication_delay=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_replication_delay' set global innodb_replication_delay="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_replication_delay' +set global innodb_replication_delay=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_replication_delay' +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +4294967295 +set global innodb_replication_delay=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_replication_delay' +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +4294967295 set global innodb_replication_delay=-7; Warnings: Warning 1292 Truncated incorrect innodb_replication_delay value: '-7' select @@global.innodb_replication_delay; @@global.innodb_replication_delay 0 +set global innodb_replication_delay=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_replication_delay value: '-1024' +select @@global.innodb_replication_delay; +@@global.innodb_replication_delay +0 select * from information_schema.global_variables where variable_name='innodb_replication_delay'; VARIABLE_NAME VARIABLE_VALUE INNODB_REPLICATION_DELAY 0 +SET @@global.innodb_replication_delay = 4294967296; +SELECT @@global.innodb_replication_delay IN (4294967296,4294967295); +@@global.innodb_replication_delay IN (4294967296,4294967295) +1 +SET @@global.innodb_replication_delay = 12345678901; +SELECT @@global.innodb_replication_delay IN (12345678901,4294967295); +@@global.innodb_replication_delay IN (12345678901,4294967295) +1 +SET @@global.innodb_replication_delay = 18446744073709551615; +SELECT @@global.innodb_replication_delay IN (18446744073709551615,4294967295); +@@global.innodb_replication_delay IN (18446744073709551615,4294967295) +1 SET @@global.innodb_replication_delay = @start_global_value; SELECT @@global.innodb_replication_delay; @@global.innodb_replication_delay diff --git a/mysql-test/suite/sys_vars/r/innodb_spin_wait_delay_basic.result b/mysql-test/suite/sys_vars/r/innodb_spin_wait_delay_basic.result index 05672cbb966..621ef56f61f 100644 --- a/mysql-test/suite/sys_vars/r/innodb_spin_wait_delay_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_spin_wait_delay_basic.result @@ -35,21 +35,65 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_SPIN_WAIT_DELAY 10 set session innodb_spin_wait_delay=1; ERROR HY000: Variable 'innodb_spin_wait_delay' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_spin_wait_delay=DEFAULT; +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +6 +set global innodb_spin_wait_delay=0; +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +0 +set global innodb_spin_wait_delay=65535; +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +65535 +set global innodb_spin_wait_delay=4294967295; +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +4294967295 set global innodb_spin_wait_delay=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_spin_wait_delay' set global innodb_spin_wait_delay=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_spin_wait_delay' set global 
innodb_spin_wait_delay="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_spin_wait_delay' +set global innodb_spin_wait_delay=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_spin_wait_delay' +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +4294967295 +set global innodb_spin_wait_delay=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_spin_wait_delay' +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +4294967295 set global innodb_spin_wait_delay=-7; Warnings: Warning 1292 Truncated incorrect innodb_spin_wait_delay value: '-7' select @@global.innodb_spin_wait_delay; @@global.innodb_spin_wait_delay 0 +set global innodb_spin_wait_delay=-1024; +Warnings: +Warning 1292 Truncated incorrect innodb_spin_wait_delay value: '-1024' +select @@global.innodb_spin_wait_delay; +@@global.innodb_spin_wait_delay +0 select * from information_schema.global_variables where variable_name='innodb_spin_wait_delay'; VARIABLE_NAME VARIABLE_VALUE INNODB_SPIN_WAIT_DELAY 0 +SET @@global.innodb_spin_wait_delay = 4294967296; +SELECT @@global.innodb_spin_wait_delay IN (4294967296,4294967295); +@@global.innodb_spin_wait_delay IN (4294967296,4294967295) +1 +SET @@global.innodb_spin_wait_delay = 12345678901; +SELECT @@global.innodb_spin_wait_delay IN (12345678901,4294967295); +@@global.innodb_spin_wait_delay IN (12345678901,4294967295) +1 +SET @@global.innodb_spin_wait_delay = 18446744073709551615; +SELECT @@global.innodb_spin_wait_delay IN (18446744073709551615,4294967295); +@@global.innodb_spin_wait_delay IN (18446744073709551615,4294967295) +1 SET @@global.innodb_spin_wait_delay = @start_global_value; SELECT @@global.innodb_spin_wait_delay; @@global.innodb_spin_wait_delay diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result index 1cbdd16afdf..94de032a0fd 100644 --- a/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result @@ -21,4 +21,4 @@ SET GLOBAL innodb_stats_persistent=123; ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of '123' SET GLOBAL innodb_stats_persistent='foo'; ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of 'foo' -SET GLOBAL innodb_stats_persistent=off; +SET GLOBAL innodb_stats_persistent=OFF; diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_persistent_sample_pages_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_sample_pages_basic.result index d2e848621dd..ec211b693df 100644 --- a/mysql-test/suite/sys_vars/r/innodb_stats_persistent_sample_pages_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_sample_pages_basic.result @@ -29,7 +29,7 @@ SET global innodb_stats_persistent_sample_pages=10; SELECT @@global.innodb_stats_persistent_sample_pages; @@global.innodb_stats_persistent_sample_pages 10 -SELECT * FROM information_schema.global_variables +SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; VARIABLE_NAME VARIABLE_VALUE INNODB_STATS_PERSISTENT_SAMPLE_PAGES 10 @@ -39,12 +39,36 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_STATS_PERSISTENT_SAMPLE_PAGES 10 SET session innodb_stats_persistent_sample_pages=1; ERROR HY000: Variable 'innodb_stats_persistent_sample_pages' is a GLOBAL variable and should be set with SET GLOBAL +set global innodb_stats_persistent_sample_pages=DEFAULT; +select 
@@global.innodb_stats_persistent_sample_pages; +@@global.innodb_stats_persistent_sample_pages +20 +SET global innodb_stats_persistent_sample_pages=0; +Warnings: +Warning 1292 Truncated incorrect innodb_stats_persistent_sample_p value: '0' +SELECT @@global.innodb_stats_persistent_sample_pages; +@@global.innodb_stats_persistent_sample_pages +1 +SET global innodb_stats_persistent_sample_pages=10; +SELECT @@global.innodb_stats_persistent_sample_pages; +@@global.innodb_stats_persistent_sample_pages +10 SET global innodb_stats_persistent_sample_pages=1.1; ERROR 42000: Incorrect argument type to variable 'innodb_stats_persistent_sample_pages' SET global innodb_stats_persistent_sample_pages=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_stats_persistent_sample_pages' SET global innodb_stats_persistent_sample_pages="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_stats_persistent_sample_pages' +SET global innodb_stats_persistent_sample_pages=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_stats_persistent_sample_pages' +SELECT @@global.innodb_stats_persistent_sample_pages; +@@global.innodb_stats_persistent_sample_pages +10 +SET global innodb_stats_persistent_sample_pages=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_stats_persistent_sample_pages' +SELECT @@global.innodb_stats_persistent_sample_pages; +@@global.innodb_stats_persistent_sample_pages +10 SET global innodb_stats_persistent_sample_pages=-7; Warnings: Warning 1292 Truncated incorrect innodb_stats_persistent_sample_p value: '-7' diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_sample_pages_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_sample_pages_basic.result index e490773b63a..8618d602922 100644 --- a/mysql-test/suite/sys_vars/r/innodb_stats_sample_pages_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_stats_sample_pages_basic.result @@ -37,12 +37,28 @@ VARIABLE_NAME VARIABLE_VALUE INNODB_STATS_SAMPLE_PAGES 10 set session innodb_stats_sample_pages=1; ERROR HY000: Variable 'innodb_stats_sample_pages' is a GLOBAL variable and should be set with SET GLOBAL -set global innodb_stats_sample_pages=1.1; +set global innodb_stats_sample_pages=DEFAULT; +Warnings: +Warning 131 Using innodb_stats_sample_pages is deprecated and the variable may be removed in future releases. Please use innodb_stats_transient_sample_pages instead. 
+select @@global.innodb_stats_sample_pages; +@@global.innodb_stats_sample_pages +8 +set global innodb_stats_sample_pages = 1.1; +ERROR 42000: Incorrect argument type to variable 'innodb_stats_sample_pages' +set global innodb_stats_sample_pages = 1e1; ERROR 42000: Incorrect argument type to variable 'innodb_stats_sample_pages' -set global innodb_stats_sample_pages=1e1; +set global innodb_stats_sample_pages = "foo"; ERROR 42000: Incorrect argument type to variable 'innodb_stats_sample_pages' -set global innodb_stats_sample_pages="foo"; +set global innodb_stats_sample_pages=' '; ERROR 42000: Incorrect argument type to variable 'innodb_stats_sample_pages' +select @@global.innodb_stats_sample_pages; +@@global.innodb_stats_sample_pages +8 +set global innodb_stats_sample_pages=" "; +ERROR 42000: Incorrect argument type to variable 'innodb_stats_sample_pages' +select @@global.innodb_stats_sample_pages; +@@global.innodb_stats_sample_pages +8 set global innodb_stats_sample_pages=-7; Warnings: Warning 1292 Truncated incorrect innodb_stats_sample_pages value: '-7' diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_transient_sample_pages_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_transient_sample_pages_basic.result index 4c60dd5a697..1ea5ac3d3bc 100644 --- a/mysql-test/suite/sys_vars/r/innodb_stats_transient_sample_pages_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_stats_transient_sample_pages_basic.result @@ -25,6 +25,10 @@ SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_transient_sample_pages'; VARIABLE_NAME VARIABLE_VALUE INNODB_STATS_TRANSIENT_SAMPLE_PAGES 8 +set global innodb_stats_transient_sample_pages=DEFAULT; +select @@global.innodb_stats_transient_sample_pages; +@@global.innodb_stats_transient_sample_pages +8 SET global innodb_stats_transient_sample_pages=10; SELECT @@global.innodb_stats_transient_sample_pages; @@global.innodb_stats_transient_sample_pages @@ -45,6 +49,8 @@ SET global innodb_stats_transient_sample_pages=1e1; ERROR 42000: Incorrect argument type to variable 'innodb_stats_transient_sample_pages' SET global innodb_stats_transient_sample_pages="foo"; ERROR 42000: Incorrect argument type to variable 'innodb_stats_transient_sample_pages' +SET global innodb_stats_transient_sample_pages=' '; +ERROR 42000: Incorrect argument type to variable 'innodb_stats_transient_sample_pages' SET global innodb_stats_transient_sample_pages=-7; Warnings: Warning 1292 Truncated incorrect innodb_stats_transient_sample_pa value: '-7' diff --git a/mysql-test/suite/sys_vars/r/innodb_strict_mode_basic.result b/mysql-test/suite/sys_vars/r/innodb_strict_mode_basic.result index 9c5e62d2684..8bddb6a1694 100644 --- a/mysql-test/suite/sys_vars/r/innodb_strict_mode_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_strict_mode_basic.result @@ -1,32 +1,32 @@ SET @start_global_value = @@global.innodb_strict_mode; SELECT @start_global_value; @start_global_value -0 +1 Valid values are 'ON' and 'OFF' select @@global.innodb_strict_mode in (0, 1); @@global.innodb_strict_mode in (0, 1) 1 select @@global.innodb_strict_mode; @@global.innodb_strict_mode -0 +1 select @@session.innodb_strict_mode in (0, 1); @@session.innodb_strict_mode in (0, 1) 1 select @@session.innodb_strict_mode; @@session.innodb_strict_mode -0 +1 show global variables like 'innodb_strict_mode'; Variable_name Value -innodb_strict_mode OFF +innodb_strict_mode ON show session variables like 'innodb_strict_mode'; Variable_name Value -innodb_strict_mode OFF +innodb_strict_mode ON select * from 
information_schema.global_variables where variable_name='innodb_strict_mode'; VARIABLE_NAME VARIABLE_VALUE -INNODB_STRICT_MODE OFF +INNODB_STRICT_MODE ON select * from information_schema.session_variables where variable_name='innodb_strict_mode'; VARIABLE_NAME VARIABLE_VALUE -INNODB_STRICT_MODE OFF +INNODB_STRICT_MODE ON set global innodb_strict_mode='OFF'; set session innodb_strict_mode='OFF'; select @@global.innodb_strict_mode; @@ -118,4 +118,4 @@ INNODB_STRICT_MODE ON SET @@global.innodb_strict_mode = @start_global_value; SELECT @@global.innodb_strict_mode; @@global.innodb_strict_mode -0 +1 diff --git a/mysql-test/suite/sys_vars/r/innodb_sync_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_sync_debug_basic.result new file mode 100644 index 00000000000..72420c8595b --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_sync_debug_basic.result @@ -0,0 +1,11 @@ +# +# Basic test for innodb_sync_debug +# +SELECT @@global.innodb_sync_debug; +@@global.innodb_sync_debug +0 +set global innodb_sync_debug = 1; +ERROR HY000: Variable 'innodb_sync_debug' is a read only variable +SELECT @@global.innodb_sync_debug; +@@global.innodb_sync_debug +0 diff --git a/mysql-test/suite/sys_vars/r/innodb_sync_spin_loops_basic.result b/mysql-test/suite/sys_vars/r/innodb_sync_spin_loops_basic.result index ba45d4f2ed0..3377b690e49 100644 --- a/mysql-test/suite/sys_vars/r/innodb_sync_spin_loops_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_sync_spin_loops_basic.result @@ -25,15 +25,28 @@ SET @@global.innodb_sync_spin_loops = 0; SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops 0 -SET @@global.innodb_sync_spin_loops = 1; +SET @@global.innodb_sync_spin_loops = 65535; SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1 -SET @@global.innodb_sync_spin_loops = 1000; +65535 +SET @@global.innodb_sync_spin_loops = 4294967295; SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1000 +4294967295 '#--------------------FN_DYNVARS_046_04-------------------------#' +SET @@global.innodb_sync_spin_loops = 4294967296; +SELECT @@global.innodb_sync_spin_loops IN (4294967296,4294967295); +@@global.innodb_sync_spin_loops IN (4294967296,4294967295) +1 +SET @@global.innodb_sync_spin_loops = 12345678901; +SELECT @@global.innodb_sync_spin_loops IN (12345678901,4294967295); +@@global.innodb_sync_spin_loops IN (12345678901,4294967295) +1 +SET @@global.innodb_sync_spin_loops = 18446744073709551615; +SELECT @@global.innodb_sync_spin_loops IN (18446744073709551615,4294967295); +@@global.innodb_sync_spin_loops IN (18446744073709551615,4294967295) +1 +'#--------------------FN_DYNVARS_046_05-------------------------#' SET @@global.innodb_sync_spin_loops = -1; Warnings: Warning 1292 Truncated incorrect innodb_sync_spin_loops value: '-1' @@ -50,11 +63,28 @@ ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops 0 -SET @@global.innodb_sync_spin_loops = 1001; +SET @@global.innodb_sync_spin_loops = 65535.01; +ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' +SELECT @@global.innodb_sync_spin_loops; +@@global.innodb_sync_spin_loops +0 +SET @@global.innodb_sync_spin_loops = -1024; +Warnings: +Warning 1292 Truncated incorrect innodb_sync_spin_loops value: '-1024' +SELECT @@global.innodb_sync_spin_loops; +@@global.innodb_sync_spin_loops +0 +SET @@global.innodb_sync_spin_loops = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' +SELECT 
@@global.innodb_sync_spin_loops; +@@global.innodb_sync_spin_loops +0 +SET @@global.innodb_sync_spin_loops = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1001 -'#----------------------FN_DYNVARS_046_05------------------------#' +0 +'#----------------------FN_DYNVARS_046_06------------------------#' SELECT @@global.innodb_sync_spin_loops = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_sync_spin_loops'; @@ -63,23 +93,23 @@ VARIABLE_VALUE 1 SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1001 +0 SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_sync_spin_loops'; VARIABLE_VALUE -1001 -'#---------------------FN_DYNVARS_046_06-------------------------#' +0 +'#---------------------FN_DYNVARS_046_07-------------------------#' SET @@global.innodb_sync_spin_loops = OFF; ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1001 +0 SET @@global.innodb_sync_spin_loops = ON; ERROR 42000: Incorrect argument type to variable 'innodb_sync_spin_loops' SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops -1001 -'#---------------------FN_DYNVARS_046_07----------------------#' +0 +'#---------------------FN_DYNVARS_046_08----------------------#' SET @@global.innodb_sync_spin_loops = TRUE; SELECT @@global.innodb_sync_spin_loops; @@global.innodb_sync_spin_loops diff --git a/mysql-test/suite/sys_vars/r/innodb_table_locks_func.result b/mysql-test/suite/sys_vars/r/innodb_table_locks_func.result index 8e7806ad7e0..0f9e1e8ccf0 100644 --- a/mysql-test/suite/sys_vars/r/innodb_table_locks_func.result +++ b/mysql-test/suite/sys_vars/r/innodb_table_locks_func.result @@ -4,7 +4,9 @@ SELECT @start_value; @start_value 1 SET @@global.innodb_table_locks = OFF; +'connect (con1,localhost,root,,,,)' connect con1,localhost,root,,,,; +'connection con1' connection con1; SELECT @@global.innodb_table_locks; @@global.innodb_table_locks @@ -15,7 +17,9 @@ SELECT @@session.innodb_table_locks; disconnect con1; '#--------------------FN_DYNVARS_048_02-------------------------#' '----check when innodb_table_locks = ON and autocommit = OFF---' +'connect (con2,localhost,root,,,,)' connect con2,localhost,root,,,,; +'connection default' connection default; DROP TABLE IF EXISTS t1; CREATE TABLE t1 (a INT) ENGINE=INNODB; @@ -26,12 +30,15 @@ INSERT INTO t1 VALUES(1); SELECT * FROM t1 FOR UPDATE; a 1 +'CONNECTION con2' connection con2; SET @@innodb_table_locks = ON; SET @@autocommit = OFF; LOCK TABLES t1 WRITE; +'CONNECTION default' connection default; COMMIT; +'CONNECTION con2' connection con2; UNLOCK tables; DROP TABLE t1; diff --git a/mysql-test/suite/sys_vars/r/innodb_temp_data_file_path_basic.result b/mysql-test/suite/sys_vars/r/innodb_temp_data_file_path_basic.result new file mode 100644 index 00000000000..2357a07e3ab --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_temp_data_file_path_basic.result @@ -0,0 +1,53 @@ +'#---------------------BS_STVARS_024_01----------------------#' +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +COUNT(@@GLOBAL.innodb_temp_data_file_path) +1 +1 Expected +'#---------------------BS_STVARS_024_02----------------------#' +SET @@GLOBAL.innodb_temp_data_file_path=1; +ERROR HY000: Variable 'innodb_temp_data_file_path' is a read only variable +Expected error 'Read only variable' +SELECT 
COUNT(@@GLOBAL.innodb_temp_data_file_path); +COUNT(@@GLOBAL.innodb_temp_data_file_path) +1 +1 Expected +'#---------------------BS_STVARS_024_03----------------------#' +SELECT @@GLOBAL.innodb_temp_data_file_path = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_temp_data_file_path'; +@@GLOBAL.innodb_temp_data_file_path = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +COUNT(@@GLOBAL.innodb_temp_data_file_path) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_temp_data_file_path'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +'#---------------------BS_STVARS_024_04----------------------#' +SELECT @@innodb_temp_data_file_path = @@GLOBAL.innodb_temp_data_file_path; +@@innodb_temp_data_file_path = @@GLOBAL.innodb_temp_data_file_path +1 +1 Expected +'#---------------------BS_STVARS_024_05----------------------#' +SELECT COUNT(@@innodb_temp_data_file_path); +COUNT(@@innodb_temp_data_file_path) +1 +1 Expected +SELECT COUNT(@@local.innodb_temp_data_file_path); +ERROR HY000: Variable 'innodb_temp_data_file_path' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_temp_data_file_path); +ERROR HY000: Variable 'innodb_temp_data_file_path' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +COUNT(@@GLOBAL.innodb_temp_data_file_path) +1 +1 Expected +SELECT innodb_temp_data_file_path = @@SESSION.innodb_temp_data_file_path; +ERROR 42S22: Unknown column 'innodb_temp_data_file_path' in 'field list' +Expected error 'Readonly variable' diff --git a/mysql-test/suite/sys_vars/r/innodb_thread_concurrency_basic.result b/mysql-test/suite/sys_vars/r/innodb_thread_concurrency_basic.result index c7af96bb22a..ca3c253604a 100644 --- a/mysql-test/suite/sys_vars/r/innodb_thread_concurrency_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_thread_concurrency_basic.result @@ -50,12 +50,27 @@ ERROR 42000: Incorrect argument type to variable 'innodb_thread_concurrency' SELECT @@global.innodb_thread_concurrency; @@global.innodb_thread_concurrency 0 +SET @@global.innodb_thread_concurrency = ' '; +ERROR 42000: Incorrect argument type to variable 'innodb_thread_concurrency' +SELECT @@global.innodb_thread_concurrency; +@@global.innodb_thread_concurrency +0 +SET @@global.innodb_thread_concurrency = " "; +ERROR 42000: Incorrect argument type to variable 'innodb_thread_concurrency' +SELECT @@global.innodb_thread_concurrency; +@@global.innodb_thread_concurrency +0 SET @@global.innodb_thread_concurrency = 1001; Warnings: Warning 1292 Truncated incorrect innodb_thread_concurrency value: '1001' SELECT @@global.innodb_thread_concurrency; @@global.innodb_thread_concurrency 1000 +SET @@global.innodb_thread_concurrency = 255.01; +ERROR 42000: Incorrect argument type to variable 'innodb_thread_concurrency' +SELECT @@global.innodb_thread_concurrency; +@@global.innodb_thread_concurrency +1000 '#----------------------FN_DYNVARS_046_05------------------------#' SELECT @@global.innodb_thread_concurrency = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES diff --git a/mysql-test/suite/sys_vars/r/innodb_undo_directory_basic.result b/mysql-test/suite/sys_vars/r/innodb_undo_directory_basic.result index e7d7cef67c7..fbafe653d29 100644 --- a/mysql-test/suite/sys_vars/r/innodb_undo_directory_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_undo_directory_basic.result @@ -1,7 +1,7 @@ -SELECT 
@@GLOBAL.innodb_undo_directory; -@@GLOBAL.innodb_undo_directory -. -. Expected +SELECT COUNT(@@GLOBAL.innodb_undo_directory); +COUNT(@@GLOBAL.innodb_undo_directory) +1 +1 Expected SET @@GLOBAL.innodb_undo_directory="/tmp"; ERROR HY000: Variable 'innodb_undo_directory' is a read only variable Expected error 'Read only variable' @@ -9,12 +9,12 @@ SELECT COUNT(@@GLOBAL.innodb_undo_directory); COUNT(@@GLOBAL.innodb_undo_directory) 1 1 Expected -SELECT VARIABLE_VALUE +SELECT @@GLOBAL.innodb_undo_directory = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_undo_directory'; -VARIABLE_VALUE -. -. Expected +@@GLOBAL.innodb_undo_directory = VARIABLE_VALUE +1 +1 Expected SELECT COUNT(@@GLOBAL.innodb_undo_directory); COUNT(@@GLOBAL.innodb_undo_directory) 1 diff --git a/mysql-test/suite/sys_vars/r/innodb_undo_log_truncate_basic.result b/mysql-test/suite/sys_vars/r/innodb_undo_log_truncate_basic.result new file mode 100644 index 00000000000..eb42f4965ea --- /dev/null +++ b/mysql-test/suite/sys_vars/r/innodb_undo_log_truncate_basic.result @@ -0,0 +1,69 @@ +SET @start_global_value = @@global.innodb_undo_log_truncate; +SELECT @start_global_value; +@start_global_value +0 +'#---------------------BS_STVARS_028_01----------------------#' +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +COUNT(@@GLOBAL.innodb_undo_log_truncate) +1 +1 Expected +'#---------------------BS_STVARS_028_02----------------------#' +SET @@global.innodb_undo_log_truncate = 0; +SELECT @@global.innodb_undo_log_truncate; +@@global.innodb_undo_log_truncate +0 +SET @@global.innodb_undo_log_truncate ='On' ; +SELECT @@global.innodb_undo_log_truncate; +@@global.innodb_undo_log_truncate +1 +SET @@global.innodb_undo_log_truncate ='Off' ; +SELECT @@global.innodb_undo_log_truncate; +@@global.innodb_undo_log_truncate +0 +SET @@global.innodb_undo_log_truncate = 1; +SELECT @@global.innodb_undo_log_truncate; +@@global.innodb_undo_log_truncate +1 +'#---------------------BS_STVARS_028_03----------------------#' +SELECT IF(@@GLOBAL.innodb_undo_log_truncate,'ON','OFF') = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_undo_log_truncate'; +IF(@@GLOBAL.innodb_undo_log_truncate,'ON','OFF') = VARIABLE_VALUE +1 +1 Expected +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +COUNT(@@GLOBAL.innodb_undo_log_truncate) +1 +1 Expected +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_undo_log_truncate'; +COUNT(VARIABLE_VALUE) +1 +1 Expected +'#---------------------BS_STVARS_028_04----------------------#' +SELECT @@innodb_undo_log_truncate = @@GLOBAL.innodb_undo_log_truncate; +@@innodb_undo_log_truncate = @@GLOBAL.innodb_undo_log_truncate +1 +1 Expected +'#---------------------BS_STVARS_028_05----------------------#' +SELECT COUNT(@@innodb_undo_log_truncate); +COUNT(@@innodb_undo_log_truncate) +1 +1 Expected +SELECT COUNT(@@local.innodb_undo_log_truncate); +ERROR HY000: Variable 'innodb_undo_log_truncate' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@SESSION.innodb_undo_log_truncate); +ERROR HY000: Variable 'innodb_undo_log_truncate' is a GLOBAL variable +Expected error 'Variable is a GLOBAL variable' +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +COUNT(@@GLOBAL.innodb_undo_log_truncate) +1 +1 Expected +SELECT innodb_undo_log_truncate = @@SESSION.innodb_undo_log_truncate; +ERROR 42S22: Unknown column 'innodb_undo_log_truncate' in 'field list' +SET @@global.innodb_undo_log_truncate = @start_global_value; 
+SELECT @@global.innodb_undo_log_truncate; +@@global.innodb_undo_log_truncate +0 diff --git a/mysql-test/suite/sys_vars/r/innodb_undo_tablespaces_basic.result b/mysql-test/suite/sys_vars/r/innodb_undo_tablespaces_basic.result index 6130484ad86..c7e0b21a12b 100644 --- a/mysql-test/suite/sys_vars/r/innodb_undo_tablespaces_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_undo_tablespaces_basic.result @@ -1,7 +1,6 @@ -SELECT @@GLOBAL.innodb_undo_tablespaces; -@@GLOBAL.innodb_undo_tablespaces -0 -0 Expected +SELECT @@GLOBAL.innodb_undo_tablespaces >= 0; +@@GLOBAL.innodb_undo_tablespaces >= 0 +1 SET @@GLOBAL.innodb_undo_tablespaces=128; ERROR HY000: Variable 'innodb_undo_tablespaces' is a read only variable Expected error 'Read only variable' @@ -9,10 +8,7 @@ SELECT COUNT(@@GLOBAL.innodb_undo_tablespaces); COUNT(@@GLOBAL.innodb_undo_tablespaces) 1 1 Expected -SELECT VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_undo_tablespaces'; -VARIABLE_VALUE +DIFFERENCE 0 0 Expected SELECT @@innodb_undo_tablespaces = @@GLOBAL.innodb_undo_tablespaces; diff --git a/mysql-test/suite/sys_vars/r/innodb_use_sys_malloc_basic.result b/mysql-test/suite/sys_vars/r/innodb_use_sys_malloc_basic.result deleted file mode 100644 index 70ecef72042..00000000000 --- a/mysql-test/suite/sys_vars/r/innodb_use_sys_malloc_basic.result +++ /dev/null @@ -1,22 +0,0 @@ -Valid values are 'ON' and 'OFF' -select @@global.innodb_use_sys_malloc; -@@global.innodb_use_sys_malloc -1 -select @@session.innodb_use_sys_malloc; -ERROR HY000: Variable 'innodb_use_sys_malloc' is a GLOBAL variable -show global variables like 'innodb_use_sys_malloc'; -Variable_name Value -innodb_use_sys_malloc ON -show session variables like 'innodb_use_sys_malloc'; -Variable_name Value -innodb_use_sys_malloc ON -select * from information_schema.global_variables where variable_name='innodb_use_sys_malloc'; -VARIABLE_NAME VARIABLE_VALUE -INNODB_USE_SYS_MALLOC ON -select * from information_schema.session_variables where variable_name='innodb_use_sys_malloc'; -VARIABLE_NAME VARIABLE_VALUE -INNODB_USE_SYS_MALLOC ON -set global innodb_use_sys_malloc=1; -ERROR HY000: Variable 'innodb_use_sys_malloc' is a read only variable -set session innodb_use_sys_malloc=1; -ERROR HY000: Variable 'innodb_use_sys_malloc' is a read only variable diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 9f92ea99437..1a7a0188b1c 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -47,6 +47,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_ADAPTIVE_HASH_INDEX_PARTS +SESSION_VALUE NULL +GLOBAL_VALUE 8 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 8 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Number of InnoDB Adapative Hash Index Partitions. (default = 8). +NUMERIC_MIN_VALUE 1 +NUMERIC_MAX_VALUE 512 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_ADAPTIVE_MAX_SLEEP_DELAY SESSION_VALUE NULL GLOBAL_VALUE 150000 @@ -61,20 +75,6 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED -VARIABLE_NAME INNODB_ADDITIONAL_MEM_POOL_SIZE -SESSION_VALUE NULL -GLOBAL_VALUE 8388608 -GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 8388608 -VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT -VARIABLE_COMMENT DEPRECATED. 
This option may be removed in future releases, together with the option innodb_use_sys_malloc and with the InnoDB's internal memory allocator. Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures. -NUMERIC_MIN_VALUE 524288 -NUMERIC_MAX_VALUE 9223372036854775807 -NUMERIC_BLOCK_SIZE 1024 -ENUM_VALUE_LIST NULL -READ_ONLY YES -COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_API_BK_COMMIT_INTERVAL SESSION_VALUE NULL GLOBAL_VALUE 5 @@ -166,7 +166,7 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 1 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT -VARIABLE_COMMENT The AUTOINC lock modes supported by InnoDB: 0 => Old style AUTOINC locking (for backward compatibility) 1 => New style AUTOINC locking 2 => No AUTOINC locking (unsafe for SBR) +VARIABLE_COMMENT The AUTOINC lock modes supported by InnoDB: 0 => Old style AUTOINC locking (for backward compatibility); 1 => New style AUTOINC locking; 2 => No AUTOINC locking (unsafe for SBR) NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 2 NUMERIC_BLOCK_SIZE 0 @@ -229,11 +229,25 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_BUFFER_POOL_CHUNK_SIZE +SESSION_VALUE NULL +GLOBAL_VALUE 8388608 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 134217728 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Size of a single memory chunk within each buffer pool instance for resizing buffer pool. Online buffer pool resizing happens at this granularity. 0 means disable resizing buffer pool. +NUMERIC_MIN_VALUE 1048576 +NUMERIC_MAX_VALUE 9223372036854775807 +NUMERIC_BLOCK_SIZE 1048576 +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_AT_SHUTDOWN SESSION_VALUE NULL -GLOBAL_VALUE OFF +GLOBAL_VALUE ON GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF +DEFAULT_VALUE ON VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN VARIABLE_COMMENT Dump the buffer pool into a file named @@innodb_buffer_pool_filename @@ -259,12 +273,12 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_DUMP_PCT SESSION_VALUE NULL -GLOBAL_VALUE 100 +GLOBAL_VALUE 25 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 100 +DEFAULT_VALUE 25 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED -VARIABLE_COMMENT Dump only the hottest N% of each buffer pool, defaults to 100 +VARIABLE_COMMENT Dump only the hottest N% of each buffer pool, defaults to 25 NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 100 NUMERIC_BLOCK_SIZE 0 @@ -301,15 +315,15 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_INSTANCES SESSION_VALUE NULL -GLOBAL_VALUE 8 +GLOBAL_VALUE 1 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE 0 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT +VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Number of buffer pool instances, set to higher value on high-end machines to increase scalability NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 64 -NUMERIC_BLOCK_SIZE 1 +NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED @@ -329,9 +343,9 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUFFER_POOL_LOAD_AT_STARTUP SESSION_VALUE NULL -GLOBAL_VALUE OFF +GLOBAL_VALUE ON GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF +DEFAULT_VALUE ON VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN VARIABLE_COMMENT Load the buffer pool from a file named @@innodb_buffer_pool_filename @@ -367,7 +381,7 @@ NUMERIC_MIN_VALUE 5242880 NUMERIC_MAX_VALUE 9223372036854775807 
NUMERIC_BLOCK_SIZE 1048576 ENUM_VALUE_LIST NULL -READ_ONLY YES +READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_BUF_DUMP_STATUS_FREQUENCY SESSION_VALUE NULL @@ -455,16 +469,16 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_CHECKSUM_ALGORITHM SESSION_VALUE NULL -GLOBAL_VALUE INNODB +GLOBAL_VALUE crc32 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE INNODB +DEFAULT_VALUE crc32 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE ENUM VARIABLE_COMMENT The algorithm InnoDB uses for page checksumming. Possible values are CRC32 (hardware accelerated if the CPU supports it) write crc32, allow any of the other checksums to match when reading; STRICT_CRC32 write crc32, do not allow other algorithms to match when reading; INNODB write a software calculated checksum, allow any other checksums to match when reading; STRICT_INNODB write a software calculated checksum, do not allow other algorithms to match when reading; NONE write a constant magic number, do not do any checksum verification when reading (same as innodb_checksums=OFF); STRICT_NONE write a constant magic number, do not allow values other than that magic number when reading; Files updated when this option is set to crc32 or strict_crc32 will not be readable by MySQL versions older than 5.6.3 NUMERIC_MIN_VALUE NULL NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST CRC32,STRICT_CRC32,INNODB,STRICT_INNODB,NONE,STRICT_NONE +ENUM_VALUE_LIST crc32,strict_crc32,innodb,strict_innodb,none,strict_none READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_CMP_PER_INDEX_ENABLED @@ -551,6 +565,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_COMPRESS_DEBUG +SESSION_VALUE NULL +GLOBAL_VALUE none +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE none +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Compress all tables, without specifying the COMRPESS table attribute +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST none,zlib,lz4,lz4hc +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_CONCURRENCY_TICKETS SESSION_VALUE NULL GLOBAL_VALUE 5000 @@ -621,6 +649,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_DEFAULT_ROW_FORMAT +SESSION_VALUE NULL +GLOBAL_VALUE dynamic +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE dynamic +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE ENUM +VARIABLE_COMMENT The default ROW FORMAT for all innodb tables created without explicit ROW_FORMAT. Possible values are REDUNDANT, COMPACT, and DYNAMIC. The ROW_FORMAT value COMPRESSED is not allowed +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST redundant,compact,dynamic +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_DEFRAGMENT SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -719,6 +761,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_DISABLE_RESIZE_BUFFER_POOL_DEBUG +SESSION_VALUE NULL +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Disable resizing buffer pool to make assertion code not expensive. 
+NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_DISABLE_SORT_FILE_CACHE SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -861,9 +917,9 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_FILE_FORMAT SESSION_VALUE NULL -GLOBAL_VALUE Antelope +GLOBAL_VALUE Barracuda GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE Antelope +DEFAULT_VALUE Barracuda VARIABLE_SCOPE GLOBAL VARIABLE_TYPE VARCHAR VARIABLE_COMMENT File format to use for new tables in .ibd files. @@ -889,7 +945,7 @@ READ_ONLY YES COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_FILE_FORMAT_MAX SESSION_VALUE NULL -GLOBAL_VALUE Antelope +GLOBAL_VALUE Barracuda GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE Antelope VARIABLE_SCOPE GLOBAL @@ -915,6 +971,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_FILL_FACTOR +SESSION_VALUE NULL +GLOBAL_VALUE 100 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 100 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT +VARIABLE_COMMENT Percentage of B-tree page filled during bulk insert +NUMERIC_MIN_VALUE 10 +NUMERIC_MAX_VALUE 100 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_FIL_MAKE_PAGE_DIRTY_DEBUG SESSION_VALUE NULL GLOBAL_VALUE 0 @@ -999,6 +1069,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_FLUSH_SYNC +SESSION_VALUE NULL +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Allow IO bursts at the checkpoints ignoring io_capacity setting. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_FORCE_LOAD_CORRUPTED SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -1050,7 +1134,7 @@ VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Kills the server during crash recovery. NUMERIC_MIN_VALUE 0 -NUMERIC_MAX_VALUE 10 +NUMERIC_MAX_VALUE 100 NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY YES @@ -1068,7 +1152,7 @@ NUMERIC_MAX_VALUE NULL NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO -COMMAND_LINE_ARGUMENT NONE +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_FT_CACHE_SIZE SESSION_VALUE NULL GLOBAL_VALUE 8000000 @@ -1295,9 +1379,9 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_LARGE_PREFIX SESSION_VALUE NULL -GLOBAL_VALUE OFF +GLOBAL_VALUE ON GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE OFF +DEFAULT_VALUE ON VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BOOLEAN VARIABLE_COMMENT Support large index prefix length of REC_VERSION_56_MAX_INDEX_COL_LEN (3072) bytes. 
@@ -1377,6 +1461,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_LOG_CHECKSUMS +SESSION_VALUE NULL +GLOBAL_VALUE ON +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE ON +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Whether to compute and require checksums for InnoDB redo log blocks +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_LOG_COMPRESSED_PAGES SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -1433,6 +1531,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_LOG_WRITE_AHEAD_SIZE +SESSION_VALUE NULL +GLOBAL_VALUE 8192 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 8192 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Redo log write ahead unit size to avoid read-on-write, it should match the OS cache block IO size +NUMERIC_MIN_VALUE 512 +NUMERIC_MAX_VALUE 16384 +NUMERIC_BLOCK_SIZE 512 +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_LRU_SCAN_DEPTH SESSION_VALUE NULL GLOBAL_VALUE 100 @@ -1455,7 +1567,7 @@ DEFAULT_VALUE 75.000000 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE DOUBLE VARIABLE_COMMENT Percentage of dirty pages allowed in bufferpool. -NUMERIC_MIN_VALUE 0.001 +NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 99.999 NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL @@ -1463,9 +1575,9 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_MAX_DIRTY_PAGES_PCT_LWM SESSION_VALUE NULL -GLOBAL_VALUE 0.001000 +GLOBAL_VALUE 0.000000 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 0.001000 +DEFAULT_VALUE 0.000000 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE DOUBLE VARIABLE_COMMENT Percentage of dirty pages at which flushing kicks in. @@ -1503,19 +1615,33 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED -VARIABLE_NAME INNODB_MIRRORED_LOG_GROUPS +VARIABLE_NAME INNODB_MAX_UNDO_LOG_SIZE SESSION_VALUE NULL -GLOBAL_VALUE 1 +GLOBAL_VALUE 1073741824 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 0 +DEFAULT_VALUE 1073741824 VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT -VARIABLE_COMMENT Number of identical copies of log groups we keep for the database. Currently this should be set to 1. -NUMERIC_MIN_VALUE 0 -NUMERIC_MAX_VALUE 10 +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Maximum size of UNDO tablespace in MB (If UNDO tablespace grows beyond this size it will be truncated in due course). +NUMERIC_MIN_VALUE 10485760 +NUMERIC_MAX_VALUE 18446744073709551615 NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL -READ_ONLY YES +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_MERGE_THRESHOLD_SET_ALL_DEBUG +SESSION_VALUE NULL +GLOBAL_VALUE 50 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 50 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE INT UNSIGNED +VARIABLE_COMMENT Override current MERGE_THRESHOLD setting for all indexes at dictionary cache by the specified value dynamically, at the time. 
+NUMERIC_MIN_VALUE 1 +NUMERIC_MAX_VALUE 50 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY NO COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_MONITOR_DISABLE SESSION_VALUE NULL @@ -1657,6 +1783,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT NONE +VARIABLE_NAME INNODB_PAGE_CLEANERS +SESSION_VALUE NULL +GLOBAL_VALUE 1 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 4 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Page cleaner threads can be from 1 to 64. Default is 4. +NUMERIC_MIN_VALUE 1 +NUMERIC_MAX_VALUE 64 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PAGE_HASH_LOCKS SESSION_VALUE NULL GLOBAL_VALUE 16 @@ -1727,6 +1867,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_PURGE_RSEG_TRUNCATE_FREQUENCY +SESSION_VALUE NULL +GLOBAL_VALUE 128 +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE 128 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_COMMENT Dictates rate at which UNDO records are purged. Value N means purge rollback segment(s) on every Nth iteration of purge invocation +NUMERIC_MIN_VALUE 1 +NUMERIC_MAX_VALUE 128 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_RUN_NOW SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -1757,12 +1911,12 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_PURGE_THREADS SESSION_VALUE NULL -GLOBAL_VALUE 1 +GLOBAL_VALUE 4 GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE 1 +DEFAULT_VALUE 4 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED -VARIABLE_COMMENT Purge threads can be from 1 to 32. Default is 1. +VARIABLE_COMMENT Purge threads can be from 1 to 32. Default is 4. NUMERIC_MIN_VALUE 1 NUMERIC_MAX_VALUE 32 NUMERIC_BLOCK_SIZE 0 @@ -2147,6 +2301,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_SYNC_DEBUG +SESSION_VALUE NULL +GLOBAL_VALUE OFF +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE OFF +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Enable the sync debug checks +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_SYNC_SPIN_LOOPS SESSION_VALUE NULL GLOBAL_VALUE 30 @@ -2175,6 +2343,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_TEMP_DATA_FILE_PATH +SESSION_VALUE NULL +GLOBAL_VALUE ibtmp1:12M:autoextend +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE VARCHAR +VARIABLE_COMMENT Path to files and their sizes making temp-tablespace. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY YES +COMMAND_LINE_ARGUMENT REQUIRED VARIABLE_NAME INNODB_THREAD_CONCURRENCY SESSION_VALUE NULL GLOBAL_VALUE 0 @@ -2247,9 +2429,9 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT NULL VARIABLE_NAME INNODB_UNDO_DIRECTORY SESSION_VALUE NULL -GLOBAL_VALUE . +GLOBAL_VALUE PATH GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE . +DEFAULT_VALUE VARIABLE_SCOPE GLOBAL VARIABLE_TYPE VARCHAR VARIABLE_COMMENT Directory where undo tablespace files live, this path can be absolute. 
@@ -2273,6 +2455,20 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_UNDO_LOG_TRUNCATE +SESSION_VALUE NULL +GLOBAL_VALUE OFF +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE OFF +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Enable or Disable Truncate of UNDO tablespace. +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_UNDO_TABLESPACES SESSION_VALUE NULL GLOBAL_VALUE 0 @@ -2282,7 +2478,7 @@ VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED VARIABLE_COMMENT Number of undo tablespaces to use. NUMERIC_MIN_VALUE 0 -NUMERIC_MAX_VALUE 126 +NUMERIC_MAX_VALUE 95 NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY YES @@ -2329,20 +2525,6 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT NONE -VARIABLE_NAME INNODB_USE_SYS_MALLOC -SESSION_VALUE NULL -GLOBAL_VALUE ON -GLOBAL_VALUE_ORIGIN COMPILE-TIME -DEFAULT_VALUE ON -VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BOOLEAN -VARIABLE_COMMENT DEPRECATED. This option may be removed in future releases, together with the InnoDB's internal memory allocator. Use OS memory allocator instead of InnoDB's internal memory allocator -NUMERIC_MIN_VALUE NULL -NUMERIC_MAX_VALUE NULL -NUMERIC_BLOCK_SIZE NULL -ENUM_VALUE_LIST NULL -READ_ONLY YES -COMMAND_LINE_ARGUMENT NONE VARIABLE_NAME INNODB_USE_TRIM SESSION_VALUE NULL GLOBAL_VALUE OFF @@ -2359,7 +2541,7 @@ READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_VERSION SESSION_VALUE NULL -GLOBAL_VALUE 5.6.31 +GLOBAL_VALUE 5.7.9 GLOBAL_VALUE_ORIGIN COMPILE-TIME DEFAULT_VALUE NULL VARIABLE_SCOPE GLOBAL diff --git a/mysql-test/suite/sys_vars/t/innodb_adaptive_hash_index_parts_basic.test b/mysql-test/suite/sys_vars/t/innodb_adaptive_hash_index_parts_basic.test new file mode 100644 index 00000000000..3f4a9283339 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_adaptive_hash_index_parts_basic.test @@ -0,0 +1,75 @@ +--source include/have_innodb.inc + +#################################################################### +# Displaying default value # +#################################################################### +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +--echo 1 Expected + + +#################################################################### +# Check if Value can set # +#################################################################### + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.innodb_adaptive_hash_index_parts=1; +--echo Expected error 'Read only variable' + +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +--echo 1 Expected + + + + +################################################################# +# Check if the value in GLOBAL Table matches value in variable # +################################################################# + +--disable_warnings +SELECT @@GLOBAL.innodb_adaptive_hash_index_parts = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_adaptive_hash_index_parts'; +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +--echo 1 Expected + +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_adaptive_hash_index_parts'; +--echo 1 Expected +--enable_warnings + + + +################################################################################ +# Check if accessing variable with and without GLOBAL point to same variable # 
+################################################################################ +SELECT @@innodb_adaptive_hash_index_parts = @@GLOBAL.innodb_adaptive_hash_index_parts; +--echo 1 Expected + + + +################################################################################ +# Check if innodb_adaptive_hash_index_parts can be accessed with and without @@ sign # +################################################################################ + +SELECT COUNT(@@innodb_adaptive_hash_index_parts); +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_adaptive_hash_index_parts); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_adaptive_hash_index_parts); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_adaptive_hash_index_parts); +--echo 1 Expected + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_adaptive_hash_index_parts = @@SESSION.innodb_adaptive_hash_index_parts; +--echo Expected error 'Readonly variable' + + diff --git a/mysql-test/suite/sys_vars/t/innodb_adaptive_max_sleep_delay_basic.test b/mysql-test/suite/sys_vars/t/innodb_adaptive_max_sleep_delay_basic.test index a2508b073eb..49349d86713 100644 --- a/mysql-test/suite/sys_vars/t/innodb_adaptive_max_sleep_delay_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_adaptive_max_sleep_delay_basic.test @@ -32,22 +32,40 @@ SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; # Check if value can be set SET @@GLOBAL.innodb_adaptive_max_sleep_delay=100; +# Check for valid values +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=0; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=100000; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=1000000; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; + # Check for out of bounds SET @@GLOBAL.innodb_adaptive_max_sleep_delay=1000001; SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; --echo 1000000 Expected +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=4294967295; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +--echo 1000000 Expected + SET @@GLOBAL.innodb_adaptive_max_sleep_delay=-1; SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; --echo 0 Expected +SET @@GLOBAL.innodb_adaptive_max_sleep_delay=-1024; +SELECT @@GLOBAL.innodb_adaptive_max_sleep_delay; +--echo 0 Expected + SELECT COUNT(@@GLOBAL.innodb_adaptive_max_sleep_delay); --echo 1 Expected # Check if the value in GLOBAL table matches value in variable +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_adaptive_max_sleep_delay'; +--enable_warnings --echo 100 Expected # Check if accessing variable with and without GLOBAL point to same diff --git a/mysql-test/suite/sys_vars/t/innodb_additional_mem_pool_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_additional_mem_pool_size_basic.test deleted file mode 100644 index ffb1046ed32..00000000000 --- a/mysql-test/suite/sys_vars/t/innodb_additional_mem_pool_size_basic.test +++ /dev/null @@ -1,102 +0,0 @@ - - -################## mysql-test\t\innodb_additional_mem_pool_size_basic.test #### -# # -# Variable Name: innodb_additional_mem_pool_size # -# Scope: Global # -# Access Type: Static # -# Data Type: numeric # -# # -# # -# Creation Date: 2008-02-07 # -# Author : Sharique Abdullah # -# # -# # -# Description:Test Cases of Dynamic System Variable innodb_additional_mem_pool_size# -# that checks the behavior of this 
variable in the following ways # -# * Value Check # -# * Scope Check # -# # -# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # -# server-system-variables.html # -# # -############################################################################### - ---source include/have_innodb.inc - ---echo '#---------------------BS_STVARS_020_01----------------------#' -#################################################################### -# Displaying default value # -#################################################################### -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); ---echo 1 Expected - - ---echo '#---------------------BS_STVARS_020_02----------------------#' -#################################################################### -# Check if Value can set # -#################################################################### - ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -SET @@GLOBAL.innodb_additional_mem_pool_size=1; ---echo Expected error 'Read only variable' - -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); ---echo 1 Expected - - - - ---echo '#---------------------BS_STVARS_020_03----------------------#' -################################################################# -# Check if the value in GLOBAL Table matches value in variable # -################################################################# - -SELECT @@GLOBAL.innodb_additional_mem_pool_size = VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_additional_mem_pool_size'; ---echo 1 Expected - -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); ---echo 1 Expected - -SELECT COUNT(VARIABLE_VALUE) -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_additional_mem_pool_size'; ---echo 1 Expected - - - ---echo '#---------------------BS_STVARS_020_04----------------------#' -################################################################################ -# Check if accessing variable with and without GLOBAL point to same variable # -################################################################################ -SELECT @@innodb_additional_mem_pool_size = @@GLOBAL.innodb_additional_mem_pool_size; ---echo 1 Expected - - - ---echo '#---------------------BS_STVARS_020_05----------------------#' -################################################################################ -# Check if innodb_additional_mem_pool_size can be accessed with and without @@ sign # -################################################################################ - -SELECT COUNT(@@innodb_additional_mem_pool_size); ---echo 1 Expected - ---Error ER_INCORRECT_GLOBAL_LOCAL_VAR -SELECT COUNT(@@local.innodb_additional_mem_pool_size); ---echo Expected error 'Variable is a GLOBAL variable' - ---Error ER_INCORRECT_GLOBAL_LOCAL_VAR -SELECT COUNT(@@SESSION.innodb_additional_mem_pool_size); ---echo Expected error 'Variable is a GLOBAL variable' - -SELECT COUNT(@@GLOBAL.innodb_additional_mem_pool_size); ---echo 1 Expected - ---Error ER_BAD_FIELD_ERROR -SELECT innodb_additional_mem_pool_size = @@SESSION.innodb_additional_mem_pool_size; ---echo Expected error 'Readonly variable' - - diff --git a/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test index b3a7aebce4e..7c9ae6395be 100644 --- a/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test @@ -19,22 +19,47 @@ SELECT @@global.innodb_api_bk_commit_interval; SELECT 
@@session.innodb_api_bk_commit_interval; SHOW global variables LIKE 'innodb_api_bk_commit_interval'; SHOW session variables LIKE 'innodb_api_bk_commit_interval'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_bk_commit_interval'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_api_bk_commit_interval'; +--enable_warnings # # show that it's writable # SET global innodb_api_bk_commit_interval=100; SELECT @@global.innodb_api_bk_commit_interval; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_bk_commit_interval'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_api_bk_commit_interval'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_api_bk_commit_interval=1; +# +# Valid values +# +SET global innodb_api_bk_commit_interval=1; +SELECT @@global.innodb_api_bk_commit_interval; +SET global innodb_api_bk_commit_interval=100000; +SELECT @@global.innodb_api_bk_commit_interval; +SET global innodb_api_bk_commit_interval=1073741824; +SELECT @@global.innodb_api_bk_commit_interval; + +# +# Invalid values +# +SET global innodb_api_bk_commit_interval=0; +SELECT @@global.innodb_api_bk_commit_interval; +SET global innodb_api_bk_commit_interval=-1024; +SELECT @@global.innodb_api_bk_commit_interval; +SET global innodb_api_bk_commit_interval=1073741825; +SELECT @@global.innodb_api_bk_commit_interval; +SET global innodb_api_bk_commit_interval=4294967295; +SELECT @@global.innodb_api_bk_commit_interval; # # incorrect types @@ -47,8 +72,10 @@ SET global innodb_api_bk_commit_interval=1e1; SET global innodb_api_bk_commit_interval="foo"; SET global innodb_api_bk_commit_interval=-7; SELECT @@global.innodb_api_bk_commit_interval; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_bk_commit_interval'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test index c9c04a27229..42e9903df5e 100644 --- a/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_api_disable_rowlock, 'ON', 'OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_disable_rowlock'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_disable_rowlock'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test index 637541ef621..e88d8ecac5c 100644 --- a/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_api_enable_binlog, 
'ON', 'OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_enable_binlog'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_enable_binlog'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test index 0e440a72cce..d3086878d6a 100644 --- a/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_api_enable_mdl, 'ON', 'OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_enable_mdl'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_api_enable_mdl'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test index 49c34b647fd..c77f8471d7b 100644 --- a/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test @@ -19,20 +19,24 @@ SELECT @@global.innodb_api_trx_level; SELECT @@session.innodb_api_trx_level; SHOW global variables LIKE 'innodb_api_trx_level'; SHOW session variables LIKE 'innodb_api_trx_level'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_trx_level'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_api_trx_level'; +--enable_warnings # # show that it's writable # SET global innodb_api_trx_level=100; SELECT @@global.innodb_api_trx_level; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_trx_level'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_api_trx_level'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_api_trx_level=1; @@ -47,8 +51,10 @@ SET global innodb_api_trx_level=1e1; SET global innodb_api_trx_level="foo"; SET global innodb_api_trx_level=-7; SELECT @@global.innodb_api_trx_level; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_api_trx_level'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test b/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test index cbe62a105ff..864dd732ec7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test @@ -94,18 +94,25 @@ SET @@global.innodb_autoextend_increment = 1001; SELECT @@global.innodb_autoextend_increment; +SET @@global.innodb_autoextend_increment = 2000 ; +SELECT @@global.innodb_autoextend_increment; + --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in 
variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_autoextend_increment = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_autoextend_increment '; +--enable_warnings SELECT @@global.innodb_autoextend_increment ; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_autoextend_increment '; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' diff --git a/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_basic.test b/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_basic.test index e07234a9152..81e63ddf858 100644 --- a/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_basic.test @@ -75,13 +75,17 @@ SELECT @@global.innodb_autoinc_lock_mode; # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_autoinc_lock_mode = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_autoinc_lock_mode'; +--enable_warnings SELECT @@global.innodb_autoinc_lock_mode; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_autoinc_lock_mode'; +--enable_warnings ############################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_func-master.opt b/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_func-master.opt index f0b6727d6d8..ab9fcb75678 100644 --- a/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_func-master.opt +++ b/mysql-test/suite/sys_vars/t/innodb_autoinc_lock_mode_func-master.opt @@ -1,2 +1,2 @@ ---loose-innodb-autoinc-lock-mode=1 +--innodb-autoinc-lock-mode=1 diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_chunk_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_chunk_size_basic.test new file mode 100644 index 00000000000..561786ee6e5 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_chunk_size_basic.test @@ -0,0 +1,75 @@ +--source include/have_innodb.inc + +#################################################################### +# Displaying default value # +#################################################################### +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +--echo 1 Expected + + +#################################################################### +# Check if Value can set # +#################################################################### + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.innodb_buffer_pool_chunk_size=1; +--echo Expected error 'Read only variable' + +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +--echo 1 Expected + + + + +################################################################# +# Check if the value in GLOBAL Table matches value in variable # +################################################################# + +--disable_warnings +SELECT @@GLOBAL.innodb_buffer_pool_chunk_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_buffer_pool_chunk_size'; +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +--echo 1 Expected + +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_buffer_pool_chunk_size'; +--echo 1 Expected 
+--enable_warnings + + + +################################################################################ +# Check if accessing variable with and without GLOBAL point to same variable # +################################################################################ +SELECT @@innodb_buffer_pool_chunk_size = @@GLOBAL.innodb_buffer_pool_chunk_size; +--echo 1 Expected + + + +################################################################################ +# Check if innodb_buffer_pool_chunk_size can be accessed with and without @@ sign # +################################################################################ + +SELECT COUNT(@@innodb_buffer_pool_chunk_size); +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_buffer_pool_chunk_size); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_buffer_pool_chunk_size); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_buffer_pool_chunk_size); +--echo 1 Expected + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_buffer_pool_chunk_size = @@SESSION.innodb_buffer_pool_chunk_size; +--echo Expected error 'Readonly variable' + + diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_at_shutdown_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_at_shutdown_basic.test index b69e856be5a..feb7bf05638 100644 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_at_shutdown_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_at_shutdown_basic.test @@ -40,3 +40,5 @@ SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 5; -- source include/restart_mysqld.inc -- file_exists $file + +SET GLOBAL innodb_buffer_pool_dump_at_shutdown = default; diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_pct_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_pct_basic.test index d2f5cb4a0de..ae45be7f2a3 100644 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_pct_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_dump_pct_basic.test @@ -1,34 +1,57 @@ -# -# Basic test for innodb_buffer_pool_dump_pct -# +############################################ +# Variable Name: innodb_buffer_pool_dump_pct +# Scope: GLOBAL +# Access Type: Dynamic +# Data Type: Integer +# Default Value: 100 +# Range: 1-100 +############################################ -- source include/have_innodb.inc # Check the default value -SET @orig = @@global.innodb_buffer_pool_dump_pct; -SELECT @orig; +SELECT @@global.innodb_buffer_pool_dump_pct; + +# Set the valid value +SET GLOBAL innodb_buffer_pool_dump_pct=20; + +# Check the value is 20 +SELECT @@global.innodb_buffer_pool_dump_pct; -# Do the dump -SET GLOBAL innodb_buffer_pool_dump_pct=3, GLOBAL innodb_buffer_pool_dump_now = ON; +# Set the lower Boundary value +SET GLOBAL innodb_buffer_pool_dump_pct=1; -# Wait for the dump to complete -let $wait_condition = - SELECT SUBSTR(variable_value, 1, 33) = 'Buffer pool(s) dump completed at ' - FROM information_schema.global_status - WHERE LOWER(variable_name) = 'innodb_buffer_pool_dump_status'; --- source include/wait_condition.inc +# Check the value is 1 +SELECT @@global.innodb_buffer_pool_dump_pct; -# Confirm that the dump file has been created --- let $file = `SELECT CONCAT(@@datadir, @@global.innodb_buffer_pool_filename)` --- file_exists $file +# Set the upper boundary value +SET GLOBAL innodb_buffer_pool_dump_pct=100; ---disable_warnings -SET GLOBAL innodb_buffer_pool_dump_pct=0; +# 
Check the value is 100 SELECT @@global.innodb_buffer_pool_dump_pct; -SHOW WARNINGS; + +# Set the beyond upper boundary value SET GLOBAL innodb_buffer_pool_dump_pct=101; + +# Check the value is 100 SELECT @@global.innodb_buffer_pool_dump_pct; -SHOW WARNINGS; ---enable_warnings -SET GLOBAL innodb_buffer_pool_dump_pct=@orig; +# Set the beyond lower boundary value +SET GLOBAL innodb_buffer_pool_dump_pct=-1; + +# Check the value is 1 +SELECT @@global.innodb_buffer_pool_dump_pct; + +# Set the Default value +SET GLOBAL innodb_buffer_pool_dump_pct=Default; + +# Check the default value +SELECT @@global.innodb_buffer_pool_dump_pct; + +# Set with some invalid value +--error ER_WRONG_TYPE_FOR_VAR +SET GLOBAL innodb_buffer_pool_dump_pct='foo'; + +# Set without using Global +--error ER_GLOBAL_VARIABLE +SET innodb_buffer_pool_dump_pct=50; diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_filename_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_filename_basic.test deleted file mode 100644 index c50d2d66dff..00000000000 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_filename_basic.test +++ /dev/null @@ -1,32 +0,0 @@ -# -# Basic test for innodb_buffer_pool_filename -# - --- source include/have_innodb.inc - -# Check the default value and save for later restoration -SET @orig = @@global.innodb_buffer_pool_filename; -SELECT @orig; - -let $old_val=query_get_value(SHOW STATUS LIKE 'innodb_buffer_pool_dump_status', Value, 1); -sleep 1; # to ensure that the previous and the next dumps are at least a second apart - -# Try with a non-default filename - -SET GLOBAL innodb_buffer_pool_filename = 'innodb_foobar_dump'; - -SET GLOBAL innodb_buffer_pool_dump_now = ON; --- let $file = `SELECT CONCAT(@@datadir, @@global.innodb_buffer_pool_filename)` - -# Wait for the dump to complete -let $wait_condition = - SELECT variable_value LIKE 'Buffer pool(s) dump completed at %' - AND variable_value <> '$old_val' - FROM information_schema.global_status - WHERE variable_name = 'innodb_buffer_pool_dump_status'; --- source include/wait_condition.inc - --- file_exists $file - -# Restore the env -SET GLOBAL innodb_buffer_pool_filename = @orig; diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_instances_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_instances_basic.test index 0960f1fb38b..8785272e10d 100644 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_instances_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_instances_basic.test @@ -53,17 +53,21 @@ SELECT COUNT(@@GLOBAL.innodb_buffer_pool_instances); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_buffer_pool_instances = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_buffer_pool_instances'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_buffer_pool_instances); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_buffer_pool_instances'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_load_now_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_load_now_basic.test index a0409901865..701d6a8a96a 100644 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_load_now_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_load_now_basic.test @@ -20,13 +20,11 @@ let $old_status= `SELECT 
variable_value FROM information_schema.global_status # let $wait_condition = # SELECT TRIM(SUBSTR('$old_status', -8)) != DATE_FORMAT(CURTIME(), '%k:%i:%s'); # -- source include/wait_condition.inc - if (`SELECT variable_value LIKE '%dump completed at%' FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_dump_status'`) { -- sleep 2 } - # Do the dump SET GLOBAL innodb_buffer_pool_dump_now = ON; @@ -36,7 +34,9 @@ let $wait_condition = AND SUBSTR(variable_value, 1, 33) = 'Buffer pool(s) dump completed at ' FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_dump_status'; +--disable_warnings -- source include/wait_condition.inc +--enable_warnings # Confirm the file is really created -- let $file = `SELECT CONCAT(@@datadir, @@global.innodb_buffer_pool_filename)` @@ -50,10 +50,15 @@ let $wait_condition = SELECT SUBSTR(variable_value, 1, 33) = 'Buffer pool(s) load completed at ' FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_load_status'; +--disable_warnings -- source include/wait_condition.inc +--enable_warnings # Show the status, interesting if the above timed out +--disable_warnings -- replace_regex /[0-9]{6}[[:space:]]+[0-9]{1,2}:[0-9]{2}:[0-9]{2}/TIMESTAMP_NOW/ SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_load_status'; +--enable_warnings + diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic-master.opt b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic-master.opt new file mode 100644 index 00000000000..aa536bf0070 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic-master.opt @@ -0,0 +1 @@ +--innodb-buffer-pool-chunk-size=2M diff --git a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic.test index 190b2d19bc4..c5b4c118da2 100644 --- a/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_buffer_pool_size_basic.test @@ -24,6 +24,21 @@ --source include/have_innodb.inc +--disable_query_log +if (`select (version() like '%debug%') > 0`) +{ + set @old_innodb_disable_resize = @@innodb_disable_resize_buffer_pool_debug; + set global innodb_disable_resize_buffer_pool_debug = OFF; +} +--enable_query_log + +let $wait_condition = + SELECT SUBSTR(variable_value, 1, 34) = 'Completed resizing buffer pool at ' + FROM information_schema.global_status + WHERE LOWER(variable_name) = 'innodb_buffer_pool_resize_status'; + +SET @start_buffer_pool_size = @@GLOBAL.innodb_buffer_pool_size; + --echo '#---------------------BS_STVARS_022_01----------------------#' #################################################################### # Displaying default value # @@ -37,9 +52,9 @@ SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); # Check if Value can set # #################################################################### ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -SET @@GLOBAL.innodb_buffer_pool_size=1; ---echo Expected error 'Read only variable' +SET @@GLOBAL.innodb_buffer_pool_size=10485760; +--echo Expected succeeded +--source include/wait_condition.inc SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); --echo 1 Expected @@ -52,17 +67,21 @@ SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_buffer_pool_size = 
VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_buffer_pool_size'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_buffer_pool_size); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_buffer_pool_size'; +--enable_warnings --echo 1 Expected @@ -100,3 +119,12 @@ SELECT innodb_buffer_pool_size = @@SESSION.innodb_buffer_pool_size; --echo Expected error 'Readonly variable' +SET @@GLOBAL.innodb_buffer_pool_size = @start_buffer_pool_size; +--source include/wait_condition.inc + +--disable_query_log +if (`select (version() like '%debug%') > 0`) +{ + set global innodb_disable_resize_buffer_pool_debug = @old_innodb_disable_resize; +} +--enable_query_log diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test index 5e081b1a0a4..2094ef3dc0b 100644 --- a/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_change_buffer_max_size_basic.test @@ -18,16 +18,20 @@ select @@global.innodb_change_buffer_max_size; select @@session.innodb_change_buffer_max_size; show global variables like 'innodb_change_buffer_max_size'; show session variables like 'innodb_change_buffer_max_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size'; select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size'; +--enable_warnings # # show that it's writable # set global innodb_change_buffer_max_size=10; select @@global.innodb_change_buffer_max_size; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size'; select * from information_schema.session_variables where variable_name='innodb_change_buffer_max_size'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_change_buffer_max_size=1; @@ -43,10 +47,14 @@ set global innodb_change_buffer_max_size="foo"; set global innodb_change_buffer_max_size=-7; select @@global.innodb_change_buffer_max_size; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size'; +--enable_warnings set global innodb_change_buffer_max_size=56; select @@global.innodb_change_buffer_max_size; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffer_max_size'; +--enable_warnings # # min/max/DEFAULT values diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test index abdfddb4c4b..aba3b1e3479 100644 --- a/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_change_buffering_basic.test @@ -18,20 +18,26 @@ select @@global.innodb_change_buffering; select @@session.innodb_change_buffering; show global variables like 'innodb_change_buffering'; show session variables like 'innodb_change_buffering'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffering'; select * from information_schema.session_variables where variable_name='innodb_change_buffering'; +--enable_warnings # # show that it's writable # set global innodb_change_buffering='none'; select @@global.innodb_change_buffering; +--disable_warnings select * from 
information_schema.global_variables where variable_name='innodb_change_buffering'; select * from information_schema.session_variables where variable_name='innodb_change_buffering'; +--enable_warnings set @@global.innodb_change_buffering='inserts'; select @@global.innodb_change_buffering; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffering'; select * from information_schema.session_variables where variable_name='innodb_change_buffering'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_change_buffering='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test index 893d1cb42e3..a6fc09f767e 100644 --- a/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_change_buffering_debug_basic.test @@ -13,20 +13,26 @@ select @@global.innodb_change_buffering_debug; select @@session.innodb_change_buffering_debug; show global variables like 'innodb_change_buffering_debug'; show session variables like 'innodb_change_buffering_debug'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug'; select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug'; +--enable_warnings # # show that it's writable # set global innodb_change_buffering_debug=1; select @@global.innodb_change_buffering_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug'; select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug'; +--enable_warnings set @@global.innodb_change_buffering_debug=0; select @@global.innodb_change_buffering_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_change_buffering_debug'; select * from information_schema.session_variables where variable_name='innodb_change_buffering_debug'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_change_buffering_debug='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test b/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test index e7098b7e3b3..bb0f3417f87 100644 --- a/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test @@ -1,5 +1,4 @@ --source include/have_innodb.inc ---source include/not_encrypted.inc # Check the default value SET @orig = @@global.innodb_checksum_algorithm; diff --git a/mysql-test/suite/sys_vars/t/innodb_checksums_basic.test b/mysql-test/suite/sys_vars/t/innodb_checksums_basic.test index c4c39d7d380..5db0a18e8fd 100644 --- a/mysql-test/suite/sys_vars/t/innodb_checksums_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_checksums_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_checksums); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_checksums, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_checksums'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_checksums); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM 
INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_checksums'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test b/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test index d729acea02c..432c04857ec 100644 --- a/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test @@ -26,8 +26,6 @@ SELECT @@global.innodb_cmp_per_index_enabled; SET GLOBAL innodb_cmp_per_index_enabled=OFF; SELECT @@global.innodb_cmp_per_index_enabled; -SET GLOBAL innodb_file_format=Barracuda; - -- vertical_results # Check that enabling after being disabled resets the stats @@ -65,5 +63,4 @@ DROP TABLE t; # -SET GLOBAL innodb_file_format=default; SET GLOBAL innodb_cmp_per_index_enabled=default; diff --git a/mysql-test/suite/sys_vars/t/innodb_commit_concurrency_basic.test b/mysql-test/suite/sys_vars/t/innodb_commit_concurrency_basic.test index 42d172934d1..4ed706b372b 100644 --- a/mysql-test/suite/sys_vars/t/innodb_commit_concurrency_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_commit_concurrency_basic.test @@ -85,26 +85,38 @@ SELECT @@global.innodb_commit_concurrency; # --Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_commit_concurrency = 1; +SELECT @@global.innodb_commit_concurrency; --Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_commit_concurrency = -1; +SELECT @@global.innodb_commit_concurrency; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_commit_concurrency = "T"; +SELECT @@global.innodb_commit_concurrency; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_commit_concurrency = "Y"; +SELECT @@global.innodb_commit_concurrency; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_commit_concurrency = 1.1; +SELECT @@global.innodb_commit_concurrency; --Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_commit_concurrency = 1001; +SELECT @@global.innodb_commit_concurrency; + + --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_commit_concurrency = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_commit_concurrency'; SELECT @@global.innodb_commit_concurrency; SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_commit_concurrency'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test index 1cdfaa6b31d..315fe2df3c7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test @@ -87,6 +87,9 @@ SELECT @@global.innodb_compression_failure_threshold_pct; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_compression_failure_threshold_pct = "T"; SELECT @@global.innodb_compression_failure_threshold_pct; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_failure_threshold_pct = 1.1; +SELECT @@global.innodb_compression_failure_threshold_pct; --Error ER_WRONG_TYPE_FOR_VAR SET 
@@global.innodb_compression_failure_threshold_pct = "Y"; @@ -94,19 +97,28 @@ SELECT @@global.innodb_compression_failure_threshold_pct; SET @@global.innodb_compression_failure_threshold_pct = 101; SELECT @@global.innodb_compression_failure_threshold_pct; - +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_failure_threshold_pct = " "; +SELECT @@global.innodb_compression_failure_threshold_pct; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_failure_threshold_pct = ' '; +SELECT @@global.innodb_compression_failure_threshold_pct; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_compression_failure_threshold_pct = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct'; +--enable_warnings SELECT @@global.innodb_compression_failure_threshold_pct; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test index a90abdde2f1..d19d2971fc9 100644 --- a/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test @@ -18,16 +18,20 @@ select @@global.innodb_compression_level; select @@session.innodb_compression_level; show global variables like 'innodb_compression_level'; show session variables like 'innodb_compression_level'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_compression_level'; select * from information_schema.session_variables where variable_name='innodb_compression_level'; +--enable_warnings # # show that it's writable # set global innodb_compression_level=2; select @@global.innodb_compression_level; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_compression_level'; select * from information_schema.session_variables where variable_name='innodb_compression_level'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_compression_level=4; @@ -43,10 +47,14 @@ set global innodb_compression_level="foo"; set global innodb_compression_level=10; select @@global.innodb_compression_level; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_compression_level'; +--enable_warnings set global innodb_compression_level=-7; select @@global.innodb_compression_level; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_compression_level'; +--enable_warnings # # min/max values diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test index 3ca566956ef..1491f705ab2 100644 --- a/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test @@ -84,22 +84,34 @@ SELECT @@global.innodb_compression_pad_pct_max; 
--Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_compression_pad_pct_max = "T"; SELECT @@global.innodb_compression_pad_pct_max; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_pad_pct_max = 1.1; +SELECT @@global.innodb_compression_pad_pct_max; SET @@global.innodb_compression_pad_pct_max = 76; SELECT @@global.innodb_compression_pad_pct_max; - +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_pad_pct_max = " "; +SELECT @@global.innodb_compression_pad_pct_max; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_compression_pad_pct_max = ' '; +SELECT @@global.innodb_compression_pad_pct_max; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_compression_pad_pct_max = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_compression_pad_pct_max'; +--enable_warnings SELECT @@global.innodb_compression_pad_pct_max; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_compression_pad_pct_max'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test b/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test index f73e25179ba..d753b8bc344 100644 --- a/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test @@ -59,7 +59,6 @@ SELECT @@innodb_concurrency_tickets; --Error ER_UNKNOWN_TABLE SELECT local.innodb_concurrency_tickets; - SET global innodb_concurrency_tickets = 0; SELECT @@global.innodb_concurrency_tickets; @@ -69,7 +68,6 @@ SELECT @@global.innodb_concurrency_tickets; # change the value of innodb_concurrency_tickets to a valid value # ########################################################################## - SET @@global.innodb_concurrency_tickets = 1; SELECT @@global.innodb_concurrency_tickets; @@ -79,15 +77,38 @@ SELECT @@global.innodb_concurrency_tickets; SET @@global.innodb_concurrency_tickets = 4294967295; SELECT @@global.innodb_concurrency_tickets; - --echo '#--------------------FN_DYNVARS_046_04-------------------------#' ########################################################################### +# Check the value of innodb_concurrency_tickets for out of bounds # +########################################################################### + +# With a 64 bit mysqld:18446744073709551615,with a 32 bit mysqld: 4294967295 +--disable_warnings +SET @@global.innodb_concurrency_tickets = 4294967296; +--enable_warnings +SELECT @@global.innodb_concurrency_tickets IN (4294967296,4294967295); + +--disable_warnings +SET @@global.innodb_concurrency_tickets = 12345678901; +--enable_warnings +SELECT @@global.innodb_concurrency_tickets IN (12345678901,4294967295); + +--disable_warnings +SET @@global.innodb_concurrency_tickets = 18446744073709551615; +--enable_warnings +SELECT @@global.innodb_concurrency_tickets IN (18446744073709551615,4294967295); + +--echo '#--------------------FN_DYNVARS_046_05-------------------------#' +########################################################################### # Change the value of 
innodb_concurrency_tickets to invalid value # ########################################################################### SET @@global.innodb_concurrency_tickets = -1; SELECT @@global.innodb_concurrency_tickets; +SET @@global.innodb_concurrency_tickets = -1024; +SELECT @@global.innodb_concurrency_tickets; + --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_concurrency_tickets = "T"; SELECT @@global.innodb_concurrency_tickets; @@ -96,22 +117,35 @@ SELECT @@global.innodb_concurrency_tickets; SET @@global.innodb_concurrency_tickets = "Y"; SELECT @@global.innodb_concurrency_tickets; -SET @@global.innodb_concurrency_tickets = 1001; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_concurrency_tickets = 1.1; +SELECT @@global.innodb_concurrency_tickets; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_concurrency_tickets = " "; +SELECT @@global.innodb_concurrency_tickets; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_concurrency_tickets = ' '; SELECT @@global.innodb_concurrency_tickets; ---echo '#----------------------FN_DYNVARS_046_05------------------------#' +--echo '#----------------------FN_DYNVARS_046_06------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_concurrency_tickets = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_concurrency_tickets'; +--enable_warnings SELECT @@global.innodb_concurrency_tickets; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_concurrency_tickets'; +--enable_warnings ---echo '#---------------------FN_DYNVARS_046_06-------------------------#' +--echo '#---------------------FN_DYNVARS_046_07-------------------------#' ################################################################### # Check if ON and OFF values can be used on variable # ################################################################### @@ -124,7 +158,7 @@ SELECT @@global.innodb_concurrency_tickets; SET @@global.innodb_concurrency_tickets = ON; SELECT @@global.innodb_concurrency_tickets; ---echo '#---------------------FN_DYNVARS_046_07----------------------#' +--echo '#---------------------FN_DYNVARS_046_08----------------------#' ################################################################### # Check if TRUE and FALSE values can be used on variable # ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_data_file_path_basic.test b/mysql-test/suite/sys_vars/t/innodb_data_file_path_basic.test index 1d88c47b1bb..c936744297f 100644 --- a/mysql-test/suite/sys_vars/t/innodb_data_file_path_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_data_file_path_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_data_file_path); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_data_file_path = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_data_file_path'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_data_file_path); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_data_file_path'; +--enable_warnings --echo 1 Expected diff --git 
a/mysql-test/suite/sys_vars/t/innodb_data_home_dir_basic.test b/mysql-test/suite/sys_vars/t/innodb_data_home_dir_basic.test index acf3741d5fa..b6b7999900a 100644 --- a/mysql-test/suite/sys_vars/t/innodb_data_home_dir_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_data_home_dir_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_data_home_dir); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_data_home_dir = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_data_home_dir'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_data_home_dir); --echo 0 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_data_home_dir'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_default_row_format_basic.test b/mysql-test/suite/sys_vars/t/innodb_default_row_format_basic.test new file mode 100644 index 00000000000..f9aabf49ba4 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_default_row_format_basic.test @@ -0,0 +1,41 @@ +--source include/have_innodb.inc + +# Check the default value +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 'redundant'; +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 'dynamic'; +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 'compact'; +SELECT @@global.innodb_default_row_format; + +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_default_row_format = 'compressed'; +SELECT @@global.innodb_default_row_format; + +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_default_row_format = 'foobar'; +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 0; +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 1; +SELECT @@global.innodb_default_row_format; + +SET GLOBAL innodb_default_row_format = 2; +SELECT @@global.innodb_default_row_format; + +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_default_row_format = 3; +SELECT @@global.innodb_default_row_format; + +--error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_default_row_format = 123; +SELECT @@global.innodb_default_row_format; + + +SET GLOBAL innodb_default_row_format = default; diff --git a/mysql-test/suite/sys_vars/t/innodb_disable_resize_buffer_pool_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_disable_resize_buffer_pool_debug_basic.test new file mode 100644 index 00000000000..e381e746c06 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_disable_resize_buffer_pool_debug_basic.test @@ -0,0 +1,72 @@ +--echo # +--echo # Basic test for innodb_disable_resize_buffer_pool_debug +--echo # + +--source include/have_innodb.inc + +# The config variable is a debug variable +-- source include/have_debug.inc + +SET @start_global_value = @@global.innodb_disable_resize_buffer_pool_debug; + +# Check if Value can set + +SET @@global.innodb_disable_resize_buffer_pool_debug = 0; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; + +SET @@global.innodb_disable_resize_buffer_pool_debug ='On' ; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; + +SET @@global.innodb_disable_resize_buffer_pool_debug ='Off' ; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; + +SET @@global.innodb_disable_resize_buffer_pool_debug = 1; +SELECT 
@@global.innodb_disable_resize_buffer_pool_debug; + +# Check if the value in GLOBAL Table matches value in variable + +--disable_warnings +SELECT IF(@@GLOBAL.innodb_disable_resize_buffer_pool_debug,'ON','OFF') = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_disable_resize_buffer_pool_debug'; +--enable_warnings +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug); +--echo 1 Expected + +--disable_warnings +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_disable_resize_buffer_pool_debug'; +--enable_warnings +--echo 1 Expected + +# Check if accessing variable with and without GLOBAL point to same variable + +SELECT @@innodb_disable_resize_buffer_pool_debug = @@GLOBAL.innodb_disable_resize_buffer_pool_debug; +--echo 1 Expected + +# Check if innodb_disable_resize_buffer_pool_debug can be accessed with and without @@ sign + +SELECT COUNT(@@innodb_disable_resize_buffer_pool_debug); +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_disable_resize_buffer_pool_debug); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_disable_resize_buffer_pool_debug); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_disable_resize_buffer_pool_debug); +--echo 1 Expected + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_disable_resize_buffer_pool_debug = @@SESSION.innodb_disable_resize_buffer_pool_debug; + +# Cleanup + +SET @@global.innodb_disable_resize_buffer_pool_debug = @start_global_value; +SELECT @@global.innodb_disable_resize_buffer_pool_debug; diff --git a/mysql-test/suite/sys_vars/t/innodb_doublewrite_basic.test b/mysql-test/suite/sys_vars/t/innodb_doublewrite_basic.test index 72dd22cbeb8..1ae10d0f7cf 100644 --- a/mysql-test/suite/sys_vars/t/innodb_doublewrite_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_doublewrite_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_doublewrite); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_doublewrite, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_doublewrite'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_doublewrite); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_doublewrite'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_doublewrite_batch_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_doublewrite_batch_size_basic.test index ccdab532737..5e9104b5335 100644 --- a/mysql-test/suite/sys_vars/t/innodb_doublewrite_batch_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_doublewrite_batch_size_basic.test @@ -10,8 +10,10 @@ select @@global.innodb_doublewrite_batch_size; select @@session.innodb_doublewrite_batch_size; show global variables like 'innodb_doublewrite_batch_size'; show session variables like 'innodb_doublewrite_batch_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_doublewrite_batch_size'; select * from information_schema.session_variables where variable_name='innodb_doublewrite_batch_size'; +--enable_warnings # # show that it's read-only diff --git 
a/mysql-test/suite/sys_vars/t/innodb_fast_shutdown_basic.test b/mysql-test/suite/sys_vars/t/innodb_fast_shutdown_basic.test index e1b62046313..9fe9f490aa4 100644 --- a/mysql-test/suite/sys_vars/t/innodb_fast_shutdown_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_fast_shutdown_basic.test @@ -116,7 +116,15 @@ SELECT @@global.innodb_fast_shutdown; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_fast_shutdown = "0"; SELECT @@global.innodb_fast_shutdown; - +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_fast_shutdown = 1.1; +SELECT @@global.innodb_fast_shutdown; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_fast_shutdown = ' '; +SELECT @@global.innodb_fast_shutdown; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_fast_shutdown = " "; +SELECT @@global.innodb_fast_shutdown; --echo '#-------------------FN_DYNVARS_042_05----------------------------#' ########################################################################### @@ -137,9 +145,11 @@ SET @@local.innodb_fast_shutdown = 0; # Check if the value in SESSION Table contains variable value # ######################################################################### +--disable_warnings SELECT count(VARIABLE_VALUE) AS res_is_0 FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME='innodb_fast_shutdown'; +--enable_warnings --echo '#----------------------FN_DYNVARS_042_07------------------------#' @@ -147,9 +157,11 @@ WHERE VARIABLE_NAME='innodb_fast_shutdown'; # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_fast_shutdown = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_fast_shutdown'; +--enable_warnings --echo '#---------------------FN_DYNVARS_042_08-------------------------#' diff --git a/mysql-test/suite/sys_vars/t/innodb_file_format_basic.test b/mysql-test/suite/sys_vars/t/innodb_file_format_basic.test index bfc092f2f05..739260c07e5 100644 --- a/mysql-test/suite/sys_vars/t/innodb_file_format_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_file_format_basic.test @@ -18,20 +18,26 @@ select @@global.innodb_file_format; select @@session.innodb_file_format; show global variables like 'innodb_file_format'; show session variables like 'innodb_file_format'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_file_format'; select * from information_schema.session_variables where variable_name='innodb_file_format'; +--enable_warnings # # show that it's writable # set global innodb_file_format='Antelope'; select @@global.innodb_file_format; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_file_format'; select * from information_schema.session_variables where variable_name='innodb_file_format'; +--enable_warnings set @@global.innodb_file_format='Barracuda'; select @@global.innodb_file_format; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_file_format'; select * from information_schema.session_variables where variable_name='innodb_file_format'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_file_format='Salmon'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_file_format_check_basic.test b/mysql-test/suite/sys_vars/t/innodb_file_format_check_basic.test index f9f61b9380c..56afba48e29 100644 --- a/mysql-test/suite/sys_vars/t/innodb_file_format_check_basic.test +++ 
b/mysql-test/suite/sys_vars/t/innodb_file_format_check_basic.test @@ -19,10 +19,12 @@ SELECT @@global.innodb_file_format_check; SELECT @@session.innodb_file_format_check; SHOW global variables LIKE 'innodb_file_format_check'; SHOW session variables LIKE 'innodb_file_format_check'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_file_format_check'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_file_format_check'; +--enable_warnings # # show that it's read only @@ -53,17 +55,21 @@ SET @@session.innodb_stats_on_metadata='ON'; # Check if the value in GLOBAL Table matches value in variable # +--disable_warnings SELECT IF(@@GLOBAL.innodb_file_format_check, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_file_format_check'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_file_format_check); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_file_format_check'; +--enable_warnings --echo 1 Expected # diff --git a/mysql-test/suite/sys_vars/t/innodb_file_format_max_basic.test b/mysql-test/suite/sys_vars/t/innodb_file_format_max_basic.test index 18076cfef7f..494f3817cb8 100644 --- a/mysql-test/suite/sys_vars/t/innodb_file_format_max_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_file_format_max_basic.test @@ -3,7 +3,6 @@ # --source include/not_embedded.inc --source include/have_innodb.inc ---source suite/innodb/include/restart_and_reinit.inc SET @start_global_value = @@global.innodb_file_format_max; SELECT @start_global_value; @@ -18,26 +17,32 @@ SELECT @@global.innodb_file_format_max; SELECT @@session.innodb_file_format_max; SHOW global variables LIKE 'innodb_file_format_max'; SHOW session variables LIKE 'innodb_file_format_max'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_file_format_max'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_file_format_max'; +--enable_warnings # # show that it's writable # SET global innodb_file_format_max='Antelope'; SELECT @@global.innodb_file_format_max; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_file_format_max'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_file_format_max'; +--enable_warnings SET @@global.innodb_file_format_max='Barracuda'; SELECT @@global.innodb_file_format_max; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_file_format_max'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_file_format_max'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_file_format_max='Salmon'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_file_io_threads_basic.test b/mysql-test/suite/sys_vars/t/innodb_file_io_threads_basic.test index 32cdd0beac4..c701c2ee171 100644 --- a/mysql-test/suite/sys_vars/t/innodb_file_io_threads_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_file_io_threads_basic.test @@ -61,30 +61,38 @@ SELECT COUNT(@@GLOBAL.innodb_write_io_threads); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_read_io_threads = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE 
VARIABLE_NAME='innodb_read_io_threads'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_read_io_threads); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_read_io_threads'; +--enable_warnings --echo 1 Expected +--disable_warnings SELECT @@GLOBAL.innodb_write_io_threads = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_write_io_threads'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_write_io_threads); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_write_io_threads'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic-master.opt b/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic-master.opt new file mode 100644 index 00000000000..9d2c4f807e0 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic-master.opt @@ -0,0 +1 @@ +--innodb_file_per_table=On diff --git a/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic.test b/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic.test index 1478d6df2e9..2fd9783e16d 100644 --- a/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_file_per_table_basic.test @@ -58,17 +58,21 @@ SELECT @@global.innodb_file_per_table; # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_file_per_table,'ON','OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_file_per_table'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_file_per_table); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_file_per_table'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_fill_factor_basic.test b/mysql-test/suite/sys_vars/t/innodb_fill_factor_basic.test new file mode 100644 index 00000000000..8e4caae0088 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_fill_factor_basic.test @@ -0,0 +1,41 @@ + +# +# 2014-03-26 - Added +# + +--source include/have_innodb.inc + +# +# show the global and session values; +# +select @@global.innodb_fill_factor; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_fill_factor; +show global variables like 'innodb_fill_factor'; +show session variables like 'innodb_fill_factor'; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_fill_factor'; +select * from information_schema.session_variables where variable_name='innodb_fill_factor'; +--enable_warnings + +# +# test default, min, max value +# +let $innodb_fill_factor_orig=`select @@innodb_fill_factor`; + +set global innodb_fill_factor=9; +select @@innodb_fill_factor; + +set global innodb_fill_factor=10; +select @@innodb_fill_factor; + +set global innodb_fill_factor=75; +select @@innodb_fill_factor; + +set global innodb_fill_factor=100; +select @@innodb_fill_factor; + +set global innodb_fill_factor=101; +select @@innodb_fill_factor; + +eval set global innodb_fill_factor=$innodb_fill_factor_orig; diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test index 0ab079adaa8..09a790fc3b6 100644 
--- a/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test @@ -101,18 +101,31 @@ SELECT @@global.innodb_flush_log_at_timeout; SET @@global.innodb_flush_log_at_timeout = 2701; SELECT @@global.innodb_flush_log_at_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_timeout = ' '; +SELECT @@global.innodb_flush_log_at_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_timeout = " "; +SELECT @@global.innodb_flush_log_at_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_timeout = 1.1; +SELECT @@global.innodb_flush_log_at_timeout; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_flush_log_at_timeout = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_log_at_timeout'; +--enable_warnings SELECT @@global.innodb_flush_log_at_timeout; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_log_at_timeout'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_log_at_trx_commit_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_trx_commit_basic.test index 56cfc2ffebe..34510cdb462 100644 --- a/mysql-test/suite/sys_vars/t/innodb_flush_log_at_trx_commit_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_trx_commit_basic.test @@ -56,6 +56,11 @@ SELECT @@global.innodb_flush_log_at_trx_commit; # Check if variable can be accessed with and without @@ sign # ############################################################################### +--Error ER_GLOBAL_VARIABLE +SET innodb_flush_log_at_trx_commit = 1; +SELECT @@innodb_flush_log_at_trx_commit; + + --Error ER_UNKNOWN_TABLE SELECT local.innodb_flush_log_at_trx_commit; @@ -96,18 +101,35 @@ SELECT @@global.innodb_flush_log_at_trx_commit; SET @@global.innodb_flush_log_at_trx_commit = 1001; SELECT @@global.innodb_flush_log_at_trx_commit; + +SET @@global.innodb_flush_log_at_trx_commit = 100156787; +SELECT @@global.innodb_flush_log_at_trx_commit; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_trx_commit = " "; +SELECT @@global.innodb_flush_log_at_trx_commit; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_trx_commit = 1.1; +SELECT @@global.innodb_flush_log_at_trx_commit; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flush_log_at_trx_commit = ' '; +SELECT @@global.innodb_flush_log_at_trx_commit; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_flush_log_at_trx_commit = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_log_at_trx_commit'; +--enable_warnings SELECT @@global.innodb_flush_log_at_trx_commit; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE 
VARIABLE_NAME='innodb_flush_log_at_trx_commit'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test index 75af00e33af..9f99c1305fd 100644 --- a/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flush_method_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_flush_method); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_flush_method = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_method'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_flush_method); --echo 0 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flush_method'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test index 698e30b6669..671e6f58310 100644 --- a/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test @@ -16,32 +16,44 @@ select @@global.innodb_flush_neighbors; select @@session.innodb_flush_neighbors; show global variables like 'innodb_flush_neighbors'; show session variables like 'innodb_flush_neighbors'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings # # show that it's writable # set global innodb_flush_neighbors=0; select @@global.innodb_flush_neighbors; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings set @@global.innodb_flush_neighbors=TRUE; select @@global.innodb_flush_neighbors; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings set global innodb_flush_neighbors=0; select @@global.innodb_flush_neighbors; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings set @@global.innodb_flush_neighbors=2; select @@global.innodb_flush_neighbors; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings set @@global.innodb_flush_neighbors=DEFAULT; select @@global.innodb_flush_neighbors; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_flush_neighbors'; select * from information_schema.session_variables where variable_name='innodb_flush_neighbors'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_flush_neighbors=0; --error 
ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_sync_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_sync_basic.test new file mode 100644 index 00000000000..a73575864bd --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_flush_sync_basic.test @@ -0,0 +1,77 @@ +--source include/have_innodb.inc + +SET @start_global_value = @@global.innodb_flush_sync; +SELECT @start_global_value; + +# +# exists as global only +# +--echo Valid values are 'ON' and 'OFF' +select @@global.innodb_flush_sync in (0, 1); +select @@global.innodb_flush_sync; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_flush_sync; +show global variables like 'innodb_flush_sync'; +show session variables like 'innodb_flush_sync'; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings + +# +# show that it's writable +# +set global innodb_flush_sync='OFF'; +select @@global.innodb_flush_sync; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings +set @@global.innodb_flush_sync=1; +select @@global.innodb_flush_sync; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings +set global innodb_flush_sync=0; +select @@global.innodb_flush_sync; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings +set @@global.innodb_flush_sync='ON'; +select @@global.innodb_flush_sync; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings +--error ER_GLOBAL_VARIABLE +set session innodb_flush_sync='OFF'; +--error ER_GLOBAL_VARIABLE +set @@session.innodb_flush_sync='ON'; + +# +# incorrect types +# +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_flush_sync=1.1; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_flush_sync=1e1; +--error ER_WRONG_VALUE_FOR_VAR +set global innodb_flush_sync=2; +--error ER_WRONG_VALUE_FOR_VAR +set global innodb_flush_sync=-3; +select @@global.innodb_flush_sync; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_flush_sync'; +select * from information_schema.session_variables where variable_name='innodb_flush_sync'; +--enable_warnings +--error ER_WRONG_VALUE_FOR_VAR +set global innodb_flush_sync='AUTO'; + +# +# Cleanup +# + +SET @@global.innodb_flush_sync = @start_global_value; +SELECT @@global.innodb_flush_sync; diff --git a/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test b/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test index a84e623f2c3..f23f9697197 100644 --- a/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test @@ -94,18 +94,28 @@ SELECT @@global.innodb_flushing_avg_loops; SET @@global.innodb_flushing_avg_loops = 1001; SELECT @@global.innodb_flushing_avg_loops; +--Error 
ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flushing_avg_loops = ' '; +SELECT @@global.innodb_flushing_avg_loops; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_flushing_avg_loops = " "; +SELECT @@global.innodb_flushing_avg_loops; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_flushing_avg_loops = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flushing_avg_loops'; +--enable_warnings SELECT @@global.innodb_flushing_avg_loops; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_flushing_avg_loops'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_force_load_corrupted_basic.test b/mysql-test/suite/sys_vars/t/innodb_force_load_corrupted_basic.test index 1726b320f47..f12f2f670a4 100644 --- a/mysql-test/suite/sys_vars/t/innodb_force_load_corrupted_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_force_load_corrupted_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_force_load_corrupted); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_force_load_corrupted, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_force_load_corrupted'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_force_load_corrupted); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_force_load_corrupted'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_force_recovery_basic.test b/mysql-test/suite/sys_vars/t/innodb_force_recovery_basic.test index f5aa769f09f..a62c895c202 100644 --- a/mysql-test/suite/sys_vars/t/innodb_force_recovery_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_force_recovery_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_force_recovery); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_force_recovery = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_force_recovery'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_force_recovery); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_force_recovery'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test b/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test index 5eefe1b9219..cfbd10c4e31 100644 --- a/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test @@ -10,8 +10,10 @@ select @@global.innodb_force_recovery_crash; select @@session.innodb_force_recovery_crash; show global variables like 'innodb_force_recovery_crash'; show session variables like 
'innodb_force_recovery_crash'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_force_recovery_crash'; select * from information_schema.session_variables where variable_name='innodb_force_recovery_crash'; +--enable_warnings # show that it's read-only # diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_aux_table_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_aux_table_basic.test index 2ea99cf9835..04ca34c2b19 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_aux_table_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_aux_table_basic.test @@ -15,8 +15,10 @@ SELECT @start_global_value; select @@session.innodb_ft_aux_table; show global variables like 'innodb_ft_aux_table'; show session variables like 'innodb_ft_aux_table'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_aux_table'; select * from information_schema.session_variables where variable_name='innodb_ft_aux_table'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_ft_aux_table='Salmon'; diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_cache_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_cache_size_basic.test index f6d62835f0a..30bcd08d4dd 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_cache_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_cache_size_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_ft_cache_size; select @@session.innodb_ft_cache_size; show global variables like 'innodb_ft_cache_size'; show session variables like 'innodb_ft_cache_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_cache_size'; select * from information_schema.session_variables where variable_name='innodb_ft_cache_size'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_enable_diag_print_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_enable_diag_print_basic.test index ebe9cc556ec..630ada004df 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_enable_diag_print_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_enable_diag_print_basic.test @@ -18,28 +18,38 @@ select @@global.innodb_ft_enable_diag_print; select @@session.innodb_ft_enable_diag_print; show global variables like 'innodb_ft_enable_diag_print'; show session variables like 'innodb_ft_enable_diag_print'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings # # show that it's writable # set global innodb_ft_enable_diag_print='OFF'; select @@global.innodb_ft_enable_diag_print; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings set @@global.innodb_ft_enable_diag_print=1; select @@global.innodb_ft_enable_diag_print; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings set global innodb_ft_enable_diag_print=0; select @@global.innodb_ft_enable_diag_print; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; 
select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings set @@global.innodb_ft_enable_diag_print='ON'; select @@global.innodb_ft_enable_diag_print; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_ft_enable_diag_print='OFF'; --error ER_GLOBAL_VARIABLE @@ -57,8 +67,10 @@ set global innodb_ft_enable_diag_print=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_ft_enable_diag_print=-3; select @@global.innodb_ft_enable_diag_print; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_ft_enable_diag_print='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_enable_stopword_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_enable_stopword_basic.test index 1a983a3d7e6..5eb5af4df23 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_enable_stopword_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_enable_stopword_basic.test @@ -18,8 +18,10 @@ select @@session.innodb_ft_enable_stopword in (0, 1); select @@session.innodb_ft_enable_stopword; show global variables like 'innodb_ft_enable_stopword'; show session variables like 'innodb_ft_enable_stopword'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings # # show that it's writable @@ -28,26 +30,34 @@ set global innodb_ft_enable_stopword='OFF'; set session innodb_ft_enable_stopword='OFF'; select @@global.innodb_ft_enable_stopword; select @@session.innodb_ft_enable_stopword; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings set @@global.innodb_ft_enable_stopword=1; set @@session.innodb_ft_enable_stopword=1; select @@global.innodb_ft_enable_stopword; select @@session.innodb_ft_enable_stopword; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings set global innodb_ft_enable_stopword=0; set session innodb_ft_enable_stopword=0; select @@global.innodb_ft_enable_stopword; select @@session.innodb_ft_enable_stopword; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings set @@global.innodb_ft_enable_stopword='ON'; set @@session.innodb_ft_enable_stopword='ON'; select @@global.innodb_ft_enable_stopword; select @@session.innodb_ft_enable_stopword; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings # # 
incorrect types @@ -74,8 +84,10 @@ set global innodb_ft_enable_stopword=-3; set session innodb_ft_enable_stopword=-7; select @@global.innodb_ft_enable_stopword; select @@session.innodb_ft_enable_stopword; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_enable_stopword'; select * from information_schema.session_variables where variable_name='innodb_ft_enable_stopword'; +--enable_warnings # # Cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_max_token_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_max_token_size_basic.test index e75517466d7..8f6f93f7517 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_max_token_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_max_token_size_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_ft_max_token_size; select @@session.innodb_ft_max_token_size; show global variables like 'innodb_ft_max_token_size'; show session variables like 'innodb_ft_max_token_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_max_token_size'; select * from information_schema.session_variables where variable_name='innodb_ft_max_token_size'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_min_token_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_min_token_size_basic.test index edf63c70782..753985e1af0 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_min_token_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_min_token_size_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_ft_min_token_size; select @@session.innodb_ft_min_token_size; show global variables like 'innodb_ft_min_token_size'; show session variables like 'innodb_ft_min_token_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_min_token_size'; select * from information_schema.session_variables where variable_name='innodb_ft_min_token_size'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_num_word_optimize_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_num_word_optimize_basic.test index 255caf86116..f288398e595 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_num_word_optimize_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_num_word_optimize_basic.test @@ -18,16 +18,20 @@ select @@global.innodb_ft_num_word_optimize; select @@session.innodb_ft_num_word_optimize; show global variables like 'innodb_ft_num_word_optimize'; show session variables like 'innodb_ft_num_word_optimize'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_num_word_optimize'; select * from information_schema.session_variables where variable_name='innodb_ft_num_word_optimize'; +--enable_warnings # # show that it's writable # set global innodb_ft_num_word_optimize=1000; select @@global.innodb_ft_num_word_optimize; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_num_word_optimize'; select * from information_schema.session_variables where variable_name='innodb_ft_num_word_optimize'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_ft_num_word_optimize=1000; @@ -43,7 +47,9 @@ set global innodb_ft_num_word_optimize="foo"; set global innodb_ft_num_word_optimize=-7; select @@global.innodb_ft_num_word_optimize; +--disable_warnings select * from information_schema.global_variables where 
variable_name='innodb_ft_num_word_optimize'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_result_cache_limit_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_result_cache_limit_basic.test index 245ed4abdfb..0a797a5ab5d 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_result_cache_limit_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_result_cache_limit_basic.test @@ -5,11 +5,6 @@ --source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - # # show the global and session values; # @@ -18,8 +13,10 @@ select @@global.innodb_ft_result_cache_limit; select @@session.innodb_ft_result_cache_limit; show global variables like 'innodb_ft_result_cache_limit'; show session variables like 'innodb_ft_result_cache_limit'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_result_cache_limit'; select * from information_schema.session_variables where variable_name='innodb_ft_result_cache_limit'; +--enable_warnings # # test default, min, max value @@ -32,7 +29,10 @@ select @@innodb_ft_result_cache_limit; set global innodb_ft_result_cache_limit=1000000; select @@innodb_ft_result_cache_limit; -set global innodb_ft_result_cache_limit=4000000000; +set global innodb_ft_result_cache_limit=4294967295; +select @@innodb_ft_result_cache_limit; + +set global innodb_ft_result_cache_limit=4*1024*1024*1024; select @@innodb_ft_result_cache_limit; eval set global innodb_ft_result_cache_limit=$innodb_ft_result_cache_limit_orig; diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_server_stopword_table_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_server_stopword_table_basic.test index e227e790a1d..5de822a54e5 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_server_stopword_table_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_server_stopword_table_basic.test @@ -15,8 +15,12 @@ SELECT @start_global_value; select @@session.innodb_ft_server_stopword_table; show global variables like 'innodb_ft_server_stopword_table'; show session variables like 'innodb_ft_server_stopword_table'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_server_stopword_table'; select * from information_schema.session_variables where variable_name='innodb_ft_server_stopword_table'; +--enable_warnings + +call mtr.add_suppression("\\[ERROR\\] InnoDB: user stopword table Salmon does not exist."); --error ER_GLOBAL_VARIABLE set session innodb_ft_server_stopword_table='Salmon'; diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_sort_pll_degree_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_sort_pll_degree_basic.test index 3cf55f6700b..cacd6a690b8 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_sort_pll_degree_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_sort_pll_degree_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_ft_sort_pll_degree; select @@session.innodb_ft_sort_pll_degree; show global variables like 'innodb_ft_sort_pll_degree'; show session variables like 'innodb_ft_sort_pll_degree'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_sort_pll_degree'; select * from information_schema.session_variables where variable_name='innodb_ft_sort_pll_degree'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_total_cache_size_basic.test 
b/mysql-test/suite/sys_vars/t/innodb_ft_total_cache_size_basic.test index 772ec5a1919..207ec64b705 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_total_cache_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_total_cache_size_basic.test @@ -1,9 +1,9 @@ ---source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} +# +# 2011-11-17 - Added +# + +--source include/have_innodb.inc # # show the global and session values; @@ -13,8 +13,10 @@ select @@global.innodb_ft_total_cache_size; select @@session.innodb_ft_total_cache_size; show global variables like 'innodb_ft_total_cache_size'; show session variables like 'innodb_ft_total_cache_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_total_cache_size'; select * from information_schema.session_variables where variable_name='innodb_ft_total_cache_size'; +--enable_warnings # # show that it's read-only @@ -24,4 +26,3 @@ set global innodb_ft_total_cache_size=1; --error ER_INCORRECT_GLOBAL_LOCAL_VAR set session innodb_ft_total_cache_size=1; - diff --git a/mysql-test/suite/sys_vars/t/innodb_ft_user_stopword_table_basic.test b/mysql-test/suite/sys_vars/t/innodb_ft_user_stopword_table_basic.test index 159e570b3ce..475bf8df526 100644 --- a/mysql-test/suite/sys_vars/t/innodb_ft_user_stopword_table_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_ft_user_stopword_table_basic.test @@ -16,8 +16,12 @@ select @@session.innodb_ft_user_stopword_table; show global variables like 'innodb_ft_user_stopword_table'; show session variables like 'innodb_ft_user_stopword_table'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_ft_user_stopword_table'; select * from information_schema.session_variables where variable_name='innodb_ft_user_stopword_table'; +--enable_warnings + +call mtr.add_suppression("\\[ERROR\\] InnoDB: user stopword table Salmon does not exist."); --error ER_WRONG_VALUE_FOR_VAR set session innodb_ft_user_stopword_table='Salmon'; @@ -35,4 +39,3 @@ set global innodb_ft_user_stopword_table=1e1; --error ER_WRONG_VALUE_FOR_VAR set global innodb_ft_user_stopword_table='Salmon'; -SET @@session.innodb_ft_user_stopword_table=@start_global_value; diff --git a/mysql-test/suite/sys_vars/t/innodb_large_prefix_basic.test b/mysql-test/suite/sys_vars/t/innodb_large_prefix_basic.test index 8d3f3afa0a9..877fe17b003 100644 --- a/mysql-test/suite/sys_vars/t/innodb_large_prefix_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_large_prefix_basic.test @@ -18,28 +18,38 @@ select @@global.innodb_large_prefix; select @@session.innodb_large_prefix; show global variables like 'innodb_large_prefix'; show session variables like 'innodb_large_prefix'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings # # show that it's writable # set global innodb_large_prefix='OFF'; select @@global.innodb_large_prefix; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings set @@global.innodb_large_prefix=1; select @@global.innodb_large_prefix; +--disable_warnings select * from 
information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings set global innodb_large_prefix=0; select @@global.innodb_large_prefix; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings set @@global.innodb_large_prefix='ON'; select @@global.innodb_large_prefix; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_large_prefix='OFF'; --error ER_GLOBAL_VARIABLE @@ -57,8 +67,10 @@ set global innodb_large_prefix=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_large_prefix=-3; select @@global.innodb_large_prefix; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_large_prefix'; select * from information_schema.session_variables where variable_name='innodb_large_prefix'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_large_prefix='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_limit_optimistic_insert_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_limit_optimistic_insert_debug_basic.test index 7998297c69e..8f2271cbd7f 100644 --- a/mysql-test/suite/sys_vars/t/innodb_limit_optimistic_insert_debug_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_limit_optimistic_insert_debug_basic.test @@ -12,20 +12,26 @@ select @@global.innodb_limit_optimistic_insert_debug; select @@session.innodb_limit_optimistic_insert_debug; show global variables like 'innodb_limit_optimistic_insert_debug'; show session variables like 'innodb_limit_optimistic_insert_debug'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_limit_optimistic_insert_debug'; select * from information_schema.session_variables where variable_name='innodb_limit_optimistic_insert_debug'; +--enable_warnings # # show that it's writable # set global innodb_limit_optimistic_insert_debug=1; select @@global.innodb_limit_optimistic_insert_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_limit_optimistic_insert_debug'; select * from information_schema.session_variables where variable_name='innodb_limit_optimistic_insert_debug'; +--enable_warnings set @@global.innodb_limit_optimistic_insert_debug=0; select @@global.innodb_limit_optimistic_insert_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_limit_optimistic_insert_debug'; select * from information_schema.session_variables where variable_name='innodb_limit_optimistic_insert_debug'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_limit_optimistic_insert_debug='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_lock_wait_timeout_basic.test b/mysql-test/suite/sys_vars/t/innodb_lock_wait_timeout_basic.test index f80b8e48736..a2aecf4ca8a 100644 --- a/mysql-test/suite/sys_vars/t/innodb_lock_wait_timeout_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_lock_wait_timeout_basic.test @@ -50,17 +50,21 @@ SELECT @@session.innodb_lock_wait_timeout; # Check if the value in GLOBAL Table matches value in variable # 
################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_lock_wait_timeout = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_lock_wait_timeout'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_lock_wait_timeout); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_lock_wait_timeout'; +--enable_warnings --echo 1 Expected @@ -93,6 +97,73 @@ SELECT COUNT(@@GLOBAL.innodb_lock_wait_timeout); SELECT innodb_lock_wait_timeout = @@SESSION.innodb_lock_wait_timeout; # +# check the default value +# +set @@global.innodb_lock_wait_timeout=100; +set @@global.innodb_lock_wait_timeout=DEFAULT; +select @@global.innodb_lock_wait_timeout; +set @@session.innodb_lock_wait_timeout=100; +set @@session.innodb_lock_wait_timeout=DEFAULT; +select @@session.innodb_lock_wait_timeout; + +# +# check for valid values +# + +SET @@global.innodb_lock_wait_timeout=1; +SELECT @@global.innodb_lock_wait_timeout; +SET @@global.innodb_lock_wait_timeout=1024; +SELECT @@global.innodb_lock_wait_timeout; +SET @@global.innodb_lock_wait_timeout=1073741824; +SELECT @@global.innodb_lock_wait_timeout; + +SET @@session.innodb_lock_wait_timeout=1; +SELECT @@session.innodb_lock_wait_timeout; +SET @@session.innodb_lock_wait_timeout=1024; +SELECT @@session.innodb_lock_wait_timeout; +SET @@session.innodb_lock_wait_timeout=1073741824; +SELECT @@session.innodb_lock_wait_timeout; + +# +# check for invalid values +# +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_lock_wait_timeout="t"; +SELECT @@global.innodb_lock_wait_timeout; +SET @@global.innodb_lock_wait_timeout=-1024; +SELECT @@global.innodb_lock_wait_timeout; +SET @@global.innodb_lock_wait_timeout=1073741825; +SELECT @@global.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_lock_wait_timeout=" "; +SELECT @@global.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_lock_wait_timeout=' '; +SELECT @@global.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_lock_wait_timeout=1.1; +SELECT @@global.innodb_lock_wait_timeout; + + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@session.innodb_lock_wait_timeout="T"; +SELECT @@session.innodb_lock_wait_timeout; +SET @@session.innodb_lock_wait_timeout=-1024; +SELECT @@session.innodb_lock_wait_timeout; +SET @@session.innodb_lock_wait_timeout=1073999999; +SELECT @@session.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@session.innodb_lock_wait_timeout=' '; +SELECT @@session.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@session.innodb_lock_wait_timeout=" "; +SELECT @@session.innodb_lock_wait_timeout; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@session.innodb_lock_wait_timeout=1.1; +SELECT @@session.innodb_lock_wait_timeout; + + + # Cleanup # diff --git a/mysql-test/suite/sys_vars/t/innodb_locks_unsafe_for_binlog_basic.test b/mysql-test/suite/sys_vars/t/innodb_locks_unsafe_for_binlog_basic.test index 08792d299a1..755c5c62c70 100644 --- a/mysql-test/suite/sys_vars/t/innodb_locks_unsafe_for_binlog_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_locks_unsafe_for_binlog_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_locks_unsafe_for_binlog); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_locks_unsafe_for_binlog, "ON", "OFF") = 
VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_locks_unsafe_for_binlog'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_locks_unsafe_for_binlog); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_locks_unsafe_for_binlog'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_log_buffer_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_buffer_size_basic.test index 74c1aeab87a..550bba0c0b7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_buffer_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_buffer_size_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_log_buffer_size); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_log_buffer_size = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_buffer_size'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_log_buffer_size); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_buffer_size'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_log_checkpoint_now_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_checkpoint_now_basic.test index 00aa476e8d2..331803fff86 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_checkpoint_now_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_checkpoint_now_basic.test @@ -1 +1,79 @@ ---echo XtraDB extension +--source include/have_innodb.inc +--source include/have_debug.inc + +SET @start_global_value = @@global.innodb_log_checkpoint_now; +SELECT @start_global_value; + +# +# exists as global only +# +select @@global.innodb_log_checkpoint_now in (0, 1); +select @@global.innodb_log_checkpoint_now; +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +select @@session.innodb_log_checkpoint_now; +show global variables like 'innodb_log_checkpoint_now'; +show session variables like 'innodb_log_checkpoint_now'; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +--enable_warnings + +# +# show that it's writable +# +set global innodb_log_checkpoint_now=1; +# Should always be OFF +select @@global.innodb_log_checkpoint_now; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +--enable_warnings + +set @@global.innodb_log_checkpoint_now=0; +# Should always be OFF +select @@global.innodb_log_checkpoint_now; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +--enable_warnings + +set global innodb_log_checkpoint_now=ON; +# Should always be OFF +select @@global.innodb_log_checkpoint_now; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +--enable_warnings + +set global 
innodb_log_checkpoint_now=OFF; +# Should always be OFF +select @@global.innodb_log_checkpoint_now; +--disable_warnings +select * from information_schema.global_variables where variable_name='innodb_log_checkpoint_now'; +select * from information_schema.session_variables where variable_name='innodb_log_checkpoint_now'; +--enable_warnings + +--error ER_GLOBAL_VARIABLE +set session innodb_log_checkpoint_now='some'; + +--error ER_GLOBAL_VARIABLE +set @@session.innodb_log_checkpoint_now='some'; + +# +# incorrect types +# +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_log_checkpoint_now=1.1; +--error ER_WRONG_VALUE_FOR_VAR +set global innodb_log_checkpoint_now='foo'; +--error ER_WRONG_VALUE_FOR_VAR +set global innodb_log_checkpoint_now=-2; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_log_checkpoint_now=1e1; + +# +# Cleanup +# + +SET @@global.innodb_log_checkpoint_now = @start_global_value; +SELECT @@global.innodb_log_checkpoint_now; diff --git a/mysql-test/suite/sys_vars/t/innodb_log_checksums_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_checksums_basic.test new file mode 100644 index 00000000000..8ebc9f1652b --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_log_checksums_basic.test @@ -0,0 +1,36 @@ +--source include/have_innodb.inc + +# Check the default value +SET @orig = @@global.innodb_log_checksums; +SELECT @orig; + +-- error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_log_checksums = 'crc32'; +SELECT @@global.innodb_log_checksums; + +-- error ER_WRONG_VALUE_FOR_VAR +SET GLOBAL innodb_log_checksums = 2; +SELECT @@global.innodb_log_checksums; + +-- error ER_WRONG_TYPE_FOR_VAR +SET GLOBAL innodb_log_checksums = 1e2; +SELECT @@global.innodb_log_checksums; + +-- error ER_WRONG_TYPE_FOR_VAR +SET GLOBAL innodb_log_checksums = 1.0; +SELECT @@global.innodb_log_checksums; + +-- error ER_GLOBAL_VARIABLE +SET innodb_log_checksums = OFF; +SELECT @@global.innodb_log_checksums; + +SET GLOBAL innodb_log_checksums = OFF; +SELECT @@global.innodb_log_checksums; + +SET GLOBAL innodb_log_checksums = default; + +SET GLOBAL innodb_log_checksums = ON; +SELECT @@global.innodb_log_checksums; + +SET GLOBAL innodb_log_checksums = @orig; +SELECT @@global.innodb_log_checksums; diff --git a/mysql-test/suite/sys_vars/t/innodb_log_compressed_pages_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_compressed_pages_basic.test index 8d10309ae02..2c83a36a0fd 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_compressed_pages_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_compressed_pages_basic.test @@ -1,10 +1,5 @@ --source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - SET @start_global_value = @@global.innodb_log_compressed_pages; SELECT @start_global_value; @@ -39,17 +34,21 @@ SELECT @@global.innodb_log_compressed_pages; # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_log_compressed_pages,'ON','OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_compressed_pages'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_log_compressed_pages); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_compressed_pages'; +--enable_warnings --echo 1 Expected diff --git 
a/mysql-test/suite/sys_vars/t/innodb_log_file_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_file_size_basic.test index 08925b73957..21fd2a80021 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_file_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_file_size_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_log_file_size); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_log_file_size = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_file_size'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_log_file_size); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_file_size'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_log_files_in_group_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_files_in_group_basic.test index 67978efe76a..60046bd09e6 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_files_in_group_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_files_in_group_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_log_files_in_group); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_log_files_in_group = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_files_in_group'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_log_files_in_group); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_files_in_group'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_log_group_home_dir_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_group_home_dir_basic.test index 7e3969c6bd7..d6d5446c4c7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_log_group_home_dir_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_log_group_home_dir_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_log_group_home_dir); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_log_group_home_dir = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_group_home_dir'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_log_group_home_dir); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_log_group_home_dir'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_log_write_ahead_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_log_write_ahead_size_basic.test new file mode 100644 index 00000000000..8693c6a7b1b --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_log_write_ahead_size_basic.test @@ -0,0 +1,93 @@ +--source include/have_innodb.inc + +SET @start_global_value = @@global.innodb_log_write_ahead_size; + +# default value is limited by innodb_page_size and varying along with the page size. 
+#SELECT @start_global_value; + +# set a common valid value +SET global innodb_log_write_ahead_size=4096; + +# +# exists as global only +# +--echo Valid values are positive number +SELECT @@global.innodb_log_write_ahead_size >= 512; +SELECT @@global.innodb_log_write_ahead_size <= 16*1024; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT @@session.innodb_log_write_ahead_size; +SHOW global variables LIKE 'innodb_log_write_ahead_size'; +SHOW session variables LIKE 'innodb_log_write_ahead_size'; +--disable_warnings +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +SELECT * FROM information_schema.session_variables +WHERE variable_name='innodb_log_write_ahead_size'; +--enable_warnings + +# +# show that it's writable +# +SET global innodb_log_write_ahead_size=1024; +SELECT @@global.innodb_log_write_ahead_size; +--disable_warnings +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +SELECT * FROM information_schema.session_variables +WHERE variable_name='innodb_log_write_ahead_size'; +--enable_warnings +--error ER_GLOBAL_VARIABLE +SET session innodb_log_write_ahead_size=2048; + +# +# Valid values +# +SET global innodb_log_write_ahead_size=512; +SELECT @@global.innodb_log_write_ahead_size; +SET global innodb_log_write_ahead_size=2048; +SELECT @@global.innodb_log_write_ahead_size; +SET global innodb_log_write_ahead_size=4096; +SELECT @@global.innodb_log_write_ahead_size; + +# limited by innodb_page_size, and the following are occasionally invalid +#SET global innodb_log_write_ahead_size=8192; +#SELECT @@global.innodb_log_write_ahead_size; +#SET global innodb_log_write_ahead_size=16384; +#SELECT @@global.innodb_log_write_ahead_size; + +# +# Invalid values +# +SET global innodb_log_write_ahead_size=0; +SELECT @@global.innodb_log_write_ahead_size; +SET global innodb_log_write_ahead_size=-1024; +SELECT @@global.innodb_log_write_ahead_size; +SET global innodb_log_write_ahead_size=3000; +SELECT @@global.innodb_log_write_ahead_size; + +# limited by innodb_page_size, and the following occasionally give different results +#SET global innodb_log_write_ahead_size=32768; +#SELECT @@global.innodb_log_write_ahead_size; + +# +# incorrect types +# +--error ER_WRONG_TYPE_FOR_VAR +SET global innodb_log_write_ahead_size=1.1; +--error ER_WRONG_TYPE_FOR_VAR +SET global innodb_log_write_ahead_size=1e1; +--error ER_WRONG_TYPE_FOR_VAR +SET global innodb_log_write_ahead_size="foo"; +SET global innodb_log_write_ahead_size=-7; +SELECT @@global.innodb_log_write_ahead_size; +--disable_warnings +SELECT * FROM information_schema.global_variables +WHERE variable_name='innodb_log_write_ahead_size'; +--enable_warnings + +# +# cleanup +# + +SET @@global.innodb_log_write_ahead_size = @start_global_value; diff --git a/mysql-test/suite/sys_vars/t/innodb_lru_scan_depth_basic.test b/mysql-test/suite/sys_vars/t/innodb_lru_scan_depth_basic.test index 12211308410..8f08a1bff14 100644 --- a/mysql-test/suite/sys_vars/t/innodb_lru_scan_depth_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_lru_scan_depth_basic.test @@ -18,16 +18,20 @@ select @@global.innodb_lru_scan_depth; select @@session.innodb_lru_scan_depth; show global variables like 'innodb_lru_scan_depth'; show session variables like 'innodb_lru_scan_depth'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_lru_scan_depth'; select * from information_schema.session_variables where variable_name='innodb_lru_scan_depth'; +--enable_warnings # #
show that it's writable # set global innodb_lru_scan_depth=325; select @@global.innodb_lru_scan_depth; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_lru_scan_depth'; select * from information_schema.session_variables where variable_name='innodb_lru_scan_depth'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_lru_scan_depth=444; @@ -43,10 +47,14 @@ set global innodb_lru_scan_depth="foo"; set global innodb_lru_scan_depth=7; select @@global.innodb_lru_scan_depth; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_lru_scan_depth'; +--enable_warnings set global innodb_lru_scan_depth=-7; select @@global.innodb_lru_scan_depth; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_lru_scan_depth'; +--enable_warnings # # min/max values diff --git a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_basic.test index 5b4eaa41598..e8cc46086bc 100644 --- a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_basic.test @@ -31,20 +31,29 @@ ###################################################################### -################################################################################ +################################################################################ # Saving initial value of innodb_max_dirty_pages_pct in a temporary variable # -################################################################################ +################################################################################ SET @global_start_value = @@global.innodb_max_dirty_pages_pct; SELECT @global_start_value; +# need this because setting innodb_max_dirty_pages_pct to lower than this +# should cause a warning +SET @global_start_max_dirty_lwm_value = @@global.innodb_max_dirty_pages_pct_lwm; +SELECT @global_start_max_dirty_lwm_value; + + +SET @@global.innodb_max_dirty_pages_pct_lwm = 0; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; + --echo '#--------------------FN_DYNVARS_046_01------------------------#' -######################################################################## +######################################################################## # Display the DEFAULT value of innodb_max_dirty_pages_pct # -######################################################################## +######################################################################## SET @@global.innodb_max_dirty_pages_pct = 0; -SET @@global.innodb_max_dirty_pages_pct = @global_start_value; +SET @@global.innodb_max_dirty_pages_pct = DEFAULT; SELECT @@global.innodb_max_dirty_pages_pct; --echo '#---------------------FN_DYNVARS_046_02-------------------------#' @@ -63,11 +72,11 @@ SET global innodb_max_dirty_pages_pct = 0; SELECT @@global.innodb_max_dirty_pages_pct; --echo '#--------------------FN_DYNVARS_046_03------------------------#' -########################################################################## +########################################################################## # change the value of innodb_max_dirty_pages_pct to a valid value # -########################################################################## +########################################################################## -SET @@global.innodb_max_dirty_pages_pct = 0; +SET @@global.innodb_max_dirty_pages_pct = 0.0; SELECT @@global.innodb_max_dirty_pages_pct; SET 
@@global.innodb_max_dirty_pages_pct = 1; @@ -75,14 +84,26 @@ SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = 99; SELECT @@global.innodb_max_dirty_pages_pct; ---echo '#--------------------FN_DYNVARS_046_04-------------------------#' -########################################################################### +--echo '#--------------------FN_DYNVARS_046_04------------------------#' +########################################################################## +# change value of based on innodb_max_dirty_pages_pct_lwm # +########################################################################## +SET @@global.innodb_max_dirty_pages_pct_lwm = @global_start_value - 1; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; + +# this should cause warning +SET @@global.innodb_max_dirty_pages_pct = @global_start_value - 2; +SELECT @@global.innodb_max_dirty_pages_pct; + +--echo '#--------------------FN_DYNVARS_046_05-------------------------#' +########################################################################### # Change the value of innodb_max_dirty_pages_pct to invalid value # -########################################################################### +########################################################################### SET @@global.innodb_max_dirty_pages_pct = -1; SELECT @@global.innodb_max_dirty_pages_pct; - +SET @@global.innodb_max_dirty_pages_pct = -1024; +SELECT @@global.innodb_max_dirty_pages_pct; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_max_dirty_pages_pct = "T"; SELECT @@global.innodb_max_dirty_pages_pct; @@ -91,26 +112,49 @@ SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = "Y"; SELECT @@global.innodb_max_dirty_pages_pct; +SET @@global.innodb_max_dirty_pages_pct = 100; +SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = 1001; SELECT @@global.innodb_max_dirty_pages_pct; +SET @@global.innodb_max_dirty_pages_pct = 100000; +SELECT @@global.innodb_max_dirty_pages_pct; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_dirty_pages_pct = ' '; +SELECT @@global.innodb_max_dirty_pages_pct; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_dirty_pages_pct = " "; +SELECT @@global.innodb_max_dirty_pages_pct; +SET @@global.innodb_max_dirty_pages_pct = 1.1; +SELECT @@global.innodb_max_dirty_pages_pct; +set global innodb_max_dirty_pages_pct = 0.1; +SELECT @@global.innodb_max_dirty_pages_pct; +set global innodb_max_dirty_pages_pct = 31.34; +SELECT @@global.innodb_max_dirty_pages_pct; +set global innodb_max_dirty_pages_pct = 100; +SELECT @@global.innodb_max_dirty_pages_pct; +set global innodb_max_dirty_pages_pct = 99.999; +SELECT @@global.innodb_max_dirty_pages_pct; - ---echo '#----------------------FN_DYNVARS_046_05------------------------#' -######################################################################### +--echo '#----------------------FN_DYNVARS_046_06------------------------#' +######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_max_dirty_pages_pct = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct'; +--enable_warnings SELECT @@global.innodb_max_dirty_pages_pct; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct'; 
+--enable_warnings ---echo '#---------------------FN_DYNVARS_046_06-------------------------#' -################################################################### +--echo '#---------------------FN_DYNVARS_046_07-------------------------#' +################################################################### # Check if ON and OFF values can be used on variable # -################################################################### +################################################################### --ERROR ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_max_dirty_pages_pct = OFF; @@ -120,23 +164,26 @@ SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = ON; SELECT @@global.innodb_max_dirty_pages_pct; ---echo '#---------------------FN_DYNVARS_046_07----------------------#' -################################################################### +--echo '#---------------------FN_DYNVARS_046_08----------------------#' +################################################################### # Check if TRUE and FALSE values can be used on variable # -################################################################### +################################################################### SET @@global.innodb_max_dirty_pages_pct = TRUE; SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = FALSE; SELECT @@global.innodb_max_dirty_pages_pct; -############################## +############################## # Restore initial value # ############################## SET @@global.innodb_max_dirty_pages_pct = @global_start_value; SELECT @@global.innodb_max_dirty_pages_pct; +SET @@global.innodb_max_dirty_pages_pct_lwm = @global_start_max_dirty_lwm_value; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; + ############################################################### # END OF innodb_max_dirty_pages_pct TESTS # -############################################################### +############################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_func.test b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_func.test index 62c88f43ebd..c7a9e567e69 100644 --- a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_func.test +++ b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_func.test @@ -33,15 +33,22 @@ SET @innodb_max_dirty_pages_pct = @@global.innodb_max_dirty_pages_pct; ############################################################################ SET @@global.innodb_max_dirty_pages_pct = 80; +--echo 'connect (con1,localhost,root,,,,)' connect (con1,localhost,root,,,,); +--echo 'connection con1' connection con1; SELECT @@global.innodb_max_dirty_pages_pct; SET @@global.innodb_max_dirty_pages_pct = 70; +--echo 'connect (con2,localhost,root,,,,)' connect (con2,localhost,root,,,,); +--echo 'connection con2' connection con2; SELECT @@global.innodb_max_dirty_pages_pct; +--echo 'connection default' connection default; +--echo 'disconnect con2' disconnect con2; +--echo 'disconnect con1' disconnect con1; # restore initial value SET @@global.innodb_max_dirty_pages_pct = @innodb_max_dirty_pages_pct; diff --git a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test index d81b6cc725b..b06f209a263 100644 --- a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test @@ -47,7 +47,7 @@ SELECT @pct_start_value; 
######################################################################## SET @@global.innodb_max_dirty_pages_pct_lwm = 0; -SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_lwm_start_value; +SET @@global.innodb_max_dirty_pages_pct_lwm = DEFAULT; SELECT @@global.innodb_max_dirty_pages_pct_lwm; --echo '#---------------------FN_DYNVARS_046_02-------------------------#' @@ -96,22 +96,40 @@ SELECT @@global.innodb_max_dirty_pages_pct_lwm; SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_start_value + 1; SELECT @@global.innodb_max_dirty_pages_pct_lwm; -SET @@global.innodb_max_dirty_pages_pct_lwm = 100; + +SET @@global.innodb_max_dirty_pages_pct_lwm = 0.0; SELECT @@global.innodb_max_dirty_pages_pct_lwm; +SET @@global.innodb_max_dirty_pages_pct_lwm = 1.1; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; + +SET @@global.innodb_max_dirty_pages_pct_lwm = 51.12; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +SET @@global.innodb_max_dirty_pages_pct_lwm = 100; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_dirty_pages_pct_lwm = " "; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_dirty_pages_pct_lwm = ' '; +SELECT @@global.innodb_max_dirty_pages_pct_lwm; --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_max_dirty_pages_pct_lwm = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm'; +--enable_warnings SELECT @@global.innodb_max_dirty_pages_pct_lwm; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_basic.test index 9e6b8201e3d..6c7676f113d 100644 --- a/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_basic.test @@ -66,16 +66,37 @@ SELECT @@global.innodb_max_purge_lag; # change the value of innodb_max_purge_lag to a valid value # ########################################################################## - SET @@global.innodb_max_purge_lag = 0; SELECT @@global.innodb_max_purge_lag; SET @@global.innodb_max_purge_lag = 1; SELECT @@global.innodb_max_purge_lag; + SET @@global.innodb_max_purge_lag = 4294967295; SELECT @@global.innodb_max_purge_lag; ---echo '#--------------------FN_DYNVARS_046_04-------------------------#' +--echo '#--------------------FN_DYNVARS_046_04------------------------#' +########################################################################## +# check the value of innodb_max_purge_lag for out-of-bounds values # +########################################################################## + +# With a 64-bit mysqld: 18446744073709551615, with a 32-bit mysqld: 4294967295 +--disable_warnings +SET @@global.innodb_max_purge_lag = 4294967296; +--enable_warnings +SELECT @@global.innodb_max_purge_lag IN (4294967296,4294967295); + +--disable_warnings +SET @@global.innodb_max_purge_lag = 12345678901; +--enable_warnings
+SELECT @@global.innodb_max_purge_lag IN (12345678901,4294967295); + +--disable_warnings +SET @@global.innodb_max_purge_lag = 18446744073709551615; +--enable_warnings +SELECT @@global.innodb_max_purge_lag IN (18446744073709551615,4294967295); + +--echo '#--------------------FN_DYNVARS_046_05-------------------------#' ########################################################################### # Change the value of innodb_max_purge_lag to invalid value # ########################################################################### @@ -83,6 +104,9 @@ SELECT @@global.innodb_max_purge_lag; SET @@global.innodb_max_purge_lag = -1; SELECT @@global.innodb_max_purge_lag; +SET @@global.innodb_max_purge_lag = -1024; +SELECT @@global.innodb_max_purge_lag; + --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_max_purge_lag = "T"; SELECT @@global.innodb_max_purge_lag; @@ -91,26 +115,35 @@ SELECT @@global.innodb_max_purge_lag; SET @@global.innodb_max_purge_lag = "Y"; SELECT @@global.innodb_max_purge_lag; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_purge_lag = 1.1; +SELECT @@global.innodb_max_purge_lag; -SET @@global.innodb_max_purge_lag = 1001; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_purge_lag = ' '; SELECT @@global.innodb_max_purge_lag; ---echo '#----------------------FN_DYNVARS_046_05------------------------#' +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_max_purge_lag = " "; +SELECT @@global.innodb_max_purge_lag; + +--echo '#----------------------FN_DYNVARS_046_06------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_max_purge_lag = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_purge_lag'; +--enable_warnings SELECT @@global.innodb_max_purge_lag; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_purge_lag'; +--enable_warnings - - - ---echo '#---------------------FN_DYNVARS_046_06-------------------------#' +--echo '#---------------------FN_DYNVARS_046_07-------------------------#' ################################################################### # Check if ON and OFF values can be used on variable # ################################################################### @@ -123,12 +156,11 @@ SELECT @@global.innodb_max_purge_lag; SET @@global.innodb_max_purge_lag = ON; SELECT @@global.innodb_max_purge_lag; ---echo '#---------------------FN_DYNVARS_046_07----------------------#' +--echo '#---------------------FN_DYNVARS_046_08----------------------#' ################################################################### # Check if TRUE and FALSE values can be used on variable # ################################################################### - SET @@global.innodb_max_purge_lag = TRUE; SELECT @@global.innodb_max_purge_lag; SET @@global.innodb_max_purge_lag = FALSE; diff --git a/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_delay_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_delay_basic.test index 6374e3716df..f7580c99507 100644 --- a/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_delay_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_max_purge_lag_delay_basic.test @@ -13,9 +13,11 @@ SET @@GLOBAL.innodb_max_purge_lag_delay=1; SELECT COUNT(@@GLOBAL.innodb_max_purge_lag_delay); --echo 1 Expected +--disable_warnings SELECT 
VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_max_purge_lag_delay'; +--enable_warnings --echo 1 Expected SELECT @@innodb_max_purge_lag_delay = @@GLOBAL.innodb_max_purge_lag_delay; diff --git a/mysql-test/suite/sys_vars/t/innodb_max_undo_log_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_undo_log_size_basic.test new file mode 100644 index 00000000000..9882578923e --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_max_undo_log_size_basic.test @@ -0,0 +1,99 @@ + + +############### mysql-test\t\innodb_max_undo_log_size_basic.test ############### +# # +# Variable Name: innodb_max_undo_log_size # +# Scope: Global # +# Access Type: Static # +# Data Type: numeric # +# # +# # +# Creation Date: 2014-27-05 # +# Author : Krunal Bauskar # +# # +# # +# Description:Test Cases of Dynamic System Variable innodb_max_undo_log_size # +# that checks the behavior of this variable in the following ways # +# * Value Check # +# * Scope Check # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/have_innodb.inc + +--echo '#---------------------BS_STVARS_035_01----------------------#' +#################################################################### +# Displaying default value # +#################################################################### +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +--echo 1 Expected + + +--echo '#---------------------BS_STVARS_035_02----------------------#' +#################################################################### +# Check if Value can set # +#################################################################### + +SET @@GLOBAL.innodb_max_undo_log_size=1073741824; + +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +--echo 1 Expected + +SET @@GLOBAL.innodb_max_undo_log_size=18446744073709551615; +SELECT @@GLOBAL.innodb_max_undo_log_size; +--echo 18446744073709551615 Expected + +SET @@GLOBAL.innodb_max_undo_log_size=1073741824; + +--echo '#---------------------BS_STVARS_035_03----------------------#' +################################################################# +# Check if the value in GLOBAL Table matches value in variable # +################################################################# + +--disable_warnings +SELECT @@GLOBAL.innodb_max_undo_log_size = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_max_undo_log_size'; +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +--echo 1 Expected + +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_max_undo_log_size'; +--echo 1 Expected +--enable_warnings + + +--echo '#---------------------BS_STVARS_035_04----------------------#' +################################################################################ +# Check if accessing variable with and without GLOBAL point to same variable # +################################################################################ +SELECT @@innodb_max_undo_log_size = @@GLOBAL.innodb_max_undo_log_size; +--echo 1 Expected + + + +--echo '#---------------------BS_STVARS_035_05----------------------#' +################################################################################ +# Check if innodb_max_undo_log_size can be accessed with and without @@ sign # +################################################################################ + +SELECT COUNT(@@innodb_max_undo_log_size); 
+--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_max_undo_log_size); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_max_undo_log_size); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_max_undo_log_size); +--echo 1 Expected + diff --git a/mysql-test/suite/sys_vars/t/innodb_merge_threshold_set_all_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_merge_threshold_set_all_debug_basic.test new file mode 100644 index 00000000000..0ea30277801 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_merge_threshold_set_all_debug_basic.test @@ -0,0 +1,30 @@ +--echo # +--echo # Basic test for innodb_merge_threshold_set_all_debug +--echo # + +--source include/have_innodb.inc + +# The config variable is a debug variable +-- source include/have_debug.inc + +SELECT @@global.innodb_merge_threshold_set_all_debug; + +set global innodb_merge_threshold_set_all_debug = 1; + +SELECT @@global.innodb_merge_threshold_set_all_debug; + +set global innodb_merge_threshold_set_all_debug = 51; + +SELECT @@global.innodb_merge_threshold_set_all_debug; + +set global innodb_merge_threshold_set_all_debug = 0; + +SELECT @@global.innodb_merge_threshold_set_all_debug; + +--error ER_GLOBAL_VARIABLE +set innodb_merge_threshold_set_all_debug = 50; + +set global innodb_merge_threshold_set_all_debug = 50; + +SELECT @@global.innodb_merge_threshold_set_all_debug; + diff --git a/mysql-test/suite/sys_vars/t/innodb_mirrored_log_groups_basic.test b/mysql-test/suite/sys_vars/t/innodb_mirrored_log_groups_basic.test deleted file mode 100644 index 6edb07ac39f..00000000000 --- a/mysql-test/suite/sys_vars/t/innodb_mirrored_log_groups_basic.test +++ /dev/null @@ -1,102 +0,0 @@ - - -################## mysql-test\t\innodb_mirrored_log_groups_basic.test ######### -# # -# Variable Name: innodb_mirrored_log_groups # -# Scope: Global # -# Access Type: Static # -# Data Type: numeric # -# # -# # -# Creation Date: 2008-02-07 # -# Author : Sharique Abdullah # -# # -# # -# Description:Test Cases of Dynamic System Variable innodb_mirrored_log_groups# -# that checks the behavior of this variable in the following ways # -# * Value Check # -# * Scope Check # -# # -# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # -# server-system-variables.html # -# # -############################################################################### - ---source include/have_innodb.inc - ---echo '#---------------------BS_STVARS_037_01----------------------#' -#################################################################### -# Displaying default value # -#################################################################### -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); ---echo 1 Expected - - ---echo '#---------------------BS_STVARS_037_02----------------------#' -#################################################################### -# Check if Value can set # -#################################################################### - ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -SET @@GLOBAL.innodb_mirrored_log_groups=1; ---echo Expected error 'Read only variable' - -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); ---echo 1 Expected - - - - ---echo '#---------------------BS_STVARS_037_03----------------------#' -################################################################# -# Check if the value in GLOBAL Table matches value in variable # -################################################################# - -SELECT 
@@GLOBAL.innodb_mirrored_log_groups = VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_mirrored_log_groups'; ---echo 1 Expected - -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); ---echo 1 Expected - -SELECT COUNT(VARIABLE_VALUE) -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='innodb_mirrored_log_groups'; ---echo 1 Expected - - - ---echo '#---------------------BS_STVARS_037_04----------------------#' -################################################################################ -# Check if accessing variable with and without GLOBAL point to same variable # -################################################################################ -SELECT @@innodb_mirrored_log_groups = @@GLOBAL.innodb_mirrored_log_groups; ---echo 1 Expected - - - ---echo '#---------------------BS_STVARS_037_05----------------------#' -################################################################################ -# Check if innodb_mirrored_log_groups can be accessed with and without @@ sign # -################################################################################ - -SELECT COUNT(@@innodb_mirrored_log_groups); ---echo 1 Expected - ---Error ER_INCORRECT_GLOBAL_LOCAL_VAR -SELECT COUNT(@@local.innodb_mirrored_log_groups); ---echo Expected error 'Variable is a GLOBAL variable' - ---Error ER_INCORRECT_GLOBAL_LOCAL_VAR -SELECT COUNT(@@SESSION.innodb_mirrored_log_groups); ---echo Expected error 'Variable is a GLOBAL variable' - -SELECT COUNT(@@GLOBAL.innodb_mirrored_log_groups); ---echo 1 Expected - ---Error ER_BAD_FIELD_ERROR -SELECT innodb_mirrored_log_groups = @@SESSION.innodb_mirrored_log_groups; ---echo Expected error 'Readonly variable' - - diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test index 0615d62a0e1..1b23ae14e49 100644 --- a/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test @@ -2,11 +2,6 @@ # Test the metrics monitor system's control system # and counter accuracy. -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - --source include/have_innodb.inc set global innodb_monitor_disable = All; # Test turn on/off the monitor counter with "all" option diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test index 0615d62a0e1..1b23ae14e49 100644 --- a/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test @@ -2,11 +2,6 @@ # Test the metrics monitor system's control system # and counter accuracy. -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - --source include/have_innodb.inc set global innodb_monitor_disable = All; # Test turn on/off the monitor counter with "all" option diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test index 868f69300fa..1b23ae14e49 100644 --- a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test @@ -2,11 +2,6 @@ # Test the metrics monitor system's control system # and counter accuracy. 
-if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip not fixed in innodb 5.6.10 or earlier -} - --source include/have_innodb.inc set global innodb_monitor_disable = All; # Test turn on/off the monitor counter with "all" option diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test index 868f69300fa..1b23ae14e49 100644 --- a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test @@ -2,11 +2,6 @@ # Test the metrics monitor system's control system # and counter accuracy. -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip not fixed in innodb 5.6.10 or earlier -} - --source include/have_innodb.inc set global innodb_monitor_disable = All; # Test turn on/off the monitor counter with "all" option diff --git a/mysql-test/suite/sys_vars/t/innodb_numa_interleave_basic.test b/mysql-test/suite/sys_vars/t/innodb_numa_interleave_basic.test new file mode 100644 index 00000000000..9f41cb74da8 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_numa_interleave_basic.test @@ -0,0 +1,13 @@ +--source include/have_innodb.inc +--source include/have_numa.inc + +SELECT @@GLOBAL.innodb_numa_interleave; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.innodb_numa_interleave=off; + +SELECT @@GLOBAL.innodb_use_native_aio; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT @@SESSION.innodb_use_native_aio; + diff --git a/mysql-test/suite/sys_vars/t/innodb_old_blocks_pct_basic.test b/mysql-test/suite/sys_vars/t/innodb_old_blocks_pct_basic.test index 0dcef3bb09f..1f72fc250ce 100644 --- a/mysql-test/suite/sys_vars/t/innodb_old_blocks_pct_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_old_blocks_pct_basic.test @@ -18,19 +18,29 @@ select @@global.innodb_old_blocks_pct; select @@session.innodb_old_blocks_pct; show global variables like 'innodb_old_blocks_pct'; show session variables like 'innodb_old_blocks_pct'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_pct'; select * from information_schema.session_variables where variable_name='innodb_old_blocks_pct'; +--enable_warnings # # show that it's writable # set global innodb_old_blocks_pct=10; select @@global.innodb_old_blocks_pct; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_pct'; select * from information_schema.session_variables where variable_name='innodb_old_blocks_pct'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_old_blocks_pct=1; +# +# check the default value +# +set @@global.innodb_old_blocks_pct=DEFAULT; +select @@global.innodb_old_blocks_pct; + # # incorrect types # @@ -40,16 +50,26 @@ set global innodb_old_blocks_pct=1.1; set global innodb_old_blocks_pct=1e1; --error ER_WRONG_TYPE_FOR_VAR set global innodb_old_blocks_pct="foo"; - +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_old_blocks_pct=" "; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_old_blocks_pct=''; + set global innodb_old_blocks_pct=4; select @@global.innodb_old_blocks_pct; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_pct'; +--enable_warnings set global innodb_old_blocks_pct=-7; select @@global.innodb_old_blocks_pct; +--disable_warnings select * from information_schema.global_variables where 
variable_name='innodb_old_blocks_pct'; +--enable_warnings set global innodb_old_blocks_pct=96; select @@global.innodb_old_blocks_pct; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_pct'; +--enable_warnings # # min/max values diff --git a/mysql-test/suite/sys_vars/t/innodb_old_blocks_time_basic.test b/mysql-test/suite/sys_vars/t/innodb_old_blocks_time_basic.test index 3efec2bbf15..d05e7244b93 100644 --- a/mysql-test/suite/sys_vars/t/innodb_old_blocks_time_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_old_blocks_time_basic.test @@ -18,16 +18,20 @@ select @@global.innodb_old_blocks_time; select @@session.innodb_old_blocks_time; show global variables like 'innodb_old_blocks_time'; show session variables like 'innodb_old_blocks_time'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_time'; select * from information_schema.session_variables where variable_name='innodb_old_blocks_time'; +--enable_warnings # # show that it's writable # set global innodb_old_blocks_time=10; select @@global.innodb_old_blocks_time; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_time'; select * from information_schema.session_variables where variable_name='innodb_old_blocks_time'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_old_blocks_time=1; @@ -43,7 +47,9 @@ set global innodb_old_blocks_time="foo"; set global innodb_old_blocks_time=-7; select @@global.innodb_old_blocks_time; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_old_blocks_time'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test index aa1cc83819e..b86f04cac15 100644 --- a/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test @@ -12,20 +12,26 @@ select @@global.innodb_online_alter_log_max_size; select @@session.innodb_online_alter_log_max_size; show global variables like 'innodb_online_alter_log_max_size'; show session variables like 'innodb_online_alter_log_max_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size'; select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size'; +--enable_warnings # # show that it's writable # set global innodb_online_alter_log_max_size=1048576; select @@global.innodb_online_alter_log_max_size; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size'; select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size'; +--enable_warnings set @@global.innodb_online_alter_log_max_size=524288; select @@global.innodb_online_alter_log_max_size; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size'; select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_online_alter_log_max_size='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_open_files_basic.test 
b/mysql-test/suite/sys_vars/t/innodb_open_files_basic.test index c55b7e55937..d6b7c857fb4 100644 --- a/mysql-test/suite/sys_vars/t/innodb_open_files_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_open_files_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_open_files); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT @@GLOBAL.innodb_open_files = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_open_files'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_open_files); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_open_files'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_optimize_fulltext_only_basic.test b/mysql-test/suite/sys_vars/t/innodb_optimize_fulltext_only_basic.test index e9ff8a651bc..08b8c137342 100644 --- a/mysql-test/suite/sys_vars/t/innodb_optimize_fulltext_only_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_optimize_fulltext_only_basic.test @@ -18,28 +18,38 @@ select @@global.innodb_optimize_fulltext_only; select @@session.innodb_optimize_fulltext_only; show global variables like 'innodb_optimize_fulltext_only'; show session variables like 'innodb_optimize_fulltext_only'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where variable_name='innodb_optimize_fulltext_only'; +--enable_warnings # # show that it's writable # set global innodb_optimize_fulltext_only='ON'; select @@global.innodb_optimize_fulltext_only; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where variable_name='innodb_optimize_fulltext_only'; +--enable_warnings set @@global.innodb_optimize_fulltext_only=0; select @@global.innodb_optimize_fulltext_only; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where variable_name='innodb_optimize_fulltext_only'; +--enable_warnings set global innodb_optimize_fulltext_only=1; select @@global.innodb_optimize_fulltext_only; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where variable_name='innodb_optimize_fulltext_only'; +--enable_warnings set @@global.innodb_optimize_fulltext_only='OFF'; select @@global.innodb_optimize_fulltext_only; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where variable_name='innodb_optimize_fulltext_only'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_optimize_fulltext_only='OFF'; --error ER_GLOBAL_VARIABLE @@ -57,8 +67,10 @@ set global innodb_optimize_fulltext_only=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_optimize_fulltext_only=-3; select @@global.innodb_optimize_fulltext_only; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_optimize_fulltext_only'; select * from information_schema.session_variables where 
variable_name='innodb_optimize_fulltext_only'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_optimize_fulltext_only='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_page_cleaners_basic.test b/mysql-test/suite/sys_vars/t/innodb_page_cleaners_basic.test new file mode 100644 index 00000000000..716492ba1b3 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_page_cleaners_basic.test @@ -0,0 +1,53 @@ +# Variable name: innodb_page_cleaners +# Scope: Global +# Access type: Static +# Data type: numeric + +--source include/have_innodb.inc + +SELECT COUNT(@@GLOBAL.innodb_page_cleaners); +--echo 1 Expected + +SELECT COUNT(@@innodb_page_cleaners); +--echo 1 Expected + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.innodb_page_cleaners=1; +--echo Expected error 'Read-only variable' + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_page_cleaners = @@SESSION.innodb_page_cleaners; +--echo Expected error 'Read-only variable' + +--disable_warnings +SELECT @@GLOBAL.innodb_page_cleaners = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_page_cleaners'; +--enable_warnings +--echo 1 Expected + +--disable_warnings +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_page_cleaners'; +--enable_warnings +--echo 1 Expected + +SELECT @@innodb_page_cleaners = @@GLOBAL.innodb_page_cleaners; +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_page_cleaners); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_page_cleaners); +--echo Expected error 'Variable is a GLOBAL variable' + +# Check the default value +--disable_warnings +SELECT VARIABLE_NAME, VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME = 'innodb_page_cleaners'; +--enable_warnings + diff --git a/mysql-test/suite/sys_vars/t/innodb_page_hash_locks_basic.test b/mysql-test/suite/sys_vars/t/innodb_page_hash_locks_basic.test index 1479cbad744..ee4798c1f90 100644 --- a/mysql-test/suite/sys_vars/t/innodb_page_hash_locks_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_page_hash_locks_basic.test @@ -10,8 +10,10 @@ select @@global.innodb_page_hash_locks; select @@session.innodb_page_hash_locks; show global variables like 'innodb_page_hash_locks'; show session variables like 'innodb_page_hash_locks'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_page_hash_locks'; select * from information_schema.session_variables where variable_name='innodb_page_hash_locks'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_page_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_page_size_basic.test index 00aa476e8d2..1d4f9353f53 100644 --- a/mysql-test/suite/sys_vars/t/innodb_page_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_page_size_basic.test @@ -1 +1,16 @@ ---echo XtraDB extension +# +# Basic test for innodb_page_size +# + +-- source include/have_innodb.inc + +# Check the default value +SET @orig = @@global.innodb_page_size; +--replace_result 65536 {valid_page_size} 32768 {valid_page_size} 16384 {valid_page_size} 8192 {valid_page_size} 4096 {valid_page_size} +SELECT @orig; + +# Confirm that we can not change the value +-- error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET GLOBAL innodb_page_size = 4k; +-- error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET GLOBAL innodb_page_size = 8k; diff --git 
a/mysql-test/suite/sys_vars/t/innodb_print_all_deadlocks_basic.test b/mysql-test/suite/sys_vars/t/innodb_print_all_deadlocks_basic.test index 4cbd7062108..5693a829373 100644 --- a/mysql-test/suite/sys_vars/t/innodb_print_all_deadlocks_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_print_all_deadlocks_basic.test @@ -17,38 +17,48 @@ SELECT @@global.innodb_print_all_deadlocks; SELECT @@session.innodb_print_all_deadlocks; SHOW global variables LIKE 'innodb_print_all_deadlocks'; SHOW session variables LIKE 'innodb_print_all_deadlocks'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings # # SHOW that it's writable # SET global innodb_print_all_deadlocks='OFF'; SELECT @@global.innodb_print_all_deadlocks; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings SET @@global.innodb_print_all_deadlocks=1; SELECT @@global.innodb_print_all_deadlocks; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings SET global innodb_print_all_deadlocks=0; SELECT @@global.innodb_print_all_deadlocks; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings SET @@global.innodb_print_all_deadlocks='ON'; SELECT @@global.innodb_print_all_deadlocks; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_print_all_deadlocks='OFF'; --error ER_GLOBAL_VARIABLE @@ -66,10 +76,12 @@ SET global innodb_print_all_deadlocks=2; --error ER_WRONG_VALUE_FOR_VAR SET global innodb_print_all_deadlocks=-3; SELECT @@global.innodb_print_all_deadlocks; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_print_all_deadlocks'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_print_all_deadlocks'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR SET global innodb_print_all_deadlocks='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_purge_batch_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_purge_batch_size_basic.test index 88271d26965..4f3dc9f364b 100644 --- a/mysql-test/suite/sys_vars/t/innodb_purge_batch_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_purge_batch_size_basic.test @@ -72,7 +72,7 @@ SELECT @@global.innodb_purge_batch_size; SET @@global.innodb_purge_batch_size = 5000; SELECT @@global.innodb_purge_batch_size; -SET @@global.innodb_purge_batch_size = 1000; +SET @@global.innodb_purge_batch_size = 4294967295; SELECT @@global.innodb_purge_batch_size; --echo '#--------------------FN_DYNVARS_046_04-------------------------#' @@ -90,8 +90,17 @@ SELECT @@global.innodb_purge_batch_size; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_purge_batch_size = "Y"; SELECT 
@@global.innodb_purge_batch_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_batch_size = ' '; +SELECT @@global.innodb_purge_batch_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_batch_size = " "; +SELECT @@global.innodb_purge_batch_size; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_batch_size = 1.1; +SELECT @@global.innodb_purge_batch_size; -SET @@global.innodb_purge_batch_size = 5001; +SET @@global.innodb_purge_batch_size = 4294967297; SELECT @@global.innodb_purge_batch_size; --echo '#----------------------FN_DYNVARS_046_05------------------------#' @@ -99,12 +108,16 @@ SELECT @@global.innodb_purge_batch_size; # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_purge_batch_size = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_batch_size'; +--enable_warnings SELECT @@global.innodb_purge_batch_size; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_batch_size'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_purge_rseg_truncate_frequency_basic.test b/mysql-test/suite/sys_vars/t/innodb_purge_rseg_truncate_frequency_basic.test new file mode 100644 index 00000000000..e0871ba4ab5 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_purge_rseg_truncate_frequency_basic.test @@ -0,0 +1,161 @@ +###### mysql-test\t\innodb_purge_rseg_truncate_frequency_basic.test ########### +# # +# Variable Name: innodb_purge_rseg_truncate_frequency # +# Scope: GLOBAL # +# Access Type: Dynamic # +# Data Type: Numeric # +# Default Value: 128 # +# Range: 1 - 128 # +# # +# # +# Creation Date: 2014-27-05 # +# Author: Krunal Bauskar # +# # +#Description:Test Cases of Dynamic System Variable # +# innodb_purge_rseg_truncate_frequency # +# that checks the behavior of this variable in the following ways # +# * Default Value # +# * Valid & Invalid values # +# * Scope & Access method # +# * Data Integrity # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/have_innodb.inc +--source include/load_sysvars.inc + +######################################################################## +# START OF innodb_purge_rseg_truncate_frequency TESTS # +######################################################################## + +############################################################################### +# Saving initial value of innodb_purge_rseg_truncate_frequency in a # +# temporary variable # +############################################################################### + +SET @global_start_value = @@global.innodb_purge_rseg_truncate_frequency; +SELECT @global_start_value; + +--echo '#--------------------FN_DYNVARS_046_01------------------------#' +######################################################################## +# Display the DEFAULT value of innodb_purge_rseg_truncate_frequency # +######################################################################## + +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SET @@global.innodb_purge_rseg_truncate_frequency = DEFAULT; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--echo 
'#---------------------FN_DYNVARS_046_02-------------------------#' +############################################################################ +# Check if innodb_purge_rseg_truncate_frequency can be accessed with and # +# without @@ sign # +############################################################################ + +--Error ER_GLOBAL_VARIABLE +SET innodb_purge_rseg_truncate_frequency = 1; +SELECT @@innodb_purge_rseg_truncate_frequency; + +--Error ER_UNKNOWN_TABLE +SELECT local.innodb_purge_rseg_truncate_frequency; + +SET global innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--echo '#--------------------FN_DYNVARS_046_03------------------------#' +########################################################################## +# change the value of innodb_purge_rseg_truncate_frequency to a valid # +# value # +########################################################################## + +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +SET @@global.innodb_purge_rseg_truncate_frequency = 1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +SET @@global.innodb_purge_rseg_truncate_frequency = 128; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--echo '#--------------------FN_DYNVARS_046_05-------------------------#' +########################################################################### +# Change the value of innodb_purge_rseg_truncate_frequency to # +# invalid value # +########################################################################### + +SET @@global.innodb_purge_rseg_truncate_frequency = -1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +SET @@global.innodb_purge_rseg_truncate_frequency = -1024; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = "T"; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = "Y"; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = 1.1; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = ' '; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = " "; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--echo '#----------------------FN_DYNVARS_046_06------------------------#' +######################################################################### +# Check if the value in GLOBAL Table matches value in variable # +######################################################################### + +--disable_warnings +SELECT @@global.innodb_purge_rseg_truncate_frequency = + VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES + WHERE VARIABLE_NAME='innodb_purge_rseg_truncate_frequency'; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES + WHERE VARIABLE_NAME='innodb_purge_rseg_truncate_frequency'; +--enable_warnings + +--echo '#---------------------FN_DYNVARS_046_07-------------------------#' +################################################################### +# Check if ON and OFF values can be used on variable # +################################################################### + +--ERROR 
ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = OFF; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--ERROR ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_purge_rseg_truncate_frequency = ON; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +--echo '#---------------------FN_DYNVARS_046_08----------------------#' +################################################################### +# Check if TRUE and FALSE values can be used on variable # +################################################################### + +SET @@global.innodb_purge_rseg_truncate_frequency = TRUE; +SELECT @@global.innodb_purge_rseg_truncate_frequency; +SET @@global.innodb_purge_rseg_truncate_frequency = FALSE; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +############################## +# Restore initial value # +############################## + + +SET @@global.innodb_purge_rseg_truncate_frequency = @global_start_value; +SELECT @@global.innodb_purge_rseg_truncate_frequency; + +############################################################### +# END OF innodb_purge_rseg_truncate_frequency TESTS # +############################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_purge_threads_basic.test b/mysql-test/suite/sys_vars/t/innodb_purge_threads_basic.test index 64d834c6344..4d039601e40 100644 --- a/mysql-test/suite/sys_vars/t/innodb_purge_threads_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_purge_threads_basic.test @@ -1,89 +1,41 @@ - - -################## mysql-test\t\innodb_log_purge_threads_basic.test ########### -# # -# Variable Name: innodb_purge_threads # -# Scope: Global # -# Access Type: Static # -# Data Type: numeric # -# # -# # -# Creation Date: 2008-02-07 # -# Author : Sharique Abdullah # -# # -# # -# Description:Test Cases of Dynamic System Variable innodb_purge_threads # -# that checks the behavior of this variable in the following ways # -# * Value Check # -# * Scope Check # -# # -# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # -# server-system-variables.html # -# # -############################################################################### +# Variable name: innodb_purge_threads +# Scope: Global +# Access type: Static +# Data type: numeric --source include/have_innodb.inc ---echo '#---------------------BS_STVARS_035_01----------------------#' -#################################################################### -# Displaying default value # -#################################################################### SELECT COUNT(@@GLOBAL.innodb_purge_threads); --echo 1 Expected - ---echo '#---------------------BS_STVARS_035_02----------------------#' -#################################################################### -# Check if Value can set # -#################################################################### +SELECT COUNT(@@innodb_purge_threads); +--echo 1 Expected --error ER_INCORRECT_GLOBAL_LOCAL_VAR SET @@GLOBAL.innodb_purge_threads=1; ---echo Expected error 'Read only variable' - -SELECT COUNT(@@GLOBAL.innodb_purge_threads); ---echo 1 Expected +--echo Expected error 'Read-only variable' +--Error ER_BAD_FIELD_ERROR +SELECT innodb_purge_threads = @@SESSION.innodb_purge_threads; +--echo Expected error 'Read-only variable' - - ---echo '#---------------------BS_STVARS_035_03----------------------#' -################################################################# -# Check if the value in GLOBAL Table matches value in variable # -################################################################# - 
+--disable_warnings SELECT @@GLOBAL.innodb_purge_threads = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_threads'; +--enable_warnings --echo 1 Expected -SELECT COUNT(@@GLOBAL.innodb_purge_threads); ---echo 1 Expected - +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_purge_threads'; +--enable_warnings --echo 1 Expected - - ---echo '#---------------------BS_STVARS_035_04----------------------#' -################################################################################ -# Check if accessing variable with and without GLOBAL point to same variable # -################################################################################ SELECT @@innodb_purge_threads = @@GLOBAL.innodb_purge_threads; --echo 1 Expected - - ---echo '#---------------------BS_STVARS_035_05----------------------#' -################################################################################ -# Check if innodb_purge_threads can be accessed with and without @@ sign # -################################################################################ - -SELECT COUNT(@@innodb_purge_threads); ---echo 1 Expected - --Error ER_INCORRECT_GLOBAL_LOCAL_VAR SELECT COUNT(@@local.innodb_purge_threads); --echo Expected error 'Variable is a GLOBAL variable' @@ -92,11 +44,10 @@ SELECT COUNT(@@local.innodb_purge_threads); SELECT COUNT(@@SESSION.innodb_purge_threads); --echo Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@GLOBAL.innodb_purge_threads); ---echo 1 Expected - ---Error ER_BAD_FIELD_ERROR -SELECT innodb_purge_threads = @@SESSION.innodb_purge_threads; ---echo Expected error 'Readonly variable' - +# Check the default value +--disable_warnings +SELECT VARIABLE_NAME, VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME = 'innodb_purge_threads'; +--enable_warnings diff --git a/mysql-test/suite/sys_vars/t/innodb_random_read_ahead_basic.test b/mysql-test/suite/sys_vars/t/innodb_random_read_ahead_basic.test index b7ba6f36b15..a805fc80314 100644 --- a/mysql-test/suite/sys_vars/t/innodb_random_read_ahead_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_random_read_ahead_basic.test @@ -18,28 +18,38 @@ select @@global.innodb_random_read_ahead; select @@session.innodb_random_read_ahead; show global variables like 'innodb_random_read_ahead'; show session variables like 'innodb_random_read_ahead'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings # # show that it's writable # set global innodb_random_read_ahead='ON'; select @@global.innodb_random_read_ahead; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings set @@global.innodb_random_read_ahead=0; select @@global.innodb_random_read_ahead; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings set global innodb_random_read_ahead=1; select @@global.innodb_random_read_ahead; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; 
select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings set @@global.innodb_random_read_ahead='OFF'; select @@global.innodb_random_read_ahead; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_random_read_ahead='OFF'; --error ER_GLOBAL_VARIABLE @@ -57,8 +67,10 @@ set global innodb_random_read_ahead=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_random_read_ahead=-3; select @@global.innodb_random_read_ahead; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_random_read_ahead'; select * from information_schema.session_variables where variable_name='innodb_random_read_ahead'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_random_read_ahead='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_read_ahead_threshold_basic.test b/mysql-test/suite/sys_vars/t/innodb_read_ahead_threshold_basic.test index 1298a28b3d3..65bb9c03115 100644 --- a/mysql-test/suite/sys_vars/t/innodb_read_ahead_threshold_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_read_ahead_threshold_basic.test @@ -18,18 +18,27 @@ select @@global.innodb_read_ahead_threshold; select @@session.innodb_read_ahead_threshold; show global variables like 'innodb_read_ahead_threshold'; show session variables like 'innodb_read_ahead_threshold'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_ahead_threshold'; select * from information_schema.session_variables where variable_name='innodb_read_ahead_threshold'; +--enable_warnings # # show that it's writable # set global innodb_read_ahead_threshold=10; select @@global.innodb_read_ahead_threshold; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_ahead_threshold'; select * from information_schema.session_variables where variable_name='innodb_read_ahead_threshold'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_read_ahead_threshold=1; +# +# check the default value +# +set global innodb_read_ahead_threshold=DEFAULT; +select @@global.innodb_read_ahead_threshold; # # incorrect types @@ -40,13 +49,23 @@ set global innodb_read_ahead_threshold=1.1; set global innodb_read_ahead_threshold=1e1; --error ER_WRONG_TYPE_FOR_VAR set global innodb_read_ahead_threshold="foo"; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_read_ahead_threshold=' '; +select @@global.innodb_read_ahead_threshold; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_read_ahead_threshold=" "; +select @@global.innodb_read_ahead_threshold; set global innodb_read_ahead_threshold=-7; select @@global.innodb_read_ahead_threshold; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_ahead_threshold'; +--enable_warnings set global innodb_read_ahead_threshold=96; select @@global.innodb_read_ahead_threshold; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_ahead_threshold'; +--enable_warnings # # min/max values diff --git a/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test b/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test index 14426395d6c..c4c49d5bb20 100644 --- a/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test +++ 
b/mysql-test/suite/sys_vars/t/innodb_read_io_threads_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_read_io_threads; select @@session.innodb_read_io_threads; show global variables like 'innodb_read_io_threads'; show session variables like 'innodb_read_io_threads'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_io_threads'; select * from information_schema.session_variables where variable_name='innodb_read_io_threads'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test b/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test index 581eb3538b8..31cbe779ef7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test @@ -9,8 +9,10 @@ select @@global.innodb_read_only; select @@session.innodb_read_only; show global variables like 'innodb_read_only'; show session variables like 'innodb_read_only'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_read_only'; select * from information_schema.session_variables where variable_name='innodb_read_only'; +--enable_warnings # Show that it's read-only --error ER_INCORRECT_GLOBAL_LOCAL_VAR diff --git a/mysql-test/suite/sys_vars/t/innodb_replication_delay_basic.test b/mysql-test/suite/sys_vars/t/innodb_replication_delay_basic.test index e495de46611..c85cc85e78c 100644 --- a/mysql-test/suite/sys_vars/t/innodb_replication_delay_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_replication_delay_basic.test @@ -18,19 +18,40 @@ select @@global.innodb_replication_delay; select @@session.innodb_replication_delay; show global variables like 'innodb_replication_delay'; show session variables like 'innodb_replication_delay'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_replication_delay'; select * from information_schema.session_variables where variable_name='innodb_replication_delay'; +--enable_warnings # # show that it's writable # set global innodb_replication_delay=10; select @@global.innodb_replication_delay; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_replication_delay'; select * from information_schema.session_variables where variable_name='innodb_replication_delay'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_replication_delay=1; +# +# check the default value +# +set global innodb_replication_delay=DEFAULT; +select @@global.innodb_replication_delay; + +# +# valid values +# + +set global innodb_replication_delay=0; +select @@global.innodb_replication_delay; +set global innodb_replication_delay=65535; +select @@global.innodb_replication_delay; +set global innodb_replication_delay=4294967295; +select @@global.innodb_replication_delay; + # # incorrect types # @@ -40,10 +61,39 @@ set global innodb_replication_delay=1.1; set global innodb_replication_delay=1e1; --error ER_WRONG_TYPE_FOR_VAR set global innodb_replication_delay="foo"; - +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_replication_delay=' '; +select @@global.innodb_replication_delay; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_replication_delay=" "; +select @@global.innodb_replication_delay; set global innodb_replication_delay=-7; select @@global.innodb_replication_delay; +set global innodb_replication_delay=-1024; +select @@global.innodb_replication_delay; +--disable_warnings select * from 
information_schema.global_variables where variable_name='innodb_replication_delay'; +--enable_warnings + +# +# Check for out of bounds +# + +# With a 64 bit mysqld:18446744073709551615,with a 32 bit mysqld: 4294967295 +--disable_warnings +SET @@global.innodb_replication_delay = 4294967296; +--enable_warnings +SELECT @@global.innodb_replication_delay IN (4294967296,4294967295); + +--disable_warnings +SET @@global.innodb_replication_delay = 12345678901; +--enable_warnings +SELECT @@global.innodb_replication_delay IN (12345678901,4294967295); + +--disable_warnings +SET @@global.innodb_replication_delay = 18446744073709551615; +--enable_warnings +SELECT @@global.innodb_replication_delay IN (18446744073709551615,4294967295); # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_rollback_on_timeout_basic.test b/mysql-test/suite/sys_vars/t/innodb_rollback_on_timeout_basic.test index 81025bb9d73..2aee2e25db7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_rollback_on_timeout_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_rollback_on_timeout_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_rollback_on_timeout); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_rollback_on_timeout, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_rollback_on_timeout'; +--enable_warnings --echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_rollback_on_timeout); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_rollback_on_timeout'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_rollback_segments_basic.test b/mysql-test/suite/sys_vars/t/innodb_rollback_segments_basic.test index 9f0b70a528f..33223d4c064 100644 --- a/mysql-test/suite/sys_vars/t/innodb_rollback_segments_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_rollback_segments_basic.test @@ -19,20 +19,24 @@ SELECT @@global.innodb_rollback_segments; SELECT @@session.innodb_rollback_segments; SHOW global variables LIKE 'innodb_rollback_segments'; SHOW session variables LIKE 'innodb_rollback_segments'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_rollback_segments'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_rollback_segments'; +--enable_warnings # # show that it's writable # SET global innodb_rollback_segments=100; SELECT @@global.innodb_rollback_segments; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_rollback_segments'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_rollback_segments'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_rollback_segments=1; @@ -47,8 +51,10 @@ SET global innodb_rollback_segments=1e1; SET global innodb_rollback_segments="foo"; SET global innodb_rollback_segments=-7; SELECT @@global.innodb_rollback_segments; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_rollback_segments'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_sort_buffer_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_sort_buffer_size_basic.test index 920c992c1f9..49318c00661 100644 --- a/mysql-test/suite/sys_vars/t/innodb_sort_buffer_size_basic.test +++ 
b/mysql-test/suite/sys_vars/t/innodb_sort_buffer_size_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_sort_buffer_size; select @@session.innodb_sort_buffer_size; show global variables like 'innodb_sort_buffer_size'; show session variables like 'innodb_sort_buffer_size'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_sort_buffer_size'; select * from information_schema.session_variables where variable_name='innodb_sort_buffer_size'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_spin_wait_delay_basic.test b/mysql-test/suite/sys_vars/t/innodb_spin_wait_delay_basic.test index 8f2eee08b6a..ab0b38bb6ce 100644 --- a/mysql-test/suite/sys_vars/t/innodb_spin_wait_delay_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_spin_wait_delay_basic.test @@ -18,19 +18,39 @@ select @@global.innodb_spin_wait_delay; select @@session.innodb_spin_wait_delay; show global variables like 'innodb_spin_wait_delay'; show session variables like 'innodb_spin_wait_delay'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_spin_wait_delay'; select * from information_schema.session_variables where variable_name='innodb_spin_wait_delay'; +--enable_warnings # # show that it's writable # set global innodb_spin_wait_delay=10; select @@global.innodb_spin_wait_delay; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_spin_wait_delay'; select * from information_schema.session_variables where variable_name='innodb_spin_wait_delay'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_spin_wait_delay=1; +# +# check the default value +# +set global innodb_spin_wait_delay=DEFAULT; +select @@global.innodb_spin_wait_delay; + +# +# valid values +# +set global innodb_spin_wait_delay=0; +select @@global.innodb_spin_wait_delay; +set global innodb_spin_wait_delay=65535; +select @@global.innodb_spin_wait_delay; +set global innodb_spin_wait_delay=4294967295; +select @@global.innodb_spin_wait_delay; + # # incorrect types # @@ -40,10 +60,39 @@ set global innodb_spin_wait_delay=1.1; set global innodb_spin_wait_delay=1e1; --error ER_WRONG_TYPE_FOR_VAR set global innodb_spin_wait_delay="foo"; - +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_spin_wait_delay=' '; +select @@global.innodb_spin_wait_delay; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_spin_wait_delay=" "; +select @@global.innodb_spin_wait_delay; set global innodb_spin_wait_delay=-7; select @@global.innodb_spin_wait_delay; +set global innodb_spin_wait_delay=-1024; +select @@global.innodb_spin_wait_delay; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_spin_wait_delay'; +--enable_warnings + +# +# Check for out of bounds +# + +# With a 64 bit mysqld:18446744073709551615,with a 32 bit mysqld: 4294967295 +--disable_warnings +SET @@global.innodb_spin_wait_delay = 4294967296; +--enable_warnings +SELECT @@global.innodb_spin_wait_delay IN (4294967296,4294967295); + +--disable_warnings +SET @@global.innodb_spin_wait_delay = 12345678901; +--enable_warnings +SELECT @@global.innodb_spin_wait_delay IN (12345678901,4294967295); + +--disable_warnings +SET @@global.innodb_spin_wait_delay = 18446744073709551615; +--enable_warnings +SELECT @@global.innodb_spin_wait_delay IN (18446744073709551615,4294967295); # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_method_basic.test 
b/mysql-test/suite/sys_vars/t/innodb_stats_method_basic.test index f01574c3683..77288dfb130 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_method_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_method_basic.test @@ -18,32 +18,40 @@ SELECT @@global.innodb_stats_method; SELECT @@session.innodb_stats_method; SHOW global variables LIKE 'innodb_stats_method'; SHOW session variables LIKE 'innodb_stats_method'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_method'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_method'; +--enable_warnings # # show that it's writable # SET global innodb_stats_method='nulls_equal'; SELECT @@global.innodb_stats_method; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_method'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_method'; +--enable_warnings SET @@global.innodb_stats_method='nulls_unequal'; SELECT @@global.innodb_stats_method; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_method'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_method'; +--enable_warnings SET global innodb_stats_method=2; SELECT @@global.innodb_stats_method; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_method'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_method'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_stats_method='nulls_equal'; diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_on_metadata_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_on_metadata_basic.test index 9028ee7f687..a0bccb50652 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_on_metadata_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_on_metadata_basic.test @@ -18,28 +18,38 @@ select @@global.innodb_stats_on_metadata; select @@session.innodb_stats_on_metadata; show global variables like 'innodb_stats_on_metadata'; show session variables like 'innodb_stats_on_metadata'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings # # show that it's writable # set global innodb_stats_on_metadata='OFF'; select @@global.innodb_stats_on_metadata; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings set @@global.innodb_stats_on_metadata=1; select @@global.innodb_stats_on_metadata; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings set global innodb_stats_on_metadata=0; select @@global.innodb_stats_on_metadata; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings set @@global.innodb_stats_on_metadata='ON'; select @@global.innodb_stats_on_metadata; +--disable_warnings select * from 
information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_stats_on_metadata='OFF'; --error ER_GLOBAL_VARIABLE @@ -57,8 +67,10 @@ set global innodb_stats_on_metadata=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_stats_on_metadata=-3; select @@global.innodb_stats_on_metadata; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata'; select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_stats_on_metadata='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test index 4277b58de00..c5f977321b7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test @@ -28,4 +28,4 @@ SET GLOBAL innodb_stats_persistent=123; SET GLOBAL innodb_stats_persistent='foo'; # restore the environment -SET GLOBAL innodb_stats_persistent=off; +SET GLOBAL innodb_stats_persistent=OFF; \ No newline at end of file diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_persistent_sample_pages_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_sample_pages_basic.test index cf223c02090..5fc62f0a571 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_persistent_sample_pages_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_sample_pages_basic.test @@ -18,23 +18,45 @@ SELECT @@global.innodb_stats_persistent_sample_pages; SELECT @@session.innodb_stats_persistent_sample_pages; SHOW global variables LIKE 'innodb_stats_persistent_sample_pages'; SHOW session variables LIKE 'innodb_stats_persistent_sample_pages'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; +--enable_warnings # # SHOW that it's writable # SET global innodb_stats_persistent_sample_pages=10; SELECT @@global.innodb_stats_persistent_sample_pages; -SELECT * FROM information_schema.global_variables +--disable_warnings +SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_stats_persistent_sample_pages=1; +# +# show the default value +# +set global innodb_stats_persistent_sample_pages=DEFAULT; +select @@global.innodb_stats_persistent_sample_pages; + +# +# valid values +# +SET global innodb_stats_persistent_sample_pages=0; +SELECT @@global.innodb_stats_persistent_sample_pages; + +SET global innodb_stats_persistent_sample_pages=10; +SELECT @@global.innodb_stats_persistent_sample_pages; + + + + # # incorrect types # @@ -44,11 +66,19 @@ SET global innodb_stats_persistent_sample_pages=1.1; SET global innodb_stats_persistent_sample_pages=1e1; --error ER_WRONG_TYPE_FOR_VAR SET global innodb_stats_persistent_sample_pages="foo"; - +--error ER_WRONG_TYPE_FOR_VAR +SET global innodb_stats_persistent_sample_pages=' '; +SELECT @@global.innodb_stats_persistent_sample_pages; +--error ER_WRONG_TYPE_FOR_VAR 
+SET global innodb_stats_persistent_sample_pages=" "; +SELECT @@global.innodb_stats_persistent_sample_pages; SET global innodb_stats_persistent_sample_pages=-7; SELECT @@global.innodb_stats_persistent_sample_pages; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_persistent_sample_pages'; +--enable_warnings + # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_sample_pages_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_sample_pages_basic.test index 2c91f11405d..0e4fcb508ac 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_sample_pages_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_sample_pages_basic.test @@ -18,32 +18,52 @@ select @@global.innodb_stats_sample_pages; select @@session.innodb_stats_sample_pages; show global variables like 'innodb_stats_sample_pages'; show session variables like 'innodb_stats_sample_pages'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_sample_pages'; select * from information_schema.session_variables where variable_name='innodb_stats_sample_pages'; +--enable_warnings # # show that it's writable # set global innodb_stats_sample_pages=10; select @@global.innodb_stats_sample_pages; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_sample_pages'; select * from information_schema.session_variables where variable_name='innodb_stats_sample_pages'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_stats_sample_pages=1; +# +# show the default value # -# incorrect types +set global innodb_stats_sample_pages=DEFAULT; +select @@global.innodb_stats_sample_pages; + + # +# invalid values +# +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_stats_sample_pages = 1.1; --error ER_WRONG_TYPE_FOR_VAR -set global innodb_stats_sample_pages=1.1; +set global innodb_stats_sample_pages = 1e1; --error ER_WRONG_TYPE_FOR_VAR -set global innodb_stats_sample_pages=1e1; +set global innodb_stats_sample_pages = "foo"; --error ER_WRONG_TYPE_FOR_VAR -set global innodb_stats_sample_pages="foo"; +set global innodb_stats_sample_pages=' '; +select @@global.innodb_stats_sample_pages; +--error ER_WRONG_TYPE_FOR_VAR +set global innodb_stats_sample_pages=" "; +select @@global.innodb_stats_sample_pages; set global innodb_stats_sample_pages=-7; select @@global.innodb_stats_sample_pages; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_stats_sample_pages'; +--enable_warnings + # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_transient_sample_pages_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_transient_sample_pages_basic.test index ff3a50efa1f..897d3de42e0 100644 --- a/mysql-test/suite/sys_vars/t/innodb_stats_transient_sample_pages_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_stats_transient_sample_pages_basic.test @@ -18,20 +18,30 @@ SELECT @@global.innodb_stats_transient_sample_pages; SELECT @@session.innodb_stats_transient_sample_pages; SHOW global variables LIKE 'innodb_stats_transient_sample_pages'; SHOW session variables LIKE 'innodb_stats_transient_sample_pages'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_transient_sample_pages'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_transient_sample_pages'; +--enable_warnings + +# +# show the default value +# +set global innodb_stats_transient_sample_pages=DEFAULT; 
+select @@global.innodb_stats_transient_sample_pages; # # SHOW that it's writable # SET global innodb_stats_transient_sample_pages=10; SELECT @@global.innodb_stats_transient_sample_pages; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_transient_sample_pages'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_stats_transient_sample_pages'; +--enable_warnings --error ER_GLOBAL_VARIABLE SET session innodb_stats_transient_sample_pages=1; @@ -44,11 +54,15 @@ SET global innodb_stats_transient_sample_pages=1.1; SET global innodb_stats_transient_sample_pages=1e1; --error ER_WRONG_TYPE_FOR_VAR SET global innodb_stats_transient_sample_pages="foo"; +--error ER_WRONG_TYPE_FOR_VAR +SET global innodb_stats_transient_sample_pages=' '; SET global innodb_stats_transient_sample_pages=-7; SELECT @@global.innodb_stats_transient_sample_pages; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_stats_transient_sample_pages'; +--enable_warnings # # cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_status_output_basic.test b/mysql-test/suite/sys_vars/t/innodb_status_output_basic.test index 4459632134d..8e33b364c4e 100644 --- a/mysql-test/suite/sys_vars/t/innodb_status_output_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_status_output_basic.test @@ -13,28 +13,38 @@ select @@global.innodb_status_output; select @@session.innodb_status_output; show global variables like 'innodb_status_output'; show session variables like 'innodb_status_output'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings # # show that it's writable # set global innodb_status_output='OFF'; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings set @@global.innodb_status_output=1; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings set global innodb_status_output=0; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings set @@global.innodb_status_output='ON'; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_status_output='OFF'; --error ER_GLOBAL_VARIABLE @@ -52,12 +62,16 @@ set global innodb_status_output=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_status_output=-3; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings set global 
innodb_status_output=DEFAULT; select @@global.innodb_status_output; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output'; select * from information_schema.session_variables where variable_name='innodb_status_output'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_status_output='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_status_output_locks_basic.test b/mysql-test/suite/sys_vars/t/innodb_status_output_locks_basic.test index 92c82b2ddbf..9f510c2feaa 100644 --- a/mysql-test/suite/sys_vars/t/innodb_status_output_locks_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_status_output_locks_basic.test @@ -13,28 +13,38 @@ select @@global.innodb_status_output_locks; select @@session.innodb_status_output_locks; show global variables like 'innodb_status_output_locks'; show session variables like 'innodb_status_output_locks'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings # # show that it's writable # set global innodb_status_output_locks='OFF'; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings set @@global.innodb_status_output_locks=1; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings set global innodb_status_output_locks=0; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings set @@global.innodb_status_output_locks='ON'; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_status_output_locks='OFF'; --error ER_GLOBAL_VARIABLE @@ -52,12 +62,16 @@ set global innodb_status_output_locks=2; --error ER_WRONG_VALUE_FOR_VAR set global innodb_status_output_locks=-3; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings set global innodb_status_output_locks=DEFAULT; select @@global.innodb_status_output_locks; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_status_output_locks'; select * from information_schema.session_variables where variable_name='innodb_status_output_locks'; +--enable_warnings --error ER_WRONG_VALUE_FOR_VAR set global innodb_status_output_locks='AUTO'; diff --git a/mysql-test/suite/sys_vars/t/innodb_strict_mode_basic.test 
b/mysql-test/suite/sys_vars/t/innodb_strict_mode_basic.test index 10f8d1ce4e7..243985f95de 100644 --- a/mysql-test/suite/sys_vars/t/innodb_strict_mode_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_strict_mode_basic.test @@ -18,8 +18,10 @@ select @@session.innodb_strict_mode in (0, 1); select @@session.innodb_strict_mode; show global variables like 'innodb_strict_mode'; show session variables like 'innodb_strict_mode'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings # # show that it's writable @@ -28,26 +30,34 @@ set global innodb_strict_mode='OFF'; set session innodb_strict_mode='OFF'; select @@global.innodb_strict_mode; select @@session.innodb_strict_mode; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings set @@global.innodb_strict_mode=1; set @@session.innodb_strict_mode=1; select @@global.innodb_strict_mode; select @@session.innodb_strict_mode; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings set global innodb_strict_mode=0; set session innodb_strict_mode=0; select @@global.innodb_strict_mode; select @@session.innodb_strict_mode; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings set @@global.innodb_strict_mode='ON'; set @@session.innodb_strict_mode='ON'; select @@global.innodb_strict_mode; select @@session.innodb_strict_mode; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings # # incorrect types @@ -74,8 +84,10 @@ set global innodb_strict_mode=-3; set session innodb_strict_mode=-7; select @@global.innodb_strict_mode; select @@session.innodb_strict_mode; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_strict_mode'; select * from information_schema.session_variables where variable_name='innodb_strict_mode'; +--enable_warnings # # Cleanup diff --git a/mysql-test/suite/sys_vars/t/innodb_support_xa_basic.test b/mysql-test/suite/sys_vars/t/innodb_support_xa_basic.test index 988b8f01b93..6668d486090 100644 --- a/mysql-test/suite/sys_vars/t/innodb_support_xa_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_support_xa_basic.test @@ -127,7 +127,7 @@ SET @@session.innodb_support_xa = # for global ---error ER_WRONG_VALUE_FOR_VAR +--Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_support_xa = -1; SELECT @@global.innodb_support_xa; @@ -166,12 +166,16 @@ SELECT @@session.innodb_support_xa AS res_is_1; # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT IF(@@global.innodb_support_xa, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_support_xa'; +--enable_warnings SELECT @@global.innodb_support_xa; +--disable_warnings SELECT 
VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_support_xa'; +--enable_warnings --echo '#----------------------FN_DYNVARS_046_07------------------------#' @@ -179,12 +183,16 @@ SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES # Check if the value in SESSION Table matches value in variable # ######################################################################### +--disable_warnings SELECT IF(@@session.innodb_support_xa, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME='innodb_support_xa'; +--enable_warnings SELECT @@session.innodb_support_xa; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME='innodb_support_xa'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_08-------------------------#' diff --git a/mysql-test/suite/sys_vars/t/innodb_sync_array_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_sync_array_size_basic.test index 53011acb576..39ff69affea 100644 --- a/mysql-test/suite/sys_vars/t/innodb_sync_array_size_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_sync_array_size_basic.test @@ -2,11 +2,6 @@ --source include/have_innodb.inc -if (`select plugin_auth_version <= "5.6.10" from information_schema.plugins where plugin_name='innodb'`) -{ - --skip Not fixed in InnoDB 5.6.10 or earlier -} - # Exists as global only # --echo Valid values are between 0 and 1024 @@ -14,12 +9,15 @@ SELECT @@global.innodb_sync_array_size between 0 and 1024; SELECT @@global.innodb_sync_array_size; --error ER_INCORRECT_GLOBAL_LOCAL_VAR SELECT @@session.innodb_sync_array_size; + SHOW GLOBAL variables LIKE 'innodb_sync_array_size'; SHOW SESSION variables LIKE 'innodb_sync_array_size'; +--disable_warnings SELECT * FROM information_schema.global_variables WHERE variable_name='innodb_sync_array_size'; SELECT * FROM information_schema.session_variables WHERE variable_name='innodb_sync_array_size'; +--enable_warnings # # Show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_sync_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_sync_debug_basic.test new file mode 100644 index 00000000000..665482e6963 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_sync_debug_basic.test @@ -0,0 +1,16 @@ +--echo # +--echo # Basic test for innodb_sync_debug +--echo # + +--source include/have_innodb.inc + +# The config variable is a debug read-only variable +-- source include/have_debug.inc + +SELECT @@global.innodb_sync_debug; + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +set global innodb_sync_debug = 1; + +SELECT @@global.innodb_sync_debug; + diff --git a/mysql-test/suite/sys_vars/t/innodb_sync_spin_loops_basic.test b/mysql-test/suite/sys_vars/t/innodb_sync_spin_loops_basic.test index 35460fe47f2..138e877dd42 100644 --- a/mysql-test/suite/sys_vars/t/innodb_sync_spin_loops_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_sync_spin_loops_basic.test @@ -70,13 +70,35 @@ SELECT @@global.innodb_sync_spin_loops; SET @@global.innodb_sync_spin_loops = 0; SELECT @@global.innodb_sync_spin_loops; -SET @@global.innodb_sync_spin_loops = 1; +SET @@global.innodb_sync_spin_loops = 65535; SELECT @@global.innodb_sync_spin_loops; -SET @@global.innodb_sync_spin_loops = 1000; + +SET @@global.innodb_sync_spin_loops = 4294967295; SELECT @@global.innodb_sync_spin_loops; --echo '#--------------------FN_DYNVARS_046_04-------------------------#' ########################################################################### +# Check the value of innodb_sync_spin_loops 
for out of bounds # +########################################################################### + +# With a 64 bit mysqld:18446744073709551615,with a 32 bit mysqld: 4294967295 +--disable_warnings +SET @@global.innodb_sync_spin_loops = 4294967296; +--enable_warnings +SELECT @@global.innodb_sync_spin_loops IN (4294967296,4294967295); + +--disable_warnings +SET @@global.innodb_sync_spin_loops = 12345678901; +--enable_warnings +SELECT @@global.innodb_sync_spin_loops IN (12345678901,4294967295); + +--disable_warnings +SET @@global.innodb_sync_spin_loops = 18446744073709551615; +--enable_warnings +SELECT @@global.innodb_sync_spin_loops IN (18446744073709551615,4294967295); + +--echo '#--------------------FN_DYNVARS_046_05-------------------------#' +########################################################################### # Change the value of innodb_sync_spin_loops to invalid value # ########################################################################### @@ -91,22 +113,38 @@ SELECT @@global.innodb_sync_spin_loops; SET @@global.innodb_sync_spin_loops = "Y"; SELECT @@global.innodb_sync_spin_loops; -SET @@global.innodb_sync_spin_loops = 1001; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_sync_spin_loops = 65535.01; +SELECT @@global.innodb_sync_spin_loops; + +SET @@global.innodb_sync_spin_loops = -1024; +SELECT @@global.innodb_sync_spin_loops; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_sync_spin_loops = " "; +SELECT @@global.innodb_sync_spin_loops; + +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_sync_spin_loops = ' '; SELECT @@global.innodb_sync_spin_loops; ---echo '#----------------------FN_DYNVARS_046_05------------------------#' +--echo '#----------------------FN_DYNVARS_046_06------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_sync_spin_loops = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_sync_spin_loops'; +--enable_warnings SELECT @@global.innodb_sync_spin_loops; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_sync_spin_loops'; +--enable_warnings ---echo '#---------------------FN_DYNVARS_046_06-------------------------#' +--echo '#---------------------FN_DYNVARS_046_07-------------------------#' ################################################################### # Check if ON and OFF values can be used on variable # ################################################################### @@ -119,7 +157,7 @@ SELECT @@global.innodb_sync_spin_loops; SET @@global.innodb_sync_spin_loops = ON; SELECT @@global.innodb_sync_spin_loops; ---echo '#---------------------FN_DYNVARS_046_07----------------------#' +--echo '#---------------------FN_DYNVARS_046_08----------------------#' ################################################################### # Check if TRUE and FALSE values can be used on variable # ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_table_locks_basic.test b/mysql-test/suite/sys_vars/t/innodb_table_locks_basic.test index e7503bd334d..e3e4bda345e 100644 --- a/mysql-test/suite/sys_vars/t/innodb_table_locks_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_table_locks_basic.test @@ -106,7 +106,7 @@ SELECT @@global.innodb_table_locks; # for session ---error 
ER_WRONG_VALUE_FOR_VAR +--Error ER_WRONG_VALUE_FOR_VAR SET @@session.innodb_table_locks = -6; --Error ER_WRONG_TYPE_FOR_VAR SET @@session.innodb_table_locks = 1.6; @@ -128,7 +128,7 @@ SET @@session.innodb_table_locks = # for global ---error ER_WRONG_VALUE_FOR_VAR +--Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_table_locks = -1; --Error ER_WRONG_VALUE_FOR_VAR SET @@global.innodb_table_locks = 2; @@ -165,24 +165,32 @@ SELECT @@session.innodb_table_locks AS res_is_1; # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT IF(@@global.innodb_table_locks, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_table_locks'; +--enable_warnings SELECT @@global.innodb_table_locks; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_table_locks'; +--enable_warnings --echo '#----------------------FN_DYNVARS_046_07------------------------#' ######################################################################### # Check if the value in SESSION Table matches value in variable # ######################################################################### +--disable_warnings SELECT IF(@@session.innodb_table_locks, "ON", "OFF") = VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME='innodb_table_locks'; +--enable_warnings SELECT @@session.innodb_table_locks; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_VARIABLES WHERE VARIABLE_NAME='innodb_table_locks'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_08-------------------------#' diff --git a/mysql-test/suite/sys_vars/t/innodb_table_locks_func.test b/mysql-test/suite/sys_vars/t/innodb_table_locks_func.test index d69cacd1377..330addd6b3b 100644 --- a/mysql-test/suite/sys_vars/t/innodb_table_locks_func.test +++ b/mysql-test/suite/sys_vars/t/innodb_table_locks_func.test @@ -31,7 +31,9 @@ SET @start_value= @@global.innodb_table_locks; SELECT @start_value; SET @@global.innodb_table_locks = OFF; +--echo 'connect (con1,localhost,root,,,,)' connect (con1,localhost,root,,,,); +--echo 'connection con1' connection con1; SELECT @@global.innodb_table_locks; SELECT @@session.innodb_table_locks; @@ -46,8 +48,10 @@ disconnect con1; #============================================================================== --echo '----check when innodb_table_locks = ON and autocommit = OFF---' #============================================================================== +--echo 'connect (con2,localhost,root,,,,)' connect (con2,localhost,root,,,,); +--echo 'connection default' connection default; --disable_warnings @@ -62,14 +66,17 @@ BEGIN; INSERT INTO t1 VALUES(1); SELECT * FROM t1 FOR UPDATE; +--echo 'CONNECTION con2' CONNECTION con2; SET @@innodb_table_locks = ON; SET @@autocommit = OFF; send LOCK TABLES t1 WRITE; +--echo 'CONNECTION default' CONNECTION default; COMMIT; +--echo 'CONNECTION con2' CONNECTION con2; reap; UNLOCK tables; diff --git a/mysql-test/suite/sys_vars/t/innodb_temp_data_file_path_basic.test b/mysql-test/suite/sys_vars/t/innodb_temp_data_file_path_basic.test new file mode 100644 index 00000000000..607ee9b27e2 --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_temp_data_file_path_basic.test @@ -0,0 +1,106 @@ + + +################ mysql-test\t\innodb_temp_data_file_path_basic.test ############ +# # +# Variable Name: innodb_temp_data_file_path # +# Scope: Global # +# Access Type: 
Static # +# Data Type: filename # +# # +# # +# Creation Date: 2012-12-27 # +# Author : Krunal Bauskar # +# # +# # +# Description:Test Cases of Dynamic System Variable innodb_temp_data_file_path# +# that checks the behavior of this variable in the following ways # +# * Value Check # +# * Scope Check # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/have_innodb.inc + +--echo '#---------------------BS_STVARS_024_01----------------------#' +#################################################################### +# Displaying default value # +#################################################################### +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +--echo 1 Expected + + +--echo '#---------------------BS_STVARS_024_02----------------------#' +#################################################################### +# Check if Value can set # +#################################################################### + +--error ER_INCORRECT_GLOBAL_LOCAL_VAR +SET @@GLOBAL.innodb_temp_data_file_path=1; +--echo Expected error 'Read only variable' + +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +--echo 1 Expected + + + + +--echo '#---------------------BS_STVARS_024_03----------------------#' +################################################################# +# Check if the value in GLOBAL Table matches value in variable # +################################################################# + +--disable_warnings +SELECT @@GLOBAL.innodb_temp_data_file_path = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_temp_data_file_path'; +--enable_warnings +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +--echo 1 Expected + +--disable_warnings +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_temp_data_file_path'; +--enable_warnings +--echo 1 Expected + + + +--echo '#---------------------BS_STVARS_024_04----------------------#' +################################################################################ +# Check if accessing variable with and without GLOBAL point to same variable # +################################################################################ +SELECT @@innodb_temp_data_file_path = @@GLOBAL.innodb_temp_data_file_path; +--echo 1 Expected + + + +--echo '#---------------------BS_STVARS_024_05----------------------#' +################################################################################ +#Check if innodb_temp_data_file_path can be accessed with and without @@ sign # +################################################################################ + +SELECT COUNT(@@innodb_temp_data_file_path); +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_temp_data_file_path); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_temp_data_file_path); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_temp_data_file_path); +--echo 1 Expected + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_temp_data_file_path = @@SESSION.innodb_temp_data_file_path; +--echo Expected error 'Readonly variable' + + diff --git a/mysql-test/suite/sys_vars/t/innodb_thread_concurrency_basic.test b/mysql-test/suite/sys_vars/t/innodb_thread_concurrency_basic.test index d30ec214f4a..0be32543d26 100644 --- 
a/mysql-test/suite/sys_vars/t/innodb_thread_concurrency_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_thread_concurrency_basic.test @@ -99,22 +99,35 @@ SELECT @@global.innodb_thread_concurrency; --Error ER_WRONG_TYPE_FOR_VAR SET @@global.innodb_thread_concurrency = "Y"; SELECT @@global.innodb_thread_concurrency; - +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_thread_concurrency = ' '; +SELECT @@global.innodb_thread_concurrency; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_thread_concurrency = " "; +SELECT @@global.innodb_thread_concurrency; SET @@global.innodb_thread_concurrency = 1001; SELECT @@global.innodb_thread_concurrency; +--Error ER_WRONG_TYPE_FOR_VAR +SET @@global.innodb_thread_concurrency = 255.01; +SELECT @@global.innodb_thread_concurrency; + --echo '#----------------------FN_DYNVARS_046_05------------------------#' ######################################################################### # Check if the value in GLOBAL Table matches value in variable # ######################################################################### +--disable_warnings SELECT @@global.innodb_thread_concurrency = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_thread_concurrency'; +--enable_warnings SELECT @@global.innodb_thread_concurrency; +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_thread_concurrency'; +--enable_warnings --echo '#---------------------FN_DYNVARS_046_06-------------------------#' ################################################################### diff --git a/mysql-test/suite/sys_vars/t/innodb_trx_purge_view_update_only_debug_basic.test b/mysql-test/suite/sys_vars/t/innodb_trx_purge_view_update_only_debug_basic.test index d7207515fe1..04f406a311d 100644 --- a/mysql-test/suite/sys_vars/t/innodb_trx_purge_view_update_only_debug_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_trx_purge_view_update_only_debug_basic.test @@ -13,20 +13,26 @@ select @@global.innodb_trx_purge_view_update_only_debug; select @@session.innodb_trx_purge_view_update_only_debug; show global variables like 'innodb_trx_purge_view_update_only_debug'; show session variables like 'innodb_trx_purge_view_update_only_debug'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_purge_view_update_only_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_purge_view_update_only_debug'; +--enable_warnings # # show that it's writable # set global innodb_trx_purge_view_update_only_debug=1; select @@global.innodb_trx_purge_view_update_only_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_purge_view_update_only_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_purge_view_update_only_debug'; +--enable_warnings set @@global.innodb_trx_purge_view_update_only_debug=0; select @@global.innodb_trx_purge_view_update_only_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_purge_view_update_only_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_purge_view_update_only_debug'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_trx_purge_view_update_only_debug='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_trx_rseg_n_slots_debug_basic.test 
b/mysql-test/suite/sys_vars/t/innodb_trx_rseg_n_slots_debug_basic.test index d17917de8e9..858e1b63908 100644 --- a/mysql-test/suite/sys_vars/t/innodb_trx_rseg_n_slots_debug_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_trx_rseg_n_slots_debug_basic.test @@ -13,20 +13,26 @@ select @@global.innodb_trx_rseg_n_slots_debug; select @@session.innodb_trx_rseg_n_slots_debug; show global variables like 'innodb_trx_rseg_n_slots_debug'; show session variables like 'innodb_trx_rseg_n_slots_debug'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug'; +--enable_warnings # # show that it's writable # set global innodb_trx_rseg_n_slots_debug=1; select @@global.innodb_trx_rseg_n_slots_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug'; +--enable_warnings set @@global.innodb_trx_rseg_n_slots_debug=0; select @@global.innodb_trx_rseg_n_slots_debug; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_trx_rseg_n_slots_debug'; select * from information_schema.session_variables where variable_name='innodb_trx_rseg_n_slots_debug'; +--enable_warnings --error ER_GLOBAL_VARIABLE set session innodb_trx_rseg_n_slots_debug='some'; --error ER_GLOBAL_VARIABLE diff --git a/mysql-test/suite/sys_vars/t/innodb_undo_directory_basic.test b/mysql-test/suite/sys_vars/t/innodb_undo_directory_basic.test index 583dbe6aa03..0df071c2029 100644 --- a/mysql-test/suite/sys_vars/t/innodb_undo_directory_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_undo_directory_basic.test @@ -21,8 +21,8 @@ #################################################################### # Display the default value # #################################################################### -SELECT @@GLOBAL.innodb_undo_directory; ---echo . Expected +SELECT COUNT(@@GLOBAL.innodb_undo_directory); +--echo 1 Expected #################################################################### @@ -41,17 +41,21 @@ SELECT COUNT(@@GLOBAL.innodb_undo_directory); # Check if the value in GLOBAL table matches value in variable # ################################################################################ -SELECT VARIABLE_VALUE +--disable_warnings +SELECT @@GLOBAL.innodb_undo_directory = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_undo_directory'; ---echo . 
Expected +--enable_warnings +--echo 1 Expected SELECT COUNT(@@GLOBAL.innodb_undo_directory); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_undo_directory'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_undo_log_truncate_basic.test b/mysql-test/suite/sys_vars/t/innodb_undo_log_truncate_basic.test new file mode 100644 index 00000000000..b60f9be22fa --- /dev/null +++ b/mysql-test/suite/sys_vars/t/innodb_undo_log_truncate_basic.test @@ -0,0 +1,113 @@ + + +############### mysql-test\t\innodb_undo_log_truncate_basic.test ############## +# # +# Variable Name: innodb_undo_log_truncate # +# Scope: Global # +# Access Type: Dynamic # +# Data Type: boolean # +# # +# # +# Creation Date: 2008-02-07 # +# Author : Sharique Abdullah # +# # +# # +# Description:Test Cases of Dynamic System Variable innodb_undo_log_truncate # +# that checks the behavior of this variable in the following ways # +# * Value Check # +# * Scope Check # +# # +# Reference: http://dev.mysql.com/doc/refman/5.1/en/ # +# server-system-variables.html # +# # +############################################################################### + +--source include/have_innodb.inc + +SET @start_global_value = @@global.innodb_undo_log_truncate; +SELECT @start_global_value; + + +--echo '#---------------------BS_STVARS_028_01----------------------#' +#################################################################### +# Displaying default value # +#################################################################### +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +--echo 1 Expected + + +--echo '#---------------------BS_STVARS_028_02----------------------#' +#################################################################### +# Check if Value can set # +#################################################################### + +SET @@global.innodb_undo_log_truncate = 0; +SELECT @@global.innodb_undo_log_truncate; + +SET @@global.innodb_undo_log_truncate ='On' ; +SELECT @@global.innodb_undo_log_truncate; + +SET @@global.innodb_undo_log_truncate ='Off' ; +SELECT @@global.innodb_undo_log_truncate; + +SET @@global.innodb_undo_log_truncate = 1; +SELECT @@global.innodb_undo_log_truncate; + +--echo '#---------------------BS_STVARS_028_03----------------------#' +################################################################# +# Check if the value in GLOBAL Table matches value in variable # +################################################################# + +--disable_warnings +SELECT IF(@@GLOBAL.innodb_undo_log_truncate,'ON','OFF') = VARIABLE_VALUE +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_undo_log_truncate'; +--echo 1 Expected + +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +--echo 1 Expected + +SELECT COUNT(VARIABLE_VALUE) +FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES +WHERE VARIABLE_NAME='innodb_undo_log_truncate'; +--echo 1 Expected +--enable_warnings + + +--echo '#---------------------BS_STVARS_028_04----------------------#' +################################################################################ +# Check if accessing variable with and without GLOBAL point to same variable # +################################################################################ +SELECT @@innodb_undo_log_truncate = @@GLOBAL.innodb_undo_log_truncate; +--echo 1 Expected + + + +--echo '#---------------------BS_STVARS_028_05----------------------#' 
+################################################################################ +# Check if innodb_undo_log_truncate can be accessed with and without @@ sign # +################################################################################ + +SELECT COUNT(@@innodb_undo_log_truncate); +--echo 1 Expected + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@local.innodb_undo_log_truncate); +--echo Expected error 'Variable is a GLOBAL variable' + +--Error ER_INCORRECT_GLOBAL_LOCAL_VAR +SELECT COUNT(@@SESSION.innodb_undo_log_truncate); +--echo Expected error 'Variable is a GLOBAL variable' + +SELECT COUNT(@@GLOBAL.innodb_undo_log_truncate); +--echo 1 Expected + +--Error ER_BAD_FIELD_ERROR +SELECT innodb_undo_log_truncate = @@SESSION.innodb_undo_log_truncate; + +# +# Cleanup +# + +SET @@global.innodb_undo_log_truncate = @start_global_value; +SELECT @@global.innodb_undo_log_truncate; diff --git a/mysql-test/suite/sys_vars/t/innodb_undo_logs_basic.test b/mysql-test/suite/sys_vars/t/innodb_undo_logs_basic.test index 77b6af6909c..f83b5ede247 100644 --- a/mysql-test/suite/sys_vars/t/innodb_undo_logs_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_undo_logs_basic.test @@ -39,9 +39,11 @@ SELECT COUNT(@@GLOBAL.innodb_undo_logs); # Check if the value in GLOBAL table matches value in variable # ################################################################################ +--disable_warnings SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_undo_logs'; +--enable_warnings --echo 128 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_undo_tablespaces_basic.test b/mysql-test/suite/sys_vars/t/innodb_undo_tablespaces_basic.test index 53396249e03..e1744b09038 100644 --- a/mysql-test/suite/sys_vars/t/innodb_undo_tablespaces_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_undo_tablespaces_basic.test @@ -18,11 +18,8 @@ --source include/have_innodb.inc -#################################################################### -# Display default value # -#################################################################### -SELECT @@GLOBAL.innodb_undo_tablespaces; ---echo 0 Expected +SELECT @@GLOBAL.innodb_undo_tablespaces >= 0; +let $undo_tablespaces=`SELECT @@GLOBAL.innodb_undo_tablespaces`; #################################################################### @@ -41,9 +38,13 @@ SELECT COUNT(@@GLOBAL.innodb_undo_tablespaces); # Check if the value in GLOBAL table matches value in variable # ################################################################################ -SELECT VARIABLE_VALUE +--disable_warnings +--disable_query_log +eval SELECT VARIABLE_VALUE-$undo_tablespaces DIFFERENCE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_undo_tablespaces'; +--enable_query_log +--enable_warnings --echo 0 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_use_native_aio_basic.test b/mysql-test/suite/sys_vars/t/innodb_use_native_aio_basic.test index 37879530d75..524b5a7b161 100644 --- a/mysql-test/suite/sys_vars/t/innodb_use_native_aio_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_use_native_aio_basic.test @@ -52,17 +52,21 @@ SELECT COUNT(@@GLOBAL.innodb_use_native_aio); # Check if the value in GLOBAL Table matches value in variable # ################################################################# +--disable_warnings SELECT IF(@@GLOBAL.innodb_use_native_aio, 'ON', 'OFF') = VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_use_native_aio'; +--enable_warnings --echo 1 Expected SELECT 
COUNT(@@GLOBAL.innodb_use_native_aio); --echo 1 Expected +--disable_warnings SELECT COUNT(VARIABLE_VALUE) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME='innodb_use_native_aio'; +--enable_warnings --echo 1 Expected diff --git a/mysql-test/suite/sys_vars/t/innodb_use_sys_malloc_basic.test b/mysql-test/suite/sys_vars/t/innodb_use_sys_malloc_basic.test deleted file mode 100644 index 699773f4a62..00000000000 --- a/mysql-test/suite/sys_vars/t/innodb_use_sys_malloc_basic.test +++ /dev/null @@ -1,31 +0,0 @@ - -# -# 2010-01-27 OBN - Added -# - ---source include/have_innodb.inc - -# when running with valgring, mtr uses --innodb-use-sys-malloc=0, -# while below we want to see the default value. ---source include/not_valgrind.inc - -# -# show the global and session values; -# ---echo Valid values are 'ON' and 'OFF' -select @@global.innodb_use_sys_malloc; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -select @@session.innodb_use_sys_malloc; -show global variables like 'innodb_use_sys_malloc'; -show session variables like 'innodb_use_sys_malloc'; -select * from information_schema.global_variables where variable_name='innodb_use_sys_malloc'; -select * from information_schema.session_variables where variable_name='innodb_use_sys_malloc'; - -# -# show that it's read-only -# ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -set global innodb_use_sys_malloc=1; ---error ER_INCORRECT_GLOBAL_LOCAL_VAR -set session innodb_use_sys_malloc=1; - diff --git a/mysql-test/suite/sys_vars/t/innodb_version_basic.test b/mysql-test/suite/sys_vars/t/innodb_version_basic.test index 6ee2adf6cf9..182841048f7 100644 --- a/mysql-test/suite/sys_vars/t/innodb_version_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_version_basic.test @@ -15,8 +15,10 @@ select @@global.innodb_version; select @@session.innodb_version; --echo show global variables like 'innodb_version' disabled so to not change with every version; --echo show session variables like 'innodb_version' disabled so to not change with every version; +--disable_warnings select VARIABLE_VALUE=@@global.innodb_version from information_schema.global_variables where variable_name='innodb_version'; select VARIABLE_VALUE=@@global.innodb_version from information_schema.session_variables where variable_name='innodb_version'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/suite/sys_vars/t/innodb_write_io_threads_basic.test b/mysql-test/suite/sys_vars/t/innodb_write_io_threads_basic.test index 8efa6576a66..d9556af37d1 100644 --- a/mysql-test/suite/sys_vars/t/innodb_write_io_threads_basic.test +++ b/mysql-test/suite/sys_vars/t/innodb_write_io_threads_basic.test @@ -13,8 +13,10 @@ select @@global.innodb_write_io_threads; select @@session.innodb_write_io_threads; show global variables like 'innodb_write_io_threads'; show session variables like 'innodb_write_io_threads'; +--disable_warnings select * from information_schema.global_variables where variable_name='innodb_write_io_threads'; select * from information_schema.session_variables where variable_name='innodb_write_io_threads'; +--enable_warnings # # show that it's read-only diff --git a/mysql-test/t/ctype_utf8mb4_innodb-master.opt b/mysql-test/t/ctype_utf8mb4_innodb-master.opt index 96f0ce3f36c..56d40323eae 100644 --- a/mysql-test/t/ctype_utf8mb4_innodb-master.opt +++ b/mysql-test/t/ctype_utf8mb4_innodb-master.opt @@ -1 +1,2 @@ --default-storage-engine=MyISAM +--loose-innodb-large-prefix=OFF diff --git a/mysql-test/t/mysqlbinlog_row_minimal.test b/mysql-test/t/mysqlbinlog_row_minimal.test index 
9c319880fbd..7909f75e9a1 100644 --- a/mysql-test/t/mysqlbinlog_row_minimal.test +++ b/mysql-test/t/mysqlbinlog_row_minimal.test @@ -27,7 +27,7 @@ DELETE FROM t2; FLUSH BINARY LOGS; --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR ---replace_regex /\d{6} *\d*:\d\d:\d\d// /Start:.*at startup/Start: xxx/ /SET TIMESTAMP=\d*/SET TIMESTAMP=X/ /exec_time=\d*/exec_time=x/ /CRC32 0x[0-9a-f]*/CRC32 XXX/ +--replace_regex /\d{6} *\d*:\d\d:\d\d// /Start:.*at startup/Start: xxx/ /SET TIMESTAMP=\d*/SET TIMESTAMP=X/ /exec_time=\d*/exec_time=x/ /mapped to number \d*/mapped to number num/ /CRC32 0x[0-9a-f]+/CRC32 XXX/ --exec $MYSQL_BINLOG --verbose --verbose --base64-output=DECODE-ROWS $datadir/$binlog DROP TABLE t1,t2; diff --git a/mysql-test/t/partition_exchange-master.opt b/mysql-test/t/partition_exchange-master.opt new file mode 100644 index 00000000000..5a0380b7a1d --- /dev/null +++ b/mysql-test/t/partition_exchange-master.opt @@ -0,0 +1 @@ +--loose-innodb_default_row_format=COMPACT diff --git a/mysql-test/t/partition_innodb-master.opt b/mysql-test/t/partition_innodb-master.opt new file mode 100644 index 00000000000..cf94b2d7dca --- /dev/null +++ b/mysql-test/t/partition_innodb-master.opt @@ -0,0 +1 @@ +--loose-innodb-large-prefix=OFF diff --git a/mysql-test/t/row-checksum-master.opt b/mysql-test/t/row-checksum-master.opt new file mode 100644 index 00000000000..990e4941ae9 --- /dev/null +++ b/mysql-test/t/row-checksum-master.opt @@ -0,0 +1 @@ +--loose-innodb-strict-mode=0 diff --git a/mysql-test/t/row-checksum-old-master.opt b/mysql-test/t/row-checksum-old-master.opt index 8e7b7f9e36f..40027795fff 100644 --- a/mysql-test/t/row-checksum-old-master.opt +++ b/mysql-test/t/row-checksum-old-master.opt @@ -1 +1,2 @@ --old +--loose-innodb-strict-mode=0 diff --git a/mysql-test/thou_shalt_not_kill.pm b/mysql-test/thou_shalt_not_kill.pm new file mode 100755 index 00000000000..9a562761d04 --- /dev/null +++ b/mysql-test/thou_shalt_not_kill.pm @@ -0,0 +1,26 @@ +package thou_shalt_not_kill; +require Exporter; +@ISA = 'Exporter'; +@EXPORT_OK = 'kill'; + +use subs 'kill'; +use Carp qw(cluck); + +sub import { + my $pkg = shift; + $pkg->export('CORE::GLOBAL', 'kill', @_); +} + +sub kill { + return CORE::kill(@_) unless $_[0]; + cluck "kill(@_)"; + print "\e[1;31m" if -t STDOUT; + system "pstree -c $_" foreach @_[1..$#_]; + print "\e[0;39m" if -t STDOUT; + print STDERR 'Kill [y/n] ? '; + my $answer=<STDIN>; + return CORE::kill(@_) if $answer =~ /y/i or $answer eq "\n"; + 1; +} + +1; diff --git a/sql/handler.h b/sql/handler.h index cae95ea4ae6..75f8df98add 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -573,7 +573,32 @@ struct xid_t { long bqual_length; char data[XIDDATASIZE]; // not \0-terminated !
- xid_t() {} /* Remove gcc warning */ + xid_t() {} /* Remove gcc warning */ + long get_format_id() const + { return formatID; } + void set_format_id(long v) + { + formatID= v; + } + long get_gtrid_length() const + { return gtrid_length; } + void set_gtrid_length(long v) + { + gtrid_length= v; + } + long get_bqual_length() const + { return bqual_length; } + void set_bqual_length(long v) + { + bqual_length= v; + } + const char* get_data() const + { return data; } + void set_data(const void* v, long l) + { + DBUG_ASSERT(l <= XIDDATASIZE); + memcpy(data, v, l); + } bool eq(struct xid_t *xid) { return !xid->is_null() && eq(xid->gtrid_length, xid->bqual_length, xid->data); } bool eq(long g, long b, const char *d) @@ -586,6 +611,13 @@ struct xid_t { memcpy(data, g, gtrid_length= gl); memcpy(data+gl, b, bqual_length= bl); } + void reset() + { + formatID= -1; + gtrid_length= 0; + bqual_length= 0; + memset(data, 0, XIDDATASIZE); + } void set(ulonglong xid) { my_xid tmp; @@ -3885,8 +3917,8 @@ public: TABLE_SHARE* get_table_share() { return table_share; } protected: /* deprecated, don't use in new engines */ - inline void ha_statistic_increment(ulong SSV::*offset) const { } - + // inline void ha_statistic_increment(ulong SSV::*offset) const { } + #define ha_statistic_increment(A) /* Service methods for use by storage engines. */ void **ha_data(THD *) const; THD *ha_thd(void) const; diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index 8c608e82388..b835d6bc0fb 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -336,7 +336,16 @@ IF(MSVC) ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE) ENDIF() +SET(MUTEXTYPE "event" CACHE STRING "Mutex type: event, sys or futex") +IF(MUTEXTYPE MATCHES "event") + ADD_DEFINITIONS(-DMUTEX_EVENT) +ELSEIF(MUTEXTYPE MATCHES "futex" AND DEFINED HAVE_IB_LINUX_FUTEX) + ADD_DEFINITIONS(-DMUTEX_FUTEX) +ELSE() + ADD_DEFINITIONS(-DMUTEX_SYS) +ENDIF() + # Include directories under innobase INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/innobase/include ${CMAKE_SOURCE_DIR}/storage/innobase/handler) @@ -371,6 +380,7 @@ SET(INNOBASE_SOURCES api/api0api.cc api/api0misc.cc btr/btr0btr.cc + btr/btr0bulk.cc btr/btr0cur.cc btr/btr0pcur.cc btr/btr0scrub.cc @@ -394,13 +404,16 @@ SET(INNOBASE_SOURCES dict/dict0mem.cc dict/dict0stats.cc dict/dict0stats_bg.cc - dyn/dyn0dyn.cc +# dyn/dyn0dyn.cc eval/eval0eval.cc eval/eval0proc.cc fil/fil0fil.cc fil/fil0pagecompress.cc fil/fil0crypt.cc fsp/fsp0fsp.cc + fsp/fsp0file.cc + fsp/fsp0space.cc + fsp/fsp0sysspace.cc fut/fut0fut.cc fut/fut0lst.cc ha/ha0ha.cc @@ -412,27 +425,34 @@ SET(INNOBASE_SOURCES fts/fts0config.cc fts/fts0opt.cc fts/fts0pars.cc + fts/fts0plugin.cc fts/fts0que.cc fts/fts0sql.cc fts/fts0tlex.cc + gis/gis0geo.cc + gis/gis0rtree.cc + gis/gis0sea.cc handler/ha_innodb.cc +# handler/ha_innopart.cc handler/handler0alter.cc handler/i_s.cc ibuf/ibuf0ibuf.cc lock/lock0iter.cc lock/lock0lock.cc + lock/lock0prdt.cc lock/lock0wait.cc log/log0log.cc log/log0recv.cc log/log0crypt.cc mach/mach0data.cc mem/mem0mem.cc - mem/mem0pool.cc +# mem/mem0pool.cc mtr/mtr0log.cc mtr/mtr0mtr.cc + os/os0event.cc os/os0file.cc os/os0proc.cc - os/os0sync.cc +# os/os0sync.cc os/os0thread.cc page/page0cur.cc page/page0page.cc @@ -456,6 +476,7 @@ SET(INNOBASE_SOURCES row/row0purge.cc row/row0row.cc row/row0sel.cc + row/row0trunc.cc row/row0uins.cc row/row0umod.cc row/row0undo.cc @@ -467,6 +488,7 @@ SET(INNOBASE_SOURCES srv/srv0srv.cc srv/srv0start.cc sync/sync0arr.cc + sync/sync0debug.cc sync/sync0rw.cc 
sync/sync0sync.cc trx/trx0i_s.cc @@ -478,12 +500,13 @@ SET(INNOBASE_SOURCES trx/trx0trx.cc trx/trx0undo.cc usr/usr0sess.cc - ut/ut0bh.cc +# ut/ut0bh.cc ut/ut0byte.cc ut/ut0crc32.cc ut/ut0dbg.cc ut/ut0list.cc ut/ut0mem.cc + ut/ut0new.cc ut/ut0rbt.cc ut/ut0rnd.cc ut/ut0ut.cc @@ -515,7 +538,8 @@ ELSE() ENDIF() MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE - MODULE_ONLY - MODULE_OUTPUT_NAME ha_innodb +# MODULE_ONLY +# MODULE_OUTPUT_NAME ha_innodb + DEFAULT RECOMPILE_FOR_EMBEDDED LINK_LIBRARIES ${ZLIB_LIBRARY} ${LINKER_SCRIPT}) diff --git a/storage/innobase/Doxyfile b/storage/innobase/Doxyfile deleted file mode 100644 index 7cf5048fa52..00000000000 --- a/storage/innobase/Doxyfile +++ /dev/null @@ -1,1419 +0,0 @@ -# Doxyfile 1.5.6 - -# Usage: SVNVERSION=-r$(svnversion) doxygen - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# http://www.gnu.org/software/libiconv for the list of possible encodings. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = "InnoDB Plugin" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = 1.0$(SVNVERSION) - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = dox - -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create -# 4096 sub-directories (in 2 levels) under the output directory of each output -# format and will distribute the generated files over these directories. -# Enabling this option can be useful when feeding doxygen a huge amount of -# source files, where putting all generated files in the same directory would -# otherwise cause performance problems for the file system. - -CREATE_SUBDIRS = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: -# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, -# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, -# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, -# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, -# and Ukrainian. - -OUTPUT_LANGUAGE = English - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator -# that is used to form the text in various listings. Each string -# in this list, if found as the leading text of the brief description, will be -# stripped from the text and the result after processing the whole list, is -# used as the annotated text. Otherwise, the brief description is used as-is. -# If left blank, the following values are used ("$name" is automatically -# replaced with the name of the entity): "The $name class" "The $name widget" -# "The $name file" "is" "provides" "specifies" "contains" -# "represents" "a" "an" "the" - -ABBREVIATE_BRIEF = - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user-defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the -# path to strip. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of -# the path mentioned in the documentation of a class, which tells -# the reader which header file to include in order to use a class. -# If left blank only the name of the header file containing the class -# definition is used. Otherwise one should specify the include paths that -# are normally passed to the compiler using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. 
- -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like regular Qt-style comments -# (thus requiring an explicit @brief command for a brief description.) - -JAVADOC_AUTOBRIEF = NO - -# If the QT_AUTOBRIEF tag is set to YES then Doxygen will -# interpret the first line (until the first dot) of a Qt-style -# comment as the brief description. If set to NO, the comments -# will behave just like regular Qt-style comments (thus requiring -# an explicit \brief command for a brief description.) - -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen -# treat a multi-line C++ special comment block (i.e. a block of //! or /// -# comments) as a brief description. This used to be the default behaviour. -# The new default is to treat a multi-line C++ comment block as a detailed -# description. Set this tag to YES if you prefer the old behaviour instead. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the DETAILS_AT_TOP tag is set to YES then Doxygen -# will output the detailed description near the top, like JavaDoc. -# If set to NO, the detailed description appears after the member -# documentation. - -DETAILS_AT_TOP = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# re-implements. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce -# a new page for each member. If set to NO, the documentation of a member will -# be part of the file/class/namespace that contains it. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 8 - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user-defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C -# sources only. Doxygen will then generate output that is more tailored for C. -# For instance, some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = YES - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java -# sources only. Doxygen will then generate output that is more tailored for -# Java. For instance, namespaces will be presented as packages, qualified -# scopes will look different, etc. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources only. Doxygen will then generate output that is more tailored for -# Fortran. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for -# VHDL. - -OPTIMIZE_OUTPUT_VHDL = NO - -# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want -# to include (a tag file for) the STL sources as input, then you should -# set this tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. -# func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. - -BUILTIN_STL_SUPPORT = NO - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. -# Doxygen will parse them like normal C++ but will assume all classes use public -# instead of private inheritance when no explicit protection keyword is present. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate getter -# and setter methods for a property. Setting this option to YES (the default) -# will make doxygen to replace the get and set methods by a property in the -# documentation. This will only work if the methods are indeed getting or -# setting a simple type. If this is not the case, or you want to show the -# methods anyway, you should set this option to NO. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# Set the SUBGROUPING tag to YES (the default) to allow class member groups of -# the same type (for instance a group of public functions) to be put as a -# subgroup of that type (e.g. under the Public Functions section). Set it to -# NO to prevent subgrouping. Alternatively, this can be done per class using -# the \nosubgrouping command. - -SUBGROUPING = YES - -# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum -# is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically -# be useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. - -TYPEDEF_HIDES_STRUCT = NO - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = YES - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. 
-# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. When set to YES local -# methods, which are defined in the implementation section but not in -# the interface are included in the documentation. -# If set to NO (the default) only methods in the interface are included. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base -# name of the file that contains the anonymous namespace. By default -# anonymous namespace are hidden. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these classes will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all -# friend (class|struct|union) declarations. -# If set to NO (the default) these declarations will be included in the -# documentation. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any -# documentation blocks found inside the body of a function. -# If set to NO (the default) these blocks will be appended to the -# function's detailed documentation block. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower-case letters. If set to YES upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put a list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. 
- -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the -# brief documentation of file, namespace and class members alphabetically -# by member name. If set to NO (the default) the members will appear in -# declaration order. - -SORT_BRIEF_DOCS = NO - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the -# hierarchy of group names into alphabetical order. If set to NO (the default) -# the group names will appear in their defined order. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be -# sorted by fully-qualified names, including namespaces. If set to -# NO (the default), the class list will be sorted only by class name, -# not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the -# alphabetical list. - -SORT_BY_SCOPE_NAME = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or -# disable (NO) the deprecated list. This list is created by putting -# \deprecated commands in the documentation. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consists of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = YES - -# If the sources in your project are distributed over multiple directories -# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy -# in the documentation. The default is NO. - -SHOW_DIRECTORIES = NO - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. -# This will remove the Files entry from the Quick Index and from the -# Folder Tree View (if specified). The default is YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the -# Namespaces page. This will remove the Namespaces entry from the Quick Index -# and from the Folder Tree View (if specified). The default is YES. 
- -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command , where is the value of -# the FILE_VERSION_FILTER tag, and is the name of an input file -# provided by doxygen. Whatever the program writes to standard output -# is used as the file version. See the manual for examples. - -FILE_VERSION_FILTER = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = YES - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some -# parameters in a documented function, or documenting parameters that -# don't exist or using markup commands wrongly. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be abled to get warnings for -# functions that are documented, but have no documentation for their parameters -# or return value. If set to NO (the default) doxygen will only warn about -# wrong or incomplete parameter documentation, but not about the absence of -# documentation. - -WARN_NO_PARAMDOC = NO - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. Optionally the format may contain -# $version, which will be replaced by the version of the file (if it could -# be obtained via FILE_VERSION_FILTER) - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = . include/univ.i - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is -# also the default input encoding. Doxygen uses libiconv (or the iconv built -# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for -# the list of possible encodings. 
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx -# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 - -FILE_PATTERNS = *.c *.ic *.h - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or -# directories that are symbolic links (a Unix filesystem feature) are excluded -# from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. Note that the wildcards are matched -# against the file with absolute path, so to exclude all test directories -# for example use the pattern */test/* - -EXCLUDE_PATTERNS = - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test - -EXCLUDE_SYMBOLS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. If FILTER_PATTERNS is specified, this tag will be -# ignored. - -INPUT_FILTER = - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: -# pattern=filter (like *.cpp=my_cpp_filter). 
See INPUT_FILTER for further -# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER -# is applied to all files. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse (i.e. when SOURCE_BROWSER is set to YES). - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. -# Note: To get rid of all source code in the generated output, make sure also -# VERBATIM_HEADERS is set to NO. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) -# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from -# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will -# link to the source code. Otherwise they will link to the documentstion. - -REFERENCES_LINK_SOURCE = YES - -# If the USE_HTAGS tag is set to YES then the references to source code -# will point to the HTML generated by the htags(1) tool instead of doxygen -# built-in source browser. The htags tool is part of GNU's global source -# tagging system (see http://www.gnu.org/software/global/global.html). You -# will need version 4.8.6 or higher. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = NO - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. 
- -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet. Note that doxygen will try to copy -# the style sheet file to the HTML output directory, so don't put your own -# stylesheet in the HTML output directory as well, or it will be erased! - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = YES - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_DOCSET tag is set to YES, additional index files -# will be generated that can be used as input for Apple's Xcode 3 -# integrated development environment, introduced with OSX 10.5 (Leopard). -# To create a documentation set, doxygen will generate a Makefile in the -# HTML output directory. Running make will produce the docset in that -# directory and running "make install" will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find -# it at startup. - -GENERATE_DOCSET = NO - -# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the -# feed. A documentation feed provides an umbrella under which multiple -# documentation sets from a single provider (such as a company or product suite) -# can be grouped. - -DOCSET_FEEDNAME = "Doxygen generated docs" - -# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that -# should uniquely identify the documentation set bundle. This should be a -# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen -# will append .docset to the name. - -DOCSET_BUNDLE_ID = org.doxygen.Project - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. 
For this to work a browser that supports -# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox -# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). - -HTML_DYNAMIC_SECTIONS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can -# be used to specify the file name of the resulting .chm file. You -# can add a path in front of the file if the result should not be -# written to the html output directory. - -CHM_FILE = - -# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can -# be used to specify the location (absolute path including file name) of -# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run -# the HTML help compiler on the generated index.hhp. - -HHC_LOCATION = - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING -# is used to encode HtmlHelp index (hhk), content (hhc) and project file -# content. - -CHM_INDEX_ENCODING = - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the HTML help documentation and to the tree view. - -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = NO - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. -# If the tag value is set to FRAME, a side panel will be generated -# containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, -# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are -# probably better off using the HTML help feature. Other possible values -# for this tag are: HIERARCHIES, which will generate the Groups, Directories, -# and Class Hiererachy pages using a tree view instead of an ordered list; -# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which -# disables this behavior completely. For backwards compatibility with previous -# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE -# respectively. - -GENERATE_TREEVIEW = NONE - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -# Use this tag to change the font size of Latex formulas included -# as images in the HTML documentation. The default is 10. Note that -# when you change the font size after a successful doxygen run you need -# to manually remove any form_*.png images from the HTML output directory -# to force them to be regenerated. 
- -FORMULA_FONTSIZE = 10 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be -# invoked. If left blank `latex' will be used as the default command name. - -LATEX_CMD_NAME = latex - -# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to -# generate index for LaTeX. If left blank `makeindex' will be used as the -# default command name. - -MAKEINDEX_CMD_NAME = makeindex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -# If LATEX_HIDE_INDICES is set to YES then doxygen will not -# include the index chapters (such as File Index, Compound Index, etc.) -# in the output. - -LATEX_HIDE_INDICES = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimized for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. 
- -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assignments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. - -GENERATE_XML = NO - -# The XML_OUTPUT tag is used to specify where the XML pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `xml' will be used as the default path. - -XML_OUTPUT = xml - -# The XML_SCHEMA tag can be used to specify an XML schema, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_SCHEMA = - -# The XML_DTD tag can be used to specify an XML DTD, -# which can be used by a validating XML parser to check the -# syntax of the XML files. - -XML_DTD = - -# If the XML_PROGRAMLISTING tag is set to YES Doxygen will -# dump the program listings (including syntax highlighting -# and cross-referencing information) to the XML output. Note that -# enabling this will significantly increase the size of the XML output. 
- -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- - -# If the GENERATE_PERLMOD tag is set to YES Doxygen will -# generate a Perl module file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_PERLMOD = NO - -# If the PERLMOD_LATEX tag is set to YES Doxygen will generate -# the necessary Makefile rules, Perl scripts and LaTeX code to be able -# to generate PDF and DVI output from the Perl module output. - -PERLMOD_LATEX = NO - -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be -# nicely formatted so it can be parsed by a human reader. This is useful -# if you want to understand what is going on. On the other hand, if this -# tag is set to NO the size of the Perl module output will be much smaller -# and Perl will parse it just the same. - -PERLMOD_PRETTY = YES - -# The names of the make variables in the generated doxyrules.make file -# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. -# This is useful so different doxyrules.make files included by the same -# Makefile don't overwrite each other's variables. - -PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_DEFINED tags. - -EXPAND_ONLY_PREDEF = YES - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. 
- -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. To prevent a macro definition from being -# undefined via #undef or recursively expanded use the := operator -# instead of the = operator. - -PREDEFINED = DOXYGEN UNIV_DEBUG UNIV_SYNC_DEBUG __attribute__()= - -# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = UT_LIST_BASE_NODE_T UT_LIST_NODE_T - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line, have an all uppercase name, and do not end with a semicolon. Such -# function macros are typically used for boiler-plate code, and will confuse -# the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES option can be used to specify one or more tagfiles. -# Optionally an initial location of the external documentation -# can be added for each tagfile. The format of a tag file without -# this location is as follows: -# TAGFILES = file1 file2 ... -# Adding location for the tag files is done as follows: -# TAGFILES = file1=loc1 "file2 = loc2" ... -# where "loc1" and "loc2" can be relative or absolute paths or -# URLs. If a location is present for each tag, the installdox tool -# does not have to be run to correct the links. -# Note that each tag file must have a unique name -# (where the name does NOT include the path) -# If a tag file is not located in the directory in which doxygen -# is run, you must also specify the path to the tagfile here. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = NO - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base -# or super classes. Setting the tag to NO turns the diagrams off. Note that -# this option is superseded by the HAVE_DOT option below. This is only a -# fallback. 
It is recommended to install and use dot, since it yields more -# powerful graphs. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see -# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = YES - -# By default doxygen will write a font called FreeSans.ttf to the output -# directory and reference it in all dot files that doxygen generates. This -# font does not include all possible unicode characters however, so when you need -# these (or just want a differently looking font) you can specify the font name -# using DOT_FONTNAME. You need need to make sure dot is able to find the font, -# which can be done by putting it in a standard location or by setting the -# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory -# containing the font. - -DOT_FONTNAME = FreeSans - -# By default doxygen will tell dot to use the output directory to look for the -# FreeSans.ttf font (which doxygen will put there itself). If you specify a -# different font using DOT_FONTNAME you can set the path where dot -# can find it using this tag. - -DOT_FONTPATH = - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for groups, showing the direct groups dependencies - -GROUP_GRAPHS = NO - -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and -# collaboration diagrams in a style similar to the OMG's Unified Modeling -# Language. - -UML_LOOK = NO - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. 
- -INCLUDED_BY_GRAPH = YES - -# If the CALL_GRAPH and HAVE_DOT options are set to YES then -# doxygen will generate a call dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable call graphs -# for selected functions only using the \callgraph command. - -CALL_GRAPH = NO - -# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then -# doxygen will generate a caller dependency graph for every global function -# or class method. Note that enabling this option will significantly increase -# the time of a run. So in most cases it will be better to enable caller -# graphs for selected functions only using the \callergraph command. - -CALLER_GRAPH = NO - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES -# then doxygen will show the dependencies a directory has on other directories -# in a graphical way. The dependency relations are determined by the #include -# relations between the files in the directories. - -DIRECTORY_GRAPH = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are png, jpg, or gif -# If left blank png will be used. - -DOT_IMAGE_FORMAT = png - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found in the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of -# nodes that will be shown in the graph. If the number of nodes in a graph -# becomes larger than this value, doxygen will truncate the graph, which is -# visualized by representing a node as a red box. Note that doxygen if the -# number of direct children of the root node in a graph is already larger than -# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note -# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. - -DOT_GRAPH_MAX_NODES = 50 - -# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the -# graphs generated by dot. A depth value of 3 means that only nodes reachable -# from the root by following a path via at most 3 edges will be shown. Nodes -# that lay further from the root node will be omitted. Note that setting this -# option to 1 or 2 may greatly reduce the computation time needed for large -# code bases. Also note that the size of a graph can be further restricted by -# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. - -MAX_DOT_GRAPH_DEPTH = 3 - -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is enabled by default, which results in a transparent -# background. Warning: Depending on the platform used, enabling this option -# may lead to badly anti-aliased labels on the edges of a graph (i.e. they -# become hard to read). - -DOT_TRANSPARENT = YES - -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output -# files in one run (i.e. multiple -o and -T options on the command line). 
This -# makes dot run faster, but since only newer versions of dot (>1.8.10) -# support this, this feature is disabled by default. - -DOT_MULTI_TARGETS = NO - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermediate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. - -SEARCHENGINE = NO diff --git a/storage/innobase/api/api0api.cc b/storage/innobase/api/api0api.cc index 739ea9f7572..1f04b1b1234 100644 --- a/storage/innobase/api/api0api.cc +++ b/storage/innobase/api/api0api.cc @@ -24,15 +24,7 @@ InnoDB Native API 3/20/2011 Jimmy Yang extracted from Embedded InnoDB *******************************************************/ -#include "univ.i" - -#include -#include -#include -#include -#ifdef HAVE_UNISTD_H -#include -#endif +#include "ha_prototypes.h" #include "api0api.h" #include "api0misc.h" @@ -50,11 +42,9 @@ InnoDB Native API #include "row0sel.h" #include "lock0lock.h" #include "rem0cmp.h" -#include "ut0dbg.h" #include "dict0priv.h" -#include "ut0ut.h" -#include "ha_prototypes.h" #include "trx0roll.h" +#include "row0trunc.h" /** configure variable for binlog option with InnoDB APIs */ my_bool ib_binlog_enabled = FALSE; @@ -212,15 +202,15 @@ operation, we only do it every INNOBASE_WAKE_INTERVAL'th step. */ #define INNOBASE_WAKE_INTERVAL 32 /*****************************************************************//** -Check whether the Innodb persistent cursor is positioned. -@return IB_TRUE if positioned */ +Check whether the InnoDB persistent cursor is positioned. +@return IB_TRUE if positioned */ UNIV_INLINE ib_bool_t ib_btr_cursor_is_positioned( /*========================*/ btr_pcur_t* pcur) /*!< in: InnoDB persistent cursor */ { - return(pcur->old_stored == BTR_PCUR_OLD_STORED + return(pcur->old_stored && (pcur->pos_state == BTR_PCUR_IS_POSITIONED || pcur->pos_state == BTR_PCUR_WAS_POSITIONED)); } @@ -228,7 +218,7 @@ ib_btr_cursor_is_positioned( /********************************************************************//** Open a table using the table id, if found then increment table ref count. -@return table instance if found */ +@return table instance if found */ static dict_table_t* ib_open_table_by_id( @@ -260,9 +250,9 @@ ib_open_table_by_id( /********************************************************************//** Open a table using the table name, if found then increment table ref count. -@return table instance if found */ -UNIV_INTERN -void* +@return table instance if found */ +static +dict_table_t* ib_open_table_by_name( /*==================*/ const char* name) /*!< in: table name to lookup */ @@ -281,7 +271,7 @@ ib_open_table_by_name( /********************************************************************//** Find table using table name. 
-@return table instance if found */ +@return table instance if found */ static dict_table_t* ib_lookup_table_by_name( @@ -358,7 +348,7 @@ ib_read_tuple( *len = offset_size; } ptr = *rec_buf; - } else { + } else { /* Make a copy of the rec. */ ptr = mem_heap_alloc(tuple->heap, offset_size); } @@ -392,13 +382,12 @@ ib_read_tuple( /* Fetch and copy any externally stored column. */ if (rec_offs_nth_extern(offsets, i)) { - ulint zip_size; - - zip_size = dict_table_zip_size(index->table); + const page_size_t page_size( + dict_table_page_size(index->table)); data = btr_rec_copy_externally_stored_field( - copy, offsets, zip_size, i, &len, - tuple->heap, NULL); + copy, offsets, page_size, i, &len, + tuple->heap); ut_a(len != UNIV_SQL_NULL); } @@ -409,7 +398,7 @@ ib_read_tuple( /*****************************************************************//** Create an InnoDB key tuple. -@return tuple instance created, or NULL */ +@return tuple instance created, or NULL */ static ib_tpl_t ib_key_tuple_new_low( @@ -462,7 +451,7 @@ ib_key_tuple_new_low( /*****************************************************************//** Create an InnoDB key tuple. -@return tuple instance created, or NULL */ +@return tuple instance created, or NULL */ static ib_tpl_t ib_key_tuple_new( @@ -483,7 +472,7 @@ ib_key_tuple_new( /*****************************************************************//** Create an InnoDB row tuple. -@return tuple instance, or NULL */ +@return tuple instance, or NULL */ static ib_tpl_t ib_row_tuple_new_low( @@ -515,7 +504,7 @@ ib_row_tuple_new_low( /*****************************************************************//** Create an InnoDB row tuple. -@return tuple instance, or NULL */ +@return tuple instance, or NULL */ static ib_tpl_t ib_row_tuple_new( @@ -536,8 +525,7 @@ ib_row_tuple_new( /*****************************************************************//** Begin a transaction. -@return innobase txn handle */ -UNIV_INTERN +@return innobase txn handle */ ib_err_t ib_trx_start( /*=========*/ @@ -558,7 +546,7 @@ ib_trx_start( trx->api_auto_commit = auto_commit; trx->read_write = read_write; - trx_start_if_not_started(trx); + trx_start_if_not_started(trx, read_write); trx->isolation_level = ib_trx_level; @@ -572,8 +560,7 @@ ib_trx_start( /*****************************************************************//** Begin a transaction. This will allocate a new transaction handle. put the transaction in the active state. -@return innobase txn handle */ -UNIV_INTERN +@return innobase txn handle */ ib_trx_t ib_trx_begin( /*=========*/ @@ -595,11 +582,9 @@ ib_trx_begin( return(static_cast(trx)); } - /*****************************************************************//** Check if transaction is read_only @return transaction read_only status */ -UNIV_INTERN ib_u32_t ib_trx_read_only( /*=============*/ @@ -609,25 +594,9 @@ ib_trx_read_only( return(trx->read_only); } - -/*****************************************************************//** -Get the transaction's state. -@return transaction state */ -UNIV_INTERN -ib_trx_state_t -ib_trx_state( -/*=========*/ - ib_trx_t ib_trx) /*!< in: trx handle */ -{ - trx_t* trx = (trx_t*) ib_trx; - - return((ib_trx_state_t) trx->state); -} - /*****************************************************************//** Get a trx start time. 
-@return trx start_time */ -UNIV_INTERN +@return trx start_time */ ib_u64_t ib_trx_get_start_time( /*==================*/ @@ -638,8 +607,7 @@ ib_trx_get_start_time( } /*****************************************************************//** Release the resources of the transaction. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_trx_release( /*===========*/ @@ -656,17 +624,17 @@ ib_trx_release( /*****************************************************************//** Commit a transaction. This function will also release the schema latches too. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_trx_commit( /*==========*/ ib_trx_t ib_trx) /*!< in: trx handle */ { ib_err_t err = DB_SUCCESS; - trx_t* trx = (trx_t*) ib_trx; + trx_t* trx = reinterpret_cast(ib_trx); + + if (!trx_is_started(trx)) { - if (trx->state == TRX_STATE_NOT_STARTED) { return(err); } @@ -678,8 +646,7 @@ ib_trx_commit( /*****************************************************************//** Rollback a transaction. This function will also release the schema latches too. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_trx_rollback( /*============*/ @@ -696,7 +663,7 @@ ib_trx_rollback( return(err); } -#ifdef __WIN__ +#ifdef _WIN32 /*****************************************************************//** Convert a string to lower case. */ static @@ -710,7 +677,7 @@ ib_to_lower_case( ++ptr; } } -#endif /* __WIN__ */ +#endif /* _WIN32 */ /*****************************************************************//** Normalizes a table name string. A normalized name consists of the @@ -762,7 +729,7 @@ ib_normalize_table_name( ut_strlen(name) + 1 - (db_name - name)); norm_name[table_name - db_name - 1] = '/'; -#ifdef __WIN__ +#ifdef _WIN32 ib_to_lower_case(norm_name); #endif } else { @@ -770,57 +737,9 @@ ib_normalize_table_name( } } -/*****************************************************************//** -Check whether the table name conforms to our requirements. Currently -we only do a simple check for the presence of a '/'. -@return DB_SUCCESS or err code */ -UNIV_INTERN -ib_err_t -ib_table_name_check( -/*================*/ - const char* name) /*!< in: table name to check */ -{ - const char* slash = NULL; - ulint len = ut_strlen(name); - - if (len < 2 - || *name == '/' - || name[len - 1] == '/' - || (name[0] == '.' && name[1] == '/') - || (name[0] == '.' && name[1] == '.' && name[2] == '/')) { - - return(DB_DATA_MISMATCH); - } - - for ( ; *name; ++name) { -#ifdef __WIN__ - /* Check for reserved characters in DOS filenames. */ - switch (*name) { - case ':': - case '|': - case '"': - case '*': - case '<': - case '>': - return(DB_DATA_MISMATCH); - } -#endif /* __WIN__ */ - if (*name == '/') { - if (slash) { - return(DB_DATA_MISMATCH); - } - slash = name; - } - } - - return(slash ? DB_SUCCESS : DB_DATA_MISMATCH); -} - - - /*****************************************************************//** Get a table id. The caller must have acquired the dictionary mutex. -@return DB_SUCCESS if found */ +@return DB_SUCCESS if found */ static ib_err_t ib_table_get_id_low( @@ -846,7 +765,7 @@ ib_table_get_id_low( /*****************************************************************//** Create an internal cursor instance. 
-@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ static ib_err_t ib_create_cursor( @@ -918,7 +837,7 @@ ib_create_cursor( /*****************************************************************//** Create an internal cursor instance, and set prebuilt->index to index with supplied index_id. -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ static ib_err_t ib_create_cursor_with_index_id( @@ -943,8 +862,7 @@ ib_create_cursor_with_index_id( /*****************************************************************//** Open an InnoDB table and return a cursor handle to it. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_open_table_using_id( /*==========================*/ @@ -955,12 +873,10 @@ ib_cursor_open_table_using_id( { ib_err_t err; dict_table_t* table; + const ib_bool_t locked + = ib_trx && ib_schema_lock_is_exclusive(ib_trx); - if (ib_trx == NULL || !ib_schema_lock_is_exclusive(ib_trx)) { - table = ib_open_table_by_id(table_id, FALSE); - } else { - table = ib_open_table_by_id(table_id, TRUE); - } + table = ib_open_table_by_id(table_id, locked); if (table == NULL) { @@ -973,59 +889,9 @@ ib_cursor_open_table_using_id( return(err); } -/*****************************************************************//** -Open an InnoDB index and return a cursor handle to it. -@return DB_SUCCESS or err code */ -UNIV_INTERN -ib_err_t -ib_cursor_open_index_using_id( -/*==========================*/ - ib_id_u64_t index_id, /*!< in: index id of index to open */ - ib_trx_t ib_trx, /*!< in: Current transaction handle - can be NULL */ - ib_crsr_t* ib_crsr) /*!< out: InnoDB cursor */ -{ - ib_err_t err; - dict_table_t* table; - ulint table_id = (ulint)( index_id >> 32); - - if (ib_trx == NULL || !ib_schema_lock_is_exclusive(ib_trx)) { - table = ib_open_table_by_id(table_id, FALSE); - } else { - table = ib_open_table_by_id(table_id, TRUE); - } - - if (table == NULL) { - - return(DB_TABLE_NOT_FOUND); - } - - /* We only return the lower 32 bits of the dulint. */ - err = ib_create_cursor_with_index_id( - ib_crsr, table, index_id, (trx_t*) ib_trx); - - if (ib_crsr != NULL) { - const ib_cursor_t* cursor; - - cursor = *(ib_cursor_t**) ib_crsr; - - if (cursor->prebuilt->index == NULL) { - ib_err_t crsr_err; - - crsr_err = ib_cursor_close(*ib_crsr); - ut_a(crsr_err == DB_SUCCESS); - - *ib_crsr = NULL; - } - } - - return(err); -} - /*****************************************************************//** Open an InnoDB secondary index cursor and return a cursor handle to it. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_open_index_using_name( /*============================*/ @@ -1092,8 +958,7 @@ ib_cursor_open_index_using_name( /*****************************************************************//** Open an InnoDB table and return a cursor handle to it. 
-@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_open_table( /*=================*/ @@ -1106,22 +971,22 @@ ib_cursor_open_table( dict_table_t* table; char* normalized_name; - normalized_name = static_cast(mem_alloc(ut_strlen(name) + 1)); + normalized_name = static_cast(ut_malloc_nokey(ut_strlen(name) + + 1)); ib_normalize_table_name(normalized_name, name); if (ib_trx != NULL) { - if (!ib_schema_lock_is_exclusive(ib_trx)) { - table = (dict_table_t*)ib_open_table_by_name( - normalized_name); + if (!ib_schema_lock_is_exclusive(ib_trx)) { + table = ib_open_table_by_name(normalized_name); } else { /* NOTE: We do not acquire MySQL metadata lock */ table = ib_lookup_table_by_name(normalized_name); } } else { - table = (dict_table_t*)ib_open_table_by_name(normalized_name); + table = ib_open_table_by_name(normalized_name); } - mem_free(normalized_name); + ut_free(normalized_name); normalized_name = NULL; /* It can happen that another thread has created the table but @@ -1156,23 +1021,9 @@ ib_qry_proc_free( memset(q_proc, 0x0, sizeof(*q_proc)); } -/*****************************************************************//** -set a cursor trx to NULL */ -UNIV_INTERN -void -ib_cursor_clear_trx( -/*================*/ - ib_crsr_t ib_crsr) /*!< in/out: InnoDB cursor */ -{ - ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr; - - cursor->prebuilt->trx = NULL; -} - /*****************************************************************//** Reset the cursor. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_reset( /*============*/ @@ -1198,7 +1049,7 @@ ib_cursor_reset( /*****************************************************************//** update the cursor with new transactions and also reset the cursor -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_new_trx( /*==============*/ @@ -1217,16 +1068,16 @@ ib_cursor_new_trx( trx_assign_read_view(prebuilt->trx); - ib_qry_proc_free(&cursor->q_proc); + ib_qry_proc_free(&cursor->q_proc); - mem_heap_empty(cursor->query_heap); + mem_heap_empty(cursor->query_heap); return(err); } /*****************************************************************//** Commit the transaction in a cursor -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_commit_trx( /*=================*/ @@ -1247,8 +1098,7 @@ ib_cursor_commit_trx( /*****************************************************************//** Close an InnoDB table and free the cursor. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_close( /*============*/ @@ -1285,8 +1135,7 @@ ib_cursor_close( /*****************************************************************//** Close the table, decrement n_ref_count count. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_close_table( /*==================*/ @@ -1303,7 +1152,7 @@ ib_cursor_close_table( } /**********************************************************************//** Run the insert query and do error handling. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ UNIV_INLINE ib_err_t ib_insert_row_with_lock_retry( @@ -1344,7 +1193,7 @@ ib_insert_row_with_lock_retry( /*****************************************************************//** Write a row. 
-@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ static ib_err_t ib_execute_insert_query_graph( @@ -1397,7 +1246,7 @@ ib_insert_query_graph_create( ib_qry_node_t* node = &q_proc->node; trx_t* trx = cursor->prebuilt->trx; - ut_a(trx->state != TRX_STATE_NOT_STARTED); + ut_a(trx_is_started(trx)); if (node->ins == NULL) { dtuple_t* row; @@ -1426,8 +1275,7 @@ ib_insert_query_graph_create( /*****************************************************************//** Insert a row to a table. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_insert_row( /*=================*/ @@ -1507,7 +1355,7 @@ ib_cursor_insert_row( /*********************************************************************//** Gets pointer to a prebuilt update vector used in updates. -@return update vector */ +@return update vector */ UNIV_INLINE upd_t* ib_update_vector_create( @@ -1521,7 +1369,7 @@ ib_update_vector_create( ib_qry_grph_t* grph = &q_proc->grph; ib_qry_node_t* node = &q_proc->node; - ut_a(trx->state != TRX_STATE_NOT_STARTED); + ut_a(trx_is_started(trx)); if (node->upd == NULL) { node->upd = static_cast( @@ -1572,7 +1420,7 @@ ib_update_col( /**********************************************************************//** Checks which fields have changed in a row and stores the new data to an update vector. -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ static ib_err_t ib_calc_diff( @@ -1639,7 +1487,7 @@ ib_calc_diff( /**********************************************************************//** Run the update query and do error handling. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ UNIV_INLINE ib_err_t ib_update_row_with_lock_retry( @@ -1687,7 +1535,7 @@ ib_update_row_with_lock_retry( /*********************************************************************//** Does an update or delete of a row. -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ UNIV_INLINE ib_err_t ib_execute_update_query_graph( @@ -1704,7 +1552,7 @@ ib_execute_update_query_graph( ib_qry_proc_t* q_proc = &cursor->q_proc; /* The transaction must be running. */ - ut_a(trx->state != TRX_STATE_NOT_STARTED); + ut_a(trx_is_started(trx)); node = q_proc->node.upd; @@ -1755,8 +1603,7 @@ ib_execute_update_query_graph( /*****************************************************************//** Update a row in a table. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_update_row( /*=================*/ @@ -1773,9 +1620,9 @@ ib_cursor_update_row( const ib_tuple_t*new_tuple = (const ib_tuple_t*) ib_new_tpl; if (dict_index_is_clust(prebuilt->index)) { - pcur = &cursor->prebuilt->pcur; + pcur = cursor->prebuilt->pcur; } else if (prebuilt->need_to_access_clustered) { - pcur = &cursor->prebuilt->clust_pcur; + pcur = cursor->prebuilt->clust_pcur; } else { return(DB_ERROR); } @@ -1801,7 +1648,7 @@ ib_cursor_update_row( /**********************************************************************//** Build the update query graph to delete a row from an index. -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ static ib_err_t ib_delete_row( @@ -1868,8 +1715,7 @@ ib_delete_row( /*****************************************************************//** Delete a row in a table. 
-@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_delete_row( /*=================*/ @@ -1886,12 +1732,12 @@ ib_cursor_delete_row( /* Check whether this is a secondary index cursor */ if (index != prebuilt->index) { if (prebuilt->need_to_access_clustered) { - pcur = &prebuilt->clust_pcur; + pcur = prebuilt->clust_pcur; } else { return(DB_ERROR); } } else { - pcur = &prebuilt->pcur; + pcur = prebuilt->pcur; } if (ib_btr_cursor_is_positioned(pcur)) { @@ -1943,8 +1789,7 @@ ib_cursor_delete_row( /*****************************************************************//** Read current row. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_read_row( /*===============*/ @@ -1957,7 +1802,7 @@ ib_cursor_read_row( ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl; ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr; - ut_a(cursor->prebuilt->trx->state != TRX_STATE_NOT_STARTED); + ut_a(trx_is_started(cursor->prebuilt->trx)); /* When searching with IB_EXACT_MATCH set, row_search_for_mysql() will not position the persistent cursor but will copy the record @@ -1971,9 +1816,9 @@ ib_cursor_read_row( if (prebuilt->need_to_access_clustered && tuple->type == TPL_TYPE_ROW) { - pcur = &prebuilt->clust_pcur; + pcur = prebuilt->clust_pcur; } else { - pcur = &prebuilt->pcur; + pcur = prebuilt->pcur; } if (pcur == NULL) { @@ -2015,7 +1860,7 @@ ib_cursor_read_row( /*****************************************************************//** Move cursor to the first record in the table. -@return DB_SUCCESS or err code */ +@return DB_SUCCESS or err code */ UNIV_INLINE ib_err_t ib_cursor_position( @@ -2027,24 +1872,23 @@ ib_cursor_position( row_prebuilt_t* prebuilt = cursor->prebuilt; unsigned char* buf; - buf = static_cast(mem_alloc(UNIV_PAGE_SIZE)); + buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); /* We want to position at one of the ends, row_search_for_mysql() uses the search_tuple fields to work out what to do. */ dtuple_set_n_fields(prebuilt->search_tuple, 0); err = static_cast(row_search_for_mysql( - buf, mode, prebuilt, 0, 0)); + buf, static_cast(mode), prebuilt, 0, 0)); - mem_free(buf); + ut_free(buf); return(err); } /*****************************************************************//** Move cursor to the first record in the table. -@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_first( /*============*/ @@ -2055,24 +1899,9 @@ ib_cursor_first( return(ib_cursor_position(cursor, IB_CUR_G)); } -/*****************************************************************//** -Move cursor to the last record in the table. -@return DB_SUCCESS or err code */ -UNIV_INTERN -ib_err_t -ib_cursor_last( -/*===========*/ - ib_crsr_t ib_crsr) /*!< in: InnoDB cursor instance */ -{ - ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr; - - return(ib_cursor_position(cursor, IB_CUR_L)); -} - /*****************************************************************//** Move cursor to the next user record in the table. @return DB_SUCCESS or err code */ -UNIV_INTERN ib_err_t ib_cursor_next( /*===========*/ @@ -2094,8 +1923,7 @@ ib_cursor_next( /*****************************************************************//** Search for key. 
-@return DB_SUCCESS or err code */ -UNIV_INTERN +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_moveto( /*=============*/ @@ -2133,19 +1961,19 @@ ib_cursor_moveto( prebuilt->innodb_api_rec = NULL; - buf = static_cast(mem_alloc(UNIV_PAGE_SIZE)); + buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); err = static_cast(row_search_for_mysql( - buf, ib_srch_mode, prebuilt, cursor->match_mode, 0)); + buf, static_cast(ib_srch_mode), prebuilt, + cursor->match_mode, 0)); - mem_free(buf); + ut_free(buf); return(err); } /*****************************************************************//** Set the cursor search mode. */ -UNIV_INTERN void ib_cursor_set_match_mode( /*=====================*/ @@ -2159,7 +1987,7 @@ ib_cursor_set_match_mode( /*****************************************************************//** Get the dfield instance for the column in the tuple. -@return dfield instance in tuple */ +@return dfield instance in tuple */ UNIV_INLINE dfield_t* ib_col_get_dfield( @@ -2176,7 +2004,7 @@ ib_col_get_dfield( /*****************************************************************//** Predicate to check whether a column type contains variable length data. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ UNIV_INLINE ib_err_t ib_col_is_capped( @@ -2189,14 +2017,14 @@ ib_col_is_capped( || dtype_get_mtype(dtype) == DATA_MYSQL || dtype_get_mtype(dtype) == DATA_VARMYSQL || dtype_get_mtype(dtype) == DATA_FIXBINARY - || dtype_get_mtype(dtype) == DATA_BINARY) + || dtype_get_mtype(dtype) == DATA_BINARY + || dtype_get_mtype(dtype) == DATA_POINT) && dtype_get_len(dtype) > 0)); } /*****************************************************************//** Set a column of the tuple. Make a copy using the tuple's heap. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_col_set_value( /*=============*/ @@ -2287,24 +2115,19 @@ ib_col_set_value( ut_error; break; - case DATA_CHAR: { - ulint pad_char = ULINT_UNDEFINED; - - pad_char = dtype_get_pad_char( - dtype_get_mtype(dtype), dtype_get_prtype(dtype)); - - ut_a(pad_char != ULINT_UNDEFINED); - - memset((byte*) dst + len, - static_cast(pad_char), - static_cast(col_len - len)); - + case DATA_CHAR: memcpy(dst, src, len); + memset((byte*) dst + len, 0x20, col_len - len); + len = col_len; + break; - len = static_cast(col_len); + case DATA_POINT: + memcpy(dst, src, len); break; - } + case DATA_BLOB: + case DATA_VAR_POINT: + case DATA_GEOMETRY: case DATA_BINARY: case DATA_DECIMAL: case DATA_VARCHAR: @@ -2403,8 +2226,7 @@ ib_col_set_value( /*****************************************************************//** Get the size of the data available in a column of the tuple. -@return bytes avail or IB_SQL_NULL */ -UNIV_INTERN +@return bytes avail or IB_SQL_NULL */ ib_ulint_t ib_col_get_len( /*===========*/ @@ -2425,7 +2247,7 @@ ib_col_get_len( /*****************************************************************//** Copy a column value from the tuple. -@return bytes copied or IB_SQL_NULL */ +@return bytes copied or IB_SQL_NULL */ UNIV_INLINE ib_ulint_t ib_col_copy_value_low( @@ -2452,7 +2274,7 @@ ib_col_copy_value_low( switch (dtype_get_mtype(dfield_get_type(dfield))) { case DATA_INT: { ibool usign; - ullint ret; + uintmax_t ret; ut_a(data_len == len); @@ -2521,8 +2343,7 @@ ib_col_copy_value_low( /*****************************************************************//** Copy a column value from the tuple. 
-@return bytes copied or IB_SQL_NULL */ -UNIV_INTERN +@return bytes copied or IB_SQL_NULL */ ib_ulint_t ib_col_copy_value( /*==============*/ @@ -2536,7 +2357,7 @@ ib_col_copy_value( /*****************************************************************//** Get the InnoDB column attribute from the internal column precise type. -@return precise type in api format */ +@return precise type in api format */ UNIV_INLINE ib_col_attr_t ib_col_get_attr( @@ -2558,8 +2379,7 @@ ib_col_get_attr( /*****************************************************************//** Get a column name from the tuple. -@return name of the column */ -UNIV_INTERN +@return name of the column */ const char* ib_col_get_name( /*============*/ @@ -2579,8 +2399,7 @@ ib_col_get_name( /*****************************************************************//** Get an index field name from the cursor. -@return name of the field */ -UNIV_INTERN +@return name of the field */ const char* ib_get_idx_field_name( /*==================*/ @@ -2604,7 +2423,7 @@ ib_get_idx_field_name( /*****************************************************************//** Get a column type, length and attributes from the tuple. -@return len of column data */ +@return len of column data */ UNIV_INLINE ib_ulint_t ib_col_get_meta_low( @@ -2667,8 +2486,7 @@ ib_tuple_check_int( /*************************************************************//** Read a signed int 8 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i8( /*=============*/ @@ -2689,8 +2507,7 @@ ib_tuple_read_i8( /*************************************************************//** Read an unsigned int 8 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u8( /*=============*/ @@ -2711,8 +2528,7 @@ ib_tuple_read_u8( /*************************************************************//** Read a signed int 16 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i16( /*==============*/ @@ -2733,8 +2549,7 @@ ib_tuple_read_i16( /*************************************************************//** Read an unsigned int 16 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u16( /*==============*/ @@ -2755,8 +2570,7 @@ ib_tuple_read_u16( /*************************************************************//** Read a signed int 32 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i32( /*==============*/ @@ -2777,8 +2591,7 @@ ib_tuple_read_i32( /*************************************************************//** Read an unsigned int 32 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u32( /*==============*/ @@ -2799,8 +2612,7 @@ ib_tuple_read_u32( /*************************************************************//** Read a signed int 64 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i64( /*==============*/ @@ -2821,8 +2633,7 @@ ib_tuple_read_i64( /*************************************************************//** Read an unsigned int 64 bit column from an InnoDB tuple. 
-@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u64( /*==============*/ @@ -2843,8 +2654,7 @@ ib_tuple_read_u64( /*****************************************************************//** Get a column value pointer from the tuple. -@return NULL or pointer to buffer */ -UNIV_INTERN +@return NULL or pointer to buffer */ const void* ib_col_get_value( /*=============*/ @@ -2866,8 +2676,7 @@ ib_col_get_value( /*****************************************************************//** Get a column type, length and attributes from the tuple. -@return len of column data */ -UNIV_INTERN +@return len of column data */ ib_ulint_t ib_col_get_meta( /*============*/ @@ -2880,8 +2689,7 @@ ib_col_get_meta( /*****************************************************************//** "Clear" or reset an InnoDB tuple. We free the heap and recreate the tuple. -@return new tuple, or NULL */ -UNIV_INTERN +@return new tuple, or NULL */ ib_tpl_t ib_tuple_clear( /*============*/ @@ -2909,8 +2717,7 @@ ib_tuple_clear( Create a new cluster key search tuple and copy the contents of the secondary index key tuple columns that refer to the cluster index record to the cluster key. It does a deep copy of the column data. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_tuple_get_cluster_key( /*=====================*/ @@ -2981,65 +2788,9 @@ ib_tuple_get_cluster_key( return(err); } -/*****************************************************************//** -Copy the contents of source tuple to destination tuple. The tuples -must be of the same type and belong to the same table/index. -@return DB_SUCCESS or error code */ -UNIV_INTERN -ib_err_t -ib_tuple_copy( -/*==========*/ - ib_tpl_t ib_dst_tpl, /*!< in: destination tuple */ - const ib_tpl_t ib_src_tpl) /*!< in: source tuple */ -{ - ulint i; - ulint n_fields; - ib_err_t err = DB_SUCCESS; - const ib_tuple_t*src_tuple = (const ib_tuple_t*) ib_src_tpl; - ib_tuple_t* dst_tuple = (ib_tuple_t*) ib_dst_tpl; - - /* Make sure src and dst are not the same. */ - ut_a(src_tuple != dst_tuple); - - /* Make sure they are the same type and refer to the same index. */ - if (src_tuple->type != dst_tuple->type - || src_tuple->index != dst_tuple->index) { - - return(DB_DATA_MISMATCH); - } - - n_fields = dtuple_get_n_fields(src_tuple->ptr); - ut_ad(n_fields == dtuple_get_n_fields(dst_tuple->ptr)); - - /* Do a deep copy of the data fields. */ - for (i = 0; i < n_fields; ++i) { - dfield_t* src_field; - dfield_t* dst_field; - - src_field = dtuple_get_nth_field(src_tuple->ptr, i); - dst_field = dtuple_get_nth_field(dst_tuple->ptr, i); - - if (!dfield_is_null(src_field)) { - UNIV_MEM_ASSERT_RW(src_field->data, src_field->len); - - dst_field->data = mem_heap_dup( - dst_tuple->heap, - src_field->data, - src_field->len); - - dst_field->len = src_field->len; - } else { - dfield_set_null(dst_field); - } - } - - return(err); -} - /*****************************************************************//** Create an InnoDB tuple used for index/table search. -@return own: Tuple for current index */ -UNIV_INTERN +@return own: Tuple for current index */ ib_tpl_t ib_sec_search_tuple_create( /*=======================*/ @@ -3055,8 +2806,7 @@ ib_sec_search_tuple_create( /*****************************************************************//** Create an InnoDB tuple used for index/table search. 
-@return own: Tuple for current index */ -UNIV_INTERN +@return own: Tuple for current index */ ib_tpl_t ib_sec_read_tuple_create( /*=====================*/ @@ -3072,8 +2822,7 @@ ib_sec_read_tuple_create( /*****************************************************************//** Create an InnoDB tuple used for table key operations. -@return own: Tuple for current table */ -UNIV_INTERN +@return own: Tuple for current table */ ib_tpl_t ib_clust_search_tuple_create( /*=========================*/ @@ -3091,8 +2840,7 @@ ib_clust_search_tuple_create( /*****************************************************************//** Create an InnoDB tuple for table row operations. -@return own: Tuple for current table */ -UNIV_INTERN +@return own: Tuple for current table */ ib_tpl_t ib_clust_read_tuple_create( /*=======================*/ @@ -3110,8 +2858,7 @@ ib_clust_read_tuple_create( /*****************************************************************//** Return the number of user columns in the tuple definition. -@return number of user columns */ -UNIV_INTERN +@return number of user columns */ ib_ulint_t ib_tuple_get_n_user_cols( /*=====================*/ @@ -3130,8 +2877,7 @@ ib_tuple_get_n_user_cols( /*****************************************************************//** Return the number of columns in the tuple definition. -@return number of columns */ -UNIV_INTERN +@return number of columns */ ib_ulint_t ib_tuple_get_n_cols( /*================*/ @@ -3144,7 +2890,6 @@ ib_tuple_get_n_cols( /*****************************************************************//** Destroy an InnoDB tuple. */ -UNIV_INTERN void ib_tuple_delete( /*============*/ @@ -3161,8 +2906,7 @@ ib_tuple_delete( /*****************************************************************//** Get a table id. This function will acquire the dictionary mutex. -@return DB_SUCCESS if found */ -UNIV_INTERN +@return DB_SUCCESS if found */ ib_err_t ib_table_get_id( /*============*/ @@ -3180,63 +2924,9 @@ ib_table_get_id( return(err); } -/*****************************************************************//** -Get an index id. -@return DB_SUCCESS if found */ -UNIV_INTERN -ib_err_t -ib_index_get_id( -/*============*/ - const char* table_name, /*!< in: find index for this table */ - const char* index_name, /*!< in: index to find */ - ib_id_u64_t* index_id) /*!< out: index id if found */ -{ - dict_table_t* table; - char* normalized_name; - ib_err_t err = DB_TABLE_NOT_FOUND; - - *index_id = 0; - - normalized_name = static_cast( - mem_alloc(ut_strlen(table_name) + 1)); - ib_normalize_table_name(normalized_name, table_name); - - table = ib_lookup_table_by_name(normalized_name); - - mem_free(normalized_name); - normalized_name = NULL; - - if (table != NULL) { - dict_index_t* index; - - index = dict_table_get_index_on_name(table, index_name); - - if (index != NULL) { - /* We only support 32 bit table and index ids. Because - we need to pack the table id into the index id. */ - - *index_id = (table->id); - *index_id <<= 32; - *index_id |= (index->id); - - err = DB_SUCCESS; - } - } - - return(err); -} - -#ifdef __WIN__ -#define SRV_PATH_SEPARATOR '\\' -#else -#define SRV_PATH_SEPARATOR '/' -#endif - - /*****************************************************************//** Check if cursor is positioned. 
-@return IB_TRUE if positioned */ -UNIV_INTERN +@return IB_TRUE if positioned */ ib_bool_t ib_cursor_is_positioned( /*====================*/ @@ -3245,14 +2935,13 @@ ib_cursor_is_positioned( const ib_cursor_t* cursor = (const ib_cursor_t*) ib_crsr; row_prebuilt_t* prebuilt = cursor->prebuilt; - return(ib_btr_cursor_is_positioned(&prebuilt->pcur)); + return(ib_btr_cursor_is_positioned(prebuilt->pcur)); } /*****************************************************************//** Checks if the data dictionary is latched in exclusive mode. -@return TRUE if exclusive latch */ -UNIV_INTERN +@return TRUE if exclusive latch */ ib_bool_t ib_schema_lock_is_exclusive( /*========================*/ @@ -3265,8 +2954,7 @@ ib_schema_lock_is_exclusive( /*****************************************************************//** Checks if the data dictionary is latched in shared mode. -@return TRUE if shared latch */ -UNIV_INTERN +@return TRUE if shared latch */ ib_bool_t ib_schema_lock_is_shared( /*=====================*/ @@ -3279,8 +2967,7 @@ ib_schema_lock_is_shared( /*****************************************************************//** Set the Lock an InnoDB cursor/table. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_lock( /*===========*/ @@ -3298,8 +2985,7 @@ ib_cursor_lock( /*****************************************************************//** Set the Lock an InnoDB table using the table id. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_table_lock( /*==========*/ @@ -3314,7 +3000,7 @@ ib_table_lock( ib_qry_proc_t q_proc; trx_t* trx = (trx_t*) ib_trx; - ut_a(trx->state != TRX_STATE_NOT_STARTED); + ut_ad(trx_is_started(trx)); table = ib_open_table_by_id(table_id, FALSE); @@ -3348,8 +3034,7 @@ ib_table_lock( /*****************************************************************//** Unlock an InnoDB table. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_unlock( /*=============*/ @@ -3370,8 +3055,7 @@ ib_cursor_unlock( /*****************************************************************//** Set the Lock mode of the cursor. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_set_lock_mode( /*====================*/ @@ -3391,8 +3075,8 @@ ib_cursor_set_lock_mode( } if (err == DB_SUCCESS) { - prebuilt->select_lock_type = (enum lock_mode) ib_lck_mode; - ut_a(prebuilt->trx->state != TRX_STATE_NOT_STARTED); + prebuilt->select_lock_type = (lock_mode) ib_lck_mode; + ut_a(trx_is_started(prebuilt->trx)); } return(err); @@ -3400,7 +3084,6 @@ ib_cursor_set_lock_mode( /*****************************************************************//** Set need to access clustered index record. */ -UNIV_INTERN void ib_cursor_set_cluster_access( /*=========================*/ @@ -3412,129 +3095,8 @@ ib_cursor_set_cluster_access( prebuilt->need_to_access_clustered = TRUE; } -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. 
-@return DB_SUCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_i8( -/*==============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i8_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_i16( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i16_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_i32( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i32_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_i64( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i64_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_u8( -/*==============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u8_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_u16( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tupe to write to */ - int col_no, /*!< in: column number */ - ib_u16_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_u32( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u32_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - -/*****************************************************************//** -Write an integer value to a column. 
Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCCESS or error */ -UNIV_INTERN -ib_err_t -ib_tuple_write_u64( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u64_t val) /*!< in: value to write */ -{ - return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val), true)); -} - /*****************************************************************//** Inform the cursor that it's the start of an SQL statement. */ -UNIV_INTERN void ib_cursor_stmt_begin( /*=================*/ @@ -3547,8 +3109,7 @@ ib_cursor_stmt_begin( /*****************************************************************//** Write a double value to a column. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_write_double( /*==================*/ @@ -3571,8 +3132,7 @@ ib_tuple_write_double( /*************************************************************//** Read a double column value from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_double( /*=================*/ @@ -3598,8 +3158,7 @@ ib_tuple_read_double( /*****************************************************************//** Write a float value to a column. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_write_float( /*=================*/ @@ -3622,8 +3181,7 @@ ib_tuple_write_float( /*************************************************************//** Read a float value from an InnoDB tuple. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_float( /*================*/ @@ -3651,7 +3209,6 @@ ib_tuple_read_float( Truncate a table. The cursor handle will be closed and set to NULL on success. @return DB_SUCCESS or error code */ -UNIV_INTERN ib_err_t ib_cursor_truncate( /*===============*/ @@ -3704,7 +3261,6 @@ ib_cursor_truncate( /*****************************************************************//** Truncate a table. @return DB_SUCCESS or error code */ -UNIV_INTERN ib_err_t ib_table_truncate( /*==============*/ @@ -3755,8 +3311,7 @@ ib_table_truncate( } if (trunc_err == DB_SUCCESS) { - ut_a(ib_trx_state(ib_trx) == static_cast( - TRX_STATE_NOT_STARTED)); + ut_a(!trx_is_started(static_cast(ib_trx))); } else { err = ib_trx_rollback(ib_trx); ut_a(err == DB_SUCCESS); @@ -3777,36 +3332,19 @@ ib_table_truncate( return(trunc_err); } -/*****************************************************************//** -Frees a possible InnoDB trx object associated with the current THD. 
-@return 0 or error number */ -UNIV_INTERN -ib_err_t -ib_close_thd( -/*=========*/ - void* thd) /*!< in: handle to the MySQL thread of the user - whose resources should be free'd */ -{ - innobase_close_thd(static_cast(thd)); - - return(DB_SUCCESS); -} - /*****************************************************************//** Return isolation configuration set by "innodb_api_trx_level" @return trx isolation level*/ -UNIV_INTERN -ib_trx_state_t +ib_trx_level_t ib_cfg_trx_level() /*==============*/ { - return(static_cast(ib_trx_level_setting)); + return(static_cast(ib_trx_level_setting)); } /*****************************************************************//** Return configure value for background commit interval (in seconds) @return background commit interval (in seconds) */ -UNIV_INTERN ib_ulint_t ib_cfg_bk_commit_interval() /*=======================*/ @@ -3817,7 +3355,6 @@ ib_cfg_bk_commit_interval() /*****************************************************************//** Get generic configure status @return configure status*/ -UNIV_INTERN int ib_cfg_get_cfg() /*============*/ @@ -3837,11 +3374,22 @@ ib_cfg_get_cfg() return(cfg_status); } +/*****************************************************************//** +Wrapper of ut_strerr() which converts an InnoDB error number to a +human readable text message. +@return string, describing the error */ +const char* +ib_ut_strerr( +/*=========*/ + ib_err_t num) /*!< in: error number */ +{ + return(ut_strerr(num)); +} + /*****************************************************************//** Increase/decrease the memcached sync count of table to sync memcached DML with SQL DDLs. @return DB_SUCCESS or error number */ -UNIV_INTERN ib_err_t ib_cursor_set_memcached_sync( /*=========================*/ @@ -3861,21 +3409,9 @@ ib_cursor_set_memcached_sync( } if (flag) { -#ifdef HAVE_ATOMIC_BUILTINS os_atomic_increment_lint(&table->memcached_sync_count, 1); -#else - dict_mutex_enter_for_mysql(); - ++table->memcached_sync_count; - dict_mutex_exit_for_mysql(); -#endif } else { -#ifdef HAVE_ATOMIC_BUILTINS os_atomic_decrement_lint(&table->memcached_sync_count, 1); -#else - dict_mutex_enter_for_mysql(); - --table->memcached_sync_count; - dict_mutex_exit_for_mysql(); -#endif ut_a(table->memcached_sync_count >= 0); } } else { diff --git a/storage/innobase/api/api0misc.cc b/storage/innobase/api/api0misc.cc index a980d32c33f..c83eaedad6e 100644 --- a/storage/innobase/api/api0misc.cc +++ b/storage/innobase/api/api0misc.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2008, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,12 +24,7 @@ InnoDB Native API 3/20/2011 Jimmy Yang extracted from Embedded InnoDB *******************************************************/ -#include -#include - -#ifdef HAVE_UNISTD_H -#include -#endif /* HAVE_UNISTD_H */ +#include "ha_prototypes.h" #include "api0misc.h" #include "trx0roll.h" @@ -39,15 +34,10 @@ InnoDB Native API #include "pars0pars.h" #include "row0sel.h" #include "lock0lock.h" -#include "ha_prototypes.h" -#include -#include -#include /*********************************************************************//** Sets a lock on a table. 
-@return error code or DB_SUCCESS */ -UNIV_INTERN +@return error code or DB_SUCCESS */ dberr_t ib_trx_lock_table_with_retry( /*=========================*/ @@ -123,85 +113,84 @@ run_again: Handles user errors and lock waits detected by the database engine. @return TRUE if it was a lock wait and we should continue running the query thread */ -UNIV_INTERN ibool ib_handle_errors( /*=============*/ - dberr_t* new_err,/*!< out: possible new error encountered in - lock wait, or if no new error, the value - of trx->error_state at the entry of this - function */ - trx_t* trx, /*!< in: transaction */ - que_thr_t* thr, /*!< in: query thread */ - trx_savept_t* savept) /*!< in: savepoint or NULL */ + dberr_t* new_err,/*!< out: possible new error encountered in + lock wait, or if no new error, the value + of trx->error_state at the entry of this + function */ + trx_t* trx, /*!< in: transaction */ + que_thr_t* thr, /*!< in: query thread */ + trx_savept_t* savept) /*!< in: savepoint or NULL */ { - dberr_t err; + dberr_t err; handle_new_error: - err = trx->error_state; + err = trx->error_state; - ut_a(err != DB_SUCCESS); + ut_a(err != DB_SUCCESS); - trx->error_state = DB_SUCCESS; + trx->error_state = DB_SUCCESS; - switch (err) { - case DB_LOCK_WAIT_TIMEOUT: + switch (err) { + case DB_LOCK_WAIT_TIMEOUT: trx_rollback_for_mysql(trx); break; - /* fall through */ - case DB_DUPLICATE_KEY: - case DB_FOREIGN_DUPLICATE_KEY: - case DB_TOO_BIG_RECORD: - case DB_ROW_IS_REFERENCED: - case DB_NO_REFERENCED_ROW: - case DB_CANNOT_ADD_CONSTRAINT: - case DB_TOO_MANY_CONCURRENT_TRXS: - case DB_OUT_OF_FILE_SPACE: - if (savept) { - /* Roll back the latest, possibly incomplete - insertion or update */ + /* fall through */ + case DB_DUPLICATE_KEY: + case DB_FOREIGN_DUPLICATE_KEY: + case DB_TOO_BIG_RECORD: + case DB_ROW_IS_REFERENCED: + case DB_NO_REFERENCED_ROW: + case DB_CANNOT_ADD_CONSTRAINT: + case DB_TOO_MANY_CONCURRENT_TRXS: + case DB_OUT_OF_FILE_SPACE: + if (savept) { + /* Roll back the latest, possibly incomplete + insertion or update */ trx_rollback_to_savepoint(trx, savept); - } - break; - case DB_LOCK_WAIT: + } + break; + case DB_LOCK_WAIT: lock_wait_suspend_thread(thr); - if (trx->error_state != DB_SUCCESS) { - que_thr_stop_for_mysql(thr); + if (trx->error_state != DB_SUCCESS) { + que_thr_stop_for_mysql(thr); - goto handle_new_error; - } + goto handle_new_error; + } - *new_err = err; + *new_err = err; - return(TRUE); /* Operation needs to be retried. */ + return(TRUE); /* Operation needs to be retried. 
*/ - case DB_DEADLOCK: - case DB_LOCK_TABLE_FULL: - /* Roll back the whole transaction; this resolution was added - to version 3.23.43 */ + case DB_DEADLOCK: + case DB_LOCK_TABLE_FULL: + /* Roll back the whole transaction; this resolution was added + to version 3.23.43 */ - trx_rollback_for_mysql(trx); - break; + trx_rollback_for_mysql(trx); + break; - case DB_MUST_GET_MORE_FILE_SPACE: + case DB_MUST_GET_MORE_FILE_SPACE: - exit(1); + ut_error; - case DB_CORRUPTION: + case DB_CORRUPTION: case DB_FOREIGN_EXCEED_MAX_CASCADE: - break; - default: - ut_error; - } + break; + default: + ut_error; + } - if (trx->error_state != DB_SUCCESS) { - *new_err = trx->error_state; - } else { - *new_err = err; - } + if (trx->error_state != DB_SUCCESS) { + *new_err = trx->error_state; + } else { + *new_err = err; + } - trx->error_state = DB_SUCCESS; + trx->error_state = DB_SUCCESS; - return(FALSE); + return(FALSE); } diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 33ca57c9654..36ec01d7b3d 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2015, MariaDB Corporation +Copyright (c) 2014, 2016, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,14 +26,16 @@ Created 6/2/1994 Heikki Tuuri *******************************************************/ #include "btr0btr.h" +#include "ha_prototypes.h" #ifdef UNIV_NONINL #include "btr0btr.ic" #endif -#include "fsp0fsp.h" +#include "fsp0sysspace.h" #include "page0page.h" #include "page0zip.h" +#include "gis0rtree.h" #ifndef UNIV_HOTBACKUP #include "btr0cur.h" @@ -45,13 +47,15 @@ Created 6/2/1994 Heikki Tuuri #include "ibuf0ibuf.h" #include "trx0trx.h" #include "srv0mon.h" +#include "gis0geo.h" +#include "ut0new.h" +#include "dict0boot.h" /**************************************************************//** Checks if the page in the cursor can be merged with given page. If necessary, re-organize the merge_page. -@return TRUE if possible to merge. */ -UNIV_INTERN -ibool +@return true if possible to merge. */ +bool btr_can_merge_with_page( /*====================*/ btr_cur_t* cursor, /*!< in: cursor on the page to merge */ @@ -63,581 +67,19 @@ btr_can_merge_with_page( /**************************************************************//** Report that an index page is corrupted. */ -UNIV_INTERN void btr_corruption_report( /*==================*/ const buf_block_t* block, /*!< in: corrupted block */ const dict_index_t* index) /*!< in: index tree */ { - fprintf(stderr, "InnoDB: flag mismatch in space %u page %u" - " index %s of table %s\n", - (unsigned) buf_block_get_space(block), - (unsigned) buf_block_get_page_no(block), - index->name, index->table_name); - if (block->page.zip.data) { - buf_page_print(block->page.zip.data, - buf_block_get_zip_size(block), - BUF_PAGE_PRINT_NO_CRASH); - } - buf_page_print(buf_block_get_frame(block), 0, 0); + ib::error() + << "Flag mismatch in page " << block->page.id + << " index " << index->name + << " of table " << index->table->name; } #ifndef UNIV_HOTBACKUP -#ifdef UNIV_BLOB_DEBUG -# include "srv0srv.h" -# include "ut0rbt.h" - -/** TRUE when messages about index->blobs modification are enabled. */ -static ibool btr_blob_dbg_msg; - -/** Issue a message about an operation on index->blobs. 
-@param op operation -@param b the entry being subjected to the operation -@param ctx the context of the operation */ -#define btr_blob_dbg_msg_issue(op, b, ctx) \ - fprintf(stderr, op " %u:%u:%u->%u %s(%u,%u,%u)\n", \ - (b)->ref_page_no, (b)->ref_heap_no, \ - (b)->ref_field_no, (b)->blob_page_no, ctx, \ - (b)->owner, (b)->always_owner, (b)->del) - -/** Insert to index->blobs a reference to an off-page column. -@param index the index tree -@param b the reference -@param ctx context (for logging) */ -UNIV_INTERN -void -btr_blob_dbg_rbt_insert( -/*====================*/ - dict_index_t* index, /*!< in/out: index tree */ - const btr_blob_dbg_t* b, /*!< in: the reference */ - const char* ctx) /*!< in: context (for logging) */ -{ - if (btr_blob_dbg_msg) { - btr_blob_dbg_msg_issue("insert", b, ctx); - } - mutex_enter(&index->blobs_mutex); - rbt_insert(index->blobs, b, b); - mutex_exit(&index->blobs_mutex); -} - -/** Remove from index->blobs a reference to an off-page column. -@param index the index tree -@param b the reference -@param ctx context (for logging) */ -UNIV_INTERN -void -btr_blob_dbg_rbt_delete( -/*====================*/ - dict_index_t* index, /*!< in/out: index tree */ - const btr_blob_dbg_t* b, /*!< in: the reference */ - const char* ctx) /*!< in: context (for logging) */ -{ - if (btr_blob_dbg_msg) { - btr_blob_dbg_msg_issue("delete", b, ctx); - } - mutex_enter(&index->blobs_mutex); - ut_a(rbt_delete(index->blobs, b)); - mutex_exit(&index->blobs_mutex); -} - -/**************************************************************//** -Comparator for items (btr_blob_dbg_t) in index->blobs. -The key in index->blobs is (ref_page_no, ref_heap_no, ref_field_no). -@return negative, 0 or positive if *a<*b, *a=*b, *a>*b */ -static -int -btr_blob_dbg_cmp( -/*=============*/ - const void* a, /*!< in: first btr_blob_dbg_t to compare */ - const void* b) /*!< in: second btr_blob_dbg_t to compare */ -{ - const btr_blob_dbg_t* aa = static_cast(a); - const btr_blob_dbg_t* bb = static_cast(b); - - ut_ad(aa != NULL); - ut_ad(bb != NULL); - - if (aa->ref_page_no != bb->ref_page_no) { - return(aa->ref_page_no < bb->ref_page_no ? -1 : 1); - } - if (aa->ref_heap_no != bb->ref_heap_no) { - return(aa->ref_heap_no < bb->ref_heap_no ? -1 : 1); - } - if (aa->ref_field_no != bb->ref_field_no) { - return(aa->ref_field_no < bb->ref_field_no ? -1 : 1); - } - return(0); -} - -/**************************************************************//** -Add a reference to an off-page column to the index->blobs map. */ -UNIV_INTERN -void -btr_blob_dbg_add_blob( -/*==================*/ - const rec_t* rec, /*!< in: clustered index record */ - ulint field_no, /*!< in: off-page column number */ - ulint page_no, /*!< in: start page of the column */ - dict_index_t* index, /*!< in/out: index tree */ - const char* ctx) /*!< in: context (for logging) */ -{ - btr_blob_dbg_t b; - const page_t* page = page_align(rec); - - ut_a(index->blobs); - - b.blob_page_no = page_no; - b.ref_page_no = page_get_page_no(page); - b.ref_heap_no = page_rec_get_heap_no(rec); - b.ref_field_no = field_no; - ut_a(b.ref_field_no >= index->n_uniq); - b.always_owner = b.owner = TRUE; - b.del = FALSE; - ut_a(!rec_get_deleted_flag(rec, page_is_comp(page))); - btr_blob_dbg_rbt_insert(index, &b, ctx); -} - -/**************************************************************//** -Add to index->blobs any references to off-page columns from a record. 
-@return number of references added */ -UNIV_INTERN -ulint -btr_blob_dbg_add_rec( -/*=================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: offsets */ - const char* ctx) /*!< in: context (for logging) */ -{ - ulint count = 0; - ulint i; - btr_blob_dbg_t b; - ibool del; - - ut_ad(rec_offs_validate(rec, index, offsets)); - - if (!rec_offs_any_extern(offsets)) { - return(0); - } - - b.ref_page_no = page_get_page_no(page_align(rec)); - b.ref_heap_no = page_rec_get_heap_no(rec); - del = (rec_get_deleted_flag(rec, rec_offs_comp(offsets)) != 0); - - for (i = 0; i < rec_offs_n_fields(offsets); i++) { - if (rec_offs_nth_extern(offsets, i)) { - ulint len; - const byte* field_ref = rec_get_nth_field( - rec, offsets, i, &len); - - ut_a(len != UNIV_SQL_NULL); - ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); - field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; - - if (!memcmp(field_ref, field_ref_zero, - BTR_EXTERN_FIELD_REF_SIZE)) { - /* the column has not been stored yet */ - continue; - } - - b.ref_field_no = i; - b.blob_page_no = mach_read_from_4( - field_ref + BTR_EXTERN_PAGE_NO); - ut_a(b.ref_field_no >= index->n_uniq); - b.always_owner = b.owner - = !(field_ref[BTR_EXTERN_LEN] - & BTR_EXTERN_OWNER_FLAG); - b.del = del; - - btr_blob_dbg_rbt_insert(index, &b, ctx); - count++; - } - } - - return(count); -} - -/**************************************************************//** -Display the references to off-page columns. -This function is to be called from a debugger, -for example when a breakpoint on ut_dbg_assertion_failed is hit. */ -UNIV_INTERN -void -btr_blob_dbg_print( -/*===============*/ - const dict_index_t* index) /*!< in: index tree */ -{ - const ib_rbt_node_t* node; - - if (!index->blobs) { - return; - } - - /* We intentionally do not acquire index->blobs_mutex here. - This function is to be called from a debugger, and the caller - should make sure that the index->blobs_mutex is held. */ - - for (node = rbt_first(index->blobs); - node != NULL; node = rbt_next(index->blobs, node)) { - const btr_blob_dbg_t* b - = rbt_value(btr_blob_dbg_t, node); - fprintf(stderr, "%u:%u:%u->%u%s%s%s\n", - b->ref_page_no, b->ref_heap_no, b->ref_field_no, - b->blob_page_no, - b->owner ? "" : "(disowned)", - b->always_owner ? "" : "(has disowned)", - b->del ? "(deleted)" : ""); - } -} - -/**************************************************************//** -Remove from index->blobs any references to off-page columns from a record. -@return number of references removed */ -UNIV_INTERN -ulint -btr_blob_dbg_remove_rec( -/*====================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: offsets */ - const char* ctx) /*!< in: context (for logging) */ -{ - ulint i; - ulint count = 0; - btr_blob_dbg_t b; - - ut_ad(rec_offs_validate(rec, index, offsets)); - - if (!rec_offs_any_extern(offsets)) { - return(0); - } - - b.ref_page_no = page_get_page_no(page_align(rec)); - b.ref_heap_no = page_rec_get_heap_no(rec); - - for (i = 0; i < rec_offs_n_fields(offsets); i++) { - if (rec_offs_nth_extern(offsets, i)) { - ulint len; - const byte* field_ref = rec_get_nth_field( - rec, offsets, i, &len); - - ut_a(len != UNIV_SQL_NULL); - ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); - field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; - - b.ref_field_no = i; - b.blob_page_no = mach_read_from_4( - field_ref + BTR_EXTERN_PAGE_NO); - - switch (b.blob_page_no) { - case 0: - /* The column has not been stored yet. 
- The BLOB pointer must be all zero. - There cannot be a BLOB starting at - page 0, because page 0 is reserved for - the tablespace header. */ - ut_a(!memcmp(field_ref, field_ref_zero, - BTR_EXTERN_FIELD_REF_SIZE)); - /* fall through */ - case FIL_NULL: - /* the column has been freed already */ - continue; - } - - btr_blob_dbg_rbt_delete(index, &b, ctx); - count++; - } - } - - return(count); -} - -/**************************************************************//** -Check that there are no references to off-page columns from or to -the given page. Invoked when freeing or clearing a page. -@return TRUE when no orphan references exist */ -UNIV_INTERN -ibool -btr_blob_dbg_is_empty( -/*==================*/ - dict_index_t* index, /*!< in: index */ - ulint page_no) /*!< in: page number */ -{ - const ib_rbt_node_t* node; - ibool success = TRUE; - - if (!index->blobs) { - return(success); - } - - mutex_enter(&index->blobs_mutex); - - for (node = rbt_first(index->blobs); - node != NULL; node = rbt_next(index->blobs, node)) { - const btr_blob_dbg_t* b - = rbt_value(btr_blob_dbg_t, node); - - if (b->ref_page_no != page_no && b->blob_page_no != page_no) { - continue; - } - - fprintf(stderr, - "InnoDB: orphan BLOB ref%s%s%s %u:%u:%u->%u\n", - b->owner ? "" : "(disowned)", - b->always_owner ? "" : "(has disowned)", - b->del ? "(deleted)" : "", - b->ref_page_no, b->ref_heap_no, b->ref_field_no, - b->blob_page_no); - - if (b->blob_page_no != page_no || b->owner || !b->del) { - success = FALSE; - } - } - - mutex_exit(&index->blobs_mutex); - return(success); -} - -/**************************************************************//** -Count and process all references to off-page columns on a page. -@return number of references processed */ -UNIV_INTERN -ulint -btr_blob_dbg_op( -/*============*/ - const page_t* page, /*!< in: B-tree leaf page */ - const rec_t* rec, /*!< in: record to start from - (NULL to process the whole page) */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx, /*!< in: context (for logging) */ - const btr_blob_dbg_op_f op) /*!< in: operation on records */ -{ - ulint count = 0; - mem_heap_t* heap = NULL; - ulint offsets_[REC_OFFS_NORMAL_SIZE]; - ulint* offsets = offsets_; - rec_offs_init(offsets_); - - ut_a(fil_page_get_type(page) == FIL_PAGE_INDEX); - ut_a(!rec || page_align(rec) == page); - - if (!index->blobs || !page_is_leaf(page) - || !dict_index_is_clust(index)) { - return(0); - } - - if (rec == NULL) { - rec = page_get_infimum_rec(page); - } - - do { - offsets = rec_get_offsets(rec, index, offsets, - ULINT_UNDEFINED, &heap); - count += op(rec, index, offsets, ctx); - rec = page_rec_get_next_const(rec); - } while (!page_rec_is_supremum(rec)); - - if (heap) { - mem_heap_free(heap); - } - - return(count); -} - -/**************************************************************//** -Count and add to index->blobs any references to off-page columns -from records on a page. -@return number of references added */ -UNIV_INTERN -ulint -btr_blob_dbg_add( -/*=============*/ - const page_t* page, /*!< in: rewritten page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ -{ - btr_blob_dbg_assert_empty(index, page_get_page_no(page)); - - return(btr_blob_dbg_op(page, NULL, index, ctx, btr_blob_dbg_add_rec)); -} - -/**************************************************************//** -Count and remove from index->blobs any references to off-page columns -from records on a page. -Used when reorganizing a page, before copying the records. 
-@return number of references removed */ -UNIV_INTERN -ulint -btr_blob_dbg_remove( -/*================*/ - const page_t* page, /*!< in: b-tree page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ -{ - ulint count; - - count = btr_blob_dbg_op(page, NULL, index, ctx, - btr_blob_dbg_remove_rec); - - /* Check that no references exist. */ - btr_blob_dbg_assert_empty(index, page_get_page_no(page)); - - return(count); -} - -/**************************************************************//** -Restore in index->blobs any references to off-page columns -Used when page reorganize fails due to compressed page overflow. */ -UNIV_INTERN -void -btr_blob_dbg_restore( -/*=================*/ - const page_t* npage, /*!< in: page that failed to compress */ - const page_t* page, /*!< in: copy of original page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ -{ - ulint removed; - ulint added; - - ut_a(page_get_page_no(npage) == page_get_page_no(page)); - ut_a(page_get_space_id(npage) == page_get_space_id(page)); - - removed = btr_blob_dbg_remove(npage, index, ctx); - added = btr_blob_dbg_add(page, index, ctx); - ut_a(added == removed); -} - -/**************************************************************//** -Modify the 'deleted' flag of a record. */ -UNIV_INTERN -void -btr_blob_dbg_set_deleted_flag( -/*==========================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ - ibool del) /*!< in: TRUE=deleted, FALSE=exists */ -{ - const ib_rbt_node_t* node; - btr_blob_dbg_t b; - btr_blob_dbg_t* c; - ulint i; - - ut_ad(rec_offs_validate(rec, index, offsets)); - ut_a(dict_index_is_clust(index)); - ut_a(del == !!del);/* must be FALSE==0 or TRUE==1 */ - - if (!rec_offs_any_extern(offsets) || !index->blobs) { - - return; - } - - b.ref_page_no = page_get_page_no(page_align(rec)); - b.ref_heap_no = page_rec_get_heap_no(rec); - - for (i = 0; i < rec_offs_n_fields(offsets); i++) { - if (rec_offs_nth_extern(offsets, i)) { - ulint len; - const byte* field_ref = rec_get_nth_field( - rec, offsets, i, &len); - - ut_a(len != UNIV_SQL_NULL); - ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); - field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; - - b.ref_field_no = i; - b.blob_page_no = mach_read_from_4( - field_ref + BTR_EXTERN_PAGE_NO); - - switch (b.blob_page_no) { - case 0: - ut_a(memcmp(field_ref, field_ref_zero, - BTR_EXTERN_FIELD_REF_SIZE)); - /* page number 0 is for the - page allocation bitmap */ - case FIL_NULL: - /* the column has been freed already */ - ut_error; - } - - mutex_enter(&index->blobs_mutex); - node = rbt_lookup(index->blobs, &b); - ut_a(node); - - c = rbt_value(btr_blob_dbg_t, node); - /* The flag should be modified. */ - c->del = del; - if (btr_blob_dbg_msg) { - b = *c; - mutex_exit(&index->blobs_mutex); - btr_blob_dbg_msg_issue("del_mk", &b, ""); - } else { - mutex_exit(&index->blobs_mutex); - } - } - } -} - -/**************************************************************//** -Change the ownership of an off-page column. 
*/ -UNIV_INTERN -void -btr_blob_dbg_owner( -/*===============*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ - ulint i, /*!< in: ith field in rec */ - ibool own) /*!< in: TRUE=owned, FALSE=disowned */ -{ - const ib_rbt_node_t* node; - btr_blob_dbg_t b; - const byte* field_ref; - ulint len; - - ut_ad(rec_offs_validate(rec, index, offsets)); - ut_a(rec_offs_nth_extern(offsets, i)); - - field_ref = rec_get_nth_field(rec, offsets, i, &len); - ut_a(len != UNIV_SQL_NULL); - ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); - field_ref += len - BTR_EXTERN_FIELD_REF_SIZE; - - b.ref_page_no = page_get_page_no(page_align(rec)); - b.ref_heap_no = page_rec_get_heap_no(rec); - b.ref_field_no = i; - b.owner = !(field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG); - b.blob_page_no = mach_read_from_4(field_ref + BTR_EXTERN_PAGE_NO); - - ut_a(b.owner == own); - - mutex_enter(&index->blobs_mutex); - node = rbt_lookup(index->blobs, &b); - /* row_ins_clust_index_entry_by_modify() invokes - btr_cur_unmark_extern_fields() also for the newly inserted - references, which are all zero bytes until the columns are stored. - The node lookup must fail if and only if that is the case. */ - ut_a(!memcmp(field_ref, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE) - == !node); - - if (node) { - btr_blob_dbg_t* c = rbt_value(btr_blob_dbg_t, node); - /* Some code sets ownership from TRUE to TRUE. - We do not allow changing ownership from FALSE to FALSE. */ - ut_a(own || c->owner); - - c->owner = own; - if (!own) { - c->always_owner = FALSE; - } - } - - mutex_exit(&index->blobs_mutex); -} -#endif /* UNIV_BLOB_DEBUG */ - /* Latching strategy of the InnoDB B-tree -------------------------------------- @@ -696,7 +138,7 @@ we allocate pages for the non-leaf levels of the tree. #ifdef UNIV_BTR_DEBUG /**************************************************************//** Checks a file segment header within a B-tree root page. -@return TRUE if valid */ +@return TRUE if valid */ static ibool btr_root_fseg_validate( @@ -715,8 +157,7 @@ btr_root_fseg_validate( /**************************************************************//** Gets the root node of a tree and x- or s-latches it. -@return root page, x- or s-latched */ -static +@return root page, x- or s-latched */ buf_block_t* btr_root_block_get( /*===============*/ @@ -725,16 +166,13 @@ btr_root_block_get( or RW_X_LATCH */ mtr_t* mtr) /*!< in: mtr */ { - ulint space; - ulint zip_size; - ulint root_page_no; - buf_block_t* block; + const ulint space = dict_index_get_space(index); + const page_id_t page_id(space, dict_index_get_page(index)); + const page_size_t page_size(dict_table_page_size(index->table)); - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); - root_page_no = dict_index_get_page(index); + buf_block_t* block = btr_block_get(page_id, page_size, mode, + index, mtr); - block = btr_block_get(space, zip_size, root_page_no, mode, (dict_index_t*)index, mtr); if (!block) { index->table->is_encrypted = TRUE; @@ -765,17 +203,19 @@ btr_root_block_get( } /**************************************************************//** -Gets the root node of a tree and x-latches it. -@return root page, x-latched */ -UNIV_INTERN +Gets the root node of a tree and sx-latches it for segment access. 
+@return root page, sx-latched */ page_t* btr_root_get( /*=========*/ const dict_index_t* index, /*!< in: index tree */ mtr_t* mtr) /*!< in: mtr */ { - buf_block_t* root = btr_root_block_get(index, RW_X_LATCH, - mtr); + /* Intended to be used for segment list access. + SX lock doesn't block reading user data by other threads. + And block the segment list access by others.*/ + buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, + mtr); if (root && root->page.encrypted == true) { root = NULL; @@ -788,8 +228,7 @@ btr_root_get( Gets the height of the B-tree (the level of the root, when the leaf level is assumed to be 0). The caller must hold an S or X latch on the index. -@return tree height (level of the root) */ -UNIV_INTERN +@return tree height (level of the root) */ ulint btr_height_get( /*===========*/ @@ -799,22 +238,23 @@ btr_height_get( ulint height=0; buf_block_t* root_block; - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_S_LOCK) - || mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); + ut_ad(srv_read_only_mode + || mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_S_LOCK + | MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); - /* S latches the page */ - root_block = btr_root_block_get(index, RW_S_LATCH, mtr); + /* S latches the page */ + root_block = btr_root_block_get(index, RW_S_LATCH, mtr); if (root_block) { height = btr_page_get_level(buf_block_get_frame(root_block), mtr); /* Release the S latch on the root page. */ - mtr_memo_release(mtr, root_block, MTR_MEMO_PAGE_S_FIX); -#ifdef UNIV_SYNC_DEBUG - sync_thread_reset_level(&root_block->lock); -#endif /* UNIV_SYNC_DEBUG */ + mtr->memo_release(root_block, MTR_MEMO_PAGE_S_FIX); + + ut_d(sync_check_unlock(&root_block->lock)); } return(height); @@ -823,7 +263,7 @@ btr_height_get( /**************************************************************//** Checks a file segment header within a B-tree root page and updates the segment header space id. -@return TRUE if valid */ +@return TRUE if valid */ static bool btr_root_fseg_adjust_on_import( @@ -856,41 +296,34 @@ btr_root_fseg_adjust_on_import( /**************************************************************//** Checks and adjusts the root node of a tree during IMPORT TABLESPACE. 
@return error code, or DB_SUCCESS */ -UNIV_INTERN dberr_t btr_root_adjust_on_import( /*======================*/ const dict_index_t* index) /*!< in: index tree */ { - dberr_t err; - mtr_t mtr; - page_t* page; - buf_block_t* block; - page_zip_des_t* page_zip; - dict_table_t* table = index->table; - ulint space_id = dict_index_get_space(index); - ulint zip_size = dict_table_zip_size(table); - ulint root_page_no = dict_index_get_page(index); + dberr_t err; + mtr_t mtr; + page_t* page; + buf_block_t* block; + page_zip_des_t* page_zip; + dict_table_t* table = index->table; + const ulint space_id = dict_index_get_space(index); + const page_id_t page_id(space_id, dict_index_get_page(index)); + const page_size_t page_size(dict_table_page_size(table)); + + DBUG_EXECUTE_IF("ib_import_trigger_corruption_3", + return(DB_CORRUPTION);); mtr_start(&mtr); mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); - DBUG_EXECUTE_IF("ib_import_trigger_corruption_3", - return(DB_CORRUPTION);); - - block = btr_block_get( - space_id, zip_size, root_page_no, RW_X_LATCH, (dict_index_t*)index, &mtr); + block = btr_block_get(page_id, page_size, RW_X_LATCH, index, &mtr); page = buf_block_get_frame(block); page_zip = buf_block_get_page_zip(block); - /* Check that this is a B-tree page and both the PREV and NEXT - pointers are FIL_NULL, because the root page does not have any - siblings. */ - if (fil_page_get_type(page) != FIL_PAGE_INDEX - || fil_page_get_prev(page) != FIL_NULL - || fil_page_get_next(page) != FIL_NULL) { + if (!page_is_root(page)) { err = DB_CORRUPTION; @@ -903,18 +336,13 @@ btr_root_adjust_on_import( if (page_is_compact_format != dict_table_is_comp(table)) { err = DB_CORRUPTION; } else { - /* Check that the table flags and the tablespace flags match. */ - ulint flags = fil_space_get_flags(table->space); - - if (flags - && flags != dict_tf_to_fsp_flags(table->flags)) { - - err = DB_CORRUPTION; - } else { - err = DB_SUCCESS; - } + ulint flags = dict_tf_to_fsp_flags(table->flags, + false); + ulint fsp_flags = fil_space_get_flags(table->space); + err = fsp_flags_are_equal(flags, fsp_flags) + ? DB_SUCCESS : DB_CORRUPTION; } } else { err = DB_SUCCESS; @@ -937,124 +365,9 @@ btr_root_adjust_on_import( return(err); } -/*************************************************************//** -Gets pointer to the previous user record in the tree. It is assumed that -the caller has appropriate latches on the page and its neighbor. 
-@return previous user record, NULL if there is none */ -UNIV_INTERN -rec_t* -btr_get_prev_user_rec( -/*==================*/ - rec_t* rec, /*!< in: record on leaf level */ - mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if - needed, also to the previous page */ -{ - page_t* page; - page_t* prev_page; - ulint prev_page_no; - - if (!page_rec_is_infimum(rec)) { - - rec_t* prev_rec = page_rec_get_prev(rec); - - if (!page_rec_is_infimum(prev_rec)) { - - return(prev_rec); - } - } - - page = page_align(rec); - prev_page_no = btr_page_get_prev(page, mtr); - - if (prev_page_no != FIL_NULL) { - - ulint space; - ulint zip_size; - buf_block_t* prev_block; - - space = page_get_space_id(page); - zip_size = fil_space_get_zip_size(space); - - prev_block = buf_page_get_with_no_latch(space, zip_size, - prev_page_no, mtr); - prev_page = buf_block_get_frame(prev_block); - /* The caller must already have a latch to the brother */ - ut_ad(mtr_memo_contains(mtr, prev_block, - MTR_MEMO_PAGE_S_FIX) - || mtr_memo_contains(mtr, prev_block, - MTR_MEMO_PAGE_X_FIX)); -#ifdef UNIV_BTR_DEBUG - ut_a(page_is_comp(prev_page) == page_is_comp(page)); - ut_a(btr_page_get_next(prev_page, mtr) - == page_get_page_no(page)); -#endif /* UNIV_BTR_DEBUG */ - - return(page_rec_get_prev(page_get_supremum_rec(prev_page))); - } - - return(NULL); -} - -/*************************************************************//** -Gets pointer to the next user record in the tree. It is assumed that the -caller has appropriate latches on the page and its neighbor. -@return next user record, NULL if there is none */ -UNIV_INTERN -rec_t* -btr_get_next_user_rec( -/*==================*/ - rec_t* rec, /*!< in: record on leaf level */ - mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if - needed, also to the next page */ -{ - page_t* page; - page_t* next_page; - ulint next_page_no; - - if (!page_rec_is_supremum(rec)) { - - rec_t* next_rec = page_rec_get_next(rec); - - if (!page_rec_is_supremum(next_rec)) { - - return(next_rec); - } - } - - page = page_align(rec); - next_page_no = btr_page_get_next(page, mtr); - - if (next_page_no != FIL_NULL) { - ulint space; - ulint zip_size; - buf_block_t* next_block; - - space = page_get_space_id(page); - zip_size = fil_space_get_zip_size(space); - - next_block = buf_page_get_with_no_latch(space, zip_size, - next_page_no, mtr); - next_page = buf_block_get_frame(next_block); - /* The caller must already have a latch to the brother */ - ut_ad(mtr_memo_contains(mtr, next_block, MTR_MEMO_PAGE_S_FIX) - || mtr_memo_contains(mtr, next_block, - MTR_MEMO_PAGE_X_FIX)); -#ifdef UNIV_BTR_DEBUG - ut_a(page_is_comp(next_page) == page_is_comp(page)); - ut_a(btr_page_get_prev(next_page, mtr) - == page_get_page_no(page)); -#endif /* UNIV_BTR_DEBUG */ - - return(page_rec_get_next(page_get_infimum_rec(next_page))); - } - - return(NULL); -} - /**************************************************************//** Creates a new index page (not the root, and also not used in page reorganization). @see btr_page_empty(). 
*/ -static void btr_page_create( /*============*/ @@ -1066,18 +379,21 @@ btr_page_create( { page_t* page = buf_block_get_frame(block); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - btr_blob_dbg_assert_empty(index, buf_block_get_page_no(block)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); if (page_zip) { - page_create_zip(block, index, level, 0, mtr); + page_create_zip(block, index, level, 0, NULL, mtr); } else { - page_create(block, mtr, dict_table_is_comp(index->table)); + page_create(block, mtr, dict_table_is_comp(index->table), + dict_index_is_spatial(index)); /* Set the level of the new index page */ btr_page_set_level(page, NULL, level, mtr); } - block->check_index_page_at_flush = TRUE; + /* For Spatial Index, initialize the Split Sequence Number */ + if (dict_index_is_spatial(index)) { + page_set_ssn_id(block, page_zip, 0, mtr); + } btr_page_set_index_id(page, page_zip, index->id, mtr); } @@ -1085,7 +401,7 @@ btr_page_create( /**************************************************************//** Allocates a new file page to be used in an ibuf tree. Takes the page from the free list of the tree, which must contain pages! -@return new allocated block, x-latched */ +@return new allocated block, x-latched */ static buf_block_t* btr_page_alloc_for_ibuf( @@ -1104,9 +420,11 @@ btr_page_alloc_for_ibuf( + PAGE_BTR_IBUF_FREE_LIST, mtr); ut_a(node_addr.page != FIL_NULL); - new_block = buf_page_get(dict_index_get_space(index), - dict_table_zip_size(index->table), - node_addr.page, RW_X_LATCH, mtr); + new_block = buf_page_get( + page_id_t(dict_index_get_space(index), node_addr.page), + dict_table_page_size(index->table), + RW_X_LATCH, mtr); + new_page = buf_block_get_frame(new_block); buf_block_dbg_add_level(new_block, SYNC_IBUF_TREE_NODE_NEW); @@ -1190,7 +508,6 @@ that the caller has made the reservation for free extents! @retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -UNIV_INTERN buf_block_t* btr_page_alloc( /*===========*/ @@ -1225,8 +542,7 @@ btr_page_alloc( /**************************************************************//** Gets the number of pages in a B-tree. 
-@return number of pages, or ULINT_UNDEFINED if the index is unavailable */ -UNIV_INTERN +@return number of pages, or ULINT_UNDEFINED if the index is unavailable */ ulint btr_get_size( /*=========*/ @@ -1235,16 +551,46 @@ btr_get_size( mtr_t* mtr) /*!< in/out: mini-transaction where index is s-latched */ { - ulint used; - if (flag == BTR_N_LEAF_PAGES) { - btr_get_size_and_reserved(index, flag, &used, mtr); - return used; - } else if (flag == BTR_TOTAL_SIZE) { - return btr_get_size_and_reserved(index, flag, &used, mtr); + fseg_header_t* seg_header; + page_t* root; + ulint n=0; + ulint dummy; + + ut_ad(srv_read_only_mode + || mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_S_LOCK) + || dict_table_is_intrinsic(index->table)); + + if (index->page == FIL_NULL + || dict_index_is_online_ddl(index) + || !index->is_committed()) { + return(ULINT_UNDEFINED); + } + + root = btr_root_get(index, mtr); + + if (root) { + if (flag == BTR_N_LEAF_PAGES) { + seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; + + fseg_n_reserved_pages(seg_header, &n, mtr); + + } else if (flag == BTR_TOTAL_SIZE) { + seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP; + + n = fseg_n_reserved_pages(seg_header, &dummy, mtr); + + seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; + + n += fseg_n_reserved_pages(seg_header, &dummy, mtr); + } else { + ut_error; + } } else { - ut_error; + n = ULINT_UNDEFINED; } - return (ULINT_UNDEFINED); + + return(n); } /**************************************************************//** @@ -1271,8 +617,9 @@ btr_get_size_and_reserved( ut_a(flag == BTR_N_LEAF_PAGES || flag == BTR_TOTAL_SIZE); - if (index->page == FIL_NULL || dict_index_is_online_ddl(index) - || *index->name == TEMP_INDEX_PREFIX) { + if (index->page == FIL_NULL + || dict_index_is_online_ddl(index) + || !index->is_committed()) { return(ULINT_UNDEFINED); } @@ -1310,7 +657,7 @@ btr_page_free_for_ibuf( { page_t* root; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); root = btr_root_get(index, mtr); flst_add_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, @@ -1323,27 +670,24 @@ btr_page_free_for_ibuf( /**************************************************************//** Frees a file page used in an index tree. Can be used also to (BLOB) -external storage pages, because the page level 0 can be given as an -argument. */ -UNIV_INTERN +external storage pages. 
*/ void btr_page_free_low( /*==============*/ dict_index_t* index, /*!< in: index tree */ buf_block_t* block, /*!< in: block to be freed, x-latched */ - ulint level, /*!< in: page level */ + ulint level, /*!< in: page level (ULINT_UNDEFINED=BLOB) */ bool blob, /*!< in: blob page */ mtr_t* mtr) /*!< in: mtr */ { fseg_header_t* seg_header; page_t* root; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); /* The page gets invalid for optimistic searches: increment the frame modify clock */ buf_block_modify_clock_inc(block); - btr_blob_dbg_assert_empty(index, buf_block_get_page_no(block)); if (blob) { ut_a(level == 0); @@ -1424,12 +768,19 @@ btr_page_free_low( root = btr_root_get(index, mtr); - if (level == 0) { + if (level == 0 || level == ULINT_UNDEFINED) { seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; } else { seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP; } +#ifdef UNIV_GIS_DEBUG + if (dict_index_is_spatial(index)) { + fprintf(stderr, "GIS_DIAG: Freed %ld\n", + (long) block->page.id.page_no()); + } +#endif + if (scrub) { /** * Reset page type so that scrub thread won't try to scrub it @@ -1439,13 +790,14 @@ btr_page_free_low( } fseg_free_page(seg_header, - buf_block_get_space(block), - buf_block_get_page_no(block), mtr); + block->page.id.space(), + block->page.id.page_no(), + level != ULINT_UNDEFINED, mtr); /* The page was marked free in the allocation bitmap, but it should remain buffer-fixed until mtr_commit(mtr) or until it is explicitly freed from the mini-transaction. */ - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); /* TODO: Discard any operations on the page from the redo log and remove the block from the flush list and the buffer pool. This would free up buffer pool earlier and reduce writes to @@ -1455,7 +807,6 @@ btr_page_free_low( /**************************************************************//** Frees a file page used in an index tree. NOTE: cannot free field external storage pages because the page must contain info on its level. */ -UNIV_INTERN void btr_page_free( /*==========*/ @@ -1466,7 +817,8 @@ btr_page_free( const page_t* page = buf_block_get_frame(block); ulint level = btr_page_get_level(page, mtr); - ut_ad(fil_page_get_type(block->frame) == FIL_PAGE_INDEX); + ut_ad(fil_page_index_page_check(block->frame)); + ut_ad(level != ULINT_UNDEFINED); btr_page_free_low(index, block, level, false, mtr); } @@ -1506,8 +858,8 @@ btr_node_ptr_set_child_page_no( } /************************************************************//** -Returns the child page of a node pointer and x-latches it. -@return child page, x-latched */ +Returns the child page of a node pointer and sx-latches it. 
+@return child page, sx-latched */ static buf_block_t* btr_node_ptr_get_child( @@ -1517,21 +869,20 @@ btr_node_ptr_get_child( const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ mtr_t* mtr) /*!< in: mtr */ { - ulint page_no; - ulint space; - ut_ad(rec_offs_validate(node_ptr, index, offsets)); - space = page_get_space_id(page_align(node_ptr)); - page_no = btr_node_ptr_get_child_page_no(node_ptr, offsets); - return(btr_block_get(space, dict_table_zip_size(index->table), - page_no, RW_X_LATCH, index, mtr)); + const page_id_t page_id( + page_get_space_id(page_align(node_ptr)), + btr_node_ptr_get_child_page_no(node_ptr, offsets)); + + return(btr_block_get(page_id, dict_table_page_size(index->table), + RW_SX_LATCH, index, mtr)); } /************************************************************//** Returns the upper level node pointer to a page. It is assumed that mtr holds -an x-latch on the tree. -@return rec_get_offsets() of the node pointer record */ +an sx-latch on the tree. +@return rec_get_offsets() of the node pointer record */ static ulint* btr_page_get_father_node_ptr_func( @@ -1541,6 +892,8 @@ btr_page_get_father_node_ptr_func( btr_cur_t* cursor, /*!< in: cursor pointing to user record, out: cursor on node pointer record, its page x-latched */ + ulint latch_mode,/*!< in: BTR_CONT_MODIFY_TREE + or BTR_CONT_SEARCH_TREE */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in: mtr */ @@ -1552,11 +905,17 @@ btr_page_get_father_node_ptr_func( ulint page_no; dict_index_t* index; - page_no = buf_block_get_page_no(btr_cur_get_block(cursor)); + ut_ad(latch_mode == BTR_CONT_MODIFY_TREE + || latch_mode == BTR_CONT_SEARCH_TREE); + + page_no = btr_cur_get_block(cursor)->page.id.page_no(); index = btr_cur_get_index(cursor); - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); + ut_ad(srv_read_only_mode + || mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); ut_ad(dict_index_get_page(index) != page_no); @@ -1566,9 +925,38 @@ btr_page_get_father_node_ptr_func( ut_a(page_rec_is_user_rec(user_rec)); tuple = dict_index_build_node_ptr(index, user_rec, 0, heap, level); - btr_cur_search_to_nth_level(index, level + 1, tuple, PAGE_CUR_LE, - BTR_CONT_MODIFY_TREE, cursor, 0, - file, line, mtr); + if (!dict_index_is_spatial(index)) { + dberr_t err = DB_SUCCESS; + if (dict_table_is_intrinsic(index->table)) { + err = btr_cur_search_to_nth_level_with_no_latch( + index, level + 1, tuple, PAGE_CUR_LE, cursor, + file, line, mtr); + } else { + err = btr_cur_search_to_nth_level( + index, level + 1, tuple, + PAGE_CUR_LE, latch_mode, cursor, 0, + file, line, mtr); + } + + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_page_get_father_node_ptr_func " + << " level: " << level + 1 + << " called from file: " + << file << " line: " << line + << " table: " << index->table->name + << " index: " << index->name; + } + } else { + /* For R-tree, only latch mode from caller would be + BTR_CONT_MODIFY_TREE */ + ut_ad(latch_mode == BTR_CONT_MODIFY_TREE); + + /* Try to avoid traverse from the root, and get the + father node from parent_path vector */ + rtr_get_father_node(index, level + 1, tuple, + NULL, cursor, page_no, mtr); + } node_ptr = btr_cur_get_rec(cursor); ut_ad(!page_rec_is_comp(node_ptr) @@ -1578,22 +966,15 @@ btr_page_get_father_node_ptr_func( if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) { rec_t* 
print_rec; - fputs("InnoDB: Dump of the child page:\n", stderr); - buf_page_print(page_align(user_rec), 0, - BUF_PAGE_PRINT_NO_CRASH); - fputs("InnoDB: Dump of the parent page:\n", stderr); - buf_page_print(page_align(node_ptr), 0, - BUF_PAGE_PRINT_NO_CRASH); - - fputs("InnoDB: Corruption of an index tree: table ", stderr); - ut_print_name(stderr, NULL, TRUE, index->table_name); - fputs(", index ", stderr); - ut_print_name(stderr, NULL, FALSE, index->name); - fprintf(stderr, ",\n" - "InnoDB: father ptr page no %lu, child page no %lu\n", - (ulong) - btr_node_ptr_get_child_page_no(node_ptr, offsets), - (ulong) page_no); + + ib::error() + << "Corruption of an index tree: table " + << index->table->name + << " index " << index->name + << ", father ptr page no " + << btr_node_ptr_get_child_page_no(node_ptr, offsets) + << ", child page no " << page_no; + print_rec = page_rec_get_next( page_get_infimum_rec(page_align(user_rec))); offsets = rec_get_offsets(print_rec, index, @@ -1603,27 +984,28 @@ btr_page_get_father_node_ptr_func( ULINT_UNDEFINED, &heap); page_rec_print(node_ptr, offsets); - fputs("InnoDB: You should dump + drop + reimport the table" - " to fix the\n" - "InnoDB: corruption. If the crash happens at " - "the database startup, see\n" - "InnoDB: " REFMAN "forcing-innodb-recovery.html about\n" - "InnoDB: forcing recovery. " - "Then dump + drop + reimport.\n", stderr); - - ut_error; + ib::fatal() + << "You should dump + drop + reimport the table to" + << " fix the corruption. If the crash happens at" + << " database startup. " << FORCE_RECOVERY_MSG + << " Then dump + drop + reimport."; } return(offsets); } #define btr_page_get_father_node_ptr(of,heap,cur,mtr) \ - btr_page_get_father_node_ptr_func(of,heap,cur,__FILE__,__LINE__,mtr) + btr_page_get_father_node_ptr_func( \ + of,heap,cur,BTR_CONT_MODIFY_TREE,__FILE__,__LINE__,mtr) + +#define btr_page_get_father_node_ptr_for_validate(of,heap,cur,mtr) \ + btr_page_get_father_node_ptr_func( \ + of,heap,cur,BTR_CONT_SEARCH_TREE,__FILE__,__LINE__,mtr) /************************************************************//** Returns the upper level node pointer to a page. It is assumed that mtr holds an x-latch on the tree. -@return rec_get_offsets() of the node pointer record */ +@return rec_get_offsets() of the node pointer record */ static ulint* btr_page_get_father_block( @@ -1667,26 +1049,123 @@ btr_page_get_father( mem_heap_free(heap); } -/************************************************************//** -Creates the root node for a new index tree. -@return page number of the created root, FIL_NULL if did not succeed */ -UNIV_INTERN +/** Free a B-tree root page. btr_free_but_not_root() must already +have been called. +In a persistent tablespace, the caller must invoke fsp_init_file_page() +before mtr.commit(). +@param[in,out] block index root page +@param[in,out] mtr mini-transaction */ +static +void +btr_free_root( + buf_block_t* block, + mtr_t* mtr) +{ + fseg_header_t* header; + + ut_ad(mtr_memo_contains_flagged(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr->is_named_space(block->page.id.space())); + + btr_search_drop_page_hash_index(block); + + header = buf_block_get_frame(block) + PAGE_HEADER + PAGE_BTR_SEG_TOP; +#ifdef UNIV_BTR_DEBUG + ut_a(btr_root_fseg_validate(header, block->page.id.space())); +#endif /* UNIV_BTR_DEBUG */ + + while (!fseg_free_step(header, true, mtr)) { + /* Free the entire segment in small steps. 
*/ + } +} + +/** PAGE_INDEX_ID value for freed index B-trees */ +static const index_id_t BTR_FREED_INDEX_ID = 0; + +/** Invalidate an index root page so that btr_free_root_check() +will not find it. +@param[in,out] block index root page +@param[in,out] mtr mini-transaction */ +static +void +btr_free_root_invalidate( + buf_block_t* block, + mtr_t* mtr) +{ + ut_ad(page_is_root(block->frame)); + + btr_page_set_index_id( + buf_block_get_frame(block), + buf_block_get_page_zip(block), + BTR_FREED_INDEX_ID, mtr); +} + +/** Prepare to free a B-tree. +@param[in] page_id page id +@param[in] page_size page size +@param[in] index_id PAGE_INDEX_ID contents +@param[in,out] mtr mini-transaction +@return root block, to invoke btr_free_but_not_root() and btr_free_root() +@retval NULL if the page is no longer a matching B-tree page */ +static __attribute__((warn_unused_result)) +buf_block_t* +btr_free_root_check( + const page_id_t& page_id, + const page_size_t& page_size, + index_id_t index_id, + mtr_t* mtr) +{ + ut_ad(page_id.space() != srv_tmp_space.space_id()); + ut_ad(index_id != BTR_FREED_INDEX_ID); + + buf_block_t* block = buf_page_get( + page_id, page_size, RW_X_LATCH, mtr); + + if (block) { + buf_block_dbg_add_level(block, SYNC_TREE_NODE); + + if (fil_page_index_page_check(block->frame) + && index_id == btr_page_get_index_id(block->frame)) { + /* This should be a root page. + It should not be possible to reassign the same + index_id for some other index in the tablespace. */ + ut_ad(page_is_root(block->frame)); + } else { + block = NULL; + } + } + + return(block); +} + +/** Create the root node for a new index tree. +@param[in] type type of the index +@param[in] space space where created +@param[in] page_size page size +@param[in] index_id index id +@param[in] index index, or NULL when applying TRUNCATE +log record during recovery +@param[in] btr_redo_create_info used for applying TRUNCATE log +@param[in] mtr mini-transaction handle +record during recovery +@return page number of the created root, FIL_NULL if did not succeed */ ulint btr_create( -/*=======*/ - ulint type, /*!< in: type of the index */ - ulint space, /*!< in: space where created */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - index_id_t index_id,/*!< in: index id */ - dict_index_t* index, /*!< in: index */ - mtr_t* mtr) /*!< in: mini-transaction handle */ + ulint type, + ulint space, + const page_size_t& page_size, + index_id_t index_id, + dict_index_t* index, + const btr_create_t* btr_redo_create_info, + mtr_t* mtr) { - ulint page_no; - buf_block_t* block; - buf_frame_t* frame; - page_t* page; - page_zip_des_t* page_zip; + ulint page_no; + buf_block_t* block; + buf_frame_t* frame; + page_t* page; + page_zip_des_t* page_zip; + + ut_ad(mtr->is_named_space(space)); + ut_ad(index_id != BTR_FREED_INDEX_ID); /* Create the two new segments (one, in the case of an ibuf tree) for the index tree; the segment headers are put on the allocated root page @@ -1699,10 +1178,14 @@ btr_create( space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); + if (ibuf_hdr_block == NULL) { + return(FIL_NULL); + } + buf_block_dbg_add_level( ibuf_hdr_block, SYNC_IBUF_TREE_NODE_NEW); - ut_ad(buf_block_get_page_no(ibuf_hdr_block) + ut_ad(ibuf_hdr_block->page.id.page_no() == IBUF_HEADER_PAGE_NO); /* Allocate then the next page to the segment: it will be the tree root page */ @@ -1712,16 +1195,8 @@ btr_create( + IBUF_HEADER + IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); - ut_ad(buf_block_get_page_no(block) == 
IBUF_TREE_ROOT_PAGE_NO); + ut_ad(block->page.id.page_no() == IBUF_TREE_ROOT_PAGE_NO); } else { -#ifdef UNIV_BLOB_DEBUG - if ((type & DICT_CLUSTERED) && !index->blobs) { - mutex_create(PFS_NOT_INSTRUMENTED, - &index->blobs_mutex, SYNC_ANY_LATCH); - index->blobs = rbt_create(sizeof(btr_blob_dbg_t), - btr_blob_dbg_cmp); - } -#endif /* UNIV_BLOB_DEBUG */ block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); } @@ -1731,7 +1206,7 @@ btr_create( return(FIL_NULL); } - page_no = buf_block_get_page_no(block); + page_no = block->page.id.page_no(); frame = buf_block_get_frame(block); if (type & DICT_IBUF) { @@ -1750,7 +1225,10 @@ btr_create( PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) { /* Not enough space for new segment, free root segment before return. */ - btr_free_root(space, zip_size, page_no, mtr); + btr_free_root(block, mtr); + if (!dict_table_is_temporary(index->table)) { + btr_free_root_invalidate(block, mtr); + } return(FIL_NULL); } @@ -1764,16 +1242,48 @@ btr_create( page_zip = buf_block_get_page_zip(block); if (page_zip) { - page = page_create_zip(block, index, 0, 0, mtr); + if (index != NULL) { + page = page_create_zip(block, index, 0, 0, NULL, mtr); + } else { + /* Create a compressed index page when applying + TRUNCATE log record during recovery */ + ut_ad(btr_redo_create_info != NULL); + + redo_page_compress_t page_comp_info; + + page_comp_info.type = type; + + page_comp_info.index_id = index_id; + + page_comp_info.n_fields = + btr_redo_create_info->n_fields; + + page_comp_info.field_len = + btr_redo_create_info->field_len; + + page_comp_info.fields = btr_redo_create_info->fields; + + page_comp_info.trx_id_pos = + btr_redo_create_info->trx_id_pos; + + page = page_create_zip(block, NULL, 0, 0, + &page_comp_info, mtr); + } } else { - page = page_create(block, mtr, - dict_table_is_comp(index->table)); + if (index != NULL) { + page = page_create(block, mtr, + dict_table_is_comp(index->table), + dict_index_is_spatial(index)); + } else { + ut_ad(btr_redo_create_info != NULL); + page = page_create( + block, mtr, btr_redo_create_info->format_flags, + type == DICT_SPATIAL); + } /* Set the level of the new index page */ btr_page_set_level(page, NULL, 0, mtr); } - block->check_index_page_at_flush = TRUE; - /* Set the index id of the page */ btr_page_set_index_id(page, page_zip, index_id, mtr); @@ -1783,9 +1293,16 @@ btr_create( /* We reset the free bits for the page to allow creation of several trees in the same mtr, otherwise the latch on a bitmap page would - prevent it because of the latching order */ + prevent it because of the latching order. + + index will be NULL if we are recreating the table during recovery + on behalf of TRUNCATE. + + Note: Insert Buffering is disabled for temporary tables given that + most temporary tables are smaller in size and short-lived. */ + if (!(type & DICT_CLUSTERED) + && (index == NULL || !dict_table_is_temporary(index->table))) { - if (!(type & DICT_CLUSTERED)) { ibuf_reset_free_bits(block); } @@ -1798,39 +1315,39 @@ btr_create( return(page_no); } -/************************************************************//** -Frees a B-tree except the root page, which MUST be freed after this -by calling btr_free_root. */ -UNIV_INTERN +/** Free a B-tree except the root page. The root page MUST be freed after +this by calling btr_free_root. 
+@param[in,out] block root page +@param[in] log_mode mtr logging mode */ +static void btr_free_but_not_root( -/*==================*/ - ulint space, /*!< in: space where created */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint root_page_no) /*!< in: root page number */ + buf_block_t* block, + mtr_log_t log_mode) { ibool finished; - page_t* root; mtr_t mtr; + ut_ad(page_is_root(block->frame)); leaf_loop: mtr_start(&mtr); + mtr_set_log_mode(&mtr, log_mode); + mtr.set_named_space(block->page.id.space()); + + page_t* root = block->frame; - root = btr_page_get(space, zip_size, root_page_no, RW_X_LATCH, - NULL, &mtr); #ifdef UNIV_BTR_DEBUG ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF - + root, space)); + + root, block->page.id.space())); ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP - + root, space)); + + root, block->page.id.space())); #endif /* UNIV_BTR_DEBUG */ /* NOTE: page hash indexes are dropped when a page is freed inside fsp0fsp. */ finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF, - &mtr); + true, &mtr); mtr_commit(&mtr); if (!finished) { @@ -1839,16 +1356,18 @@ leaf_loop: } top_loop: mtr_start(&mtr); + mtr_set_log_mode(&mtr, log_mode); + mtr.set_named_space(block->page.id.space()); + + root = block->frame; - root = btr_page_get(space, zip_size, root_page_no, RW_X_LATCH, - NULL, &mtr); #ifdef UNIV_BTR_DEBUG ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP - + root, space)); + + root, block->page.id.space())); #endif /* UNIV_BTR_DEBUG */ finished = fseg_free_step_not_header( - root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr); + root + PAGE_HEADER + PAGE_BTR_SEG_TOP, true, &mtr); mtr_commit(&mtr); if (!finished) { @@ -1857,34 +1376,51 @@ top_loop: } } -/************************************************************//** -Frees the B-tree root page. Other tree MUST already have been freed. */ -UNIV_INTERN +/** Free a persistent index tree if it exists. +@param[in] page_id root page id +@param[in] page_size page size +@param[in] index_id PAGE_INDEX_ID contents +@param[in,out] mtr mini-transaction */ void -btr_free_root( -/*==========*/ - ulint space, /*!< in: space where created */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint root_page_no, /*!< in: root page number */ - mtr_t* mtr) /*!< in/out: mini-transaction */ +btr_free_if_exists( + const page_id_t& page_id, + const page_size_t& page_size, + index_id_t index_id, + mtr_t* mtr) { - buf_block_t* block; - fseg_header_t* header; + buf_block_t* root = btr_free_root_check( + page_id, page_size, index_id, mtr); - block = btr_block_get(space, zip_size, root_page_no, RW_X_LATCH, - NULL, mtr); + if (root == NULL) { + return; + } - btr_search_drop_page_hash_index(block); + btr_free_but_not_root(root, mtr->get_log_mode()); + mtr->set_named_space(page_id.space()); + btr_free_root(root, mtr); + btr_free_root_invalidate(root, mtr); +} - header = buf_block_get_frame(block) + PAGE_HEADER + PAGE_BTR_SEG_TOP; -#ifdef UNIV_BTR_DEBUG - ut_a(btr_root_fseg_validate(header, space)); -#endif /* UNIV_BTR_DEBUG */ +/** Free an index tree in a temporary tablespace or during TRUNCATE TABLE. +@param[in] page_id root page id +@param[in] page_size page size */ +void +btr_free( + const page_id_t& page_id, + const page_size_t& page_size) +{ + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); - while (!fseg_free_step(header, mtr)) { - /* Free the entire segment in small steps. 
*/ - } + buf_block_t* block = buf_page_get( + page_id, page_size, RW_X_LATCH, &mtr); + + ut_ad(page_is_root(block->frame)); + + btr_free_but_not_root(block, MTR_LOG_NO_REDO); + btr_free_root(block, &mtr); + mtr.commit(); } #endif /* !UNIV_HOTBACKUP */ @@ -1899,7 +1435,6 @@ IBUF_BITMAP_FREE is unaffected by reorganization. @retval true if the operation was successful @retval false if it is a compressed page, and recompression failed */ -UNIV_INTERN bool btr_page_reorganize_low( /*====================*/ @@ -1922,7 +1457,6 @@ btr_page_reorganize_low( page_zip_des_t* page_zip = buf_block_get_page_zip(block); buf_block_t* temp_block; page_t* temp_page; - ulint log_mode; ulint data_size1; ulint data_size2; ulint max_ins_size1; @@ -1930,8 +1464,9 @@ btr_page_reorganize_low( bool success = false; ulint pos; bool log_compressed; + bool is_spatial; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); btr_assert_not_corrupted(block, index); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); @@ -1940,7 +1475,7 @@ btr_page_reorganize_low( max_ins_size1 = page_get_max_insert_size_after_reorganize(page, 1); /* Turn logging off */ - log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE); + mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE); #ifndef UNIV_HOTBACKUP temp_block = buf_block_alloc(buf_pool); @@ -1952,6 +1487,11 @@ btr_page_reorganize_low( MONITOR_INC(MONITOR_INDEX_REORG_ATTEMPTS); + /* This function can be called by log redo with a "dummy" index. + So we would trust more on the original page's type */ + is_spatial = (fil_page_get_type(page) == FIL_PAGE_RTREE + || dict_index_is_spatial(index)); + /* Copy the old page to temporary space */ buf_frame_copy(temp_page, page); @@ -1959,10 +1499,7 @@ btr_page_reorganize_low( if (!recovery) { btr_search_drop_page_hash_index(block); } - - block->check_index_page_at_flush = TRUE; #endif /* !UNIV_HOTBACKUP */ - btr_blob_dbg_remove(page, index, "btr_page_reorganize"); /* Save the cursor position. */ pos = page_rec_get_n_recs_before(page_cur_get_rec(cursor)); @@ -1970,7 +1507,7 @@ btr_page_reorganize_low( /* Recreate the page: note that global data on page (possible segment headers, next page-field, etc.) is preserved intact */ - page_create(block, mtr, dict_table_is_comp(index->table)); + page_create(block, mtr, dict_table_is_comp(index->table), is_spatial); /* Copy the records from the temporary space to the recreated page; do not copy the lock bits yet */ @@ -1979,7 +1516,13 @@ btr_page_reorganize_low( page_get_infimum_rec(temp_page), index, mtr); - if (dict_index_is_sec_or_ibuf(index) && page_is_leaf(page)) { + /* Multiple transactions cannot simultaneously operate on the + same temp-table in parallel. + max_trx_id is ignored for temp tables because it not required + for MVCC. */ + if (dict_index_is_sec_or_ibuf(index) + && page_is_leaf(page) + && !dict_table_is_temporary(index->table)) { /* Copy max trx id to recreated page */ trx_id_t max_trx_id = page_get_max_trx_id(temp_page); page_set_max_trx_id(block, NULL, max_trx_id, mtr); @@ -1998,12 +1541,9 @@ btr_page_reorganize_low( } if (page_zip - && !page_zip_compress(page_zip, page, index, z_level, mtr)) { + && !page_zip_compress(page_zip, page, index, z_level, NULL, mtr)) { /* Restore the old page and exit. 
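
The surrounding btr_page_reorganize_low() keeps its existing shape: copy the page into a scratch block, recreate it, copy the records back, and verify that the data size and max insert size are unchanged (the patch only switches the error report from buf_page_print()/fprintf() to ib::error). A simplified sketch of that copy-recreate-verify pattern, using a toy record container rather than real page frames:

#include <cstddef>
#include <string>
#include <vector>

/* Toy page: a bag of variable-length records plus dead space. */
struct toy_page_t {
        std::vector<std::string>        recs;
        std::size_t                     garbage = 0;    /* bytes lost to deleted records */
};

static std::size_t toy_data_size(const toy_page_t& p)
{
        std::size_t n = 0;
        for (const std::string& r : p.recs) {
                n += r.size();
        }
        return(n);
}

/* Rebuild the page from a temporary copy, reclaiming the garbage.
The payload must come out byte-for-byte the same size. */
static bool toy_reorganize(toy_page_t& page)
{
        const toy_page_t        temp = page;            /* buf_frame_copy() analogue */
        const std::size_t       size_before = toy_data_size(temp);

        page.recs.clear();                              /* page_create() analogue */
        page.garbage = 0;

        for (const std::string& r : temp.recs) {        /* copy the records back */
                page.recs.push_back(r);
        }

        /* The real code logs old/new data size and max insert size
        and asserts in debug builds when they differ. */
        return(size_before == toy_data_size(page));
}
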
*/ - btr_blob_dbg_restore(page, temp_page, index, - "btr_page_reorganize_compress_fail"); - #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG /* Check that the bytes that we skip are identical. */ ut_a(!memcmp(page, temp_page, PAGE_HEADER)); @@ -2028,7 +1568,8 @@ btr_page_reorganize_low( } #ifndef UNIV_HOTBACKUP - if (!recovery) { + /* No locks are acquried for intrinsic tables. */ + if (!recovery && !dict_table_is_locking_disabled(index->table)) { /* Update the record lock bitmaps */ lock_move_reorganize_page(block, temp_block); } @@ -2038,19 +1579,13 @@ btr_page_reorganize_low( max_ins_size2 = page_get_max_insert_size_after_reorganize(page, 1); if (data_size1 != data_size2 || max_ins_size1 != max_ins_size2) { - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(temp_page, 0, BUF_PAGE_PRINT_NO_CRASH); + ib::error() + << "Page old data size " << data_size1 + << " new data size " << data_size2 + << ", page old max ins size " << max_ins_size1 + << " new max ins size " << max_ins_size2; - fprintf(stderr, - "InnoDB: Error: page old data size %lu" - " new data size %lu\n" - "InnoDB: Error: page old max ins size %lu" - " new max ins size %lu\n" - "InnoDB: Submit a detailed bug report" - " to http://bugs.mysql.com\n", - (unsigned long) data_size1, (unsigned long) data_size2, - (unsigned long) max_ins_size1, - (unsigned long) max_ins_size2); + ib::error() << BUG_REPORT_MSG; ut_ad(0); } else { success = true; @@ -2076,8 +1611,8 @@ func_exit: #ifndef UNIV_HOTBACKUP if (success) { - byte type; - byte* log_ptr; + mlog_id_t type; + byte* log_ptr; /* Write the log record */ if (page_zip) { @@ -2152,7 +1687,6 @@ IBUF_BITMAP_FREE is unaffected by reorganization. @retval true if the operation was successful @retval false if it is a compressed page, and recompression failed */ -UNIV_INTERN bool btr_page_reorganize( /*================*/ @@ -2167,8 +1701,7 @@ btr_page_reorganize( /***********************************************************//** Parses a redo log record of reorganizing a page. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_parse_page_reorganize( /*======================*/ @@ -2183,6 +1716,7 @@ btr_parse_page_reorganize( ut_ad(ptr != NULL); ut_ad(end_ptr != NULL); + ut_ad(index != NULL); /* If dealing with a compressed page the record has the compression level used during original compression written in @@ -2222,26 +1756,24 @@ btr_page_empty( { page_t* page = buf_block_get_frame(block); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(page_zip == buf_block_get_page_zip(block)); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ btr_search_drop_page_hash_index(block); - btr_blob_dbg_remove(page, index, "btr_page_empty"); /* Recreate the page: note that global data on page (possible segment headers, next page-field, etc.) 
is preserved intact */ if (page_zip) { - page_create_zip(block, index, level, 0, mtr); + page_create_zip(block, index, level, 0, NULL, mtr); } else { - page_create(block, mtr, dict_table_is_comp(index->table)); + page_create(block, mtr, dict_table_is_comp(index->table), + dict_index_is_spatial(index)); btr_page_set_level(page, NULL, level, mtr); } - - block->check_index_page_at_flush = TRUE; } /*************************************************************//** @@ -2251,7 +1783,6 @@ NOTE that the operation of this function must always succeed, we cannot reverse it: therefore enough free disk space must be guaranteed to be available before this function is called. @return inserted record or NULL if run out of space */ -UNIV_INTERN rec_t* btr_root_raise_and_insert( /*======================*/ @@ -2300,9 +1831,12 @@ btr_root_raise_and_insert( ut_a(dict_index_get_page(index) == page_get_page_no(root)); #endif /* UNIV_BTR_DEBUG */ - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, root_block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + ut_ad(mtr_is_block_fix( + mtr, root_block, MTR_MEMO_PAGE_X_FIX, index->table)); /* Allocate a new page to the tree. Root splitting is done by first moving the root records to the new page, emptying the root, putting @@ -2346,8 +1880,15 @@ btr_root_raise_and_insert( /* Update the lock table and possible hash index. */ - lock_move_rec_list_end(new_block, root_block, - page_get_infimum_rec(root)); + if (!dict_table_is_locking_disabled(index->table)) { + lock_move_rec_list_end(new_block, root_block, + page_get_infimum_rec(root)); + } + + /* Move any existing predicate locks */ + if (dict_index_is_spatial(index)) { + lock_prdt_rec_move(new_block, root_block); + } btr_search_move_or_delete_hash_entries(new_block, root_block, index); @@ -2358,7 +1899,9 @@ btr_root_raise_and_insert( information of the record to be inserted on the infimum of the root page: we cannot discard the lock structs on the root page */ - lock_update_root_raise(new_block, root_block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_update_root_raise(new_block, root_block); + } /* Create a memory heap where the node pointer is stored */ if (!*heap) { @@ -2366,13 +1909,20 @@ btr_root_raise_and_insert( } rec = page_rec_get_next(page_get_infimum_rec(new_page)); - new_page_no = buf_block_get_page_no(new_block); + new_page_no = new_block->page.id.page_no(); /* Build the node pointer (= node key and page address) for the child */ + if (dict_index_is_spatial(index)) { + rtr_mbr_t new_mbr; - node_ptr = dict_index_build_node_ptr( - index, rec, new_page_no, *heap, level); + rtr_page_cal_mbr(index, new_block, &new_mbr, *heap); + node_ptr = rtr_index_build_node_ptr( + index, &new_mbr, rec, new_page_no, *heap, level); + } else { + node_ptr = dict_index_build_node_ptr( + index, rec, new_page_no, *heap, level); + } /* The node pointer must be marked as the predefined minimum record, as there is no lower alphabetical limit to records in the leftmost node of a level: */ @@ -2406,33 +1956,34 @@ btr_root_raise_and_insert( /* We play safe and reset the free bits for the new page */ -#if 0 - fprintf(stderr, "Root raise new page no %lu\n", new_page_no); -#endif - - if (!dict_index_is_clust(index)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table)) { ibuf_reset_free_bits(new_block); } if 
(tuple != NULL) { /* Reposition the cursor to the child node */ - page_cur_search(new_block, index, tuple, - PAGE_CUR_LE, page_cursor); + page_cur_search(new_block, index, tuple, page_cursor); } else { /* Set cursor to first record on child node */ page_cur_set_before_first(new_block, page_cursor); } /* Split the child and insert tuple */ - return(btr_page_split_and_insert(flags, cursor, offsets, heap, - tuple, n_ext, mtr)); + if (dict_index_is_spatial(index)) { + /* Split rtree page and insert tuple */ + return(rtr_page_split_and_insert(flags, cursor, offsets, heap, + tuple, n_ext, mtr)); + } else { + return(btr_page_split_and_insert(flags, cursor, offsets, heap, + tuple, n_ext, mtr)); + } } /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to the left. -@return TRUE if split recommended */ -UNIV_INTERN +@return TRUE if split recommended */ ibool btr_page_get_split_rec_to_left( /*===========================*/ @@ -2476,8 +2027,7 @@ btr_page_get_split_rec_to_left( /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to the right. -@return TRUE if split recommended */ -UNIV_INTERN +@return TRUE if split recommended */ ibool btr_page_get_split_rec_to_right( /*============================*/ @@ -2653,7 +2203,7 @@ func_exit: /*************************************************************//** Returns TRUE if the insert fits on the appropriate half-page with the chosen split_rec. -@return true if fits */ +@return true if fits */ static MY_ATTRIBUTE((nonnull(1,3,4,6), warn_unused_result)) bool btr_page_insert_fits( @@ -2746,7 +2296,6 @@ btr_page_insert_fits( /*******************************************************//** Inserts a data tuple to a tree on a non-leaf level. It is assumed that mtr holds an x-latch on the tree. */ -UNIV_INTERN void btr_insert_on_non_leaf_level_func( /*==============================*/ @@ -2762,14 +2311,48 @@ btr_insert_on_non_leaf_level_func( btr_cur_t cursor; dberr_t err; rec_t* rec; - ulint* offsets = NULL; mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + rtr_info_t rtr_info; ut_ad(level > 0); - btr_cur_search_to_nth_level(index, level, tuple, PAGE_CUR_LE, - BTR_CONT_MODIFY_TREE, - &cursor, 0, file, line, mtr); + if (!dict_index_is_spatial(index)) { + dberr_t err = DB_SUCCESS; + if (dict_table_is_intrinsic(index->table)) { + err = btr_cur_search_to_nth_level_with_no_latch( + index, level, tuple, PAGE_CUR_LE, &cursor, + __FILE__, __LINE__, mtr); + } else { + err = btr_cur_search_to_nth_level( + index, level, tuple, PAGE_CUR_LE, + BTR_CONT_MODIFY_TREE, + &cursor, 0, file, line, mtr); + } + + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_page_get_father_node_ptr_func " + << " level: " << level + << " called from file: " + << file << " line: " << line + << " table: " << index->table->name + << " index: " << index->name; + } + } else { + /* For spatial index, initialize structures to track + its parents etc. 
*/ + rtr_init_rtr_info(&rtr_info, false, &cursor, index, false); + + rtr_info_update_btr(&cursor, &rtr_info); + + btr_cur_search_to_nth_level(index, level, tuple, + PAGE_CUR_RTREE_INSERT, + BTR_CONT_MODIFY_TREE, + &cursor, 0, file, line, mtr); + } ut_ad(cursor.flag == BTR_CUR_BINARY); @@ -2791,7 +2374,16 @@ btr_insert_on_non_leaf_level_func( &dummy_big_rec, 0, NULL, mtr); ut_a(err == DB_SUCCESS); } - mem_heap_free(heap); + + if (heap != NULL) { + mem_heap_free(heap); + } + + if (dict_index_is_spatial(index)) { + ut_ad(cursor.rtr_info); + + rtr_clean_rtr_info(&rtr_info, true); + } } /**************************************************************//** @@ -2811,8 +2403,6 @@ btr_attach_half_pages( ulint direction, /*!< in: FSP_UP or FSP_DOWN */ mtr_t* mtr) /*!< in: mtr */ { - ulint space; - ulint zip_size; ulint prev_page_no; ulint next_page_no; ulint level; @@ -2825,9 +2415,12 @@ btr_attach_half_pages( page_zip_des_t* upper_page_zip; dtuple_t* node_ptr_upper; mem_heap_t* heap; + buf_block_t* prev_block = NULL; + buf_block_t* next_block = NULL; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains(mtr, new_block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr_is_block_fix( + mtr, new_block, MTR_MEMO_PAGE_X_FIX, index->table)); /* Create a memory heap where the data tuple is stored */ heap = mem_heap_create(1024); @@ -2839,10 +2432,10 @@ btr_attach_half_pages( ulint* offsets; lower_page = buf_block_get_frame(new_block); - lower_page_no = buf_block_get_page_no(new_block); + lower_page_no = new_block->page.id.page_no(); lower_page_zip = buf_block_get_page_zip(new_block); upper_page = buf_block_get_frame(block); - upper_page_no = buf_block_get_page_no(block); + upper_page_no = block->page.id.page_no(); upper_page_zip = buf_block_get_page_zip(block); /* Look up the index for the node pointer to page */ @@ -2859,13 +2452,31 @@ btr_attach_half_pages( mem_heap_empty(heap); } else { lower_page = buf_block_get_frame(block); - lower_page_no = buf_block_get_page_no(block); + lower_page_no = block->page.id.page_no(); lower_page_zip = buf_block_get_page_zip(block); upper_page = buf_block_get_frame(new_block); - upper_page_no = buf_block_get_page_no(new_block); + upper_page_no = new_block->page.id.page_no(); upper_page_zip = buf_block_get_page_zip(new_block); } + /* Get the previous and next pages of page */ + prev_page_no = btr_page_get_prev(page, mtr); + next_page_no = btr_page_get_next(page, mtr); + + const ulint space = block->page.id.space(); + + /* for consistency, both blocks should be locked, before change */ + if (prev_page_no != FIL_NULL && direction == FSP_DOWN) { + prev_block = btr_block_get( + page_id_t(space, prev_page_no), block->page.size, + RW_X_LATCH, index, mtr); + } + if (next_page_no != FIL_NULL && direction != FSP_DOWN) { + next_block = btr_block_get( + page_id_t(space, next_page_no), block->page.size, + RW_X_LATCH, index, mtr); + } + /* Get the level of the split pages */ level = btr_page_get_level(buf_block_get_frame(block), mtr); ut_ad(level @@ -2886,22 +2497,13 @@ btr_attach_half_pages( /* Free the memory heap */ mem_heap_free(heap); - /* Get the previous and next pages of page */ - - prev_page_no = btr_page_get_prev(page, mtr); - next_page_no = btr_page_get_next(page, mtr); - space = buf_block_get_space(block); - zip_size = buf_block_get_zip_size(block); - /* Update page links of the level */ - if (prev_page_no != FIL_NULL) { - buf_block_t* prev_block = btr_block_get( - space, zip_size, 
prev_page_no, RW_X_LATCH, index, mtr); + if (prev_block) { #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(prev_block->frame) == page_is_comp(page)); ut_a(btr_page_get_next(prev_block->frame, mtr) - == buf_block_get_page_no(block)); + == block->page.id.page_no()); #endif /* UNIV_BTR_DEBUG */ btr_page_set_next(buf_block_get_frame(prev_block), @@ -2909,9 +2511,7 @@ btr_attach_half_pages( lower_page_no, mtr); } - if (next_page_no != FIL_NULL) { - buf_block_t* next_block = btr_block_get( - space, zip_size, next_page_no, RW_X_LATCH, index, mtr); + if (next_block) { #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(next_block->frame) == page_is_comp(page)); ut_a(btr_page_get_prev(next_block->frame, mtr) @@ -2923,11 +2523,24 @@ btr_attach_half_pages( upper_page_no, mtr); } - btr_page_set_prev(lower_page, lower_page_zip, prev_page_no, mtr); + if (direction == FSP_DOWN) { + /* lower_page is new */ + btr_page_set_prev(lower_page, lower_page_zip, + prev_page_no, mtr); + } else { + ut_ad(btr_page_get_prev(lower_page, mtr) == prev_page_no); + } + btr_page_set_next(lower_page, lower_page_zip, upper_page_no, mtr); - btr_page_set_prev(upper_page, upper_page_zip, lower_page_no, mtr); - btr_page_set_next(upper_page, upper_page_zip, next_page_no, mtr); + + if (direction != FSP_DOWN) { + /* upper_page is new */ + btr_page_set_next(upper_page, upper_page_zip, + next_page_no, mtr); + } else { + ut_ad(btr_page_get_next(upper_page, mtr) == next_page_no); + } } /*************************************************************//** @@ -2989,9 +2602,12 @@ btr_insert_into_right_sibling( page_t* page = buf_block_get_frame(block); ulint next_page_no = btr_page_get_next(page, mtr); - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(dict_table_is_intrinsic(cursor->index->table) + || mtr_memo_contains_flagged( + mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); + ut_ad(mtr_is_block_fix( + mtr, block, MTR_MEMO_PAGE_X_FIX, cursor->index->table)); ut_ad(heap); if (next_page_no == FIL_NULL || !page_rec_is_supremum( @@ -3005,12 +2621,13 @@ btr_insert_into_right_sibling( page_t* next_page; btr_cur_t next_father_cursor; rec_t* rec = NULL; - ulint zip_size = buf_block_get_zip_size(block); ulint max_size; + const ulint space = block->page.id.space(); + next_block = btr_block_get( - buf_block_get_space(block), zip_size, - next_page_no, RW_X_LATCH, cursor->index, mtr); + page_id_t(space, next_page_no), block->page.size, + RW_X_LATCH, cursor->index, mtr); next_page = buf_block_get_frame(next_block); bool is_leaf = page_is_leaf(next_page); @@ -3025,15 +2642,19 @@ btr_insert_into_right_sibling( max_size = page_get_max_insert_size_after_reorganize(next_page, 1); /* Extends gap lock for the next page */ - lock_update_split_left(next_block, block); + if (!dict_table_is_locking_disabled(cursor->index->table)) { + lock_update_split_left(next_block, block); + } rec = page_cur_tuple_insert( &next_page_cursor, tuple, cursor->index, offsets, &heap, n_ext, mtr); if (rec == NULL) { - if (zip_size && is_leaf - && !dict_index_is_clust(cursor->index)) { + if (is_leaf + && next_block->page.size.is_compressed() + && !dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table)) { /* Reset the IBUF_BITMAP_FREE bits, because page_cur_tuple_insert() will have attempted page reorganize before failing. 
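
Throughout these hunks the insert-buffer bookkeeping gains two extra guards: only leaf pages of non-clustered indexes carry change-buffer free bits, and temporary tables are now excluded entirely. A hypothetical helper condensing the condition the patch repeats inline (the real code uses dict_index_is_clust() and dict_table_is_temporary()):

/* Simplified view of the guard added around ibuf_reset_free_bits()
and ibuf_update_free_bits_*() calls in this patch. */
struct toy_index_t {
        bool    is_clustered;
        bool    is_temporary_table;
};

static bool toy_needs_ibuf_free_bits(const toy_index_t& index, bool is_leaf_page)
{
        /* Change buffering only applies to leaf pages of secondary
        indexes; temporary tables are small, short-lived and never
        change-buffered, so they are excluded as well. */
        return(is_leaf_page
               && !index.is_clustered
               && !index.is_temporary_table);
}
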
*/ @@ -3056,7 +2677,7 @@ btr_insert_into_right_sibling( compressed = btr_cur_pessimistic_delete( &err, TRUE, &next_father_cursor, - BTR_CREATE_FLAG, RB_NONE, mtr); + BTR_CREATE_FLAG, false, mtr); ut_a(err == DB_SUCCESS); @@ -3065,7 +2686,7 @@ btr_insert_into_right_sibling( } dtuple_t* node_ptr = dict_index_build_node_ptr( - cursor->index, rec, buf_block_get_page_no(next_block), + cursor->index, rec, next_block->page.id.page_no(), heap, level); btr_insert_on_non_leaf_level( @@ -3073,11 +2694,13 @@ btr_insert_into_right_sibling( ut_ad(rec_offs_validate(rec, cursor->index, *offsets)); - if (is_leaf && !dict_index_is_clust(cursor->index)) { + if (is_leaf + && !dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table)) { /* Update the free bits of the B-tree page in the insert buffer bitmap. */ - if (zip_size) { + if (next_block->page.size.is_compressed()) { ibuf_update_free_bits_zip(next_block, mtr); } else { ibuf_update_free_bits_if_full( @@ -3101,7 +2724,6 @@ NOTE: jonaso added support for calling function with tuple == NULL which cause it to only split a page. @return inserted record or NULL if run out of space */ -UNIV_INTERN rec_t* btr_page_split_and_insert( /*======================*/ @@ -3137,6 +2759,15 @@ btr_page_split_and_insert( ulint n_iterations = 0; rec_t* rec; ulint n_uniq; + dict_index_t* index; + + index = btr_cur_get_index(cursor); + + if (dict_index_is_spatial(index)) { + /* Split rtree page and update parent */ + return(rtr_page_split_and_insert(flags, cursor, offsets, heap, + tuple, n_ext, mtr)); + } if (!*heap) { *heap = mem_heap_create(1024); @@ -3146,20 +2777,23 @@ func_start: mem_heap_empty(*heap); *offsets = NULL; - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index), - MTR_MEMO_X_LOCK)); + ut_ad(mtr_memo_contains_flagged(mtr, + dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(cursor->index->table)); ut_ad(!dict_index_is_online_ddl(cursor->index) || (flags & BTR_CREATE_FLAG) || dict_index_is_clust(cursor->index)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(dict_index_get_lock(cursor->index), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own_flagged(dict_index_get_lock(cursor->index), + RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX) + || dict_table_is_intrinsic(cursor->index->table)); block = btr_cur_get_block(cursor); page = buf_block_get_frame(block); page_zip = buf_block_get_page_zip(block); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix( + mtr, block, MTR_MEMO_PAGE_X_FIX, cursor->index->table)); ut_ad(!page_is_empty(page)); /* try to insert to the next page if possible before split */ @@ -3170,7 +2804,7 @@ func_start: return(rec); } - page_no = buf_block_get_page_no(block); + page_no = block->page.id.page_no(); /* 1. 
Decide the split record; split_rec == NULL means that the tuple to be inserted should be the first record on the upper @@ -3269,8 +2903,9 @@ func_start: insert_empty: ut_ad(!split_rec); ut_ad(!insert_left); - buf = (byte*) mem_alloc(rec_get_converted_size(cursor->index, - tuple, n_ext)); + buf = UT_NEW_ARRAY_NOKEY( + byte, + rec_get_converted_size(cursor->index, tuple, n_ext)); first_rec = rec_convert_dtuple_to_rec(buf, cursor->index, tuple, n_ext); @@ -3295,7 +2930,7 @@ insert_empty: offsets, tuple, n_ext, heap); } else { if (!insert_left) { - mem_free(buf); + UT_DELETE_ARRAY(buf); buf = NULL; } @@ -3304,11 +2939,18 @@ insert_empty: offsets, tuple, n_ext, heap); } - if (insert_will_fit && page_is_leaf(page) + if (!srv_read_only_mode + && !dict_table_is_intrinsic(cursor->index->table) + && insert_will_fit + && page_is_leaf(page) && !dict_index_is_online_ddl(cursor->index)) { - mtr_memo_release(mtr, dict_index_get_lock(cursor->index), - MTR_MEMO_X_LOCK); + mtr->memo_release( + dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK); + + /* NOTE: We cannot release root block latch here, because it + has segment header and already modified in most of cases.*/ } /* 5. Move then the records to the new page */ @@ -3337,9 +2979,12 @@ insert_empty: /* Update the lock table and possible hash index. */ - lock_move_rec_list_start( - new_block, block, move_limit, - new_page + PAGE_NEW_INFIMUM); + if (!dict_table_is_locking_disabled( + cursor->index->table)) { + lock_move_rec_list_start( + new_block, block, move_limit, + new_page + PAGE_NEW_INFIMUM); + } btr_search_move_or_delete_hash_entries( new_block, block, cursor->index); @@ -3353,7 +2998,9 @@ insert_empty: left_block = new_block; right_block = block; - lock_update_split_left(right_block, left_block); + if (!dict_table_is_locking_disabled(cursor->index->table)) { + lock_update_split_left(right_block, left_block); + } } else { /* fputs("Split right\n", stderr); */ @@ -3377,8 +3024,13 @@ insert_empty: cursor->index, mtr); /* Update the lock table and possible hash index. */ + if (!dict_table_is_locking_disabled( + cursor->index->table)) { + lock_move_rec_list_end( + new_block, block, move_limit); + } - lock_move_rec_list_end(new_block, block, move_limit); + ut_ad(!dict_index_is_spatial(index)); btr_search_move_or_delete_hash_entries( new_block, block, cursor->index); @@ -3394,7 +3046,9 @@ insert_empty: left_block = block; right_block = new_block; - lock_update_split_right(right_block, left_block); + if (!dict_table_is_locking_disabled(cursor->index->table)) { + lock_update_split_right(right_block, left_block); + } } #ifdef UNIV_ZIP_DEBUG @@ -3424,8 +3078,7 @@ insert_empty: /* 7. 
Reposition the cursor for insert and try insertion */ page_cursor = btr_cur_get_page_cur(cursor); - page_cur_search(insert_block, cursor->index, tuple, - PAGE_CUR_LE, page_cursor); + page_cur_search(insert_block, cursor->index, tuple, page_cursor); rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index, offsets, heap, n_ext, mtr); @@ -3466,14 +3119,13 @@ insert_empty: /* The insert did not fit on the page: loop back to the start of the function for a new split */ insert_failed: - /* We play safe and reset the free bits */ - if (!dict_index_is_clust(cursor->index)) { + /* We play safe and reset the free bits for new_page */ + if (!dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table)) { ibuf_reset_free_bits(new_block); ibuf_reset_free_bits(block); } - /* fprintf(stderr, "Split second round %lu\n", - page_get_page_no(page)); */ n_iterations++; ut_ad(n_iterations < 2 || buf_block_get_page_zip(insert_block)); @@ -3486,17 +3138,14 @@ func_exit: /* Insert fit on the page: update the free bits for the left and right pages in the same mtr */ - if (!dict_index_is_clust(cursor->index) && page_is_leaf(page)) { + if (!dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table) + && page_is_leaf(page)) { + ibuf_update_free_bits_for_two_pages_low( - buf_block_get_zip_size(left_block), left_block, right_block, mtr); } -#if 0 - fprintf(stderr, "Split and insert done %lu %lu\n", - buf_block_get_page_no(left_block), - buf_block_get_page_no(right_block)); -#endif MONITOR_INC(MONITOR_INDEX_SPLIT); ut_ad(page_validate(buf_block_get_frame(left_block), cursor->index)); @@ -3509,37 +3158,37 @@ func_exit: return(rec); } -/*************************************************************//** -Removes a page from the level list of pages. */ +/** Removes a page from the level list of pages. 
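
btr_level_list_remove_func() unlinks a page from the doubly linked list of pages on its level; the rewrite below only changes how the neighbour blocks are addressed (page_id_t plus page_size_t instead of space id plus zip_size). The pointer surgery itself is plain doubly linked list removal, sketched here with toy nodes instead of the FIL_PAGE_PREV/FIL_PAGE_NEXT fields:

#include <cstdint>

/* FIL_NULL analogue: "no page". */
static const std::uint32_t TOY_NULL = 0xFFFFFFFF;

struct toy_page_t {
        std::uint32_t   prev = TOY_NULL;        /* FIL_PAGE_PREV analogue */
        std::uint32_t   next = TOY_NULL;        /* FIL_PAGE_NEXT analogue */
};

/* Unlink `page` from the level list, given already-latched neighbours
(pass nullptr where the neighbour does not exist). */
static void toy_level_list_remove(toy_page_t* prev_page, const toy_page_t& page,
                                  toy_page_t* next_page)
{
        if (prev_page != nullptr) {
                prev_page->next = page.next;
        }
        if (next_page != nullptr) {
                next_page->prev = page.prev;
        }
}
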
+@param[in] space space where removed +@param[in] page_size page size +@param[in,out] page page to remove +@param[in] index index tree +@param[in,out] mtr mini-transaction */ UNIV_INTERN void btr_level_list_remove_func( -/*=======================*/ - ulint space, /*!< in: space where removed */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - page_t* page, /*!< in/out: page to remove */ - dict_index_t* index, /*!< in: index tree */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space, + const page_size_t& page_size, + page_t* page, + dict_index_t* index, + mtr_t* mtr) { - ulint prev_page_no; - ulint next_page_no; - ut_ad(page != NULL); ut_ad(mtr != NULL); - ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_page_fix(mtr, page, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(space == page_get_space_id(page)); /* Get the previous and next page numbers of page */ - prev_page_no = btr_page_get_prev(page, mtr); - next_page_no = btr_page_get_next(page, mtr); + const ulint prev_page_no = btr_page_get_prev(page, mtr); + const ulint next_page_no = btr_page_get_next(page, mtr); /* Update page links of the level */ if (prev_page_no != FIL_NULL) { buf_block_t* prev_block - = btr_block_get(space, zip_size, prev_page_no, - RW_X_LATCH, index, mtr); + = btr_block_get(page_id_t(space, prev_page_no), + page_size, RW_X_LATCH, index, mtr); + page_t* prev_page = buf_block_get_frame(prev_block); #ifdef UNIV_BTR_DEBUG @@ -3555,8 +3204,10 @@ btr_level_list_remove_func( if (next_page_no != FIL_NULL) { buf_block_t* next_block - = btr_block_get(space, zip_size, next_page_no, - RW_X_LATCH, index, mtr); + = btr_block_get( + page_id_t(space, next_page_no), page_size, + RW_X_LATCH, index, mtr); + page_t* next_page = buf_block_get_frame(next_block); #ifdef UNIV_BTR_DEBUG @@ -3578,9 +3229,10 @@ UNIV_INLINE void btr_set_min_rec_mark_log( /*=====================*/ - rec_t* rec, /*!< in: record */ - byte type, /*!< in: MLOG_COMP_REC_MIN_MARK or MLOG_REC_MIN_MARK */ - mtr_t* mtr) /*!< in: mtr */ + rec_t* rec, /*!< in: record */ + mlog_id_t type, /*!< in: MLOG_COMP_REC_MIN_MARK or + MLOG_REC_MIN_MARK */ + mtr_t* mtr) /*!< in: mtr */ { mlog_write_initial_log_record(rec, type, mtr); @@ -3594,8 +3246,7 @@ btr_set_min_rec_mark_log( /****************************************************************//** Parses the redo log record for setting an index record as the predefined minimum record. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_parse_set_min_rec_mark( /*=======================*/ @@ -3625,7 +3276,6 @@ btr_parse_set_min_rec_mark( /****************************************************************//** Sets a record as the predefined minimum record. */ -UNIV_INTERN void btr_set_min_rec_mark( /*=================*/ @@ -3652,7 +3302,6 @@ btr_set_min_rec_mark( #ifndef UNIV_HOTBACKUP /*************************************************************//** Deletes on the upper level the node pointer to a page. 
*/ -UNIV_INTERN void btr_node_ptr_delete( /*================*/ @@ -3664,13 +3313,13 @@ btr_node_ptr_delete( ibool compressed; dberr_t err; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); /* Delete node pointer on father page */ btr_page_get_father(index, block, mtr, &cursor); compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor, - BTR_CREATE_FLAG, RB_NONE, mtr); + BTR_CREATE_FLAG, false, mtr); ut_a(err == DB_SUCCESS); if (!compressed) { @@ -3707,7 +3356,7 @@ btr_lift_page_up( ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL); ut_ad(btr_page_get_next(page, mtr) == FIL_NULL); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); page_level = btr_page_get_level(page, mtr); root_page_no = dict_index_get_page(index); @@ -3734,7 +3383,7 @@ btr_lift_page_up( the first level, the tree is in an inconsistent state and can not be searched. */ for (b = father_block; - buf_block_get_page_no(b) != root_page_no; ) { + b->page.id.page_no() != root_page_no; ) { ut_a(n_blocks < BTR_MAX_LEVELS); offsets = btr_page_get_father_block(offsets, heap, @@ -3760,7 +3409,8 @@ btr_lift_page_up( ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL); ut_ad(btr_page_get_next(page, mtr) == FIL_NULL); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix( + mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); father_block = blocks[0]; father_page_zip = buf_block_get_page_zip(father_block); @@ -3795,15 +3445,23 @@ btr_lift_page_up( /* Update the lock table and possible hash index. */ - lock_move_rec_list_end(father_block, block, - page_get_infimum_rec(page)); + if (!dict_table_is_locking_disabled(index->table)) { + lock_move_rec_list_end(father_block, block, + page_get_infimum_rec(page)); + } + + /* Also update the predicate locks */ + if (dict_index_is_spatial(index)) { + lock_prdt_rec_move(father_block, block); + } btr_search_move_or_delete_hash_entries(father_block, block, index); } - btr_blob_dbg_remove(page, index, "btr_lift_page_up"); - lock_update_copy_and_discard(father_block, block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_update_copy_and_discard(father_block, block); + } /* Go upward to root page, decrementing levels by one. */ for (i = lift_father_up ? 1 : 0; i < n_blocks; i++, page_level++) { @@ -3818,11 +3476,16 @@ btr_lift_page_up( #endif /* UNIV_ZIP_DEBUG */ } + if (dict_index_is_spatial(index)) { + rtr_check_discard_page(index, NULL, block); + } + /* Free the file page */ btr_page_free(index, block, mtr); /* We play it safe and reset the free bits for the father */ - if (!dict_index_is_clust(index)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table)) { ibuf_reset_free_bits(father_block); } ut_ad(page_validate(father_page, index)); @@ -3840,8 +3503,7 @@ level lifts the records of the page to the father page, thus reducing the tree height. It is assumed that mtr holds an x-latch on the tree and on the page. If cursor is on the leaf level, mtr must also hold x-latches to the brothers, if they exist. 
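
btr_compress() still prefers merging the underfilled page into its left sibling and falls back to the right one, and the spatial-index additions below retry to the right when the left sibling turns out to have a different parent page. A simplified sketch of that choice; toy_can_merge() is a hypothetical stand-in for btr_can_merge_with_page():

#include <cstdint>

static const std::uint32_t TOY_FIL_NULL = 0xFFFFFFFF;

/* Hypothetical stand-in for btr_can_merge_with_page(): the real check
verifies that the records of both pages fit on one (possibly
compressed) page. */
static bool toy_can_merge(std::uint32_t sibling_page_no)
{
        return(sibling_page_no != TOY_FIL_NULL);
}

/* Returns the sibling chosen as merge target, or TOY_FIL_NULL if neither
fits. `is_left` reports which side was picked, mirroring the flag used
when the emptied page's locks are inherited by the merge target. */
static std::uint32_t toy_pick_merge_target(std::uint32_t left_page_no,
                                           std::uint32_t right_page_no,
                                           bool& is_left)
{
        is_left = toy_can_merge(left_page_no);
        if (is_left) {
                return(left_page_no);
        }
        if (toy_can_merge(right_page_no)) {
                return(right_page_no);
        }
        return(TOY_FIL_NULL);
}
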
-@return TRUE on success */ -UNIV_INTERN +@return TRUE on success */ ibool btr_compress( /*=========*/ @@ -3855,7 +3517,6 @@ btr_compress( { dict_index_t* index; ulint space; - ulint zip_size; ulint left_page_no; ulint right_page_no; buf_block_t* merge_block; @@ -3868,6 +3529,10 @@ btr_compress( mem_heap_t* heap; ulint* offsets; ulint nth_rec = 0; /* remove bogus warning */ + bool mbr_changed = false; +#ifdef UNIV_DEBUG + bool leftmost_child; +#endif DBUG_ENTER("btr_compress"); block = btr_cur_get_block(cursor); @@ -3876,11 +3541,22 @@ btr_compress( btr_assert_not_corrupted(block, index); - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); +#ifdef UNIV_DEBUG + if (dict_index_is_spatial(index)) { + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK)); + } else { + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + } +#endif /* UNIV_DEBUG */ + + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); + + const page_size_t page_size(dict_table_page_size(index->table)); MONITOR_INC(MONITOR_INDEX_MERGE_ATTEMPTS); @@ -3896,8 +3572,27 @@ btr_compress( #endif /* UNIV_DEBUG */ heap = mem_heap_create(100); - offsets = btr_page_get_father_block(NULL, heap, index, block, mtr, - &father_cursor); + + if (dict_index_is_spatial(index)) { + offsets = rtr_page_get_father_block( + NULL, heap, index, block, mtr, cursor, &father_cursor); + ut_ad(cursor->page_cur.block->page.id.page_no() + == block->page.id.page_no()); + rec_t* my_rec = father_cursor.page_cur.rec; + + ulint page_no = btr_node_ptr_get_child_page_no(my_rec, offsets); + + if (page_no != block->page.id.page_no()) { + ib::info() << "father positioned on page " + << page_no << "instead of " + << block->page.id.page_no(); + offsets = btr_page_get_father_block( + NULL, heap, index, block, mtr, &father_cursor); + } + } else { + offsets = btr_page_get_father_block( + NULL, heap, index, block, mtr, &father_cursor); + } if (adjust) { nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor)); @@ -3912,6 +3607,13 @@ btr_compress( goto func_exit; } + ut_d(leftmost_child = + left_page_no != FIL_NULL + && (page_rec_get_next( + page_get_infimum_rec( + btr_cur_get_page(&father_cursor))) + == btr_cur_get_rec(&father_cursor))); + /* Decide the page to which we try to merge and which will inherit the locks */ @@ -3919,10 +3621,13 @@ btr_compress( &merge_block, mtr); DBUG_EXECUTE_IF("ib_always_merge_right", is_left = FALSE;); - - if(!is_left +retry: + if (!is_left && !btr_can_merge_with_page(cursor, right_page_no, &merge_block, mtr)) { + if (!merge_block) { + merge_page = NULL; + } goto err_exit; } @@ -3930,14 +3635,26 @@ btr_compress( #ifdef UNIV_BTR_DEBUG if (is_left) { - ut_a(btr_page_get_next(merge_page, mtr) - == buf_block_get_page_no(block)); + ut_a(btr_page_get_next(merge_page, mtr) + == block->page.id.page_no()); } else { - ut_a(btr_page_get_prev(merge_page, mtr) - == buf_block_get_page_no(block)); + ut_a(btr_page_get_prev(merge_page, mtr) + == block->page.id.page_no()); } #endif /* UNIV_BTR_DEBUG */ +#ifdef UNIV_GIS_DEBUG + if (dict_index_is_spatial(index)) { + if (is_left) { + fprintf(stderr, "GIS_DIAG: merge left %ld to %ld \n", + (long) block->page.id.page_no(), left_page_no); + } else { + fprintf(stderr, "GIS_DIAG: merge right %ld to 
%ld\n", + (long) block->page.id.page_no(), right_page_no); + } + } +#endif /* UNIV_GIS_DEBUG */ + ut_ad(page_validate(merge_page, index)); merge_page_zip = buf_block_get_page_zip(merge_block); @@ -3953,6 +3670,33 @@ btr_compress( /* Move records to the merge page */ if (is_left) { + btr_cur_t cursor2; + rtr_mbr_t new_mbr; + ulint* offsets2 = NULL; + + /* For rtree, we need to update father's mbr. */ + if (dict_index_is_spatial(index)) { + /* We only support merge pages with the same parent + page */ + if (!rtr_check_same_block( + index, &cursor2, + btr_cur_get_block(&father_cursor), + merge_block, heap)) { + is_left = false; + goto retry; + } + + offsets2 = rec_get_offsets( + btr_cur_get_rec(&cursor2), index, + NULL, ULINT_UNDEFINED, &heap); + + /* Check if parent entry needs to be updated */ + mbr_changed = rtr_merge_mbr_changed( + &cursor2, &father_cursor, + offsets2, offsets, &new_mbr, + merge_block, block, index); + } + rec_t* orig_pred = page_copy_rec_list_start( merge_block, block, page_get_supremum_rec(page), index, mtr); @@ -3964,10 +3708,51 @@ btr_compress( btr_search_drop_page_hash_index(block); /* Remove the page from the level list */ - btr_level_list_remove(space, zip_size, page, index, mtr); + btr_level_list_remove(space, page_size, page, index, mtr); + + if (dict_index_is_spatial(index)) { + rec_t* my_rec = father_cursor.page_cur.rec; + + ulint page_no = btr_node_ptr_get_child_page_no( + my_rec, offsets); + + if (page_no != block->page.id.page_no()) { + + ib::fatal() << "father positioned on " + << page_no << " instead of " + << block->page.id.page_no(); + + ut_ad(0); + } + + if (mbr_changed) { +#ifdef UNIV_DEBUG + bool success = rtr_update_mbr_field( + &cursor2, offsets2, &father_cursor, + merge_page, &new_mbr, NULL, mtr); + + ut_ad(success); +#else + rtr_update_mbr_field( + &cursor2, offsets2, &father_cursor, + merge_page, &new_mbr, NULL, mtr); +#endif + } else { + rtr_node_ptr_delete( + index, &father_cursor, block, mtr); + } - btr_node_ptr_delete(index, block, mtr); - lock_update_merge_left(merge_block, orig_pred, block); + /* No GAP lock needs to be worrying about */ + lock_mutex_enter(); + lock_rec_free_all_from_discard_page(block); + lock_mutex_exit(); + } else { + btr_node_ptr_delete(index, block, mtr); + if (!dict_table_is_locking_disabled(index->table)) { + lock_update_merge_left( + merge_block, orig_pred, block); + } + } if (adjust) { nth_rec += page_rec_get_n_recs_before(orig_pred); @@ -3983,7 +3768,22 @@ btr_compress( byte fil_page_prev[4]; #endif /* UNIV_BTR_DEBUG */ - btr_page_get_father(index, merge_block, mtr, &cursor2); + if (dict_index_is_spatial(index)) { + cursor2.rtr_info = NULL; + + /* For spatial index, we disallow merge of blocks + with different parents, since the merge would need + to update entry (for MBR and Primary key) in the + parent of block being merged */ + if (!rtr_check_same_block( + index, &cursor2, + btr_cur_get_block(&father_cursor), + merge_block, heap)) { + goto err_exit; + } + } else { + btr_page_get_father(index, merge_block, mtr, &cursor2); + } if (merge_page_zip && left_page_no == FIL_NULL) { @@ -4033,7 +3833,11 @@ btr_compress( #endif /* UNIV_BTR_DEBUG */ /* Remove the page from the level list */ - btr_level_list_remove(space, zip_size, page, index, mtr); + btr_level_list_remove(space, page_size, (page_t*)page, index, mtr); + + ut_ad(btr_node_ptr_get_child_page_no( + btr_cur_get_rec(&father_cursor), offsets) + == block->page.id.page_no()); /* Replace the address of the old child node (= page) with the address of the merge page 
to the right */ @@ -4042,21 +3846,58 @@ btr_compress( btr_cur_get_page_zip(&father_cursor), offsets, right_page_no, mtr); - compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor2, - BTR_CREATE_FLAG, - RB_NONE, mtr); - ut_a(err == DB_SUCCESS); - - if (!compressed) { - btr_cur_compress_if_useful(&cursor2, FALSE, mtr); +#ifdef UNIV_DEBUG + if (!page_is_leaf(page) && left_page_no == FIL_NULL) { + ut_ad(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( + page_rec_get_next(page_get_infimum_rec( + buf_block_get_frame(merge_block))), + page_is_comp(page))); } +#endif /* UNIV_DEBUG */ - lock_update_merge_right(merge_block, orig_succ, block); - } + /* For rtree, we need to update father's mbr. */ + if (dict_index_is_spatial(index)) { + ulint* offsets2; + offsets2 = rec_get_offsets( + btr_cur_get_rec(&cursor2), + index, NULL, ULINT_UNDEFINED, &heap); + + ut_ad(btr_node_ptr_get_child_page_no( + btr_cur_get_rec(&cursor2), offsets2) + == right_page_no); + + rtr_merge_and_update_mbr(&father_cursor, + &cursor2, + offsets, offsets2, + merge_page, merge_block, + block, index, mtr); + lock_mutex_enter(); + lock_rec_free_all_from_discard_page(block); + lock_mutex_exit(); + } else { + + compressed = btr_cur_pessimistic_delete(&err, TRUE, + &cursor2, + BTR_CREATE_FLAG, + false, mtr); + ut_a(err == DB_SUCCESS); + + if (!compressed) { + btr_cur_compress_if_useful(&cursor2, + FALSE, + mtr); + } - btr_blob_dbg_remove(page, index, "btr_compress"); + if (!dict_table_is_locking_disabled(index->table)) { + lock_update_merge_right( + merge_block, orig_succ, block); + } + } + } - if (!dict_index_is_clust(index) && page_is_leaf(merge_page)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) + && page_is_leaf(merge_page)) { /* Update the free bits of the B-tree page in the insert buffer bitmap. This has to be done in a separate mini-transaction that is committed before the @@ -4079,7 +3920,7 @@ btr_compress( committed mini-transaction, because in crash recovery, the free bits could momentarily be set too high. */ - if (zip_size) { + if (page_size.is_compressed()) { /* Because the free bits may be incremented and we cannot update the insert buffer bitmap in the same mini-transaction, the only safe @@ -4103,10 +3944,25 @@ btr_compress( index)); #endif /* UNIV_ZIP_DEBUG */ + if (dict_index_is_spatial(index)) { +#ifdef UNIV_GIS_DEBUG + fprintf(stderr, "GIS_DIAG: compressed away %ld\n", + (long) block->page.id.page_no()); + fprintf(stderr, "GIS_DIAG: merged to %ld\n", + (long) merge_block->page.id.page_no()); +#endif + + rtr_check_discard_page(index, NULL, block); + } + /* Free the file page */ btr_page_free(index, block, mtr); - ut_ad(btr_check_node_ptr(index, merge_block, mtr)); + /* btr_check_node_ptr() needs parent block latched. + If the merge_block's parent block is not same, + we cannot use btr_check_node_ptr() */ + ut_ad(leftmost_child + || btr_check_node_ptr(index, merge_block, mtr)); func_exit: mem_heap_free(heap); @@ -4124,10 +3980,11 @@ func_exit: err_exit: /* We play it safe and reset the free bits. */ - if (zip_size + if (page_size.is_compressed() && merge_page && page_is_leaf(merge_page) && !dict_index_is_clust(index)) { + ibuf_reset_free_bits(merge_block); } @@ -4154,7 +4011,7 @@ btr_discard_only_page_on_level( /* Save the PAGE_MAX_TRX_ID from the leaf page. 
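
btr_discard_only_page_on_level() climbs from the leaf towards the root, freeing each single-child page on the way, and finally re-initializes the root as an empty leaf that keeps the saved PAGE_MAX_TRX_ID. A toy sketch of that climb (in-memory nodes only, no mini-transactions, latching or lock inheritance):

#include <cstdint>
#include <memory>

/* Toy single-path subtree: each level has exactly one child page. */
struct toy_page_t {
        toy_page_t*                     father = nullptr;
        std::unique_ptr<toy_page_t>     only_child;
        std::uint64_t                   max_trx_id = 0;
};

/* Discard the whole single-page chain below the root, keeping
PAGE_MAX_TRX_ID: the root becomes an empty leaf again. */
static void toy_discard_only_page_on_level(toy_page_t& root, toy_page_t& leaf)
{
        const std::uint64_t     saved_max_trx_id = leaf.max_trx_id;

        toy_page_t*     block = &leaf;
        while (block != &root) {
                toy_page_t*     father = block->father;
                father->only_child.reset();     /* btr_page_free() analogue */
                block = father;
        }

        /* btr_page_empty(root, ..., level = 0, ...) analogue. */
        root.max_trx_id = saved_max_trx_id;
}
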
*/ max_trx_id = page_get_max_trx_id(buf_block_get_frame(block)); - while (buf_block_get_page_no(block) != dict_index_get_page(index)) { + while (block->page.id.page_no() != dict_index_get_page(index)) { btr_cur_t cursor; buf_block_t* father; const page_t* page = buf_block_get_frame(block); @@ -4164,13 +4021,22 @@ btr_discard_only_page_on_level( ut_a(btr_page_get_prev(page, mtr) == FIL_NULL); ut_a(btr_page_get_next(page, mtr) == FIL_NULL); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix( + mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); btr_search_drop_page_hash_index(block); + if (dict_index_is_spatial(index)) { + /* Check any concurrent search having this page */ + rtr_check_discard_page(index, NULL, block); + } + btr_page_get_father(index, block, mtr, &cursor); father = btr_cur_get_block(&cursor); - lock_update_discard(father, PAGE_HEAP_NO_SUPREMUM, block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_update_discard( + father, PAGE_HEAP_NO_SUPREMUM, block); + } /* Free the file page */ btr_page_free(index, block, mtr); @@ -4196,7 +4062,8 @@ btr_discard_only_page_on_level( btr_page_empty(block, buf_block_get_page_zip(block), index, 0, mtr); ut_ad(page_is_leaf(buf_block_get_frame(block))); - if (!dict_index_is_clust(index)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table)) { /* We play it safe and reset the free bits for the root */ ibuf_reset_free_bits(block); @@ -4211,7 +4078,6 @@ btr_discard_only_page_on_level( Discards a page from a B-tree. This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. This cannot be used for the root page, which is allowed to be empty. */ -UNIV_INTERN void btr_discard_page( /*=============*/ @@ -4220,8 +4086,6 @@ btr_discard_page( mtr_t* mtr) /*!< in: mtr */ { dict_index_t* index; - ulint space; - ulint zip_size; ulint left_page_no; ulint right_page_no; buf_block_t* merge_block; @@ -4229,40 +4093,65 @@ btr_discard_page( buf_block_t* block; page_t* page; rec_t* node_ptr; +#ifdef UNIV_DEBUG + btr_cur_t parent_cursor; + bool parent_is_different = false; +#endif block = btr_cur_get_block(cursor); index = btr_cur_get_index(cursor); - ut_ad(dict_index_get_page(index) != buf_block_get_page_no(block)); - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); + ut_ad(dict_index_get_page(index) != block->page.id.page_no()); + + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + + const ulint space = dict_index_get_space(index); MONITOR_INC(MONITOR_INDEX_DISCARD); +#ifdef UNIV_DEBUG + btr_page_get_father(index, block, mtr, &parent_cursor); +#endif + /* Decide the page which will inherit the locks */ left_page_no = btr_page_get_prev(buf_block_get_frame(block), mtr); right_page_no = btr_page_get_next(buf_block_get_frame(block), mtr); + const page_size_t page_size(dict_table_page_size(index->table)); + if (left_page_no != FIL_NULL) { - merge_block = btr_block_get(space, zip_size, left_page_no, - RW_X_LATCH, index, mtr); + merge_block = btr_block_get( + page_id_t(space, left_page_no), page_size, + RW_X_LATCH, index, mtr); + merge_page = buf_block_get_frame(merge_block); #ifdef UNIV_BTR_DEBUG 
ut_a(btr_page_get_next(merge_page, mtr) - == buf_block_get_page_no(block)); + == block->page.id.page_no()); #endif /* UNIV_BTR_DEBUG */ + ut_d(parent_is_different = + (page_rec_get_next( + page_get_infimum_rec( + btr_cur_get_page( + &parent_cursor))) + == btr_cur_get_rec(&parent_cursor))); } else if (right_page_no != FIL_NULL) { - merge_block = btr_block_get(space, zip_size, right_page_no, - RW_X_LATCH, index, mtr); + merge_block = btr_block_get( + page_id_t(space, right_page_no), page_size, + RW_X_LATCH, index, mtr); + merge_page = buf_block_get_frame(merge_block); #ifdef UNIV_BTR_DEBUG ut_a(btr_page_get_prev(merge_page, mtr) - == buf_block_get_page_no(block)); + == block->page.id.page_no()); #endif /* UNIV_BTR_DEBUG */ + ut_d(parent_is_different = page_rec_is_supremum( + page_rec_get_next(btr_cur_get_rec(&parent_cursor)))); } else { btr_discard_only_page_on_level(index, block, mtr); @@ -4292,7 +4181,8 @@ btr_discard_page( btr_node_ptr_delete(index, block, mtr); /* Remove the page from the level list */ - btr_level_list_remove(space, zip_size, page, index, mtr); + btr_level_list_remove(space, page_size, (page_t*)page, index, mtr); + #ifdef UNIV_ZIP_DEBUG { page_zip_des_t* merge_page_zip @@ -4302,27 +4192,34 @@ btr_discard_page( } #endif /* UNIV_ZIP_DEBUG */ - if (left_page_no != FIL_NULL) { - lock_update_discard(merge_block, PAGE_HEAP_NO_SUPREMUM, - block); - } else { - lock_update_discard(merge_block, - lock_get_min_heap_no(merge_block), - block); + if (!dict_table_is_locking_disabled(index->table)) { + if (left_page_no != FIL_NULL) { + lock_update_discard(merge_block, PAGE_HEAP_NO_SUPREMUM, + block); + } else { + lock_update_discard(merge_block, + lock_get_min_heap_no(merge_block), + block); + } } - btr_blob_dbg_remove(page, index, "btr_discard_page"); + if (dict_index_is_spatial(index)) { + rtr_check_discard_page(index, cursor, block); + } /* Free the file page */ btr_page_free(index, block, mtr); - ut_ad(btr_check_node_ptr(index, merge_block, mtr)); + /* btr_check_node_ptr() needs parent block latched. + If the merge_block's parent block is not same, + we cannot use btr_check_node_ptr() */ + ut_ad(parent_is_different + || btr_check_node_ptr(index, merge_block, mtr)); } #ifdef UNIV_BTR_PRINT /*************************************************************//** Prints size info of a B-tree. */ -UNIV_INTERN void btr_print_size( /*===========*/ @@ -4348,7 +4245,7 @@ btr_print_size( fputs("INFO OF THE NON-LEAF PAGE SEGMENT\n", stderr); fseg_print(seg, &mtr); - if (!dict_index_is_univ(index)) { + if (!dict_index_is_ibuf(index)) { seg = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF; @@ -4379,10 +4276,10 @@ btr_print_recursive( ulint i = 0; mtr_t mtr2; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - fprintf(stderr, "NODE ON LEVEL %lu page number %lu\n", - (ulong) btr_page_get_level(page, mtr), - (ulong) buf_block_get_page_no(block)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_SX_FIX, index->table)); + + ib::info() << "NODE ON LEVEL " << btr_page_get_level(page, mtr) + << " page " << block->page.id; page_print(block, index, width, width); @@ -4423,7 +4320,6 @@ btr_print_recursive( /**************************************************************//** Prints directories and other info of all nodes in the tree. 
*/ -UNIV_INTERN void btr_print_index( /*============*/ @@ -4443,7 +4339,7 @@ btr_print_index( mtr_start(&mtr); - root = btr_root_block_get(index, RW_X_LATCH, &mtr); + root = btr_root_block_get(index, RW_SX_LATCH, &mtr); btr_print_recursive(index, root, width, &heap, &offsets, &mtr); if (heap) { @@ -4452,15 +4348,14 @@ btr_print_index( mtr_commit(&mtr); - btr_validate_index(index, 0); + ut_ad(btr_validate_index(index, 0, false)); } #endif /* UNIV_BTR_PRINT */ #ifdef UNIV_DEBUG /************************************************************//** Checks that the node pointer to a page is appropriate. -@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool btr_check_node_ptr( /*===============*/ @@ -4474,8 +4369,9 @@ btr_check_node_ptr( btr_cur_t cursor; page_t* page = buf_block_get_frame(block); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); - if (dict_index_get_page(index) == buf_block_get_page_no(block)) { + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + + if (dict_index_get_page(index) == block->page.id.page_no()) { return(TRUE); } @@ -4493,7 +4389,16 @@ btr_check_node_ptr( index, page_rec_get_next(page_get_infimum_rec(page)), 0, heap, btr_page_get_level(page, mtr)); - ut_a(!cmp_dtuple_rec(tuple, btr_cur_get_rec(&cursor), offsets)); + /* For spatial index, the MBR in the parent rec could be different + with that of first rec of child, their relationship should be + "WITHIN" relationship */ + if (dict_index_is_spatial(index)) { + ut_a(!cmp_dtuple_rec_with_gis( + tuple, btr_cur_get_rec(&cursor), + offsets, PAGE_CUR_WITHIN)); + } else { + ut_a(!cmp_dtuple_rec(tuple, btr_cur_get_rec(&cursor), offsets)); + } func_exit: mem_heap_free(heap); @@ -4511,17 +4416,17 @@ btr_index_rec_validate_report( const rec_t* rec, /*!< in: index record */ const dict_index_t* index) /*!< in: index */ { - fputs("InnoDB: Record in ", stderr); - dict_index_name_print(stderr, NULL, index); - fprintf(stderr, ", page %lu, at offset %lu\n", - page_get_page_no(page), (ulint) page_offset(rec)); + ib::info() << "Record in index " << index->name + << " of table " << index->table->name + << ", page " << page_id_t(page_get_space_id(page), + page_get_page_no(page)) + << ", at offset " << page_offset(rec); } /************************************************************//** Checks the size and number of fields in a record based on the definition of the index. 
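
btr_index_rec_validate() checks each stored field length against the index definition: a fixed-size column must match exactly unless it is SQL NULL, and a prefix-indexed column must not exceed its prefix length (the DATA_POINT/MBR handling added below only adjusts the expected fixed size first). A simplified sketch of that length check with hypothetical field descriptors:

#include <cstddef>
#include <cstdint>
#include <vector>

static const std::size_t TOY_SQL_NULL = SIZE_MAX;

struct toy_field_def_t {
        std::size_t     fixed_size;     /* 0 if variable length */
        std::size_t     prefix_len;     /* 0 if the full column is indexed */
};

/* Returns true when `len` is consistent with the field definition,
mirroring the two error conditions reported by the real function. */
static bool toy_field_len_ok(const toy_field_def_t& f, std::size_t len)
{
        if (len == TOY_SQL_NULL) {
                return(true);
        }
        if (f.prefix_len == 0) {
                return(f.fixed_size == 0 || len == f.fixed_size);
        }
        return(len <= f.prefix_len);
}

static bool toy_rec_validate(const std::vector<toy_field_def_t>& defs,
                             const std::vector<std::size_t>& lens)
{
        if (lens.size() != defs.size()) {
                return(false);  /* wrong number of fields */
        }
        for (std::size_t i = 0; i < defs.size(); i++) {
                if (!toy_field_len_ok(defs[i], lens[i])) {
                        return(false);
                }
        }
        return(true);
}
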
-@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool btr_index_rec_validate( /*===================*/ @@ -4542,7 +4447,7 @@ btr_index_rec_validate( page = page_align(rec); - if (dict_index_is_univ(index)) { + if (dict_index_is_ibuf(index)) { /* The insert buffer index tree can contain records from any other index: we cannot check the number of fields or their length */ @@ -4550,25 +4455,34 @@ btr_index_rec_validate( return(TRUE); } +#ifdef VIRTUAL_INDEX_DEBUG + if (dict_index_has_virtual(index)) { + fprintf(stderr, "index name is %s\n", index->name()); + } +#endif if ((ibool)!!page_is_comp(page) != dict_table_is_comp(index->table)) { btr_index_rec_validate_report(page, rec, index); - fprintf(stderr, "InnoDB: compact flag=%lu, should be %lu\n", - (ulong) !!page_is_comp(page), - (ulong) dict_table_is_comp(index->table)); + + ib::error() << "Compact flag=" << !!page_is_comp(page) + << ", should be " << dict_table_is_comp(index->table); return(FALSE); } n = dict_index_get_n_fields(index); - if (!page_is_comp(page) && rec_get_n_fields_old(rec) != n) { + if (!page_is_comp(page) + && (rec_get_n_fields_old(rec) != n + /* a record for older SYS_INDEXES table + (missing merge_threshold column) is acceptable. */ + && !(index->id == DICT_INDEXES_ID + && rec_get_n_fields_old(rec) == n - 1))) { btr_index_rec_validate_report(page, rec, index); - fprintf(stderr, "InnoDB: has %lu fields, should have %lu\n", - (ulong) rec_get_n_fields_old(rec), (ulong) n); - if (dump_on_error) { - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); + ib::error() << "Has " << rec_get_n_fields_old(rec) + << " fields, should have " << n; + if (dump_on_error) { fputs("InnoDB: corrupt record ", stderr); rec_print_old(stderr, rec); putc('\n', stderr); @@ -4579,38 +4493,58 @@ btr_index_rec_validate( offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); for (i = 0; i < n; i++) { - ulint fixed_size = dict_col_get_fixed_size( - dict_index_get_nth_col(index, i), page_is_comp(page)); + dict_field_t* field = dict_index_get_nth_field(index, i); + ulint fixed_size = dict_col_get_fixed_size( + dict_field_get_col(field), + page_is_comp(page)); rec_get_nth_field_offs(offsets, i, &len); /* Note that if fixed_size != 0, it equals the - length of a fixed-size column in the clustered index. + length of a fixed-size column in the clustered index, + except the DATA_POINT, whose length would be MBR_LEN + when it's indexed in a R-TREE. We should adjust it here. A prefix index of the column is of fixed, but different length. When fixed_size == 0, prefix_len is the maximum length of the prefix index column. */ - if ((dict_index_get_nth_field(index, i)->prefix_len == 0 + if (dict_field_get_col(field)->mtype == DATA_POINT) { + ut_ad(fixed_size == DATA_POINT_LEN); + if (dict_index_is_spatial(index)) { + /* For DATA_POINT data, when it has R-tree + index, the fixed_len is the MBR of the point. + But if it's a primary key and on R-TREE + as the PK pointer, the length shall be + DATA_POINT_LEN as well. 
*/ + ut_ad((field->fixed_len == DATA_MBR_LEN + && i == 0) + || (field->fixed_len == DATA_POINT_LEN + && i != 0)); + fixed_size = field->fixed_len; + } + } + + if ((field->prefix_len == 0 && len != UNIV_SQL_NULL && fixed_size && len != fixed_size) - || (dict_index_get_nth_field(index, i)->prefix_len > 0 + || (field->prefix_len > 0 && len != UNIV_SQL_NULL && len - > dict_index_get_nth_field(index, i)->prefix_len)) { + > field->prefix_len)) { btr_index_rec_validate_report(page, rec, index); - fprintf(stderr, - "InnoDB: field %lu len is %lu," - " should be %lu\n", - (ulong) i, (ulong) len, (ulong) fixed_size); - if (dump_on_error) { - buf_page_print(page, 0, - BUF_PAGE_PRINT_NO_CRASH); + ib::error error; - fputs("InnoDB: corrupt record ", stderr); - rec_print_new(stderr, rec, offsets); - putc('\n', stderr); + error << "Field " << i << " len is " << len + << ", should be " << fixed_size; + + if (dump_on_error) { + error << "; "; + rec_print(error.m_oss, rec, + rec_get_info_bits( + rec, rec_offs_comp(offsets)), + offsets); } if (heap) { mem_heap_free(heap); @@ -4619,6 +4553,12 @@ btr_index_rec_validate( } } +#ifdef VIRTUAL_INDEX_DEBUG + if (dict_index_has_virtual(index)) { + rec_print_new(stderr, rec, offsets); + } +#endif + if (heap) { mem_heap_free(heap); } @@ -4628,7 +4568,7 @@ btr_index_rec_validate( /************************************************************//** Checks the size and number of fields in records based on the definition of the index. -@return TRUE if ok */ +@return TRUE if ok */ static ibool btr_index_page_validate( @@ -4693,13 +4633,14 @@ btr_validate_report1( ulint level, /*!< in: B-tree level */ const buf_block_t* block) /*!< in: index page */ { - fprintf(stderr, "InnoDB: Error in page %lu of ", - buf_block_get_page_no(block)); - dict_index_name_print(stderr, NULL, index); - if (level) { - fprintf(stderr, ", index tree level %lu", level); + ib::error error; + error << "In page " << block->page.id.page_no() + << " of index " << index->name + << " of table " << index->table->name; + + if (level > 0) { + error << ", index tree level " << level; } - putc('\n', stderr); } /************************************************************//** @@ -4713,30 +4654,28 @@ btr_validate_report2( const buf_block_t* block1, /*!< in: first index page */ const buf_block_t* block2) /*!< in: second index page */ { - fprintf(stderr, "InnoDB: Error in pages %lu and %lu of ", - buf_block_get_page_no(block1), - buf_block_get_page_no(block2)); - dict_index_name_print(stderr, NULL, index); - if (level) { - fprintf(stderr, ", index tree level %lu", level); - } - putc('\n', stderr); + ib::error error; + error << "In pages " << block1->page.id + << " and " << block2->page.id << " of index " << index->name + << " of table " << index->table->name; + + if (level > 0) { + error << ", index tree level " << level; + } } /************************************************************//** Validates index tree level. 
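The rewritten report helpers in this hunk build their message through stream objects (ib::info, ib::error) instead of fprintf, and btr_index_rec_validate() even keeps a named object alive while appending to it. A minimal sketch of the general RAII idea such stream loggers appear to rely on, with the message presumably flushed when the object goes out of scope; this is an illustration of the pattern, not the real InnoDB logger classes:

#include <iostream>
#include <sstream>

/* Illustrative stream-style logger: text streamed into the object is
emitted as one line when the object is destroyed. */
class error_logger {
public:
	~error_logger() { std::cerr << "[ERROR] " << m_oss.str() << std::endl; }

	template<typename T>
	error_logger& operator<<(const T& value)
	{
		m_oss << value;
		return(*this);
	}

	std::ostringstream	m_oss;
};

int
main()
{
	/* Temporary form: the message goes out at the end of the statement. */
	error_logger() << "In page " << 42 << " of index " << "PRIMARY";

	/* Named form, as used in btr_index_rec_validate() above: more
	pieces can be appended before the destructor emits the line. */
	error_logger	err;
	err << "Field " << 3 << " len is " << 12 << ", should be " << 8;
	return(0);
}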
-@return TRUE if ok */ +@return TRUE if ok */ static bool btr_validate_level( /*===============*/ dict_index_t* index, /*!< in: index tree */ const trx_t* trx, /*!< in: transaction or NULL */ - ulint level) /*!< in: level number */ + ulint level, /*!< in: level number */ + bool lockout)/*!< in: true if X-latch index is intended */ { - ulint space; - ulint space_flags; - ulint zip_size; buf_block_t* block; page_t* page; buf_block_t* right_block = 0; /* remove warning */ @@ -4758,25 +4697,42 @@ btr_validate_level( #ifdef UNIV_ZIP_DEBUG page_zip_des_t* page_zip; #endif /* UNIV_ZIP_DEBUG */ + ulint savepoint = 0; + ulint savepoint2 = 0; + ulint parent_page_no = FIL_NULL; + ulint parent_right_page_no = FIL_NULL; + bool rightmost_child = false; mtr_start(&mtr); - mtr_x_lock(dict_index_get_lock(index), &mtr); + if (!srv_read_only_mode) { + if (lockout) { + mtr_x_lock(dict_index_get_lock(index), &mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), &mtr); + } + } - block = btr_root_block_get(index, RW_X_LATCH, &mtr); + block = btr_root_block_get(index, RW_SX_LATCH, &mtr); page = buf_block_get_frame(block); seg = page + PAGE_HEADER + PAGE_BTR_SEG_TOP; - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); +#ifdef UNIV_DEBUG + if (dict_index_is_spatial(index)) { + fprintf(stderr, "Root page no: %lu\n", + (ulong) page_get_page_no(page)); + } +#endif - fil_space_get_latch(space, &space_flags); + const fil_space_t* space = fil_space_get(index->space); + const page_size_t table_page_size( + dict_table_page_size(index->table)); + const page_size_t space_page_size(space->flags); - if (zip_size != dict_tf_get_zip_size(space_flags)) { + if (!table_page_size.equals_to(space_page_size)) { - ib_logf(IB_LOG_LEVEL_WARN, - "Flags mismatch: table=%lu, tablespace=%lu", - (ulint) index->table->flags, (ulint) space_flags); + ib::warn() << "Flags mismatch: table=" << index->table->flags + << ", tablespace=" << space->flags; mtr_commit(&mtr); @@ -4787,17 +4743,18 @@ btr_validate_level( const rec_t* node_ptr; if (fseg_page_is_free(seg, - block->page.space, block->page.offset)) { + block->page.id.space(), + block->page.id.page_no())) { btr_validate_report1(index, level, block); - ib_logf(IB_LOG_LEVEL_WARN, "page is free"); + ib::warn() << "Page is free"; ret = false; } - ut_a(space == buf_block_get_space(block)); - ut_a(space == page_get_space_id(page)); + ut_a(index->space == block->page.id.space()); + ut_a(index->space == page_get_space_id(page)); #ifdef UNIV_ZIP_DEBUG page_zip = buf_block_get_page_zip(block); ut_a(!page_zip || page_zip_validate(page_zip, page, index)); @@ -4810,8 +4767,38 @@ btr_validate_level( node_ptr = page_cur_get_rec(&cursor); offsets = rec_get_offsets(node_ptr, index, offsets, ULINT_UNDEFINED, &heap); + + savepoint2 = mtr_set_savepoint(&mtr); block = btr_node_ptr_get_child(node_ptr, index, offsets, &mtr); page = buf_block_get_frame(block); + + /* For R-Tree, since record order might not be the same as + linked index page in the lower level, we need to travers + backwards to get the first page rec in this level. + This is only used for index validation. Spatial index + does not use such scan for any of its DML or query + operations */ + if (dict_index_is_spatial(index)) { + left_page_no = btr_page_get_prev(page, &mtr); + + while (left_page_no != FIL_NULL) { + page_id_t left_page_id( + index->space, left_page_no); + /* To obey latch order of tree blocks, + we should release the right_block once to + obtain lock of the uncle block. 
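btr_validate_level() now takes the index latch in one of three ways: not at all under a read-only server, fully exclusive when the caller asked for lockout, and in the new shared-exclusive (SX) mode otherwise, which is less restrictive than a full X latch. A small sketch of that decision, using an illustrative enum rather than the real mtr_x_lock()/mtr_sx_lock() macros:

#include <cassert>

/* Illustrative latch modes; stand-ins for RW_X_LATCH, RW_SX_LATCH and
"no latch", not InnoDB's real constants. */
enum index_latch_t {
	INDEX_LATCH_NONE,	/* read-only server: nothing can change */
	INDEX_LATCH_SX,		/* blocks other tree-structure writers */
	INDEX_LATCH_X		/* blocks everybody, e.g. for R-trees */
};

static index_latch_t
pick_index_latch(bool read_only_server, bool lockout)
{
	if (read_only_server) {
		return(INDEX_LATCH_NONE);
	}

	return(lockout ? INDEX_LATCH_X : INDEX_LATCH_SX);
}

int
main()
{
	assert(pick_index_latch(true, false) == INDEX_LATCH_NONE);
	assert(pick_index_latch(false, true) == INDEX_LATCH_X);
	assert(pick_index_latch(false, false) == INDEX_LATCH_SX);
	return(0);
}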
*/ + mtr_release_block_at_savepoint( + &mtr, savepoint2, block); + + savepoint2 = mtr_set_savepoint(&mtr); + block = btr_block_get( + left_page_id, + table_page_size, + RW_SX_LATCH, index, &mtr); + page = buf_block_get_frame(block); + left_page_no = btr_page_get_prev(page, &mtr); + } + } } /* Now we are on the desired level. Loop through the pages on that @@ -4825,28 +4812,34 @@ btr_validate_level( loop: mem_heap_empty(heap); offsets = offsets2 = NULL; - mtr_x_lock(dict_index_get_lock(index), &mtr); + if (!srv_read_only_mode) { + if (lockout) { + mtr_x_lock(dict_index_get_lock(index), &mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), &mtr); + } + } #ifdef UNIV_ZIP_DEBUG page_zip = buf_block_get_page_zip(block); ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ - ut_a(block->page.space == space); + ut_a(block->page.id.space() == index->space); - if (fseg_page_is_free(seg, block->page.space, block->page.offset)) { + if (fseg_page_is_free(seg, + block->page.id.space(), + block->page.id.page_no())) { btr_validate_report1(index, level, block); - ib_logf(IB_LOG_LEVEL_WARN, "Page is marked as free"); + ib::warn() << "Page is marked as free"; ret = false; } else if (btr_page_get_index_id(page) != index->id) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Page index id " IB_ID_FMT " != data dictionary " - "index id " IB_ID_FMT, - btr_page_get_index_id(page), index->id); + ib::error() << "Page index id " << btr_page_get_index_id(page) + << " != data dictionary index id " << index->id; ret = false; @@ -4874,17 +4867,21 @@ loop: if (right_page_no != FIL_NULL) { const rec_t* right_rec; - right_block = btr_block_get(space, zip_size, right_page_no, - RW_X_LATCH, index, &mtr); + savepoint = mtr_set_savepoint(&mtr); + + right_block = btr_block_get( + page_id_t(index->space, right_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); + right_page = buf_block_get_frame(right_block); + if (btr_page_get_prev(right_page, &mtr) != page_get_page_no(page)) { btr_validate_report2(index, level, block, right_block); fputs("InnoDB: broken FIL_PAGE_NEXT" " or FIL_PAGE_PREV links\n", stderr); - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(right_page, 0, BUF_PAGE_PRINT_NO_CRASH); ret = false; } @@ -4892,8 +4889,6 @@ loop: if (page_is_comp(right_page) != page_is_comp(page)) { btr_validate_report2(index, level, block, right_block); fputs("InnoDB: 'compact' flag mismatch\n", stderr); - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(right_page, 0, BUF_PAGE_PRINT_NO_CRASH); ret = false; @@ -4907,17 +4902,19 @@ loop: offsets, ULINT_UNDEFINED, &heap); offsets2 = rec_get_offsets(right_rec, index, offsets2, ULINT_UNDEFINED, &heap); - if (cmp_rec_rec(rec, right_rec, offsets, offsets2, - index) >= 0) { + + /* For spatial index, we cannot guarantee the key ordering + across pages, so skip the record compare verification for + now. 
Will enhanced in special R-Tree index validation scheme */ + if (!dict_index_is_spatial(index) + && cmp_rec_rec(rec, right_rec, + offsets, offsets2, index) >= 0) { btr_validate_report2(index, level, block, right_block); fputs("InnoDB: records in wrong order" " on adjacent pages\n", stderr); - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(right_page, 0, BUF_PAGE_PRINT_NO_CRASH); - fputs("InnoDB: record ", stderr); rec = page_rec_get_prev(page_get_supremum_rec(page)); rec_print(stderr, rec, index); @@ -4938,35 +4935,49 @@ loop: page_is_comp(page))); } - if (buf_block_get_page_no(block) != dict_index_get_page(index)) { + /* Similarly skip the father node check for spatial index for now, + for a couple of reasons: + 1) As mentioned, there is no ordering relationship between records + in parent level and linked pages in the child level. + 2) Search parent from root is very costly for R-tree. + We will add special validation mechanism for R-tree later (WL #7520) */ + if (!dict_index_is_spatial(index) + && block->page.id.page_no() != dict_index_get_page(index)) { /* Check father node pointers */ - rec_t* node_ptr; - offsets = btr_page_get_father_block(offsets, heap, index, - block, &mtr, &node_cur); + btr_cur_position( + index, page_rec_get_next(page_get_infimum_rec(page)), + block, &node_cur); + offsets = btr_page_get_father_node_ptr_for_validate( + offsets, heap, &node_cur, &mtr); + father_page = btr_cur_get_page(&node_cur); node_ptr = btr_cur_get_rec(&node_cur); + parent_page_no = page_get_page_no(father_page); + parent_right_page_no = btr_page_get_next(father_page, &mtr); + rightmost_child = page_rec_is_supremum( + page_rec_get_next(node_ptr)); + btr_cur_position( - index, page_rec_get_prev(page_get_supremum_rec(page)), + index, + page_rec_get_prev(page_get_supremum_rec(page)), block, &node_cur); - offsets = btr_page_get_father_node_ptr(offsets, heap, - &node_cur, &mtr); + + offsets = btr_page_get_father_node_ptr_for_validate( + offsets, heap, &node_cur, &mtr); if (node_ptr != btr_cur_get_rec(&node_cur) || btr_node_ptr_get_child_page_no(node_ptr, offsets) - != buf_block_get_page_no(block)) { + != block->page.id.page_no()) { btr_validate_report1(index, level, block); fputs("InnoDB: node pointer to the page is wrong\n", stderr); - buf_page_print(father_page, 0, BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - fputs("InnoDB: node ptr ", stderr); rec_print(stderr, node_ptr, index); @@ -4997,14 +5008,9 @@ loop: btr_validate_report1(index, level, block); - buf_page_print(father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print(page, 0, - BUF_PAGE_PRINT_NO_CRASH); + ib::error() << "Node ptrs differ on levels > 0"; - fputs("InnoDB: Error: node ptrs differ" - " on levels > 0\n" - "InnoDB: node ptr ", stderr); + fputs("InnoDB: node ptr ",stderr); rec_print_new(stderr, node_ptr, offsets); fputs("InnoDB: first rec ", stderr); rec_print(stderr, first_rec, index); @@ -5026,12 +5032,41 @@ loop: page_get_supremum_rec(father_page))); ut_a(btr_page_get_next(father_page, &mtr) == FIL_NULL); } else { - const rec_t* right_node_ptr - = page_rec_get_next(node_ptr); + const rec_t* right_node_ptr; + + right_node_ptr = page_rec_get_next(node_ptr); + + if (!lockout && rightmost_child) { + + /* To obey latch order of tree blocks, + we should release the right_block once to + obtain lock of the uncle block. 
*/ + mtr_release_block_at_savepoint( + &mtr, savepoint, right_block); + + btr_block_get( + page_id_t(index->space, + parent_right_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); + + right_block = btr_block_get( + page_id_t(index->space, + right_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); + } + + btr_cur_position( + index, page_rec_get_next( + page_get_infimum_rec( + buf_block_get_frame( + right_block))), + right_block, &right_node_cur); + + offsets = btr_page_get_father_node_ptr_for_validate( + offsets, heap, &right_node_cur, &mtr); - offsets = btr_page_get_father_block( - offsets, heap, index, right_block, - &mtr, &right_node_cur); if (right_node_ptr != page_get_supremum_rec(father_page)) { @@ -5044,16 +5079,6 @@ loop: btr_validate_report1(index, level, block); - - buf_page_print( - father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - right_page, 0, - BUF_PAGE_PRINT_NO_CRASH); } } else { page_t* right_father_page @@ -5070,19 +5095,6 @@ loop: btr_validate_report1(index, level, block); - - buf_page_print( - father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - right_father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - right_page, 0, - BUF_PAGE_PRINT_NO_CRASH); } if (page_get_page_no(right_father_page) @@ -5095,19 +5107,6 @@ loop: btr_validate_report1(index, level, block); - - buf_page_print( - father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - right_father_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - page, 0, - BUF_PAGE_PRINT_NO_CRASH); - buf_page_print( - right_page, 0, - BUF_PAGE_PRINT_NO_CRASH); } } } @@ -5125,9 +5124,29 @@ node_ptr_fails: mtr_start(&mtr); + if (!lockout) { + if (rightmost_child) { + if (parent_right_page_no != FIL_NULL) { + btr_block_get( + page_id_t( + index->space, + parent_right_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); + } + } else if (parent_page_no != FIL_NULL) { + btr_block_get( + page_id_t(index->space, + parent_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); + } + } + block = btr_block_get( - space, zip_size, right_page_no, - RW_X_LATCH, index, &mtr); + page_id_t(index->space, right_page_no), + table_page_size, + RW_SX_LATCH, index, &mtr); page = buf_block_get_frame(block); @@ -5139,15 +5158,55 @@ node_ptr_fails: return(ret); } +/**************************************************************//** +Do an index level validation of spaital index tree. +@return true if no error found */ +bool +btr_validate_spatial_index( +/*=======================*/ + dict_index_t* index, /*!< in: index */ + const trx_t* trx) /*!< in: transaction or NULL */ +{ + + mtr_t mtr; + bool ok = true; + + mtr_start(&mtr); + + mtr_x_lock(dict_index_get_lock(index), &mtr); + + page_t* root = btr_root_get(index, &mtr); + ulint n = btr_page_get_level(root, &mtr); + +#ifdef UNIV_RTR_DEBUG + fprintf(stderr, "R-tree level is %lu\n", n); +#endif /* UNIV_RTR_DEBUG */ + + for (ulint i = 0; i <= n; ++i) { +#ifdef UNIV_RTR_DEBUG + fprintf(stderr, "Level %lu:\n", n - i); +#endif /* UNIV_RTR_DEBUG */ + + if (!btr_validate_level(index, trx, n - i, true)) { + ok = false; + break; + } + } + + mtr_commit(&mtr); + + return(ok); +} + /**************************************************************//** Checks the consistency of an index tree. 
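Both btr_validate_spatial_index() above and btr_validate_index() below walk the tree one level at a time, starting from the root level n and working down to the leaves, stopping at the first level that fails. A compact sketch of that control flow over a hypothetical per-level checker standing in for btr_validate_level():

#include <cassert>
#include <functional>

/* Validate levels n, n-1, ..., 0 and stop at the first failure, in the
same top-down order the functions above use. */
static bool
validate_all_levels(
	unsigned long					n,
	const std::function<bool(unsigned long)>&	check_level)
{
	for (unsigned long i = 0; i <= n; ++i) {
		if (!check_level(n - i)) {
			return(false);
		}
	}

	return(true);
}

int
main()
{
	/* Pretend level 1 of a three-level tree (levels 2..0) is corrupt. */
	const bool	ok = validate_all_levels(2, [](unsigned long level) {
		return(level != 1);
	});

	assert(!ok);
	return(0);
}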
@return DB_SUCCESS if ok, error code if not */ -UNIV_INTERN dberr_t btr_validate_index( /*===============*/ dict_index_t* index, /*!< in: index */ - const trx_t* trx) /*!< in: transaction or NULL */ + const trx_t* trx, /*!< in: transaction or NULL */ + bool lockout)/*!< in: true if X-latch index is intended */ { dberr_t err = DB_SUCCESS; @@ -5157,11 +5216,24 @@ btr_validate_index( return(err); } + if (dict_index_is_spatial(index)) { + if(!btr_validate_spatial_index(index, trx)) { + err = DB_ERROR; + } + return(err); + } + mtr_t mtr; mtr_start(&mtr); - mtr_x_lock(dict_index_get_lock(index), &mtr); + if (!srv_read_only_mode) { + if (lockout) { + mtr_x_lock(dict_index_get_lock(index), &mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), &mtr); + } + } page_t* root = btr_root_get(index, &mtr); @@ -5175,7 +5247,7 @@ btr_validate_index( for (ulint i = 0; i <= n; ++i) { - if (!btr_validate_level(index, trx, n - i)) { + if (!btr_validate_level(index, trx, n - i, lockout)) { err = DB_CORRUPTION; break; } @@ -5189,9 +5261,8 @@ btr_validate_index( /**************************************************************//** Checks if the page in the cursor can be merged with given page. If necessary, re-organize the merge_page. -@return TRUE if possible to merge. */ -UNIV_INTERN -ibool +@return true if possible to merge. */ +bool btr_can_merge_with_page( /*====================*/ btr_cur_t* cursor, /*!< in: cursor on the page to merge */ @@ -5201,34 +5272,33 @@ btr_can_merge_with_page( { dict_index_t* index; page_t* page; - ulint space; - ulint zip_size; ulint n_recs; ulint data_size; - ulint max_ins_size_reorg; + ulint max_ins_size_reorg; ulint max_ins_size; buf_block_t* mblock; page_t* mpage; DBUG_ENTER("btr_can_merge_with_page"); if (page_no == FIL_NULL) { - goto error; + *merge_block = NULL; + DBUG_RETURN(false); } index = btr_cur_get_index(cursor); - page = btr_cur_get_page(cursor); - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); + page = btr_cur_get_page(cursor); - mblock = btr_block_get(space, zip_size, page_no, RW_X_LATCH, index, - mtr); + const page_id_t page_id(dict_index_get_space(index), page_no); + const page_size_t page_size(dict_table_page_size(index->table)); + + mblock = btr_block_get(page_id, page_size, RW_X_LATCH, index, mtr); mpage = buf_block_get_frame(mblock); - n_recs = page_get_n_recs(page); - data_size = page_get_data_size(page); + n_recs = page_get_n_recs(page); + data_size = page_get_data_size(page); - max_ins_size_reorg = page_get_max_insert_size_after_reorganize( - mpage, n_recs); + max_ins_size_reorg = page_get_max_insert_size_after_reorganize( + mpage, n_recs); if (data_size > max_ins_size_reorg) { goto error; @@ -5237,7 +5307,7 @@ btr_can_merge_with_page( /* If compression padding tells us that merging will result in too packed up page i.e.: which is likely to cause compression failure then don't merge the pages. 
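The check that follows this comment keeps the same decision as before the page_size_t rewrite: the source page's data must fit into the merge target after a reorganize, and for compressed leaf pages the combined data must also stay below the compression-padding threshold so the merged page is still likely to compress. A minimal sketch of that decision with plain byte counts; the names and the threshold value are illustrative, not InnoDB's real constants:

#include <cassert>

/* Illustrative merge-feasibility check; "zip_optimal_size" stands in for
dict_index_zip_pad_optimal_page_size(). */
static bool
can_merge(
	unsigned long	data_size,		/* data on the page to merge */
	unsigned long	max_ins_size_reorg,	/* room on the target after
						reorganization */
	bool		compressed_leaf,
	unsigned long	target_data_size,	/* data already on the target */
	unsigned long	zip_optimal_size)	/* padding threshold */
{
	if (data_size > max_ins_size_reorg) {
		/* Would not fit even after reorganizing the target. */
		return(false);
	}

	if (compressed_leaf
	    && target_data_size + data_size >= zip_optimal_size) {
		/* Merging would likely make compression fail later. */
		return(false);
	}

	return(true);
}

int
main()
{
	/* Fits, and the page is not compressed: merge is allowed. */
	assert(can_merge(3000, 5000, false, 0, 0));

	/* Fits, but the combined data of the compressed leaf would exceed
	the padding threshold: merge is refused. */
	assert(!can_merge(3000, 5000, true, 6000, 8000));
	return(0);
}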
*/ - if (zip_size && page_is_leaf(mpage) + if (page_size.is_compressed() && page_is_leaf(mpage) && (page_get_data_size(mpage) + data_size >= dict_index_zip_pad_optimal_page_size(index))) { @@ -5272,11 +5342,11 @@ btr_can_merge_with_page( } *merge_block = mblock; - DBUG_RETURN(TRUE); + DBUG_RETURN(true); error: *merge_block = NULL; - DBUG_RETURN(FALSE); + DBUG_RETURN(false); } #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc new file mode 100644 index 00000000000..4a3de744823 --- /dev/null +++ b/storage/innobase/btr/btr0bulk.cc @@ -0,0 +1,995 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file btr/btr0bulk.cc +The B-tree bulk load + +Created 03/11/2014 Shaohua Wang +*******************************************************/ + +#include "btr0bulk.h" +#include "btr0btr.h" +#include "btr0cur.h" +#include "btr0pcur.h" +#include "ibuf0ibuf.h" + +/** Innodb B-tree index fill factor for bulk load. */ +long innobase_fill_factor; + +/** Initialize members, allocate page if needed and start mtr. +Note: we commit all mtrs on failure. +@return error code. */ +dberr_t +PageBulk::init() +{ + mtr_t* mtr; + buf_block_t* new_block; + page_t* new_page; + page_zip_des_t* new_page_zip; + ulint new_page_no; + + ut_ad(m_heap == NULL); + m_heap = mem_heap_create(1000); + + mtr = static_cast( + mem_heap_alloc(m_heap, sizeof(mtr_t))); + mtr_start(mtr); + mtr_x_lock(dict_index_get_lock(m_index), mtr); + mtr_set_log_mode(mtr, MTR_LOG_NO_REDO); + mtr_set_flush_observer(mtr, m_flush_observer); + + if (m_page_no == FIL_NULL) { + mtr_t alloc_mtr; + + /* We commit redo log for allocation by a separate mtr, + because we don't guarantee pages are committed following + the allocation order, and we will always generate redo log + for page allocation, even when creating a new tablespace. */ + mtr_start(&alloc_mtr); + alloc_mtr.set_named_space(dict_index_get_space(m_index)); + + ulint n_reserved; + bool success; + success = fsp_reserve_free_extents(&n_reserved, m_index->space, + 1, FSP_NORMAL, &alloc_mtr); + if (!success) { + mtr_commit(&alloc_mtr); + mtr_commit(mtr); + return(DB_OUT_OF_FILE_SPACE); + } + + /* Allocate a new page. 
*/ + new_block = btr_page_alloc(m_index, 0, FSP_UP, m_level, + &alloc_mtr, mtr); + + if (n_reserved > 0) { + fil_space_release_free_extents(m_index->space, + n_reserved); + } + + mtr_commit(&alloc_mtr); + + new_page = buf_block_get_frame(new_block); + new_page_zip = buf_block_get_page_zip(new_block); + new_page_no = page_get_page_no(new_page); + + if (new_page_zip) { + page_create_zip(new_block, m_index, m_level, 0, + NULL, mtr); + } else { + ut_ad(!dict_index_is_spatial(m_index)); + page_create(new_block, mtr, + dict_table_is_comp(m_index->table), + false); + btr_page_set_level(new_page, NULL, m_level, mtr); + } + + btr_page_set_next(new_page, NULL, FIL_NULL, mtr); + btr_page_set_prev(new_page, NULL, FIL_NULL, mtr); + + btr_page_set_index_id(new_page, NULL, m_index->id, mtr); + } else { + page_id_t page_id(dict_index_get_space(m_index), m_page_no); + page_size_t page_size(dict_table_page_size(m_index->table)); + + new_block = btr_block_get(page_id, page_size, + RW_X_LATCH, m_index, mtr); + + new_page = buf_block_get_frame(new_block); + new_page_zip = buf_block_get_page_zip(new_block); + new_page_no = page_get_page_no(new_page); + ut_ad(m_page_no == new_page_no); + + ut_ad(page_dir_get_n_heap(new_page) == PAGE_HEAP_NO_USER_LOW); + + btr_page_set_level(new_page, NULL, m_level, mtr); + } + + if (dict_index_is_sec_or_ibuf(m_index) + && !dict_table_is_temporary(m_index->table) + && page_is_leaf(new_page)) { + page_update_max_trx_id(new_block, NULL, m_trx_id, mtr); + } + + m_mtr = mtr; + m_block = new_block; + m_block->skip_flush_check = true; + m_page = new_page; + m_page_zip = new_page_zip; + m_page_no = new_page_no; + m_cur_rec = page_get_infimum_rec(new_page); + ut_ad(m_is_comp == !!page_is_comp(new_page)); + m_free_space = page_get_free_space_of_empty(m_is_comp); + + if (innobase_fill_factor == 100 && dict_index_is_clust(m_index)) { + /* Keep default behavior compatible with 5.6 */ + m_reserved_space = dict_index_get_space_reserve(); + } else { + m_reserved_space = + UNIV_PAGE_SIZE * (100 - innobase_fill_factor) / 100; + } + + m_padding_space = + UNIV_PAGE_SIZE - dict_index_zip_pad_optimal_page_size(m_index); + m_heap_top = page_header_get_ptr(new_page, PAGE_HEAP_TOP); + m_rec_no = page_header_get_field(new_page, PAGE_N_RECS); + + ut_d(m_total_data = 0); + page_header_set_field(m_page, NULL, PAGE_HEAP_TOP, UNIV_PAGE_SIZE - 1); + + return(DB_SUCCESS); +} + +/** Insert a record in the page. +@param[in] rec record +@param[in] offsets record offsets */ +void +PageBulk::insert( + const rec_t* rec, + ulint* offsets) +{ + ulint rec_size; + + ut_ad(m_heap != NULL); + + rec_size = rec_offs_size(offsets); + +#ifdef UNIV_DEBUG + /* Check whether records are in order. */ + if (!page_rec_is_infimum(m_cur_rec)) { + rec_t* old_rec = m_cur_rec; + ulint* old_offsets = rec_get_offsets( + old_rec, m_index, NULL, ULINT_UNDEFINED, &m_heap); + + ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index) + > 0); + } + + m_total_data += rec_size; +#endif /* UNIV_DEBUG */ + + /* 1. Copy the record to page. */ + rec_t* insert_rec = rec_copy(m_heap_top, rec, offsets); + rec_offs_make_valid(insert_rec, m_index, offsets); + + /* 2. Insert the record in the linked list. */ + rec_t* next_rec = page_rec_get_next(m_cur_rec); + + page_rec_set_next(insert_rec, next_rec); + page_rec_set_next(m_cur_rec, insert_rec); + + /* 3. Set the n_owned field in the inserted record to zero, + and set the heap_no field. 
*/ + if (m_is_comp) { + rec_set_n_owned_new(insert_rec, NULL, 0); + rec_set_heap_no_new(insert_rec, + PAGE_HEAP_NO_USER_LOW + m_rec_no); + } else { + rec_set_n_owned_old(insert_rec, 0); + rec_set_heap_no_old(insert_rec, + PAGE_HEAP_NO_USER_LOW + m_rec_no); + } + + /* 4. Set member variables. */ + ulint slot_size; + slot_size = page_dir_calc_reserved_space(m_rec_no + 1) + - page_dir_calc_reserved_space(m_rec_no); + + ut_ad(m_free_space >= rec_size + slot_size); + ut_ad(m_heap_top + rec_size < m_page + UNIV_PAGE_SIZE); + + m_free_space -= rec_size + slot_size; + m_heap_top += rec_size; + m_rec_no += 1; + m_cur_rec = insert_rec; +} + +/** Mark end of insertion to the page. Scan all records to set page dirs, +and set page header members. +Note: we refer to page_copy_rec_list_end_to_created_page. */ +void +PageBulk::finish() +{ + ut_ad(m_rec_no > 0); + +#ifdef UNIV_DEBUG + ut_ad(m_total_data + page_dir_calc_reserved_space(m_rec_no) + <= page_get_free_space_of_empty(m_is_comp)); + + /* To pass the debug tests we have to set these dummy values + in the debug version */ + page_dir_set_n_slots(m_page, NULL, UNIV_PAGE_SIZE / 2); +#endif + + ulint count = 0; + ulint n_recs = 0; + ulint slot_index = 0; + rec_t* insert_rec = page_rec_get_next(page_get_infimum_rec(m_page)); + page_dir_slot_t* slot = NULL; + + /* Set owner & dir. */ + do { + + count++; + n_recs++; + + if (count == (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2) { + + slot_index++; + + slot = page_dir_get_nth_slot(m_page, slot_index); + + page_dir_slot_set_rec(slot, insert_rec); + page_dir_slot_set_n_owned(slot, NULL, count); + + count = 0; + } + + insert_rec = page_rec_get_next(insert_rec); + } while (!page_rec_is_supremum(insert_rec)); + + if (slot_index > 0 + && (count + 1 + (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2 + <= PAGE_DIR_SLOT_MAX_N_OWNED)) { + /* We can merge the two last dir slots. This operation is + here to make this function imitate exactly the equivalent + task made using page_cur_insert_rec, which we use in database + recovery to reproduce the task performed by this function. + To be able to check the correctness of recovery, it is good + that it imitates exactly. */ + + count += (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2; + + page_dir_slot_set_n_owned(slot, NULL, 0); + + slot_index--; + } + + slot = page_dir_get_nth_slot(m_page, 1 + slot_index); + page_dir_slot_set_rec(slot, page_get_supremum_rec(m_page)); + page_dir_slot_set_n_owned(slot, NULL, count + 1); + + ut_ad(!dict_index_is_spatial(m_index)); + page_dir_set_n_slots(m_page, NULL, 2 + slot_index); + page_header_set_ptr(m_page, NULL, PAGE_HEAP_TOP, m_heap_top); + page_dir_set_n_heap(m_page, NULL, PAGE_HEAP_NO_USER_LOW + m_rec_no); + page_header_set_field(m_page, NULL, PAGE_N_RECS, m_rec_no); + + page_header_set_ptr(m_page, NULL, PAGE_LAST_INSERT, m_cur_rec); + page_header_set_field(m_page, NULL, PAGE_DIRECTION, PAGE_RIGHT); + page_header_set_field(m_page, NULL, PAGE_N_DIRECTION, 0); + + m_block->skip_flush_check = false; +} + +/** Commit inserts done to the page +@param[in] success Flag whether all inserts succeed. */ +void +PageBulk::commit( + bool success) +{ + if (success) { + ut_ad(page_validate(m_page, m_index)); + + /* Set no free space left and no buffered changes in ibuf. 
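PageBulk::finish() above assigns directory slot owners in fixed groups as it scans the records, then merges the last two groups when the tail group would be too small, so the page ends up with the layout a normal insert sequence would have produced. A runnable sketch of that grouping over plain record counts; PAGE_DIR_SLOT_MAX_N_OWNED is assumed to be 8 here, so a slot closes after every (8 + 1) / 2 = 4 records:

#include <cassert>
#include <vector>

static const unsigned long	SLOT_MAX_OWNED = 8;	/* assumed limit */
static const unsigned long	SLOT_STEP = (SLOT_MAX_OWNED + 1) / 2;

/* Return how many records each directory slot owns for a page holding
"n_recs" user records, mirroring the grouping in PageBulk::finish().
Slot 0 (the infimum) owns only itself; the last slot owns the supremum
plus whatever tail records remain. */
static std::vector<unsigned long>
slot_owners(unsigned long n_recs)
{
	std::vector<unsigned long>	owners;
	unsigned long			count = 0;

	owners.push_back(1);	/* infimum */

	for (unsigned long i = 0; i < n_recs; i++) {
		if (++count == SLOT_STEP) {
			owners.push_back(count);
			count = 0;
		}
	}

	/* Merge a short tail group into the previous full group when the
	combined size still fits in one slot. */
	if (owners.size() > 1
	    && count + 1 + SLOT_STEP <= SLOT_MAX_OWNED) {
		count += owners.back();
		owners.pop_back();
	}

	owners.push_back(count + 1);	/* tail records + supremum */
	return(owners);
}

int
main()
{
	/* 10 user records: the infimum slot, one slot of 4, and a merged
	tail slot owning 6 records plus the supremum. */
	const std::vector<unsigned long>	v = slot_owners(10);

	assert(v.size() == 3);
	assert(v[0] == 1 && v[1] == 4 && v[2] == 7);
	return(0);
}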
*/ + if (!dict_index_is_clust(m_index) + && !dict_table_is_temporary(m_index->table) + && page_is_leaf(m_page)) { + ibuf_set_bitmap_for_bulk_load( + m_block, innobase_fill_factor == 100); + } + } + + mtr_commit(m_mtr); +} + +/** Compress a page of compressed table +@return true compress successfully or no need to compress +@return false compress failed. */ +bool +PageBulk::compress() +{ + ut_ad(m_page_zip != NULL); + + return(page_zip_compress(m_page_zip, m_page, m_index, + page_zip_level, NULL, m_mtr)); +} + +/** Get node pointer +@return node pointer */ +dtuple_t* +PageBulk::getNodePtr() +{ + rec_t* first_rec; + dtuple_t* node_ptr; + + /* Create node pointer */ + first_rec = page_rec_get_next(page_get_infimum_rec(m_page)); + ut_a(page_rec_is_user_rec(first_rec)); + node_ptr = dict_index_build_node_ptr(m_index, first_rec, m_page_no, + m_heap, m_level); + + return(node_ptr); +} + +/** Get split rec in left page.We split a page in half when compresssion fails, +and the split rec will be copied to right page. +@return split rec */ +rec_t* +PageBulk::getSplitRec() +{ + rec_t* rec; + ulint* offsets; + ulint total_used_size; + ulint total_recs_size; + ulint n_recs; + + ut_ad(m_page_zip != NULL); + ut_ad(m_rec_no >= 2); + + ut_ad(page_get_free_space_of_empty(m_is_comp) > m_free_space); + total_used_size = page_get_free_space_of_empty(m_is_comp) + - m_free_space; + + total_recs_size = 0; + n_recs = 0; + offsets = NULL; + rec = page_get_infimum_rec(m_page); + + do { + rec = page_rec_get_next(rec); + ut_ad(page_rec_is_user_rec(rec)); + + offsets = rec_get_offsets(rec, m_index, + offsets, ULINT_UNDEFINED, + &(m_heap)); + total_recs_size += rec_offs_size(offsets); + n_recs++; + } while (total_recs_size + page_dir_calc_reserved_space(n_recs) + < total_used_size / 2); + + /* Keep at least one record on left page */ + if (page_rec_is_infimum(page_rec_get_prev(rec))) { + rec = page_rec_get_next(rec); + ut_ad(page_rec_is_user_rec(rec)); + } + + return(rec); +} + +/** Copy all records after split rec including itself. +@param[in] rec split rec */ +void +PageBulk::copyIn( + rec_t* split_rec) +{ + + rec_t* rec = split_rec; + ulint* offsets = NULL; + + ut_ad(m_rec_no == 0); + ut_ad(page_rec_is_user_rec(rec)); + + do { + offsets = rec_get_offsets(rec, m_index, offsets, + ULINT_UNDEFINED, &(m_heap)); + + insert(rec, offsets); + + rec = page_rec_get_next(rec); + } while (!page_rec_is_supremum(rec)); + + ut_ad(m_rec_no > 0); +} + +/** Remove all records after split rec including itself. +@param[in] rec split rec */ +void +PageBulk::copyOut( + rec_t* split_rec) +{ + rec_t* rec; + rec_t* last_rec; + ulint n; + + /* Suppose before copyOut, we have 5 records on the page: + infimum->r1->r2->r3->r4->r5->supremum, and r3 is the split rec. + + after copyOut, we have 2 records on the page: + infimum->r1->r2->supremum. slot ajustment is not done. 
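PageBulk::getSplitRec() above picks the record at which a page whose compression failed is cut in half: it walks the records from the start, accumulating record sizes plus the directory space they need, until it has covered about half of the used space, and it always leaves at least one record on the left page. A sketch of the same accumulation over a plain list of record sizes; the per-record directory cost is a simplified stand-in for page_dir_calc_reserved_space():

#include <cassert>
#include <cstddef>
#include <vector>

static const std::size_t	DIR_COST_PER_REC = 2;	/* simplified */

/* Return the index of the first record that moves to the right page,
i.e. the split record, mirroring the loop in getSplitRec(). */
static std::size_t
split_point(const std::vector<std::size_t>& rec_sizes)
{
	std::size_t	total_used = 0;

	for (std::size_t i = 0; i < rec_sizes.size(); i++) {
		total_used += rec_sizes[i] + DIR_COST_PER_REC;
	}

	std::size_t	acc = 0;
	std::size_t	i = 0;

	while (i < rec_sizes.size()
	       && acc + rec_sizes[i] + DIR_COST_PER_REC < total_used / 2) {
		acc += rec_sizes[i] + DIR_COST_PER_REC;
		i++;
	}

	/* Keep at least one record on the left page. */
	return(i == 0 ? 1 : i);
}

int
main()
{
	/* Five equal records: the split lands near the middle. */
	const std::vector<std::size_t>	recs(5, 100);
	assert(split_point(recs) == 2);

	/* One huge record followed by small ones: keep it alone on the
	left rather than emptying the left page. */
	const std::vector<std::size_t>	skew = {1000, 10, 10, 10};
	assert(split_point(skew) == 1);
	return(0);
}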
*/ + + rec = page_rec_get_next(page_get_infimum_rec(m_page)); + last_rec = page_rec_get_prev(page_get_supremum_rec(m_page)); + n = 0; + + while (rec != split_rec) { + rec = page_rec_get_next(rec); + n++; + } + + ut_ad(n > 0); + + /* Set last record's next in page */ + ulint* offsets = NULL; + rec = page_rec_get_prev(split_rec); + offsets = rec_get_offsets(rec, m_index, + offsets, ULINT_UNDEFINED, + &(m_heap)); + page_rec_set_next(rec, page_get_supremum_rec(m_page)); + + /* Set related members */ + m_cur_rec = rec; + m_heap_top = rec_get_end(rec, offsets); + + offsets = rec_get_offsets(last_rec, m_index, + offsets, ULINT_UNDEFINED, + &(m_heap)); + + m_free_space += rec_get_end(last_rec, offsets) + - m_heap_top + + page_dir_calc_reserved_space(m_rec_no) + - page_dir_calc_reserved_space(n); + ut_ad(m_free_space > 0); + m_rec_no = n; + +#ifdef UNIV_DEBUG + m_total_data -= rec_get_end(last_rec, offsets) - m_heap_top; +#endif /* UNIV_DEBUG */ +} + +/** Set next page +@param[in] next_page_no next page no */ +void +PageBulk::setNext( + ulint next_page_no) +{ + btr_page_set_next(m_page, NULL, next_page_no, m_mtr); +} + +/** Set previous page +@param[in] prev_page_no previous page no */ +void +PageBulk::setPrev( + ulint prev_page_no) +{ + btr_page_set_prev(m_page, NULL, prev_page_no, m_mtr); +} + +/** Check if required space is available in the page for the rec to be inserted. +We check fill factor & padding here. +@param[in] length required length +@return true if space is available */ +bool +PageBulk::isSpaceAvailable( + ulint rec_size) +{ + ulint slot_size; + ulint required_space; + + slot_size = page_dir_calc_reserved_space(m_rec_no + 1) + - page_dir_calc_reserved_space(m_rec_no); + + required_space = rec_size + slot_size; + + if (required_space > m_free_space) { + ut_ad(m_rec_no > 0); + return false; + } + + /* Fillfactor & Padding apply to both leaf and non-leaf pages. + Note: we keep at least 2 records in a page to avoid B-tree level + growing too high. */ + if (m_rec_no >= 2 + && ((m_page_zip == NULL && m_free_space - required_space + < m_reserved_space) + || (m_page_zip != NULL && m_free_space - required_space + < m_padding_space))) { + return(false); + } + + return(true); +} + +/** Check whether the record needs to be stored externally. +@return false if the entire record can be stored locally on the page */ +bool +PageBulk::needExt( + const dtuple_t* tuple, + ulint rec_size) +{ + return(page_zip_rec_needs_ext(rec_size, m_is_comp, + dtuple_get_n_fields(tuple), m_block->page.size)); +} + +/** Store external record +Since the record is not logged yet, so we don't log update to the record. +the blob data is logged first, then the record is logged in bulk mode. +@param[in] big_rec external recrod +@param[in] offsets record offsets +@return error code */ +dberr_t +PageBulk::storeExt( + const big_rec_t* big_rec, + ulint* offsets) +{ + /* Note: not all fileds are initialized in btr_pcur. */ + btr_pcur_t btr_pcur; + btr_pcur.pos_state = BTR_PCUR_IS_POSITIONED; + btr_pcur.latch_mode = BTR_MODIFY_LEAF; + btr_pcur.btr_cur.index = m_index; + + page_cur_t* page_cur = &btr_pcur.btr_cur.page_cur; + page_cur->index = m_index; + page_cur->rec = m_cur_rec; + page_cur->offsets = offsets; + page_cur->block = m_block; + + dberr_t err = btr_store_big_rec_extern_fields( + &btr_pcur, NULL, offsets, big_rec, m_mtr, + BTR_STORE_INSERT_BULK); + + ut_ad(page_offset(m_cur_rec) == page_offset(page_cur->rec)); + + /* Reset m_block and m_cur_rec from page cursor, because + block may be changed during blob insert. 
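PageBulk::isSpaceAvailable() above is where the fill factor (innobase_fill_factor, declared earlier in this file) takes effect: an insert must fit into the remaining free space, and once a page holds at least two records the loader also refuses to eat into the reserved fill-factor slice, or the compression padding for zipped pages. A sketch of that predicate with plain byte counts; the helper and its arguments are illustrative, not the member variables themselves:

#include <cassert>

/* "reserved" is the slice kept free by the fill factor (or the
compression padding for a compressed page). */
static bool
is_space_available(
	unsigned long	free_space,	/* bytes still free on the page */
	unsigned long	rec_size,	/* record to insert */
	unsigned long	slot_size,	/* extra directory space it needs */
	unsigned long	n_recs,		/* records already on the page */
	unsigned long	reserved)	/* fill-factor / padding reserve */
{
	const unsigned long	required = rec_size + slot_size;

	if (required > free_space) {
		return(false);
	}

	/* Keep at least two records per page so the tree cannot grow one
	level per record, then start honouring the reserve. */
	if (n_recs >= 2 && free_space - required < reserved) {
		return(false);
	}

	return(true);
}

int
main()
{
	/* Plenty of room: accepted. */
	assert(is_space_available(8000, 120, 2, 50, 1600));

	/* Fits physically, but would cut into the reserved slice on a
	page that already has enough records: rejected. */
	assert(!is_space_available(1700, 120, 2, 50, 1600));

	/* Same situation on a nearly empty page: still accepted. */
	assert(is_space_available(1700, 120, 2, 1, 1600));
	return(0);
}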
*/ + m_block = page_cur->block; + m_cur_rec = page_cur->rec; + m_page = buf_block_get_frame(m_block); + + return(err); +} + +/** Release block by commiting mtr +Note: log_free_check requires holding no lock/latch in current thread. */ +void +PageBulk::release() +{ + ut_ad(!dict_index_is_spatial(m_index)); + + /* We fix the block because we will re-pin it soon. */ + buf_block_buf_fix_inc(m_block, __FILE__, __LINE__); + + /* No other threads can modify this block. */ + m_modify_clock = buf_block_get_modify_clock(m_block); + + mtr_commit(m_mtr); +} + +/** Start mtr and latch the block */ +dberr_t +PageBulk::latch() +{ + ibool ret; + + mtr_start(m_mtr); + mtr_x_lock(dict_index_get_lock(m_index), m_mtr); + mtr_set_log_mode(m_mtr, MTR_LOG_NO_REDO); + mtr_set_flush_observer(m_mtr, m_flush_observer); + + /* TODO: need a simple and wait version of buf_page_optimistic_get. */ + ret = buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock, + __FILE__, __LINE__, m_mtr); + /* In case the block is S-latched by page_cleaner. */ + if (!ret) { + page_id_t page_id(dict_index_get_space(m_index), m_page_no); + page_size_t page_size(dict_table_page_size(m_index->table)); + + m_block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, + m_block, BUF_GET_IF_IN_POOL, + __FILE__, __LINE__, m_mtr, &m_err); + + if (m_err != DB_SUCCESS) { + return (m_err); + } + + ut_ad(m_block != NULL); + } + + buf_block_buf_fix_dec(m_block); + + ut_ad(m_cur_rec > m_page && m_cur_rec < m_heap_top); + + return (m_err); +} + +/** Split a page +@param[in] page_bulk page to split +@param[in] next_page_bulk next page +@return error code */ +dberr_t +BtrBulk::pageSplit( + PageBulk* page_bulk, + PageBulk* next_page_bulk) +{ + ut_ad(page_bulk->getPageZip() != NULL); + + /* 1. Check if we have only one user record on the page. */ + if (page_bulk->getRecNo() <= 1) { + return DB_TOO_BIG_RECORD; + } + + /* 2. create a new page. */ + PageBulk new_page_bulk(m_index, m_trx_id, FIL_NULL, + page_bulk->getLevel(), m_flush_observer); + dberr_t err = new_page_bulk.init(); + if (err != DB_SUCCESS) { + return(err); + } + + /* 3. copy the upper half to new page. */ + rec_t* split_rec = page_bulk->getSplitRec(); + new_page_bulk.copyIn(split_rec); + page_bulk->copyOut(split_rec); + + /* 4. commit the splitted page. */ + err = pageCommit(page_bulk, &new_page_bulk, true); + if (err != DB_SUCCESS) { + pageAbort(&new_page_bulk); + return(err); + } + + /* 5. commit the new page. */ + err = pageCommit(&new_page_bulk, next_page_bulk, true); + if (err != DB_SUCCESS) { + pageAbort(&new_page_bulk); + return(err); + } + + return(err); +} + +/** Commit(finish) a page. We set next/prev page no, compress a page of +compressed table and split the page if compression fails, insert a node +pointer to father page if needed, and commit mini-transaction. +@param[in] page_bulk page to commit +@param[in] next_page_bulk next page +@param[in] insert_father false when page_bulk is a root page and + true when it's a non-root page +@return error code */ +dberr_t +BtrBulk::pageCommit( + PageBulk* page_bulk, + PageBulk* next_page_bulk, + bool insert_father) +{ + page_bulk->finish(); + + /* Set page links */ + if (next_page_bulk != NULL) { + ut_ad(page_bulk->getLevel() == next_page_bulk->getLevel()); + + page_bulk->setNext(next_page_bulk->getPageNo()); + next_page_bulk->setPrev(page_bulk->getPageNo()); + } else { + /** Suppose a page is released and latched again, we need to + mark it modified in mini-transaction. 
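PageBulk::release() and PageBulk::latch() above implement a release-and-reattach cycle: before committing the mini-transaction the block is buffer-fixed and its modify clock remembered, and when latching again the block is first tried optimistically and only re-read through the buffer pool if the clock no longer matches. A toy sketch of the modify-clock idea, independent of the real buf_page_optimistic_get() interface:

#include <cassert>
#include <cstdint>

/* Toy block with a counter that is bumped whenever the page changes in a
way that invalidates pointers into it; a stand-in for the real field. */
struct toy_block_t {
	std::uint64_t	modify_clock;
};

/* Optimistic reattach: succeed only if nothing happened to the block
since we remembered the clock; otherwise the caller must go through the
slower buffer-pool lookup again. */
static bool
optimistic_latch(const toy_block_t& block, std::uint64_t saved_clock)
{
	return(block.modify_clock == saved_clock);
}

int
main()
{
	toy_block_t		block = {7};

	/* release(): remember the clock, then give up the latch. */
	const std::uint64_t	saved = block.modify_clock;

	/* Nobody touched the block: the cheap path works. */
	assert(optimistic_latch(block, saved));

	/* The block changed in the meantime: fall back to the full
	lookup, as PageBulk::latch() does. */
	block.modify_clock++;
	assert(!optimistic_latch(block, saved));
	return(0);
}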
*/ + page_bulk->setNext(FIL_NULL); + } + + /* Compress page if it's a compressed table. */ + if (page_bulk->getPageZip() != NULL && !page_bulk->compress()) { + return(pageSplit(page_bulk, next_page_bulk)); + } + + /* Insert node pointer to father page. */ + if (insert_father) { + dtuple_t* node_ptr = page_bulk->getNodePtr(); + dberr_t err = insert(node_ptr, page_bulk->getLevel()+1); + + if (err != DB_SUCCESS) { + return(err); + } + } + + /* Commit mtr. */ + page_bulk->commit(true); + + return(DB_SUCCESS); +} + +/** Log free check */ +void +BtrBulk::logFreeCheck() +{ + if (log_sys->check_flush_or_checkpoint) { + release(); + + log_free_check(); + + latch(); + } +} + +/** Release all latches */ +void +BtrBulk::release() +{ + ut_ad(m_root_level + 1 == m_page_bulks->size()); + + for (ulint level = 0; level <= m_root_level; level++) { + PageBulk* page_bulk = m_page_bulks->at(level); + + page_bulk->release(); + } +} + +/** Re-latch all latches */ +void +BtrBulk::latch() +{ + ut_ad(m_root_level + 1 == m_page_bulks->size()); + + for (ulint level = 0; level <= m_root_level; level++) { + PageBulk* page_bulk = m_page_bulks->at(level); + page_bulk->latch(); + } +} + +/** Insert a tuple to page in a level +@param[in] tuple tuple to insert +@param[in] level B-tree level +@return error code */ +dberr_t +BtrBulk::insert( + dtuple_t* tuple, + ulint level) +{ + bool is_left_most = false; + + ut_ad(m_heap != NULL); + + /* Check if we need to create a PageBulk for the level. */ + if (level + 1 > m_page_bulks->size()) { + PageBulk* new_page_bulk + = UT_NEW_NOKEY(PageBulk(m_index, m_trx_id, FIL_NULL, + level, m_flush_observer)); + dberr_t err = new_page_bulk->init(); + if (err != DB_SUCCESS) { + return(err); + } + + m_page_bulks->push_back(new_page_bulk); + ut_ad(level + 1 == m_page_bulks->size()); + m_root_level = level; + + is_left_most = true; + } + + ut_ad(m_page_bulks->size() > level); + + PageBulk* page_bulk = m_page_bulks->at(level); + + if (is_left_most && level > 0 && page_bulk->getRecNo() == 0) { + /* The node pointer must be marked as the predefined minimum + record, as there is no lower alphabetical limit to records in + the leftmost node of a level: */ + dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple) + | REC_INFO_MIN_REC_FLAG); + } + + ulint n_ext = 0; + ulint rec_size = rec_get_converted_size(m_index, tuple, n_ext); + big_rec_t* big_rec = NULL; + + if (page_bulk->needExt(tuple, rec_size)) { + /* The record is so big that we have to store some fields + externally on separate database pages */ + big_rec = dtuple_convert_big_rec(m_index, 0, tuple, &n_ext); + + if (UNIV_UNLIKELY(big_rec == NULL)) { + return(DB_TOO_BIG_RECORD); + } + + rec_size = rec_get_converted_size(m_index, tuple, n_ext); + } + + if (!page_bulk->isSpaceAvailable(rec_size)) { + /* Create a sibling page_bulk. */ + PageBulk* sibling_page_bulk; + sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx_id, + FIL_NULL, level, + m_flush_observer)); + dberr_t err = sibling_page_bulk->init(); + if (err != DB_SUCCESS) { + UT_DELETE(sibling_page_bulk); + return(err); + } + + /* Commit page bulk. */ + err = pageCommit(page_bulk, sibling_page_bulk, true); + if (err != DB_SUCCESS) { + pageAbort(sibling_page_bulk); + UT_DELETE(sibling_page_bulk); + return(err); + } + + /* Set new page bulk to page_bulks. */ + ut_ad(sibling_page_bulk->getLevel() <= m_root_level); + m_page_bulks->at(level) = sibling_page_bulk; + + UT_DELETE(page_bulk); + page_bulk = sibling_page_bulk; + + /* Important: log_free_check whether we need a checkpoint. 
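BtrBulk::insert() above keeps one open PageBulk per level: when a record no longer fits, the page is committed, its first key is propagated as a node pointer into the level above (created on demand, with the leftmost pointer flagged as the minimum record), and a fresh sibling page is opened. A toy sketch of that cascade over integer keys, with a tiny fixed page capacity standing in for the space check; the handling of the still-open pages and of the root copy, done by the real finish(), is left out:

#include <cassert>
#include <cstddef>
#include <vector>

static const std::size_t	PAGE_CAPACITY = 3;

typedef std::vector<int>		toy_page_t;
typedef std::vector<toy_page_t>		toy_level_t;

struct toy_btr_bulk_t {
	/* levels[0] is the leaf level; each level holds its committed
	pages followed by one open page. */
	std::vector<toy_level_t>	levels;

	void insert(int key, std::size_t level)
	{
		if (level >= levels.size()) {
			/* First page of a new top level. */
			levels.push_back(toy_level_t(1));
		}

		if (levels[level].back().size() == PAGE_CAPACITY) {
			/* The open page is full: commit it by pushing its
			first key as a node pointer one level up, then
			open a sibling page at this level. */
			insert(levels[level].back().front(), level + 1);
			levels[level].push_back(toy_page_t());
		}

		levels[level].back().push_back(key);
	}
};

int
main()
{
	toy_btr_bulk_t	bulk;

	for (int key = 1; key <= 8; key++) {
		bulk.insert(key, 0);
	}

	/* Leaf level: two committed pages of 3 keys and one open page. */
	assert(bulk.levels[0].size() == 3);
	assert(bulk.levels[0][2].size() == 2);

	/* The parent level holds the first keys of the committed leaf
	pages. */
	assert(bulk.levels.size() == 2);
	assert(bulk.levels[1][0][0] == 1 && bulk.levels[1][0][1] == 4);
	return(0);
}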
*/ + if (page_is_leaf(sibling_page_bulk->getPage())) { + /* Check whether trx is interrupted */ + if (m_flush_observer->check_interrupted()) { + return(DB_INTERRUPTED); + } + + /* Wake up page cleaner to flush dirty pages. */ + srv_inc_activity_count(); + os_event_set(buf_flush_event); + + logFreeCheck(); + } + + } + + rec_t* rec; + ulint* offsets = NULL; + /* Convert tuple to rec. */ + rec = rec_convert_dtuple_to_rec(static_cast(mem_heap_alloc( + page_bulk->m_heap, rec_size)), m_index, tuple, n_ext); + offsets = rec_get_offsets(rec, m_index, offsets, ULINT_UNDEFINED, + &(page_bulk->m_heap)); + + page_bulk->insert(rec, offsets); + + if (big_rec != NULL) { + dberr_t err; + + ut_ad(dict_index_is_clust(m_index)); + ut_ad(page_bulk->getLevel() == 0); + ut_ad(page_bulk == m_page_bulks->at(0)); + + /* Release all latched but leaf node. */ + for (ulint level = 1; level <= m_root_level; level++) { + PageBulk* page_bulk = m_page_bulks->at(level); + + page_bulk->release(); + } + + err = page_bulk->storeExt(big_rec, offsets); + + /* Latch */ + for (ulint level = 1; level <= m_root_level; level++) { + PageBulk* page_bulk = m_page_bulks->at(level); + page_bulk->latch(); + } + + dtuple_convert_back_big_rec(m_index, tuple, big_rec); + + return(err); + } + + return(DB_SUCCESS); +} + +/** Btree bulk load finish. We commit the last page in each level +and copy the last page in top level to the root page of the index +if no error occurs. +@param[in] err whether bulk load was successful until now +@return error code */ +dberr_t +BtrBulk::finish(dberr_t err) +{ + ulint last_page_no = FIL_NULL; + + ut_ad(!dict_table_is_temporary(m_index->table)); + + if (m_page_bulks->size() == 0) { + /* The table is empty. The root page of the index tree + is already in a consistent state. No need to flush. */ + return(err); + } + + ut_ad(m_root_level + 1 == m_page_bulks->size()); + + /* Finish all page bulks */ + for (ulint level = 0; level <= m_root_level; level++) { + PageBulk* page_bulk = m_page_bulks->at(level); + + last_page_no = page_bulk->getPageNo(); + + if (err == DB_SUCCESS) { + err = pageCommit(page_bulk, NULL, + level != m_root_level); + } + + if (err != DB_SUCCESS) { + pageAbort(page_bulk); + } + + UT_DELETE(page_bulk); + } + + if (err == DB_SUCCESS) { + rec_t* first_rec; + mtr_t mtr; + buf_block_t* last_block; + page_t* last_page; + page_id_t page_id(dict_index_get_space(m_index), + last_page_no); + page_size_t page_size(dict_table_page_size(m_index->table)); + ulint root_page_no = dict_index_get_page(m_index); + PageBulk root_page_bulk(m_index, m_trx_id, + root_page_no, m_root_level, + m_flush_observer); + + mtr_start(&mtr); + mtr.set_named_space(dict_index_get_space(m_index)); + mtr_x_lock(dict_index_get_lock(m_index), &mtr); + + ut_ad(last_page_no != FIL_NULL); + last_block = btr_block_get(page_id, page_size, + RW_X_LATCH, m_index, &mtr); + last_page = buf_block_get_frame(last_block); + first_rec = page_rec_get_next(page_get_infimum_rec(last_page)); + ut_ad(page_rec_is_user_rec(first_rec)); + + /* Copy last page to root page. */ + err = root_page_bulk.init(); + if (err != DB_SUCCESS) { + mtr_commit(&mtr); + return(err); + } + root_page_bulk.copyIn(first_rec); + + /* Remove last page. */ + btr_page_free_low(m_index, last_block, m_root_level, false, &mtr); + + /* Do not flush the last page. 
*/ + last_block->page.flush_observer = NULL; + + mtr_commit(&mtr); + + err = pageCommit(&root_page_bulk, NULL, false); + ut_ad(err == DB_SUCCESS); + } + +#ifdef UNIV_DEBUG + dict_sync_check check(true); + + ut_ad(!sync_check_iterate(check)); +#endif /* UNIV_DEBUG */ + + ut_ad(err != DB_SUCCESS || btr_validate_index(m_index, NULL, false)); + return(err); +} diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index f1d4e03e230..ab8ee1d443c 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -3,7 +3,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2015, MariaDB Corporation. +Copyright (c) 2015, 2016, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -63,13 +63,14 @@ Created 10/16/1994 Heikki Tuuri #include "row0purge.h" #include "row0upd.h" #include "trx0rec.h" -#include "trx0roll.h" /* trx_is_recv() */ +#include "trx0roll.h" #include "que0que.h" #include "row0row.h" #include "srv0srv.h" #include "ibuf0ibuf.h" #include "lock0lock.h" #include "zlib.h" +#include "srv0start.h" /** Buffered B-tree operation types, introduced as part of delete buffering. */ enum btr_op_t { @@ -80,29 +81,47 @@ enum btr_op_t { BTR_DELMARK_OP /*!< Mark a record for deletion */ }; -#ifdef UNIV_DEBUG -/** If the following is set to TRUE, this module prints a lot of -trace information of individual record operations */ -UNIV_INTERN ibool btr_cur_print_record_ops = FALSE; -#endif /* UNIV_DEBUG */ +/** Modification types for the B-tree operation. */ +enum btr_intention_t { + BTR_INTENTION_DELETE, + BTR_INTENTION_BOTH, + BTR_INTENTION_INSERT +}; +#if BTR_INTENTION_DELETE > BTR_INTENTION_BOTH +#error "BTR_INTENTION_DELETE > BTR_INTENTION_BOTH" +#endif +#if BTR_INTENTION_BOTH > BTR_INTENTION_INSERT +#error "BTR_INTENTION_BOTH > BTR_INTENTION_INSERT" +#endif + +/** For the index->lock scalability improvement, only possibility of clear +performance regression observed was caused by grown huge history list length. +That is because the exclusive use of index->lock also worked as reserving +free blocks and read IO bandwidth with priority. To avoid huge glowing history +list as same level with previous implementation, prioritizes pessimistic tree +operations by purge as the previous, when it seems to be growing huge. + + Experimentally, the history list length starts to affect to performance +throughput clearly from about 100000. */ +#define BTR_CUR_FINE_HISTORY_LENGTH 100000 /** Number of searches down the B-tree in btr_cur_search_to_nth_level(). */ -UNIV_INTERN ulint btr_cur_n_non_sea = 0; +ulint btr_cur_n_non_sea = 0; /** Number of successful adaptive hash index lookups in btr_cur_search_to_nth_level(). */ -UNIV_INTERN ulint btr_cur_n_sea = 0; +ulint btr_cur_n_sea = 0; /** Old value of btr_cur_n_non_sea. Copied by srv_refresh_innodb_monitor_stats(). Referenced by srv_printf_innodb_monitor(). */ -UNIV_INTERN ulint btr_cur_n_non_sea_old = 0; +ulint btr_cur_n_non_sea_old = 0; /** Old value of btr_cur_n_sea. Copied by srv_refresh_innodb_monitor_stats(). Referenced by srv_printf_innodb_monitor(). 
*/ -UNIV_INTERN ulint btr_cur_n_sea_old = 0; +ulint btr_cur_n_sea_old = 0; #ifdef UNIV_DEBUG /* Flag to limit optimistic insert records */ -UNIV_INTERN uint btr_cur_limit_optimistic_insert_debug = 0; +uint btr_cur_limit_optimistic_insert_debug = 0; #endif /* UNIV_DEBUG */ /** In the optimistic insert, if the insert does not fit, but this much space @@ -121,29 +140,19 @@ can be released by page reorganize, then it is reorganized */ part header, in bytes */ /** Estimated table level stats from sampled value. -@param value sampled stats -@param index index being sampled -@param sample number of sampled rows -@param ext_size external stored data size -@param not_empty table not empty +@param value sampled stats +@param index index being sampled +@param sample number of sampled rows +@param ext_size external stored data size +@param not_empty table not empty @return estimated table wide stats from sampled value */ -#define BTR_TABLE_STATS_FROM_SAMPLE(value, index, sample, ext_size, not_empty)\ - (((value) * (ib_int64_t) index->stat_n_leaf_pages \ +#define BTR_TABLE_STATS_FROM_SAMPLE(value, index, sample, ext_size, not_empty) \ + (((value) * static_cast(index->stat_n_leaf_pages) \ + (sample) - 1 + (ext_size) + (not_empty)) / ((sample) + (ext_size))) /* @} */ #endif /* !UNIV_HOTBACKUP */ -/** A BLOB field reference full of zero, for use in assertions and tests. -Initially, BLOB field references are set to zero, in -dtuple_convert_big_rec(). */ -const byte field_ref_zero[BTR_EXTERN_FIELD_REF_SIZE] = { - 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, -}; - #ifndef UNIV_HOTBACKUP /*******************************************************************//** Marks all extern fields in a record as owned by the record. This function @@ -184,7 +193,7 @@ btr_rec_free_updated_extern_fields( part will be updated, or NULL */ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ const upd_t* update, /*!< in: update vector */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? */ mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the tree */ /***********************************************************//** @@ -199,120 +208,173 @@ btr_rec_free_externally_stored_fields( const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed part will be updated, or NULL */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? */ mtr_t* mtr); /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ #endif /* !UNIV_HOTBACKUP */ -/******************************************************//** -The following function is used to set the deleted bit of a record. */ -UNIV_INLINE -void -btr_rec_set_deleted_flag( -/*=====================*/ - rec_t* rec, /*!< in/out: physical record */ - page_zip_des_t* page_zip,/*!< in/out: compressed page (or NULL) */ - ulint flag) /*!< in: nonzero if delete marked */ -{ - if (page_rec_is_comp(rec)) { - rec_set_deleted_flag_new(rec, page_zip, flag); - } else { - ut_ad(!page_zip); - rec_set_deleted_flag_old(rec, flag); - } -} - #ifndef UNIV_HOTBACKUP /*==================== B-TREE SEARCH =========================*/ -/********************************************************************//** -Latches the leaf page or pages requested. 
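The rewritten btr_cur_latch_leaves() in the hunk that follows additionally records which sibling blocks were latched and at which savepoints, but the basic latch plan per mode stays as before: leaf searches take only the leaf (shared or exclusive), tree modifications X-latch the left sibling, the page and the right sibling in left-to-right order, and the *_PREV modes also latch the left sibling. A compact sketch of that mapping, using illustrative enums rather than the real BTR_* and RW_* constants; only the most common modes are covered:

#include <cassert>

enum leaf_latch_mode_t {
	LATCH_SEARCH_LEAF,
	LATCH_MODIFY_LEAF,
	LATCH_MODIFY_TREE,
	LATCH_SEARCH_PREV,
	LATCH_MODIFY_PREV
};

enum lock_type_t { LOCK_NONE, LOCK_S, LOCK_X };

struct leaf_latch_plan_t {
	lock_type_t	left_sibling;
	lock_type_t	page;
	lock_type_t	right_sibling;
};

static leaf_latch_plan_t
leaf_latch_plan(leaf_latch_mode_t mode)
{
	leaf_latch_plan_t	plan = {LOCK_NONE, LOCK_NONE, LOCK_NONE};

	switch (mode) {
	case LATCH_SEARCH_LEAF:
		plan.page = LOCK_S;
		break;
	case LATCH_MODIFY_LEAF:
		plan.page = LOCK_X;
		break;
	case LATCH_MODIFY_TREE:
		/* Left to right, all exclusive, so that the page links
		can be rewritten safely. */
		plan.left_sibling = LOCK_X;
		plan.page = LOCK_X;
		plan.right_sibling = LOCK_X;
		break;
	case LATCH_SEARCH_PREV:
		plan.left_sibling = LOCK_S;
		plan.page = LOCK_S;
		break;
	case LATCH_MODIFY_PREV:
		plan.left_sibling = LOCK_X;
		plan.page = LOCK_X;
		break;
	}

	return(plan);
}

int
main()
{
	assert(leaf_latch_plan(LATCH_SEARCH_LEAF).page == LOCK_S);
	assert(leaf_latch_plan(LATCH_MODIFY_TREE).right_sibling == LOCK_X);
	assert(leaf_latch_plan(LATCH_MODIFY_PREV).left_sibling == LOCK_X);
	return(0);
}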
*/ -static -void +#if MTR_MEMO_PAGE_S_FIX != RW_S_LATCH +#error "MTR_MEMO_PAGE_S_FIX != RW_S_LATCH" +#endif +#if MTR_MEMO_PAGE_X_FIX != RW_X_LATCH +#error "MTR_MEMO_PAGE_X_FIX != RW_X_LATCH" +#endif +#if MTR_MEMO_PAGE_SX_FIX != RW_SX_LATCH +#error "MTR_MEMO_PAGE_SX_FIX != RW_SX_LATCH" +#endif + +/** Latches the leaf page or pages requested. +@param[in] block leaf page where the search converged +@param[in] page_id page id of the leaf +@param[in] latch_mode BTR_SEARCH_LEAF, ... +@param[in] cursor cursor +@param[in] mtr mini-transaction +@return blocks and savepoints which actually latched. */ +btr_latch_leaves_t btr_cur_latch_leaves( -/*=================*/ - page_t* page, /*!< in: leaf page where the search - converged */ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no, /*!< in: page number of the leaf */ - ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ... */ - btr_cur_t* cursor, /*!< in: cursor */ - mtr_t* mtr) /*!< in: mtr */ + buf_block_t* block, + const page_id_t& page_id, + const page_size_t& page_size, + ulint latch_mode, + btr_cur_t* cursor, + mtr_t* mtr) { ulint mode; ulint left_page_no; ulint right_page_no; buf_block_t* get_block; + page_t* page = buf_block_get_frame(block); + bool spatial; + btr_latch_leaves_t latch_leaves = {{NULL, NULL, NULL}, {0, 0, 0}}; - ut_ad(page && mtr); + spatial = dict_index_is_spatial(cursor->index) && cursor->rtr_info; + ut_ad(buf_page_in_file(&block->page)); switch (latch_mode) { case BTR_SEARCH_LEAF: case BTR_MODIFY_LEAF: - mode = latch_mode == BTR_SEARCH_LEAF ? RW_S_LATCH : RW_X_LATCH; - get_block = btr_block_get( - space, zip_size, page_no, mode, cursor->index, mtr); + case BTR_SEARCH_TREE: + if (spatial) { + cursor->rtr_info->tree_savepoints[RTR_MAX_LEVELS] + = mtr_set_savepoint(mtr); + } + + mode = latch_mode == BTR_MODIFY_LEAF ? 
RW_X_LATCH : RW_S_LATCH; + latch_leaves.savepoints[1] = mtr_set_savepoint(mtr); + get_block = btr_block_get(page_id, page_size, mode, + cursor->index, mtr); + latch_leaves.blocks[1] = get_block; #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(get_block->frame) == page_is_comp(page)); #endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; - return; + if (spatial) { + cursor->rtr_info->tree_blocks[RTR_MAX_LEVELS] + = get_block; + } + + return(latch_leaves); case BTR_MODIFY_TREE: - /* x-latch also brothers from left to right */ + /* It is exclusive for other operations which calls + btr_page_set_prev() */ + ut_ad(mtr_memo_contains_flagged(mtr, + dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(cursor->index->table)); + /* x-latch also siblings from left to right */ left_page_no = btr_page_get_prev(page, mtr); mode = latch_mode; if (left_page_no != FIL_NULL) { + + if (spatial) { + cursor->rtr_info->tree_savepoints[ + RTR_MAX_LEVELS] = mtr_set_savepoint(mtr); + } + + latch_leaves.savepoints[0] = mtr_set_savepoint(mtr); get_block = btr_block_get( - space, zip_size, left_page_no, - RW_X_LATCH, cursor->index, mtr); -#ifdef UNIV_BTR_DEBUG - ut_a(page_is_comp(get_block->frame) - == page_is_comp(page)); - ut_a(btr_page_get_next(get_block->frame, mtr) - == page_get_page_no(page)); -#endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; + page_id_t(page_id.space(), left_page_no), + page_size, RW_X_LATCH, cursor->index, mtr); + latch_leaves.blocks[0] = get_block; + + if (spatial) { + cursor->rtr_info->tree_blocks[RTR_MAX_LEVELS] + = get_block; + } } + if (spatial) { + cursor->rtr_info->tree_savepoints[RTR_MAX_LEVELS + 1] + = mtr_set_savepoint(mtr); + } + + latch_leaves.savepoints[1] = mtr_set_savepoint(mtr); get_block = btr_block_get( - space, zip_size, page_no, - RW_X_LATCH, cursor->index, mtr); + page_id, page_size, RW_X_LATCH, cursor->index, mtr); + latch_leaves.blocks[1] = get_block; + #ifdef UNIV_BTR_DEBUG + /* Sanity check only after both the blocks are latched. */ + if (latch_leaves.blocks[0] != NULL) { + ut_a(page_is_comp(latch_leaves.blocks[0]->frame) + == page_is_comp(page)); + ut_a(btr_page_get_next( + latch_leaves.blocks[0]->frame, mtr) + == page_get_page_no(page)); + } ut_a(page_is_comp(get_block->frame) == page_is_comp(page)); #endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; + + if (spatial) { + cursor->rtr_info->tree_blocks[RTR_MAX_LEVELS + 1] + = get_block; + } right_page_no = btr_page_get_next(page, mtr); if (right_page_no != FIL_NULL) { + if (spatial) { + cursor->rtr_info->tree_savepoints[ + RTR_MAX_LEVELS + 2] = mtr_set_savepoint( + mtr); + } + latch_leaves.savepoints[2] = mtr_set_savepoint(mtr); get_block = btr_block_get( - space, zip_size, right_page_no, - RW_X_LATCH, cursor->index, mtr); + page_id_t(page_id.space(), right_page_no), + page_size, RW_X_LATCH, cursor->index, mtr); + latch_leaves.blocks[2] = get_block; #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(get_block->frame) == page_is_comp(page)); ut_a(btr_page_get_prev(get_block->frame, mtr) == page_get_page_no(page)); #endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; + if (spatial) { + cursor->rtr_info->tree_blocks[ + RTR_MAX_LEVELS + 2] = get_block; + } } - return; + return(latch_leaves); case BTR_SEARCH_PREV: case BTR_MODIFY_PREV: mode = latch_mode == BTR_SEARCH_PREV ? 
RW_S_LATCH : RW_X_LATCH; - /* latch also left brother */ + /* latch also left sibling */ + rw_lock_s_lock(&block->lock); left_page_no = btr_page_get_prev(page, mtr); + rw_lock_s_unlock(&block->lock); if (left_page_no != FIL_NULL) { + latch_leaves.savepoints[0] = mtr_set_savepoint(mtr); get_block = btr_block_get( - space, zip_size, - left_page_no, mode, cursor->index, mtr); + page_id_t(page_id.space(), left_page_no), + page_size, mode, cursor->index, mtr); + latch_leaves.blocks[0] = get_block; cursor->left_block = get_block; #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(get_block->frame) @@ -320,19 +382,333 @@ btr_cur_latch_leaves( ut_a(btr_page_get_next(get_block->frame, mtr) == page_get_page_no(page)); #endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; } - get_block = btr_block_get( - space, zip_size, page_no, mode, cursor->index, mtr); + latch_leaves.savepoints[1] = mtr_set_savepoint(mtr); + get_block = btr_block_get(page_id, page_size, mode, + cursor->index, mtr); + latch_leaves.blocks[1] = get_block; #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(get_block->frame) == page_is_comp(page)); #endif /* UNIV_BTR_DEBUG */ - get_block->check_index_page_at_flush = TRUE; - return; + return(latch_leaves); + case BTR_CONT_MODIFY_TREE: + ut_ad(dict_index_is_spatial(cursor->index)); + return(latch_leaves); + } + + ut_error; + return(latch_leaves); +} + +/** Optimistically latches the leaf page or pages requested. +@param[in] block guessed buffer block +@param[in] modify_clock modify clock value +@param[in,out] latch_mode BTR_SEARCH_LEAF, ... +@param[in,out] cursor cursor +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@return true if success */ +bool +btr_cur_optimistic_latch_leaves( + buf_block_t* block, + ib_uint64_t modify_clock, + ulint* latch_mode, + btr_cur_t* cursor, + const char* file, + ulint line, + mtr_t* mtr) +{ + ulint mode; + ulint left_page_no; + + switch (*latch_mode) { + case BTR_SEARCH_LEAF: + case BTR_MODIFY_LEAF: + return(buf_page_optimistic_get(*latch_mode, block, + modify_clock, file, line, mtr)); + case BTR_SEARCH_PREV: + case BTR_MODIFY_PREV: + mode = *latch_mode == BTR_SEARCH_PREV + ? 
RW_S_LATCH : RW_X_LATCH;
+
+        buf_page_mutex_enter(block);
+        if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
+                buf_page_mutex_exit(block);
+                return(false);
+        }
+        /* pin the block not to be relocated */
+        buf_block_buf_fix_inc(block, file, line);
+        buf_page_mutex_exit(block);
+
+        rw_lock_s_lock(&block->lock);
+        if (block->modify_clock != modify_clock) {
+                rw_lock_s_unlock(&block->lock);
+
+                goto unpin_failed;
+        }
+        left_page_no = btr_page_get_prev(
+                buf_block_get_frame(block), mtr);
+        rw_lock_s_unlock(&block->lock);
+
+        if (left_page_no != FIL_NULL) {
+                const page_id_t page_id(
+                        dict_index_get_space(cursor->index),
+                        left_page_no);
+
+                cursor->left_block = btr_block_get(
+                        page_id,
+                        dict_table_page_size(cursor->index->table),
+                        mode, cursor->index, mtr);
+        } else {
+                cursor->left_block = NULL;
+        }
+
+        if (buf_page_optimistic_get(mode, block, modify_clock,
+                                    file, line, mtr)) {
+                if (btr_page_get_prev(buf_block_get_frame(block), mtr)
+                    == left_page_no) {
+                        /* adjust buf_fix_count */
+                        buf_page_mutex_enter(block);
+                        buf_block_buf_fix_dec(block);
+                        buf_page_mutex_exit(block);
+
+                        *latch_mode = mode;
+                        return(true);
+                } else {
+                        /* release the block */
+                        btr_leaf_page_release(block, mode, mtr);
+                }
+        }
+
+        /* release the left block */
+        if (cursor->left_block != NULL) {
+                btr_leaf_page_release(cursor->left_block,
+                                      mode, mtr);
+        }
+unpin_failed:
+        /* unpin the block */
+        buf_page_mutex_enter(block);
+        buf_block_buf_fix_dec(block);
+        buf_page_mutex_exit(block);
+
+        return(false);
+
+        default:
+                ut_error;
+                return(false);
+        }
+}
+
+/**
+Gets the intention in btr_intention_t from latch_mode, and clears the
+intention in the latch_mode.
+@param latch_mode       in/out: pointer to latch_mode
+@return intention for latching tree */
+static
+btr_intention_t
+btr_cur_get_and_clear_intention(
+        ulint   *latch_mode)
+{
+        btr_intention_t intention;
+
+        switch (*latch_mode & (BTR_LATCH_FOR_INSERT | BTR_LATCH_FOR_DELETE)) {
+        case BTR_LATCH_FOR_INSERT:
+                intention = BTR_INTENTION_INSERT;
+                break;
+        case BTR_LATCH_FOR_DELETE:
+                intention = BTR_INTENTION_DELETE;
+                break;
+        default:
+                /* both or unknown */
+                intention = BTR_INTENTION_BOTH;
+        }
+        *latch_mode &= ~(BTR_LATCH_FOR_INSERT | BTR_LATCH_FOR_DELETE);
+
+        return(intention);
+}
+
+/**
+Gets the desired latch type for the root leaf (the root page is a root leaf)
+for the given latch mode.
+@param latch_mode       in: BTR_SEARCH_LEAF, ...
+@return latch type */
+static
+rw_lock_type_t
+btr_cur_latch_for_root_leaf(
+        ulint   latch_mode)
+{
+        switch (latch_mode) {
+        case BTR_SEARCH_LEAF:
+        case BTR_SEARCH_TREE:
+        case BTR_SEARCH_PREV:
+                return(RW_S_LATCH);
+        case BTR_MODIFY_LEAF:
+        case BTR_MODIFY_TREE:
+        case BTR_MODIFY_PREV:
+                return(RW_X_LATCH);
+        case BTR_CONT_MODIFY_TREE:
+        case BTR_CONT_SEARCH_TREE:
+                /* A root page should be latched already,
+                and doesn't need to be latched here.
+                fall through (RW_NO_LATCH) */
+        case BTR_NO_LATCHES:
+                return(RW_NO_LATCH);
+        }
+
+        ut_error;
+        return(RW_NO_LATCH); /* avoid compiler warnings */
+}
+
+/** Detects whether modifying a record might require modifying the tree structure.
+@param[in]      index           index
+@param[in]      page            page
+@param[in]      lock_intention  lock intention for the tree operation
+@param[in]      rec             record (current node_ptr)
+@param[in]      rec_size        size of the record or max size of node_ptr
+@param[in]      page_size       page size
+@param[in]      mtr             mtr
+@return true if tree modification is needed */
+static
+bool
+btr_cur_will_modify_tree(
+        dict_index_t*   index,
+        const page_t*   page,
+        btr_intention_t lock_intention,
+        const rec_t*    rec,
+        ulint           rec_size,
+        const page_size_t&      page_size,
+        mtr_t*          mtr)
+{
+        ut_ad(!page_is_leaf(page));
+        ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
+                                        MTR_MEMO_X_LOCK
+                                        | MTR_MEMO_SX_LOCK)
+              || dict_table_is_intrinsic(index->table));
+
+        /* A pessimistic delete of the first record causes a delete & insert
+        of node_ptr at the upper level. And a subsequent page shrink is
+        possible. It causes a delete of node_ptr at the upper level.
+        So we should pay attention also to the 2nd record, not only the
+        first and last records. Because if the "delete & insert" are
+        done for a different page, the 2nd record becomes the first record,
+        and the following compress might delete the record and cause the
+        upper level node_ptr modification. */
+
+        if (lock_intention <= BTR_INTENTION_BOTH) {
+                ulint   margin;
+
+                /* check if a delete would cause it (BTR_INTENTION_BOTH
+                or BTR_INTENTION_DELETE) */
+                /* first, 2nd, 2nd-last and last records are 4 records */
+                if (page_get_n_recs(page) < 5) {
+                        return(true);
+                }
+
+                /* is the first, 2nd or last record */
+                if (page_rec_is_first(rec, page)
+                    || (mach_read_from_4(page + FIL_PAGE_NEXT) != FIL_NULL
+                        && (page_rec_is_last(rec, page)
+                            || page_rec_is_second_last(rec, page)))
+                    || (mach_read_from_4(page + FIL_PAGE_PREV) != FIL_NULL
+                        && page_rec_is_second(rec, page))) {
+                        return(true);
+                }
+
+                if (lock_intention == BTR_INTENTION_BOTH) {
+                        /* A delete at the leftmost record in a page causes
+                        a delete & insert at its parent page. After that,
+                        the delete might cause btr_compress() and a delete
+                        of a record at its parent page. Thus we should
+                        consider at most 2 deletes. */
+
+                        margin = rec_size * 2;
+                } else {
+                        ut_ad(lock_intention == BTR_INTENTION_DELETE);
+
+                        margin = rec_size;
+                }
+                /* NOTE: call mach_read_from_4() directly to avoid assertion
+                failure. It is safe because we already have the SX latch on
+                the index tree. */
+                if (page_get_data_size(page)
+                    < margin + BTR_CUR_PAGE_COMPRESS_LIMIT(index)
+                    || (mach_read_from_4(page + FIL_PAGE_NEXT)
+                        == FIL_NULL
+                        && mach_read_from_4(page + FIL_PAGE_PREV)
+                        == FIL_NULL)) {
+                        return(true);
+                }
+        }
+
+        if (lock_intention >= BTR_INTENTION_BOTH) {
+                /* check if an insert would cause it (BTR_INTENTION_BOTH
+                or BTR_INTENTION_INSERT) */
+
+                /* Once we invoke btr_cur_limit_optimistic_insert_debug,
+                we should check it here in advance, since the maximum
+                allowable number of records in a page is limited. */
+                LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
+                                              return(true));
+
+                /* needs 2 records' space for the case where a single split
+                and insert cannot fit.
+                page_get_max_insert_size_after_reorganize() already includes
+                space for the page directory */
+                ulint   max_size
+                        = page_get_max_insert_size_after_reorganize(page, 2);
+
+                if (max_size < BTR_CUR_PAGE_REORGANIZE_LIMIT + rec_size
+                    || max_size < rec_size * 2) {
+                        return(true);
+                }
+                /* TODO: optimize this condition for a compressed page.
+                This is based on the worst compress rate.
+                Currently we look only at the uncompressed page, but we can
+                also look at the compressed page via page_zip_available()
+                if it is already in the buffer pool. */
+                /* needs 2 records' space also for the worst compress rate.
*/ + if (page_size.is_compressed() + && page_zip_empty_size(index->n_fields, + page_size.physical()) + < rec_size * 2 + page_get_data_size(page) + + page_dir_calc_reserved_space( + page_get_n_recs(page) + 2) + 1) { + return(true); + } + } + + return(false); +} + +/** Detects whether the modifying record might need a opposite modification +to the intention. +@param[in] page page +@param[in] lock_intention lock intention for the tree operation +@param[in] rec record (current node_ptr) +@return true if tree modification is needed */ +static +bool +btr_cur_need_opposite_intention( + const page_t* page, + btr_intention_t lock_intention, + const rec_t* rec) +{ + switch (lock_intention) { + case BTR_INTENTION_DELETE: + return((mach_read_from_4(page + FIL_PAGE_PREV) != FIL_NULL + && page_rec_is_first(rec, page)) + || (mach_read_from_4(page + FIL_PAGE_NEXT) != FIL_NULL + && page_rec_is_last(rec, page))); + case BTR_INTENTION_INSERT: + return(mach_read_from_4(page + FIL_PAGE_NEXT) != FIL_NULL + && page_rec_is_last(rec, page)); + case BTR_INTENTION_BOTH: + return(false); } ut_error; + return(false); } /********************************************************************//** @@ -356,7 +732,7 @@ btr_cur_search_to_nth_level( const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in tuple must be set so that it cannot get compared to the node ptr page number field! */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; Inserts should always be made using PAGE_CUR_LE to search the position! */ ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ..., ORed with @@ -372,45 +748,71 @@ btr_cur_search_to_nth_level( to protect the record! */ btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is s- or x-latched, but see also above! 
*/ - ulint has_search_latch,/*!< in: info on the latch mode the - caller currently has on btr_search_latch: + ulint has_search_latch, + /*!< in: info on the latch mode the + caller currently has on search system: RW_S_LATCH, or 0 */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in: mtr */ { - page_t* page; + page_t* page = NULL; /* remove warning */ buf_block_t* block; - ulint space; buf_block_t* guess; ulint height; - ulint page_no; ulint up_match; ulint up_bytes; ulint low_match; ulint low_bytes; ulint savepoint; ulint rw_latch; - ulint page_mode; + page_cur_mode_t page_mode; + page_cur_mode_t search_mode = PAGE_CUR_UNSUPP; ulint buf_mode; ulint estimate; - ulint zip_size; + ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; page_cur_t* page_cursor; btr_op_t btr_op; ulint root_height = 0; /* remove warning */ dberr_t err = DB_SUCCESS; + ulint upper_rw_latch, root_leaf_rw_latch; + btr_intention_t lock_intention; + bool modify_external; + buf_block_t* tree_blocks[BTR_MAX_LEVELS]; + ulint tree_savepoints[BTR_MAX_LEVELS]; + ulint n_blocks = 0; + ulint n_releases = 0; + bool detected_same_key_root = false; + + bool retrying_for_search_prev = false; + ulint leftmost_from_level = 0; + buf_block_t** prev_tree_blocks = NULL; + ulint* prev_tree_savepoints = NULL; + ulint prev_n_blocks = 0; + ulint prev_n_releases = 0; + bool need_path = true; + bool rtree_parent_modified = false; + bool mbr_adj = false; + bool found = false; + + DBUG_ENTER("btr_cur_search_to_nth_level"); + #ifdef BTR_CUR_ADAPT btr_search_t* info; -#endif +#endif /* BTR_CUR_ADAPT */ mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; + ulint offsets2_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets2 = offsets2_; rec_offs_init(offsets_); + rec_offs_init(offsets2_); /* Currently, PAGE_CUR_LE is the only search mode used for searches ending to upper levels */ - ut_ad(level == 0 || mode == PAGE_CUR_LE); + ut_ad(level == 0 || mode == PAGE_CUR_LE + || RTREE_SEARCH_MODE(mode)); ut_ad(dict_index_check_search_tuple(index, tuple)); ut_ad(!dict_index_is_ibuf(index) || ibuf_inside(mtr)); ut_ad(dtuple_check_typed(tuple)); @@ -424,15 +826,18 @@ btr_cur_search_to_nth_level( #ifdef UNIV_DEBUG cursor->up_match = ULINT_UNDEFINED; cursor->low_match = ULINT_UNDEFINED; -#endif +#endif /* UNIV_DEBUG */ ibool s_latch_by_caller; s_latch_by_caller = latch_mode & BTR_ALREADY_S_LATCHED; ut_ad(!s_latch_by_caller - || mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_S_LOCK)); + || srv_read_only_mode + || mtr_memo_contains_flagged(mtr, + dict_index_get_lock(index), + MTR_MEMO_S_LOCK + | MTR_MEMO_SX_LOCK)); /* These flags are mutually exclusive, they are lumped together with the latch mode for historical reasons. It's possible for @@ -465,14 +870,25 @@ btr_cur_search_to_nth_level( ut_ad(btr_op == BTR_NO_OP || !dict_index_is_ibuf(index)); /* Operations on the clustered index cannot be buffered. */ ut_ad(btr_op == BTR_NO_OP || !dict_index_is_clust(index)); + /* Operations on the temporary table(indexes) cannot be buffered. */ + ut_ad(btr_op == BTR_NO_OP || !dict_table_is_temporary(index->table)); + /* Operation on the spatial index cannot be buffered. */ + ut_ad(btr_op == BTR_NO_OP || !dict_index_is_spatial(index)); estimate = latch_mode & BTR_ESTIMATE; + lock_intention = btr_cur_get_and_clear_intention(&latch_mode); + + modify_external = latch_mode & BTR_MODIFY_EXTERNAL; + /* Turn the flags unrelated to the latch mode off. 
*/ latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode); + ut_ad(!modify_external || latch_mode == BTR_MODIFY_LEAF); + ut_ad(!s_latch_by_caller || latch_mode == BTR_SEARCH_LEAF + || latch_mode == BTR_SEARCH_TREE || latch_mode == BTR_MODIFY_LEAF); cursor->flag = BTR_CUR_BINARY; @@ -483,24 +899,34 @@ btr_cur_search_to_nth_level( #else info = btr_search_get_info(index); - guess = info->root_guess; + if (!buf_pool_is_obsolete(info->withdraw_clock)) { + guess = info->root_guess; + } else { + guess = NULL; + } #ifdef BTR_CUR_HASH_ADAPT # ifdef UNIV_SEARCH_PERF_STAT info->n_searches++; # endif - if (rw_lock_get_writer(&btr_search_latch) == RW_LOCK_NOT_LOCKED + /* Use of AHI is disabled for intrinsic table as these tables re-use + the index-id and AHI validation is based on index-id. */ + if (rw_lock_get_writer(btr_get_search_latch(index)) + == RW_LOCK_NOT_LOCKED && latch_mode <= BTR_MODIFY_LEAF && info->last_hash_succ + && !index->disable_ahi && !estimate # ifdef PAGE_CUR_LE_OR_EXTENDS && mode != PAGE_CUR_LE_OR_EXTENDS # endif /* PAGE_CUR_LE_OR_EXTENDS */ + && !dict_index_is_spatial(index) /* If !has_search_latch, we do a dirty read of btr_search_enabled below, and btr_search_guess_on_hash() will have to check it again. */ && UNIV_LIKELY(btr_search_enabled) + && !modify_external && btr_search_guess_on_hash(index, info, tuple, mode, latch_mode, cursor, has_search_latch, mtr)) { @@ -515,7 +941,7 @@ btr_cur_search_to_nth_level( || mode != PAGE_CUR_LE); btr_cur_n_sea++; - return err; + DBUG_RETURN(err); } # endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_ADAPT */ @@ -526,7 +952,7 @@ btr_cur_search_to_nth_level( if (has_search_latch) { /* Release possible search latch to obey latching order */ - rw_lock_s_unlock(&btr_search_latch); + rw_lock_s_unlock(btr_get_search_latch(index)); } /* Store the position of the tree latch we push to mtr so that we @@ -536,23 +962,76 @@ btr_cur_search_to_nth_level( switch (latch_mode) { case BTR_MODIFY_TREE: - mtr_x_lock(dict_index_get_lock(index), mtr); + /* Most of delete-intended operations are purging. + Free blocks and read IO bandwidth should be prior + for them, when the history list is glowing huge. */ + if (lock_intention == BTR_INTENTION_DELETE + && trx_sys->rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH + && buf_get_n_pending_read_ios()) { + mtr_x_lock(dict_index_get_lock(index), mtr); + } else if (dict_index_is_spatial(index) + && lock_intention <= BTR_INTENTION_BOTH) { + /* X lock the if there is possibility of + pessimistic delete on spatial index. 
As we could + lock upward for the tree */ + + mtr_x_lock(dict_index_get_lock(index), mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), mtr); + } + upper_rw_latch = RW_X_LATCH; break; case BTR_CONT_MODIFY_TREE: + case BTR_CONT_SEARCH_TREE: /* Do nothing */ - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); + ut_ad(srv_read_only_mode + || mtr_memo_contains_flagged(mtr, + dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK)); + if (dict_index_is_spatial(index) + && latch_mode == BTR_CONT_MODIFY_TREE) { + /* If we are about to locating parent page for split + and/or merge operation for R-Tree index, X latch + the parent */ + upper_rw_latch = RW_X_LATCH; + } else { + upper_rw_latch = RW_NO_LATCH; + } break; default: - if (!s_latch_by_caller) { - mtr_s_lock(dict_index_get_lock(index), mtr); + if (!srv_read_only_mode) { + if (s_latch_by_caller) { + ut_ad(rw_lock_own(dict_index_get_lock(index), + RW_LOCK_S)); + } else if (!modify_external) { + /* BTR_SEARCH_TREE is intended to be used with + BTR_ALREADY_S_LATCHED */ + ut_ad(latch_mode != BTR_SEARCH_TREE); + + mtr_s_lock(dict_index_get_lock(index), mtr); + } else { + /* BTR_MODIFY_EXTERNAL needs to be excluded */ + mtr_sx_lock(dict_index_get_lock(index), mtr); + } + upper_rw_latch = RW_S_LATCH; + } else { + upper_rw_latch = RW_NO_LATCH; } } + root_leaf_rw_latch = btr_cur_latch_for_root_leaf(latch_mode); page_cursor = btr_cur_get_page_cur(cursor); - space = dict_index_get_space(index); - page_no = dict_index_get_page(index); + const ulint space = dict_index_get_space(index); + const page_size_t page_size(dict_table_page_size(index->table)); + + /* Start with the root page. */ + page_id_t page_id(space, dict_index_get_page(index)); + + if (root_leaf_rw_latch == RW_X_LATCH) { + node_ptr_max_size = dict_index_node_ptr_max_size(index); + } up_match = 0; up_bytes = 0; @@ -575,22 +1054,41 @@ btr_cur_search_to_nth_level( default: #ifdef PAGE_CUR_LE_OR_EXTENDS ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE + || RTREE_SEARCH_MODE(mode) || mode == PAGE_CUR_LE_OR_EXTENDS); #else /* PAGE_CUR_LE_OR_EXTENDS */ - ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE); + ut_ad(mode == PAGE_CUR_L || mode == PAGE_CUR_LE + || RTREE_SEARCH_MODE(mode)); #endif /* PAGE_CUR_LE_OR_EXTENDS */ page_mode = mode; break; } /* Loop and search until we arrive at the desired level */ + btr_latch_leaves_t latch_leaves = {{NULL, NULL, NULL}, {0, 0, 0}}; search_loop: buf_mode = BUF_GET; rw_latch = RW_NO_LATCH; + rtree_parent_modified = false; if (height != 0) { /* We are about to fetch the root or a non-leaf page. */ + if ((latch_mode != BTR_MODIFY_TREE + || height == level) + && !retrying_for_search_prev) { + /* If doesn't have SX or X latch of index, + each pages should be latched before reading. 
*/ + if (modify_external + && height == ULINT_UNDEFINED + && upper_rw_latch == RW_S_LATCH) { + /* needs sx-latch of root page + for fseg operation */ + rw_latch = RW_SX_LATCH; + } else { + rw_latch = upper_rw_latch; + } + } } else if (latch_mode <= BTR_MODIFY_LEAF) { rw_latch = latch_mode; @@ -606,12 +1104,12 @@ search_loop: } } - zip_size = dict_table_zip_size(index->table); - retry_page_get: - block = buf_page_get_gen( - space, zip_size, page_no, rw_latch, guess, buf_mode, - file, line, mtr, &err); + ut_ad(n_blocks < BTR_MAX_LEVELS); + tree_savepoints[n_blocks] = mtr_set_savepoint(mtr); + block = buf_page_get_gen(page_id, page_size, rw_latch, guess, + buf_mode, file, line, mtr, &err); + tree_blocks[n_blocks] = block; if (err != DB_SUCCESS) { if (err == DB_DECRYPTION_FAILED) { @@ -638,10 +1136,10 @@ retry_page_get: case BTR_INSERT_OP: case BTR_INSERT_IGNORE_UNIQUE_OP: ut_ad(buf_mode == BUF_GET_IF_IN_POOL); + ut_ad(!dict_index_is_spatial(index)); if (ibuf_insert(IBUF_OP_INSERT, tuple, index, - space, zip_size, page_no, - cursor->thr)) { + page_id, page_size, cursor->thr)) { cursor->flag = BTR_CUR_INSERT_TO_IBUF; @@ -651,10 +1149,11 @@ retry_page_get: case BTR_DELMARK_OP: ut_ad(buf_mode == BUF_GET_IF_IN_POOL); + ut_ad(!dict_index_is_spatial(index)); if (ibuf_insert(IBUF_OP_DELETE_MARK, tuple, - index, space, zip_size, - page_no, cursor->thr)) { + index, page_id, page_size, + cursor->thr)) { cursor->flag = BTR_CUR_DEL_MARK_IBUF; @@ -665,6 +1164,7 @@ retry_page_get: case BTR_DELETE_OP: ut_ad(buf_mode == BUF_GET_IF_IN_POOL_OR_WATCH); + ut_ad(!dict_index_is_spatial(index)); if (!row_purge_poss_sec(cursor->purge_node, index, tuple)) { @@ -672,19 +1172,18 @@ retry_page_get: /* The record cannot be purged yet. */ cursor->flag = BTR_CUR_DELETE_REF; } else if (ibuf_insert(IBUF_OP_DELETE, tuple, - index, space, zip_size, - page_no, + index, page_id, page_size, cursor->thr)) { /* The purge was buffered. */ cursor->flag = BTR_CUR_DELETE_IBUF; } else { /* The purge could not be buffered. */ - buf_pool_watch_unset(space, page_no); + buf_pool_watch_unset(page_id); break; } - buf_pool_watch_unset(space, page_no); + buf_pool_watch_unset(page_id); goto func_exit; default: @@ -699,9 +1198,97 @@ retry_page_get: goto retry_page_get; } - block->check_index_page_at_flush = TRUE; + if (retrying_for_search_prev && height != 0) { + /* also latch left sibling */ + ulint left_page_no; + buf_block_t* get_block; + + ut_ad(rw_latch == RW_NO_LATCH); + + rw_latch = upper_rw_latch; + + rw_lock_s_lock(&block->lock); + left_page_no = btr_page_get_prev( + buf_block_get_frame(block), mtr); + rw_lock_s_unlock(&block->lock); + + if (left_page_no != FIL_NULL) { + ut_ad(prev_n_blocks < leftmost_from_level); + + prev_tree_savepoints[prev_n_blocks] + = mtr_set_savepoint(mtr); + get_block = buf_page_get_gen( + page_id_t(page_id.space(), left_page_no), + page_size, rw_latch, NULL, buf_mode, + file, line, mtr, &err); + prev_tree_blocks[prev_n_blocks] = get_block; + prev_n_blocks++; + + if (err != DB_SUCCESS) { + if (err == DB_DECRYPTION_FAILED) { + ib_push_warning((void *)NULL, + DB_DECRYPTION_FAILED, + "Table %s is encrypted but encryption service or" + " used key_id is not available. " + " Can't continue reading table.", + index->table->name); + index->table->is_encrypted = true; + } + + goto func_exit; + } + + /* BTR_MODIFY_TREE doesn't update prev/next_page_no, + without their parent page's lock. So, not needed to + retry here, because we have the parent page's lock. 
*/ + } + + /* release RW_NO_LATCH page and lock with RW_S_LATCH */ + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_blocks], + tree_blocks[n_blocks]); + + tree_savepoints[n_blocks] = mtr_set_savepoint(mtr); + block = buf_page_get_gen(page_id, page_size, rw_latch, NULL, + buf_mode, file, line, mtr, &err); + tree_blocks[n_blocks] = block; + + if (err != DB_SUCCESS) { + if (err == DB_DECRYPTION_FAILED) { + ib_push_warning((void *)NULL, + DB_DECRYPTION_FAILED, + "Table %s is encrypted but encryption service or" + " used key_id is not available. " + " Can't continue reading table.", + index->table->name); + index->table->is_encrypted = true; + } + + goto func_exit; + } + } + page = buf_block_get_frame(block); + if (height == ULINT_UNDEFINED + && page_is_leaf(page) + && rw_latch != RW_NO_LATCH + && rw_latch != root_leaf_rw_latch) { + /* We should retry to get the page, because the root page + is latched with different level as a leaf page. */ + ut_ad(root_leaf_rw_latch != RW_NO_LATCH); + ut_ad(rw_latch == RW_S_LATCH || rw_latch == RW_SX_LATCH); + ut_ad(rw_latch == RW_S_LATCH || modify_external); + + ut_ad(n_blocks == 0); + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_blocks], + tree_blocks[n_blocks]); + + upper_rw_latch = root_leaf_rw_latch; + goto search_loop; + } + if (rw_latch != RW_NO_LATCH) { #ifdef UNIV_ZIP_DEBUG const page_zip_des_t* page_zip @@ -714,7 +1301,7 @@ retry_page_get: ? SYNC_IBUF_TREE_NODE : SYNC_TREE_NODE); } - ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX); + ut_ad(fil_page_index_page_check(page)); ut_ad(index->id == btr_page_get_index_id(page)); if (UNIV_UNLIKELY(height == ULINT_UNDEFINED)) { @@ -724,9 +1311,32 @@ retry_page_get: root_height = height; cursor->tree_height = root_height + 1; + if (dict_index_is_spatial(index)) { + ut_ad(cursor->rtr_info); + + node_seq_t seq_no = rtr_get_current_ssn_id(index); + + /* If SSN in memory is not initialized, fetch + it from root page */ + if (seq_no < 1) { + node_seq_t root_seq_no; + + root_seq_no = page_get_ssn_id(page); + + mutex_enter(&(index->rtr_ssn.mutex)); + index->rtr_ssn.seq_no = root_seq_no + 1; + mutex_exit(&(index->rtr_ssn.mutex)); + } + + /* Save the MBR */ + cursor->rtr_info->thr = cursor->thr; + rtr_get_mbr_from_tuple(tuple, &cursor->rtr_info->mbr); + } + #ifdef BTR_CUR_ADAPT if (block != guess) { info->root_guess = block; + info->withdraw_clock = buf_withdraw_clock; } #endif } @@ -734,30 +1344,142 @@ retry_page_get: if (height == 0) { if (rw_latch == RW_NO_LATCH) { - btr_cur_latch_leaves( - page, space, zip_size, page_no, latch_mode, + latch_leaves = btr_cur_latch_leaves( + block, page_id, page_size, latch_mode, cursor, mtr); } switch (latch_mode) { case BTR_MODIFY_TREE: case BTR_CONT_MODIFY_TREE: + case BTR_CONT_SEARCH_TREE: break; default: - if (!s_latch_by_caller) { + if (!s_latch_by_caller + && !srv_read_only_mode + && !modify_external) { /* Release the tree s-latch */ + /* NOTE: BTR_MODIFY_EXTERNAL + needs to keep tree sx-latch */ mtr_release_s_latch_at_savepoint( mtr, savepoint, dict_index_get_lock(index)); } + + /* release upper blocks */ + if (retrying_for_search_prev) { + for (; + prev_n_releases < prev_n_blocks; + prev_n_releases++) { + mtr_release_block_at_savepoint( + mtr, + prev_tree_savepoints[ + prev_n_releases], + prev_tree_blocks[ + prev_n_releases]); + } + } + + for (; n_releases < n_blocks; n_releases++) { + if (n_releases == 0 && modify_external) { + /* keep latch of root page */ + ut_ad(mtr_memo_contains_flagged( + mtr, tree_blocks[n_releases], + MTR_MEMO_PAGE_SX_FIX + | 
MTR_MEMO_PAGE_X_FIX)); + continue; + } + + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } } page_mode = mode; } - page_cur_search_with_match( - block, index, tuple, page_mode, &up_match, &up_bytes, - &low_match, &low_bytes, page_cursor); + if (dict_index_is_spatial(index)) { + /* Remember the page search mode */ + search_mode = page_mode; + + /* Some adjustment on search mode, when the + page search mode is PAGE_CUR_RTREE_LOCATE + or PAGE_CUR_RTREE_INSERT, as we are searching + with MBRs. When it is not the target level, we + should search all sub-trees that "CONTAIN" the + search range/MBR. When it is at the target + level, the search becomes PAGE_CUR_LE */ + if (page_mode == PAGE_CUR_RTREE_LOCATE + && level == height) { + page_mode = PAGE_CUR_LE; + } + + if (page_mode == PAGE_CUR_RTREE_INSERT) { + page_mode = (level == height) + ? PAGE_CUR_LE + : PAGE_CUR_RTREE_INSERT; + + ut_ad(!page_is_leaf(page) || page_mode == PAGE_CUR_LE); + } + + /* "need_path" indicates if we need to tracking the parent + pages, if it is not spatial comparison, then no need to + track it */ + if (page_mode < PAGE_CUR_CONTAIN) { + need_path = false; + } + + up_match = 0; + low_match = 0; + + if (latch_mode == BTR_MODIFY_TREE + || latch_mode == BTR_CONT_MODIFY_TREE + || latch_mode == BTR_CONT_SEARCH_TREE) { + /* Tree are locked, no need for Page Lock to protect + the "path" */ + cursor->rtr_info->need_page_lock = false; + } + } + + if (dict_index_is_spatial(index) && page_mode >= PAGE_CUR_CONTAIN) { + ut_ad(need_path); + found = rtr_cur_search_with_match( + block, index, tuple, page_mode, page_cursor, + cursor->rtr_info); + + /* Need to use BTR_MODIFY_TREE to do the MBR adjustment */ + if (search_mode == PAGE_CUR_RTREE_INSERT + && cursor->rtr_info->mbr_adj) { + if (latch_mode & BTR_MODIFY_LEAF) { + /* Parent MBR needs updated, should retry + with BTR_MODIFY_TREE */ + goto func_exit; + } else if (latch_mode & BTR_MODIFY_TREE) { + rtree_parent_modified = true; + cursor->rtr_info->mbr_adj = false; + mbr_adj = true; + } else { + ut_ad(0); + } + } + } else if (height == 0 && btr_search_enabled + && !dict_index_is_spatial(index)) { + /* The adaptive hash index is only used when searching + for leaf pages (height==0), but not in r-trees. + We only need the byte prefix comparison for the purpose + of updating the adaptive hash index. */ + page_cur_search_with_match_bytes( + block, index, tuple, page_mode, &up_match, &up_bytes, + &low_match, &low_bytes, page_cursor); + } else { + /* Search for complete index fields. */ + up_bytes = low_bytes = 0; + page_cur_search_with_match( + block, index, tuple, page_mode, &up_match, + &low_match, page_cursor, + need_path ? 
cursor->rtr_info : NULL); + } if (estimate) { btr_cur_add_path_info(cursor, height, root_height); @@ -768,6 +1490,34 @@ retry_page_get: ut_ad(height == btr_page_get_level(page_cur_get_page(page_cursor), mtr)); + /* Add Predicate lock if it is serializable isolation + and only if it is in the search case */ + if (dict_index_is_spatial(index) + && cursor->rtr_info->need_prdt_lock + && mode != PAGE_CUR_RTREE_INSERT + && mode != PAGE_CUR_RTREE_LOCATE + && mode >= PAGE_CUR_CONTAIN) { + trx_t* trx = thr_get_trx(cursor->thr); + lock_prdt_t prdt; + + lock_mutex_enter(); + lock_init_prdt_from_mbr( + &prdt, &cursor->rtr_info->mbr, mode, + trx->lock.lock_heap); + lock_mutex_exit(); + + if (rw_latch == RW_NO_LATCH && height != 0) { + rw_lock_s_lock(&(block->lock)); + } + + lock_prdt_lock(block, &prdt, index, LOCK_S, + LOCK_PREDICATE, cursor->thr, mtr); + + if (rw_latch == RW_NO_LATCH && height != 0) { + rw_lock_s_unlock(&(block->lock)); + } + } + if (level != height) { const rec_t* node_ptr; @@ -781,12 +1531,296 @@ retry_page_get: offsets = rec_get_offsets( node_ptr, index, offsets, ULINT_UNDEFINED, &heap); - /* Go to the child node */ - page_no = btr_node_ptr_get_child_page_no(node_ptr, offsets); + /* If the rec is the first or last in the page for + pessimistic delete intention, it might cause node_ptr insert + for the upper level. We should change the intention and retry. + */ + if (latch_mode == BTR_MODIFY_TREE + && btr_cur_need_opposite_intention( + page, lock_intention, node_ptr)) { + +need_opposite_intention: + ut_ad(upper_rw_latch == RW_X_LATCH); + + if (n_releases > 0) { + /* release root block */ + mtr_release_block_at_savepoint( + mtr, tree_savepoints[0], + tree_blocks[0]); + } - if (UNIV_UNLIKELY(height == 0 && dict_index_is_ibuf(index))) { - /* We're doing a search on an ibuf tree and we're one - level above the leaf page. 
*/ + /* release all blocks */ + for (; n_releases <= n_blocks; n_releases++) { + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + + lock_intention = BTR_INTENTION_BOTH; + + page_id.reset(space, dict_index_get_page(index)); + up_match = 0; + low_match = 0; + height = ULINT_UNDEFINED; + + n_blocks = 0; + n_releases = 0; + + goto search_loop; + } + + if (dict_index_is_spatial(index)) { + if (page_rec_is_supremum(node_ptr)) { + cursor->low_match = 0; + cursor->up_match = 0; + goto func_exit; + } + + /* If we are doing insertion or record locating, + remember the tree nodes we visited */ + if (page_mode == PAGE_CUR_RTREE_INSERT + || (search_mode == PAGE_CUR_RTREE_LOCATE + && (latch_mode != BTR_MODIFY_LEAF))) { + bool add_latch = false; + + if (latch_mode == BTR_MODIFY_TREE + && rw_latch == RW_NO_LATCH) { + ut_ad(mtr_memo_contains_flagged( + mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK)); + rw_lock_s_lock(&block->lock); + add_latch = true; + } + + /* Store the parent cursor location */ +#ifdef UNIV_DEBUG + ulint num_stored = rtr_store_parent_path( + block, cursor, latch_mode, + height + 1, mtr); +#else + rtr_store_parent_path( + block, cursor, latch_mode, + height + 1, mtr); +#endif + + if (page_mode == PAGE_CUR_RTREE_INSERT) { + btr_pcur_t* r_cursor = + rtr_get_parent_cursor( + cursor, height + 1, + true); + /* If it is insertion, there should + be only one parent for each level + traverse */ +#ifdef UNIV_DEBUG + ut_ad(num_stored == 1); +#endif + + node_ptr = btr_pcur_get_rec(r_cursor); + + } + + if (add_latch) { + rw_lock_s_unlock(&block->lock); + } + + ut_ad(!page_rec_is_supremum(node_ptr)); + } + + ut_ad(page_mode == search_mode + || (page_mode == PAGE_CUR_WITHIN + && search_mode == PAGE_CUR_RTREE_LOCATE)); + + page_mode = search_mode; + } + + /* If the first or the last record of the page + or the same key value to the first record or last record, + the another page might be choosen when BTR_CONT_MODIFY_TREE. + So, the parent page should not released to avoiding deadlock + with blocking the another search with the same key value. */ + if (!detected_same_key_root + && lock_intention == BTR_INTENTION_BOTH + && !dict_index_is_unique(index) + && latch_mode == BTR_MODIFY_TREE + && (up_match >= rec_offs_n_fields(offsets) - 1 + || low_match >= rec_offs_n_fields(offsets) - 1)) { + const rec_t* first_rec + = page_rec_get_next_const( + page_get_infimum_rec( + page)); + ulint matched_fields; + + ut_ad(upper_rw_latch == RW_X_LATCH); + + if (node_ptr == first_rec + || page_rec_is_last(node_ptr, page)) { + detected_same_key_root = true; + } else { + matched_fields = 0; + + offsets2 = rec_get_offsets( + first_rec, index, offsets2, + ULINT_UNDEFINED, &heap); + cmp_rec_rec_with_match(node_ptr, first_rec, + offsets, offsets2, index, FALSE, + &matched_fields); + + if (matched_fields + >= rec_offs_n_fields(offsets) - 1) { + detected_same_key_root = true; + } else { + const rec_t* last_rec; + + last_rec = page_rec_get_prev_const( + page_get_supremum_rec( + page)); + + matched_fields = 0; + + offsets2 = rec_get_offsets( + last_rec, index, offsets2, + ULINT_UNDEFINED, &heap); + cmp_rec_rec_with_match( + node_ptr, last_rec, + offsets, offsets2, index, + FALSE, &matched_fields); + if (matched_fields + >= rec_offs_n_fields(offsets) - 1) { + detected_same_key_root = true; + } + } + } + } + + /* If the page might cause modify_tree, + we should not release the parent page's lock. 
*/ + if (!detected_same_key_root + && latch_mode == BTR_MODIFY_TREE + && !btr_cur_will_modify_tree( + index, page, lock_intention, node_ptr, + node_ptr_max_size, page_size, mtr) + && !rtree_parent_modified) { + ut_ad(upper_rw_latch == RW_X_LATCH); + ut_ad(n_releases <= n_blocks); + + /* we can release upper blocks */ + for (; n_releases < n_blocks; n_releases++) { + if (n_releases == 0) { + /* we should not release root page + to pin to same block. */ + continue; + } + + /* release unused blocks to unpin */ + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + } + + if (height == level + && latch_mode == BTR_MODIFY_TREE) { + ut_ad(upper_rw_latch == RW_X_LATCH); + /* we should sx-latch root page, if released already. + It contains seg_header. */ + if (n_releases > 0) { + mtr_block_sx_latch_at_savepoint( + mtr, tree_savepoints[0], + tree_blocks[0]); + } + + /* x-latch the branch blocks not released yet. */ + for (ulint i = n_releases; i <= n_blocks; i++) { + mtr_block_x_latch_at_savepoint( + mtr, tree_savepoints[i], + tree_blocks[i]); + } + } + + /* We should consider prev_page of parent page, if the node_ptr + is the leftmost of the page. because BTR_SEARCH_PREV and + BTR_MODIFY_PREV latches prev_page of the leaf page. */ + if ((latch_mode == BTR_SEARCH_PREV + || latch_mode == BTR_MODIFY_PREV) + && !retrying_for_search_prev) { + /* block should be latched for consistent + btr_page_get_prev() */ + ut_ad(mtr_memo_contains_flagged(mtr, block, + MTR_MEMO_PAGE_S_FIX + | MTR_MEMO_PAGE_X_FIX)); + + if (btr_page_get_prev(page, mtr) != FIL_NULL + && page_rec_is_first(node_ptr, page)) { + + if (leftmost_from_level == 0) { + leftmost_from_level = height + 1; + } + } else { + leftmost_from_level = 0; + } + + if (height == 0 && leftmost_from_level > 0) { + /* should retry to get also prev_page + from level==leftmost_from_level. */ + retrying_for_search_prev = true; + + prev_tree_blocks = static_cast( + ut_malloc_nokey(sizeof(buf_block_t*) + * leftmost_from_level)); + + prev_tree_savepoints = static_cast( + ut_malloc_nokey(sizeof(ulint) + * leftmost_from_level)); + + /* back to the level (leftmost_from_level+1) */ + ulint idx = n_blocks + - (leftmost_from_level - 1); + + page_id.reset( + space, + tree_blocks[idx]->page.id.page_no()); + + for (ulint i = n_blocks + - (leftmost_from_level - 1); + i <= n_blocks; i++) { + mtr_release_block_at_savepoint( + mtr, tree_savepoints[i], + tree_blocks[i]); + } + + n_blocks -= (leftmost_from_level - 1); + height = leftmost_from_level; + ut_ad(n_releases == 0); + + /* replay up_match, low_match */ + up_match = 0; + low_match = 0; + rtr_info_t* rtr_info = need_path + ? cursor->rtr_info : NULL; + + for (ulint i = 0; i < n_blocks; i++) { + page_cur_search_with_match( + tree_blocks[i], index, tuple, + page_mode, &up_match, + &low_match, page_cursor, + rtr_info); + } + + goto search_loop; + } + } + + /* Go to the child node */ + page_id.reset( + space, + btr_node_ptr_get_child_page_no(node_ptr, offsets)); + + n_blocks++; + + if (UNIV_UNLIKELY(height == 0 && dict_index_is_ibuf(index))) { + /* We're doing a search on an ibuf tree and we're one + level above the leaf page. 
*/ ut_ad(level == 0); @@ -795,16 +1829,116 @@ retry_page_get: goto retry_page_get; } + if (dict_index_is_spatial(index) + && page_mode >= PAGE_CUR_CONTAIN + && page_mode != PAGE_CUR_RTREE_INSERT) { + ut_ad(need_path); + rtr_node_path_t* path = + cursor->rtr_info->path; + + if (!path->empty() && found) { +#ifdef UNIV_DEBUG + node_visit_t last_visit = path->back(); + + ut_ad(last_visit.page_no == page_id.page_no()); +#endif /* UNIV_DEBUG */ + + path->pop_back(); + +#ifdef UNIV_DEBUG + if (page_mode == PAGE_CUR_RTREE_LOCATE + && (latch_mode != BTR_MODIFY_LEAF)) { + btr_pcur_t* cur + = cursor->rtr_info->parent_path->back( + ).cursor; + rec_t* my_node_ptr + = btr_pcur_get_rec(cur); + + offsets = rec_get_offsets( + my_node_ptr, index, offsets, + ULINT_UNDEFINED, &heap); + + ulint my_page_no + = btr_node_ptr_get_child_page_no( + my_node_ptr, offsets); + + ut_ad(page_id.page_no() == my_page_no); + + } +#endif + } + } + goto search_loop; + } else if (!dict_index_is_spatial(index) + && latch_mode == BTR_MODIFY_TREE + && lock_intention == BTR_INTENTION_INSERT + && mach_read_from_4(page + FIL_PAGE_NEXT) != FIL_NULL + && page_rec_is_last(page_cur_get_rec(page_cursor), page)) { + + /* btr_insert_into_right_sibling() might cause + deleting node_ptr at upper level */ + + guess = NULL; + + if (height == 0) { + /* release the leaf pages if latched */ + for (uint i = 0; i < 3; i++) { + if (latch_leaves.blocks[i] != NULL) { + mtr_release_block_at_savepoint( + mtr, latch_leaves.savepoints[i], + latch_leaves.blocks[i]); + latch_leaves.blocks[i] = NULL; + } + } + } + + goto need_opposite_intention; } if (level != 0) { - /* x-latch the page */ - buf_block_t* child_block = btr_block_get( - space, zip_size, page_no, RW_X_LATCH, index, mtr); + if (upper_rw_latch == RW_NO_LATCH) { + /* latch the page */ + buf_block_t* child_block; + + if (latch_mode == BTR_CONT_MODIFY_TREE) { + child_block = btr_block_get( + page_id, page_size, RW_X_LATCH, + index, mtr); + } else { + ut_ad(latch_mode == BTR_CONT_SEARCH_TREE); + child_block = btr_block_get( + page_id, page_size, RW_SX_LATCH, + index, mtr); + } + + btr_assert_not_corrupted(child_block, index); + } else { + ut_ad(mtr_memo_contains(mtr, block, upper_rw_latch)); + btr_assert_not_corrupted(block, index); + + if (s_latch_by_caller) { + ut_ad(latch_mode == BTR_SEARCH_TREE); + /* to exclude modifying tree operations + should sx-latch the index. */ + ut_ad(mtr_memo_contains( + mtr, dict_index_get_lock(index), + MTR_MEMO_SX_LOCK)); + /* because has sx-latch of index, + can release upper blocks. */ + for (; n_releases < n_blocks; n_releases++) { + mtr_release_block_at_savepoint( + mtr, + tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + } + } - page = buf_block_get_frame(child_block); - btr_assert_not_corrupted(child_block, index); + if (page_mode <= PAGE_CUR_LE) { + cursor->low_match = low_match; + cursor->up_match = up_match; + } } else { cursor->low_match = low_match; cursor->low_bytes = low_bytes; @@ -815,8 +1949,8 @@ retry_page_get: /* We do a dirty read of btr_search_enabled here. We will properly check btr_search_enabled again in btr_search_build_page_hash_index() before building a - page hash index, while holding btr_search_latch. */ - if (btr_search_enabled) { + page hash index, while holding search latch. 
*/ + if (btr_search_enabled && !index->disable_ahi) { btr_search_info_update(index, cursor); } #endif @@ -828,15 +1962,213 @@ retry_page_get: || mode != PAGE_CUR_LE); } + /* For spatial index, remember what blocks are still latched */ + if (dict_index_is_spatial(index) + && (latch_mode == BTR_MODIFY_TREE + || latch_mode == BTR_MODIFY_LEAF)) { + for (ulint i = 0; i < n_releases; i++) { + cursor->rtr_info->tree_blocks[i] = NULL; + cursor->rtr_info->tree_savepoints[i] = 0; + } + + for (ulint i = n_releases; i <= n_blocks; i++) { + cursor->rtr_info->tree_blocks[i] = tree_blocks[i]; + cursor->rtr_info->tree_savepoints[i] = tree_savepoints[i]; + } + } + func_exit: if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } + if (retrying_for_search_prev) { + ut_free(prev_tree_blocks); + ut_free(prev_tree_savepoints); + } + if (has_search_latch) { - rw_lock_s_lock(&btr_search_latch); + rw_lock_s_lock(btr_get_search_latch(index)); + } + + if (mbr_adj) { + /* remember that we will need to adjust parent MBR */ + cursor->rtr_info->mbr_adj = true; + } + + DBUG_RETURN(err); +} + +/** Searches an index tree and positions a tree cursor on a given level. +This function will avoid latching the traversal path and so should be +used only for cases where-in latching is not needed. + +@param[in,out] index index +@param[in] level the tree level of search +@param[in] tuple data tuple; Note: n_fields_cmp in compared + to the node ptr page node field +@param[in] mode PAGE_CUR_L, .... + Insert should always be made using PAGE_CUR_LE + to search the position. +@param[in,out] cursor tree cursor; points to record of interest. +@param[in] file file name +@param[in[ line line where called from +@param[in,out] mtr mtr +@param[in] mark_dirty + if true then mark the block as dirty */ +dberr_t +btr_cur_search_to_nth_level_with_no_latch( + dict_index_t* index, + ulint level, + const dtuple_t* tuple, + page_cur_mode_t mode, + btr_cur_t* cursor, + const char* file, + ulint line, + mtr_t* mtr, + bool mark_dirty) +{ + page_t* page = NULL; /* remove warning */ + buf_block_t* block; + ulint height; + ulint up_match; + ulint low_match; + ulint rw_latch; + page_cur_mode_t page_mode; + ulint buf_mode; + page_cur_t* page_cursor; + ulint root_height = 0; /* remove warning */ + ulint n_blocks = 0; + dberr_t err = DB_SUCCESS; + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + rec_offs_init(offsets_); + + DBUG_ENTER("btr_cur_search_to_nth_level_with_no_latch"); + + ut_ad(dict_table_is_intrinsic(index->table)); + ut_ad(level == 0 || mode == PAGE_CUR_LE); + ut_ad(dict_index_check_search_tuple(index, tuple)); + ut_ad(dtuple_check_typed(tuple)); + ut_ad(index->page != FIL_NULL); + + UNIV_MEM_INVALID(&cursor->up_match, sizeof cursor->up_match); + UNIV_MEM_INVALID(&cursor->low_match, sizeof cursor->low_match); +#ifdef UNIV_DEBUG + cursor->up_match = ULINT_UNDEFINED; + cursor->low_match = ULINT_UNDEFINED; +#endif /* UNIV_DEBUG */ + + cursor->flag = BTR_CUR_BINARY; + cursor->index = index; + + page_cursor = btr_cur_get_page_cur(cursor); + + const ulint space = dict_index_get_space(index); + const page_size_t page_size(dict_table_page_size(index->table)); + /* Start with the root page. */ + page_id_t page_id(space, dict_index_get_page(index)); + + up_match = 0; + low_match = 0; + + height = ULINT_UNDEFINED; + + /* We use these modified search modes on non-leaf levels of the + B-tree. These let us end up in the right B-tree leaf. In that leaf + we use the original search mode. 
*/ + + switch (mode) { + case PAGE_CUR_GE: + page_mode = PAGE_CUR_L; + break; + case PAGE_CUR_G: + page_mode = PAGE_CUR_LE; + break; + default: + page_mode = mode; + break; + } + + /* Loop and search until we arrive at the desired level */ + bool at_desired_level = false; + while (!at_desired_level) { + buf_mode = BUF_GET; + rw_latch = RW_NO_LATCH; + + ut_ad(n_blocks < BTR_MAX_LEVELS); + + block = buf_page_get_gen(page_id, page_size, rw_latch, NULL, + buf_mode, file, line, mtr, &err, mark_dirty); + + if (err != DB_SUCCESS) { + if (err == DB_DECRYPTION_FAILED) { + ib_push_warning((void *)NULL, + DB_DECRYPTION_FAILED, + "Table %s is encrypted but encryption service or" + " used key_id is not available. " + " Can't continue reading table.", + index->table->name); + index->table->is_encrypted = true; + } + + return (err); + } + + page = buf_block_get_frame(block); + + if (height == ULINT_UNDEFINED) { + /* We are in the root node */ + + height = btr_page_get_level(page, mtr); + root_height = height; + cursor->tree_height = root_height + 1; + } + + if (height == 0) { + /* On leaf level. Switch back to original search mode.*/ + page_mode = mode; + } + + page_cur_search_with_match( + block, index, tuple, page_mode, &up_match, + &low_match, page_cursor, NULL); + + ut_ad(height == btr_page_get_level( + page_cur_get_page(page_cursor), mtr)); + + if (level != height) { + + const rec_t* node_ptr; + ut_ad(height > 0); + + height--; + + node_ptr = page_cur_get_rec(page_cursor); + + offsets = rec_get_offsets( + node_ptr, index, offsets, + ULINT_UNDEFINED, &heap); + + /* Go to the child node */ + page_id.reset(space, btr_node_ptr_get_child_page_no( + node_ptr, offsets)); + + n_blocks++; + } else { + /* If this is the desired level, leave the loop */ + at_desired_level = true; + } + } + + cursor->low_match = low_match; + cursor->up_match = up_match; + + if (heap != NULL) { + mem_heap_free(heap); } return err; @@ -844,7 +2176,6 @@ func_exit: /*****************************************************************//** Opens a cursor at either end of an index. 
*/ -UNIV_INTERN dberr_t btr_cur_open_at_index_side_func( /*============================*/ @@ -860,14 +2191,18 @@ btr_cur_open_at_index_side_func( mtr_t* mtr) /*!< in/out: mini-transaction */ { page_cur_t* page_cursor; - ulint page_no; - ulint space; - ulint zip_size; + ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; ulint height; ulint root_height = 0; /* remove warning */ rec_t* node_ptr; ulint estimate; ulint savepoint; + ulint upper_rw_latch, root_leaf_rw_latch; + btr_intention_t lock_intention; + buf_block_t* tree_blocks[BTR_MAX_LEVELS]; + ulint tree_savepoints[BTR_MAX_LEVELS]; + ulint n_blocks = 0; + ulint n_releases = 0; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; @@ -880,6 +2215,22 @@ btr_cur_open_at_index_side_func( ut_ad(level != ULINT_UNDEFINED); + bool s_latch_by_caller; + + s_latch_by_caller = latch_mode & BTR_ALREADY_S_LATCHED; + latch_mode &= ~BTR_ALREADY_S_LATCHED; + + lock_intention = btr_cur_get_and_clear_intention(&latch_mode); + + ut_ad(!(latch_mode & BTR_MODIFY_EXTERNAL)); + + /* This function doesn't need to lock left page of the leaf page */ + if (latch_mode == BTR_SEARCH_PREV) { + latch_mode = BTR_SEARCH_LEAF; + } else if (latch_mode == BTR_MODIFY_PREV) { + latch_mode = BTR_MODIFY_LEAF; + } + /* Store the position of the tree latch we push to mtr so that we know how to release it when we have latched the leaf node */ @@ -887,35 +2238,76 @@ btr_cur_open_at_index_side_func( switch (latch_mode) { case BTR_CONT_MODIFY_TREE: + case BTR_CONT_SEARCH_TREE: + upper_rw_latch = RW_NO_LATCH; break; case BTR_MODIFY_TREE: - mtr_x_lock(dict_index_get_lock(index), mtr); - break; - case BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED: - case BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED: - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_S_LOCK)); + /* Most of delete-intended operations are purging. + Free blocks and read IO bandwidth should be prior + for them, when the history list is glowing huge. 
*/ + if (lock_intention == BTR_INTENTION_DELETE + && trx_sys->rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH + && buf_get_n_pending_read_ios()) { + mtr_x_lock(dict_index_get_lock(index), mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), mtr); + } + upper_rw_latch = RW_X_LATCH; break; default: - mtr_s_lock(dict_index_get_lock(index), mtr); + ut_ad(!s_latch_by_caller + || mtr_memo_contains_flagged(mtr, + dict_index_get_lock(index), + MTR_MEMO_SX_LOCK + | MTR_MEMO_S_LOCK)); + if (!srv_read_only_mode) { + if (!s_latch_by_caller) { + /* BTR_SEARCH_TREE is intended to be used with + BTR_ALREADY_S_LATCHED */ + ut_ad(latch_mode != BTR_SEARCH_TREE); + + mtr_s_lock(dict_index_get_lock(index), mtr); + } + upper_rw_latch = RW_S_LATCH; + } else { + upper_rw_latch = RW_NO_LATCH; + } } + root_leaf_rw_latch = btr_cur_latch_for_root_leaf(latch_mode); page_cursor = btr_cur_get_page_cur(cursor); cursor->index = index; - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); - page_no = dict_index_get_page(index); + page_id_t page_id(dict_index_get_space(index), + dict_index_get_page(index)); + const page_size_t& page_size = dict_table_page_size(index->table); + + if (root_leaf_rw_latch == RW_X_LATCH) { + node_ptr_max_size = dict_index_node_ptr_max_size(index); + } height = ULINT_UNDEFINED; for (;;) { buf_block_t* block=NULL; page_t* page=NULL; + ulint rw_latch; + + ut_ad(n_blocks < BTR_MAX_LEVELS); + + if (height != 0 + && (latch_mode != BTR_MODIFY_TREE + || height == level)) { + rw_latch = upper_rw_latch; + } else { + rw_latch = RW_NO_LATCH; + } + + tree_savepoints[n_blocks] = mtr_set_savepoint(mtr); + block = buf_page_get_gen(page_id, page_size, rw_latch, NULL, + BUF_GET, file, line, mtr, &err); + tree_blocks[n_blocks] = block; - block = buf_page_get_gen(space, zip_size, page_no, - RW_NO_LATCH, NULL, BUF_GET, - file, line, mtr, &err); if (err != DB_SUCCESS) { if (err == DB_DECRYPTION_FAILED) { ib_push_warning((void *)NULL, @@ -931,10 +2323,27 @@ btr_cur_open_at_index_side_func( } page = buf_block_get_frame(block); - ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX); - ut_ad(index->id == btr_page_get_index_id(page)); - block->check_index_page_at_flush = TRUE; + if (height == ULINT_UNDEFINED + && btr_page_get_level(page, mtr) == 0 + && rw_latch != RW_NO_LATCH + && rw_latch != root_leaf_rw_latch) { + /* We should retry to get the page, because the root page + is latched with different level as a leaf page. */ + ut_ad(root_leaf_rw_latch != RW_NO_LATCH); + ut_ad(rw_latch == RW_S_LATCH); + + ut_ad(n_blocks == 0); + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_blocks], + tree_blocks[n_blocks]); + + upper_rw_latch = root_leaf_rw_latch; + continue; + } + + ut_ad(fil_page_index_page_check(page)); + ut_ad(index->id == btr_page_get_index_id(page)); if (height == ULINT_UNDEFINED) { /* We are in the root node */ @@ -948,12 +2357,16 @@ btr_cur_open_at_index_side_func( } if (height == level) { - btr_cur_latch_leaves( - page, space, zip_size, page_no, - latch_mode & ~BTR_ALREADY_S_LATCHED, - cursor, mtr); - - if (height == 0) { + if (srv_read_only_mode) { + btr_cur_latch_leaves( + block, page_id, page_size, + latch_mode, cursor, mtr); + } else if (height == 0) { + if (rw_latch == RW_NO_LATCH) { + btr_cur_latch_leaves( + block, page_id, page_size, + latch_mode, cursor, mtr); + } /* In versions <= 3.23.52 we had forgotten to release the tree latch here. 
If in an index scan we had to @@ -965,15 +2378,55 @@ btr_cur_open_at_index_side_func( switch (latch_mode) { case BTR_MODIFY_TREE: case BTR_CONT_MODIFY_TREE: - case BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED: - case BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED: + case BTR_CONT_SEARCH_TREE: break; default: - /* Release the tree s-latch */ - - mtr_release_s_latch_at_savepoint( - mtr, savepoint, - dict_index_get_lock(index)); + if (!s_latch_by_caller) { + /* Release the tree s-latch */ + mtr_release_s_latch_at_savepoint( + mtr, savepoint, + dict_index_get_lock( + index)); + } + + /* release upper blocks */ + for (; n_releases < n_blocks; + n_releases++) { + mtr_release_block_at_savepoint( + mtr, + tree_savepoints[ + n_releases], + tree_blocks[ + n_releases]); + } + } + } else { /* height != 0 */ + /* We already have the block latched. */ + ut_ad(latch_mode == BTR_SEARCH_TREE); + ut_ad(s_latch_by_caller); + ut_ad(upper_rw_latch == RW_S_LATCH); + + ut_ad(mtr_memo_contains(mtr, block, + upper_rw_latch)); + + if (s_latch_by_caller) { + /* to exclude modifying tree operations + should sx-latch the index. */ + ut_ad(mtr_memo_contains( + mtr, + dict_index_get_lock(index), + MTR_MEMO_SX_LOCK)); + /* because has sx-latch of index, + can release upper blocks. */ + for (; n_releases < n_blocks; + n_releases++) { + mtr_release_block_at_savepoint( + mtr, + tree_savepoints[ + n_releases], + tree_blocks[ + n_releases]); + } } } } @@ -1010,8 +2463,81 @@ btr_cur_open_at_index_side_func( node_ptr = page_cur_get_rec(page_cursor); offsets = rec_get_offsets(node_ptr, cursor->index, offsets, ULINT_UNDEFINED, &heap); + + /* If the rec is the first or last in the page for + pessimistic delete intention, it might cause node_ptr insert + for the upper level. We should change the intention and retry. + */ + if (latch_mode == BTR_MODIFY_TREE + && btr_cur_need_opposite_intention( + page, lock_intention, node_ptr)) { + + ut_ad(upper_rw_latch == RW_X_LATCH); + /* release all blocks */ + for (; n_releases <= n_blocks; n_releases++) { + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + + lock_intention = BTR_INTENTION_BOTH; + + page_id.set_page_no(dict_index_get_page(index)); + + height = ULINT_UNDEFINED; + + n_blocks = 0; + n_releases = 0; + + continue; + } + + if (latch_mode == BTR_MODIFY_TREE + && !btr_cur_will_modify_tree( + cursor->index, page, lock_intention, node_ptr, + node_ptr_max_size, page_size, mtr)) { + ut_ad(upper_rw_latch == RW_X_LATCH); + ut_ad(n_releases <= n_blocks); + + /* we can release upper blocks */ + for (; n_releases < n_blocks; n_releases++) { + if (n_releases == 0) { + /* we should not release root page + to pin to same block. */ + continue; + } + + /* release unused blocks to unpin */ + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + } + + if (height == level + && latch_mode == BTR_MODIFY_TREE) { + ut_ad(upper_rw_latch == RW_X_LATCH); + /* we should sx-latch root page, if released already. + It contains seg_header. */ + if (n_releases > 0) { + mtr_block_sx_latch_at_savepoint( + mtr, tree_savepoints[0], + tree_blocks[0]); + } + + /* x-latch the branch blocks not released yet. 
*/ + for (ulint i = n_releases; i <= n_blocks; i++) { + mtr_block_x_latch_at_savepoint( + mtr, tree_savepoints[i], + tree_blocks[i]); + } + } + /* Go to the child node */ - page_no = btr_node_ptr_get_child_page_no(node_ptr, offsets); + page_id.set_page_no( + btr_node_ptr_get_child_page_no(node_ptr, offsets)); + + n_blocks++; } exit_loop: @@ -1022,10 +2548,131 @@ btr_cur_open_at_index_side_func( return err; } +/** Opens a cursor at either end of an index. +Avoid taking latches on buffer, just pin (by incrementing fix_count) +to keep them in buffer pool. This mode is used by intrinsic table +as they are not shared and so there is no need of latching. +@param[in] from_left true if open to low end, false if open + to high end. +@param[in] index index +@param[in,out] cursor cursor +@param[in] file file name +@param[in] line line where called +@param[in,out] mtr mini transaction +*/ +dberr_t +btr_cur_open_at_index_side_with_no_latch_func( + bool from_left, + dict_index_t* index, + btr_cur_t* cursor, + ulint level, + const char* file, + ulint line, + mtr_t* mtr) +{ + page_cur_t* page_cursor; + ulint height; + rec_t* node_ptr; + ulint n_blocks = 0; + mem_heap_t* heap = NULL; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + dberr_t err = DB_SUCCESS; + + rec_offs_init(offsets_); + + ut_ad(level != ULINT_UNDEFINED); + + page_cursor = btr_cur_get_page_cur(cursor); + cursor->index = index; + page_id_t page_id(dict_index_get_space(index), + dict_index_get_page(index)); + const page_size_t& page_size = dict_table_page_size(index->table); + + height = ULINT_UNDEFINED; + + for (;;) { + buf_block_t* block; + page_t* page; + ulint rw_latch = RW_NO_LATCH; + + ut_ad(n_blocks < BTR_MAX_LEVELS); + + block = buf_page_get_gen(page_id, page_size, rw_latch, NULL, + BUF_GET, file, line, mtr, &err); + + if (err != DB_SUCCESS) { + if (err == DB_DECRYPTION_FAILED) { + ib_push_warning((void *)NULL, + DB_DECRYPTION_FAILED, + "Table %s is encrypted but encryption service or" + " used key_id is not available. " + " Can't continue reading table.", + index->table->name); + index->table->is_encrypted = true; + } + + return (err); + } + + page = buf_block_get_frame(block); + + ut_ad(fil_page_index_page_check(page)); + ut_ad(index->id == btr_page_get_index_id(page)); + + if (height == ULINT_UNDEFINED) { + /* We are in the root node */ + + height = btr_page_get_level(page, mtr); + ut_a(height >= level); + } else { + /* TODO: flag the index corrupted if this fails */ + ut_ad(height == btr_page_get_level(page, mtr)); + } + + if (from_left) { + page_cur_set_before_first(block, page_cursor); + } else { + page_cur_set_after_last(block, page_cursor); + } + + if (height == level) { + break; + } + + ut_ad(height > 0); + + if (from_left) { + page_cur_move_to_next(page_cursor); + } else { + page_cur_move_to_prev(page_cursor); + } + + height--; + + node_ptr = page_cur_get_rec(page_cursor); + offsets = rec_get_offsets(node_ptr, cursor->index, offsets, + ULINT_UNDEFINED, &heap); + + /* Go to the child node */ + page_id.set_page_no( + btr_node_ptr_get_child_page_no(node_ptr, offsets)); + + n_blocks++; + } + + if (heap != NULL) { + mem_heap_free(heap); + } + + return (err); +} + /**********************************************************************//** -Positions a cursor at a randomly chosen position within a B-tree. */ -UNIV_INTERN -void +Positions a cursor at a randomly chosen position within a B-tree. 
+@return true if the index is available and we have put the cursor, false +if the index is unavailable */ +bool btr_cur_open_at_rnd_pos_func( /*=========================*/ dict_index_t* index, /*!< in: index */ @@ -1036,31 +2683,86 @@ btr_cur_open_at_rnd_pos_func( mtr_t* mtr) /*!< in: mtr */ { page_cur_t* page_cursor; - ulint page_no; - ulint space; - ulint zip_size; + ulint node_ptr_max_size = UNIV_PAGE_SIZE / 2; ulint height; rec_t* node_ptr; + ulint savepoint; + ulint upper_rw_latch, root_leaf_rw_latch; + btr_intention_t lock_intention; + buf_block_t* tree_blocks[BTR_MAX_LEVELS]; + ulint tree_savepoints[BTR_MAX_LEVELS]; + ulint n_blocks = 0; + ulint n_releases = 0; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; rec_offs_init(offsets_); + ut_ad(!dict_index_is_spatial(index)); + + lock_intention = btr_cur_get_and_clear_intention(&latch_mode); + + ut_ad(!(latch_mode & BTR_MODIFY_EXTERNAL)); + + savepoint = mtr_set_savepoint(mtr); + switch (latch_mode) { case BTR_MODIFY_TREE: - mtr_x_lock(dict_index_get_lock(index), mtr); + /* Most of delete-intended operations are purging. + Free blocks and read IO bandwidth should be prioritized + for them, when the history list is growing huge. */ + if (lock_intention == BTR_INTENTION_DELETE + && trx_sys->rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH + && buf_get_n_pending_read_ios()) { + mtr_x_lock(dict_index_get_lock(index), mtr); + } else { + mtr_sx_lock(dict_index_get_lock(index), mtr); + } + upper_rw_latch = RW_X_LATCH; break; + case BTR_SEARCH_PREV: + case BTR_MODIFY_PREV: + /* This function doesn't support left uncle + page lock for left leaf page lock, when + needed. */ + case BTR_SEARCH_TREE: + case BTR_CONT_MODIFY_TREE: + case BTR_CONT_SEARCH_TREE: + ut_ad(0); + /* fall through */ default: - ut_ad(latch_mode != BTR_CONT_MODIFY_TREE); - mtr_s_lock(dict_index_get_lock(index), mtr); + if (!srv_read_only_mode) { + mtr_s_lock(dict_index_get_lock(index), mtr); + upper_rw_latch = RW_S_LATCH; + } else { + upper_rw_latch = RW_NO_LATCH; + } } + DBUG_EXECUTE_IF("test_index_is_unavailable", + return(false);); + + if (index->page == FIL_NULL) { + /* Since we did not hold the index lock until just now, the index + could be modified by others, for example, if this is a + statistics updater for a referenced table, it could be marked + as unavailable by 'DROP TABLE' in the meantime, since + we don't hold a lock for the statistics updater */ + return(false); + } + + root_leaf_rw_latch = btr_cur_latch_for_root_leaf(latch_mode); + page_cursor = btr_cur_get_page_cur(cursor); cursor->index = index; - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); - page_no = dict_index_get_page(index); + page_id_t page_id(dict_index_get_space(index), + dict_index_get_page(index)); + const page_size_t& page_size = dict_table_page_size(index->table); + + if (root_leaf_rw_latch == RW_X_LATCH) { + node_ptr_max_size = dict_index_node_ptr_max_size(index); + } height = ULINT_UNDEFINED; @@ -1068,10 +2770,22 @@ btr_cur_open_at_rnd_pos_func( buf_block_t* block; page_t* page; dberr_t err=DB_SUCCESS; + ulint rw_latch; + + ut_ad(n_blocks < BTR_MAX_LEVELS); + + if (height != 0 + && latch_mode != BTR_MODIFY_TREE) { + rw_latch = upper_rw_latch; + } else { + rw_latch = RW_NO_LATCH; + } + + tree_savepoints[n_blocks] = mtr_set_savepoint(mtr); + block = buf_page_get_gen(page_id, page_size, rw_latch, NULL, + BUF_GET, file, line, mtr, &err); + tree_blocks[n_blocks] = block; - block = buf_page_get_gen(space, zip_size, page_no, - RW_NO_LATCH, 
NULL, BUF_GET, - file, line, mtr, &err); if (err != DB_SUCCESS) { if (err == DB_DECRYPTION_FAILED) { @@ -1087,7 +2801,26 @@ btr_cur_open_at_rnd_pos_func( } page = buf_block_get_frame(block); - ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX); + + if (height == ULINT_UNDEFINED + && btr_page_get_level(page, mtr) == 0 + && rw_latch != RW_NO_LATCH + && rw_latch != root_leaf_rw_latch) { + /* We should retry to get the page, because the root page + is latched with different level as a leaf page. */ + ut_ad(root_leaf_rw_latch != RW_NO_LATCH); + ut_ad(rw_latch == RW_S_LATCH); + + ut_ad(n_blocks == 0); + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_blocks], + tree_blocks[n_blocks]); + + upper_rw_latch = root_leaf_rw_latch; + continue; + } + + ut_ad(fil_page_index_page_check(page)); ut_ad(index->id == btr_page_get_index_id(page)); if (height == ULINT_UNDEFINED) { @@ -1096,9 +2829,38 @@ btr_cur_open_at_rnd_pos_func( height = btr_page_get_level(page, mtr); } - if (height == 0) { - btr_cur_latch_leaves(page, space, zip_size, page_no, - latch_mode, cursor, mtr); + if (height == 0) { + if (rw_latch == RW_NO_LATCH + || srv_read_only_mode) { + btr_cur_latch_leaves( + block, page_id, page_size, + latch_mode, cursor, mtr); + } + + /* btr_cur_open_at_index_side_func() and + btr_cur_search_to_nth_level() release + tree s-latch here.*/ + switch (latch_mode) { + case BTR_MODIFY_TREE: + case BTR_CONT_MODIFY_TREE: + case BTR_CONT_SEARCH_TREE: + break; + default: + /* Release the tree s-latch */ + if (!srv_read_only_mode) { + mtr_release_s_latch_at_savepoint( + mtr, savepoint, + dict_index_get_lock(index)); + } + + /* release upper blocks */ + for (; n_releases < n_blocks; n_releases++) { + mtr_release_block_at_savepoint( + mtr, + tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + } } page_cur_open_on_rnd_user_rec(block, page_cursor); @@ -1115,14 +2877,89 @@ btr_cur_open_at_rnd_pos_func( node_ptr = page_cur_get_rec(page_cursor); offsets = rec_get_offsets(node_ptr, cursor->index, offsets, ULINT_UNDEFINED, &heap); + + /* If the rec is the first or last in the page for + pessimistic delete intention, it might cause node_ptr insert + for the upper level. We should change the intention and retry. + */ + if (latch_mode == BTR_MODIFY_TREE + && btr_cur_need_opposite_intention( + page, lock_intention, node_ptr)) { + + ut_ad(upper_rw_latch == RW_X_LATCH); + /* release all blocks */ + for (; n_releases <= n_blocks; n_releases++) { + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + + lock_intention = BTR_INTENTION_BOTH; + + page_id.set_page_no(dict_index_get_page(index)); + + height = ULINT_UNDEFINED; + + n_blocks = 0; + n_releases = 0; + + continue; + } + + if (latch_mode == BTR_MODIFY_TREE + && !btr_cur_will_modify_tree( + cursor->index, page, lock_intention, node_ptr, + node_ptr_max_size, page_size, mtr)) { + ut_ad(upper_rw_latch == RW_X_LATCH); + ut_ad(n_releases <= n_blocks); + + /* we can release upper blocks */ + for (; n_releases < n_blocks; n_releases++) { + if (n_releases == 0) { + /* we should not release root page + to pin to same block. */ + continue; + } + + /* release unused blocks to unpin */ + mtr_release_block_at_savepoint( + mtr, tree_savepoints[n_releases], + tree_blocks[n_releases]); + } + } + + if (height == 0 + && latch_mode == BTR_MODIFY_TREE) { + ut_ad(upper_rw_latch == RW_X_LATCH); + /* we should sx-latch root page, if released already. + It contains seg_header. 
*/ + if (n_releases > 0) { + mtr_block_sx_latch_at_savepoint( + mtr, tree_savepoints[0], + tree_blocks[0]); + } + + /* x-latch the branch blocks not released yet. */ + for (ulint i = n_releases; i <= n_blocks; i++) { + mtr_block_x_latch_at_savepoint( + mtr, tree_savepoints[i], + tree_blocks[i]); + } + } + /* Go to the child node */ - page_no = btr_node_ptr_get_child_page_no(node_ptr, offsets); + page_id.set_page_no( + btr_node_ptr_get_child_page_no(node_ptr, offsets)); + + n_blocks++; } exit_loop: if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } + + return(true); } /*==================== B-TREE INSERT =========================*/ @@ -1138,7 +2975,7 @@ if this is a compressed leaf page in a secondary index. This has to be done either within the same mini-transaction, or by invoking ibuf_reset_free_bits() before mtr_commit(). -@return pointer to inserted record if succeed, else NULL */ +@return pointer to inserted record if succeed, else NULL */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) rec_t* btr_cur_insert_if_possible( @@ -1157,8 +2994,9 @@ btr_cur_insert_if_possible( ut_ad(dtuple_check_typed(tuple)); - ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix( + mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX, cursor->index->table)); page_cursor = btr_cur_get_page_cur(cursor); /* Now, try the insert */ @@ -1181,7 +3019,7 @@ btr_cur_insert_if_possible( /*************************************************************//** For an insert, checks the locks and does the undo logging if desired. -@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ +@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,5,6))) dberr_t btr_cur_ins_lock_and_undo( @@ -1198,7 +3036,7 @@ btr_cur_ins_lock_and_undo( successor record */ { dict_index_t* index; - dberr_t err; + dberr_t err = DB_SUCCESS; rec_t* rec; roll_ptr_t roll_ptr; @@ -1211,10 +3049,32 @@ btr_cur_ins_lock_and_undo( ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); + ut_ad(mtr->is_named_space(index->space)); - err = lock_rec_insert_check_and_lock(flags, rec, - btr_cur_get_block(cursor), - index, thr, mtr, inherit); + /* Check if there is predicate or GAP lock preventing the insertion */ + if (!(flags & BTR_NO_LOCKING_FLAG)) { + if (dict_index_is_spatial(index)) { + lock_prdt_t prdt; + rtr_mbr_t mbr; + + rtr_get_mbr_from_tuple(entry, &mbr); + + /* Use on stack MBR variable to test if a lock is + needed. 
If so, the predicate (MBR) will be allocated + from lock heap in lock_prdt_insert_check_and_lock() */ + lock_init_prdt_from_mbr( + &prdt, &mbr, 0, NULL); + + err = lock_prdt_insert_check_and_lock( + flags, rec, btr_cur_get_block(cursor), + index, thr, mtr, &prdt); + *inherit = false; + } else { + err = lock_rec_insert_check_and_lock( + flags, rec, btr_cur_get_block(cursor), + index, thr, mtr, inherit); + } + } if (err != DB_SUCCESS || !dict_index_is_clust(index) || dict_index_is_ibuf(index)) { @@ -1231,9 +3091,11 @@ btr_cur_ins_lock_and_undo( return(err); } - /* Now we can fill in the roll ptr field in entry */ + /* Now we can fill in the roll ptr field in entry + (except if table is intrinsic) */ - if (!(flags & BTR_KEEP_SYS_FLAG)) { + if (!(flags & BTR_KEEP_SYS_FLAG) + && !dict_table_is_intrinsic(index->table)) { row_upd_index_entry_sys_field(entry, index, DATA_ROLL_PTR, roll_ptr); @@ -1242,23 +3104,36 @@ btr_cur_ins_lock_and_undo( return(DB_SUCCESS); } -#ifdef UNIV_DEBUG -/*************************************************************//** -Report information about a transaction. */ +/** +Prefetch siblings of the leaf for the pessimistic operation. +@param block leaf page */ static void -btr_cur_trx_report( -/*===============*/ - trx_id_t trx_id, /*!< in: transaction id */ - const dict_index_t* index, /*!< in: index */ - const char* op) /*!< in: operation */ +btr_cur_prefetch_siblings( + buf_block_t* block) { - fprintf(stderr, "Trx with id " TRX_ID_FMT " going to ", trx_id); - fputs(op, stderr); - dict_index_name_print(stderr, NULL, index); - putc('\n', stderr); + page_t* page = buf_block_get_frame(block); + + ut_ad(page_is_leaf(page)); + + ulint left_page_no = fil_page_get_prev(page); + ulint right_page_no = fil_page_get_next(page); + + if (left_page_no != FIL_NULL) { + buf_read_page_background( + page_id_t(block->page.id.space(), left_page_no), + block->page.size, false); + } + if (right_page_no != FIL_NULL) { + buf_read_page_background( + page_id_t(block->page.id.space(), right_page_no), + block->page.size, false); + } + if (left_page_no != FIL_NULL + || right_page_no != FIL_NULL) { + os_aio_simulated_wake_handler_threads(); + } } -#endif /* UNIV_DEBUG */ /*************************************************************//** Tries to perform an insert to a page in an index tree, next to cursor. @@ -1266,8 +3141,7 @@ It is assumed that mtr holds an x-latch on the page. The operation does not succeed if there is too little space on the page. If there is just one record on the page, the insert will always succeed; this is to prevent trying to split a page with just one record. -@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ dberr_t btr_cur_optimistic_insert( /*======================*/ @@ -1302,7 +3176,6 @@ btr_cur_optimistic_insert( ibool leaf; ibool reorg; ibool inherit = TRUE; - ulint zip_size; ulint rec_size; dberr_t err; @@ -1312,38 +3185,34 @@ btr_cur_optimistic_insert( page = buf_block_get_frame(block); index = cursor->index; - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + /* Block are not latched for insert if table is intrinsic + and index is auto-generated clustered index. 
*/ + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); ut_ad(dtuple_check_typed(entry)); - zip_size = buf_block_get_zip_size(block); + const page_size_t& page_size = block->page.size; + #ifdef UNIV_DEBUG_VALGRIND - if (zip_size) { - UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE); - UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size); + if (page_size.is_compressed()) { + UNIV_MEM_ASSERT_RW(page, page_size.logical()); + UNIV_MEM_ASSERT_RW(block->page.zip.data, page_size.physical()); } #endif /* UNIV_DEBUG_VALGRIND */ -#ifdef UNIV_DEBUG - if (btr_cur_print_record_ops && thr) { - btr_cur_trx_report(thr_get_trx(thr)->id, index, "insert "); - dtuple_print(stderr, entry); - } -#endif /* UNIV_DEBUG */ - leaf = page_is_leaf(page); /* Calculate the record size when entry is converted to a record */ rec_size = rec_get_converted_size(index, entry, n_ext); if (page_zip_rec_needs_ext(rec_size, page_is_comp(page), - dtuple_get_n_fields(entry), zip_size)) { + dtuple_get_n_fields(entry), page_size)) { /* The record is so big that we have to store some fields externally on separate database pages */ - big_rec_vec = dtuple_convert_big_rec(index, entry, &n_ext); + big_rec_vec = dtuple_convert_big_rec(index, 0, entry, &n_ext); if (UNIV_UNLIKELY(big_rec_vec == NULL)) { @@ -1353,12 +3222,12 @@ btr_cur_optimistic_insert( rec_size = rec_get_converted_size(index, entry, n_ext); } - if (zip_size) { + if (page_size.is_compressed()) { /* Estimate the free space of an empty compressed page. Subtract one byte for the encoded heap_no in the modification log. */ ulint free_space_zip = page_zip_empty_size( - cursor->index->n_fields, zip_size); + cursor->index->n_fields, page_size.physical()); ulint n_uniq = dict_index_get_n_unique_in_tree(index); ut_ad(dict_table_is_comp(index->table)); @@ -1398,7 +3267,7 @@ too_big: LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page), goto fail); - if (leaf && zip_size + if (leaf && page_size.is_compressed() && (page_get_data_size(page) + rec_size >= dict_index_zip_pad_optimal_page_size(index))) { /* If compression padding tells us that insertion will @@ -1407,6 +3276,12 @@ too_big: insertion. */ fail: err = DB_FAIL; + + /* prefetch siblings of the leaf for the pessimistic + operation, if the page is leaf. */ + if (page_is_leaf(page)) { + btr_cur_prefetch_siblings(block); + } fail_err: if (big_rec_vec) { @@ -1435,7 +3310,7 @@ fail_err: we have to split the page to reserve enough free space for future updates of records. */ - if (leaf && !zip_size && dict_index_is_clust(index) + if (leaf && !page_size.is_compressed() && dict_index_is_clust(index) && page_get_n_recs(page) >= 2 && dict_index_get_space_reserve() + rec_size > max_size && (btr_page_get_split_rec_to_right(cursor, &dummy) @@ -1443,37 +3318,71 @@ fail_err: goto fail; } - /* Check locks and write to the undo log, if specified */ - err = btr_cur_ins_lock_and_undo(flags, cursor, entry, - thr, mtr, &inherit); - - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { + page_cursor = btr_cur_get_page_cur(cursor); - goto fail_err; +#ifdef UNIV_DEBUG + { + rec_printer p(entry); + DBUG_PRINT("ib_cur", ("insert %s (%llu) by %lu %s", + index->name(), index->id, + thr != NULL + ? 
trx_get_id_for_print(thr_get_trx(thr)) + : 0, + p.str().c_str())); } +#endif - page_cursor = btr_cur_get_page_cur(cursor); + DBUG_EXECUTE_IF("do_page_reorganize", + btr_page_reorganize(page_cursor, index, mtr);); /* Now, try the insert */ - { - const rec_t* page_cursor_rec = page_cur_get_rec(page_cursor); - *rec = page_cur_tuple_insert(page_cursor, entry, index, - offsets, heap, n_ext, mtr); + const rec_t* page_cursor_rec = page_cur_get_rec(page_cursor); + + if (dict_table_is_intrinsic(index->table)) { + + index->rec_cache.rec_size = rec_size; + + *rec = page_cur_tuple_direct_insert( + page_cursor, entry, index, n_ext, mtr); + } else { + /* Check locks and write to the undo log, + if specified */ + err = btr_cur_ins_lock_and_undo(flags, cursor, entry, + thr, mtr, &inherit); + + if (err != DB_SUCCESS) { + goto fail_err; + } + + *rec = page_cur_tuple_insert( + page_cursor, entry, index, offsets, heap, + n_ext, mtr); + } + reorg = page_cursor_rec != page_cur_get_rec(page_cursor); } if (*rec) { - } else if (zip_size) { + } else if (page_size.is_compressed()) { /* Reset the IBUF_BITMAP_FREE bits, because page_cur_tuple_insert() will have attempted page reorganize before failing. */ - if (leaf && !dict_index_is_clust(index)) { + if (leaf + && !dict_index_is_clust(index) + && !dict_table_is_temporary(index->table)) { ibuf_reset_free_bits(block); } goto fail; } else { + + /* For intrinsic table we take a consistent path + to re-organize using pessimistic path. */ + if (dict_table_is_intrinsic(index->table)) { + goto fail; + } + ut_ad(!reorg); /* If the record did not fit, reorganize */ @@ -1490,30 +3399,31 @@ fail_err: offsets, heap, n_ext, mtr); if (UNIV_UNLIKELY(!*rec)) { - fputs("InnoDB: Error: cannot insert tuple ", stderr); - dtuple_print(stderr, entry); - fputs(" into ", stderr); - dict_index_name_print(stderr, thr_get_trx(thr), index); - fprintf(stderr, "\nInnoDB: max insert size %lu\n", - (ulong) max_size); - ut_error; + ib::fatal() << "Cannot insert tuple " << *entry + << "into index " << index->name + << " of table " << index->table->name + << ". Max size: " << max_size; } } #ifdef BTR_CUR_HASH_ADAPT - if (!reorg && leaf && (cursor->flag == BTR_CUR_HASH)) { - btr_search_update_hash_node_on_insert(cursor); - } else { - btr_search_update_hash_on_insert(cursor); + if (!index->disable_ahi) { + if (!reorg && leaf && (cursor->flag == BTR_CUR_HASH)) { + btr_search_update_hash_node_on_insert(cursor); + } else { + btr_search_update_hash_on_insert(cursor); + } } -#endif +#endif /* BTR_CUR_HASH_ADAPT */ if (!(flags & BTR_NO_LOCKING_FLAG) && inherit) { lock_update_insert(block, *rec); } - if (leaf && !dict_index_is_clust(index)) { + if (leaf + && !dict_index_is_clust(index) + && !dict_table_is_temporary(index->table)) { /* Update the free bits of the B-tree page in the insert buffer bitmap. */ @@ -1527,7 +3437,7 @@ fail_err: committed mini-transaction, because in crash recovery, the free bits could momentarily be set too high. */ - if (zip_size) { + if (page_size.is_compressed()) { /* Update the bits in the same mini-transaction. */ ibuf_update_free_bits_zip(block, mtr); } else { @@ -1549,8 +3459,7 @@ Performs an insert on a page of an index tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the insert is made on the leaf level, to avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. 
-@return DB_SUCCESS or error number */ -UNIV_INTERN +@return DB_SUCCESS or error number */ dberr_t btr_cur_pessimistic_insert( /*=======================*/ @@ -1576,22 +3485,23 @@ btr_cur_pessimistic_insert( mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index = cursor->index; - ulint zip_size = dict_table_zip_size(index->table); big_rec_t* big_rec_vec = NULL; dberr_t err; ibool inherit = FALSE; - ibool success; + bool success; ulint n_reserved = 0; ut_ad(dtuple_check_typed(entry)); *big_rec = NULL; - ut_ad(mtr_memo_contains(mtr, - dict_index_get_lock(btr_cur_get_index(cursor)), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged( + mtr, dict_index_get_lock(btr_cur_get_index(cursor)), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(cursor->index->table)); + ut_ad(mtr_is_block_fix( + mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX, cursor->index->table)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); @@ -1608,7 +3518,8 @@ btr_cur_pessimistic_insert( return(err); } - if (!(flags & BTR_NO_UNDO_LOG_FLAG)) { + if (!(flags & BTR_NO_UNDO_LOG_FLAG) + || dict_table_is_intrinsic(index->table)) { /* First reserve enough free space for the file segments of the index tree, so that the insert will not fail because of lack of space */ @@ -1625,7 +3536,7 @@ btr_cur_pessimistic_insert( if (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, n_ext), dict_table_is_comp(index->table), dtuple_get_n_fields(entry), - zip_size)) { + dict_table_page_size(index->table))) { /* The record is so big that we have to store some fields externally on separate database pages */ @@ -1636,7 +3547,7 @@ btr_cur_pessimistic_insert( dtuple_convert_back_big_rec(index, entry, big_rec_vec); } - big_rec_vec = dtuple_convert_big_rec(index, entry, &n_ext); + big_rec_vec = dtuple_convert_big_rec(index, 0, entry, &n_ext); if (big_rec_vec == NULL) { @@ -1649,7 +3560,7 @@ btr_cur_pessimistic_insert( } if (dict_index_get_page(index) - == buf_block_get_page_no(btr_cur_get_block(cursor))) { + == btr_cur_get_block(cursor)->page.id.page_no()) { /* The page is the root page */ *rec = btr_root_raise_and_insert( @@ -1663,24 +3574,33 @@ btr_cur_pessimistic_insert( return(DB_OUT_OF_FILE_SPACE); } - ut_ad(page_rec_get_next(btr_cur_get_rec(cursor)) == *rec); + ut_ad(page_rec_get_next(btr_cur_get_rec(cursor)) == *rec + || dict_index_is_spatial(index)); + if (!(flags & BTR_NO_LOCKING_FLAG)) { - /* The cursor might be moved to the other page, - and the max trx id field should be updated after - the cursor was fixed. */ - if (!dict_index_is_clust(index)) { - page_update_max_trx_id( - btr_cur_get_block(cursor), - btr_cur_get_page_zip(cursor), - thr_get_trx(thr)->id, mtr); - } - - if (!page_rec_is_infimum(btr_cur_get_rec(cursor))) { - /* split and inserted need to call - lock_update_insert() always. */ - inherit = TRUE; - } + ut_ad(!dict_table_is_temporary(index->table)); + if (dict_index_is_spatial(index)) { + /* Do nothing */ + } else { + /* The cursor might be moved to the other page + and the max trx id field should be updated after + the cursor was fixed. 
*/ + if (!dict_index_is_clust(index)) { + page_update_max_trx_id( + btr_cur_get_block(cursor), + btr_cur_get_page_zip(cursor), + thr_get_trx(thr)->id, mtr); + } + if (!page_rec_is_infimum(btr_cur_get_rec(cursor)) + || btr_page_get_prev( + buf_block_get_frame( + btr_cur_get_block(cursor)), mtr) + == FIL_NULL) { + /* split and inserted need to call + lock_update_insert() always. */ + inherit = TRUE; + } buf_block_t* block = btr_cur_get_block(cursor); buf_frame_t* frame = NULL; @@ -1693,10 +3613,13 @@ btr_cur_pessimistic_insert( if (frame && btr_page_get_prev(frame, mtr) == FIL_NULL) { inherit = TRUE; } + } } #ifdef BTR_CUR_ADAPT - btr_search_update_hash_on_insert(cursor); + if (!index->disable_ahi) { + btr_search_update_hash_on_insert(cursor); + } #endif if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) { @@ -1716,7 +3639,7 @@ btr_cur_pessimistic_insert( /*************************************************************//** For an update, checks the locks and does the undo logging. -@return DB_SUCCESS, DB_WAIT_LOCK, or error number */ +@return DB_SUCCESS, DB_WAIT_LOCK, or error number */ UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7))) dberr_t btr_cur_upd_lock_and_undo( @@ -1742,6 +3665,7 @@ btr_cur_upd_lock_and_undo( index = cursor->index; ut_ad(rec_offs_validate(rec, index, offsets)); + ut_ad(mtr->is_named_space(index->space)); if (!dict_index_is_clust(index)) { ut_ad(dict_index_is_online_ddl(index) @@ -1776,7 +3700,6 @@ btr_cur_upd_lock_and_undo( /***********************************************************//** Writes a redo log record of updating a record in-place. */ -UNIV_INTERN void btr_cur_update_in_place_log( /*========================*/ @@ -1824,7 +3747,7 @@ btr_cur_update_in_place_log( trx_write_roll_ptr(log_ptr, 0); log_ptr += DATA_ROLL_PTR_LEN; /* TRX_ID */ - log_ptr += mach_ull_write_compressed(log_ptr, 0); + log_ptr += mach_u64_write_compressed(log_ptr, 0); } mach_write_to_2(log_ptr, page_offset(rec)); @@ -1836,8 +3759,7 @@ btr_cur_update_in_place_log( /***********************************************************//** Parses a redo log record of updating a record in-place. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_update_in_place( /*==========================*/ @@ -1894,7 +3816,7 @@ btr_cur_parse_update_in_place( ut_a((ibool)!!page_is_comp(page) == dict_table_is_comp(index->table)); rec = page + rec_offset; - /* We do not need to reserve btr_search_latch, as the page is only + /* We do not need to reserve search latch, as the page is only being recovered, and there cannot be a hash index to it. */ offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap); @@ -1919,13 +3841,12 @@ an update-in-place. @retval false if out of space; IBUF_BITMAP_FREE will be reset outside mtr if the page was recompressed -@retval true if enough place; +@retval true if enough place; IMPORTANT: The caller will have to update IBUF_BITMAP_FREE if this is a secondary index leaf page. This has to be done either within the same mini-transaction, or by invoking ibuf_reset_free_bits() before mtr_commit(mtr). */ -UNIV_INTERN bool btr_cur_update_alloc_zip_func( /*==========================*/ @@ -1993,7 +3914,9 @@ out_of_space: ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets)); /* Out of space: reset the free bits. 
*/ - if (!dict_index_is_clust(index) && page_is_leaf(page)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) + && page_is_leaf(page)) { ibuf_reset_free_bits(page_cur_get_block(cursor)); } @@ -2007,7 +3930,6 @@ We assume here that the ordering fields of the record do not change. @retval DB_SUCCESS on success @retval DB_ZIP_OVERFLOW if there is not enough space left on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */ -UNIV_INTERN dberr_t btr_cur_update_in_place( /*====================*/ @@ -2039,6 +3961,9 @@ btr_cur_update_in_place( index = cursor->index; ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); + ut_ad(trx_id > 0 + || (flags & BTR_KEEP_SYS_FLAG) + || dict_table_is_intrinsic(index->table)); /* The insert buffer tree should never be updated in place. */ ut_ad(!dict_index_is_ibuf(index)); ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG) @@ -2047,15 +3972,17 @@ btr_cur_update_in_place( || (flags & ~(BTR_KEEP_POS_FLAG | BTR_KEEP_IBUF_BITMAP)) == (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG | BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG)); - ut_ad(fil_page_get_type(btr_cur_get_page(cursor)) == FIL_PAGE_INDEX); + ut_ad(fil_page_index_page_check(btr_cur_get_page(cursor))); ut_ad(btr_page_get_index_id(btr_cur_get_page(cursor)) == index->id); #ifdef UNIV_DEBUG - if (btr_cur_print_record_ops) { - btr_cur_trx_report(trx_id, index, "update "); - rec_print_new(stderr, rec, offsets); + { + rec_printer p(rec, offsets); + DBUG_PRINT("ib_cur", ("update-in-place %s (%llu) by %lu: %s", + index->name(), index->id, trx_id, + p.str().c_str())); } -#endif /* UNIV_DEBUG */ +#endif block = btr_cur_get_block(cursor); page_zip = buf_block_get_page_zip(block); @@ -2083,7 +4010,8 @@ btr_cur_update_in_place( goto func_exit; } - if (!(flags & BTR_KEEP_SYS_FLAG)) { + if (!(flags & BTR_KEEP_SYS_FLAG) + && !dict_table_is_intrinsic(index->table)) { row_upd_rec_sys_fields(rec, NULL, index, offsets, thr_get_trx(thr), roll_ptr); } @@ -2110,13 +4038,13 @@ btr_cur_update_in_place( btr_search_update_hash_on_delete(cursor); } - rw_lock_x_lock(&btr_search_latch); + rw_lock_x_lock(btr_get_search_latch(index)); } row_upd_rec_in_place(rec, index, offsets, update, page_zip); if (is_hashed) { - rw_lock_x_unlock(&btr_search_latch); + rw_lock_x_unlock(btr_get_search_latch(index)); } btr_cur_update_in_place_log(flags, rec, index, update, @@ -2138,6 +4066,7 @@ func_exit: if (page_zip && !(flags & BTR_KEEP_IBUF_BITMAP) && !dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) && page_is_leaf(buf_block_get_frame(block))) { /* Update the free bits in the insert buffer. */ ibuf_update_free_bits_zip(block, mtr); @@ -2158,7 +4087,6 @@ fields of the record do not change. 
@retval DB_UNDERFLOW if the page would become too empty @retval DB_ZIP_OVERFLOW if there is not enough space left on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */ -UNIV_INTERN dberr_t btr_cur_optimistic_update( /*======================*/ @@ -2199,8 +4127,13 @@ btr_cur_optimistic_update( page = buf_block_get_frame(block); rec = btr_cur_get_rec(cursor); index = cursor->index; + ut_ad(trx_id > 0 + || (flags & BTR_KEEP_SYS_FLAG) + || dict_table_is_intrinsic(index->table)); ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + /* This is intended only for leaf page updates */ + ut_ad(page_is_leaf(page)); /* The insert buffer tree should never be updated in place. */ ut_ad(!dict_index_is_ibuf(index)); ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG) @@ -2209,7 +4142,7 @@ btr_cur_optimistic_update( || (flags & ~(BTR_KEEP_POS_FLAG | BTR_KEEP_IBUF_BITMAP)) == (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG | BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG)); - ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX); + ut_ad(fil_page_index_page_check(page)); ut_ad(btr_page_get_index_id(page) == index->id); *offsets = rec_get_offsets(rec, index, *offsets, @@ -2219,13 +4152,6 @@ btr_cur_optimistic_update( || trx_is_recv(thr_get_trx(thr))); #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ -#ifdef UNIV_DEBUG - if (btr_cur_print_record_ops) { - btr_cur_trx_report(trx_id, index, "update "); - rec_print_new(stderr, rec, *offsets); - } -#endif /* UNIV_DEBUG */ - if (!row_upd_changes_field_size_or_external(index, *offsets, update)) { /* The simplest and the most common case: the update does not @@ -2243,6 +4169,10 @@ any_extern: /* Externally stored fields are treated in pessimistic update */ + /* prefetch siblings of the leaf for the pessimistic + operation. */ + btr_cur_prefetch_siblings(block); + return(DB_OVERFLOW); } @@ -2253,6 +4183,15 @@ any_extern: } } +#ifdef UNIV_DEBUG + { + rec_printer p(rec, *offsets); + DBUG_PRINT("ib_cur", ("update %s (%llu) by %lu: %s", + index->name(), index->id, trx_id, + p.str().c_str())); + } +#endif + page_cursor = btr_cur_get_page_cur(cursor); if (!*heap) { @@ -2282,7 +4221,7 @@ any_extern: if (page_zip) { if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page), dict_index_get_n_fields(index), - page_zip_get_size(page_zip))) { + dict_table_page_size(index->table))) { goto any_extern; } @@ -2295,6 +4234,13 @@ any_extern: rec = page_cur_get_rec(page_cursor); } + /* We limit max record size to 16k even for 64k page size. */ + if (new_rec_size >= REC_MAX_DATA_SIZE) { + err = DB_OVERFLOW; + + goto func_exit; + } + if (UNIV_UNLIKELY(new_rec_size >= (page_get_free_space_of_empty(page_is_comp(page)) / 2))) { @@ -2307,7 +4253,7 @@ any_extern: if (UNIV_UNLIKELY(page_get_data_size(page) - old_rec_size + new_rec_size - < BTR_CUR_PAGE_COMPRESS_LIMIT)) { + < BTR_CUR_PAGE_COMPRESS_LIMIT(index))) { /* We may need to update the IBUF_BITMAP_FREE bits after a reorganize that was done in btr_cur_update_alloc_zip(). */ @@ -2325,7 +4271,8 @@ any_extern: + page_get_max_insert_size_after_reorganize(page, 1)); if (!page_zip) { - max_ins_size = page_get_max_insert_size_after_reorganize(page, 1); + max_ins_size = page_get_max_insert_size_after_reorganize( + page, 1); } if (!(((max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT) @@ -2358,8 +4305,9 @@ any_extern: /* Ok, we may do the replacement. 
Store on the page infimum the explicit locks on rec, before deleting rec (see the comment in btr_cur_pessimistic_update). */ - - lock_rec_store_on_page_infimum(block, rec); + if (!dict_table_is_locking_disabled(index->table)) { + lock_rec_store_on_page_infimum(block, rec); + } btr_search_update_hash_on_delete(cursor); @@ -2367,7 +4315,8 @@ any_extern: page_cur_move_to_prev(page_cursor); - if (!(flags & BTR_KEEP_SYS_FLAG)) { + if (!(flags & BTR_KEEP_SYS_FLAG) + && !dict_table_is_intrinsic(index->table)) { row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, roll_ptr); row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, @@ -2380,8 +4329,9 @@ any_extern: ut_a(rec); /* <- We calculated above the insert would fit */ /* Restore the old explicit lock state on the record */ - - lock_rec_restore_from_page_infimum(block, rec, block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_rec_restore_from_page_infimum(block, rec, block); + } page_cur_move_to_next(page_cursor); ut_ad(err == DB_SUCCESS); @@ -2389,8 +4339,8 @@ any_extern: func_exit: if (!(flags & BTR_KEEP_IBUF_BITMAP) && !dict_index_is_clust(index) - && page_is_leaf(page)) { - + && !dict_table_is_temporary(index->table)) { + /* Update the free bits in the insert buffer. */ if (page_zip) { ibuf_update_free_bits_zip(block, mtr); } else { @@ -2398,6 +4348,12 @@ func_exit: } } + if (err != DB_SUCCESS) { + /* prefetch siblings of the leaf for the pessimistic + operation. */ + btr_cur_prefetch_siblings(block); + } + return(err); } @@ -2417,9 +4373,6 @@ btr_cur_pess_upd_restore_supremum( { page_t* page; buf_block_t* prev_block; - ulint space; - ulint zip_size; - ulint prev_page_no; page = buf_block_get_frame(block); @@ -2429,13 +4382,12 @@ btr_cur_pess_upd_restore_supremum( return; } - space = buf_block_get_space(block); - zip_size = buf_block_get_zip_size(block); - prev_page_no = btr_page_get_prev(page, mtr); + const ulint prev_page_no = btr_page_get_prev(page, mtr); + + const page_id_t page_id(block->page.id.space(), prev_page_no); ut_ad(prev_page_no != FIL_NULL); - prev_block = buf_page_get_with_no_latch(space, zip_size, - prev_page_no, mtr); + prev_block = buf_page_get_with_no_latch(page_id, block->page.size, mtr); #ifdef UNIV_BTR_DEBUG ut_a(btr_page_get_next(prev_block->frame, mtr) == page_get_page_no(page)); @@ -2449,46 +4401,13 @@ btr_cur_pess_upd_restore_supremum( page_rec_get_heap_no(rec)); } -/*************************************************************//** -Check if the total length of the modified blob for the row is within 10% -of the total redo log size. This constraint on the blob length is to -avoid overwriting the redo logs beyond the last checkpoint lsn. -@return DB_SUCCESS or DB_TOO_BIG_FOR_REDO. */ -static -dberr_t -btr_check_blob_limit(const big_rec_t* big_rec_vec) -{ - const ib_uint64_t redo_size = srv_n_log_files * srv_log_file_size - * UNIV_PAGE_SIZE; - const ib_uint64_t redo_10p = redo_size / 10; - ib_uint64_t total_blob_len = 0; - dberr_t err = DB_SUCCESS; - - /* Calculate the total number of bytes for blob data */ - for (ulint i = 0; i < big_rec_vec->n_fields; i++) { - total_blob_len += big_rec_vec->fields[i].len; - } - - if (total_blob_len > redo_10p) { - ib_logf(IB_LOG_LEVEL_ERROR, "The total blob data" - " length (" UINT64PF ") is greater than" - " 10%% of the total redo log size (" UINT64PF - "). 
Please increase total redo log size.", - total_blob_len, redo_size); - err = DB_TOO_BIG_FOR_REDO; - } - - return(err); -} - /*************************************************************//** Performs an update of a record on a page of a tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the update is made on the leaf level, to avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. We assume here that the ordering fields of the record do not change. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t btr_cur_pessimistic_update( /*=======================*/ @@ -2506,9 +4425,10 @@ btr_cur_pessimistic_update( big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ - const upd_t* update, /*!< in: update vector; this is allowed also - contain trx id and roll ptr fields, but - the values in update vector have no effect */ + upd_t* update, /*!< in/out: update vector; this is allowed to + also contain trx id and roll ptr fields. + Non-updated columns that are moved offpage will + be appended to this. */ ulint cmpl_info,/*!< in: compiler info on secondary index updates */ que_thr_t* thr, /*!< in: query thread */ @@ -2540,14 +4460,19 @@ btr_cur_pessimistic_update( page_zip = buf_block_get_page_zip(block); index = cursor->index; - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | + MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ /* The insert buffer tree should never be updated in place. */ ut_ad(!dict_index_is_ibuf(index)); + ut_ad(trx_id > 0 + || (flags & BTR_KEEP_SYS_FLAG) + || dict_table_is_intrinsic(index->table)); ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG) || dict_index_is_clust(index)); ut_ad(thr_get_trx(thr)->id == trx_id @@ -2574,41 +4499,16 @@ btr_cur_pessimistic_update( if (page_zip && optim_err != DB_ZIP_OVERFLOW && !dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) && page_is_leaf(page)) { ibuf_update_free_bits_zip(block, mtr); } - return(err); - } - - /* Do lock checking and undo logging */ - err = btr_cur_upd_lock_and_undo(flags, cursor, *offsets, - update, cmpl_info, - thr, mtr, &roll_ptr); - if (err != DB_SUCCESS) { - goto err_exit; - } - - if (optim_err == DB_OVERFLOW) { - ulint reserve_flag; - - /* First reserve enough free space for the file segments - of the index tree, so that the update will not fail because - of lack of space */ - - ulint n_extents = cursor->tree_height / 16 + 3; - - if (flags & BTR_NO_UNDO_LOG_FLAG) { - reserve_flag = FSP_CLEANING; - } else { - reserve_flag = FSP_NORMAL; + if (big_rec_vec != NULL) { + dtuple_big_rec_free(big_rec_vec); } - if (!fsp_reserve_free_extents(&n_reserved, index->space, - n_extents, reserve_flag, mtr)) { - err = DB_OUT_OF_FILE_SPACE; - goto err_exit; - } + return(err); } rec = btr_cur_get_rec(cursor); @@ -2627,14 +4527,19 @@ btr_cur_pessimistic_update( itself. Thus the following call is safe. 
*/ row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update, FALSE, entry_heap); - if (!(flags & BTR_KEEP_SYS_FLAG)) { - row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, - roll_ptr); - row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, - trx_id); - } - if ((flags & BTR_NO_UNDO_LOG_FLAG) && rec_offs_any_extern(*offsets)) { + /* We have to set appropriate extern storage bits in the new + record to be inserted: we have to remember which fields were such */ + + ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec)); + ut_ad(rec_offs_validate(rec, index, *offsets)); + n_ext += btr_push_update_extern_fields(new_entry, update, entry_heap); + + /* UNDO logging is also turned-off during normal operation on intrinsic + table so condition needs to ensure that table is not intrinsic. */ + if ((flags & BTR_NO_UNDO_LOG_FLAG) + && rec_offs_any_extern(*offsets) + && !dict_table_is_intrinsic(index->table)) { /* We are in a transaction rollback undoing a row update: we must free possible externally stored fields which got new values in the update, if they are not @@ -2643,35 +4548,23 @@ btr_cur_pessimistic_update( update it back again. */ ut_ad(big_rec_vec == NULL); + ut_ad(dict_index_is_clust(index)); + ut_ad(thr_get_trx(thr)->in_rollback); + + DBUG_EXECUTE_IF("ib_blob_update_rollback", DBUG_SUICIDE();); + RECOVERY_CRASH(99); btr_rec_free_updated_extern_fields( - index, rec, page_zip, *offsets, update, - trx_is_recv(thr_get_trx(thr)) - ? RB_RECOVERY : RB_NORMAL, mtr); + index, rec, page_zip, *offsets, update, true, mtr); } - /* We have to set appropriate extern storage bits in the new - record to be inserted: we have to remember which fields were such */ - - ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec)); - ut_ad(rec_offs_validate(rec, index, *offsets)); - n_ext += btr_push_update_extern_fields(new_entry, update, entry_heap); + if (page_zip_rec_needs_ext( + rec_get_converted_size(index, new_entry, n_ext), + page_is_comp(page), + dict_index_get_n_fields(index), + block->page.size)) { - if (page_zip) { - ut_ad(page_is_comp(page)); - if (page_zip_rec_needs_ext( - rec_get_converted_size(index, new_entry, n_ext), - TRUE, - dict_index_get_n_fields(index), - page_zip_get_size(page_zip))) { - - goto make_external; - } - } else if (page_zip_rec_needs_ext( - rec_get_converted_size(index, new_entry, n_ext), - page_is_comp(page), 0, 0)) { -make_external: - big_rec_vec = dtuple_convert_big_rec(index, new_entry, &n_ext); + big_rec_vec = dtuple_convert_big_rec(index, update, new_entry, &n_ext); if (UNIV_UNLIKELY(big_rec_vec == NULL)) { /* We cannot goto return_after_reservations, @@ -2696,21 +4589,43 @@ make_external: ut_ad(flags & BTR_KEEP_POS_FLAG); } - if (big_rec_vec) { + /* Do lock checking and undo logging */ + err = btr_cur_upd_lock_and_undo(flags, cursor, *offsets, + update, cmpl_info, + thr, mtr, &roll_ptr); + if (err != DB_SUCCESS) { + goto err_exit; + } - err = btr_check_blob_limit(big_rec_vec); + if (optim_err == DB_OVERFLOW) { - if (err != DB_SUCCESS) { - if (n_reserved > 0) { - fil_space_release_free_extents( - index->space, n_reserved); - } + /* First reserve enough free space for the file segments + of the index tree, so that the update will not fail because + of lack of space */ + + ulint n_extents = cursor->tree_height / 16 + 3; + + if (!fsp_reserve_free_extents( + &n_reserved, index->space, n_extents, + flags & BTR_NO_UNDO_LOG_FLAG + ? 
FSP_CLEANING : FSP_NORMAL, + mtr)) { + err = DB_OUT_OF_FILE_SPACE; goto err_exit; } } + if (!(flags & BTR_KEEP_SYS_FLAG) + && !dict_table_is_intrinsic(index->table)) { + row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR, + roll_ptr); + row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID, + trx_id); + } + if (!page_zip) { - max_ins_size = page_get_max_insert_size_after_reorganize(page, 1); + max_ins_size = page_get_max_insert_size_after_reorganize( + page, 1); } /* Store state of explicit locks on rec on the page infimum record, @@ -2721,8 +4636,9 @@ make_external: btr_root_raise_and_insert. Therefore we cannot in the lock system delete the lock structs set on the root page even if the root page carries just node pointers. */ - - lock_rec_store_on_page_infimum(block, rec); + if (!dict_table_is_locking_disabled(index->table)) { + lock_rec_store_on_page_infimum(block, rec); + } btr_search_update_hash_on_delete(cursor); @@ -2741,8 +4657,10 @@ make_external: if (rec) { page_cursor->rec = rec; - lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor), - rec, block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_rec_restore_from_page_infimum( + btr_cur_get_block(cursor), rec, block); + } if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) { /* The new inserted record owns its possible externally @@ -2759,8 +4677,8 @@ make_external: page_cursor->rec, index, *offsets); } } else if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) && page_is_leaf(page)) { - /* Update the free bits in the insert buffer. This is the same block which was skipped by BTR_KEEP_IBUF_BITMAP. */ @@ -2772,6 +4690,18 @@ make_external: } } + if (!srv_read_only_mode + && !big_rec_vec + && page_is_leaf(page) + && !dict_index_is_online_ddl(index)) { + + mtr_memo_release(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK); + + /* NOTE: We cannot release root block latch here, because it + has the segment header and has already been modified in most cases.*/ + } + err = DB_SUCCESS; goto return_after_reservations; } else { @@ -2785,24 +4715,31 @@ make_external: /* Out of space: reset the free bits. This is the same block which was skipped by BTR_KEEP_IBUF_BITMAP. */ - if (!dict_index_is_clust(index) && page_is_leaf(page)) { + if (!dict_index_is_clust(index) + && !dict_table_is_temporary(index->table) + && page_is_leaf(page)) { ibuf_reset_free_bits(block); } } - if (big_rec_vec) { + if (big_rec_vec != NULL && !dict_table_is_intrinsic(index->table)) { ut_ad(page_is_leaf(page)); ut_ad(dict_index_is_clust(index)); ut_ad(flags & BTR_KEEP_POS_FLAG); /* btr_page_split_and_insert() in btr_cur_pessimistic_insert() invokes - mtr_memo_release(mtr, index->lock, MTR_MEMO_X_LOCK). + mtr_memo_release(mtr, index->lock, MTR_MEMO_SX_LOCK). We must keep the index->lock when we created a big_rec, so that row_upd_clust_rec() can store the big_rec in the same mini-transaction. */ - mtr_x_lock(dict_index_get_lock(index), mtr); + ut_ad(mtr_memo_contains_flagged(mtr, + dict_index_get_lock(index), + MTR_MEMO_X_LOCK | + MTR_MEMO_SX_LOCK)); + + mtr_sx_lock(dict_index_get_lock(index), mtr); } /* Was the record to be updated positioned as the first user @@ -2826,7 +4763,12 @@ make_external: ut_ad(rec_offs_validate(rec, cursor->index, *offsets)); page_cursor->rec = rec; - if (dict_index_is_sec_or_ibuf(index)) { + /* Multiple transactions cannot operate on the + same temp-table in parallel. + max_trx_id is ignored for temp tables because it is not required + for MVCC. 
*/ + if (dict_index_is_sec_or_ibuf(index) + && !dict_table_is_temporary(index->table)) { /* Update PAGE_MAX_TRX_ID in the index page header. It was not updated by btr_cur_pessimistic_insert() because of BTR_NO_LOCKING_FLAG. */ @@ -2854,15 +4796,17 @@ make_external: rec, index, *offsets, mtr); } - lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor), - rec, block); + if (!dict_table_is_locking_disabled(index->table)) { + lock_rec_restore_from_page_infimum( + btr_cur_get_block(cursor), rec, block); + } /* If necessary, restore also the correct lock state for a new, preceding supremum record created in a page split. While the old record was nonexistent, the supremum might have inherited its locks from a wrong record. */ - if (!was_first) { + if (!was_first && !dict_table_is_locking_disabled(index->table)) { btr_cur_pess_upd_restore_supremum(btr_cur_get_block(cursor), rec, mtr); } @@ -2899,6 +4843,7 @@ btr_cur_del_mark_set_clust_rec_log( byte* log_ptr; ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); + ut_ad(mtr->is_named_space(index->space)); log_ptr = mlog_open_and_write_index(mtr, rec, index, page_rec_is_comp(rec) @@ -2927,8 +4872,7 @@ btr_cur_del_mark_set_clust_rec_log( /****************************************************************//** Parses the redo log record for delete marking or unmarking of a clustered index record. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_del_mark_set_clust_rec( /*=================================*/ @@ -2979,7 +4923,7 @@ btr_cur_parse_del_mark_set_clust_rec( if (page) { rec = page + offset; - /* We do not need to reserve btr_search_latch, as the page + /* We do not need to reserve search latch, as the page is only being recovered, and there cannot be a hash index to it. Besides, these fields are being updated in place and the adaptive hash index does not depend on them. */ @@ -3011,16 +4955,18 @@ Marks a clustered index record deleted. Writes an undo log record to undo log on this delete marking. Writes in the trx id field the id of the deleting transaction, and in the roll ptr field pointer to the undo log record created. -@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ dberr_t btr_cur_del_mark_set_clust_rec( /*===========================*/ + ulint flags, /*!< in: undo logging and locking flags */ buf_block_t* block, /*!< in/out: buffer block of the record */ rec_t* rec, /*!< in/out: record */ dict_index_t* index, /*!< in: clustered index of the record */ const ulint* offsets,/*!< in: rec_get_offsets(rec) */ que_thr_t* thr, /*!< in: query thread */ + const dtuple_t* entry, /*!< in: dtuple for the deleting record, also + contains the virtual cols if there are any */ mtr_t* mtr) /*!< in/out: mini-transaction */ { roll_ptr_t roll_ptr; @@ -3033,16 +4979,13 @@ btr_cur_del_mark_set_clust_rec( ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); ut_ad(buf_block_get_frame(block) == page_align(rec)); ut_ad(page_is_leaf(page_align(rec))); + ut_ad(mtr->is_named_space(index->space)); -#ifdef UNIV_DEBUG - if (btr_cur_print_record_ops && (thr != NULL)) { - btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark "); - rec_print_new(stderr, rec, offsets); + if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) { + /* While cascading delete operations, this becomes possible. 
*/ + ut_ad(rec_get_trx_id(rec, index) == thr_get_trx(thr)->id); + return(DB_SUCCESS); } -#endif /* UNIV_DEBUG */ - - ut_ad(dict_index_is_clust(index)); - ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets))); err = lock_clust_rec_modify_check_and_lock(BTR_NO_LOCKING_FLAG, block, rec, index, offsets, thr); @@ -3052,27 +4995,46 @@ btr_cur_del_mark_set_clust_rec( return(err); } - err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr, - index, NULL, NULL, 0, rec, offsets, + err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr, + index, entry, NULL, 0, rec, offsets, &roll_ptr); if (err != DB_SUCCESS) { return(err); } - /* The btr_search_latch is not needed here, because + /* The search latch is not needed here, because the adaptive hash index does not depend on the delete-mark and the delete-mark is being updated in place. */ page_zip = buf_block_get_page_zip(block); - btr_blob_dbg_set_deleted_flag(rec, index, offsets, TRUE); btr_rec_set_deleted_flag(rec, page_zip, TRUE); + /* For intrinsic table, roll-ptr is not maintained as there is no UNDO + logging. Skip updating it. */ + if (dict_table_is_intrinsic(index->table)) { + return(err); + } + trx = thr_get_trx(thr); + /* This function must not be invoked during rollback + (of a TRX_STATE_PREPARE transaction or otherwise). */ + ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); + ut_ad(!trx->in_rollback); + +#ifdef UNIV_DEBUG + { + rec_printer p(rec, offsets); + DBUG_PRINT("ib_cur", ("delete-mark clust %s (%llu) by %lu: %s", + index->table_name, index->id, + trx_get_id_for_print(trx), + p.str().c_str())); + } +#endif if (dict_index_is_online_ddl(index)) { - row_log_table_delete(rec, index, offsets, NULL); + row_log_table_delete(rec, entry, index, offsets, NULL); } row_upd_rec_sys_fields(rec, page_zip, index, offsets, trx, roll_ptr); @@ -3120,8 +5082,7 @@ btr_cur_del_mark_set_sec_rec_log( /****************************************************************//** Parses the redo log record for delete marking or unmarking of a secondary index record. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_del_mark_set_sec_rec( /*===============================*/ @@ -3150,7 +5111,7 @@ btr_cur_parse_del_mark_set_sec_rec( if (page) { rec = page + offset; - /* We do not need to reserve btr_search_latch, as the page + /* We do not need to reserve search latch, as the page is only being recovered, and there cannot be a hash index to it. Besides, the delete-mark flag is being updated in place and the adaptive hash index does not depend on it. */ @@ -3164,8 +5125,7 @@ btr_cur_parse_del_mark_set_sec_rec( #ifndef UNIV_HOTBACKUP /***********************************************************//** Sets a secondary index record delete mark to TRUE or FALSE. 
-@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ dberr_t btr_cur_del_mark_set_sec_rec( /*=========================*/ @@ -3179,17 +5139,9 @@ btr_cur_del_mark_set_sec_rec( rec_t* rec; dberr_t err; - block = btr_cur_get_block(cursor); - rec = btr_cur_get_rec(cursor); - -#ifdef UNIV_DEBUG - if (btr_cur_print_record_ops && (thr != NULL)) { - btr_cur_trx_report(thr_get_trx(thr)->id, cursor->index, - "del mark "); - rec_print(stderr, rec, cursor->index); - } -#endif /* UNIV_DEBUG */ - + block = btr_cur_get_block(cursor); + rec = btr_cur_get_rec(cursor); + err = lock_sec_rec_modify_check_and_lock(flags, btr_cur_get_block(cursor), rec, cursor->index, thr, mtr); @@ -3201,7 +5153,15 @@ btr_cur_del_mark_set_sec_rec( ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(cursor->index->table)); - /* We do not need to reserve btr_search_latch, as the + DBUG_PRINT("ib_cur", ("delete-mark=%u sec %u:%u:%u in %s(" + UINT32PF ") by " TRX_ID_FMT, + unsigned(val), + block->page.id.space(), block->page.id.page_no(), + unsigned(page_rec_get_heap_no(rec)), + cursor->index->name(), cursor->index->id, + trx_get_id_for_print(thr_get_trx(thr)))); + + /* We do not need to reserve search latch, as the delete-mark flag is being updated in place and the adaptive hash index does not depend on it. */ btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val); @@ -3214,7 +5174,6 @@ btr_cur_del_mark_set_sec_rec( /***********************************************************//** Sets a secondary index record's delete mark to the given value. This function is only used by the insert buffer merge mechanism. */ -UNIV_INTERN void btr_cur_set_deleted_flag_for_ibuf( /*==============================*/ @@ -3226,7 +5185,7 @@ btr_cur_set_deleted_flag_for_ibuf( ibool val, /*!< in: value to set */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - /* We do not need to reserve btr_search_latch, as the page + /* We do not need to reserve search latch, as the page has just been read to the buffer pool and there cannot be a hash index to it. Besides, the delete-mark flag is being updated in place and the adaptive hash index does not depend @@ -3245,8 +5204,7 @@ that mtr holds an x-latch on the tree and on the cursor page. To avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. NOTE: it is assumed that the caller has reserved enough free extents so that the compression will always succeed if done! -@return TRUE if compression occurred */ -UNIV_INTERN +@return TRUE if compression occurred */ ibool btr_cur_compress_if_useful( /*=======================*/ @@ -3257,11 +5215,29 @@ btr_cur_compress_if_useful( cursor position even if compression occurs */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ut_ad(mtr_memo_contains(mtr, - dict_index_get_lock(btr_cur_get_index(cursor)), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX)); + /* Avoid applying compression as we don't accept lot of page garbage + given the workload of intrinsic table. 
*/ + if (dict_table_is_intrinsic(cursor->index->table)) { + return(FALSE); + } + + ut_ad(mtr_memo_contains_flagged( + mtr, dict_index_get_lock(btr_cur_get_index(cursor)), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(cursor->index->table)); + ut_ad(mtr_is_block_fix( + mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + + if (dict_index_is_spatial(cursor->index)) { + const page_t* page = btr_cur_get_page(cursor); + + /* Check whether page lock prevents the compression */ + if (!lock_test_prdt_page_lock( + page_get_space_id(page), page_get_page_no(page))) { + return(false); + } + } return(btr_cur_compress_recommendation(cursor, mtr) && btr_compress(cursor, adjust, mtr)); @@ -3271,8 +5247,7 @@ btr_cur_compress_if_useful( Removes the record on which the tree cursor is positioned on a leaf page. It is assumed that the mtr has an x-latch on the page where the cursor is positioned, but no latch on the whole tree. -@return TRUE if success, i.e., the page did not become too empty */ -UNIV_INTERN +@return TRUE if success, i.e., the page did not become too empty */ ibool btr_cur_optimistic_delete_func( /*===========================*/ @@ -3299,6 +5274,10 @@ btr_cur_optimistic_delete_func( ut_ad(flags == 0 || flags == BTR_CREATE_FLAG); ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX, cursor->index->table)); + ut_ad(mtr->is_named_space(cursor->index->space)); + /* This is intended only for leaf page deletions */ block = btr_cur_get_block(cursor); @@ -3351,12 +5330,16 @@ btr_cur_optimistic_delete_func( /* The change buffer does not handle inserts into non-leaf pages, into clustered indexes, or into the change buffer. */ - if (page_is_leaf(page) - && !dict_index_is_clust(cursor->index) + if (!dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table) && !dict_index_is_ibuf(cursor->index)) { ibuf_update_free_bits_low(block, max_ins, mtr); } } + } else { + /* prefetch siblings of the leaf for the pessimistic + operation. */ + btr_cur_prefetch_siblings(block); } if (UNIV_LIKELY_NULL(heap)) { @@ -3373,8 +5356,8 @@ or if it is the only page on the level. It is assumed that mtr holds an x-latch on the tree and on the cursor page. To avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. -@return TRUE if compression occurred */ -UNIV_INTERN +@return TRUE if compression occurred and FALSE if not or something +wrong. */ ibool btr_cur_pessimistic_delete( /*=======================*/ @@ -3392,7 +5375,7 @@ btr_cur_pessimistic_delete( stays valid: it points to successor of deleted record on function exit */ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? 
*/ mtr_t* mtr) /*!< in: mtr */ { buf_block_t* block; @@ -3401,11 +5384,14 @@ btr_cur_pessimistic_delete( dict_index_t* index; rec_t* rec; ulint n_reserved = 0; - ibool success; + bool success; ibool ret = FALSE; ulint level; mem_heap_t* heap; ulint* offsets; +#ifdef UNIV_DEBUG + bool parent_latched = false; +#endif /* UNIV_DEBUG */ block = btr_cur_get_block(cursor); page = buf_block_get_frame(block); @@ -3415,9 +5401,13 @@ btr_cur_pessimistic_delete( ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) || (flags & BTR_CREATE_FLAG)); - ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); + ut_ad(mtr->is_named_space(index->space)); + if (!has_reserved_extents) { /* First reserve enough free space for the file segments of the index tree, so that the node pointer updates will @@ -3448,7 +5438,7 @@ btr_cur_pessimistic_delete( if (rec_offs_any_extern(offsets)) { btr_rec_free_externally_stored_fields(index, rec, offsets, page_zip, - rb_ctx, mtr); + rollback, mtr); #ifdef UNIV_ZIP_DEBUG ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ @@ -3456,7 +5446,7 @@ btr_cur_pessimistic_delete( if (UNIV_UNLIKELY(page_get_n_recs(page) < 2) && UNIV_UNLIKELY(dict_index_get_page(index) - != buf_block_get_page_no(block))) { + != block->page.id.page_no())) { /* If there is only one record, drop the whole page in btr_discard_page, if this is not the root page */ @@ -3492,20 +5482,55 @@ btr_cur_pessimistic_delete( mini-transaction and because writing to the redo log is an atomic operation (performed by mtr_commit()). */ btr_set_min_rec_mark(next_rec, mtr); + } else if (dict_index_is_spatial(index)) { + /* For rtree, if delete the leftmost node pointer, + we need to update parent page. 
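
For illustration, a minimal, self-contained sketch (not the InnoDB API) of what "update parent page" means for an R-tree: the parent entry carries the minimum bounding rectangle (MBR) of its child page, so once the leftmost entry is gone the MBR has to be recomputed as the union of the rectangles that remain, which is what the rtr_update_mbr_field() call below propagates upward.

#include <algorithm>
#include <vector>

/* Plain stand-in for rtr_mbr_t: a 2-D minimum bounding rectangle. */
struct mbr_t {
	double	xmin, ymin, xmax, ymax;
};

/* Recompute the MBR stored in the parent node pointer as the union of the
rectangles still present on the child page (assumed non-empty; the
single-record case is handled by btr_discard_page() earlier). */
static mbr_t mbr_union(const std::vector<mbr_t>& remaining)
{
	mbr_t	m = remaining.front();

	for (const mbr_t& r : remaining) {
		m.xmin = std::min(m.xmin, r.xmin);
		m.ymin = std::min(m.ymin, r.ymin);
		m.xmax = std::max(m.xmax, r.xmax);
		m.ymax = std::max(m.ymax, r.ymax);
	}

	return m;
}
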
*/ + rtr_mbr_t father_mbr; + rec_t* father_rec; + btr_cur_t father_cursor; + ulint* offsets; + bool upd_ret; + ulint len; + + rtr_page_get_father_block(NULL, heap, index, + block, mtr, NULL, + &father_cursor); + offsets = rec_get_offsets( + btr_cur_get_rec(&father_cursor), index, + NULL, ULINT_UNDEFINED, &heap); + + father_rec = btr_cur_get_rec(&father_cursor); + rtr_read_mbr(rec_get_nth_field( + father_rec, offsets, 0, &len), &father_mbr); + + upd_ret = rtr_update_mbr_field(&father_cursor, offsets, + NULL, page, &father_mbr, + next_rec, mtr); + + if (!upd_ret) { + *err = DB_ERROR; + + mem_heap_free(heap); + return(FALSE); + } + + ut_d(parent_latched = true); } else { /* Otherwise, if we delete the leftmost node pointer - on a page, we have to change the father node pointer + on a page, we have to change the parent node pointer so that it is equal to the new leftmost node pointer on the page */ btr_node_ptr_delete(index, block, mtr); dtuple_t* node_ptr = dict_index_build_node_ptr( - index, next_rec, buf_block_get_page_no(block), + index, next_rec, block->page.id.page_no(), heap, level); btr_insert_on_non_leaf_level( flags, index, level + 1, node_ptr, mtr); + + ut_d(parent_latched = true); } } @@ -3516,7 +5541,8 @@ btr_cur_pessimistic_delete( ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ - ut_ad(btr_check_node_ptr(index, block, mtr)); + /* btr_check_node_ptr() needs parent block latched */ + ut_ad(!parent_latched || btr_check_node_ptr(index, block, mtr)); return_after_reservations: *err = DB_SUCCESS; @@ -3527,6 +5553,17 @@ return_after_reservations: ret = btr_cur_compress_if_useful(cursor, FALSE, mtr); } + if (!srv_read_only_mode + && page_is_leaf(page) + && !dict_index_is_online_ddl(index)) { + + mtr_memo_release(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK); + + /* NOTE: We cannot release root block latch here, because it + has segment header and already modified in most of cases.*/ + } + if (n_reserved > 0) { fil_space_release_free_extents(index->space, n_reserved); } @@ -3590,63 +5627,63 @@ so far and assume that all pages that we did not scan up to slot2->page contain the same number of records, then we multiply that average to the number of pages between slot1->page and slot2->page (which is n_rows_on_prev_level). In this case we set is_n_rows_exact to FALSE. -@return number of rows (exact or estimated) */ +@return number of rows, not including the borders (exact or estimated) */ static -ib_int64_t +int64_t btr_estimate_n_rows_in_range_on_level( /*==================================*/ dict_index_t* index, /*!< in: index */ btr_path_t* slot1, /*!< in: left border */ btr_path_t* slot2, /*!< in: right border */ - ib_int64_t n_rows_on_prev_level, /*!< in: number of rows + int64_t n_rows_on_prev_level, /*!< in: number of rows on the previous level for the same descend paths; used to - determine the numbe of pages + determine the number of pages on this level */ ibool* is_n_rows_exact) /*!< out: TRUE if the returned value is exact i.e. not an estimation */ { - ulint space; - ib_int64_t n_rows; + int64_t n_rows; ulint n_pages_read; - ulint page_no; - ulint zip_size; ulint level; - space = dict_index_get_space(index); - n_rows = 0; n_pages_read = 0; /* Assume by default that we will scan all pages between - slot1->page_no and slot2->page_no */ + slot1->page_no and slot2->page_no. 
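
The sibling prefetch added to btr_cur_optimistic_delete_func() only pays off because of how these two functions are normally used: callers attempt the cheap in-page delete first and fall back to btr_cur_pessimistic_delete(), which may have to reserve file extents before it reorganizes the tree. A minimal model of that caller-side pattern; the callables stand in for the real InnoDB calls and are purely illustrative:

#include <functional>

/* Try the optimistic (single-page) delete; if the page would underflow or
needs reorganization, reserve space for the worst case and run the
pessimistic path, releasing the reservation afterwards. */
static bool delete_with_fallback(
	const std::function<bool()>&	optimistic_delete,
	const std::function<bool()>&	reserve_free_extents,
	const std::function<bool()>&	pessimistic_delete,
	const std::function<void()>&	release_free_extents)
{
	if (optimistic_delete()) {
		return true;		/* common case: no tree changes needed */
	}

	if (!reserve_free_extents()) {
		return false;		/* out of file space */
	}

	const bool ok = pessimistic_delete();
	release_free_extents();
	return ok;
}
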
*/ *is_n_rows_exact = TRUE; - /* add records from slot1->page_no which are to the right of - the record which serves as a left border of the range, if any */ - if (slot1->nth_rec < slot1->n_recs) { + /* Add records from slot1->page_no which are to the right of + the record which serves as a left border of the range, if any + (we don't include the record itself in this count). */ + if (slot1->nth_rec <= slot1->n_recs) { n_rows += slot1->n_recs - slot1->nth_rec; } - /* add records from slot2->page_no which are to the left of - the record which servers as a right border of the range, if any */ + /* Add records from slot2->page_no which are to the left of + the record which servers as a right border of the range, if any + (we don't include the record itself in this count). */ if (slot2->nth_rec > 1) { n_rows += slot2->nth_rec - 1; } - /* count the records in the pages between slot1->page_no and - slot2->page_no (non inclusive), if any */ - - zip_size = fil_space_get_zip_size(space); + /* Count the records in the pages between slot1->page_no and + slot2->page_no (non inclusive), if any. */ /* Do not read more than this number of pages in order not to hurt performance with this code which is just an estimation. If we read this many pages before reaching slot2->page_no then we estimate the - average from the pages scanned so far */ + average from the pages scanned so far. */ # define N_PAGES_READ_LIMIT 10 - page_no = slot1->page_no; + page_id_t page_id( + dict_index_get_space(index), slot1->page_no); + const fil_space_t* space = fil_space_get(index->space); + ut_ad(space); + const page_size_t page_size(space->flags); + level = slot1->page_level; do { @@ -3662,7 +5699,7 @@ btr_estimate_n_rows_in_range_on_level( attempting to read a page that is no longer part of the B-tree. We pass BUF_GET_POSSIBLY_FREED in order to silence a debug assertion about this. */ - block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, + block = buf_page_get_gen(page_id, page_size, RW_S_LATCH, NULL, BUF_GET_POSSIBLY_FREED, __FILE__, __LINE__, &mtr, &err); @@ -3690,7 +5727,7 @@ btr_estimate_n_rows_in_range_on_level( this is only an estimate. We are sure that a page with page_no exists because InnoDB never frees pages, only reuses them. */ - if (fil_page_get_type(page) != FIL_PAGE_INDEX + if (!fil_page_index_page_check(page) || btr_page_get_index_id(page) != index->id || btr_page_get_level_low(page) != level) { @@ -3708,18 +5745,18 @@ btr_estimate_n_rows_in_range_on_level( n_pages_read++; - if (page_no != slot1->page_no) { + if (page_id.page_no() != slot1->page_no) { /* Do not count the records on slot1->page_no, we already counted them before this loop. */ n_rows += page_get_n_recs(page); } - page_no = btr_page_get_next(page, &mtr); + page_id.set_page_no(btr_page_get_next(page, &mtr)); mtr_commit(&mtr); if (n_pages_read == N_PAGES_READ_LIMIT - || page_no == FIL_NULL) { + || page_id.page_no() == FIL_NULL) { /* Either we read too many pages or we reached the end of the level without passing through slot2->page_no, the tree must have changed @@ -3727,7 +5764,7 @@ btr_estimate_n_rows_in_range_on_level( goto inexact; } - } while (page_no != slot2->page_no); + } while (page_id.page_no() != slot2->page_no); return(n_rows); @@ -3752,19 +5789,40 @@ inexact: return(n_rows); } -/*******************************************************************//** -Estimates the number of rows in a given index range. 
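
The bookkeeping at the top of btr_estimate_n_rows_in_range_on_level() can be read in isolation: the left border page contributes the records to the right of the border record, the right border page contributes the records to its left, and neither border record itself is counted. A self-contained restatement (the struct is a stand-in for btr_path_t):

/* nth_rec is the 1-based position of the border record on its page and
n_recs the number of user records on that page, as in btr_path_t. */
struct path_slot {
	unsigned long	nth_rec;
	unsigned long	n_recs;
};

/* Records contributed by the two border pages, excluding the border
records themselves. */
static long long border_page_rows(const path_slot& left, const path_slot& right)
{
	long long	n_rows = 0;

	if (left.nth_rec <= left.n_recs) {
		/* records to the right of the left border */
		n_rows += static_cast<long long>(left.n_recs - left.nth_rec);
	}

	if (right.nth_rec > 1) {
		/* records to the left of the right border */
		n_rows += static_cast<long long>(right.nth_rec - 1);
	}

	return n_rows;
}
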
-@return estimated number of rows */ -UNIV_INTERN -ib_int64_t -btr_estimate_n_rows_in_range( -/*=========================*/ - dict_index_t* index, /*!< in: index */ - const dtuple_t* tuple1, /*!< in: range start, may also be empty tuple */ - ulint mode1, /*!< in: search mode for range start */ - const dtuple_t* tuple2, /*!< in: range end, may also be empty tuple */ - ulint mode2, /*!< in: search mode for range end */ - trx_t* trx) /*!< in: trx */ +/** If the tree gets changed too much between the two dives for the left +and right boundary then btr_estimate_n_rows_in_range_low() will retry +that many times before giving up and returning the value stored in +rows_in_range_arbitrary_ret_val. */ +static const unsigned rows_in_range_max_retries = 4; + +/** We pretend that a range has that many records if the tree keeps changing +for rows_in_range_max_retries retries while we try to estimate the records +in a given range. */ +static const int64_t rows_in_range_arbitrary_ret_val = 10; + +/** Estimates the number of rows in a given index range. +@param[in] index index +@param[in] tuple1 range start, may also be empty tuple +@param[in] mode1 search mode for range start +@param[in] tuple2 range end, may also be empty tuple +@param[in] mode2 search mode for range end +@param[in] nth_attempt if the tree gets modified too much while +we are trying to analyze it, then we will retry (this function will call +itself, incrementing this parameter) +@return estimated number of rows; if after rows_in_range_max_retries +retries the tree keeps changing, then we will just return +rows_in_range_arbitrary_ret_val as a result (if +nth_attempt >= rows_in_range_max_retries and the tree is modified between +the two dives). */ +static +int64_t +btr_estimate_n_rows_in_range_low( + dict_index_t* index, + const dtuple_t* tuple1, + page_cur_mode_t mode1, + const dtuple_t* tuple2, + page_cur_mode_t mode2, + unsigned nth_attempt) { btr_path_t path1[BTR_PATH_ARRAY_N_SLOTS]; btr_path_t path2[BTR_PATH_ARRAY_N_SLOTS]; @@ -3774,60 +5832,157 @@ btr_estimate_n_rows_in_range( ibool diverged; ibool diverged_lot; ulint divergence_level; - ib_int64_t n_rows; + int64_t n_rows; ibool is_n_rows_exact; ulint i; mtr_t mtr; - ib_int64_t table_n_rows; + int64_t table_n_rows; table_n_rows = dict_table_get_n_rows(index->table); - mtr_start_trx(&mtr, trx); + mtr_start(&mtr); + /* Below we dive to the two records specified by tuple1 and tuple2 and + we remember the entire dive paths from the tree root. The place where + the tuple1 path ends on the leaf level we call "left border" of our + interval and the place where the tuple2 path ends on the leaf level - + "right border". We take care to either include or exclude the interval + boundaries depending on whether <, <=, > or >= was specified. For + example if "5 < x AND x <= 10" then we should not include the left + boundary, but should include the right one. */ + cursor.path_arr = path1; + bool should_count_the_left_border; + if (dtuple_get_n_fields(tuple1) > 0) { btr_cur_search_to_nth_level(index, 0, tuple1, mode1, BTR_SEARCH_LEAF | BTR_ESTIMATE, &cursor, 0, __FILE__, __LINE__, &mtr); + + ut_ad(!page_rec_is_infimum(btr_cur_get_rec(&cursor))); + + /* We should count the border if there are any records to + match the criteria, i.e. 
if the maximum record on the tree is + 5 and x > 3 is specified then the cursor will be positioned at + 5 and we should count the border, but if x > 7 is specified, + then the cursor will be positioned at 'sup' on the rightmost + leaf page in the tree and we should not count the border. */ + should_count_the_left_border + = !page_rec_is_supremum(btr_cur_get_rec(&cursor)); } else { - btr_cur_open_at_index_side(true, index, + dberr_t err = DB_SUCCESS; + + err = btr_cur_open_at_index_side(true, index, BTR_SEARCH_LEAF | BTR_ESTIMATE, &cursor, 0, &mtr); + + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_estimate_n_rows_in_range_low " + << " called from file: " + << __FILE__ << " line: " << __LINE__ + << " table: " << index->table->name + << " index: " << index->name; + } + + ut_ad(page_rec_is_infimum(btr_cur_get_rec(&cursor))); + + /* The range specified is wihout a left border, just + 'x < 123' or 'x <= 123' and btr_cur_open_at_index_side() + positioned the cursor on the infimum record on the leftmost + page, which must not be counted. */ + should_count_the_left_border = false; } mtr_commit(&mtr); - mtr_start_trx(&mtr, trx); + mtr_start(&mtr); cursor.path_arr = path2; + bool should_count_the_right_border; + if (dtuple_get_n_fields(tuple2) > 0) { btr_cur_search_to_nth_level(index, 0, tuple2, mode2, BTR_SEARCH_LEAF | BTR_ESTIMATE, &cursor, 0, __FILE__, __LINE__, &mtr); + + const rec_t* rec = btr_cur_get_rec(&cursor); + + ut_ad(!(mode2 == PAGE_CUR_L && page_rec_is_supremum(rec))); + + should_count_the_right_border + = (mode2 == PAGE_CUR_LE /* if the range is '<=' */ + /* and the record was found */ + && cursor.low_match >= dtuple_get_n_fields(tuple2)) + || (mode2 == PAGE_CUR_L /* or if the range is '<' */ + /* and there are any records to match the criteria, + i.e. if the minimum record on the tree is 5 and + x < 7 is specified then the cursor will be + positioned at 5 and we should count the border, but + if x < 2 is specified, then the cursor will be + positioned at 'inf' and we should not count the + border */ + && !page_rec_is_infimum(rec)); + /* Notice that for "WHERE col <= 'foo'" MySQL passes to + ha_innobase::records_in_range(): + min_key=NULL (left-unbounded) which is expected + max_key='foo' flag=HA_READ_AFTER_KEY (PAGE_CUR_G), which is + unexpected - one would expect + flag=HA_READ_KEY_OR_PREV (PAGE_CUR_LE). In this case the + cursor will be positioned on the first record to the right of + the requested one (can also be positioned on the 'sup') and + we should not count the right border. */ } else { - btr_cur_open_at_index_side(false, index, + dberr_t err = DB_SUCCESS; + + err = btr_cur_open_at_index_side(false, index, BTR_SEARCH_LEAF | BTR_ESTIMATE, &cursor, 0, &mtr); + + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_estimate_n_rows_in_range_low " + << " called from file: " + << __FILE__ << " line: " << __LINE__ + << " table: " << index->table->name + << " index: " << index->name; + } + + + ut_ad(page_rec_is_supremum(btr_cur_get_rec(&cursor))); + + /* The range specified is wihout a right border, just + 'x > 123' or 'x >= 123' and btr_cur_open_at_index_side() + positioned the cursor on the supremum record on the rightmost + page, which must not be counted. 
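
The two should_count_the_*_border flags computed here boil down to a small amount of logic. A standalone restatement, with a simplified cursor state in place of the real btr_cur_t (the enum mirrors the PAGE_CUR_* search modes in name only):

enum search_mode { CUR_G, CUR_GE, CUR_L, CUR_LE };

/* Left border: tuple1 was given and the cursor did not run off the right
end of the index (it is not on the supremum). An omitted left bound
positions the cursor on the infimum, which is never counted. */
static bool count_left_border(bool have_tuple1, bool cursor_on_supremum)
{
	return have_tuple1 && !cursor_on_supremum;
}

/* Right border: '<=' counts the border only when the key was actually
found; '<' counts it only when the cursor did not run off the left end
of the index (it is not on the infimum). An omitted right bound is
never counted. */
static bool count_right_border(bool have_tuple2, search_mode mode2,
			       bool key_found, bool cursor_on_infimum)
{
	if (!have_tuple2) {
		return false;
	}

	return (mode2 == CUR_LE && key_found)
		|| (mode2 == CUR_L && !cursor_on_infimum);
}
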
*/ + should_count_the_right_border = false; } mtr_commit(&mtr); /* We have the path information for the range in path1 and path2 */ - n_rows = 1; + n_rows = 0; is_n_rows_exact = TRUE; - diverged = FALSE; /* This becomes true when the path is not - the same any more */ - diverged_lot = FALSE; /* This becomes true when the paths are - not the same or adjacent any more */ - divergence_level = 1000000; /* This is the level where paths diverged - a lot */ + + /* This becomes true when the two paths do not pass through the + same pages anymore. */ + diverged = FALSE; + + /* This becomes true when the paths are not the same or adjacent + any more. This means that they pass through the same or + neighboring-on-the-same-level pages only. */ + diverged_lot = FALSE; + + /* This is the level where paths diverged a lot. */ + divergence_level = 1000000; + for (i = 0; ; i++) { ut_ad(i < BTR_PATH_ARRAY_N_SLOTS); @@ -3837,6 +5992,70 @@ btr_estimate_n_rows_in_range( if (slot1->nth_rec == ULINT_UNDEFINED || slot2->nth_rec == ULINT_UNDEFINED) { + /* Here none of the borders were counted. For example, + if on the leaf level we descended to: + (inf, a, b, c, d, e, f, sup) + ^ ^ + path1 path2 + then n_rows will be 2 (c and d). */ + + if (is_n_rows_exact) { + /* Only fiddle to adjust this off-by-one + if the number is exact, otherwise we do + much grosser adjustments below. */ + + btr_path_t* last1 = &path1[i - 1]; + btr_path_t* last2 = &path2[i - 1]; + + /* If both paths end up on the same record on + the leaf level. */ + if (last1->page_no == last2->page_no + && last1->nth_rec == last2->nth_rec) { + + /* n_rows can be > 0 here if the paths + were first different and then converged + to the same record on the leaf level. + For example: + SELECT ... LIKE 'wait/synch/rwlock%' + mode1=PAGE_CUR_GE, + tuple1="wait/synch/rwlock" + path1[0]={nth_rec=58, n_recs=58, + page_no=3, page_level=1} + path1[1]={nth_rec=56, n_recs=55, + page_no=119, page_level=0} + + mode2=PAGE_CUR_G + tuple2="wait/synch/rwlock" + path2[0]={nth_rec=57, n_recs=57, + page_no=3, page_level=1} + path2[1]={nth_rec=56, n_recs=55, + page_no=119, page_level=0} */ + + /* If the range is such that we should + count both borders, then avoid + counting that record twice - once as a + left border and once as a right + border. */ + if (should_count_the_left_border + && should_count_the_right_border) { + + n_rows = 1; + } else { + /* Some of the borders should + not be counted, e.g. [3,3). */ + n_rows = 0; + } + } else { + if (should_count_the_left_border) { + n_rows++; + } + + if (should_count_the_right_border) { + n_rows++; + } + } + } + if (i > divergence_level + 1 && !is_n_rows_exact) { /* In trees whose height is > 1 our algorithm tends to underestimate: multiply the estimate @@ -3868,12 +6087,41 @@ btr_estimate_n_rows_in_range( if (!diverged && slot1->nth_rec != slot2->nth_rec) { + /* If both slots do not point to the same page, + this means that the tree must have changed between + the dive for slot1 and the dive for slot2 at the + beginning of this function. */ + if (slot1->page_no != slot2->page_no + || slot1->page_level != slot2->page_level) { + + /* If the tree keeps changing even after a + few attempts, then just return some arbitrary + number. 
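
When the two dive paths later turn out to disagree about which page sits at a given level, the tree must have changed between the dives; the code below simply reruns the whole estimate and, after rows_in_range_max_retries attempts, falls back to the arbitrary constant defined earlier. The control flow on its own, stripped of the B-tree specifics:

#include <cstdint>
#include <functional>

static const unsigned		max_retries	= 4;	/* rows_in_range_max_retries */
static const std::int64_t	arbitrary_rows	= 10;	/* rows_in_range_arbitrary_ret_val */

/* estimate() fills *rows and returns true when both dives saw a consistent
tree, false when the paths prove that the tree changed in between. */
static std::int64_t estimate_with_retries(
	const std::function<bool(std::int64_t*)>& estimate)
{
	for (unsigned attempt = 1; attempt <= max_retries; ++attempt) {
		std::int64_t	rows;

		if (estimate(&rows)) {
			return rows;
		}
	}

	/* The tree keeps changing; return something plausible instead of
	retrying forever. */
	return arbitrary_rows;
}
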
*/ + if (nth_attempt >= rows_in_range_max_retries) { + return(rows_in_range_arbitrary_ret_val); + } + + const int64_t ret = + btr_estimate_n_rows_in_range_low( + index, tuple1, mode1, + tuple2, mode2, nth_attempt + 1); + + return(ret); + } + diverged = TRUE; if (slot1->nth_rec < slot2->nth_rec) { - n_rows = slot2->nth_rec - slot1->nth_rec; - - if (n_rows > 1) { + /* We do not count the borders (nor the left + nor the right one), thus "- 1". */ + n_rows = slot2->nth_rec - slot1->nth_rec - 1; + + if (n_rows > 0) { + /* There is at least one row between + the two borders pointed to by slot1 + and slot2, so on the level below the + slots will point to non-adjacent + pages. */ diverged_lot = TRUE; divergence_level = i; } @@ -3885,8 +6133,10 @@ btr_estimate_n_rows_in_range( and we select where x > 20 and x < 30; in this case slot1->nth_rec will point to the supr record and slot2->nth_rec - will point to 6 */ + will point to 6. */ n_rows = 0; + should_count_the_left_border = false; + should_count_the_right_border = false; } } else if (diverged && !diverged_lot) { @@ -3917,6 +6167,27 @@ btr_estimate_n_rows_in_range( } } +/** Estimates the number of rows in a given index range. +@param[in] index index +@param[in] tuple1 range start, may also be empty tuple +@param[in] mode1 search mode for range start +@param[in] tuple2 range end, may also be empty tuple +@param[in] mode2 search mode for range end +@return estimated number of rows */ +int64_t +btr_estimate_n_rows_in_range( + dict_index_t* index, + const dtuple_t* tuple1, + page_cur_mode_t mode1, + const dtuple_t* tuple2, + page_cur_mode_t mode2) +{ + const int64_t ret = btr_estimate_n_rows_in_range_low( + index, tuple1, mode1, tuple2, mode2, 1 /* first attempt */); + + return(ret); +} + /*******************************************************************//** Record the number of non_null key values in a given index for each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index). @@ -3960,9 +6231,10 @@ The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed index->stat_n_sample_sizes[]. If innodb_stats_method is nulls_ignored, we also record the number of non-null values for each prefix and stored the estimates in -array index->stat_n_non_null_key_vals. */ -UNIV_INTERN -void +array index->stat_n_non_null_key_vals. +@return true if the index is available and we get the estimated numbers, +false if the index is unavailable. */ +bool btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index) /*!< in: index */ @@ -3971,22 +6243,26 @@ btr_estimate_number_of_different_key_vals( page_t* page; rec_t* rec; ulint n_cols; - ulint matched_fields; - ulint matched_bytes; ib_uint64_t* n_diff; ib_uint64_t* n_not_null; ibool stats_null_not_equal; - ullint n_sample_pages = 1; /* number of pages to sample */ + uintmax_t n_sample_pages=1; /* number of pages to sample */ ulint not_empty_flag = 0; ulint total_external_size = 0; ulint i; ulint j; - ullint add_on; + uintmax_t add_on; mtr_t mtr; mem_heap_t* heap = NULL; ulint* offsets_rec = NULL; ulint* offsets_next_rec = NULL; + /* For spatial index, there is no such stats can be + fetched. 
*/ + if (dict_index_is_spatial(index)) { + return(false); + } + n_cols = dict_index_get_n_unique(index); heap = mem_heap_create((sizeof *n_diff + sizeof *n_not_null) @@ -3996,7 +6272,7 @@ btr_estimate_number_of_different_key_vals( + sizeof *offsets_next_rec)); n_diff = (ib_uint64_t*) mem_heap_zalloc( - heap, n_cols * sizeof(ib_int64_t)); + heap, n_cols * sizeof(n_diff[0])); n_not_null = NULL; @@ -4021,7 +6297,7 @@ btr_estimate_number_of_different_key_vals( default: ut_error; - } + } if (srv_stats_sample_traditional) { /* It makes no sense to test more pages than are contained @@ -4070,8 +6346,8 @@ btr_estimate_number_of_different_key_vals( */ if (index->stat_index_size > 1) { n_sample_pages = (srv_stats_transient_sample_pages < index->stat_index_size) ? - (ulint) ut_min((double) index->stat_index_size, - log2(index->stat_index_size)*srv_stats_transient_sample_pages) + ut_min(static_cast(index->stat_index_size), + static_cast(log2(index->stat_index_size)*srv_stats_transient_sample_pages)) : index->stat_index_size; } @@ -4085,7 +6361,17 @@ btr_estimate_number_of_different_key_vals( for (i = 0; i < n_sample_pages; i++) { mtr_start(&mtr); - btr_cur_open_at_rnd_pos(index, BTR_SEARCH_LEAF, &cursor, &mtr); + bool available; + + available = btr_cur_open_at_rnd_pos(index, BTR_SEARCH_LEAF, + &cursor, &mtr); + + if (!available) { + mtr_commit(&mtr); + mem_heap_free(heap); + + return(false); + } /* Count the number of different key values for each prefix of the key on this index page. If the prefix does not determine @@ -4109,6 +6395,7 @@ btr_estimate_number_of_different_key_vals( } while (!page_rec_is_supremum(rec)) { + ulint matched_fields; rec_t* next_rec = page_rec_get_next(rec); if (page_rec_is_supremum(next_rec)) { total_external_size += @@ -4117,8 +6404,6 @@ btr_estimate_number_of_different_key_vals( break; } - matched_fields = 0; - matched_bytes = 0; offsets_next_rec = rec_get_offsets(next_rec, index, offsets_next_rec, ULINT_UNDEFINED, @@ -4127,8 +6412,7 @@ btr_estimate_number_of_different_key_vals( cmp_rec_rec_with_match(rec, next_rec, offsets_rec, offsets_next_rec, index, stats_null_not_equal, - &matched_fields, - &matched_bytes); + &matched_fields); for (j = matched_fields; j < n_cols; j++) { /* We add one if this index record has @@ -4226,13 +6510,15 @@ btr_estimate_number_of_different_key_vals( } mem_heap_free(heap); + + return(true); } /*================== EXTERNAL STORAGE OF BIG FIELDS ===================*/ /***********************************************************//** Gets the offset of the pointer to the externally stored part of a field. -@return offset of the pointer to the externally stored part */ +@return offset of the pointer to the externally stored part */ static ulint btr_rec_get_field_ref_offs( @@ -4252,9 +6538,9 @@ btr_rec_get_field_ref_offs( } /** Gets a pointer to the externally stored part of a field. -@param rec record -@param offsets rec_get_offsets(rec) -@param n index of the externally stored field +@param rec record +@param offsets rec_get_offsets(rec) +@param n index of the externally stored field @return pointer to the externally stored part */ #define btr_rec_get_field_ref(rec, offsets, n) \ ((rec) + btr_rec_get_field_ref_offs(offsets, n)) @@ -4262,8 +6548,7 @@ btr_rec_get_field_ref_offs( /** Gets the externally stored size of a record, in units of a database page. 
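
The logarithmic sampling branch above (the transient, non-persistent statistics path) caps the number of leaf pages visited at a multiple of log2 of the index size. Restated on its own, assuming sample_pages plays the role of innodb_stats_transient_sample_pages:

#include <algorithm>
#include <cmath>
#include <cstdint>

/* Number of leaf pages to sample for transient statistics, given the index
size in pages. */
static std::uint64_t transient_sample_pages(std::uint64_t index_size,
					    std::uint64_t sample_pages)
{
	if (index_size <= 1) {
		return 1;		/* nothing to scale: sample one page */
	}

	if (sample_pages >= index_size) {
		return index_size;	/* small index: read all of it */
	}

	const double scaled = std::log2(static_cast<double>(index_size))
		* static_cast<double>(sample_pages);

	return static_cast<std::uint64_t>(std::min(
		static_cast<double>(index_size), scaled));
}
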
@param[in] rec record @param[in] offsets array returned by rec_get_offsets() -@return externally stored part, in units of a database page */ - +@return externally stored part, in units of a database page */ ulint btr_rec_get_externally_stored_len( const rec_t* rec, @@ -4342,8 +6627,6 @@ btr_cur_set_ownership_of_extern_field( } else { mach_write_to_1(data + local_len + BTR_EXTERN_LEN, byte_val); } - - btr_blob_dbg_owner(rec, index, offsets, i, val); } /*******************************************************************//** @@ -4351,7 +6634,6 @@ Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the index tree. In purge only the owner of externally stored field is allowed to free the field. */ -UNIV_INTERN void btr_cur_disown_inherited_fields( /*============================*/ @@ -4372,7 +6654,7 @@ btr_cur_disown_inherited_fields( for (i = 0; i < rec_offs_n_fields(offsets); i++) { if (rec_offs_nth_extern(offsets, i) - && !upd_get_field_by_field_no(update, i)) { + && !upd_get_field_by_field_no(update, i, false)) { btr_cur_set_ownership_of_extern_field( page_zip, rec, index, offsets, i, FALSE, mtr); } @@ -4418,8 +6700,7 @@ btr_cur_unmark_extern_fields( Flags the data tuple fields that are marked as extern storage in the update vector. We use this function to remember which fields we must mark as extern storage in a record inserted for an update. -@return number of flagged external columns */ -UNIV_INTERN +@return number of flagged external columns */ ulint btr_push_update_extern_fields( /*==========================*/ @@ -4459,7 +6740,8 @@ btr_push_update_extern_fields( InnoDB writes a longer prefix of externally stored columns, so that column prefixes in secondary indexes can be reconstructed. */ - dfield_set_data(field, (byte*) dfield_get_data(field) + dfield_set_data(field, + (byte*) dfield_get_data(field) + dfield_get_len(field) - BTR_EXTERN_FIELD_REF_SIZE, BTR_EXTERN_FIELD_REF_SIZE); @@ -4497,7 +6779,7 @@ btr_push_update_extern_fields( /*******************************************************************//** Returns the length of a BLOB part stored on the header page. -@return part length */ +@return part length */ static ulint btr_blob_get_part_len( @@ -4509,7 +6791,7 @@ btr_blob_get_part_len( /*******************************************************************//** Returns the page number where the next BLOB part is stored. 
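
Several helpers above manipulate the 20-byte external field reference (BTR_EXTERN_FIELD_REF_SIZE) stored at the end of the locally kept column prefix. A sketch of that layout and of the ownership bit that btr_cur_set_ownership_of_extern_field() flips; the offsets follow the BTR_EXTERN_* constants referenced in this file, but the helper functions themselves are only illustrative:

#include <cstdint>

/* Offsets inside the 20-byte external field reference. */
static const unsigned	EXT_SPACE_ID	= 0;	/* 4 bytes */
static const unsigned	EXT_PAGE_NO	= 4;	/* 4 bytes */
static const unsigned	EXT_OFFSET	= 8;	/* 4 bytes */
static const unsigned	EXT_LEN		= 12;	/* 8 bytes; flags in first byte */
static const unsigned	EXT_REF_SIZE	= 20;	/* BTR_EXTERN_FIELD_REF_SIZE */

static const std::uint8_t EXT_OWNER_FLAG	= 128;	/* set: NOT owned here */
static const std::uint8_t EXT_INHERITED_FLAG	= 64;	/* from an older version */

/* Is the BLOB owned by the record that carries this reference? */
static bool ext_ref_is_owned(const std::uint8_t ref[EXT_REF_SIZE])
{
	return !(ref[EXT_LEN] & EXT_OWNER_FLAG);
}

/* Grant or revoke ownership, as btr_cur_set_ownership_of_extern_field()
does when an update moves the record elsewhere in the tree. */
static void ext_ref_set_owned(std::uint8_t ref[EXT_REF_SIZE], bool owned)
{
	if (owned) {
		ref[EXT_LEN] = static_cast<std::uint8_t>(
			ref[EXT_LEN] & ~EXT_OWNER_FLAG);
	} else {
		ref[EXT_LEN] = static_cast<std::uint8_t>(
			ref[EXT_LEN] | EXT_OWNER_FLAG);
	}
}
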
-@return page number or FIL_NULL if no more pages */ +@return page number or FIL_NULL if no more pages */ static ulint btr_blob_get_next_page_no( @@ -4525,16 +6807,17 @@ static void btr_blob_free( /*==========*/ + dict_index_t* index, /*!< in: index */ buf_block_t* block, /*!< in: buffer block */ ibool all, /*!< in: TRUE=remove also the compressed page if there is one */ mtr_t* mtr) /*!< in: mini-transaction to commit */ { buf_pool_t* buf_pool = buf_pool_from_block(block); - ulint space = buf_block_get_space(block); - ulint page_no = buf_block_get_page_no(block); + ulint space = block->page.id.space(); + ulint page_no = block->page.id.page_no(); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix(mtr, block, MTR_MEMO_PAGE_X_FIX, index->table)); mtr_commit(mtr); @@ -4545,8 +6828,8 @@ btr_blob_free( if (buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE - && buf_block_get_space(block) == space - && buf_block_get_page_no(block) == page_no) { + && block->page.id.space() == space + && block->page.id.page_no() == page_no) { if (!buf_LRU_free_page(&block->page, all) && all && block->page.zip.data) { @@ -4560,28 +6843,149 @@ btr_blob_free( buf_pool_mutex_exit(buf_pool); } +/** Helper class used while writing blob pages, during insert or update. */ +struct btr_blob_log_check_t { + /** Persistent cursor on a clusterex index record with blobs. */ + btr_pcur_t* m_pcur; + /** Mini transaction holding the latches for m_pcur */ + mtr_t* m_mtr; + /** rec_get_offsets(rec, index); offset of clust_rec */ + const ulint* m_offsets; + /** The block containing clustered record */ + buf_block_t** m_block; + /** The clustered record pointer */ + rec_t** m_rec; + /** The blob operation code */ + enum blob_op m_op; + + /** Constructor + @param[in] pcur persistent cursor on a clustered + index record with blobs. + @param[in] mtr mini-transaction holding latches for + pcur. + @param[in] offsets offsets of the clust_rec + @param[in,out] block record block containing pcur record + @param[in,out] rec the clustered record pointer + @param[in] op the blob operation code */ + btr_blob_log_check_t( + btr_pcur_t* pcur, + mtr_t* mtr, + const ulint* offsets, + buf_block_t** block, + rec_t** rec, + enum blob_op op) + : m_pcur(pcur), + m_mtr(mtr), + m_offsets(offsets), + m_block(block), + m_rec(rec), + m_op(op) + { + ut_ad(rec_offs_validate(*m_rec, m_pcur->index(), m_offsets)); + ut_ad((*m_block)->frame == page_align(*m_rec)); + ut_ad(*m_rec == btr_pcur_get_rec(m_pcur)); + } + + /** Check if there is enough space in log file. Commit and re-start the + mini transaction. 
*/ + void check() + { + dict_index_t* index = m_pcur->index(); + ulint offs = 0; + ulint page_no = ULINT_UNDEFINED; + FlushObserver* observer = m_mtr->get_flush_observer(); + + if (m_op == BTR_STORE_INSERT_BULK) { + offs = page_offset(*m_rec); + page_no = page_get_page_no( + buf_block_get_frame(*m_block)); + + buf_block_buf_fix_inc(*m_block, __FILE__, __LINE__); + } else { + btr_pcur_store_position(m_pcur, m_mtr); + } + m_mtr->commit(); + + DEBUG_SYNC_C("blob_write_middle"); + + log_free_check(); + + const mtr_log_t log_mode = m_mtr->get_log_mode(); + m_mtr->start(); + m_mtr->set_log_mode(log_mode); + m_mtr->set_named_space(index->space); + m_mtr->set_flush_observer(observer); + + if (m_op == BTR_STORE_INSERT_BULK) { + page_id_t page_id(dict_index_get_space(index), + page_no); + page_size_t page_size(dict_table_page_size( + index->table)); + page_cur_t* page_cur = &m_pcur->btr_cur.page_cur; + + mtr_x_lock(dict_index_get_lock(index), m_mtr); + page_cur->block = btr_block_get( + page_id, page_size, RW_X_LATCH, index, m_mtr); + page_cur->rec = buf_block_get_frame(page_cur->block) + + offs; + + buf_block_buf_fix_dec(page_cur->block); + } else { + ut_ad(m_pcur->rel_pos == BTR_PCUR_ON); + bool ret = btr_pcur_restore_position( + BTR_MODIFY_LEAF | BTR_MODIFY_EXTERNAL, + m_pcur, m_mtr); + + ut_a(ret); + } + + *m_block = btr_pcur_get_block(m_pcur); + *m_rec = btr_pcur_get_rec(m_pcur); + + ut_d(rec_offs_make_valid( + *m_rec, index, const_cast(m_offsets))); + + ut_ad(m_mtr->memo_contains_page_flagged( + *m_rec, + MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX) + || dict_table_is_intrinsic(index->table)); + + ut_ad(mtr_memo_contains_flagged(m_mtr, + dict_index_get_lock(index), + MTR_MEMO_SX_LOCK | MTR_MEMO_X_LOCK) + || dict_table_is_intrinsic(index->table)); + } +}; + + /*******************************************************************//** Stores the fields in big_rec_vec to the tablespace and puts pointers to them in rec. The extern flags in rec will have to be set beforehand. The fields are stored on pages allocated from leaf node file segment of the index tree. -@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE or DB_TOO_BIG_FOR_REDO */ -UNIV_INTERN + +TODO: If the allocation extends the tablespace, it will not be redo logged, in +any mini-transaction. Tablespace extension should be redo-logged, so that +recovery will not fail when the big_rec was written to the extended portion of +the file, in case the file was somehow truncated in the crash. + +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ dberr_t btr_store_big_rec_extern_fields( /*============================*/ - dict_index_t* index, /*!< in: index of rec; the index tree - MUST be X-latched */ - buf_block_t* rec_block, /*!< in/out: block containing rec */ - rec_t* rec, /*!< in/out: record */ - const ulint* offsets, /*!< in: rec_get_offsets(rec, index); - the "external storage" flags in offsets - will not correspond to rec when - this function returns */ + btr_pcur_t* pcur, /*!< in/out: a persistent cursor. if + btr_mtr is restarted, then this can + be repositioned. */ + const upd_t* upd, /*!< in: update vector */ + ulint* offsets, /*!< in/out: rec_get_offsets() on + pcur. the "external storage" flags + in offsets will correctly correspond + to rec when this function returns */ const big_rec_t*big_rec_vec, /*!< in: vector containing fields to be stored externally */ - mtr_t* btr_mtr, /*!< in: mtr containing the - latches to the clustered index */ + mtr_t* btr_mtr, /*!< in/out: mtr containing the + latches to the clustered index. 
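
btr_blob_log_check_t::check() exists so that writing a very large BLOB cannot pin the clustered-index latches and the redo log for the whole operation: every few pages the cursor position is saved, the mini-transaction is committed, log_free_check() may wait for log space, and a fresh mini-transaction (same log mode and flush observer) re-latches the record and refreshes the cached block and record pointers. A deliberately simplified, non-InnoDB model of that rhythm; every type and callable here is a hypothetical stand-in:

#include <functional>

struct mtr_handle { /* latches + redo of one mini-transaction (stand-in) */ };

struct blob_write_checkpoint {
	std::function<void(mtr_handle&)>	save_cursor_position;
	std::function<void(mtr_handle&)>	commit;
	std::function<void()>			wait_for_log_space;
	std::function<void(mtr_handle&)>	start;
	std::function<void(mtr_handle&)>	restore_cursor_position;

	/* Called every few BLOB pages (commit_freq) from the write loop. */
	void check(mtr_handle& mtr) const
	{
		save_cursor_position(mtr);	/* remember the clustered record */
		commit(mtr);			/* release latches, bound the mtr */
		wait_for_log_space();		/* ~ log_free_check() */
		start(mtr);			/* new mtr, same log mode */
		restore_cursor_position(mtr);	/* re-latch, refresh pointers */
	}
};
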
can be + committed and restarted. */ enum blob_op op) /*! in: operation code */ { ulint rec_page_no; @@ -4590,43 +6994,41 @@ btr_store_big_rec_extern_fields( ulint store_len; ulint page_no; ulint space_id; - ulint zip_size; ulint prev_page_no; ulint hint_page_no; ulint i; mtr_t mtr; - mtr_t* alloc_mtr; + mtr_t mtr_bulk; mem_heap_t* heap = NULL; page_zip_des_t* page_zip; z_stream c_stream; - buf_block_t** freed_pages = NULL; - ulint n_freed_pages = 0; dberr_t error = DB_SUCCESS; + dict_index_t* index = pcur->index(); + buf_block_t* rec_block = btr_pcur_get_block(pcur); + rec_t* rec = btr_pcur_get_rec(pcur); ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(rec_offs_any_extern(offsets)); ut_ad(btr_mtr); - ut_ad(mtr_memo_contains(btr_mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged(btr_mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + ut_ad(mtr_is_block_fix( + btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(buf_block_get_frame(rec_block) == page_align(rec)); ut_a(dict_index_is_clust(index)); - page_zip = buf_block_get_page_zip(rec_block); - ut_a(dict_table_zip_size(index->table) - == buf_block_get_zip_size(rec_block)); - - space_id = buf_block_get_space(rec_block); - zip_size = buf_block_get_zip_size(rec_block); - rec_page_no = buf_block_get_page_no(rec_block); - ut_a(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX); - - error = btr_check_blob_limit(big_rec_vec); + ut_a(dict_table_page_size(index->table) + .equals_to(rec_block->page.size)); - if (error != DB_SUCCESS) { - ut_ad(op == BTR_STORE_INSERT); - return(error); - } + btr_blob_log_check_t redo_log(pcur, btr_mtr, offsets, &rec_block, + &rec, op); + page_zip = buf_block_get_page_zip(rec_block); + space_id = rec_block->page.id.space(); + rec_page_no = rec_block->page.id.page_no(); + ut_a(fil_page_index_page_check(page_align(rec)) + || op == BTR_STORE_INSERT_BULK); if (page_zip) { int err; @@ -4644,52 +7046,13 @@ btr_store_big_rec_extern_fields( ut_a(err == Z_OK); } - if (btr_blob_op_is_update(op)) { - /* Avoid reusing pages that have been previously freed - in btr_mtr. */ - if (btr_mtr->n_freed_pages) { - if (heap == NULL) { - heap = mem_heap_create( - btr_mtr->n_freed_pages - * sizeof *freed_pages); - } - - freed_pages = static_cast( - mem_heap_alloc( - heap, - btr_mtr->n_freed_pages - * sizeof *freed_pages)); - n_freed_pages = 0; - } - - /* Because btr_mtr will be committed after mtr, it is - possible that the tablespace has been extended when - the B-tree record was updated or inserted, or it will - be extended while allocating pages for big_rec. - - TODO: In mtr (not btr_mtr), write a redo log record - about extending the tablespace to its current size, - and remember the current size. Whenever the tablespace - grows as pages are allocated, write further redo log - records to mtr. (Currently tablespace extension is not - covered by the redo log. If it were, the record would - only be written to btr_mtr, which is committed after - mtr.) */ - alloc_mtr = btr_mtr; - } else { - /* Use the local mtr for allocations. */ - alloc_mtr = &mtr; - } - #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /* All pointers to externally stored columns in the record must either be zero or they must be pointers to inherited columns, owned by this record or an earlier record version. 
*/ - for (i = 0; i < rec_offs_n_fields(offsets); i++) { - if (!rec_offs_nth_extern(offsets, i)) { - continue; - } - field_ref = btr_rec_get_field_ref(rec, offsets, i); + for (i = 0; i < big_rec_vec->n_fields; i++) { + field_ref = btr_rec_get_field_ref( + rec, offsets, big_rec_vec->fields[i].field_no); ut_a(!(field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG)); /* Either this must be an update in place, @@ -4701,12 +7064,76 @@ btr_store_big_rec_extern_fields( BTR_EXTERN_FIELD_REF_SIZE)); } #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ + + /* Calculate the total number of pages for blob data */ + ulint total_blob_pages = 0; + const page_size_t page_size(dict_table_page_size(index->table)); + const ulint pages_in_extent = dict_table_extent_size(index->table); + + /* Space available in compressed page to carry blob data */ + const ulint payload_size_zip = page_size.physical() + - FIL_PAGE_DATA; + + /* Space available in uncompressed page to carry blob data */ + const ulint payload_size = page_size.physical() + - FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END; + + if (page_size.is_compressed()) { + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_pages + += static_cast + (compressBound(static_cast + (big_rec_vec->fields[i].len)) + + payload_size_zip - 1) + / payload_size_zip; + } + } else { + for (ulint i = 0; i < big_rec_vec->n_fields; i++) { + total_blob_pages += (big_rec_vec->fields[i].len + + payload_size - 1) + / payload_size; + } + } + + const ulint n_extents = (total_blob_pages + pages_in_extent - 1) + / pages_in_extent; + ulint n_reserved = 0; +#ifdef UNIV_DEBUG + ulint n_used = 0; /* number of pages used */ +#endif /* UNIV_DEBUG */ + + if (op == BTR_STORE_INSERT_BULK) { + mtr_t alloc_mtr; + + mtr_start(&alloc_mtr); + alloc_mtr.set_named_space(index->space); + + if (!fsp_reserve_free_extents(&n_reserved, space_id, n_extents, + FSP_BLOB, &alloc_mtr)) { + mtr_commit(&alloc_mtr); + error = DB_OUT_OF_FILE_SPACE; + goto func_exit; + } + + mtr_commit(&alloc_mtr); + } else { + if (!fsp_reserve_free_extents(&n_reserved, space_id, n_extents, + FSP_BLOB, btr_mtr)) { + error = DB_OUT_OF_FILE_SPACE; + goto func_exit; + } + } + + ut_ad(n_reserved > 0); + ut_ad(n_reserved == n_extents); + /* We have to create a file segment to the tablespace for each field and put the pointer to the field in rec */ for (i = 0; i < big_rec_vec->n_fields; i++) { - field_ref = btr_rec_get_field_ref( - rec, offsets, big_rec_vec->fields[i].field_no); + const ulint field_no = big_rec_vec->fields[i].field_no; + + field_ref = btr_rec_get_field_ref(rec, offsets, field_no); #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /* A zero BLOB pointer should have been initially inserted. 
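
The reservation above comes from straightforward arithmetic: an uncompressed BLOB page carries the physical page size minus the file header, the small BLOB header and the trailer; a compressed page only loses the file header; the per-field page counts are rounded up and the total is converted to whole extents. A self-contained restatement; the header sizes are the customary InnoDB values and compressBound() is zlib's worst-case output bound, both used here purely for illustration:

#include <vector>
#include <zlib.h>

static const unsigned long	PAGE_HEADER	= 38;	/* FIL_PAGE_DATA */
static const unsigned long	PAGE_TRAILER	= 8;	/* FIL_PAGE_DATA_END */
static const unsigned long	BLOB_HEADER	= 8;	/* BTR_BLOB_HDR_SIZE */

/* Extents to reserve for the given externally stored field lengths. */
static unsigned long blob_extents_to_reserve(
	const std::vector<unsigned long>&	field_lens,
	unsigned long				physical_page_size,
	bool					compressed,
	unsigned long				pages_per_extent)
{
	const unsigned long	payload_zip = physical_page_size - PAGE_HEADER;
	const unsigned long	payload = physical_page_size
		- PAGE_HEADER - BLOB_HEADER - PAGE_TRAILER;

	unsigned long	total_pages = 0;

	for (unsigned long len : field_lens) {
		if (compressed) {
			/* worst-case deflate output, split over zip pages */
			const unsigned long worst = compressBound(len);
			total_pages += (worst + payload_zip - 1) / payload_zip;
		} else {
			total_pages += (len + payload - 1) / payload;
		}
	}

	return (total_pages + pages_per_extent - 1) / pages_per_extent;
}
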
*/ ut_a(!memcmp(field_ref, field_ref_zero, @@ -4729,11 +7156,31 @@ btr_store_big_rec_extern_fields( c_stream.avail_in = static_cast(extern_len); } - for (;;) { + for (ulint blob_npages = 0;; ++blob_npages) { buf_block_t* block; page_t* page; + const ulint commit_freq = 4; + + ut_ad(page_align(field_ref) == page_align(rec)); + + if (!(blob_npages % commit_freq)) { + + redo_log.check(); + + field_ref = btr_rec_get_field_ref( + rec, offsets, field_no); + + page_zip = buf_block_get_page_zip(rec_block); + rec_page_no = rec_block->page.id.page_no(); + } mtr_start(&mtr); + mtr.set_named_space(index->space); + mtr.set_log_mode(btr_mtr->get_log_mode()); + mtr.set_flush_observer(btr_mtr->get_flush_observer()); + + buf_page_get(rec_block->page.id, + rec_block->page.size, RW_X_LATCH, &mtr); if (prev_page_no == FIL_NULL) { hint_page_no = 1 + rec_page_no; @@ -4741,36 +7188,36 @@ btr_store_big_rec_extern_fields( hint_page_no = prev_page_no + 1; } -alloc_another: - block = btr_page_alloc(index, hint_page_no, - FSP_NO_DIR, 0, alloc_mtr, &mtr); - if (UNIV_UNLIKELY(block == NULL)) { - mtr_commit(&mtr); - error = DB_OUT_OF_FILE_SPACE; - goto func_exit; - } + if (op == BTR_STORE_INSERT_BULK) { + mtr_t alloc_mtr; + + mtr_start(&alloc_mtr); + alloc_mtr.set_named_space(index->space); - if (rw_lock_get_x_lock_count(&block->lock) > 1) { - /* This page must have been freed in - btr_mtr previously. Put it aside, and - allocate another page for the BLOB data. */ - ut_ad(alloc_mtr == btr_mtr); - ut_ad(btr_blob_op_is_update(op)); - ut_ad(n_freed_pages < btr_mtr->n_freed_pages); - freed_pages[n_freed_pages++] = block; - goto alloc_another; + block = btr_page_alloc(index, hint_page_no, + FSP_NO_DIR, 0, &alloc_mtr, &mtr); + mtr_commit(&alloc_mtr); + + } else { + block = btr_page_alloc(index, hint_page_no, + FSP_NO_DIR, 0, &mtr, &mtr); } - page_no = buf_block_get_page_no(block); + ut_a(block != NULL); + ut_ad(++n_used <= (n_reserved * pages_in_extent)); + + page_no = block->page.id.page_no(); page = buf_block_get_frame(block); if (prev_page_no != FIL_NULL) { buf_block_t* prev_block; page_t* prev_page; - prev_block = buf_page_get(space_id, zip_size, - prev_page_no, - RW_X_LATCH, &mtr); + prev_block = buf_page_get( + page_id_t(space_id, prev_page_no), + rec_block->page.size, + RW_X_LATCH, &mtr); + buf_block_dbg_add_level(prev_block, SYNC_EXTERN_STORAGE); prev_page = buf_block_get_frame(prev_block); @@ -4816,9 +7263,8 @@ alloc_another: c_stream.next_out = page + FIL_PAGE_DATA; - c_stream.avail_out - = static_cast(page_zip_get_size(page_zip)) - - FIL_PAGE_DATA; + c_stream.avail_out = static_cast( + payload_size_zip); err = deflate(&c_stream, Z_FINISH); ut_a(err == Z_OK || err == Z_STREAM_END); @@ -4844,7 +7290,12 @@ alloc_another: btr_page_reorganize(). However, also the page number of the record may change when B-tree nodes are split or - merged. */ + merged. 
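
On compressed tables the loop above feeds the whole column into a single zlib stream and drains it one BLOB page at a time: deflate() is called with Z_FINISH and a page-sized output window, returns Z_OK while more pages are still needed and Z_STREAM_END on the last one. The same pattern as a self-contained zlib routine (page size and storage are arbitrary here):

#include <cstring>
#include <vector>
#include <zlib.h>

/* Compress len bytes from src into fixed-size chunks, the way the BLOB
writer fills one compressed page per iteration. */
static std::vector<std::vector<unsigned char> > deflate_in_pages(
	const unsigned char*	src,
	std::size_t		len,
	std::size_t		page_payload)
{
	std::vector<std::vector<unsigned char> >	pages;
	z_stream					strm;

	std::memset(&strm, 0, sizeof(strm));

	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) {
		return pages;
	}

	strm.next_in = const_cast<Bytef*>(src);
	strm.avail_in = static_cast<uInt>(len);

	int	err;

	do {
		std::vector<unsigned char> page(page_payload);

		strm.next_out = page.data();
		strm.avail_out = static_cast<uInt>(page.size());

		err = deflate(&strm, Z_FINISH);	/* Z_OK: more pages needed */

		page.resize(page.size() - strm.avail_out);
		pages.push_back(page);
	} while (err == Z_OK);

	/* err == Z_STREAM_END on success; anything else is a stream error. */
	deflateEnd(&strm);

	return pages;
}
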
+ NOTE: FIL_PAGE_FILE_FLUSH_LSN space is + used by R-tree index for a Split Sequence + Number */ + ut_ad(!dict_index_is_spatial(index)); + mlog_write_ulint(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, space_id, @@ -4878,16 +7329,6 @@ alloc_another: goto next_zip_page; } - if (alloc_mtr == &mtr) { - rec_block = buf_page_get( - space_id, zip_size, - rec_page_no, - RW_X_LATCH, &mtr); - buf_block_dbg_add_level( - rec_block, - SYNC_NO_ORDER_CHECK); - } - if (err == Z_STREAM_END) { mach_write_to_4(field_ref + BTR_EXTERN_LEN, 0); @@ -4900,11 +7341,7 @@ alloc_another: } if (prev_page_no == FIL_NULL) { - btr_blob_dbg_add_blob( - rec, big_rec_vec->fields[i] - .field_no, page_no, index, - "store"); - + ut_ad(blob_npages == 0); mach_write_to_4(field_ref + BTR_EXTERN_SPACE_ID, space_id); @@ -4918,17 +7355,19 @@ alloc_another: FIL_PAGE_NEXT); } - page_zip_write_blob_ptr( - page_zip, rec, index, offsets, - big_rec_vec->fields[i].field_no, - alloc_mtr); + /* We compress a page when finish bulk insert.*/ + if (op != BTR_STORE_INSERT_BULK) { + page_zip_write_blob_ptr( + page_zip, rec, index, offsets, + field_no, &mtr); + } next_zip_page: prev_page_no = page_no; /* Commit mtr and release the uncompressed page frame to save memory. */ - btr_blob_free(block, FALSE, &mtr); + btr_blob_free(index, block, FALSE, &mtr); if (err == Z_STREAM_END) { break; @@ -4938,14 +7377,8 @@ next_zip_page: FIL_PAGE_TYPE_BLOB, MLOG_2BYTES, &mtr); - if (extern_len > (UNIV_PAGE_SIZE - - FIL_PAGE_DATA - - BTR_BLOB_HDR_SIZE - - FIL_PAGE_DATA_END)) { - store_len = UNIV_PAGE_SIZE - - FIL_PAGE_DATA - - BTR_BLOB_HDR_SIZE - - FIL_PAGE_DATA_END; + if (extern_len > payload_size) { + store_len = payload_size; } else { store_len = extern_len; } @@ -4966,45 +7399,31 @@ next_zip_page: extern_len -= store_len; - if (alloc_mtr == &mtr) { - rec_block = buf_page_get( - space_id, zip_size, - rec_page_no, - RW_X_LATCH, &mtr); - buf_block_dbg_add_level( - rec_block, - SYNC_NO_ORDER_CHECK); - } - mlog_write_ulint(field_ref + BTR_EXTERN_LEN, 0, - MLOG_4BYTES, alloc_mtr); + MLOG_4BYTES, &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_LEN + 4, big_rec_vec->fields[i].len - extern_len, - MLOG_4BYTES, alloc_mtr); + MLOG_4BYTES, &mtr); if (prev_page_no == FIL_NULL) { - btr_blob_dbg_add_blob( - rec, big_rec_vec->fields[i] - .field_no, page_no, index, - "store"); - + ut_ad(blob_npages == 0); mlog_write_ulint(field_ref + BTR_EXTERN_SPACE_ID, space_id, MLOG_4BYTES, - alloc_mtr); + &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO, page_no, MLOG_4BYTES, - alloc_mtr); + &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_OFFSET, FIL_PAGE_DATA, MLOG_4BYTES, - alloc_mtr); + &mtr); } prev_page_no = page_no; @@ -5020,32 +7439,28 @@ next_zip_page: DBUG_EXECUTE_IF("btr_store_big_rec_extern", error = DB_OUT_OF_FILE_SPACE; goto func_exit;); + + rec_offs_make_nth_extern(offsets, field_no); } + /* Verify that the number of extents used is the same as the number + of extents reserved. 
*/ + ut_ad(page_zip != NULL + || ((n_used + pages_in_extent - 1) / pages_in_extent + == n_reserved)); + ut_ad((n_used + pages_in_extent - 1) / pages_in_extent <= n_reserved); + func_exit: if (page_zip) { deflateEnd(&c_stream); } - if (n_freed_pages) { - ulint i; - - ut_ad(alloc_mtr == btr_mtr); - ut_ad(btr_blob_op_is_update(op)); - - for (i = 0; i < n_freed_pages; i++) { - btr_page_free_low(index, freed_pages[i], 0, true, alloc_mtr); - } - - DBUG_EXECUTE_IF("btr_store_big_rec_extern", - error = DB_OUT_OF_FILE_SPACE; - goto func_exit;); - } - if (heap != NULL) { mem_heap_free(heap); } + fil_space_release_free_extents(space_id, n_reserved); + #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG /* All pointers to externally stored columns in the record must be valid. */ @@ -5097,13 +7512,10 @@ btr_check_blob_fil_page_type( } #endif /* !UNIV_DEBUG */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: FIL_PAGE_TYPE=%lu" - " on BLOB %s space %lu page %lu flags %lx\n", - (ulong) type, read ? "read" : "purge", - (ulong) space_id, (ulong) page_no, (ulong) flags); - ut_error; + ib::fatal() << "FIL_PAGE_TYPE=" << type + << " on BLOB " << (read ? "read" : "purge") + << " space " << space_id << " page " << page_no + << " flags " << flags; } } @@ -5112,7 +7524,6 @@ Frees the space in an externally stored field to the file space management if the field in data is owned by the externally stored field, in a rollback we may have the additional condition that the field must not be inherited. */ -UNIV_INTERN void btr_free_externally_stored_field( /*=============================*/ @@ -5133,7 +7544,7 @@ btr_free_externally_stored_field( to rec, or NULL if rec == NULL */ ulint i, /*!< in: field number of field_ref; ignored if rec == NULL */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback, /*!< in: performing rollback? */ mtr_t* local_mtr MY_ATTRIBUTE((unused))) /*!< in: mtr containing the latch to data an an X-latch to the index tree */ @@ -5143,110 +7554,68 @@ btr_free_externally_stored_field( field_ref + BTR_EXTERN_SPACE_ID); const ulint start_page = mach_read_from_4( field_ref + BTR_EXTERN_PAGE_NO); - ulint rec_zip_size = dict_table_zip_size(index->table); - ulint ext_zip_size; ulint page_no; ulint next_page_no; mtr_t mtr; ut_ad(dict_index_is_clust(index)); - ut_ad(mtr_memo_contains(local_mtr, dict_index_get_lock(index), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains_page(local_mtr, field_ref, - MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_flagged(local_mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK) + || dict_table_is_intrinsic(index->table)); + ut_ad(mtr_is_page_fix( + local_mtr, field_ref, MTR_MEMO_PAGE_X_FIX, index->table)); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); ut_ad(!rec || field_ref == btr_rec_get_field_ref(rec, offsets, i)); + ut_ad(local_mtr->is_named_space( + page_get_space_id(page_align(field_ref)))); if (UNIV_UNLIKELY(!memcmp(field_ref, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE))) { /* In the rollback, we may encounter a clustered index record with some unwritten off-page columns. There is nothing to free then. */ - if (rb_ctx == RB_NONE) { - char buf[3 * 512]; - char *bufend; - ulint ispace = dict_index_get_space(index); - bufend = innobase_convert_name(buf, sizeof buf, - index->name, strlen(index->name), - NULL, - FALSE); - buf[bufend - buf]='\0'; - ib_logf(IB_LOG_LEVEL_ERROR, "Unwritten off-page columns in " - "rollback context %d. 
Table %s index %s space_id %lu " - "index space %lu.", - rb_ctx, index->table->name, buf, space_id, ispace); - } - - ut_a(rb_ctx != RB_NONE); + ut_a(rollback); return; } + ut_ad(!(mach_read_from_4(field_ref + BTR_EXTERN_LEN) + & ~((BTR_EXTERN_OWNER_FLAG + | BTR_EXTERN_INHERITED_FLAG) << 24))); ut_ad(space_id == index->space); - if (UNIV_UNLIKELY(space_id != dict_index_get_space(index))) { - ext_zip_size = fil_space_get_zip_size(space_id); - /* This must be an undo log record in the system tablespace, - that is, in row_purge_upd_exist_or_extern(). - Currently, externally stored records are stored in the - same tablespace as the referring records. */ - ut_ad(!page_get_space_id(page_align(field_ref))); - ut_ad(!rec); - ut_ad(!page_zip); - } else { - ext_zip_size = rec_zip_size; - } - - if (!rec) { + const page_size_t ext_page_size(dict_table_page_size(index->table)); + const page_size_t& rec_page_size(rec == NULL + ? univ_page_size + : ext_page_size); + if (rec == NULL) { /* This is a call from row_purge_upd_exist_or_extern(). */ ut_ad(!page_zip); - rec_zip_size = 0; - } - -#ifdef UNIV_BLOB_DEBUG - if (!(field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) - && !((field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_INHERITED_FLAG) - && (rb_ctx == RB_NORMAL || rb_ctx == RB_RECOVERY))) { - /* This off-page column will be freed. - Check that no references remain. */ - - btr_blob_dbg_t b; - - b.blob_page_no = start_page; - - if (rec) { - /* Remove the reference from the record to the - BLOB. If the BLOB were not freed, the - reference would be removed when the record is - removed. Freeing the BLOB will overwrite the - BTR_EXTERN_PAGE_NO in the field_ref of the - record with FIL_NULL, which would make the - btr_blob_dbg information inconsistent with the - record. */ - b.ref_page_no = page_get_page_no(page_align(rec)); - b.ref_heap_no = page_rec_get_heap_no(rec); - b.ref_field_no = i; - btr_blob_dbg_rbt_delete(index, &b, "free"); - } - - btr_blob_dbg_assert_empty(index, b.blob_page_no); } -#endif /* UNIV_BLOB_DEBUG */ for (;;) { -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG buf_block_t* rec_block; -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ buf_block_t* ext_block; mtr_start(&mtr); + mtr.set_spaces(*local_mtr); + mtr.set_log_mode(local_mtr->get_log_mode()); + + ut_ad(!dict_table_is_temporary(index->table) + || local_mtr->get_log_mode() == MTR_LOG_NO_REDO); -#ifdef UNIV_SYNC_DEBUG + const page_t* p = page_align(field_ref); + + const page_id_t page_id(page_get_space_id(p), + page_get_page_no(p)); + +#ifdef UNIV_DEBUG rec_block = -#endif /* UNIV_SYNC_DEBUG */ - buf_page_get(page_get_space_id(page_align(field_ref)), - rec_zip_size, - page_get_page_no(page_align(field_ref)), - RW_X_LATCH, &mtr); +#endif /* UNIV_DEBUG */ + buf_page_get(page_id, rec_page_size, RW_X_LATCH, &mtr); + buf_block_dbg_add_level(rec_block, SYNC_NO_ORDER_CHECK); page_no = mach_read_from_4(field_ref + BTR_EXTERN_PAGE_NO); @@ -5256,7 +7625,7 @@ btr_free_externally_stored_field( || (mach_read_from_1(field_ref + BTR_EXTERN_LEN) & BTR_EXTERN_OWNER_FLAG) /* Rollback and inherited field */ - || ((rb_ctx == RB_NORMAL || rb_ctx == RB_RECOVERY) + || (rollback && (mach_read_from_1(field_ref + BTR_EXTERN_LEN) & BTR_EXTERN_INHERITED_FLAG))) { @@ -5270,12 +7639,14 @@ btr_free_externally_stored_field( row_log_table_blob_free(index, start_page); } - ext_block = buf_page_get(space_id, ext_zip_size, page_no, - RW_X_LATCH, &mtr); + ext_block = buf_page_get( + page_id_t(space_id, page_no), ext_page_size, + RW_X_LATCH, &mtr); + 
buf_block_dbg_add_level(ext_block, SYNC_EXTERN_STORAGE); page = buf_block_get_frame(ext_block); - if (ext_zip_size) { + if (ext_page_size.is_compressed()) { /* Note that page_zip will be NULL in row_purge_upd_exist_or_extern(). */ switch (fil_page_get_type(page)) { @@ -5287,7 +7658,8 @@ btr_free_externally_stored_field( } next_page_no = mach_read_from_4(page + FIL_PAGE_NEXT); - btr_page_free_low(index, ext_block, 0, true, &mtr); + btr_page_free_low(index, ext_block, 0, + true, &mtr); if (page_zip != NULL) { mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO, @@ -5314,11 +7686,8 @@ btr_free_externally_stored_field( page + FIL_PAGE_DATA + BTR_BLOB_HDR_NEXT_PAGE_NO); - /* We must supply the page level (= 0) as an argument - because we did not store it on the page (we save the - space overhead from an index page header. */ - - btr_page_free_low(index, ext_block, 0, true, &mtr); + btr_page_free_low(index, ext_block, 0, + true, &mtr); mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO, next_page_no, @@ -5334,7 +7703,7 @@ btr_free_externally_stored_field( } /* Commit mtr and release the BLOB block to save memory. */ - btr_blob_free(ext_block, TRUE, &mtr); + btr_blob_free(index, ext_block, TRUE, &mtr); } } @@ -5350,7 +7719,7 @@ btr_rec_free_externally_stored_fields( const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ page_zip_des_t* page_zip,/*!< in: compressed page whose uncompressed part will be updated, or NULL */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? */ mtr_t* mtr) /*!< in: mini-transaction handle which contains an X-latch to record page and to the index tree */ @@ -5359,7 +7728,7 @@ btr_rec_free_externally_stored_fields( ulint i; ut_ad(rec_offs_validate(rec, index, offsets)); - ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_page_fix(mtr, rec, MTR_MEMO_PAGE_X_FIX, index->table)); /* Free possible externally stored fields in the record */ ut_ad(dict_table_is_comp(index->table) == !!rec_offs_comp(offsets)); @@ -5369,7 +7738,7 @@ btr_rec_free_externally_stored_fields( if (rec_offs_nth_extern(offsets, i)) { btr_free_externally_stored_field( index, btr_rec_get_field_ref(rec, offsets, i), - rec, offsets, page_zip, i, rb_ctx, mtr); + rec, offsets, page_zip, i, rollback, mtr); } } } @@ -5388,7 +7757,7 @@ btr_rec_free_updated_extern_fields( part will be updated, or NULL */ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ const upd_t* update, /*!< in: update vector */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? */ mtr_t* mtr) /*!< in: mini-transaction handle which contains an X-latch to record page and to the tree */ { @@ -5396,7 +7765,7 @@ btr_rec_free_updated_extern_fields( ulint i; ut_ad(rec_offs_validate(rec, index, offsets)); - ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_page_fix(mtr, rec, MTR_MEMO_PAGE_X_FIX, index->table)); /* Free possible externally stored fields in the record */ @@ -5414,7 +7783,7 @@ btr_rec_free_updated_extern_fields( btr_free_externally_stored_field( index, data + len - BTR_EXTERN_FIELD_REF_SIZE, rec, offsets, page_zip, - ufield->field_no, rb_ctx, mtr); + ufield->field_no, rollback, mtr); } } } @@ -5422,7 +7791,7 @@ btr_rec_free_updated_extern_fields( /*******************************************************************//** Copies the prefix of an uncompressed BLOB. The clustered index record that points to this BLOB must be protected by a lock or a page latch. 
-@return number of bytes written to buf */ +@return number of bytes written to buf */ static ulint btr_copy_blob_prefix( @@ -5432,8 +7801,7 @@ btr_copy_blob_prefix( ulint len, /*!< in: length of buf, in bytes */ ulint space_id,/*!< in: space id of the BLOB pages */ ulint page_no,/*!< in: page number of the first BLOB page */ - ulint offset, /*!< in: offset on the first BLOB page */ - trx_t* trx) /*!< in: transaction handle */ + ulint offset) /*!< in: offset on the first BLOB page */ { ulint copied_len = 0; @@ -5445,9 +7813,10 @@ btr_copy_blob_prefix( ulint part_len; ulint copy_len; - mtr_start_trx(&mtr, trx); + mtr_start(&mtr); - block = buf_page_get(space_id, 0, page_no, RW_S_LATCH, &mtr); + block = buf_page_get(page_id_t(space_id, page_no), + univ_page_size, RW_S_LATCH, &mtr); buf_block_dbg_add_level(block, SYNC_EXTERN_STORAGE); page = buf_block_get_frame(block); @@ -5479,21 +7848,25 @@ btr_copy_blob_prefix( } } -/*******************************************************************//** -Copies the prefix of a compressed BLOB. The clustered index record -that points to this BLOB must be protected by a lock or a page latch. -@return number of bytes written to buf */ +/** Copies the prefix of a compressed BLOB. +The clustered index record that points to this BLOB must be protected +by a lock or a page latch. +@param[out] buf the externally stored part of the field, +or a prefix of it +@param[in] len length of buf, in bytes +@param[in] page_size compressed BLOB page size +@param[in] space_id space id of the BLOB pages +@param[in] offset offset on the first BLOB page +@return number of bytes written to buf */ static ulint btr_copy_zblob_prefix( -/*==================*/ - byte* buf, /*!< out: the externally stored part of - the field, or a prefix of it */ - ulint len, /*!< in: length of buf, in bytes */ - ulint zip_size,/*!< in: compressed BLOB page size */ - ulint space_id,/*!< in: space id of the BLOB pages */ - ulint page_no,/*!< in: page number of the first BLOB page */ - ulint offset) /*!< in: offset on the first BLOB page */ + byte* buf, + ulint len, + const page_size_t& page_size, + ulint space_id, + ulint page_no, + ulint offset) { ulint page_type = FIL_PAGE_TYPE_ZBLOB; mem_heap_t* heap; @@ -5510,9 +7883,7 @@ btr_copy_zblob_prefix( heap = mem_heap_create(40000); page_zip_set_alloc(&d_stream, heap); - ut_ad(ut_is_2pow(zip_size)); - ut_ad(zip_size >= UNIV_ZIP_SIZE_MIN); - ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); + ut_ad(page_size.is_compressed()); ut_ad(space_id); err = inflateInit(&d_stream); @@ -5526,27 +7897,23 @@ btr_copy_zblob_prefix( bpage is protected by the B-tree page latch that is being held on the clustered index record, or, in row_merge_copy_blobs(), by an exclusive table lock. 
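
btr_copy_blob_prefix() above is a plain linked-list walk: every uncompressed BLOB page stores its part length and the next page number in a small header after FIL_PAGE_DATA, and the copy stops when either the requested prefix is complete or the next-page field is FIL_NULL. The traversal modelled on in-memory pages (the structures here are stand-ins, not the on-disk layout):

#include <algorithm>
#include <cstring>
#include <vector>

static const unsigned long FIL_NULL_MARK = 0xFFFFFFFFUL;	/* ~ FIL_NULL */

/* One BLOB page reduced to the fields the copy loop cares about. */
struct blob_page {
	std::vector<unsigned char>	part;		/* the page's data bytes */
	unsigned long			next_page_no;	/* or FIL_NULL_MARK */
};

/* Copy at most len bytes of the BLOB that starts at page 'first' into buf,
following the chain the way btr_copy_blob_prefix() does. */
static std::size_t copy_blob_prefix(unsigned char* buf, std::size_t len,
				    const std::vector<blob_page>& pages,
				    unsigned long first)
{
	std::size_t	copied	= 0;
	unsigned long	page_no	= first;

	while (page_no != FIL_NULL_MARK && copied < len) {
		const blob_page&	page = pages[page_no];
		const std::size_t	copy_len
			= std::min(page.part.size(), len - copied);

		if (copy_len > 0) {
			std::memcpy(buf + copied, page.part.data(), copy_len);
		}

		copied += copy_len;
		page_no = page.next_page_no;
	}

	return copied;
}
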
*/ - bpage = buf_page_get_zip(space_id, zip_size, page_no); + bpage = buf_page_get_zip(page_id_t(space_id, page_no), + page_size); if (UNIV_UNLIKELY(!bpage)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot load" - " compressed BLOB" - " page %lu space %lu\n", - (ulong) page_no, (ulong) space_id); + ib::error() << "Cannot load compressed BLOB " + << page_id_t(space_id, page_no); goto func_exit; } if (UNIV_UNLIKELY (fil_page_get_type(bpage->zip.data) != page_type)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Unexpected type %lu of" - " compressed BLOB" - " page %lu space %lu\n", - (ulong) fil_page_get_type(bpage->zip.data), - (ulong) page_no, (ulong) space_id); + + ib::error() << "Unexpected type " + << fil_page_get_type(bpage->zip.data) + << " of compressed BLOB page " + << page_id_t(space_id, page_no); + ut_ad(0); goto end_of_blob; } @@ -5563,7 +7930,8 @@ btr_copy_zblob_prefix( } d_stream.next_in = bpage->zip.data + offset; - d_stream.avail_in = static_cast(zip_size - offset); + d_stream.avail_in = static_cast(page_size.physical() + - offset); err = inflate(&d_stream, Z_NO_FLUSH); switch (err) { @@ -5579,26 +7947,21 @@ btr_copy_zblob_prefix( /* fall through */ default: inflate_error: - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: inflate() of" - " compressed BLOB" - " page %lu space %lu returned %d (%s)\n", - (ulong) page_no, (ulong) space_id, - err, d_stream.msg); + ib::error() << "inflate() of compressed BLOB page " + << page_id_t(space_id, page_no) + << " returned " << err + << " (" << d_stream.msg << ")"; + case Z_BUF_ERROR: goto end_of_blob; } if (next_page_no == FIL_NULL) { if (!d_stream.avail_in) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: unexpected end of" - " compressed BLOB" - " page %lu space %lu\n", - (ulong) page_no, - (ulong) space_id); + ib::error() + << "Unexpected end of compressed " + << "BLOB page " + << page_id_t(space_id, page_no); } else { err = inflate(&d_stream, Z_FINISH); switch (err) { @@ -5632,57 +7995,59 @@ func_exit: return(d_stream.total_out); } -/*******************************************************************//** -Copies the prefix of an externally stored field of a record. The -clustered index record that points to this BLOB must be protected by a -lock or a page latch. -@return number of bytes written to buf */ +/** Copies the prefix of an externally stored field of a record. +The clustered index record that points to this BLOB must be protected +by a lock or a page latch. 
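btr_copy_zblob_prefix above inflates a compressed BLOB that is stored across a chain of compressed pages, feeding each page's payload into one z_stream with Z_NO_FLUSH while a next page exists and Z_FINISH at the end of the chain. A standalone sketch of that streaming pattern over an in-memory list of chunks; the zlib calls are real, but the data layout is invented for the illustration:

    #include <zlib.h>
    #include <vector>
    #include <cstring>
    #include <cstdio>

    // Inflate a payload that was deflate-compressed and then split into
    // chunks, the way a zipped BLOB is split across linked pages. Returns
    // the number of bytes written to 'out' (possibly only a prefix), or 0
    // on a hard error. Assumes 'chunks' are the page payloads in order.
    static size_t inflate_chunks(
        const std::vector<std::vector<unsigned char> >& chunks,
        unsigned char* out, size_t out_len)
    {
        z_stream strm;
        std::memset(&strm, 0, sizeof(strm));
        if (inflateInit(&strm) != Z_OK) {
            return 0;
        }

        strm.next_out  = out;
        strm.avail_out = static_cast<uInt>(out_len);

        int err = Z_OK;
        for (size_t i = 0; i < chunks.size() && strm.avail_out > 0; ++i) {
            strm.next_in  = const_cast<unsigned char*>(chunks[i].data());
            strm.avail_in = static_cast<uInt>(chunks[i].size());

            // Z_NO_FLUSH while more chunks follow; Z_FINISH on the last one,
            // mirroring the FIL_NULL "no next page" case in the diff.
            const bool last = (i + 1 == chunks.size());
            err = inflate(&strm, last ? Z_FINISH : Z_NO_FLUSH);

            if (err == Z_STREAM_END) {
                break;                 // whole BLOB decompressed
            }
            if (err != Z_OK && err != Z_BUF_ERROR) {
                std::fprintf(stderr, "inflate() returned %d (%s)\n",
                             err, strm.msg ? strm.msg : "?");
                break;                 // corrupted stream
            }
            // Z_BUF_ERROR with a full output buffer simply means we only
            // asked for a prefix, which is fine here.
        }

        const size_t total = strm.total_out;
        inflateEnd(&strm);
        return (err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR)
                ? total : 0;
    }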
+@param[out] buf the externally stored part of the +field, or a prefix of it +@param[in] len length of buf, in bytes +@param[in] page_size BLOB page size +@param[in] space_id space id of the first BLOB page +@param[in] page_no page number of the first BLOB page +@param[in] offset offset on the first BLOB page +@return number of bytes written to buf */ static ulint btr_copy_externally_stored_field_prefix_low( -/*========================================*/ - byte* buf, /*!< out: the externally stored part of - the field, or a prefix of it */ - ulint len, /*!< in: length of buf, in bytes */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - ulint space_id,/*!< in: space id of the first BLOB page */ - ulint page_no,/*!< in: page number of the first BLOB page */ - ulint offset, /*!< in: offset on the first BLOB page */ - trx_t* trx) /*!< in: transaction handle */ + byte* buf, + ulint len, + const page_size_t& page_size, + ulint space_id, + ulint page_no, + ulint offset) { - if (UNIV_UNLIKELY(len == 0)) { + if (len == 0) { return(0); } - if (zip_size) { - return(btr_copy_zblob_prefix(buf, len, zip_size, + if (page_size.is_compressed()) { + return(btr_copy_zblob_prefix(buf, len, page_size, space_id, page_no, offset)); } else { + ut_ad(page_size.equals_to(univ_page_size)); return(btr_copy_blob_prefix(buf, len, space_id, - page_no, offset, trx)); + page_no, offset)); } } -/*******************************************************************//** -Copies the prefix of an externally stored field of a record. The -clustered index record must be protected by a lock or a page latch. +/** Copies the prefix of an externally stored field of a record. +The clustered index record must be protected by a lock or a page latch. +@param[out] buf the field, or a prefix of it +@param[in] len length of buf, in bytes +@param[in] page_size BLOB page size +@param[in] data 'internally' stored part of the field +containing also the reference to the external part; must be protected by +a lock or a page latch +@param[in] local_len length of data, in bytes @return the length of the copied field, or 0 if the column was being or has been deleted */ -UNIV_INTERN ulint btr_copy_externally_stored_field_prefix( -/*====================================*/ - byte* buf, /*!< out: the field, or a prefix of it */ - ulint len, /*!< in: length of buf, in bytes */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - const byte* data, /*!< in: 'internally' stored part of the - field containing also the reference to - the external part; must be protected by - a lock or a page latch */ - ulint local_len,/*!< in: length of data, in bytes */ - trx_t* trx) /*!< in: transaction handle */ + byte* buf, + ulint len, + const page_size_t& page_size, + const byte* data, + ulint local_len) { ulint space_id; ulint page_no; @@ -5719,29 +8084,28 @@ btr_copy_externally_stored_field_prefix( return(local_len + btr_copy_externally_stored_field_prefix_low(buf + local_len, len - local_len, - zip_size, + page_size, space_id, page_no, - offset, trx)); + offset)); } -/*******************************************************************//** -Copies an externally stored field of a record to mem heap. The -clustered index record must be protected by a lock or a page latch. -@return the whole field copied to heap */ -UNIV_INTERN +/** Copies an externally stored field of a record to mem heap. +The clustered index record must be protected by a lock or a page latch. 
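btr_copy_externally_stored_field_prefix above pulls the space id, page number and offset of the first BLOB page out of the field reference that is stored inline at the end of the locally stored prefix. As a rough illustration of reading that kind of fixed, big-endian reference; the 20-byte size, the field offsets and the flag bits used here are assumptions for the sketch, not quoted from the InnoDB headers:

    #include <cstdint>
    #include <cstddef>
    #include <cassert>

    // Big-endian 4-byte read, the moral equivalent of mach_read_from_4().
    static uint32_t read_be32(const unsigned char* p)
    {
        return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16)
             | (uint32_t(p[2]) <<  8) |  uint32_t(p[3]);
    }

    // Assumed layout of an external field reference appended to the locally
    // stored prefix: space id, first page number, byte offset on that page,
    // then an 8-byte length whose top two bits are treated as flag bits.
    struct extern_ref_sketch {
        uint32_t space_id;
        uint32_t page_no;
        uint32_t offset;
        uint64_t length;
    };

    static extern_ref_sketch parse_extern_ref(const unsigned char* field,
                                              size_t field_len)
    {
        const size_t REF_SIZE = 20;              // assumed reference size
        assert(field_len >= REF_SIZE);
        const unsigned char* ref = field + field_len - REF_SIZE;

        extern_ref_sketch r;
        r.space_id = read_be32(ref + 0);
        r.page_no  = read_be32(ref + 4);
        r.offset   = read_be32(ref + 8);
        r.length   = (uint64_t(read_be32(ref + 12)) << 32) | read_be32(ref + 16);
        r.length  &= ~(uint64_t(3) << 62);       // mask assumed flag bits
        return r;
    }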
+@param[out] len length of the whole field +@param[in] data 'internally' stored part of the field +containing also the reference to the external part; must be protected by +a lock or a page latch +@param[in] page_size BLOB page size +@param[in] local_len length of data +@param[in,out] heap mem heap +@return the whole field copied to heap */ byte* btr_copy_externally_stored_field( -/*=============================*/ - ulint* len, /*!< out: length of the whole field */ - const byte* data, /*!< in: 'internally' stored part of the - field containing also the reference to - the external part; must be protected by - a lock or a page latch */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - ulint local_len,/*!< in: length of data */ - mem_heap_t* heap, /*!< in: mem heap */ - trx_t* trx) /*!< in: transaction handle */ + ulint* len, + const byte* data, + const page_size_t& page_size, + ulint local_len, + mem_heap_t* heap) { ulint space_id; ulint page_no; @@ -5770,30 +8134,30 @@ btr_copy_externally_stored_field( *len = local_len + btr_copy_externally_stored_field_prefix_low(buf + local_len, extern_len, - zip_size, + page_size, space_id, - page_no, offset, - trx); + page_no, offset); return(buf); } -/*******************************************************************//** -Copies an externally stored field of a record to mem heap. -@return the field copied to heap, or NULL if the field is incomplete */ -UNIV_INTERN +/** Copies an externally stored field of a record to mem heap. +@param[in] rec record in a clustered index; must be +protected by a lock or a page latch +@param[in] offset array returned by rec_get_offsets() +@param[in] page_size BLOB page size +@param[in] no field number +@param[out] len length of the field +@param[in,out] heap mem heap +@return the field copied to heap, or NULL if the field is incomplete */ byte* btr_rec_copy_externally_stored_field( -/*=================================*/ - const rec_t* rec, /*!< in: record in a clustered index; - must be protected by a lock or a page latch */ - const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - ulint no, /*!< in: field number */ - ulint* len, /*!< out: length of the field */ - mem_heap_t* heap, /*!< in: mem heap */ - trx_t* trx) /*!< in: transaction handle */ + const rec_t* rec, + const ulint* offsets, + const page_size_t& page_size, + ulint no, + ulint* len, + mem_heap_t* heap) { ulint local_len; const byte* data; @@ -5824,7 +8188,6 @@ btr_rec_copy_externally_stored_field( } return(btr_copy_externally_stored_field(len, data, - zip_size, local_len, heap, - trx)); + page_size, local_len, heap)); } #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index 4d9eab8f2bd..bacc3a4694a 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -27,6 +27,7 @@ Modified 30/07/2014 Jan Lindström jan.lindstrom@mariadb.com #include "btr0defragment.h" #ifndef UNIV_HOTBACKUP +#include "btr0btr.h" #include "btr0cur.h" #include "btr0sea.h" #include "btr0pcur.h" @@ -152,8 +153,7 @@ btr_defragment_init() { srv_defragment_interval = ut_microseconds_to_timer( (ulonglong) (1000000.0 / srv_defragment_frequency)); - mutex_create(btr_defragment_mutex_key, &btr_defragment_mutex, - SYNC_ANY_LATCH); + mutex_create(LATCH_ID_DEFRAGMENT_MUTEX, &btr_defragment_mutex); 
os_thread_create(btr_defragment_thread, NULL, NULL); } @@ -163,7 +163,7 @@ void btr_defragment_shutdown() { mutex_enter(&btr_defragment_mutex); - list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); while(iter != btr_defragment_wq.end()) { btr_defragment_item_t* item = *iter; iter = btr_defragment_wq.erase(iter); @@ -185,7 +185,7 @@ btr_defragment_find_index( dict_index_t* index) /*!< Index to find. */ { mutex_enter(&btr_defragment_mutex); - for (list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + for (std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); iter != btr_defragment_wq.end(); ++iter) { btr_defragment_item_t* item = *iter; @@ -213,14 +213,14 @@ btr_defragment_add_index( dberr_t* err) /*!< out: error code */ { mtr_t mtr; - ulint space = dict_index_get_space(index); - ulint zip_size = dict_table_zip_size(index->table); ulint page_no = dict_index_get_page(index); *err = DB_SUCCESS; mtr_start(&mtr); // Load index rood page. - buf_block_t* block = btr_block_get(space, zip_size, page_no, RW_NO_LATCH, index, &mtr); + const page_id_t page_id(dict_index_get_space(index), page_no); + const page_size_t page_size(dict_table_page_size(index->table)); + buf_block_t* block = btr_block_get(page_id, page_size, RW_NO_LATCH, index, &mtr); page_t* page = NULL; if (block) { @@ -241,7 +241,7 @@ btr_defragment_add_index( btr_pcur_t* pcur = btr_pcur_create_for_mysql(); os_event_t event = NULL; if (!async) { - event = os_event_create(); + event = os_event_create(0); } btr_pcur_open_at_index_side(true, index, BTR_SEARCH_LEAF, pcur, true, 0, &mtr); @@ -265,7 +265,7 @@ btr_defragment_remove_table( dict_table_t* table) /*!< Index to be removed. */ { mutex_enter(&btr_defragment_mutex); - for (list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + for (std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); iter != btr_defragment_wq.end(); ++iter) { btr_defragment_item_t* item = *iter; @@ -287,7 +287,7 @@ btr_defragment_remove_index( dict_index_t* index) /*!< Index to be removed. */ { mutex_enter(&btr_defragment_mutex); - for (list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + for (std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); iter != btr_defragment_wq.end(); ++iter) { btr_defragment_item_t* item = *iter; @@ -316,7 +316,7 @@ btr_defragment_remove_item( btr_defragment_item_t* item) /*!< Item to be removed. 
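The btr0defragment.cc hunks qualify the work-queue iterators with std::list now that the bare "list<>" name is no longer pulled in; the queue itself is the usual mutex-protected list shared between the defragment thread and the add/remove calls. A compact standalone sketch of that pattern, with made-up names rather than the real btr_defragment_* API:

    #include <list>
    #include <mutex>
    #include <iterator>

    // Minimal work-queue in the style of btr_defragment_wq: every operation
    // takes the queue mutex, walks the std::list, and releases the mutex.
    template <typename Item>
    class defrag_queue_sketch {
    public:
        void add(Item* item)
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_wq.push_back(item);
        }

        // Remove every queued item matching 'pred'
        // (e.g. "belongs to this index" / "belongs to this table").
        template <typename Pred>
        void remove_if(Pred pred)
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            for (typename std::list<Item*>::iterator it = m_wq.begin();
                 it != m_wq.end(); ) {
                it = pred(*it) ? m_wq.erase(it) : std::next(it);
            }
        }

        // Pop the next item to work on, or nullptr if the queue is empty.
        Item* get_next()
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (m_wq.empty()) {
                return nullptr;
            }
            Item* item = m_wq.front();
            m_wq.pop_front();
            return item;
        }

    private:
        std::mutex       m_mutex;
        std::list<Item*> m_wq;
    };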
*/ { mutex_enter(&btr_defragment_mutex); - for (list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + for (std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); iter != btr_defragment_wq.end(); ++iter) { if (item == *iter) { @@ -345,7 +345,7 @@ btr_defragment_get_item() //return nullptr; } mutex_enter(&btr_defragment_mutex); - list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); + std::list< btr_defragment_item_t* >::iterator iter = btr_defragment_wq.begin(); if (iter == btr_defragment_wq.end()) { iter = btr_defragment_wq.begin(); } @@ -425,7 +425,7 @@ btr_defragment_merge_pages( dict_index_t* index, /*!< in: index tree */ buf_block_t* from_block, /*!< in: origin of merge */ buf_block_t* to_block, /*!< in: destination of merge */ - ulint zip_size, /*!< in: zip size of the block */ + const page_size_t page_size, /*!< in: page size of the block */ ulint reserved_space, /*!< in: space reserved for future insert to avoid immediate page split */ ulint* max_data_size, /*!< in/out: max data size to @@ -454,7 +454,7 @@ btr_defragment_merge_pages( // Estimate how many records can be moved from the from_page to // the to_page. - if (zip_size) { + if (page_size.is_compressed()) { ulint page_diff = UNIV_PAGE_SIZE - *max_data_size; max_ins_size_to_use = (max_ins_size_to_use > page_diff) ? max_ins_size_to_use - page_diff : 0; @@ -523,7 +523,7 @@ btr_defragment_merge_pages( // Set ibuf free bits if necessary. if (!dict_index_is_clust(index) && page_is_leaf(to_page)) { - if (zip_size) { + if (page_size.is_compressed()) { ibuf_reset_free_bits(to_block); } else { ibuf_update_free_bits_if_full( @@ -538,11 +538,10 @@ btr_defragment_merge_pages( lock_update_merge_left(to_block, orig_pred, from_block); btr_search_drop_page_hash_index(from_block); - btr_level_list_remove(space, zip_size, from_page, - index, mtr); + btr_level_list_remove(space, page_size, (page_t*)from_page, index, mtr); btr_node_ptr_delete(index, from_block, mtr); - btr_blob_dbg_remove(from_page, index, - "btr_defragment_n_pages"); + /* btr_blob_dbg_remove(from_page, index, + "btr_defragment_n_pages"); */ btr_page_free(index, from_block, mtr); } else { // There are still records left on the page, so @@ -591,7 +590,6 @@ btr_defragment_n_pages( mtr_t* mtr) /*!< in/out: mini-transaction */ { ulint space; - ulint zip_size; /* We will need to load the n+1 block because if the last page is freed and we need to modify the prev_page_no of that block. */ buf_block_t* blocks[BTR_DEFRAGMENT_MAX_N_PAGES + 1]; @@ -624,9 +622,9 @@ btr_defragment_n_pages( n_pages = BTR_DEFRAGMENT_MAX_N_PAGES; } - zip_size = dict_table_zip_size(index->table); first_page = buf_block_get_frame(block); level = btr_page_get_level(first_page, mtr); + const page_size_t page_size(dict_table_page_size(index->table)); if (level != 0) { return NULL; @@ -644,7 +642,10 @@ btr_defragment_n_pages( end_of_index = TRUE; break; } - blocks[i] = btr_block_get(space, zip_size, page_no, + + const page_id_t page_id(dict_index_get_space(index), page_no); + + blocks[i] = btr_block_get(page_id, page_size, RW_X_LATCH, index, mtr); } @@ -670,7 +671,7 @@ btr_defragment_n_pages( optimal_page_size = page_get_free_space_of_empty( page_is_comp(first_page)); // For compressed pages, we take compression failures into account. 
- if (zip_size) { + if (page_size.is_compressed()) { ulint size = 0; int i = 0; // We estimate the optimal data size of the index use samples of @@ -687,12 +688,12 @@ btr_defragment_n_pages( } if (i != 0) { size = size / i; - optimal_page_size = min(optimal_page_size, size); + optimal_page_size = ut_min(optimal_page_size, size); } max_data_size = optimal_page_size; } - reserved_space = min((ulint)(optimal_page_size + reserved_space = ut_min((ulint)(optimal_page_size * (1 - srv_defragment_fill_factor)), (data_size_per_rec * srv_defragment_fill_factor_n_recs)); @@ -713,7 +714,7 @@ btr_defragment_n_pages( // Start from the second page. for (uint i = 1; i < n_pages; i ++) { buf_block_t* new_block = btr_defragment_merge_pages( - index, blocks[i], current_block, zip_size, + index, blocks[i], current_block, page_size, reserved_space, &max_data_size, heap, mtr); if (new_block != current_block) { n_defragmented ++; diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc index 01d2e1bb8e2..a5da1b9fb0c 100644 --- a/storage/innobase/btr/btr0pcur.cc +++ b/storage/innobase/btr/btr0pcur.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -35,43 +35,38 @@ Created 2/23/1996 Heikki Tuuri /**************************************************************//** Allocates memory for a persistent cursor object and initializes the cursor. -@return own: persistent cursor */ -UNIV_INTERN +@return own: persistent cursor */ btr_pcur_t* btr_pcur_create_for_mysql(void) /*============================*/ { btr_pcur_t* pcur; + DBUG_ENTER("btr_pcur_create_for_mysql"); - pcur = (btr_pcur_t*) mem_alloc(sizeof(btr_pcur_t)); + pcur = (btr_pcur_t*) ut_malloc_nokey(sizeof(btr_pcur_t)); pcur->btr_cur.index = NULL; btr_pcur_init(pcur); - return(pcur); + DBUG_PRINT("btr_pcur_create_for_mysql", ("pcur: %p", pcur)); + DBUG_RETURN(pcur); } /**************************************************************//** Resets a persistent cursor object, freeing ::old_rec_buf if it is allocated and resetting the other members to their initial values. */ -UNIV_INTERN void btr_pcur_reset( /*===========*/ btr_pcur_t* cursor) /*!< in, out: persistent cursor */ { - if (cursor->old_rec_buf != NULL) { - - mem_free(cursor->old_rec_buf); - - cursor->old_rec_buf = NULL; - } - + btr_pcur_free(cursor); + cursor->old_rec_buf = NULL; cursor->btr_cur.index = NULL; cursor->btr_cur.page_cur.rec = NULL; cursor->old_rec = NULL; cursor->old_n_fields = 0; - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; cursor->latch_mode = BTR_NO_LATCHES; cursor->pos_state = BTR_PCUR_NOT_POSITIONED; @@ -79,14 +74,17 @@ btr_pcur_reset( /**************************************************************//** Frees the memory for a persistent cursor object. 
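In the btr_defragment_n_pages hunk above, the space held back on the merge target is the smaller of two limits: a slice of the optimal page size controlled by srv_defragment_fill_factor, and room for srv_defragment_fill_factor_n_recs records of average size. A small worked sketch of that arithmetic; the input values are illustrative only, not real server defaults:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const unsigned long optimal_page_size = 16000; // usable bytes per page
        const double        fill_factor       = 0.9;   // srv_defragment_fill_factor
        const unsigned long n_recs            = 20;    // ..._fill_factor_n_recs
        const unsigned long data_size_per_rec = 120;   // average record size

        const unsigned long by_fill_factor =
            static_cast<unsigned long>(optimal_page_size * (1 - fill_factor));
        const unsigned long by_record_count = data_size_per_rec * n_recs;

        // Same shape as the diff: reserve the smaller of the two bounds so a
        // freshly merged page still has headroom for future inserts.
        const unsigned long reserved_space =
            std::min(by_fill_factor, by_record_count);

        std::printf("reserve %lu bytes (fill-factor bound %lu, record bound %lu)\n",
                    reserved_space, by_fill_factor, by_record_count);
        return 0;
    }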
*/ -UNIV_INTERN void btr_pcur_free_for_mysql( /*====================*/ btr_pcur_t* cursor) /*!< in, own: persistent cursor */ { - btr_pcur_reset(cursor); - mem_free(cursor); + DBUG_ENTER("btr_pcur_free_for_mysql"); + DBUG_PRINT("btr_pcur_free_for_mysql", ("pcur: %p", cursor)); + + btr_pcur_free(cursor); + ut_free(cursor); + DBUG_VOID_RETURN; } /**************************************************************//** @@ -96,7 +94,6 @@ cursor data structure, or just setting a flag if the cursor id before the first in an EMPTY tree, or after the last in an EMPTY tree. NOTE that the page where the cursor is positioned must not be empty if the index tree is not totally empty! */ -UNIV_INTERN void btr_pcur_store_position( /*====================*/ @@ -122,8 +119,23 @@ btr_pcur_store_position( page = page_align(rec); offs = page_offset(rec); - ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_S_FIX) - || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); +#ifdef UNIV_DEBUG + if (dict_index_is_spatial(index)) { + /* For spatial index, when we do positioning on parent + buffer if necessary, it might not hold latches, but the + tree must be locked to prevent change on the page */ + ut_ad((mtr_memo_contains_flagged( + mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK) + || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_S_FIX) + || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)) + && (block->page.buf_fix_count > 0)); + } else { + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_S_FIX) + || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX) + || dict_table_is_intrinsic(index->table)); + } +#endif /* UNIV_DEBUG */ if (page_is_empty(page)) { /* It must be an empty index tree; NOTE that in this case @@ -135,7 +147,7 @@ btr_pcur_store_position( ut_ad(page_is_leaf(page)); ut_ad(page_get_page_no(page) == index->page); - cursor->old_stored = BTR_PCUR_OLD_STORED; + cursor->old_stored = true; if (page_rec_is_supremum_low(offs)) { @@ -162,18 +174,20 @@ btr_pcur_store_position( cursor->rel_pos = BTR_PCUR_ON; } - cursor->old_stored = BTR_PCUR_OLD_STORED; + cursor->old_stored = true; cursor->old_rec = dict_index_copy_rec_order_prefix( index, rec, &cursor->old_n_fields, &cursor->old_rec_buf, &cursor->buf_size); cursor->block_when_stored = block; + + /* Function try to check if block is S/X latch. */ cursor->modify_clock = buf_block_get_modify_clock(block); + cursor->withdraw_clock = buf_withdraw_clock; } /**************************************************************//** Copies the stored position of a pcur to another pcur. */ -UNIV_INTERN void btr_pcur_copy_stored_position( /*==========================*/ @@ -182,16 +196,13 @@ btr_pcur_copy_stored_position( btr_pcur_t* pcur_donate) /*!< in: pcur from which the info is copied */ { - if (pcur_receive->old_rec_buf) { - mem_free(pcur_receive->old_rec_buf); - } - + ut_free(pcur_receive->old_rec_buf); ut_memcpy(pcur_receive, pcur_donate, sizeof(btr_pcur_t)); if (pcur_donate->old_rec_buf) { pcur_receive->old_rec_buf = (byte*) - mem_alloc(pcur_donate->buf_size); + ut_malloc_nokey(pcur_donate->buf_size); ut_memcpy(pcur_receive->old_rec_buf, pcur_donate->old_rec_buf, pcur_donate->buf_size); @@ -217,7 +228,6 @@ restores to before first or after the last in the tree. 
@return TRUE if the cursor position was stored when it was on a user record and it can be restored on a user record whose ordering fields are identical to the ones of the original user record */ -UNIV_INTERN ibool btr_pcur_restore_position_func( /*===========================*/ @@ -229,13 +239,12 @@ btr_pcur_restore_position_func( { dict_index_t* index; dtuple_t* tuple; - ulint mode; - ulint old_mode; + page_cur_mode_t mode; + page_cur_mode_t old_mode; mem_heap_t* heap; - ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); - ut_ad(cursor->old_stored == BTR_PCUR_OLD_STORED); + ut_ad(mtr->is_active()); + //ut_ad(cursor->old_stored); ut_ad(cursor->pos_state == BTR_PCUR_WAS_POSITIONED || cursor->pos_state == BTR_PCUR_IS_POSITIONED); @@ -244,16 +253,27 @@ btr_pcur_restore_position_func( if (UNIV_UNLIKELY (cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) { + dberr_t err = DB_SUCCESS; /* In these cases we do not try an optimistic restoration, but always do a search */ - btr_cur_open_at_index_side( + err = btr_cur_open_at_index_side( cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE, index, latch_mode, btr_pcur_get_btr_cur(cursor), 0, mtr); - cursor->latch_mode = latch_mode; + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_pcur_restore_position_func " + << " called from file: " + << file << " line: " << line + << " table: " << index->table->name + << " index: " << index->name; + } + + cursor->latch_mode = + BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode); cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->block_when_stored = btr_pcur_get_block(cursor); @@ -263,14 +283,21 @@ btr_pcur_restore_position_func( ut_a(cursor->old_rec); ut_a(cursor->old_n_fields); - if (UNIV_LIKELY(latch_mode == BTR_SEARCH_LEAF) - || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) { + /* Optimistic latching involves S/X latch not required for + intrinsic table instead we would prefer to search fresh. */ + if ((latch_mode == BTR_SEARCH_LEAF + || latch_mode == BTR_MODIFY_LEAF + || latch_mode == BTR_SEARCH_PREV + || latch_mode == BTR_MODIFY_PREV) + && !dict_table_is_intrinsic(cursor->btr_cur.index->table)) { /* Try optimistic restoration. */ - if (buf_page_optimistic_get(latch_mode, - cursor->block_when_stored, - cursor->modify_clock, - file, line, mtr)) { + if (!buf_pool_is_obsolete(cursor->withdraw_clock) + && btr_cur_optimistic_latch_leaves( + cursor->block_when_stored, cursor->modify_clock, + &latch_mode, btr_pcur_get_btr_cur(cursor), + file, line, mtr)) { + cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->latch_mode = latch_mode; @@ -334,7 +361,7 @@ btr_pcur_restore_position_func( break; default: ut_error; - mode = 0; + mode = PAGE_CUR_UNSUPP; } btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode, @@ -343,39 +370,28 @@ btr_pcur_restore_position_func( /* Restore the old search mode */ cursor->search_mode = old_mode; - switch (cursor->rel_pos) { - case BTR_PCUR_ON: - if (btr_pcur_is_on_user_rec(cursor) - && !cmp_dtuple_rec( - tuple, btr_pcur_get_rec(cursor), - rec_get_offsets(btr_pcur_get_rec(cursor), - index, NULL, - ULINT_UNDEFINED, &heap))) { - - /* We have to store the NEW value for - the modify clock, since the cursor can - now be on a different page! 
But we can - retain the value of old_rec */ - - cursor->block_when_stored = - btr_pcur_get_block(cursor); - cursor->modify_clock = - buf_block_get_modify_clock( - cursor->block_when_stored); - cursor->old_stored = BTR_PCUR_OLD_STORED; - - mem_heap_free(heap); - - return(TRUE); - } -#ifdef UNIV_DEBUG - /* fall through */ - case BTR_PCUR_BEFORE: - case BTR_PCUR_AFTER: - break; - default: - ut_error; -#endif /* UNIV_DEBUG */ + ut_ad(cursor->rel_pos == BTR_PCUR_ON + || cursor->rel_pos == BTR_PCUR_BEFORE + || cursor->rel_pos == BTR_PCUR_AFTER); + if (cursor->rel_pos == BTR_PCUR_ON + && btr_pcur_is_on_user_rec(cursor) + && !cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor), + rec_get_offsets(btr_pcur_get_rec(cursor), + index, NULL, ULINT_UNDEFINED, &heap))) { + + /* We have to store the NEW value for the modify clock, + since the cursor can now be on a different page! + But we can retain the value of old_rec */ + + cursor->block_when_stored = btr_pcur_get_block(cursor); + cursor->modify_clock = buf_block_get_modify_clock( + cursor->block_when_stored); + cursor->old_stored = true; + cursor->withdraw_clock = buf_withdraw_clock; + + mem_heap_free(heap); + + return(TRUE); } mem_heap_free(heap); @@ -394,7 +410,6 @@ Moves the persistent cursor to the first record on the next page. Releases the latch on the current page, and bufferunfixes it. Note that there must not be modifications on the current page, as then the x-latch can be released only in mtr_commit. */ -UNIV_INTERN void btr_pcur_move_to_next_page( /*=======================*/ @@ -403,42 +418,57 @@ btr_pcur_move_to_next_page( mtr_t* mtr) /*!< in: mtr */ { ulint next_page_no; - ulint space; - ulint zip_size; page_t* page; buf_block_t* next_block; page_t* next_page; + ulint mode; + dict_table_t* table = btr_pcur_get_btr_cur(cursor)->index->table; ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); ut_ad(btr_pcur_is_after_last_on_page(cursor)); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; page = btr_pcur_get_page(cursor); next_page_no = btr_page_get_next(page, mtr); - space = buf_block_get_space(btr_pcur_get_block(cursor)); - zip_size = buf_block_get_zip_size(btr_pcur_get_block(cursor)); ut_ad(next_page_no != FIL_NULL); - next_block = btr_block_get(space, zip_size, next_page_no, - cursor->latch_mode, - btr_pcur_get_btr_cur(cursor)->index, mtr); + mode = cursor->latch_mode; + switch (mode) { + case BTR_SEARCH_TREE: + mode = BTR_SEARCH_LEAF; + break; + case BTR_MODIFY_TREE: + mode = BTR_MODIFY_LEAF; + } + + /* For intrinsic tables we avoid taking any latches as table is + accessed by only one thread at any given time. 
*/ + if (dict_table_is_intrinsic(table)) { + mode = BTR_NO_LATCHES; + } + + buf_block_t* block = btr_pcur_get_block(cursor); + + next_block = btr_block_get( + page_id_t(block->page.id.space(), next_page_no), + block->page.size, mode, + btr_pcur_get_btr_cur(cursor)->index, mtr); + next_page = buf_block_get_frame(next_block); #ifdef UNIV_BTR_DEBUG ut_a(page_is_comp(next_page) == page_is_comp(page)); ut_a(btr_page_get_prev(next_page, mtr) - == buf_block_get_page_no(btr_pcur_get_block(cursor))); + == btr_pcur_get_block(cursor)->page.id.page_no()); #endif /* UNIV_BTR_DEBUG */ - next_block->check_index_page_at_flush = TRUE; - btr_leaf_page_release(btr_pcur_get_block(cursor), - cursor->latch_mode, mtr); + btr_leaf_page_release(btr_pcur_get_block(cursor), mode, mtr); page_cur_set_before_first(next_block, btr_pcur_get_page_cur(cursor)); - page_check_dir(next_page); + ut_d(page_check_dir(next_page)); } /*********************************************************//** @@ -450,7 +480,6 @@ alphabetical position of the cursor is guaranteed to be sensible on return, but it may happen that the cursor is not positioned on the last record of any page, because the structure of the tree may have changed during the time when the cursor had no latches. */ -UNIV_INTERN void btr_pcur_move_backward_from_page( /*=============================*/ @@ -486,7 +515,7 @@ btr_pcur_move_backward_from_page( mtr_commit(mtr); - mtr_start_trx(mtr, mtr->trx); + mtr_start(mtr); btr_pcur_restore_position(latch_mode2, cursor, mtr); @@ -494,37 +523,42 @@ btr_pcur_move_backward_from_page( prev_page_no = btr_page_get_prev(page, mtr); - if (prev_page_no == FIL_NULL) { - } else if (btr_pcur_is_before_first_on_page(cursor)) { + /* For intrinsic table we don't do optimistic restore and so there is + no left block that is pinned that needs to be released. */ + if (!dict_table_is_intrinsic( + btr_cur_get_index(btr_pcur_get_btr_cur(cursor))->table)) { - prev_block = btr_pcur_get_btr_cur(cursor)->left_block; + if (prev_page_no == FIL_NULL) { + } else if (btr_pcur_is_before_first_on_page(cursor)) { - btr_leaf_page_release(btr_pcur_get_block(cursor), - latch_mode, mtr); + prev_block = btr_pcur_get_btr_cur(cursor)->left_block; - page_cur_set_after_last(prev_block, + btr_leaf_page_release(btr_pcur_get_block(cursor), + latch_mode, mtr); + + page_cur_set_after_last(prev_block, btr_pcur_get_page_cur(cursor)); - } else { + } else { - /* The repositioned cursor did not end on an infimum record on - a page. Cursor repositioning acquired a latch also on the - previous page, but we do not need the latch: release it. */ + /* The repositioned cursor did not end on an infimum + record on a page. Cursor repositioning acquired a latch + also on the previous page, but we do not need the latch: + release it. */ - prev_block = btr_pcur_get_btr_cur(cursor)->left_block; + prev_block = btr_pcur_get_btr_cur(cursor)->left_block; - btr_leaf_page_release(prev_block, latch_mode, mtr); + btr_leaf_page_release(prev_block, latch_mode, mtr); + } } cursor->latch_mode = latch_mode; - - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; } /*********************************************************//** Moves the persistent cursor to the previous record in the tree. If no records are left, the cursor stays 'before first in tree'. 
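btr_pcur_store_position and btr_pcur_restore_position_func above follow the usual optimistic scheme: remember the block, its modify clock (now paired with a buffer-pool withdraw clock) and a copy of the record prefix, then on restore try to re-latch the remembered block and accept it only if the clock is unchanged, falling back to a full search otherwise. A self-contained sketch of the same idea with a plain version counter; all names are invented, and unlike InnoDB it assumes the page object itself cannot be evicted or withdrawn in between:

    #include <cstdint>
    #include <shared_mutex>

    struct page_sketch {
        std::shared_mutex latch;
        uint64_t          modify_clock = 0;  // bumped on every structural change
        // ... records ...
    };

    struct saved_pos_sketch {
        page_sketch* page           = nullptr;
        uint64_t     clock_at_store = 0;
    };

    // Store: remember where we were and how "new" the page looked.
    // The caller is assumed to hold a latch on the page while storing.
    inline saved_pos_sketch store_position(page_sketch* page)
    {
        saved_pos_sketch pos;
        pos.page           = page;
        pos.clock_at_store = page->modify_clock;
        return pos;
    }

    // Restore: the optimistic path succeeds only if nothing changed the page
    // in between; otherwise the caller must re-search from the root
    // (the pessimistic path in the function above).
    inline bool restore_position_optimistic(const saved_pos_sketch& pos)
    {
        std::shared_lock<std::shared_mutex> s_latch(pos.page->latch);
        return pos.page->modify_clock == pos.clock_at_store;
    }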
-@return TRUE if the cursor was not before first in tree */ -UNIV_INTERN +@return TRUE if the cursor was not before first in tree */ ibool btr_pcur_move_to_prev( /*==================*/ @@ -535,7 +569,7 @@ btr_pcur_move_to_prev( ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; if (btr_pcur_is_before_first_on_page(cursor)) { @@ -561,13 +595,12 @@ PAGE_CUR_LE, on the last user record. If no such user record exists, then in the first case sets the cursor after last in tree, and in the latter case before first in tree. The latching mode must be BTR_SEARCH_LEAF or BTR_MODIFY_LEAF. */ -UNIV_INTERN void btr_pcur_open_on_user_rec_func( /*===========================*/ dict_index_t* index, /*!< in: index */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ... */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ... */ ulint latch_mode, /*!< in: BTR_SEARCH_LEAF or BTR_MODIFY_LEAF */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc index e6acb7802f1..88ec4f0e538 100644 --- a/storage/innobase/btr/btr0scrub.cc +++ b/storage/innobase/btr/btr0scrub.cc @@ -77,6 +77,7 @@ static void log_scrub_failure( /*===============*/ + dict_index_t* index, /*!< in: index */ btr_scrub_t* scrub_data, /*!< in: data to store statistics on */ buf_block_t* block, /*!< in: block */ dberr_t err) /*!< in: error */ @@ -100,10 +101,16 @@ log_scrub_failure( reason = "unknown"; scrub_data->scrub_stat.page_split_failures_unknown++; } + + buf_frame_t* buf = buf_block_get_frame(block); + const ulint space_id = mach_read_from_4(buf + FIL_PAGE_SPACE_ID); + const ulint page_no = mach_read_from_4(buf + FIL_PAGE_OFFSET); fprintf(stderr, - "InnoDB: Warning: Failed to scrub page %lu in space %lu : %s\n", - buf_block_get_page_no(block), - buf_block_get_space(block), + "InnoDB: Warning: Failed to scrub index %s table %s page %lu in space %lu : %s\n", + index->name(), + index->table->name.m_name, + page_no, + space_id, reason); } @@ -117,7 +124,7 @@ btr_scrub_lock_dict_func(ulint space, bool lock_to_close_table, uint start = time(0); uint last = start; - while (mutex_enter_nowait_func(&(dict_sys->mutex), file, line)) { + while (mutex_enter_nowait(&(dict_sys->mutex))) { /* if we lock to close a table, we wait forever * if we don't lock to close a table, we check if space * is closing, and then instead give up @@ -344,16 +351,7 @@ btr_optimistic_scrub( page_get_n_recs(buf_block_get_frame(block)) > 2 && (rand() % 100) < test_pessimistic_scrub_pct) { - fprintf(stderr, - "scrub: simulate btr_page_reorganize failed %lu:%lu " - " table: %llu:%s index: %llu:%s get_n_recs(): %lu\n", - buf_block_get_space(block), - buf_block_get_page_no(block), - (ulonglong)scrub_data->current_table->id, - scrub_data->current_table->name, - (ulonglong)scrub_data->current_index->id, - scrub_data->current_index->name, - page_get_n_recs(buf_block_get_frame(block))); + log_scrub_failure(index, scrub_data, block, DB_OVERFLOW); return DB_OVERFLOW; } #endif @@ -392,11 +390,12 @@ btr_pessimistic_scrub( mtr_t* mtr) /*!< in: mtr */ { page_t* page = buf_block_get_frame(block); + if (page_get_n_recs(page) < 2) { /** * There is no way we can split a page with < 2 records */ - log_scrub_failure(scrub_data, block, DB_UNDERFLOW); + log_scrub_failure(index, scrub_data, block, DB_UNDERFLOW); return DB_UNDERFLOW; } @@ -407,17 +406,20 
@@ btr_pessimistic_scrub( ulint n_reserved = 0; if (!fsp_reserve_free_extents(&n_reserved, index->space, n_extents, FSP_NORMAL, mtr)) { - log_scrub_failure(scrub_data, block, + log_scrub_failure(index, scrub_data, block, DB_OUT_OF_FILE_SPACE); return DB_OUT_OF_FILE_SPACE; } /* read block variables */ - ulint space = buf_block_get_space(block); - ulint page_no = buf_block_get_page_no(block); - ulint zip_size = buf_block_get_zip_size(block); - ulint left_page_no = btr_page_get_prev(page, mtr); - ulint right_page_no = btr_page_get_next(page, mtr); + const ulint space_id = mach_read_from_4(page + FIL_PAGE_SPACE_ID); + const ulint page_no = mach_read_from_4(page + FIL_PAGE_OFFSET); + const page_id_t page_id(dict_index_get_space(index), page_no); + const ulint left_page_no = btr_page_get_prev(page, mtr); + const ulint right_page_no = btr_page_get_next(page, mtr); + const page_id_t lpage_id(dict_index_get_space(index), left_page_no); + const page_id_t rpage_id(dict_index_get_space(index), right_page_no); + const page_size_t page_size(dict_table_page_size(index->table)); /** * When splitting page, we need X-latches on left/right brothers @@ -430,19 +432,17 @@ btr_pessimistic_scrub( * and re-lock. We still have x-lock on index * so this should be safe */ - mtr_release_buf_page_at_savepoint(mtr, scrub_data->savepoint, - block); + mtr->release_block_at_savepoint(scrub_data->savepoint, block); buf_block_t* get_block = btr_block_get( - space, zip_size, left_page_no, + lpage_id, page_size, RW_X_LATCH, index, mtr); - get_block->check_index_page_at_flush = TRUE; /** * Refetch block and re-initialize page */ block = btr_block_get( - space, zip_size, page_no, + page_id, page_size, RW_X_LATCH, index, mtr); page = buf_block_get_frame(block); @@ -456,9 +456,8 @@ btr_pessimistic_scrub( if (right_page_no != FIL_NULL) { buf_block_t* get_block = btr_block_get( - space, zip_size, right_page_no, + rpage_id, page_size, RW_X_LATCH, index, mtr); - get_block->check_index_page_at_flush = TRUE; } /* arguments to btr_page_split_and_insert */ @@ -478,7 +477,7 @@ btr_pessimistic_scrub( /** * call split page with NULL as argument for entry to insert */ - if (dict_index_get_page(index) == buf_block_get_page_no(block)) { + if (dict_index_get_page(index) == page_no) { /* The page is the root page * NOTE: ibuf_reset_free_bits is called inside * btr_root_raise_and_insert */ @@ -659,8 +658,9 @@ btr_scrub_free_page( FIL_PAGE_TYPE_ALLOCATED); } - ulint compact = 1; - page_create(block, mtr, compact); + page_create(block, mtr, + dict_table_is_comp(scrub_data->current_table), + dict_index_is_spatial(scrub_data->current_index)); mtr_commit(mtr); @@ -828,11 +828,13 @@ btr_scrub_start_space( ulint space, /*!< in: space */ btr_scrub_t* scrub_data) /*!< in/out: scrub data */ { + bool found; scrub_data->space = space; scrub_data->current_table = NULL; scrub_data->current_index = NULL; + const page_size_t page_size = fil_space_get_page_size(space, &found); - scrub_data->compressed = fil_space_get_zip_size(space) > 0; + scrub_data->compressed = page_size.is_compressed(); scrub_data->scrubbing = check_scrub_setting(scrub_data); return scrub_data->scrubbing; } @@ -891,8 +893,7 @@ UNIV_INTERN void btr_scrub_init() { - mutex_create(scrub_stat_mutex_key, - &scrub_stat_mutex, SYNC_NO_ORDER_CHECK); + mutex_create(LATCH_ID_SCRUB_STAT_MUTEX, &scrub_stat_mutex); memset(&scrub_stat, 0, sizeof(scrub_stat)); } @@ -905,3 +906,4 @@ btr_scrub_cleanup() { mutex_free(&scrub_stat_mutex); } + diff --git a/storage/innobase/btr/btr0sea.cc 
b/storage/innobase/btr/btr0sea.cc index d1263969ce9..60104b650cd 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -33,7 +33,7 @@ Created 2/17/1996 Heikki Tuuri #include "btr0sea.h" #ifdef UNIV_NONINL #include "btr0sea.ic" -#endif +#endif /* UNIV_NOINL */ #include "buf0buf.h" #include "page0page.h" @@ -42,40 +42,37 @@ Created 2/17/1996 Heikki Tuuri #include "btr0pcur.h" #include "btr0btr.h" #include "ha0ha.h" +#include "srv0mon.h" +#include "sync0sync.h" -/** Flag: has the search system been enabled? -Protected by btr_search_latch. */ -UNIV_INTERN char btr_search_enabled = TRUE; +/** Is search system enabled. +Search system is protected by array of latches. */ +char btr_search_enabled = true; -/** A dummy variable to fool the compiler */ -UNIV_INTERN ulint btr_search_this_is_zero = 0; +/** Number of adaptive hash index partition. */ +ulong btr_ahi_parts = 8; +ulint btr_search_n_succ = 0; +ulint btr_search_n_hash_fail = 0; /** padding to prevent other memory update hotspots from residing on the same memory -cache line as btr_search_latch */ +cache line as btr_search_latches */ UNIV_INTERN byte btr_sea_pad1[CACHE_LINE_SIZE]; -/** The latch protecting the adaptive search system: this latch protects the +/** The latches protecting the adaptive search system: this latches protects the (1) positions of records on those pages where a hash index has been built. NOTE: It does not protect values of non-ordering fields within a record from being updated in-place! We can use fact (1) to perform unique searches to -indexes. */ - -/* We will allocate the latch from dynamic memory to get it to the +indexes. We will allocate the latches from dynamic memory to get it to the same DRAM page as other hotspot semaphores */ -UNIV_INTERN rw_lock_t* btr_search_latch_temp; +rw_lock_t** btr_search_latches; /** padding to prevent other memory update hotspots from residing on the same memory cache line */ UNIV_INTERN byte btr_sea_pad2[CACHE_LINE_SIZE]; /** The adaptive hash index */ -UNIV_INTERN btr_search_sys_t* btr_search_sys; - -#ifdef UNIV_PFS_RWLOCK -/* Key to register btr_search_sys with performance schema */ -UNIV_INTERN mysql_pfs_key_t btr_search_latch_key; -#endif /* UNIV_PFS_RWLOCK */ +btr_search_sys_t* btr_search_sys; /** If the number of records on the page divided by this parameter would have been successfully accessed using a hash index, the index @@ -86,6 +83,30 @@ is then built on the page, assuming the global limit has been reached */ before hash index building is started */ #define BTR_SEARCH_BUILD_LIMIT 100 +/** Determine the number of accessed key fields. +@param[in] n_fields number of complete fields +@param[in] n_bytes number of bytes in an incomplete last field +@return number of complete or incomplete fields */ +inline __attribute__((warn_unused_result)) +ulint +btr_search_get_n_fields( + ulint n_fields, + ulint n_bytes) +{ + return(n_fields + (n_bytes > 0 ? 1 : 0)); +} + +/** Determine the number of accessed key fields. +@param[in] cursor b-tree cursor +@return number of complete or incomplete fields */ +inline __attribute__((warn_unused_result)) +ulint +btr_search_get_n_fields( + const btr_cur_t* cursor) +{ + return(btr_search_get_n_fields(cursor->n_fields, cursor->n_bytes)); +} + /********************************************************************//** Builds a hash index on a page with the given parameters. If the page already has a hash index with different parameters, the old hash index is removed. 
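The btr0sea.cc preamble keeps the btr_sea_pad1/btr_sea_pad2 padding arrays so that the search-system latches do not end up on the same CPU cache line as other frequently updated globals (false sharing). A tiny standalone illustration of the same technique with alignas; the 64-byte line size is an assumption, and std::hardware_destructive_interference_size would be the portable choice where available:

    #include <atomic>
    #include <cstddef>

    constexpr std::size_t CACHE_LINE = 64;   // assumed cache-line size

    // Two hot counters forced onto separate cache lines, so writers running
    // on different CPUs do not keep invalidating each other's line.
    struct alignas(CACHE_LINE) padded_counter {
        std::atomic<unsigned long> value{0};
        // alignas pads the struct out to a full cache line.
    };

    static padded_counter search_hits;
    static padded_counter search_misses;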
@@ -103,8 +124,7 @@ btr_search_build_page_hash_index( field */ ibool left_side);/*!< in: hash for searches from left side? */ -/*****************************************************************//** -This function should be called before reserving any btr search mutex, if +/** This function should be called before reserving any btr search mutex, if the intended operation might add nodes to the search system hash table. Because of the latching order, once we have reserved the btr search system latch, we cannot allocate a free frame from the buffer pool. Checks that @@ -112,21 +132,19 @@ there is a free buffer frame allocated for hash table heap in the btr search system. If not, allocates a free frames for the heap. This check makes it probable that, when have reserved the btr search system latch and we need to allocate a new node to the hash table, it will succeed. However, the check -will not guarantee success. */ +will not guarantee success. +@param[in] index index handler */ static void -btr_search_check_free_space_in_heap(void) -/*=====================================*/ +btr_search_check_free_space_in_heap(dict_index_t* index) { hash_table_t* table; mem_heap_t* heap; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); - table = btr_search_sys->hash_index; + table = btr_get_search_table(index); heap = table->heap; @@ -137,96 +155,174 @@ btr_search_check_free_space_in_heap(void) if (heap->free_block == NULL) { buf_block_t* block = buf_block_alloc(NULL); - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); - if (heap->free_block == NULL) { + if (btr_search_enabled + && heap->free_block == NULL) { heap->free_block = block; } else { buf_block_free(block); } - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); } } -/*****************************************************************//** -Creates and initializes the adaptive search system at a database start. */ -UNIV_INTERN +/** Creates and initializes the adaptive search system at a database start. +@param[in] hash_size hash table size. */ void -btr_search_sys_create( -/*==================*/ - ulint hash_size) /*!< in: hash index hash table size */ +btr_search_sys_create(ulint hash_size) { - /* We allocate the search latch from dynamic memory: - see above at the global variable definition */ + /* Search System is divided into n parts. + Each part controls access to distinct set of hash buckets from + hash table through its own latch. */ + + /* Step-1: Allocate latches (1 per part). */ + btr_search_latches = reinterpret_cast( + ut_malloc(sizeof(rw_lock_t*) * btr_ahi_parts, mem_key_ahi)); + + for (ulint i = 0; i < btr_ahi_parts; ++i) { + + btr_search_latches[i] = reinterpret_cast( + ut_malloc(sizeof(rw_lock_t), mem_key_ahi)); + + rw_lock_create(btr_search_latch_key, + btr_search_latches[i], SYNC_SEARCH_SYS); + } - btr_search_latch_temp = (rw_lock_t*) mem_alloc(sizeof(rw_lock_t)); + /* Step-2: Allocate hash tablees. 
*/ + btr_search_sys = reinterpret_cast( + ut_malloc(sizeof(btr_search_sys_t), mem_key_ahi)); - rw_lock_create(btr_search_latch_key, &btr_search_latch, - SYNC_SEARCH_SYS); + btr_search_sys->hash_tables = reinterpret_cast( + ut_malloc(sizeof(hash_table_t*) * btr_ahi_parts, mem_key_ahi)); - btr_search_sys = (btr_search_sys_t*) - mem_alloc(sizeof(btr_search_sys_t)); + for (ulint i = 0; i < btr_ahi_parts; ++i) { + + btr_search_sys->hash_tables[i] = + ib_create((hash_size / btr_ahi_parts), + LATCH_ID_HASH_TABLE_MUTEX, + 0, MEM_HEAP_FOR_BTR_SEARCH); - btr_search_sys->hash_index = ha_create(hash_size, 0, - MEM_HEAP_FOR_BTR_SEARCH, 0); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG - btr_search_sys->hash_index->adaptive = TRUE; + btr_search_sys->hash_tables[i]->adaptive = TRUE; #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + } +} + +/** Resize hash index hash table. +@param[in] hash_size hash index hash table size */ +void +btr_search_sys_resize(ulint hash_size) +{ + /* Step-1: Lock all search latches in exclusive mode. */ + btr_search_x_lock_all(); + + if (btr_search_enabled) { + + btr_search_x_unlock_all(); + ib::error() << "btr_search_sys_resize failed because" + " hash index hash table is not empty."; + ut_ad(0); + return; + } + + /* Step-2: Recreate hash tables with new size. */ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + + mem_heap_free(btr_search_sys->hash_tables[i]->heap); + hash_table_free(btr_search_sys->hash_tables[i]); + + btr_search_sys->hash_tables[i] = + ib_create((hash_size / btr_ahi_parts), + LATCH_ID_HASH_TABLE_MUTEX, + 0, MEM_HEAP_FOR_BTR_SEARCH); + +#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG + btr_search_sys->hash_tables[i]->adaptive = TRUE; +#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + } + + /* Step-3: Unlock all search latches from exclusive mode. */ + btr_search_x_unlock_all(); } -/*****************************************************************//** -Frees the adaptive search system at a database shutdown. */ -UNIV_INTERN +/** Frees the adaptive search system at a database shutdown. */ void -btr_search_sys_free(void) -/*=====================*/ +btr_search_sys_free() { - rw_lock_free(&btr_search_latch); - mem_free(btr_search_latch_temp); - btr_search_latch_temp = NULL; - mem_heap_free(btr_search_sys->hash_index->heap); - hash_table_free(btr_search_sys->hash_index); - mem_free(btr_search_sys); + ut_ad(btr_search_sys != NULL && btr_search_latches != NULL); + + /* Step-1: Release the hash tables. */ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + + mem_heap_free(btr_search_sys->hash_tables[i]->heap); + hash_table_free(btr_search_sys->hash_tables[i]); + + } + + ut_free(btr_search_sys->hash_tables); + ut_free(btr_search_sys); btr_search_sys = NULL; + + /* Step-2: Release all allocates latches. */ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + + rw_lock_free(btr_search_latches[i]); + ut_free(btr_search_latches[i]); + } + + ut_free(btr_search_latches); + btr_search_latches = NULL; } -/********************************************************************//** -Set index->ref_count = 0 on all indexes of a table. */ +/** Set index->ref_count = 0 on all indexes of a table. 
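btr_search_sys_create above splits the adaptive hash index into btr_ahi_parts partitions, each with its own rw-latch and its own hash table, so that lookups touching different partitions no longer serialize on one global search latch. A stripped-down sketch of the partitioning scheme; the choice of key and the use of std::shared_mutex/std::unordered_map are stand-ins, not the InnoDB implementation:

    #include <unordered_map>
    #include <shared_mutex>
    #include <vector>
    #include <memory>
    #include <cstdint>
    #include <cstddef>

    // One partition = one latch + one hash table. An entry identified by
    // (index_id, fold) always lands in the partition chosen from index_id,
    // so work on one index contends on only one latch.
    class partitioned_ahi_sketch {
    public:
        explicit partitioned_ahi_sketch(std::size_t n_parts)
            : m_parts(n_parts)
        {
            for (auto& p : m_parts) {
                p = std::make_unique<partition>();
            }
        }

        void insert(uint64_t index_id, uint64_t fold, const void* rec)
        {
            partition& p = part_for(index_id);
            std::unique_lock<std::shared_mutex> x_latch(p.latch);
            p.table[fold] = rec;
        }

        const void* search(uint64_t index_id, uint64_t fold) const
        {
            const partition& p = part_for(index_id);
            std::shared_lock<std::shared_mutex> s_latch(p.latch);
            auto it = p.table.find(fold);
            return it == p.table.end() ? nullptr : it->second;
        }

    private:
        struct partition {
            mutable std::shared_mutex                  latch;
            std::unordered_map<uint64_t, const void*>  table;
        };

        partition& part_for(uint64_t index_id)
        {
            return *m_parts[index_id % m_parts.size()];
        }
        const partition& part_for(uint64_t index_id) const
        {
            return *m_parts[index_id % m_parts.size()];
        }

        std::vector<std::unique_ptr<partition>> m_parts;
    };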
+@param[in,out] table table handler */ static void btr_search_disable_ref_count( -/*=========================*/ - dict_table_t* table) /*!< in/out: table */ + dict_table_t* table) { dict_index_t* index; ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - for (index = dict_table_get_first_index(table); index; + for (index = dict_table_get_first_index(table); + index != NULL; index = dict_table_get_next_index(index)) { + ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); + index->search_info->ref_count = 0; } } -/********************************************************************//** -Disable the adaptive hash search system and empty the index. */ -UNIV_INTERN +/** Disable the adaptive hash search system and empty the index. +@param[in] need_mutex need to acquire dict_sys->mutex */ void -btr_search_disable(void) -/*====================*/ +btr_search_disable( + bool need_mutex) { dict_table_t* table; - mutex_enter(&dict_sys->mutex); - rw_lock_x_lock(&btr_search_latch); + if (need_mutex) { + mutex_enter(&dict_sys->mutex); + } - btr_search_enabled = FALSE; + ut_ad(mutex_own(&dict_sys->mutex)); + btr_search_x_lock_all(); + + if (!btr_search_enabled) { + if (need_mutex) { + mutex_exit(&dict_sys->mutex); + } + + btr_search_x_unlock_all(); + return; + } + + btr_search_enabled = false; /* Clear the index->search_info->ref_count of every index in the data dictionary cache. */ @@ -242,51 +338,53 @@ btr_search_disable(void) btr_search_disable_ref_count(table); } - mutex_exit(&dict_sys->mutex); + if (need_mutex) { + mutex_exit(&dict_sys->mutex); + } /* Set all block->index = NULL. */ buf_pool_clear_hash_index(); /* Clear the adaptive hash index. */ - hash_table_clear(btr_search_sys->hash_index); - mem_heap_empty(btr_search_sys->hash_index->heap); + for (ulint i = 0; i < btr_ahi_parts; ++i) { + hash_table_clear(btr_search_sys->hash_tables[i]); + mem_heap_empty(btr_search_sys->hash_tables[i]->heap); + } - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock_all(); } -/********************************************************************//** -Enable the adaptive hash search system. */ -UNIV_INTERN +/** Enable the adaptive hash search system. */ void -btr_search_enable(void) -/*====================*/ +btr_search_enable() { - rw_lock_x_lock(&btr_search_latch); - - btr_search_enabled = TRUE; + buf_pool_mutex_enter_all(); + if (srv_buf_pool_old_size != srv_buf_pool_size) { + buf_pool_mutex_exit_all(); + return; + } + buf_pool_mutex_exit_all(); - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_lock_all(); + btr_search_enabled = true; + btr_search_x_unlock_all(); } -/*****************************************************************//** -Creates and initializes a search info struct. -@return own: search info struct */ -UNIV_INTERN +/** Creates and initializes a search info struct. +@param[in] heap heap where created. 
+@return own: search info struct */ btr_search_t* -btr_search_info_create( -/*===================*/ - mem_heap_t* heap) /*!< in: heap where created */ +btr_search_info_create(mem_heap_t* heap) { btr_search_t* info; info = (btr_search_t*) mem_heap_alloc(heap, sizeof(btr_search_t)); -#ifdef UNIV_DEBUG - info->magic_n = BTR_SEARCH_MAGIC_N; -#endif /* UNIV_DEBUG */ + ut_d(info->magic_n = BTR_SEARCH_MAGIC_N); info->ref_count = 0; info->root_guess = NULL; + info->withdraw_clock = 0; info->hash_analysis = 0; info->n_hash_potential = 0; @@ -309,53 +407,50 @@ btr_search_info_create( return(info); } -/*****************************************************************//** -Returns the value of ref_count. The value is protected by -btr_search_latch. -@return ref_count value. */ -UNIV_INTERN +/** Returns the value of ref_count. The value is protected by latch. +@param[in] info search info +@param[in] index index identifier +@return ref_count value. */ ulint btr_search_info_get_ref_count( -/*==========================*/ - btr_search_t* info) /*!< in: search info. */ + btr_search_t* info, + dict_index_t* index) { - ulint ret; + ulint ret = 0; + + if (!btr_search_enabled) { + return(ret); + } ut_ad(info); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); - rw_lock_s_lock(&btr_search_latch); + btr_search_s_lock(index); ret = info->ref_count; - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); return(ret); } -/*********************************************************************//** -Updates the search info of an index about hash successes. NOTE that info +/** Updates the search info of an index about hash successes. NOTE that info is NOT protected by any semaphore, to save CPU time! Do not assume its fields -are consistent. */ +are consistent. +@param[in,out] info search info +@param[in] cursor cursor which was just positioned */ static void btr_search_info_update_hash( -/*========================*/ - btr_search_t* info, /*!< in/out: search info */ - const btr_cur_t* cursor)/*!< in: cursor which was just positioned */ + btr_search_t* info, + btr_cur_t* cursor) { - dict_index_t* index; + dict_index_t* index = cursor->index; ulint n_unique; int cmp; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - - index = cursor->index; + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); if (dict_index_is_ibuf(index)) { /* So many deletes are performed on an insert buffer tree @@ -441,7 +536,6 @@ set_new_recomm: info->n_fields = n_unique; info->n_bytes = 0; - } else if (cursor->low_match > cursor->up_match) { info->n_fields = cursor->up_match + 1; @@ -455,27 +549,24 @@ set_new_recomm: } } -/*********************************************************************//** -Updates the block search info on hash successes. NOTE that info and -block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any +/** Update the block search info on hash successes. NOTE that info and +block->n_hash_helps, n_fields, n_bytes, left_side are NOT protected by any semaphore, to save CPU time! Do not assume the fields are consistent. 
-@return TRUE if building a (new) hash index on the block is recommended */ +@return TRUE if building a (new) hash index on the block is recommended +@param[in,out] info search info +@param[in,out] block buffer block +@param[in] cursor cursor */ static ibool btr_search_update_block_hash_info( -/*==============================*/ - btr_search_t* info, /*!< in: search info */ - buf_block_t* block, /*!< in: buffer block */ - btr_cur_t* cursor MY_ATTRIBUTE((unused))) - /*!< in: cursor */ + btr_search_t* info, + buf_block_t* block, + const btr_cur_t* cursor MY_ATTRIBUTE((unused))) { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); - ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED) - || rw_lock_own(&block->lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(cursor); + ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X)); + ut_ad(rw_lock_own(&block->lock, RW_LOCK_S) + || rw_lock_own(&block->lock, RW_LOCK_X)); info->last_hash_succ = FALSE; @@ -533,32 +624,31 @@ btr_search_update_block_hash_info( return(FALSE); } -/*********************************************************************//** -Updates a hash node reference when it has been unsuccessfully used in a +/** Updates a hash node reference when it has been unsuccessfully used in a search which could have succeeded with the used hash parameters. This can happen because when building a hash index for a page, we do not check what happens at page boundaries, and therefore there can be misleading hash nodes. Also, collisions in the fold value can lead to misleading references. This function lazily fixes these imperfections in the hash -index. */ +index. +@param[in] info search info +@param[in] block buffer block where cursor positioned +@param[in] cursor cursor */ static void btr_search_update_hash_ref( -/*=======================*/ - btr_search_t* info, /*!< in: search info */ - buf_block_t* block, /*!< in: buffer block where cursor positioned */ - btr_cur_t* cursor) /*!< in: cursor */ + const btr_search_t* info, + buf_block_t* block, + const btr_cur_t* cursor) { dict_index_t* index; ulint fold; rec_t* rec; ut_ad(cursor->flag == BTR_CUR_HASH_FAIL); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED) - || rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X)); + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S) + || rw_lock_own(&(block->lock), RW_LOCK_X)); ut_ad(page_align(btr_cur_get_rec(cursor)) == buf_block_get_frame(block)); @@ -569,6 +659,7 @@ btr_search_update_hash_ref( return; } + ut_ad(block->page.id.space() == index->space); ut_a(index == cursor->index); ut_a(!dict_index_is_ibuf(index)); @@ -595,35 +686,28 @@ btr_search_update_hash_ref( if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); - ha_insert_for_fold(btr_search_sys->hash_index, fold, + ha_insert_for_fold(btr_get_search_table(index), fold, block, rec); MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); } } -/*********************************************************************//** -Updates the search info. */ -UNIV_INTERN +/** Updates the search info. 
+@param[in,out] info search info +@param[in] cursor cursor which was just positioned */ void btr_search_info_update_slow( -/*========================*/ - btr_search_t* info, /*!< in/out: search info */ - btr_cur_t* cursor) /*!< in: cursor which was just positioned */ + btr_search_t* info, + btr_cur_t* cursor) { buf_block_t* block; ibool build_index; - ulint* params; - ulint* params2; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X)); block = btr_cur_get_block(cursor); @@ -638,74 +722,57 @@ btr_search_info_update_slow( if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) { - btr_search_check_free_space_in_heap(); + btr_search_check_free_space_in_heap(cursor->index); } if (cursor->flag == BTR_CUR_HASH_FAIL) { /* Update the hash node reference, if appropriate */ - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(cursor->index); btr_search_update_hash_ref(info, block, cursor); - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(cursor->index); } if (build_index) { /* Note that since we did not protect block->n_fields etc. with any semaphore, the values can be inconsistent. We have - to check inside the function call that they make sense. We - also malloc an array and store the values there to make sure - the compiler does not let the function call parameters change - inside the called function. It might be that the compiler - would optimize the call just to pass pointers to block. */ - - params = (ulint*) mem_alloc(3 * sizeof(ulint)); - params[0] = block->n_fields; - params[1] = block->n_bytes; - params[2] = block->left_side; - - /* Make sure the compiler cannot deduce the values and do - optimizations */ - - params2 = params + btr_search_this_is_zero; - - btr_search_build_page_hash_index(cursor->index, - block, - params2[0], - params2[1], - params2[2]); - mem_free(params); + to check inside the function call that they make sense. */ + btr_search_build_page_hash_index(cursor->index, block, + block->n_fields, + block->n_bytes, + block->left_side); } } -/******************************************************************//** -Checks if a guessed position for a tree cursor is right. Note that if +/** Checks if a guessed position for a tree cursor is right. Note that if mode is PAGE_CUR_LE, which is used in inserts, and the function returns TRUE, then cursor->up_match and cursor->low_match both have sensible values. -@return TRUE if success */ +@param[in,out] cursor guess cursor position +@param[in] can_only_compare_to_cursor_rec + if we do not have a latch on the page of cursor, + but a latch corresponding search system, then + ONLY the columns of the record UNDER the cursor + are protected, not the next or previous record + in the chain: we cannot look at the next or + previous record to check our guess! 
+@param[in] tuple data tuple +@param[in] mode PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G, PAGE_CUR_GE +@param[in] mtr mini transaction +@return TRUE if success */ static ibool btr_search_check_guess( -/*===================*/ - btr_cur_t* cursor, /*!< in: guessed cursor position */ + btr_cur_t* cursor, ibool can_only_compare_to_cursor_rec, - /*!< in: if we do not have a latch on the page - of cursor, but only a latch on - btr_search_latch, then ONLY the columns - of the record UNDER the cursor are - protected, not the next or previous record - in the chain: we cannot look at the next or - previous record to check our guess! */ - const dtuple_t* tuple, /*!< in: data tuple */ - ulint mode, /*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G, - or PAGE_CUR_GE */ - mtr_t* mtr) /*!< in: mtr */ + const dtuple_t* tuple, + ulint mode, + mtr_t* mtr) { rec_t* rec; ulint n_unique; ulint match; - ulint bytes; int cmp; mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; @@ -720,15 +787,13 @@ btr_search_check_guess( ut_ad(page_rec_is_user_rec(rec)); match = 0; - bytes = 0; offsets = rec_get_offsets(rec, cursor->index, offsets, n_unique, &heap); - cmp = page_cmp_dtuple_rec_with_match(tuple, rec, - offsets, &match, &bytes); + cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match); if (mode == PAGE_CUR_GE) { - if (cmp == 1) { + if (cmp > 0) { goto exit_func; } @@ -739,18 +804,18 @@ btr_search_check_guess( goto exit_func; } } else if (mode == PAGE_CUR_LE) { - if (cmp == -1) { + if (cmp < 0) { goto exit_func; } cursor->low_match = match; } else if (mode == PAGE_CUR_G) { - if (cmp != -1) { + if (cmp >= 0) { goto exit_func; } } else if (mode == PAGE_CUR_L) { - if (cmp != 1) { + if (cmp <= 0) { goto exit_func; } } @@ -762,7 +827,6 @@ btr_search_check_guess( } match = 0; - bytes = 0; if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) { rec_t* prev_rec; @@ -780,12 +844,12 @@ btr_search_check_guess( offsets = rec_get_offsets(prev_rec, cursor->index, offsets, n_unique, &heap); - cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec, - offsets, &match, &bytes); + cmp = cmp_dtuple_rec_with_match( + tuple, prev_rec, offsets, &match); if (mode == PAGE_CUR_GE) { - success = cmp == 1; + success = cmp > 0; } else { - success = cmp != -1; + success = cmp >= 0; } goto exit_func; @@ -809,13 +873,13 @@ btr_search_check_guess( offsets = rec_get_offsets(next_rec, cursor->index, offsets, n_unique, &heap); - cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec, - offsets, &match, &bytes); + cmp = cmp_dtuple_rec_with_match( + tuple, next_rec, offsets, &match); if (mode == PAGE_CUR_LE) { - success = cmp == -1; + success = cmp < 0; cursor->up_match = match; } else { - success = cmp != 1; + success = cmp <= 0; } } exit_func: @@ -825,34 +889,53 @@ exit_func: return(success); } -/******************************************************************//** -Tries to guess the right search position based on the hash search info +static +void +btr_search_failure(btr_search_t* info, btr_cur_t* cursor) +{ + cursor->flag = BTR_CUR_HASH_FAIL; + +#ifdef UNIV_SEARCH_PERF_STAT + ++info->n_hash_fail; + + if (info->n_hash_succ > 0) { + --info->n_hash_succ; + } +#endif /* UNIV_SEARCH_PERF_STAT */ + + info->last_hash_succ = FALSE; +} + +/** Tries to guess the right search position based on the hash search info of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts, and the function returns TRUE, then cursor->up_match and cursor->low_match both have sensible values. 
-@return TRUE if succeeded */ -UNIV_INTERN +@param[in,out] index index +@param[in,out] info index search info +@param[in] tuple logical record +@param[in] mode PAGE_CUR_L, .... +@param[in] latch_mode BTR_SEARCH_LEAF, ...; + NOTE that only if has_search_latch is 0, we will + have a latch set on the cursor page, otherwise + we assume the caller uses his search latch + to protect the record! +@param[out] cursor tree cursor +@param[in] has_search_latch + latch mode the caller currently has on + search system: RW_S/X_LATCH or 0 +@param[in] mtr mini transaction +@return TRUE if succeeded */ ibool btr_search_guess_on_hash( -/*=====================*/ - dict_index_t* index, /*!< in: index */ - btr_search_t* info, /*!< in: index search info */ - const dtuple_t* tuple, /*!< in: logical record */ - ulint mode, /*!< in: PAGE_CUR_L, ... */ - ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ...; - NOTE that only if has_search_latch - is 0, we will have a latch set on - the cursor page, otherwise we assume - the caller uses his search latch - to protect the record! */ - btr_cur_t* cursor, /*!< out: tree cursor */ - ulint has_search_latch,/*!< in: latch mode the caller - currently has on btr_search_latch: - RW_S_LATCH, RW_X_LATCH, or 0 */ - mtr_t* mtr) /*!< in: mtr */ + dict_index_t* index, + btr_search_t* info, + const dtuple_t* tuple, + ulint mode, + ulint latch_mode, + btr_cur_t* cursor, + ulint has_search_latch, + mtr_t* mtr) { - buf_pool_t* buf_pool; - buf_block_t* block; const rec_t* rec; ulint fold; index_id_t index_id; @@ -860,15 +943,23 @@ btr_search_guess_on_hash( btr_cur_t cursor2; btr_pcur_t pcur; #endif + + if (!btr_search_enabled) { + return(FALSE); + } + ut_ad(index && info && tuple && cursor && mtr); ut_ad(!dict_index_is_ibuf(index)); ut_ad((latch_mode == BTR_SEARCH_LEAF) || (latch_mode == BTR_MODIFY_LEAF)); + /* Not supported for spatial index */ + ut_ad(!dict_index_is_spatial(index)); + /* Note that, for efficiency, the struct info may not be protected by any latch here! 
*/ - if (UNIV_UNLIKELY(info->n_hash_potential == 0)) { + if (info->n_hash_potential == 0) { return(FALSE); } @@ -876,8 +967,7 @@ btr_search_guess_on_hash( cursor->n_fields = info->n_fields; cursor->n_bytes = info->n_bytes; - if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple) - < cursor->n_fields + (cursor->n_bytes > 0))) { + if (dtuple_get_n_fields(tuple) < btr_search_get_n_fields(cursor)) { return(FALSE); } @@ -892,49 +982,69 @@ btr_search_guess_on_hash( cursor->fold = fold; cursor->flag = BTR_CUR_HASH; - if (UNIV_LIKELY(!has_search_latch)) { - rw_lock_s_lock(&btr_search_latch); + if (!has_search_latch) { + btr_search_s_lock(index); - if (UNIV_UNLIKELY(!btr_search_enabled)) { - goto failure_unlock; + if (!btr_search_enabled) { + btr_search_s_unlock(index); + + btr_search_failure(info, cursor); + + return(FALSE); } } - ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX); - ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0); + ut_ad(rw_lock_get_writer(btr_get_search_latch(index)) != RW_LOCK_X); + ut_ad(rw_lock_get_reader_count(btr_get_search_latch(index)) > 0); + + rec = (rec_t*) ha_search_and_get_data( + btr_get_search_table(index), fold); + + if (rec == NULL) { - rec = (rec_t*) ha_search_and_get_data(btr_search_sys->hash_index, fold); + if (!has_search_latch) { + btr_search_s_unlock(index); + } + + btr_search_failure(info, cursor); - if (UNIV_UNLIKELY(!rec)) { - goto failure_unlock; + return(FALSE); } - block = buf_block_align(rec); + buf_block_t* block = buf_block_align(rec); + + if (!has_search_latch) { - if (UNIV_LIKELY(!has_search_latch)) { + if (!buf_page_get_known_nowait( + latch_mode, block, BUF_MAKE_YOUNG, + __FILE__, __LINE__, mtr)) { - if (UNIV_UNLIKELY( - !buf_page_get_known_nowait(latch_mode, block, - BUF_MAKE_YOUNG, - __FILE__, __LINE__, - mtr))) { - goto failure_unlock; + if (!has_search_latch) { + btr_search_s_unlock(index); + } + + btr_search_failure(info, cursor); + + return(FALSE); } - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH); } - if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) { + if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) { + ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH); - if (UNIV_LIKELY(!has_search_latch)) { + if (!has_search_latch) { btr_leaf_page_release(block, latch_mode, mtr); } - goto failure; + btr_search_failure(info, cursor); + + return(FALSE); } ut_ad(page_rec_is_user_rec(rec)); @@ -943,23 +1053,26 @@ btr_search_guess_on_hash( /* Check the validity of the guess within the page */ - /* If we only have the latch on btr_search_latch, not on the + /* If we only have the latch on search system, not on the page, it only protects the columns of the record the cursor is positioned on. We cannot look at the next of the previous record to determine if our guess for the cursor position is right. 
*/ - if (UNIV_UNLIKELY(index_id != btr_page_get_index_id(block->frame)) + if (index_id != btr_page_get_index_id(block->frame) || !btr_search_check_guess(cursor, has_search_latch, tuple, mode, mtr)) { - if (UNIV_LIKELY(!has_search_latch)) { + + if (!has_search_latch) { btr_leaf_page_release(block, latch_mode, mtr); } - goto failure; + btr_search_failure(info, cursor); + + return(FALSE); } - if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) { + if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) { info->n_hash_potential++; } @@ -975,8 +1088,9 @@ btr_search_guess_on_hash( btr_leaf_page_release(block, latch_mode, mtr); - btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode, - &cursor2, 0, mtr); + btr_cur_search_to_nth_level( + index, 0, tuple, mode, latch_mode, &cursor2, 0, mtr); + if (mode == PAGE_CUR_GE && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) { @@ -986,8 +1100,9 @@ btr_search_guess_on_hash( info->last_hash_succ = FALSE; - btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode, - &pcur, mtr); + btr_pcur_open_on_user_rec( + index, tuple, mode, latch_mode, &pcur, mtr); + ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor)); } else { ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor)); @@ -999,62 +1114,38 @@ btr_search_guess_on_hash( #endif info->last_hash_succ = TRUE; - if (UNIV_LIKELY(!has_search_latch) - && buf_page_peek_if_too_old(&block->page)) { + if (!has_search_latch && buf_page_peek_if_too_old(&block->page)) { buf_page_make_young(&block->page); } /* Increment the page get statistics though we did not really fix the page: for user info only */ - buf_pool = buf_pool_from_bpage(&block->page); - buf_pool->stat.n_page_gets++; - return(TRUE); + { + buf_pool_t* buf_pool = buf_pool_from_bpage(&block->page); - /*-------------------------------------------*/ -failure_unlock: - if (UNIV_LIKELY(!has_search_latch)) { - rw_lock_s_unlock(&btr_search_latch); + ++buf_pool->stat.n_page_gets; } -failure: - cursor->flag = BTR_CUR_HASH_FAIL; - -#ifdef UNIV_SEARCH_PERF_STAT - info->n_hash_fail++; - - if (info->n_hash_succ > 0) { - info->n_hash_succ--; - } -#endif - info->last_hash_succ = FALSE; - return(FALSE); + return(TRUE); } -/********************************************************************//** -Drops a page hash index. */ -UNIV_INTERN +/** Drop any adaptive hash index entries that point to an index page. 
+@param[in,out] block block containing index page, s- or x-latched, or an + index page for which we know that + block->buf_fix_count == 0 or it is an index page which + has already been removed from the buf_pool->page_hash + i.e.: it is in state BUF_BLOCK_REMOVE_HASH */ void -btr_search_drop_page_hash_index( -/*============================*/ - buf_block_t* block) /*!< in: block containing index page, - s- or x-latched, or an index page - for which we know that - block->buf_fix_count == 0 or it is an - index page which has already been - removed from the buf_pool->page_hash - i.e.: it is in state - BUF_BLOCK_REMOVE_HASH */ +btr_search_drop_page_hash_index(buf_block_t* block) { - hash_table_t* table; ulint n_fields; ulint n_bytes; const page_t* page; const rec_t* rec; ulint fold; ulint prev_fold; - index_id_t index_id; ulint n_cached; ulint n_recs; ulint* folds; @@ -1062,31 +1153,59 @@ btr_search_drop_page_hash_index( mem_heap_t* heap; const dict_index_t* index; ulint* offsets; + rw_lock_t* latch; btr_search_t* info; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - - /* Do a dirty check on block->index, return if the block is - not in the adaptive hash index. This is to avoid acquiring - shared btr_search_latch for performance consideration. */ - if (!block->index) { + if (!btr_search_enabled) { return; } retry: - rw_lock_s_lock(&btr_search_latch); + /* Do a dirty check on block->index, return if the block is + not in the adaptive hash index. */ index = block->index; - if (UNIV_LIKELY(!index)) { + if (index == NULL) { + return; + } + + ut_ad(block->page.buf_fix_count == 0 + || buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH + || rw_lock_own(&block->lock, RW_LOCK_S) + || rw_lock_own(&block->lock, RW_LOCK_X)); - rw_lock_s_unlock(&btr_search_latch); + /* We must not dereference index here, because it could be freed + if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys->mutex)). + Determine the ahi_slot based on the block contents. */ + const index_id_t index_id + = btr_page_get_index_id(block->frame); + const ulint ahi_slot + = ut_fold_ulint_pair(index_id, block->page.id.space()) + % btr_ahi_parts; + latch = btr_search_latches[ahi_slot]; + + ut_ad(!btr_search_own_any(RW_LOCK_S)); + ut_ad(!btr_search_own_any(RW_LOCK_X)); + + rw_lock_s_lock(latch); + + if (block->index == NULL) { + rw_lock_s_unlock(latch); return; } + /* The index associated with a block must remain the + same, because we are holding block->lock or the block is + not accessible by other threads (BUF_BLOCK_REMOVE_HASH), + or the index is not accessible to other threads + (buf_fix_count == 0 when DROP TABLE or similar is executing + buf_LRU_drop_page_hash_for_tablespace()). 
*/ + ut_a(index == block->index); + ut_ad(!index->disable_ahi); + + ut_ad(block->page.id.space() == index->space); + ut_a(index_id == index->id); ut_a(!dict_index_is_ibuf(index)); #ifdef UNIV_DEBUG switch (dict_index_get_online_status(index)) { @@ -1109,25 +1228,15 @@ retry: } #endif /* UNIV_DEBUG */ - table = btr_search_sys->hash_index; - -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED) - || rw_lock_own(&(block->lock), RW_LOCK_EX) - || block->page.buf_fix_count == 0 - || buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH); -#endif /* UNIV_SYNC_DEBUG */ - n_fields = block->curr_n_fields; n_bytes = block->curr_n_bytes; - /* NOTE: The fields of block must not be accessed after - releasing btr_search_latch, as the index page might only - be s-latched! */ + /* NOTE: The AHI fields of block must not be accessed after + releasing search latch, as the index page might only be s-latched! */ - rw_lock_s_unlock(&btr_search_latch); + rw_lock_s_unlock(latch); - ut_a(n_fields + n_bytes > 0); + ut_a(n_fields > 0 || n_bytes > 0); page = block->frame; n_recs = page_get_n_recs(page); @@ -1135,26 +1244,23 @@ retry: /* Calculate and cache fold values into an array for fast deletion from the hash index */ - folds = (ulint*) mem_alloc(n_recs * sizeof(ulint)); + folds = (ulint*) ut_malloc_nokey(n_recs * sizeof(ulint)); n_cached = 0; rec = page_get_infimum_rec(page); rec = page_rec_get_next_low(rec, page_is_comp(page)); - index_id = btr_page_get_index_id(page); - - ut_a(index_id == index->id); - prev_fold = 0; heap = NULL; offsets = NULL; while (!page_rec_is_supremum(rec)) { - offsets = rec_get_offsets(rec, index, offsets, - n_fields + (n_bytes > 0), &heap); - ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0)); + offsets = rec_get_offsets( + rec, index, offsets, + btr_search_get_n_fields(n_fields, n_bytes), + &heap); fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); if (fold == prev_fold && prev_fold != 0) { @@ -1176,7 +1282,7 @@ next_rec: mem_heap_free(heap); } - rw_lock_x_lock(&btr_search_latch); + rw_lock_x_lock(latch); if (UNIV_UNLIKELY(!block->index)) { /* Someone else has meanwhile dropped the hash index */ @@ -1186,21 +1292,23 @@ next_rec: ut_a(block->index == index); - if (UNIV_UNLIKELY(block->curr_n_fields != n_fields) - || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) { + if (block->curr_n_fields != n_fields + || block->curr_n_bytes != n_bytes) { /* Someone else has meanwhile built a new hash index on the page, with different parameters */ - rw_lock_x_unlock(&btr_search_latch); + rw_lock_x_unlock(latch); - mem_free(folds); + ut_free(folds); goto retry; } for (i = 0; i < n_cached; i++) { - ha_remove_all_nodes_to_page(table, folds[i], page); + ha_remove_all_nodes_to_page( + btr_search_sys->hash_tables[ahi_slot], + folds[i], page); } info = btr_search_get_info(block->index); @@ -1216,40 +1324,39 @@ cleanup: #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (UNIV_UNLIKELY(block->n_pointers)) { /* Corruption */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Corruption of adaptive hash index." - " After dropping\n" - "InnoDB: the hash index to a page of %s," - " still %lu hash nodes remain.\n", - index->name, (ulong) block->n_pointers); - rw_lock_x_unlock(&btr_search_latch); + ib::error() << "Corruption of adaptive hash index." 
+ << " After dropping, the hash index to a page of " + << index->name + << ", still " << block->n_pointers + << " hash nodes remain."; + rw_lock_x_unlock(latch); ut_ad(btr_search_validate()); } else { - rw_lock_x_unlock(&btr_search_latch); + rw_lock_x_unlock(latch); } #else /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - rw_lock_x_unlock(&btr_search_latch); + rw_lock_x_unlock(latch); #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - mem_free(folds); + ut_free(folds); } -/********************************************************************//** -Drops a possible page hash index when a page is evicted from the buffer pool -or freed in a file segment. */ -UNIV_INTERN +/** Drop any adaptive hash index entries that may point to an index +page that may be in the buffer pool, when a page is evicted from the +buffer pool or freed in a file segment. +@param[in] page_id page id +@param[in] page_size page size */ void btr_search_drop_page_hash_when_freed( -/*=================================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no) /*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size) { buf_block_t* block; mtr_t mtr; + dberr_t err = DB_SUCCESS; + + ut_d(export_vars.innodb_ahi_drop_lookups++); mtr_start(&mtr); @@ -1259,35 +1366,49 @@ btr_search_drop_page_hash_when_freed( are possibly holding, we cannot s-latch the page, but must (recursively) x-latch it, even though we are only reading. */ - block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL, + block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL, BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__, - &mtr); + &mtr, &err); - if (block && block->index) { + if (block) { + + /* If AHI is still valid, page can't be in free state. + AHI is dropped when page is freed. */ + ut_ad(!block->page.file_page_was_freed); buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH); - btr_search_drop_page_hash_index(block); + dict_index_t* index = block->index; + if (index != NULL) { + /* In all our callers, the table handle should + be open, or we should be in the process of + dropping the table (preventing eviction). */ + ut_ad(index->table->n_ref_count > 0 + || mutex_own(&dict_sys->mutex)); + btr_search_drop_page_hash_index(block); + } } mtr_commit(&mtr); } -/********************************************************************//** -Builds a hash index on a page with the given parameters. If the page already +/** Build a hash index on a page with the given parameters. If the page already has a hash index with different parameters, the old hash index is removed. If index is non-NULL, this function checks if n_fields and n_bytes are -sensible values, and does not build a hash index if not. */ +sensible, and does not build a hash index if not. +@param[in,out] index index for which to build. +@param[in,out] block index page, s-/x- latched. +@param[in] n_fields hash this many full fields +@param[in] n_bytes hash this many bytes of the next field +@param[in] left_side hash for searches from left side */ static void btr_search_build_page_hash_index( -/*=============================*/ - dict_index_t* index, /*!< in: index for which to build */ - buf_block_t* block, /*!< in: index page, s- or x-latched */ - ulint n_fields,/*!< in: hash this many full fields */ - ulint n_bytes,/*!< in: hash this many bytes from the next - field */ - ibool left_side)/*!< in: hash for searches from left side? 
*/ + dict_index_t* index, + buf_block_t* block, + ulint n_fields, + ulint n_bytes, + ibool left_side) { hash_table_t* table; page_t* page; @@ -1303,63 +1424,60 @@ btr_search_build_page_hash_index( mem_heap_t* heap = NULL; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; - rec_offs_init(offsets_); + if (index->disable_ahi || !btr_search_enabled) { + return; + } + + rec_offs_init(offsets_); ut_ad(index); + ut_ad(block->page.id.space() == index->space); ut_a(!dict_index_is_ibuf(index)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED) - || rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S) + || rw_lock_own(&(block->lock), RW_LOCK_X)); - rw_lock_s_lock(&btr_search_latch); + btr_search_s_lock(index); - if (!btr_search_enabled) { - rw_lock_s_unlock(&btr_search_latch); - return; - } - - table = btr_search_sys->hash_index; + table = btr_get_search_table(index); page = buf_block_get_frame(block); if (block->index && ((block->curr_n_fields != n_fields) || (block->curr_n_bytes != n_bytes) || (block->curr_left_side != left_side))) { - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); btr_search_drop_page_hash_index(block); } else { - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); } - n_recs = page_get_n_recs(page); + /* Check that the values for hash index build are sensible */ - if (n_recs == 0) { + if (n_fields == 0 && n_bytes == 0) { return; } - /* Check that the values for hash index build are sensible */ - - if (n_fields + n_bytes == 0) { - + if (dict_index_get_n_unique_in_tree(index) + < btr_search_get_n_fields(n_fields, n_bytes)) { return; } - if (dict_index_get_n_unique_in_tree(index) < n_fields - || (dict_index_get_n_unique_in_tree(index) == n_fields - && n_bytes > 0)) { + n_recs = page_get_n_recs(page); + + if (n_recs == 0) { + return; } /* Calculate and cache fold values and corresponding records into an array for fast insertion to the hash index */ - folds = (ulint*) mem_alloc(n_recs * sizeof(ulint)); - recs = (rec_t**) mem_alloc(n_recs * sizeof(rec_t*)); + folds = (ulint*) ut_malloc_nokey(n_recs * sizeof(ulint)); + recs = (rec_t**) ut_malloc_nokey(n_recs * sizeof(rec_t*)); n_cached = 0; @@ -1367,16 +1485,12 @@ btr_search_build_page_hash_index( rec = page_rec_get_next(page_get_infimum_rec(page)); - offsets = rec_get_offsets(rec, index, offsets, - n_fields + (n_bytes > 0), &heap); - - if (!page_rec_is_supremum(rec)) { - ut_a(n_fields <= rec_offs_n_fields(offsets)); - - if (n_bytes > 0) { - ut_a(n_fields < rec_offs_n_fields(offsets)); - } - } + offsets = rec_get_offsets( + rec, index, offsets, + btr_search_get_n_fields(n_fields, n_bytes), + &heap); + ut_ad(page_rec_is_supremum(rec) + || n_fields + (n_bytes > 0) == rec_offs_n_fields(offsets)); fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); @@ -1402,8 +1516,9 @@ btr_search_build_page_hash_index( break; } - offsets = rec_get_offsets(next_rec, index, offsets, - n_fields + (n_bytes > 0), &heap); + offsets = rec_get_offsets( + next_rec, index, offsets, + btr_search_get_n_fields(n_fields, n_bytes), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, n_bytes, index->id); @@ -1426,11 +1541,11 @@ btr_search_build_page_hash_index( fold = next_fold; } - btr_search_check_free_space_in_heap(); + btr_search_check_free_space_in_heap(index); - rw_lock_x_lock(&btr_search_latch); 
+ btr_search_x_lock(index); - if (UNIV_UNLIKELY(!btr_search_enabled)) { + if (!btr_search_enabled) { goto exit_func; } @@ -1464,42 +1579,42 @@ btr_search_build_page_hash_index( MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_ADDED); MONITOR_INC_VALUE(MONITOR_ADAPTIVE_HASH_ROW_ADDED, n_cached); exit_func: - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); - mem_free(folds); - mem_free(recs); + ut_free(folds); + ut_free(recs); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } } -/********************************************************************//** -Moves or deletes hash entries for moved records. If new_page is already hashed, -then the hash index for page, if any, is dropped. If new_page is not hashed, -and page is hashed, then a new hash index is built to new_page with the same -parameters as page (this often happens when a page is split). */ -UNIV_INTERN +/** Moves or deletes hash entries for moved records. If new_page is already +hashed, then the hash index for page, if any, is dropped. If new_page is not +hashed, and page is hashed, then a new hash index is built to new_page with the +same parameters as page (this often happens when a page is split). +@param[in,out] new_block records are copied to this page. +@param[in,out] block index page from which record are copied, and the + copied records will be deleted from this page. +@param[in,out] index record descriptor */ void btr_search_move_or_delete_hash_entries( -/*===================================*/ - buf_block_t* new_block, /*!< in: records are copied - to this page */ - buf_block_t* block, /*!< in: index page from which - records were copied, and the - copied records will be deleted - from this page */ - dict_index_t* index) /*!< in: record descriptor */ + buf_block_t* new_block, + buf_block_t* block, + dict_index_t* index) { - ulint n_fields; - ulint n_bytes; - ibool left_side; + /* AHI is disabled for intrinsic table as it depends on index-id + which is dynamically assigned for intrinsic table indexes and not + through a centralized index generator. 
*/ + if (index->disable_ahi || !btr_search_enabled) { + return; + } + + ut_ad(!dict_table_is_intrinsic(index->table)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); - ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X)); + ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_X)); - rw_lock_s_lock(&btr_search_latch); + btr_search_s_lock(index); ut_a(!new_block->index || new_block->index == index); ut_a(!block->index || block->index == index); @@ -1508,7 +1623,7 @@ btr_search_move_or_delete_hash_entries( if (new_block->index) { - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); btr_search_drop_page_hash_index(block); @@ -1516,39 +1631,34 @@ btr_search_move_or_delete_hash_entries( } if (block->index) { - - n_fields = block->curr_n_fields; - n_bytes = block->curr_n_bytes; - left_side = block->curr_left_side; + ulint n_fields = block->curr_n_fields; + ulint n_bytes = block->curr_n_bytes; + ibool left_side = block->curr_left_side; new_block->n_fields = block->curr_n_fields; new_block->n_bytes = block->curr_n_bytes; new_block->left_side = left_side; - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); - ut_a(n_fields + n_bytes > 0); + ut_a(n_fields > 0 || n_bytes > 0); - btr_search_build_page_hash_index(index, new_block, n_fields, - n_bytes, left_side); + btr_search_build_page_hash_index( + index, new_block, n_fields, n_bytes, left_side); ut_ad(n_fields == block->curr_n_fields); ut_ad(n_bytes == block->curr_n_bytes); ut_ad(left_side == block->curr_left_side); return; } - rw_lock_s_unlock(&btr_search_latch); + btr_search_s_unlock(index); } -/********************************************************************//** -Updates the page hash index when a single record is deleted from a page. */ -UNIV_INTERN +/** Updates the page hash index when a single record is deleted from a page. 
+@param[in] cursor cursor which was positioned on the record to delete + using btr_cur_search_, the record is not yet deleted.*/ void -btr_search_update_hash_on_delete( -/*=============================*/ - btr_cur_t* cursor) /*!< in: cursor which was positioned on the - record to delete using btr_cur_search_..., - the record is not yet deleted */ +btr_search_update_hash_on_delete(btr_cur_t* cursor) { hash_table_t* table; buf_block_t* block; @@ -1559,11 +1669,13 @@ btr_search_update_hash_on_delete( mem_heap_t* heap = NULL; rec_offs_init(offsets_); + if (cursor->index->disable_ahi || !btr_search_enabled) { + return; + } + block = btr_cur_get_block(cursor); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X)); index = block->index; @@ -1572,11 +1684,12 @@ btr_search_update_hash_on_delete( return; } + ut_ad(block->page.id.space() == index->space); ut_a(index == cursor->index); - ut_a(block->curr_n_fields + block->curr_n_bytes > 0); + ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0); ut_a(!dict_index_is_ibuf(index)); - table = btr_search_sys->hash_index; + table = btr_get_search_table(index); rec = btr_cur_get_rec(cursor); @@ -1587,7 +1700,7 @@ btr_search_update_hash_on_delete( mem_heap_free(heap); } - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); if (block->index) { ut_a(block->index == index); @@ -1600,32 +1713,30 @@ btr_search_update_hash_on_delete( } } - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); } -/********************************************************************//** -Updates the page hash index when a single record is inserted on a page. */ -UNIV_INTERN +/** Updates the page hash index when a single record is inserted on a page. +@param[in] cursor cursor which was positioned to the place to insert + using btr_cur_search_, and the new record has been + inserted next to the cursor. 
*/ void -btr_search_update_hash_node_on_insert( -/*==================================*/ - btr_cur_t* cursor) /*!< in: cursor which was positioned to the - place to insert using btr_cur_search_..., - and the new record has been inserted next - to the cursor */ +btr_search_update_hash_node_on_insert(btr_cur_t* cursor) { hash_table_t* table; buf_block_t* block; dict_index_t* index; rec_t* rec; + if (cursor->index->disable_ahi || !btr_search_enabled) { + return; + } + rec = btr_cur_get_rec(cursor); block = btr_cur_get_block(cursor); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X)); index = block->index; @@ -1637,7 +1748,7 @@ btr_search_update_hash_node_on_insert( ut_a(cursor->index == index); ut_a(!dict_index_is_ibuf(index)); - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); if (!block->index) { @@ -1651,7 +1762,7 @@ btr_search_update_hash_node_on_insert( && (cursor->n_bytes == block->curr_n_bytes) && !block->curr_left_side) { - table = btr_search_sys->hash_index; + table = btr_get_search_table(index); if (ha_search_and_update_if_found( table, cursor->fold, rec, block, @@ -1660,24 +1771,21 @@ btr_search_update_hash_node_on_insert( } func_exit: - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); } else { - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); btr_search_update_hash_on_insert(cursor); } } -/********************************************************************//** -Updates the page hash index when a single record is inserted on a page. */ -UNIV_INTERN -void -btr_search_update_hash_on_insert( -/*=============================*/ - btr_cur_t* cursor) /*!< in: cursor which was positioned to the +/** Updates the page hash index when a single record is inserted on a page. 
+@param[in,out] cursor cursor which was positioned to the place to insert using btr_cur_search_..., and the new record has been inserted next to the cursor */ +void +btr_search_update_hash_on_insert(btr_cur_t* cursor) { hash_table_t* table; buf_block_t* block; @@ -1697,11 +1805,13 @@ btr_search_update_hash_on_insert( ulint* offsets = offsets_; rec_offs_init(offsets_); + if (cursor->index->disable_ahi || !btr_search_enabled) { + return; + } + block = btr_cur_get_block(cursor); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_X)); index = block->index; @@ -1710,12 +1820,14 @@ btr_search_update_hash_on_insert( return; } - btr_search_check_free_space_in_heap(); + ut_ad(block->page.id.space() == index->space); + btr_search_check_free_space_in_heap(index); - table = btr_search_sys->hash_index; + table = btr_get_search_table(index); rec = btr_cur_get_rec(cursor); + ut_a(!index->disable_ahi); ut_a(index == cursor->index); ut_a(!dict_index_is_ibuf(index)); @@ -1731,20 +1843,22 @@ btr_search_update_hash_on_insert( ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id); if (!page_rec_is_supremum(next_rec)) { - offsets = rec_get_offsets(next_rec, index, offsets, - n_fields + (n_bytes > 0), &heap); + offsets = rec_get_offsets( + next_rec, index, offsets, + btr_search_get_n_fields(n_fields, n_bytes), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, n_bytes, index->id); } if (!page_rec_is_infimum(rec)) { - offsets = rec_get_offsets(rec, index, offsets, - n_fields + (n_bytes > 0), &heap); + offsets = rec_get_offsets( + rec, index, offsets, + btr_search_get_n_fields(n_fields, n_bytes), &heap); fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); } else { if (left_side) { - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); locked = TRUE; @@ -1762,7 +1876,7 @@ btr_search_update_hash_on_insert( if (!locked) { - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); locked = TRUE; @@ -1784,7 +1898,7 @@ check_next_rec: if (!left_side) { if (!locked) { - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); locked = TRUE; @@ -1803,7 +1917,7 @@ check_next_rec: if (!locked) { - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock(index); locked = TRUE; @@ -1813,13 +1927,7 @@ check_next_rec: } if (!left_side) { - ha_insert_for_fold(table, ins_fold, block, ins_rec); - /* - fputs("Hash insert for ", stderr); - dict_index_name_print(stderr, index); - fprintf(stderr, " fold %lu\n", ins_fold); - */ } else { ha_insert_for_fold(table, next_fold, block, next_rec); } @@ -1830,21 +1938,20 @@ function_exit: mem_heap_free(heap); } if (locked) { - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock(index); } } #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG -/********************************************************************//** -Validates the search system. -@return TRUE if ok */ -UNIV_INTERN + +/** Validates the search system for given hash table. 
+@param[in] hash_table_id hash table to validate +@return TRUE if ok */ +static ibool -btr_search_validate(void) -/*=====================*/ +btr_search_hash_table_validate(ulint hash_table_id) { ha_node_t* node; - ulint n_page_dumps = 0; ibool ok = TRUE; ulint i; ulint cell_count; @@ -1852,30 +1959,50 @@ btr_search_validate(void) ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; + if (!btr_search_enabled) { + return(TRUE); + } + /* How many cells to check before temporarily releasing - btr_search_latch. */ + search latches. */ ulint chunk_size = 10000; rec_offs_init(offsets_); - rw_lock_x_lock(&btr_search_latch); + btr_search_x_lock_all(); buf_pool_mutex_enter_all(); - cell_count = hash_get_n_cells(btr_search_sys->hash_index); + cell_count = hash_get_n_cells( + btr_search_sys->hash_tables[hash_table_id]); for (i = 0; i < cell_count; i++) { - /* We release btr_search_latch every once in a while to + /* We release search latches every once in a while to give other queries a chance to run. */ if ((i != 0) && ((i % chunk_size) == 0)) { + buf_pool_mutex_exit_all(); - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock_all(); + os_thread_yield(); - rw_lock_x_lock(&btr_search_latch); + + btr_search_x_lock_all(); buf_pool_mutex_enter_all(); + + ulint curr_cell_count = hash_get_n_cells( + btr_search_sys->hash_tables[hash_table_id]); + + if (cell_count != curr_cell_count) { + + cell_count = curr_cell_count; + + if (i >= cell_count) { + break; + } + } } - node = (ha_node_t*) - hash_get_nth_cell(btr_search_sys->hash_index, i)->node; + node = (ha_node_t*) hash_get_nth_cell( + btr_search_sys->hash_tables[hash_table_id], i)->node; for (; node != NULL; node = node->next) { const buf_block_t* block @@ -1896,8 +2023,7 @@ btr_search_validate(void) assertion and the comment below) */ hash_block = buf_block_hash_get( buf_pool, - buf_block_get_space(block), - buf_block_get_page_no(block)); + block->page.id); } else { hash_block = NULL; } @@ -1913,94 +2039,115 @@ btr_search_validate(void) After that, it invokes btr_search_drop_page_hash_index() to remove the block from - btr_search_sys->hash_index. */ + btr_search_sys->hash_tables[i]. 
*/ ut_a(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH); } ut_a(!dict_index_is_ibuf(block->index)); + ut_ad(block->page.id.space() == block->index->space); page_index_id = btr_page_get_index_id(block->frame); - offsets = rec_get_offsets(node->data, - block->index, offsets, - block->curr_n_fields - + (block->curr_n_bytes > 0), - &heap); - - if (!block->index || node->fold - != rec_fold(node->data, - offsets, - block->curr_n_fields, - block->curr_n_bytes, - page_index_id)) { + offsets = rec_get_offsets( + node->data, block->index, offsets, + btr_search_get_n_fields(block->curr_n_fields, + block->curr_n_bytes), + &heap); + + const ulint fold = rec_fold( + node->data, offsets, + block->curr_n_fields, + block->curr_n_bytes, + page_index_id); + + if (node->fold != fold) { const page_t* page = block->frame; ok = FALSE; - ut_print_timestamp(stderr); - - fprintf(stderr, - " InnoDB: Error in an adaptive hash" - " index pointer to page %lu\n" - "InnoDB: ptr mem address %p" - " index id %llu," - " node fold %lu, rec fold %lu\n", - (ulong) page_get_page_no(page), - node->data, - (ullint) page_index_id, - (ulong) node->fold, - (ulong) rec_fold(node->data, - offsets, - block->curr_n_fields, - block->curr_n_bytes, - page_index_id)); + + ib::error() << "Error in an adaptive hash" + << " index pointer to page " + << page_id_t(page_get_space_id(page), + page_get_page_no(page)) + << ", ptr mem address " + << reinterpret_cast( + node->data) + << ", index id " << page_index_id + << ", node fold " << node->fold + << ", rec fold " << fold; fputs("InnoDB: Record ", stderr); rec_print_new(stderr, node->data, offsets); fprintf(stderr, "\nInnoDB: on that page." " Page mem address %p, is hashed %p," - " n fields %lu, n bytes %lu\n" + " n fields %lu\n" "InnoDB: side %lu\n", (void*) page, (void*) block->index, (ulong) block->curr_n_fields, - (ulong) block->curr_n_bytes, (ulong) block->curr_left_side); - - if (n_page_dumps < 20) { - buf_page_print( - page, 0, - BUF_PAGE_PRINT_NO_CRASH); - n_page_dumps++; - } + ut_ad(0); } } } for (i = 0; i < cell_count; i += chunk_size) { - ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1); - - /* We release btr_search_latch every once in a while to + /* We release search latches every once in a while to give other queries a chance to run. */ if (i != 0) { + buf_pool_mutex_exit_all(); - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock_all(); + os_thread_yield(); - rw_lock_x_lock(&btr_search_latch); + + btr_search_x_lock_all(); buf_pool_mutex_enter_all(); + + ulint curr_cell_count = hash_get_n_cells( + btr_search_sys->hash_tables[hash_table_id]); + + if (cell_count != curr_cell_count) { + + cell_count = curr_cell_count; + + if (i >= cell_count) { + break; + } + } } - if (!ha_validate(btr_search_sys->hash_index, i, end_index)) { + ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1); + + if (!ha_validate(btr_search_sys->hash_tables[hash_table_id], + i, end_index)) { ok = FALSE; } } buf_pool_mutex_exit_all(); - rw_lock_x_unlock(&btr_search_latch); + btr_search_x_unlock_all(); + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } return(ok); } + +/** Validate the search system. +@return true if ok. 
*/ +bool +btr_search_validate() +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + if (!btr_search_hash_table_validate(i)) { + return(false); + } + } + + return(true); +} + #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */ diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc index f2ab73217e0..1d6083a5f77 100644 --- a/storage/innobase/buf/buf0buddy.cc +++ b/storage/innobase/buf/buf0buddy.cc @@ -23,12 +23,10 @@ Binary buddy allocator for compressed pages Created December 2006 by Marko Makela *******************************************************/ -#define THIS_MODULE #include "buf0buddy.h" #ifdef UNIV_NONINL # include "buf0buddy.ic" #endif -#undef THIS_MODULE #include "buf0buf.h" #include "buf0lru.h" #include "buf0flu.h" @@ -71,11 +69,11 @@ are written.*/ /** Value that we stamp on all buffers that are currently on the zip_free list. This value is stamped at BUF_BUDDY_STAMP_OFFSET offset */ -#define BUF_BUDDY_STAMP_FREE (SRV_LOG_SPACE_FIRST_ID) +#define BUF_BUDDY_STAMP_FREE SRV_LOG_SPACE_FIRST_ID /** Stamp value for non-free buffers. Will be overwritten by a non-zero value by the consumer of the block */ -#define BUF_BUDDY_STAMP_NONFREE (0XFFFFFFFF) +#define BUF_BUDDY_STAMP_NONFREE 0XFFFFFFFFUL #if BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE # error "BUF_BUDDY_STAMP_FREE >= BUF_BUDDY_STAMP_NONFREE" @@ -111,7 +109,7 @@ buf_buddy_mem_invalid( /**********************************************************************//** Check if a buddy is stamped free. -@return whether the buddy is free */ +@return whether the buddy is free */ UNIV_INLINE MY_ATTRIBUTE((warn_unused_result)) bool buf_buddy_stamp_is_free( @@ -140,7 +138,7 @@ buf_buddy_stamp_free( /**********************************************************************//** Stamps a buddy nonfree. -@param[in/out] buf block to stamp +@param[in,out] buf block to stamp @param[in] i block size */ #define buf_buddy_stamp_nonfree(buf, i) do { \ buf_buddy_mem_invalid(buf, i); \ @@ -152,7 +150,7 @@ Stamps a buddy nonfree. /**********************************************************************//** Get the offset of the buddy of a compressed page frame. -@return the buddy relative of page */ +@return the buddy relative of page */ UNIV_INLINE void* buf_buddy_get( @@ -174,23 +172,33 @@ buf_buddy_get( } } +#ifdef UNIV_DEBUG /** Validate a given zip_free list. */ struct CheckZipFree { - ulint i; - CheckZipFree(ulint i) : i (i) {} + CheckZipFree(ulint i) : m_i(i) {} void operator()(const buf_buddy_free_t* elem) const { ut_a(buf_buddy_stamp_is_free(elem)); - ut_a(elem->stamp.size <= i); + ut_a(elem->stamp.size <= m_i); } + + ulint m_i; }; -#define BUF_BUDDY_LIST_VALIDATE(bp, i) \ - UT_LIST_VALIDATE(list, buf_buddy_free_t, \ - bp->zip_free[i], CheckZipFree(i)) +/** Validate a buddy list. +@param[in] buf_pool buffer pool instance +@param[in] i buddy size to validate */ +static +void +buf_buddy_list_validate( + const buf_pool_t* buf_pool, + ulint i) +{ + CheckZipFree check(i); + ut_list_validate(buf_pool->zip_free[i], check); +} -#ifdef UNIV_DEBUG /**********************************************************************//** Debug function to validate that a buffer is indeed free i.e.: in the zip_free[]. 
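/* A minimal standalone sketch of the functor-based list validation that the
CheckZipFree / buf_buddy_list_validate hunk above switches to: a small functor
carrying the buddy size is applied to every node of an intrusive free list.
The names Node, CheckSize and walk_and_check below are illustrative only (they
are not InnoDB APIs), and assert() stands in for ut_a(). */

#include <cassert>
#include <cstddef>

struct Node {                         /* stand-in for buf_buddy_free_t */
	std::size_t stamp_size;       /* stand-in for elem->stamp.size */
	Node*       next;
};

struct CheckSize {                    /* analogue of CheckZipFree */
	explicit CheckSize(std::size_t i) : m_i(i) {}
	void operator()(const Node* elem) const
	{
		/* same shape as ut_a(elem->stamp.size <= m_i) */
		assert(elem->stamp_size <= m_i);
	}
	std::size_t m_i;
};

/* analogue of ut_list_validate(list, functor): walk the list and apply
the check to every element */
template <typename Functor>
void walk_and_check(const Node* head, Functor check)
{
	for (const Node* n = head; n != NULL; n = n->next) {
		check(n);
	}
}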
@@ -282,8 +290,8 @@ buf_buddy_add_to_free( ut_ad(buf_pool->zip_free[i].start != buf); buf_buddy_stamp_free(buf, i); - UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], buf); - ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i)); + UT_LIST_ADD_FIRST(buf_pool->zip_free[i], buf); + ut_d(buf_buddy_list_validate(buf_pool, i)); } /**********************************************************************//** @@ -293,20 +301,21 @@ void buf_buddy_remove_from_free( /*=======================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - buf_buddy_free_t* buf, /*!< in,own: block to be freed */ + buf_buddy_free_t* buf, /*!< in,own: block to be + freed */ ulint i) /*!< in: index of buf_pool->zip_free[] */ { ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_buddy_check_free(buf_pool, buf, i)); - UT_LIST_REMOVE(list, buf_pool->zip_free[i], buf); + UT_LIST_REMOVE(buf_pool->zip_free[i], buf); buf_buddy_stamp_nonfree(buf, i); } /**********************************************************************//** Try to allocate a block from buf_pool->zip_free[]. -@return allocated block, or NULL if buf_pool->zip_free[] was empty */ +@return allocated block, or NULL if buf_pool->zip_free[] was empty */ static buf_buddy_free_t* buf_buddy_alloc_zip( @@ -320,10 +329,22 @@ buf_buddy_alloc_zip( ut_a(i < BUF_BUDDY_SIZES); ut_a(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN)); - ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i)); + ut_d(buf_buddy_list_validate(buf_pool, i)); buf = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); + if (buf_pool->curr_size < buf_pool->old_size + && UT_LIST_GET_LEN(buf_pool->withdraw) + < buf_pool->withdraw_target) { + + while (buf != NULL + && buf_frame_will_withdrawn( + buf_pool, reinterpret_cast(buf))) { + /* This should be withdrawn, not to be allocated */ + buf = UT_LIST_GET_NEXT(list, buf); + } + } + if (buf) { buf_buddy_remove_from_free(buf_pool, buf, i); } else if (i + 1 < BUF_BUDDY_SIZES) { @@ -388,9 +409,9 @@ buf_buddy_block_free( UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE); block = (buf_block_t*) bpage; - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); buf_LRU_block_free_non_file_page(block); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); ut_ad(buf_pool->buddy_n_frames > 0); ut_d(buf_pool->buddy_n_frames--); @@ -425,7 +446,7 @@ buf_buddy_block_register( /**********************************************************************//** Allocate a block from a bigger object. -@return allocated block */ +@return allocated block */ static void* buf_buddy_alloc_from( @@ -463,8 +484,7 @@ buf_buddy_alloc_from( Allocate a block. The thread calling this function must hold buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex. The buf_pool_mutex may be released and reacquired. -@return allocated block, never NULL */ -UNIV_INTERN +@return allocated block, never NULL */ void* buf_buddy_alloc_low( /*================*/ @@ -520,7 +540,7 @@ func_exit: /**********************************************************************//** Try to relocate a block. 
-@return true if relocated */ +@return true if relocated */ static bool buf_buddy_relocate( @@ -528,11 +548,13 @@ buf_buddy_relocate( buf_pool_t* buf_pool, /*!< in: buffer pool instance */ void* src, /*!< in: block to relocate */ void* dst, /*!< in: free block to relocate to */ - ulint i) /*!< in: index of + ulint i, /*!< in: index of buf_pool->zip_free[] */ + bool force) /*!< in: true if we must relocate + always */ { buf_page_t* bpage; - const ulint size = BUF_BUDDY_LOW << i; + const ulint size = BUF_BUDDY_LOW << i; ulint space; ulint offset; @@ -555,12 +577,19 @@ buf_buddy_relocate( ut_ad(space != BUF_BUDDY_STAMP_FREE); - ulint fold = buf_page_address_fold(space, offset); - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold); + const page_id_t page_id(space, offset); + + /* If space,offset is bogus, then we know that the + buf_page_hash_get_low() call below will return NULL. */ + if (!force && buf_pool != buf_pool_get(page_id)) { + return(false); + } + + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, page_id); rw_lock_x_lock(hash_lock); - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); + bpage = buf_page_hash_get_low(buf_pool, page_id); if (!bpage || bpage->zip.data != src) { /* The block has probably been freshly @@ -570,7 +599,27 @@ buf_buddy_relocate( rw_lock_x_unlock(hash_lock); - return(false); + if (!force || space != 0 || offset != 0) { + return(false); + } + + /* It might be just uninitialized page. + We should search from LRU list also. */ + + bpage = UT_LIST_GET_FIRST(buf_pool->LRU); + while (bpage != NULL) { + if (bpage->zip.data == src) { + hash_lock = buf_page_hash_lock_get( + buf_pool, bpage->id); + rw_lock_x_lock(hash_lock); + break; + } + bpage = UT_LIST_GET_NEXT(LRU, bpage); + } + + if (bpage == NULL) { + return(false); + } } if (page_zip_get_size(&bpage->zip) != size) { @@ -588,20 +637,17 @@ buf_buddy_relocate( contain uninitialized data. */ UNIV_MEM_ASSERT_W(src, size); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); if (buf_page_can_relocate(bpage)) { /* Relocate the compressed page. */ - ullint usec = ut_time_us(NULL); + uintmax_t usec = ut_time_us(NULL); ut_a(bpage->zip.data == src); - /* Note: This is potentially expensive, we need a better - solution here. We go with correctness for now. */ - ::memcpy(dst, src, size); - + memcpy(dst, src, size); bpage->zip.data = reinterpret_cast(dst); rw_lock_x_unlock(hash_lock); @@ -612,24 +658,19 @@ buf_buddy_relocate( reinterpret_cast(src), i); buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i]; - - ++buddy_stat->relocated; - + buddy_stat->relocated++; buddy_stat->relocated_usec += ut_time_us(NULL) - usec; - return(true); } rw_lock_x_unlock(hash_lock); mutex_exit(block_mutex); - return(false); } /**********************************************************************//** Deallocate a block. */ -UNIV_INTERN void buf_buddy_free_low( /*===============*/ @@ -663,7 +704,8 @@ recombine: /* Do not recombine blocks if there are few free blocks. We may waste up to 15360*max_len bytes to free blocks (1024 + 2048 + 4096 + 8192 = 15360) */ - if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16) { + if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16 + && buf_pool->curr_size >= buf_pool->old_size) { goto func_exit; } @@ -684,7 +726,7 @@ buddy_is_free: goto recombine; case BUF_BUDDY_STATE_USED: - ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i)); + ut_d(buf_buddy_list_validate(buf_pool, i)); /* The buddy is not free. 
Is there a free block of this size? */ @@ -698,7 +740,8 @@ buddy_is_free: /* Try to relocate the buddy of buf to the free block. */ - if (buf_buddy_relocate(buf_pool, buddy, zip_buf, i)) { + if (buf_buddy_relocate(buf_pool, buddy, zip_buf, i, + false)) { goto buddy_is_free; } @@ -719,3 +762,119 @@ func_exit: reinterpret_cast(buf), i); } + +/** Reallocate a block. +@param[in] buf_pool buffer pool instance +@param[in] buf block to be reallocated, must be pointed +to by the buffer pool +@param[in] size block size, up to UNIV_PAGE_SIZE +@retval false if failed because of no free blocks. */ +bool +buf_buddy_realloc( + buf_pool_t* buf_pool, + void* buf, + ulint size) +{ + buf_block_t* block = NULL; + ulint i = buf_buddy_get_slot(size); + + ut_ad(buf_pool_mutex_own(buf_pool)); + ut_ad(!mutex_own(&buf_pool->zip_mutex)); + ut_ad(i <= BUF_BUDDY_SIZES); + ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN)); + + if (i < BUF_BUDDY_SIZES) { + /* Try to allocate from the buddy system. */ + block = reinterpret_cast( + buf_buddy_alloc_zip(buf_pool, i)); + } + + if (block == NULL) { + /* Try allocating from the buf_pool->free list. */ + block = buf_LRU_get_free_only(buf_pool); + + if (block == NULL) { + return(false); /* free_list was not enough */ + } + + buf_buddy_block_register(block); + + block = reinterpret_cast( + buf_buddy_alloc_from( + buf_pool, block->frame, i, BUF_BUDDY_SIZES)); + } + + buf_pool->buddy_stat[i].used++; + + /* Try to relocate the buddy of buf to the free block. */ + if (buf_buddy_relocate(buf_pool, buf, block, i, true)) { + /* succeeded */ + buf_buddy_free_low(buf_pool, buf, i); + } else { + /* failed */ + buf_buddy_free_low(buf_pool, block, i); + } + + return(true); /* free_list was enough */ +} + +/** Combine all pairs of free buddies. +@param[in] buf_pool buffer pool instance */ +void +buf_buddy_condense_free( + buf_pool_t* buf_pool) +{ + ut_ad(buf_pool_mutex_own(buf_pool)); + ut_ad(buf_pool->curr_size < buf_pool->old_size); + + for (ulint i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) { + buf_buddy_free_t* buf = + UT_LIST_GET_FIRST(buf_pool->zip_free[i]); + + /* seek to withdraw target */ + while (buf != NULL + && !buf_frame_will_withdrawn( + buf_pool, reinterpret_cast(buf))) { + buf = UT_LIST_GET_NEXT(list, buf); + } + + while (buf != NULL) { + buf_buddy_free_t* next = + UT_LIST_GET_NEXT(list, buf); + + buf_buddy_free_t* buddy = + reinterpret_cast( + buf_buddy_get( + reinterpret_cast(buf), + BUF_BUDDY_LOW << i)); + + /* seek to the next withdraw target */ + while (true) { + while (next != NULL + && !buf_frame_will_withdrawn( + buf_pool, + reinterpret_cast(next))) { + next = UT_LIST_GET_NEXT(list, next); + } + + if (buddy != next) { + break; + } + + next = UT_LIST_GET_NEXT(list, next); + } + + if (buf_buddy_is_free(buddy, i) + == BUF_BUDDY_STATE_FREE) { + /* Both buf and buddy are free. + Try to combine them. 
*/ + buf_buddy_remove_from_free(buf_pool, buf, i); + buf_pool->buddy_stat[i].used++; + + buf_buddy_free_low(buf_pool, buf, i); + } + + buf = next; + } + } +} diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 7b0cf339ef1..04260df18ff 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -31,35 +31,59 @@ The database buffer buf_pool Created 11/5/1995 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + +#include "page0size.h" #include "buf0buf.h" #ifdef UNIV_NONINL #include "buf0buf.ic" #endif - +#ifdef UNIV_INNOCHECKSUM +#include "string.h" +#include "mach0data.h" +#endif /* UNIV_INNOCHECKSUM */ +#ifndef UNIV_INNOCHECKSUM #include "mem0mem.h" #include "btr0btr.h" #include "fil0fil.h" #include "fil0crypt.h" +#include "fsp0sysspace.h" #ifndef UNIV_HOTBACKUP #include "buf0buddy.h" #include "lock0lock.h" +#include "sync0rw.h" #include "btr0sea.h" #include "ibuf0ibuf.h" #include "trx0undo.h" +#include "trx0purge.h" #include "log0log.h" +#include "dict0stats_bg.h" #endif /* !UNIV_HOTBACKUP */ #include "srv0srv.h" +#include "srv0start.h" #include "dict0dict.h" #include "log0recv.h" -#include "page0zip.h" #include "srv0mon.h" +#include "fsp0sysspace.h" +#endif /* !UNIV_INNOCHECKSUM */ +#include "page0zip.h" #include "buf0checksum.h" +#include "sync0sync.h" +#include "buf0dump.h" +#include "ut0new.h" + +#include +#include +#include #ifdef HAVE_LIBNUMA #include #include #endif // HAVE_LIBNUMA +#ifndef UNIV_INNOCHECKSUM #include "fil0pagecompress.h" +#include "fsp0pagecompress.h" +#endif #include "ha_prototypes.h" #include "ut0byte.h" #include @@ -256,41 +280,62 @@ that the whole area may be needed in the near future, and issue the read requests for the whole area. */ -#ifndef UNIV_HOTBACKUP +#if (!(defined(UNIV_HOTBACKUP) || defined(UNIV_INNOCHECKSUM))) /** Value in microseconds */ static const int WAIT_FOR_READ = 100; -/** Number of attemtps made to read in a page in the buffer pool */ -static const ulint BUF_PAGE_READ_MAX_RETRIES = 100; +static const int WAIT_FOR_WRITE = 100; +/** Number of attempts made to read in a page in the buffer pool */ +static const ulint BUF_PAGE_READ_MAX_RETRIES = 100; +/** Number of pages to read ahead */ +static const ulint BUF_READ_AHEAD_PAGES = 64; +/** The maximum portion of the buffer pool that can be used for the +read-ahead buffer. (Divide buf_pool size by this amount) */ +static const ulint BUF_READ_AHEAD_PORTION = 32; /** The buffer pools of the database */ -UNIV_INTERN buf_pool_t* buf_pool_ptr; +buf_pool_t* buf_pool_ptr; + +/** true when resizing buffer pool is in the critical path. */ +volatile bool buf_pool_resizing; + +/** true when withdrawing buffer pool pages might cause page relocation */ +volatile bool buf_pool_withdrawing; + +/** the clock is incremented every time a pointer to a page may become obsolete; +if the withdrwa clock has not changed, the pointer is still valid in buffer +pool. if changed, the pointer might not be in buffer pool any more. */ +volatile ulint buf_withdraw_clock; + +/** Map of buffer pool chunks by its first frame address +This is newly made by initialization of buffer pool and buf_resize_thread. +Currently, no need mutex protection for update. */ +typedef std::map< + const byte*, + buf_chunk_t*, + std::less, + ut_allocator > > + buf_pool_chunk_map_t; + +static buf_pool_chunk_map_t* buf_chunk_map_reg; + +/** Chunk map to be used to lookup. 
+The map pointed by this should not be updated */ +static buf_pool_chunk_map_t* buf_chunk_map_ref = NULL; -#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG -static ulint buf_dbg_counter = 0; /*!< This is used to insert validation - operations in execution in the - debug version */ -#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #ifdef UNIV_DEBUG -/** If this is set TRUE, the program prints info whenever -read-ahead or flush occurs */ -UNIV_INTERN ibool buf_debug_prints = FALSE; +/** Protect reference for buf_chunk_map_ref from deleting map, +because the reference can be caused by debug assertion code. */ +static rw_lock_t* buf_chunk_map_latch; + +/** Disable resizing buffer pool to make assertion code not expensive. */ +my_bool buf_disable_resize_buffer_pool_debug = TRUE; #endif /* UNIV_DEBUG */ -#ifdef UNIV_PFS_RWLOCK -/* Keys to register buffer block related rwlocks and mutexes with -performance schema */ -UNIV_INTERN mysql_pfs_key_t buf_block_lock_key; -# ifdef UNIV_SYNC_DEBUG -UNIV_INTERN mysql_pfs_key_t buf_block_debug_latch_key; -# endif /* UNIV_SYNC_DEBUG */ -#endif /* UNIV_PFS_RWLOCK */ - -#ifdef UNIV_PFS_MUTEX -UNIV_INTERN mysql_pfs_key_t buffer_block_mutex_key; -UNIV_INTERN mysql_pfs_key_t buf_pool_mutex_key; -UNIV_INTERN mysql_pfs_key_t buf_pool_zip_mutex_key; -UNIV_INTERN mysql_pfs_key_t flush_list_mutex_key; -#endif /* UNIV_PFS_MUTEX */ +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG +/** This is used to insert validation operations in execution +in the debug version */ +static ulint buf_dbg_counter = 0; +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #if defined UNIV_PFS_MUTEX || defined UNIV_PFS_RWLOCK # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK @@ -317,6 +362,17 @@ on the io_type */ ? (counter##_READ) \ : (counter##_WRITTEN)) +/** Registers a chunk to buf_pool_chunk_map +@param[in] chunk chunk of buffers */ +static +void +buf_pool_register_chunk( + buf_chunk_t* chunk) +{ + buf_chunk_map_reg->insert(buf_pool_chunk_map_t::value_type( + chunk->blocks->frame, chunk)); +} + /********************************************************************//** Check if page is maybe compressed, encrypted or both when we encounter corrupted page. Note that we can't be 100% sure if page is corrupted @@ -333,13 +389,10 @@ buf_page_check_corrupt( Gets the smallest oldest_modification lsn for any page in the pool. Returns zero if all modified pages have been flushed to disk. @return oldest modification in pool, zero if none */ -UNIV_INTERN lsn_t buf_pool_get_oldest_modification(void) /*==================================*/ { - ulint i; - buf_page_t* bpage; lsn_t lsn = 0; lsn_t oldest_lsn = 0; @@ -347,14 +400,24 @@ buf_pool_get_oldest_modification(void) thread to add a dirty page to any flush list. */ log_flush_order_mutex_enter(); - for (i = 0; i < srv_buf_pool_instances; i++) { + for (ulint i = 0; i < srv_buf_pool_instances; i++) { buf_pool_t* buf_pool; buf_pool = buf_pool_from_array(i); buf_flush_list_mutex_enter(buf_pool); - bpage = UT_LIST_GET_LAST(buf_pool->flush_list); + buf_page_t* bpage; + + /* We don't let log-checkpoint halt because pages from system + temporary are not yet flushed to the disk. Anyway, object + residing in system temporary doesn't generate REDO logging. */ + for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list); + bpage != NULL + && fsp_is_system_temporary(bpage->id.space()); + bpage = UT_LIST_GET_PREV(list, bpage)) { + /* Do nothing. 
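The loop body is intentionally empty: it only walks past temporary-tablespace pages from the tail of the flush list, so the first remaining page, if any, is the oldest modification that the checkpoint actually has to wait for.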
*/ + } if (bpage != NULL) { ut_ad(bpage->in_flush_list); @@ -378,7 +441,6 @@ buf_pool_get_oldest_modification(void) /********************************************************************//** Get total buffer pool statistics. */ -UNIV_INTERN void buf_get_total_list_len( /*===================*/ @@ -405,7 +467,6 @@ buf_get_total_list_len( /********************************************************************//** Get total list size in bytes from all buffer pools. */ -UNIV_INTERN void buf_get_total_list_size_in_bytes( /*=============================*/ @@ -431,7 +492,6 @@ buf_get_total_list_size_in_bytes( /********************************************************************//** Get total buffer pool statistics. */ -UNIV_INTERN void buf_get_total_stat( /*===============*/ @@ -465,7 +525,6 @@ buf_get_total_stat( /********************************************************************//** Allocates a buffer block. @return own: the allocated block, in state BUF_BLOCK_MEMORY */ -UNIV_INTERN buf_block_t* buf_block_alloc( /*============*/ @@ -490,21 +549,18 @@ buf_block_alloc( return(block); } -#endif /* !UNIV_HOTBACKUP */ +#endif /* !UNIV_HOTBACKUP && !UNIV_INNOCHECKSUM */ -/********************************************************************//** -Checks if a page is all zeroes. -@return TRUE if the page is all zeroes */ +/** Checks if a page contains only zeroes. +@param[in] read_buf database page +@param[in] page_size page size +@return true if page is filled with zeroes */ bool buf_page_is_zeroes( -/*===============*/ - const byte* read_buf, /*!< in: a database page */ - const ulint zip_size) /*!< in: size of compressed page; - 0 for uncompressed pages */ + const byte* read_buf, + const page_size_t& page_size) { - const ulint page_size = zip_size ? zip_size : UNIV_PAGE_SIZE; - - for (ulint i = 0; i < page_size; i++) { + for (ulint i = 0; i < page_size.logical(); i++) { if (read_buf[i] != 0) { return(false); } @@ -516,30 +572,77 @@ buf_page_is_zeroes( @param[in] read_buf database page @param[in] checksum_field1 new checksum field @param[in] checksum_field2 old checksum field -@return true if the page is in crc32 checksum format */ +@param[in] page_no page number of given read_buf +@param[in] is_log_enabled true if log option is enabled +@param[in] log_file file pointer to log_file +@param[in] curr_algo current checksum algorithm +@return true if the page is in crc32 checksum format. 
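The check below reduces to a small predicate: the two stored checksum slots must agree, and the shared value must equal either the page's crc32 or its legacy big-endian crc32 (buf_calc_page_crc32(read_buf, true)). A self-contained sketch of just that rule, with the computed values passed in (checksum_valid_crc32 is a made-up name):

#include <cstdint>

static bool
checksum_valid_crc32(std::uint32_t field1, std::uint32_t field2,
		     std::uint32_t crc32, std::uint32_t crc32_legacy)
{
	if (field1 != field2) {
		return false;	/* both slots must carry the same value */
	}
	return field1 == crc32 || field1 == crc32_legacy;
}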
*/ UNIV_INLINE bool buf_page_is_checksum_valid_crc32( - const byte* read_buf, - ulint checksum_field1, - ulint checksum_field2) + const byte* read_buf, + ulint checksum_field1, + ulint checksum_field2 +#ifdef UNIV_INNOCHECKSUM + ,uintmax_t page_no, + bool is_log_enabled, + FILE* log_file, + const srv_checksum_algorithm_t curr_algo +#endif /* UNIV_INNOCHECKSUM */ + ) { - ib_uint32_t crc32 = buf_calc_page_crc32(read_buf); + const uint32_t crc32 = buf_calc_page_crc32(read_buf); + +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled + && curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) { + fprintf(log_file, "page::%lu;" + " crc32 calculated = %u;" + " recorded checksum field1 = %lu recorded" + " checksum field2 =%lu\n", page_no, + crc32, checksum_field1, checksum_field2); + } +#endif /* UNIV_INNOCHECKSUM */ + + if (checksum_field1 != checksum_field2) { + return(false); + } + + if (checksum_field1 == crc32) { + return(true); + } + + const uint32_t crc32_legacy = buf_calc_page_crc32(read_buf, true); + + if (checksum_field1 == crc32_legacy) { + return(true); + } - return(checksum_field1 == crc32 && checksum_field2 == crc32); + return(false); } /** Checks if the page is in innodb checksum format. @param[in] read_buf database page @param[in] checksum_field1 new checksum field @param[in] checksum_field2 old checksum field -@return true if the page is in innodb checksum format */ +@param[in] page_no page number of given read_buf +@param[in] is_log_enabled true if log option is enabled +@param[in] log_file file pointer to log_file +@param[in] curr_algo current checksum algorithm +@return true if the page is in innodb checksum format. */ UNIV_INLINE bool buf_page_is_checksum_valid_innodb( - const byte* read_buf, - ulint checksum_field1, - ulint checksum_field2) + const byte* read_buf, + ulint checksum_field1, + ulint checksum_field2 +#ifdef UNIV_INNOCHECKSUM + ,uintmax_t page_no, + bool is_log_enabled, + FILE* log_file, + const srv_checksum_algorithm_t curr_algo +#endif /* UNIV_INNOCHECKSUM */ + ) { /* There are 2 valid formulas for checksum_field2 (old checksum field) which algo=innodb could have @@ -551,8 +654,41 @@ buf_page_is_checksum_valid_innodb( 2. Newer InnoDB versions store the old formula checksum (buf_calc_page_old_checksum()). 
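Stated as a predicate: the old slot must hold either the low 32 bits of FIL_PAGE_LSN or the old-formula checksum, and the new slot must be either zero (very old pages stored the space id 0 there) or the new-formula checksum. A minimal sketch with the computed values passed in (checksum_valid_innodb is a made-up name):

#include <cstdint>

static bool
checksum_valid_innodb(std::uint32_t field1, std::uint32_t field2,
		      std::uint32_t lsn_low32,
		      std::uint32_t old_sum, std::uint32_t new_sum)
{
	if (field2 != lsn_low32 && field2 != old_sum) {
		return false;
	}
	/* pages from very old versions stored the space id (0) here */
	return field1 == 0 || field1 == new_sum;
}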
*/ + ulint old_checksum = buf_calc_page_old_checksum(read_buf); + ulint new_checksum = buf_calc_page_new_checksum(read_buf); + +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled + && curr_algo == SRV_CHECKSUM_ALGORITHM_INNODB) { + fprintf(log_file, "page::%lu;" + " old style: calculated =" + " %lu; recorded = %lu\n", + page_no, old_checksum, + checksum_field2); + fprintf(log_file, "page::%lu;" + " new style: calculated =" + " %lu; crc32 = %u; recorded = %lu\n", + page_no, new_checksum, + buf_calc_page_crc32(read_buf), checksum_field1); + } + + if (is_log_enabled + && curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) { + fprintf(log_file, "page::%lu;" + " old style: calculated =" + " %lu; recorded checksum = %lu\n", + page_no, old_checksum, + checksum_field2); + fprintf(log_file, "page::%lu;" + " new style: calculated =" + " %lu; recorded checksum = %lu\n", + page_no, new_checksum, + checksum_field1); + } +#endif /* UNIV_INNOCHECKSUM */ + if (checksum_field2 != mach_read_from_4(read_buf + FIL_PAGE_LSN) - && checksum_field2 != buf_calc_page_old_checksum(read_buf)) { + && checksum_field2 != old_checksum) { return(false); } @@ -561,8 +697,7 @@ buf_page_is_checksum_valid_innodb( /* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id (always equal to 0), to FIL_PAGE_SPACE_OR_CHKSUM */ - if (checksum_field1 != 0 - && checksum_field1 != buf_calc_page_new_checksum(read_buf)) { + if (checksum_field1 != 0 && checksum_field1 != new_checksum) { return(false); } @@ -573,38 +708,74 @@ buf_page_is_checksum_valid_innodb( @param[in] read_buf database page @param[in] checksum_field1 new checksum field @param[in] checksum_field2 old checksum field -@return true if the page is in none checksum format */ +@param[in] page_no page number of given read_buf +@param[in] is_log_enabled true if log option is enabled +@param[in] log_file file pointer to log_file +@param[in] curr_algo current checksum algorithm +@return true if the page is in none checksum format. */ UNIV_INLINE bool buf_page_is_checksum_valid_none( - const byte* read_buf, - ulint checksum_field1, - ulint checksum_field2) + const byte* read_buf, + ulint checksum_field1, + ulint checksum_field2 +#ifdef UNIV_INNOCHECKSUM + ,uintmax_t page_no, + bool is_log_enabled, + FILE* log_file, + const srv_checksum_algorithm_t curr_algo +#endif /* UNIV_INNOCHECKSUM */ + ) { + +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled + && curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_NONE) { + fprintf(log_file, + "page::%lu; none checksum: calculated" + " = %lu; recorded checksum_field1 = %lu" + " recorded checksum_field2 = %lu\n", + page_no, BUF_NO_CHECKSUM_MAGIC, + checksum_field1, checksum_field2); + } +#endif /* UNIV_INNOCHECKSUM */ + return(checksum_field1 == checksum_field2 && checksum_field1 == BUF_NO_CHECKSUM_MAGIC); } -/********************************************************************//** -Checks if a page is corrupt. -@return TRUE if corrupted */ -UNIV_INTERN +/** Checks if a page is corrupt. 
+@param[in] check_lsn true if we need to check and complain about +the LSN +@param[in] read_buf database page +@param[in] page_size page size +@param[in] skip_checksum if true, skip checksum +@param[in] page_no page number of given read_buf +@param[in] strict_check true if strict-check option is enabled +@param[in] is_log_enabled true if log option is enabled +@param[in] log_file file pointer to log_file +@return TRUE if corrupted */ ibool buf_page_is_corrupted( -/*==================*/ - bool check_lsn, /*!< in: true if we need to check - and complain about the LSN */ - const byte* read_buf, /*!< in: a database page */ - ulint zip_size) /*!< in: size of compressed page; - 0 for uncompressed pages */ + bool check_lsn, + const byte* read_buf, + const page_size_t& page_size, + bool skip_checksum +#ifdef UNIV_INNOCHECKSUM + ,uintmax_t page_no, + bool strict_check, + bool is_log_enabled, + FILE* log_file +#endif /* UNIV_INNOCHECKSUM */ +) { - ulint page_encrypted = fil_page_is_encrypted(read_buf); + ulint page_encrypted = (mach_read_from_4(read_buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION) != 0); ulint checksum_field1; ulint checksum_field2; - if (!page_encrypted && !zip_size + if (!page_encrypted && !page_size.is_compressed() && memcmp(read_buf + FIL_PAGE_LSN + 4, - read_buf + UNIV_PAGE_SIZE + read_buf + page_size.logical() - FIL_PAGE_END_LSN_OLD_CHKSUM + 4, 4)) { /* Stored log sequence numbers at the start and the end @@ -613,47 +784,55 @@ buf_page_is_corrupted( return(TRUE); } -#ifndef UNIV_HOTBACKUP +#if !defined(UNIV_HOTBACKUP) && !defined(UNIV_INNOCHECKSUM) if (check_lsn && recv_lsn_checks_on) { - lsn_t current_lsn; + lsn_t current_lsn; + const lsn_t page_lsn + = mach_read_from_8(read_buf + FIL_PAGE_LSN); /* Since we are going to reset the page LSN during the import phase it makes no sense to spam the log with error messages. */ - if (log_peek_lsn(¤t_lsn) - && current_lsn - < mach_read_from_8(read_buf + FIL_PAGE_LSN)) { - ut_print_timestamp(stderr); - - fprintf(stderr, - " InnoDB: Error: page %lu log sequence number" - " " LSN_PF "\n" - "InnoDB: is in the future! Current system " - "log sequence number " LSN_PF ".\n" - "InnoDB: Your database may be corrupt or " - "you may have copied the InnoDB\n" - "InnoDB: tablespace but not the InnoDB " - "log files. See\n" - "InnoDB: " REFMAN - "forcing-innodb-recovery.html\n" - "InnoDB: for more information.\n", - (ulong) mach_read_from_4( - read_buf + FIL_PAGE_OFFSET), - (lsn_t) mach_read_from_8( - read_buf + FIL_PAGE_LSN), - current_lsn); + if (log_peek_lsn(¤t_lsn) && current_lsn < page_lsn) { + + const ulint space_id = mach_read_from_4( + read_buf + FIL_PAGE_SPACE_ID); + const ulint page_no = mach_read_from_4( + read_buf + FIL_PAGE_OFFSET); + + ib::error() << "Page " << page_id_t(space_id, page_no) + << " log sequence number " << page_lsn + << " is in the future! Current system" + << " log sequence number " + << current_lsn << "."; + + ib::error() << "Your database may be corrupt or" + " you may have copied the InnoDB" + " tablespace but not the InnoDB" + " log files. 
" + << FORCE_RECOVERY_MSG; + } } -#endif +#endif /* !UNIV_HOTBACKUP && !UNIV_INNOCHECKSUM */ /* Check whether the checksum fields have correct values */ - if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { + if (srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_NONE + || skip_checksum) { return(FALSE); } - if (zip_size) { - return(!page_zip_verify_checksum(read_buf, zip_size)); + if (page_size.is_compressed()) { +#ifdef UNIV_INNOCHECKSUM + return(!page_zip_verify_checksum(read_buf, + page_size.physical(), + page_no, strict_check, + is_log_enabled, log_file)); +#else + return(!page_zip_verify_checksum(read_buf, + page_size.physical())); +#endif /* UNIV_INNOCHECKSUM */ } if (page_encrypted) { return (FALSE); @@ -663,30 +842,57 @@ buf_page_is_corrupted( read_buf + FIL_PAGE_SPACE_OR_CHKSUM); checksum_field2 = mach_read_from_4( - read_buf + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); + read_buf + page_size.logical() - FIL_PAGE_END_LSN_OLD_CHKSUM); #if FIL_PAGE_LSN % 8 #error "FIL_PAGE_LSN must be 64 bit aligned" #endif /* declare empty pages non-corrupted */ - if (checksum_field1 == 0 && checksum_field2 == 0 - && *reinterpret_cast(read_buf + - FIL_PAGE_LSN) == 0) { + if (checksum_field1 == 0 + && checksum_field2 == 0 + && *reinterpret_cast( + read_buf + FIL_PAGE_LSN) == 0) { + /* make sure that the page is really empty */ - for (ulint i = 0; i < UNIV_PAGE_SIZE; i++) { - if (read_buf[i] != 0) { - return(TRUE); + + ulint i; + + for (i = 0; i < page_size.logical(); ++i) { + + /* The FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID has been + repurposed for page compression. It can be + set for uncompressed empty pages. */ + + if ((i < FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + || i >= FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID) + && read_buf[i] != 0) { + + break; } } - - return(FALSE); +#ifdef UNIV_INNOCHECKSUM + if (i >= page_size.logical()) { + if (is_log_enabled) { + fprintf(log_file, "Page::%lu" + " is empty and uncorrupted\n", + page_no); + } + return(FALSE); + } +#else + return(i < page_size.logical()); +#endif /* UNIV_INNOCHECKSUM */ } - DBUG_EXECUTE_IF("buf_page_is_corrupt_failure", return(TRUE); ); +#ifndef UNIV_INNOCHECKSUM + const page_id_t page_id(mach_read_from_4( + read_buf + FIL_PAGE_SPACE_ID), + mach_read_from_4( + read_buf + FIL_PAGE_OFFSET)); +#endif /* UNIV_INNOCHECKSUM */ - ulint page_no = mach_read_from_4(read_buf + FIL_PAGE_OFFSET); - ulint space_id = mach_read_from_4(read_buf + FIL_PAGE_SPACE_ID); + DBUG_EXECUTE_IF("buf_page_import_corrupt_failure", return(TRUE); ); const srv_checksum_algorithm_t curr_algo = static_cast(srv_checksum_algorithm); @@ -695,99 +901,194 @@ buf_page_is_corrupted( case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: if (buf_page_is_checksum_valid_crc32(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo +#endif /* UNIV_INNOCHECKSUM */ + )) { return(FALSE); } if (buf_page_is_checksum_valid_none(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { if (curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_NONE, - space_id, page_no); + page_id); } - +#endif /* UNIV_INNOCHECKSUM */ + +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled) { + + fprintf(log_file, "page::%lu;" + " old style: calculated = %lu;" + " recorded = %lu\n", page_no, + buf_calc_page_old_checksum(read_buf), + 
checksum_field2); + fprintf(log_file, "page::%lu;" + " new style: calculated = %lu;" + " crc32 = %u; recorded = %lu\n", + page_no, + buf_calc_page_new_checksum(read_buf), + buf_calc_page_crc32(read_buf), + checksum_field1); + } +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } if (buf_page_is_checksum_valid_innodb(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { if (curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_INNODB, - space_id, page_no); + page_id); } - +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled) { + fprintf(log_file, "Fail; page %lu" + " invalid (fails crc32 checksum)\n", + page_no); + } +#endif /* UNIV_INNOCHECKSUM */ return(TRUE); case SRV_CHECKSUM_ALGORITHM_INNODB: case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: if (buf_page_is_checksum_valid_innodb(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo +#endif /* UNIV_INNOCHECKSUM */ + )) { return(FALSE); } if (buf_page_is_checksum_valid_none(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { if (curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_NONE, - space_id, page_no); + page_id); } - +#endif /* UNIV_INNOCHECKSUM */ + +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled) { + fprintf(log_file, "page::%lu;" + " old style: calculated = %lu;" + " recorded = %lu\n", page_no, + buf_calc_page_old_checksum(read_buf), + checksum_field2); + fprintf(log_file, "page::%lu;" + " new style: calculated = %lu;" + " crc32 = %u; recorded = %lu\n", + page_no, + buf_calc_page_new_checksum(read_buf), + buf_calc_page_crc32(read_buf), + checksum_field1); + } +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } if (buf_page_is_checksum_valid_crc32(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { if (curr_algo == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_CRC32, - space_id, page_no); + page_id); } +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled) { + fprintf(log_file, "Fail; page %lu" + " invalid (fails innodb checksum)\n", + page_no); + } +#endif /* UNIV_INNOCHECKSUM */ return(TRUE); case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: if (buf_page_is_checksum_valid_none(read_buf, - checksum_field1, checksum_field2)) { - return(FALSE); + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo +#endif /* UNIV_INNOCHECKSUM */ + )) { + return(false); } if (buf_page_is_checksum_valid_crc32(read_buf, - checksum_field1, checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_CRC32, - space_id, page_no); + page_id); +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } if (buf_page_is_checksum_valid_innodb(read_buf, - checksum_field1, 
checksum_field2)) { + checksum_field1, checksum_field2 +#ifdef UNIV_INNOCHECKSUM + , page_no, is_log_enabled, log_file, curr_algo)) { +#else /* UNIV_INNOCHECKSUM */ + )) { page_warn_strict_checksum( curr_algo, SRV_CHECKSUM_ALGORITHM_INNODB, - space_id, page_no); + page_id); +#endif /* UNIV_INNOCHECKSUM */ return(FALSE); } +#ifdef UNIV_INNOCHECKSUM + if (is_log_enabled) { + fprintf(log_file, "Fail; page %lu" + " invalid (fails none checksum)\n", + page_no); + } +#endif /* UNIV_INNOCHECKSUM */ return(TRUE); case SRV_CHECKSUM_ALGORITHM_NONE: @@ -801,118 +1102,125 @@ buf_page_is_corrupted( return(FALSE); } -/********************************************************************//** -Prints a page to stderr. */ -UNIV_INTERN +#ifndef UNIV_INNOCHECKSUM + +/** Prints a page to stderr. +@param[in] read_buf a database page +@param[in] page_size page size +@param[in] flags 0 or BUF_PAGE_PRINT_NO_CRASH or +BUF_PAGE_PRINT_NO_FULL */ void buf_page_print( -/*===========*/ - const byte* read_buf, /*!< in: a database page */ - ulint zip_size, /*!< in: compressed page size, or - 0 for uncompressed pages */ - ulint flags) /*!< in: 0 or - BUF_PAGE_PRINT_NO_CRASH or - BUF_PAGE_PRINT_NO_FULL */ - + const byte* read_buf, + const page_size_t& page_size, + ulint flags) { #ifndef UNIV_HOTBACKUP dict_index_t* index; #endif /* !UNIV_HOTBACKUP */ - ulint size = zip_size; - - if (!size) { - size = UNIV_PAGE_SIZE; - } if (!(flags & BUF_PAGE_PRINT_NO_FULL)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Page dump in ascii and hex (%lu bytes):\n", - (ulong) size); - ut_print_buf(stderr, read_buf, size); + + ib::info() << "Page dump in ascii and hex (" + << page_size.physical() << " bytes):"; + + ut_print_buf(stderr, read_buf, page_size.physical()); fputs("\nInnoDB: End of page dump\n", stderr); } - if (zip_size) { + if (page_size.is_compressed()) { /* Print compressed page. 
*/ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Compressed page type (" ULINTPF "); " - "stored checksum in field1 " ULINTPF "; " - "calculated checksums for field1: " - "%s " ULINTPF ", " - "%s " ULINTPF ", " - "%s " ULINTPF "; " - "page LSN " LSN_PF "; " - "page number (if stored to page already) " ULINTPF "; " - "space id (if stored to page already) " ULINTPF "\n", - fil_page_get_type(read_buf), - mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM), - buf_checksum_algorithm_name( - SRV_CHECKSUM_ALGORITHM_CRC32), - page_zip_calc_checksum(read_buf, zip_size, - SRV_CHECKSUM_ALGORITHM_CRC32), - buf_checksum_algorithm_name( - SRV_CHECKSUM_ALGORITHM_INNODB), - page_zip_calc_checksum(read_buf, zip_size, - SRV_CHECKSUM_ALGORITHM_INNODB), - buf_checksum_algorithm_name( - SRV_CHECKSUM_ALGORITHM_NONE), - page_zip_calc_checksum(read_buf, zip_size, - SRV_CHECKSUM_ALGORITHM_NONE), - mach_read_from_8(read_buf + FIL_PAGE_LSN), - mach_read_from_4(read_buf + FIL_PAGE_OFFSET), - mach_read_from_4(read_buf - + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID)); + ib::info() << "Compressed page type (" + << fil_page_get_type(read_buf) + << "); stored checksum in field1 " + << mach_read_from_4( + read_buf + FIL_PAGE_SPACE_OR_CHKSUM) + << "; calculated checksums for field1: " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_CRC32) + << " " + << page_zip_calc_checksum( + read_buf, page_size.physical(), + SRV_CHECKSUM_ALGORITHM_CRC32) + << "/" + << page_zip_calc_checksum( + read_buf, page_size.physical(), + SRV_CHECKSUM_ALGORITHM_CRC32, true) + << ", " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_INNODB) + << " " + << page_zip_calc_checksum( + read_buf, page_size.physical(), + SRV_CHECKSUM_ALGORITHM_INNODB) + << ", " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_NONE) + << " " + << page_zip_calc_checksum( + read_buf, page_size.physical(), + SRV_CHECKSUM_ALGORITHM_NONE) + << "; page LSN " + << mach_read_from_8(read_buf + FIL_PAGE_LSN) + << "; page number (if stored to page" + << " already) " + << mach_read_from_4(read_buf + FIL_PAGE_OFFSET) + << "; space id (if stored to page already) " + << mach_read_from_4( + read_buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + } else { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: uncompressed page, " - "stored checksum in field1 " ULINTPF ", " - "calculated checksums for field1: " - "%s " UINT32PF ", " - "%s " ULINTPF ", " - "%s " ULINTPF ", " - - "stored checksum in field2 " ULINTPF ", " - "calculated checksums for field2: " - "%s " UINT32PF ", " - "%s " ULINTPF ", " - "%s " ULINTPF ", " - - "page LSN " ULINTPF " " ULINTPF ", " - "low 4 bytes of LSN at page end " ULINTPF ", " - "page number (if stored to page already) " ULINTPF ", " - "space id (if created with >= MySQL-4.1.1 " - "and stored already) %lu\n", - mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_CRC32), - buf_calc_page_crc32(read_buf), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_INNODB), - buf_calc_page_new_checksum(read_buf), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_NONE), - BUF_NO_CHECKSUM_MAGIC, - - mach_read_from_4(read_buf + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN_OLD_CHKSUM), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_CRC32), - buf_calc_page_crc32(read_buf), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_INNODB), - buf_calc_page_old_checksum(read_buf), - buf_checksum_algorithm_name(SRV_CHECKSUM_ALGORITHM_NONE), - BUF_NO_CHECKSUM_MAGIC, - - mach_read_from_4(read_buf + 
FIL_PAGE_LSN), - mach_read_from_4(read_buf + FIL_PAGE_LSN + 4), - mach_read_from_4(read_buf + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN_OLD_CHKSUM + 4), - mach_read_from_4(read_buf + FIL_PAGE_OFFSET), - mach_read_from_4(read_buf - + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID)); + const uint32_t crc32 = buf_calc_page_crc32(read_buf); + const uint32_t crc32_legacy = buf_calc_page_crc32(read_buf, + true); ulint page_type = fil_page_get_type(read_buf); - fprintf(stderr, "InnoDB: page type %ld meaning %s\n", page_type, - fil_get_page_type_name(page_type)); + ib::info() << "Uncompressed page, stored checksum in field1 " + << mach_read_from_4( + read_buf + FIL_PAGE_SPACE_OR_CHKSUM) + << ", calculated checksums for field1: " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_CRC32) << " " + << crc32 << "/" << crc32_legacy + << ", " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_INNODB) << " " + << buf_calc_page_new_checksum(read_buf) + << ", " + << " page type " << page_type << " == " + << fil_get_page_type_name(page_type) << "." + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_NONE) << " " + << BUF_NO_CHECKSUM_MAGIC + << ", stored checksum in field2 " + << mach_read_from_4(read_buf + page_size.logical() + - FIL_PAGE_END_LSN_OLD_CHKSUM) + << ", calculated checksums for field2: " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_CRC32) << " " + << crc32 << "/" << crc32_legacy + << ", " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_INNODB) << " " + << buf_calc_page_old_checksum(read_buf) + << ", " + << buf_checksum_algorithm_name( + SRV_CHECKSUM_ALGORITHM_NONE) << " " + << BUF_NO_CHECKSUM_MAGIC + << ", page LSN " + << mach_read_from_4(read_buf + FIL_PAGE_LSN) + << " " + << mach_read_from_4(read_buf + FIL_PAGE_LSN + 4) + << ", low 4 bytes of LSN at page end " + << mach_read_from_4(read_buf + page_size.logical() + - FIL_PAGE_END_LSN_OLD_CHKSUM + 4) + << ", page number (if stored to page already) " + << mach_read_from_4(read_buf + FIL_PAGE_OFFSET) + << ", space id (if created with >= MySQL-4.1.1" + " and stored already) " + << mach_read_from_4( + read_buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); } #ifndef UNIV_HOTBACKUP @@ -931,17 +1239,19 @@ buf_page_print( switch (fil_page_get_type(read_buf)) { index_id_t index_id; case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: index_id = btr_page_get_index_id(read_buf); - fprintf(stderr, + ib::error() << "InnoDB: Page may be an index page where" - " index id is %llu\n", - (ullint) index_id); + " index id is " << index_id; + #ifndef UNIV_HOTBACKUP index = dict_index_find_on_id_low(index_id); if (index) { - fputs("InnoDB: (", stderr); - dict_index_name_print(stderr, NULL, index); - fputs(")\n", stderr); + ib::info() + << "Index " << index_id + << " is " << index->name + << " in table " << index->table->name; } #endif /* !UNIV_HOTBACKUP */ break; @@ -993,6 +1303,8 @@ buf_page_print( #ifndef UNIV_HOTBACKUP # ifdef PFS_GROUP_BUFFER_SYNC +extern mysql_pfs_key_t buffer_block_mutex_key; + /********************************************************************//** This function registers mutexes and rwlocks in buffer blocks with performance schema. 
If PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER is @@ -1005,27 +1317,24 @@ pfs_register_buffer_block( /*======================*/ buf_chunk_t* chunk) /*!< in/out: chunk of buffers */ { - ulint i; - ulint num_to_register; buf_block_t* block; + ulint num_to_register; block = chunk->blocks; - num_to_register = ut_min(chunk->size, - PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER); - - for (i = 0; i < num_to_register; i++) { - ib_mutex_t* mutex; - rw_lock_t* rwlock; + num_to_register = ut_min( + chunk->size, PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER); + for (ulint i = 0; i < num_to_register; i++) { # ifdef UNIV_PFS_MUTEX + BPageMutex* mutex; + mutex = &block->mutex; - ut_a(!mutex->pfs_psi); - mutex->pfs_psi = (PSI_server) - ? PSI_server->init_mutex(buffer_block_mutex_key, mutex) - : NULL; + mutex->pfs_add(buffer_block_mutex_key); # endif /* UNIV_PFS_MUTEX */ + rw_lock_t* rwlock; + # ifdef UNIV_PFS_RWLOCK rwlock = &block->lock; ut_a(!rwlock->pfs_psi); @@ -1033,14 +1342,14 @@ pfs_register_buffer_block( ? PSI_server->init_rwlock(buf_block_lock_key, rwlock) : NULL; -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG rwlock = &block->debug_latch; ut_a(!rwlock->pfs_psi); rwlock->pfs_psi = (PSI_server) ? PSI_server->init_rwlock(buf_block_debug_latch_key, rwlock) : NULL; -# endif /* UNIV_SYNC_DEBUG */ +# endif /* UNIV_DEBUG */ # endif /* UNIV_PFS_RWLOCK */ block++; @@ -1066,6 +1375,7 @@ buf_block_init( block->page.state = BUF_BLOCK_NOT_USED; block->page.buf_fix_count = 0; block->page.io_fix = BUF_IO_NONE; + block->page.flush_observer = NULL; block->page.key_version = 0; block->page.page_encrypted = false; block->page.page_compressed = false; @@ -1077,57 +1387,59 @@ buf_block_init( block->modify_clock = 0; block->page.slot = NULL; -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - block->page.file_page_was_freed = FALSE; -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + ut_d(block->page.file_page_was_freed = FALSE); - block->check_index_page_at_flush = FALSE; block->index = NULL; + block->made_dirty_with_no_latch = false; + block->skip_flush_check = false; + + ut_d(block->page.in_page_hash = FALSE); + ut_d(block->page.in_zip_hash = FALSE); + ut_d(block->page.in_flush_list = FALSE); + ut_d(block->page.in_free_list = FALSE); + ut_d(block->page.in_LRU_list = FALSE); + ut_d(block->in_unzip_LRU_list = FALSE); + ut_d(block->in_withdraw_list = FALSE); -#ifdef UNIV_DEBUG - block->page.in_page_hash = FALSE; - block->page.in_zip_hash = FALSE; - block->page.in_flush_list = FALSE; - block->page.in_free_list = FALSE; - block->page.in_LRU_list = FALSE; - block->in_unzip_LRU_list = FALSE; -#endif /* UNIV_DEBUG */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG block->n_pointers = 0; #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ page_zip_des_init(&block->page.zip); + mutex_create(LATCH_ID_BUF_BLOCK_MUTEX, &block->mutex); + #if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC /* If PFS_SKIP_BUFFER_MUTEX_RWLOCK is defined, skip registration - of buffer block mutex/rwlock with performance schema. If - PFS_GROUP_BUFFER_SYNC is defined, skip the registration - since buffer block mutex/rwlock will be registered later in - pfs_register_buffer_block() */ + of buffer block rwlock with performance schema. + + If PFS_GROUP_BUFFER_SYNC is defined, skip the registration + since buffer block rwlock will be registered later in + pfs_register_buffer_block(). 
*/ - mutex_create(PFS_NOT_INSTRUMENTED, &block->mutex, SYNC_BUF_BLOCK); rw_lock_create(PFS_NOT_INSTRUMENTED, &block->lock, SYNC_LEVEL_VARYING); -# ifdef UNIV_SYNC_DEBUG - rw_lock_create(PFS_NOT_INSTRUMENTED, - &block->debug_latch, SYNC_NO_ORDER_CHECK); -# endif /* UNIV_SYNC_DEBUG */ + ut_d(rw_lock_create( + PFS_NOT_INSTRUMENTED, + &block->debug_latch, SYNC_NO_ORDER_CHECK)); #else /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */ - mutex_create(buffer_block_mutex_key, &block->mutex, SYNC_BUF_BLOCK); + rw_lock_create(buf_block_lock_key, &block->lock, SYNC_LEVEL_VARYING); -# ifdef UNIV_SYNC_DEBUG - rw_lock_create(buf_block_debug_latch_key, - &block->debug_latch, SYNC_NO_ORDER_CHECK); -# endif /* UNIV_SYNC_DEBUG */ + ut_d(rw_lock_create( + buf_block_debug_latch_key, + &block->debug_latch, SYNC_NO_ORDER_CHECK)); + #endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */ + block->lock.is_block_lock = 1; + ut_ad(rw_lock_validate(&(block->lock))); } /********************************************************************//** Allocates a chunk of buffer frames. -@return chunk, or NULL on failure */ +@return chunk, or NULL on failure */ static buf_chunk_t* buf_chunk_init( @@ -1147,8 +1459,10 @@ buf_chunk_init( mem_size += ut_2pow_round((mem_size / UNIV_PAGE_SIZE) * (sizeof *block) + (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE); - chunk->mem_size = mem_size; - chunk->mem = os_mem_alloc_large(&chunk->mem_size); + DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL);); + + chunk->mem = buf_pool->allocator.allocate_large(mem_size, + &chunk->mem_pfx); if (UNIV_UNLIKELY(chunk->mem == NULL)) { @@ -1157,16 +1471,15 @@ buf_chunk_init( #ifdef HAVE_LIBNUMA if (srv_numa_interleave) { - int st = mbind(chunk->mem, chunk->mem_size, + int st = mbind(chunk->mem, mem_size, MPOL_INTERLEAVE, numa_all_nodes_ptr->maskp, numa_all_nodes_ptr->size, MPOL_MF_MOVE); if (st != 0) { - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to set NUMA memory policy of buffer" - " pool page frames to MPOL_INTERLEAVE" - " (error: %s).", strerror(errno)); + ib::warn() << "Failed to set NUMA memory policy of" + " buffer pool page frames to MPOL_INTERLEAVE" + " (error: " << strerror(errno) << ")."; } } #endif // HAVE_LIBNUMA @@ -1181,7 +1494,7 @@ buf_chunk_init( it is bigger, we may allocate more blocks than requested. */ frame = (byte*) ut_align(chunk->mem, UNIV_PAGE_SIZE); - chunk->size = chunk->mem_size / UNIV_PAGE_SIZE + chunk->size = chunk->mem_pfx.m_size / UNIV_PAGE_SIZE - (frame != chunk->mem); /* Subtract the space needed for block descriptors. */ @@ -1208,7 +1521,7 @@ buf_chunk_init( UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); /* Add the block to the free list */ - UT_LIST_ADD_LAST(list, buf_pool->free, (&block->page)); + UT_LIST_ADD_LAST(buf_pool->free, &block->page); ut_d(block->page.in_free_list = TRUE); ut_ad(buf_pool_from_block(block) == buf_pool); @@ -1217,9 +1530,11 @@ buf_chunk_init( frame += UNIV_PAGE_SIZE; } + buf_pool_register_chunk(chunk); + #ifdef PFS_GROUP_BUFFER_SYNC pfs_register_buffer_block(chunk); -#endif +#endif /* PFS_GROUP_BUFFER_SYNC */ return(chunk); } @@ -1227,7 +1542,7 @@ buf_chunk_init( /*********************************************************************//** Finds a block in the given buffer chunk that points to a given compressed page. 
-@return buffer block pointing to the compressed page, or NULL */ +@return buffer block pointing to the compressed page, or NULL */ static buf_block_t* buf_chunk_contains_zip( @@ -1253,8 +1568,7 @@ buf_chunk_contains_zip( /*********************************************************************//** Finds a block in the buffer pool that points to a given compressed page. -@return buffer block pointing to the compressed page, or NULL */ -UNIV_INTERN +@return buffer block pointing to the compressed page, or NULL */ buf_block_t* buf_pool_contains_zip( /*==================*/ @@ -1281,7 +1595,7 @@ buf_pool_contains_zip( /*********************************************************************//** Checks that all file pages in the buffer chunk are in a replaceable state. -@return address of a non-free block, or NULL if all freed */ +@return address of a non-free block, or NULL if all freed */ static const buf_block_t* buf_chunk_not_freed( @@ -1312,9 +1626,9 @@ buf_chunk_not_freed( file pages. */ break; case BUF_BLOCK_FILE_PAGE: - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); ready = buf_flush_ready_for_replace(&block->page); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); if (!ready) { @@ -1349,6 +1663,7 @@ buf_pool_set_sizes(void) srv_buf_pool_curr_size = curr_size; srv_buf_pool_old_size = srv_buf_pool_size; + srv_buf_pool_base_size = srv_buf_pool_size; buf_pool_mutex_exit_all(); } @@ -1356,7 +1671,6 @@ buf_pool_set_sizes(void) /********************************************************************//** Initialize a buffer pool instance. @return DB_SUCCESS if all goes well. */ -UNIV_INTERN ulint buf_pool_init_instance( /*===================*/ @@ -1365,50 +1679,99 @@ buf_pool_init_instance( ulint instance_no) /*!< in: id of the instance */ { ulint i; + ulint chunk_size; buf_chunk_t* chunk; + ut_ad(buf_pool_size % srv_buf_pool_chunk_unit == 0); + /* 1. 
Initialize general fields ------------------------------- */ - mutex_create(buf_pool_mutex_key, - &buf_pool->mutex, SYNC_BUF_POOL); - mutex_create(buf_pool_zip_mutex_key, - &buf_pool->zip_mutex, SYNC_BUF_BLOCK); + mutex_create(LATCH_ID_BUF_POOL, &buf_pool->mutex); + + mutex_create(LATCH_ID_BUF_POOL_ZIP, &buf_pool->zip_mutex); + + new(&buf_pool->allocator) + ut_allocator(mem_key_buf_buf_pool); buf_pool_mutex_enter(buf_pool); if (buf_pool_size > 0) { - buf_pool->n_chunks = 1; + buf_pool->n_chunks + = buf_pool_size / srv_buf_pool_chunk_unit; + chunk_size = srv_buf_pool_chunk_unit; + + buf_pool->chunks = + reinterpret_cast(ut_zalloc_nokey( + buf_pool->n_chunks * sizeof(*chunk))); + buf_pool->chunks_old = NULL; + + UT_LIST_INIT(buf_pool->LRU, &buf_page_t::LRU); + UT_LIST_INIT(buf_pool->free, &buf_page_t::list); + UT_LIST_INIT(buf_pool->withdraw, &buf_page_t::list); + buf_pool->withdraw_target = 0; + UT_LIST_INIT(buf_pool->flush_list, &buf_page_t::list); + UT_LIST_INIT(buf_pool->unzip_LRU, &buf_block_t::unzip_LRU); + +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG + UT_LIST_INIT(buf_pool->zip_clean, &buf_page_t::list); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ - buf_pool->chunks = chunk = - (buf_chunk_t*) mem_zalloc(sizeof *chunk); + for (i = 0; i < UT_ARR_SIZE(buf_pool->zip_free); ++i) { + UT_LIST_INIT( + buf_pool->zip_free[i], &buf_buddy_free_t::list); + } - UT_LIST_INIT(buf_pool->free); + buf_pool->curr_size = 0; + chunk = buf_pool->chunks; - if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) { - mem_free(chunk); - mem_free(buf_pool); + do { + if (!buf_chunk_init(buf_pool, chunk, chunk_size)) { + while (--chunk >= buf_pool->chunks) { + buf_block_t* block = chunk->blocks; - buf_pool_mutex_exit(buf_pool); + for (i = chunk->size; i--; block++) { + mutex_free(&block->mutex); + rw_lock_free(&block->lock); - return(DB_ERROR); - } + ut_d(rw_lock_free( + &block->debug_latch)); + } + + buf_pool->allocator.deallocate_large( + chunk->mem, &chunk->mem_pfx); + } + ut_free(buf_pool->chunks); + buf_pool_mutex_exit(buf_pool); + + return(DB_ERROR); + } + + buf_pool->curr_size += chunk->size; + } while (++chunk < buf_pool->chunks + buf_pool->n_chunks); buf_pool->instance_no = instance_no; - buf_pool->old_pool_size = buf_pool_size; - buf_pool->curr_size = chunk->size; + buf_pool->read_ahead_area = + ut_min(BUF_READ_AHEAD_PAGES, + ut_2_power_up(buf_pool->curr_size / + BUF_READ_AHEAD_PORTION)); buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE; + buf_pool->old_size = buf_pool->curr_size; + buf_pool->n_chunks_new = buf_pool->n_chunks; + /* Number of locks protecting page_hash must be a power of two */ srv_n_page_hash_locks = static_cast( - ut_2_power_up(srv_n_page_hash_locks)); + ut_2_power_up(srv_n_page_hash_locks)); ut_a(srv_n_page_hash_locks != 0); ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS); - buf_pool->page_hash = ha_create(2 * buf_pool->curr_size, - srv_n_page_hash_locks, - MEM_HEAP_FOR_PAGE_HASH, - SYNC_BUF_PAGE_HASH); + buf_pool->page_hash = ib_create( + 2 * buf_pool->curr_size, + LATCH_ID_HASH_TABLE_RW_LOCK, + srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH); + + buf_pool->page_hash_old = NULL; buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size); @@ -1417,17 +1780,19 @@ buf_pool_init_instance( /* 2. 
Initialize flushing fields -------------------------------- */ - mutex_create(flush_list_mutex_key, &buf_pool->flush_list_mutex, - SYNC_BUF_FLUSH_LIST); + mutex_create(LATCH_ID_FLUSH_LIST, &buf_pool->flush_list_mutex); for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) { - buf_pool->no_flush[i] = os_event_create(); + buf_pool->no_flush[i] = os_event_create(0); } - buf_pool->watch = (buf_page_t*) mem_zalloc( + buf_pool->watch = (buf_page_t*) ut_zalloc_nokey( sizeof(*buf_pool->watch) * BUF_POOL_WATCH_SIZE); + for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) { + buf_pool->watch[i].buf_pool_index = buf_pool->instance_no; + } - /* All fields are initialized by mem_zalloc(). */ + /* All fields are initialized by ut_zalloc_nokey(). */ buf_pool->try_LRU_scan = TRUE; @@ -1445,10 +1810,12 @@ buf_pool_init_instance( new(&buf_pool->single_scan_itr) LRUItr(buf_pool, &buf_pool->mutex); /* Initialize the temporal memory array and slots */ - buf_pool->tmp_arr = (buf_tmp_array_t *)mem_zalloc(sizeof(buf_tmp_array_t)); + buf_pool->tmp_arr = (buf_tmp_array_t *)ut_malloc_nokey(sizeof(buf_tmp_array_t)); + memset(buf_pool->tmp_arr, 0, sizeof(buf_tmp_array_t)); ulint n_slots = srv_n_read_io_threads * srv_n_write_io_threads * (8 * OS_AIO_N_PENDING_IOS_PER_THREAD); buf_pool->tmp_arr->n_slots = n_slots; - buf_pool->tmp_arr->slots = (buf_tmp_buffer_t*)mem_zalloc(sizeof(buf_tmp_buffer_t) * n_slots); + buf_pool->tmp_arr->slots = (buf_tmp_buffer_t*)ut_malloc_nokey(sizeof(buf_tmp_buffer_t) * n_slots); + memset(buf_pool->tmp_arr->slots, 0, (sizeof(buf_tmp_buffer_t) * n_slots)); buf_pool_mutex_exit(buf_pool); @@ -1470,11 +1837,18 @@ buf_pool_free_instance( buf_chunk_t* chunk; buf_chunk_t* chunks; buf_page_t* bpage; + buf_page_t* prev_bpage = 0; + + mutex_free(&buf_pool->mutex); + mutex_free(&buf_pool->zip_mutex); + mutex_free(&buf_pool->flush_list_mutex); - bpage = UT_LIST_GET_LAST(buf_pool->LRU); - while (bpage != NULL) { - buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage); - enum buf_page_state state = buf_page_get_state(bpage); + for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); + bpage != NULL; + bpage = prev_bpage) { + + prev_bpage = UT_LIST_GET_PREV(LRU, bpage); + buf_page_state state = buf_page_get_state(bpage); ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); @@ -1486,25 +1860,38 @@ buf_pool_free_instance( || srv_fast_shutdown == 2); buf_page_free_descriptor(bpage); } - - bpage = prev_bpage; } - mem_free(buf_pool->watch); + ut_free(buf_pool->watch); buf_pool->watch = NULL; chunks = buf_pool->chunks; chunk = chunks + buf_pool->n_chunks; while (--chunk >= chunks) { - os_mem_free_large(chunk->mem, chunk->mem_size); + buf_block_t* block = chunk->blocks; + + for (ulint i = chunk->size; i--; block++) { + mutex_free(&block->mutex); + rw_lock_free(&block->lock); + + ut_d(rw_lock_free(&block->debug_latch)); + } + + buf_pool->allocator.deallocate_large( + chunk->mem, &chunk->mem_pfx); + } + + for (ulint i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; ++i) { + os_event_destroy(buf_pool->no_flush[i]); } - mem_free(buf_pool->chunks); + ut_free(buf_pool->chunks); ha_clear(buf_pool->page_hash); hash_table_free(buf_pool->page_hash); hash_table_free(buf_pool->zip_hash); + buf_pool->allocator.~ut_allocator(); /* Free all used temporary slots */ if (buf_pool->tmp_arr) { for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { @@ -1527,15 +1914,14 @@ buf_pool_free_instance( } } - mem_free(buf_pool->tmp_arr->slots); - mem_free(buf_pool->tmp_arr); + ut_free(buf_pool->tmp_arr->slots); + ut_free(buf_pool->tmp_arr); buf_pool->tmp_arr = NULL; 
} /********************************************************************//** Creates the buffer pool. -@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */ -UNIV_INTERN +@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */ dberr_t buf_pool_init( /*==========*/ @@ -1549,24 +1935,33 @@ buf_pool_init( ut_ad(n_instances <= MAX_BUFFER_POOLS); ut_ad(n_instances == srv_buf_pool_instances); + buf_pool_resizing = false; + buf_pool_withdrawing = false; + buf_withdraw_clock = 0; + #ifdef HAVE_LIBNUMA if (srv_numa_interleave) { - ib_logf(IB_LOG_LEVEL_INFO, - "Setting NUMA memory policy to MPOL_INTERLEAVE"); + ib::info() << "Setting NUMA memory policy to MPOL_INTERLEAVE"; if (set_mempolicy(MPOL_INTERLEAVE, numa_all_nodes_ptr->maskp, numa_all_nodes_ptr->size) != 0) { - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to set NUMA memory policy to" - " MPOL_INTERLEAVE (error: %s).", - strerror(errno)); + ib::warn() << "Failed to set NUMA memory policy to" + " MPOL_INTERLEAVE: " << strerror(errno); } } #endif // HAVE_LIBNUMA - buf_pool_ptr = (buf_pool_t*) mem_zalloc( + buf_pool_ptr = (buf_pool_t*) ut_zalloc_nokey( n_instances * sizeof *buf_pool_ptr); + buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t()); + + ut_d(buf_chunk_map_latch = static_cast( + ut_zalloc_nokey(sizeof(*buf_chunk_map_latch)))); + + ut_d(rw_lock_create( + buf_chunk_map_latch_key, buf_chunk_map_latch, SYNC_ANY_LATCH)); + for (i = 0; i < n_instances; i++) { buf_pool_t* ptr = &buf_pool_ptr[i]; @@ -1579,6 +1974,8 @@ buf_pool_init( } } + buf_chunk_map_ref = buf_chunk_map_reg; + buf_pool_set_sizes(); buf_LRU_old_ratio_update(100 * 3/ 8, FALSE); @@ -1586,17 +1983,15 @@ buf_pool_init( #ifdef HAVE_LIBNUMA if (srv_numa_interleave) { - ib_logf(IB_LOG_LEVEL_INFO, - "Setting NUMA memory policy to MPOL_DEFAULT"); + ib::info() << "Setting NUMA memory policy to MPOL_DEFAULT"; if (set_mempolicy(MPOL_DEFAULT, NULL, 0) != 0) { - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to set NUMA memory policy to" - " MPOL_DEFAULT (error: %s).", strerror(errno)); + ib::warn() << "Failed to set NUMA memory policy to" + " MPOL_DEFAULT: " << strerror(errno); } } #endif // HAVE_LIBNUMA - buf_flush_event = os_event_create(); + buf_flush_event = os_event_create(0); return(DB_SUCCESS); } @@ -1604,97 +1999,1142 @@ buf_pool_init( /********************************************************************//** Frees the buffer pool at shutdown. This must not be invoked before freeing all mutexes. */ -UNIV_INTERN void buf_pool_free( /*==========*/ ulint n_instances) /*!< in: numbere of instances to free */ { - ulint i; - - for (i = 0; i < n_instances; i++) { + for (ulint i = 0; i < n_instances; i++) { buf_pool_free_instance(buf_pool_from_array(i)); } - mem_free(buf_pool_ptr); + ut_d(rw_lock_free(buf_chunk_map_latch)); + ut_d(ut_free(buf_chunk_map_latch)); + ut_d(buf_chunk_map_latch = NULL); + + UT_DELETE(buf_chunk_map_reg); + buf_chunk_map_reg = buf_chunk_map_ref = NULL; + + ut_free(buf_pool_ptr); buf_pool_ptr = NULL; } -/********************************************************************//** -Clears the adaptive hash index on all pages in the buffer pool. */ -UNIV_INTERN -void -buf_pool_clear_hash_index(void) -/*===========================*/ +/** Reallocate a control block. +@param[in] buf_pool buffer pool instance +@param[in] block pointer to control block +@retval false if failed because of no free blocks. 
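The function below swaps the control block under a page without changing the page's identity: the frame and descriptor are copied into a freshly obtained block, which then takes over the old block's LRU position, its unzip_LRU/flush_list links, and its page_hash entry (same page id, same fold). A stripped-down model of that pattern using standard containers (blk and relocate are made-up names, not InnoDB types):

#include <algorithm>
#include <cstdint>
#include <list>
#include <unordered_map>

struct blk { std::uint64_t page_id; /* frame, flags, ... */ };

/* Move the contents of 'src' into the already-allocated 'dst' while
keeping the LRU position and the hash mapping for the same page id. */
static void
relocate(std::list<blk*>& lru,
	 std::unordered_map<std::uint64_t, blk*>& page_hash,
	 blk* src, blk* dst)
{
	*dst = *src;					/* copy descriptor */
	std::replace(lru.begin(), lru.end(), src, dst);	/* keep LRU slot */
	page_hash[dst->page_id] = dst;			/* keep hash key */
}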
*/ +static +bool +buf_page_realloc( + buf_pool_t* buf_pool, + buf_block_t* block) { - ulint p; + buf_block_t* new_block; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(!btr_search_enabled); + ut_ad(buf_pool_withdrawing); + ut_ad(buf_pool_mutex_own(buf_pool)); + ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - for (p = 0; p < srv_buf_pool_instances; p++) { - buf_pool_t* buf_pool = buf_pool_from_array(p); - buf_chunk_t* chunks = buf_pool->chunks; - buf_chunk_t* chunk = chunks + buf_pool->n_chunks; + new_block = buf_LRU_get_free_only(buf_pool); - while (--chunk >= chunks) { - buf_block_t* block = chunk->blocks; - ulint i = chunk->size; + if (new_block == NULL) { + return(false); /* free_list was not enough */ + } - for (; i--; block++) { - dict_index_t* index = block->index; + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, block->page.id); - /* We can set block->index = NULL - when we have an x-latch on btr_search_latch; - see the comment in buf0buf.h */ + rw_lock_x_lock(hash_lock); + mutex_enter(&block->mutex); - if (!index) { - /* Not hashed */ - continue; - } + if (buf_page_can_relocate(&block->page)) { + mutex_enter(&new_block->mutex); - block->index = NULL; -# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG - block->n_pointers = 0; -# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + memcpy(new_block->frame, block->frame, UNIV_PAGE_SIZE); + memcpy(&new_block->page, &block->page, sizeof block->page); + + /* relocate LRU list */ + ut_ad(block->page.in_LRU_list); + ut_ad(!block->page.in_zip_hash); + ut_d(block->page.in_LRU_list = FALSE); + + buf_LRU_adjust_hp(buf_pool, &block->page); + + buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, &block->page); + UT_LIST_REMOVE(buf_pool->LRU, &block->page); + + if (prev_b != NULL) { + UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, &new_block->page); + } else { + UT_LIST_ADD_FIRST(buf_pool->LRU, &new_block->page); + } + + if (buf_pool->LRU_old == &block->page) { + buf_pool->LRU_old = &new_block->page; + } + + ut_ad(new_block->page.in_LRU_list); + + /* relocate unzip_LRU list */ + if (block->page.zip.data != NULL) { + ut_ad(block->in_unzip_LRU_list); + ut_d(new_block->in_unzip_LRU_list = TRUE); + UNIV_MEM_DESC(&new_block->page.zip.data, + page_zip_get_size(&new_block->page.zip)); + + buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, block); + UT_LIST_REMOVE(buf_pool->unzip_LRU, block); + + ut_d(block->in_unzip_LRU_list = FALSE); + block->page.zip.data = NULL; + page_zip_set_size(&block->page.zip, 0); + + if (prev_block != NULL) { + UT_LIST_INSERT_AFTER(buf_pool->unzip_LRU, prev_block, new_block); + } else { + UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, new_block); } + } else { + ut_ad(!block->in_unzip_LRU_list); + ut_d(new_block->in_unzip_LRU_list = FALSE); + } + + /* relocate buf_pool->page_hash */ + ut_ad(block->page.in_page_hash); + ut_ad(&block->page == buf_page_hash_get_low(buf_pool, + block->page.id)); + ut_d(block->page.in_page_hash = FALSE); + ulint fold = block->page.id.fold(); + ut_ad(fold == new_block->page.id.fold()); + HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, (&block->page)); + HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, (&new_block->page)); + + ut_ad(new_block->page.in_page_hash); + + buf_block_modify_clock_inc(block); + memset(block->frame + FIL_PAGE_OFFSET, 0xff, 4); + memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xff, 4); + UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE); + buf_block_set_state(block, BUF_BLOCK_REMOVE_HASH); + 
block->page.id.reset(ULINT32_UNDEFINED, ULINT32_UNDEFINED); + + /* Relocate buf_pool->flush_list. */ + if (block->page.oldest_modification) { + buf_flush_relocate_on_flush_list( + &block->page, &new_block->page); } + + /* set other flags of buf_block_t */ + + ut_ad(!block->index); + new_block->index = NULL; + new_block->n_hash_helps = 0; + new_block->n_fields = 1; + new_block->left_side = TRUE; + + new_block->lock_hash_val = block->lock_hash_val; + ut_ad(new_block->lock_hash_val == lock_rec_hash( + new_block->page.id.space(), + new_block->page.id.page_no())); + + rw_lock_x_unlock(hash_lock); + mutex_exit(&new_block->mutex); + + /* free block */ + buf_block_set_state(block, BUF_BLOCK_MEMORY); + buf_LRU_block_free_non_file_page(block); + + mutex_exit(&block->mutex); + } else { + rw_lock_x_unlock(hash_lock); + mutex_exit(&block->mutex); + + /* free new_block */ + mutex_enter(&new_block->mutex); + buf_LRU_block_free_non_file_page(new_block); + mutex_exit(&new_block->mutex); } + + return(true); /* free_list was enough */ } -/********************************************************************//** -Relocate a buffer control block. Relocates the block on the LRU list -and in buf_pool->page_hash. Does not relocate bpage->list. -The caller must take care of relocating bpage->list. */ -UNIV_INTERN +/** Sets the global variable that feeds MySQL's innodb_buffer_pool_resize_status +to the specified string. The format and the following parameters are the +same as the ones used for printf(3). +@param[in] fmt format +@param[in] ... extra parameters according to fmt */ +static void -buf_relocate( -/*=========*/ - buf_page_t* bpage, /*!< in/out: control block being relocated; - buf_page_get_state(bpage) must be - BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ - buf_page_t* dpage) /*!< in/out: destination control block */ +buf_resize_status( + const char* fmt, + ...) { - buf_page_t* b; - ulint fold; - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + va_list ap; - fold = buf_page_address_fold(bpage->space, bpage->offset); + va_start(ap, fmt); - ut_ad(buf_pool_mutex_own(buf_pool)); - ut_ad(buf_page_hash_lock_held_x(buf_pool, bpage)); - ut_ad(mutex_own(buf_page_get_mutex(bpage))); - ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); + ut_vsnprintf( + export_vars.innodb_buffer_pool_resize_status, + sizeof(export_vars.innodb_buffer_pool_resize_status), + fmt, ap); + + va_end(ap); + + ib::info() << export_vars.innodb_buffer_pool_resize_status; +} + +/** Determines if a block is intended to be withdrawn. +@param[in] buf_pool buffer pool instance +@param[in] block pointer to control block +@retval true if will be withdrawn */ +bool +buf_block_will_withdrawn( + buf_pool_t* buf_pool, + const buf_block_t* block) +{ + ut_ad(buf_pool->curr_size < buf_pool->old_size); + ut_ad(!buf_pool_resizing || buf_pool_mutex_own(buf_pool)); + + const buf_chunk_t* chunk + = buf_pool->chunks + buf_pool->n_chunks_new; + const buf_chunk_t* echunk + = buf_pool->chunks + buf_pool->n_chunks; + + while (chunk < echunk) { + if (block >= chunk->blocks + && block < chunk->blocks + chunk->size) { + return(true); + } + ++chunk; + } + + return(false); +} + +/** Determines if a frame is intended to be withdrawn. 
+@param[in] buf_pool buffer pool instance +@param[in] ptr pointer to a frame +@retval true if will be withdrawn */ +bool +buf_frame_will_withdrawn( + buf_pool_t* buf_pool, + const byte* ptr) +{ + ut_ad(buf_pool->curr_size < buf_pool->old_size); + ut_ad(!buf_pool_resizing || buf_pool_mutex_own(buf_pool)); + + const buf_chunk_t* chunk + = buf_pool->chunks + buf_pool->n_chunks_new; + const buf_chunk_t* echunk + = buf_pool->chunks + buf_pool->n_chunks; + + while (chunk < echunk) { + if (ptr >= chunk->blocks->frame + && ptr < (chunk->blocks + chunk->size - 1)->frame + + UNIV_PAGE_SIZE) { + return(true); + } + ++chunk; + } + + return(false); +} + +/** Withdraw the buffer pool blocks from end of the buffer pool instance +until withdrawn by buf_pool->withdraw_target. +@param[in] buf_pool buffer pool instance +@retval true if retry is needed */ +static +bool +buf_pool_withdraw_blocks( + buf_pool_t* buf_pool) +{ + buf_block_t* block; + ulint loop_count = 0; + ulint i = buf_pool_index(buf_pool); + + ib::info() << "buffer pool " << i + << " : start to withdraw the last " + << buf_pool->withdraw_target << " blocks."; + + /* Minimize buf_pool->zip_free[i] lists */ + buf_pool_mutex_enter(buf_pool); + buf_buddy_condense_free(buf_pool); + buf_pool_mutex_exit(buf_pool); + + while (UT_LIST_GET_LEN(buf_pool->withdraw) + < buf_pool->withdraw_target) { + + /* try to withdraw from free_list */ + ulint count1 = 0; + + buf_pool_mutex_enter(buf_pool); + block = reinterpret_cast( + UT_LIST_GET_FIRST(buf_pool->free)); + while (block != NULL + && UT_LIST_GET_LEN(buf_pool->withdraw) + < buf_pool->withdraw_target) { + ut_ad(block->page.in_free_list); + ut_ad(!block->page.in_flush_list); + ut_ad(!block->page.in_LRU_list); + ut_a(!buf_page_in_file(&block->page)); + + buf_block_t* next_block; + next_block = reinterpret_cast( + UT_LIST_GET_NEXT( + list, &block->page)); + + if (buf_block_will_withdrawn(buf_pool, block)) { + /* This should be withdrawn */ + UT_LIST_REMOVE( + buf_pool->free, + &block->page); + UT_LIST_ADD_LAST( + buf_pool->withdraw, + &block->page); + ut_d(block->in_withdraw_list = TRUE); + count1++; + } + + block = next_block; + } + buf_pool_mutex_exit(buf_pool); + + /* reserve free_list length */ + if (UT_LIST_GET_LEN(buf_pool->withdraw) + < buf_pool->withdraw_target) { + ulint scan_depth; + flush_counters_t n; + + /* cap scan_depth with current LRU size. 
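The batch depth chosen below is the larger of the remaining withdraw target and srv_LRU_scan_depth, clamped to the current LRU length, so a single LRU flush batch never scans more pages than the list actually holds.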
*/ + buf_pool_mutex_enter(buf_pool); + scan_depth = UT_LIST_GET_LEN(buf_pool->LRU); + buf_pool_mutex_exit(buf_pool); + + scan_depth = ut_min( + ut_max(buf_pool->withdraw_target + - UT_LIST_GET_LEN(buf_pool->withdraw), + static_cast(srv_LRU_scan_depth)), + scan_depth); + + buf_flush_do_batch(buf_pool, BUF_FLUSH_LRU, + scan_depth, 0, &n); + buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU); + + if (n.flushed) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE, + MONITOR_LRU_BATCH_FLUSH_COUNT, + MONITOR_LRU_BATCH_FLUSH_PAGES, + n.flushed); + } + } + + /* relocate blocks/buddies in withdrawn area */ + ulint count2 = 0; + + buf_pool_mutex_enter(buf_pool); + buf_page_t* bpage; + bpage = UT_LIST_GET_FIRST(buf_pool->LRU); + while (bpage != NULL) { + BPageMutex* block_mutex; + buf_page_t* next_bpage; + + block_mutex = buf_page_get_mutex(bpage); + mutex_enter(block_mutex); + + next_bpage = UT_LIST_GET_NEXT(LRU, bpage); + + if (bpage->zip.data != NULL + && buf_frame_will_withdrawn( + buf_pool, + static_cast(bpage->zip.data))) { + + if (buf_page_can_relocate(bpage)) { + mutex_exit(block_mutex); + buf_pool_mutex_exit_forbid(buf_pool); + if(!buf_buddy_realloc( + buf_pool, bpage->zip.data, + page_zip_get_size( + &bpage->zip))) { + + /* failed to allocate block */ + buf_pool_mutex_exit_allow( + buf_pool); + break; + } + buf_pool_mutex_exit_allow(buf_pool); + mutex_enter(block_mutex); + count2++; + } + /* NOTE: if the page is in use, + not reallocated yet */ + } + + if (buf_page_get_state(bpage) + == BUF_BLOCK_FILE_PAGE + && buf_block_will_withdrawn( + buf_pool, + reinterpret_cast(bpage))) { + + if (buf_page_can_relocate(bpage)) { + mutex_exit(block_mutex); + buf_pool_mutex_exit_forbid(buf_pool); + if(!buf_page_realloc( + buf_pool, + reinterpret_cast( + bpage))) { + /* failed to allocate block */ + buf_pool_mutex_exit_allow( + buf_pool); + break; + } + buf_pool_mutex_exit_allow(buf_pool); + count2++; + } else { + mutex_exit(block_mutex); + } + /* NOTE: if the page is in use, + not reallocated yet */ + } else { + mutex_exit(block_mutex); + } + + bpage = next_bpage; + } + buf_pool_mutex_exit(buf_pool); + + buf_resize_status( + "buffer pool %lu : withdrawing blocks. (%lu/%lu)", + i, UT_LIST_GET_LEN(buf_pool->withdraw), + buf_pool->withdraw_target); + + ib::info() << "buffer pool " << i << " : withdrew " + << count1 << " blocks from free list." + << " Tried to relocate " << count2 << " pages (" + << UT_LIST_GET_LEN(buf_pool->withdraw) << "/" + << buf_pool->withdraw_target << ")."; + + if (++loop_count >= 10) { + /* give up for now. + retried after user threads paused. */ + + ib::info() << "buffer pool " << i + << " : will retry to withdraw later."; + + /* need retry later */ + return(true); + } + } + + /* confirm withdrawn enough */ + const buf_chunk_t* chunk + = buf_pool->chunks + buf_pool->n_chunks_new; + const buf_chunk_t* echunk + = buf_pool->chunks + buf_pool->n_chunks; + + while (chunk < echunk) { + block = chunk->blocks; + for (ulint j = chunk->size; j--; block++) { + /* If !=BUF_BLOCK_NOT_USED block in the + withdrawn area, it means corruption + something */ + ut_a(buf_block_get_state(block) + == BUF_BLOCK_NOT_USED); + ut_ad(block->in_withdraw_list); + } + ++chunk; + } + + ib::info() << "buffer pool " << i << " : withdrawn target " + << UT_LIST_GET_LEN(buf_pool->withdraw) << " blocks."; + + /* retry is not needed */ + ++buf_withdraw_clock; + os_wmb; + + return(false); +} + +/** resize page_hash and zip_hash for a buffer pool instance. 
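The withdraw pass above interleaves three steps: harvest blocks from the free list, flush LRU pages to refill it, and relocate pages out of the doomed area, giving up after ten rounds. Below is a rough, standalone C++ skeleton of that control flow only; take_blocks_from_free_list() and the other stubs are inventions for illustration, not InnoDB functions.

    #include <algorithm>
    #include <cstddef>

    // Hypothetical stand-ins so the control flow below compiles on its own.
    static std::size_t take_blocks_from_free_list(std::size_t wanted) { return wanted / 2; }
    static void flush_some_lru_pages(std::size_t /*scan_depth*/) {}
    static void relocate_pages_out_of_withdrawn_area() {}

    // Returns true when the caller should retry later, mirroring the
    // "give up after 10 passes" rule of buf_pool_withdraw_blocks().
    bool withdraw_blocks(std::size_t& withdrawn, std::size_t target,
                         std::size_t lru_len, std::size_t lru_scan_depth)
    {
        std::size_t loops = 0;
        while (withdrawn < target) {
            withdrawn += take_blocks_from_free_list(target - withdrawn);

            if (withdrawn < target) {
                // Cap the LRU flush depth with the current LRU length.
                std::size_t depth = std::min(
                    std::max(target - withdrawn, lru_scan_depth), lru_len);
                flush_some_lru_pages(depth);
            }

            relocate_pages_out_of_withdrawn_area();

            if (++loops >= 10) {
                return true;    // give up for now; retry later
            }
        }
        return false;           // withdrew enough blocks
    }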
+@param[in] buf_pool buffer pool instance */ +static +void +buf_pool_resize_hash( + buf_pool_t* buf_pool) +{ + hash_table_t* new_hash_table; + + ut_ad(buf_pool->page_hash_old == NULL); + + /* recreate page_hash */ + new_hash_table = ib_recreate( + buf_pool->page_hash, 2 * buf_pool->curr_size); + + for (ulint i = 0; i < hash_get_n_cells(buf_pool->page_hash); i++) { + buf_page_t* bpage; + + bpage = static_cast<buf_page_t*>( + HASH_GET_FIRST( + buf_pool->page_hash, i)); + + while (bpage) { + buf_page_t* prev_bpage = bpage; + ulint fold; + + bpage = static_cast<buf_page_t*>( + HASH_GET_NEXT( + hash, prev_bpage)); + + fold = prev_bpage->id.fold(); + + HASH_DELETE(buf_page_t, hash, + buf_pool->page_hash, fold, + prev_bpage); + + HASH_INSERT(buf_page_t, hash, + new_hash_table, fold, + prev_bpage); + } + } + + buf_pool->page_hash_old = buf_pool->page_hash; + buf_pool->page_hash = new_hash_table; + + /* recreate zip_hash */ + new_hash_table = hash_create(2 * buf_pool->curr_size); + + for (ulint i = 0; i < hash_get_n_cells(buf_pool->zip_hash); i++) { + buf_page_t* bpage; + + bpage = static_cast<buf_page_t*>( + HASH_GET_FIRST(buf_pool->zip_hash, i)); + + while (bpage) { + buf_page_t* prev_bpage = bpage; + ulint fold; + + bpage = static_cast<buf_page_t*>( + HASH_GET_NEXT( + hash, prev_bpage)); + + fold = BUF_POOL_ZIP_FOLD( + reinterpret_cast<buf_block_t*>( + prev_bpage)); + + HASH_DELETE(buf_page_t, hash, + buf_pool->zip_hash, fold, + prev_bpage); + + HASH_INSERT(buf_page_t, hash, + new_hash_table, fold, + prev_bpage); + } + } + + hash_table_free(buf_pool->zip_hash); + buf_pool->zip_hash = new_hash_table; +} + +#ifndef DBUG_OFF +/** This is a debug routine to inject a memory allocation failure error. */ +static +void +buf_pool_resize_chunk_make_null(buf_chunk_t** new_chunks) +{ + static int count = 0; + + if (count == 1) { + ut_free(*new_chunks); + *new_chunks = NULL; + } + + count++; +} +#endif // DBUG_OFF + +/** Resize the buffer pool based on srv_buf_pool_size from +srv_buf_pool_old_size.
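buf_pool_resize_hash() above rebuilds page_hash and zip_hash at twice the size by walking every cell and re-inserting each entry under its fold. A small standalone sketch of that rebuild, using a simplified chained hash table rather than the InnoDB HASH_ macros; all names are illustrative.

    #include <cstddef>
    #include <vector>

    struct Page { std::size_t fold; Page* next; };   // stand-in for buf_page_t

    using Table = std::vector<Page*>;                // one bucket list per cell

    // Re-insert every entry of old_table into a table twice as large,
    // keyed by its fold value; old_table is emptied in the process.
    Table rebuild_twice_as_large(Table& old_table)
    {
        Table new_table(old_table.size() * 2, nullptr);
        for (Page*& head : old_table) {
            while (Page* p = head) {
                head = p->next;                                  // "HASH_DELETE"
                Page*& cell = new_table[p->fold % new_table.size()];
                p->next = cell;                                  // "HASH_INSERT"
                cell = p;
            }
        }
        return new_table;
    }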
*/ +void +buf_pool_resize() +{ + buf_pool_t* buf_pool; + ulint new_instance_size; + bool warning = false; + + ut_ad(!buf_pool_resizing); + ut_ad(!buf_pool_withdrawing); + ut_ad(srv_buf_pool_chunk_unit > 0); + + new_instance_size = srv_buf_pool_size / srv_buf_pool_instances; + new_instance_size /= UNIV_PAGE_SIZE; + + buf_resize_status("Resizing buffer pool from " ULINTPF " to " + ULINTPF " (unit=" ULINTPF ").", + srv_buf_pool_old_size, srv_buf_pool_size, + srv_buf_pool_chunk_unit); + + /* set new limit for all buffer pool for resizing */ + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool = buf_pool_from_array(i); + buf_pool_mutex_enter(buf_pool); + + ut_ad(buf_pool->curr_size == buf_pool->old_size); + ut_ad(buf_pool->n_chunks_new == buf_pool->n_chunks); + ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0); + ut_ad(buf_pool->flush_rbt == NULL); + + buf_pool->curr_size = new_instance_size; + + buf_pool->n_chunks_new = new_instance_size * UNIV_PAGE_SIZE + / srv_buf_pool_chunk_unit; + + buf_pool_mutex_exit(buf_pool); + } + + /* disable AHI if needed */ + bool btr_search_disabled = false; + + buf_resize_status("Disabling adaptive hash index."); + + btr_search_s_lock_all(); + if (btr_search_enabled) { + btr_search_s_unlock_all(); + btr_search_disabled = true; + } else { + btr_search_s_unlock_all(); + } + + btr_search_disable(true); + + if (btr_search_disabled) { + ib::info() << "disabled adaptive hash index."; + } + + /* set withdraw target */ + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool = buf_pool_from_array(i); + if (buf_pool->curr_size < buf_pool->old_size) { + ulint withdraw_target = 0; + + const buf_chunk_t* chunk + = buf_pool->chunks + buf_pool->n_chunks_new; + const buf_chunk_t* echunk + = buf_pool->chunks + buf_pool->n_chunks; + + while (chunk < echunk) { + withdraw_target += chunk->size; + ++chunk; + } + + ut_ad(buf_pool->withdraw_target == 0); + buf_pool->withdraw_target = withdraw_target; + buf_pool_withdrawing = true; + } + } + + buf_resize_status("Withdrawing blocks to be shrunken."); + + ib_time_t withdraw_started = ut_time(); + ulint message_interval = 60; + ulint retry_interval = 1; + +withdraw_retry: + bool should_retry_withdraw = false; + + /* wait for the number of blocks fit to the new size (if needed)*/ + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool = buf_pool_from_array(i); + if (buf_pool->curr_size < buf_pool->old_size) { + + should_retry_withdraw |= + buf_pool_withdraw_blocks(buf_pool); + } + } + + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { + /* abort to resize for shutdown. */ + buf_pool_withdrawing = false; + return; + } + + /* abort buffer pool load */ + buf_load_abort(); + + if (should_retry_withdraw + && ut_difftime(ut_time(), withdraw_started) >= message_interval) { + + if (message_interval > 900) { + message_interval = 1800; + } else { + message_interval *= 2; + } + + lock_mutex_enter(); + trx_sys_mutex_enter(); + bool found = false; + for (trx_t* trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list); + trx != NULL; + trx = UT_LIST_GET_NEXT(mysql_trx_list, trx)) { + if (trx->state != TRX_STATE_NOT_STARTED + && trx->mysql_thd != NULL + && ut_difftime(withdraw_started, + trx->start_time) > 0) { + if (!found) { + ib::warn() << + "The following trx might hold" + " the blocks in buffer pool to" + " be withdrawn. 
Buffer pool" + " resizing can complete only" + " after all the transactions" + " below release the blocks."; + found = true; + } + + lock_trx_print_wait_and_mvcc_state( + stderr, trx); + } + } + trx_sys_mutex_exit(); + lock_mutex_exit(); + + withdraw_started = ut_time(); + } + + if (should_retry_withdraw) { + ib::info() << "Will retry to withdraw " << retry_interval + << " seconds later."; + os_thread_sleep(retry_interval * 1000000); + + if (retry_interval > 5) { + retry_interval = 10; + } else { + retry_interval *= 2; + } + + goto withdraw_retry; + } + + buf_pool_withdrawing = false; + + buf_resize_status("Latching whole of buffer pool."); + +#ifndef DBUG_OFF + { + bool should_wait = true; + + while (should_wait) { + should_wait = false; + DBUG_EXECUTE_IF( + "ib_buf_pool_resize_wait_before_resize", + should_wait = true; os_thread_sleep(10000);); + } + } +#endif /* !DBUG_OFF */ + + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { + return; + } + + /* Indicate critical path */ + buf_pool_resizing = true; + + /* Acquire all buf_pool_mutex/hash_lock */ + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + + buf_pool_mutex_enter(buf_pool); + } + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + + hash_lock_x_all(buf_pool->page_hash); + } + + buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t()); + + /* add/delete chunks */ + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + buf_chunk_t* chunk; + buf_chunk_t* echunk; + + buf_resize_status("buffer pool %lu :" + " resizing with chunks %lu to %lu.", + i, buf_pool->n_chunks, buf_pool->n_chunks_new); + + if (buf_pool->n_chunks_new < buf_pool->n_chunks) { + /* delete chunks */ + chunk = buf_pool->chunks + + buf_pool->n_chunks_new; + echunk = buf_pool->chunks + buf_pool->n_chunks; + + ulint sum_freed = 0; + + while (chunk < echunk) { + buf_block_t* block = chunk->blocks; + + for (ulint j = chunk->size; + j--; block++) { + mutex_free(&block->mutex); + rw_lock_free(&block->lock); + + ut_d(rw_lock_free( + &block->debug_latch)); + } + + buf_pool->allocator.deallocate_large( + chunk->mem, &chunk->mem_pfx); + + sum_freed += chunk->size; + + ++chunk; + } + + /* discard withdraw list */ + UT_LIST_INIT(buf_pool->withdraw, + &buf_page_t::list); + buf_pool->withdraw_target = 0; + + ib::info() << "buffer pool " << i << " : " + << buf_pool->n_chunks - buf_pool->n_chunks_new + << " chunks (" << sum_freed + << " blocks) were freed."; + + buf_pool->n_chunks = buf_pool->n_chunks_new; + } + + { + /* reallocate buf_pool->chunks */ + const ulint new_chunks_size + = buf_pool->n_chunks_new * sizeof(*chunk); + + buf_chunk_t* new_chunks + = reinterpret_cast( + ut_zalloc_nokey_nofatal(new_chunks_size)); + + DBUG_EXECUTE_IF("buf_pool_resize_chunk_null", + buf_pool_resize_chunk_make_null(&new_chunks);); + + if (new_chunks == NULL) { + ib::error() << "buffer pool " << i + << " : failed to allocate" + " the chunk array."; + buf_pool->n_chunks_new + = buf_pool->n_chunks; + warning = true; + buf_pool->chunks_old = NULL; + goto calc_buf_pool_size; + } + + ulint n_chunks_copy = ut_min(buf_pool->n_chunks_new, + buf_pool->n_chunks); + + memcpy(new_chunks, buf_pool->chunks, + n_chunks_copy * sizeof(*chunk)); + + for (ulint j = 0; j < n_chunks_copy; j++) { + buf_pool_register_chunk(&new_chunks[j]); + } + + buf_pool->chunks_old = buf_pool->chunks; + buf_pool->chunks = new_chunks; + } + + + if (buf_pool->n_chunks_new > 
buf_pool->n_chunks) { + /* add chunks */ + chunk = buf_pool->chunks + buf_pool->n_chunks; + echunk = buf_pool->chunks + + buf_pool->n_chunks_new; + + ulint sum_added = 0; + ulint n_chunks = buf_pool->n_chunks; + + while (chunk < echunk) { + ulong unit = srv_buf_pool_chunk_unit; + + if (!buf_chunk_init(buf_pool, chunk, unit)) { + + ib::error() << "buffer pool " << i + << " : failed to allocate" + " new memory."; + + warning = true; + + buf_pool->n_chunks_new + = n_chunks; + + break; + } + + sum_added += chunk->size; + + ++n_chunks; + ++chunk; + } + + ib::info() << "buffer pool " << i << " : " + << buf_pool->n_chunks_new - buf_pool->n_chunks + << " chunks (" << sum_added + << " blocks) were added."; + + buf_pool->n_chunks = n_chunks; + } +calc_buf_pool_size: + + /* recalc buf_pool->curr_size */ + ulint new_size = 0; + + chunk = buf_pool->chunks; + do { + new_size += chunk->size; + } while (++chunk < buf_pool->chunks + + buf_pool->n_chunks); + + buf_pool->curr_size = new_size; + buf_pool->n_chunks_new = buf_pool->n_chunks; + + if (buf_pool->chunks_old) { + ut_free(buf_pool->chunks_old); + buf_pool->chunks_old = NULL; + } + } + + buf_pool_chunk_map_t* chunk_map_old = buf_chunk_map_ref; + buf_chunk_map_ref = buf_chunk_map_reg; + + /* set instance sizes */ + { + ulint curr_size = 0; + + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool = buf_pool_from_array(i); + + ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0); + + buf_pool->read_ahead_area = + ut_min(BUF_READ_AHEAD_PAGES, + ut_2_power_up(buf_pool->curr_size / + BUF_READ_AHEAD_PORTION)); + buf_pool->curr_pool_size + = buf_pool->curr_size * UNIV_PAGE_SIZE; + curr_size += buf_pool->curr_pool_size; + buf_pool->old_size = buf_pool->curr_size; + } + srv_buf_pool_curr_size = curr_size; + innodb_set_buf_pool_size(buf_pool_size_align(curr_size)); + } + + const bool new_size_too_diff + = srv_buf_pool_base_size > srv_buf_pool_size * 2 + || srv_buf_pool_base_size * 2 < srv_buf_pool_size; + + /* Normalize page_hash and zip_hash, + if the new size is too different */ + if (!warning && new_size_too_diff) { + + buf_resize_status("Resizing hash tables."); + + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + + buf_pool_resize_hash(buf_pool); + + ib::info() << "buffer pool " << i + << " : hash tables were resized."; + } + } + + /* Release all buf_pool_mutex/page_hash */ + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + + hash_unlock_x_all(buf_pool->page_hash); + buf_pool_mutex_exit(buf_pool); + + if (buf_pool->page_hash_old != NULL) { + hash_table_free(buf_pool->page_hash_old); + buf_pool->page_hash_old = NULL; + } + } + + ut_d(rw_lock_x_lock(buf_chunk_map_latch)); + UT_DELETE(chunk_map_old); + ut_d(rw_lock_x_unlock(buf_chunk_map_latch)); + + buf_pool_resizing = false; + + /* Normalize other components, if the new size is too different */ + if (!warning && new_size_too_diff) { + srv_buf_pool_base_size = srv_buf_pool_size; + + buf_resize_status("Resizing also other hash tables."); + + /* normalize lock_sys */ + srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE); + lock_sys_resize(srv_lock_table_size); + + /* normalize btr_search_sys */ + btr_search_sys_resize( + buf_pool_get_curr_size() / sizeof(void*) / 64); + + /* normalize dict_sys */ + dict_resize(); + + ib::info() << "Resized hash tables at lock_sys," + " adaptive hash index, dictionary."; + } + + /* normalize ibuf->max_size */ + 
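The new_size_too_diff test above decides whether the secondary hash tables get rebuilt; it fires only when the pool grew or shrank by more than a factor of two. As a minimal sketch (plain C++, names illustrative):

    #include <cstddef>

    // True when the pool changed by more than a factor of two; only then
    // is it worth re-sizing page_hash, lock_sys, the AHI, and so on.
    bool new_size_too_diff(std::size_t base_size, std::size_t new_size)
    {
        return base_size > new_size * 2 || base_size * 2 < new_size;
    }

For example, growing a 1 GiB base to 4 GiB trips the check, while growing it to 1.5 GiB does not.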
ibuf_max_size_update(srv_change_buffer_max_size); + + if (srv_buf_pool_old_size != srv_buf_pool_size) { + + ib::info() << "Completed to resize buffer pool from " + << srv_buf_pool_old_size + << " to " << srv_buf_pool_size << "."; + srv_buf_pool_old_size = srv_buf_pool_size; + } + + /* enable AHI if needed */ + if (btr_search_disabled) { + btr_search_enable(); + ib::info() << "Re-enabled adaptive hash index."; + } + + char now[32]; + + ut_sprintf_timestamp(now); + if (!warning) { + buf_resize_status("Completed resizing buffer pool at %s.", + now); + } else { + buf_resize_status("Resizing buffer pool failed," + " finished resizing at %s.", now); + } + +#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG + ut_a(buf_validate()); +#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ + + return; +} + +/** This is the thread for resizing buffer pool. It waits for an event and +when waked up either performs a resizing and sleeps again. +@param[in] arg a dummy parameter required by os_thread_create. +@return this function does not return, calls os_thread_exit() +*/ +extern "C" +os_thread_ret_t +DECLARE_THREAD(buf_resize_thread)( + void* arg __attribute__((unused))) +{ + my_thread_init(); + + srv_buf_resize_thread_active = true; + + buf_resize_status("not started"); + + while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { + os_event_wait(srv_buf_resize_event); + os_event_reset(srv_buf_resize_event); + + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { + break; + } + + buf_pool_mutex_enter_all(); + if (srv_buf_pool_old_size == srv_buf_pool_size) { + buf_pool_mutex_exit_all(); + std::ostringstream sout; + sout << "Size did not change (old size = new size = " + << srv_buf_pool_size << ". Nothing to do."; + buf_resize_status(sout.str().c_str()); + + /* nothing to do */ + continue; + } + buf_pool_mutex_exit_all(); + + buf_pool_resize(); + } + + srv_buf_resize_thread_active = false; + + my_thread_end(); + os_thread_exit(NULL); + + OS_THREAD_DUMMY_RETURN; +} + +/********************************************************************//** +Clears the adaptive hash index on all pages in the buffer pool. */ +void +buf_pool_clear_hash_index(void) +/*===========================*/ +{ + ulint p; + + ut_ad(btr_search_own_all(RW_LOCK_X)); + ut_ad(!buf_pool_resizing); + ut_ad(!btr_search_enabled); + + for (p = 0; p < srv_buf_pool_instances; p++) { + buf_pool_t* buf_pool = buf_pool_from_array(p); + buf_chunk_t* chunks = buf_pool->chunks; + buf_chunk_t* chunk = chunks + buf_pool->n_chunks; + + while (--chunk >= chunks) { + buf_block_t* block = chunk->blocks; + ulint i = chunk->size; + + for (; i--; block++) { + dict_index_t* index = block->index; + + /* We can set block->index = NULL + when we have an x-latch on search latch; + see the comment in buf0buf.h */ + + if (!index) { + /* Not hashed */ + continue; + } + + block->index = NULL; +# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG + block->n_pointers = 0; +# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + } + } + } +} + +/********************************************************************//** +Relocate a buffer control block. Relocates the block on the LRU list +and in buf_pool->page_hash. Does not relocate bpage->list. +The caller must take care of relocating bpage->list. 
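buf_resize_thread() above is a classic event-driven worker: sleep on an event, wake up, bail out early if the requested size equals the current one, and otherwise run the resize. Below is a sketch of the same shape using standard C++ primitives instead of os_event_wait()/os_thread_exit(); all names and sizes are illustrative.

    #include <atomic>
    #include <condition_variable>
    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    std::mutex              m;
    std::condition_variable wake;                    // like srv_buf_resize_event
    std::atomic<bool>       shutting_down{false};
    std::size_t             requested_size = 1024;   // set by the user thread
    std::size_t             current_size   = 1024;

    void do_resize(std::size_t target) { current_size = target; }   // placeholder

    void resize_worker()
    {
        std::unique_lock<std::mutex> lk(m);
        while (!shutting_down) {
            wake.wait(lk);                  // sleep until someone signals
            if (shutting_down) {
                break;
            }
            if (requested_size == current_size) {
                std::puts("Size did not change. Nothing to do.");
                continue;                   // mirrors the early-out above
            }
            do_resize(requested_size);
        }
    }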
*/ +static +void +buf_relocate( +/*=========*/ + buf_page_t* bpage, /*!< in/out: control block being relocated; + buf_page_get_state(bpage) must be + BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ + buf_page_t* dpage) /*!< in/out: destination control block */ +{ + buf_page_t* b; + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + + ut_ad(buf_pool_mutex_own(buf_pool)); + ut_ad(buf_page_hash_lock_held_x(buf_pool, bpage)); + ut_ad(mutex_own(buf_page_get_mutex(bpage))); + ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); ut_a(bpage->buf_fix_count == 0); ut_ad(bpage->in_LRU_list); ut_ad(!bpage->in_zip_hash); ut_ad(bpage->in_page_hash); - ut_ad(bpage == buf_page_hash_get_low(buf_pool, - bpage->space, - bpage->offset, - fold)); + ut_ad(bpage == buf_page_hash_get_low(buf_pool, bpage->id)); ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage)); #ifdef UNIV_DEBUG @@ -1723,12 +3163,12 @@ buf_relocate( /* relocate buf_pool->LRU */ b = UT_LIST_GET_PREV(LRU, bpage); - UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage); + UT_LIST_REMOVE(buf_pool->LRU, bpage); - if (b) { - UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, b, dpage); + if (b != NULL) { + UT_LIST_INSERT_AFTER(buf_pool->LRU, b, dpage); } else { - UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, dpage); + UT_LIST_ADD_FIRST(buf_pool->LRU, dpage); } if (UNIV_UNLIKELY(buf_pool->LRU_old == bpage)) { @@ -1748,10 +3188,11 @@ buf_relocate( #endif /* UNIV_LRU_DEBUG */ } - ut_d(UT_LIST_VALIDATE( - LRU, buf_page_t, buf_pool->LRU, CheckInLRUList())); + ut_d(CheckInLRUList::validate(buf_pool)); /* relocate buf_pool->page_hash */ + ulint fold = bpage->id.fold(); + ut_ad(fold == dpage->id.fold()); HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage); HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage); } @@ -1834,15 +3275,14 @@ LRUItr::start() return(m_hp); } -/********************************************************************//** -Determine if a block is a sentinel for a buffer pool watch. -@return TRUE if a sentinel for a buffer pool watch, FALSE if not */ -UNIV_INTERN +/** Determine if a block is a sentinel for a buffer pool watch. +@param[in] buf_pool buffer pool instance +@param[in] bpage block +@return TRUE if a sentinel for a buffer pool watch, FALSE if not */ ibool buf_pool_watch_is_sentinel( -/*=======================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - const buf_page_t* bpage) /*!< in: block */ + const buf_pool_t* buf_pool, + const buf_page_t* bpage) { /* We must also own the appropriate hash lock. */ ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage)); @@ -1861,35 +3301,29 @@ buf_pool_watch_is_sentinel( ut_ad(!bpage->in_zip_hash); ut_ad(bpage->in_page_hash); ut_ad(bpage->zip.data == NULL); - ut_ad(bpage->buf_fix_count > 0); return(TRUE); } -/****************************************************************//** -Add watch for the given page to be read in. Caller must have +/** Add watch for the given page to be read in. Caller must have appropriate hash_lock for the bpage. This function may release the hash_lock and reacquire it. 
+@param[in] page_id page id +@param[in,out] hash_lock hash_lock currently latched @return NULL if watch set, block if the page is in the buffer pool */ -UNIV_INTERN buf_page_t* buf_pool_watch_set( -/*===============*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - ulint fold) /*!< in: buf_page_address_fold(space, offset) */ + const page_id_t& page_id, + rw_lock_t** hash_lock) { buf_page_t* bpage; ulint i; - buf_pool_t* buf_pool = buf_pool_get(space, offset); - rw_lock_t* hash_lock; + buf_pool_t* buf_pool = buf_pool_get(page_id); - hash_lock = buf_page_hash_lock_get(buf_pool, fold); + ut_ad(*hash_lock == buf_page_hash_lock_get(buf_pool, page_id)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(*hash_lock, RW_LOCK_X)); - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); + bpage = buf_page_hash_get_low(buf_pool, page_id); if (bpage != NULL) { page_found: @@ -1899,11 +3333,7 @@ page_found: } /* Add to an existing watch. */ -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_increment_uint32(&bpage->buf_fix_count, 1); -#else - ++bpage->buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ + buf_block_fix(bpage); return(NULL); } @@ -1917,21 +3347,24 @@ page_found: /* To obey latching order first release the hash_lock. */ - rw_lock_x_unlock(hash_lock); + rw_lock_x_unlock(*hash_lock); buf_pool_mutex_enter(buf_pool); hash_lock_x_all(buf_pool->page_hash); + /* If not own buf_pool_mutex, page_hash can be changed. */ + *hash_lock = buf_page_hash_lock_get(buf_pool, page_id); + /* We have to recheck that the page was not loaded or a watch set by some other purge thread. This is because of the small time window between when we release the hash_lock to acquire buf_pool mutex above. */ - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); + bpage = buf_page_hash_get_low(buf_pool, page_id); if (UNIV_LIKELY_NULL(bpage)) { buf_pool_mutex_exit(buf_pool); - hash_unlock_x_all_but(buf_pool->page_hash, hash_lock); + hash_unlock_x_all_but(buf_pool->page_hash, *hash_lock); goto page_found; } @@ -1958,20 +3391,19 @@ page_found: buf_block_t::mutex or buf_pool->zip_mutex or both. */ bpage->state = BUF_BLOCK_ZIP_PAGE; - bpage->space = static_cast(space); - bpage->offset = static_cast(offset); + bpage->id.copy_from(page_id); bpage->buf_fix_count = 1; ut_d(bpage->in_page_hash = TRUE); HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, - fold, bpage); + page_id.fold(), bpage); buf_pool_mutex_exit(buf_pool); /* Once the sentinel is in the page_hash we can safely release all locks except just the relevant hash_lock */ hash_unlock_x_all_but(buf_pool->page_hash, - hash_lock); + *hash_lock); return(NULL); case BUF_BLOCK_ZIP_PAGE: @@ -1993,48 +3425,42 @@ page_found: return(NULL); } -/****************************************************************//** -Remove the sentinel block for the watch before replacing it with a real block. -buf_page_watch_clear() or buf_page_watch_occurred() will notice that -the block has been replaced with the real block. +/** Remove the sentinel block for the watch before replacing it with a +real block. buf_page_watch_clear() or buf_page_watch_occurred() will notice +that the block has been replaced with the real block. 
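buf_pool_watch_set() above has to drop the page_hash cell latch before taking the buffer pool mutex and all hash latches, and must therefore repeat the lookup afterwards ("If not own buf_pool_mutex, page_hash can be changed"). A standalone sketch of that release/reacquire/re-check pattern with simplified stand-in types, not the InnoDB ones:

    #include <mutex>
    #include <unordered_map>

    struct Pool {
        std::mutex pool_mutex;                      // coarse lock
        std::mutex hash_mutex;                      // per-table lock (simplified)
        std::unordered_map<long, int*> page_hash;   // page id -> page
    };

    // Returns the page if it is already cached, otherwise registers a
    // "watch" entry and returns nullptr.
    int* find_or_register_watch(Pool& pool, long page_id)
    {
        pool.hash_mutex.lock();
        auto it = pool.page_hash.find(page_id);
        if (it != pool.page_hash.end()) {
            int* page = it->second;
            pool.hash_mutex.unlock();
            return page;                            // page already present
        }
        pool.hash_mutex.unlock();                   // drop before the coarser lock

        std::lock_guard<std::mutex> pool_guard(pool.pool_mutex);
        std::lock_guard<std::mutex> hash_guard(pool.hash_mutex);

        // Must re-check: another thread may have loaded the page in the
        // window where no lock was held.
        it = pool.page_hash.find(page_id);
        if (it != pool.page_hash.end()) {
            return it->second;
        }
        pool.page_hash.emplace(page_id, nullptr);   // register the "watch"
        return nullptr;
    }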
+@param[in,out] buf_pool buffer pool instance +@param[in,out] watch sentinel for watch @return reference count, to be added to the replacement block */ static void buf_pool_watch_remove( -/*==================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint fold, /*!< in: buf_page_address_fold( - space, offset) */ - buf_page_t* watch) /*!< in/out: sentinel for watch */ + buf_pool_t* buf_pool, + buf_page_t* watch) { -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG /* We must also own the appropriate hash_bucket mutex. */ - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold); - ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, watch->id); + ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)); +#endif /* UNIV_DEBUG */ ut_ad(buf_pool_mutex_own(buf_pool)); - HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch); + HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, watch->id.fold(), + watch); ut_d(watch->in_page_hash = FALSE); watch->buf_fix_count = 0; watch->state = BUF_BLOCK_POOL_WATCH; } -/****************************************************************//** -Stop watching if the page has been read in. -buf_pool_watch_set(space,offset) must have returned NULL before. */ -UNIV_INTERN +/** Stop watching if the page has been read in. +buf_pool_watch_set(same_page_id) must have returned NULL before. +@param[in] page_id page id */ void buf_pool_watch_unset( -/*=================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); - ulint fold = buf_page_address_fold(space, offset); - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold); + buf_pool_t* buf_pool = buf_pool_get(page_id); /* We only need to have buf_pool mutex in case where we end up calling buf_pool_watch_remove but to obey latching order @@ -2043,58 +3469,44 @@ buf_pool_watch_unset( called from the purge thread. */ buf_pool_mutex_enter(buf_pool); + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, page_id); rw_lock_x_lock(hash_lock); - /* The page must exist because buf_pool_watch_set() increments - buf_fix_count. */ - - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); - - if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) { - buf_block_unfix(reinterpret_cast(bpage)); - } else { - - ut_ad(bpage->buf_fix_count > 0); - -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_decrement_uint32(&bpage->buf_fix_count, 1); -#else - --bpage->buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ + /* The page must exist because buf_pool_watch_set() + increments buf_fix_count. */ + bpage = buf_page_hash_get_low(buf_pool, page_id); - if (bpage->buf_fix_count == 0) { - buf_pool_watch_remove(buf_pool, fold, bpage); - } + if (buf_block_unfix(bpage) == 0 + && buf_pool_watch_is_sentinel(buf_pool, bpage)) { + buf_pool_watch_remove(buf_pool, bpage); } buf_pool_mutex_exit(buf_pool); rw_lock_x_unlock(hash_lock); } -/****************************************************************//** -Check if the page has been read in. -This may only be called after buf_pool_watch_set(space,offset) -has returned NULL and before invoking buf_pool_watch_unset(space,offset). -@return FALSE if the given page was not read in, TRUE if it was */ -UNIV_INTERN +/** Check if the page has been read in. +This may only be called after buf_pool_watch_set(same_page_id) +has returned NULL and before invoking buf_pool_watch_unset(same_page_id). 
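The reworked buf_pool_watch_unset() above removes the sentinel only when buf_block_unfix() drops the fix count to zero and the entry is still a sentinel. A minimal sketch of that rule, assuming a simplified Sentinel type rather than the InnoDB buf_page_t:

    #include <atomic>

    struct Sentinel {
        std::atomic<unsigned> buf_fix_count{1};   // set to 1 when the watch is placed
        bool is_sentinel = true;                  // false once a real page replaced it
    };

    // Returns true when the caller should physically remove the sentinel.
    bool unfix_and_maybe_remove(Sentinel& s)
    {
        unsigned remaining = --s.buf_fix_count;   // like buf_block_unfix()
        return remaining == 0 && s.is_sentinel;
    }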
+@param[in] page_id page id +@return FALSE if the given page was not read in, TRUE if it was */ ibool buf_pool_watch_occurred( -/*====================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { ibool ret; buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); - ulint fold = buf_page_address_fold(space, offset); - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, - fold); + buf_pool_t* buf_pool = buf_pool_get(page_id); + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, page_id); rw_lock_s_lock(hash_lock); + /* If not own buf_pool_mutex, page_hash can be changed. */ + hash_lock = buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id); + /* The page must exist because buf_pool_watch_set() increments buf_fix_count. */ - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); + bpage = buf_page_hash_get_low(buf_pool, page_id); ret = !buf_pool_watch_is_sentinel(buf_pool, bpage); rw_lock_s_unlock(hash_lock); @@ -2106,7 +3518,6 @@ buf_pool_watch_occurred( Moves a page to the start of the buffer pool LRU list. This high-level function can be used to prevent an important page from slipping out of the buffer pool. */ -UNIV_INTERN void buf_page_make_young( /*================*/ @@ -2145,54 +3556,26 @@ buf_page_make_young_if_needed( } } -/********************************************************************//** -Resets the check_index_page_at_flush field of a page if found in the buffer -pool. */ -UNIV_INTERN -void -buf_reset_check_index_page_at_flush( -/*================================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ -{ - buf_block_t* block; - buf_pool_t* buf_pool = buf_pool_get(space, offset); - - buf_pool_mutex_enter(buf_pool); - - block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset); - - if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) { - ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page)); - block->check_index_page_at_flush = FALSE; - } - - buf_pool_mutex_exit(buf_pool); -} +#ifdef UNIV_DEBUG -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG -/********************************************************************//** -Sets file_page_was_freed TRUE if the page is found in the buffer pool. +/** Sets file_page_was_freed TRUE if the page is found in the buffer pool. This function should be called when we free a file page and want the debug version to check that it is not accessed any more unless reallocated. 
-@return control block if found in page hash table, otherwise NULL */ -UNIV_INTERN +@param[in] page_id page id +@return control block if found in page hash table, otherwise NULL */ buf_page_t* buf_page_set_file_page_was_freed( -/*=============================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); rw_lock_t* hash_lock; - bpage = buf_page_hash_get_s_locked(buf_pool, space, offset, - &hash_lock); + bpage = buf_page_hash_get_s_locked(buf_pool, page_id, &hash_lock); if (bpage) { - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage)); mutex_enter(block_mutex); rw_lock_s_unlock(hash_lock); @@ -2205,27 +3588,23 @@ buf_page_set_file_page_was_freed( return(bpage); } -/********************************************************************//** -Sets file_page_was_freed FALSE if the page is found in the buffer pool. +/** Sets file_page_was_freed FALSE if the page is found in the buffer pool. This function should be called when we free a file page and want the debug version to check that it is not accessed any more unless reallocated. -@return control block if found in page hash table, otherwise NULL */ -UNIV_INTERN +@param[in] page_id page id +@return control block if found in page hash table, otherwise NULL */ buf_page_t* buf_page_reset_file_page_was_freed( -/*===============================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); rw_lock_t* hash_lock; - bpage = buf_page_hash_get_s_locked(buf_pool, space, offset, - &hash_lock); + bpage = buf_page_hash_get_s_locked(buf_pool, page_id, &hash_lock); if (bpage) { - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage)); mutex_enter(block_mutex); rw_lock_s_unlock(hash_lock); @@ -2235,21 +3614,19 @@ buf_page_reset_file_page_was_freed( return(bpage); } -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ +#endif /* UNIV_DEBUG */ -/********************************************************************//** -Attempts to discard the uncompressed frame of a compressed page. The -caller should not be holding any mutexes when this function is called. -@return TRUE if successful, FALSE otherwise. */ +/** Attempts to discard the uncompressed frame of a compressed page. +The caller should not be holding any mutexes when this function is called. +@param[in] page_id page id +@return TRUE if successful, FALSE otherwise. */ static void buf_block_try_discard_uncompressed( -/*===============================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); /* Since we need to acquire buf_pool mutex to discard the uncompressed frame and because page_hash mutex resides @@ -2259,7 +3636,7 @@ buf_block_try_discard_uncompressed( we need to check again if the block is still in page_hash. 
*/ buf_pool_mutex_enter(buf_pool); - bpage = buf_page_hash_get(buf_pool, space, offset); + bpage = buf_page_hash_get(buf_pool, page_id); if (bpage) { buf_LRU_free_page(bpage, false); @@ -2268,29 +3645,27 @@ buf_block_try_discard_uncompressed( buf_pool_mutex_exit(buf_pool); } -/********************************************************************//** -Get read access to a compressed page (usually of type +/** Get read access to a compressed page (usually of type FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2). The page must be released with buf_page_release_zip(). NOTE: the page is not protected by any latch. Mutual exclusion has to be implemented at a higher level. In other words, all possible accesses to a given page through this function must be protected by the same set of mutexes or latches. -@return pointer to the block */ -UNIV_INTERN +@param[in] page_id page id +@param[in] page_size page size +@return pointer to the block */ buf_page_t* buf_page_get_zip( -/*=============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size) { buf_page_t* bpage; - ib_mutex_t* block_mutex; + BPageMutex* block_mutex; rw_lock_t* hash_lock; ibool discard_attempted = FALSE; ibool must_read; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); buf_pool->stat.n_page_gets++; @@ -2299,8 +3674,8 @@ lookup: /* The following call will also grab the page_hash mutex if the page is found. */ - bpage = buf_page_hash_get_s_locked(buf_pool, space, - offset, &hash_lock); + bpage = buf_page_hash_get_s_locked(buf_pool, page_id, + &hash_lock); if (bpage) { ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage)); break; @@ -2309,7 +3684,7 @@ lookup: /* Page not in buf_pool: needs to be read from file */ ut_ad(!hash_lock); - buf_read_page(space, zip_size, offset, NULL); + buf_read_page(page_id, page_size, NULL); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ut_a(++buf_dbg_counter % 5771 || buf_validate()); @@ -2337,28 +3712,26 @@ err_exit: case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_DIRTY: + buf_block_fix(bpage); block_mutex = &buf_pool->zip_mutex; mutex_enter(block_mutex); -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_increment_uint32(&bpage->buf_fix_count, 1); -#else - ++bpage->buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ goto got_block; case BUF_BLOCK_FILE_PAGE: /* Discard the uncompressed page frame if possible. 
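buf_page_get_zip() above follows a lookup-or-read loop: probe the page hash, and if the page is absent, issue a read and jump back to the lookup. A standalone sketch of that loop with a plain std::unordered_map standing in for page_hash; names are illustrative only.

    #include <unordered_map>

    struct Page { bool io_in_progress = false; };

    static std::unordered_map<long, Page> cache;    // stand-in for page_hash

    static Page* hash_lookup(long id)
    {
        auto it = cache.find(id);
        return it == cache.end() ? nullptr : &it->second;
    }

    static void read_page_from_file(long id)        // stand-in for buf_read_page()
    {
        cache.emplace(id, Page{});
    }

    Page* get_compressed_page(long page_id)
    {
        for (;;) {
            if (Page* page = hash_lookup(page_id)) {
                return page;                        // found in the buffer pool
            }
            // Not cached: issue the read, then retry the lookup.
            read_page_from_file(page_id);
        }
    }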
*/ if (!discard_attempted) { rw_lock_s_unlock(hash_lock); - buf_block_try_discard_uncompressed(space, offset); + buf_block_try_discard_uncompressed(page_id); discard_attempted = TRUE; goto lookup; } + buf_block_buf_fix_inc((buf_block_t*) bpage, + __FILE__, __LINE__); + block_mutex = &((buf_block_t*) bpage)->mutex; mutex_enter(block_mutex); - buf_block_buf_fix_inc((buf_block_t*) bpage, __FILE__, __LINE__); goto got_block; } @@ -2369,9 +3742,8 @@ got_block: must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ; rw_lock_s_unlock(hash_lock); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - ut_a(!bpage->file_page_was_freed); -#endif /* defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG */ + + ut_ad(!bpage->file_page_was_freed); buf_page_set_accessed(bpage); @@ -2406,9 +3778,9 @@ got_block: } #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(buf_page_get_space(bpage), - buf_page_get_page_no(bpage)) == 0); -#endif + ut_a(ibuf_count_get(page_id) == 0); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + return(bpage); } @@ -2420,8 +3792,9 @@ buf_block_init_low( /*===============*/ buf_block_t* block) /*!< in: block to init */ { - block->check_index_page_at_flush = FALSE; block->index = NULL; + block->made_dirty_with_no_latch = false; + block->skip_flush_check = false; block->n_hash_helps = 0; block->n_fields = 1; @@ -2432,8 +3805,7 @@ buf_block_init_low( /********************************************************************//** Decompress a block. -@return TRUE if successful */ -UNIV_INTERN +@return TRUE if successful */ ibool buf_zip_decompress( /*===============*/ @@ -2443,38 +3815,43 @@ buf_zip_decompress( const byte* frame = block->page.zip.data; ulint size = page_zip_get_size(&block->page.zip); - ut_ad(buf_block_get_zip_size(block)); - ut_a(buf_block_get_space(block) != 0); + ut_ad(block->page.size.is_compressed()); + ut_a(block->page.id.space() != 0); if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: compressed page checksum mismatch" - " (space %u page %u): stored: %lu, crc32: %lu " - "innodb: %lu, none: %lu\n", - block->page.space, block->page.offset, - mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM), - page_zip_calc_checksum(frame, size, - SRV_CHECKSUM_ALGORITHM_CRC32), - page_zip_calc_checksum(frame, size, - SRV_CHECKSUM_ALGORITHM_INNODB), - page_zip_calc_checksum(frame, size, - SRV_CHECKSUM_ALGORITHM_NONE)); + ib::error() << "Compressed page checksum mismatch " + << block->page.id << "): stored: " + << mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM) + << ", crc32: " + << page_zip_calc_checksum( + frame, size, SRV_CHECKSUM_ALGORITHM_CRC32) + << "/" + << page_zip_calc_checksum( + frame, size, SRV_CHECKSUM_ALGORITHM_CRC32, + true) + << " innodb: " + << page_zip_calc_checksum( + frame, size, SRV_CHECKSUM_ALGORITHM_INNODB) + << ", none: " + << page_zip_calc_checksum( + frame, size, SRV_CHECKSUM_ALGORITHM_NONE); + return(FALSE); } switch (fil_page_get_type(frame)) { case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { return(TRUE); } - fprintf(stderr, - "InnoDB: unable to decompress space %lu page %lu\n", - (ulong) block->page.space, - (ulong) block->page.offset); + ib::error() << "Unable to decompress space " + << block->page.id.space() + << " page " << block->page.id.page_no(); + return(FALSE); case FIL_PAGE_TYPE_ALLOCATED: @@ -2485,155 +3862,168 @@ buf_zip_decompress( case FIL_PAGE_TYPE_ZBLOB: case FIL_PAGE_TYPE_ZBLOB2: /* Copy to uncompressed 
storage. */ - memcpy(block->frame, frame, - buf_block_get_zip_size(block)); + memcpy(block->frame, frame, block->page.size.physical()); return(TRUE); } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: unknown compressed page" - " type %lu\n", - fil_page_get_type(frame)); + ib::error() << "Unknown compressed page type " + << fil_page_get_type(frame); + return(FALSE); } #ifndef UNIV_HOTBACKUP /*******************************************************************//** -Gets the block to whose frame the pointer is pointing to if found -in this buffer pool instance. -@return pointer to block */ -UNIV_INTERN +Gets the block to whose frame the pointer is pointing to. +@return pointer to block, never NULL */ buf_block_t* -buf_block_align_instance( -/*=====================*/ - buf_pool_t* buf_pool, /*!< in: buffer in which the block - resides */ - const byte* ptr) /*!< in: pointer to a frame */ +buf_block_align( +/*============*/ + const byte* ptr) /*!< in: pointer to a frame */ { - buf_chunk_t* chunk; - ulint i; - - /* TODO: protect buf_pool->chunks with a mutex (it will - currently remain constant after buf_pool_init()) */ - for (chunk = buf_pool->chunks, i = buf_pool->n_chunks; i--; chunk++) { - ulint offs; - - if (UNIV_UNLIKELY(ptr < chunk->blocks->frame)) { + buf_pool_chunk_map_t::iterator it; - continue; - } - /* else */ - - offs = ptr - chunk->blocks->frame; - - offs >>= UNIV_PAGE_SIZE_SHIFT; + ut_ad(srv_buf_pool_chunk_unit > 0); - if (UNIV_LIKELY(offs < chunk->size)) { - buf_block_t* block = &chunk->blocks[offs]; + /* TODO: This might be still optimistic treatment. + buf_pool_resize() needs all buf_pool_mutex and all + buf_pool->page_hash x-latched until actual modification. + It should block the other user threads and should take while + which is enough to done the buf_pool_chunk_map access. */ + while (buf_pool_resizing) { + /* buf_pool_chunk_map is being modified */ + os_thread_sleep(100000); /* 0.1 sec */ + } - /* The function buf_chunk_init() invokes - buf_block_init() so that block[n].frame == - block->frame + n * UNIV_PAGE_SIZE. Check it. */ - ut_ad(block->frame == page_align(ptr)); + ulint counter = 0; +retry: #ifdef UNIV_DEBUG - /* A thread that updates these fields must - hold buf_pool->mutex and block->mutex. Acquire - only the latter. */ - mutex_enter(&block->mutex); - - switch (buf_block_get_state(block)) { - case BUF_BLOCK_POOL_WATCH: - case BUF_BLOCK_ZIP_PAGE: - case BUF_BLOCK_ZIP_DIRTY: - /* These types should only be used in - the compressed buffer pool, whose - memory is allocated from - buf_pool->chunks, in UNIV_PAGE_SIZE - blocks flagged as BUF_BLOCK_MEMORY. */ - ut_error; - break; - case BUF_BLOCK_NOT_USED: - case BUF_BLOCK_READY_FOR_USE: - case BUF_BLOCK_MEMORY: - /* Some data structures contain - "guess" pointers to file pages. The - file pages may have been freed and - reused. Do not complain. */ - break; - case BUF_BLOCK_REMOVE_HASH: - /* buf_LRU_block_remove_hashed_page() - will overwrite the FIL_PAGE_OFFSET and - FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with - 0xff and set the state to - BUF_BLOCK_REMOVE_HASH. 
*/ - ut_ad(page_get_space_id(page_align(ptr)) - == 0xffffffff); - ut_ad(page_get_page_no(page_align(ptr)) - == 0xffffffff); - break; - case BUF_BLOCK_FILE_PAGE: { - ulint space = page_get_space_id(page_align(ptr)); - ulint offset = page_get_page_no(page_align(ptr)); - - if (block->page.space != space || - block->page.offset != offset) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Block space_id %lu != page space_id %lu or " - "Block offset %lu != page offset %lu", - (ulint)block->page.space, space, - (ulint)block->page.offset, offset); - } + bool resize_disabled = (buf_disable_resize_buffer_pool_debug != FALSE); + if (!resize_disabled) { + rw_lock_s_lock(buf_chunk_map_latch); + } +#endif /* UNIV_DEBUG */ + buf_pool_chunk_map_t* chunk_map = buf_chunk_map_ref; - ut_ad(block->page.space - == page_get_space_id(page_align(ptr))); - ut_ad(block->page.offset - == page_get_page_no(page_align(ptr))); - break; - } - } + if (ptr < reinterpret_cast(srv_buf_pool_chunk_unit)) { + it = chunk_map->upper_bound(0); + } else { + it = chunk_map->upper_bound( + ptr - srv_buf_pool_chunk_unit); + } - mutex_exit(&block->mutex); + if (it == chunk_map->end()) { +#ifdef UNIV_DEBUG + if (!resize_disabled) { + rw_lock_s_unlock(buf_chunk_map_latch); + } #endif /* UNIV_DEBUG */ + /* The block should always be found. */ + ++counter; + ut_a(counter < 10); + os_thread_sleep(100000); /* 0.1 sec */ + goto retry; + } - return(block); - } + buf_chunk_t* chunk = it->second; +#ifdef UNIV_DEBUG + if (!resize_disabled) { + rw_lock_s_unlock(buf_chunk_map_latch); } +#endif /* UNIV_DEBUG */ - return(NULL); -} + ulint offs = ptr - chunk->blocks->frame; -/*******************************************************************//** -Gets the block to whose frame the pointer is pointing to. -@return pointer to block, never NULL */ -UNIV_INTERN -buf_block_t* -buf_block_align( -/*============*/ - const byte* ptr) /*!< in: pointer to a frame */ -{ - ulint i; + offs >>= UNIV_PAGE_SIZE_SHIFT; - for (i = 0; i < srv_buf_pool_instances; i++) { - buf_block_t* block; + if (offs < chunk->size) { + buf_block_t* block = &chunk->blocks[offs]; - block = buf_block_align_instance( - buf_pool_from_array(i), ptr); - if (block) { - return(block); + /* The function buf_chunk_init() invokes + buf_block_init() so that block[n].frame == + block->frame + n * UNIV_PAGE_SIZE. Check it. */ + ut_ad(block->frame == page_align(ptr)); +#ifdef UNIV_DEBUG + /* A thread that updates these fields must + hold buf_pool->mutex and block->mutex. Acquire + only the latter. */ + buf_page_mutex_enter(block); + + switch (buf_block_get_state(block)) { + case BUF_BLOCK_POOL_WATCH: + case BUF_BLOCK_ZIP_PAGE: + case BUF_BLOCK_ZIP_DIRTY: + /* These types should only be used in + the compressed buffer pool, whose + memory is allocated from + buf_pool->chunks, in UNIV_PAGE_SIZE + blocks flagged as BUF_BLOCK_MEMORY. */ + ut_error; + break; + case BUF_BLOCK_NOT_USED: + case BUF_BLOCK_READY_FOR_USE: + case BUF_BLOCK_MEMORY: + /* Some data structures contain + "guess" pointers to file pages. The + file pages may have been freed and + reused. Do not complain. */ + break; + case BUF_BLOCK_REMOVE_HASH: + /* buf_LRU_block_remove_hashed_page() + will overwrite the FIL_PAGE_OFFSET and + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with + 0xff and set the state to + BUF_BLOCK_REMOVE_HASH. */ +# ifndef UNIV_DEBUG_VALGRIND + /* In buf_LRU_block_remove_hashed() we + explicitly set those values to 0xff and + declare them uninitialized with + UNIV_MEM_INVALID() after that. 
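The rewritten buf_block_align() above replaces the per-instance chunk scan with a lookup in buf_chunk_map, an ordered map keyed by frame address. A simplified standalone sketch of that idea using std::map::upper_bound; the real code offsets the probe by srv_buf_pool_chunk_unit, while the version below steps back one entry instead, and the types are stand-ins.

    #include <cstddef>
    #include <map>

    struct Chunk { const unsigned char* first_frame; std::size_t n_frames; };

    // Chunks registered under the address of their first frame.
    using ChunkMap = std::map<const unsigned char*, Chunk>;

    const Chunk* chunk_for_frame(const ChunkMap& chunks,
                                 const unsigned char* ptr,
                                 std::size_t frame_size)
    {
        // upper_bound() gives the first chunk starting beyond ptr; the chunk
        // that may contain ptr is the one just before it.
        ChunkMap::const_iterator it = chunks.upper_bound(ptr);
        if (it == chunks.begin()) {
            return nullptr;                 // ptr lies below every chunk
        }
        --it;
        const Chunk& c = it->second;
        std::size_t offs =
            static_cast<std::size_t>(ptr - c.first_frame) / frame_size;
        return offs < c.n_frames ? &c : nullptr;
    }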
*/ + ut_ad(page_get_space_id(page_align(ptr)) + == 0xffffffff); + ut_ad(page_get_page_no(page_align(ptr)) + == 0xffffffff); +# endif /* UNIV_DEBUG_VALGRIND */ + break; + case BUF_BLOCK_FILE_PAGE: + const ulint space_id1 = block->page.id.space(); + const ulint page_no1 = block->page.id.page_no(); + const ulint space_id2 = page_get_space_id( + page_align(ptr)); + const ulint page_no2= page_get_page_no( + page_align(ptr)); + + if (space_id1 != space_id2 || page_no1 != page_no2) { + + ib::error() << "Found a mismatch page," + << " expect page " + << page_id_t(space_id1, page_no1) + << " but found " + << page_id_t(space_id2, page_no2); + + ut_ad(0); + } + break; } + + buf_page_mutex_exit(block); +#endif /* UNIV_DEBUG */ + + return(block); } /* The block should always be found. */ - ut_error; - return(NULL); + ++counter; + ut_a(counter < 10); + os_thread_sleep(100000); /* 0.1 sec */ + goto retry; } /********************************************************************//** Find out if a pointer belongs to a buf_block_t. It can be a pointer to the buf_block_t itself or a member of it. This functions checks one of the buffer pool instances. -@return TRUE if ptr belongs to a buf_block_t struct */ +@return TRUE if ptr belongs to a buf_block_t struct */ static ibool buf_pointer_is_block_field_instance( @@ -2642,10 +4032,11 @@ buf_pointer_is_block_field_instance( const void* ptr) /*!< in: pointer not dereferenced */ { const buf_chunk_t* chunk = buf_pool->chunks; - const buf_chunk_t* const echunk = chunk + buf_pool->n_chunks; + const buf_chunk_t* const echunk = chunk + ut_min( + buf_pool->n_chunks, buf_pool->n_chunks_new); - /* TODO: protect buf_pool->chunks with a mutex (it will - currently remain constant after buf_pool_init()) */ + /* TODO: protect buf_pool->chunks with a mutex (the older pointer will + currently remain while during buf_pool_resize()) */ while (chunk < echunk) { if (ptr >= (void*) chunk->blocks && ptr < (void*) (chunk->blocks + chunk->size)) { @@ -2662,8 +4053,7 @@ buf_pointer_is_block_field_instance( /********************************************************************//** Find out if a pointer belongs to a buf_block_t. It can be a pointer to the buf_block_t itself or a member of it -@return TRUE if ptr belongs to a buf_block_t struct */ -UNIV_INTERN +@return TRUE if ptr belongs to a buf_block_t struct */ ibool buf_pointer_is_block_field( /*=======================*/ @@ -2686,7 +4076,7 @@ buf_pointer_is_block_field( /********************************************************************//** Find out if a buffer block was created by buf_chunk_init(). -@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */ +@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */ static ibool buf_block_is_uncompressed( @@ -2725,14 +4115,14 @@ buf_debug_execute_is_force_flush() } #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ -/** -Wait for the block to be read in. -@param block The block to check */ +/** Wait for the block to be read in. +@param[in] block The block to check */ static void -buf_wait_for_read(buf_block_t* block) +buf_wait_for_read( + buf_block_t* block) { - /* Note: For the PAGE_ATOMIC_REF_COUNT case: + /* Note: We are using the block->lock to check for IO state (and a dirty read). 
We set the IO_READ state under the protection of the hash_lock @@ -2744,7 +4134,7 @@ buf_wait_for_read(buf_block_t* block) /* Wait until the read operation completes */ - ib_mutex_t* mutex = buf_page_get_mutex(&block->page); + BPageMutex* mutex = buf_page_get_mutex(&block->page); for (;;) { buf_io_fix io_fix; @@ -2766,41 +4156,43 @@ buf_wait_for_read(buf_block_t* block) } } -/********************************************************************//** -This is the general function used to get access to a database page. -@return pointer to the block or NULL */ -UNIV_INTERN +/** This is the general function used to get access to a database page. +@param[in] page_id page id +@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH +@param[in] guess guessed block or NULL +@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL, +BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@param[in] dirty_with_no_latch + mark page as dirty even if page + is being pinned without any latch +@return pointer to the block or NULL */ buf_block_t* buf_page_get_gen( -/*=============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint offset, /*!< in: page number */ - ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ - buf_block_t* guess, /*!< in: guessed block or NULL */ - ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL, - BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or - BUF_GET_IF_IN_POOL_OR_WATCH */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr, /*!< in: mini-transaction */ - dberr_t* err) /*!< out: error code */ + const page_id_t& page_id, + const page_size_t& page_size, + ulint rw_latch, + buf_block_t* guess, + ulint mode, + const char* file, + ulint line, + mtr_t* mtr, + dberr_t* err, + bool dirty_with_no_latch) { buf_block_t* block; - ulint fold; unsigned access_time; - ulint fix_type; rw_lock_t* hash_lock; - ulint retries = 0; buf_block_t* fix_block; - ib_mutex_t* fix_mutex = NULL; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + ulint retries = 0; + buf_pool_t* buf_pool = buf_pool_get(page_id); - ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); + ut_ad(mtr->is_active()); ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH) + || (rw_latch == RW_SX_LATCH) || (rw_latch == RW_NO_LATCH)); if (err) { @@ -2821,22 +4213,29 @@ buf_page_get_gen( default: ut_error; } + + bool found; + const page_size_t& space_page_size + = fil_space_get_page_size(page_id.space(), &found); + + ut_ad(found); + + ut_ad(page_size.equals_to(space_page_size)); #endif /* UNIV_DEBUG */ - ut_ad(zip_size == fil_space_get_zip_size(space)); - ut_ad(ut_is_2pow(zip_size)); -#ifndef UNIV_LOG_DEBUG + ut_ad(!ibuf_inside(mtr) - || ibuf_page_low(space, zip_size, offset, - FALSE, file, line, NULL)); -#endif + || ibuf_page_low(page_id, page_size, FALSE, file, line, NULL)); + buf_pool->stat.n_page_gets++; - fold = buf_page_address_fold(space, offset); - hash_lock = buf_page_hash_lock_get(buf_pool, fold); + hash_lock = buf_page_hash_lock_get(buf_pool, page_id); loop: block = guess; rw_lock_s_lock(hash_lock); + /* If not own buf_pool_mutex, page_hash can be changed. */ + hash_lock = buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id); + if (block != NULL) { /* If the guess is a compressed page descriptor that @@ -2844,8 +4243,7 @@ loop: it may have been freed by buf_relocate(). 
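Throughout this hunk the old (space, offset, fold) argument triples are replaced by a page_id_t value plus a page_size_t. A rough standalone sketch of such a key type follows; the fold() formula here is illustrative only and not necessarily the one InnoDB uses.

    #include <cstdint>

    class PageId {
    public:
        PageId(uint32_t space, uint32_t page_no)
            : m_space(space), m_page_no(page_no) {}

        uint32_t space()   const { return m_space; }
        uint32_t page_no() const { return m_page_no; }

        bool equals_to(const PageId& other) const {
            return m_space == other.m_space && m_page_no == other.m_page_no;
        }

        // Single hash key derived from both components (illustrative formula).
        uint64_t fold() const {
            return (static_cast<uint64_t>(m_space) << 20) + m_space + m_page_no;
        }

    private:
        uint32_t m_space;
        uint32_t m_page_no;
    };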
*/ if (!buf_block_is_uncompressed(buf_pool, block) - || offset != block->page.offset - || space != block->page.space + || !page_id.equals_to(block->page.id) || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) { /* Our guess was bogus or things have changed @@ -2857,8 +4255,7 @@ loop: } if (block == NULL) { - block = (buf_block_t*) buf_page_hash_get_low( - buf_pool, space, offset, fold); + block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id); } if (!block || buf_pool_watch_is_sentinel(buf_pool, &block->page)) { @@ -2873,15 +4270,39 @@ loop: if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) { rw_lock_x_lock(hash_lock); + + /* If not own buf_pool_mutex, + page_hash can be changed. */ + hash_lock = buf_page_hash_lock_x_confirm( + hash_lock, buf_pool, page_id); + block = (buf_block_t*) buf_pool_watch_set( - space, offset, fold); + page_id, &hash_lock); - if (UNIV_LIKELY_NULL(block)) { + if (block) { /* We can release hash_lock after we increment the fix count to make sure that no state change takes place. */ fix_block = block; - buf_block_fix(fix_block); + + if (fsp_is_system_temporary(page_id.space())) { + /* For temporary tablespace, + the mutex is being used for + synchronization between user + thread and flush thread, + instead of block->lock. See + buf_flush_page() for the flush + thread counterpart. */ + + BPageMutex* fix_mutex + = buf_page_get_mutex( + &fix_block->page); + mutex_enter(fix_mutex); + buf_block_fix(fix_block); + mutex_exit(fix_mutex); + } else { + buf_block_fix(fix_block); + } /* Now safe to release page_hash mutex */ rw_lock_x_unlock(hash_lock); @@ -2894,15 +4315,15 @@ loop: if (mode == BUF_GET_IF_IN_POOL || mode == BUF_PEEK_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH) { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); + return(NULL); } - if (buf_read_page(space, zip_size, offset, &bpage)) { - buf_read_ahead_random(space, zip_size, offset, + if (buf_read_page(page_id, page_size, &bpage)) { + buf_read_ahead_random(page_id, page_size, ibuf_inside(mtr)); retries = 0; @@ -2917,7 +4338,7 @@ loop: /* Do not try again for encrypted pages */ if (!corrupted) { - ib_mutex_t* pmutex = buf_page_get_mutex(bpage); + BPageMutex* pmutex = buf_page_get_mutex(bpage); buf_pool = buf_pool_from_bpage(bpage); buf_pool_mutex_enter(buf_pool); @@ -2951,25 +4372,20 @@ loop: } if (corrupted) { - fprintf(stderr, "InnoDB: Error: Unable" - " to read tablespace %lu page no" - " %lu into the buffer pool after" - " %lu attempts\n" - "InnoDB: The most probable cause" - " of this error may be that the" - " table has been corrupted.\n" - "InnoDB: You can try to fix this" - " problem by using" - " innodb_force_recovery.\n" - "InnoDB: Please see reference manual" - " for more details.\n" - "InnoDB: Aborting...\n", - space, offset, - BUF_PAGE_READ_MAX_RETRIES); - - ut_error; + ib::fatal() << "Unable to read page " << page_id + << " into the buffer pool after " + << BUF_PAGE_READ_MAX_RETRIES << " attempts." + " The most probable cause of this error may" + " be that the table has been corrupted. Or," + " the table was compressed with with an" + " algorithm that is not supported by this" + " instance. If it is not a decompress failure," + " you can try to fix this problem by using" + " innodb_force_recovery." + " Please see " REFMAN " for more" + " details. 
Aborting..."; } else { - ib_mutex_t* pmutex = buf_page_get_mutex(bpage); + BPageMutex* pmutex = buf_page_get_mutex(bpage); buf_pool = buf_pool_from_bpage(bpage); buf_pool_mutex_enter(buf_pool); @@ -2993,39 +4409,42 @@ loop: } #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - ut_a(++buf_dbg_counter % 5771 || buf_validate()); + ut_a(fsp_skip_sanity_check(page_id.space()) + || ++buf_dbg_counter % 5771 + || buf_validate()); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ goto loop; } else { fix_block = block; } - buf_block_fix(fix_block); + if (fsp_is_system_temporary(page_id.space())) { + /* For temporary tablespace, the mutex is being used + for synchronization between user thread and flush + thread, instead of block->lock. See buf_flush_page() + for the flush thread counterpart. */ + BPageMutex* fix_mutex = buf_page_get_mutex( + &fix_block->page); + mutex_enter(fix_mutex); + buf_block_fix(fix_block); + mutex_exit(fix_mutex); + } else { + buf_block_fix(fix_block); + } /* Now safe to release page_hash mutex */ rw_lock_s_unlock(hash_lock); got_block: - fix_mutex = buf_page_get_mutex(&fix_block->page); - - ut_ad(page_zip_get_size(&block->page.zip) == zip_size); - if (mode == BUF_GET_IF_IN_POOL || mode == BUF_PEEK_IF_IN_POOL) { - bool must_read; - - { - buf_page_t* fix_page = &fix_block->page; - - mutex_enter(fix_mutex); - - buf_io_fix io_fix = buf_page_get_io_fix(fix_page); - - must_read = (io_fix == BUF_IO_READ); - - mutex_exit(fix_mutex); - } + buf_page_t* fix_page = &fix_block->page; + BPageMutex* fix_mutex = buf_page_get_mutex(fix_page); + mutex_enter(fix_mutex); + const bool must_read + = (buf_page_get_io_fix(fix_page) == BUF_IO_READ); + mutex_exit(fix_mutex); if (must_read) { /* The page is being read to buffer pool, @@ -3037,10 +4456,22 @@ got_block: } } - switch(buf_block_get_state(fix_block)) { + switch (buf_block_get_state(fix_block)) { buf_page_t* bpage; case BUF_BLOCK_FILE_PAGE: + bpage = &block->page; + if (fsp_is_system_temporary(page_id.space()) + && buf_page_get_io_fix(bpage) != BUF_IO_NONE) { + /* This suggest that page is being flushed. + Avoid returning reference to this page. + Instead wait for flush action to complete. + For normal page this sync is done using SX + lock but for intrinsic there is no latching. */ + buf_block_unfix(fix_block); + os_thread_sleep(WAIT_FOR_WRITE); + goto loop; + } break; case BUF_BLOCK_ZIP_PAGE: @@ -3081,24 +4512,19 @@ got_block: buf_pool_mutex_enter(buf_pool); + /* If not own buf_pool_mutex, page_hash can be changed. */ + hash_lock = buf_page_hash_lock_get(buf_pool, page_id); + rw_lock_x_lock(hash_lock); /* Buffer-fixing prevents the page_hash from changing. */ - ut_ad(bpage == buf_page_hash_get_low( - buf_pool, space, offset, fold)); + ut_ad(bpage == buf_page_hash_get_low(buf_pool, page_id)); - buf_block_mutex_enter(block); + buf_block_unfix(fix_block); + buf_page_mutex_enter(block); mutex_enter(&buf_pool->zip_mutex); - ut_ad(fix_block->page.buf_fix_count > 0); - -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_decrement_uint32(&fix_block->page.buf_fix_count, 1); -#else - --fix_block->page.buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ - fix_block = block; if (bpage->buf_fix_count > 0 @@ -3114,7 +4540,7 @@ got_block: buf_LRU_block_free_non_file_page(block); buf_pool_mutex_exit(buf_pool); rw_lock_x_unlock(hash_lock); - buf_block_mutex_exit(block); + buf_page_mutex_exit(block); /* Try again */ goto loop; @@ -3130,18 +4556,18 @@ got_block: buf_block_init_low(block); - /* Set after relocate(). */ + /* Set after buf_relocate(). 
*/ block->page.buf_fix_count = 1; - block->lock_hash_val = lock_rec_hash(space, offset); + block->lock_hash_val = lock_rec_hash(page_id.space(), + page_id.page_no()); UNIV_MEM_DESC(&block->page.zip.data, - page_zip_get_size(&block->page.zip)); + page_zip_get_size(&block->page.zip)); if (buf_page_get_state(&block->page) == BUF_BLOCK_ZIP_PAGE) { #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - UT_LIST_REMOVE(list, buf_pool->zip_clean, - &block->page); + UT_LIST_REMOVE(buf_pool->zip_clean, &block->page); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ ut_ad(!block->page.in_flush_list); } else { @@ -3163,15 +4589,13 @@ got_block: UNIV_MEM_INVALID(bpage, sizeof *bpage); rw_lock_x_unlock(hash_lock); - - ++buf_pool->n_pend_unzip; - + buf_pool->n_pend_unzip++; mutex_exit(&buf_pool->zip_mutex); buf_pool_mutex_exit(buf_pool); access_time = buf_page_is_accessed(&block->page); - buf_block_mutex_exit(block); + buf_page_mutex_exit(block); buf_page_free_descriptor(bpage); @@ -3189,22 +4613,21 @@ got_block: if (!recv_no_ibuf_operations) { if (access_time) { #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(space, offset) == 0); + ut_a(ibuf_count_get(page_id) == 0); #endif /* UNIV_IBUF_COUNT_DEBUG */ } else { ibuf_merge_or_delete_for_page( - block, space, offset, zip_size, TRUE); + block, page_id, &page_size, TRUE); } } buf_pool_mutex_enter(buf_pool); - /* Unfix and unlatch the block. */ - buf_block_mutex_enter(fix_block); + buf_page_mutex_enter(fix_block); buf_block_set_io_fix(fix_block, BUF_IO_NONE); - buf_block_mutex_exit(fix_block); + buf_page_mutex_exit(fix_block); --buf_pool->n_pend_unzip; @@ -3226,10 +4649,8 @@ got_block: ut_ad(block == fix_block); ut_ad(fix_block->page.buf_fix_count > 0); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); ut_ad(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE); @@ -3251,18 +4672,29 @@ got_block: are holding the buf_pool->mutex. */ if (buf_LRU_free_page(&fix_block->page, true)) { + buf_pool_mutex_exit(buf_pool); + + /* If not own buf_pool_mutex, + page_hash can be changed. */ + hash_lock = buf_page_hash_lock_get(buf_pool, page_id); + rw_lock_x_lock(hash_lock); + /* If not own buf_pool_mutex, + page_hash can be changed. */ + hash_lock = buf_page_hash_lock_x_confirm( + hash_lock, buf_pool, page_id); + if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) { /* Set the watch, as it would have been set if the page were not in the buffer pool in the first place. */ block = (buf_block_t*) buf_pool_watch_set( - space, offset, fold); + page_id, &hash_lock); } else { block = (buf_block_t*) buf_page_hash_get_low( - buf_pool, space, offset, fold); + buf_pool, page_id); } rw_lock_x_unlock(hash_lock); @@ -3274,26 +4706,29 @@ got_block: and before we acquire the hash_lock above. Try again. 
*/ guess = block; + goto loop; } - fprintf(stderr, - "innodb_change_buffering_debug evict %u %u\n", - (unsigned) space, (unsigned) offset); + ib::info() << "innodb_change_buffering_debug evict " + << page_id; + return(NULL); } - mutex_enter(&fix_block->mutex); + buf_page_mutex_enter(fix_block); if (buf_flush_page_try(buf_pool, fix_block)) { - fprintf(stderr, - "innodb_change_buffering_debug flush %u %u\n", - (unsigned) space, (unsigned) offset); + + ib::info() << "innodb_change_buffering_debug flush " + << page_id; + guess = fix_block; + goto loop; } - buf_block_mutex_exit(fix_block); + buf_page_mutex_exit(fix_block); buf_block_fix(fix_block); @@ -3305,30 +4740,40 @@ got_block: ut_ad(fix_block->page.buf_fix_count > 0); -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG /* We have already buffer fixed the page, and we are committed to - returning this page to the caller. Register for debugging. */ - { - ibool ret; - ret = rw_lock_s_lock_nowait(&fix_block->debug_latch, file, line); + returning this page to the caller. Register for debugging. + Avoid debug latching if page/block belongs to system temporary + tablespace (Not much needed for table with single threaded access.). */ + if (!fsp_is_system_temporary(page_id.space())) { + ibool ret; + ret = rw_lock_s_lock_nowait( + &fix_block->debug_latch, file, line); ut_a(ret); } -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ + + /* While tablespace is reinited the indexes are already freed but the + blocks related to it still resides in buffer pool. Trying to remove + such blocks from buffer pool would invoke removal of AHI entries + associated with these blocks. Logic to remove AHI entry will try to + load the block but block is already in free state. Handle the said case + with mode = BUF_PEEK_IF_IN_POOL that is invoked from + "btr_search_drop_page_hash_when_freed". */ + ut_ad(mode == BUF_GET_POSSIBLY_FREED + || mode == BUF_PEEK_IF_IN_POOL + || !fix_block->page.file_page_was_freed); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - ut_a(mode == BUF_GET_POSSIBLY_FREED - || !fix_block->page.file_page_was_freed); -#endif /* Check if this is the first access to the page */ access_time = buf_page_is_accessed(&fix_block->page); /* This is a heuristic and we don't care about ordering issues. */ if (access_time == 0) { - buf_block_mutex_enter(fix_block); + buf_page_mutex_enter(fix_block); buf_page_set_accessed(&fix_block->page); - buf_block_mutex_exit(fix_block); + buf_page_mutex_exit(fix_block); } if (mode != BUF_PEEK_IF_IN_POOL) { @@ -3336,25 +4781,33 @@ got_block: } #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - ut_a(++buf_dbg_counter % 5771 || buf_validate()); - ut_a(fix_block->page.buf_fix_count > 0); + ut_a(fsp_skip_sanity_check(page_id.space()) + || ++buf_dbg_counter % 5771 + || buf_validate()); ut_a(buf_block_get_state(fix_block) == BUF_BLOCK_FILE_PAGE); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ -#ifdef PAGE_ATOMIC_REF_COUNT /* We have to wait here because the IO_READ state was set - under the protection of the hash_lock and the block->mutex - but not the block->lock. */ + under the protection of the hash_lock and not the block->mutex + and block->lock. */ buf_wait_for_read(fix_block); -#endif /* PAGE_ATOMIC_REF_COUNT */ + + /* Mark block as dirty if requested by caller. If not requested (false) + then we avoid updating the dirty state of the block and retain the + original one. This is reason why ? + Same block can be shared/pinned by 2 different mtrs. 
If first mtr + set the dirty state to true and second mtr mark it as false the last + updated dirty state is retained. Which means we can loose flushing of + a modified block. */ + if (dirty_with_no_latch) { + fix_block->made_dirty_with_no_latch = dirty_with_no_latch; + } + + mtr_memo_type_t fix_type; switch (rw_latch) { case RW_NO_LATCH: -#ifndef PAGE_ATOMIC_REF_COUNT - buf_wait_for_read(fix_block); -#endif /* !PAGE_ATOMIC_REF_COUNT */ - fix_type = MTR_MEMO_BUF_FIX; break; @@ -3364,6 +4817,12 @@ got_block: fix_type = MTR_MEMO_PAGE_S_FIX; break; + case RW_SX_LATCH: + rw_lock_sx_lock_inline(&fix_block->lock, 0, file, line); + + fix_type = MTR_MEMO_PAGE_SX_FIX; + break; + default: ut_ad(rw_latch == RW_X_LATCH); rw_lock_x_lock_inline(&fix_block->lock, 0, file, line); @@ -3378,26 +4837,23 @@ got_block: /* In the case of a first access, try to apply linear read-ahead */ - buf_read_ahead_linear( - space, zip_size, offset, ibuf_inside(mtr)); + buf_read_ahead_linear(page_id, page_size, ibuf_inside(mtr)); } #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(buf_block_get_space(fix_block), - buf_block_get_page_no(fix_block)) == 0); + ut_a(ibuf_count_get(fix_block->page.id) == 0); #endif -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); + return(fix_block); } /********************************************************************//** This is the general function used to get optimistic access to a database page. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ ibool buf_page_optimistic_get( /*====================*/ @@ -3411,18 +4867,17 @@ buf_page_optimistic_get( buf_pool_t* buf_pool; unsigned access_time; ibool success; - ulint fix_type; ut_ad(block); ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); + ut_ad(mtr->is_active()); ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH)); - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) { - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); return(FALSE); } @@ -3433,41 +4888,52 @@ buf_page_optimistic_get( buf_page_set_accessed(&block->page); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); buf_page_make_young_if_needed(&block->page); ut_ad(!ibuf_inside(mtr) - || ibuf_page(buf_block_get_space(block), - buf_block_get_zip_size(block), - buf_block_get_page_no(block), NULL)); + || ibuf_page(block->page.id, block->page.size, NULL)); + + mtr_memo_type_t fix_type; + + switch (rw_latch) { + case RW_S_LATCH: + success = rw_lock_s_lock_nowait(&block->lock, file, line); - if (rw_latch == RW_S_LATCH) { - success = rw_lock_s_lock_nowait(&(block->lock), - file, line); fix_type = MTR_MEMO_PAGE_S_FIX; - } else { - success = rw_lock_x_lock_func_nowait_inline(&(block->lock), - file, line); + break; + case RW_X_LATCH: + success = rw_lock_x_lock_func_nowait_inline( + &block->lock, file, line); + fix_type = MTR_MEMO_PAGE_X_FIX; + break; + default: + ut_error; /* RW_SX_LATCH is not implemented yet */ } - if (UNIV_UNLIKELY(!success)) { + if (!success) { + buf_page_mutex_enter(block); buf_block_buf_fix_dec(block); + buf_page_mutex_exit(block); return(FALSE); } - if (UNIV_UNLIKELY(modify_clock != block->modify_clock)) { + if (modify_clock != block->modify_clock) { + buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK); if (rw_latch == RW_S_LATCH) { - 
rw_lock_s_unlock(&(block->lock)); + rw_lock_s_unlock(&block->lock); } else { - rw_lock_x_unlock(&(block->lock)); + rw_lock_x_unlock(&block->lock); } + buf_page_mutex_enter(block); buf_block_buf_fix_dec(block); + buf_page_mutex_exit(block); return(FALSE); } @@ -3475,31 +4941,28 @@ buf_page_optimistic_get( mtr_memo_push(mtr, block, fix_type); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - ut_a(++buf_dbg_counter % 5771 || buf_validate()); + ut_a(fsp_skip_sanity_check(block->page.id.space()) + || ++buf_dbg_counter % 5771 + || buf_validate()); ut_a(block->page.buf_fix_count > 0); ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - mutex_enter(&block->mutex); - ut_a(!block->page.file_page_was_freed); - mutex_exit(&block->mutex); -#endif + ut_d(buf_page_mutex_enter(block)); + ut_ad(!block->page.file_page_was_freed); + ut_d(buf_page_mutex_exit(block)); if (!access_time) { /* In the case of a first access, try to apply linear read-ahead */ - - buf_read_ahead_linear(buf_block_get_space(block), - buf_block_get_zip_size(block), - buf_block_get_page_no(block), + buf_read_ahead_linear(block->page.id, block->page.size, ibuf_inside(mtr)); } #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(buf_block_get_space(block), - buf_block_get_page_no(block)) == 0); -#endif + ut_a(ibuf_count_get(block->page.id) == 0); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + buf_pool = buf_pool_from_block(block); buf_pool->stat.n_page_gets++; @@ -3510,8 +4973,7 @@ buf_page_optimistic_get( This is used to get access to a known database page, when no waiting can be done. For example, if a search in an adaptive hash index leads us to this frame. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ ibool buf_page_get_known_nowait( /*======================*/ @@ -3524,13 +4986,11 @@ buf_page_get_known_nowait( { buf_pool_t* buf_pool; ibool success; - ulint fix_type; - ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); + ut_ad(mtr->is_active()); ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH)); - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); if (buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH) { /* Another thread is just freeing the block from the LRU list @@ -3540,7 +5000,7 @@ buf_page_get_known_nowait( we have already removed it from the page address hash table of the buffer pool. 
*/ - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); return(FALSE); } @@ -3551,7 +5011,7 @@ buf_page_get_known_nowait( buf_page_set_accessed(&block->page); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); buf_pool = buf_pool_from_block(block); @@ -3561,18 +5021,27 @@ buf_page_get_known_nowait( ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD); - if (rw_latch == RW_S_LATCH) { - success = rw_lock_s_lock_nowait(&(block->lock), - file, line); + mtr_memo_type_t fix_type; + + switch (rw_latch) { + case RW_S_LATCH: + success = rw_lock_s_lock_nowait(&block->lock, file, line); fix_type = MTR_MEMO_PAGE_S_FIX; - } else { - success = rw_lock_x_lock_func_nowait_inline(&(block->lock), - file, line); + break; + case RW_X_LATCH: + success = rw_lock_x_lock_func_nowait_inline( + &block->lock, file, line); + fix_type = MTR_MEMO_PAGE_X_FIX; + break; + default: + ut_error; /* RW_SX_LATCH is not implemented yet */ } if (!success) { + buf_page_mutex_enter(block); buf_block_buf_fix_dec(block); + buf_page_mutex_exit(block); return(FALSE); } @@ -3584,7 +5053,8 @@ buf_page_get_known_nowait( ut_a(block->page.buf_fix_count > 0); ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG + +#ifdef UNIV_DEBUG if (mode != BUF_KEEP_OLD) { /* If mode == BUF_KEEP_OLD, we are executing an I/O completion routine. Avoid a bogus assertion failure @@ -3593,50 +5063,44 @@ buf_page_get_known_nowait( deleting a record from SYS_INDEXES. This check will be skipped in recv_recover_page() as well. */ - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); ut_a(!block->page.file_page_was_freed); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } -#endif +#endif /* UNIV_DEBUG */ #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a((mode == BUF_KEEP_OLD) - || (ibuf_count_get(buf_block_get_space(block), - buf_block_get_page_no(block)) == 0)); + ut_a((mode == BUF_KEEP_OLD) || ibuf_count_get(block->page.id) == 0); #endif buf_pool->stat.n_page_gets++; return(TRUE); } -/*******************************************************************//** -Given a tablespace id and page number tries to get that page. If the +/** Given a tablespace id and page number tries to get that page. If the page is not in the buffer pool it is not loaded and NULL is returned. Suitable for using when holding the lock_sys_t::mutex. 
-@return pointer to a page or NULL */ -UNIV_INTERN +@param[in] page_id page id +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@return pointer to a page or NULL */ buf_block_t* buf_page_try_get_func( -/*==================*/ - ulint space_id,/*!< in: tablespace id */ - ulint page_no,/*!< in: page number */ - ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH */ - bool possibly_freed, - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr) /*!< in: mini-transaction */ + const page_id_t& page_id, + const char* file, + ulint line, + mtr_t* mtr) { buf_block_t* block; ibool success; - ulint fix_type; - buf_pool_t* buf_pool = buf_pool_get(space_id, page_no); + buf_pool_t* buf_pool = buf_pool_get(page_id); rw_lock_t* hash_lock; ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); + ut_ad(mtr->is_active()); - block = buf_block_hash_get_s_locked(buf_pool, space_id, - page_no, &hash_lock); + block = buf_block_hash_get_s_locked(buf_pool, page_id, &hash_lock); if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) { if (block) { @@ -3647,24 +5111,19 @@ buf_page_try_get_func( ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page)); - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); rw_lock_s_unlock(hash_lock); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - ut_a(buf_block_get_space(block) == space_id); - ut_a(buf_block_get_page_no(block) == page_no); + ut_a(page_id.equals_to(block->page.id)); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ buf_block_buf_fix_inc(block, file, line); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); - if (rw_latch == RW_S_LATCH) { - fix_type = MTR_MEMO_PAGE_S_FIX; - success = rw_lock_s_lock_nowait(&block->lock, file, line); - } else { - success = false; - } + mtr_memo_type_t fix_type = MTR_MEMO_PAGE_S_FIX; + success = rw_lock_s_lock_nowait(&block->lock, file, line); if (!success) { /* Let us try to get an X-latch. 
If the current thread @@ -3677,32 +5136,34 @@ buf_page_try_get_func( } if (!success) { + buf_page_mutex_enter(block); buf_block_buf_fix_dec(block); + buf_page_mutex_exit(block); return(NULL); } mtr_memo_push(mtr, block, fix_type); + #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - ut_a(++buf_dbg_counter % 5771 || buf_validate()); + ut_a(fsp_skip_sanity_check(block->page.id.space()) + || ++buf_dbg_counter % 5771 + || buf_validate()); ut_a(block->page.buf_fix_count > 0); ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - if (!possibly_freed) { - mutex_enter(&block->mutex); - ut_a(!block->page.file_page_was_freed); - mutex_exit(&block->mutex); - } -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + + ut_d(buf_page_mutex_enter(block)); + ut_d(ut_a(!block->page.file_page_was_freed)); + ut_d(buf_page_mutex_exit(block)); + buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK); buf_pool->stat.n_page_gets++; #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(buf_block_get_space(block), - buf_block_get_page_no(block)) == 0); -#endif + ut_a(ibuf_count_get(block->page.id) == 0); +#endif /* UNIV_IBUF_COUNT_DEBUG */ return(block); } @@ -3733,43 +5194,38 @@ buf_page_init_low( bpage->slot = NULL; HASH_INVALIDATE(bpage, hash); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - bpage->file_page_was_freed = FALSE; -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + + ut_d(bpage->file_page_was_freed = FALSE); } -/********************************************************************//** -Inits a page to the buffer buf_pool. */ -static MY_ATTRIBUTE((nonnull)) +/** Inits a page to the buffer buf_pool. +@param[in,out] buf_pool buffer pool +@param[in] page_id page id +@param[in,out] block block to init */ +static void buf_page_init( -/*==========*/ - buf_pool_t* buf_pool,/*!< in/out: buffer pool */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space - in units of a page */ - ulint fold, /*!< in: buf_page_address_fold(space,offset) */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - buf_block_t* block) /*!< in/out: block to init */ + buf_pool_t* buf_pool, + const page_id_t& page_id, + const page_size_t& page_size, + buf_block_t* block) { buf_page_t* hash_page; - ut_ad(buf_pool == buf_pool_get(space, offset)); + ut_ad(buf_pool == buf_pool_get(page_id)); ut_ad(buf_pool_mutex_own(buf_pool)); - ut_ad(mutex_own(&(block->mutex))); + ut_ad(buf_page_mutex_own(block)); ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(buf_page_hash_lock_get(buf_pool, fold), - RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(buf_page_hash_lock_get(buf_pool, page_id), + RW_LOCK_X)); /* Set the state of the block */ - buf_block_set_file_page(block, space, offset); + buf_block_set_file_page(block, page_id); #ifdef UNIV_DEBUG_VALGRIND - if (!space) { + if (is_system_tablespace(page_id.space())) { /* Silence valid Valgrind warnings about uninitialized data being written to data files. There are some unused bytes on some pages that InnoDB does not initialize. 
*/ @@ -3779,60 +5235,58 @@ buf_page_init( buf_block_init_low(block); - block->lock_hash_val = lock_rec_hash(space, offset); + block->lock_hash_val = lock_rec_hash(page_id.space(), + page_id.page_no()); buf_page_init_low(&block->page); /* Insert into the hash table of file pages */ - hash_page = buf_page_hash_get_low(buf_pool, space, offset, fold); + hash_page = buf_page_hash_get_low(buf_pool, page_id); if (hash_page == NULL) { - /* Block not found in the hash table */ + /* Block not found in hash table */ } else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) { + /* Preserve the reference count. */ ib_uint32_t buf_fix_count = hash_page->buf_fix_count; - ut_a(buf_fix_count > 0); + ut_a(buf_fix_count > 0); -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_increment_uint32( - &block->page.buf_fix_count, buf_fix_count); -#else - block->page.buf_fix_count += ulint(buf_fix_count); -#endif /* PAGE_ATOMIC_REF_COUNT */ + os_atomic_increment_uint32(&block->page.buf_fix_count, + buf_fix_count); - buf_pool_watch_remove(buf_pool, fold, hash_page); + buf_pool_watch_remove(buf_pool, hash_page); } else { - fprintf(stderr, - "InnoDB: Error: page %lu %lu already found" - " in the hash table: %p, %p\n", - (ulong) space, - (ulong) offset, - (const void*) hash_page, (const void*) block); -#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - mutex_exit(&block->mutex); - buf_pool_mutex_exit(buf_pool); - buf_print(); - buf_LRU_print(); - buf_validate(); - buf_LRU_validate(); -#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ - ut_error; + + ib::error() << "Page " << page_id + << " already found in the hash table: " + << hash_page << ", " << block; + + ut_d(buf_page_mutex_exit(block)); + ut_d(buf_pool_mutex_exit(buf_pool)); + ut_d(buf_print()); + ut_d(buf_LRU_print()); + ut_d(buf_validate()); + ut_d(buf_LRU_validate()); + ut_ad(0); } ut_ad(!block->page.in_zip_hash); ut_ad(!block->page.in_page_hash); ut_d(block->page.in_page_hash = TRUE); - HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, &block->page); + block->page.id.copy_from(page_id); + block->page.size.copy_from(page_size); + + HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, + page_id.fold(), &block->page); - if (zip_size) { - page_zip_set_size(&block->page.zip, zip_size); + if (page_size.is_compressed()) { + page_zip_set_size(&block->page.zip, page_size.physical()); } } -/********************************************************************//** -Function which inits a page for read to the buffer buf_pool. If the page is +/** Inits a page for read to the buffer buf_pool. If the page is (1) already in buf_pool, or (2) if we specify to read only ibuf pages and the page is not an ibuf page, or (3) if the space is deleted or being deleted, @@ -3840,31 +5294,27 @@ then this function does nothing. Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock on the buffer frame. The io-handler must take care that the flag is cleared and the lock released later. -@return pointer to the block or NULL */ -UNIV_INTERN +@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED +@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ... +@param[in] page_id page id +@param[in] unzip TRUE=request uncompressed page +@return pointer to the block or NULL */ buf_page_t* buf_page_init_for_read( -/*===================*/ - dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */ - ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... 
*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - ibool unzip, /*!< in: TRUE=request uncompressed page */ - ib_int64_t tablespace_version, - /*!< in: prevents reading from a wrong - version of the tablespace in case we have done - DISCARD + IMPORT */ - ulint offset) /*!< in: page number */ + dberr_t* err, + ulint mode, + const page_id_t& page_id, + const page_size_t& page_size, + ibool unzip) { buf_block_t* block; buf_page_t* bpage = NULL; buf_page_t* watch_page; rw_lock_t* hash_lock; mtr_t mtr; - ulint fold; ibool lru = FALSE; void* data; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); ut_ad(buf_pool); @@ -3873,12 +5323,12 @@ buf_page_init_for_read( if (mode == BUF_READ_IBUF_PAGES_ONLY) { /* It is a read-ahead within an ibuf routine */ - ut_ad(!ibuf_bitmap_page(zip_size, offset)); + ut_ad(!ibuf_bitmap_page(page_id, page_size)); ibuf_mtr_start(&mtr); - if (!recv_no_ibuf_operations - && !ibuf_page(space, zip_size, offset, &mtr)) { + if (!recv_no_ibuf_operations && + !ibuf_page(page_id, page_size, &mtr)) { ibuf_mtr_commit(&mtr); @@ -3888,7 +5338,7 @@ buf_page_init_for_read( ut_ad(mode == BUF_READ_ANY_PAGE); } - if (zip_size && !unzip && !recv_recovery_is_on()) { + if (page_size.is_compressed() && !unzip && !recv_recovery_is_on()) { block = NULL; } else { block = buf_LRU_get_free_block(buf_pool); @@ -3896,53 +5346,40 @@ buf_page_init_for_read( ut_ad(buf_pool_from_block(block) == buf_pool); } - fold = buf_page_address_fold(space, offset); - hash_lock = buf_page_hash_lock_get(buf_pool, fold); - buf_pool_mutex_enter(buf_pool); + + hash_lock = buf_page_hash_lock_get(buf_pool, page_id); rw_lock_x_lock(hash_lock); - watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold); + watch_page = buf_page_hash_get_low(buf_pool, page_id); if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) { /* The page is already in the buffer pool. */ watch_page = NULL; -err_exit: rw_lock_x_unlock(hash_lock); if (block) { - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); buf_LRU_block_free_non_file_page(block); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } bpage = NULL; goto func_exit; } - if (fil_tablespace_deleted_or_being_deleted_in_mem( - space, tablespace_version)) { - /* The page belongs to a space which has been - deleted or is being deleted. */ - *err = DB_TABLESPACE_DELETED; - - goto err_exit; - } - if (block) { bpage = &block->page; - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); ut_ad(buf_pool_from_bpage(bpage) == buf_pool); - buf_page_init(buf_pool, space, offset, fold, zip_size, block); + buf_page_init(buf_pool, page_id, page_size, block); -#ifdef PAGE_ATOMIC_REF_COUNT - /* Note: We set the io state without the protection of - the block->lock. This is because other threads cannot - access this block unless it is in the hash table. */ + /* Note: We are using the hash_lock for protection. This is + safe because no other thread can lookup the block from the + page hashtable yet. */ buf_page_set_io_fix(bpage, BUF_IO_READ); -#endif /* PAGE_ATOMIC_REF_COUNT */ rw_lock_x_unlock(hash_lock); @@ -3960,11 +5397,7 @@ err_exit: rw_lock_x_lock_gen(&block->lock, BUF_IO_READ); -#ifndef PAGE_ATOMIC_REF_COUNT - buf_page_set_io_fix(bpage, BUF_IO_READ); -#endif /* !PAGE_ATOMIC_REF_COUNT */ - - if (zip_size) { + if (page_size.is_compressed()) { /* buf_pool->mutex may be released and reacquired by buf_buddy_alloc(). 
Thus, we must release block->mutex in order not to @@ -3973,9 +5406,10 @@ err_exit: operation until after the block descriptor has been added to buf_pool->LRU and buf_pool->page_hash. */ - mutex_exit(&block->mutex); - data = buf_buddy_alloc(buf_pool, zip_size, &lru); - mutex_enter(&block->mutex); + buf_page_mutex_exit(block); + data = buf_buddy_alloc(buf_pool, page_size.physical(), + &lru); + buf_page_mutex_enter(block); block->page.zip.data = (page_zip_t*) data; /* To maintain the invariant @@ -3987,7 +5421,7 @@ err_exit: buf_unzip_LRU_add_block(block, TRUE); } - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } else { rw_lock_x_unlock(hash_lock); @@ -3995,7 +5429,7 @@ err_exit: control block (bpage), in order to avoid the invocation of buf_buddy_relocate_block() on uninitialized data. */ - data = buf_buddy_alloc(buf_pool, zip_size, &lru); + data = buf_buddy_alloc(buf_pool, page_size.physical(), &lru); rw_lock_x_lock(hash_lock); @@ -4004,8 +5438,7 @@ err_exit: check the page_hash again, as it may have been modified. */ if (UNIV_UNLIKELY(lru)) { - watch_page = buf_page_hash_get_low( - buf_pool, space, offset, fold); + watch_page = buf_page_hash_get_low(buf_pool, page_id); if (UNIV_UNLIKELY(watch_page && !buf_pool_watch_is_sentinel(buf_pool, @@ -4014,7 +5447,8 @@ err_exit: /* The block was added by some other thread. */ rw_lock_x_unlock(hash_lock); watch_page = NULL; - buf_buddy_free(buf_pool, data, zip_size); + buf_buddy_free(buf_pool, data, + page_size.physical()); bpage = NULL; goto func_exit; @@ -4027,28 +5461,25 @@ err_exit: bpage->buf_pool_index = buf_pool_index(buf_pool); page_zip_des_init(&bpage->zip); - page_zip_set_size(&bpage->zip, zip_size); + page_zip_set_size(&bpage->zip, page_size.physical()); bpage->zip.data = (page_zip_t*) data; - bpage->slot = NULL; + bpage->size.copy_from(page_size); mutex_enter(&buf_pool->zip_mutex); - UNIV_MEM_DESC(bpage->zip.data, - page_zip_get_size(&bpage->zip)); + UNIV_MEM_DESC(bpage->zip.data, bpage->size.physical()); buf_page_init_low(bpage); - bpage->state = BUF_BLOCK_ZIP_PAGE; - bpage->space = static_cast(space); - bpage->offset = static_cast(offset); + bpage->state = BUF_BLOCK_ZIP_PAGE; + bpage->id.copy_from(page_id); + bpage->flush_observer = NULL; -#ifdef UNIV_DEBUG - bpage->in_page_hash = FALSE; - bpage->in_zip_hash = FALSE; - bpage->in_flush_list = FALSE; - bpage->in_free_list = FALSE; - bpage->in_LRU_list = FALSE; -#endif /* UNIV_DEBUG */ + ut_d(bpage->in_page_hash = FALSE); + ut_d(bpage->in_zip_hash = FALSE); + ut_d(bpage->in_flush_list = FALSE); + ut_d(bpage->in_free_list = FALSE); + ut_d(bpage->in_LRU_list = FALSE); ut_d(bpage->in_page_hash = TRUE); @@ -4061,24 +5492,20 @@ err_exit: ut_a(buf_fix_count > 0); -#ifdef PAGE_ATOMIC_REF_COUNT os_atomic_increment_uint32( &bpage->buf_fix_count, buf_fix_count); -#else - bpage->buf_fix_count += buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page)); - buf_pool_watch_remove(buf_pool, fold, watch_page); + buf_pool_watch_remove(buf_pool, watch_page); } - HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, - bpage); + HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, + bpage->id.fold(), bpage); rw_lock_x_unlock(hash_lock); /* The block must be put to the LRU list, to the old blocks. 
- The zip_size is already set into the page zip */ + The zip size is already set into the page zip */ buf_LRU_add_block(bpage, TRUE/* to old blocks */); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_LRU_insert_zip_clean(bpage); @@ -4098,63 +5525,54 @@ func_exit: ibuf_mtr_commit(&mtr); } - -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ - + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); ut_ad(!bpage || buf_page_in_file(bpage)); + return(bpage); } -/********************************************************************//** -Initializes a page to the buffer buf_pool. The page is usually not read +/** Initializes a page to the buffer buf_pool. The page is usually not read from a file even if it cannot be found in the buffer buf_pool. This is one of the functions which perform to a block a state transition NOT_USED => FILE_PAGE (the other is buf_page_get_gen). -@return pointer to the block, page bufferfixed */ -UNIV_INTERN +@param[in] page_id page id +@param[in] page_size page size +@param[in] mtr mini-transaction +@return pointer to the block, page bufferfixed */ buf_block_t* buf_page_create( -/*============*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space in units of - a page */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - mtr_t* mtr) /*!< in: mini-transaction handle */ + const page_id_t& page_id, + const page_size_t& page_size, + mtr_t* mtr) { buf_frame_t* frame; buf_block_t* block; - ulint fold; buf_block_t* free_block = NULL; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); rw_lock_t* hash_lock; - ut_ad(mtr); - ut_ad(mtr->state == MTR_ACTIVE); - ut_ad(space || !zip_size); + ut_ad(mtr->is_active()); + ut_ad(page_id.space() != 0 || !page_size.is_compressed()); free_block = buf_LRU_get_free_block(buf_pool); - fold = buf_page_address_fold(space, offset); - hash_lock = buf_page_hash_lock_get(buf_pool, fold); - buf_pool_mutex_enter(buf_pool); + + hash_lock = buf_page_hash_lock_get(buf_pool, page_id); rw_lock_x_lock(hash_lock); - block = (buf_block_t*) buf_page_hash_get_low( - buf_pool, space, offset, fold); + block = (buf_block_t*) buf_page_hash_get_low(buf_pool, page_id); if (block && buf_page_in_file(&block->page) && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) { + #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(space, offset) == 0); -#endif -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - block->page.file_page_was_freed = FALSE; -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + ut_a(ibuf_count_get(page_id) == 0); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + + ut_d(block->page.file_page_was_freed = FALSE); /* Page can be found in buf_pool */ buf_pool_mutex_exit(buf_pool); @@ -4162,23 +5580,19 @@ buf_page_create( buf_block_free(free_block); - return(buf_page_get_with_no_latch(space, zip_size, offset, mtr)); + return(buf_page_get_with_no_latch(page_id, page_size, mtr)); } /* If we get here, the page was not in buf_pool: init it there */ -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, "Creating space %lu page %lu to buffer\n", - (ulong) space, (ulong) offset); - } -#endif /* UNIV_DEBUG */ + DBUG_PRINT("ib_buf", ("create page %u:%u", + page_id.space(), page_id.page_no())); block = free_block; - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); - buf_page_init(buf_pool, space, offset, fold, 
zip_size, block); + buf_page_init(buf_pool, page_id, page_size, block); rw_lock_x_unlock(hash_lock); @@ -4188,7 +5602,7 @@ buf_page_create( buf_block_buf_fix_inc(block, __FILE__, __LINE__); buf_pool->stat.n_pages_created++; - if (zip_size) { + if (page_size.is_compressed()) { void* data; ibool lru; @@ -4199,15 +5613,15 @@ buf_page_create( buf_page_set_io_fix(&block->page, BUF_IO_READ); rw_lock_x_lock(&block->lock); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); /* buf_pool->mutex may be released and reacquired by buf_buddy_alloc(). Thus, we must release block->mutex in order not to break the latching order in the reacquisition of buf_pool->mutex. We also must defer this operation until after the block descriptor has been added to buf_pool->LRU and buf_pool->page_hash. */ - data = buf_buddy_alloc(buf_pool, zip_size, &lru); - mutex_enter(&block->mutex); + data = buf_buddy_alloc(buf_pool, page_size.physical(), &lru); + buf_page_mutex_enter(block); block->page.zip.data = (page_zip_t*) data; /* To maintain the invariant @@ -4228,12 +5642,11 @@ buf_page_create( buf_page_set_accessed(&block->page); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); /* Delete possible entries for the page from the insert buffer: such can exist if the page belonged to an index which was dropped */ - - ibuf_merge_or_delete_for_page(NULL, space, offset, zip_size, TRUE); + ibuf_merge_or_delete_for_page(NULL, page_id, &page_size, TRUE); frame = block->frame; @@ -4241,11 +5654,14 @@ buf_page_create( memset(frame + FIL_PAGE_NEXT, 0xff, 4); mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); - /* Reset to zero the file flush lsn field in the page; if the first - page of an ibdata file is 'created' in this function into the buffer - pool then we lose the original contents of the file flush lsn stamp. - Then InnoDB could in a crash recovery print a big, false, corruption - warning if the stamp contains an lsn bigger than the ib_logfile lsn. */ + /* These 8 bytes are also repurposed for PageIO compression and must + be reset when the frame is assigned to a new page id. See fil0fil.h. + + FIL_PAGE_FILE_FLUSH_LSN is used on the following pages: + (1) The first page of the InnoDB system tablespace (page 0:0) + (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages . + + Therefore we don't transparently compress such pages. 
*/ memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); @@ -4253,8 +5669,7 @@ buf_page_create( ut_a(++buf_dbg_counter % 5771 || buf_validate()); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(buf_block_get_space(block), - buf_block_get_page_no(block)) == 0); + ut_a(ibuf_count_get(block->page.id) == 0); #endif return(block); } @@ -4288,6 +5703,7 @@ buf_page_monitor( ulint level; case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: level = btr_page_get_level_low(frame); /* Check if it is an index page for insert buffer */ @@ -4312,49 +5728,49 @@ buf_page_monitor( } break; - case FIL_PAGE_UNDO_LOG: + case FIL_PAGE_UNDO_LOG: counter = MONITOR_RW_COUNTER(io_type, MONITOR_UNDO_LOG_PAGE); break; - case FIL_PAGE_INODE: + case FIL_PAGE_INODE: counter = MONITOR_RW_COUNTER(io_type, MONITOR_INODE_PAGE); break; - case FIL_PAGE_IBUF_FREE_LIST: + case FIL_PAGE_IBUF_FREE_LIST: counter = MONITOR_RW_COUNTER(io_type, MONITOR_IBUF_FREELIST_PAGE); break; - case FIL_PAGE_IBUF_BITMAP: + case FIL_PAGE_IBUF_BITMAP: counter = MONITOR_RW_COUNTER(io_type, MONITOR_IBUF_BITMAP_PAGE); break; - case FIL_PAGE_TYPE_SYS: + case FIL_PAGE_TYPE_SYS: counter = MONITOR_RW_COUNTER(io_type, MONITOR_SYSTEM_PAGE); break; - case FIL_PAGE_TYPE_TRX_SYS: + case FIL_PAGE_TYPE_TRX_SYS: counter = MONITOR_RW_COUNTER(io_type, MONITOR_TRX_SYSTEM_PAGE); break; - case FIL_PAGE_TYPE_FSP_HDR: + case FIL_PAGE_TYPE_FSP_HDR: counter = MONITOR_RW_COUNTER(io_type, MONITOR_FSP_HDR_PAGE); break; - case FIL_PAGE_TYPE_XDES: + case FIL_PAGE_TYPE_XDES: counter = MONITOR_RW_COUNTER(io_type, MONITOR_XDES_PAGE); break; - case FIL_PAGE_TYPE_BLOB: + case FIL_PAGE_TYPE_BLOB: counter = MONITOR_RW_COUNTER(io_type, MONITOR_BLOB_PAGE); break; - case FIL_PAGE_TYPE_ZBLOB: + case FIL_PAGE_TYPE_ZBLOB: counter = MONITOR_RW_COUNTER(io_type, MONITOR_ZBLOB_PAGE); break; - case FIL_PAGE_TYPE_ZBLOB2: + case FIL_PAGE_TYPE_ZBLOB2: counter = MONITOR_RW_COUNTER(io_type, MONITOR_ZBLOB2_PAGE); break; @@ -4366,7 +5782,7 @@ buf_page_monitor( } /********************************************************************//** -Mark a table with the specified space pointed by bpage->space corrupted. +Mark a table with the specified space pointed by bpage->id.space() corrupted. Also remove the bpage from LRU list. @return TRUE if successful */ static @@ -4378,7 +5794,7 @@ buf_mark_space_corrupt( buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); const ibool uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); - ulint space = bpage->space; + ib_uint32_t space = bpage->id.space(); ibool ret = TRUE; if (!bpage->encrypted) { @@ -4430,8 +5846,7 @@ buf_page_check_corrupt( /*===================*/ buf_page_t* bpage) /*!< in/out: buffer page read from disk */ { - ulint zip_size = buf_page_get_zip_size(bpage); - byte* dst_frame = (zip_size) ? bpage->zip.data : + byte* dst_frame = (bpage->zip.data) ? bpage->zip.data : ((buf_block_t*) bpage)->frame; unsigned key_version = bpage->key_version; bool page_compressed = bpage->page_encrypted; @@ -4459,39 +5874,43 @@ buf_page_check_corrupt( corrupted = (!page_compressed_encrypted && stored_checksum != calculated_checksum); if (corrupted) { - ib_logf(IB_LOG_LEVEL_ERROR, - "%s: Block in space_id %lu in file %s corrupted.", - page_compressed_encrypted ? "Maybe corruption" : "Corruption", - space_id, space ? space->name : "NULL"); - ib_logf(IB_LOG_LEVEL_ERROR, - "Page based on contents %s encrypted.", - (key_version == 0 && page_compressed_encrypted == false) ? 
"not" : "maybe"); - if (stored_checksum != BUF_NO_CHECKSUM_MAGIC || calculated_checksum != BUF_NO_CHECKSUM_MAGIC) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Page stored checksum %lu but calculated checksum %lu.", - stored_checksum, calculated_checksum); + ib::error() << (page_compressed_encrypted ? "Maybe corruption" : "Corruption") + << ": Block in space_id " << space_id + << " in file " << (space ? space->name : "NULL") + << " corrupted."; + + ib::error() << "Page based on contents " + << ((key_version == 0 && page_compressed_encrypted == false) ? "not" : "maybe") + << " encrypted."; + + if (stored_checksum != BUF_NO_CHECKSUM_MAGIC || + calculated_checksum != BUF_NO_CHECKSUM_MAGIC) { + ib::error() << "Page stored checksum " << stored_checksum + << " but calculated checksum " + << calculated_checksum << " ."; } - ib_logf(IB_LOG_LEVEL_ERROR, - "Reason could be that key_version %u in page " - "or in crypt_data %p could not be found.", - key_version, crypt_data); - ib_logf(IB_LOG_LEVEL_ERROR, - "Reason could be also that key management plugin is not found or" - " used encryption algorithm or method does not match."); - ib_logf(IB_LOG_LEVEL_ERROR, - "Based on page page compressed %d, compressed and encrypted %d.", - page_compressed, page_compressed_encrypted); + + ib::error() << "Reason could be that key_version " << key_version + << " in page or in crypt_data " << crypt_data + << " could not be found."; + ib::error() << "Reason could be also that key management plugin is not found or" + " used encryption algorithm or method does not match."; + ib::error() << "Based on page page compressed" + << page_compressed + << ", compressed and encrypted " + << page_compressed_encrypted << " ."; } else { - ib_logf(IB_LOG_LEVEL_ERROR, - "Block in space_id %lu in file %s encrypted.", - space_id, space ? space->name : "NULL"); - ib_logf(IB_LOG_LEVEL_ERROR, - "However key management plugin or used key_id %u is not found or" - " used encryption algorithm or method does not match.", - key_version); - ib_logf(IB_LOG_LEVEL_ERROR, - "Marking tablespace as missing. You may drop this table or" - " install correct key management plugin and key file."); + ib::error() << "Block in space_id " + << space_id + << " in file " + << (space ? space->name : "NULL") + << " encrypted."; + ib::error() << "However key management plugin or used key_id " + << key_version + << " is not found or" + << " used encryption algorithm or method does not match."; + ib::error() << "Marking tablespace as missing. You may drop this table or" + << " install correct key management plugin and key file."; } } @@ -4502,7 +5921,6 @@ buf_page_check_corrupt( Completes an asynchronous read or write request of a file page to or from the buffer pool. @return true if successful */ -UNIV_INTERN bool buf_page_io_complete( /*=================*/ @@ -4515,8 +5933,6 @@ buf_page_io_complete( buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); const ibool uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); - fil_space_t* space = NULL; - ut_a(buf_page_in_file(bpage)); /* We do not need protect io_fix here by mutex to read @@ -4532,10 +5948,13 @@ buf_page_io_complete( ulint read_page_no; ulint read_space_id; byte* frame; + bool compressed_page; + + ut_ad(bpage->zip.data != NULL || ((buf_block_t*)bpage)->frame != NULL); if (!buf_page_decrypt_after_read(bpage)) { /* encryption error! 
*/ - if (buf_page_get_zip_size(bpage)) { + if (bpage->size.is_compressed()) { frame = bpage->zip.data; } else { frame = ((buf_block_t*) bpage)->frame; @@ -4543,14 +5962,16 @@ buf_page_io_complete( goto corrupt; } - if (buf_page_get_zip_size(bpage)) { + if (bpage->size.is_compressed()) { frame = bpage->zip.data; buf_pool->n_pend_unzip++; + if (uncompressed && !buf_zip_decompress((buf_block_t*) bpage, FALSE)) { buf_pool->n_pend_unzip--; + compressed_page = false; goto corrupt; } buf_pool->n_pend_unzip--; @@ -4566,49 +5987,63 @@ buf_page_io_complete( read_space_id = mach_read_from_4( frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); - if (bpage->space == TRX_SYS_SPACE - && buf_dblwr_page_inside(bpage->offset)) { + if (bpage->id.space() == TRX_SYS_SPACE + && buf_dblwr_page_inside(bpage->id.page_no())) { + + ib::error() << "Reading page " << bpage->id + << ", which is in the doublewrite buffer!"; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: reading page %lu\n" - "InnoDB: which is in the" - " doublewrite buffer!\n", - (ulong) bpage->offset); - } else if (!read_space_id && !read_page_no) { + } else if (read_space_id == 0 && read_page_no == 0) { /* This is likely an uninitialized page. */ - } else if ((bpage->space - && bpage->space != read_space_id) - || bpage->offset != read_page_no) { + } else if ((bpage->id.space() != 0 + && bpage->id.space() != read_space_id) + || bpage->id.page_no() != read_page_no) { /* We did not compare space_id to read_space_id if bpage->space == 0, because the field on the page may contain garbage in MySQL < 4.1.1, which only supported bpage->space == 0. */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: space id and page n:o" - " stored in the page\n" - "InnoDB: read in are %lu:%lu," - " should be %lu:%lu!\n", - (ulong) read_space_id, (ulong) read_page_no, - (ulong) bpage->space, - (ulong) bpage->offset); + ib::error() << "Space id and page no stored in " + "the page, read in are " + << page_id_t(read_space_id, read_page_no) + << ", should be " << bpage->id; + } + + compressed_page = Compression::is_compressed_page(frame); + + /* If the decompress failed then the most likely case is + that we are reading in a page for which this instance doesn't + support the compression algorithm. 
*/ + if (compressed_page) { + + Compression::meta_t meta; + + Compression::deserialize_header(frame, &meta); + + ib::error() + << "Page " << bpage->id << " " + << "compressed with " + << Compression::to_string(meta) << " " + << "that is not supported by this instance"; } /* From version 3.23.38 up we store the page checksum to the 4 first bytes of the page end lsn field */ - - if (buf_page_is_corrupted(true, frame, - buf_page_get_zip_size(bpage))) { + if (compressed_page + || buf_page_is_corrupted( + true, frame, bpage->size, + fsp_is_checksum_disabled(bpage->id.space()))) { /* Not a real corruption if it was triggered by error injection */ - DBUG_EXECUTE_IF("buf_page_is_corrupt_failure", - if (bpage->space > TRX_SYS_SPACE + DBUG_EXECUTE_IF( + "buf_page_import_corrupt_failure", + if (bpage->id.space() > TRX_SYS_SPACE + && !Tablespace::is_undo_tablespace( + bpage->id.space()) && buf_mark_space_corrupt(bpage)) { - ib_logf(IB_LOG_LEVEL_INFO, - "Simulated page corruption"); + ib::info() << "Simulated IMPORT " + "corruption"; return(true); } goto page_not_corrupt; @@ -4616,60 +6051,52 @@ buf_page_io_complete( corrupt: bool corrupted = buf_page_check_corrupt(bpage); - if (corrupted) { - fil_system_enter(); - space = fil_space_get_by_id(bpage->space); - fil_system_exit(); - ib_logf(IB_LOG_LEVEL_ERROR, - "Database page corruption on disk" - " or a failed"); - ib_logf(IB_LOG_LEVEL_ERROR, - "Space %lu file %s read of page %lu.", - (ulint)bpage->space, - space ? space->name : "NULL", - (ulong) bpage->offset); - ib_logf(IB_LOG_LEVEL_ERROR, - "You may have to recover" - " from a backup."); - - - buf_page_print(frame, buf_page_get_zip_size(bpage), - BUF_PAGE_PRINT_NO_CRASH); - - ib_logf(IB_LOG_LEVEL_ERROR, - "It is also possible that your operating" - "system has corrupted its own file cache."); - ib_logf(IB_LOG_LEVEL_ERROR, - "and rebooting your computer removes the error."); - ib_logf(IB_LOG_LEVEL_ERROR, - "If the corrupt page is an index page you can also try to"); - ib_logf(IB_LOG_LEVEL_ERROR, - "fix the corruption by dumping, dropping, and reimporting"); - ib_logf(IB_LOG_LEVEL_ERROR, - "the corrupt table. You can use CHECK"); - ib_logf(IB_LOG_LEVEL_ERROR, - "TABLE to scan your table for corruption."); - ib_logf(IB_LOG_LEVEL_ERROR, - "See also " - REFMAN "forcing-innodb-recovery.html" - " about forcing recovery."); + /* Compressed and encrypted pages are basically gibberish avoid + printing the contents. */ + if (corrupted && !compressed_page) { + + ib::error() + << "Database page corruption on disk" + " or a failed file read of page " + << bpage->id + << ". You may have to recover from " + << "a backup."; + + ib::info() + << "It is also possible that your" + " operating system has corrupted" + " its own file cache and rebooting" + " your computer removes the error." + " If the corrupt page is an index page." + " You can also try to fix the" + " corruption by dumping, dropping," + " and reimporting the corrupt table." + " You can use CHECK TABLE to scan" + " your table for corruption. 
" + << FORCE_RECOVERY_MSG; } if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) { + /* If page space id is larger than TRX_SYS_SPACE (0), we will attempt to mark the corresponding table as corrupted instead of crashing server */ - if (bpage->space > TRX_SYS_SPACE + + if (bpage->id.space() > TRX_SYS_SPACE && buf_mark_space_corrupt(bpage)) { + return(false); } else { corrupted = buf_page_check_corrupt(bpage); if (corrupted) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Ending processing because of a corrupt database page."); - - ut_error; + ib::fatal() + << "Aborting because of a" + " corrupt database page in" + " the system tablespace. Or, " + " there was a failure in" + " tagging the tablespace " + " as corrupt."; } ib_push_warning((void *)NULL, DB_DECRYPTION_FAILED, @@ -4679,19 +6106,14 @@ corrupt: " Can't continue opening the table.", (ulint)bpage->space, bpage->key_version); - if (bpage->space > TRX_SYS_SPACE) { - if (corrupted) { - buf_mark_space_corrupt(bpage); - } - } else { - ut_error; - } - return(false); + buf_page_print(frame, bpage->size, BUF_PAGE_PRINT_NO_CRASH); + + return (false); } } } - DBUG_EXECUTE_IF("buf_page_is_corrupt_failure", + DBUG_EXECUTE_IF("buf_page_import_corrupt_failure", page_not_corrupt: bpage = bpage; ); if (recv_recovery_is_on()) { @@ -4700,8 +6122,18 @@ corrupt: recv_recover_page(TRUE, (buf_block_t*) bpage); } - if (uncompressed && !recv_no_ibuf_operations) { - if (bpage && bpage->encrypted) { + /* If space is being truncated then avoid ibuf operation. + During re-init we have already freed ibuf entries. */ + if (uncompressed + && !Compression::is_compressed_page(frame) + && !recv_no_ibuf_operations + && !Tablespace::is_undo_tablespace(bpage->id.space()) + && bpage->id.space() != srv_tmp_space.space_id() + && !srv_is_tablespace_truncated(bpage->id.space()) + && fil_page_get_type(frame) == FIL_PAGE_INDEX + && page_is_leaf(frame)) { + + if (bpage && bpage->encrypted) { fprintf(stderr, "InnoDB: Warning: Table in tablespace %lu encrypted." "However key management plugin or used key_id %u is not found or" @@ -4710,9 +6142,8 @@ corrupt: (ulint)bpage->space, bpage->key_version); } else { ibuf_merge_or_delete_for_page( - (buf_block_t*) bpage, bpage->space, - bpage->offset, buf_page_get_zip_size(bpage), - TRUE); + (buf_block_t*) bpage, bpage->id, + &bpage->size, TRUE); } } } else { @@ -4732,7 +6163,7 @@ corrupt: /* For BUF_IO_READ of compressed-only blocks, the buffered operations will be merged by buf_page_get_gen() after the block has been uncompressed. */ - ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0); + ut_a(ibuf_count_get(bpage->id) == 0); } #endif /* Because this thread which does the unlocking is not the same that @@ -4769,44 +6200,39 @@ corrupt: buf_flush_write_complete(bpage); if (uncompressed) { - rw_lock_s_unlock_gen(&((buf_block_t*) bpage)->lock, - BUF_IO_WRITE); + rw_lock_sx_unlock_gen(&((buf_block_t*) bpage)->lock, + BUF_IO_WRITE); } buf_pool->stat.n_pages_written++; - /* In case of flush batches i.e.: BUF_FLUSH_LIST and - BUF_FLUSH_LRU this function is always called from IO - helper thread. In this case, we decide whether or not - to evict the page based on flush type. The value - passed as evict is the default value in function - definition which is false. - We always evict in case of LRU batch and never evict - in case of flush list batch. For single page flush - the caller sets the appropriate value. */ + /* We decide whether or not to evict the page from the + LRU list based on the flush_type. 
+ * BUF_FLUSH_LIST: don't evict + * BUF_FLUSH_LRU: always evict + * BUF_FLUSH_SINGLE_PAGE: eviction preference is passed + by the caller explicitly. */ if (buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU) { evict = true; } - mutex_exit(buf_page_get_mutex(bpage)); if (evict) { + mutex_exit(buf_page_get_mutex(bpage)); buf_LRU_free_page(bpage, true); + } else { + mutex_exit(buf_page_get_mutex(bpage)); } + break; default: ut_error; } -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, "Has %s page space %lu page no %lu\n", - io_type == BUF_IO_READ ? "read" : "written", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); - } -#endif /* UNIV_DEBUG */ + DBUG_PRINT("ib_buf", ("%s page %u:%u", + io_type == BUF_IO_READ ? "read" : "wrote", + bpage->id.space(), bpage->id.page_no())); buf_pool_mutex_exit(buf_pool); @@ -4815,7 +6241,7 @@ corrupt: /*********************************************************************//** Asserts that all file pages in the buffer are in a replaceable state. -@return TRUE */ +@return TRUE */ static ibool buf_all_freed_instance( @@ -4837,11 +6263,8 @@ buf_all_freed_instance( if (UNIV_LIKELY_NULL(block)) { if (block->page.key_version == 0) { - fprintf(stderr, - "Page %lu %lu still fixed or dirty\n", - (ulong) block->page.space, - (ulong) block->page.offset); - ut_error; + ib::fatal() << "Page " << block->page.id + << " still fixed or dirty"; } } } @@ -4891,7 +6314,7 @@ buf_pool_invalidate_instance( buf_pool_mutex_enter(buf_pool); - while (buf_LRU_scan_and_free_block(buf_pool, TRUE)) { + while (buf_LRU_scan_and_free_block(buf_pool, true)) { } ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0); @@ -4911,7 +6334,6 @@ buf_pool_invalidate_instance( Invalidates the file pages in the buffer pool when an archive recovery is completed. All the file pages buffered must be in a replaceable state when this function is called: not latched and not modified. 
*/ -UNIV_INTERN void buf_pool_invalidate(void) /*=====================*/ @@ -4926,7 +6348,7 @@ buf_pool_invalidate(void) #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /*********************************************************************//** Validates data in one buffer pool instance -@return TRUE */ +@return TRUE */ static ibool buf_pool_validate_instance( @@ -4943,9 +6365,6 @@ buf_pool_validate_instance( ulint n_flush = 0; ulint n_free = 0; ulint n_zip = 0; - ulint fold = 0; - ulint space = 0; - ulint offset = 0; ut_ad(buf_pool); @@ -4963,7 +6382,7 @@ buf_pool_validate_instance( for (j = chunk->size; j--; block++) { - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); switch (buf_block_get_state(block)) { case BUF_BLOCK_POOL_WATCH: @@ -4975,22 +6394,14 @@ buf_pool_validate_instance( break; case BUF_BLOCK_FILE_PAGE: - space = buf_block_get_space(block); - offset = buf_block_get_page_no(block); - fold = buf_page_address_fold(space, offset); - ut_a(buf_page_hash_get_low(buf_pool, - space, - offset, - fold) + ut_a(buf_page_hash_get_low( + buf_pool, block->page.id) == &block->page); #ifdef UNIV_IBUF_COUNT_DEBUG ut_a(buf_page_get_io_fix(&block->page) == BUF_IO_READ - || !ibuf_count_get(buf_block_get_space( - block), - buf_block_get_page_no( - block))); + || !ibuf_count_get(block->page.id)); #endif switch (buf_page_get_io_fix(&block->page)) { case BUF_IO_NONE: @@ -5007,7 +6418,10 @@ buf_pool_validate_instance( assert_s_latched: ut_a(rw_lock_is_locked( &block->lock, - RW_LOCK_SHARED)); + RW_LOCK_S) + || rw_lock_is_locked( + &block->lock, + RW_LOCK_SX)); break; case BUF_FLUSH_LIST: n_list_flush++; @@ -5021,7 +6435,7 @@ assert_s_latched: case BUF_IO_READ: ut_a(rw_lock_is_locked(&block->lock, - RW_LOCK_EX)); + RW_LOCK_X)); break; case BUF_IO_PIN: @@ -5042,7 +6456,7 @@ assert_s_latched: break; } - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } } @@ -5073,9 +6487,7 @@ assert_s_latched: we have acquired buf_pool->zip_mutex above which acts as the 'block->mutex' for these bpages. */ ut_a(!b->oldest_modification); - fold = buf_page_address_fold(b->space, b->offset); - ut_a(buf_page_hash_get_low(buf_pool, b->space, b->offset, - fold) == b); + ut_a(buf_page_hash_get_low(buf_pool, b->id) == b); n_lru++; n_zip++; } @@ -5127,9 +6539,7 @@ assert_s_latched: ut_error; break; } - fold = buf_page_address_fold(b->space, b->offset); - ut_a(buf_page_hash_get_low(buf_pool, b->space, b->offset, - fold) == b); + ut_a(buf_page_hash_get_low(buf_pool, b->id) == b); } ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush); @@ -5139,19 +6549,21 @@ assert_s_latched: mutex_exit(&buf_pool->zip_mutex); - if (n_lru + n_free > buf_pool->curr_size + n_zip) { - fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n", - (ulong) n_lru, (ulong) n_free, - (ulong) buf_pool->curr_size, (ulong) n_zip); - ut_error; + if (buf_pool->curr_size == buf_pool->old_size + && n_lru + n_free > buf_pool->curr_size + n_zip) { + + ib::fatal() << "n_LRU " << n_lru << ", n_free " << n_free + << ", pool " << buf_pool->curr_size + << " zip " << n_zip << ". Aborting..."; } ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru); - if (UT_LIST_GET_LEN(buf_pool->free) != n_free) { - fprintf(stderr, "Free list len %lu, free blocks %lu\n", - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) n_free); - ut_error; + if (buf_pool->curr_size == buf_pool->old_size + && UT_LIST_GET_LEN(buf_pool->free) != n_free) { + + ib::fatal() << "Free list len " + << UT_LIST_GET_LEN(buf_pool->free) + << ", free blocks " << n_free << ". 
Aborting..."; } ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush); @@ -5168,8 +6580,7 @@ assert_s_latched: /*********************************************************************//** Validates the buffer buf_pool data structure. -@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool buf_validate(void) /*==============*/ @@ -5212,37 +6623,14 @@ buf_print_instance( size = buf_pool->curr_size; index_ids = static_cast( - mem_alloc(size * sizeof *index_ids)); + ut_malloc_nokey(size * sizeof *index_ids)); - counts = static_cast(mem_alloc(sizeof(ulint) * size)); + counts = static_cast(ut_malloc_nokey(sizeof(ulint) * size)); buf_pool_mutex_enter(buf_pool); buf_flush_list_mutex_enter(buf_pool); - fprintf(stderr, - "buf_pool size %lu\n" - "database pages %lu\n" - "free pages %lu\n" - "modified database pages %lu\n" - "n pending decompressions %lu\n" - "n pending reads %lu\n" - "n pending flush LRU %lu list %lu single page %lu\n" - "pages made young %lu, not young %lu\n" - "pages read %lu, created %lu, written %lu\n", - (ulong) size, - (ulong) UT_LIST_GET_LEN(buf_pool->LRU), - (ulong) UT_LIST_GET_LEN(buf_pool->free), - (ulong) UT_LIST_GET_LEN(buf_pool->flush_list), - (ulong) buf_pool->n_pend_unzip, - (ulong) buf_pool->n_pend_reads, - (ulong) buf_pool->n_flush[BUF_FLUSH_LRU], - (ulong) buf_pool->n_flush[BUF_FLUSH_LIST], - (ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE], - (ulong) buf_pool->stat.n_pages_made_young, - (ulong) buf_pool->stat.n_pages_not_made_young, - (ulong) buf_pool->stat.n_pages_read, - (ulong) buf_pool->stat.n_pages_created, - (ulong) buf_pool->stat.n_pages_written); + ib::info() << *buf_pool; buf_flush_list_mutex_exit(buf_pool); @@ -5259,7 +6647,7 @@ buf_print_instance( for (; n_blocks--; block++) { const buf_frame_t* frame = block->frame; - if (fil_page_get_type(frame) == FIL_PAGE_INDEX) { + if (fil_page_index_page_check(frame)) { id = btr_page_get_index_id(frame); @@ -5290,28 +6678,26 @@ buf_print_instance( for (i = 0; i < n_found; i++) { index = dict_index_get_if_in_cache(index_ids[i]); - fprintf(stderr, - "Block count for index %llu in buffer is about %lu", - (ullint) index_ids[i], - (ulong) counts[i]); - - if (index) { - putc(' ', stderr); - dict_index_name_print(stderr, NULL, index); + if (!index) { + ib::info() << "Block count for index " + << index_ids[i] << " in buffer is about " + << counts[i]; + } else { + ib::info() << "Block count for index " << index_ids[i] + << " in buffer is about " << counts[i] + << ", index " << index->name + << " of table " << index->table->name; } - - putc('\n', stderr); } - mem_free(index_ids); - mem_free(counts); + ut_free(index_ids); + ut_free(counts); ut_a(buf_pool_validate_instance(buf_pool)); } /*********************************************************************//** Prints info of the buffer buf_pool data structure. */ -UNIV_INTERN void buf_print(void) /*===========*/ @@ -5330,8 +6716,7 @@ buf_print(void) #ifdef UNIV_DEBUG /*********************************************************************//** Returns the number of latched pages in the buffer pool. 
-@return number of latched pages */ -UNIV_INTERN +@return number of latched pages */ ulint buf_get_latched_pages_number_instance( /*==================================*/ @@ -5359,7 +6744,7 @@ buf_get_latched_pages_number_instance( continue; } - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); if (block->page.buf_fix_count != 0 || buf_page_get_io_fix(&block->page) @@ -5367,7 +6752,7 @@ buf_get_latched_pages_number_instance( fixed_pages_number++; } - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } } @@ -5421,8 +6806,7 @@ buf_get_latched_pages_number_instance( /*********************************************************************//** Returns the number of latched pages in all the buffer pools. -@return number of latched pages */ -UNIV_INTERN +@return number of latched pages */ ulint buf_get_latched_pages_number(void) /*==============================*/ @@ -5446,16 +6830,14 @@ buf_get_latched_pages_number(void) /*********************************************************************//** Returns the number of pending buf pool read ios. -@return number of pending read I/O operations */ -UNIV_INTERN +@return number of pending read I/O operations */ ulint buf_get_n_pending_read_ios(void) /*============================*/ { - ulint i; ulint pend_ios = 0; - for (i = 0; i < srv_buf_pool_instances; i++) { + for (ulint i = 0; i < srv_buf_pool_instances; i++) { pend_ios += buf_pool_from_array(i)->n_pend_reads; } @@ -5465,24 +6847,24 @@ buf_get_n_pending_read_ios(void) /*********************************************************************//** Returns the ratio in percents of modified pages in the buffer pool / database pages in the buffer pool. -@return modified page percentage ratio */ -UNIV_INTERN +@return modified page percentage ratio */ double buf_get_modified_ratio_pct(void) /*============================*/ { - double percentage = 0.0; + double ratio; ulint lru_len = 0; ulint free_len = 0; ulint flush_list_len = 0; buf_get_total_list_len(&lru_len, &free_len, &flush_list_len); - percentage = (100.0 * flush_list_len) / (1.0 + lru_len + free_len); + ratio = static_cast(100 * flush_list_len) + / (1 + lru_len + free_len); /* 1 + is there to avoid division by zero */ - return(percentage); + return(ratio); } /*******************************************************************//** @@ -5545,7 +6927,6 @@ buf_stats_aggregate_pool_info( Collect buffer pool stats information for a buffer pool. Also record aggregated stats if there are more than one buffer pool in the server */ -UNIV_INTERN void buf_stats_get_pool_info( /*====================*/ @@ -5554,7 +6935,7 @@ buf_stats_get_pool_info( buf_pool_info_t* all_pool_info) /*!< in/out: buffer pool info to fill */ { - buf_pool_info_t* pool_info; + buf_pool_info_t* pool_info; time_t current_time; double time_elapsed; @@ -5680,7 +7061,6 @@ buf_stats_get_pool_info( /*********************************************************************//** Prints info of the buffer i/o. */ -UNIV_INTERN void buf_print_io_instance( /*==================*/ @@ -5772,7 +7152,6 @@ buf_print_io_instance( /*********************************************************************//** Prints info of the buffer i/o. 
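buf_get_modified_ratio_pct() above now computes 100 * flush_list_len / (1 + lru_len + free_len), where the extra 1 only guards against division by zero. A small self-contained sketch with made-up list lengths (the helper name is illustrative, not the server function):

    #include <cstdio>

    static double modified_ratio_pct(unsigned long flush_list_len,
                                     unsigned long lru_len,
                                     unsigned long free_len)
    {
            /* 1 + is there to avoid division by zero */
            return static_cast<double>(100 * flush_list_len)
                    / (1 + lru_len + free_len);
    }

    int main()
    {
            /* 300 dirty pages, 8000 pages in the LRU list, 192 free pages
            => 30000 / 8193, roughly 3.66 per cent. */
            std::printf("%.2f%%\n", modified_ratio_pct(300, 8000, 192));
            return 0;
    }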
*/ -UNIV_INTERN void buf_print_io( /*=========*/ @@ -5786,7 +7165,7 @@ buf_print_io( one extra buf_pool_info_t, the last one stores aggregated/total values from all pools */ if (srv_buf_pool_instances > 1) { - pool_info = (buf_pool_info_t*) mem_zalloc(( + pool_info = (buf_pool_info_t*) ut_zalloc_nokey(( srv_buf_pool_instances + 1) * sizeof *pool_info); pool_info_total = &pool_info[srv_buf_pool_instances]; @@ -5795,7 +7174,7 @@ buf_print_io( pool_info_total = pool_info = static_cast( - mem_zalloc(sizeof *pool_info)); + ut_zalloc_nokey(sizeof *pool_info)); } for (i = 0; i < srv_buf_pool_instances; i++) { @@ -5831,12 +7210,11 @@ buf_print_io( } } - mem_free(pool_info); + ut_free(pool_info); } /**********************************************************************//** Refreshes the statistics used to print per-second averages. */ -UNIV_INTERN void buf_refresh_io_stats( /*=================*/ @@ -5848,7 +7226,6 @@ buf_refresh_io_stats( /**********************************************************************//** Refreshes the statistics used to print per-second averages. */ -UNIV_INTERN void buf_refresh_io_stats_all(void) /*==========================*/ @@ -5865,7 +7242,6 @@ buf_refresh_io_stats_all(void) /**********************************************************************//** Check if all pages in all buffer pools are in a replacable state. @return FALSE if not */ -UNIV_INTERN ibool buf_all_freed(void) /*===============*/ @@ -5886,8 +7262,7 @@ buf_all_freed(void) /*********************************************************************//** Checks that there currently are no pending i/o-operations for the buffer pool. -@return number of pending i/o */ -UNIV_INTERN +@return number of pending i/o */ ulint buf_pool_check_no_pending_io(void) /*==============================*/ @@ -5918,8 +7293,7 @@ buf_pool_check_no_pending_io(void) Code currently not used /*********************************************************************//** Gets the current length of the free list of buffer blocks. -@return length of the free list */ -UNIV_INTERN +@return length of the free list */ ulint buf_get_free_list_len(void) /*=======================*/ @@ -5937,36 +7311,77 @@ buf_get_free_list_len(void) #endif #else /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */ -UNIV_INTERN + +/** Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. +@param[in] page_id page id +@param[in] page_size page size +@param[in,out] block block to init */ void buf_page_init_for_backup_restore( -/*=============================*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space - in units of a page */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - buf_block_t* block) /*!< in: block to init */ + const page_id_t& page_id, + const page_size_t& page_size, + buf_block_t* block) { - block->page.state = BUF_BLOCK_FILE_PAGE; - block->page.space = space; - block->page.offset = offset; + block->page.state = BUF_BLOCK_FILE_PAGE; + block->page.id = page_id; + block->page.size.copy_from(page_size); page_zip_des_init(&block->page.zip); /* We assume that block->page.data has been allocated - with zip_size == UNIV_PAGE_SIZE. 
*/ - ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); - ut_ad(ut_is_2pow(zip_size)); - page_zip_set_size(&block->page.zip, zip_size); - if (zip_size) { - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; + with page_size == univ_page_size. */ + if (page_size.is_compressed()) { + page_zip_set_size(&block->page.zip, page_size.physical()); + block->page.zip.data = block->frame + page_size.logical(); + } else { + page_zip_set_size(&block->page.zip, 0); } } + #endif /* !UNIV_HOTBACKUP */ +/** Print the given page_id_t object. +@param[in,out] out the output stream +@param[in] page_id the page_id_t object to be printed +@return the output stream */ +std::ostream& +operator<<( + std::ostream& out, + const page_id_t& page_id) +{ + out << "[page id: space=" << page_id.m_space + << ", page number=" << page_id.m_page_no << "]"; + return(out); +} + +/** Print the given buf_pool_t object. +@param[in,out] out the output stream +@param[in] buf_pool the buf_pool_t object to be printed +@return the output stream */ +std::ostream& +operator<<( + std::ostream& out, + const buf_pool_t& buf_pool) +{ + out << "[buffer pool instance: " + << "buf_pool size=" << buf_pool.curr_size + << ", database pages=" << UT_LIST_GET_LEN(buf_pool.LRU) + << ", free pages=" << UT_LIST_GET_LEN(buf_pool.free) + << ", modified database pages=" + << UT_LIST_GET_LEN(buf_pool.flush_list) + << ", n pending decompressions=" << buf_pool.n_pend_unzip + << ", n pending reads=" << buf_pool.n_pend_reads + << ", n pending flush LRU=" << buf_pool.n_flush[BUF_FLUSH_LRU] + << " list=" << buf_pool.n_flush[BUF_FLUSH_LIST] + << " single page=" << buf_pool.n_flush[BUF_FLUSH_SINGLE_PAGE] + << ", pages made young=" << buf_pool.stat.n_pages_made_young + << ", not young=" << buf_pool.stat.n_pages_not_made_young + << ", pages read=" << buf_pool.stat.n_pages_read + << ", created=" << buf_pool.stat.n_pages_created + << ", written=" << buf_pool.stat.n_pages_written << "]"; + return(out); +} + /********************************************************************//** Reserve unused slot from temporary memory array and allocate necessary temporary memory if not yet allocated. 
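The two stream inserters added above let a page id or a whole buffer pool instance be pushed straight into the new ib::info()/ib::fatal() style loggers instead of hand-assembled fprintf calls. A minimal sketch of the same operator<< pattern with a stand-in struct (not the real page_id_t, and printing to std::cout rather than the InnoDB logger):

    #include <iostream>

    struct page_id_sketch {
            unsigned        space;
            unsigned        page_no;
    };

    std::ostream&
    operator<<(std::ostream& out, const page_id_sketch& id)
    {
            return(out << "[page id: space=" << id.space
                       << ", page number=" << id.page_no << "]");
    }

    int main()
    {
            page_id_sketch  id = {5, 42};

            /* Prints: read [page id: space=5, page number=42] */
            std::cout << "read " << id << std::endl;
            return 0;
    }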
@@ -6002,7 +7417,7 @@ buf_pool_reserve_tmp_slot( /* Allocate temporary memory for encryption/decryption */ if (free_slot->crypt_buf_free == NULL) { - free_slot->crypt_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); + free_slot->crypt_buf_free = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE*2)); free_slot->crypt_buf = static_cast(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE)); memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2); } @@ -6010,11 +7425,11 @@ buf_pool_reserve_tmp_slot( /* For page compressed tables allocate temporary memory for compression/decompression */ if (compressed && free_slot->comp_buf_free == NULL) { - free_slot->comp_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); + free_slot->comp_buf_free = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE*2)); free_slot->comp_buf = static_cast(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE)); memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2); #ifdef HAVE_LZO - free_slot->lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); + free_slot->lzo_mem = static_cast(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS)); memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); #endif } @@ -6032,24 +7447,23 @@ buf_page_encrypt_before_write( byte* src_frame, /*!< in: src frame */ ulint space_id) /*!< in: space id */ { - fil_space_crypt_t* crypt_data = fil_space_get_crypt_data(space_id); - ulint zip_size = buf_page_get_zip_size(bpage); - ulint page_size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; + fil_space_crypt_t* crypt_data = fil_space_get_crypt_data(space_id); + const page_size_t& page_size = bpage->size; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - bool page_compressed = fil_space_is_page_compressed(bpage->space); + bool page_compressed = fil_space_is_page_compressed(space_id); bool encrypted = true; bpage->real_size = UNIV_PAGE_SIZE; fil_page_type_validate(src_frame); - if (bpage->offset == 0) { + if (bpage->id.page_no() == 0) { /* Page 0 of a tablespace is not encrypted/compressed */ ut_ad(bpage->key_version == 0); return src_frame; } - if (bpage->space == TRX_SYS_SPACE && bpage->offset == TRX_SYS_PAGE_NO) { + if (space_id == TRX_SYS_SPACE && bpage->id.page_no() == TRX_SYS_PAGE_NO) { /* don't encrypt/compress page as it contains address to dblwr buffer */ bpage->key_version = 0; return src_frame; @@ -6086,17 +7500,17 @@ buf_page_encrypt_before_write( if (!page_compressed) { /* Encrypt page content */ - byte* tmp = fil_space_encrypt(bpage->space, - bpage->offset, + byte* tmp = fil_space_encrypt(space_id, + bpage->id.page_no(), bpage->newest_modification, src_frame, - zip_size, + page_size, dst_frame); ulint key_version = mach_read_from_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); ut_ad(key_version == 0 || key_version >= bpage->key_version); bpage->key_version = key_version; - bpage->real_size = page_size; + bpage->real_size = page_size.physical(); slot->out_buf = dst_frame = tmp; #ifdef UNIV_DEBUG @@ -6106,13 +7520,13 @@ buf_page_encrypt_before_write( } else { /* First we compress the page content */ ulint out_len = 0; - ulint block_size = fil_space_get_block_size(bpage->space, bpage->offset, page_size); + ulint block_size = fil_space_get_block_size(space_id, bpage->id.page_no(), page_size.logical()); - byte *tmp = fil_compress_page(bpage->space, + byte *tmp = fil_compress_page(space_id, (byte *)src_frame, slot->comp_buf, - page_size, - fil_space_get_page_compression_level(bpage->space), + page_size.logical(), + fil_space_get_page_compression_level(space_id), block_size, encrypted, &out_len, @@ -6128,11 +7542,11 
@@ buf_page_encrypt_before_write( if(encrypted) { /* And then we encrypt the page content */ - tmp = fil_space_encrypt(bpage->space, - bpage->offset, + tmp = fil_space_encrypt(space_id, + bpage->id.page_no(), bpage->newest_modification, tmp, - zip_size, + page_size, dst_frame); } @@ -6155,10 +7569,9 @@ buf_page_decrypt_after_read( /*========================*/ buf_page_t* bpage) /*!< in/out: buffer page read from disk */ { - ulint zip_size = buf_page_get_zip_size(bpage); - ulint size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; - - byte* dst_frame = (zip_size) ? bpage->zip.data : + bool compressed = bpage->size.is_compressed(); + const page_size_t& size = bpage->size; + byte* dst_frame = compressed ? bpage->zip.data : ((buf_block_t*) bpage)->frame; unsigned key_version = mach_read_from_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); @@ -6174,7 +7587,7 @@ buf_page_decrypt_after_read( ut_ad(bpage->key_version == 0); - if (bpage->offset == 0) { + if (bpage->id.page_no() == 0) { /* File header pages are not encrypted/compressed */ return (TRUE); } @@ -6183,6 +7596,7 @@ buf_page_decrypt_after_read( bpage->key_version = key_version; bpage->page_encrypted = page_compressed_encrypted; bpage->page_compressed = page_compressed; + bpage->space = bpage->id.space(); if (page_compressed) { /* the page we read is unencrypted */ @@ -6196,7 +7610,7 @@ buf_page_decrypt_after_read( /* decompress using comp_buf to dst_frame */ fil_decompress_page(slot->comp_buf, dst_frame, - size, + size.logical(), &bpage->write_size); /* Mark this slot as free */ @@ -6220,15 +7634,16 @@ buf_page_decrypt_after_read( /* Calculate checksum before decrypt, this will be used later to find out if incorrect key was used. */ if (!page_compressed_encrypted) { - bpage->calculated_checksum = fil_crypt_calculate_checksum(zip_size, dst_frame); + bpage->calculated_checksum = fil_crypt_calculate_checksum(size, dst_frame); } /* decrypt using crypt_buf to dst_frame */ - byte* res = fil_space_decrypt(bpage->space, + byte* res = fil_space_decrypt(bpage->id.space(), slot->crypt_buf, size, dst_frame); + if (!res) { bpage->encrypted = true; success = false; @@ -6249,7 +7664,7 @@ buf_page_decrypt_after_read( /* decompress using comp_buf to dst_frame */ fil_decompress_page(slot->comp_buf, dst_frame, - size, + size.logical(), &bpage->write_size); #ifdef UNIV_DEBUG @@ -6267,3 +7682,4 @@ buf_page_decrypt_after_read( return (success); } +#endif /* !UNIV_INNOCHECKSUM */ diff --git a/storage/innobase/buf/buf0checksum.cc b/storage/innobase/buf/buf0checksum.cc index 4101d117896..935c8a7ceee 100644 --- a/storage/innobase/buf/buf0checksum.cc +++ b/storage/innobase/buf/buf0checksum.cc @@ -24,37 +24,37 @@ Created Aug 11, 2011 Vasil Dimov *******************************************************/ #include "univ.i" -#include "fil0fil.h" /* FIL_* */ -#include "ut0crc32.h" /* ut_crc32() */ -#include "ut0rnd.h" /* ut_fold_binary() */ +#include "fil0fil.h" +#include "ut0crc32.h" +#include "ut0rnd.h" #include "buf0checksum.h" #ifndef UNIV_INNOCHECKSUM -#include "srv0srv.h" /* SRV_CHECKSUM_* */ -#include "buf0types.h" - +#include "srv0srv.h" #endif /* !UNIV_INNOCHECKSUM */ +#include "buf0types.h" /** the macro MYSQL_SYSVAR_ENUM() requires "long unsigned int" and if we use srv_checksum_algorithm_t here then we get a compiler error: ha_innodb.cc:12251: error: cannot convert 'srv_checksum_algorithm_t*' to 'long unsigned int*' in initialization */ -UNIV_INTERN ulong srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB; - 
-/********************************************************************//** -Calculates a page CRC32 which is stored to the page when it is written -to a file. Note that we must be careful to calculate the same value on -32-bit and 64-bit architectures. -@return checksum */ -UNIV_INTERN -ib_uint32_t +ulong srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB; + +/** Calculates the CRC32 checksum of a page. The value is stored to the page +when it is written to a file and also checked for a match when reading from +the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian +variants. Note that we must be careful to calculate the same value on 32-bit +and 64-bit architectures. +@param[in] page buffer page (UNIV_PAGE_SIZE bytes) +@param[in] use_legacy_big_endian if true then use big endian +byteorder when converting byte strings to integers +@return checksum */ +uint32_t buf_calc_page_crc32( -/*================*/ - const byte* page) /*!< in: buffer page */ + const byte* page, + bool use_legacy_big_endian /* = false */) { - ib_uint32_t checksum; - /* Since the field FIL_PAGE_FILE_FLUSH_LSN, and in versions <= 4.1.x FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, are written outside the buffer pool to the first pages of data files, we have to skip them in the page @@ -63,22 +63,26 @@ buf_calc_page_crc32( checksum is stored, and also the last 8 bytes of page because there we store the old formula checksum. */ - checksum = ut_crc32(page + FIL_PAGE_OFFSET, - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION - - FIL_PAGE_OFFSET) - ^ ut_crc32(page + FIL_PAGE_DATA, - UNIV_PAGE_SIZE - FIL_PAGE_DATA - - FIL_PAGE_END_LSN_OLD_CHKSUM); + ut_crc32_func_t crc32_func = use_legacy_big_endian + ? ut_crc32_legacy_big_endian + : ut_crc32; - return(checksum); + const uint32_t c1 = crc32_func( + page + FIL_PAGE_OFFSET, + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION - FIL_PAGE_OFFSET); + + const uint32_t c2 = crc32_func( + page + FIL_PAGE_DATA, + UNIV_PAGE_SIZE - FIL_PAGE_DATA - FIL_PAGE_END_LSN_OLD_CHKSUM); + + return(c1 ^ c2); } /********************************************************************//** Calculates a page checksum which is stored to the page when it is written to a file. Note that we must be careful to calculate the same value on 32-bit and 64-bit architectures. -@return checksum */ -UNIV_INTERN +@return checksum */ ulint buf_calc_page_new_checksum( /*=======================*/ @@ -112,8 +116,7 @@ checksum. NOTE: we must first store the new formula checksum to FIL_PAGE_SPACE_OR_CHKSUM before calculating and storing this old checksum because this takes that field as an input! -@return checksum */ -UNIV_INTERN +@return checksum */ ulint buf_calc_page_old_checksum( /*=======================*/ @@ -128,12 +131,9 @@ buf_calc_page_old_checksum( return(checksum); } -#ifndef UNIV_INNOCHECKSUM - /********************************************************************//** Return a printable string describing the checksum algorithm. -@return algorithm name */ -UNIV_INTERN +@return algorithm name */ const char* buf_checksum_algorithm_name( /*========================*/ @@ -157,5 +157,3 @@ buf_checksum_algorithm_name( ut_error; return(NULL); } - -#endif /* !UNIV_INNOCHECKSUM */ diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 16877818ba9..64ecb54be63 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. 
All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2016, MariaDB Corporation. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under @@ -24,6 +24,7 @@ Doublwrite buffer module Created 2011/12/19 *******************************************************/ +#include "ha_prototypes.h" #include "buf0dblwr.h" #ifdef UNIV_NONINL @@ -41,16 +42,11 @@ Created 2011/12/19 #ifndef UNIV_HOTBACKUP -#ifdef UNIV_PFS_MUTEX -/* Key to register the mutex with performance schema */ -UNIV_INTERN mysql_pfs_key_t buf_dblwr_mutex_key; -#endif /* UNIV_PFS_RWLOCK */ - /** The doublewrite buffer */ -UNIV_INTERN buf_dblwr_t* buf_dblwr = NULL; +buf_dblwr_t* buf_dblwr = NULL; /** Set to TRUE when the doublewrite buffer is being created */ -UNIV_INTERN ibool buf_dblwr_being_created = FALSE; +ibool buf_dblwr_being_created = FALSE; #define TRX_SYS_DOUBLEWRITE_BLOCKS 2 @@ -58,7 +54,6 @@ UNIV_INTERN ibool buf_dblwr_being_created = FALSE; Determines if a page number is located inside the doublewrite buffer. @return TRUE if the location is inside the two blocks of the doublewrite buffer */ -UNIV_INTERN ibool buf_dblwr_page_inside( /*==================*/ @@ -87,7 +82,7 @@ buf_dblwr_page_inside( /****************************************************************//** Calls buf_page_get() on the TRX_SYS_PAGE and returns a pointer to the doublewrite buffer within it. -@return pointer to the doublewrite buffer within the filespace header +@return pointer to the doublewrite buffer within the filespace header page. */ UNIV_INLINE byte* @@ -97,8 +92,9 @@ buf_dblwr_get( { buf_block_t* block; - block = buf_page_get(TRX_SYS_SPACE, 0, TRX_SYS_PAGE_NO, - RW_X_LATCH, mtr); + block = buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO), + univ_page_size, RW_X_LATCH, mtr); + buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK); return(buf_block_get_frame(block) + TRX_SYS_DOUBLEWRITE); @@ -107,7 +103,6 @@ buf_dblwr_get( /********************************************************************//** Flush a batch of writes to the datafiles that have already been written to the dblwr buffer on disk. */ -UNIV_INLINE void buf_dblwr_sync_datafiles() /*======================*/ @@ -121,7 +116,7 @@ buf_dblwr_sync_datafiles() os_aio_wait_until_no_pending_writes(); /* Now we flush the data to disk (for example, with fsync) */ - fil_flush_file_spaces(FIL_TABLESPACE); + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); } /****************************************************************//** @@ -136,7 +131,7 @@ buf_dblwr_init( ulint buf_size; buf_dblwr = static_cast( - mem_zalloc(sizeof(buf_dblwr_t))); + ut_zalloc_nokey(sizeof(buf_dblwr_t))); /* There are two blocks of same size in the doublewrite buffer. 
*/ @@ -147,11 +142,10 @@ buf_dblwr_init( ut_a(srv_doublewrite_batch_size > 0 && srv_doublewrite_batch_size < buf_size); - mutex_create(buf_dblwr_mutex_key, - &buf_dblwr->mutex, SYNC_DOUBLEWRITE); + mutex_create(LATCH_ID_BUF_DBLWR, &buf_dblwr->mutex); - buf_dblwr->b_event = os_event_create(); - buf_dblwr->s_event = os_event_create(); + buf_dblwr->b_event = os_event_create("dblwr_batch_event"); + buf_dblwr->s_event = os_event_create("dblwr_single_event"); buf_dblwr->first_free = 0; buf_dblwr->s_reserved = 0; buf_dblwr->b_reserved = 0; @@ -162,24 +156,25 @@ buf_dblwr_init( doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK2); buf_dblwr->in_use = static_cast( - mem_zalloc(buf_size * sizeof(bool))); + ut_zalloc_nokey(buf_size * sizeof(bool))); buf_dblwr->write_buf_unaligned = static_cast( - ut_malloc((1 + buf_size) * UNIV_PAGE_SIZE)); + ut_malloc_nokey((1 + buf_size) * UNIV_PAGE_SIZE)); buf_dblwr->write_buf = static_cast( ut_align(buf_dblwr->write_buf_unaligned, UNIV_PAGE_SIZE)); buf_dblwr->buf_block_arr = static_cast( - mem_zalloc(buf_size * sizeof(void*))); + ut_zalloc_nokey(buf_size * sizeof(void*))); } /****************************************************************//** Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ -UNIV_INTERN -void +doublewrite buffer is placed on the trx system header page. +@return true if successful, false if not. */ +__attribute__((warn_unused_result)) +bool buf_dblwr_create(void) /*==================*/ { @@ -195,7 +190,7 @@ buf_dblwr_create(void) if (buf_dblwr) { /* Already inited */ - return; + return(true); } start_again: @@ -213,23 +208,22 @@ start_again: mtr_commit(&mtr); buf_dblwr_being_created = FALSE; - return; + return(true); } - ib_logf(IB_LOG_LEVEL_INFO, - "Doublewrite buffer not found: creating new"); - - if (buf_pool_get_curr_size() - < ((TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE - + FSP_EXTENT_SIZE / 2 + 100) - * UNIV_PAGE_SIZE)) { + ib::info() << "Doublewrite buffer not found: creating new"; - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create doublewrite buffer: you must " - "increase your buffer pool size. Cannot continue " - "operation."); + ulint min_doublewrite_size = + ( ( 2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE + + FSP_EXTENT_SIZE / 2 + + 100) + * UNIV_PAGE_SIZE); + if (buf_pool_get_curr_size() < min_doublewrite_size) { + ib::error() << "Cannot create doublewrite buffer: you must" + " increase your buffer pool size. Cannot continue" + " operation."; - exit(EXIT_FAILURE); + return(false); } block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, @@ -242,15 +236,14 @@ start_again: buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); if (block2 == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create doublewrite buffer: you must " - "increase your tablespace size. " - "Cannot continue operation."); + ib::error() << "Cannot create doublewrite buffer: you must" + " increase your tablespace size." + " Cannot continue operation."; /* We exit without committing the mtr to prevent its modifications to the database getting to disk */ - exit(EXIT_FAILURE); + return(false); } fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG; @@ -261,12 +254,11 @@ start_again: new_block = fseg_alloc_free_page( fseg_header, prev_page_no + 1, FSP_UP, &mtr); if (new_block == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create doublewrite buffer: you must " - "increase your tablespace size. 
" - "Cannot continue operation."); + ib::error() << "Cannot create doublewrite buffer: " + " you must increase your tablespace size." + " Cannot continue operation."; - exit(EXIT_FAILURE); + return(false); } /* We read the allocated pages to the buffer pool; @@ -279,7 +271,7 @@ start_again: has not been written to in doublewrite. */ ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1); - page_no = buf_block_get_page_no(new_block); + page_no = new_block->page.id.page_no(); if (i == FSP_EXTENT_SIZE / 2) { ut_a(page_no == FSP_EXTENT_SIZE); @@ -346,55 +338,73 @@ start_again: /* Remove doublewrite pages from LRU */ buf_pool_invalidate(); - ib_logf(IB_LOG_LEVEL_INFO, "Doublewrite buffer created"); + ib::info() << "Doublewrite buffer created"; goto start_again; } -/****************************************************************//** -At a database startup initializes the doublewrite buffer memory structure if +/** +At database startup initializes the doublewrite buffer memory structure if we already have a doublewrite buffer created in the data files. If we are upgrading to an InnoDB version which supports multiple tablespaces, then this function performs the necessary update operations. If we are in a crash -recovery, this function loads the pages from double write buffer into memory. */ -void +recovery, this function loads the pages from double write buffer into memory. +@param[in] file File handle +@param[in] path Path name of file +@return DB_SUCCESS or error code */ +dberr_t buf_dblwr_init_or_load_pages( -/*=========================*/ os_file_t file, - char* path, - bool load_corrupt_pages) + const char* path) { - byte* buf; - byte* read_buf; - byte* unaligned_read_buf; - ulint block1; - ulint block2; - byte* page; - ibool reset_space_ids = FALSE; - byte* doublewrite; - ulint space_id; - ulint i; - ulint block_bytes = 0; - recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + byte* buf; + byte* page; + ulint block1; + ulint block2; + ulint space_id; + byte* read_buf; + byte* doublewrite; + byte* unaligned_read_buf; + ibool reset_space_ids = FALSE; + recv_dblwr_t& recv_dblwr = recv_sys->dblwr; /* We do the file i/o past the buffer pool */ - unaligned_read_buf = static_cast(ut_malloc(3 * UNIV_PAGE_SIZE)); + unaligned_read_buf = static_cast( + ut_malloc_nokey(3 * UNIV_PAGE_SIZE)); read_buf = static_cast( ut_align(unaligned_read_buf, UNIV_PAGE_SIZE)); /* Read the trx sys header to check if we are using the doublewrite buffer */ - off_t trx_sys_page = TRX_SYS_PAGE_NO * UNIV_PAGE_SIZE; - os_file_read(file, read_buf, trx_sys_page, UNIV_PAGE_SIZE); + dberr_t err; + + IORequest read_request(IORequest::READ); + + read_request.disable_compression(); + + err = os_file_read( + read_request, + file, read_buf, TRX_SYS_PAGE_NO * UNIV_PAGE_SIZE, + UNIV_PAGE_SIZE); + + if (err != DB_SUCCESS) { + + ib::error() + << "Failed to read the system tablespace header page"; + + ut_free(unaligned_read_buf); + + return(err); + } doublewrite = read_buf + TRX_SYS_DOUBLEWRITE; if (mach_read_from_4(read_buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION) != 0) { byte* tmp = fil_space_decrypt((ulint)TRX_SYS_SPACE, read_buf + UNIV_PAGE_SIZE, - UNIV_PAGE_SIZE, /* page size */ + univ_page_size, /* page size */ read_buf); doublewrite = tmp + TRX_SYS_DOUBLEWRITE; } @@ -410,7 +420,8 @@ buf_dblwr_init_or_load_pages( buf = buf_dblwr->write_buf; } else { - goto leave_func; + ut_free(unaligned_read_buf); + return(DB_SUCCESS); } if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED) @@ -424,32 +435,56 @@ 
buf_dblwr_init_or_load_pages( reset_space_ids = TRUE; - ib_logf(IB_LOG_LEVEL_INFO, - "Resetting space id's in the doublewrite buffer"); + ib::info() << "Resetting space id's in the doublewrite buffer"; } /* Read the pages from the doublewrite buffer to memory */ + err = os_file_read( + read_request, + file, buf, block1 * UNIV_PAGE_SIZE, + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE); + + if (err != DB_SUCCESS) { + + ib::error() + << "Failed to read the first double write buffer " + "extent"; + + ut_free(unaligned_read_buf); + + return(err); + } + + err = os_file_read( + read_request, + file, + buf + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE, + block2 * UNIV_PAGE_SIZE, + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE); - block_bytes = TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE; + if (err != DB_SUCCESS) { - os_file_read(file, buf, block1 * UNIV_PAGE_SIZE, block_bytes); - os_file_read(file, buf + block_bytes, block2 * UNIV_PAGE_SIZE, - block_bytes); + ib::error() + << "Failed to read the second double write buffer " + "extent"; + + ut_free(unaligned_read_buf); + + return(err); + } /* Check if any of these pages is half-written in data files, in the intended position */ page = buf; - for (i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * TRX_SYS_DOUBLEWRITE_BLOCKS; i++) { - - ulint source_page_no; - + for (ulint i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 2; i++) { if (reset_space_ids) { + ulint source_page_no; space_id = 0; - mach_write_to_4(page - + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id); + mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, + space_id); /* We do not need to calculate new checksums for the pages because the field .._SPACE_ID does not affect them. Write the page back to where we read it from. */ @@ -461,79 +496,111 @@ buf_dblwr_init_or_load_pages( + i - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE; } - os_file_write(path, file, page, - source_page_no * UNIV_PAGE_SIZE, - UNIV_PAGE_SIZE); - } else if (load_corrupt_pages) { + IORequest write_request(IORequest::WRITE); + + /* Recovered data file pages are written out + as uncompressed. */ + + write_request.disable_compression(); + + err = os_file_write( + write_request, path, file, page, + source_page_no * UNIV_PAGE_SIZE, + UNIV_PAGE_SIZE); + if (err != DB_SUCCESS) { + + ib::error() + << "Failed to write to the double write" + " buffer"; + + ut_free(unaligned_read_buf); + + return(err); + } + + } else { recv_dblwr.add(page); } - page += UNIV_PAGE_SIZE; + page += univ_page_size.physical(); } if (reset_space_ids) { os_file_flush(file); } -leave_func: ut_free(unaligned_read_buf); + + return(DB_SUCCESS); } -/****************************************************************//** -Process the double write buffer pages. */ +/** Process and remove the double write buffer pages for all tablespaces. 
*/ void -buf_dblwr_process() -/*===============*/ +buf_dblwr_process(void) { - ulint space_id; - ulint page_no; - ulint page_no_dblwr = 0; - byte* page; - byte* read_buf; - byte* unaligned_read_buf; - recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + ulint page_no_dblwr = 0; + byte* read_buf; + byte* unaligned_read_buf; + recv_dblwr_t& recv_dblwr = recv_sys->dblwr; - unaligned_read_buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); + unaligned_read_buf = static_cast( + ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); read_buf = static_cast( ut_align(unaligned_read_buf, UNIV_PAGE_SIZE)); - for (std::list::iterator i = recv_dblwr.pages.begin(); - i != recv_dblwr.pages.end(); ++i, ++page_no_dblwr ) { + for (recv_dblwr_t::list::iterator i = recv_dblwr.pages.begin(); + i != recv_dblwr.pages.end(); + ++i, ++page_no_dblwr) { bool is_compressed = false; - page = *i; - page_no = mach_read_from_4(page + FIL_PAGE_OFFSET); - space_id = mach_read_from_4(page + FIL_PAGE_SPACE_ID); + const byte* page = *i; + ulint page_no = page_get_page_no(page); + ulint space_id = page_get_space_id(page); + + fil_space_t* space = fil_space_get(space_id); - if (!fil_tablespace_exists_in_mem(space_id)) { - /* Maybe we have dropped the single-table tablespace + if (space == NULL) { + /* Maybe we have dropped the tablespace and this page once belonged to it: do nothing */ + continue; + } + + fil_space_open_if_needed(space); - } else if (!fil_check_adress_in_tablespace(space_id, - page_no)) { - ib_logf(IB_LOG_LEVEL_WARN, - "A page in the doublewrite buffer is not " - "within space bounds; space id %lu " - "page number %lu, page %lu in " - "doublewrite buf.", - (ulong) space_id, (ulong) page_no, - page_no_dblwr); + if (page_no >= space->size) { + + /* Do not report the warning if the tablespace is + schedule for truncate or was truncated and we have live + MLOG_TRUNCATE record in redo. */ + bool skip_warning = + srv_is_tablespace_truncated(space_id) + || srv_was_tablespace_truncated(space_id); + + if (!skip_warning) { + ib::warn() << "Page " << page_no_dblwr + << " in the doublewrite buffer is" + " not within space bounds: page " + << page_id_t(space_id, page_no); + } } else { - ulint zip_size = fil_space_get_zip_size(space_id); + const page_size_t page_size(space->flags); + const page_id_t page_id(space_id, page_no); + + /* We want to ensure that for partial reads the + unread portion of the page is NUL. */ + memset(read_buf, 0x0, page_size.physical()); + + IORequest request; + + request.dblwr_recover(); /* Read in the actual page from the file */ - fil_io(OS_FILE_READ, - true, - space_id, - zip_size, - page_no, - 0, - zip_size ? zip_size : UNIV_PAGE_SIZE, - read_buf, - NULL, - 0); + dberr_t err = fil_io( + request, true, + page_id, page_size, + 0, page_size.physical(), read_buf, NULL, NULL); /* Is page compressed ? 
*/ is_compressed = fil_page_is_compressed_encrypted(read_buf) | @@ -544,19 +611,26 @@ buf_dblwr_process() if (is_compressed) { fil_decompress_page(NULL, read_buf, UNIV_PAGE_SIZE, NULL, true); } + if (err != DB_SUCCESS) { + + ib::warn() + << "Double write buffer recovery: " + << page_id << " read failed with " + << "error: " << ut_strerr(err); + } + + if (fil_space_verify_crypt_checksum(read_buf, page_size)) { - if (fil_space_verify_crypt_checksum(read_buf, zip_size)) { /* page is encrypted and checksum is OK */ - } else if (buf_page_is_corrupted(true, read_buf, zip_size)) { + } else if (buf_page_is_corrupted( + true, read_buf, page_size, + fsp_is_checksum_disabled(space_id))) { - fprintf(stderr, - "InnoDB: Warning: database page" - " corruption or a failed\n" - "InnoDB: file read of" - " space %lu page %lu.\n" - "InnoDB: Trying to recover it from" - " the doublewrite buffer.\n", - (ulong) space_id, (ulong) page_no); + ib::warn() << "Database page corruption or" + << " a failed file read of page " + << page_id + << ". Trying to recover it from the" + << " doublewrite buffer."; /* Is page compressed ? */ is_compressed = fil_page_is_compressed_encrypted(page) | @@ -565,109 +639,95 @@ buf_dblwr_process() /* If page was compressed, decompress it before we check checksum. */ if (is_compressed) { - fil_decompress_page(NULL, page, UNIV_PAGE_SIZE, NULL, true); + fil_decompress_page(NULL, (byte*)page, UNIV_PAGE_SIZE, NULL, true); } - if (fil_space_verify_crypt_checksum(page, zip_size)) { + if (fil_space_verify_crypt_checksum(page, page_size)) { /* the doublewrite buffer page is encrypted and OK */ - } else if (buf_page_is_corrupted(true, - page, - zip_size)) { - fprintf(stderr, - "InnoDB: Dump of the page:\n"); + } else if (buf_page_is_corrupted( + true, page, page_size, + fsp_is_checksum_disabled(space_id))) { + + ib::error() << "Dump of the page:"; + buf_page_print( - read_buf, zip_size, + read_buf, page_size, BUF_PAGE_PRINT_NO_CRASH); - fprintf(stderr, - "InnoDB: Dump of" - " corresponding page" - " in doublewrite buffer:\n"); + ib::error() << "Dump of corresponding" + " page in doublewrite buffer:"; + buf_page_print( - page, zip_size, + page, page_size, BUF_PAGE_PRINT_NO_CRASH); - fprintf(stderr, - "InnoDB: Also the page in the" - " doublewrite buffer" - " is corrupt.\n" - "InnoDB: Cannot continue" - " operation.\n" - "InnoDB: You can try to" - " recover the database" - " with the my.cnf\n" - "InnoDB: option:\n" - "InnoDB:" - " innodb_force_recovery=6\n"); - ut_error; + ib::fatal() << "The page in the" + " doublewrite buffer is" + " corrupt. Cannot continue" + " operation. You can try to" + " recover the database with" + " innodb_force_recovery=6"; } + } else if (buf_page_is_zeroes(read_buf, page_size) + && !buf_page_is_zeroes(page, page_size) + && !buf_page_is_corrupted( + true, page, page_size, + fsp_is_checksum_disabled(space_id))) { + + /* Database page contained only zeroes, while + a valid copy is available in dblwr buffer. */ - /* Write the good page from the - doublewrite buffer to the intended - position */ + } else { - fil_io(OS_FILE_WRITE, - true, - space_id, - zip_size, - page_no, - 0, - zip_size ? 
zip_size : UNIV_PAGE_SIZE, - page, - NULL, - 0); + bool t1 = buf_page_is_zeroes( + read_buf, page_size); - ib_logf(IB_LOG_LEVEL_INFO, - "Recovered the page from" - " the doublewrite buffer."); + bool t2 = buf_page_is_zeroes(page, page_size); - } else if (buf_page_is_zeroes(read_buf, zip_size)) { + bool t3 = buf_page_is_corrupted( + true, page, page_size, + fsp_is_checksum_disabled(space_id)); - if (!buf_page_is_zeroes(page, zip_size) - && !buf_page_is_corrupted(true, page, - zip_size)) { + if (t1 && !(t2 || t3)) { /* Database page contained only zeroes, while a valid copy is available in dblwr buffer. */ - fil_io(OS_FILE_WRITE, - true, - space_id, - zip_size, - page_no, - 0, - zip_size ? zip_size : UNIV_PAGE_SIZE, - page, - NULL, - 0); + } else { + continue; } } - } - } - fil_flush_file_spaces(FIL_TABLESPACE); + /* Recovered data file pages are written out + as uncompressed. */ - { - size_t bytes = TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE; - byte *unaligned_buf = static_cast( - ut_malloc(bytes + UNIV_PAGE_SIZE - 1)); + IORequest write_request(IORequest::WRITE); - byte *buf = static_cast( - ut_align(unaligned_buf, UNIV_PAGE_SIZE)); - memset(buf, 0, bytes); + write_request.disable_compression(); - fil_io(OS_FILE_WRITE, true, TRX_SYS_SPACE, 0, - buf_dblwr->block1, 0, bytes, buf, NULL, NULL); - fil_io(OS_FILE_WRITE, true, TRX_SYS_SPACE, 0, - buf_dblwr->block2, 0, bytes, buf, NULL, NULL); + /* Write the good page from the doublewrite + buffer to the intended position. */ - ut_free(unaligned_buf); - } + fil_io(write_request, true, + page_id, page_size, + 0, page_size.physical(), + const_cast(page), NULL, NULL); + + ib::info() + << "Recovered page " + << page_id + << " from the doublewrite buffer."; + } + } + + recv_dblwr.pages.clear(); + + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); + ut_free(unaligned_read_buf); } /****************************************************************//** Frees doublewrite buffer. */ -UNIV_INTERN void buf_dblwr_free(void) /*================*/ @@ -677,35 +737,38 @@ buf_dblwr_free(void) ut_ad(buf_dblwr->s_reserved == 0); ut_ad(buf_dblwr->b_reserved == 0); - os_event_free(buf_dblwr->b_event); - os_event_free(buf_dblwr->s_event); + os_event_destroy(buf_dblwr->b_event); + os_event_destroy(buf_dblwr->s_event); ut_free(buf_dblwr->write_buf_unaligned); buf_dblwr->write_buf_unaligned = NULL; - mem_free(buf_dblwr->buf_block_arr); + ut_free(buf_dblwr->buf_block_arr); buf_dblwr->buf_block_arr = NULL; - mem_free(buf_dblwr->in_use); + ut_free(buf_dblwr->in_use); buf_dblwr->in_use = NULL; mutex_free(&buf_dblwr->mutex); - mem_free(buf_dblwr); + ut_free(buf_dblwr); buf_dblwr = NULL; } /********************************************************************//** Updates the doublewrite buffer when an IO request is completed. */ -UNIV_INTERN void buf_dblwr_update( /*=============*/ const buf_page_t* bpage, /*!< in: buffer block descriptor */ buf_flush_t flush_type)/*!< in: flush type */ { - if (!srv_use_doublewrite_buf || buf_dblwr == NULL) { + if (!srv_use_doublewrite_buf + || buf_dblwr == NULL + || fsp_is_system_temporary(bpage->id.space())) { return; } + ut_ad(!srv_read_only_mode); + switch (flush_type) { case BUF_FLUSH_LIST: case BUF_FLUSH_LRU: @@ -721,7 +784,7 @@ buf_dblwr_update( mutex_exit(&buf_dblwr->mutex); /* This will finish the batch. Sync data files to the disk. 
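The buf_dblwr_process() rewrite above boils down to a per-page choice: keep the data-file page if it is usable, restore it from the doublewrite copy if it is corrupt (or all zeroes) while the copy is valid, and abort only when both copies are bad. A compressed sketch of that choice, folding the separate corruption, zero-page and encryption-checksum checks into two assumed boolean inputs:

    #include <cstdio>

    /* Possible outcomes for one page during doublewrite recovery. */
    enum dblwr_action_sketch {
            KEEP_DATA_PAGE,         /* data file copy is already usable */
            RESTORE_FROM_DBLWR,     /* overwrite it with the doublewrite copy */
            ABORT_SERVER            /* both copies bad: the ib::fatal() path */
    };

    /* data_page_bad: the data-file page is corrupt, or all zeroes while a
    valid non-zero copy sits in the doublewrite buffer.
    dblwr_copy_bad: the doublewrite copy fails its checksum checks. */
    static dblwr_action_sketch
    decide(bool data_page_bad, bool dblwr_copy_bad)
    {
            if (!data_page_bad) {
                    return(KEEP_DATA_PAGE);
            }

            return(dblwr_copy_bad ? ABORT_SERVER : RESTORE_FROM_DBLWR);
    }

    int main()
    {
            std::printf("torn data page, good copy -> %d\n", decide(true, false));
            std::printf("healthy data page         -> %d\n", decide(false, true));
            return 0;
    }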
*/ - fil_flush_file_spaces(FIL_TABLESPACE); + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); mutex_enter(&buf_dblwr->mutex); /* We can now reuse the doublewrite memory buffer: */ @@ -779,18 +842,16 @@ buf_dblwr_check_page_lsn( - FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: ERROR: The page to be written" - " seems corrupt!\n" - "InnoDB: The low 4 bytes of LSN fields do not match " - "(" ULINTPF " != " ULINTPF ")!" - " Noticed in the buffer pool.\n", - mach_read_from_4( - page + FIL_PAGE_LSN + 4), - mach_read_from_4( - page + UNIV_PAGE_SIZE - - FIL_PAGE_END_LSN_OLD_CHKSUM + 4)); + const ulint lsn1 = mach_read_from_4( + page + FIL_PAGE_LSN + 4); + const ulint lsn2 = mach_read_from_4( + page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM + + 4); + + ib::error() << "The page to be written seems corrupt!" + " The low 4 bytes of LSN fields do not match" + " (" << lsn1 << " != " << lsn2 << ")!" + " Noticed in the buffer pool."; } } @@ -803,21 +864,13 @@ buf_dblwr_assert_on_corrupt_block( /*==============================*/ const buf_block_t* block) /*!< in: block to check */ { - buf_page_print(block->frame, 0, BUF_PAGE_PRINT_NO_CRASH); - - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Apparent corruption of an" - " index page n:o %lu in space %lu\n" - "InnoDB: to be written to data file." - " We intentionally crash server\n" - "InnoDB: to prevent corrupt data" - " from ending up in data\n" - "InnoDB: files.\n", - (ulong) buf_block_get_page_no(block), - (ulong) buf_block_get_space(block)); - - ut_error; + buf_page_print(block->frame, univ_page_size, BUF_PAGE_PRINT_NO_CRASH); + + ib::fatal() << "Apparent corruption of an index page " + << block->page.id + << " to be written to data file. We intentionally crash" + " the server to prevent corrupt data from ending up in" + " data files."; } /********************************************************************//** @@ -829,26 +882,50 @@ buf_dblwr_check_block( /*==================*/ const buf_block_t* block) /*!< in: block to check */ { - if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE - || block->page.zip.data) { - /* No simple validate for compressed pages exists. */ - return; - } + ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - buf_dblwr_check_page_lsn(block->frame); - - if (!block->check_index_page_at_flush) { + if (block->skip_flush_check) { return; } - if (page_is_comp(block->frame)) { - if (!page_simple_validate_new(block->frame)) { - buf_dblwr_assert_on_corrupt_block(block); + switch (fil_page_get_type(block->frame)) { + case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: + if (page_is_comp(block->frame)) { + if (page_simple_validate_new(block->frame)) { + return; + } + } else if (page_simple_validate_old(block->frame)) { + return; } - } else if (!page_simple_validate_old(block->frame)) { - - buf_dblwr_assert_on_corrupt_block(block); + /* While it is possible that this is not an index page + but just happens to have wrongly set FIL_PAGE_TYPE, + such pages should never be modified to without also + adjusting the page type during page allocation or + buf_flush_init_for_writing() or fil_page_reset_type(). */ + break; + case FIL_PAGE_TYPE_FSP_HDR: + case FIL_PAGE_IBUF_BITMAP: + case FIL_PAGE_TYPE_UNKNOWN: + /* Do not complain again, we already reset this field. 
*/ + case FIL_PAGE_UNDO_LOG: + case FIL_PAGE_INODE: + case FIL_PAGE_IBUF_FREE_LIST: + case FIL_PAGE_TYPE_SYS: + case FIL_PAGE_TYPE_TRX_SYS: + case FIL_PAGE_TYPE_XDES: + case FIL_PAGE_TYPE_BLOB: + case FIL_PAGE_TYPE_ZBLOB: + case FIL_PAGE_TYPE_ZBLOB2: + /* TODO: validate also non-index pages */ + return; + case FIL_PAGE_TYPE_ALLOCATED: + /* empty pages should never be flushed */ + return; + break; } + + buf_dblwr_assert_on_corrupt_block(block); } /********************************************************************//** @@ -862,45 +939,43 @@ buf_dblwr_write_block_to_datafile( bool sync) /*!< in: true if sync IO is requested */ { - ut_a(bpage); ut_a(buf_page_in_file(bpage)); - const ulint flags = sync - ? OS_FILE_WRITE - : OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER; + ulint type = IORequest::WRITE; + + if (sync) { + type |= IORequest::DO_NOT_WAKE; + } + + IORequest request(type); + /* We request frame here to get correct buffer in case of + encryption and/or page compression */ void * frame = buf_page_get_frame(bpage); - if (bpage->zip.data) { - fil_io(flags, - sync, - buf_page_get_space(bpage), - buf_page_get_zip_size(bpage), - buf_page_get_page_no(bpage), - 0, - buf_page_get_zip_size(bpage), - frame, - (void*) bpage, - 0); + if (bpage->zip.data != NULL) { + ut_ad(bpage->size.is_compressed()); - return; - } + fil_io(request, sync, bpage->id, bpage->size, 0, + bpage->size.physical(), + (void*) frame, + (void*) bpage, NULL); + } else { + ut_ad(!bpage->size.is_compressed()); + + /* Our IO API is common for both reads and writes and is + therefore geared towards a non-const parameter. */ + buf_block_t* block = reinterpret_cast( + const_cast(bpage)); - const buf_block_t* block = (buf_block_t*) bpage; - ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - buf_dblwr_check_page_lsn(block->frame); - - fil_io(flags, - sync, - buf_block_get_space(block), - 0, - buf_block_get_page_no(block), - 0, - bpage->real_size, - frame, - (void*) block, - (ulint *)&bpage->write_size); + ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); + buf_dblwr_check_page_lsn(block->frame); + + fil_io(request, + sync, bpage->id, bpage->size, 0, bpage->size.physical(), + frame, block, (ulint *)&bpage->write_size); + } } /********************************************************************//** @@ -909,7 +984,6 @@ and also wakes up the aio thread if simulated aio is used. It is very important to call this function after a batch of writes has been posted, and also when we may have to wait for a page latch! Otherwise a deadlock of threads can occur. */ -UNIV_INTERN void buf_dblwr_flush_buffered_writes(void) /*=================================*/ @@ -924,6 +998,8 @@ buf_dblwr_flush_buffered_writes(void) return; } + ut_ad(!srv_read_only_mode); + try_again: mutex_enter(&buf_dblwr->mutex); @@ -935,13 +1011,19 @@ try_again: mutex_exit(&buf_dblwr->mutex); + /* Wake possible simulated aio thread as there could be + system temporary tablespace pages active for flushing. + Note: system temporary tablespace pages are not scheduled + for doublewrite. */ + os_aio_simulated_wake_handler_threads(); + return; } if (buf_dblwr->batch_running) { /* Another thread is running the batch right now. Wait for it to finish. 
*/ - ib_int64_t sig_count = os_event_reset(buf_dblwr->b_event); + int64_t sig_count = os_event_reset(buf_dblwr->b_event); mutex_exit(&buf_dblwr->mutex); os_event_wait_low(buf_dblwr->b_event, sig_count); @@ -992,9 +1074,9 @@ try_again: len = ut_min(TRX_SYS_DOUBLEWRITE_BLOCK_SIZE, buf_dblwr->first_free) * UNIV_PAGE_SIZE; - fil_io(OS_FILE_WRITE, true, TRX_SYS_SPACE, 0, - buf_dblwr->block1, 0, len, - (void*) write_buf, NULL, 0); + fil_io(IORequestWrite, true, + page_id_t(TRX_SYS_SPACE, buf_dblwr->block1), univ_page_size, + 0, len, (void*) write_buf, NULL, NULL); if (buf_dblwr->first_free <= TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) { /* No unwritten pages in the second block. */ @@ -1008,9 +1090,9 @@ try_again: write_buf = buf_dblwr->write_buf + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE; - fil_io(OS_FILE_WRITE, true, TRX_SYS_SPACE, 0, - buf_dblwr->block2, 0, len, - (void*) write_buf, NULL, 0); + fil_io(IORequestWrite, true, + page_id_t(TRX_SYS_SPACE, buf_dblwr->block2), univ_page_size, + 0, len, (void*) write_buf, NULL, NULL); flush: /* increment the doublewrite flushed pages counter */ @@ -1052,14 +1134,11 @@ flush: Posts a buffer page for writing. If the doublewrite memory buffer is full, calls buf_dblwr_flush_buffered_writes and waits for for free space to appear. */ -UNIV_INTERN void buf_dblwr_add_to_batch( /*====================*/ buf_page_t* bpage) /*!< in: buffer block to write */ { - ulint zip_size; - ut_a(buf_page_in_file(bpage)); try_again: @@ -1075,7 +1154,7 @@ try_again: point. The only exception is when a user thread is forced to do a flush batch because of a sync checkpoint. */ - ib_int64_t sig_count = os_event_reset(buf_dblwr->b_event); + int64_t sig_count = os_event_reset(buf_dblwr->b_event); mutex_exit(&buf_dblwr->mutex); os_event_wait_low(buf_dblwr->b_event, sig_count); @@ -1090,26 +1169,28 @@ try_again: goto try_again; } - zip_size = buf_page_get_zip_size(bpage); + byte* p = buf_dblwr->write_buf + + univ_page_size.physical() * buf_dblwr->first_free; + + /* We request frame here to get correct buffer in case of + encryption and/or page compression */ void * frame = buf_page_get_frame(bpage); - if (zip_size) { - UNIV_MEM_ASSERT_RW(bpage->zip.data, zip_size); + if (bpage->size.is_compressed()) { + UNIV_MEM_ASSERT_RW(bpage->zip.data, bpage->size.physical()); /* Copy the compressed page and clear the rest. */ - memcpy(buf_dblwr->write_buf - + UNIV_PAGE_SIZE * buf_dblwr->first_free, - frame, zip_size); - memset(buf_dblwr->write_buf - + UNIV_PAGE_SIZE * buf_dblwr->first_free - + zip_size, 0, UNIV_PAGE_SIZE - zip_size); + + memcpy(p, frame, bpage->size.physical()); + + memset(p + bpage->size.physical(), 0x0, + univ_page_size.physical() - bpage->size.physical()); } else { ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); - UNIV_MEM_ASSERT_RW(((buf_block_t*) bpage)->frame, - UNIV_PAGE_SIZE); - memcpy(buf_dblwr->write_buf - + UNIV_PAGE_SIZE * buf_dblwr->first_free, - frame, UNIV_PAGE_SIZE); + UNIV_MEM_ASSERT_RW(frame, + bpage->size.logical()); + + memcpy(p, frame, bpage->size.logical()); } buf_dblwr->buf_block_arr[buf_dblwr->first_free] = bpage; @@ -1140,7 +1221,6 @@ flushes in the doublewrite buffer are in use we wait here for one to become free. We are guaranteed that a slot will become free because any thread that is using a slot must also release the slot before leaving this function. 
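buf_dblwr_write_single_page() above reserves one of a fixed number of single-page slots and, when every slot is taken, resets s_event, releases the mutex, waits, and retries; the wait terminates because each user releases its slot before leaving the function. A sketch of that reserve/wait/release shape using standard C++ primitives in place of the InnoDB mutex and os_event (slot count and names assumed):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>

    struct slot_pool_sketch {
            std::mutex              mtx;
            std::condition_variable cv;
            unsigned                reserved = 0;
            unsigned                n_slots  = 4;   /* assumed pool size */

            /* Block until a slot can be reserved, as in the retry loop above. */
            void reserve()
            {
                    std::unique_lock<std::mutex> lock(mtx);
                    cv.wait(lock, [this] { return reserved < n_slots; });
                    ++reserved;
            }

            /* Every user must release before returning, or reserve() can hang. */
            void release()
            {
                    {
                            std::lock_guard<std::mutex> lock(mtx);
                            --reserved;
                    }
                    cv.notify_one();
            }
    };

    int main()
    {
            slot_pool_sketch pool;

            pool.reserve();
            std::printf("slots in use: %u\n", pool.reserved);
            pool.release();
            return 0;
    }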
*/ -UNIV_INTERN void buf_dblwr_write_single_page( /*========================*/ @@ -1149,7 +1229,6 @@ buf_dblwr_write_single_page( { ulint n_slots; ulint size; - ulint zip_size; ulint offset; ulint i; @@ -1183,8 +1262,7 @@ retry: if (buf_dblwr->s_reserved == n_slots) { /* All slots are reserved. */ - ib_int64_t sig_count = - os_event_reset(buf_dblwr->s_event); + int64_t sig_count = os_event_reset(buf_dblwr->s_event); mutex_exit(&buf_dblwr->mutex); os_event_wait_low(buf_dblwr->s_event, sig_count); @@ -1230,36 +1308,39 @@ retry: write it. This is so because we want to pad the remaining bytes in the doublewrite page with zeros. */ - zip_size = buf_page_get_zip_size(bpage); + /* We request frame here to get correct buffer in case of + encryption and/or page compression */ void * frame = buf_page_get_frame(bpage); - if (zip_size) { - memcpy(buf_dblwr->write_buf + UNIV_PAGE_SIZE * i, - frame, zip_size); - memset(buf_dblwr->write_buf + UNIV_PAGE_SIZE * i - + zip_size, 0, UNIV_PAGE_SIZE - zip_size); - - fil_io(OS_FILE_WRITE, - true, - TRX_SYS_SPACE, 0, - offset, - 0, - UNIV_PAGE_SIZE, - (void*) (buf_dblwr->write_buf + UNIV_PAGE_SIZE * i), - NULL, - 0); + if (bpage->size.is_compressed()) { + memcpy(buf_dblwr->write_buf + univ_page_size.physical() * i, + frame, bpage->size.physical()); + + memset(buf_dblwr->write_buf + univ_page_size.physical() * i + + bpage->size.physical(), 0x0, + univ_page_size.physical() - bpage->size.physical()); + + fil_io(IORequestWrite, + true, + page_id_t(TRX_SYS_SPACE, offset), + univ_page_size, + 0, + univ_page_size.physical(), + (void *)(buf_dblwr->write_buf + univ_page_size.physical() * i), + NULL, + NULL); } else { /* It is a regular page. Write it directly to the doublewrite buffer */ - fil_io(OS_FILE_WRITE, - true, - TRX_SYS_SPACE, 0, - offset, - 0, - bpage->real_size, - frame, - NULL, - 0); + fil_io(IORequestWrite, + true, + page_id_t(TRX_SYS_SPACE, offset), + univ_page_size, + 0, + univ_page_size.physical(), + (void*) frame, + NULL, + NULL); } /* Now flush the doublewrite buffer data to disk */ diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index 0abf7118b4f..831dd13e172 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -23,32 +23,37 @@ Implements a buffer pool dump/load. 
Created April 08, 2011 Vasil Dimov *******************************************************/ -#include "univ.i" +#include "my_global.h" +#include "my_sys.h" +/* JAN: TODO: missing MySQL 5.7 include */ +#ifdef HAVE_MY_THREAD_H +// #include "my_thread.h" +#endif + +#include "mysql/psi/mysql_stage.h" +#include "mysql/psi/psi.h" -#include /* va_* */ -#include /* strerror() */ +#include "univ.i" -#include "buf0buf.h" /* buf_pool_mutex_enter(), srv_buf_pool_instances */ +#include "buf0buf.h" #include "buf0dump.h" -#include "db0err.h" -#include "dict0dict.h" /* dict_operation_lock */ -#include "os0file.h" /* OS_FILE_MAX_PATH */ -#include "os0sync.h" /* os_event* */ -#include "os0thread.h" /* os_thread_* */ -#include "srv0srv.h" /* srv_fast_shutdown, srv_buf_dump* */ -#include "srv0start.h" /* srv_shutdown_state */ -#include "sync0rw.h" /* rw_lock_s_lock() */ -#include "ut0byte.h" /* ut_ull_create() */ -#include "ut0sort.h" /* UT_SORT_FUNCTION_BODY */ +#include "dict0dict.h" +#include "os0file.h" +#include "os0thread.h" +#include "srv0srv.h" +#include "srv0start.h" +#include "sync0rw.h" +#include "ut0byte.h" + +#include enum status_severity { + STATUS_VERBOSE, STATUS_INFO, - STATUS_NOTICE, STATUS_ERR }; -#define SHUTTING_DOWN() (UNIV_UNLIKELY(srv_shutdown_state \ - != SRV_SHUTDOWN_NONE)) +#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) /* Flags that tell the buffer pool dump/load thread which action should it take after being waked up. */ @@ -73,7 +78,6 @@ Wakes up the buffer pool dump/load thread and instructs it to start a dump. This function is called by MySQL code via buffer_pool_dump_now() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_dump_start() /*============*/ @@ -87,7 +91,6 @@ Wakes up the buffer pool dump/load thread and instructs it to start a load. This function is called by MySQL code via buffer_pool_load_now() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_load_start() /*============*/ @@ -123,7 +126,18 @@ buf_dump_status( sizeof(export_vars.innodb_buffer_pool_dump_status), fmt, ap); - ib_logf((ib_log_level_t) severity, "%s", export_vars.innodb_buffer_pool_dump_status); + switch (severity) { + case STATUS_INFO: + ib::info() << export_vars.innodb_buffer_pool_dump_status; + break; + + case STATUS_ERR: + ib::error() << export_vars.innodb_buffer_pool_dump_status; + break; + + case STATUS_VERBOSE: + break; + } va_end(ap); } @@ -154,32 +168,70 @@ buf_load_status( sizeof(export_vars.innodb_buffer_pool_load_status), fmt, ap); - if (severity == STATUS_NOTICE || severity == STATUS_ERR) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: %s\n", - export_vars.innodb_buffer_pool_load_status); + switch (severity) { + case STATUS_INFO: + ib::info() << export_vars.innodb_buffer_pool_load_status; + break; + + case STATUS_ERR: + ib::error() << export_vars.innodb_buffer_pool_load_status; + break; + + case STATUS_VERBOSE: + break; } va_end(ap); } -/** Returns the directory path where the buffer pool dump file will be created. -@return directory path */ +/** Generate the path to the buffer pool dump/load file. +@param[out] path generated path +@param[in] path_size size of 'path', used as in snprintf(3). 
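/* A hedged, standalone sketch of the path resolution strategy introduced
here, using plain POSIX stat()/realpath() instead of the MySQL wrappers
os_file_status() and my_realpath(): resolve the full path if the dump
file already exists, otherwise resolve only the data directory and append
the file name to it, taking care of a trailing separator. */

#include <limits.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <string>

std::string generate_dump_path(
	const std::string&	data_home,
	const std::string&	dump_filename)
{
	std::string	candidate = data_home + "/" + dump_filename;
	char		resolved[PATH_MAX];
	struct stat	st;

	if (stat(candidate.c_str(), &st) == 0
	    && realpath(candidate.c_str(), resolved) != NULL) {
		/* The file exists: realpath() can resolve it directly. */
		return(std::string(resolved));
	}

	/* The file does not exist yet: resolve the directory only. */
	if (realpath(data_home.c_str(), resolved) == NULL) {
		return(candidate);	/* fall back to the unresolved path */
	}

	std::string	dir(resolved);

	if (!dir.empty() && dir[dir.size() - 1] != '/') {
		dir += '/';
	}

	return(dir + dump_filename);
}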
*/ static -const char* -get_buf_dump_dir() +void +buf_dump_generate_path( + char* path, + size_t path_size) { - const char* dump_dir; + char buf[FN_REFLEN]; + + ut_snprintf(buf, sizeof(buf), "%s%c%s", srv_data_home, + OS_PATH_SEPARATOR, srv_buf_dump_filename); + + os_file_type_t type; + bool exists = false; + bool ret; + + ret = os_file_status(buf, &exists, &type); + + /* For realpath() to succeed the file must exist. */ + + if (ret && exists) { + /* my_realpath() assumes the destination buffer is big enough + to hold FN_REFLEN bytes. */ + ut_a(path_size >= FN_REFLEN); - /* The dump file should be created in the default data directory if - innodb_data_home_dir is set as an empty string. */ - if (strcmp(srv_data_home, "") == 0) { - dump_dir = fil_path_to_mysql_datadir; + my_realpath(path, buf, 0); } else { - dump_dir = srv_data_home; - } + /* If it does not exist, then resolve only srv_data_home + and append srv_buf_dump_filename to it. */ + char srv_data_home_full[FN_REFLEN]; + + my_realpath(srv_data_home_full, srv_data_home, 0); - return(dump_dir); + if (srv_data_home_full[strlen(srv_data_home_full) - 1] + == OS_PATH_SEPARATOR) { + + ut_snprintf(path, path_size, "%s%s", + srv_data_home_full, + srv_buf_dump_filename); + } else { + ut_snprintf(path, path_size, "%s%c%s", + srv_data_home_full, + OS_PATH_SEPARATOR, + srv_buf_dump_filename); + } + } } /*****************************************************************//** @@ -204,14 +256,12 @@ buf_dump( ulint i; int ret; - ut_snprintf(full_filename, sizeof(full_filename), - "%s%c%s", get_buf_dump_dir(), SRV_PATH_SEPARATOR, - srv_buf_dump_filename); + buf_dump_generate_path(full_filename, sizeof(full_filename)); ut_snprintf(tmp_filename, sizeof(tmp_filename), "%s.incomplete", full_filename); - buf_dump_status(STATUS_NOTICE, "Dumping buffer pool(s) to %s", + buf_dump_status(STATUS_INFO, "Dumping buffer pool(s) to %s", full_filename); f = fopen(tmp_filename, "w"); @@ -257,8 +307,8 @@ buf_dump( } } - dump = static_cast( - ut_malloc(n_pages * sizeof(*dump))) ; + dump = static_cast(ut_malloc_nokey( + n_pages * sizeof(*dump))); if (dump == NULL) { buf_pool_mutex_exit(buf_pool); @@ -277,8 +327,8 @@ buf_dump( ut_a(buf_page_in_file(bpage)); - dump[j] = BUF_DUMP_CREATE(buf_page_get_space(bpage), - buf_page_get_page_no(bpage)); + dump[j] = BUF_DUMP_CREATE(bpage->id.space(), + bpage->id.page_no()); } ut_a(j == n_pages); @@ -311,10 +361,10 @@ buf_dump( counter == limit) { counter = 0; buf_dump_status( - STATUS_INFO, - "Dumping buffer pool " - ULINTPF "/" ULINTPF ", " - "page " ULINTPF "/" ULINTPF, + STATUS_VERBOSE, + "Dumping buffer pool" + " " ULINTPF "/" ULINTPF "," + " page " ULINTPF "/" ULINTPF, i + 1, srv_buf_pool_instances, j + 1, n_pages); } @@ -357,46 +407,10 @@ buf_dump( ut_sprintf_timestamp(now); - buf_dump_status(STATUS_NOTICE, + buf_dump_status(STATUS_INFO, "Buffer pool(s) dump completed at %s", now); } -/*****************************************************************//** -Compare two buffer pool dump entries, used to sort the dump on -space_no,page_no before loading in order to increase the chance for -sequential IO. 
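/* Standalone sketch of why the hand-written buf_dump_cmp()/buf_dump_sort()
pair can be dropped further down in favor of std::sort: a dump entry is
assumed to be a 64-bit value with the space id in the high word and the
page number in the low word (as BUF_DUMP_CREATE/BUF_DUMP_SPACE/
BUF_DUMP_PAGE do), so the packed integers compare exactly like
(space, page) pairs and a plain sort yields sequential IO order. */

#include <algorithm>
#include <cstdint>
#include <vector>

typedef uint64_t	dump_entry_t;

inline dump_entry_t
dump_create(uint32_t space, uint32_t page)
{
	return((static_cast<dump_entry_t>(space) << 32) | page);
}

inline uint32_t
dump_space(dump_entry_t e)
{
	return(static_cast<uint32_t>(e >> 32));
}

inline uint32_t
dump_page(dump_entry_t e)
{
	return(static_cast<uint32_t>(e & 0xFFFFFFFFU));
}

/* Sort dump entries so that pages of one tablespace are loaded
contiguously and in ascending page order. */
void
sort_dump(std::vector<dump_entry_t>& dump)
{
	std::sort(dump.begin(), dump.end());
}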
-@return -1/0/1 if entry 1 is smaller/equal/bigger than entry 2 */ -static -lint -buf_dump_cmp( -/*=========*/ - const buf_dump_t d1, /*!< in: buffer pool dump entry 1 */ - const buf_dump_t d2) /*!< in: buffer pool dump entry 2 */ -{ - if (d1 < d2) { - return(-1); - } else if (d1 == d2) { - return(0); - } else { - return(1); - } -} - -/*****************************************************************//** -Sort a buffer pool dump on space_no, page_no. */ -static -void -buf_dump_sort( -/*==========*/ - buf_dump_t* dump, /*!< in/out: buffer pool dump to sort */ - buf_dump_t* tmp, /*!< in/out: temp storage */ - ulint low, /*!< in: lowest index (inclusive) */ - ulint high) /*!< in: highest index (non-inclusive) */ -{ - UT_SORT_FUNCTION_BODY(buf_dump_sort, dump, tmp, low, high, - buf_dump_cmp); -} - /*****************************************************************//** Artificially delay the buffer pool loading if necessary. The idea of this function is to prevent hogging the server with IO and slowing down @@ -405,7 +419,7 @@ UNIV_INLINE void buf_load_throttle_if_needed( /*========================*/ - ulint* last_check_time, /*!< in/out: miliseconds since epoch + ulint* last_check_time, /*!< in/out: milliseconds since epoch of the last time we did check if throttling is needed, we do the check every srv_io_capacity IO ops. */ @@ -455,7 +469,7 @@ buf_load_throttle_if_needed( "cur_activity_count == *last_activity_count" check and calling ut_time_ms() that often may turn out to be too expensive. */ - if (elapsed_time < 1000 /* 1 sec (1000 mili secs) */) { + if (elapsed_time < 1000 /* 1 sec (1000 milli secs) */) { os_thread_sleep((1000 - elapsed_time) * 1000 /* micro secs */); } @@ -478,7 +492,6 @@ buf_load() char now[32]; FILE* f; buf_dump_t* dump; - buf_dump_t* dump_tmp; ulint dump_n; ulint total_buffer_pools_pages; ulint i; @@ -489,11 +502,9 @@ buf_load() /* Ignore any leftovers from before */ buf_load_abort_flag = FALSE; - ut_snprintf(full_filename, sizeof(full_filename), - "%s%c%s", get_buf_dump_dir(), SRV_PATH_SEPARATOR, - srv_buf_dump_filename); + buf_dump_generate_path(full_filename, sizeof(full_filename)); - buf_load_status(STATUS_NOTICE, + buf_load_status(STATUS_INFO, "Loading buffer pool(s) from %s", full_filename); f = fopen(full_filename, "r"); @@ -523,22 +534,23 @@ buf_load() what = "parsing"; } fclose(f); - buf_load_status(STATUS_ERR, "Error %s '%s', " - "unable to load buffer pool (stage 1)", + buf_load_status(STATUS_ERR, "Error %s '%s'," + " unable to load buffer pool (stage 1)", what, full_filename); return; } /* If dump is larger than the buffer pool(s), then we ignore the extra trailing. This could happen if a dump is made, then buffer - pool is shrunk and then load it attempted. */ + pool is shrunk and then load is attempted. 
*/ total_buffer_pools_pages = buf_pool_get_n_pages() * srv_buf_pool_instances; if (dump_n > total_buffer_pools_pages) { dump_n = total_buffer_pools_pages; } - dump = static_cast(ut_malloc(dump_n * sizeof(*dump))); + dump = static_cast(ut_malloc_nokey(dump_n + * sizeof(*dump))); if (dump == NULL) { fclose(f); @@ -549,19 +561,6 @@ buf_load() return; } - dump_tmp = static_cast( - ut_malloc(dump_n * sizeof(*dump_tmp))); - - if (dump_tmp == NULL) { - ut_free(dump); - fclose(f); - buf_load_status(STATUS_ERR, - "Cannot allocate " ULINTPF " bytes: %s", - (ulint) (dump_n * sizeof(*dump_tmp)), - strerror(errno)); - return; - } - rewind(f); for (i = 0; i < dump_n && !SHUTTING_DOWN(); i++) { @@ -575,24 +574,22 @@ buf_load() /* else */ ut_free(dump); - ut_free(dump_tmp); fclose(f); buf_load_status(STATUS_ERR, - "Error parsing '%s', unable " - "to load buffer pool (stage 2)", + "Error parsing '%s', unable" + " to load buffer pool (stage 2)", full_filename); return; } if (space_id > ULINT32_MASK || page_no > ULINT32_MASK) { ut_free(dump); - ut_free(dump_tmp); fclose(f); buf_load_status(STATUS_ERR, - "Error parsing '%s': bogus " - "space,page " ULINTPF "," ULINTPF - " at line " ULINTPF ", " - "unable to load buffer pool", + "Error parsing '%s': bogus" + " space,page " ULINTPF "," ULINTPF + " at line " ULINTPF "," + " unable to load buffer pool", full_filename, space_id, page_no, i); @@ -612,42 +609,107 @@ buf_load() if (dump_n == 0) { ut_free(dump); ut_sprintf_timestamp(now); - buf_load_status(STATUS_NOTICE, - "Buffer pool(s) load completed at %s " - "(%s was empty)", now, full_filename); + buf_load_status(STATUS_INFO, + "Buffer pool(s) load completed at %s" + " (%s was empty)", now, full_filename); return; } if (!SHUTTING_DOWN()) { - buf_dump_sort(dump, dump_tmp, 0, dump_n); + std::sort(dump, dump + dump_n); } - ut_free(dump_tmp); - - ulint last_check_time = 0; - ulint last_activity_cnt = 0; + ulint last_check_time = 0; + ulint last_activity_cnt = 0; + + /* Avoid calling the expensive fil_space_acquire_silent() for each + page within the same tablespace. dump[] is sorted by (space, page), + so all pages from a given tablespace are consecutive. */ + ulint cur_space_id = BUF_DUMP_SPACE(dump[0]); + fil_space_t* space = fil_space_acquire_silent(cur_space_id); + page_size_t page_size(space ? space->flags : 0); + + /* JAN: TODO: MySQL 5.7 PSI +#ifdef HAVE_PSI_STAGE_INTERFACE + PSI_stage_progress* pfs_stage_progress + = mysql_set_stage(srv_stage_buffer_pool_load.m_key); + #endif*/ /* HAVE_PSI_STAGE_INTERFACE */ + /* + mysql_stage_set_work_estimated(pfs_stage_progress, dump_n); + mysql_stage_set_work_completed(pfs_stage_progress, 0); + */ for (i = 0; i < dump_n && !SHUTTING_DOWN(); i++) { - buf_read_page_async(BUF_DUMP_SPACE(dump[i]), - BUF_DUMP_PAGE(dump[i])); + /* space_id for this iteration of the loop */ + const ulint this_space_id = BUF_DUMP_SPACE(dump[i]); + + if (this_space_id != cur_space_id) { + if (space != NULL) { + fil_space_release(space); + } + + cur_space_id = this_space_id; + space = fil_space_acquire_silent(cur_space_id); + + if (space != NULL) { + const page_size_t cur_page_size( + space->flags); + page_size.copy_from(cur_page_size); + } + } + + /* JAN: TODO: As we use background page read below, + if tablespace is encrypted we cant use it. 
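/* Sketch of the "acquire the tablespace once per run of entries" pattern
used in the load loop above. acquire_space() is a hypothetical stand-in
for fil_space_acquire_silent(); only the grouping logic over the sorted
(space, page) dump is the point, including the skip of spaces that are
missing or otherwise unusable (for example the encrypted case noted
above). */

#include <cstdint>
#include <utility>
#include <vector>

/* Hypothetical stand-in: pretend every tablespace exists. */
static bool
acquire_space(uint32_t /* space_id */)
{
	return(true);
}

void
load_sorted_dump(const std::vector<std::pair<uint32_t, uint32_t> >& dump)
{
	bool		first = true;
	bool		have_space = false;
	uint32_t	cur_space = 0;

	for (size_t i = 0; i < dump.size(); i++) {
		/* The dump is sorted by (space, page), so a change of the
		space id marks the start of a new run of consecutive
		entries and a single lookup covers the whole run. */
		if (first || dump[i].first != cur_space) {
			first = false;
			cur_space = dump[i].first;
			have_space = acquire_space(cur_space);
		}

		if (!have_space) {
			continue;	/* space is gone or unusable */
		}

		/* here: issue the background read for
		(cur_space, dump[i].second) */
	}
}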
*/ + if (space == NULL || + (space && space->crypt_data && + space->crypt_data->encryption != FIL_SPACE_ENCRYPTION_OFF && + space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED)) { + continue; + } + + buf_read_page_background( + page_id_t(this_space_id, BUF_DUMP_PAGE(dump[i])), + page_size, true); if (i % 64 == 63) { os_aio_simulated_wake_handler_threads(); } - if (i % 128 == 0) { - buf_load_status(STATUS_INFO, + /* Update the progress every 32 MiB, which is every Nth page, + where N = 32*1024^2 / page_size. */ + static const ulint update_status_every_n_mb = 32; + static const ulint update_status_every_n_pages + = update_status_every_n_mb * 1024 * 1024 + / page_size.physical(); + + if (i % update_status_every_n_pages == 0) { + buf_load_status(STATUS_VERBOSE, "Loaded " ULINTPF "/" ULINTPF " pages", i + 1, dump_n); + /* mysql_stage_set_work_completed(pfs_stage_progress, + i); */ } if (buf_load_abort_flag) { + if (space != NULL) { + fil_space_release(space); + } buf_load_abort_flag = FALSE; ut_free(dump); buf_load_status( - STATUS_NOTICE, + STATUS_INFO, "Buffer pool(s) load aborted on request"); + /* Premature end, set estimated = completed = i and + end the current stage event. */ + /* + mysql_stage_set_work_estimated(pfs_stage_progress, i); + mysql_stage_set_work_completed(pfs_stage_progress, + i); + */ +#ifdef HAVE_PSI_STAGE_INTERFACE + /* mysql_end_stage(); */ +#endif /* HAVE_PSI_STAGE_INTERFACE */ return; } @@ -655,19 +717,29 @@ buf_load() &last_check_time, &last_activity_cnt, i); } + if (space != NULL) { + fil_space_release(space); + } + ut_free(dump); ut_sprintf_timestamp(now); - buf_load_status(STATUS_NOTICE, + buf_load_status(STATUS_INFO, "Buffer pool(s) load completed at %s", now); + + /* Make sure that estimated = completed when we end. */ + /* mysql_stage_set_work_completed(pfs_stage_progress, dump_n); */ + /* End the stage progress event. */ +#ifdef HAVE_PSI_STAGE_INTERFACE + /* mysql_end_stage(); */ +#endif /* HAVE_PSI_STAGE_INTERFACE */ } /*****************************************************************//** Aborts a currently running buffer pool load. This function is called by MySQL code via buffer_pool_load_abort() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_load_abort() /*============*/ @@ -680,7 +752,7 @@ This is the main thread for buffer pool dump/load. It waits for an event and when waked up either performs a dump or load and sleeps again. 
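/* Minimal standalone sketch of the event-driven worker pattern that the
dump/load thread described here follows: the thread sleeps on an event,
and request flags tell it, once woken, whether to perform a dump, a load,
or to shut down. Plain C++11 primitives are used instead of the InnoDB
os_event/os_thread ones. */

#include <condition_variable>
#include <mutex>

struct dump_load_worker {
	std::mutex		mutex;
	std::condition_variable	wakeup;
	bool			dump_requested = false;
	bool			load_requested = false;
	bool			shutting_down = false;

	/* Body of the worker thread. */
	void run() {
		std::unique_lock<std::mutex> lock(mutex);

		while (!shutting_down) {
			wakeup.wait(lock, [&] {
				return(dump_requested || load_requested
				       || shutting_down);
			});

			if (load_requested) {
				load_requested = false;
				/* here: perform the buffer pool load */
			}

			if (dump_requested) {
				dump_requested = false;
				/* here: perform the buffer pool dump */
			}
		}
	}

	/* Called from another thread and returns immediately, like
	buf_dump_start()/buf_load_start(). */
	void request_dump() {
		std::lock_guard<std::mutex> g(mutex);
		dump_requested = true;
		wakeup.notify_one();
	}
};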
@return this function does not return, it calls os_thread_exit() */ -extern "C" UNIV_INTERN +extern "C" os_thread_ret_t DECLARE_THREAD(buf_dump_thread)( /*============================*/ @@ -688,11 +760,15 @@ DECLARE_THREAD(buf_dump_thread)( required by os_thread_create */ { ut_ad(!srv_read_only_mode); + /* JAN: TODO: MySQL 5.7 PSI +#ifdef UNIV_PFS_THREAD + pfs_register_thread(buf_dump_thread_key); + #endif */ /* UNIV_PFS_THREAD */ srv_buf_dump_thread_active = TRUE; - buf_dump_status(STATUS_INFO, "Dumping buffer pool(s) not yet started"); - buf_load_status(STATUS_INFO, "Loading buffer pool(s) not yet started"); + buf_dump_status(STATUS_VERBOSE, "not started"); + buf_load_status(STATUS_VERBOSE, "not started"); if (srv_buffer_pool_load_at_startup) { buf_load(); diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index f7721e69128..227c71c5cfd 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -25,6 +25,9 @@ The database buffer buf_pool flush algorithm Created 11/11/1995 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" +#include + #include "buf0flu.h" #ifdef UNIV_NONINL @@ -39,7 +42,6 @@ Created 11/11/1995 Heikki Tuuri #include "page0zip.h" #ifndef UNIV_HOTBACKUP #include "ut0byte.h" -#include "ut0lst.h" #include "page0page.h" #include "fil0fil.h" #include "buf0lru.h" @@ -47,12 +49,23 @@ Created 11/11/1995 Heikki Tuuri #include "ibuf0ibuf.h" #include "log0log.h" #include "os0file.h" -#include "os0sync.h" #include "trx0sys.h" #include "srv0mon.h" -#include "mysql/plugin.h" -#include "mysql/service_thd_wait.h" +#include "fsp0sysspace.h" +#include "ut0stage.h" #include "fil0pagecompress.h" +#ifdef UNIV_LINUX +/* include defs for CPU time priority settings */ +#include +#include +#include +#include +static const int buf_flush_page_cleaner_priority = -20; +#endif /* UNIV_LINUX */ + +/** Sleep time in microseconds for loop waiting for the oldest +modification lsn */ +static const ulint buf_flush_wait_flushed_sleep_time = 10000; /** Number of pages flushed through non flush_list flushes. */ static ulint buf_lru_flush_page_count = 0; @@ -62,14 +75,114 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set back to FALSE at shutdown by the page_cleaner as well. Therefore no need to protect it by a mutex. It is only ever read by the thread doing the shutdown */ -UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE; +bool buf_page_cleaner_is_active = false; + +/** Factor for scan length to determine n_pages for intended oldest LSN +progress */ +static ulint buf_flush_lsn_scan_factor = 3; +/** Average redo generation rate */ +static lsn_t lsn_avg_rate = 0; + +/** Target oldest LSN for the requested flush_sync */ +static lsn_t buf_flush_sync_lsn = 0; #ifdef UNIV_PFS_THREAD -UNIV_INTERN mysql_pfs_key_t buf_page_cleaner_thread_key; +mysql_pfs_key_t page_cleaner_thread_key; #endif /* UNIV_PFS_THREAD */ /** Event to synchronise with the flushing. */ - os_event_t buf_flush_event; +os_event_t buf_flush_event; + +/** State for page cleaner array slot */ +enum page_cleaner_state_t { + /** Not requested any yet. + Moved from FINISHED by the coordinator. */ + PAGE_CLEANER_STATE_NONE = 0, + /** Requested but not started flushing. + Moved from NONE by the coordinator. */ + PAGE_CLEANER_STATE_REQUESTED, + /** Flushing is on going. + Moved from REQUESTED by the worker. */ + PAGE_CLEANER_STATE_FLUSHING, + /** Flushing was finished. + Moved from FLUSHING by the worker. 
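/* Sketch of the slot life cycle documented above as a small standalone
state machine: the coordinator moves a slot NONE -> REQUESTED, a worker
moves it REQUESTED -> FLUSHING -> FINISHED, and the coordinator recycles
it FINISHED -> NONE once the result has been collected. This mirrors only
the page_cleaner_state_t transitions, not the real mutex/event handling. */

#include <cassert>

enum slot_state_t {
	SLOT_NONE = 0,	/* no work assigned yet */
	SLOT_REQUESTED,	/* coordinator asked for a flush */
	SLOT_FLUSHING,	/* a worker picked the slot up */
	SLOT_FINISHED	/* worker committed its results */
};

struct cleaner_slot_t {
	slot_state_t	state = SLOT_NONE;
	unsigned	n_flushed_list = 0;	/* written only while FLUSHING */
};

/* Coordinator side: hand the slot to the workers. */
void
request_flush(cleaner_slot_t& slot)
{
	assert(slot.state == SLOT_NONE);
	slot.state = SLOT_REQUESTED;
}

/* Worker side: claim the slot, do the flushing, commit the counters. */
void
worker_flush(cleaner_slot_t& slot, unsigned n_flushed)
{
	assert(slot.state == SLOT_REQUESTED);
	slot.state = SLOT_FLUSHING;

	slot.n_flushed_list = n_flushed;	/* only the owning worker writes this */

	slot.state = SLOT_FINISHED;
}

/* Coordinator side: harvest the result and recycle the slot. */
unsigned
collect_result(cleaner_slot_t& slot)
{
	assert(slot.state == SLOT_FINISHED);
	slot.state = SLOT_NONE;
	return(slot.n_flushed_list);
}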
*/ + PAGE_CLEANER_STATE_FINISHED +}; + +/** Page cleaner request state for each buffer pool instance */ +struct page_cleaner_slot_t { + page_cleaner_state_t state; /*!< state of the request. + protected by page_cleaner_t::mutex + if the worker thread got the slot and + set to PAGE_CLEANER_STATE_FLUSHING, + n_flushed_lru and n_flushed_list can be + updated only by the worker thread */ + /* This value is set during state==PAGE_CLEANER_STATE_NONE */ + ulint n_pages_requested; + /*!< number of requested pages + for the slot */ + /* These values are updated during state==PAGE_CLEANER_STATE_FLUSHING, + and commited with state==PAGE_CLEANER_STATE_FINISHED. + The consistency is protected by the 'state' */ + ulint n_flushed_lru; + /*!< number of flushed pages + by LRU scan flushing */ + ulint n_flushed_list; + /*!< number of flushed pages + by flush_list flushing */ + bool succeeded_list; + /*!< true if flush_list flushing + succeeded. */ + ulint flush_lru_time; + /*!< elapsed time for LRU flushing */ + ulint flush_list_time; + /*!< elapsed time for flush_list + flushing */ + ulint flush_lru_pass; + /*!< count to attempt LRU flushing */ + ulint flush_list_pass; + /*!< count to attempt flush_list + flushing */ +}; + +/** Page cleaner structure common for all threads */ +struct page_cleaner_t { + ib_mutex_t mutex; /*!< mutex to protect whole of + page_cleaner_t struct and + page_cleaner_slot_t slots. */ + os_event_t is_requested; /*!< event to activate worker + threads. */ + os_event_t is_finished; /*!< event to signal that all + slots were finished. */ + volatile ulint n_workers; /*!< number of worker threads + in existence */ + bool requested; /*!< true if requested pages + to flush */ + lsn_t lsn_limit; /*!< upper limit of LSN to be + flushed */ + ulint n_slots; /*!< total number of slots */ + ulint n_slots_requested; + /*!< number of slots + in the state + PAGE_CLEANER_STATE_REQUESTED */ + ulint n_slots_flushing; + /*!< number of slots + in the state + PAGE_CLEANER_STATE_FLUSHING */ + ulint n_slots_finished; + /*!< number of slots + in the state + PAGE_CLEANER_STATE_FINISHED */ + ulint flush_time; /*!< elapsed time to flush + requests for all slots */ + ulint flush_pass; /*!< count to finish to flush + requests for all slots */ + page_cleaner_slot_t* slots; /*!< pointer to the slots */ + bool is_running; /*!< false if attempt + to shutdown */ +}; + +static page_cleaner_t* page_cleaner = NULL; /** If LRU list of a buf_pool is less than this size then LRU eviction should not happen. This is because when we do LRU flushing we also put @@ -80,8 +193,7 @@ in thrashing. */ /* @} */ /******************************************************************//** -Increases flush_list size in bytes with zip_size for compressed page, -UNIV_PAGE_SIZE for uncompressed page in inline function */ +Increases flush_list size in bytes with the page size in inline function */ static inline void incr_flush_list_size_in_bytes( @@ -90,15 +202,16 @@ incr_flush_list_size_in_bytes( buf_pool_t* buf_pool) /*!< in: buffer pool instance */ { ut_ad(buf_flush_list_mutex_own(buf_pool)); - ulint zip_size = page_zip_get_size(&block->page.zip); - buf_pool->stat.flush_list_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE; + + buf_pool->stat.flush_list_bytes += block->page.size.physical(); + ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size); } #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /******************************************************************//** Validates the flush list. 
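/* Standalone sketch of the invariant that this validation checks: blocks
on the flush list are kept in descending order of oldest_modification
(the most recently dirtied pages at the front), so one pass over the list
can verify both the ordering and that every entry is actually dirty. The
list element is a simplified stand-in for buf_page_t. */

#include <cstdint>
#include <list>

typedef uint64_t	lsn_t;

struct dirty_page_t {
	lsn_t	oldest_modification;	/* > 0 while on the flush list */
};

bool
flush_list_is_valid(const std::list<dirty_page_t>& flush_list)
{
	lsn_t	prev = UINT64_MAX;

	for (std::list<dirty_page_t>::const_iterator it = flush_list.begin();
	     it != flush_list.end(); ++it) {

		if (it->oldest_modification == 0
		    || it->oldest_modification > prev) {
			return(false);	/* not dirty, or out of order */
		}

		prev = it->oldest_modification;
	}

	return(true);
}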
-@return TRUE if ok */ +@return TRUE if ok */ static ibool buf_flush_validate_low( @@ -107,7 +220,7 @@ buf_flush_validate_low( /******************************************************************//** Validates the flush list some of the time. -@return TRUE if ok or the check was skipped */ +@return TRUE if ok or the check was skipped */ static ibool buf_flush_validate_skip( @@ -138,7 +251,7 @@ buf_flush_validate_skip( Insert a block in the flush_rbt and returns a pointer to its predecessor or NULL if no predecessor. The ordering is maintained on the basis of the key. -@return pointer to the predecessor or NULL if no predecessor. */ +@return pointer to the predecessor or NULL if no predecessor. */ static buf_page_t* buf_flush_insert_in_flush_rbt( @@ -201,7 +314,7 @@ buf_pool->flush_rbt. Note that for the purpose of flush_rbt, we only need to order blocks on the oldest_modification. The other two fields are used to uniquely identify the blocks. -@return < 0 if b2 < b1, 0 if b2 == b1, > 0 if b2 > b1 */ +@return < 0 if b2 < b1, 0 if b2 == b1, > 0 if b2 > b1 */ static int buf_flush_block_cmp( @@ -212,13 +325,14 @@ buf_flush_block_cmp( int ret; const buf_page_t* b1 = *(const buf_page_t**) p1; const buf_page_t* b2 = *(const buf_page_t**) p2; -#ifdef UNIV_DEBUG - buf_pool_t* buf_pool = buf_pool_from_bpage(b1); -#endif /* UNIV_DEBUG */ ut_ad(b1 != NULL); ut_ad(b2 != NULL); +#ifdef UNIV_DEBUG + buf_pool_t* buf_pool = buf_pool_from_bpage(b1); +#endif /* UNIV_DEBUG */ + ut_ad(buf_flush_list_mutex_own(buf_pool)); ut_ad(b1->in_flush_list); @@ -231,17 +345,16 @@ buf_flush_block_cmp( } /* If oldest_modification is same then decide on the space. */ - ret = (int)(b2->space - b1->space); + ret = (int)(b2->id.space() - b1->id.space()); - /* Or else decide ordering on the offset field. */ - return(ret ? ret : (int)(b2->offset - b1->offset)); + /* Or else decide ordering on the page number. */ + return(ret ? ret : (int) (b2->id.page_no() - b1->id.page_no())); } /********************************************************************//** Initialize the red-black tree to speed up insertions into the flush_list during recovery process. Should be called at the start of recovery process before any page has been read/written. */ -UNIV_INTERN void buf_flush_init_flush_rbt(void) /*==========================*/ @@ -255,6 +368,8 @@ buf_flush_init_flush_rbt(void) buf_flush_list_mutex_enter(buf_pool); + ut_ad(buf_pool->flush_rbt == NULL); + /* Create red black tree for speedy insertions in flush list. */ buf_pool->flush_rbt = rbt_create( sizeof(buf_page_t*), buf_flush_block_cmp); @@ -265,7 +380,6 @@ buf_flush_init_flush_rbt(void) /********************************************************************//** Frees up the red-black tree. */ -UNIV_INTERN void buf_flush_free_flush_rbt(void) /*==========================*/ @@ -292,7 +406,6 @@ buf_flush_free_flush_rbt(void) /********************************************************************//** Inserts a modified block into the flush list. */ -UNIV_INTERN void buf_flush_insert_into_flush_list( /*=============================*/ @@ -302,7 +415,7 @@ buf_flush_insert_into_flush_list( { ut_ad(!buf_pool_mutex_own(buf_pool)); ut_ad(log_flush_order_mutex_own()); - ut_ad(mutex_own(&block->mutex)); + ut_ad(buf_page_mutex_own(block)); buf_flush_list_mutex_enter(buf_pool); @@ -312,7 +425,7 @@ buf_flush_insert_into_flush_list( /* If we are in the recovery then we need to update the flush red-black tree as well. 
*/ - if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) { + if (buf_pool->flush_rbt != NULL) { buf_flush_list_mutex_exit(buf_pool); buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn); return; @@ -323,20 +436,23 @@ buf_flush_insert_into_flush_list( ut_d(block->page.in_flush_list = TRUE); block->page.oldest_modification = lsn; - UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page); + + UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page); + incr_flush_list_size_in_bytes(block, buf_pool); #ifdef UNIV_DEBUG_VALGRIND - { - ulint zip_size = buf_block_get_zip_size(block); + void* p; - if (zip_size) { - UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size); - } else { - UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE); - } + if (block->page.size.is_compressed()) { + p = block->page.zip.data; + } else { + p = block->frame; } + + UNIV_MEM_ASSERT_RW(p, block->page.size.physical()); #endif /* UNIV_DEBUG_VALGRIND */ + #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ut_a(buf_flush_validate_skip(buf_pool)); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ @@ -348,7 +464,6 @@ buf_flush_insert_into_flush_list( Inserts a modified block into the flush list in the right sorted position. This function is used by recovery, because there the modifications do not necessarily come in the order of lsn's. */ -UNIV_INTERN void buf_flush_insert_sorted_into_flush_list( /*====================================*/ @@ -361,7 +476,7 @@ buf_flush_insert_sorted_into_flush_list( ut_ad(!buf_pool_mutex_own(buf_pool)); ut_ad(log_flush_order_mutex_own()); - ut_ad(mutex_own(&block->mutex)); + ut_ad(buf_page_mutex_own(block)); ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); buf_flush_list_mutex_enter(buf_pool); @@ -387,15 +502,15 @@ buf_flush_insert_sorted_into_flush_list( block->page.oldest_modification = lsn; #ifdef UNIV_DEBUG_VALGRIND - { - ulint zip_size = buf_block_get_zip_size(block); + void* p; - if (zip_size) { - UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size); - } else { - UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE); - } + if (block->page.size.is_compressed()) { + p = block->page.zip.data; + } else { + p = block->frame; } + + UNIV_MEM_ASSERT_RW(p, block->page.size.physical()); #endif /* UNIV_DEBUG_VALGRIND */ prev_b = NULL; @@ -404,9 +519,9 @@ buf_flush_insert_sorted_into_flush_list( should not be NULL. In a very rare boundary case it is possible that the flush_rbt has already been freed by the recovery thread before the last page was hooked up in the flush_list by the - io-handler thread. In that case we'll just do a simple + io-handler thread. In that case we'll just do a simple linear search in the else block. 
*/ - if (buf_pool->flush_rbt) { + if (buf_pool->flush_rbt != NULL) { prev_b = buf_flush_insert_in_flush_rbt(&block->page); @@ -414,8 +529,9 @@ buf_flush_insert_sorted_into_flush_list( b = UT_LIST_GET_FIRST(buf_pool->flush_list); - while (b && b->oldest_modification + while (b != NULL && b->oldest_modification > block->page.oldest_modification) { + ut_ad(b->in_flush_list); prev_b = b; b = UT_LIST_GET_NEXT(list, b); @@ -423,10 +539,9 @@ buf_flush_insert_sorted_into_flush_list( } if (prev_b == NULL) { - UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page); + UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page); } else { - UT_LIST_INSERT_AFTER(list, buf_pool->flush_list, - prev_b, &block->page); + UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page); } incr_flush_list_size_in_bytes(block, buf_pool); @@ -441,8 +556,7 @@ buf_flush_insert_sorted_into_flush_list( /********************************************************************//** Returns TRUE if the file page block is immediately suitable for replacement, i.e., the transition FILE_PAGE => NOT_USED allowed. -@return TRUE if can replace immediately */ -UNIV_INTERN +@return TRUE if can replace immediately */ ibool buf_flush_ready_for_replace( /*========================*/ @@ -463,21 +577,15 @@ buf_flush_ready_for_replace( && buf_page_get_io_fix(bpage) == BUF_IO_NONE); } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: buffer block state %lu" - " in the LRU list!\n", - (ulong) buf_page_get_state(bpage)); - ut_print_buf(stderr, bpage, sizeof(buf_page_t)); - putc('\n', stderr); + ib::fatal() << "Buffer block " << bpage << " state " << bpage->state + << " in the LRU list!"; return(FALSE); } /********************************************************************//** Returns true if the block is modified and ready for flushing. -@return true if can flush immediately */ -UNIV_INTERN +@return true if can flush immediately */ bool buf_flush_ready_for_flush( /*======================*/ @@ -517,14 +625,12 @@ buf_flush_ready_for_flush( /********************************************************************//** Remove a block from the flush list of modified blocks. */ -UNIV_INTERN void buf_flush_remove( /*=============*/ buf_page_t* bpage) /*!< in: pointer to the block in question */ { buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - ulint zip_size; ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(mutex_own(buf_page_get_mutex(bpage))); @@ -548,18 +654,18 @@ buf_flush_remove( return; case BUF_BLOCK_ZIP_DIRTY: buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE); - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage); + UT_LIST_REMOVE(buf_pool->flush_list, bpage); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG buf_LRU_insert_zip_clean(bpage); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ break; case BUF_BLOCK_FILE_PAGE: - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage); + UT_LIST_REMOVE(buf_pool->flush_list, bpage); break; } /* If the flush_rbt is active then delete from there as well. */ - if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) { + if (buf_pool->flush_rbt != NULL) { buf_flush_delete_from_flush_rbt(bpage); } @@ -567,8 +673,7 @@ buf_flush_remove( because we assert on in_flush_list in comparison function. */ ut_d(bpage->in_flush_list = FALSE); - zip_size = page_zip_get_size(&bpage->zip); - buf_pool->stat.flush_list_bytes -= zip_size ? 
zip_size : UNIV_PAGE_SIZE; + buf_pool->stat.flush_list_bytes -= bpage->size.physical(); bpage->oldest_modification = 0; @@ -576,6 +681,14 @@ buf_flush_remove( ut_a(buf_flush_validate_skip(buf_pool)); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ + /* If there is an observer that want to know if the asynchronous + flushing was done then notify it. */ + if (bpage->flush_observer != NULL) { + bpage->flush_observer->notify_remove(buf_pool, bpage); + + bpage->flush_observer = NULL; + } + buf_flush_list_mutex_exit(buf_pool); } @@ -590,7 +703,6 @@ use the current list node (bpage) to do the list manipulation because the list pointers could have changed between the time that we copied the contents of bpage to the dpage and the flush list manipulation below. */ -UNIV_INTERN void buf_flush_relocate_on_flush_list( /*=============================*/ @@ -621,7 +733,7 @@ buf_flush_relocate_on_flush_list( /* If recovery is active we must swap the control blocks in the flush_rbt as well. */ - if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) { + if (buf_pool->flush_rbt != NULL) { buf_flush_delete_from_flush_rbt(bpage); prev_b = buf_flush_insert_in_flush_rbt(dpage); } @@ -635,24 +747,18 @@ buf_flush_relocate_on_flush_list( ut_d(bpage->in_flush_list = FALSE); prev = UT_LIST_GET_PREV(list, bpage); - UT_LIST_REMOVE(list, buf_pool->flush_list, bpage); + UT_LIST_REMOVE(buf_pool->flush_list, bpage); if (prev) { ut_ad(prev->in_flush_list); - UT_LIST_INSERT_AFTER( - list, - buf_pool->flush_list, - prev, dpage); + UT_LIST_INSERT_AFTER( buf_pool->flush_list, prev, dpage); } else { - UT_LIST_ADD_FIRST( - list, - buf_pool->flush_list, - dpage); + UT_LIST_ADD_FIRST(buf_pool->flush_list, dpage); } /* Just an extra check. Previous in flush_list should be the same control block as in flush_rbt. */ - ut_a(!buf_pool->flush_rbt || prev_b == prev); + ut_a(buf_pool->flush_rbt == NULL || prev_b == prev); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ut_a(buf_flush_validate_low(buf_pool)); @@ -663,7 +769,6 @@ buf_flush_relocate_on_flush_list( /********************************************************************//** Updates the flush system data structures when a write is completed. */ -UNIV_INTERN void buf_flush_write_complete( /*=====================*/ @@ -679,11 +784,6 @@ buf_flush_write_complete( flush_type = buf_page_get_flush_type(bpage); buf_pool->n_flush[flush_type]--; -#ifdef UNIV_DEBUG - /* fprintf(stderr, "n pending flush %lu\n", - buf_pool->n_flush[flush_type]); */ -#endif - if (buf_pool->n_flush[flush_type] == 0 && buf_pool->init_flush[flush_type] == FALSE) { @@ -696,80 +796,85 @@ buf_flush_write_complete( } #endif /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Calculate the checksum of a page from compressed table and update the page. */ -UNIV_INTERN +/** Calculate the checksum of a page from compressed table and update +the page. 
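/* Hedged standalone sketch of the stamping sequence that
buf_flush_update_zip_checksum() and buf_flush_init_for_writing() perform
before a page is written: store the newest LSN in the page header, clear
the field shared by the flush LSN and the encryption key version, then
store a checksum in the first four bytes. The offsets follow the standard
InnoDB page header layout; the checksum here is a plain zlib CRC32 over
the whole page image, NOT the exact buf_calc_page_crc32() or
page_zip_calc_checksum() algorithms (link with -lz). */

#include <cstdint>
#include <cstring>
#include <zlib.h>

static const size_t	CHECKSUM_OFFSET = 0;	/* FIL_PAGE_SPACE_OR_CHKSUM */
static const size_t	LSN_OFFSET = 16;	/* FIL_PAGE_LSN */
static const size_t	FLUSH_LSN_OR_KEY_VERSION_OFFSET = 26;

/* Big-endian store, as mach_write_to_4()/mach_write_to_8() do. */
static void
store_be(unsigned char* p, uint64_t v, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		p[len - 1 - i] = static_cast<unsigned char>(v >> (8 * i));
	}
}

void
stamp_page_for_write(unsigned char* page, size_t page_size, uint64_t newest_lsn)
{
	store_be(page + LSN_OFFSET, newest_lsn, 8);

	std::memset(page + FLUSH_LSN_OR_KEY_VERSION_OFFSET, 0, 8);

	/* Checksum the page with the LSN already stamped into it. */
	uint32_t checksum = static_cast<uint32_t>(
		crc32(0L, page, static_cast<uInt>(page_size)));

	store_be(page + CHECKSUM_OFFSET, checksum, 4);
}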
+@param[in,out] page page to update +@param[in] size compressed page size +@param[in] lsn LSN to stamp on the page */ void buf_flush_update_zip_checksum( -/*==========================*/ - buf_frame_t* page, /*!< in/out: Page to update */ - ulint zip_size, /*!< in: Compressed page size */ - lsn_t lsn) /*!< in: Lsn to stamp on the page */ + buf_frame_t* page, + ulint size, + lsn_t lsn) { - ut_a(zip_size > 0); + ut_a(size > 0); - ib_uint32_t checksum = static_cast( - page_zip_calc_checksum( - page, zip_size, - static_cast( - srv_checksum_algorithm))); + const uint32_t checksum = page_zip_calc_checksum( + page, size, + static_cast(srv_checksum_algorithm)); mach_write_to_8(page + FIL_PAGE_LSN, lsn); memset(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); } -/********************************************************************//** -Initializes a page for writing to the tablespace. */ -UNIV_INTERN +/** Initialize a page for writing to the tablespace. +@param[in] block buffer block; NULL if bypassing the buffer pool +@param[in,out] page page frame +@param[in,out] page_zip_ compressed page, or NULL if uncompressed +@param[in] newest_lsn newest modification LSN to the page +@param[in] skip_checksum whether to disable the page checksum */ void buf_flush_init_for_writing( -/*=======================*/ - byte* page, /*!< in/out: page */ - void* page_zip_, /*!< in/out: compressed page, or NULL */ - lsn_t newest_lsn) /*!< in: newest modification lsn - to the page */ + const buf_block_t* block, + byte* page, + void* page_zip_, + lsn_t newest_lsn, + bool skip_checksum) { - ib_uint32_t checksum = 0 /* silence bogus gcc warning */; + ib_uint32_t checksum = BUF_NO_CHECKSUM_MAGIC; + ut_ad(block == NULL || block->frame == page); + ut_ad(block == NULL || page_zip_ == NULL + || &block->page.zip == page_zip_); ut_ad(page); if (page_zip_) { page_zip_des_t* page_zip; - ulint zip_size; + ulint size; page_zip = static_cast(page_zip_); - zip_size = page_zip_get_size(page_zip); + size = page_zip_get_size(page_zip); - ut_ad(zip_size); - ut_ad(ut_is_2pow(zip_size)); - ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); + ut_ad(size); + ut_ad(ut_is_2pow(size)); + ut_ad(size <= UNIV_ZIP_SIZE_MAX); - switch (UNIV_EXPECT(fil_page_get_type(page), FIL_PAGE_INDEX)) { + switch (fil_page_get_type(page)) { case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: case FIL_PAGE_TYPE_FSP_HDR: case FIL_PAGE_TYPE_XDES: /* These are essentially uncompressed pages. 
*/ - memcpy(page_zip->data, page, zip_size); + memcpy(page_zip->data, page, size); /* fall through */ case FIL_PAGE_TYPE_ZBLOB: case FIL_PAGE_TYPE_ZBLOB2: case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: buf_flush_update_zip_checksum( - page_zip->data, zip_size, newest_lsn); + page_zip->data, size, newest_lsn); return; } - ut_print_timestamp(stderr); - fputs(" InnoDB: ERROR: The compressed page to be written" - " seems corrupt:", stderr); - ut_print_buf(stderr, page, zip_size); + ib::error() << "The compressed page to be written" + " seems corrupt:"; + ut_print_buf(stderr, page, size); fputs("\nInnoDB: Possibly older version of the page:", stderr); - ut_print_buf(stderr, page_zip->data, zip_size); + ut_print_buf(stderr, page_zip->data, size); putc('\n', stderr); ut_error; } @@ -780,27 +885,85 @@ buf_flush_init_for_writing( mach_write_to_8(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, newest_lsn); - /* Store the new formula checksum */ - - switch ((srv_checksum_algorithm_t) srv_checksum_algorithm) { - case SRV_CHECKSUM_ALGORITHM_CRC32: - case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: - checksum = buf_calc_page_crc32(page); - mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); - break; - case SRV_CHECKSUM_ALGORITHM_INNODB: - case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: - checksum = (ib_uint32_t) buf_calc_page_new_checksum(page); - mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); - checksum = (ib_uint32_t) buf_calc_page_old_checksum(page); - break; - case SRV_CHECKSUM_ALGORITHM_NONE: - case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: - checksum = BUF_NO_CHECKSUM_MAGIC; + if (skip_checksum) { mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum); - break; - /* no default so the compiler will emit a warning if new enum - is added and not handled here */ + } else { + if (block != NULL && UNIV_PAGE_SIZE == 16384) { + /* The page type could be garbage in old files + created before MySQL 5.5. Such files always + had a page size of 16 kilobytes. */ + ulint page_type = fil_page_get_type(page); + ulint reset_type = page_type; + + switch (block->page.id.page_no() % 16384) { + case 0: + reset_type = block->page.id.page_no() == 0 + ? FIL_PAGE_TYPE_FSP_HDR + : FIL_PAGE_TYPE_XDES; + break; + case 1: + reset_type = FIL_PAGE_IBUF_BITMAP; + break; + default: + switch (page_type) { + case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: + case FIL_PAGE_UNDO_LOG: + case FIL_PAGE_INODE: + case FIL_PAGE_IBUF_FREE_LIST: + case FIL_PAGE_TYPE_ALLOCATED: + case FIL_PAGE_TYPE_SYS: + case FIL_PAGE_TYPE_TRX_SYS: + case FIL_PAGE_TYPE_BLOB: + case FIL_PAGE_TYPE_ZBLOB: + case FIL_PAGE_TYPE_ZBLOB2: + break; + case FIL_PAGE_TYPE_FSP_HDR: + case FIL_PAGE_TYPE_XDES: + case FIL_PAGE_IBUF_BITMAP: + /* These pages should have + predetermined page numbers + (see above). 
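/* Sketch of the page-type sanity check being added here, with an
illustrative enum instead of the real FIL_PAGE_* constants. For a 16KiB
page size, every 16384th page of a tablespace is an extent descriptor
page (the very first one being the space header) and the page following
it is the insert buffer bitmap page, so those positions fully determine
the expected type and anything else found there is suspect. */

#include <cstdint>

enum page_type_t {
	PAGE_TYPE_UNKNOWN = 0,	/* no predetermined type for this position */
	PAGE_TYPE_FSP_HDR,	/* space header, page 0 only */
	PAGE_TYPE_XDES,		/* extent descriptor page */
	PAGE_TYPE_IBUF_BITMAP	/* insert buffer bitmap page */
};

/* Return the type that page 'page_no' must have if its position in the
tablespace predetermines it, or PAGE_TYPE_UNKNOWN if an ordinary type is
acceptable there. Assumes a 16KiB page size (16384 pages per descriptor
page). */
page_type_t
predetermined_type(uint32_t page_no)
{
	switch (page_no % 16384) {
	case 0:
		return(page_no == 0 ? PAGE_TYPE_FSP_HDR : PAGE_TYPE_XDES);
	case 1:
		return(PAGE_TYPE_IBUF_BITMAP);
	default:
		return(PAGE_TYPE_UNKNOWN);
	}
}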
*/ + default: + reset_type = FIL_PAGE_TYPE_UNKNOWN; + break; + } + } + + if (UNIV_UNLIKELY(page_type != reset_type)) { + ib::info() + << "Resetting invalid page " + << block->page.id << " type " + << page_type << " to " + << reset_type << " when flushing."; + fil_page_set_type(page, reset_type); + } + } + + switch ((srv_checksum_algorithm_t) srv_checksum_algorithm) { + case SRV_CHECKSUM_ALGORITHM_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: + checksum = buf_calc_page_crc32(page); + mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, + checksum); + break; + case SRV_CHECKSUM_ALGORITHM_INNODB: + case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: + checksum = (ib_uint32_t) buf_calc_page_new_checksum( + page); + mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, + checksum); + checksum = (ib_uint32_t) buf_calc_page_old_checksum( + page); + break; + case SRV_CHECKSUM_ALGORITHM_NONE: + case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: + mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, + checksum); + break; + /* no default so the compiler will emit a warning if + new enum is added and not handled here */ + } } /* With the InnoDB checksum, we overwrite the first 4 bytes of @@ -834,19 +997,18 @@ buf_flush_write_block_low( buf_flush_t flush_type, /*!< in: type of flush */ bool sync) /*!< in: true if sync IO request */ { - ulint zip_size = buf_page_get_zip_size(bpage); - page_t* frame = NULL; - ulint space_id = buf_page_get_space(bpage); + page_t* frame = NULL; + ulint space_id = bpage->id.space(); atomic_writes_t awrites = fil_space_get_atomic_writes(space_id); #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(!buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ -#ifdef UNIV_LOG_DEBUG - static ibool univ_log_debug_warned; -#endif /* UNIV_LOG_DEBUG */ + DBUG_PRINT("ib_buf", ("flush %s %u page %u:%u", + sync ? "sync" : "async", (unsigned) flush_type, + bpage->id.space(), bpage->id.page_no())); ut_ad(buf_page_in_file(bpage)); @@ -857,27 +1019,21 @@ buf_flush_write_block_low( LRU_list. */ ut_ad(!buf_pool_mutex_own(buf_pool)); ut_ad(!buf_flush_list_mutex_own(buf_pool)); - ut_ad(!mutex_own(buf_page_get_mutex(bpage))); + ut_ad(!buf_page_get_mutex(bpage)->is_owned()); ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE); ut_ad(bpage->oldest_modification != 0); #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0); -#endif + ut_a(ibuf_count_get(bpage->id) == 0); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + ut_ad(bpage->newest_modification != 0); -#ifdef UNIV_LOG_DEBUG - if (!univ_log_debug_warned) { - univ_log_debug_warned = TRUE; - fputs("Warning: cannot force log to disk if" - " UNIV_LOG_DEBUG is defined!\n" - "Crash recovery will not work!\n", - stderr); - } -#else /* Force the log to the disk before writing the modified block */ - log_write_up_to(bpage->newest_modification, LOG_WAIT_ALL_GROUPS, TRUE); -#endif + if (!srv_read_only_mode) { + log_write_up_to(bpage->newest_modification, true); + } + switch (buf_page_get_state(bpage)) { case BUF_BLOCK_POOL_WATCH: case BUF_BLOCK_ZIP_PAGE: /* The page should be dirty. 
*/ @@ -889,11 +1045,11 @@ buf_flush_write_block_low( break; case BUF_BLOCK_ZIP_DIRTY: frame = bpage->zip.data; + mach_write_to_8(frame + FIL_PAGE_LSN, bpage->newest_modification); - ut_a(page_zip_verify_checksum(frame, zip_size)); - + ut_a(page_zip_verify_checksum(frame, bpage->size.physical())); memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); break; case BUF_BLOCK_FILE_PAGE: @@ -902,47 +1058,39 @@ buf_flush_write_block_low( frame = ((buf_block_t*) bpage)->frame; } - buf_flush_init_for_writing(((buf_block_t*) bpage)->frame, - bpage->zip.data - ? &bpage->zip : NULL, - bpage->newest_modification); + buf_flush_init_for_writing( + reinterpret_cast(bpage), + reinterpret_cast(bpage)->frame, + bpage->zip.data ? &bpage->zip : NULL, + bpage->newest_modification, + fsp_is_checksum_disabled(bpage->id.space())); break; } frame = buf_page_encrypt_before_write(bpage, frame, space_id); - if (!srv_use_doublewrite_buf || !buf_dblwr) { - fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER, - sync, - buf_page_get_space(bpage), - zip_size, - buf_page_get_page_no(bpage), - 0, - zip_size ? zip_size : bpage->real_size, - frame, - bpage, - &bpage->write_size); - } else { + /* Disable use of double-write buffer for temporary tablespace. + Given the nature and load of temporary tablespace doublewrite buffer + adds an overhead during flushing. */ + + if (!srv_use_doublewrite_buf + || buf_dblwr == NULL + || srv_read_only_mode + || fsp_is_system_temporary(bpage->id.space()) + || awrites == ATOMIC_WRITES_ON) { - /* InnoDB uses doublewrite buffer and doublewrite buffer - is initialized. User can define do we use atomic writes - on a file space (table) or not. If atomic writes are - not used we should use doublewrite buffer and if - atomic writes should be used, no doublewrite buffer - is used. */ - - if (awrites == ATOMIC_WRITES_ON) { - fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER, - FALSE, - buf_page_get_space(bpage), - zip_size, - buf_page_get_page_no(bpage), - 0, - zip_size ? zip_size : bpage->real_size, - frame, - bpage, - &bpage->write_size); - } else if (flush_type == BUF_FLUSH_SINGLE_PAGE) { + ut_ad(!srv_read_only_mode + || fsp_is_system_temporary(bpage->id.space())); + + ulint type = IORequest::WRITE | IORequest::DO_NOT_WAKE; + + IORequest request(type); + + fil_io(request, + sync, bpage->id, bpage->size, 0, bpage->size.physical(), + frame, bpage, NULL); + } else { + if (flush_type == BUF_FLUSH_SINGLE_PAGE) { buf_dblwr_write_single_page(bpage, sync); } else { ut_ad(!sync); @@ -955,7 +1103,7 @@ buf_flush_write_block_low( are working on. */ if (sync) { ut_ad(flush_type == BUF_FLUSH_SINGLE_PAGE); - fil_flush(buf_page_get_space(bpage)); + fil_flush(bpage->id.space()); /* true means we want to evict this page from the LRU list as well. */ @@ -975,8 +1123,7 @@ writes! NOTE: buf_pool->mutex and buf_page_get_mutex(bpage) must be held upon entering this function, and they will be released by this function if it returns true. 
@return TRUE if the page was flushed */ -UNIV_INTERN -bool +ibool buf_flush_page( /*===========*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ @@ -984,47 +1131,50 @@ buf_flush_page( buf_flush_t flush_type, /*!< in: type of flush */ bool sync) /*!< in: true if sync IO request */ { + BPageMutex* block_mutex; + ut_ad(flush_type < BUF_FLUSH_N_TYPES); ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_page_in_file(bpage)); ut_ad(!sync || flush_type == BUF_FLUSH_SINGLE_PAGE); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); - + block_mutex = buf_page_get_mutex(bpage); ut_ad(mutex_own(block_mutex)); ut_ad(buf_flush_ready_for_flush(bpage, flush_type)); - bool is_uncompressed; + bool is_uncompressed; - is_uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); - ut_ad(is_uncompressed == (block_mutex != &buf_pool->zip_mutex)); + is_uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); + ut_ad(is_uncompressed == (block_mutex != &buf_pool->zip_mutex)); - ibool flush; - rw_lock_t* rw_lock; - bool no_fix_count = bpage->buf_fix_count == 0; + ibool flush; + rw_lock_t* rw_lock; + bool no_fix_count = bpage->buf_fix_count == 0; - if (!is_uncompressed) { - flush = TRUE; + if (!is_uncompressed) { + flush = TRUE; rw_lock = NULL; - - } else if (!(no_fix_count || flush_type == BUF_FLUSH_LIST)) { - /* This is a heuristic, to avoid expensive S attempts. */ + } else if (!(no_fix_count || flush_type == BUF_FLUSH_LIST) + || (!no_fix_count + && srv_shutdown_state <= SRV_SHUTDOWN_CLEANUP + && fsp_is_system_temporary(bpage->id.space()))) { + /* This is a heuristic, to avoid expensive SX attempts. */ + /* For table residing in temporary tablespace sync is done + using IO_FIX and so before scheduling for flush ensure that + page is not fixed. */ flush = FALSE; } else { - rw_lock = &reinterpret_cast(bpage)->lock; - if (flush_type != BUF_FLUSH_LIST) { - flush = rw_lock_s_lock_gen_nowait( - rw_lock, BUF_IO_WRITE); + flush = rw_lock_sx_lock_nowait(rw_lock, BUF_IO_WRITE); } else { - /* Will S lock later */ + /* Will SX lock later */ flush = TRUE; } } - if (flush) { + if (flush) { /* We are committed to flushing by the time we get here */ @@ -1033,33 +1183,51 @@ buf_flush_page( buf_page_set_flush_type(bpage, flush_type); if (buf_pool->n_flush[flush_type] == 0) { - os_event_reset(buf_pool->no_flush[flush_type]); } ++buf_pool->n_flush[flush_type]; mutex_exit(block_mutex); + buf_pool_mutex_exit(buf_pool); if (flush_type == BUF_FLUSH_LIST && is_uncompressed - && !rw_lock_s_lock_gen_nowait(rw_lock, BUF_IO_WRITE)) { - /* avoiding deadlock possibility involves doublewrite - buffer, should flush it, because it might hold the - another block->lock. */ - buf_dblwr_flush_buffered_writes(); + && !rw_lock_sx_lock_nowait(rw_lock, BUF_IO_WRITE)) { + + if (!fsp_is_system_temporary(bpage->id.space())) { + /* avoiding deadlock possibility involves + doublewrite buffer, should flush it, because + it might hold the another block->lock. */ + buf_dblwr_flush_buffered_writes(); + } else { + buf_dblwr_sync_datafiles(); + } + + rw_lock_sx_lock_gen(rw_lock, BUF_IO_WRITE); + } + + /* If there is an observer that want to know if the asynchronous + flushing was sent then notify it. + Note: we set flush observer to a page with x-latch, so we can + guarantee that notify_flush and notify_remove are called in pair + with s-latch on a uncompressed page. 
*/ + if (bpage->flush_observer != NULL) { + buf_pool_mutex_enter(buf_pool); - rw_lock_s_lock_gen(rw_lock, BUF_IO_WRITE); - } + bpage->flush_observer->notify_flush(buf_pool, bpage); + + buf_pool_mutex_exit(buf_pool); + } /* Even though bpage is not protected by any mutex at this point, it is safe to access bpage, because it is io_fixed and oldest_modification != 0. Thus, it cannot be relocated in the buffer pool or removed from flush_list or LRU_list. */ - buf_flush_write_block_low(bpage, flush_type, sync); - } + buf_flush_write_block_low(bpage, flush_type, sync); + } return(flush); } @@ -1071,7 +1239,6 @@ NOTE: buf_pool->mutex and block->mutex must be held upon entering this function, and they will be released by this function after flushing. This is loosely based on buf_flush_batch() and buf_flush_page(). @return TRUE if the page was flushed and the mutexes released */ -UNIV_INTERN ibool buf_flush_page_try( /*===============*/ @@ -1080,7 +1247,7 @@ buf_flush_page_try( { ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - ut_ad(mutex_own(&block->mutex)); + ut_ad(buf_page_mutex_own(block)); if (!buf_flush_ready_for_flush(&block->page, BUF_FLUSH_SINGLE_PAGE)) { return(FALSE); @@ -1089,23 +1256,23 @@ buf_flush_page_try( /* The following call will release the buffer pool and block mutex. */ return(buf_flush_page( - buf_pool, &block->page, BUF_FLUSH_SINGLE_PAGE, true)); + buf_pool, &block->page, + BUF_FLUSH_SINGLE_PAGE, true)); } # endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ -/***********************************************************//** -Check the page is in buffer pool and can be flushed. -@return true if the page can be flushed. */ + +/** Check the page is in buffer pool and can be flushed. +@param[in] page_id page id +@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST +@return true if the page can be flushed. */ static bool buf_flush_check_neighbor( -/*=====================*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page offset */ - buf_flush_t flush_type) /*!< in: BUF_FLUSH_LRU or - BUF_FLUSH_LIST */ + const page_id_t& page_id, + buf_flush_t flush_type) { buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); bool ret; ut_ad(flush_type == BUF_FLUSH_LRU @@ -1114,7 +1281,7 @@ buf_flush_check_neighbor( buf_pool_mutex_enter(buf_pool); /* We only want to flush pages from this buffer pool. */ - bpage = buf_page_hash_get(buf_pool, space, offset); + bpage = buf_page_hash_get(buf_pool, page_id); if (!bpage) { @@ -1129,7 +1296,7 @@ buf_flush_check_neighbor( ret = false; if (flush_type != BUF_FLUSH_LRU || buf_page_is_old(bpage)) { - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); if (buf_flush_ready_for_flush(bpage, flush_type)) { @@ -1142,26 +1309,25 @@ buf_flush_check_neighbor( return(ret); } -/***********************************************************//** -Flushes to disk all flushable pages within the flush area. -@return number of pages flushed */ +/** Flushes to disk all flushable pages within the flush area. 
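/* Standalone sketch of the neighbourhood computation that follows: the
victim's page number is rounded down and up to a flush area whose size is
the smaller of the read-ahead area and 1/16 of the buffer pool, and the
upper bound is clamped to the tablespace size. Plain integers stand in
for buf_pool_t, the small-pool threshold is illustrative, and
read_ahead_area is assumed to be non-zero. */

#include <algorithm>
#include <cstdint>

struct flush_range_t {
	uint32_t	low;	/* first page to consider, inclusive */
	uint32_t	high;	/* one past the last page to consider */
};

flush_range_t
neighbor_flush_range(
	uint32_t	page_no,		/* victim page number */
	uint32_t	read_ahead_area,	/* BUF_READ_AHEAD_AREA() */
	uint32_t	pool_curr_size,		/* buffer pool size in pages */
	uint32_t	space_size,		/* tablespace size in pages */
	bool		neighbors_enabled)	/* srv_flush_neighbors != 0 */
{
	flush_range_t	r;

	if (!neighbors_enabled || pool_curr_size < 256) {
		/* Little space, or neighbor flushing disabled: flush
		only the victim itself. */
		r.low = page_no;
		r.high = page_no + 1;
	} else {
		uint32_t area = std::min(read_ahead_area,
					 pool_curr_size / 16);

		r.low = (page_no / area) * area;
		r.high = (page_no / area + 1) * area;
	}

	r.high = std::min(r.high, space_size);

	return(r);
}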
+@param[in] page_id page id +@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST +@param[in] n_flushed number of pages flushed so far in this batch +@param[in] n_to_flush maximum number of pages we are allowed to flush +@return number of pages flushed */ static ulint buf_flush_try_neighbors( -/*====================*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page offset */ - buf_flush_t flush_type, /*!< in: BUF_FLUSH_LRU or - BUF_FLUSH_LIST */ - ulint n_flushed, /*!< in: number of pages - flushed so far in this batch */ - ulint n_to_flush) /*!< in: maximum number of pages - we are allowed to flush */ + const page_id_t& page_id, + buf_flush_t flush_type, + ulint n_flushed, + ulint n_to_flush) { ulint i; ulint low; ulint high; - buf_pool_t* buf_pool = buf_pool_get(space, offset); + ulint count = 0; + buf_pool_t* buf_pool = buf_pool_get(page_id); ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); @@ -1169,8 +1335,8 @@ buf_flush_try_neighbors( || srv_flush_neighbors == 0) { /* If there is little space or neighbor flushing is not enabled then just flush the victim. */ - low = offset; - high = offset + 1; + low = page_id.page_no(); + high = page_id.page_no() + 1; } else { /* When flushed, dirty blocks are searched in neighborhoods of this size, and flushed along with the @@ -1182,27 +1348,38 @@ buf_flush_try_neighbors( BUF_READ_AHEAD_AREA(buf_pool), buf_pool->curr_size / 16); - low = (offset / buf_flush_area) * buf_flush_area; - high = (offset / buf_flush_area + 1) * buf_flush_area; + low = (page_id.page_no() / buf_flush_area) * buf_flush_area; + high = (page_id.page_no() / buf_flush_area + 1) * buf_flush_area; if (srv_flush_neighbors == 1) { /* adjust 'low' and 'high' to limit for contiguous dirty area */ - if (offset > low) { - for (i = offset - 1; - i >= low - && buf_flush_check_neighbor( - space, i, flush_type); - i--) { - /* do nothing */ + if (page_id.page_no() > low) { + for (i = page_id.page_no() - 1; i >= low; i--) { + if (!buf_flush_check_neighbor( + page_id_t(page_id.space(), i), + flush_type)) { + + break; + } + + if (i == low) { + /* Avoid overwrap when low == 0 + and calling + buf_flush_check_neighbor() with + i == (ulint) -1 */ + i--; + break; + } } low = i + 1; } - for (i = offset + 1; + for (i = page_id.page_no() + 1; i < high && buf_flush_check_neighbor( - space, i, flush_type); + page_id_t(page_id.space(), i), + flush_type); i++) { /* do nothing */ } @@ -1210,17 +1387,17 @@ buf_flush_try_neighbors( } } -#ifdef UNIV_DEBUG - /* fprintf(stderr, "Flush area: low %lu high %lu\n", low, high); */ -#endif - - if (high > fil_space_get_size(space)) { - high = fil_space_get_size(space); + const ulint space_size = fil_space_get_size(page_id.space()); + if (high > space_size) { + high = space_size; } - ulint count = 0; + DBUG_PRINT("ib_buf", ("flush %u:%u..%u", + page_id.space(), + (unsigned) low, (unsigned) high)); - for (i = low; i < high; i++) { + for (ulint i = low; i < high; i++) { + buf_page_t* bpage; if ((count + n_flushed) >= n_to_flush) { @@ -1230,19 +1407,21 @@ buf_flush_try_neighbors( are flushing has not been flushed yet then we'll try to flush the victim that we selected originally. */ - if (i <= offset) { - i = offset; + if (i <= page_id.page_no()) { + i = page_id.page_no(); } else { break; } } - buf_pool = buf_pool_get(space, i); + const page_id_t cur_page_id(page_id.space(), i); + + buf_pool = buf_pool_get(cur_page_id); buf_pool_mutex_enter(buf_pool); /* We only want to flush pages from this buffer pool. 
*/ - buf_page_t* bpage = buf_page_hash_get(buf_pool, space, i); + bpage = buf_page_hash_get(buf_pool, cur_page_id); if (bpage == NULL) { @@ -1256,70 +1435,76 @@ buf_flush_try_neighbors( because the flushed blocks are soon freed */ if (flush_type != BUF_FLUSH_LRU - || i == offset + || i == page_id.page_no() || buf_page_is_old(bpage)) { - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); if (buf_flush_ready_for_flush(bpage, flush_type) - && (i == offset || bpage->buf_fix_count == 0) - && buf_flush_page( + && (i == page_id.page_no() + || bpage->buf_fix_count == 0)) { + + /* We also try to flush those + neighbors != offset */ + + if (buf_flush_page( buf_pool, bpage, flush_type, false)) { - ++count; + ++count; + } else { + mutex_exit(block_mutex); + buf_pool_mutex_exit(buf_pool); + } continue; + } else { + mutex_exit(block_mutex); } - - mutex_exit(block_mutex); } - buf_pool_mutex_exit(buf_pool); } - if (count > 0) { + if (count > 1) { MONITOR_INC_VALUE_CUMULATIVE( - MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE, - MONITOR_FLUSH_NEIGHBOR_COUNT, - MONITOR_FLUSH_NEIGHBOR_PAGES, - (count - 1)); + MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE, + MONITOR_FLUSH_NEIGHBOR_COUNT, + MONITOR_FLUSH_NEIGHBOR_PAGES, + (count - 1)); } return(count); } -/********************************************************************//** -Check if the block is modified and ready for flushing. If the the block -is ready to flush then flush the page and try o flush its neighbors. - -@return TRUE if buf_pool mutex was released during this function. +/** Check if the block is modified and ready for flushing. +If the the block is ready to flush then flush the page and try o flush +its neighbors. +@param[in] bpage buffer control block, +must be buf_page_in_file(bpage) +@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST +@param[in] n_to_flush number of pages to flush +@param[in,out] count number of pages flushed +@return TRUE if buf_pool mutex was released during this function. This does not guarantee that some pages were written as well. Number of pages written are incremented to the count. */ static -ibool +bool buf_flush_page_and_try_neighbors( -/*=============================*/ - buf_page_t* bpage, /*!< in: buffer control block, - must be - buf_page_in_file(bpage) */ - buf_flush_t flush_type, /*!< in: BUF_FLUSH_LRU - or BUF_FLUSH_LIST */ - ulint n_to_flush, /*!< in: number of pages to - flush */ - ulint* count) /*!< in/out: number of pages - flushed */ + buf_page_t* bpage, + buf_flush_t flush_type, + ulint n_to_flush, + ulint* count) { - ibool flushed; - ib_mutex_t* block_mutex; #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); -#endif /* UNIV_DEBUG */ ut_ad(buf_pool_mutex_own(buf_pool)); +#endif /* UNIV_DEBUG */ + + bool flushed; + BPageMutex* block_mutex = buf_page_get_mutex(bpage); - block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); ut_a(buf_page_in_file(bpage)); @@ -1329,26 +1514,22 @@ buf_flush_page_and_try_neighbors( buf_pool = buf_pool_from_bpage(bpage); - buf_pool_mutex_exit(buf_pool); - - /* These fields are protected by both the - buffer pool mutex and block mutex. 
*/ - ulint space = buf_page_get_space(bpage); - ulint offset = buf_page_get_page_no(bpage); + const page_id_t page_id = bpage->id; mutex_exit(block_mutex); + buf_pool_mutex_exit(buf_pool); + /* Try to flush also all the neighbors */ *count += buf_flush_try_neighbors( - space, offset, flush_type, *count, n_to_flush); + page_id, flush_type, *count, n_to_flush); buf_pool_mutex_enter(buf_pool); - flushed = TRUE; - } else { mutex_exit(block_mutex); - flushed = FALSE; + + flushed = false; } ut_ad(buf_pool_mutex_own(buf_pool)); @@ -1373,7 +1554,6 @@ buf_free_from_unzip_LRU_list_batch( ulint max) /*!< in: desired number of blocks in the free_list */ { - buf_block_t* block; ulint scanned = 0; ulint count = 0; ulint free_len = UT_LIST_GET_LEN(buf_pool->free); @@ -1381,8 +1561,10 @@ buf_free_from_unzip_LRU_list_batch( ut_ad(buf_pool_mutex_own(buf_pool)); - block = UT_LIST_GET_LAST(buf_pool->unzip_LRU); - while (block != NULL && count < max + buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU); + + while (block != NULL + && count < max && free_len < srv_LRU_scan_depth && lru_len > UT_LIST_GET_LEN(buf_pool->LRU) / 10) { @@ -1433,8 +1615,11 @@ buf_flush_LRU_list_batch( { buf_page_t* bpage; ulint scanned = 0; + ulint evict_count = 0; + ulint count = 0; ulint free_len = UT_LIST_GET_LEN(buf_pool->free); ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU); + ulint withdraw_depth = 0; n->flushed = 0; n->evicted = 0; @@ -1442,9 +1627,15 @@ buf_flush_LRU_list_batch( ut_ad(buf_pool_mutex_own(buf_pool)); + if (buf_pool->curr_size < buf_pool->old_size + && buf_pool->withdraw_target > 0) { + withdraw_depth = buf_pool->withdraw_target + - UT_LIST_GET_LEN(buf_pool->withdraw); + } + for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); - bpage != NULL && (n->evicted + n->flushed) < max - && free_len < srv_LRU_scan_depth + bpage != NULL && count + evict_count < max + && free_len < srv_LRU_scan_depth + withdraw_depth && lru_len > BUF_LRU_MIN_LEN; ++scanned, bpage = buf_pool->lru_hp.get()) { @@ -1452,23 +1643,29 @@ buf_flush_LRU_list_batch( buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage); buf_pool->lru_hp.set(prev); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); + mutex_enter(block_mutex); - bool evict = buf_flush_ready_for_replace(bpage); - mutex_exit(block_mutex); - if (evict) { + if (buf_flush_ready_for_replace(bpage)) { /* block is ready for eviction i.e., it is clean and is not IO-fixed or buffer fixed. */ + mutex_exit(block_mutex); if (buf_LRU_free_page(bpage, true)) { - n->evicted++; + ++evict_count; } - } else { + } else if (buf_flush_ready_for_flush(bpage, BUF_FLUSH_LRU)) { /* Block is ready for flush. Dispatch an IO request. The IO helper thread will put it on free list in IO completion routine. */ + mutex_exit(block_mutex); buf_flush_page_and_try_neighbors( - bpage, BUF_FLUSH_LRU, max, &n->flushed); + bpage, BUF_FLUSH_LRU, max, &count); + } else { + /* Can't evict or dispatch this block. Go to + previous. 
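buf_flush_LRU_list_batch() above now widens its scan target by a withdraw_depth while the buffer pool is being shrunk (withdraw_target minus the blocks already withdrawn) and counts evictions and flushes separately so they can feed distinct monitor counters. A rough standalone model of the per-page decision at the LRU tail follows; Page and its flags are simplified stand-ins for buf_page_t state, and free-list growth is approximated by adding the running eviction count.

#include <cstddef>
#include <vector>

/* Illustrative stand-in for a block at the LRU tail. */
struct Page {
    bool dirty;     /* has unflushed modifications        */
    bool io_fixed;  /* currently being read or written    */
    bool buf_fixed; /* pinned by a thread                 */
};

struct BatchCounts {
    std::size_t evicted = 0;  /* clean pages moved to the free list           */
    std::size_t flushed = 0;  /* dirty pages for which a write was dispatched */
    std::size_t scanned = 0;
};

/* Scan from the tail of the LRU list until `max` pages have been processed
   or the free list is long enough.  `scan_depth` models srv_LRU_scan_depth;
   `withdraw_depth` models the extra pressure applied while the pool shrinks. */
BatchCounts lru_batch(const std::vector<Page>& lru_tail,
                      std::size_t max,
                      std::size_t free_len,
                      std::size_t scan_depth,
                      std::size_t withdraw_depth)
{
    BatchCounts n;

    for (const Page& page : lru_tail) {
        if (n.evicted + n.flushed >= max
            || free_len + n.evicted >= scan_depth + withdraw_depth) {
            break;
        }
        ++n.scanned;

        if (!page.dirty && !page.io_fixed && !page.buf_fixed) {
            ++n.evicted;   /* ready for replacement: free it directly     */
        } else if (page.dirty && !page.io_fixed) {
            ++n.flushed;   /* ready for flush: dispatch an async write    */
        }
        /* otherwise: neither evictable nor flushable right now, skip it */
    }

    return n;
}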
*/ + ut_ad(buf_pool->lru_hp.is_hp(prev)); + mutex_exit(block_mutex); } ut_ad(!mutex_own(block_mutex)); @@ -1487,6 +1684,14 @@ buf_flush_LRU_list_batch( ut_ad(buf_pool_mutex_own(buf_pool)); + if (evict_count) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE, + MONITOR_LRU_BATCH_EVICT_COUNT, + MONITOR_LRU_BATCH_EVICT_PAGES, + evict_count); + } + if (scanned) { MONITOR_INC_VALUE_CUMULATIVE( MONITOR_LRU_BATCH_SCANNED, @@ -1528,26 +1733,22 @@ buf_do_LRU_batch( n->evicted += n->unzip_LRU_evicted; } -/*******************************************************************//** -This utility flushes dirty blocks from the end of the flush_list. -the calling thread is not allowed to own any latches on pages! +/** This utility flushes dirty blocks from the end of the flush_list. +The calling thread is not allowed to own any latches on pages! +@param[in] buf_pool buffer pool instance +@param[in] min_n wished minimum mumber of blocks flushed (it is +not guaranteed that the actual number is that big, though) +@param[in] lsn_limit all blocks whose oldest_modification is smaller +than this should be flushed (if their number does not exceed min_n) @return number of blocks for which the write request was queued; ULINT_UNDEFINED if there was a flush of the same type already running */ static ulint buf_do_flush_list_batch( -/*====================*/ - buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ulint min_n, /*!< in: wished minimum mumber - of blocks flushed (it is not - guaranteed that the actual - number is that big, though) */ - lsn_t lsn_limit) /*!< all blocks whose - oldest_modification is smaller - than this should be flushed (if - their number does not exceed - min_n) */ + buf_pool_t* buf_pool, + ulint min_n, + lsn_t lsn_limit) { ulint count = 0; ulint scanned = 0; @@ -1595,50 +1796,65 @@ buf_do_flush_list_batch( buf_pool->flush_hp.set(NULL); buf_flush_list_mutex_exit(buf_pool); - MONITOR_INC_VALUE_CUMULATIVE(MONITOR_FLUSH_BATCH_SCANNED, - MONITOR_FLUSH_BATCH_SCANNED_NUM_CALL, - MONITOR_FLUSH_BATCH_SCANNED_PER_CALL, - scanned); + if (scanned) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_FLUSH_BATCH_SCANNED, + MONITOR_FLUSH_BATCH_SCANNED_NUM_CALL, + MONITOR_FLUSH_BATCH_SCANNED_PER_CALL, + scanned); + } + + if (count) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_FLUSH_BATCH_TOTAL_PAGE, + MONITOR_FLUSH_BATCH_COUNT, + MONITOR_FLUSH_BATCH_PAGES, + count); + } ut_ad(buf_pool_mutex_own(buf_pool)); return(count); } -/*******************************************************************//** -This utility flushes dirty blocks from the end of the LRU list or flush_list. +/** This utility flushes dirty blocks from the end of the LRU list or +flush_list. NOTE 1: in the case of an LRU flush the calling thread may own latches to pages: to avoid deadlocks, this function must be written so that it cannot end up waiting for these latches! NOTE 2: in the case of a flush list flush, -the calling thread is not allowed to own any latches on pages! */ -__attribute__((nonnull)) +the calling thread is not allowed to own any latches on pages! 
+@param[in] buf_pool buffer pool instance +@param[in] flush_type BUF_FLUSH_LRU or BUF_FLUSH_LIST; if +BUF_FLUSH_LIST, then the caller must not own any latches on pages +@param[in] min_n wished minimum mumber of blocks flushed (it is +not guaranteed that the actual number is that big, though) +@param[in] lsn_limit in the case of BUF_FLUSH_LIST all blocks whose +oldest_modification is smaller than this should be flushed (if their number +does not exceed min_n), otherwise ignored*/ void buf_flush_batch( -/*============*/ - buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - buf_flush_t flush_type, /*!< in: BUF_FLUSH_LRU or - BUF_FLUSH_LIST; if BUF_FLUSH_LIST, - then the caller must not own any - latches on pages */ - ulint min_n, /*!< in: wished minimum mumber of blocks - flushed (it is not guaranteed that the - actual number is that big, though) */ - lsn_t lsn_limit, /*!< in: in the case of BUF_FLUSH_LIST - all blocks whose oldest_modification is - smaller than this should be flushed - (if their number does not exceed - min_n), otherwise ignored */ + buf_pool_t* buf_pool, + buf_flush_t flush_type, + ulint min_n, + lsn_t lsn_limit, flush_counters_t* n) /*!< out: flushed/evicted page counts */ { ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); -#ifdef UNIV_SYNC_DEBUG - ut_ad((flush_type != BUF_FLUSH_LIST) - || sync_thread_levels_empty_except_dict()); -#endif /* UNIV_SYNC_DEBUG */ + +#ifdef UNIV_DEBUG + { + dict_sync_check check(true); + + ut_ad(flush_type != BUF_FLUSH_LIST + || !sync_check_iterate(check)); + } +#endif /* UNIV_DEBUG */ buf_pool_mutex_enter(buf_pool); + ulint count = 0; + /* Note: The buffer pool mutex is released and reacquired within the flush functions. */ switch (flush_type) { @@ -1655,38 +1871,27 @@ buf_flush_batch( buf_pool_mutex_exit(buf_pool); -#ifdef UNIV_DEBUG - if (buf_debug_prints && n->flushed > 0) { - fprintf(stderr, flush_type == BUF_FLUSH_LRU - ? "Flushed %lu pages in LRU flush\n" - : "Flushed %lu pages in flush list flush\n", - (ulong) n->flushed); - } -#endif /* UNIV_DEBUG */ + DBUG_PRINT("ib_buf", ("flush %u completed, %u pages", + unsigned(flush_type), unsigned(count))); } /******************************************************************//** -Gather the aggregated stats for both flush list and LRU list flushing */ +Gather the aggregated stats for both flush list and LRU list flushing. +@param page_count_flush number of pages flushed from the end of the flush_list +@param page_count_LRU number of pages flushed from the end of the LRU list +*/ void -buf_flush_common( -/*=============*/ - buf_flush_t flush_type, /*!< in: type of flush */ - ulint page_count) /*!< in: number of pages flushed */ +buf_flush_stats( +/*============*/ + ulint page_count_flush, + ulint page_count_LRU) { - buf_dblwr_flush_buffered_writes(); + DBUG_PRINT("ib_buf", ("flush completed, from flush_list %u pages, " + "from LRU_list %u pages", + unsigned(page_count_flush), + unsigned(page_count_LRU))); - ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); - -#ifdef UNIV_DEBUG - if (buf_debug_prints && page_count > 0) { - fprintf(stderr, flush_type == BUF_FLUSH_LRU - ? 
"Flushed %lu pages in LRU flush\n" - : "Flushed %lu pages in flush list flush\n", - (ulong) page_count); - } -#endif /* UNIV_DEBUG */ - - srv_stats.buf_pool_flushed.add(page_count); + srv_stats.buf_pool_flushed.add(page_count_flush + page_count_LRU); } /******************************************************************//** @@ -1698,6 +1903,8 @@ buf_flush_start( buf_flush_t flush_type) /*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */ { + ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); + buf_pool_mutex_enter(buf_pool); if (buf_pool->n_flush[flush_type] > 0 @@ -1712,11 +1919,28 @@ buf_flush_start( buf_pool->init_flush[flush_type] = TRUE; + os_event_reset(buf_pool->no_flush[flush_type]); + buf_pool_mutex_exit(buf_pool); return(TRUE); } +/******************************************************************//** +Gather the aggregated stats for both flush list and LRU list flushing */ +void +buf_flush_common( +/*=============*/ + buf_flush_t flush_type, /*!< in: type of flush */ + ulint page_count) /*!< in: number of pages flushed */ +{ + buf_dblwr_flush_buffered_writes(); + + ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); + + srv_stats.buf_pool_flushed.add(page_count); +} + /******************************************************************//** End a buffer flush batch for LRU or flush list */ void @@ -1740,11 +1964,16 @@ buf_flush_end( } buf_pool_mutex_exit(buf_pool); + + if (!srv_read_only_mode) { + buf_dblwr_flush_buffered_writes(); + } else { + os_aio_simulated_wake_handler_threads(); + } } /******************************************************************//** Waits until a flush batch of the given type ends */ -UNIV_INTERN void buf_flush_wait_batch_end( /*=====================*/ @@ -1773,31 +2002,116 @@ buf_flush_wait_batch_end( } } -/*******************************************************************//** -This utility flushes dirty blocks from the end of the flush list of -all buffer pool instances. +/** Do flushing batch of a given type. +@param[in,out] buf_pool buffer pool instance +@param[in] type flush type +@param[in] min_n wished minimum mumber of blocks flushed +(it is not guaranteed that the actual number is that big, though) +@param[in] lsn_limit in the case BUF_FLUSH_LIST all blocks whose +oldest_modification is smaller than this should be flushed (if their number +does not exceed min_n), otherwise ignored +@param[out] n the number of pages which were processed is +passed back to caller. Ignored if NULL +@retval true if a batch was queued successfully. +@retval false if another batch of same type was already running. */ +bool +buf_flush_do_batch( + buf_pool_t* buf_pool, + buf_flush_t type, + ulint min_n, + lsn_t lsn_limit, + flush_counters_t* n) +{ + ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST); + + if (n != NULL) { + n->flushed = 0; + } + + if (!buf_flush_start(buf_pool, type)) { + return(false); + } + + buf_flush_batch(buf_pool, type, min_n, lsn_limit, n); + + buf_flush_end(buf_pool, type); + + return(true); +} + +/** +Waits until a flush batch of the given lsn ends +@param[in] new_oldest target oldest_modified_lsn to wait for */ + +void +buf_flush_wait_flushed( + lsn_t new_oldest) +{ + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool; + lsn_t oldest; + + buf_pool = buf_pool_from_array(i); + + for (;;) { + /* We don't need to wait for fsync of the flushed + blocks, because anyway we need fsync to make chekpoint. + So, we don't need to wait for the batch end here. 
*/ + + buf_flush_list_mutex_enter(buf_pool); + + buf_page_t* bpage; + + /* We don't need to wait for system temporary pages */ + for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list); + bpage != NULL + && fsp_is_system_temporary(bpage->id.space()); + bpage = UT_LIST_GET_PREV(list, bpage)) { + /* Do nothing. */ + } + + if (bpage != NULL) { + ut_ad(bpage->in_flush_list); + oldest = bpage->oldest_modification; + } else { + oldest = 0; + } + + buf_flush_list_mutex_exit(buf_pool); + + if (oldest == 0 || oldest >= new_oldest) { + break; + } + + /* sleep and retry */ + os_thread_sleep(buf_flush_wait_flushed_sleep_time); + + MONITOR_INC(MONITOR_FLUSH_SYNC_WAITS); + } + } +} + +/** This utility flushes dirty blocks from the end of the flush list of all +buffer pool instances. NOTE: The calling thread is not allowed to own any latches on pages! +@param[in] min_n wished minimum mumber of blocks flushed (it is +not guaranteed that the actual number is that big, though) +@param[in] lsn_limit in the case BUF_FLUSH_LIST all blocks whose +oldest_modification is smaller than this should be flushed (if their number +does not exceed min_n), otherwise ignored +@param[out] n_processed the number of pages which were processed is +passed back to caller. Ignored if NULL. @return true if a batch was queued successfully for each buffer pool instance. false if another batch of same type was already running in at least one of the buffer pool instance */ -UNIV_INTERN bool -buf_flush_list( -/*===========*/ - ulint min_n, /*!< in: wished minimum mumber of blocks - flushed (it is not guaranteed that the - actual number is that big, though) */ - lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all - blocks whose oldest_modification is - smaller than this should be flushed - (if their number does not exceed - min_n), otherwise ignored */ - ulint* n_processed) /*!< out: the number of pages - which were processed is passed - back to caller. Ignored if NULL */ - +buf_flush_lists( + ulint min_n, + lsn_t lsn_limit, + ulint* n_processed) { ulint i; + ulint n_flushed = 0; bool success = true; if (buf_mtflu_init_done()) { @@ -1824,7 +2138,11 @@ buf_flush_list( buf_pool = buf_pool_from_array(i); - if (!buf_flush_start(buf_pool, BUF_FLUSH_LIST)) { + if (!buf_flush_do_batch(buf_pool, + BUF_FLUSH_LIST, + min_n, + lsn_limit, + &n)) { /* We have two choices here. If lsn_limit was specified then skipping an instance of buffer pool means we cannot guarantee that all pages @@ -1839,25 +2157,14 @@ buf_flush_list( continue; } + } - buf_flush_batch( - buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit, &n); - - buf_flush_end(buf_pool, BUF_FLUSH_LIST); - - buf_flush_common(BUF_FLUSH_LIST, n.flushed); - - if (n_processed) { - *n_processed += n.flushed; - } + if (n_flushed) { + buf_flush_stats(n_flushed, 0); + } - if (n.flushed) { - MONITOR_INC_VALUE_CUMULATIVE( - MONITOR_FLUSH_BATCH_TOTAL_PAGE, - MONITOR_FLUSH_BATCH_COUNT, - MONITOR_FLUSH_BATCH_PAGES, - n.flushed); - } + if (n_processed) { + *n_processed = n_flushed; } return(success); @@ -1870,9 +2177,8 @@ list and puts it on the free list. It is called from user threads when they are unable to find a replaceable page at the tail of the LRU list i.e.: when the background LRU flushing in the page_cleaner thread is not fast enough to keep pace with the workload. -@return TRUE if success. */ -UNIV_INTERN -ibool +@return true if success. 
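buf_flush_wait_flushed(), added above, deliberately does not wait for a flush batch to end: it repeatedly inspects each buffer pool instance's flush_list from the tail, skips pages belonging to the system temporary tablespace, and sleeps until the oldest remaining modification LSN has reached the target. A minimal sketch of that polling pattern, with oldest_dirty_lsn() standing in for the flush_list scan and a fixed sleep standing in for buf_flush_wait_flushed_sleep_time:

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <thread>

using lsn_t = std::uint64_t;

/* Poll until every buffer pool instance reports either an empty flush list
   (oldest == 0) or an oldest modification at or beyond `target`.
   `oldest_dirty_lsn(i)` is a placeholder for scanning instance i's
   flush_list from the tail while skipping temporary-tablespace pages. */
void wait_flushed(lsn_t target,
                  std::size_t n_instances,
                  const std::function<lsn_t(std::size_t)>& oldest_dirty_lsn)
{
    for (std::size_t i = 0; i < n_instances; ++i) {
        for (;;) {
            lsn_t oldest = oldest_dirty_lsn(i);

            if (oldest == 0 || oldest >= target) {
                break;          /* this instance is flushed far enough */
            }
            /* Sleep and retry; the page cleaner makes progress meanwhile. */
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }
}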
*/ +bool buf_flush_single_page_from_LRU( /*===========================*/ buf_pool_t* buf_pool) /*!< in/out: buffer pool instance */ @@ -1883,46 +2189,56 @@ buf_flush_single_page_from_LRU( buf_pool_mutex_enter(buf_pool); - for (bpage = buf_pool->single_scan_itr.start(), - scanned = 0, freed = FALSE; + for (bpage = buf_pool->single_scan_itr.start(), scanned = 0, + freed = false; bpage != NULL; ++scanned, bpage = buf_pool->single_scan_itr.get()) { ut_ad(buf_pool_mutex_own(buf_pool)); - buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage); + buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage); buf_pool->single_scan_itr.set(prev); + BPageMutex* block_mutex; + + block_mutex = buf_page_get_mutex(bpage); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); if (buf_flush_ready_for_replace(bpage)) { /* block is ready for eviction i.e., it is clean and is not IO-fixed or buffer fixed. */ mutex_exit(block_mutex); + if (buf_LRU_free_page(bpage, true)) { buf_pool_mutex_exit(buf_pool); - freed = TRUE; + freed = true; break; } + } else if (buf_flush_ready_for_flush( - bpage, BUF_FLUSH_SINGLE_PAGE)) { - /* Block is ready for flush. Dispatch an IO - request. We'll put it on free list in IO - completion routine. The following call, if - successful, will release the buffer pool and - block mutex. */ - freed = buf_flush_page(buf_pool, bpage, - BUF_FLUSH_SINGLE_PAGE, true); + bpage, BUF_FLUSH_SINGLE_PAGE)) { + + /* Block is ready for flush. Try and dispatch an IO + request. We'll put it on free list in IO completion + routine if it is not buffer fixed. The following call + will release the buffer pool and block mutex. + + Note: There is no guarantee that this page has actually + been freed, only that it has been flushed to disk */ + + freed = buf_flush_page( + buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE, true); + if (freed) { - /* block and buffer pool mutex have - already been reelased. */ break; } + mutex_exit(block_mutex); } else { mutex_exit(block_mutex); } + + ut_ad(!mutex_own(block_mutex)); } if (!freed) { @@ -1943,78 +2259,87 @@ buf_flush_single_page_from_LRU( return(freed); } -/*********************************************************************//** -Clears up tail of the LRU lists: +/** +Clears up tail of the LRU list of a given buffer pool instance: * Put replaceable pages at the tail of LRU to the free list * Flush dirty pages at the tail of LRU to the disk The depth to which we scan each buffer pool is controlled by dynamic config parameter innodb_LRU_scan_depth. +@param buf_pool buffer pool instance @return total pages flushed */ -UNIV_INTERN +static ulint -buf_flush_LRU_tail(void) -/*====================*/ +buf_flush_LRU_list( + buf_pool_t* buf_pool) { - ulint total_flushed = 0; + ulint scan_depth, withdraw_depth; + flush_counters_t n; + + memset(&n, 0, sizeof(flush_counters_t)); if(buf_mtflu_init_done()) { return(buf_mtflu_flush_LRU_tail()); } - for (ulint i = 0; i < srv_buf_pool_instances; i++) { - - buf_pool_t* buf_pool = buf_pool_from_array(i); - ulint scan_depth; - flush_counters_t n; - - /* srv_LRU_scan_depth can be arbitrarily large value. - We cap it with current LRU size. */ - buf_pool_mutex_enter(buf_pool); - scan_depth = UT_LIST_GET_LEN(buf_pool->LRU); - buf_pool_mutex_exit(buf_pool); - - scan_depth = ut_min(srv_LRU_scan_depth, scan_depth); + ut_ad(buf_pool); - /* Currently page_cleaner is the only thread - that can trigger an LRU flush. 
It is possible - that a batch triggered during last iteration is - still running, */ - if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) { - continue; - } + /* srv_LRU_scan_depth can be arbitrarily large value. + We cap it with current LRU size. */ + buf_pool_mutex_enter(buf_pool); + scan_depth = UT_LIST_GET_LEN(buf_pool->LRU); - buf_flush_batch(buf_pool, BUF_FLUSH_LRU, scan_depth, 0, &n); + if (buf_pool->curr_size < buf_pool->old_size + && buf_pool->withdraw_target > 0) { + withdraw_depth = buf_pool->withdraw_target + - UT_LIST_GET_LEN(buf_pool->withdraw); + } else { + withdraw_depth = 0; + } - buf_flush_end(buf_pool, BUF_FLUSH_LRU); + buf_pool_mutex_exit(buf_pool); - buf_flush_common(BUF_FLUSH_LRU, n.flushed); + if (withdraw_depth > srv_LRU_scan_depth) { + scan_depth = ut_min(withdraw_depth, scan_depth); + } else { + scan_depth = ut_min(static_cast(srv_LRU_scan_depth), + scan_depth); + } - if (n.flushed) { - MONITOR_INC_VALUE_CUMULATIVE( - MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE, - MONITOR_LRU_BATCH_FLUSH_COUNT, - MONITOR_LRU_BATCH_FLUSH_PAGES, - n.flushed); - } + /* Currently one of page_cleaners is the only thread + that can trigger an LRU flush at the same time. + So, it is not possible that a batch triggered during + last iteration is still running, */ + buf_flush_do_batch(buf_pool, BUF_FLUSH_LRU, scan_depth, + 0, &n); - if (n.evicted) { - MONITOR_INC_VALUE_CUMULATIVE( - MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE, - MONITOR_LRU_BATCH_EVICT_COUNT, - MONITOR_LRU_BATCH_EVICT_PAGES, - n.evicted); - } + return(n.flushed); +} +/*********************************************************************//** +Clears up tail of the LRU lists: +* Put replaceable pages at the tail of LRU to the free list +* Flush dirty pages at the tail of LRU to the disk +The depth to which we scan each buffer pool is controlled by dynamic +config parameter innodb_LRU_scan_depth. +@return total pages flushed */ +ulint +buf_flush_LRU_lists(void) +/*=====================*/ +{ + ulint n_flushed = 0; + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + n_flushed += buf_flush_LRU_list(buf_pool_from_array(i)); + } - total_flushed += (n.flushed + n.evicted); + if (n_flushed) { + buf_flush_stats(0, n_flushed); } - return(total_flushed); + return(n_flushed); } /*********************************************************************//** Wait for any possible LRU flushes that are in progress to end. */ -UNIV_INTERN void buf_flush_wait_LRU_batch_end(void) /*==============================*/ @@ -2037,26 +2362,6 @@ buf_flush_wait_LRU_batch_end(void) } } -/*********************************************************************//** -Flush a batch of dirty pages from the flush list -@return number of pages flushed, 0 if no page is flushed or if another -flush_list type batch is running */ -static -ulint -page_cleaner_do_flush_batch( -/*========================*/ - ulint n_to_flush, /*!< in: number of pages that - we should attempt to flush. */ - lsn_t lsn_limit) /*!< in: LSN up to which flushing - must happen */ -{ - ulint n_flushed; - - buf_flush_list(n_to_flush, lsn_limit, &n_flushed); - - return(n_flushed); -} - /*********************************************************************//** Calculates if flushing is required based on number of dirty pages in the buffer pool. 
@@ -2066,10 +2371,11 @@ ulint af_get_pct_for_dirty() /*==================*/ { - ulint dirty_pct = (ulint) buf_get_modified_ratio_pct(); + double dirty_pct = buf_get_modified_ratio_pct(); - if (dirty_pct > 0 && srv_max_buf_pool_modified_pct == 0) { - return(100); + if (dirty_pct == 0.0) { + /* No pages modified */ + return(0); } ut_a(srv_max_dirty_pages_pct_lwm @@ -2078,16 +2384,16 @@ af_get_pct_for_dirty() if (srv_max_dirty_pages_pct_lwm == 0) { /* The user has not set the option to preflush dirty pages as we approach the high water mark. */ - if (dirty_pct > srv_max_buf_pool_modified_pct) { + if (dirty_pct >= srv_max_buf_pool_modified_pct) { /* We have crossed the high water mark of dirty pages In this case we start flushing at 100% of innodb_io_capacity. */ return(100); } - } else if (dirty_pct > srv_max_dirty_pages_pct_lwm) { + } else if (dirty_pct >= srv_max_dirty_pages_pct_lwm) { /* We should start flushing pages gradually. */ - return (ulint) ((dirty_pct * 100) - / (srv_max_buf_pool_modified_pct + 1)); + return(static_cast((dirty_pct * 100) + / (srv_max_buf_pool_modified_pct + 1))); } return(0); @@ -2136,22 +2442,23 @@ af_get_pct_for_lsn( /*********************************************************************//** This function is called approximately once every second by the page_cleaner thread. Based on various factors it decides if there is a -need to do flushing. If flushing is needed it is performed and the -number of pages flushed is returned. -@return number of pages flushed */ +need to do flushing. +@return number of pages recommended to be flushed +@param lsn_limit pointer to return LSN up to which flushing must happen +@param last_pages_in the number of pages flushed by the last flush_list + flushing. */ static ulint -page_cleaner_flush_pages_if_needed(void) +page_cleaner_flush_pages_recommendation( /*====================================*/ + lsn_t* lsn_limit, + ulint last_pages_in) { - static lsn_t lsn_avg_rate = 0; static lsn_t prev_lsn = 0; - static lsn_t last_lsn = 0; static ulint sum_pages = 0; - static ulint last_pages = 0; - static ulint prev_pages = 0; static ulint avg_page_rate = 0; static ulint n_iterations = 0; + static time_t prev_time; lsn_t oldest_lsn; lsn_t cur_lsn; lsn_t age; @@ -2160,7 +2467,6 @@ page_cleaner_flush_pages_if_needed(void) ulint pct_for_dirty = 0; ulint pct_for_lsn = 0; ulint pct_total = 0; - int age_factor = 0; cur_lsn = log_get_lsn_nowait(); @@ -2174,6 +2480,7 @@ page_cleaner_flush_pages_if_needed(void) if (prev_lsn == 0) { /* First time around. */ prev_lsn = cur_lsn; + prev_time = ut_time(); return(0); } @@ -2181,19 +2488,111 @@ page_cleaner_flush_pages_if_needed(void) return(0); } + sum_pages += last_pages_in; + + time_t curr_time = ut_time(); + double time_elapsed = difftime(curr_time, prev_time); + /* We update our variables every srv_flushing_avg_loops iterations to smooth out transition in workload. */ - if (++n_iterations >= srv_flushing_avg_loops) { + if (++n_iterations >= srv_flushing_avg_loops + || time_elapsed >= srv_flushing_avg_loops) { + + if (time_elapsed < 1) { + time_elapsed = 1; + } - avg_page_rate = ((sum_pages / srv_flushing_avg_loops) - + avg_page_rate) / 2; + avg_page_rate = static_cast( + ((static_cast(sum_pages) + / time_elapsed) + + avg_page_rate) / 2); /* How much LSN we have generated since last call. 
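The change to af_get_pct_for_dirty() above switches to a floating-point dirty ratio, returns 0 when nothing is dirty, and uses >= comparisons so flushing starts exactly at the configured thresholds. A condensed restatement of the mapping from dirty ratio to a percentage of innodb_io_capacity (the parameter names echo the server options; this is a simplified sketch, not the function itself):

#include <cstdio>

/* How many percent of innodb_io_capacity should adaptive flushing use,
   given the current ratio of dirty pages? */
static unsigned pct_for_dirty(double dirty_pct,
                              double max_dirty_pct,  /* ..._modified_pct */
                              double dirty_pct_lwm)  /* ..._pct_lwm      */
{
    if (dirty_pct == 0.0) {
        return 0;                       /* nothing is dirty */
    }
    if (dirty_pct_lwm == 0.0) {
        /* No pre-flush low-water mark: flush at full IO capacity only
        once the hard limit has been reached. */
        return dirty_pct >= max_dirty_pct ? 100 : 0;
    }
    if (dirty_pct >= dirty_pct_lwm) {
        /* Ramp up gradually between the low-water mark and the limit. */
        return static_cast<unsigned>(dirty_pct * 100.0 / (max_dirty_pct + 1));
    }
    return 0;
}

int main()
{
    /* e.g. 40% dirty, limit 75%, low-water mark 10% -> 52% of io_capacity */
    std::printf("%u\n", pct_for_dirty(40.0, 75.0, 10.0));
}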
*/ - lsn_rate = (cur_lsn - prev_lsn) / srv_flushing_avg_loops; + lsn_rate = static_cast( + static_cast(cur_lsn - prev_lsn) + / time_elapsed); lsn_avg_rate = (lsn_avg_rate + lsn_rate) / 2; + + /* aggregate stats of all slots */ + mutex_enter(&page_cleaner->mutex); + + ulint flush_tm = page_cleaner->flush_time; + ulint flush_pass = page_cleaner->flush_pass; + + page_cleaner->flush_time = 0; + page_cleaner->flush_pass = 0; + + ulint lru_tm = 0; + ulint list_tm = 0; + ulint lru_pass = 0; + ulint list_pass = 0; + + for (ulint i = 0; i < page_cleaner->n_slots; i++) { + page_cleaner_slot_t* slot; + + slot = &page_cleaner->slots[i]; + + lru_tm += slot->flush_lru_time; + lru_pass += slot->flush_lru_pass; + list_tm += slot->flush_list_time; + list_pass += slot->flush_list_pass; + + slot->flush_lru_time = 0; + slot->flush_lru_pass = 0; + slot->flush_list_time = 0; + slot->flush_list_pass = 0; + } + + mutex_exit(&page_cleaner->mutex); + + /* minimum values are 1, to avoid dividing by zero. */ + if (lru_tm < 1) { + lru_tm = 1; + } + if (list_tm < 1) { + list_tm = 1; + } + if (flush_tm < 1) { + flush_tm = 1; + } + + if (lru_pass < 1) { + lru_pass = 1; + } + if (list_pass < 1) { + list_pass = 1; + } + if (flush_pass < 1) { + flush_pass = 1; + } + + MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_SLOT, + list_tm / list_pass); + MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_SLOT, + lru_tm / lru_pass); + + MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_THREAD, + list_tm / (srv_n_page_cleaners * flush_pass)); + MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_THREAD, + lru_tm / (srv_n_page_cleaners * flush_pass)); + MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_TIME_EST, + flush_tm * list_tm / flush_pass + / (list_tm + lru_tm)); + MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_TIME_EST, + flush_tm * lru_tm / flush_pass + / (list_tm + lru_tm)); + MONITOR_SET(MONITOR_FLUSH_AVG_TIME, flush_tm / flush_pass); + + MONITOR_SET(MONITOR_FLUSH_ADAPTIVE_AVG_PASS, + list_pass / page_cleaner->n_slots); + MONITOR_SET(MONITOR_LRU_BATCH_FLUSH_AVG_PASS, + lru_pass / page_cleaner->n_slots); + MONITOR_SET(MONITOR_FLUSH_AVG_PASS, flush_pass); + prev_lsn = cur_lsn; + prev_time = curr_time; n_iterations = 0; @@ -2211,54 +2610,96 @@ page_cleaner_flush_pages_if_needed(void) pct_total = ut_max(pct_for_dirty, pct_for_lsn); + /* Estimate pages to be flushed for the lsn progress */ + ulint sum_pages_for_lsn = 0; + lsn_t target_lsn = oldest_lsn + + lsn_avg_rate * buf_flush_lsn_scan_factor; + + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool_t* buf_pool = buf_pool_from_array(i); + ulint pages_for_lsn = 0; + + buf_flush_list_mutex_enter(buf_pool); + for (buf_page_t* b = UT_LIST_GET_LAST(buf_pool->flush_list); + b != NULL; + b = UT_LIST_GET_PREV(list, b)) { + if (b->oldest_modification > target_lsn) { + break; + } + ++pages_for_lsn; + } + buf_flush_list_mutex_exit(buf_pool); + + sum_pages_for_lsn += pages_for_lsn; + + mutex_enter(&page_cleaner->mutex); + ut_ad(page_cleaner->slots[i].state + == PAGE_CLEANER_STATE_NONE); + page_cleaner->slots[i].n_pages_requested + = pages_for_lsn / buf_flush_lsn_scan_factor + 1; + mutex_exit(&page_cleaner->mutex); + } + + sum_pages_for_lsn /= buf_flush_lsn_scan_factor; + if(sum_pages_for_lsn < 1) { + sum_pages_for_lsn = 1; + } + /* Cap the maximum IO capacity that we are going to use by - max_io_capacity. */ - n_pages = (PCT_IO(pct_total) + avg_page_rate) / 2; + max_io_capacity. 
Limit the value to avoid too quick increase */ + ulint pages_for_lsn = + std::min(sum_pages_for_lsn, srv_max_io_capacity * 2); + + n_pages = (PCT_IO(pct_total) + avg_page_rate + pages_for_lsn) / 3; if (n_pages > srv_max_io_capacity) { n_pages = srv_max_io_capacity; } - if (last_pages && cur_lsn - last_lsn > lsn_avg_rate / 2) { - age_factor = static_cast(prev_pages / last_pages); + /* Normalize request for each instance */ + mutex_enter(&page_cleaner->mutex); + ut_ad(page_cleaner->n_slots_requested == 0); + ut_ad(page_cleaner->n_slots_flushing == 0); + ut_ad(page_cleaner->n_slots_finished == 0); + + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + /* if REDO has enough of free space, + don't care about age distribution of pages */ + page_cleaner->slots[i].n_pages_requested = pct_for_lsn > 30 ? + page_cleaner->slots[i].n_pages_requested + * n_pages / sum_pages_for_lsn + 1 + : n_pages / srv_buf_pool_instances; } + mutex_exit(&page_cleaner->mutex); MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_REQUESTED, n_pages); - prev_pages = n_pages; - n_pages = page_cleaner_do_flush_batch( - n_pages, oldest_lsn + lsn_avg_rate * (age_factor + 1)); - - last_lsn= cur_lsn; - last_pages= n_pages + 1; + MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_BY_AGE, sum_pages_for_lsn); MONITOR_SET(MONITOR_FLUSH_AVG_PAGE_RATE, avg_page_rate); MONITOR_SET(MONITOR_FLUSH_LSN_AVG_RATE, lsn_avg_rate); MONITOR_SET(MONITOR_FLUSH_PCT_FOR_DIRTY, pct_for_dirty); MONITOR_SET(MONITOR_FLUSH_PCT_FOR_LSN, pct_for_lsn); - if (n_pages) { - MONITOR_INC_VALUE_CUMULATIVE( - MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE, - MONITOR_FLUSH_ADAPTIVE_COUNT, - MONITOR_FLUSH_ADAPTIVE_PAGES, - n_pages); - - sum_pages += n_pages; - } + *lsn_limit = LSN_MAX; return(n_pages); } /*********************************************************************//** Puts the page_cleaner thread to sleep if it has finished work in less -than a second */ +than a second +@retval 0 wake up by event set, +@retval OS_SYNC_TIME_EXCEEDED if timeout was exceeded +@param next_loop_time time when next loop iteration should start +@param sig_count zero or the value returned by previous call of + os_event_reset() */ static -void -page_cleaner_sleep_if_needed( -/*=========================*/ - ulint next_loop_time) /*!< in: time when next loop iteration - should start */ +ulint +pc_sleep_if_needed( +/*===============*/ + ulint next_loop_time, + int64_t sig_count) { ulint cur_time = ut_time_ms(); @@ -2267,24 +2708,303 @@ page_cleaner_sleep_if_needed( ut_min() to avoid long sleep in case of wrap around. */ ulint sleep_us; - sleep_us = ut_min(1000000, (next_loop_time - cur_time) * 1000); + sleep_us = ut_min(static_cast(1000000), + (next_loop_time - cur_time) * 1000); + + return(os_event_wait_time_low(buf_flush_event, + sleep_us, sig_count)); + } + + return(OS_SYNC_TIME_EXCEEDED); +} + + + +/******************************************************************//** +Initialize page_cleaner. 
*/ +void +buf_flush_page_cleaner_init(void) +/*=============================*/ +{ + ut_ad(page_cleaner == NULL); + + page_cleaner = static_cast( + ut_zalloc_nokey(sizeof(*page_cleaner))); - ib_int64_t sig_count = os_event_reset(buf_flush_event); + mutex_create(LATCH_ID_PAGE_CLEANER, &page_cleaner->mutex); - os_event_wait_time_low(buf_flush_event, sleep_us, sig_count); + page_cleaner->is_requested = os_event_create("pc_is_requested"); + page_cleaner->is_finished = os_event_create("pc_is_finished"); + + page_cleaner->n_slots = static_cast(srv_buf_pool_instances); + + page_cleaner->slots = static_cast( + ut_zalloc_nokey(page_cleaner->n_slots + * sizeof(*page_cleaner->slots))); + + page_cleaner->is_running = true; +} + +/** +Close page_cleaner. */ +static +void +buf_flush_page_cleaner_close(void) +{ + /* waiting for all worker threads exit */ + while (page_cleaner->n_workers > 0) { + os_thread_sleep(10000); } + + mutex_destroy(&page_cleaner->mutex); + + ut_free(page_cleaner->slots); + + os_event_destroy(page_cleaner->is_finished); + os_event_destroy(page_cleaner->is_requested); + + ut_free(page_cleaner); + + page_cleaner = NULL; } +/** +Requests for all slots to flush all buffer pool instances. +@param min_n wished minimum mumber of blocks flushed + (it is not guaranteed that the actual number is that big) +@param lsn_limit in the case BUF_FLUSH_LIST all blocks whose + oldest_modification is smaller than this should be flushed + (if their number does not exceed min_n), otherwise ignored +*/ +static +void +pc_request( + ulint min_n, + lsn_t lsn_limit) +{ + if (min_n != ULINT_MAX) { + /* Ensure that flushing is spread evenly amongst the + buffer pool instances. When min_n is ULINT_MAX + we need to flush everything up to the lsn limit + so no limit here. */ + min_n = (min_n + srv_buf_pool_instances - 1) + / srv_buf_pool_instances; + } + + mutex_enter(&page_cleaner->mutex); + + ut_ad(page_cleaner->n_slots_requested == 0); + ut_ad(page_cleaner->n_slots_flushing == 0); + ut_ad(page_cleaner->n_slots_finished == 0); + + page_cleaner->requested = (min_n > 0); + page_cleaner->lsn_limit = lsn_limit; + + for (ulint i = 0; i < page_cleaner->n_slots; i++) { + page_cleaner_slot_t* slot = &page_cleaner->slots[i]; + + ut_ad(slot->state == PAGE_CLEANER_STATE_NONE); + + if (min_n == ULINT_MAX) { + slot->n_pages_requested = ULINT_MAX; + } else if (min_n == 0) { + slot->n_pages_requested = 0; + } + + /* slot->n_pages_requested was already set by + page_cleaner_flush_pages_recommendation() */ + + slot->state = PAGE_CLEANER_STATE_REQUESTED; + } + + page_cleaner->n_slots_requested = page_cleaner->n_slots; + page_cleaner->n_slots_flushing = 0; + page_cleaner->n_slots_finished = 0; + + os_event_set(page_cleaner->is_requested); + + mutex_exit(&page_cleaner->mutex); +} + +/** +Do flush for one slot. +@return the number of the slots which has not been treated yet. 
*/ +static +ulint +pc_flush_slot(void) +{ + ulint lru_tm = 0; + ulint list_tm = 0; + int lru_pass = 0; + int list_pass = 0; + + mutex_enter(&page_cleaner->mutex); + + if (page_cleaner->n_slots_requested > 0) { + page_cleaner_slot_t* slot = NULL; + ulint i; + + for (i = 0; i < page_cleaner->n_slots; i++) { + slot = &page_cleaner->slots[i]; + + if (slot->state == PAGE_CLEANER_STATE_REQUESTED) { + break; + } + } + + /* slot should be found because + page_cleaner->n_slots_requested > 0 */ + ut_a(i < page_cleaner->n_slots); + + buf_pool_t* buf_pool = buf_pool_from_array(i); + + page_cleaner->n_slots_requested--; + page_cleaner->n_slots_flushing++; + slot->state = PAGE_CLEANER_STATE_FLUSHING; + + if (page_cleaner->n_slots_requested == 0) { + os_event_reset(page_cleaner->is_requested); + } + + if (!page_cleaner->is_running) { + slot->n_flushed_lru = 0; + slot->n_flushed_list = 0; + goto finish_mutex; + } + + mutex_exit(&page_cleaner->mutex); + + lru_tm = ut_time_ms(); + + /* Flush pages from end of LRU if required */ + slot->n_flushed_lru = buf_flush_LRU_list(buf_pool); + + lru_tm = ut_time_ms() - lru_tm; + lru_pass++; + + if (!page_cleaner->is_running) { + slot->n_flushed_list = 0; + goto finish; + } + + /* Flush pages from flush_list if required */ + if (page_cleaner->requested) { + flush_counters_t n; + memset(&n, 0, sizeof(flush_counters_t)); + list_tm = ut_time_ms(); + + slot->succeeded_list = buf_flush_do_batch( + buf_pool, BUF_FLUSH_LIST, + slot->n_pages_requested, + page_cleaner->lsn_limit, + &n); + + slot->n_flushed_list = n.flushed; + + list_tm = ut_time_ms() - list_tm; + list_pass++; + } else { + slot->n_flushed_list = 0; + slot->succeeded_list = true; + } +finish: + mutex_enter(&page_cleaner->mutex); +finish_mutex: + page_cleaner->n_slots_flushing--; + page_cleaner->n_slots_finished++; + slot->state = PAGE_CLEANER_STATE_FINISHED; + + slot->flush_lru_time += lru_tm; + slot->flush_list_time += list_tm; + slot->flush_lru_pass += lru_pass; + slot->flush_list_pass += list_pass; + + if (page_cleaner->n_slots_requested == 0 + && page_cleaner->n_slots_flushing == 0) { + os_event_set(page_cleaner->is_finished); + } + } + + ulint ret = page_cleaner->n_slots_requested; + mutex_exit(&page_cleaner->mutex); + + return(ret); +} + +/** +Wait until all flush requests are finished. +@param n_flushed_lru number of pages flushed from the end of the LRU list. +@param n_flushed_list number of pages flushed from the end of the + flush_list. +@return true if all flush_list flushing batch were success. */ +static +bool +pc_wait_finished( + ulint* n_flushed_lru, + ulint* n_flushed_list) +{ + bool all_succeeded = true; + + *n_flushed_lru = 0; + *n_flushed_list = 0; + + os_event_wait(page_cleaner->is_finished); + + mutex_enter(&page_cleaner->mutex); + + ut_ad(page_cleaner->n_slots_requested == 0); + ut_ad(page_cleaner->n_slots_flushing == 0); + ut_ad(page_cleaner->n_slots_finished == page_cleaner->n_slots); + + for (ulint i = 0; i < page_cleaner->n_slots; i++) { + page_cleaner_slot_t* slot = &page_cleaner->slots[i]; + + ut_ad(slot->state == PAGE_CLEANER_STATE_FINISHED); + + *n_flushed_lru += slot->n_flushed_lru; + *n_flushed_list += slot->n_flushed_list; + all_succeeded &= slot->succeeded_list; + + slot->state = PAGE_CLEANER_STATE_NONE; + + slot->n_pages_requested = 0; + } + + page_cleaner->n_slots_finished = 0; + + os_event_reset(page_cleaner->is_finished); + + mutex_exit(&page_cleaner->mutex); + + return(all_succeeded); +} + +#ifdef UNIV_LINUX +/** +Set priority for page_cleaner threads. 
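The pc_request() / pc_flush_slot() / pc_wait_finished() trio above implements a small slot state machine: the coordinator marks one slot per buffer pool instance as REQUESTED and signals is_requested; any worker (or the coordinator helping out) claims a requested slot, flushes that instance, marks the slot FINISHED; the last finisher signals is_finished, after which the coordinator aggregates the counts and resets the slots to NONE. A minimal standalone model of that protocol using standard threading primitives follows; condition variables replace the os_event objects and the actual flushing is elided.

#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <vector>

enum class SlotState { NONE, REQUESTED, FLUSHING, FINISHED };

struct PageCleaner {
    std::mutex              mtx;
    std::condition_variable requested;   /* models page_cleaner->is_requested */
    std::condition_variable finished;    /* models page_cleaner->is_finished  */
    std::vector<SlotState>  slots;
    std::size_t             n_requested = 0;
    std::size_t             n_finished  = 0;
    bool                    running     = true;

    explicit PageCleaner(std::size_t n) : slots(n, SlotState::NONE) {}

    /* Coordinator: mark every slot as requested and wake the workers. */
    void request()
    {
        std::lock_guard<std::mutex> lock(mtx);
        for (SlotState& s : slots) s = SlotState::REQUESTED;
        n_requested = slots.size();
        n_finished  = 0;
        requested.notify_all();
    }

    /* Worker (or coordinator helping out): take one requested slot,
       "flush" it, and report completion.  Returns false once shut down. */
    bool flush_one_slot()
    {
        std::unique_lock<std::mutex> lock(mtx);
        requested.wait(lock, [this] { return n_requested > 0 || !running; });
        if (!running) return false;

        std::size_t i = 0;
        while (slots[i] != SlotState::REQUESTED) ++i;
        slots[i] = SlotState::FLUSHING;
        --n_requested;

        lock.unlock();
        /* ... LRU flush and flush_list batch for instance i happen here ... */
        lock.lock();

        slots[i] = SlotState::FINISHED;
        if (++n_finished == slots.size()) finished.notify_all();
        return true;
    }

    /* Coordinator: wait until every slot reports completion, then reset. */
    void wait_finished()
    {
        std::unique_lock<std::mutex> lock(mtx);
        finished.wait(lock, [this] { return n_finished == slots.size(); });
        for (SlotState& s : slots) s = SlotState::NONE;
        n_finished = 0;
    }
};

A coordinator would call request(), help out by calling flush_one_slot() in a loop, and then call wait_finished(); workers simply loop on flush_one_slot() until shutdown.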
+@param[in] priority priority intended to set +@return true if set as intended */ +static +bool +buf_flush_page_cleaner_set_priority( + int priority) +{ + setpriority(PRIO_PROCESS, (pid_t)syscall(SYS_gettid), + priority); + return(getpriority(PRIO_PROCESS, (pid_t)syscall(SYS_gettid)) + == priority); +} +#endif /* UNIV_LINUX */ /******************************************************************//** page_cleaner thread tasked with flushing dirty pages from the buffer -pools. As of now we'll have only one instance of this thread. +pools. As of now we'll have only one coordinator. @return a dummy parameter */ -extern "C" UNIV_INTERN +extern "C" os_thread_ret_t -DECLARE_THREAD(buf_flush_page_cleaner_thread)( -/*==========================================*/ +DECLARE_THREAD(buf_flush_page_cleaner_coordinator)( +/*===============================================*/ void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ @@ -2292,35 +3012,248 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( ulint next_loop_time = ut_time_ms() + 1000; ulint n_flushed = 0; ulint last_activity = srv_get_activity_count(); - - ut_ad(!srv_read_only_mode); + ulint last_pages = 0; #ifdef UNIV_PFS_THREAD - pfs_register_thread(buf_page_cleaner_thread_key); + /* JAN: TODO: MySQL 5.7 PSI + pfs_register_thread(page_cleaner_thread_key); + */ #endif /* UNIV_PFS_THREAD */ #ifdef UNIV_DEBUG_THREAD_CREATION - fprintf(stderr, "InnoDB: page_cleaner thread running, id %lu\n", - os_thread_pf(os_thread_get_curr_id())); + ib::info() << "page_cleaner thread running, id " + << os_thread_pf(os_thread_get_curr_id()); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_page_cleaner_is_active = TRUE; +#ifdef UNIV_LINUX + /* linux might be able to set different setting for each thread. + worth to try to set high priority for page cleaner threads */ + if (buf_flush_page_cleaner_set_priority( + buf_flush_page_cleaner_priority)) { + + ib::info() << "page_cleaner coordinator priority: " + << buf_flush_page_cleaner_priority; + } else { + ib::info() << "If the mysqld execution user is authorized," + " page cleaner thread priority can be changed." + " See the man page of setpriority()."; + } +#endif /* UNIV_LINUX */ + + buf_page_cleaner_is_active = true; + + while (!srv_read_only_mode + && srv_shutdown_state == SRV_SHUTDOWN_NONE + && recv_sys->heap != NULL) { + /* treat flushing requests during recovery. 
*/ + ulint n_flushed_lru = 0; + ulint n_flushed_list = 0; + + os_event_wait(recv_sys->flush_start); + + if (srv_shutdown_state != SRV_SHUTDOWN_NONE + || recv_sys->heap == NULL) { + break; + } + + switch (recv_sys->flush_type) { + case BUF_FLUSH_LRU: + /* Flush pages from end of LRU if required */ + pc_request(0, LSN_MAX); + while (pc_flush_slot() > 0) {} + pc_wait_finished(&n_flushed_lru, &n_flushed_list); + break; + + case BUF_FLUSH_LIST: + /* Flush all pages */ + do { + pc_request(ULINT_MAX, LSN_MAX); + while (pc_flush_slot() > 0) {} + } while (!pc_wait_finished(&n_flushed_lru, + &n_flushed_list)); + break; + + default: + ut_ad(0); + } + + os_event_reset(recv_sys->flush_start); + os_event_set(recv_sys->flush_end); + } + + os_event_wait(buf_flush_event); + + ulint ret_sleep = 0; + ulint n_evicted = 0; + ulint n_flushed_last = 0; + ulint warn_interval = 1; + ulint warn_count = 0; + int64_t sig_count = os_event_reset(buf_flush_event); while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { - page_cleaner_sleep_if_needed(next_loop_time); + /* The page_cleaner skips sleep if the server is + idle and there are no pending IOs in the buffer pool + and there is work to do. */ + if (srv_check_activity(last_activity) + || buf_get_n_pending_read_ios() + || n_flushed == 0) { - next_loop_time = ut_time_ms() + 1000; + ret_sleep = pc_sleep_if_needed( + next_loop_time, sig_count); - if (srv_check_activity(last_activity)) { - last_activity = srv_get_activity_count(); + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { + break; + } + } else if (ut_time_ms() > next_loop_time) { + ret_sleep = OS_SYNC_TIME_EXCEEDED; + } else { + ret_sleep = 0; + } - /* Flush pages from flush_list if required */ - n_flushed += page_cleaner_flush_pages_if_needed(); + sig_count = os_event_reset(buf_flush_event); + + if (ret_sleep == OS_SYNC_TIME_EXCEEDED) { + ulint curr_time = ut_time_ms(); + + if (curr_time > next_loop_time + 3000) { + if (warn_count == 0) { + ib::info() << "page_cleaner: 1000ms" + " intended loop took " + << 1000 + curr_time + - next_loop_time + << "ms. The settings might not" + " be optimal. (flushed=" + << n_flushed_last + << " and evicted=" + << n_evicted + << ", during the time.)"; + if (warn_interval > 300) { + warn_interval = 600; + } else { + warn_interval *= 2; + } + warn_count = warn_interval; + } else { + --warn_count; + } + } else { + /* reset counter */ + warn_interval = 1; + warn_count = 0; + } - } else if (srv_idle_flush_pct) { - n_flushed = page_cleaner_do_flush_batch( - PCT_IO(100), - LSN_MAX); + next_loop_time = curr_time + 1000; + n_flushed_last = n_evicted = 0; + } + + if (ret_sleep != OS_SYNC_TIME_EXCEEDED + && srv_flush_sync + && buf_flush_sync_lsn > 0) { + /* woke up for flush_sync */ + mutex_enter(&page_cleaner->mutex); + lsn_t lsn_limit = buf_flush_sync_lsn; + buf_flush_sync_lsn = 0; + mutex_exit(&page_cleaner->mutex); + + /* Request flushing for threads */ + pc_request(ULINT_MAX, lsn_limit); + + ulint tm = ut_time_ms(); + + /* Coordinator also treats requests */ + while (pc_flush_slot() > 0) {} + + /* only coordinator is using these counters, + so no need to protect by lock. 
*/ + page_cleaner->flush_time += ut_time_ms() - tm; + page_cleaner->flush_pass++; + + /* Wait for all slots to be finished */ + ulint n_flushed_lru = 0; + ulint n_flushed_list = 0; + pc_wait_finished(&n_flushed_lru, &n_flushed_list); + + if (n_flushed_list > 0 || n_flushed_lru > 0) { + buf_flush_stats(n_flushed_list, n_flushed_lru); + + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_FLUSH_SYNC_TOTAL_PAGE, + MONITOR_FLUSH_SYNC_COUNT, + MONITOR_FLUSH_SYNC_PAGES, + n_flushed_lru + n_flushed_list); + } + + n_flushed = n_flushed_lru + n_flushed_list; + + } else if (srv_check_activity(last_activity)) { + ulint n_to_flush; + lsn_t lsn_limit = 0; + + /* Estimate pages from flush_list to be flushed */ + if (ret_sleep == OS_SYNC_TIME_EXCEEDED) { + last_activity = srv_get_activity_count(); + n_to_flush = + page_cleaner_flush_pages_recommendation( + &lsn_limit, last_pages); + } else { + n_to_flush = 0; + } + + /* Request flushing for threads */ + pc_request(n_to_flush, lsn_limit); + + ulint tm = ut_time_ms(); + + /* Coordinator also treats requests */ + while (pc_flush_slot() > 0) { + /* No op */ + } + + /* only coordinator is using these counters, + so no need to protect by lock. */ + page_cleaner->flush_time += ut_time_ms() - tm; + page_cleaner->flush_pass++ ; + + /* Wait for all slots to be finished */ + ulint n_flushed_lru = 0; + ulint n_flushed_list = 0; + + pc_wait_finished(&n_flushed_lru, &n_flushed_list); + + if (n_flushed_list > 0 || n_flushed_lru > 0) { + buf_flush_stats(n_flushed_list, n_flushed_lru); + } + + if (ret_sleep == OS_SYNC_TIME_EXCEEDED) { + last_pages = n_flushed_list; + } + + n_evicted += n_flushed_lru; + n_flushed_last += n_flushed_list; + + n_flushed = n_flushed_lru + n_flushed_list; + + if (n_flushed_lru) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE, + MONITOR_LRU_BATCH_FLUSH_COUNT, + MONITOR_LRU_BATCH_FLUSH_PAGES, + n_flushed_lru); + } + + if (n_flushed_list) { + MONITOR_INC_VALUE_CUMULATIVE( + MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE, + MONITOR_FLUSH_ADAPTIVE_COUNT, + MONITOR_FLUSH_ADAPTIVE_PAGES, + n_flushed_list); + } + + } else if (ret_sleep == OS_SYNC_TIME_EXCEEDED) { + /* no activity, slept enough */ + buf_flush_lists(PCT_IO(100), LSN_MAX, &n_flushed); + + n_flushed_last += n_flushed; if (n_flushed) { MONITOR_INC_VALUE_CUMULATIVE( @@ -2328,18 +3261,21 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( MONITOR_FLUSH_BACKGROUND_COUNT, MONITOR_FLUSH_BACKGROUND_PAGES, n_flushed); + } - } - /* Flush pages from end of LRU if required */ - buf_flush_LRU_tail(); + } else { + /* no activity, but woken up by event */ + n_flushed = 0; + } } ut_ad(srv_shutdown_state > 0); - - if (srv_fast_shutdown == 2) { - /* In very fast shutdown we simulate a crash of - buffer pool. We are not required to do any flushing */ + if (srv_fast_shutdown == 2 + || srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { + /* In very fast shutdown or when innodb failed to start, we + simulate a crash of the buffer pool. We are not required to do + any flushing. */ goto thread_exit; } @@ -2356,7 +3292,15 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( dirtied until we enter SRV_SHUTDOWN_FLUSH_PHASE phase. 
*/ do { - n_flushed = page_cleaner_do_flush_batch(PCT_IO(100), LSN_MAX); + pc_request(ULINT_MAX, LSN_MAX); + + while (pc_flush_slot() > 0) {} + + ulint n_flushed_lru = 0; + ulint n_flushed_list = 0; + pc_wait_finished(&n_flushed_lru, &n_flushed_list); + + n_flushed = n_flushed_lru + n_flushed_list; /* We sleep only if there are no pages to flush */ if (n_flushed == 0) { @@ -2382,15 +3326,25 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( bool success; do { + pc_request(ULINT_MAX, LSN_MAX); + + while (pc_flush_slot() > 0) {} + + ulint n_flushed_lru = 0; + ulint n_flushed_list = 0; + success = pc_wait_finished(&n_flushed_lru, &n_flushed_list); + + n_flushed = n_flushed_lru + n_flushed_list; - success = buf_flush_list(PCT_IO(100), LSN_MAX, &n_flushed); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); + buf_flush_wait_LRU_batch_end(); } while (!success || n_flushed > 0); /* Some sanity checks */ ut_a(srv_get_active_thread_type() == SRV_NONE); ut_a(srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE); + for (ulint i = 0; i < srv_buf_pool_instances; i++) { buf_pool_t* buf_pool = buf_pool_from_array(i); ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == 0); @@ -2399,9 +3353,17 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /* We have lived our life. Time to die. */ thread_exit: - buf_page_cleaner_is_active = FALSE; + /* All worker threads are waiting for the event here, + and no more access to page_cleaner structure by them. + Wakes worker threads up just to make them exit. */ + page_cleaner->is_running = false; + os_event_set(page_cleaner->is_requested); + + buf_flush_page_cleaner_close(); + + buf_page_cleaner_is_active = false; - os_event_free(buf_flush_event); + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ @@ -2410,6 +3372,85 @@ thread_exit: OS_THREAD_DUMMY_RETURN; } +/******************************************************************//** +Worker thread of page_cleaner. +@return a dummy parameter */ +extern "C" +os_thread_ret_t +DECLARE_THREAD(buf_flush_page_cleaner_worker)( +/*==========================================*/ + void* arg __attribute__((unused))) + /*!< in: a dummy parameter required by + os_thread_create */ +{ + mutex_enter(&page_cleaner->mutex); + page_cleaner->n_workers++; + mutex_exit(&page_cleaner->mutex); + +#ifdef UNIV_LINUX + /* linux might be able to set different setting for each thread + worth to try to set high priority for page cleaner threads */ + if (buf_flush_page_cleaner_set_priority( + buf_flush_page_cleaner_priority)) { + + ib::info() << "page_cleaner worker priority: " + << buf_flush_page_cleaner_priority; + } +#endif /* UNIV_LINUX */ + + while (true) { + os_event_wait(page_cleaner->is_requested); + + if (!page_cleaner->is_running) { + break; + } + + pc_flush_slot(); + } + + mutex_enter(&page_cleaner->mutex); + page_cleaner->n_workers--; + mutex_exit(&page_cleaner->mutex); + + os_thread_exit(NULL); + + OS_THREAD_DUMMY_RETURN; +} + +/*******************************************************************//** +Synchronously flush dirty blocks from the end of the flush list of all buffer +pool instances. +NOTE: The calling thread is not allowed to own any latches on pages! 
*/ +void +buf_flush_sync_all_buf_pools(void) +/*==============================*/ +{ + bool success; + do { + success = buf_flush_lists(ULINT_MAX, LSN_MAX, NULL); + buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); + } while (!success); + + ut_a(success); +} + +/** Request IO burst and wake page_cleaner up. +@param[in] lsn_limit upper limit of LSN to be flushed */ +void +buf_flush_request_force( + lsn_t lsn_limit) +{ + /* adjust based on lsn_avg_rate not to get old */ + lsn_t lsn_target = lsn_limit + lsn_avg_rate * 3; + + mutex_enter(&page_cleaner->mutex); + if (lsn_target > buf_flush_sync_lsn) { + buf_flush_sync_lsn = lsn_target; + } + mutex_exit(&page_cleaner->mutex); + + os_event_set(buf_flush_event); +} #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /** Functor to validate the flush list. */ @@ -2422,7 +3463,7 @@ struct Check { /******************************************************************//** Validates the flush list. -@return TRUE if ok */ +@return TRUE if ok */ static ibool buf_flush_validate_low( @@ -2431,17 +3472,18 @@ buf_flush_validate_low( { buf_page_t* bpage; const ib_rbt_node_t* rnode = NULL; + Check check; ut_ad(buf_flush_list_mutex_own(buf_pool)); - UT_LIST_VALIDATE(list, buf_page_t, buf_pool->flush_list, Check()); + ut_list_validate(buf_pool->flush_list, check); bpage = UT_LIST_GET_FIRST(buf_pool->flush_list); /* If we are in recovery mode i.e.: flush_rbt != NULL then each block in the flush_list must also be present in the flush_rbt. */ - if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) { + if (buf_pool->flush_rbt != NULL) { rnode = rbt_first(buf_pool->flush_rbt); } @@ -2462,20 +3504,20 @@ buf_flush_validate_low( || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); ut_a(om > 0); - if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) { - buf_page_t** prpage; + if (buf_pool->flush_rbt != NULL) { + buf_page_t** prpage; - ut_a(rnode); + ut_a(rnode != NULL); prpage = rbt_value(buf_page_t*, rnode); - ut_a(*prpage); + ut_a(*prpage != NULL); ut_a(*prpage == bpage); rnode = rbt_next(buf_pool->flush_rbt, rnode); } bpage = UT_LIST_GET_NEXT(list, bpage); - ut_a(!bpage || om >= bpage->oldest_modification); + ut_a(bpage == NULL || om >= bpage->oldest_modification); } /* By this time we must have exhausted the traversal of @@ -2487,8 +3529,7 @@ buf_flush_validate_low( /******************************************************************//** Validates the flush list. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool buf_flush_validate( /*===============*/ @@ -2509,17 +3550,16 @@ buf_flush_validate( #endif /* !UNIV_HOTBACKUP */ -#ifdef UNIV_DEBUG /******************************************************************//** Check if there are any dirty pages that belong to a space id in the flush list in a particular buffer pool. 
-@return number of dirty pages present in a single buffer pool */ -UNIV_INTERN +@return number of dirty pages present in a single buffer pool */ ulint buf_pool_get_dirty_pages_count( /*===========================*/ buf_pool_t* buf_pool, /*!< in: buffer pool */ - ulint id) /*!< in: space id to check */ + ulint id, /*!< in: space id to check */ + FlushObserver* observer) /*!< in: flush observer to check */ { ulint count = 0; @@ -2537,7 +3577,10 @@ buf_pool_get_dirty_pages_count( ut_ad(bpage->in_flush_list); ut_ad(bpage->oldest_modification > 0); - if (buf_page_get_space(bpage) == id) { + if ((observer != NULL + && observer == bpage->flush_observer) + || (observer == NULL + && id == bpage->id.space())) { ++count; } } @@ -2550,13 +3593,12 @@ buf_pool_get_dirty_pages_count( /******************************************************************//** Check if there are any dirty pages that belong to a space id in the flush list. -@return number of dirty pages present in all the buffer pools */ -UNIV_INTERN +@return number of dirty pages present in all the buffer pools */ ulint buf_flush_get_dirty_pages_count( /*============================*/ - ulint id) /*!< in: space id to check */ - + ulint id, /*!< in: space id to check */ + FlushObserver* observer) /*!< in: flush observer to check */ { ulint count = 0; @@ -2565,9 +3607,136 @@ buf_flush_get_dirty_pages_count( buf_pool = buf_pool_from_array(i); - count += buf_pool_get_dirty_pages_count(buf_pool, id); + count += buf_pool_get_dirty_pages_count(buf_pool, id, observer); } return(count); } -#endif /* UNIV_DEBUG */ + +/** FlushObserver constructor +@param[in] space_id table space id +@param[in] trx trx instance +@param[in] stage performance schema accounting object, +used by ALTER TABLE. It is passed to log_preflush_pool_modified_pages() +for accounting. 
*/
+FlushObserver::FlushObserver(
+	ulint			space_id,
+	trx_t*			trx,
+	ut_stage_alter_t*	stage)
+	:
+	m_space_id(space_id),
+	m_trx(trx),
+	m_stage(stage),
+	m_interrupted(false)
+{
+	m_flushed = UT_NEW_NOKEY(std::vector<ulint>(srv_buf_pool_instances));
+	m_removed = UT_NEW_NOKEY(std::vector<ulint>(srv_buf_pool_instances));
+
+	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
+		m_flushed->at(i) = 0;
+		m_removed->at(i) = 0;
+	}
+
+#ifdef FLUSH_LIST_OBSERVER_DEBUG
+	ib::info() << "FlushObserver constructor: " << m_trx->id;
+#endif /* FLUSH_LIST_OBSERVER_DEBUG */
+}
+
+/** FlushObserver destructor */
+FlushObserver::~FlushObserver()
+{
+	ut_ad(buf_flush_get_dirty_pages_count(m_space_id, this) == 0);
+
+	UT_DELETE(m_flushed);
+	UT_DELETE(m_removed);
+
+#ifdef FLUSH_LIST_OBSERVER_DEBUG
+	ib::info() << "FlushObserver destructor: " << m_trx->id;
+#endif /* FLUSH_LIST_OBSERVER_DEBUG */
+}
+
+/** Check whether trx is interrupted
+@return true if trx is interrupted */
+bool
+FlushObserver::check_interrupted()
+{
+	if (trx_is_interrupted(m_trx)) {
+		interrupted();
+
+		return(true);
+	}
+
+	return(false);
+}
+
+/** Notify observer of a flush
+@param[in]	buf_pool	buffer pool instance
+@param[in]	bpage		buffer page to flush */
+void
+FlushObserver::notify_flush(
+	buf_pool_t*	buf_pool,
+	buf_page_t*	bpage)
+{
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	m_flushed->at(buf_pool->instance_no)++;
+
+	if (m_stage != NULL) {
+		m_stage->inc();
+	}
+
+#ifdef FLUSH_LIST_OBSERVER_DEBUG
+	ib::info() << "Flush <" << bpage->id.space()
+		   << ", " << bpage->id.page_no() << ">";
+#endif /* FLUSH_LIST_OBSERVER_DEBUG */
+}
+
+/** Notify observer of a remove
+@param[in]	buf_pool	buffer pool instance
+@param[in]	bpage		buffer page flushed */
+void
+FlushObserver::notify_remove(
+	buf_pool_t*	buf_pool,
+	buf_page_t*	bpage)
+{
+	ut_ad(buf_pool_mutex_own(buf_pool));
+
+	m_removed->at(buf_pool->instance_no)++;
+
+#ifdef FLUSH_LIST_OBSERVER_DEBUG
+	ib::info() << "Remove <" << bpage->id.space()
+		   << ", " << bpage->id.page_no() << ">";
+#endif /* FLUSH_LIST_OBSERVER_DEBUG */
+}
+
+/** Flush dirty pages and wait. */
+void
+FlushObserver::flush()
+{
+	buf_remove_t	buf_remove;
+
+	if (m_interrupted) {
+		buf_remove = BUF_REMOVE_FLUSH_NO_WRITE;
+	} else {
+		buf_remove = BUF_REMOVE_FLUSH_WRITE;
+
+		if (m_stage != NULL) {
+			ulint	pages_to_flush =
+				buf_flush_get_dirty_pages_count(
+					m_space_id, this);
+
+			m_stage->begin_phase_flush(pages_to_flush);
+		}
+	}
+
+	/* Flush or remove dirty pages. */
+	buf_LRU_flush_or_remove_pages(m_space_id, buf_remove, m_trx);
+
+	/* Wait until all dirty pages have been flushed.
*/ + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + while (!is_complete(i)) { + + os_thread_sleep(2000); + } + } +} diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 30b991d24cf..1cb46aecf22 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -24,19 +24,16 @@ Created 11/5/1995 Heikki Tuuri *******************************************************/ #include "buf0lru.h" - -#ifndef UNIV_HOTBACKUP #ifdef UNIV_NONINL #include "buf0lru.ic" -#endif +#endif /* UNIV_NOINL */ +#ifndef UNIV_HOTBACKUP #include "ut0byte.h" -#include "ut0lst.h" #include "ut0rnd.h" -#include "sync0sync.h" #include "sync0rw.h" #include "hash0hash.h" -#include "os0sync.h" +#include "os0event.h" #include "fil0fil.h" #include "btr0btr.h" #include "buf0buddy.h" @@ -53,8 +50,6 @@ Created 11/5/1995 Heikki Tuuri #include "srv0mon.h" #include "lock0lock.h" -#include "ha_prototypes.h" - /** The number of blocks from the LRU_old pointer onward, including the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the whole LRU list length, except that the tolerance defined below @@ -62,7 +57,7 @@ is allowed. Note that the tolerance must be small enough such that for even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not allowed to point to either end of the LRU list. */ -#define BUF_LRU_OLD_TOLERANCE 20 +static const ulint BUF_LRU_OLD_TOLERANCE = 20; /** The minimum amount of non-old blocks when the LRU_old list exists (that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks). @@ -79,7 +74,11 @@ We also release buf_pool->mutex after scanning this many pages of the flush_list when dropping a table. This is to ensure that other threads are not blocked for extended period of time when using very large buffer pools. */ -#define BUF_LRU_DROP_SEARCH_SIZE 1024 +static const ulint BUF_LRU_DROP_SEARCH_SIZE = 1024; + +/** We scan these many blocks when looking for a clean page to evict +during LRU eviction. */ +static const ulint BUF_LRU_SEARCH_SCAN_THRESHOLD = 100; /** We scan these many blocks when looking for a clean page to evict during LRU eviction. */ @@ -87,7 +86,7 @@ during LRU eviction. */ /** If we switch on the InnoDB monitor because there are too few available frames in the buffer pool, we set this to TRUE */ -static ibool buf_lru_switched_on_innodb_mon = FALSE; +static bool buf_lru_switched_on_innodb_mon = false; /******************************************************************//** These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O @@ -103,11 +102,11 @@ uncompressed and compressed data), which must be clean. */ /** Number of intervals for which we keep the history of these stats. Each interval is 1 second, defined by the rate at which srv_error_monitor_thread() calls buf_LRU_stat_update(). */ -#define BUF_LRU_STAT_N_INTERVAL 50 +static const ulint BUF_LRU_STAT_N_INTERVAL = 50; /** Co-efficient with which we multiply I/O operations to equate them with page_zip_decompress() operations. */ -#define BUF_LRU_IO_TO_UNZIP_FACTOR 50 +static const ulint BUF_LRU_IO_TO_UNZIP_FACTOR = 50; /** Sampled values buf_LRU_stat_cur. Not protected by any mutex. Updated by buf_LRU_stat_update(). */ @@ -118,18 +117,18 @@ static ulint buf_LRU_stat_arr_ind; /** Current operation counters. Not protected by any mutex. Cleared by buf_LRU_stat_update(). */ -UNIV_INTERN buf_LRU_stat_t buf_LRU_stat_cur; +buf_LRU_stat_t buf_LRU_stat_cur; /** Running sum of past values of buf_LRU_stat_cur. Updated by buf_LRU_stat_update(). 
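// (Illustration, not part of the patch.)  The FlushObserver methods added
// above follow a simple counting pattern: the flushing side reports every
// page it flushed or removed for the observed tablespace, and the waiter
// polls per buffer-pool instance until everything is accounted for,
// sleeping 2ms between checks like the loop in FlushObserver::flush().
// A minimal standalone sketch of that pattern; SampleObserver, expected[]
// and the std::mutex are inventions of this sketch -- the real class keeps
// UT_NEW'd vectors protected by the buffer-pool mutex, and its actual
// is_complete() condition lives in the header, not in this hunk.

#include <chrono>
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

class SampleObserver {
public:
	explicit SampleObserver(std::size_t n_instances)
		: m_flushed(n_instances, 0),
		  m_removed(n_instances, 0),
		  m_expected(n_instances, 0) {}

	// Called by the flushing side, one call per page.
	void notify_flush(std::size_t i)  { bump(m_flushed, i); }
	void notify_remove(std::size_t i) { bump(m_removed, i); }

	void set_expected(std::size_t i, unsigned long n) {
		std::lock_guard<std::mutex> g(m_mutex);
		m_expected[i] = n;
	}

	// Poll until every instance is done, mirroring FlushObserver::flush().
	void wait_all_complete() {
		for (std::size_t i = 0; i < m_expected.size(); i++) {
			while (!is_complete(i)) {
				std::this_thread::sleep_for(
					std::chrono::milliseconds(2));
			}
		}
	}

private:
	void bump(std::vector<unsigned long>& v, std::size_t i) {
		std::lock_guard<std::mutex> g(m_mutex);
		++v[i];
	}

	bool is_complete(std::size_t i) {
		std::lock_guard<std::mutex> g(m_mutex);
		return m_flushed[i] + m_removed[i] >= m_expected[i];
	}

	std::mutex m_mutex;
	std::vector<unsigned long> m_flushed;
	std::vector<unsigned long> m_removed;
	std::vector<unsigned long> m_expected;
};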
Not Protected by any mutex. */ -UNIV_INTERN buf_LRU_stat_t buf_LRU_stat_sum; +buf_LRU_stat_t buf_LRU_stat_sum; /* @} */ /** @name Heuristics for detecting index scan @{ */ /** Move blocks to "new" LRU list only if the first access was at least this many milliseconds ago. Not protected by any mutex or latch. */ -UNIV_INTERN uint buf_LRU_old_threshold_ms; +uint buf_LRU_old_threshold_ms; /* @} */ /******************************************************************//** @@ -165,8 +164,7 @@ buf_LRU_block_free_hashed_page( be in a state where it can be freed */ /******************************************************************//** -Increases LRU size in bytes with zip_size for compressed page, -UNIV_PAGE_SIZE for uncompressed page in inline function */ +Increases LRU size in bytes with page size inline function */ static inline void incr_LRU_size_in_bytes( @@ -175,24 +173,21 @@ incr_LRU_size_in_bytes( buf_pool_t* buf_pool) /*!< in: buffer pool instance */ { ut_ad(buf_pool_mutex_own(buf_pool)); - ulint zip_size = page_zip_get_size(&bpage->zip); - buf_pool->stat.LRU_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE; + + buf_pool->stat.LRU_bytes += bpage->size.physical(); + ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size); } /******************************************************************//** Determines if the unzip_LRU list should be used for evicting a victim instead of the general LRU list. -@return TRUE if should use unzip_LRU */ -UNIV_INTERN +@return TRUE if should use unzip_LRU */ ibool buf_LRU_evict_from_unzip_LRU( /*=========================*/ buf_pool_t* buf_pool) { - ulint io_avg; - ulint unzip_avg; - ut_ad(buf_pool_mutex_own(buf_pool)); /* If the unzip_LRU list is empty, we can only use the LRU. */ @@ -216,9 +211,10 @@ buf_LRU_evict_from_unzip_LRU( /* Calculate the average over past intervals, and add the values of the current interval. */ - io_avg = buf_LRU_stat_sum.io / BUF_LRU_STAT_N_INTERVAL + ulint io_avg = buf_LRU_stat_sum.io / BUF_LRU_STAT_N_INTERVAL + buf_LRU_stat_cur.io; - unzip_avg = buf_LRU_stat_sum.unzip / BUF_LRU_STAT_N_INTERVAL + + ulint unzip_avg = buf_LRU_stat_sum.unzip / BUF_LRU_STAT_N_INTERVAL + buf_LRU_stat_cur.unzip; /* Decide based on our formula. If the load is I/O bound @@ -228,27 +224,33 @@ buf_LRU_evict_from_unzip_LRU( return(unzip_avg <= io_avg * BUF_LRU_IO_TO_UNZIP_FACTOR); } -/******************************************************************//** -Attempts to drop page hash index on a batch of pages belonging to a -particular space id. */ +/** Attempts to drop page hash index on a batch of pages belonging to a +particular space id. +@param[in] space_id space id +@param[in] page_size page size +@param[in] arr array of page_no +@param[in] count number of entries in array */ static void buf_LRU_drop_page_hash_batch( -/*=========================*/ - ulint space_id, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - const ulint* arr, /*!< in: array of page_no */ - ulint count) /*!< in: number of entries in array */ + ulint space_id, + const page_size_t& page_size, + const ulint* arr, + ulint count) { - ulint i; - - ut_ad(arr != NULL); ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE); - for (i = 0; i < count; ++i) { - btr_search_drop_page_hash_when_freed(space_id, zip_size, - arr[i]); + for (ulint i = 0; i < count; ++i, ++arr) { + /* While our only caller + buf_LRU_drop_page_hash_for_tablespace() + is being executed for DROP TABLE or similar, + the table cannot be evicted from the buffer pool. 
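// (Illustration, not part of the patch.)  The decision made by
// buf_LRU_evict_from_unzip_LRU() above reduces to one comparison of two
// decaying averages, using the constants introduced earlier in this file
// (BUF_LRU_STAT_N_INTERVAL = 50, BUF_LRU_IO_TO_UNZIP_FACTOR = 50).
// A standalone restatement of just that formula; lru_stat and
// prefer_unzip_lru() are names invented for the sketch, and the real
// function applies further list-length preconditions not shown here.

struct lru_stat {
	unsigned long io;     // I/O operations observed
	unsigned long unzip;  // page_zip_decompress() operations observed
};

// Returns true when the load looks I/O bound, i.e. it is better to evict
// only the uncompressed frame (unzip_LRU) and keep the compressed copy
// of the page cached in memory.
bool prefer_unzip_lru(const lru_stat& sum, const lru_stat& cur,
		      bool unzip_lru_nonempty)
{
	if (!unzip_lru_nonempty) {
		return false;   // nothing on the unzip_LRU to evict
	}

	const unsigned long n_interval = 50;  // BUF_LRU_STAT_N_INTERVAL
	const unsigned long factor = 50;      // BUF_LRU_IO_TO_UNZIP_FACTOR

	// Average over past intervals plus the current interval.
	const unsigned long io_avg    = sum.io / n_interval + cur.io;
	const unsigned long unzip_avg = sum.unzip / n_interval + cur.unzip;

	return unzip_avg <= io_avg * factor;
}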
+ Note: this should not be executed for DROP TABLESPACE, + because DROP TABLESPACE would be refused if tables existed + in the tablespace, and a previous DROP TABLE would have + already removed the AHI entries. */ + btr_search_drop_page_hash_when_freed( + page_id_t(space_id, *arr), page_size); } } @@ -264,38 +266,33 @@ buf_LRU_drop_page_hash_for_tablespace( buf_pool_t* buf_pool, /*!< in: buffer pool instance */ ulint id) /*!< in: space id */ { - buf_page_t* bpage; - ulint* page_arr; - ulint num_entries; - ulint zip_size; + bool found; + const page_size_t page_size(fil_space_get_page_size(id, &found)); - zip_size = fil_space_get_zip_size(id); - - if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { + if (!found) { /* Somehow, the tablespace does not exist. Nothing to drop. */ ut_ad(0); return; } - page_arr = static_cast(ut_malloc( - sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE)); + ulint* page_arr = static_cast(ut_malloc_nokey( + sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE)); + + ulint num_entries = 0; buf_pool_mutex_enter(buf_pool); - num_entries = 0; scan_again: - bpage = UT_LIST_GET_LAST(buf_pool->LRU); - - while (bpage != NULL) { - buf_page_t* prev_bpage; - ibool is_fixed; + for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU); + bpage != NULL; + /* No op */) { - prev_bpage = UT_LIST_GET_PREV(LRU, bpage); + buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage); ut_a(buf_page_in_file(bpage)); if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE - || bpage->space != id + || bpage->id.space() != id || bpage->io_fix != BUF_IO_NONE) { /* Compressed pages are never hashed. Skip blocks of other tablespaces. @@ -306,17 +303,25 @@ next_page: } mutex_enter(&((buf_block_t*) bpage)->mutex); - is_fixed = bpage->buf_fix_count > 0 - || !((buf_block_t*) bpage)->index; - mutex_exit(&((buf_block_t*) bpage)->mutex); - if (is_fixed) { - goto next_page; + { + bool skip = bpage->buf_fix_count > 0 + || !((buf_block_t*) bpage)->index; + + mutex_exit(&((buf_block_t*) bpage)->mutex); + + if (skip) { + /* Skip this block, because there are + no adaptive hash index entries + pointing to it, or because we cannot + drop them due to the buffer-fix. */ + goto next_page; + } } /* Store the page number so that we can drop the hash index in a batch later. */ - page_arr[num_entries] = bpage->offset; + page_arr[num_entries] = bpage->id.page_no(); ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE); ++num_entries; @@ -329,7 +334,7 @@ next_page: buf_pool_mutex_exit(buf_pool); buf_LRU_drop_page_hash_batch( - id, zip_size, page_arr, num_entries); + id, page_size, page_arr, num_entries); num_entries = 0; @@ -351,8 +356,9 @@ next_page: /* If, however, bpage has been removed from LRU list to the free list then we should restart the scan. bpage->state is protected by buf_pool mutex. */ - if (bpage + if (bpage != NULL && buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { + goto scan_again; } } @@ -360,7 +366,7 @@ next_page: buf_pool_mutex_exit(buf_pool); /* Drop any remaining batch of search hashed pages. 
*/ - buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries); + buf_LRU_drop_page_hash_batch(id, page_size, page_arr, num_entries); ut_free(page_arr); } @@ -377,7 +383,7 @@ buf_flush_yield( buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */ buf_page_t* bpage) /*!< in/out: current page */ { - ib_mutex_t* block_mutex; + BPageMutex* block_mutex; ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_page_in_file(bpage)); @@ -385,6 +391,7 @@ buf_flush_yield( block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); + /* "Fix" the block so that the position cannot be changed after we release the buffer pool and block mutexes. */ @@ -400,6 +407,7 @@ buf_flush_yield( buf_pool_mutex_enter(buf_pool); mutex_enter(block_mutex); + /* "Unfix" the block now that we have both the buffer pool and block mutex again. */ buf_page_unset_sticky(bpage); @@ -477,8 +485,14 @@ buf_flush_or_remove_page( yet; maybe the system is currently reading it in, or flushing the modifications to the file */ return(false); + } + BPageMutex* block_mutex; + bool processed = false; + + block_mutex = buf_page_get_mutex(bpage); + /* We have to release the flush_list_mutex to obey the latching order. We are however guaranteed that the page will stay in the flush_list and won't be relocated because @@ -487,9 +501,6 @@ buf_flush_or_remove_page( buf_flush_list_mutex_exit(buf_pool); - bool processed; - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); - mutex_enter(block_mutex); ut_ad(bpage->oldest_modification != 0); @@ -497,28 +508,30 @@ buf_flush_or_remove_page( if (!flush) { buf_flush_remove(bpage); - processed = true; - - } else if (buf_flush_ready_for_flush(bpage, BUF_FLUSH_SINGLE_PAGE) - && buf_flush_page( - buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE, false)) { - /* Wake possible simulated aio thread to actually - post the writes to the operating system */ - os_aio_simulated_wake_handler_threads(); + mutex_exit(block_mutex); - buf_pool_mutex_enter(buf_pool); + processed = true; - buf_flush_list_mutex_enter(buf_pool); + } else if (buf_flush_ready_for_flush(bpage, BUF_FLUSH_SINGLE_PAGE)) { - return(true); + /* The following call will release the buffer pool + and block mutex. */ + processed = buf_flush_page( + buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE, false); + if (processed) { + /* Wake possible simulated aio thread to actually + post the writes to the operating system */ + os_aio_simulated_wake_handler_threads(); + buf_pool_mutex_enter(buf_pool); + } else { + mutex_exit(block_mutex); + } } else { - processed = false; + mutex_exit(block_mutex); } - mutex_exit(block_mutex); - buf_flush_list_mutex_enter(buf_pool); ut_ad(!mutex_own(block_mutex)); @@ -542,6 +555,7 @@ buf_flush_or_remove_pages( buf_pool_t* buf_pool, /*!< buffer pool instance */ ulint id, /*!< in: target space id for which to remove or flush pages */ + FlushObserver* observer, /*!< in: flush observer */ bool flush, /*!< in: flush to disk if true but don't remove else remove without flushing to disk */ @@ -568,7 +582,10 @@ rescan: prev = UT_LIST_GET_PREV(list, bpage); - if (buf_page_get_space(bpage) != id) { + /* If flush observer is NULL, flush page for space id, + or flush page for flush observer. */ + if ((observer != NULL && observer != bpage->flush_observer) + || (observer == NULL && id != bpage->id.space())) { /* Skip this block, as it does not belong to the target space. */ @@ -626,6 +643,16 @@ rescan: /* The check for trx is interrupted is expensive, we want to check every N iterations. 
*/ if (!processed && trx && trx_is_interrupted(trx)) { + if (trx->flush_observer != NULL) { + if (flush) { + trx->flush_observer->interrupted(); + } else { + /* We should remove all pages with the + the flush observer. */ + continue; + } + } + buf_flush_list_mutex_exit(buf_pool); return(DB_INTERRUPTED); } @@ -647,6 +674,7 @@ buf_flush_dirty_pages( /*==================*/ buf_pool_t* buf_pool, /*!< buffer pool instance */ ulint id, /*!< in: space id */ + FlushObserver* observer, /*!< in: flush observer */ bool flush, /*!< in: flush to disk if true otherwise remove the pages without flushing */ const trx_t* trx) /*!< to check if the operation must @@ -657,7 +685,8 @@ buf_flush_dirty_pages( do { buf_pool_mutex_enter(buf_pool); - err = buf_flush_or_remove_pages(buf_pool, id, flush, trx); + err = buf_flush_or_remove_pages( + buf_pool, id, observer, flush, trx); buf_pool_mutex_exit(buf_pool); @@ -667,6 +696,13 @@ buf_flush_dirty_pages( os_thread_sleep(2000); } + if (err == DB_INTERRUPTED && observer != NULL) { + ut_a(flush); + + flush = false; + err = DB_FAIL; + } + /* DB_FAIL is a soft error, it means that the task wasn't completed, needs to be retried. */ @@ -675,7 +711,7 @@ buf_flush_dirty_pages( } while (err == DB_FAIL); ut_ad(err == DB_INTERRUPTED - || buf_pool_get_dirty_pages_count(buf_pool, id) == 0); + || buf_pool_get_dirty_pages_count(buf_pool, id, observer) == 0); } /******************************************************************//** @@ -702,18 +738,18 @@ scan_again: rw_lock_t* hash_lock; buf_page_t* prev_bpage; - ib_mutex_t* block_mutex = NULL; + BPageMutex* block_mutex; ut_a(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); prev_bpage = UT_LIST_GET_PREV(LRU, bpage); - /* bpage->space and bpage->io_fix are protected by + /* bpage->id.space() and bpage->io_fix are protected by buf_pool->mutex and the block_mutex. It is safe to check them while holding buf_pool->mutex only. */ - if (buf_page_get_space(bpage) != id) { + if (bpage->id.space() != id) { /* Skip this block, as it does not belong to the space that is being invalidated. */ goto next_page; @@ -725,14 +761,12 @@ scan_again: all_freed = FALSE; goto next_page; } else { - ulint fold = buf_page_address_fold( - bpage->space, bpage->offset); - - hash_lock = buf_page_hash_lock_get(buf_pool, fold); + hash_lock = buf_page_hash_lock_get(buf_pool, bpage->id); rw_lock_x_lock(hash_lock); block_mutex = buf_page_get_mutex(bpage); + mutex_enter(block_mutex); if (bpage->buf_fix_count > 0) { @@ -754,35 +788,30 @@ scan_again: ut_ad(mutex_own(block_mutex)); -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, - "Dropping space %lu page %lu\n", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); - } -#endif + DBUG_PRINT("ib_buf", ("evict page %u:%u" + " state %u", + bpage->id.space(), + bpage->id.page_no(), + bpage->state)); + if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) { /* Do nothing, because the adaptive hash index covers uncompressed pages only. */ } else if (((buf_block_t*) bpage)->index) { - ulint page_no; - ulint zip_size; - buf_pool_mutex_exit(buf_pool); - zip_size = buf_page_get_zip_size(bpage); - page_no = buf_page_get_page_no(bpage); - rw_lock_x_unlock(hash_lock); mutex_exit(block_mutex); /* Note that the following call will acquire - and release block->lock X-latch. */ + and release block->lock X-latch. + Note that the table cannot be evicted during + the execution of ALTER TABLE...DISCARD TABLESPACE + because MySQL is keeping the table handle open. 
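// (Illustration, not part of the patch.)  buf_flush_dirty_pages() above
// treats DB_FAIL as a soft error ("not finished, scan again") and, when a
// flush observer is attached, downgrades DB_INTERRUPTED from "flush" to
// "remove without writing" and keeps scanning.  A compact standalone
// sketch of that retry policy; db_err_t, drain_dirty_pages() and the
// one_pass callback are stand-ins for dberr_t, buf_flush_dirty_pages()
// and buf_flush_or_remove_pages().

#include <chrono>
#include <functional>
#include <thread>

enum class db_err_t { SUCCESS, FAIL, INTERRUPTED };

void drain_dirty_pages(const std::function<db_err_t(bool flush)>& one_pass,
		       bool flush, bool have_observer)
{
	db_err_t err;

	do {
		// One bounded scan over the flush list; it may stop early
		// and report FAIL so latches are not held for too long.
		err = one_pass(flush);

		if (err == db_err_t::FAIL) {
			// Soft error: give the flusher a moment, then rescan.
			std::this_thread::sleep_for(
				std::chrono::microseconds(2000));
		}

		if (err == db_err_t::INTERRUPTED && have_observer) {
			// The DDL was interrupted: stop writing pages out,
			// but keep going so the dirty pages are removed.
			flush = false;
			err = db_err_t::FAIL;
		}
	} while (err == db_err_t::FAIL);
}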
*/ btr_search_drop_page_hash_when_freed( - id, zip_size, page_no); + bpage->id, bpage->size); goto scan_again; } @@ -804,11 +833,9 @@ scan_again: ut_ad(!mutex_own(block_mutex)); -#ifdef UNIV_SYNC_DEBUG /* buf_LRU_block_remove_hashed() releases the hash_lock */ - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_S)); next_page: bpage = prev_bpage; @@ -839,22 +866,27 @@ buf_LRU_remove_pages( const trx_t* trx) /*!< to check if the operation must be interrupted */ { + FlushObserver* observer = (trx == NULL) ? NULL : trx->flush_observer; + switch (buf_remove) { case BUF_REMOVE_ALL_NO_WRITE: buf_LRU_remove_all_pages(buf_pool, id); break; case BUF_REMOVE_FLUSH_NO_WRITE: - ut_a(trx == 0); - buf_flush_dirty_pages(buf_pool, id, false, NULL); + /* Pass trx as NULL to avoid interruption check. */ + buf_flush_dirty_pages(buf_pool, id, observer, false, NULL); break; case BUF_REMOVE_FLUSH_WRITE: - ut_a(trx != 0); - buf_flush_dirty_pages(buf_pool, id, true, trx); - /* Ensure that all asynchronous IO is completed. */ - os_aio_wait_until_no_pending_writes(); - fil_flush(id); + buf_flush_dirty_pages(buf_pool, id, observer, true, trx); + + if (observer == NULL) { + /* Ensure that all asynchronous IO is completed. */ + os_aio_wait_until_no_pending_writes(); + fil_flush(id); + } + break; } } @@ -864,7 +896,6 @@ Flushes all dirty pages or removes all pages belonging to a given tablespace. A PROBLEM: if readahead is being started, what guarantees that it will not try to read in pages after this operation has completed? */ -UNIV_INTERN void buf_LRU_flush_or_remove_pages( /*==========================*/ @@ -909,13 +940,11 @@ buf_LRU_flush_or_remove_pages( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** Insert a compressed block into buf_pool->zip_clean in the LRU order. */ -UNIV_INTERN void buf_LRU_insert_zip_clean( /*=====================*/ buf_page_t* bpage) /*!< in: pointer to the block in question */ { - buf_page_t* b; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); @@ -923,20 +952,21 @@ buf_LRU_insert_zip_clean( /* Find the first successor of bpage in the LRU list that is in the zip_clean list. */ - b = bpage; + buf_page_t* b = bpage; + do { b = UT_LIST_GET_NEXT(LRU, b); } while (b && buf_page_get_state(b) != BUF_BLOCK_ZIP_PAGE); /* Insert bpage before b, i.e., after the predecessor of b. */ - if (b) { + if (b != NULL) { b = UT_LIST_GET_PREV(list, b); } - if (b) { - UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, bpage); + if (b != NULL) { + UT_LIST_INSERT_AFTER(buf_pool->zip_clean, b, bpage); } else { - UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage); + UT_LIST_ADD_FIRST(buf_pool->zip_clean, bpage); } } #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ @@ -944,34 +974,34 @@ buf_LRU_insert_zip_clean( /******************************************************************//** Try to free an uncompressed page of a compressed block from the unzip LRU list. The compressed page is preserved, and it need not be clean. 
-@return TRUE if freed */ -UNIV_INLINE -ibool +@return true if freed */ +static +bool buf_LRU_free_from_unzip_LRU_list( /*=============================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ibool scan_all) /*!< in: scan whole LRU list - if TRUE, otherwise scan only + bool scan_all) /*!< in: scan whole LRU list + if true, otherwise scan only srv_LRU_scan_depth / 2 blocks. */ { - buf_block_t* block; - ibool freed; - ulint scanned; - ut_ad(buf_pool_mutex_own(buf_pool)); if (!buf_LRU_evict_from_unzip_LRU(buf_pool)) { - return(FALSE); + return(false); } - for (block = UT_LIST_GET_LAST(buf_pool->unzip_LRU), - scanned = 0, freed = FALSE; - block != NULL && !freed + ulint scanned = 0; + bool freed = false; + + for (buf_block_t* block = UT_LIST_GET_LAST(buf_pool->unzip_LRU); + block != NULL + && !freed && (scan_all || scanned < srv_LRU_scan_depth); ++scanned) { - buf_block_t* prev_block = UT_LIST_GET_PREV(unzip_LRU, - block); + buf_block_t* prev_block; + + prev_block = UT_LIST_GET_PREV(unzip_LRU, block); ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); ut_ad(block->in_unzip_LRU_list); @@ -989,43 +1019,44 @@ buf_LRU_free_from_unzip_LRU_list( MONITOR_LRU_UNZIP_SEARCH_SCANNED_PER_CALL, scanned); } + return(freed); } /******************************************************************//** Try to free a clean page from the common LRU list. -@return TRUE if freed */ -UNIV_INLINE -ibool +@return true if freed */ +static +bool buf_LRU_free_from_common_LRU_list( /*==============================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ibool scan_all) /*!< in: scan whole LRU list - if TRUE, otherwise scan only - srv_LRU_scan_depth / 2 blocks. */ + bool scan_all) /*!< in: scan whole LRU list + if true, otherwise scan only + up to BUF_LRU_SEARCH_SCAN_THRESHOLD */ { - buf_page_t* bpage; - ibool freed; - ulint scanned; - ut_ad(buf_pool_mutex_own(buf_pool)); - for (bpage = buf_pool->lru_scan_itr.start(), - scanned = 0, freed = false; - bpage != NULL && !freed + ulint scanned = 0; + bool freed = false; + + for (buf_page_t* bpage = buf_pool->lru_scan_itr.start(); + bpage != NULL + && !freed && (scan_all || scanned < BUF_LRU_SEARCH_SCAN_THRESHOLD); ++scanned, bpage = buf_pool->lru_scan_itr.get()) { - buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage); + buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage); + BPageMutex* mutex = buf_page_get_mutex(bpage); + buf_pool->lru_scan_itr.set(prev); - ib_mutex_t* mutex = buf_page_get_mutex(bpage); mutex_enter(mutex); ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); - unsigned accessed = buf_page_is_accessed(bpage); + unsigned accessed = buf_page_is_accessed(bpage); if (buf_flush_ready_for_replace(bpage)) { mutex_exit(mutex); @@ -1058,47 +1089,45 @@ buf_LRU_free_from_common_LRU_list( /******************************************************************//** Try to free a replaceable block. -@return TRUE if found and freed */ -UNIV_INTERN -ibool +@return true if found and freed */ +bool buf_LRU_scan_and_free_block( /*========================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ibool scan_all) /*!< in: scan whole LRU list - if TRUE, otherwise scan only - 'old' blocks. */ + bool scan_all) /*!< in: scan whole LRU list + if true, otherwise scan only + BUF_LRU_SEARCH_SCAN_THRESHOLD + blocks. 
*/ { ut_ad(buf_pool_mutex_own(buf_pool)); return(buf_LRU_free_from_unzip_LRU_list(buf_pool, scan_all) - || buf_LRU_free_from_common_LRU_list( - buf_pool, scan_all)); + || buf_LRU_free_from_common_LRU_list(buf_pool, scan_all)); } /******************************************************************//** Returns TRUE if less than 25 % of the buffer pool in any instance is available. This can be used in heuristics to prevent huge transactions eating up the whole buffer pool for their locks. -@return TRUE if less than 25 % of buffer pool left */ -UNIV_INTERN +@return TRUE if less than 25 % of buffer pool left */ ibool buf_LRU_buf_pool_running_out(void) /*==============================*/ { - ulint i; ibool ret = FALSE; - for (i = 0; i < srv_buf_pool_instances && !ret; i++) { + for (ulint i = 0; i < srv_buf_pool_instances && !ret; i++) { buf_pool_t* buf_pool; buf_pool = buf_pool_from_array(i); buf_pool_mutex_enter(buf_pool); - if (!recv_recovery_on + if (!recv_recovery_is_on() && UT_LIST_GET_LEN(buf_pool->free) + UT_LIST_GET_LEN(buf_pool->LRU) - < buf_pool->curr_size / 4) { + < ut_min(buf_pool->curr_size, + buf_pool->old_size) / 4) { ret = TRUE; } @@ -1112,8 +1141,7 @@ buf_LRU_buf_pool_running_out(void) /******************************************************************//** Returns a free block from the buf_pool. The block is taken off the free list. If it is empty, returns NULL. -@return a free control block, or NULL if the buf_block->free list is empty */ -UNIV_INTERN +@return a free control block, or NULL if the buf_block->free list is empty */ buf_block_t* buf_LRU_get_free_only( /*==================*/ @@ -1123,25 +1151,42 @@ buf_LRU_get_free_only( ut_ad(buf_pool_mutex_own(buf_pool)); - block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free); + block = reinterpret_cast( + UT_LIST_GET_FIRST(buf_pool->free)); - if (block) { + while (block != NULL) { ut_ad(block->page.in_free_list); ut_d(block->page.in_free_list = FALSE); ut_ad(!block->page.in_flush_list); ut_ad(!block->page.in_LRU_list); ut_a(!buf_page_in_file(&block->page)); - UT_LIST_REMOVE(list, buf_pool->free, (&block->page)); + UT_LIST_REMOVE(buf_pool->free, &block->page); - mutex_enter(&block->mutex); + if (buf_pool->curr_size >= buf_pool->old_size + || UT_LIST_GET_LEN(buf_pool->withdraw) + >= buf_pool->withdraw_target + || !buf_block_will_withdrawn(buf_pool, block)) { + /* found valid free block */ + buf_page_mutex_enter(block); - buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE); - UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); + buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE); + UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE); - ut_ad(buf_pool_from_block(block) == buf_pool); + ut_ad(buf_pool_from_block(block) == buf_pool); + + buf_page_mutex_exit(block); + break; + } + + /* This should be withdrawn */ + UT_LIST_ADD_LAST( + buf_pool->withdraw, + &block->page); + ut_d(block->in_withdraw_list = TRUE); - mutex_exit(&block->mutex); + block = reinterpret_cast( + UT_LIST_GET_FIRST(buf_pool->free)); } return(block); @@ -1160,28 +1205,23 @@ buf_LRU_check_size_of_non_data_objects( { ut_ad(buf_pool_mutex_own(buf_pool)); - if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free) + if (!recv_recovery_is_on() + && buf_pool->curr_size == buf_pool->old_size + && UT_LIST_GET_LEN(buf_pool->free) + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: ERROR: over 95 percent of the buffer pool" - " is occupied by\n" - "InnoDB: lock heaps or the adaptive hash index!" 
- " Check that your\n" - "InnoDB: transactions do not set too many row locks.\n" - "InnoDB: Your buffer pool size is %lu MB." - " Maybe you should make\n" - "InnoDB: the buffer pool bigger?\n" - "InnoDB: We intentionally generate a seg fault" - " to print a stack trace\n" - "InnoDB: on Linux!\n", - (ulong) (buf_pool->curr_size - / (1024 * 1024 / UNIV_PAGE_SIZE))); - - ut_error; - - } else if (!recv_recovery_on + ib::fatal() << "Over 95 percent of the buffer pool is" + " occupied by lock heaps or the adaptive hash index!" + " Check that your transactions do not set too many" + " row locks. Your buffer pool size is " + << (buf_pool->curr_size + / (1024 * 1024 / UNIV_PAGE_SIZE)) << " MB." + " Maybe you should make the buffer pool bigger?" + " We intentionally generate a seg fault to print" + " a stack trace on Linux!"; + + } else if (!recv_recovery_is_on() + && buf_pool->curr_size == buf_pool->old_size && (UT_LIST_GET_LEN(buf_pool->free) + UT_LIST_GET_LEN(buf_pool->LRU)) < buf_pool->curr_size / 3) { @@ -1192,27 +1232,23 @@ buf_LRU_check_size_of_non_data_objects( heaps or the adaptive hash index. This may be a memory leak! */ - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: WARNING: over 67 percent of" - " the buffer pool is occupied by\n" - "InnoDB: lock heaps or the adaptive" - " hash index! Check that your\n" - "InnoDB: transactions do not set too many" - " row locks.\n" - "InnoDB: Your buffer pool size is %lu MB." - " Maybe you should make\n" - "InnoDB: the buffer pool bigger?\n" - "InnoDB: Starting the InnoDB Monitor to print" - " diagnostics, including\n" - "InnoDB: lock heap and hash index sizes.\n", - (ulong) (buf_pool->curr_size - / (1024 * 1024 / UNIV_PAGE_SIZE))); - - buf_lru_switched_on_innodb_mon = TRUE; + ib::warn() << "Over 67 percent of the buffer pool is" + " occupied by lock heaps or the adaptive hash" + " index! Check that your transactions do not" + " set too many row locks. Your buffer pool" + " size is " + << (buf_pool->curr_size + / (1024 * 1024 / UNIV_PAGE_SIZE)) + << " MB. Maybe you should make the buffer pool" + " bigger?. Starting the InnoDB Monitor to print" + " diagnostics, including lock heap and hash" + " index sizes."; + + buf_lru_switched_on_innodb_mon = true; srv_print_innodb_monitor = TRUE; os_event_set(srv_monitor_event); } + } else if (buf_lru_switched_on_innodb_mon) { /* Switch off the InnoDB Monitor; this is a simple way @@ -1220,7 +1256,7 @@ buf_LRU_check_size_of_non_data_objects( but may also surprise users if the user also switched on the monitor! */ - buf_lru_switched_on_innodb_mon = FALSE; + buf_lru_switched_on_innodb_mon = false; srv_print_innodb_monitor = FALSE; } } @@ -1248,19 +1284,18 @@ we put it to free list to be used. 
* scan LRU list even if buf_pool->try_LRU_scan is not set * iteration > 1: * same as iteration 1 but sleep 10ms -@return the free control block, in state BUF_BLOCK_READY_FOR_USE */ -UNIV_INTERN +@return the free control block, in state BUF_BLOCK_READY_FOR_USE */ buf_block_t* buf_LRU_get_free_block( /*===================*/ buf_pool_t* buf_pool) /*!< in/out: buffer pool instance */ { buf_block_t* block = NULL; - ibool freed = FALSE; + bool freed = false; ulint n_iterations = 0; ulint flush_failures = 0; - ibool mon_value_was = FALSE; - ibool started_monitor = FALSE; + bool mon_value_was = false; + bool started_monitor = false; MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH); loop: @@ -1271,7 +1306,7 @@ loop: /* If there is a block in the free list, take it */ block = buf_LRU_get_free_only(buf_pool); - if (block) { + if (block != NULL) { buf_pool_mutex_exit(buf_pool); ut_ad(buf_pool_from_block(block) == buf_pool); @@ -1282,18 +1317,21 @@ loop: static_cast(mon_value_was); } + block->skip_flush_check = false; + block->page.flush_observer = NULL; return(block); } - freed = FALSE; + MONITOR_INC( MONITOR_LRU_GET_FREE_LOOPS ); + freed = false; if (buf_pool->try_LRU_scan || n_iterations > 0) { /* If no block was in the free list, search from the end of the LRU list and try to free a block there. If we are doing for the first time we'll scan only tail of the LRU list otherwise we scan the whole LRU list. */ - freed = buf_LRU_scan_and_free_block(buf_pool, - n_iterations > 0); + freed = buf_LRU_scan_and_free_block( + buf_pool, n_iterations > 0); if (!freed && n_iterations == 0) { /* Tell other threads that there is no point @@ -1312,42 +1350,33 @@ loop: if (freed) { goto loop; - } - if (n_iterations > 20) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: difficult to find free blocks in\n" - "InnoDB: the buffer pool (%lu search iterations)!\n" - "InnoDB: %lu failed attempts to flush a page!" - " Consider\n" - "InnoDB: increasing the buffer pool size.\n" - "InnoDB: It is also possible that" - " in your Unix version\n" - "InnoDB: fsync is very slow, or" - " completely frozen inside\n" - "InnoDB: the OS kernel. Then upgrading to" - " a newer version\n" - "InnoDB: of your operating system may help." - " Look at the\n" - "InnoDB: number of fsyncs in diagnostic info below.\n" - "InnoDB: Pending flushes (fsync) log: %lu;" - " buffer pool: %lu\n" - "InnoDB: %lu OS file reads, %lu OS file writes," - " %lu OS fsyncs\n" - "InnoDB: Starting InnoDB Monitor to print further\n" - "InnoDB: diagnostics to the standard output.\n", - (ulong) n_iterations, - (ulong) flush_failures, - (ulong) fil_n_pending_log_flushes, - (ulong) fil_n_pending_tablespace_flushes, - (ulong) os_n_file_reads, (ulong) os_n_file_writes, - (ulong) os_n_fsyncs); + if (n_iterations > 20 + && srv_buf_pool_old_size == srv_buf_pool_size) { + + ib::warn() << "Difficult to find free blocks in the buffer pool" + " (" << n_iterations << " search iterations)! " + << flush_failures << " failed attempts to" + " flush a page! Consider increasing the buffer pool" + " size. It is also possible that in your Unix version" + " fsync is very slow, or completely frozen inside" + " the OS kernel. Then upgrading to a newer version" + " of your operating system may help. Look at the" + " number of fsyncs in diagnostic info below." + " Pending flushes (fsync) log: " + << fil_n_pending_log_flushes + << "; buffer pool: " + << fil_n_pending_tablespace_flushes + << ". 
" << os_n_file_reads << " OS file reads, " + << os_n_file_writes << " OS file writes, " + << os_n_fsyncs + << " OS fsyncs. Starting InnoDB Monitor to print" + " further diagnostics to the standard output."; mon_value_was = srv_print_innodb_monitor; - started_monitor = TRUE; - srv_print_innodb_monitor = TRUE; + started_monitor = true; + srv_print_innodb_monitor = true; os_event_set(srv_monitor_event); } @@ -1355,7 +1384,13 @@ loop: find a free block then we should sleep here to let the page_cleaner do an LRU batch for us. */ + if (!srv_read_only_mode) { + os_event_set(buf_flush_event); + } + if (n_iterations > 1) { + + MONITOR_INC( MONITOR_LRU_GET_FREE_WAITS ); os_thread_sleep(10000); } @@ -1363,11 +1398,13 @@ loop: This call will flush one page from the LRU and put it on the free list. That means that the free block is up for grabs for all user threads. + TODO: A more elegant way would have been to return the freed up block to the caller here but the code that deals with removing the block from page_hash and LRU_list is fairly involved (particularly in case of compressed pages). We can do that in a separate patch sometime in future. */ + if (!buf_flush_single_page_from_LRU(buf_pool)) { MONITOR_INC(MONITOR_LRU_SINGLE_FLUSH_FAILURE_COUNT); ++flush_failures; @@ -1457,8 +1494,6 @@ buf_LRU_old_init( /*=============*/ buf_pool_t* buf_pool) { - buf_page_t* bpage; - ut_ad(buf_pool_mutex_own(buf_pool)); ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN); @@ -1466,10 +1501,13 @@ buf_LRU_old_init( the adjust function to move the LRU_old pointer to the right position */ - for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage != NULL; + for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU); + bpage != NULL; bpage = UT_LIST_GET_PREV(LRU, bpage)) { + ut_ad(bpage->in_LRU_list); ut_ad(buf_page_in_file(bpage)); + /* This loop temporarily violates the assertions of buf_page_set_old(). */ bpage->old = TRUE; @@ -1491,24 +1529,21 @@ buf_unzip_LRU_remove_block_if_needed( { buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - ut_ad(buf_pool); - ut_ad(bpage); ut_ad(buf_page_in_file(bpage)); ut_ad(buf_pool_mutex_own(buf_pool)); if (buf_page_belongs_to_unzip_LRU(bpage)) { - buf_block_t* block = (buf_block_t*) bpage; + buf_block_t* block = reinterpret_cast(bpage); ut_ad(block->in_unzip_LRU_list); ut_d(block->in_unzip_LRU_list = FALSE); - UT_LIST_REMOVE(unzip_LRU, buf_pool->unzip_LRU, block); + UT_LIST_REMOVE(buf_pool->unzip_LRU, block); } } /******************************************************************//** Adjust LRU hazard pointers if needed. */ - void buf_LRU_adjust_hp( /*==============*/ @@ -1529,10 +1564,7 @@ buf_LRU_remove_block( buf_page_t* bpage) /*!< in: control block */ { buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - ulint zip_size; - ut_ad(buf_pool); - ut_ad(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); ut_a(buf_page_in_file(bpage)); @@ -1546,7 +1578,7 @@ buf_LRU_remove_block( /* If the LRU_old pointer is defined and points to just this block, move it backward one step */ - if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) { + if (bpage == buf_pool->LRU_old) { /* Below: the previous block is guaranteed to exist, because the LRU_old pointer is only allowed to differ @@ -1566,11 +1598,10 @@ buf_LRU_remove_block( } /* Remove the block from the LRU list */ - UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage); + UT_LIST_REMOVE(buf_pool->LRU, bpage); ut_d(bpage->in_LRU_list = FALSE); - zip_size = page_zip_get_size(&bpage->zip); - buf_pool->stat.LRU_bytes -= zip_size ? 
zip_size : UNIV_PAGE_SIZE; + buf_pool->stat.LRU_bytes -= bpage->size.physical(); buf_unzip_LRU_remove_block_if_needed(bpage); @@ -1578,8 +1609,10 @@ buf_LRU_remove_block( clear the "old" flags and return */ if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) { - for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL; + for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU); + bpage != NULL; bpage = UT_LIST_GET_NEXT(LRU, bpage)) { + /* This loop temporarily violates the assertions of buf_page_set_old(). */ bpage->old = FALSE; @@ -1605,7 +1638,6 @@ buf_LRU_remove_block( /******************************************************************//** Adds a block to the LRU list of decompressed zip pages. */ -UNIV_INTERN void buf_unzip_LRU_add_block( /*====================*/ @@ -1615,8 +1647,6 @@ buf_unzip_LRU_add_block( { buf_pool_t* buf_pool = buf_pool_from_block(block); - ut_ad(buf_pool); - ut_ad(block); ut_ad(buf_pool_mutex_own(buf_pool)); ut_a(buf_page_belongs_to_unzip_LRU(&block->page)); @@ -1625,18 +1655,17 @@ buf_unzip_LRU_add_block( ut_d(block->in_unzip_LRU_list = TRUE); if (old) { - UT_LIST_ADD_LAST(unzip_LRU, buf_pool->unzip_LRU, block); + UT_LIST_ADD_LAST(buf_pool->unzip_LRU, block); } else { - UT_LIST_ADD_FIRST(unzip_LRU, buf_pool->unzip_LRU, block); + UT_LIST_ADD_FIRST(buf_pool->unzip_LRU, block); } } /******************************************************************//** -Adds a block to the LRU list end. Please make sure that the zip_size is -already set into the page zip when invoking the function, so that we -can get correct zip_size from the buffer page when adding a block -into LRU */ -UNIV_INLINE +Adds a block to the LRU list end. Please make sure that the page_size is +already set when invoking the function, so that we can get correct +page_size from the buffer page when adding a block into LRU */ +static void buf_LRU_add_block_to_end_low( /*=========================*/ @@ -1644,14 +1673,12 @@ buf_LRU_add_block_to_end_low( { buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - ut_ad(buf_pool); - ut_ad(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); ut_a(buf_page_in_file(bpage)); ut_ad(!bpage->in_LRU_list); - UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage); + UT_LIST_ADD_LAST(buf_pool->LRU, bpage); ut_d(bpage->in_LRU_list = TRUE); incr_LRU_size_in_bytes(bpage, buf_pool); @@ -1684,10 +1711,9 @@ buf_LRU_add_block_to_end_low( } /******************************************************************//** -Adds a block to the LRU list. Please make sure that the zip_size is -already set into the page zip when invoking the function, so that we -can get correct zip_size from the buffer page when adding a block -into LRU */ +Adds a block to the LRU list. 
Please make sure that the page_size is +already set when invoking the function, so that we can get correct +page_size from the buffer page when adding a block into LRU */ UNIV_INLINE void buf_LRU_add_block_low( @@ -1707,7 +1733,7 @@ buf_LRU_add_block_low( if (!old || (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN)) { - UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage); + UT_LIST_ADD_FIRST(buf_pool->LRU, bpage); bpage->freed_page_clock = buf_pool->freed_page_clock; } else { @@ -1720,8 +1746,9 @@ buf_LRU_add_block_low( ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old) || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old); #endif /* UNIV_LRU_DEBUG */ - UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old, - bpage); + UT_LIST_INSERT_AFTER(buf_pool->LRU, buf_pool->LRU_old, + bpage); + buf_pool->LRU_old_len++; } @@ -1756,11 +1783,9 @@ buf_LRU_add_block_low( } /******************************************************************//** -Adds a block to the LRU list. Please make sure that the zip_size is -already set into the page zip when invoking the function, so that we -can get correct zip_size from the buffer page when adding a block -into LRU */ -UNIV_INTERN +Adds a block to the LRU list. Please make sure that the page_size is +already set when invoking the function, so that we can get correct +page_size from the buffer page when adding a block into LRU */ void buf_LRU_add_block( /*==============*/ @@ -1776,7 +1801,6 @@ buf_LRU_add_block( /******************************************************************//** Moves a block to the start of the LRU list. */ -UNIV_INTERN void buf_LRU_make_block_young( /*=====================*/ @@ -1796,7 +1820,6 @@ buf_LRU_make_block_young( /******************************************************************//** Moves a block to the end of the LRU list. */ -UNIV_INTERN void buf_LRU_make_block_old( /*===================*/ @@ -1817,7 +1840,6 @@ accessible via bpage. The caller must hold buf_pool->mutex and must not hold any buf_page_get_mutex() when calling this function. @return true if freed, false otherwise. */ -UNIV_INTERN bool buf_LRU_free_page( /*===============*/ @@ -1827,11 +1849,10 @@ buf_LRU_free_page( { buf_page_t* b = NULL; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - const ulint fold = buf_page_address_fold(bpage->space, - bpage->offset); - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, bpage->id); + + BPageMutex* block_mutex = buf_page_get_mutex(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(buf_page_in_file(bpage)); @@ -1842,12 +1863,12 @@ buf_LRU_free_page( if (!buf_page_can_relocate(bpage)) { - /* Do not free buffer fixed or I/O-fixed blocks. */ + /* Do not free buffer fixed and I/O-fixed blocks. 
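// (Illustration, not part of the patch.)  buf_LRU_add_block_low() above is
// the midpoint-insertion rule: a block flagged "old" is linked in right
// behind the LRU_old midpoint instead of at the hot head, so a single
// large scan cannot flush out the frequently used pages.  A compact
// sketch with std::list; page numbers stand in for buf_page_t, and 512 is
// only a placeholder for BUF_LRU_OLD_MIN_LEN, which is defined in the
// buffer-pool headers rather than in this hunk.

#include <cstddef>
#include <iterator>
#include <list>

struct lru_sketch {
	std::list<unsigned> lru;                // front = most recently used
	std::list<unsigned>::iterator lru_old;  // the "old" boundary
	std::size_t lru_old_len = 0;
	static constexpr std::size_t OLD_MIN_LEN = 512;  // placeholder value

	void add_block(unsigned page_no, bool old) {
		if (!old || lru.size() < OLD_MIN_LEN) {
			// Young block, or list still too short to keep an
			// old section: insert at the head.
			lru.push_front(page_no);
			if (lru.size() == OLD_MIN_LEN) {
				// Here the real code runs buf_LRU_old_init()
				// and buf_LRU_old_adjust_len().
				lru_old = lru.begin();
			}
		} else {
			// Old block: insert just after the midpoint, so it
			// must be accessed again before it can become young.
			lru.insert(std::next(lru_old), page_no);
			++lru_old_len;
		}
	}
};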
*/ goto func_exit; } #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0); + ut_a(ibuf_count_get(bpage->id) == 0); #endif /* UNIV_IBUF_COUNT_DEBUG */ if (zip || !bpage->zip.data) { @@ -1878,28 +1899,19 @@ func_exit: ut_ad(bpage->in_LRU_list); ut_ad(!bpage->in_flush_list == !bpage->oldest_modification); -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, "Putting space %lu page %lu to free list\n", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); - } -#endif /* UNIV_DEBUG */ + DBUG_PRINT("ib_buf", ("free page %u:%u", + bpage->id.space(), bpage->id.page_no())); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)); ut_ad(buf_page_can_relocate(bpage)); if (!buf_LRU_block_remove_hashed(bpage, zip)) { return(true); } -#ifdef UNIV_SYNC_DEBUG /* buf_LRU_block_remove_hashed() releases the hash_lock */ - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX) - && !rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X) + && !rw_lock_own(hash_lock, RW_LOCK_S)); /* We have just freed a BUF_BLOCK_FILE_PAGE. If b != NULL then it was a compressed page with an uncompressed frame and @@ -1908,20 +1920,22 @@ func_exit: into the LRU and page_hash (and possibly flush_list). if b == NULL then it was a regular page that has been freed */ - if (b) { + if (b != NULL) { buf_page_t* prev_b = UT_LIST_GET_PREV(LRU, b); rw_lock_x_lock(hash_lock); + mutex_enter(block_mutex); - ut_a(!buf_page_hash_get_low( - buf_pool, b->space, b->offset, fold)); + ut_a(!buf_page_hash_get_low(buf_pool, b->id)); b->state = b->oldest_modification ? BUF_BLOCK_ZIP_DIRTY : BUF_BLOCK_ZIP_PAGE; - UNIV_MEM_DESC(b->zip.data, - page_zip_get_size(&b->zip)); + + ut_ad(b->size.is_compressed()); + + UNIV_MEM_DESC(b->zip.data, b->size.physical()); /* The fields in_page_hash and in_LRU_list of the to-be-freed block descriptor should have @@ -1930,6 +1944,7 @@ func_exit: invokes buf_LRU_remove_block(). */ ut_ad(!bpage->in_page_hash); ut_ad(!bpage->in_LRU_list); + /* bpage->state was BUF_BLOCK_FILE_PAGE because b != NULL. The type cast below is thus valid. */ ut_ad(!((buf_block_t*) bpage)->in_unzip_LRU_list); @@ -1940,25 +1955,24 @@ func_exit: ut_ad(b->in_page_hash); ut_ad(b->in_LRU_list); - HASH_INSERT(buf_page_t, hash, - buf_pool->page_hash, fold, b); + HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, + b->id.fold(), b); /* Insert b where bpage was in the LRU list. */ - if (UNIV_LIKELY(prev_b != NULL)) { + if (prev_b != NULL) { ulint lru_len; ut_ad(prev_b->in_LRU_list); ut_ad(buf_page_in_file(prev_b)); - UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, - prev_b, b); + + UT_LIST_INSERT_AFTER(buf_pool->LRU, prev_b, b); incr_LRU_size_in_bytes(b, buf_pool); if (buf_page_is_old(b)) { buf_pool->LRU_old_len++; - if (UNIV_UNLIKELY - (buf_pool->LRU_old - == UT_LIST_GET_NEXT(LRU, b))) { + if (buf_pool->LRU_old + == UT_LIST_GET_NEXT(LRU, b)) { buf_pool->LRU_old = b; } @@ -1997,33 +2011,27 @@ func_exit: } bpage->zip.data = NULL; + page_zip_set_size(&bpage->zip, 0); + + bpage->size.copy_from(page_size_t(bpage->size.logical(), + bpage->size.logical(), + false)); + mutex_exit(block_mutex); /* Prevent buf_page_get_gen() from decompressing the block while we release buf_pool->mutex and block_mutex. 
*/ block_mutex = buf_page_get_mutex(b); + mutex_enter(block_mutex); + buf_page_set_sticky(b); + mutex_exit(block_mutex); rw_lock_x_unlock(hash_lock); - - } else { - - /* There can be multiple threads doing an LRU scan to - free a block. The page_cleaner thread can be doing an - LRU batch whereas user threads can potentially be doing - multiple single page flushes. As we release - buf_pool->mutex below we need to make sure that no one - else considers this block as a victim for page - replacement. This block is already out of page_hash - and we are about to remove it from the LRU list and put - it on the free list. */ - mutex_enter(block_mutex); - buf_page_set_sticky(bpage); - mutex_exit(block_mutex); } buf_pool_mutex_exit(buf_pool); @@ -2040,8 +2048,8 @@ func_exit: UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame, UNIV_PAGE_SIZE); - if (b) { - ib_uint32_t checksum; + if (b != NULL) { + /* Compute and stamp the compressed page checksum while not holding any mutex. The block is already half-freed @@ -2049,12 +2057,13 @@ func_exit: buf_pool->page_hash, thus inaccessible by any other thread. */ - checksum = static_cast( - page_zip_calc_checksum( - b->zip.data, - page_zip_get_size(&b->zip), - static_cast( - srv_checksum_algorithm))); + ut_ad(b->size.is_compressed()); + + const uint32_t checksum = page_zip_calc_checksum( + b->zip.data, + b->size.physical(), + static_cast( + srv_checksum_algorithm)); mach_write_to_4(b->zip.data + FIL_PAGE_SPACE_OR_CHKSUM, checksum); @@ -2062,17 +2071,21 @@ func_exit: buf_pool_mutex_enter(buf_pool); - mutex_enter(block_mutex); - buf_page_unset_sticky(b != NULL ? b : bpage); - mutex_exit(block_mutex); + if (b != NULL) { + mutex_enter(block_mutex); + + buf_page_unset_sticky(b); + + mutex_exit(block_mutex); + } buf_LRU_block_free_hashed_page((buf_block_t*) bpage); + return(true); } /******************************************************************//** Puts a block back to the free list. 
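// (Illustration, not part of the patch.)  In buf_LRU_free_page() above the
// compressed-page checksum is computed only after the block is no longer
// reachable through page_hash, so no mutex is held while hashing, and the
// result is stamped into the page with mach_write_to_4(), which stores the
// most significant byte first.  A standalone sketch of that stamping step;
// write_be32() and stamp_page_checksum() are names invented here, and the
// checksum callback stands in for page_zip_calc_checksum().

#include <cstddef>
#include <cstdint>

// Equivalent of mach_write_to_4(): big-endian store of a 32-bit value.
inline void write_be32(unsigned char* dst, std::uint32_t value)
{
	dst[0] = static_cast<unsigned char>(value >> 24);
	dst[1] = static_cast<unsigned char>(value >> 16);
	dst[2] = static_cast<unsigned char>(value >> 8);
	dst[3] = static_cast<unsigned char>(value);
}

void stamp_page_checksum(
	unsigned char* zip_frame, std::size_t physical_size,
	std::uint32_t (*calc_checksum)(const unsigned char*, std::size_t))
{
	const std::uint32_t checksum = calc_checksum(zip_frame, physical_size);

	// FIL_PAGE_SPACE_OR_CHKSUM is the first field of the page.
	write_be32(zip_frame, checksum);
}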
*/ -UNIV_INTERN void buf_LRU_block_free_non_file_page( /*=============================*/ @@ -2081,9 +2094,8 @@ buf_LRU_block_free_non_file_page( void* data; buf_pool_t* buf_pool = buf_pool_from_block(block); - ut_ad(block); ut_ad(buf_pool_mutex_own(buf_pool)); - ut_ad(mutex_own(&block->mutex)); + ut_ad(buf_page_mutex_own(block)); switch (buf_block_get_state(block)) { case BUF_BLOCK_MEMORY: @@ -2112,24 +2124,41 @@ buf_LRU_block_free_non_file_page( /* Wipe page_no and space_id */ memset(block->frame + FIL_PAGE_OFFSET, 0xfe, 4); memset(block->frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 0xfe, 4); -#endif +#endif /* UNIV_DEBUG */ data = block->page.zip.data; - if (data) { + if (data != NULL) { block->page.zip.data = NULL; - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); buf_pool_mutex_exit_forbid(buf_pool); - buf_buddy_free( - buf_pool, data, page_zip_get_size(&block->page.zip)); + ut_ad(block->page.size.is_compressed()); + + buf_buddy_free(buf_pool, data, block->page.size.physical()); buf_pool_mutex_exit_allow(buf_pool); - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); + page_zip_set_size(&block->page.zip, 0); + + block->page.size.copy_from( + page_size_t(block->page.size.logical(), + block->page.size.logical(), + false)); } - UT_LIST_ADD_FIRST(list, buf_pool->free, (&block->page)); - ut_d(block->page.in_free_list = TRUE); + if (buf_pool->curr_size < buf_pool->old_size + && UT_LIST_GET_LEN(buf_pool->withdraw) < buf_pool->withdraw_target + && buf_block_will_withdrawn(buf_pool, block)) { + /* This should be withdrawn */ + UT_LIST_ADD_LAST( + buf_pool->withdraw, + &block->page); + ut_d(block->in_withdraw_list = TRUE); + } else { + UT_LIST_ADD_FIRST(buf_pool->free, &block->page); + ut_d(block->page.in_free_list = TRUE); + } UNIV_MEM_ASSERT_AND_FREE(block->frame, UNIV_PAGE_SIZE); } @@ -2158,20 +2187,16 @@ buf_LRU_block_remove_hashed( bool zip) /*!< in: true if should remove also the compressed page of an uncompressed page */ { - ulint fold; const buf_page_t* hashed_bpage; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); rw_lock_t* hash_lock; - ut_ad(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); ut_ad(mutex_own(buf_page_get_mutex(bpage))); - fold = buf_page_address_fold(bpage->space, bpage->offset); - hash_lock = buf_page_hash_lock_get(buf_pool, fold); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + hash_lock = buf_page_hash_lock_get(buf_pool, bpage->id); + + ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)); ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE); ut_a(bpage->buf_fix_count == 0); @@ -2188,13 +2213,11 @@ buf_LRU_block_remove_hashed( buf_block_modify_clock_inc((buf_block_t*) bpage); if (bpage->zip.data) { const page_t* page = ((buf_block_t*) bpage)->frame; - const ulint zip_size - = page_zip_get_size(&bpage->zip); ut_a(!zip || bpage->oldest_modification == 0); + ut_ad(bpage->size.is_compressed()); - switch (UNIV_EXPECT(fil_page_get_type(page), - FIL_PAGE_INDEX)) { + switch (fil_page_get_type(page)) { case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: @@ -2207,13 +2230,14 @@ buf_LRU_block_remove_hashed( to the compressed page, which will be preserved. 
*/ memcpy(bpage->zip.data, page, - zip_size); + bpage->size.physical()); } break; case FIL_PAGE_TYPE_ZBLOB: case FIL_PAGE_TYPE_ZBLOB2: break; case FIL_PAGE_INDEX: + case FIL_PAGE_RTREE: #ifdef UNIV_ZIP_DEBUG ut_a(page_zip_validate( &bpage->zip, page, @@ -2221,14 +2245,16 @@ buf_LRU_block_remove_hashed( #endif /* UNIV_ZIP_DEBUG */ break; default: - ut_print_timestamp(stderr); - fputs(" InnoDB: ERROR: The compressed page" - " to be evicted seems corrupt:", stderr); - ut_print_buf(stderr, page, zip_size); - fputs("\nInnoDB: Possibly older version" - " of the page:", stderr); + ib::error() << "The compressed page to be" + " evicted seems corrupt:"; + ut_print_buf(stderr, page, + bpage->size.logical()); + + ib::error() << "Possibly older version of" + " the page:"; + ut_print_buf(stderr, bpage->zip.data, - zip_size); + bpage->size.physical()); putc('\n', stderr); ut_error; } @@ -2238,8 +2264,10 @@ buf_LRU_block_remove_hashed( /* fall through */ case BUF_BLOCK_ZIP_PAGE: ut_a(bpage->oldest_modification == 0); - UNIV_MEM_ASSERT_W(bpage->zip.data, - page_zip_get_size(&bpage->zip)); + if (bpage->size.is_compressed()) { + UNIV_MEM_ASSERT_W(bpage->zip.data, + bpage->size.physical()); + } break; case BUF_BLOCK_POOL_WATCH: case BUF_BLOCK_ZIP_DIRTY: @@ -2251,15 +2279,12 @@ buf_LRU_block_remove_hashed( break; } - hashed_bpage = buf_page_hash_get_low(buf_pool, bpage->space, - bpage->offset, fold); + hashed_bpage = buf_page_hash_get_low(buf_pool, bpage->id); + + if (bpage != hashed_bpage) { + ib::error() << "Page " << bpage->id + << " not found in the hash table"; - if (UNIV_UNLIKELY(bpage != hashed_bpage)) { - fprintf(stderr, - "InnoDB: Error: page %lu %lu not found" - " in the hash table\n", - (ulong) bpage->space, - (ulong) bpage->offset); #ifdef UNIV_DEBUG fprintf(stderr, "InnoDB: in_page_hash %lu in_zip_hash %lu\n" @@ -2268,24 +2293,21 @@ buf_LRU_block_remove_hashed( bpage->in_page_hash, bpage->in_zip_hash, bpage->in_free_list, bpage->in_flush_list, bpage->in_LRU_list, bpage->zip.data, - buf_page_get_zip_size(bpage), + bpage->size.logical(), buf_page_get_state(bpage)); #else fprintf(stderr, "InnoDB: zip.data %p zip_size %lu page_state %d\n", bpage->zip.data, - buf_page_get_zip_size(bpage), + bpage->size.logical(), buf_page_get_state(bpage)); #endif if (hashed_bpage) { - fprintf(stderr, - "InnoDB: In hash table we find block" - " %p of %lu %lu which is not %p\n", - (const void*) hashed_bpage, - (ulong) hashed_bpage->space, - (ulong) hashed_bpage->offset, - (const void*) bpage); + + ib::error() << "In hash table we find block " + << hashed_bpage << " of " << hashed_bpage->id + << " which is not " << bpage; } #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG @@ -2303,26 +2325,28 @@ buf_LRU_block_remove_hashed( ut_ad(!bpage->in_zip_hash); ut_ad(bpage->in_page_hash); ut_d(bpage->in_page_hash = FALSE); - HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage); + + HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, bpage->id.fold(), + bpage); + switch (buf_page_get_state(bpage)) { case BUF_BLOCK_ZIP_PAGE: ut_ad(!bpage->in_free_list); ut_ad(!bpage->in_flush_list); ut_ad(!bpage->in_LRU_list); ut_a(bpage->zip.data); - ut_a(buf_page_get_zip_size(bpage)); + ut_a(bpage->size.is_compressed()); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG - UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage); + UT_LIST_REMOVE(buf_pool->zip_clean, bpage); #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ mutex_exit(&buf_pool->zip_mutex); rw_lock_x_unlock(hash_lock); buf_pool_mutex_exit_forbid(buf_pool); - buf_buddy_free( - buf_pool, 
bpage->zip.data, - page_zip_get_size(&bpage->zip)); + buf_buddy_free(buf_pool, bpage->zip.data, + bpage->size.physical()); buf_pool_mutex_exit_allow(buf_pool); buf_page_free_descriptor(bpage); @@ -2337,11 +2361,6 @@ buf_LRU_block_remove_hashed( UNIV_PAGE_SIZE); buf_page_set_state(bpage, BUF_BLOCK_REMOVE_HASH); - if (buf_pool->flush_rbt == NULL) { - bpage->space = ULINT32_UNDEFINED; - bpage->offset = ULINT32_UNDEFINED; - } - /* Question: If we release bpage and hash mutex here then what protects us against: 1) Some other thread buffer fixing this page @@ -2374,12 +2393,16 @@ buf_LRU_block_remove_hashed( ut_ad(!bpage->in_LRU_list); buf_pool_mutex_exit_forbid(buf_pool); - buf_buddy_free( - buf_pool, data, - page_zip_get_size(&bpage->zip)); + buf_buddy_free(buf_pool, data, bpage->size.physical()); buf_pool_mutex_exit_allow(buf_pool); + page_zip_set_size(&bpage->zip, 0); + + bpage->size.copy_from( + page_size_t(bpage->size.logical(), + bpage->size.logical(), + false)); } return(true); @@ -2406,21 +2429,23 @@ buf_LRU_block_free_hashed_page( buf_block_t* block) /*!< in: block, must contain a file page and be in a state where it can be freed */ { -#ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_block(block); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); + + if (buf_pool->flush_rbt == NULL) { + block->page.id.reset(ULINT32_UNDEFINED, ULINT32_UNDEFINED); + } + buf_block_set_state(block, BUF_BLOCK_MEMORY); buf_LRU_block_free_non_file_page(block); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } /******************************************************************//** Remove one page from LRU list and put it to free list */ -UNIV_INTERN void buf_LRU_free_one_page( /*==================*/ @@ -2429,10 +2454,9 @@ buf_LRU_free_one_page( may or may not be a hash index to the page */ { buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - const ulint fold = buf_page_address_fold(bpage->space, - bpage->offset); - rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold); - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + + rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, bpage->id); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); @@ -2444,16 +2468,15 @@ buf_LRU_free_one_page( } /* buf_LRU_block_remove_hashed() releases hash_lock and block_mutex */ -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX) - && !rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X) + && !rw_lock_own(hash_lock, RW_LOCK_S)); + ut_ad(!mutex_own(block_mutex)); } /**********************************************************************//** Updates buf_pool->LRU_old_ratio for one buffer pool instance. -@return updated old_pct */ +@return updated old_pct */ static uint buf_LRU_old_ratio_update_instance( @@ -2481,7 +2504,7 @@ buf_LRU_old_ratio_update_instance( buf_pool->LRU_old_ratio = ratio; if (UT_LIST_GET_LEN(buf_pool->LRU) - >= BUF_LRU_OLD_MIN_LEN) { + >= BUF_LRU_OLD_MIN_LEN) { buf_LRU_old_adjust_len(buf_pool); } @@ -2498,9 +2521,8 @@ buf_LRU_old_ratio_update_instance( /**********************************************************************//** Updates buf_pool->LRU_old_ratio. 
-@return updated old_pct */ -UNIV_INTERN -ulint +@return updated old_pct */ +uint buf_LRU_old_ratio_update( /*=====================*/ uint old_pct,/*!< in: Reserve this percentage of @@ -2509,10 +2531,9 @@ buf_LRU_old_ratio_update( FALSE=just assign buf_pool->LRU_old_ratio during the initialization of InnoDB */ { - ulint i; - ulint new_ratio = 0; + uint new_ratio = 0; - for (i = 0; i < srv_buf_pool_instances; i++) { + for (ulint i = 0; i < srv_buf_pool_instances; i++) { buf_pool_t* buf_pool; buf_pool = buf_pool_from_array(i); @@ -2527,24 +2548,22 @@ buf_LRU_old_ratio_update( /********************************************************************//** Update the historical stats that we are collecting for LRU eviction policy at the end of each interval. */ -UNIV_INTERN void buf_LRU_stat_update(void) /*=====================*/ { - ulint i; buf_LRU_stat_t* item; buf_pool_t* buf_pool; - ibool evict_started = FALSE; + bool evict_started = FALSE; buf_LRU_stat_t cur_stat; /* If we haven't started eviction yet then don't update stats. */ - for (i = 0; i < srv_buf_pool_instances; i++) { + for (ulint i = 0; i < srv_buf_pool_instances; i++) { buf_pool = buf_pool_from_array(i); if (buf_pool->freed_page_clock != 0) { - evict_started = TRUE; + evict_started = true; break; } } @@ -2586,33 +2605,32 @@ buf_LRU_validate_instance( /*======================*/ buf_pool_t* buf_pool) { - buf_page_t* bpage; - buf_block_t* block; ulint old_len; ulint new_len; - ut_ad(buf_pool); buf_pool_mutex_enter(buf_pool); if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) { ut_a(buf_pool->LRU_old); old_len = buf_pool->LRU_old_len; + new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU) * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV, UT_LIST_GET_LEN(buf_pool->LRU) - (BUF_LRU_OLD_TOLERANCE + BUF_LRU_NON_OLD_MIN_LEN)); + ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE); ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE); } - UT_LIST_VALIDATE(LRU, buf_page_t, buf_pool->LRU, CheckInLRUList()); + CheckInLRUList::validate(buf_pool); old_len = 0; - for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); + for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL; bpage = UT_LIST_GET_NEXT(LRU, bpage)) { @@ -2650,21 +2668,19 @@ buf_LRU_validate_instance( ut_a(buf_pool->LRU_old_len == old_len); - UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free, CheckInFreeList()); + CheckInFreeList::validate(buf_pool); - for (bpage = UT_LIST_GET_FIRST(buf_pool->free); + for (buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->free); bpage != NULL; bpage = UT_LIST_GET_NEXT(list, bpage)) { ut_a(buf_page_get_state(bpage) == BUF_BLOCK_NOT_USED); } - UT_LIST_VALIDATE( - unzip_LRU, buf_block_t, buf_pool->unzip_LRU, - CheckUnzipLRUAndLRUList()); + CheckUnzipLRUAndLRUList::validate(buf_pool); - for (block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU); - block; + for (buf_block_t* block = UT_LIST_GET_FIRST(buf_pool->unzip_LRU); + block != NULL; block = UT_LIST_GET_NEXT(unzip_LRU, block)) { ut_ad(block->in_unzip_LRU_list); @@ -2677,15 +2693,12 @@ buf_LRU_validate_instance( /**********************************************************************//** Validates the LRU list. 
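/* [Editorial sketch -- not part of the patch.] The validation above asserts
   that the length of the "old" LRU sublist stays within a small tolerance of
   the ratio-derived target. A standalone model of that target and check; the
   constant values mirror the InnoDB defaults but are illustrative here, and
   the helper names are invented: */
#include <algorithm>

typedef unsigned long ulint;

static const ulint OLD_RATIO_DIV   = 1024; /* ratio is given in 1/1024ths       */
static const ulint OLD_TOLERANCE   = 20;   /* permitted drift, in blocks        */
static const ulint NON_OLD_MIN_LEN = 5;    /* minimum size of the "new" sublist */

/* Target old-sublist length; assumes lru_len is large enough, as InnoDB only
   runs this check once the LRU list has reached a minimum length. */
static ulint expected_old_len(ulint lru_len, ulint old_ratio)
{
        return std::min(lru_len * old_ratio / OLD_RATIO_DIV,
                        lru_len - (OLD_TOLERANCE + NON_OLD_MIN_LEN));
}

/* The validator accepts old_len if it lies within OLD_TOLERANCE of the target
   (written without subtraction to avoid unsigned underflow). */
static bool old_len_ok(ulint old_len, ulint lru_len, ulint old_ratio)
{
        ulint target = expected_old_len(lru_len, old_ratio);
        return old_len + OLD_TOLERANCE >= target
               && old_len <= target + OLD_TOLERANCE;
}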
-@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool buf_LRU_validate(void) /*==================*/ { - ulint i; - - for (i = 0; i < srv_buf_pool_instances; i++) { + for (ulint i = 0; i < srv_buf_pool_instances; i++) { buf_pool_t* buf_pool; buf_pool = buf_pool_from_array(i); @@ -2699,25 +2712,21 @@ buf_LRU_validate(void) #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Prints the LRU list for one buffer pool instance. */ -UNIV_INTERN void buf_LRU_print_instance( /*===================*/ buf_pool_t* buf_pool) { - const buf_page_t* bpage; - - ut_ad(buf_pool); buf_pool_mutex_enter(buf_pool); - bpage = UT_LIST_GET_FIRST(buf_pool->LRU); - - while (bpage != NULL) { + for (const buf_page_t* bpage = UT_LIST_GET_FIRST(buf_pool->LRU); + bpage != NULL; + bpage = UT_LIST_GET_NEXT(LRU, bpage)) { mutex_enter(buf_page_get_mutex(bpage)); - fprintf(stderr, "BLOCK space %lu page %lu ", - (ulong) buf_page_get_space(bpage), - (ulong) buf_page_get_page_no(bpage)); + + fprintf(stderr, "BLOCK space %u page %u ", + bpage->id.space(), bpage->id.page_no()); if (buf_page_is_old(bpage)) { fputs("old ", stderr); @@ -2742,17 +2751,17 @@ buf_LRU_print_instance( case BUF_BLOCK_FILE_PAGE: frame = buf_block_get_frame((buf_block_t*) bpage); fprintf(stderr, "\ntype %lu" - " index id %llu\n", + " index id " UINT32PF "\n", (ulong) fil_page_get_type(frame), - (ullint) btr_page_get_index_id(frame)); + btr_page_get_index_id(frame)); break; case BUF_BLOCK_ZIP_PAGE: frame = bpage->zip.data; fprintf(stderr, "\ntype %lu size %lu" - " index id %llu\n", + " index id " UINT32PF "\n", (ulong) fil_page_get_type(frame), - (ulong) buf_page_get_zip_size(bpage), - (ullint) btr_page_get_index_id(frame)); + (ulong) bpage->size.physical(), + btr_page_get_index_id(frame)); break; default: @@ -2762,7 +2771,6 @@ buf_LRU_print_instance( } mutex_exit(buf_page_get_mutex(bpage)); - bpage = UT_LIST_GET_NEXT(LRU, bpage); } buf_pool_mutex_exit(buf_pool); @@ -2770,15 +2778,13 @@ buf_LRU_print_instance( /**********************************************************************//** Prints the LRU list. 
*/ -UNIV_INTERN void buf_LRU_print(void) /*===============*/ { - ulint i; - buf_pool_t* buf_pool; + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + buf_pool_t* buf_pool; - for (i = 0; i < srv_buf_pool_instances; i++) { buf_pool = buf_pool_from_array(i); buf_LRU_print_instance(buf_pool); } diff --git a/storage/innobase/buf/buf0mtflu.cc b/storage/innobase/buf/buf0mtflu.cc index e990ba785e7..06aaac39dd9 100644 --- a/storage/innobase/buf/buf0mtflu.cc +++ b/storage/innobase/buf/buf0mtflu.cc @@ -43,7 +43,6 @@ Modified 06/02/2014 Jan Lindström jan.lindstrom@skysql.com #include "ibuf0ibuf.h" #include "log0log.h" #include "os0file.h" -#include "os0sync.h" #include "trx0sys.h" #include "srv0mon.h" #include "mysql/plugin.h" @@ -122,7 +121,6 @@ typedef struct wrk_itm typedef struct thread_data { os_thread_id_t wthread_id; /*!< Identifier */ - os_thread_t wthread; /*!< Thread id */ wthr_status_t wt_status; /*!< Worker thread status */ } thread_data_t; @@ -130,7 +128,7 @@ typedef struct thread_data typedef struct thread_sync { /* Global variables used by all threads */ - os_fast_mutex_t thread_global_mtx; /*!< Mutex used protecting below + ib_mutex_t thread_global_mtx; /*!< Mutex used protecting below variables */ ulint n_threads; /*!< Number of threads */ ib_wqueue_t *wq; /*!< Work Queue */ @@ -149,7 +147,7 @@ typedef struct thread_sync static int mtflush_work_initialized = -1; static thread_sync_t* mtflush_ctx=NULL; -static os_fast_mutex_t mtflush_mtx; +static ib_mutex_t mtflush_mtx; /******************************************************************//** Set multi-threaded flush work initialized. */ @@ -211,7 +209,7 @@ buf_mtflu_flush_pool_instance( buf_pool_mutex_enter(work_item->wr.buf_pool); work_item->wr.min = UT_LIST_GET_LEN(work_item->wr.buf_pool->LRU); buf_pool_mutex_exit(work_item->wr.buf_pool); - work_item->wr.min = ut_min(srv_LRU_scan_depth,work_item->wr.min); + work_item->wr.min = ut_min((ulint)srv_LRU_scan_depth,(ulint)work_item->wr.min); } buf_flush_batch(work_item->wr.buf_pool, @@ -324,7 +322,7 @@ DECLARE_THREAD(mtflush_io_thread)( ulint i; /* Find correct slot for this thread */ - os_fast_mutex_lock(&(mtflush_io->thread_global_mtx)); + mutex_enter(&(mtflush_io->thread_global_mtx)); for(i=0; i < mtflush_io->n_threads; i ++) { if (mtflush_io->thread_data[i].wthread_id == os_thread_get_curr_id()) { break; @@ -333,7 +331,7 @@ DECLARE_THREAD(mtflush_io_thread)( ut_a(i <= mtflush_io->n_threads); this_thread_data = &mtflush_io->thread_data[i]; - os_fast_mutex_unlock(&(mtflush_io->thread_global_mtx)); + mutex_exit(&(mtflush_io->thread_global_mtx)); while (TRUE) { @@ -389,7 +387,7 @@ buf_mtflu_io_thread_exit(void) been processed. Thus, we can get this mutex if and only if work queue is empty. 
*/ - os_fast_mutex_lock(&mtflush_mtx); + mutex_enter(&mtflush_mtx); /* Make sure the work queue is empty */ ut_a(ib_wqueue_is_empty(mtflush_io->wq)); @@ -408,7 +406,7 @@ buf_mtflu_io_thread_exit(void) } /* Requests sent */ - os_fast_mutex_unlock(&mtflush_mtx); + mutex_exit(&mtflush_mtx); /* Wait until all work items on a work queue are processed */ while(!ib_wqueue_is_empty(mtflush_io->wq)) { @@ -440,7 +438,7 @@ buf_mtflu_io_thread_exit(void) ib_wqueue_nowait(mtflush_io->wq); } - os_fast_mutex_lock(&mtflush_mtx); + mutex_enter(&mtflush_mtx); ut_a(ib_wqueue_is_empty(mtflush_io->wq)); ut_a(ib_wqueue_is_empty(mtflush_io->wr_cq)); @@ -460,9 +458,9 @@ buf_mtflu_io_thread_exit(void) mem_heap_free(mtflush_io->wheap); mem_heap_free(mtflush_io->rheap); - os_fast_mutex_unlock(&mtflush_mtx); - os_fast_mutex_free(&mtflush_mtx); - os_fast_mutex_free(&mtflush_io->thread_global_mtx); + mutex_exit(&mtflush_mtx); + mutex_free(&mtflush_mtx); + mutex_free(&mtflush_io->thread_global_mtx); } /******************************************************************//** @@ -505,8 +503,8 @@ buf_mtflu_handler_init( mtflush_ctx->wheap = mtflush_heap; mtflush_ctx->rheap = mtflush_heap2; - os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &mtflush_ctx->thread_global_mtx); - os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &mtflush_mtx); + mutex_create(LATCH_ID_MTFLUSH_THREAD_MUTEX, &mtflush_ctx->thread_global_mtx); + mutex_create(LATCH_ID_MTFLUSH_MUTEX, &mtflush_mtx); /* Create threads for page-compression-flush */ for(i=0; i < n_threads; i++) { @@ -514,7 +512,7 @@ buf_mtflu_handler_init( mtflush_ctx->thread_data[i].wt_status = WTHR_INITIALIZED; - mtflush_ctx->thread_data[i].wthread = os_thread_create( + os_thread_create( mtflush_io_thread, ((void *) mtflush_ctx), &new_thread_id); @@ -647,11 +645,11 @@ buf_mtflu_flush_list( } /* This lock is to safequard against re-entry if any. */ - os_fast_mutex_lock(&mtflush_mtx); + mutex_enter(&mtflush_mtx); buf_mtflu_flush_work_items(srv_buf_pool_instances, cnt, BUF_FLUSH_LIST, min_n, lsn_limit); - os_fast_mutex_unlock(&mtflush_mtx); + mutex_exit(&mtflush_mtx); for (i = 0; i < srv_buf_pool_instances; i++) { if (n_processed) { @@ -704,10 +702,10 @@ buf_mtflu_flush_LRU_tail(void) } /* This lock is to safeguard against re-entry if any */ - os_fast_mutex_lock(&mtflush_mtx); + mutex_enter(&mtflush_mtx); buf_mtflu_flush_work_items(srv_buf_pool_instances, cnt, BUF_FLUSH_LRU, srv_LRU_scan_depth, 0); - os_fast_mutex_unlock(&mtflush_mtx); + mutex_exit(&mtflush_mtx); for (i = 0; i < srv_buf_pool_instances; i++) { total_flushed += cnt[i].flushed+cnt[i].evicted; diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 285fc465160..5de0412afb2 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2015. MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -24,11 +24,12 @@ The database buffer read Created 11/5/1995 Heikki Tuuri *******************************************************/ -#include "buf0rea.h" +#include "ha_prototypes.h" +#include +#include "buf0rea.h" #include "fil0fil.h" #include "mtr0mtr.h" - #include "buf0buf.h" #include "buf0flu.h" #include "buf0lru.h" @@ -39,8 +40,6 @@ Created 11/5/1995 Heikki Tuuri #include "os0file.h" #include "srv0start.h" #include "srv0srv.h" -#include "mysql/plugin.h" -#include "mysql/service_thd_wait.h" /** There must be at least this many pages in buf_pool in the area to start a random read-ahead */ @@ -91,62 +90,50 @@ buf_read_page_handle_error( buf_pool_mutex_exit(buf_pool); } -/********************************************************************//** -Low-level function which reads a page asynchronously from a file to the +/** Low-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there, in which case does nothing. Sets the io_fix flag and sets an exclusive lock on the buffer frame. The flag is cleared and the x-lock released by an i/o-handler thread. + +@param[out] err DB_SUCCESS, DB_TABLESPACE_DELETED or + DB_TABLESPACE_TRUNCATED if we are trying + to read from a non-existent tablespace, a + tablespace which is just now being dropped, + or a tablespace which is truncated +@param[in] sync true if synchronous aio is desired +@param[in] type IO type, SIMULATED, IGNORE_MISSING +@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ..., +@param[in] page_id page id +@param[in] unzip true=request uncompressed page @return 1 if a read request was queued, 0 if the page already resided in buf_pool, or if the page is in the doublewrite buffer blocks in which case it is never read into the pool, or if the tablespace does -not exist or is being dropped -@return 1 if read request is issued. 
0 if it is not */ +not exist or is being dropped */ static ulint buf_read_page_low( -/*==============*/ - dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED if we are - trying to read from a non-existent tablespace, or a - tablespace which is just now being dropped */ - bool sync, /*!< in: true if synchronous aio is desired */ - ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ..., - ORed to OS_AIO_SIMULATED_WAKE_LATER (see below - at read-ahead functions) */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - ibool unzip, /*!< in: TRUE=request uncompressed page */ - ib_int64_t tablespace_version, /*!< in: if the space memory object has - this timestamp different from what we are giving here, - treat the tablespace as dropped; this is a timestamp we - use to stop dangling page reads from a tablespace - which we have DISCARDed + IMPORTed back */ - ulint offset, /*!< in: page number */ + dberr_t* err, + bool sync, + ulint type, + ulint mode, + const page_id_t& page_id, + const page_size_t& page_size, + bool unzip, buf_page_t** rbpage) /*!< out: page */ { buf_page_t* bpage; - ulint wake_later; - ibool ignore_nonexistent_pages; *err = DB_SUCCESS; - wake_later = mode & OS_AIO_SIMULATED_WAKE_LATER; - mode = mode & ~OS_AIO_SIMULATED_WAKE_LATER; - - ignore_nonexistent_pages = mode & BUF_READ_IGNORE_NONEXISTENT_PAGES; - mode &= ~BUF_READ_IGNORE_NONEXISTENT_PAGES; - - if (space == TRX_SYS_SPACE && buf_dblwr_page_inside(offset)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: trying to read" - " doublewrite buffer page %lu\n", - (ulong) offset); + if (page_id.space() == TRX_SYS_SPACE + && buf_dblwr_page_inside(page_id.page_no())) { + ib::error() << "Trying to read doublewrite buffer page " + << page_id; return(0); } - if (ibuf_bitmap_page(zip_size, offset) - || trx_sys_hdr_page(space, offset)) { + if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) { /* Trx sys header is so low in the latching order that we play safe and do not leave the i/o-completion to an asynchronous @@ -161,60 +148,81 @@ buf_read_page_low( or is being dropped; if we succeed in initing the page in the buffer pool for read, then DISCARD cannot proceed until the read has completed */ - bpage = buf_page_init_for_read(err, mode, space, zip_size, unzip, - tablespace_version, offset); + bpage = buf_page_init_for_read(err, mode, page_id, page_size, unzip); + if (bpage == NULL) { return(0); } -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, - "Posting read request for page %lu, sync %s\n", - (ulong) offset, sync ? "true" : "false"); - } -#endif + DBUG_PRINT("ib_buf", ("read page %u:%u size=%u unzip=%u,%s", + (unsigned) page_id.space(), + (unsigned) page_id.page_no(), + (unsigned) page_size.physical(), + (unsigned) unzip, + sync ? "sync" : "async")); ut_ad(buf_page_in_file(bpage)); - byte* frame = zip_size ? 
bpage->zip.data : ((buf_block_t*) bpage)->frame; - if (sync) { thd_wait_begin(NULL, THD_WAIT_DISKIO); } - if (zip_size) { - *err = fil_io(OS_FILE_READ | wake_later - | ignore_nonexistent_pages, - sync, space, zip_size, offset, 0, zip_size, - frame, bpage, &bpage->write_size); + void* dst; + + if (page_size.is_compressed()) { + dst = bpage->zip.data; } else { ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); - *err = fil_io(OS_FILE_READ | wake_later - | ignore_nonexistent_pages, - sync, space, 0, offset, 0, UNIV_PAGE_SIZE, - frame, bpage, - &bpage->write_size); + dst = ((buf_block_t*) bpage)->frame; } + IORequest request(type | IORequest::READ); + + ut_ad(dst != NULL); + ut_ad(bpage->zip.data != NULL || ((buf_block_t*)bpage)->frame != NULL); + + *err = fil_io( + request, sync, page_id, page_size, 0, page_size.physical(), + dst, bpage, NULL); + + ut_ad(dst != NULL); + ut_ad(bpage->zip.data != NULL || ((buf_block_t*)bpage)->frame != NULL); + ut_ad(bpage->id.space() == page_id.space()); + if (sync) { thd_wait_end(NULL); } if (*err != DB_SUCCESS) { - if (ignore_nonexistent_pages || *err == DB_TABLESPACE_DELETED) { + if (*err == DB_TABLESPACE_TRUNCATED) { + /* Remove the page which is outside the + truncated tablespace bounds when recovering + from a crash happened during a truncation */ + buf_read_page_handle_error(bpage); + if (recv_recovery_on) { + mutex_enter(&recv_sys->mutex); + ut_ad(recv_sys->n_addrs > 0); + recv_sys->n_addrs--; + mutex_exit(&recv_sys->mutex); + } + return(0); + } else if (IORequest::ignore_missing(type) + || *err == DB_TABLESPACE_DELETED) { buf_read_page_handle_error(bpage); return(0); } - /* else */ + ut_error; } if (sync) { + ut_ad(dst != NULL); + ut_ad(bpage->zip.data != NULL || ((buf_block_t*)bpage)->frame != NULL); /* The i/o is already completed when we arrive from fil_read */ + if (!buf_page_io_complete(bpage)) { if (rbpage) { *rbpage = bpage; @@ -230,8 +238,7 @@ buf_read_page_low( return(1); } -/********************************************************************//** -Applies a random read-ahead in buf_pool if there are at least a threshold +/** Applies a random read-ahead in buf_pool if there are at least a threshold value of accessed pages from the random read-ahead area. Does not read any page, not even the one at the position (space, offset), if the read-ahead mechanism is not activated. NOTE 1: the calling thread may own latches on @@ -240,24 +247,20 @@ end up waiting for these latches! NOTE 2: the calling thread must want access to the page given: this rule is set to prevent unintended read-aheads performed by ibuf routines, a situation which could result in a deadlock if the OS does not support asynchronous i/o. +@param[in] page_id page id of a page which the current thread +wants to access +@param[in] page_size page size +@param[in] inside_ibuf TRUE if we are inside ibuf routine @return number of page read requests issued; NOTE that if we read ibuf pages, it may happen that the page at the given page number does not -get read even if we return a positive value! -@return number of page read requests issued */ -UNIV_INTERN +get read even if we return a positive value! 
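/* [Editorial sketch -- not part of the patch.] buf_read_ahead_random() below
   first rounds the requested page number down and up to the read-ahead area
   that contains it, then scans that window. A standalone model of just the
   bounds arithmetic; the helper name is invented for illustration: */
#include <cassert>

typedef unsigned long ulint;

/* Compute the half-open range [low, high) of the area containing page_no. */
static void read_ahead_bounds(ulint page_no, ulint area, ulint* low, ulint* high)
{
        *low  = (page_no / area) * area;        /* first page of the area */
        *high = (page_no / area + 1) * area;    /* one past its last page */
}

/* Example: with a 64-page area, page 100 falls into [64, 128). */
static void read_ahead_bounds_example()
{
        ulint low;
        ulint high;

        read_ahead_bounds(100, 64, &low, &high);
        assert(low == 64 && high == 128);
}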
*/ ulint buf_read_ahead_random( -/*==================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes, - or 0 */ - ulint offset, /*!< in: page number of a page which - the current thread wants to access */ - ibool inside_ibuf) /*!< in: TRUE if we are inside ibuf - routine */ + const page_id_t& page_id, + const page_size_t& page_size, + ibool inside_ibuf) { - buf_pool_t* buf_pool = buf_pool_get(space, offset); - ib_int64_t tablespace_version; + buf_pool_t* buf_pool = buf_pool_get(page_id); ulint recent_blocks = 0; ulint ibuf_mode; ulint count; @@ -277,8 +280,7 @@ buf_read_ahead_random( return(0); } - if (ibuf_bitmap_page(zip_size, offset) - || trx_sys_hdr_page(space, offset)) { + if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) { /* If it is an ibuf bitmap page or trx sys hdr, we do no read-ahead, as that could break the ibuf page access @@ -287,19 +289,22 @@ buf_read_ahead_random( return(0); } - /* Remember the tablespace version before we ask te tablespace size - below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we - do not try to read outside the bounds of the tablespace! */ - - tablespace_version = fil_space_get_version(space); - - low = (offset / buf_read_ahead_random_area) + low = (page_id.page_no() / buf_read_ahead_random_area) * buf_read_ahead_random_area; - high = (offset / buf_read_ahead_random_area + 1) + + high = (page_id.page_no() / buf_read_ahead_random_area + 1) * buf_read_ahead_random_area; - if (high > fil_space_get_size(space)) { - high = fil_space_get_size(space); + /* Remember the tablespace version before we ask the tablespace size + below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we + do not try to read outside the bounds of the tablespace! */ + if (fil_space_t* space = fil_space_acquire(page_id.space())) { + if (high > space->size) { + high = space->size; + } + fil_space_release(space); + } else { + return(0); } buf_pool_mutex_enter(buf_pool); @@ -315,10 +320,10 @@ buf_read_ahead_random( that is, reside near the start of the LRU list. 
*/ for (i = low; i < high; i++) { - const buf_page_t* bpage = - buf_page_hash_get(buf_pool, space, i); + const buf_page_t* bpage = buf_page_hash_get( + buf_pool, page_id_t(page_id.space(), i)); - if (bpage + if (bpage != NULL && buf_page_is_accessed(bpage) && buf_page_peek_if_young(bpage)) { @@ -352,21 +357,22 @@ read_ahead: /* It is only sensible to do read-ahead in the non-sync aio mode: hence FALSE as the first parameter */ - if (!ibuf_bitmap_page(zip_size, i)) { + const page_id_t cur_page_id(page_id.space(), i); + + if (!ibuf_bitmap_page(cur_page_id, page_size)) { + buf_page_t* rpage = NULL; count += buf_read_page_low( &err, false, - ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER, - space, zip_size, FALSE, - tablespace_version, i, NULL); + IORequest::DO_NOT_WAKE, + ibuf_mode, + cur_page_id, page_size, false, &rpage); + if (err == DB_TABLESPACE_DELETED) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: in random" - " readahead trying to access\n" - "InnoDB: tablespace %lu page %lu,\n" - "InnoDB: but the tablespace does not" - " exist or is just being dropped.\n", - (ulong) space, (ulong) i); + ib::warn() << "Random readahead trying to" + " access page " << cur_page_id + << " in nonexisting or" + " being-dropped tablespace"; + break; } } } @@ -377,14 +383,12 @@ read_ahead: os_aio_simulated_wake_handler_threads(); -#ifdef UNIV_DEBUG - if (buf_debug_prints && (count > 0)) { - fprintf(stderr, - "Random read-ahead space %lu offset %lu pages %lu\n", - (ulong) space, (ulong) offset, - (ulong) count); + if (count) { + DBUG_PRINT("ib_buf", ("random read-ahead %u pages, %u:%u", + (unsigned) count, + (unsigned) page_id.space(), + (unsigned) page_id.page_no())); } -#endif /* UNIV_DEBUG */ /* Read ahead is considered one I/O operation for the purpose of LRU policy decision. */ @@ -395,42 +399,37 @@ read_ahead: return(count); } -/********************************************************************//** -High-level function which reads a page asynchronously from a file to the +/** High-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there. Sets the io_fix flag and sets an exclusive lock on the buffer frame. The flag is cleared and the x-lock released by the i/o-handler thread. +@param[in] page_id page id +@param[in] page_size page size @return TRUE if page has been read in, FALSE in case of failure */ -UNIV_INTERN ibool buf_read_page( -/*==========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes, or 0 */ - ulint offset, /*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size, buf_page_t** bpage) /*!< out: page */ { - ib_int64_t tablespace_version; ulint count; dberr_t err; - tablespace_version = fil_space_get_version(space); + /* We do synchronous IO because our AIO completion code + is sub-optimal. See buf_page_io_complete(), we have to + acquire the buffer pool mutex before acquiring the block + mutex, required for updating the page state. The acquire + of the buffer pool mutex becomes an expensive bottleneck. 
*/ - /* We do the i/o in the synchronous aio mode to save thread - switches: hence TRUE */ + count = buf_read_page_low( + &err, true, + 0, BUF_READ_ANY_PAGE, page_id, page_size, false, bpage); - count = buf_read_page_low(&err, true, BUF_READ_ANY_PAGE, space, - zip_size, FALSE, - tablespace_version, offset, bpage); srv_stats.buf_pool_reads.add(count); + if (err == DB_TABLESPACE_DELETED) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: trying to access" - " tablespace %lu page no. %lu,\n" - "InnoDB: but the tablespace does not exist" - " or is just being dropped.\n", - (ulong) space, (ulong) offset); + ib::error() << "trying to read page " << page_id + << " in nonexisting or being-dropped tablespace"; } /* Increment number of I/O operations used for LRU policy. */ @@ -439,37 +438,30 @@ buf_read_page( return(count > 0); } -/********************************************************************//** -High-level function which reads a page asynchronously from a file to the +/** High-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there. Sets the io_fix flag and sets an exclusive lock on the buffer frame. The flag is cleared and the x-lock released by the i/o-handler thread. +@param[in] page_id page id +@param[in] page_size page size +@param[in] sync true if synchronous aio is desired @return TRUE if page has been read in, FALSE in case of failure */ -UNIV_INTERN ibool -buf_read_page_async( -/*================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ +buf_read_page_background( + const page_id_t& page_id, + const page_size_t& page_size, + bool sync) { - ulint zip_size; - ib_int64_t tablespace_version; ulint count; dberr_t err; + buf_page_t* rbpage = NULL; - zip_size = fil_space_get_zip_size(space); + count = buf_read_page_low( + &err, sync, + IORequest::DO_NOT_WAKE | IORequest::IGNORE_MISSING, + BUF_READ_ANY_PAGE, + page_id, page_size, false, &rbpage); - if (zip_size == ULINT_UNDEFINED) { - return(FALSE); - } - - tablespace_version = fil_space_get_version(space); - - count = buf_read_page_low(&err, true, BUF_READ_ANY_PAGE - | OS_AIO_SIMULATED_WAKE_LATER - | BUF_READ_IGNORE_NONEXISTENT_PAGES, - space, zip_size, FALSE, - tablespace_version, offset, NULL); srv_stats.buf_pool_reads.add(count); /* We do not increment number of I/O operations used for LRU policy @@ -482,8 +474,7 @@ buf_read_page_async( return(count > 0); } -/********************************************************************//** -Applies linear read-ahead if in the buf_pool the page is a border page of +/** Applies linear read-ahead if in the buf_pool the page is a border page of a linear read-ahead area and all the pages in the area have been accessed. Does not read any page if the read-ahead mechanism is not activated. Note that the algorithm looks at the 'natural' adjacent successor and @@ -505,28 +496,25 @@ latches! NOTE 3: the calling thread must want access to the page given: this rule is set to prevent unintended read-aheads performed by ibuf routines, a situation which could result in a deadlock if the OS does not support asynchronous io. 
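/* [Editorial sketch -- not part of the patch.] The read-ahead functions in
   this file now clamp the upper bound of the window to the tablespace size
   while holding a reference taken with fil_space_acquire(), and bail out if
   the tablespace no longer exists. A standalone model of that
   acquire/inspect/release shape; space_info, space_acquire and space_release
   are invented stand-ins for the fil_* API, not the real functions: */
typedef unsigned long ulint;

struct space_info { ulint size; };              /* stand-in for fil_space_t */

static space_info  the_space = { 1024 };        /* pretend 1024-page space  */
static space_info* space_acquire(ulint) { return &the_space; }
static void        space_release(space_info*) {}

/* Returns the clamped bound, or 0 when the tablespace is gone and the caller
   should skip the read-ahead entirely. */
static ulint clamp_to_space_size(ulint space_id, ulint high)
{
        if (space_info* space = space_acquire(space_id)) {
                if (high > space->size) {
                        high = space->size;
                }
                space_release(space);
                return high;
        }

        return 0;
}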
-@return number of page read requests issued */ -UNIV_INTERN +@param[in] page_id page id; see NOTE 3 above +@param[in] page_size page size +@param[in] inside_ibuf TRUE if we are inside ibuf routine +@return number of page read requests issued */ ulint buf_read_ahead_linear( -/*==================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes, or 0 */ - ulint offset, /*!< in: page number; see NOTE 3 above */ - ibool inside_ibuf) /*!< in: TRUE if we are inside ibuf routine */ + const page_id_t& page_id, + const page_size_t& page_size, + ibool inside_ibuf) { - buf_pool_t* buf_pool = buf_pool_get(space, offset); - ib_int64_t tablespace_version; + buf_pool_t* buf_pool = buf_pool_get(page_id); buf_page_t* bpage; buf_frame_t* frame; buf_page_t* pred_bpage = NULL; ulint pred_offset; ulint succ_offset; - ulint count; int asc_or_desc; ulint new_offset; ulint fail_count; - ulint ibuf_mode; ulint low, high; dberr_t err; ulint i; @@ -539,24 +527,23 @@ buf_read_ahead_linear( return(0); } - if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) { + if (srv_startup_is_before_trx_rollback_phase) { /* No read-ahead to avoid thread deadlocks */ return(0); } - low = (offset / buf_read_ahead_linear_area) + low = (page_id.page_no() / buf_read_ahead_linear_area) * buf_read_ahead_linear_area; - high = (offset / buf_read_ahead_linear_area + 1) + high = (page_id.page_no() / buf_read_ahead_linear_area + 1) * buf_read_ahead_linear_area; - if ((offset != low) && (offset != high - 1)) { + if ((page_id.page_no() != low) && (page_id.page_no() != high - 1)) { /* This is not a border page of the area: return */ return(0); } - if (ibuf_bitmap_page(zip_size, offset) - || trx_sys_hdr_page(space, offset)) { + if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) { /* If it is an ibuf bitmap page or trx sys hdr, we do no read-ahead, as that could break the ibuf page access @@ -568,18 +555,22 @@ buf_read_ahead_linear( /* Remember the tablespace version before we ask te tablespace size below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we do not try to read outside the bounds of the tablespace! 
*/ + ulint space_size; - tablespace_version = fil_space_get_version(space); - - buf_pool_mutex_enter(buf_pool); - - if (high > fil_space_get_size(space)) { - buf_pool_mutex_exit(buf_pool); - /* The area is not whole, return */ + if (fil_space_t* space = fil_space_acquire(page_id.space())) { + space_size = space->size; + fil_space_release(space); + if (high > space_size) { + /* The area is not whole */ + return(0); + } + } else { return(0); } + buf_pool_mutex_enter(buf_pool); + if (buf_pool->n_pend_reads > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) { buf_pool_mutex_exit(buf_pool); @@ -593,19 +584,20 @@ buf_read_ahead_linear( asc_or_desc = 1; - if (offset == low) { + if (page_id.page_no() == low) { asc_or_desc = -1; } /* How many out of order accessed pages can we ignore when working out the access pattern for linear readahead */ - threshold = ut_min((64 - srv_read_ahead_threshold), + threshold = ut_min(static_cast(64 - srv_read_ahead_threshold), BUF_READ_AHEAD_AREA(buf_pool)); fail_count = 0; for (i = low; i < high; i++) { - bpage = buf_page_hash_get(buf_pool, space, i); + bpage = buf_page_hash_get(buf_pool, + page_id_t(page_id.space(), i)); if (bpage == NULL || !buf_page_is_accessed(bpage)) { /* Not accessed */ @@ -643,7 +635,7 @@ buf_read_ahead_linear( /* If we got this far, we know that enough pages in the area have been accessed in the right order: linear read-ahead can be sensible */ - bpage = buf_page_hash_get(buf_pool, space, offset); + bpage = buf_page_hash_get(buf_pool, page_id); if (bpage == NULL) { buf_pool_mutex_exit(buf_pool); @@ -674,12 +666,14 @@ buf_read_ahead_linear( buf_pool_mutex_exit(buf_pool); - if ((offset == low) && (succ_offset == offset + 1)) { + if ((page_id.page_no() == low) + && (succ_offset == page_id.page_no() + 1)) { /* This is ok, we can continue */ new_offset = pred_offset; - } else if ((offset == high - 1) && (pred_offset == offset - 1)) { + } else if ((page_id.page_no() == high - 1) + && (pred_offset == page_id.page_no() - 1)) { /* This is ok, we can continue */ new_offset = succ_offset; @@ -700,19 +694,19 @@ buf_read_ahead_linear( return(0); } - if (high > fil_space_get_size(space)) { + if (high > space_size) { /* The area is not whole, return */ return(0); } + ulint count = 0; + /* If we got this far, read-ahead can be sensible: do it */ - ibuf_mode = inside_ibuf - ? BUF_READ_IBUF_PAGES_ONLY | OS_AIO_SIMULATED_WAKE_LATER - : BUF_READ_ANY_PAGE | OS_AIO_SIMULATED_WAKE_LATER; + ulint ibuf_mode; - count = 0; + ibuf_mode = inside_ibuf ? 
BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE; /* Since Windows XP seems to schedule the i/o handler thread very eagerly, and consequently it does not wait for the @@ -724,20 +718,22 @@ buf_read_ahead_linear( /* It is only sensible to do read-ahead in the non-sync aio mode: hence FALSE as the first parameter */ - if (!ibuf_bitmap_page(zip_size, i)) { + const page_id_t cur_page_id(page_id.space(), i); + + if (!ibuf_bitmap_page(cur_page_id, page_size)) { + buf_page_t* rpage = NULL; + count += buf_read_page_low( &err, false, - ibuf_mode, - space, zip_size, FALSE, tablespace_version, i, NULL); + IORequest::DO_NOT_WAKE, + ibuf_mode, cur_page_id, page_size, false, &rpage); + if (err == DB_TABLESPACE_DELETED) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: in" - " linear readahead trying to access\n" - "InnoDB: tablespace %lu page %lu,\n" - "InnoDB: but the tablespace does not" - " exist or is just being dropped.\n", - (ulong) space, (ulong) i); + ib::warn() << "linear readahead trying to" + " access page " + << page_id_t(page_id.space(), i) + << " in nonexisting or being-dropped" + " tablespace"; } } } @@ -748,13 +744,13 @@ buf_read_ahead_linear( os_aio_simulated_wake_handler_threads(); -#ifdef UNIV_DEBUG - if (buf_debug_prints && (count > 0)) { - fprintf(stderr, - "LINEAR read-ahead space %lu offset %lu pages %lu\n", - (ulong) space, (ulong) offset, (ulong) count); + if (count) { + DBUG_PRINT("ib_buf", ("linear read-ahead %lu pages, " + UINT32PF ":" UINT32PF, + count, + page_id.space(), + page_id.page_no())); } -#endif /* UNIV_DEBUG */ /* Read ahead is considered one I/O operation for the purpose of LRU policy decision. */ @@ -768,7 +764,6 @@ buf_read_ahead_linear( Issues read requests for pages which the ibuf module wants to read in, in order to contract the insert buffer tree. Technically, this function is like a read-ahead function. 
*/ -UNIV_INTERN void buf_read_ibuf_merge_pages( /*======================*/ @@ -778,7 +773,7 @@ buf_read_ibuf_merge_pages( to get read in, before this function returns */ const ulint* space_ids, /*!< in: array of space ids */ - const ib_int64_t* space_versions,/*!< in: the spaces must have + const ib_uint64_t* space_versions,/*!< in: the spaces must have this version number (timestamp), otherwise we discard the read; we use this @@ -792,100 +787,94 @@ buf_read_ibuf_merge_pages( ulint n_stored) /*!< in: number of elements in the arrays */ { - ulint i; - #ifdef UNIV_IBUF_DEBUG ut_a(n_stored < UNIV_PAGE_SIZE); #endif - for (i = 0; i < n_stored; i++) { - dberr_t err; - buf_pool_t* buf_pool; - ulint zip_size = fil_space_get_zip_size(space_ids[i]); + for (ulint i = 0; i < n_stored; i++) { + const page_id_t page_id(space_ids[i], page_nos[i]); + + buf_pool_t* buf_pool = buf_pool_get(page_id); + buf_page_t* rpage = NULL; - buf_pool = buf_pool_get(space_ids[i], page_nos[i]); + bool found; + const page_size_t page_size(fil_space_get_page_size( + space_ids[i], &found)); + + if (!found) { + /* The tablespace was not found, remove the + entries for that page */ + ibuf_merge_or_delete_for_page(NULL, page_id, + NULL, FALSE); + continue; + } while (buf_pool->n_pend_reads > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) { os_thread_sleep(500000); } - if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { + dberr_t err; - goto tablespace_deleted; - } + buf_read_page_low(&err, + sync && (i + 1 == n_stored), + 0, + BUF_READ_ANY_PAGE, page_id, page_size, + true, &rpage); - buf_read_page_low(&err, sync && (i + 1 == n_stored), - BUF_READ_ANY_PAGE, space_ids[i], - zip_size, TRUE, space_versions[i], - page_nos[i], NULL); - - if (UNIV_UNLIKELY(err == DB_TABLESPACE_DELETED)) { -tablespace_deleted: + if (err == DB_TABLESPACE_DELETED) { /* We have deleted or are deleting the single-table tablespace: remove the entries for that page */ - - ibuf_merge_or_delete_for_page(NULL, space_ids[i], - page_nos[i], - zip_size, FALSE); + ibuf_merge_or_delete_for_page(NULL, page_id, + &page_size, FALSE); } } os_aio_simulated_wake_handler_threads(); -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, - "Ibuf merge read-ahead space %lu pages %lu\n", - (ulong) space_ids[0], (ulong) n_stored); + if (n_stored) { + DBUG_PRINT("ib_buf", + ("ibuf merge read-ahead %u pages, space %u", + unsigned(n_stored), unsigned(space_ids[0]))); } -#endif /* UNIV_DEBUG */ } -/********************************************************************//** -Issues read requests for pages which recovery wants to read in. */ -UNIV_INTERN +/** Issues read requests for pages which recovery wants to read in. 
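/* [Editorial sketch -- not part of the patch.] buf_read_ibuf_merge_pages()
   above now asks for the page size with an out-parameter "found" flag and,
   when the tablespace has vanished, deletes the buffered entries for that
   page instead of scheduling a read. A standalone model of that
   lookup-or-skip shape; lookup_page_size and resolve_or_discard are invented
   names, not InnoDB functions: */
typedef unsigned long ulint;

/* Stand-in for fil_space_get_page_size(): reports whether the space exists. */
static ulint lookup_page_size(ulint space_id, bool* found)
{
        *found = (space_id != 0xFFFFFFFFUL);    /* pretend one space is gone */
        return *found ? 16384 : 0;
}

/* Returns true when the caller should go on and issue the read request. */
static bool resolve_or_discard(ulint space_id)
{
        bool    found;
        ulint   page_size = lookup_page_size(space_id, &found);

        if (!found) {
                /* tablespace dropped: discard the change-buffer entries for
                   this page (ibuf_merge_or_delete_for_page(NULL, ...)) */
                return false;
        }

        (void) page_size;       /* would be passed on to buf_read_page_low() */
        return true;
}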
+@param[in] sync true if the caller wants this function to wait +for the highest address page to get read in, before this function returns +@param[in] space_id tablespace id +@param[in] page_nos array of page numbers to read, with the +highest page number the last in the array +@param[in] n_stored number of page numbers in the array */ void buf_read_recv_pages( -/*================*/ - ibool sync, /*!< in: TRUE if the caller - wants this function to wait - for the highest address page - to get read in, before this - function returns */ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in - bytes, or 0 */ - const ulint* page_nos, /*!< in: array of page numbers - to read, with the highest page - number the last in the - array */ - ulint n_stored) /*!< in: number of page numbers - in the array */ + bool sync, + ulint space_id, + const ulint* page_nos, + ulint n_stored) { - ib_int64_t tablespace_version; - ulint count; - dberr_t err; - ulint i; - - zip_size = fil_space_get_zip_size(space); - - if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { - /* It is a single table tablespace and the .ibd file is - missing: do nothing */ + ulint count; + dberr_t err; + ulint i; + fil_space_t* space = fil_space_get(space_id); + if (space == NULL) { + /* The tablespace is missing: do nothing */ return; } - tablespace_version = fil_space_get_version(space); + fil_space_open_if_needed(space); + + const page_size_t page_size(space->flags); for (i = 0; i < n_stored; i++) { - buf_pool_t* buf_pool; + buf_pool_t* buf_pool; + const page_id_t cur_page_id(space_id, page_nos[i]); + buf_page_t* rpage = NULL; count = 0; - os_aio_print_debug = FALSE; - buf_pool = buf_pool_get(space, page_nos[i]); + buf_pool = buf_pool_get(cur_page_id); while (buf_pool->n_pend_reads >= recv_n_pool_free_frames / 2) { os_aio_simulated_wake_handler_threads(); @@ -893,42 +882,34 @@ buf_read_recv_pages( count++; - if (count > 1000) { - fprintf(stderr, - "InnoDB: Error: InnoDB has waited for" - " 10 seconds for pending\n" - "InnoDB: reads to the buffer pool to" - " be finished.\n" - "InnoDB: Number of pending reads %lu," - " pending pread calls %lu\n", - (ulong) buf_pool->n_pend_reads, - (ulong) os_file_n_pending_preads); - - os_aio_print_debug = TRUE; + if (!(count % 1000)) { + + ib::error() + << "Waited for " << count / 100 + << " seconds for " + << buf_pool->n_pend_reads + << " pending reads"; } } - os_aio_print_debug = FALSE; - if ((i + 1 == n_stored) && sync) { - buf_read_page_low(&err, true, BUF_READ_ANY_PAGE, space, - zip_size, TRUE, tablespace_version, - page_nos[i], NULL); + buf_read_page_low( + &err, true, + 0, + BUF_READ_ANY_PAGE, + cur_page_id, page_size, true, &rpage); } else { - buf_read_page_low(&err, false, BUF_READ_ANY_PAGE - | OS_AIO_SIMULATED_WAKE_LATER, - space, zip_size, TRUE, - tablespace_version, page_nos[i], NULL); + buf_read_page_low( + &err, false, + IORequest::DO_NOT_WAKE, + BUF_READ_ANY_PAGE, + cur_page_id, page_size, true, &rpage); } } os_aio_simulated_wake_handler_threads(); -#ifdef UNIV_DEBUG - if (buf_debug_prints) { - fprintf(stderr, - "Recovery applies read-ahead pages %lu\n", - (ulong) n_stored); - } -#endif /* UNIV_DEBUG */ + DBUG_PRINT("ib_buf", ("recovery read-ahead (%u pages)", + unsigned(n_stored))); } + diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc index 593af089b00..b4df86963a9 100644 --- a/storage/innobase/data/data0data.cc +++ b/storage/innobase/data/data0data.cc @@ -23,6 +23,8 @@ SQL data field and tuple Created 5/30/1994 Heikki 
Tuuri *************************************************************************/ +#include "ha_prototypes.h" + #include "data0data.h" #ifdef UNIV_NONINL @@ -36,36 +38,36 @@ Created 5/30/1994 Heikki Tuuri #include "page0zip.h" #include "dict0dict.h" #include "btr0cur.h" +#include "row0upd.h" -#include #endif /* !UNIV_HOTBACKUP */ #ifdef UNIV_DEBUG /** Dummy variable to catch access to uninitialized fields. In the debug version, dtuple_create() will make all fields of dtuple_t point to data_error. */ -UNIV_INTERN byte data_error; +byte data_error; # ifndef UNIV_DEBUG_VALGRIND /** this is used to fool the compiler in dtuple_validate */ -UNIV_INTERN ulint data_dummy; +ulint data_dummy; # endif /* !UNIV_DEBUG_VALGRIND */ #endif /* UNIV_DEBUG */ #ifndef UNIV_HOTBACKUP -/************************************************************//** -Compare two data tuples, respecting the collation of character fields. -@return 1, 0 , -1 if tuple1 is greater, equal, less, respectively, -than tuple2 */ -UNIV_INTERN +/** Compare two data tuples. +@param[in] tuple1 first data tuple +@param[in] tuple2 second data tuple +@return positive, 0, negative if tuple1 is greater, equal, less, than tuple2, +respectively */ int dtuple_coll_cmp( -/*============*/ - const dtuple_t* tuple1, /*!< in: tuple 1 */ - const dtuple_t* tuple2) /*!< in: tuple 2 */ + const dtuple_t* tuple1, + const dtuple_t* tuple2) { ulint n_fields; ulint i; + int cmp; ut_ad(tuple1 != NULL); ut_ad(tuple2 != NULL); @@ -76,30 +78,20 @@ dtuple_coll_cmp( n_fields = dtuple_get_n_fields(tuple1); - if (n_fields != dtuple_get_n_fields(tuple2)) { - - return(n_fields < dtuple_get_n_fields(tuple2) ? -1 : 1); - } + cmp = (int) n_fields - (int) dtuple_get_n_fields(tuple2); - for (i = 0; i < n_fields; i++) { - int cmp; + for (i = 0; cmp == 0 && i < n_fields; i++) { const dfield_t* field1 = dtuple_get_nth_field(tuple1, i); const dfield_t* field2 = dtuple_get_nth_field(tuple2, i); - cmp = cmp_dfield_dfield(field1, field2); - - if (cmp) { - return(cmp); - } } - return(0); + return(cmp); } /*********************************************************************//** Sets number of fields used in a tuple. Normally this is set in dtuple_create, but if you want later to set it smaller, you can use this. */ -UNIV_INTERN void dtuple_set_n_fields( /*================*/ @@ -114,20 +106,20 @@ dtuple_set_n_fields( /**********************************************************//** Checks that a data field is typed. -@return TRUE if ok */ +@return TRUE if ok */ static ibool dfield_check_typed_no_assert( /*=========================*/ const dfield_t* field) /*!< in: data field */ { - if (dfield_get_type(field)->mtype > DATA_MYSQL - || dfield_get_type(field)->mtype < DATA_VARCHAR) { + if (dfield_get_type(field)->mtype > DATA_MTYPE_CURRENT_MAX + || dfield_get_type(field)->mtype < DATA_MTYPE_CURRENT_MIN) { + + ib::error() << "Data field type " + << dfield_get_type(field)->mtype + << ", len " << dfield_get_len(field); - fprintf(stderr, - "InnoDB: Error: data field type %lu, len %lu\n", - (ulong) dfield_get_type(field)->mtype, - (ulong) dfield_get_len(field)); return(FALSE); } @@ -136,8 +128,7 @@ dfield_check_typed_no_assert( /**********************************************************//** Checks that a data tuple is typed. 
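/* [Editorial sketch -- not part of the patch.] The dtuple_coll_cmp() rewrite
   above folds both checks into one accumulator: cmp starts as the signed
   difference of the field counts, and the loop keeps comparing fields only
   while cmp is still 0. A standalone model of the same control flow over
   plain integer sequences; seq_cmp is an invented name: */
#include <cassert>
#include <cstddef>

/* Counts are compared first (fewer elements sorts lower); the elements are
   compared pairwise only when the counts match. */
static int seq_cmp(const int* a, size_t a_n, const int* b, size_t b_n)
{
        int cmp = (int) a_n - (int) b_n;

        for (size_t i = 0; cmp == 0 && i < a_n; i++) {
                cmp = (a[i] > b[i]) - (a[i] < b[i]);
        }

        return cmp;
}

static void seq_cmp_example()
{
        const int x[] = {1, 2, 3};
        const int y[] = {1, 2, 4};

        assert(seq_cmp(x, 3, y, 3) < 0);        /* differs only in the last field */
        assert(seq_cmp(x, 3, x, 3) == 0);
}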
-@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_check_typed_no_assert( /*=========================*/ @@ -147,9 +138,8 @@ dtuple_check_typed_no_assert( ulint i; if (dtuple_get_n_fields(tuple) > REC_MAX_N_FIELDS) { - fprintf(stderr, - "InnoDB: Error: index entry has %lu fields\n", - (ulong) dtuple_get_n_fields(tuple)); + ib::error() << "Index entry has " + << dtuple_get_n_fields(tuple) << " fields"; dump: fputs("InnoDB: Tuple contents: ", stderr); dtuple_print(stderr, tuple); @@ -174,22 +164,18 @@ dump: #ifdef UNIV_DEBUG /**********************************************************//** Checks that a data field is typed. Asserts an error if not. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dfield_check_typed( /*===============*/ const dfield_t* field) /*!< in: data field */ { - if (dfield_get_type(field)->mtype > DATA_MYSQL - || dfield_get_type(field)->mtype < DATA_VARCHAR) { - - fprintf(stderr, - "InnoDB: Error: data field type %lu, len %lu\n", - (ulong) dfield_get_type(field)->mtype, - (ulong) dfield_get_len(field)); + if (dfield_get_type(field)->mtype > DATA_MTYPE_CURRENT_MAX + || dfield_get_type(field)->mtype < DATA_MTYPE_CURRENT_MIN) { - ut_error; + ib::fatal() << "Data field type " + << dfield_get_type(field)->mtype + << ", len " << dfield_get_len(field); } return(TRUE); @@ -197,8 +183,7 @@ dfield_check_typed( /**********************************************************//** Checks that a data tuple is typed. Asserts an error if not. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_check_typed( /*===============*/ @@ -220,8 +205,7 @@ dtuple_check_typed( /**********************************************************//** Validates the consistency of a tuple which must be complete, i.e, all fields must have been set. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_validate( /*============*/ @@ -274,7 +258,6 @@ dtuple_validate( #ifndef UNIV_HOTBACKUP /*************************************************************//** Pretty prints a dfield value according to its data type. */ -UNIV_INTERN void dfield_print( /*=========*/ @@ -317,7 +300,6 @@ dfield_print( /*************************************************************//** Pretty prints a dfield value according to its data type. Also the hex string is printed if a string contains non-printable characters. */ -UNIV_INTERN void dfield_print_also_hex( /*==================*/ @@ -391,16 +373,16 @@ dfield_print_also_hex( case 6: id = mach_read_from_6(data); - fprintf(stderr, "%llu", (ullint) id); + fprintf(stderr, IB_ID_FMT, id); break; case 7: id = mach_read_from_7(data); - fprintf(stderr, "%llu", (ullint) id); + fprintf(stderr, IB_ID_FMT, id); break; case 8: id = mach_read_from_8(data); - fprintf(stderr, "%llu", (ullint) id); + fprintf(stderr, IB_ID_FMT, id); break; default: goto print_hex; @@ -428,9 +410,7 @@ dfield_print_also_hex( break; default: - id = mach_ull_read_compressed(data); - - fprintf(stderr, "mix_id " TRX_ID_FMT, id); + goto print_hex; } break; @@ -487,7 +467,7 @@ dfield_print_raw( { ulint len = dfield_get_len(dfield); if (!dfield_is_null(dfield)) { - ulint print_len = ut_min(len, 1000); + ulint print_len = ut_min(len, static_cast(1000)); ut_print_buf(f, dfield_get_data(dfield), print_len); if (len != print_len) { fprintf(f, "(total %lu bytes%s)", @@ -501,7 +481,6 @@ dfield_print_raw( /**********************************************************//** The following function prints the contents of a tuple. 
*/ -UNIV_INTERN void dtuple_print( /*=========*/ @@ -527,6 +506,62 @@ dtuple_print( ut_ad(dtuple_validate(tuple)); } +/** Print the contents of a tuple. +@param[out] o output stream +@param[in] field array of data fields +@param[in] n number of data fields */ +void +dfield_print( + std::ostream& o, + const dfield_t* field, + ulint n) +{ + for (ulint i = 0; i < n; i++, field++) { + const void* data = dfield_get_data(field); + const ulint len = dfield_get_len(field); + + if (i) { + o << ','; + } + + if (dfield_is_null(field)) { + o << "NULL"; + } else if (dfield_is_ext(field)) { + ulint local_len = len - BTR_EXTERN_FIELD_REF_SIZE; + ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE); + + o << '[' + << local_len + << '+' << BTR_EXTERN_FIELD_REF_SIZE << ']'; + ut_print_buf(o, data, local_len); + ut_print_buf_hex(o, static_cast(data) + + local_len, + BTR_EXTERN_FIELD_REF_SIZE); + } else { + o << '[' << len << ']'; + ut_print_buf(o, data, len); + } + } +} + +/** Print the contents of a tuple. +@param[out] o output stream +@param[in] tuple data tuple */ +void +dtuple_print( + std::ostream& o, + const dtuple_t* tuple) +{ + const ulint n = dtuple_get_n_fields(tuple); + + o << "TUPLE (info_bits=" << dtuple_get_info_bits(tuple) + << ", " << n << " fields): {"; + + dfield_print(o, tuple->fields, n); + + o << "}"; +} + /**************************************************************//** Moves parts of long fields in entry to the big record vector so that the size of tuple drops below the maximum record size allowed in the @@ -535,11 +570,11 @@ to determine uniquely the insertion place of the tuple in the index. @return own: created big record vector, NULL if we are not able to shorten the entry enough, i.e., if there are too many fixed-length or short fields in entry or the index is clustered */ -UNIV_INTERN big_rec_t* dtuple_convert_big_rec( /*===================*/ dict_index_t* index, /*!< in: index */ + upd_t* upd, /*!< in/out: update vector */ dtuple_t* entry, /*!< in/out: index entry */ ulint* n_ext) /*!< in/out: number of externally stored columns */ @@ -571,9 +606,7 @@ dtuple_convert_big_rec( size = rec_get_converted_size(index, entry, *n_ext); if (UNIV_UNLIKELY(size > 1000000000)) { - fprintf(stderr, - "InnoDB: Warning: tuple size very big: %lu\n", - (ulong) size); + ib::warn() << "Tuple size is very big: " << size; fputs("InnoDB: Tuple contents: ", stderr); dtuple_print(stderr, entry); putc('\n', stderr); @@ -582,15 +615,7 @@ dtuple_convert_big_rec( heap = mem_heap_create(size + dtuple_get_n_fields(entry) * sizeof(big_rec_field_t) + 1000); - vector = static_cast( - mem_heap_alloc(heap, sizeof(big_rec_t))); - - vector->heap = heap; - - vector->fields = static_cast( - mem_heap_alloc( - heap, - dtuple_get_n_fields(entry) * sizeof(big_rec_field_t))); + vector = big_rec_t::alloc(heap, dtuple_get_n_fields(entry)); /* Decide which fields to shorten: the algorithm is to look for a variable-length field that yields the biggest savings when @@ -602,12 +627,12 @@ dtuple_convert_big_rec( *n_ext), dict_table_is_comp(index->table), dict_index_get_n_fields(index), - dict_table_zip_size(index->table))) { + dict_table_page_size(index->table))) { + ulint i; ulint longest = 0; ulint longest_i = ULINT_MAX; byte* data; - big_rec_field_t* b; for (i = dict_index_get_n_unique_in_tree(index); i < dtuple_get_n_fields(entry); i++) { @@ -624,7 +649,7 @@ dtuple_convert_big_rec( || dfield_is_ext(dfield) || dfield_get_len(dfield) <= local_len || dfield_get_len(dfield) - <= BTR_EXTERN_FIELD_REF_SIZE * 2) { + <= 
BTR_EXTERN_LOCAL_STORED_MAX_SIZE) { goto skip_field; } @@ -645,8 +670,7 @@ dtuple_convert_big_rec( there we always store locally columns whose length is up to local_len == 788 bytes. @see rec_init_offsets_comp_ordinary */ - if (ifield->col->mtype != DATA_BLOB - && ifield->col->len < 256) { + if (!DATA_BIG_COL(ifield->col)) { goto skip_field; } @@ -675,10 +699,12 @@ skip_field: ifield = dict_index_get_nth_field(index, longest_i); local_prefix_len = local_len - BTR_EXTERN_FIELD_REF_SIZE; - b = &vector->fields[n_fields]; - b->field_no = longest_i; - b->len = dfield_get_len(dfield) - local_prefix_len; - b->data = (char*) dfield_get_data(dfield) + local_prefix_len; + vector->append( + big_rec_field_t( + longest_i, + dfield_get_len(dfield) - local_prefix_len, + static_cast(dfield_get_data(dfield)) + + local_prefix_len)); /* Allocate the locally stored part of the column. */ data = static_cast(mem_heap_alloc(heap, local_len)); @@ -702,9 +728,30 @@ skip_field: n_fields++; (*n_ext)++; ut_ad(n_fields < dtuple_get_n_fields(entry)); + + if (upd && !upd->is_modified(longest_i)) { + + DEBUG_SYNC_C("ib_mv_nonupdated_column_offpage"); + + upd_field_t upd_field; + upd_field.field_no = longest_i; + upd_field.orig_len = 0; + upd_field.exp = NULL; + upd_field.old_v_val = NULL; + dfield_copy(&upd_field.new_val, + dfield->clone(upd->heap)); + upd->append(upd_field); + ut_ad(upd->is_modified(longest_i)); + + ut_ad(upd_field.new_val.len + >= BTR_EXTERN_FIELD_REF_SIZE); + ut_ad(upd_field.new_val.len == local_len); + ut_ad(upd_field.new_val.len == dfield_get_len(dfield)); + } } - vector->n_fields = n_fields; + ut_ad(n_fields == vector->n_fields); + return(vector); } @@ -712,7 +759,6 @@ skip_field: Puts back to entry the data stored in vector. Note that to ensure the fields in entry can accommodate the data, vector must have been created from entry with dtuple_convert_big_rec. */ -UNIV_INTERN void dtuple_convert_back_big_rec( /*========================*/ @@ -748,4 +794,57 @@ dtuple_convert_back_big_rec( mem_heap_free(vector->heap); } + +/** Allocate a big_rec_t object in the given memory heap, and for storing +n_fld number of fields. +@param[in] heap memory heap in which this object is allocated +@param[in] n_fld maximum number of fields that can be stored in + this object + +@return the allocated object */ +big_rec_t* +big_rec_t::alloc( + mem_heap_t* heap, + ulint n_fld) +{ + big_rec_t* rec = static_cast( + mem_heap_alloc(heap, sizeof(big_rec_t))); + + new(rec) big_rec_t(n_fld); + + rec->heap = heap; + rec->fields = static_cast( + mem_heap_alloc(heap, + n_fld * sizeof(big_rec_field_t))); + + rec->n_fields = 0; + return(rec); +} + +/** Create a deep copy of this object +@param[in] heap the memory heap in which the clone will be + created. + +@return the cloned object. */ +dfield_t* +dfield_t::clone( + mem_heap_t* heap) +{ + const ulint size = len == UNIV_SQL_NULL ? 
0 : len; + dfield_t* obj = static_cast( + mem_heap_alloc(heap, sizeof(dfield_t) + size)); + + obj->ext = ext; + obj->len = len; + obj->type = type; + + if (len != UNIV_SQL_NULL) { + obj->data = obj + 1; + memcpy(obj->data, data, len); + } else { + obj->data = 0; + } + + return(obj); +} #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc index 0b9e08544a5..8fb3761531c 100644 --- a/storage/innobase/data/data0type.cc +++ b/storage/innobase/data/data0type.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -23,6 +23,8 @@ Data types Created 1/16/1996 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + #include "data0type.h" #ifdef UNIV_NONINL @@ -30,21 +32,18 @@ Created 1/16/1996 Heikki Tuuri #endif #ifndef UNIV_HOTBACKUP -# include "ha_prototypes.h" - /* At the database startup we store the default-charset collation number of this MySQL installation to this global variable. If we have < 4.1.2 format column definitions, or records in the insert buffer, we use this charset-collation code for them. */ -UNIV_INTERN ulint data_mysql_default_charset_coll; +ulint data_mysql_default_charset_coll; /*********************************************************************//** Determine how many bytes the first n characters of the given string occupy. If the string is shorter than n characters, returns the number of bytes the characters in the string occupy. -@return length of the prefix, in bytes */ -UNIV_INTERN +@return length of the prefix, in bytes */ ulint dtype_get_at_most_n_mbchars( /*========================*/ @@ -84,8 +83,7 @@ dtype_get_at_most_n_mbchars( /*********************************************************************//** Checks if a data main type is a string type. Also a BLOB is considered a string type. -@return TRUE if string type */ -UNIV_INTERN +@return TRUE if string type */ ibool dtype_is_string_type( /*=================*/ @@ -105,8 +103,7 @@ dtype_is_string_type( Checks if a type is a binary string type. Note that for tables created with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For those DATA_BLOB columns this function currently returns FALSE. -@return TRUE if binary string type */ -UNIV_INTERN +@return TRUE if binary string type */ ibool dtype_is_binary_string_type( /*========================*/ @@ -128,8 +125,7 @@ Checks if a type is a non-binary string type. That is, dtype_is_string_type is TRUE and dtype_is_binary_string_type is FALSE. Note that for tables created with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For those DATA_BLOB columns this function currently returns TRUE. -@return TRUE if non-binary string type */ -UNIV_INTERN +@return TRUE if non-binary string type */ ibool dtype_is_non_binary_string_type( /*============================*/ @@ -149,7 +145,6 @@ dtype_is_non_binary_string_type( Forms a precise type from the < 4.1.2 format precise type plus the charset-collation code. 
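/* [Editorial sketch -- not part of the patch.] dfield_t::clone() above copies
   the descriptor and its payload with a single allocation: sizeof(dfield_t)
   plus the data length is requested, and the data pointer is set to the byte
   right after the struct (obj + 1). A standalone model of that layout using
   malloc in place of mem_heap_alloc; blob_field and blob_field_clone are
   invented names: */
#include <cstddef>
#include <cstdlib>
#include <cstring>

struct blob_field {
        void*   data;   /* points just past this struct when non-empty */
        size_t  len;    /* 0 plays the role of the SQL NULL marker here */
};

/* Deep-copy a field descriptor and its bytes in one allocation; the payload
   is released together with the struct by a single free(). */
static blob_field* blob_field_clone(const void* data, size_t len)
{
        blob_field* obj = static_cast<blob_field*>(
                std::malloc(sizeof(blob_field) + len));

        if (obj == NULL) {
                return NULL;
        }

        obj->len = len;

        if (len != 0) {
                obj->data = obj + 1;            /* trailing payload */
                std::memcpy(obj->data, data, len);
        } else {
                obj->data = NULL;
        }

        return obj;
}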
@return precise type, including the charset-collation code */ -UNIV_INTERN ulint dtype_form_prtype( /*==============*/ @@ -165,8 +160,7 @@ dtype_form_prtype( /*********************************************************************//** Validates a data type structure. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtype_validate( /*===========*/ @@ -174,7 +168,7 @@ dtype_validate( { ut_a(type); ut_a(type->mtype >= DATA_VARCHAR); - ut_a(type->mtype <= DATA_MYSQL); + ut_a(type->mtype <= DATA_MTYPE_MAX); if (type->mtype == DATA_SYS) { ut_a((type->prtype & DATA_MYSQL_TYPE_MASK) < DATA_N_SYS_COLS); @@ -190,7 +184,6 @@ dtype_validate( #ifndef UNIV_HOTBACKUP /*********************************************************************//** Prints a data type structure. */ -UNIV_INTERN void dtype_print( /*========*/ @@ -226,6 +219,18 @@ dtype_print( fputs("DATA_BLOB", stderr); break; + case DATA_POINT: + fputs("DATA_POINT", stderr); + break; + + case DATA_VAR_POINT: + fputs("DATA_VAR_POINT", stderr); + break; + + case DATA_GEOMETRY: + fputs("DATA_GEOMETRY", stderr); + break; + case DATA_INT: fputs("DATA_INT", stderr); break; diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc index 80724372f27..4ffcf640a26 100644 --- a/storage/innobase/dict/dict0boot.cc +++ b/storage/innobase/dict/dict0boot.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2016, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -24,6 +24,8 @@ Data dictionary creation and booting Created 4/18/1996 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + #include "dict0boot.h" #ifdef UNIV_NONINL @@ -42,8 +44,7 @@ Created 4/18/1996 Heikki Tuuri /**********************************************************************//** Gets a pointer to the dictionary header and x-latches its page. -@return pointer to the dictionary header, page x-latched */ -UNIV_INTERN +@return pointer to the dictionary header, page x-latched */ dict_hdr_t* dict_hdr_get( /*=========*/ @@ -52,8 +53,8 @@ dict_hdr_get( buf_block_t* block; dict_hdr_t* header; - block = buf_page_get(DICT_HDR_SPACE, 0, DICT_HDR_PAGE_NO, - RW_X_LATCH, mtr); + block = buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO), + univ_page_size, RW_X_LATCH, mtr); header = DICT_HDR + buf_block_get_frame(block); buf_block_dbg_add_level(block, SYNC_DICT_HEADER); @@ -63,23 +64,57 @@ dict_hdr_get( /**********************************************************************//** Returns a new table, index, or space id.
*/ -UNIV_INTERN void dict_hdr_get_new_id( /*================*/ - table_id_t* table_id, /*!< out: table id - (not assigned if NULL) */ - index_id_t* index_id, /*!< out: index id - (not assigned if NULL) */ - ulint* space_id) /*!< out: space id - (not assigned if NULL) */ + table_id_t* table_id, /*!< out: table id + (not assigned if NULL) */ + index_id_t* index_id, /*!< out: index id + (not assigned if NULL) */ + ulint* space_id, /*!< out: space id + (not assigned if NULL) */ + const dict_table_t* table, /*!< in: table */ + bool disable_redo) /*!< in: if true and table + object is NULL + then disable-redo */ { dict_hdr_t* dict_hdr; ib_id_t id; mtr_t mtr; mtr_start(&mtr); + if (table) { + dict_disable_redo_if_temporary(table, &mtr); + } else if (disable_redo) { + /* In non-read-only mode we need to ensure that space-id header + page is written to disk else if page is removed from buffer + cache and re-loaded it would assign temporary tablespace id + to another tablespace. + This is not a case with read-only mode as there is no new object + that is created except temporary tablespace. */ + mtr_set_log_mode(&mtr, + (srv_read_only_mode ? MTR_LOG_NONE : MTR_LOG_NO_REDO)); + } + /* Server started and let's say space-id = x + - table created with file-per-table + - space-id = x + 1 + - crash + Case 1: If it was redo logged then we know that it will be + restored to x + 1 + Case 2: if not redo-logged + Header will have the old space-id = x + This is OK because on restart there is no object with + space id = x + 1 + Case 3: + space-id = x (on start) + space-id = x+1 (temp-table allocation) - no redo logging + space-id = x+2 (non-temp-table allocation), this get's + redo logged. + If there is a crash there will be only 2 entries + x (original) and x+2 (new) and disk hdr will be updated + to reflect x + 2 entry. + We cannot allocate the same space id to different objects. */ dict_hdr = dict_hdr_get(&mtr); if (table_id) { @@ -111,7 +146,6 @@ dict_hdr_get_new_id( /**********************************************************************//** Writes the current value of the row id counter to the dictionary header file page. */ -UNIV_INTERN void dict_hdr_flush_row_id(void) /*=======================*/ @@ -120,7 +154,7 @@ dict_hdr_flush_row_id(void) row_id_t id; mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); id = dict_sys->row_id; @@ -136,7 +170,7 @@ dict_hdr_flush_row_id(void) /*****************************************************************//** Creates the file page for the dictionary header. This function is called only at the database creation. 
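
dict_hdr_get_new_id() above now chooses the mini-transaction log mode before touching the dictionary header page. A compilable sketch of that decision, with a toy enum in place of the MTR_LOG_* constants and plain bools in place of srv_read_only_mode and the table object; the branch for a supplied table assumes that dict_disable_redo_if_temporary() turns redo logging off only for temporary tables, which is inferred from its name rather than shown in this hunk:

    enum log_mode { LOG_ALL, LOG_NO_REDO, LOG_NONE };

    /* Decide how the id-allocation mini-transaction should be logged. */
    log_mode pick_log_mode(bool have_table, bool table_is_temporary,
    		       bool disable_redo, bool read_only)
    {
    	if (have_table) {
    		/* assumed behaviour of dict_disable_redo_if_temporary() */
    		return table_is_temporary ? LOG_NO_REDO : LOG_ALL;
    	}
    	if (disable_redo) {
    		/* Read-only startup creates nothing but the temporary
    		tablespace, so even the page write can be skipped;
    		otherwise skip only the redo record so the header page
    		still reaches disk and the space id is not reused. */
    		return read_only ? LOG_NONE : LOG_NO_REDO;
    	}
    	return LOG_ALL;	/* normal, fully redo-logged allocation */
    }

This matches the Case 1/2/3 reasoning in the comment above: a space id handed to a temporary tablespace may be lost on crash without harm, while ids for persistent objects are still made durable.
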
-@return TRUE if succeed */ +@return TRUE if succeed */ static ibool dict_hdr_create( @@ -154,7 +188,7 @@ dict_hdr_create( block = fseg_create(DICT_HDR_SPACE, 0, DICT_HDR + DICT_HDR_FSEG_HEADER, mtr); - ut_a(DICT_HDR_PAGE_NO == buf_block_get_page_no(block)); + ut_a(DICT_HDR_PAGE_NO == block->page.id.page_no()); dict_header = dict_hdr_get(mtr); @@ -180,9 +214,9 @@ dict_hdr_create( system tables */ /*--------------------------*/ - root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, - DICT_HDR_SPACE, 0, DICT_TABLES_ID, - dict_ind_redundant, mtr); + root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, DICT_HDR_SPACE, + univ_page_size, DICT_TABLES_ID, + dict_ind_redundant, NULL, mtr); if (root_page_no == FIL_NULL) { return(FALSE); @@ -191,9 +225,9 @@ dict_hdr_create( mlog_write_ulint(dict_header + DICT_HDR_TABLES, root_page_no, MLOG_4BYTES, mtr); /*--------------------------*/ - root_page_no = btr_create(DICT_UNIQUE, DICT_HDR_SPACE, 0, - DICT_TABLE_IDS_ID, - dict_ind_redundant, mtr); + root_page_no = btr_create(DICT_UNIQUE, DICT_HDR_SPACE, + univ_page_size, DICT_TABLE_IDS_ID, + dict_ind_redundant, NULL, mtr); if (root_page_no == FIL_NULL) { return(FALSE); @@ -202,9 +236,9 @@ dict_hdr_create( mlog_write_ulint(dict_header + DICT_HDR_TABLE_IDS, root_page_no, MLOG_4BYTES, mtr); /*--------------------------*/ - root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, - DICT_HDR_SPACE, 0, DICT_COLUMNS_ID, - dict_ind_redundant, mtr); + root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, DICT_HDR_SPACE, + univ_page_size, DICT_COLUMNS_ID, + dict_ind_redundant, NULL, mtr); if (root_page_no == FIL_NULL) { return(FALSE); @@ -213,9 +247,9 @@ dict_hdr_create( mlog_write_ulint(dict_header + DICT_HDR_COLUMNS, root_page_no, MLOG_4BYTES, mtr); /*--------------------------*/ - root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, - DICT_HDR_SPACE, 0, DICT_INDEXES_ID, - dict_ind_redundant, mtr); + root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, DICT_HDR_SPACE, + univ_page_size, DICT_INDEXES_ID, + dict_ind_redundant, NULL, mtr); if (root_page_no == FIL_NULL) { return(FALSE); @@ -224,9 +258,9 @@ dict_hdr_create( mlog_write_ulint(dict_header + DICT_HDR_INDEXES, root_page_no, MLOG_4BYTES, mtr); /*--------------------------*/ - root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, - DICT_HDR_SPACE, 0, DICT_FIELDS_ID, - dict_ind_redundant, mtr); + root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE, DICT_HDR_SPACE, + univ_page_size, DICT_FIELDS_ID, + dict_ind_redundant, NULL, mtr); if (root_page_no == FIL_NULL) { return(FALSE); @@ -243,7 +277,6 @@ dict_hdr_create( Initializes the data dictionary memory structures when the database is started. This function is also called when the data dictionary is created. @return DB_SUCCESS or error code. 
*/ -UNIV_INTERN dberr_t dict_boot(void) /*===========*/ @@ -263,8 +296,8 @@ dict_boot(void) ut_ad(DICT_NUM_FIELDS__SYS_TABLE_IDS == 2); ut_ad(DICT_NUM_COLS__SYS_COLUMNS == 7); ut_ad(DICT_NUM_FIELDS__SYS_COLUMNS == 9); - ut_ad(DICT_NUM_COLS__SYS_INDEXES == 7); - ut_ad(DICT_NUM_FIELDS__SYS_INDEXES == 9); + ut_ad(DICT_NUM_COLS__SYS_INDEXES == 8); + ut_ad(DICT_NUM_FIELDS__SYS_INDEXES == 10); ut_ad(DICT_NUM_COLS__SYS_FIELDS == 3); ut_ad(DICT_NUM_FIELDS__SYS_FIELDS == 5); ut_ad(DICT_NUM_COLS__SYS_FOREIGN == 4); @@ -280,7 +313,7 @@ dict_boot(void) heap = mem_heap_create(450); - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); /* Get the dictionary header */ dict_hdr = dict_hdr_get(&mtr); @@ -302,10 +335,11 @@ dict_boot(void) /* Insert into the dictionary cache the descriptions of the basic system tables */ /*-------------------------*/ - table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0, 0); + table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0, 0, 0); - dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, + MAX_FULL_NAME_LEN); + dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 8); /* ROW_FORMAT = (N_COLS >> 31) ? COMPACT : REDUNDANT */ dict_mem_table_add_col(table, heap, "N_COLS", DATA_INT, 0, 4); /* The low order bit of TYPE is always set to 1. If the format @@ -354,9 +388,10 @@ dict_boot(void) ut_a(error == DB_SUCCESS); /*-------------------------*/ - table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0, 0); + table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, + 7, 0, 0, 0); - dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 8); dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4); dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); dict_mem_table_add_col(table, heap, "MTYPE", DATA_INT, 0, 4); @@ -386,15 +421,17 @@ dict_boot(void) ut_a(error == DB_SUCCESS); /*-------------------------*/ - table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0, 0); + table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, + DICT_NUM_COLS__SYS_INDEXES, 0, 0, 0); - dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 0); - dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "TABLE_ID", DATA_BINARY, 0, 8); + dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 8); dict_mem_table_add_col(table, heap, "NAME", DATA_BINARY, 0, 0); dict_mem_table_add_col(table, heap, "N_FIELDS", DATA_INT, 0, 4); dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4); dict_mem_table_add_col(table, heap, "SPACE", DATA_INT, 0, 4); dict_mem_table_add_col(table, heap, "PAGE_NO", DATA_INT, 0, 4); + dict_mem_table_add_col(table, heap, "MERGE_THRESHOLD", DATA_INT, 0, 4); table->id = DICT_INDEXES_ID; @@ -418,9 +455,9 @@ dict_boot(void) ut_a(error == DB_SUCCESS); /*-------------------------*/ - table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0, 0); + table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0, 0, 0); - dict_mem_table_add_col(table, heap, "INDEX_ID", DATA_BINARY, 0, 0); + dict_mem_table_add_col(table, heap, "INDEX_ID", DATA_BINARY, 0, 8); dict_mem_table_add_col(table, heap, "POS", DATA_INT, 0, 4); dict_mem_table_add_col(table, heap, "COL_NAME", DATA_BINARY, 0, 0); @@ -459,17 +496,15 @@ dict_boot(void) if 
(srv_read_only_mode && !ibuf_is_empty()) { if (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Change buffer must be empty when --innodb-read-only " - "is set! " - "You can try to recover the database with innodb_force_recovery=5"); + ib::error() << "Change buffer must be empty when" + " --innodb-read-only is set!" + "You can try to recover the database with innodb_force_recovery=5"; err = DB_ERROR; } else { - ib_logf(IB_LOG_LEVEL_WARN, - "Change buffer not empty when --innodb-read-only " - "is set! but srv_force_recovery = %lu, ignoring.", - srv_force_recovery); + ib::warn() << "Change buffer not empty when --innodb-read-only " + "is set! but srv_force_recovery = " << srv_force_recovery + << " , ignoring."; } } @@ -482,7 +517,7 @@ dict_boot(void) dict_load_sys_table(dict_sys->sys_fields); } - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); } return(err); @@ -502,7 +537,6 @@ dict_insert_initial_data(void) /*****************************************************************//** Creates and initializes the data dictionary at the server bootstrap. @return DB_SUCCESS or error code. */ -UNIV_INTERN dberr_t dict_create(void) /*=============*/ diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index d423f16f61c..5c7d41a9edb 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -23,6 +23,8 @@ Database object creation Created 1/8/1996 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + #include "dict0crea.h" #ifdef UNIV_NONINL @@ -44,12 +46,14 @@ Created 1/8/1996 Heikki Tuuri #include "ut0vec.h" #include "dict0priv.h" #include "fts0priv.h" -#include "ha_prototypes.h" +#include "fsp0space.h" +#include "fsp0sysspace.h" +#include "srv0start.h" /*****************************************************************//** Based on a table object, this function builds the entry to be inserted in the SYS_TABLES system table. -@return the tuple which should be inserted */ +@return the tuple which should be inserted */ static dtuple_t* dict_create_sys_tables_tuple( @@ -78,7 +82,8 @@ dict_create_sys_tables_tuple( dfield = dtuple_get_nth_field( entry, DICT_COL__SYS_TABLES__NAME); - dfield_set_data(dfield, table->name, ut_strlen(table->name)); + dfield_set_data(dfield, + table->name.m_name, strlen(table->name.m_name)); /* 1: DB_TRX_ID added later */ /* 2: DB_ROLL_PTR added later */ @@ -96,7 +101,11 @@ dict_create_sys_tables_tuple( entry, DICT_COL__SYS_TABLES__N_COLS); ptr = static_cast(mem_heap_alloc(heap, 4)); - mach_write_to_4(ptr, table->n_def + + /* If there is any virtual column, encode it in N_COLS */ + mach_write_to_4(ptr, dict_table_encode_n_col( + static_cast(table->n_def), + static_cast(table->n_v_def)) | ((table->flags & DICT_TF_COMPACT) << 31)); dfield_set_data(dfield, ptr, 4); @@ -128,7 +137,7 @@ dict_create_sys_tables_tuple( ptr = static_cast(mem_heap_alloc(heap, 4)); /* Be sure all non-used bits are zero. */ - ut_a(!(table->flags2 & ~DICT_TF2_BIT_MASK)); + ut_a(!(table->flags2 & DICT_TF2_UNUSED_BIT_MASK)); mach_write_to_4(ptr, table->flags2); dfield_set_data(dfield, ptr, 4); @@ -154,7 +163,7 @@ dict_create_sys_tables_tuple( /*****************************************************************//** Based on a table object, this function builds the entry to be inserted in the SYS_COLUMNS system table. 
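
dict_create_sys_tables_tuple() above now folds the virtual-column count into SYS_TABLES.N_COLS via dict_table_encode_n_col() and keeps the ROW_FORMAT=COMPACT flag in bit 31 (compare the "ROW_FORMAT = (N_COLS >> 31) ? COMPACT : REDUNDANT" note in dict_boot()). The exact packing of the two counts lives in dict_table_encode_n_col(), which this hunk does not show; the sketch below assumes the ordinary column count in the low half-word and the virtual count in the high half-word, so treat that split as illustrative only:

    #include <cassert>
    #include <cstdint>

    const uint32_t COMPACT_BIT = 1U << 31;

    uint32_t encode_n_cols(uint32_t n_cols, uint32_t n_virtual, bool compact)
    {
    	uint32_t v = n_cols | (n_virtual << 16);	/* assumed split */
    	if (compact) {
    		v |= COMPACT_BIT;			/* flags & DICT_TF_COMPACT, shifted to bit 31 */
    	}
    	return v;
    }

    int main()
    {
    	uint32_t v = encode_n_cols(5, 2, true);
    	assert((v >> 31) == 1);			/* ROW_FORMAT = COMPACT */
    	assert((v & 0xFFFF) == 5);		/* ordinary columns */
    	assert(((v >> 16) & 0x7FFF) == 2);	/* virtual columns (assumed layout) */
    	return 0;
    }
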
-@return the tuple which should be inserted */ +@return the tuple which should be inserted */ static dtuple_t* dict_create_sys_columns_tuple( @@ -171,11 +180,23 @@ dict_create_sys_columns_tuple( dfield_t* dfield; byte* ptr; const char* col_name; + ulint num_base = 0; + ulint v_col_no = ULINT_UNDEFINED; ut_ad(table); ut_ad(heap); - column = dict_table_get_nth_col(table, i); + /* Any column beyond table->n_def would be virtual columns */ + if (i >= table->n_def) { + dict_v_col_t* v_col = dict_table_get_nth_v_col( + table, i - table->n_def); + column = &v_col->m_col; + num_base = v_col->num_base; + v_col_no = column->ind; + } else { + column = dict_table_get_nth_col(table, i); + ut_ad(!dict_col_is_virtual(column)); + } sys_columns = dict_sys->sys_columns; @@ -195,7 +216,15 @@ dict_create_sys_columns_tuple( dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_COLUMNS__POS); ptr = static_cast(mem_heap_alloc(heap, 4)); - mach_write_to_4(ptr, i); + + if (v_col_no != ULINT_UNDEFINED) { + /* encode virtual column's position in MySQL table and InnoDB + table in "POS" */ + mach_write_to_4(ptr, dict_create_v_col_pos( + i - table->n_def, v_col_no)); + } else { + mach_write_to_4(ptr, i); + } dfield_set_data(dfield, ptr, 4); @@ -204,7 +233,12 @@ dict_create_sys_columns_tuple( /* 4: NAME ---------------------------*/ dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_COLUMNS__NAME); - col_name = dict_table_get_col_name(table, i); + if (i >= table->n_def) { + col_name = dict_table_get_v_col_name(table, i - table->n_def); + } else { + col_name = dict_table_get_col_name(table, i); + } + dfield_set_data(dfield, col_name, ut_strlen(col_name)); /* 5: MTYPE --------------------------*/ @@ -235,7 +269,7 @@ dict_create_sys_columns_tuple( dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_COLUMNS__PREC); ptr = static_cast(mem_heap_alloc(heap, 4)); - mach_write_to_4(ptr, 0/* unused */); + mach_write_to_4(ptr, num_base); dfield_set_data(dfield, ptr, 4); /*---------------------------------*/ @@ -243,9 +277,77 @@ dict_create_sys_columns_tuple( return(entry); } +/** Based on a table object, this function builds the entry to be inserted +in the SYS_VIRTUAL system table. Each row maps a virtual column to one of +its base column. 
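
For a concrete picture of that mapping: a table t(a INT, b INT, c INT AS (a + b)) gets two SYS_VIRTUAL rows for c, one per base column. The sketch below models only the row contents; the real POS value comes from dict_create_v_col_pos(), whose bit layout is not part of this hunk, so a plain ordinal stands in for it:

    #include <cstdio>
    #include <vector>

    struct sys_virtual_row { unsigned long table_id; unsigned pos; unsigned base_pos; };

    /* One (TABLE_ID, POS, BASE_POS) row per base column of a virtual column. */
    std::vector<sys_virtual_row>
    rows_for_virtual_col(unsigned long table_id, unsigned v_pos,
    		     const std::vector<unsigned>& base_ordinals)
    {
    	std::vector<sys_virtual_row> rows;
    	for (unsigned base : base_ordinals) {
    		rows.push_back({table_id, v_pos, base});
    	}
    	return rows;
    }

    int main()
    {
    	/* c is virtual column 0 of table 42 and depends on columns a (0) and b (1) */
    	for (const sys_virtual_row& r : rows_for_virtual_col(42, 0, {0, 1})) {
    		std::printf("TABLE_ID=%lu POS=%u BASE_POS=%u\n",
    			    r.table_id, r.pos, r.base_pos);
    	}
    }
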
+@param[in] table table +@param[in] v_col_n virtual column number +@param[in] b_col_n base column sequence num +@param[in] heap memory heap +@return the tuple which should be inserted */ +static +dtuple_t* +dict_create_sys_virtual_tuple( + const dict_table_t* table, + ulint v_col_n, + ulint b_col_n, + mem_heap_t* heap) +{ + dict_table_t* sys_virtual; + dtuple_t* entry; + const dict_col_t* base_column; + dfield_t* dfield; + byte* ptr; + + ut_ad(table); + ut_ad(heap); + + ut_ad(v_col_n < table->n_v_def); + dict_v_col_t* v_col = dict_table_get_nth_v_col(table, v_col_n); + base_column = v_col->base_col[b_col_n]; + + sys_virtual = dict_sys->sys_virtual; + + entry = dtuple_create(heap, DICT_NUM_COLS__SYS_VIRTUAL + + DATA_N_SYS_COLS); + + dict_table_copy_types(entry, sys_virtual); + + /* 0: TABLE_ID -----------------------*/ + dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_VIRTUAL__TABLE_ID); + + ptr = static_cast(mem_heap_alloc(heap, 8)); + mach_write_to_8(ptr, table->id); + + dfield_set_data(dfield, ptr, 8); + + /* 1: POS ---------------------------*/ + dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_VIRTUAL__POS); + + ptr = static_cast(mem_heap_alloc(heap, 4)); + ulint v_col_no = dict_create_v_col_pos(v_col_n, v_col->m_col.ind); + mach_write_to_4(ptr, v_col_no); + + dfield_set_data(dfield, ptr, 4); + + /* 2: BASE_POS ----------------------------*/ + dfield = dtuple_get_nth_field(entry, DICT_COL__SYS_VIRTUAL__BASE_POS); + + ptr = static_cast(mem_heap_alloc(heap, 4)); + mach_write_to_4(ptr, base_column->ind); + + dfield_set_data(dfield, ptr, 4); + + /* 3: DB_TRX_ID added later */ + /* 4: DB_ROLL_PTR added later */ + + /*---------------------------------*/ + return(entry); +} + /***************************************************************//** Builds a table definition to insert. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_build_table_def_step( @@ -255,85 +357,222 @@ dict_build_table_def_step( { dict_table_t* table; dtuple_t* row; - dberr_t error; - const char* path; + dberr_t err = DB_SUCCESS; + + table = node->table; + + trx_t* trx = thr_get_trx(thr); + dict_table_assign_new_id(table, trx); + + err = dict_build_tablespace_for_table(table, node); + + if (err != DB_SUCCESS) { + return(err); + } + + row = dict_create_sys_tables_tuple(table, node->heap); + + ins_node_set_new_row(node->tab_def, row); + + return(err); +} + +/** Build a tablespace to store various objects. +@param[in,out] tablespace Tablespace object describing what to build. +@return DB_SUCCESS or error code. */ +dberr_t +dict_build_tablespace( + Tablespace* tablespace) +{ + dberr_t err = DB_SUCCESS; mtr_t mtr; ulint space = 0; - bool use_tablespace; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(tablespace); - table = node->table; - use_tablespace = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_TABLESPACE); + DBUG_EXECUTE_IF("out_of_tablespace_disk", + return(DB_OUT_OF_FILE_SPACE);); + /* Get a new space id. */ + dict_hdr_get_new_id(NULL, NULL, &space, NULL, false); + if (space == ULINT_UNDEFINED) { + return(DB_ERROR); + } + tablespace->set_space_id(space); + + Datafile* datafile = tablespace->first_datafile(); + + /* We create a new generic empty tablespace. 
+ We initially let it be 4 pages: + - page 0 is the fsp header and an extent descriptor page, + - page 1 is an ibuf bitmap page, + - page 2 is the first inode page, + - page 3 will contain the root of the clustered index of the + first table we create here. */ + + err = fil_ibd_create( + space, + tablespace->name(), + datafile->filepath(), + tablespace->flags(), + FIL_IBD_FILE_INITIAL_SIZE, + tablespace->encryption_mode(), + tablespace->key_id()); + + if (err != DB_SUCCESS) { + return(err); + } + + /* Update SYS_TABLESPACES and SYS_DATAFILES */ + err = dict_replace_tablespace_and_filepath( + tablespace->space_id(), tablespace->name(), + datafile->filepath(), tablespace->flags()); + if (err != DB_SUCCESS) { + os_file_delete(innodb_data_file_key, datafile->filepath()); + return(err); + } + + mtr_start(&mtr); + mtr.set_named_space(space); + + /* Once we allow temporary general tablespaces, we must do this; + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); */ + ut_a(!FSP_FLAGS_GET_TEMPORARY(tablespace->flags())); + + fsp_header_init(space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + + mtr_commit(&mtr); + + return(err); +} + +/** Builds a tablespace to contain a table, using file-per-table=1. +@param[in,out] table Table to build in its own tablespace. +@param[in] node Table create node +@return DB_SUCCESS or error code */ +dberr_t +dict_build_tablespace_for_table( + dict_table_t* table, + tab_node_t* node) +{ + dberr_t err = DB_SUCCESS; + mtr_t mtr; + ulint space = 0; + bool needs_file_per_table; + char* filepath; - dict_hdr_get_new_id(&table->id, NULL, NULL); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); - thr_get_trx(thr)->table_id = table->id; + needs_file_per_table + = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_FILE_PER_TABLE); - /* Always set this bit for all new created tables */ + /* Always set this bit for all new created tables */ DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_AUX_HEX_NAME); DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name", DICT_TF2_FLAG_UNSET(table, DICT_TF2_FTS_AUX_HEX_NAME);); - if (use_tablespace) { - /* This table will not use the system tablespace. - Get a new space id. */ - dict_hdr_get_new_id(NULL, NULL, &space); + if (needs_file_per_table) { + /* This table will need a new tablespace. */ + + ut_ad(dict_table_get_format(table) <= UNIV_FORMAT_MAX); + ut_ad(DICT_TF_GET_ZIP_SSIZE(table->flags) == 0 + || dict_table_get_format(table) >= UNIV_FORMAT_B); + + /* Get a new tablespace ID */ + dict_hdr_get_new_id(NULL, NULL, &space, table, false); DBUG_EXECUTE_IF( "ib_create_table_fail_out_of_space_ids", space = ULINT_UNDEFINED; ); - if (UNIV_UNLIKELY(space == ULINT_UNDEFINED)) { + if (space == ULINT_UNDEFINED) { return(DB_ERROR); } + table->space = static_cast(space); + + /* Determine the tablespace flags. */ + bool is_temp = dict_table_is_temporary(table); + bool has_data_dir = DICT_TF_HAS_DATA_DIR(table->flags); + ulint fsp_flags = dict_tf_to_fsp_flags(table->flags, is_temp); + + /* Determine the full filepath */ + if (is_temp) { + /* Temporary table filepath contains a full path + and a filename without the extension. 
*/ + ut_ad(table->dir_path_of_temp_table); + filepath = fil_make_filepath( + table->dir_path_of_temp_table, + NULL, IBD, false); + + } else if (has_data_dir) { + ut_ad(table->data_dir_path); + filepath = fil_make_filepath( + table->data_dir_path, + table->name.m_name, IBD, true); + + } else { + /* Make the tablespace file in the default dir + using the table name */ + filepath = fil_make_filepath( + NULL, table->name.m_name, IBD, false); + } /* We create a new single-table tablespace for the table. We initially let it be 4 pages: - page 0 is the fsp header and an extent descriptor page, - page 1 is an ibuf bitmap page, - page 2 is the first inode page, - - page 3 will contain the root of the clustered index of the - table we create here. */ + - page 3 will contain the root of the clustered index of + the table we create here. */ - path = table->data_dir_path ? table->data_dir_path - : table->dir_path_of_temp_table; - - ut_ad(dict_table_get_format(table) <= UNIV_FORMAT_MAX); - ut_ad(!dict_table_zip_size(table) - || dict_table_get_format(table) >= UNIV_FORMAT_B); - - error = fil_create_new_single_table_tablespace( - space, table->name, path, - dict_tf_to_fsp_flags(table->flags), - table->flags2, + err = fil_ibd_create( + space, table->name.m_name, filepath, fsp_flags, FIL_IBD_FILE_INITIAL_SIZE, - node->mode, node->key_id); + node ? node->mode : FIL_SPACE_ENCRYPTION_DEFAULT, + node ? node->key_id : FIL_DEFAULT_ENCRYPTION_KEY); - table->space = (unsigned int) space; + ut_free(filepath); - if (error != DB_SUCCESS) { + if (err != DB_SUCCESS) { - return(error); + return(err); } mtr_start(&mtr); + mtr.set_named_space(table->space); + dict_disable_redo_if_temporary(table, &mtr); fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); } else { - /* Create in the system tablespace: disallow Barracuda - features by keeping only the first bit which says whether - the row format is redundant or compact */ - table->flags &= DICT_TF_COMPACT; - } - - row = dict_create_sys_tables_tuple(table, node->heap); + /* We do not need to build a tablespace for this table. It + is already built. Just find the correct tablespace ID. */ + + if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + ut_ad(table->tablespace != NULL); + + ut_ad(table->space == fil_space_get_id_by_name( + table->tablespace())); + } else if (dict_table_is_temporary(table)) { + /* Use the shared temporary tablespace. + Note: The temp tablespace supports all non-Compressed + row formats whereas the system tablespace only + supports Redundant and Compact */ + ut_ad(dict_tf_get_rec_format(table->flags) + != REC_FORMAT_COMPRESSED); + table->space = static_cast( + srv_tmp_space.space_id()); + } else { + /* Create in the system tablespace. */ + ut_ad(table->space == srv_sys_space.space_id()); + } - ins_node_set_new_row(node->tab_def, row); + DBUG_EXECUTE_IF("ib_ddl_crash_during_tablespace_alloc", + DBUG_SUICIDE();); + } return(DB_SUCCESS); } @@ -353,10 +592,25 @@ dict_build_col_def_step( ins_node_set_new_row(node->col_def, row); } +/** Builds a SYS_VIRTUAL row definition to insert. +@param[in] node table create node */ +static +void +dict_build_v_col_def_step( + tab_node_t* node) +{ + dtuple_t* row; + + row = dict_create_sys_virtual_tuple(node->table, node->col_no, + node->base_col_no, + node->heap); + ins_node_set_new_row(node->v_col_def, row); +} + /*****************************************************************//** Based on an index object, this function builds the entry to be inserted in the SYS_INDEXES system table. 
-@return the tuple which should be inserted */ +@return the tuple which should be inserted */ static dtuple_t* dict_create_sys_indexes_tuple( @@ -372,7 +626,7 @@ dict_create_sys_indexes_tuple( dfield_t* dfield; byte* ptr; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(index); ut_ad(heap); @@ -380,7 +634,8 @@ dict_create_sys_indexes_tuple( table = dict_table_get_low(index->table_name); - entry = dtuple_create(heap, 7 + DATA_N_SYS_COLS); + entry = dtuple_create( + heap, DICT_NUM_COLS__SYS_INDEXES + DATA_N_SYS_COLS); dict_table_copy_types(entry, sys_indexes); @@ -408,7 +663,16 @@ dict_create_sys_indexes_tuple( dfield = dtuple_get_nth_field( entry, DICT_COL__SYS_INDEXES__NAME); - dfield_set_data(dfield, index->name, ut_strlen(index->name)); + if (!index->is_committed()) { + ulint len = strlen(index->name) + 1; + char* name = static_cast( + mem_heap_alloc(heap, len)); + *name = *TEMP_INDEX_PREFIX_STR; + memcpy(name + 1, index->name, len - 1); + dfield_set_data(dfield, name, len); + } else { + dfield_set_data(dfield, index->name, strlen(index->name)); + } /* 5: N_FIELDS ----------------------*/ dfield = dtuple_get_nth_field( @@ -448,6 +712,16 @@ dict_create_sys_indexes_tuple( dfield_set_data(dfield, ptr, 4); + /* 9: MERGE_THRESHOLD ----------------*/ + + dfield = dtuple_get_nth_field( + entry, DICT_COL__SYS_INDEXES__MERGE_THRESHOLD); + + ptr = static_cast(mem_heap_alloc(heap, 4)); + mach_write_to_4(ptr, DICT_INDEX_MERGE_THRESHOLD_DEFAULT); + + dfield_set_data(dfield, ptr, 4); + /*--------------------------------*/ return(entry); @@ -456,7 +730,7 @@ dict_create_sys_indexes_tuple( /*****************************************************************//** Based on an index object, this function builds the entry to be inserted in the SYS_FIELDS system table. -@return the tuple which should be inserted */ +@return the tuple which should be inserted */ static dtuple_t* dict_create_sys_fields_tuple( @@ -538,7 +812,7 @@ dict_create_sys_fields_tuple( /*****************************************************************//** Creates the tuple with which the index entry is searched for writing the index tree root page number, if such a tree is created. -@return the tuple for search */ +@return the tuple for search */ static dtuple_t* dict_create_search_tuple( @@ -573,7 +847,7 @@ dict_create_search_tuple( /***************************************************************//** Builds an index definition row to insert. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_build_index_def_step( @@ -586,7 +860,7 @@ dict_build_index_def_step( dtuple_t* row; trx_t* trx; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); trx = thr_get_trx(thr); @@ -608,7 +882,7 @@ dict_build_index_def_step( ut_ad((UT_LIST_GET_LEN(table->indexes) > 0) || dict_index_is_clust(index)); - dict_hdr_get_new_id(NULL, &index->id, NULL); + dict_hdr_get_new_id(NULL, &index->id, NULL, table, false); /* Inherit the space id from the table; we store all indexes of a table in the same tablespace */ @@ -628,6 +902,48 @@ dict_build_index_def_step( return(DB_SUCCESS); } +/***************************************************************//** +Builds an index definition without updating SYSTEM TABLES. 
+@return DB_SUCCESS or error code */ +void +dict_build_index_def( +/*=================*/ + const dict_table_t* table, /*!< in: table */ + dict_index_t* index, /*!< in/out: index */ + trx_t* trx) /*!< in/out: InnoDB transaction handle */ +{ + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); + + if (trx->table_id == 0) { + /* Record only the first table id. */ + trx->table_id = table->id; + } + + ut_ad((UT_LIST_GET_LEN(table->indexes) > 0) + || dict_index_is_clust(index)); + + if (!dict_table_is_intrinsic(table)) { + dict_hdr_get_new_id(NULL, &index->id, NULL, table, false); + } else { + /* Index are re-loaded in process of creation using id. + If same-id is used for all indexes only first index will always + be retrieved when expected is iterative return of all indexes*/ + if (UT_LIST_GET_LEN(table->indexes) > 0) { + index->id = UT_LIST_GET_LAST(table->indexes)->id + 1; + } else { + index->id = 1; + } + } + + /* Inherit the space id from the table; we store all indexes of a + table in the same tablespace */ + + index->space = table->space; + + /* Note that the index was created by this transaction. */ + index->trx_id = trx->id; +} + /***************************************************************//** Builds a field definition row to insert. */ static @@ -648,20 +964,20 @@ dict_build_field_def_step( /***************************************************************//** Creates an index tree for the index if it is not a member of a cluster. -@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_create_index_tree_step( /*========================*/ ind_node_t* node) /*!< in: index create node */ { + mtr_t mtr; + btr_pcur_t pcur; dict_index_t* index; dict_table_t* sys_indexes; dtuple_t* search_tuple; - btr_pcur_t pcur; - mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); index = node->index; @@ -678,6 +994,13 @@ dict_create_index_tree_step( mtr_start(&mtr); + const bool missing = index->table->ibd_file_missing + || dict_table_is_discarded(index->table); + + if (!missing) { + mtr.set_named_space(index->space); + } + search_tuple = dict_create_search_tuple(node->ind_row, node->heap); btr_pcur_open(UT_LIST_GET_FIRST(sys_indexes->indexes), @@ -688,16 +1011,14 @@ dict_create_index_tree_step( dberr_t err = DB_SUCCESS; - ulint zip_size = dict_table_zip_size(index->table); - - if (node->index->table->ibd_file_missing - || dict_table_is_discarded(node->index->table)) { + if (missing) { node->page_no = FIL_NULL; } else { node->page_no = btr_create( - index->type, index->space, zip_size, - index->id, index, &mtr); + index->type, index->space, + dict_table_page_size(index->table), + index->id, index, NULL, &mtr); if (node->page_no == FIL_NULL) { err = DB_OUT_OF_FILE_SPACE; @@ -719,169 +1040,207 @@ dict_create_index_tree_step( return(err); } -/*******************************************************************//** -Drops the index tree associated with a row in SYS_INDEXES table. */ -UNIV_INTERN -void +/***************************************************************//** +Creates an index tree for the index if it is not a member of a cluster. +Don't update SYSTEM TABLES. 
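
dict_build_index_def() above skips the dictionary header entirely for intrinsic tables and hands out index ids purely in memory, taking last id plus one so that each index of the table can later be found again by id. A minimal model of that rule, with a std::vector standing in for the UT_LIST of the table's indexes:

    #include <cstdint>
    #include <vector>

    struct mem_index { uint64_t id; };

    /* Next in-memory index id for an intrinsic table: last id + 1, or 1. */
    uint64_t next_intrinsic_index_id(const std::vector<mem_index>& indexes)
    {
    	return indexes.empty() ? 1 : indexes.back().id + 1;
    }

Keeping the ids monotonically increasing is what the comment above means by avoiding the case where one shared id would always return the first index when the caller expects to iterate over all of them.
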
+@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +dberr_t +dict_create_index_tree_in_mem( +/*==========================*/ + dict_index_t* index, /*!< in/out: index */ + const trx_t* trx) /*!< in: InnoDB transaction handle */ +{ + mtr_t mtr; + ulint page_no = FIL_NULL; + + ut_ad(mutex_own(&dict_sys->mutex) + || dict_table_is_intrinsic(index->table)); + + if (index->type == DICT_FTS) { + /* FTS index does not need an index tree */ + return(DB_SUCCESS); + } + + mtr_start(&mtr); + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); + + dberr_t err = DB_SUCCESS; + + /* Currently this function is being used by temp-tables only. + Import/Discard of temp-table is blocked and so this assert. */ + ut_ad(index->table->ibd_file_missing == 0 + && !dict_table_is_discarded(index->table)); + + page_no = btr_create( + index->type, index->space, + dict_table_page_size(index->table), + index->id, index, NULL, &mtr); + + index->page = page_no; + index->trx_id = trx->id; + + if (page_no == FIL_NULL) { + err = DB_OUT_OF_FILE_SPACE; + } + + mtr_commit(&mtr); + + return(err); +} + +/** Drop the index tree associated with a row in SYS_INDEXES table. +@param[in,out] rec SYS_INDEXES record +@param[in,out] pcur persistent cursor on rec +@param[in,out] mtr mini-transaction +@return whether freeing the B-tree was attempted */ +bool dict_drop_index_tree( -/*=================*/ - rec_t* rec, /*!< in/out: record in the clustered index - of SYS_INDEXES table */ - mtr_t* mtr) /*!< in: mtr having the latch on the record page */ + rec_t* rec, + btr_pcur_t* pcur, + mtr_t* mtr) { - ulint root_page_no; - ulint space; - ulint zip_size; const byte* ptr; ulint len; + ulint space; + ulint root_page_no; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_a(!dict_table_is_comp(dict_sys->sys_indexes)); - ptr = rec_get_nth_field_old( - rec, DICT_FLD__SYS_INDEXES__PAGE_NO, &len); + + ptr = rec_get_nth_field_old(rec, DICT_FLD__SYS_INDEXES__PAGE_NO, &len); ut_ad(len == 4); + btr_pcur_store_position(pcur, mtr); + root_page_no = mtr_read_ulint(ptr, MLOG_4BYTES, mtr); if (root_page_no == FIL_NULL) { /* The tree has already been freed */ - return; + return(false); } + mlog_write_ulint(const_cast(ptr), FIL_NULL, MLOG_4BYTES, mtr); + ptr = rec_get_nth_field_old( rec, DICT_FLD__SYS_INDEXES__SPACE, &len); ut_ad(len == 4); space = mtr_read_ulint(ptr, MLOG_4BYTES, mtr); - zip_size = fil_space_get_zip_size(space); - if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { + ptr = rec_get_nth_field_old( + rec, DICT_FLD__SYS_INDEXES__ID, &len); + + ut_ad(len == 8); + + bool found; + const page_size_t page_size(fil_space_get_page_size(space, + &found)); + + if (!found) { /* It is a single table tablespace and the .ibd file is missing: do nothing */ - return; + return(false); } - /* We free all the pages but the root page first; this operation - may span several mini-transactions */ - - btr_free_but_not_root(space, zip_size, root_page_no); + /* If tablespace is scheduled for truncate, do not try to drop + the indexes in that tablespace. There is a truncate fixup action + which will take care of it. 
*/ + if (srv_is_tablespace_truncated(space)) { + return(false); + } - /* Then we free the root page in the same mini-transaction where - we write FIL_NULL to the appropriate field in the SYS_INDEXES - record: this mini-transaction marks the B-tree totally freed */ + btr_free_if_exists(page_id_t(space, root_page_no), page_size, + mach_read_from_8(ptr), mtr); - /* printf("Dropping index tree in space %lu root page %lu\n", space, - root_page_no); */ - btr_free_root(space, zip_size, root_page_no, mtr); + return(true); +} - page_rec_write_field(rec, DICT_FLD__SYS_INDEXES__PAGE_NO, - FIL_NULL, mtr); +/*******************************************************************//** +Drops the index tree but don't update SYS_INDEXES table. */ +void +dict_drop_index_tree_in_mem( +/*========================*/ + const dict_index_t* index, /*!< in: index */ + ulint page_no) /*!< in: index page-no */ +{ + ut_ad(mutex_own(&dict_sys->mutex) + || dict_table_is_intrinsic(index->table)); + ut_ad(dict_table_is_temporary(index->table)); + + ulint root_page_no = page_no; + ulint space = index->space; + bool found; + const page_size_t page_size(fil_space_get_page_size(space, + &found)); + + /* If tree has already been freed or it is a single table + tablespace and the .ibd file is missing do nothing, + else free the all the pages */ + if (root_page_no != FIL_NULL && found) { + btr_free(page_id_t(space, root_page_no), page_size); + } } /*******************************************************************//** -Truncates the index tree associated with a row in SYS_INDEXES table. +Recreate the index tree associated with a row in SYS_INDEXES table. @return new root page number, or FIL_NULL on failure */ -UNIV_INTERN ulint -dict_truncate_index_tree( +dict_recreate_index_tree( /*=====================*/ - dict_table_t* table, /*!< in: the table the index belongs to */ - ulint space, /*!< in: 0=truncate, - nonzero=create the index tree in the - given tablespace */ + const dict_table_t* + table, /*!< in/out: the table the index belongs to */ btr_pcur_t* pcur, /*!< in/out: persistent cursor pointing to record in the clustered index of SYS_INDEXES table. The cursor may be repositioned in this call. */ - mtr_t* mtr) /*!< in: mtr having the latch - on the record page. The mtr may be - committed and restarted in this call. */ + mtr_t* mtr) /*!< in/out: mtr having the latch + on the record page. 
*/ { - ulint root_page_no; - ibool drop = !space; - ulint zip_size; - ulint type; - index_id_t index_id; - rec_t* rec; - const byte* ptr; + ut_ad(mutex_own(&dict_sys->mutex)); + ut_a(!dict_table_is_comp(dict_sys->sys_indexes)); + ulint len; - dict_index_t* index; - bool has_been_dropped = false; + rec_t* rec = btr_pcur_get_rec(pcur); - ut_ad(mutex_own(&(dict_sys->mutex))); - ut_a(!dict_table_is_comp(dict_sys->sys_indexes)); - rec = btr_pcur_get_rec(pcur); - ptr = rec_get_nth_field_old( + const byte* ptr = rec_get_nth_field_old( rec, DICT_FLD__SYS_INDEXES__PAGE_NO, &len); ut_ad(len == 4); - root_page_no = mtr_read_ulint(ptr, MLOG_4BYTES, mtr); - - if (drop && root_page_no == FIL_NULL) { - has_been_dropped = true; - drop = FALSE; - } - - ptr = rec_get_nth_field_old( - rec, DICT_FLD__SYS_INDEXES__SPACE, &len); + ulint root_page_no = mtr_read_ulint(ptr, MLOG_4BYTES, mtr); + ptr = rec_get_nth_field_old(rec, DICT_FLD__SYS_INDEXES__SPACE, &len); ut_ad(len == 4); - if (drop) { - space = mtr_read_ulint(ptr, MLOG_4BYTES, mtr); - } + ut_a(table->space == mtr_read_ulint(ptr, MLOG_4BYTES, mtr)); - zip_size = fil_space_get_zip_size(space); + ulint space = table->space; + bool found; + const page_size_t page_size(fil_space_get_page_size(space, + &found)); - if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) { - /* It is a single table tablespace and the .ibd file is - missing: do nothing */ + if (!found) { + /* It is a single table tablespae and the .ibd file is + missing: do nothing. */ + + ib::warn() + << "Trying to TRUNCATE a missing .ibd file of table " + << table->name << "!"; - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Trying to TRUNCATE" - " a missing .ibd file of table %s!\n", table->name); return(FIL_NULL); } - ptr = rec_get_nth_field_old( - rec, DICT_FLD__SYS_INDEXES__TYPE, &len); + ptr = rec_get_nth_field_old(rec, DICT_FLD__SYS_INDEXES__TYPE, &len); ut_ad(len == 4); - type = mach_read_from_4(ptr); + ulint type = mach_read_from_4(ptr); ptr = rec_get_nth_field_old(rec, DICT_FLD__SYS_INDEXES__ID, &len); ut_ad(len == 8); - index_id = mach_read_from_8(ptr); - - if (!drop) { - - goto create; - } - - /* We free all the pages but the root page first; this operation - may span several mini-transactions */ - - btr_free_but_not_root(space, zip_size, root_page_no); - - /* Then we free the root page in the same mini-transaction where - we create the b-tree and write its new root page number to the - appropriate field in the SYS_INDEXES record: this mini-transaction - marks the B-tree totally truncated */ - - btr_block_get(space, zip_size, root_page_no, RW_X_LATCH, NULL, mtr); - - btr_free_root(space, zip_size, root_page_no, mtr); -create: - /* We will temporarily write FIL_NULL to the PAGE_NO field - in SYS_INDEXES, so that the database will not get into an - inconsistent state in case it crashes between the mtr_commit() - below and the following mtr_commit() call. */ - page_rec_write_field(rec, DICT_FLD__SYS_INDEXES__PAGE_NO, - FIL_NULL, mtr); + index_id_t index_id = mach_read_from_8(ptr); /* We will need to commit the mini-transaction in order to avoid deadlocks in the btr_create() call, because otherwise we would @@ -890,53 +1249,109 @@ create: mtr_commit(mtr); mtr_start(mtr); + mtr->set_named_space(space); btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr); /* Find the index corresponding to this SYS_INDEXES record. 
*/ - for (index = UT_LIST_GET_FIRST(table->indexes); - index; + for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); + index != NULL; index = UT_LIST_GET_NEXT(indexes, index)) { if (index->id == index_id) { if (index->type & DICT_FTS) { return(FIL_NULL); } else { - if (has_been_dropped) { - fprintf(stderr, " InnoDB: Trying to" - " TRUNCATE a missing index of" - " table %s!\n", - index->table->name); - } - - root_page_no = btr_create(type, space, zip_size, - index_id, index, mtr); + root_page_no = btr_create( + type, space, page_size, index_id, + index, NULL, mtr); index->page = (unsigned int) root_page_no; return(root_page_no); } } } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Index %llu of table %s is missing\n" - "InnoDB: from the data dictionary during TRUNCATE!\n", - (ullint) index_id, - table->name); + ib::error() << "Failed to create index with index id " << index_id + << " of table " << table->name; return(FIL_NULL); } +/*******************************************************************//** +Truncates the index tree but don't update SYSTEM TABLES. +@return DB_SUCCESS or error */ +dberr_t +dict_truncate_index_tree_in_mem( +/*============================*/ + dict_index_t* index) /*!< in/out: index */ +{ + mtr_t mtr; + bool truncate; + ulint space = index->space; + + ut_ad(mutex_own(&dict_sys->mutex) + || dict_table_is_intrinsic(index->table)); + ut_ad(dict_table_is_temporary(index->table)); + + ulint type = index->type; + ulint root_page_no = index->page; + + if (root_page_no == FIL_NULL) { + + /* The tree has been freed. */ + ib::warn() << "Trying to TRUNCATE a missing index of table " + << index->table->name << "!"; + + truncate = false; + } else { + truncate = true; + } + + bool found; + const page_size_t page_size(fil_space_get_page_size(space, + &found)); + + if (!found) { + + /* It is a single table tablespace and the .ibd file is + missing: do nothing */ + + ib::warn() + << "Trying to TRUNCATE a missing .ibd file of table " + << index->table->name << "!"; + } + + /* If table to truncate resides in its on own tablespace that will + be re-created on truncate then we can ignore freeing of existing + tablespace objects. */ + + if (truncate) { + btr_free(page_id_t(space, root_page_no), page_size); + } + + mtr_start(&mtr); + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); + + root_page_no = btr_create( + type, space, page_size, index->id, index, NULL, &mtr); + + DBUG_EXECUTE_IF("ib_err_trunc_temp_recreate_index", + root_page_no = FIL_NULL;); + + index->page = root_page_no; + + mtr_commit(&mtr); + + return(index->page == FIL_NULL ? DB_ERROR : DB_SUCCESS); +} + /*********************************************************************//** Creates a table create graph. 
-@return own: table create node */ -UNIV_INTERN +@return own: table create node */ tab_node_t* tab_create_graph_create( /*====================*/ dict_table_t* table, /*!< in: table to create, built as a memory data structure */ mem_heap_t* heap, /*!< in: heap where created */ - bool commit, /*!< in: true if the commit node should be - added to the query graph */ fil_encryption_t mode, /*!< in: encryption mode */ ulint key_id) /*!< in: encryption key_id */ { @@ -962,28 +1377,24 @@ tab_create_graph_create( heap); node->col_def->common.parent = node; - if (commit) { - node->commit_node = trx_commit_node_create(heap); - node->commit_node->common.parent = node; - } else { - node->commit_node = 0; - } + node->v_col_def = ins_node_create(INS_DIRECT, dict_sys->sys_virtual, + heap); + node->v_col_def->common.parent = node; return(node); } -/*********************************************************************//** -Creates an index create graph. -@return own: index create node */ -UNIV_INTERN +/** Creates an index create graph. +@param[in] index index to create, built as a memory data structure +@param[in,out] heap heap where created +@param[in] add_v new virtual columns added in the same clause with + add index +@return own: index create node */ ind_node_t* ind_create_graph_create( -/*====================*/ - dict_index_t* index, /*!< in: index to create, built as a memory data - structure */ - mem_heap_t* heap, /*!< in: heap where created */ - bool commit) /*!< in: true if the commit node should be - added to the query graph */ + dict_index_t* index, + mem_heap_t* heap, + const dict_add_v_col_t* add_v) { ind_node_t* node; @@ -994,6 +1405,8 @@ ind_create_graph_create( node->index = index; + node->add_v = add_v; + node->state = INDEX_BUILD_INDEX_DEF; node->page_no = FIL_NULL; node->heap = mem_heap_create(256); @@ -1006,20 +1419,12 @@ ind_create_graph_create( dict_sys->sys_fields, heap); node->field_def->common.parent = node; - if (commit) { - node->commit_node = trx_commit_node_create(heap); - node->commit_node->common.parent = node; - } else { - node->commit_node = 0; - } - return(node); } /***********************************************************//** Creates a table. This is a high-level function used in SQL execution graphs. 
-@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* dict_create_table_step( /*===================*/ @@ -1030,7 +1435,7 @@ dict_create_table_step( trx_t* trx; ut_ad(thr); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); trx = thr_get_trx(thr); @@ -1062,7 +1467,8 @@ dict_create_table_step( if (node->state == TABLE_BUILD_COL_DEF) { - if (node->col_no < (node->table)->n_def) { + if (node->col_no < (static_cast(node->table->n_def) + + static_cast(node->table->n_v_def))) { dict_build_col_def_step(node); @@ -1072,24 +1478,57 @@ dict_create_table_step( return(thr); } else { - node->state = TABLE_COMMIT_WORK; + /* Move on to SYS_VIRTUAL table */ + node->col_no = 0; + node->base_col_no = 0; + node->state = TABLE_BUILD_V_COL_DEF; } } - if (node->state == TABLE_COMMIT_WORK) { + if (node->state == TABLE_BUILD_V_COL_DEF) { + + if (node->col_no < static_cast(node->table->n_v_def)) { + dict_v_col_t* v_col = dict_table_get_nth_v_col( + node->table, node->col_no); - /* Table was correctly defined: do NOT commit the transaction - (CREATE TABLE does NOT do an implicit commit of the current - transaction) */ + /* If no base column */ + while (v_col->num_base == 0) { + node->col_no++; + if (node->col_no == static_cast( + (node->table)->n_v_def)) { + node->state = TABLE_ADD_TO_CACHE; + break; + } + + v_col = dict_table_get_nth_v_col( + node->table, node->col_no); + node->base_col_no = 0; + } - node->state = TABLE_ADD_TO_CACHE; + if (node->state != TABLE_ADD_TO_CACHE) { + ut_ad(node->col_no == v_col->v_pos); + dict_build_v_col_def_step(node); + + if (node->base_col_no < v_col->num_base - 1) { + /* move on to next base column */ + node->base_col_no++; + } else { + /* move on to next virtual column */ + node->col_no++; + node->base_col_no = 0; + } - /* thr->run_node = node->commit_node; + thr->run_node = node->v_col_def; - return(thr); */ + return(thr); + } + } else { + node->state = TABLE_ADD_TO_CACHE; + } } if (node->state == TABLE_ADD_TO_CACHE) { + DBUG_EXECUTE_IF("ib_ddl_crash_during_create", DBUG_SUICIDE();); dict_table_add_to_cache(node->table, TRUE, node->heap); @@ -1119,8 +1558,7 @@ function_exit: /***********************************************************//** Creates an index. This is a high-level function used in SQL execution graphs. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* dict_create_index_step( /*===================*/ @@ -1131,7 +1569,7 @@ dict_create_index_step( trx_t* trx; ut_ad(thr); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); trx = thr_get_trx(thr); @@ -1180,11 +1618,9 @@ dict_create_index_step( index_id_t index_id = node->index->id; - err = dict_index_add_to_cache( - node->table, node->index, FIL_NULL, - trx_is_strict(trx) - || dict_table_get_format(node->table) - >= UNIV_FORMAT_B); + err = dict_index_add_to_cache_w_vcol( + node->table, node->index, node->add_v, FIL_NULL, + trx_is_strict(trx)); node->index = dict_index_get_if_in_cache_low(index_id); ut_a((node->index == NULL) == (err != DB_SUCCESS)); @@ -1244,20 +1680,6 @@ dict_create_index_step( dict_index_add_to_cache(). 
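
The new TABLE_BUILD_V_COL_DEF state of dict_create_table_step() above effectively walks one (virtual column, base column) pair per SYS_VIRTUAL row and skips virtual columns that have no base columns. The control flow is easier to see as two nested loops; num_base[] below is a toy stand-in for dict_v_col_t::num_base, indexed by virtual column number:

    #include <cstdio>
    #include <vector>

    /* Emit the SYS_VIRTUAL rows in the order the create graph visits them. */
    void walk_v_col_defs(const std::vector<unsigned>& num_base)
    {
    	for (unsigned col_no = 0; col_no < num_base.size(); col_no++) {
    		for (unsigned base_col_no = 0;
    		     base_col_no < num_base[col_no]; base_col_no++) {
    			std::printf("SYS_VIRTUAL row: v_col %u, base #%u\n",
    				    col_no, base_col_no);
    		}
    	}
    }

    int main()
    {
    	walk_v_col_defs({2, 0, 1});	/* second virtual column has no base, so it is skipped */
    }
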
*/ ut_ad(node->index->trx_id == trx->id); ut_ad(node->index->table->def_trx_id == trx->id); - node->state = INDEX_COMMIT_WORK; - } - - if (node->state == INDEX_COMMIT_WORK) { - - /* Index was correctly defined: do NOT commit the transaction - (CREATE INDEX does NOT currently do an implicit commit of - the current transaction) */ - - node->state = INDEX_CREATE_INDEX_TREE; - - /* thr->run_node = node->commit_node; - - return(thr); */ } function_exit: @@ -1316,7 +1738,7 @@ dict_check_if_system_table_exists( /* This table has already been created, and it is OK. Ensure that it can't be evicted from the table LRU cache. */ - dict_table_move_from_lru_to_non_lru(sys_table); + dict_table_prevent_eviction(sys_table); } mutex_exit(&dict_sys->mutex); @@ -1328,8 +1750,7 @@ dict_check_if_system_table_exists( Creates the foreign key constraints system tables inside InnoDB at server bootstrap or server start if they are not found or are not of the right form. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_create_or_check_foreign_constraint_tables(void) /*================================================*/ @@ -1366,22 +1787,19 @@ dict_create_or_check_foreign_constraint_tables(void) /* Check which incomplete table definition to drop. */ if (sys_foreign_err == DB_CORRUPTION) { - ib_logf(IB_LOG_LEVEL_WARN, - "Dropping incompletely created " - "SYS_FOREIGN table."); + ib::warn() << "Dropping incompletely created" + " SYS_FOREIGN table."; row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE, TRUE); } if (sys_foreign_cols_err == DB_CORRUPTION) { - ib_logf(IB_LOG_LEVEL_WARN, - "Dropping incompletely created " - "SYS_FOREIGN_COLS table."); + ib::warn() << "Dropping incompletely created" + " SYS_FOREIGN_COLS table."; row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE, TRUE); } - ib_logf(IB_LOG_LEVEL_WARN, - "Creating foreign key constraint system tables."); + ib::warn() << "Creating foreign key constraint system tables."; /* NOTE: in dict_load_foreigns we use the fact that there are 2 secondary indexes on SYS_FOREIGN, and they @@ -1422,11 +1840,10 @@ dict_create_or_check_foreign_constraint_tables(void) FALSE, trx); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Creation of SYS_FOREIGN and SYS_FOREIGN_COLS " - "has failed with error %lu. Tablespace is full. " - "Dropping incompletely created tables.", - (ulong) err); + + ib::error() << "Creation of SYS_FOREIGN and SYS_FOREIGN_COLS" + " failed: " << ut_strerr(err) << ". Tablespace is" + " full. Dropping incompletely created tables."; ut_ad(err == DB_OUT_OF_FILE_SPACE || err == DB_TOO_MANY_CONCURRENT_TRXS); @@ -1448,8 +1865,7 @@ dict_create_or_check_foreign_constraint_tables(void) srv_file_per_table = srv_file_per_table_backup; if (err == DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_INFO, - "Foreign key constraint system tables created"); + ib::info() << "Foreign key constraint system tables created"; } /* Note: The master thread has not been started at this point. */ @@ -1465,9 +1881,118 @@ dict_create_or_check_foreign_constraint_tables(void) return(err); } +/** Creates the virtual column system table (SYS_VIRTUAL) inside InnoDB +at server bootstrap or server start if the table is not found or is +not of the right form. +@return DB_SUCCESS or error code */ +dberr_t +dict_create_or_check_sys_virtual() +{ + trx_t* trx; + my_bool srv_file_per_table_backup; + dberr_t err; + + ut_a(srv_get_active_thread_type() == SRV_NONE); + + /* Note: The master thread has not been started at this point. 
*/ + err = dict_check_if_system_table_exists( + "SYS_VIRTUAL", DICT_NUM_FIELDS__SYS_VIRTUAL + 1, 1); + + if (err == DB_SUCCESS) { + mutex_enter(&dict_sys->mutex); + dict_sys->sys_virtual = dict_table_get_low("SYS_VIRTUAL"); + mutex_exit(&dict_sys->mutex); + return(DB_SUCCESS); + } + + if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO + || srv_read_only_mode) { + ib::error() << "Cannot create sys_virtual system tables;" + " running in read-only mode."; + return(DB_ERROR); + } + + trx = trx_allocate_for_mysql(); + + trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); + + trx->op_info = "creating sys_virtual tables"; + + row_mysql_lock_data_dictionary(trx); + + /* Check which incomplete table definition to drop. */ + + if (err == DB_CORRUPTION) { + ib::warn() << "Dropping incompletely created" + " SYS_VIRTUAL table."; + row_drop_table_for_mysql("SYS_VIRTUAL", trx, false, TRUE); + } + + ib::info() << "Creating sys_virtual system tables."; + + srv_file_per_table_backup = srv_file_per_table; + + /* We always want SYSTEM tables to be created inside the system + tablespace. */ + + srv_file_per_table = 0; + + err = que_eval_sql( + NULL, + "PROCEDURE CREATE_SYS_VIRTUAL_TABLES_PROC () IS\n" + "BEGIN\n" + "CREATE TABLE\n" + "SYS_VIRTUAL(TABLE_ID BIGINT, POS INT," + " BASE_POS INT);\n" + "CREATE UNIQUE CLUSTERED INDEX BASE_IDX" + " ON SYS_VIRTUAL(TABLE_ID, POS, BASE_POS);\n" + "END;\n", + FALSE, trx); + + if (err != DB_SUCCESS) { + + ib::error() << "Creation of SYS_VIRTUAL" + " failed: " << ut_strerr(err) << ". Tablespace is" + " full or too many transactions." + " Dropping incompletely created tables."; + + ut_ad(err == DB_OUT_OF_FILE_SPACE + || err == DB_TOO_MANY_CONCURRENT_TRXS); + + row_drop_table_for_mysql("SYS_VIRTUAL", trx, false, TRUE); + + if (err == DB_OUT_OF_FILE_SPACE) { + err = DB_MUST_GET_MORE_FILE_SPACE; + } + } + + trx_commit_for_mysql(trx); + + row_mysql_unlock_data_dictionary(trx); + + trx_free_for_mysql(trx); + + srv_file_per_table = srv_file_per_table_backup; + + if (err == DB_SUCCESS) { + ib::info() << "sys_virtual table created"; + } + + /* Note: The master thread has not been started at this point. */ + /* Confirm and move to the non-LRU part of the table LRU list. */ + dberr_t sys_virtual_err = dict_check_if_system_table_exists( + "SYS_VIRTUAL", DICT_NUM_FIELDS__SYS_VIRTUAL + 1, 1); + ut_a(sys_virtual_err == DB_SUCCESS); + mutex_enter(&dict_sys->mutex); + dict_sys->sys_virtual = dict_table_get_low("SYS_VIRTUAL"); + mutex_exit(&dict_sys->mutex); + + return(err); +} + /****************************************************************//** Evaluate the given foreign key SQL statement. -@return error code or DB_SUCCESS */ +@return error code or DB_SUCCESS */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_foreign_eval_sql( @@ -1489,9 +2014,9 @@ dict_foreign_eval_sql( ut_print_timestamp(ef); fputs(" Error in foreign key constraint creation for table ", ef); - ut_print_name(ef, trx, TRUE, name); + ut_print_name(ef, trx, name); fputs(".\nA foreign key constraint of name ", ef); - ut_print_name(ef, trx, TRUE, id); + ut_print_name(ef, trx, id); fputs("\nalready exists." 
" (Note that internally InnoDB adds 'databasename'\n" "in front of the user-defined constraint name.)\n" @@ -1510,15 +2035,14 @@ dict_foreign_eval_sql( } if (error != DB_SUCCESS) { - fprintf(stderr, - "InnoDB: Foreign key constraint creation failed:\n" - "InnoDB: internal error number %lu\n", (ulong) error); + ib::error() << "Foreign key constraint creation failed: " + << ut_strerr(error); mutex_enter(&dict_foreign_err_mutex); ut_print_timestamp(ef); fputs(" Internal error in foreign key constraint creation" " for table ", ef); - ut_print_name(ef, trx, TRUE, name); + ut_print_name(ef, trx, name); fputs(".\n" "See the MySQL .err log in the datadir" " for more information.\n", ef); @@ -1533,7 +2057,7 @@ dict_foreign_eval_sql( /********************************************************************//** Add a single foreign key field definition to the data dictionary tables in the database. -@return error code or DB_SUCCESS */ +@return error code or DB_SUCCESS */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t dict_create_add_foreign_field_to_dictionary( @@ -1543,6 +2067,8 @@ dict_create_add_foreign_field_to_dictionary( const dict_foreign_t* foreign, /*!< in: foreign */ trx_t* trx) /*!< in/out: transaction */ { + DBUG_ENTER("dict_create_add_foreign_field_to_dictionary"); + pars_info_t* info = pars_info_create(); pars_info_add_str_literal(info, "id", foreign->id); @@ -1555,7 +2081,7 @@ dict_create_add_foreign_field_to_dictionary( pars_info_add_str_literal(info, "ref_col_name", foreign->referenced_col_names[field_nr]); - return(dict_foreign_eval_sql( + DBUG_RETURN(dict_foreign_eval_sql( info, "PROCEDURE P () IS\n" "BEGIN\n" @@ -1583,7 +2109,7 @@ dict_foreign_def_get( tbname = dict_remove_db_name(foreign->id); bufend = innobase_convert_name(tablebuf, MAX_TABLE_NAME_LEN, - tbname, strlen(tbname), trx->mysql_thd, FALSE); + tbname, strlen(tbname), trx->mysql_thd); tablebuf[bufend - tablebuf] = '\0'; sprintf(fk_def, @@ -1594,7 +2120,7 @@ dict_foreign_def_get( innobase_convert_name(buf, MAX_TABLE_NAME_LEN, foreign->foreign_col_names[i], strlen(foreign->foreign_col_names[i]), - trx->mysql_thd, FALSE); + trx->mysql_thd); strcat(fk_def, buf); if (i < foreign->n_fields-1) { strcat(fk_def, (char *)","); @@ -1606,7 +2132,7 @@ dict_foreign_def_get( bufend = innobase_convert_name(tablebuf, MAX_TABLE_NAME_LEN, foreign->referenced_table_name, strlen(foreign->referenced_table_name), - trx->mysql_thd, TRUE); + trx->mysql_thd); tablebuf[bufend - tablebuf] = '\0'; strcat(fk_def, tablebuf); @@ -1617,7 +2143,7 @@ dict_foreign_def_get( bufend = innobase_convert_name(buf, MAX_TABLE_NAME_LEN, foreign->referenced_col_names[i], strlen(foreign->referenced_col_names[i]), - trx->mysql_thd, FALSE); + trx->mysql_thd); buf[bufend - buf] = '\0'; strcat(fk_def, buf); if (i < foreign->n_fields-1) { @@ -1649,14 +2175,14 @@ dict_foreign_def_get_fields( bufend = innobase_convert_name(fieldbuf, MAX_TABLE_NAME_LEN, foreign->foreign_col_names[col_no], strlen(foreign->foreign_col_names[col_no]), - trx->mysql_thd, FALSE); + trx->mysql_thd); fieldbuf[bufend - fieldbuf] = '\0'; bufend = innobase_convert_name(fieldbuf2, MAX_TABLE_NAME_LEN, foreign->referenced_col_names[col_no], strlen(foreign->referenced_col_names[col_no]), - trx->mysql_thd, FALSE); + trx->mysql_thd); fieldbuf2[bufend - fieldbuf2] = '\0'; *field = fieldbuf; @@ -1665,8 +2191,7 @@ dict_foreign_def_get_fields( /********************************************************************//** Add a foreign key definition to the data dictionary tables. 
-@return error code or DB_SUCCESS */ -UNIV_INTERN +@return error code or DB_SUCCESS */ dberr_t dict_create_add_foreign_to_dictionary( /*==================================*/ @@ -1676,6 +2201,9 @@ dict_create_add_foreign_to_dictionary( trx_t* trx) /*!< in/out: dictionary transaction */ { dberr_t error; + + DBUG_ENTER("dict_create_add_foreign_to_dictionary"); + pars_info_t* info = pars_info_create(); pars_info_add_str_literal(info, "id", foreign->id); @@ -1688,6 +2216,11 @@ dict_create_add_foreign_to_dictionary( pars_info_add_int4_literal(info, "n_cols", foreign->n_fields + (foreign->type << 24)); + DBUG_PRINT("dict_create_add_foreign_to_dictionary", + ("'%s', '%s', '%s', %d", foreign->id, name, + foreign->referenced_table_name, + foreign->n_fields + (foreign->type << 24))); + error = dict_foreign_eval_sql(info, "PROCEDURE P () IS\n" "BEGIN\n" @@ -1704,11 +2237,11 @@ dict_create_add_foreign_to_dictionary( char* fk_def; innobase_convert_name(tablename, MAX_TABLE_NAME_LEN, - table->name, strlen(table->name), - trx->mysql_thd, TRUE); + table->name.m_name, strlen(table->name.m_name), + trx->mysql_thd); innobase_convert_name(buf, MAX_TABLE_NAME_LEN, - foreign->id, strlen(foreign->id), trx->mysql_thd, FALSE); + foreign->id, strlen(foreign->id), trx->mysql_thd); fk_def = dict_foreign_def_get((dict_foreign_t*)foreign, trx); @@ -1721,7 +2254,7 @@ dict_create_add_foreign_to_dictionary( tablename, buf, fk_def); } - return(error); + DBUG_RETURN(error); } for (ulint i = 0; i < foreign->n_fields; i++) { @@ -1736,10 +2269,10 @@ dict_create_add_foreign_to_dictionary( char* fk_def; innobase_convert_name(tablename, MAX_TABLE_NAME_LEN, - table->name, strlen(table->name), - trx->mysql_thd, TRUE); + table->name.m_name, strlen(table->name.m_name), + trx->mysql_thd); innobase_convert_name(buf, MAX_TABLE_NAME_LEN, - foreign->id, strlen(foreign->id), trx->mysql_thd, FALSE); + foreign->id, strlen(foreign->id), trx->mysql_thd); fk_def = dict_foreign_def_get((dict_foreign_t*)foreign, trx); dict_foreign_def_get_fields((dict_foreign_t*)foreign, trx, &field, &field2, i); @@ -1750,11 +2283,11 @@ dict_create_add_foreign_to_dictionary( " Error in foreign key definition: %s.", tablename, buf, i+1, fk_def); - return(error); + DBUG_RETURN(error); } } - return(error); + DBUG_RETURN(error); } /** Adds the given set of foreign key objects to the dictionary tables @@ -1767,7 +2300,6 @@ the dictionary tables local_fk_set belong to @param[in,out] trx transaction @return error code or DB_SUCCESS */ -UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ @@ -1778,12 +2310,17 @@ dict_create_add_foreigns_to_dictionary( dict_foreign_t* foreign; dberr_t error; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex) + || dict_table_is_intrinsic(table)); + + if (dict_table_is_intrinsic(table)) { + goto exit_loop; + } if (NULL == dict_table_get_low("SYS_FOREIGN")) { - fprintf(stderr, - "InnoDB: table SYS_FOREIGN not found" - " in internal data dictionary\n"); + + ib::error() << "Table SYS_FOREIGN not found" + " in internal data dictionary"; return(DB_ERROR); } @@ -1795,8 +2332,8 @@ dict_create_add_foreigns_to_dictionary( foreign = *it; ut_ad(foreign->id != NULL); - error = dict_create_add_foreign_to_dictionary((dict_table_t*)table, table->name, - foreign, trx); + error = dict_create_add_foreign_to_dictionary( + (dict_table_t*)table, table->name.m_name, foreign, trx); if (error != DB_SUCCESS) { @@ -1804,9 +2341,13 @@ dict_create_add_foreigns_to_dictionary( } } +exit_loop: 
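	/* NOTE (inferred reading, not part of the original patch):
	intrinsic (session-private) tables take the goto exit_loop path
	above without writing any SYS_FOREIGN / SYS_FOREIGN_COLS rows,
	so no dictionary transaction may have been started on this path;
	that appears to be why the trx_commit() below is now guarded by
	trx_is_started(). */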
trx->op_info = "committing foreign key definitions"; - trx_commit(trx); + if (trx_is_started(trx)) { + + trx_commit(trx); + } trx->op_info = ""; @@ -1817,8 +2358,7 @@ dict_create_add_foreigns_to_dictionary( Creates the tablespaces and datafiles system tables inside InnoDB at server bootstrap or server start if they are not found or are not of the right form. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_create_or_check_sys_tablespace(void) /*=====================================*/ @@ -1854,22 +2394,19 @@ dict_create_or_check_sys_tablespace(void) /* Check which incomplete table definition to drop. */ if (sys_tablespaces_err == DB_CORRUPTION) { - ib_logf(IB_LOG_LEVEL_WARN, - "Dropping incompletely created " - "SYS_TABLESPACES table."); + ib::warn() << "Dropping incompletely created" + " SYS_TABLESPACES table."; row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE, TRUE); } if (sys_datafiles_err == DB_CORRUPTION) { - ib_logf(IB_LOG_LEVEL_WARN, - "Dropping incompletely created " - "SYS_DATAFILES table."); + ib::warn() << "Dropping incompletely created" + " SYS_DATAFILES table."; row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE, TRUE); } - ib_logf(IB_LOG_LEVEL_INFO, - "Creating tablespace and datafile system tables."); + ib::info() << "Creating tablespace and datafile system tables."; /* We always want SYSTEM tables to be created inside the system tablespace. */ @@ -1892,11 +2429,10 @@ dict_create_or_check_sys_tablespace(void) FALSE, trx); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Creation of SYS_TABLESPACES and SYS_DATAFILES " - "has failed with error %lu. Tablespace is full. " - "Dropping incompletely created tables.", - (ulong) err); + + ib::error() << "Creation of SYS_TABLESPACES and SYS_DATAFILES" + " has failed with error " << ut_strerr(err) + << ". Dropping incompletely created tables."; ut_a(err == DB_OUT_OF_FILE_SPACE || err == DB_TOO_MANY_CONCURRENT_TRXS); @@ -1918,8 +2454,7 @@ dict_create_or_check_sys_tablespace(void) srv_file_per_table = srv_file_per_table_backup; if (err == DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_INFO, - "Tablespace and datafile system tables created."); + ib::info() << "Tablespace and datafile system tables created."; } /* Note: The master thread has not been started at this point. */ @@ -1936,29 +2471,34 @@ dict_create_or_check_sys_tablespace(void) return(err); } -/********************************************************************//** -Add a single tablespace definition to the data dictionary tables in the -database. -@return error code or DB_SUCCESS */ -UNIV_INTERN +/** Put a tablespace definition into the data dictionary, +replacing what was there previously. 
+@param[in] space Tablespace id +@param[in] name Tablespace name +@param[in] flags Tablespace flags +@param[in] path Tablespace path +@param[in] trx Transaction +@param[in] commit If true, commit the transaction +@return error code or DB_SUCCESS */ dberr_t -dict_create_add_tablespace_to_dictionary( -/*=====================================*/ - ulint space, /*!< in: tablespace id */ - const char* name, /*!< in: tablespace name */ - ulint flags, /*!< in: tablespace flags */ - const char* path, /*!< in: tablespace path */ - trx_t* trx, /*!< in/out: transaction */ - bool commit) /*!< in: if true then commit the - transaction */ +dict_replace_tablespace_in_dictionary( + ulint space_id, + const char* name, + ulint flags, + const char* path, + trx_t* trx, + bool commit) { + if (!srv_sys_tablespaces_open) { + /* Startup procedure is not yet ready for updates. */ + return(DB_SUCCESS); + } + dberr_t error; pars_info_t* info = pars_info_create(); - ut_a(space > TRX_SYS_SPACE); - - pars_info_add_int4_literal(info, "space", space); + pars_info_add_int4_literal(info, "space", space_id); pars_info_add_str_literal(info, "name", name); @@ -1968,11 +2508,27 @@ dict_create_add_tablespace_to_dictionary( error = que_eval_sql(info, "PROCEDURE P () IS\n" + "p CHAR;\n" + + "DECLARE CURSOR c IS\n" + " SELECT PATH FROM SYS_DATAFILES\n" + " WHERE SPACE=:space FOR UPDATE;\n" + "BEGIN\n" - "INSERT INTO SYS_TABLESPACES VALUES" + "OPEN c;\n" + "FETCH c INTO p;\n" + + "IF (SQL % NOTFOUND) THEN" + " DELETE FROM SYS_TABLESPACES " + "WHERE SPACE=:space;\n" + " INSERT INTO SYS_TABLESPACES VALUES" "(:space, :name, :flags);\n" - "INSERT INTO SYS_DATAFILES VALUES" + " INSERT INTO SYS_DATAFILES VALUES" "(:space, :path);\n" + "ELSIF p <> :path THEN\n" + " UPDATE SYS_DATAFILES SET PATH=:path" + " WHERE CURRENT OF c;\n" + "END IF;\n" "END;\n", FALSE, trx); @@ -1989,3 +2545,66 @@ dict_create_add_tablespace_to_dictionary( return(error); } + +/** Delete records from SYS_TABLESPACES and SYS_DATAFILES associated +with a particular tablespace ID. +@param[in] space Tablespace ID +@param[in,out] trx Current transaction +@return DB_SUCCESS if OK, dberr_t if the operation failed */ + +dberr_t +dict_delete_tablespace_and_datafiles( + ulint space, + trx_t* trx) +{ + dberr_t err = DB_SUCCESS; + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(srv_sys_tablespaces_open); + + trx->op_info = "delete tablespace and datafiles from dictionary"; + + pars_info_t* info = pars_info_create(); + ut_a(!is_system_tablespace(space)); + pars_info_add_int4_literal(info, "space", space); + + err = que_eval_sql(info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "DELETE FROM SYS_TABLESPACES\n" + "WHERE SPACE = :space;\n" + "DELETE FROM SYS_DATAFILES\n" + "WHERE SPACE = :space;\n" + "END;\n", + FALSE, trx); + + if (err != DB_SUCCESS) { + ib::warn() << "Could not delete space_id " + << space << " from data dictionary"; + } + + trx->op_info = ""; + + return(err); +} + +/** Assign a new table ID and put it into the table cache and the transaction. 
+@param[in,out] table Table that needs an ID +@param[in,out] trx Transaction */ +void +dict_table_assign_new_id( + dict_table_t* table, + trx_t* trx) +{ + if (dict_table_is_intrinsic(table)) { + /* There is no significance of this table->id (if table is + intrinsic) so assign it default instead of something meaningful + to avoid confusion.*/ + table->id = ULINT_UNDEFINED; + } else { + dict_hdr_get_new_id(&table->id, NULL, NULL, table, false); + } + + trx->table_id = table->id; +} diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 0310e5e1d66..687353cb1b9 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -2,7 +2,7 @@ Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2015, MariaDB Corporation. +Copyright (c) 2013, 2016, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -25,6 +25,10 @@ Data dictionary system Created 1/8/1996 Heikki Tuuri ***********************************************************************/ +#include "ha_prototypes.h" +#include +#include + #include "dict0dict.h" #include "fts0fts.h" #include "fil0fil.h" @@ -37,13 +41,11 @@ Created 1/8/1996 Heikki Tuuri #endif /** dummy index for ROW_FORMAT=REDUNDANT supremum and infimum records */ -UNIV_INTERN dict_index_t* dict_ind_redundant; -/** dummy index for ROW_FORMAT=COMPACT supremum and infimum records */ -UNIV_INTERN dict_index_t* dict_ind_compact; +dict_index_t* dict_ind_redundant; #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG /** Flag to control insert buffer debugging. */ -extern UNIV_INTERN uint ibuf_debug; +extern uint ibuf_debug; #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ /********************************************************************** @@ -52,46 +54,44 @@ void ib_warn_row_too_big(const dict_table_t* table); #ifndef UNIV_HOTBACKUP +#include "btr0btr.h" +#include "btr0cur.h" +#include "btr0sea.h" #include "buf0buf.h" #include "data0type.h" -#include "mach0data.h" #include "dict0boot.h" -#include "dict0mem.h" #include "dict0crea.h" +#include "dict0mem.h" +#include "dict0priv.h" #include "dict0stats.h" -#include "trx0undo.h" -#include "btr0btr.h" -#include "btr0cur.h" -#include "btr0sea.h" +#include "fsp0sysspace.h" +#include "fts0fts.h" +#include "fts0types.h" +#include "lock0lock.h" +#include "mach0data.h" +#include "mem0mem.h" #include "os0once.h" -#include "page0zip.h" #include "page0page.h" +#include "page0zip.h" #include "pars0pars.h" #include "pars0sym.h" #include "que0que.h" #include "rem0cmp.h" -#include "fts0fts.h" -#include "fts0types.h" -#include "m_ctype.h" /* my_isspace() */ -#include "ha_prototypes.h" /* innobase_strcasecmp(), innobase_casedn_str() */ +#include "row0log.h" +#include "row0merge.h" +#include "row0mysql.h" +#include "row0upd.h" #include "srv0mon.h" #include "srv0start.h" -#include "lock0lock.h" -#include "dict0priv.h" -#include "row0upd.h" -#include "row0mysql.h" -#include "row0merge.h" -#include "row0log.h" -#include "ut0ut.h" /* ut_format_name() */ -#include "m_string.h" -#include "my_sys.h" -#include "mysqld.h" /* system_charset_info */ -#include "strfunc.h" /* strconvert() */ +#include "sync0sync.h" +#include "trx0undo.h" +#include "ut0new.h" -#include +#include +#include /** the dictionary system */ -UNIV_INTERN dict_sys_t* dict_sys = NULL; +dict_sys_t* dict_sys = NULL; /** @brief the data 
dictionary rw-latch protecting dict_sys @@ -101,29 +101,15 @@ in S-mode; we cannot trust that MySQL protects implicit or background operations a table drop since MySQL does not know of them; therefore we need this; NOTE: a transaction which reserves this must keep book on the mode in trx_t::dict_operation_lock_mode */ -UNIV_INTERN rw_lock_t dict_operation_lock; +rw_lock_t* dict_operation_lock; /** Percentage of compression failures that are allowed in a single round */ -UNIV_INTERN ulong zip_failure_threshold_pct = 5; +ulong zip_failure_threshold_pct = 5; /** Maximum percentage of a page that can be allowed as a pad to avoid compression failures */ -UNIV_INTERN ulong zip_pad_max = 50; - -/* Keys to register rwlocks and mutexes with performance schema */ -#ifdef UNIV_PFS_RWLOCK -UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key; -UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key; -UNIV_INTERN mysql_pfs_key_t index_online_log_key; -UNIV_INTERN mysql_pfs_key_t dict_table_stats_key; -#endif /* UNIV_PFS_RWLOCK */ - -#ifdef UNIV_PFS_MUTEX -UNIV_INTERN mysql_pfs_key_t zip_pad_mutex_key; -UNIV_INTERN mysql_pfs_key_t dict_sys_mutex_key; -UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key; -#endif /* UNIV_PFS_MUTEX */ +ulong zip_pad_max = 50; #define DICT_HEAP_SIZE 100 /*!< initial memory heap size when creating a table or index object */ @@ -143,17 +129,20 @@ static bool innodb_index_stats_not_found_reported = false; /*******************************************************************//** Tries to find column names for the index and sets the col field of the index. +@param[in] table table +@param[in] index index +@param[in] add_v new virtual columns added along with an add index call @return TRUE if the column names were found */ static ibool dict_index_find_cols( -/*=================*/ - dict_table_t* table, /*!< in: table */ - dict_index_t* index); /*!< in: index */ + const dict_table_t* table, + dict_index_t* index, + const dict_add_v_col_t* add_v); /*******************************************************************//** Builds the internal dictionary cache representation for a clustered index, containing also system fields not defined by the user. -@return own: the internal representation of the clustered index */ +@return own: the internal representation of the clustered index */ static dict_index_t* dict_index_build_internal_clust( @@ -164,7 +153,7 @@ dict_index_build_internal_clust( /*******************************************************************//** Builds the internal dictionary cache representation for a non-clustered index, containing also system fields not defined by the user. -@return own: the internal representation of the non-clustered index */ +@return own: the internal representation of the non-clustered index */ static dict_index_t* dict_index_build_internal_non_clust( @@ -174,35 +163,13 @@ dict_index_build_internal_non_clust( a non-clustered index */ /**********************************************************************//** Builds the internal dictionary cache representation for an FTS index. -@return own: the internal representation of the FTS index */ +@return own: the internal representation of the FTS index */ static dict_index_t* dict_index_build_internal_fts( /*==========================*/ dict_table_t* table, /*!< in: table */ dict_index_t* index); /*!< in: user representation of an FTS index */ -/**********************************************************************//** -Prints a column data. 
*/ -static -void -dict_col_print_low( -/*===============*/ - const dict_table_t* table, /*!< in: table */ - const dict_col_t* col); /*!< in: column */ -/**********************************************************************//** -Prints an index data. */ -static -void -dict_index_print_low( -/*=================*/ - dict_index_t* index); /*!< in: index */ -/**********************************************************************//** -Prints a field data. */ -static -void -dict_field_print_low( -/*=================*/ - const dict_field_t* field); /*!< in: field */ /**********************************************************************//** Removes an index from the dictionary cache. */ @@ -242,25 +209,13 @@ dict_non_lru_find_table( /* Stream for storing detailed information about the latest foreign key and unique key errors. Only created if !srv_read_only_mode */ -UNIV_INTERN FILE* dict_foreign_err_file = NULL; +FILE* dict_foreign_err_file = NULL; /* mutex protecting the foreign and unique error buffers */ -UNIV_INTERN ib_mutex_t dict_foreign_err_mutex; - -/******************************************************************//** -Makes all characters in a NUL-terminated UTF-8 string lower case. */ -UNIV_INTERN -void -dict_casedn_str( -/*============*/ - char* a) /*!< in/out: string to put in lower case */ -{ - innobase_casedn_str(a); -} +ib_mutex_t dict_foreign_err_mutex; /********************************************************************//** Checks if the database name in two table names is the same. -@return TRUE if same db name */ -UNIV_INTERN +@return TRUE if same db name */ ibool dict_tables_have_same_db( /*=====================*/ @@ -280,8 +235,7 @@ dict_tables_have_same_db( /********************************************************************//** Return the end of table name where we have removed dbname and '/'. -@return table name */ -UNIV_INTERN +@return table name */ const char* dict_remove_db_name( /*================*/ @@ -296,8 +250,7 @@ dict_remove_db_name( /********************************************************************//** Get the database name length in a table name. -@return database name length */ -UNIV_INTERN +@return database name length */ ulint dict_get_db_name_len( /*=================*/ @@ -312,22 +265,20 @@ dict_get_db_name_len( /********************************************************************//** Reserves the dictionary system mutex for MySQL. */ -UNIV_INTERN void dict_mutex_enter_for_mysql_func(const char * file, ulint line) /*============================*/ { - mutex_enter_func(&(dict_sys->mutex), file, line); + mutex_enter(&dict_sys->mutex); } /********************************************************************//** Releases the dictionary system mutex for MySQL. */ -UNIV_INTERN void dict_mutex_exit_for_mysql(void) /*===========================*/ { - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); } /** Allocate and init a dict_table_t's stats latch. @@ -340,7 +291,10 @@ dict_table_stats_latch_alloc( { dict_table_t* table = static_cast(table_void); - table->stats_latch = new(std::nothrow) rw_lock_t; + /* Note: rw_lock_create() will call the constructor */ + + table->stats_latch = static_cast( + ut_malloc_nokey(sizeof(rw_lock_t))); ut_a(table->stats_latch != NULL); @@ -357,7 +311,7 @@ dict_table_stats_latch_free( dict_table_t* table) { rw_lock_free(table->stats_latch); - delete table->stats_latch; + ut_free(table->stats_latch); } /** Create a dict_table_t's stats latch or delay for lazy creation. 
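/* A minimal standard-library analogue (illustrative only; the toy_table
   type and helpers below are not InnoDB code) of the lazy latch-creation
   idiom used for the stats latch in these hunks: the latch pointer starts
   out absent and is allocated exactly once, by whichever thread first
   needs it, through a one-shot guard corresponding to
   os_once::do_or_wait_for_done(&table->stats_latch_created,
   dict_table_stats_latch_alloc, table) in the patched code. The real code
   additionally tolerates a NULL latch for private "dummy" table objects,
   which this sketch omits. */

#include <memory>
#include <mutex>
#include <shared_mutex>

struct toy_table {
	std::once_flag				stats_latch_once; /* ~ stats_latch_created */
	std::unique_ptr<std::shared_mutex>	stats_latch;      /* ~ table->stats_latch */
};

/* ~ dict_table_stats_lock(table, RW_S_LATCH) */
static void toy_stats_lock_shared(toy_table& t)
{
	/* First caller allocates the latch; concurrent callers wait
	until the allocation is done, then proceed to latch it. */
	std::call_once(t.stats_latch_once, [&t]() {
		t.stats_latch.reset(new std::shared_mutex());
	});

	t.stats_latch->lock_shared();
}

/* ~ dict_table_stats_unlock(table, RW_S_LATCH) */
static void toy_stats_unlock_shared(toy_table& t)
{
	t.stats_latch->unlock_shared();
}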
@@ -366,7 +320,6 @@ or from a thread that has not shared the table object with other threads. @param[in,out] table table whose stats latch to create @param[in] enabled if false then the latch is disabled and dict_table_stats_lock()/unlock() become noop on this table. */ - void dict_table_stats_latch_create( dict_table_t* table, @@ -378,23 +331,15 @@ dict_table_stats_latch_create( return; } -#ifdef HAVE_ATOMIC_BUILTINS /* We create this lazily the first time it is used. */ table->stats_latch = NULL; table->stats_latch_created = os_once::NEVER_DONE; -#else /* HAVE_ATOMIC_BUILTINS */ - - dict_table_stats_latch_alloc(table); - - table->stats_latch_created = os_once::DONE; -#endif /* HAVE_ATOMIC_BUILTINS */ } /** Destroy a dict_table_t's stats latch. This function is only called from either single threaded environment or from a thread that has not shared the table object with other threads. @param[in,out] table table whose stats latch to destroy */ - void dict_table_stats_latch_destroy( dict_table_t* table) @@ -406,25 +351,20 @@ dict_table_stats_latch_destroy( } } -/**********************************************************************//** -Lock the appropriate latch to protect a given table's statistics. */ -UNIV_INTERN +/** Lock the appropriate latch to protect a given table's statistics. +@param[in] table table whose stats to lock +@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ void dict_table_stats_lock( -/*==================*/ - dict_table_t* table, /*!< in: table */ - ulint latch_mode) /*!< in: RW_S_LATCH or RW_X_LATCH */ + dict_table_t* table, + ulint latch_mode) { ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); -#ifdef HAVE_ATOMIC_BUILTINS os_once::do_or_wait_for_done( &table->stats_latch_created, dict_table_stats_latch_alloc, table); -#else /* HAVE_ATOMIC_BUILTINS */ - ut_ad(table->stats_latch_created == os_once::DONE); -#endif /* HAVE_ATOMIC_BUILTINS */ if (table->stats_latch == NULL) { /* This is a dummy table object that is private in the current @@ -447,15 +387,13 @@ dict_table_stats_lock( } } -/**********************************************************************//** -Unlock the latch that has been locked by dict_table_stats_lock() */ -UNIV_INTERN +/** Unlock the latch that has been locked by dict_table_stats_lock(). +@param[in] table table whose stats to unlock +@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ void dict_table_stats_unlock( -/*====================*/ - dict_table_t* table, /*!< in: table */ - ulint latch_mode) /*!< in: RW_S_LATCH or - RW_X_LATCH */ + dict_table_t* table, + ulint latch_mode) { ut_ad(table != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); @@ -507,12 +445,12 @@ dict_table_try_drop_aborted( ut_ad(table->id == table_id); } - if (table && table->n_ref_count == ref_count && table->drop_aborted) { + if (table && table->get_ref_count() == ref_count && table->drop_aborted) { /* Silence a debug assertion in row_merge_drop_indexes(). 
*/ - ut_d(table->n_ref_count++); + ut_d(table->acquire()); row_merge_drop_indexes(trx, table, TRUE); - ut_d(table->n_ref_count--); - ut_ad(table->n_ref_count == ref_count); + ut_d(table->release()); + ut_ad(table->get_ref_count() == ref_count); trx_commit_for_mysql(trx); } @@ -536,7 +474,7 @@ dict_table_try_drop_aborted_and_mutex_exit( if (try_drop && table != NULL && table->drop_aborted - && table->n_ref_count == 1 + && table->get_ref_count() == 1 && dict_table_get_first_index(table)) { /* Attempt to drop the indexes whose online creation @@ -553,7 +491,6 @@ dict_table_try_drop_aborted_and_mutex_exit( /********************************************************************//** Decrements the count of open handles to a table. */ -UNIV_INTERN void dict_table_close( /*=============*/ @@ -563,22 +500,28 @@ dict_table_close( indexes after an aborted online index creation */ { - if (!dict_locked) { + if (!dict_locked && !dict_table_is_intrinsic(table)) { mutex_enter(&dict_sys->mutex); } - ut_ad(mutex_own(&dict_sys->mutex)); - ut_a(table->n_ref_count > 0); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); + ut_a(table->get_ref_count() > 0); - --table->n_ref_count; + table->release(); + + /* Intrinsic table is not added to dictionary cache so skip other + cache specific actions. */ + if (dict_table_is_intrinsic(table)) { + return; + } /* Force persistent stats re-read upon next open of the table so that FLUSH TABLE can be used to forcibly fetch stats from disk if they have been manually modified. We reset table->stat_initialized only if table reference count is 0 because we do not want too frequent stats re-reads (e.g. in other cases than FLUSH TABLE). */ - if (strchr(table->name, '/') != NULL - && table->n_ref_count == 0 + if (strchr(table->name.m_name, '/') != NULL + && table->get_ref_count() == 0 && dict_stats_is_persistent_enabled(table)) { dict_stats_deinit(table); @@ -602,7 +545,7 @@ dict_table_close( drop_aborted = try_drop && table->drop_aborted - && table->n_ref_count == 1 + && table->get_ref_count() == 1 && dict_table_get_first_index(table); mutex_exit(&dict_sys->mutex); @@ -614,11 +557,76 @@ dict_table_close( } #endif /* !UNIV_HOTBACKUP */ +/********************************************************************//** +Closes the only open handle to a table and drops a table while assuring +that dict_sys->mutex is held the whole time. This assures that the table +is not evicted after the close when the count of open handles goes to zero. +Because dict_sys->mutex is held, we do not need to call +dict_table_prevent_eviction(). */ +void +dict_table_close_and_drop( +/*======================*/ + trx_t* trx, /*!< in: data dictionary transaction */ + dict_table_t* table) /*!< in/out: table */ +{ + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(trx->dict_operation != TRX_DICT_OP_NONE); + ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); + + dict_table_close(table, TRUE, FALSE); + +#if defined UNIV_DEBUG || defined UNIV_DDL_DEBUG + /* Nobody should have initialized the stats of the newly created + table when this is called. So we know that it has not been added + for background stats gathering. */ + ut_a(!table->stat_initialized); +#endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */ + + row_merge_drop_table(trx, table); +} + +/** Check if the table has a given (non_virtual) column. 
+@param[in] table table object +@param[in] col_name column name +@param[in] col_nr column number guessed, 0 as default +@return column number if the table has the specified column, +otherwise table->n_def */ +ulint +dict_table_has_column( + const dict_table_t* table, + const char* col_name, + ulint col_nr) +{ + ulint col_max = table->n_def; + + ut_ad(table); + ut_ad(col_name); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + if (col_nr < col_max + && innobase_strcasecmp( + col_name, dict_table_get_col_name(table, col_nr)) == 0) { + return(col_nr); + } + + /** The order of column may changed, check it with other columns */ + for (ulint i = 0; i < col_max; i++) { + if (i != col_nr + && innobase_strcasecmp( + col_name, dict_table_get_col_name(table, i)) == 0) { + + return(i); + } + } + + return(col_max); +} + /**********************************************************************//** Returns a column's name. @return column name. NOTE: not guaranteed to stay valid if table is modified in any way (columns added, etc.). */ -UNIV_INTERN const char* dict_table_get_col_name( /*====================*/ @@ -676,49 +684,145 @@ dict_table_get_col_name_for_mysql( return(s); } +/** Returns a virtual column's name. +@param[in] table target table +@param[in] col_nr virtual column number (nth virtual column) +@return column name or NULL if column number out of range. */ +const char* +dict_table_get_v_col_name( + const dict_table_t* table, + ulint col_nr) +{ + const char* s; + + ut_ad(table); + ut_ad(col_nr < table->n_v_def); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + if (col_nr >= table->n_v_def) { + return(NULL); + } + + s = table->v_col_names; + + if (s != NULL) { + for (ulint i = 0; i < col_nr; i++) { + s += strlen(s) + 1; + } + } + + return(s); +} + +/** Search virtual column's position in InnoDB according to its position +in original table's position +@param[in] table target table +@param[in] col_nr column number (nth column in the MySQL table) +@return virtual column's position in InnoDB, ULINT_UNDEFINED if not find */ +static +ulint +dict_table_get_v_col_pos_for_mysql( + const dict_table_t* table, + ulint col_nr) +{ + ulint i; + + ut_ad(table); + ut_ad(col_nr < static_cast(table->n_t_def)); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + for (i = 0; i < table->n_v_def; i++) { + if (col_nr == dict_get_v_col_mysql_pos( + table->v_cols[i].m_col.ind)) { + break; + } + } + + if (i == table->n_v_def) { + return(ULINT_UNDEFINED); + } + + return(i); +} + +/** Returns a virtual column's name according to its original +MySQL table position. +@param[in] table target table +@param[in] col_nr column number (nth column in the table) +@return column name. */ +static +const char* +dict_table_get_v_col_name_mysql( + const dict_table_t* table, + ulint col_nr) +{ + ulint i = dict_table_get_v_col_pos_for_mysql(table, col_nr); + + if (i == ULINT_UNDEFINED) { + return(NULL); + } + + return(dict_table_get_v_col_name(table, i)); +} + +/** Get nth virtual column according to its original MySQL table position +@param[in] table target table +@param[in] col_nr column number in MySQL Table definition +@return dict_v_col_t ptr */ +dict_v_col_t* +dict_table_get_nth_v_col_mysql( + const dict_table_t* table, + ulint col_nr) +{ + ulint i = dict_table_get_v_col_pos_for_mysql(table, col_nr); + + if (i == ULINT_UNDEFINED) { + return(NULL); + } + + return(dict_table_get_nth_v_col(table, i)); +} + #ifndef UNIV_HOTBACKUP /** Allocate and init the autoinc latch of a given table. 
This function must not be called concurrently on the same table object. @param[in,out] table_void table whose autoinc latch to create */ +static void dict_table_autoinc_alloc( void* table_void) { dict_table_t* table = static_cast(table_void); - table->autoinc_mutex = new (std::nothrow) ib_mutex_t(); + table->autoinc_mutex = UT_NEW_NOKEY(ib_mutex_t()); ut_a(table->autoinc_mutex != NULL); - mutex_create(autoinc_mutex_key, - table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX); + mutex_create(LATCH_ID_AUTOINC, table->autoinc_mutex); } /** Allocate and init the zip_pad_mutex of a given index. This function must not be called concurrently on the same index object. @param[in,out] index_void index whose zip_pad_mutex to create */ +static void dict_index_zip_pad_alloc( void* index_void) { dict_index_t* index = static_cast(index_void); - index->zip_pad.mutex = new (std::nothrow) os_fast_mutex_t; + index->zip_pad.mutex = UT_NEW_NOKEY(SysMutex()); ut_a(index->zip_pad.mutex != NULL); - os_fast_mutex_init(zip_pad_mutex_key, index->zip_pad.mutex); + mutex_create(LATCH_ID_ZIP_PAD_MUTEX, index->zip_pad.mutex); } + /********************************************************************//** Acquire the autoinc lock. */ -UNIV_INTERN void dict_table_autoinc_lock( /*====================*/ dict_table_t* table) /*!< in/out: table */ { -#ifdef HAVE_ATOMIC_BUILTINS os_once::do_or_wait_for_done( &table->autoinc_mutex_created, dict_table_autoinc_alloc, table); -#else /* HAVE_ATOMIC_BUILTINS */ - ut_ad(table->autoinc_mutex_created == os_once::DONE); -#endif /* HAVE_ATOMIC_BUILTINS */ mutex_enter(table->autoinc_mutex); } @@ -729,20 +833,16 @@ void dict_index_zip_pad_lock( dict_index_t* index) { -#ifdef HAVE_ATOMIC_BUILTINS os_once::do_or_wait_for_done( &index->zip_pad.mutex_created, dict_index_zip_pad_alloc, index); -#else /* HAVE_ATOMIC_BUILTINS */ - ut_ad(index->zip_pad.mutex_created == os_once::DONE); -#endif /* HAVE_ATOMIC_BUILTINS */ - os_fast_mutex_lock(index->zip_pad.mutex); + mutex_enter(index->zip_pad.mutex); } + /********************************************************************//** Unconditionally set the autoinc counter. */ -UNIV_INTERN void dict_table_autoinc_initialize( /*==========================*/ @@ -754,16 +854,14 @@ dict_table_autoinc_initialize( table->autoinc = value; } -/************************************************************************ -Get all the FTS indexes on a table. -@return number of FTS indexes */ -UNIV_INTERN +/** Get all the FTS indexes on a table. +@param[in] table table +@param[out] indexes all FTS indexes on this table +@return number of FTS indexes */ ulint dict_table_get_all_fts_indexes( -/*===========================*/ - dict_table_t* table, /*!< in: table */ - ib_vector_t* indexes) /*!< out: all FTS indexes on this - table */ + const dict_table_t* table, + ib_vector_t* indexes) { dict_index_t* index; @@ -783,7 +881,6 @@ dict_table_get_all_fts_indexes( /** Store autoinc value when the table is evicted. @param[in] table table evicted */ -UNIV_INTERN void dict_table_autoinc_store( const dict_table_t* table) @@ -802,7 +899,6 @@ dict_table_autoinc_store( /** Restore autoinc value when the table is loaded. @param[in] table table loaded */ -UNIV_INTERN void dict_table_autoinc_restore( dict_table_t* table) @@ -823,8 +919,7 @@ dict_table_autoinc_restore( /********************************************************************//** Reads the next autoinc value (== autoinc counter value), 0 if not yet initialized. 
-@return value for a new row, or 0 */ -UNIV_INTERN +@return value for a new row, or 0 */ ib_uint64_t dict_table_autoinc_read( /*====================*/ @@ -838,7 +933,6 @@ dict_table_autoinc_read( /********************************************************************//** Updates the autoinc counter if the value supplied is greater than the current value. */ -UNIV_INTERN void dict_table_autoinc_update_if_greater( /*=================================*/ @@ -856,7 +950,6 @@ dict_table_autoinc_update_if_greater( /********************************************************************//** Release the autoinc lock. */ -UNIV_INTERN void dict_table_autoinc_unlock( /*======================*/ @@ -866,35 +959,39 @@ dict_table_autoinc_unlock( } #endif /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Looks for column n in an index. +/** Looks for column n in an index. +@param[in] index index +@param[in] n column number +@param[in] inc_prefix true=consider column prefixes too +@param[in] is_virtual true==virtual column +@param[out] prefix_col_pos col num if prefix @return position in internal representation of the index; ULINT_UNDEFINED if not contained */ -UNIV_INTERN ulint dict_index_get_nth_col_or_prefix_pos( -/*=================================*/ - const dict_index_t* index, /*!< in: index */ - ulint n, /*!< in: column number */ - ibool inc_prefix, /*!< in: TRUE=consider - column prefixes too */ - ulint* prefix_col_pos) /*!< out: col num if prefix */ + const dict_index_t* index, + ulint n, + bool inc_prefix, + bool is_virtual, + ulint* prefix_col_pos) { const dict_field_t* field; const dict_col_t* col; ulint pos; ulint n_fields; - ulint prefixed_pos_dummy; ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - if (!prefix_col_pos) { - prefix_col_pos = &prefixed_pos_dummy; + if (prefix_col_pos) { + *prefix_col_pos = ULINT_UNDEFINED; } - *prefix_col_pos = ULINT_UNDEFINED; - col = dict_table_get_nth_col(index->table, n); + if (is_virtual) { + col = &(dict_table_get_nth_v_col(index->table, n)->m_col); + } else { + col = dict_table_get_nth_col(index->table, n); + } if (dict_index_is_clust(index)) { @@ -907,7 +1004,9 @@ dict_index_get_nth_col_or_prefix_pos( field = dict_index_get_nth_field(index, pos); if (col == field->col) { - *prefix_col_pos = pos; + if (prefix_col_pos) { + *prefix_col_pos = pos; + } if (inc_prefix || field->prefix_len == 0) { return(pos); } @@ -918,15 +1017,16 @@ dict_index_get_nth_col_or_prefix_pos( } #ifndef UNIV_HOTBACKUP -/********************************************************************//** -Returns TRUE if the index contains a column or a prefix of that column. -@return TRUE if contains the column or its prefix */ -UNIV_INTERN +/** Returns TRUE if the index contains a column or a prefix of that column. 
+@param[in] index index +@param[in] n column number +@param[in] is_virtual whether it is a virtual col +@return TRUE if contains the column or its prefix */ ibool dict_index_contains_col_or_prefix( -/*==============================*/ - const dict_index_t* index, /*!< in: index */ - ulint n) /*!< in: column number */ + const dict_index_t* index, + ulint n, + bool is_virtual) { const dict_field_t* field; const dict_col_t* col; @@ -941,7 +1041,11 @@ dict_index_contains_col_or_prefix( return(TRUE); } - col = dict_table_get_nth_col(index->table, n); + if (is_virtual) { + col = &dict_table_get_nth_v_col(index->table, n)->m_col; + } else { + col = dict_table_get_nth_col(index->table, n); + } n_fields = dict_index_get_n_fields(index); @@ -964,7 +1068,6 @@ column in index2. That is, we must be able to construct the prefix in index2 from the prefix in index. @return position in internal representation of the index; ULINT_UNDEFINED if not contained */ -UNIV_INTERN ulint dict_index_get_nth_field_pos( /*=========================*/ @@ -984,9 +1087,22 @@ dict_index_get_nth_field_pos( n_fields = dict_index_get_n_fields(index); + /* Are we looking for a MBR (Minimum Bound Box) field of + a spatial index */ + bool is_mbr_fld = (n == 0 && dict_index_is_spatial(index2)); + for (pos = 0; pos < n_fields; pos++) { field = dict_index_get_nth_field(index, pos); + /* The first field of a spatial index is a transformed + MBR (Minimum Bound Box) field made out of original column, + so its field->col still points to original cluster index + col, but the actual content is different. So we cannot + consider them equal if neither of them is MBR field */ + if (pos == 0 && dict_index_is_spatial(index) && !is_mbr_fld) { + continue; + } + if (field->col == field2->col && (field->prefix_len == 0 || (field->prefix_len >= field2->prefix_len @@ -1001,8 +1117,7 @@ dict_index_get_nth_field_pos( /**********************************************************************//** Returns a table object based on table id. -@return table, NULL if does not exist */ -UNIV_INTERN +@return table, NULL if does not exist */ dict_table_t* dict_table_open_on_id( /*==================*/ @@ -1031,7 +1146,7 @@ dict_table_open_on_id( dict_move_to_mru(table); } - ++table->n_ref_count; + table->acquire(); MONITOR_INC(MONITOR_TABLE_REFERENCE); } @@ -1046,23 +1161,22 @@ dict_table_open_on_id( /********************************************************************//** Looks for column n position in the clustered index. -@return position in internal representation of the clustered index */ -UNIV_INTERN +@return position in internal representation of the clustered index */ ulint dict_table_get_nth_col_pos( /*=======================*/ const dict_table_t* table, /*!< in: table */ - ulint n) /*!< in: column number */ + ulint n, /*!< in: column number */ + ulint* prefix_col_pos) { return(dict_index_get_nth_col_pos(dict_table_get_first_index(table), - n, NULL)); + n, prefix_col_pos)); } /********************************************************************//** Checks if a column is in the ordering columns of the clustered index of a table. Column prefixes are treated like whole columns. 
-@return TRUE if the column, or its prefix, is in the clustered key */ -UNIV_INTERN +@return TRUE if the column, or its prefix, is in the clustered key */ ibool dict_table_col_in_clustered_key( /*============================*/ @@ -1097,38 +1211,43 @@ dict_table_col_in_clustered_key( /**********************************************************************//** Inits the data dictionary module. */ -UNIV_INTERN void dict_init(void) /*===========*/ { - dict_sys = static_cast(mem_zalloc(sizeof(*dict_sys))); + dict_operation_lock = static_cast( + ut_zalloc_nokey(sizeof(*dict_operation_lock))); + + dict_sys = static_cast(ut_zalloc_nokey(sizeof(*dict_sys))); + + UT_LIST_INIT(dict_sys->table_LRU, &dict_table_t::table_LRU); + UT_LIST_INIT(dict_sys->table_non_LRU, &dict_table_t::table_LRU); - mutex_create(dict_sys_mutex_key, &dict_sys->mutex, SYNC_DICT); + mutex_create(LATCH_ID_DICT_SYS, &dict_sys->mutex); + + dict_sys->table_hash = hash_create( + buf_pool_get_curr_size() + / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); + + dict_sys->table_id_hash = hash_create( + buf_pool_get_curr_size() + / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); - dict_sys->table_hash = hash_create(buf_pool_get_curr_size() - / (DICT_POOL_PER_TABLE_HASH - * UNIV_WORD_SIZE)); - dict_sys->table_id_hash = hash_create(buf_pool_get_curr_size() - / (DICT_POOL_PER_TABLE_HASH - * UNIV_WORD_SIZE)); rw_lock_create(dict_operation_lock_key, - &dict_operation_lock, SYNC_DICT_OPERATION); + dict_operation_lock, SYNC_DICT_OPERATION); if (!srv_read_only_mode) { - dict_foreign_err_file = os_file_create_tmpfile(NULL); + dict_foreign_err_file = os_file_create_tmpfile(); ut_a(dict_foreign_err_file); - - mutex_create(dict_foreign_err_mutex_key, - &dict_foreign_err_mutex, SYNC_NO_ORDER_CHECK); } + mutex_create(LATCH_ID_DICT_FOREIGN_ERR, &dict_foreign_err_mutex); + dict_sys->autoinc_map = new autoinc_map_t(); } /**********************************************************************//** Move to the most recently used segment of the LRU list. */ -UNIV_INTERN void dict_move_to_mru( /*=============*/ @@ -1140,9 +1259,9 @@ dict_move_to_mru( ut_a(table->can_be_evicted); - UT_LIST_REMOVE(table_LRU, dict_sys->table_LRU, table); + UT_LIST_REMOVE(dict_sys->table_LRU, table); - UT_LIST_ADD_FIRST(table_LRU, dict_sys->table_LRU, table); + UT_LIST_ADD_FIRST(dict_sys->table_LRU, table); ut_ad(dict_lru_validate()); } @@ -1152,8 +1271,7 @@ Returns a table object and increment its open handle count. NOTE! This is a high-level function to be used mainly from outside the 'dict' module. Inside this directory dict_table_get_low is usually the appropriate function. 
-@return table, NULL if does not exist */ -UNIV_INTERN +@return table, NULL if does not exist */ dict_table_t* dict_table_open_on_name( /*====================*/ @@ -1167,9 +1285,11 @@ dict_table_open_on_name( loading a table definition */ { dict_table_t* table; + DBUG_ENTER("dict_table_open_on_name"); + DBUG_PRINT("dict_table_open_on_name", ("table: '%s'", table_name)); if (!dict_locked) { - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); } ut_ad(table_name); @@ -1178,7 +1298,7 @@ dict_table_open_on_name( table = dict_table_check_if_in_cache_low(table_name); if (table == NULL) { - table = dict_load_table(table_name, TRUE, ignore_err); + table = dict_load_table(table_name, true, ignore_err); } ut_ad(!table || table->cached); @@ -1197,42 +1317,38 @@ dict_table_open_on_name( dict_move_to_mru(table); } - ++table->n_ref_count; + table->acquire(); if (!dict_locked) { mutex_exit(&dict_sys->mutex); } - return (table); + DBUG_RETURN(table); } /* If table is corrupted, return NULL */ else if (ignore_err == DICT_ERR_IGNORE_NONE && table->corrupted) { /* Make life easy for drop table. */ - if (table->can_be_evicted) { - dict_table_move_from_lru_to_non_lru(table); - } + dict_table_prevent_eviction(table); if (!dict_locked) { mutex_exit(&dict_sys->mutex); } - ut_print_timestamp(stderr); + ib::info() << "Table " + << table->name + << " is corrupted. Please drop the table" + " and recreate it"; - fprintf(stderr, " InnoDB: table "); - ut_print_name(stderr, NULL, TRUE, table->name); - fprintf(stderr, "is corrupted. Please drop the table " - "and recreate\n"); - - return(NULL); + DBUG_RETURN(NULL); } if (table->can_be_evicted) { dict_move_to_mru(table); } - ++table->n_ref_count; + table->acquire(); MONITOR_INC(MONITOR_TABLE_REFERENCE); } @@ -1243,13 +1359,12 @@ dict_table_open_on_name( dict_table_try_drop_aborted_and_mutex_exit(table, try_drop); } - return(table); + DBUG_RETURN(table); } #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Adds system columns to a table object. */ -UNIV_INTERN void dict_table_add_system_columns( /*==========================*/ @@ -1257,19 +1372,27 @@ dict_table_add_system_columns( mem_heap_t* heap) /*!< in: temporary heap */ { ut_ad(table); - ut_ad(table->n_def == table->n_cols - DATA_N_SYS_COLS); + ut_ad(table->n_def == + (table->n_cols - dict_table_get_n_sys_cols(table))); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(!table->cached); /* NOTE: the system columns MUST be added in the following order (so that they can be indexed by the numerical value of DATA_ROW_ID, etc.) and as the last columns of the table memory object. - The clustered index will not always physically contain all - system columns. */ + The clustered index will not always physically contain all system + columns. + Intrinsic table don't need DB_ROLL_PTR as UNDO logging is turned off + for these tables. 
*/ dict_mem_table_add_col(table, heap, "DB_ROW_ID", DATA_SYS, DATA_ROW_ID | DATA_NOT_NULL, DATA_ROW_ID_LEN); + +#if (DATA_ITT_N_SYS_COLS != 2) +#error "DATA_ITT_N_SYS_COLS != 2" +#endif + #if DATA_ROW_ID != 0 #error "DATA_ROW_ID != 0" #endif @@ -1279,24 +1402,51 @@ dict_table_add_system_columns( #if DATA_TRX_ID != 1 #error "DATA_TRX_ID != 1" #endif - dict_mem_table_add_col(table, heap, "DB_ROLL_PTR", DATA_SYS, - DATA_ROLL_PTR | DATA_NOT_NULL, - DATA_ROLL_PTR_LEN); + + if (!dict_table_is_intrinsic(table)) { + dict_mem_table_add_col(table, heap, "DB_ROLL_PTR", DATA_SYS, + DATA_ROLL_PTR | DATA_NOT_NULL, + DATA_ROLL_PTR_LEN); #if DATA_ROLL_PTR != 2 #error "DATA_ROLL_PTR != 2" #endif - /* This check reminds that if a new system column is added to - the program, it should be dealt with here */ + /* This check reminds that if a new system column is added to + the program, it should be dealt with here */ #if DATA_N_SYS_COLS != 3 #error "DATA_N_SYS_COLS != 3" #endif + } } #ifndef UNIV_HOTBACKUP +/** Mark if table has big rows. +@param[in,out] table table handler */ +void +dict_table_set_big_rows( + dict_table_t* table) +{ + ulint row_len = 0; + for (ulint i = 0; i < table->n_def; i++) { + ulint col_len = dict_col_get_max_size( + dict_table_get_nth_col(table, i)); + + row_len += col_len; + + /* If we have a single unbounded field, or several gigantic + fields, mark the maximum row size as BIG_ROW_SIZE. */ + if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) { + row_len = BIG_ROW_SIZE; + + break; + } + } + + table->big_rows = (row_len >= BIG_ROW_SIZE) ? TRUE : FALSE; +} + /**********************************************************************//** Adds a table object to the dictionary cache. */ -UNIV_INTERN void dict_table_add_to_cache( /*====================*/ @@ -1306,47 +1456,25 @@ dict_table_add_to_cache( { ulint fold; ulint id_fold; - ulint i; - ulint row_len; ut_ad(dict_lru_validate()); - - /* The lower limit for what we consider a "big" row */ -#define BIG_ROW_SIZE 1024 - - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); dict_table_add_system_columns(table, heap); table->cached = TRUE; - fold = ut_fold_string(table->name); + fold = ut_fold_string(table->name.m_name); id_fold = ut_fold_ull(table->id); - row_len = 0; - for (i = 0; i < table->n_def; i++) { - ulint col_len = dict_col_get_max_size( - dict_table_get_nth_col(table, i)); - - row_len += col_len; - - /* If we have a single unbounded field, or several gigantic - fields, mark the maximum row size as BIG_ROW_SIZE. 
*/ - if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) { - row_len = BIG_ROW_SIZE; - - break; - } - } - - table->big_rows = row_len >= BIG_ROW_SIZE; + dict_table_set_big_rows(table); /* Look for a table with the same name: error if such exists */ { dict_table_t* table2; HASH_SEARCH(name_hash, dict_sys->table_hash, fold, dict_table_t*, table2, ut_ad(table2->cached), - ut_strcmp(table2->name, table->name) == 0); + !strcmp(table2->name.m_name, table->name.m_name)); ut_a(table2 == NULL); #ifdef UNIV_DEBUG @@ -1386,9 +1514,9 @@ dict_table_add_to_cache( table->can_be_evicted = can_be_evicted; if (table->can_be_evicted) { - UT_LIST_ADD_FIRST(table_LRU, dict_sys->table_LRU, table); + UT_LIST_ADD_FIRST(dict_sys->table_LRU, table); } else { - UT_LIST_ADD_FIRST(table_LRU, dict_sys->table_non_LRU, table); + UT_LIST_ADD_FIRST(dict_sys->table_non_LRU, table); } dict_table_autoinc_restore(table); @@ -1396,7 +1524,7 @@ dict_table_add_to_cache( ut_ad(dict_lru_validate()); dict_sys->size += mem_heap_get_size(table->heap) - + strlen(table->name) + 1; + + strlen(table->name.m_name) + 1; } /**********************************************************************//** @@ -1409,15 +1537,13 @@ dict_table_can_be_evicted( const dict_table_t* table) /*!< in: table to test */ { ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_a(table->can_be_evicted); ut_a(table->foreign_set.empty()); ut_a(table->referenced_set.empty()); - if (table->n_ref_count == 0) { + if (table->get_ref_count() == 0) { dict_index_t* index; /* The transaction commit and rollback are called from @@ -1447,7 +1573,7 @@ dict_table_can_be_evicted( See also: dict_index_remove_from_cache_low() */ - if (btr_search_info_get_ref_count(info) > 0) { + if (btr_search_info_get_ref_count(info, index) > 0) { return(FALSE); } } @@ -1464,7 +1590,6 @@ should not be part of FK relationship and currently not used in any user transaction. There is no guarantee that it will remove a table. @return number of tables evicted. If the number of tables in the dict_LRU is less than max_tables it will not do anything. */ -UNIV_INTERN ulint dict_make_room_in_cache( /*====================*/ @@ -1480,9 +1605,7 @@ dict_make_room_in_cache( ut_a(pct_check > 0); ut_a(pct_check <= 100); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(dict_lru_validate()); i = len = UT_LIST_GET_LEN(dict_sys->table_LRU); @@ -1524,7 +1647,6 @@ dict_make_room_in_cache( /**********************************************************************//** Move a table to the non-LRU list from the LRU list. */ -UNIV_INTERN void dict_table_move_from_lru_to_non_lru( /*================================*/ @@ -1535,42 +1657,22 @@ dict_table_move_from_lru_to_non_lru( ut_a(table->can_be_evicted); - UT_LIST_REMOVE(table_LRU, dict_sys->table_LRU, table); + UT_LIST_REMOVE(dict_sys->table_LRU, table); - UT_LIST_ADD_LAST(table_LRU, dict_sys->table_non_LRU, table); + UT_LIST_ADD_LAST(dict_sys->table_non_LRU, table); table->can_be_evicted = FALSE; } -/**********************************************************************//** -Move a table to the LRU list from the non-LRU list. 
*/ -UNIV_INTERN -void -dict_table_move_from_non_lru_to_lru( -/*================================*/ - dict_table_t* table) /*!< in: table to move from non-LRU to LRU */ -{ - ut_ad(mutex_own(&dict_sys->mutex)); - ut_ad(dict_non_lru_find_table(table)); - - ut_a(!table->can_be_evicted); - - UT_LIST_REMOVE(table_LRU, dict_sys->table_non_LRU, table); - - UT_LIST_ADD_LAST(table_LRU, dict_sys->table_LRU, table); - - table->can_be_evicted = TRUE; -} - -/**********************************************************************//** -Looks for an index with the given id given a table instance. -@return index or NULL */ +/** Looks for an index with the given id given a table instance. +@param[in] table table instance +@param[in] id index id +@return index or NULL */ UNIV_INTERN dict_index_t* dict_table_find_index_on_id( -/*========================*/ - const dict_table_t* table, /*!< in: table instance */ - index_id_t id) /*!< in: index id */ + const dict_table_t* table, + index_id_t id) { dict_index_t* index; @@ -1592,8 +1694,7 @@ dict_table_find_index_on_id( Looks for an index with the given id. NOTE that we do not reserve the dictionary mutex: this function is for emergency purposes like printing info of a corrupt database page! -@return index or NULL if not found in cache */ -UNIV_INTERN +@return index or NULL if not found in cache */ dict_index_t* dict_index_find_on_id_low( /*======================*/ @@ -1648,8 +1749,7 @@ struct dict_foreign_remove_partial /**********************************************************************//** Renames a table object. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ dberr_t dict_table_rename_in_cache( /*=======================*/ @@ -1664,20 +1764,16 @@ dict_table_rename_in_cache( dict_index_t* index; ulint fold; char old_name[MAX_FULL_NAME_LEN + 1]; - os_file_type_t ftype; - ibool exists; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); /* store the old/current name to an automatic variable */ - if (strlen(table->name) + 1 <= sizeof(old_name)) { - memcpy(old_name, table->name, strlen(table->name) + 1); + if (strlen(table->name.m_name) + 1 <= sizeof(old_name)) { + strcpy(old_name, table->name.m_name); } else { - ut_print_timestamp(stderr); - fprintf(stderr, "InnoDB: too long table name: '%s', " - "max length is %d\n", table->name, - MAX_FULL_NAME_LEN); - ut_error; + ib::fatal() << "Too long table name: " + << table->name + << ", max length is " << MAX_FULL_NAME_LEN; } fold = ut_fold_string(new_name); @@ -1686,16 +1782,15 @@ dict_table_rename_in_cache( dict_table_t* table2; HASH_SEARCH(name_hash, dict_sys->table_hash, fold, dict_table_t*, table2, ut_ad(table2->cached), - (ut_strcmp(table2->name, new_name) == 0)); + (ut_strcmp(table2->name.m_name, new_name) == 0)); DBUG_EXECUTE_IF("dict_table_rename_in_cache_failure", if (table2 == NULL) { table2 = (dict_table_t*) -1; } ); if (table2) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot rename table '%s' to '%s' since the " - "dictionary cache already contains '%s'.", - old_name, new_name, new_name); + ib::error() << "Cannot rename table '" << old_name + << "' to '" << new_name << "' since the" + " dictionary cache already contains '" << new_name << "'."; return(DB_ERROR); } @@ -1703,48 +1798,50 @@ dict_table_rename_in_cache( .ibd file and rebuild the .isl file if needed. 
*/ if (dict_table_is_discarded(table)) { + os_file_type_t type; + bool exists; char* filepath; - ut_ad(table->space != TRX_SYS_SPACE); + ut_ad(dict_table_is_file_per_table(table)); + ut_ad(!dict_table_is_temporary(table)); - if (DICT_TF_HAS_DATA_DIR(table->flags)) { + /* Make sure the data_dir_path is set. */ + dict_get_and_save_data_dir_path(table, true); - dict_get_and_save_data_dir_path(table, true); + if (DICT_TF_HAS_DATA_DIR(table->flags)) { ut_a(table->data_dir_path); - filepath = os_file_make_remote_pathname( - table->data_dir_path, table->name, "ibd"); + filepath = fil_make_filepath( + table->data_dir_path, table->name.m_name, + IBD, true); } else { - filepath = fil_make_ibd_name(table->name, false); + filepath = fil_make_filepath( + NULL, table->name.m_name, IBD, false); + } + + if (filepath == NULL) { + return(DB_OUT_OF_MEMORY); } fil_delete_tablespace(table->space, BUF_REMOVE_ALL_NO_WRITE); /* Delete any temp file hanging around. */ - if (os_file_status(filepath, &exists, &ftype) + if (os_file_status(filepath, &exists, &type) && exists - && !os_file_delete_if_exists(innodb_file_temp_key, - filepath)) { - - ib_logf(IB_LOG_LEVEL_INFO, - "Delete of %s failed.", filepath); - } - - mem_free(filepath); - - } else if (table->space != TRX_SYS_SPACE) { - if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)) { - ut_print_timestamp(stderr); - fputs(" InnoDB: Error: trying to rename a" - " TEMPORARY TABLE ", stderr); - ut_print_name(stderr, NULL, TRUE, old_name); - if (table->dir_path_of_temp_table != NULL) { - fputs(" (", stderr); - ut_print_filename( - stderr, table->dir_path_of_temp_table); - fputs(" )\n", stderr); - } + && !os_file_delete_if_exists(innodb_temp_file_key, + filepath, NULL)) { + + ib::info() << "Delete of " << filepath << " failed."; + } + + ut_free(filepath); + } else if (dict_table_is_file_per_table(table)) { + if (table->dir_path_of_temp_table != NULL) { + ib::error() << "Trying to rename a TEMPORARY TABLE " + << old_name + << " ( " << table->dir_path_of_temp_table + << " )"; return(DB_ERROR); } @@ -1755,35 +1852,27 @@ dict_table_rename_in_cache( new_path = os_file_make_new_pathname( old_path, new_name); - err = fil_create_link_file(new_name, new_path); + err = RemoteDatafile::create_link_file( + new_name, new_path); + if (err != DB_SUCCESS) { - mem_free(new_path); - mem_free(old_path); + ut_free(new_path); + ut_free(old_path); return(DB_TABLESPACE_EXISTS); } - } else { - new_path = fil_make_ibd_name(new_name, false); } - /* New filepath must not exist. */ - err = fil_rename_tablespace_check( - table->space, old_path, new_path, false); - if (err != DB_SUCCESS) { - mem_free(old_path); - mem_free(new_path); - return(err); - } - - ibool success = fil_rename_tablespace( - old_name, table->space, new_name, new_path); + bool success = fil_rename_tablespace( + table->space, old_path, new_name, new_path); - mem_free(old_path); - mem_free(new_path); + ut_free(old_path); /* If the tablespace is remote, a new .isl file was created If success, delete the old one. If not, delete the new one. */ - if (DICT_TF_HAS_DATA_DIR(table->flags)) { - fil_delete_link_file(success ? old_name : new_name); + if (new_path) { + + ut_free(new_path); + RemoteDatafile::delete_link_file(success ? 
old_name : new_name); } if (!success) { @@ -1795,16 +1884,16 @@ dict_table_rename_in_cache( HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, ut_fold_string(old_name), table); - if (strlen(new_name) > strlen(table->name)) { + if (strlen(new_name) > strlen(table->name.m_name)) { /* We allocate MAX_FULL_NAME_LEN + 1 bytes here to avoid memory fragmentation, we assume a repeated calls of ut_realloc() with the same size do not cause fragmentation */ ut_a(strlen(new_name) <= MAX_FULL_NAME_LEN); - table->name = static_cast( - ut_realloc(table->name, MAX_FULL_NAME_LEN + 1)); + table->name.m_name = static_cast( + ut_realloc(table->name.m_name, MAX_FULL_NAME_LEN + 1)); } - memcpy(table->name, new_name, strlen(new_name) + 1); + strcpy(table->name.m_name, new_name); /* Add table to hash table of tables */ HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold, @@ -1818,7 +1907,7 @@ dict_table_rename_in_cache( index != NULL; index = dict_table_get_next_index(index)) { - index->table_name = table->name; + index->table_name = table->name.m_name; } if (!rename_also_foreigns) { @@ -1876,15 +1965,16 @@ dict_table_rename_in_cache( } if (ut_strlen(foreign->foreign_table_name) - < ut_strlen(table->name)) { + < ut_strlen(table->name.m_name)) { /* Allocate a longer name buffer; TODO: store buf len to save memory */ foreign->foreign_table_name = mem_heap_strdup( - foreign->heap, table->name); + foreign->heap, table->name.m_name); dict_mem_foreign_table_name_lookup_set(foreign, TRUE); } else { - strcpy(foreign->foreign_table_name, table->name); + strcpy(foreign->foreign_table_name, + table->name.m_name); dict_mem_foreign_table_name_lookup_set(foreign, FALSE); } if (strchr(foreign->id, '/')) { @@ -1960,20 +2050,21 @@ dict_table_rename_in_cache( char table_name[MAX_TABLE_NAME_LEN] = ""; uint errors = 0; - if (strlen(table->name) > strlen(old_name)) { + if (strlen(table->name.m_name) + > strlen(old_name)) { foreign->id = static_cast( mem_heap_alloc( foreign->heap, - strlen(table->name) + strlen(table->name.m_name) + strlen(old_id) + 1)); } /* Convert the table name to UTF-8 */ - strncpy(table_name, table->name, + strncpy(table_name, table->name.m_name, MAX_TABLE_NAME_LEN); innobase_convert_to_system_charset( strchr(table_name, '/') + 1, - strchr(table->name, '/') + 1, + strchr(table->name.m_name, '/') + 1, MAX_TABLE_NAME_LEN, &errors); if (errors) { @@ -1981,7 +2072,7 @@ dict_table_rename_in_cache( from charset my_charset_filename to UTF-8. This means that the table name is already in UTF-8 (#mysql#50). 
*/ - strncpy(table_name, table->name, + strncpy(table_name, table->name.m_name, MAX_TABLE_NAME_LEN); } @@ -2001,9 +2092,10 @@ dict_table_rename_in_cache( } else { /* This is a >= 4.0.18 format id where the user gave the id name */ - db_len = dict_get_db_name_len(table->name) + 1; + db_len = dict_get_db_name_len( + table->name.m_name) + 1; - if (dict_get_db_name_len(table->name) + if (db_len - 1 > dict_get_db_name_len(foreign->id)) { foreign->id = static_cast( @@ -2015,13 +2107,14 @@ dict_table_rename_in_cache( /* Replace the database prefix in id with the one from table->name */ - ut_memcpy(foreign->id, table->name, db_len); + ut_memcpy(foreign->id, + table->name.m_name, db_len); strcpy(foreign->id + db_len, dict_remove_db_name(old_id)); } - mem_free(old_id); + ut_free(old_id); } table->foreign_set.erase(it); @@ -2042,18 +2135,19 @@ dict_table_rename_in_cache( foreign = *it; if (ut_strlen(foreign->referenced_table_name) - < ut_strlen(table->name)) { + < ut_strlen(table->name.m_name)) { /* Allocate a longer name buffer; TODO: store buf len to save memory */ foreign->referenced_table_name = mem_heap_strdup( - foreign->heap, table->name); + foreign->heap, table->name.m_name); dict_mem_referenced_table_name_lookup_set( foreign, TRUE); } else { /* Use the same buffer */ - strcpy(foreign->referenced_table_name, table->name); + strcpy(foreign->referenced_table_name, + table->name.m_name); dict_mem_referenced_table_name_lookup_set( foreign, FALSE); @@ -2066,7 +2160,6 @@ dict_table_rename_in_cache( /**********************************************************************//** Change the id of a table object in the dictionary cache. This is used in DISCARD TABLESPACE. */ -UNIV_INTERN void dict_table_change_id_in_cache( /*==========================*/ @@ -2074,7 +2167,7 @@ dict_table_change_id_in_cache( table_id_t new_id) /*!< in: new id to set */ { ut_ad(table); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Remove the table from the hash table of id's */ @@ -2099,13 +2192,13 @@ dict_table_remove_from_cache_low( { dict_foreign_t* foreign; dict_index_t* index; - ulint size; + lint size; ut_ad(table); ut_ad(dict_lru_validate()); - ut_a(table->n_ref_count == 0); + ut_a(table->get_ref_count() == 0); ut_a(table->n_rec_locks == 0); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Remove the foreign constraints from the cache */ @@ -2135,7 +2228,7 @@ dict_table_remove_from_cache_low( /* Remove table from the hash tables of tables */ HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, - ut_fold_string(table->name), table); + ut_fold_string(table->name.m_name), table); HASH_DELETE(dict_table_t, id_hash, dict_sys->table_id_hash, ut_fold_ull(table->id), table); @@ -2143,10 +2236,10 @@ dict_table_remove_from_cache_low( /* Remove table from LRU or non-LRU list. 
*/ if (table->can_be_evicted) { ut_ad(dict_lru_find_table(table)); - UT_LIST_REMOVE(table_LRU, dict_sys->table_LRU, table); + UT_LIST_REMOVE(dict_sys->table_LRU, table); } else { ut_ad(dict_non_lru_find_table(table)); - UT_LIST_REMOVE(table_LRU, dict_sys->table_non_LRU, table); + UT_LIST_REMOVE(dict_sys->table_non_LRU, table); } ut_ad(dict_lru_validate()); @@ -2161,25 +2254,24 @@ dict_table_remove_from_cache_low( trx_t* trx = trx_allocate_for_background(); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + /* Mimic row_mysql_lock_data_dictionary(). */ trx->dict_operation_lock_mode = RW_X_LATCH; trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); /* Silence a debug assertion in row_merge_drop_indexes(). */ - ut_d(table->n_ref_count++); + ut_d(table->acquire()); row_merge_drop_indexes(trx, table, TRUE); - ut_d(table->n_ref_count--); - ut_ad(table->n_ref_count == 0); + ut_d(table->release()); + ut_ad(table->get_ref_count() == 0); trx_commit_for_mysql(trx); trx->dict_operation_lock_mode = 0; trx_free_for_background(trx); } - size = mem_heap_get_size(table->heap) + strlen(table->name) + 1; + size = mem_heap_get_size(table->heap) + strlen(table->name.m_name) + 1; ut_ad(dict_sys->size >= size); @@ -2190,7 +2282,6 @@ dict_table_remove_from_cache_low( /**********************************************************************//** Removes a table object from the dictionary cache. */ -UNIV_INTERN void dict_table_remove_from_cache( /*=========================*/ @@ -2202,8 +2293,7 @@ dict_table_remove_from_cache( /****************************************************************//** If the given column name is reserved for InnoDB system columns, return TRUE. -@return TRUE if name is reserved */ -UNIV_INTERN +@return TRUE if name is reserved */ ibool dict_col_name_is_reserved( /*======================*/ @@ -2231,159 +2321,107 @@ dict_col_name_is_reserved( return(FALSE); } -#if 1 /* This function is not very accurate at determining - whether an UNDO record will be too big. See innodb_4k.test, - Bug 13336585, for a testcase that shows an index that can - be created but cannot be updated. */ - /****************************************************************//** -If an undo log record for this table might not fit on a single page, -return TRUE. -@return TRUE if the undo log record could become too big */ -static -ibool -dict_index_too_big_for_undo( -/*========================*/ - const dict_table_t* table, /*!< in: table */ - const dict_index_t* new_index) /*!< in: index */ +Return maximum size of the node pointer record. +@return maximum size of the record in bytes */ +ulint +dict_index_node_ptr_max_size( +/*=========================*/ + const dict_index_t* index) /*!< in: index */ { - /* Make sure that all column prefixes will fit in the undo log record - in trx_undo_page_report_modify() right after trx_undo_page_init(). 
*/ - - ulint i; - const dict_index_t* clust_index - = dict_table_get_first_index(table); - ulint undo_page_len - = TRX_UNDO_PAGE_HDR - TRX_UNDO_PAGE_HDR_SIZE - + 2 /* next record pointer */ - + 1 /* type_cmpl */ - + 11 /* trx->undo_no */ + 11 /* table->id */ - + 1 /* rec_get_info_bits() */ - + 11 /* DB_TRX_ID */ - + 11 /* DB_ROLL_PTR */ - + 10 + FIL_PAGE_DATA_END /* trx_undo_left() */ - + 2/* pointer to previous undo log record */; + ulint comp; + ulint i; + /* maximum possible storage size of a record */ + ulint rec_max_size; - /* FTS index consists of auxiliary tables, they shall be excluded from - index row size check */ - if (new_index->type & DICT_FTS) { - return(false); + if (dict_index_is_ibuf(index)) { + /* cannot estimate accurately */ + /* This is universal index for change buffer. + The max size of the entry is about max key length * 2. + (index key + primary key to be inserted to the index) + (The max key length is UNIV_PAGE_SIZE / 16 * 3 at + ha_innobase::max_supported_key_length(), + considering MAX_KEY_LENGTH = 3072 at MySQL imposes + the 3500 historical InnoDB value for 16K page size case.) + For the universal index, node_ptr contains most of the entry. + And 512 is enough to contain ibuf columns and meta-data */ + return(UNIV_PAGE_SIZE / 8 * 3 + 512); } - if (!clust_index) { - ut_a(dict_index_is_clust(new_index)); - clust_index = new_index; - } + comp = dict_table_is_comp(index->table); - /* Add the size of the ordering columns in the - clustered index. */ - for (i = 0; i < clust_index->n_uniq; i++) { - const dict_col_t* col - = dict_index_get_nth_col(clust_index, i); + /* Each record has page_no, length of page_no and header. */ + rec_max_size = comp + ? REC_NODE_PTR_SIZE + 1 + REC_N_NEW_EXTRA_BYTES + : REC_NODE_PTR_SIZE + 2 + REC_N_OLD_EXTRA_BYTES; - /* Use the maximum output size of - mach_write_compressed(), although the encoded - length should always fit in 2 bytes. */ - undo_page_len += 5 + dict_col_get_max_size(col); + if (comp) { + /* Include the "null" flags in the + maximum possible record size. */ + rec_max_size += UT_BITS_IN_BYTES(index->n_nullable); + } else { + /* For each column, include a 2-byte offset and a + "null" flag. */ + rec_max_size += 2 * index->n_fields; } - /* Add the old values of the columns to be updated. - First, the amount and the numbers of the columns. - These are written by mach_write_compressed() whose - maximum output length is 5 bytes. However, given that - the quantities are below REC_MAX_N_FIELDS (10 bits), - the maximum length is 2 bytes per item. */ - undo_page_len += 2 * (dict_table_get_n_cols(table) + 1); - - for (i = 0; i < clust_index->n_def; i++) { + /* Compute the maximum possible record size. */ + for (i = 0; i < dict_index_get_n_unique_in_tree(index); i++) { + const dict_field_t* field + = dict_index_get_nth_field(index, i); const dict_col_t* col - = dict_index_get_nth_col(clust_index, i); - ulint max_size - = dict_col_get_max_size(col); - ulint fixed_size - = dict_col_get_fixed_size(col, - dict_table_is_comp(table)); - ulint max_prefix - = col->max_prefix; - - if (fixed_size) { - /* Fixed-size columns are stored locally. */ - max_size = fixed_size; - } else if (max_size <= BTR_EXTERN_FIELD_REF_SIZE * 2) { - /* Short columns are stored locally. */ - } else if (!col->ord_part - || (col->max_prefix - < (ulint) DICT_MAX_FIELD_LEN_BY_FORMAT(table))) { - /* See if col->ord_part would be set - because of new_index. 
Also check if the new - index could have longer prefix on columns - that already had ord_part set */ - ulint j; - - for (j = 0; j < new_index->n_uniq; j++) { - if (dict_index_get_nth_col( - new_index, j) == col) { - const dict_field_t* field - = dict_index_get_nth_field( - new_index, j); - - if (field->prefix_len - > col->max_prefix) { - max_prefix = - field->prefix_len; - } - - goto is_ord_part; - } - } - - if (col->ord_part) { - goto is_ord_part; - } + = dict_field_get_col(field); + ulint field_max_size; + ulint field_ext_max_size; - /* This is not an ordering column in any index. - Thus, it can be stored completely externally. */ - max_size = BTR_EXTERN_FIELD_REF_SIZE; - } else { - ulint max_field_len; -is_ord_part: - max_field_len = DICT_MAX_FIELD_LEN_BY_FORMAT(table); + /* Determine the maximum length of the index field. */ - /* This is an ordering column in some index. - A long enough prefix must be written to the - undo log. See trx_undo_page_fetch_ext(). */ - max_size = ut_min(max_size, max_field_len); + field_max_size = dict_col_get_fixed_size(col, comp); + if (field_max_size) { + /* dict_index_add_col() should guarantee this */ + ut_ad(!field->prefix_len + || field->fixed_len == field->prefix_len); + /* Fixed lengths are not encoded + in ROW_FORMAT=COMPACT. */ + rec_max_size += field_max_size; + continue; + } - /* We only store the needed prefix length in undo log */ - if (max_prefix) { - ut_ad(dict_table_get_format(table) - >= UNIV_FORMAT_B); + field_max_size = dict_col_get_max_size(col); + field_ext_max_size = field_max_size < 256 ? 1 : 2; - max_size = ut_min(max_prefix, max_size); - } + if (field->prefix_len + && field->prefix_len < field_max_size) { + field_max_size = field->prefix_len; + } - max_size += BTR_EXTERN_FIELD_REF_SIZE; + if (comp) { + /* Add the extra size for ROW_FORMAT=COMPACT. + For ROW_FORMAT=REDUNDANT, these bytes were + added to rec_max_size before this loop. */ + rec_max_size += field_ext_max_size; } - undo_page_len += 5 + max_size; + rec_max_size += field_max_size; } - return(undo_page_len >= UNIV_PAGE_SIZE); + return(rec_max_size); } -#endif /****************************************************************//** If a record of this index might not fit on a single B-tree page, return TRUE. -@return TRUE if the index record could become too big */ +@return TRUE if the index record could become too big */ static ibool dict_index_too_big_for_tree( /*========================*/ const dict_table_t* table, /*!< in: table */ - const dict_index_t* new_index) /*!< in: index */ + const dict_index_t* new_index, /*!< in: index */ + bool strict) /*!< in: TRUE=report error if + records could be too big to + fit in an B-tree page */ { - ulint zip_size; ulint comp; ulint i; /* maximum possible storage size of a record */ @@ -2404,20 +2442,22 @@ dict_index_too_big_for_tree( return(FALSE);); comp = dict_table_is_comp(table); - zip_size = dict_table_zip_size(table); - if (zip_size && zip_size < UNIV_PAGE_SIZE) { + const page_size_t page_size(dict_table_page_size(table)); + + if (page_size.is_compressed() + && page_size.physical() < univ_page_size.physical()) { /* On a compressed page, two records must fit in the - uncompressed page modification log. On compressed - pages with zip_size == UNIV_PAGE_SIZE, this limit will - never be reached. */ + uncompressed page modification log. On compressed pages + with size.physical() == univ_page_size.physical(), + this limit will never be reached. 
*/ ut_ad(comp); /* The maximum allowed record size is the size of an empty page, minus a byte for recoding the heap number in the page modification log. The maximum allowed node pointer size is half that. */ page_rec_max = page_zip_empty_size(new_index->n_fields, - zip_size); + page_size.physical()); if (page_rec_max) { page_rec_max--; } @@ -2428,9 +2468,12 @@ dict_index_too_big_for_tree( rec_max_size = 2; } else { /* The maximum allowed record size is half a B-tree - page. No additional sparse page directory entry will - be generated for the first few user records. */ - page_rec_max = page_get_free_space_of_empty(comp) / 2; + page(16k for 64k page size). No additional sparse + page directory entry will be generated for the first + few user records. */ + page_rec_max = srv_page_size == UNIV_PAGE_SIZE_MAX + ? REC_MAX_DATA_SIZE - 1 + : page_get_free_space_of_empty(comp) / 2; page_ptr_max = page_rec_max; /* Each record has a header. */ rec_max_size = comp @@ -2461,7 +2504,7 @@ dict_index_too_big_for_tree( ulint field_ext_max_size; /* In dtuple_convert_big_rec(), variable-length columns - that are longer than BTR_EXTERN_FIELD_REF_SIZE * 2 + that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE may be chosen for external storage. Fixed-length columns, and all columns of secondary @@ -2490,16 +2533,16 @@ dict_index_too_big_for_tree( if (field->prefix_len < field_max_size) { field_max_size = field->prefix_len; } - } else if (field_max_size > BTR_EXTERN_FIELD_REF_SIZE * 2 + } else if (field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE && dict_index_is_clust(new_index)) { /* In the worst case, we have a locally stored - column of BTR_EXTERN_FIELD_REF_SIZE * 2 bytes. + column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes. The length can be stored in one byte. If the column were stored externally, the lengths in the clustered index page would be BTR_EXTERN_FIELD_REF_SIZE and 2. */ - field_max_size = BTR_EXTERN_FIELD_REF_SIZE * 2; + field_max_size = BTR_EXTERN_LOCAL_STORED_MAX_SIZE; field_ext_max_size = 1; } @@ -2513,7 +2556,15 @@ add_field_size: rec_max_size += field_max_size; /* Check the size limit on leaf pages. */ - if (UNIV_UNLIKELY(rec_max_size >= page_rec_max)) { + if (rec_max_size >= page_rec_max) { + ib::error_or_warn(strict) + << "Cannot add field " << field->name + << " in table " << table->name + << " because after adding it, the row size is " + << rec_max_size + << " which is greater than maximum allowed" + " size (" << page_rec_max + << ") for a record on index leaf page."; return(TRUE); } @@ -2534,36 +2585,62 @@ add_field_size: return(FALSE); } -/**********************************************************************//** -Adds an index to the dictionary cache. -@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ -UNIV_INTERN +/** Adds an index to the dictionary cache. +@param[in,out] table table on which the index is +@param[in,out] index index; NOTE! The index memory + object is freed in this function! +@param[in] page_no root page number of the index +@param[in] strict TRUE=refuse to create the index + if records could be too big to fit in + an B-tree page +@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ dberr_t dict_index_add_to_cache( -/*====================*/ - dict_table_t* table, /*!< in: table on which the index is */ - dict_index_t* index, /*!< in, own: index; NOTE! The index memory - object is freed in this function! 
*/ - ulint page_no,/*!< in: root page number of the index */ - ibool strict) /*!< in: TRUE=refuse to create the index - if records could be too big to fit in - an B-tree page */ + dict_table_t* table, + dict_index_t* index, + ulint page_no, + ibool strict) +{ + return(dict_index_add_to_cache_w_vcol( + table, index, NULL, page_no, strict)); +} + +/** Adds an index to the dictionary cache, with possible indexing newly +added column. +@param[in,out] table table on which the index is +@param[in,out] index index; NOTE! The index memory + object is freed in this function! +@param[in] add_v new virtual column that being added along with + an add index call +@param[in] page_no root page number of the index +@param[in] strict TRUE=refuse to create the index + if records could be too big to fit in + an B-tree page +@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ +dberr_t +dict_index_add_to_cache_w_vcol( + dict_table_t* table, + dict_index_t* index, + const dict_add_v_col_t* add_v, + ulint page_no, + ibool strict) { dict_index_t* new_index; ulint n_ord; ulint i; ut_ad(index); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); ut_ad(index->n_def == index->n_fields); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); ut_ad(!dict_index_is_online_ddl(index)); + ut_ad(!dict_index_is_ibuf(index)); - ut_ad(mem_heap_validate(index->heap)); + ut_d(mem_heap_validate(index->heap)); ut_a(!dict_index_is_clust(index) || UT_LIST_GET_LEN(table->indexes) == 0); - if (!dict_index_find_cols(table, index)) { + if (!dict_index_find_cols(table, index, add_v)) { dict_mem_index_free(index); return(DB_CORRUPTION); @@ -2585,92 +2662,26 @@ dict_index_add_to_cache( new_index->n_fields = new_index->n_def; new_index->trx_id = index->trx_id; + new_index->set_committed(index->is_committed()); + new_index->allow_duplicates = index->allow_duplicates; + new_index->nulls_equal = index->nulls_equal; + new_index->disable_ahi = index->disable_ahi; - if (dict_index_too_big_for_tree(table, new_index)) { + if (dict_index_too_big_for_tree(table, new_index, strict)) { if (strict) { -too_big: dict_mem_index_free(new_index); dict_mem_index_free(index); return(DB_TOO_BIG_RECORD); } else if (current_thd != NULL) { /* Avoid the warning to be printed during recovery. */ - ib_warn_row_too_big(table); + ib_warn_row_too_big((const dict_table_t*)table); } } - if (dict_index_is_univ(index)) { - n_ord = new_index->n_fields; - } else { - n_ord = new_index->n_uniq; - } - -#if 1 /* The following code predetermines whether to call - dict_index_too_big_for_undo(). This function is not - accurate. See innodb_4k.test, Bug 13336585, for a - testcase that shows an index that can be created but - cannot be updated. */ - - switch (dict_table_get_format(table)) { - case UNIV_FORMAT_A: - /* ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT store - prefixes of externally stored columns locally within - the record. There are no special considerations for - the undo log record size. */ - goto undo_size_ok; - - case UNIV_FORMAT_B: - /* In ROW_FORMAT=DYNAMIC and ROW_FORMAT=COMPRESSED, - column prefix indexes require that prefixes of - externally stored columns are written to the undo log. - This may make the undo log record bigger than the - record on the B-tree page. The maximum size of an - undo log record is the page size. That must be - checked for below. 
*/ - break; - -#if UNIV_FORMAT_B != UNIV_FORMAT_MAX -# error "UNIV_FORMAT_B != UNIV_FORMAT_MAX" -#endif - } - - for (i = 0; i < n_ord; i++) { - const dict_field_t* field - = dict_index_get_nth_field(new_index, i); - const dict_col_t* col - = dict_field_get_col(field); - - /* In dtuple_convert_big_rec(), variable-length columns - that are longer than BTR_EXTERN_FIELD_REF_SIZE * 2 - may be chosen for external storage. If the column appears - in an ordering column of an index, a longer prefix determined - by dict_max_field_len_store_undo() will be copied to the undo - log by trx_undo_page_report_modify() and - trx_undo_page_fetch_ext(). It suffices to check the - capacity of the undo log whenever new_index includes - a column prefix on a column that may be stored externally. */ - - if (field->prefix_len /* prefix index */ - && (!col->ord_part /* not yet ordering column */ - || field->prefix_len > col->max_prefix) - && !dict_col_get_fixed_size(col, TRUE) /* variable-length */ - && dict_col_get_max_size(col) - > BTR_EXTERN_FIELD_REF_SIZE * 2 /* long enough */) { - - if (dict_index_too_big_for_undo(table, new_index)) { - /* An undo log record might not fit in - a single page. Refuse to create this index. */ - - goto too_big; - } + n_ord = new_index->n_uniq; - break; - } - } - -undo_size_ok: -#endif /* Flag the ordering columns and also set column max_prefix */ for (i = 0; i < n_ord; i++) { @@ -2684,26 +2695,23 @@ undo_size_ok: } } - if (!dict_index_is_univ(new_index)) { - - new_index->stat_n_diff_key_vals = - static_cast(mem_heap_zalloc( + new_index->stat_n_diff_key_vals = + static_cast(mem_heap_zalloc( new_index->heap, dict_index_get_n_unique(new_index) * sizeof(*new_index->stat_n_diff_key_vals))); - new_index->stat_n_sample_sizes = - static_cast(mem_heap_zalloc( + new_index->stat_n_sample_sizes = + static_cast(mem_heap_zalloc( new_index->heap, dict_index_get_n_unique(new_index) * sizeof(*new_index->stat_n_sample_sizes))); - new_index->stat_n_non_null_key_vals = - static_cast(mem_heap_zalloc( + new_index->stat_n_non_null_key_vals = + static_cast(mem_heap_zalloc( new_index->heap, dict_index_get_n_unique(new_index) * sizeof(*new_index->stat_n_non_null_key_vals))); - } new_index->stat_index_size = 1; new_index->stat_n_leaf_pages = 1; @@ -2717,17 +2725,48 @@ undo_size_ok: /* Add the new index as the last index for the table */ - UT_LIST_ADD_LAST(indexes, table->indexes, new_index); + UT_LIST_ADD_LAST(table->indexes, new_index); new_index->table = table; - new_index->table_name = table->name; + new_index->table_name = table->name.m_name; new_index->search_info = btr_search_info_create(new_index->heap); new_index->page = page_no; rw_lock_create(index_tree_rw_lock_key, &new_index->lock, - dict_index_is_ibuf(index) - ? SYNC_IBUF_INDEX_TREE : SYNC_INDEX_TREE); + SYNC_INDEX_TREE); - dict_sys->size += mem_heap_get_size(new_index->heap); + /* Intrinsic table are not added to dictionary cache instead are + cached to session specific thread cache. */ + if (!dict_table_is_intrinsic(table)) { + dict_sys->size += mem_heap_get_size(new_index->heap); + } + + /* Check if key part of the index is unique. 
*/ + if (dict_table_is_intrinsic(table)) { + + new_index->rec_cache.fixed_len_key = true; + for (i = 0; i < new_index->n_uniq; i++) { + + const dict_field_t* field; + field = dict_index_get_nth_field(new_index, i); + + if (!field->fixed_len) { + new_index->rec_cache.fixed_len_key = false; + break; + } + } + + new_index->rec_cache.key_has_null_cols = false; + for (i = 0; i < new_index->n_uniq; i++) { + + const dict_field_t* field; + field = dict_index_get_nth_field(new_index, i); + + if (!(field->col->prtype & DATA_NOT_NULL)) { + new_index->rec_cache.key_has_null_cols = true; + break; + } + } + } dict_mem_index_free(index); @@ -2745,14 +2784,14 @@ dict_index_remove_from_cache_low( ibool lru_evict) /*!< in: TRUE if index being evicted to make room in the table LRU list */ { - ulint size; + lint size; ulint retries = 0; btr_search_t* info; ut_ad(table && index); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); /* No need to acquire the dict_index_t::lock here because there can't be any active operations on this index (or table). */ @@ -2777,7 +2816,7 @@ dict_index_remove_from_cache_low( zero. See also: dict_table_can_be_evicted() */ do { - ulint ref_count = btr_search_info_get_ref_count(info); + ulint ref_count = btr_search_info_get_ref_count(info, index); if (ref_count == 0) { break; @@ -2789,16 +2828,11 @@ dict_index_remove_from_cache_low( if (retries % 500 == 0) { /* No luck after 5 seconds of wait. */ - fprintf(stderr, "InnoDB: Error: Waited for" - " %lu secs for hash index" - " ref_count (%lu) to drop" - " to 0.\n" - "index: \"%s\"" - " table: \"%s\"\n", - retries/100, - ref_count, - index->name, - table->name); + ib::error() << "Waited for " << retries / 100 + << " secs for hash index" + " ref_count (" << ref_count << ") to drop to 0." + " index: " << index->name + << " table: " << table->name; } /* To avoid a hang here we commit suicide if the @@ -2810,11 +2844,52 @@ dict_index_remove_from_cache_low( rw_lock_free(&index->lock); + /* The index is being dropped, remove any compression stats for it. */ + if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) { + mutex_enter(&page_zip_stat_per_index_mutex); + page_zip_stat_per_index.erase(index->id); + mutex_exit(&page_zip_stat_per_index_mutex); + } + /* Remove the index from the list of indexes of the table */ - UT_LIST_REMOVE(indexes, table->indexes, index); + UT_LIST_REMOVE(table->indexes, index); + + /* Remove the index from affected virtual column index list */ + if (dict_index_has_virtual(index)) { + const dict_col_t* col; + const dict_v_col_t* vcol; + + for (ulint i = 0; i < dict_index_get_n_fields(index); i++) { + col = dict_index_get_nth_col(index, i); + if (dict_col_is_virtual(col)) { + vcol = reinterpret_cast( + col); + + /* This could be NULL, when we do add virtual + column, add index together. 
We do not need to + track this virtual column's index */ + if (vcol->v_indexes == NULL) { + continue; + } + + dict_v_idx_list::iterator it; + + for (it = vcol->v_indexes->begin(); + it != vcol->v_indexes->end(); ++it) { + dict_v_idx_t v_index = *it; + if (v_index.index == index) { + vcol->v_indexes->erase(it); + break; + } + } + } + + } + } size = mem_heap_get_size(index->heap); + ut_ad(!dict_table_is_intrinsic(table)); ut_ad(dict_sys->size >= size); dict_sys->size -= size; @@ -2824,7 +2899,6 @@ dict_index_remove_from_cache_low( /**********************************************************************//** Removes an index from the dictionary cache. */ -UNIV_INTERN void dict_index_remove_from_cache( /*=========================*/ @@ -2834,43 +2908,97 @@ dict_index_remove_from_cache( dict_index_remove_from_cache_low(table, index, FALSE); } -/*******************************************************************//** -Tries to find column names for the index and sets the col field of the +/** Tries to find column names for the index and sets the col field of the index. +@param[in] table table +@param[in,out] index index +@param[in] add_v new virtual columns added along with an add index call @return TRUE if the column names were found */ static ibool dict_index_find_cols( -/*=================*/ - dict_table_t* table, /*!< in: table */ - dict_index_t* index) /*!< in: index */ + const dict_table_t* table, + dict_index_t* index, + const dict_add_v_col_t* add_v) { - ulint i; + std::vector > col_added; + std::vector > v_col_added; - ut_ad(table && index); + ut_ad(table != NULL && index != NULL); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); - for (i = 0; i < index->n_fields; i++) { + for (ulint i = 0; i < index->n_fields; i++) { ulint j; dict_field_t* field = dict_index_get_nth_field(index, i); for (j = 0; j < table->n_cols; j++) { if (!innobase_strcasecmp(dict_table_get_col_name(table, j), field->name)) { + + /* Check if same column is being assigned again + which suggest that column has duplicate name. */ + bool exists = + std::find(col_added.begin(), + col_added.end(), j) + != col_added.end(); + + if (exists) { + /* Duplicate column found. */ + goto dup_err; + } + field->col = dict_table_get_nth_col(table, j); + col_added.push_back(j); + + goto found; + } + } + + /* Let's check if it is a virtual column */ + for (j = 0; j < table->n_v_cols; j++) { + if (!strcmp(dict_table_get_v_col_name(table, j), + field->name)) { + + /* Check if same column is being assigned again + which suggest that column has duplicate name. */ + bool exists = + std::find(v_col_added.begin(), + v_col_added.end(), j) + != v_col_added.end(); + + if (exists) { + /* Duplicate column found. */ + break; + } + + field->col = reinterpret_cast( + dict_table_get_nth_v_col(table, j)); + + v_col_added.push_back(j); + goto found; } } + if (add_v) { + for (j = 0; j < add_v->n_v_col; j++) { + if (!strcmp(add_v->v_col_name[j], + field->name)) { + field->col = const_cast( + &add_v->v_col[j].m_col); + goto found; + } + } + } + +dup_err: #ifdef UNIV_DEBUG /* It is an error not to find a matching column. 
*/ - fputs("InnoDB: Error: no matching column for ", stderr); - ut_print_name(stderr, NULL, FALSE, field->name); - fputs(" in ", stderr); - dict_index_name_print(stderr, NULL, index); - fputs("!\n", stderr); + ib::error() << "No matching column for " << field->name + << " in index " << index->name + << " of table " << table->name; #endif /* UNIV_DEBUG */ return(FALSE); @@ -2884,7 +3012,6 @@ found: /*******************************************************************//** Adds a column to index. */ -UNIV_INTERN void dict_index_add_col( /*===============*/ @@ -2896,15 +3023,51 @@ dict_index_add_col( dict_field_t* field; const char* col_name; - col_name = dict_table_get_col_name(table, dict_col_get_no(col)); + if (dict_col_is_virtual(col)) { + dict_v_col_t* v_col = reinterpret_cast(col); + + /* When v_col->v_indexes==NULL, + ha_innobase::commit_inplace_alter_table(commit=true) + will evict and reload the table definition, and + v_col->v_indexes will not be NULL for the new table. */ + if (v_col->v_indexes != NULL) { + /* Register the index with the virtual column index + list */ + struct dict_v_idx_t new_idx + = {index, index->n_def}; + + v_col->v_indexes->push_back(new_idx); + + } + + col_name = dict_table_get_v_col_name_mysql( + table, dict_col_get_no(col)); + } else { + col_name = dict_table_get_col_name(table, dict_col_get_no(col)); + } dict_mem_index_add_field(index, col_name, prefix_len); field = dict_index_get_nth_field(index, index->n_def - 1); field->col = col; - field->fixed_len = (unsigned int) dict_col_get_fixed_size( - col, dict_table_is_comp(table)); + + /* DATA_POINT is a special type, whose fixed_len should be: + 1) DATA_MBR_LEN, when it's indexed in R-TREE. In this case, + it must be the first col to be added. + 2) DATA_POINT_LEN(be equal to fixed size of column), when it's + indexed in B-TREE, + 3) DATA_POINT_LEN, if a POINT col is the PRIMARY KEY, and we are + adding the PK col to other B-TREE/R-TREE. */ + /* TODO: We suppose the dimension is 2 now. */ + if (dict_index_is_spatial(index) && DATA_POINT_MTYPE(col->mtype) + && index->n_def == 1) { + field->fixed_len = DATA_MBR_LEN; + } else { + field->fixed_len = static_cast( + dict_col_get_fixed_size( + col, dict_table_is_comp(table))); + } if (prefix_len && field->fixed_len > prefix_len) { field->fixed_len = (unsigned int) prefix_len; @@ -2950,6 +3113,7 @@ dict_index_copy( for (i = start; i < end; i++) { field = dict_index_get_nth_field(index2, i); + dict_index_add_col(index1, table, field->col, field->prefix_len); } @@ -2957,7 +3121,6 @@ dict_index_copy( /*******************************************************************//** Copies types of fields contained in index to tuple. */ -UNIV_INTERN void dict_index_copy_types( /*==================*/ @@ -2968,7 +3131,7 @@ dict_index_copy_types( { ulint i; - if (dict_index_is_univ(index)) { + if (dict_index_is_ibuf(index)) { dtuple_set_types_binary(tuple, n_fields); return; @@ -2981,14 +3144,45 @@ dict_index_copy_types( ifield = dict_index_get_nth_field(index, i); dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); dict_col_copy_type(dict_field_get_col(ifield), dfield_type); + if (dict_index_is_spatial(index) + && DATA_GEOMETRY_MTYPE(dfield_type->mtype)) { + dfield_type->prtype |= DATA_GIS_MBR; + } } } +/** Copies types of virtual columns contained in table to tuple and sets all +fields of the tuple to the SQL NULL value. This function should +be called right after dtuple_create(). 
+@param[in,out] tuple data tuple +@param[in] table table +*/ +void +dict_table_copy_v_types( + dtuple_t* tuple, + const dict_table_t* table) +{ + /* tuple could have more virtual columns than existing table, + if we are calling this for creating index along with adding + virtual columns */ + ulint n_fields = ut_min(dtuple_get_n_v_fields(tuple), + static_cast(table->n_v_def)); + + for (ulint i = 0; i < n_fields; i++) { + + dfield_t* dfield = dtuple_get_nth_v_field(tuple, i); + dtype_t* dtype = dfield_get_type(dfield); + + dfield_set_null(dfield); + dict_col_copy_type( + &(dict_table_get_nth_v_col(table, i)->m_col), + dtype); + } +} /*******************************************************************//** Copies types of columns contained in table to tuple and sets all fields of the tuple to the SQL NULL value. This function should be called right after dtuple_create(). */ -UNIV_INTERN void dict_table_copy_types( /*==================*/ @@ -3005,13 +3199,14 @@ dict_table_copy_types( dfield_set_null(dfield); dict_col_copy_type(dict_table_get_nth_col(table, i), dtype); } + + dict_table_copy_v_types(tuple, table); } /******************************************************************** Wait until all the background threads of the given table have exited, i.e., bg_threads == 0. Note: bg_threads_mutex must be reserved when calling this. */ -UNIV_INTERN void dict_table_wait_for_bg_threads_to_exit( /*===================================*/ @@ -3021,9 +3216,7 @@ dict_table_wait_for_bg_threads_to_exit( { fts_t* fts = table->fts; -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&fts->bg_threads_mutex)); -#endif /* UNIV_SYNC_DEBUG */ while (fts->bg_threads > 0) { mutex_exit(&fts->bg_threads_mutex); @@ -3037,7 +3230,7 @@ dict_table_wait_for_bg_threads_to_exit( /*******************************************************************//** Builds the internal dictionary cache representation for a clustered index, containing also system fields not defined by the user. 
-@return own: the internal representation of the clustered index */ +@return own: the internal representation of the clustered index */ static dict_index_t* dict_index_build_internal_clust( @@ -3054,11 +3247,13 @@ dict_index_build_internal_clust( ut_ad(table && index); ut_ad(dict_index_is_clust(index)); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(!dict_index_is_ibuf(index)); + + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Create a new index object with certainly enough fields */ - new_index = dict_mem_index_create(table->name, + new_index = dict_mem_index_create(table->name.m_name, index->name, table->space, index->type, index->n_fields + table->n_cols); @@ -3073,12 +3268,7 @@ dict_index_build_internal_clust( /* Copy the fields of index */ dict_index_copy(new_index, index, table, 0, index->n_fields); - if (dict_index_is_univ(index)) { - /* No fixed number of fields determines an entry uniquely */ - - new_index->n_uniq = REC_MAX_N_FIELDS; - - } else if (dict_index_is_unique(index)) { + if (dict_index_is_unique(index)) { /* Only the fields defined so far are needed to identify the index entry uniquely */ @@ -3090,10 +3280,9 @@ dict_index_build_internal_clust( new_index->trx_id_offset = 0; - if (!dict_index_is_ibuf(index)) { - /* Add system columns, trx id first */ + /* Add system columns, trx id first */ - trx_id_pos = new_index->n_def; + trx_id_pos = new_index->n_def; #if DATA_ROW_ID != 0 # error "DATA_ROW_ID != 0" @@ -3105,63 +3294,69 @@ dict_index_build_internal_clust( # error "DATA_ROLL_PTR != 2" #endif - if (!dict_index_is_unique(index)) { - dict_index_add_col(new_index, table, - dict_table_get_sys_col( - table, DATA_ROW_ID), - 0); - trx_id_pos++; - } - + if (!dict_index_is_unique(index)) { dict_index_add_col(new_index, table, - dict_table_get_sys_col(table, DATA_TRX_ID), + dict_table_get_sys_col( + table, DATA_ROW_ID), 0); + trx_id_pos++; + } - dict_index_add_col(new_index, table, - dict_table_get_sys_col(table, - DATA_ROLL_PTR), - 0); + dict_index_add_col( + new_index, table, + dict_table_get_sys_col(table, DATA_TRX_ID), 0); - for (i = 0; i < trx_id_pos; i++) { - ulint fixed_size = dict_col_get_fixed_size( - dict_index_get_nth_col(new_index, i), - dict_table_is_comp(table)); + for (i = 0; i < trx_id_pos; i++) { - if (fixed_size == 0) { - new_index->trx_id_offset = 0; + ulint fixed_size = dict_col_get_fixed_size( + dict_index_get_nth_col(new_index, i), + dict_table_is_comp(table)); - break; - } + if (fixed_size == 0) { + new_index->trx_id_offset = 0; + + break; + } - if (dict_index_get_nth_field(new_index, i)->prefix_len - > 0) { - new_index->trx_id_offset = 0; + dict_field_t* field = dict_index_get_nth_field( + new_index, i); + if (field->prefix_len > 0) { + new_index->trx_id_offset = 0; - break; - } + break; + } - /* Add fixed_size to new_index->trx_id_offset. - Because the latter is a bit-field, an overflow - can theoretically occur. Check for it. */ - fixed_size += new_index->trx_id_offset; + /* Add fixed_size to new_index->trx_id_offset. + Because the latter is a bit-field, an overflow + can theoretically occur. Check for it. */ + fixed_size += new_index->trx_id_offset; - new_index->trx_id_offset = fixed_size; + new_index->trx_id_offset = fixed_size; - if (new_index->trx_id_offset != fixed_size) { - /* Overflow. Pretend that this is a - variable-length PRIMARY KEY. */ - ut_ad(0); - new_index->trx_id_offset = 0; - break; - } + if (new_index->trx_id_offset != fixed_size) { + /* Overflow. 
Pretend that this is a + variable-length PRIMARY KEY. */ + ut_ad(0); + new_index->trx_id_offset = 0; + break; } + } + + /* UNDO logging is turned-off for intrinsic table and so + DATA_ROLL_PTR system columns are not added as default system + columns to such tables. */ + if (!dict_table_is_intrinsic(table)) { + dict_index_add_col( + new_index, table, + dict_table_get_sys_col(table, DATA_ROLL_PTR), + 0); } /* Remember the table columns already contained in new_index */ indexed = static_cast( - mem_zalloc(table->n_cols * sizeof *indexed)); + ut_zalloc_nokey(table->n_cols * sizeof *indexed)); /* Mark the table columns already contained in new_index */ for (i = 0; i < new_index->n_def; i++) { @@ -3179,7 +3374,8 @@ dict_index_build_internal_clust( /* Add to new_index non-system columns of table not yet included there */ - for (i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) { + ulint n_sys_cols = dict_table_get_n_sys_cols(table); + for (i = 0; i + n_sys_cols < (ulint) table->n_cols; i++) { dict_col_t* col = dict_table_get_nth_col(table, i); ut_ad(col->mtype != DATA_SYS); @@ -3189,10 +3385,9 @@ dict_index_build_internal_clust( } } - mem_free(indexed); + ut_free(indexed); - ut_ad(dict_index_is_ibuf(index) - || (UT_LIST_GET_LEN(table->indexes) == 0)); + ut_ad(UT_LIST_GET_LEN(table->indexes) == 0); new_index->cached = TRUE; @@ -3202,7 +3397,7 @@ dict_index_build_internal_clust( /*******************************************************************//** Builds the internal dictionary cache representation for a non-clustered index, containing also system fields not defined by the user. -@return own: the internal representation of the non-clustered index */ +@return own: the internal representation of the non-clustered index */ static dict_index_t* dict_index_build_internal_non_clust( @@ -3219,7 +3414,8 @@ dict_index_build_internal_non_clust( ut_ad(table && index); ut_ad(!dict_index_is_clust(index)); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(!dict_index_is_ibuf(index)); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* The clustered index should be the first in the list of indexes */ @@ -3227,11 +3423,11 @@ dict_index_build_internal_non_clust( ut_ad(clust_index); ut_ad(dict_index_is_clust(clust_index)); - ut_ad(!dict_index_is_univ(clust_index)); + ut_ad(!dict_index_is_ibuf(clust_index)); /* Create a new index */ new_index = dict_mem_index_create( - table->name, index->name, index->space, index->type, + table->name.m_name, index->name, index->space, index->type, index->n_fields + 1 + clust_index->n_uniq); /* Copy other relevant data from the old index @@ -3246,13 +3442,17 @@ dict_index_build_internal_non_clust( /* Remember the table columns already contained in new_index */ indexed = static_cast( - mem_zalloc(table->n_cols * sizeof *indexed)); + ut_zalloc_nokey(table->n_cols * sizeof *indexed)); /* Mark the table columns already contained in new_index */ for (i = 0; i < new_index->n_def; i++) { field = dict_index_get_nth_field(new_index, i); + if (dict_col_is_virtual(field->col)) { + continue; + } + /* If there is only a prefix of the column in the index field, do not mark the column as contained in the index */ @@ -3272,10 +3472,15 @@ dict_index_build_internal_non_clust( if (!indexed[field->col->ind]) { dict_index_add_col(new_index, table, field->col, field->prefix_len); + } else if (dict_index_is_spatial(index)) { + /*For spatial index, we still need to add the + field to index. 
*/ + dict_index_add_col(new_index, table, field->col, + field->prefix_len); } } - mem_free(indexed); + ut_free(indexed); if (dict_index_is_unique(index)) { new_index->n_uniq = index->n_fields; @@ -3295,7 +3500,7 @@ dict_index_build_internal_non_clust( /*********************************************************************** Builds the internal dictionary cache representation for an FTS index. -@return own: the internal representation of the FTS index */ +@return own: the internal representation of the FTS index */ static dict_index_t* dict_index_build_internal_fts( @@ -3307,14 +3512,12 @@ dict_index_build_internal_fts( ut_ad(table && index); ut_ad(index->type == DICT_FTS); -#ifdef UNIV_SYNC_DEBUG - ut_ad(mutex_own(&(dict_sys->mutex))); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); /* Create a new index */ new_index = dict_mem_index_create( - table->name, index->name, index->space, index->type, + table->name.m_name, index->name, index->space, index->type, index->n_fields); /* Copy other relevant data from the old index struct to the new @@ -3348,10 +3551,26 @@ dict_index_build_internal_fts( #define DB_FOREIGN_KEY_COLS_NOT_EQUAL 202 #define DB_FOREIGN_KEY_INDEX_NOT_FOUND 203 +/** Check whether the dict_table_t is a partition. +A partitioned table on the SQL level is composed of InnoDB tables, +where each InnoDB table is a [sub]partition including its secondary indexes +which belongs to the partition. +@param[in] table Table to check. +@return true if the dict_table_t is a partition else false. */ +UNIV_INLINE +bool +dict_table_is_partition( + const dict_table_t* table) +{ + /* Check both P and p on all platforms in case it was moved to/from + WIN. */ + return(strstr(table->name.m_name, "#p#") + || strstr(table->name.m_name, "#P#")); +} + /*********************************************************************//** Checks if a table is referenced by foreign keys. -@return TRUE if table is referenced by a foreign key */ -UNIV_INTERN +@return TRUE if table is referenced by a foreign key */ ibool dict_table_is_referenced_by_foreign_key( /*====================================*/ @@ -3362,13 +3581,12 @@ dict_table_is_referenced_by_foreign_key( /**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. */ -UNIV_INTERN void dict_foreign_remove_from_cache( /*===========================*/ dict_foreign_t* foreign) /*!< in, own: foreign constraint */ { - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_a(foreign); if (foreign->referenced_table != NULL) { @@ -3385,7 +3603,7 @@ dict_foreign_remove_from_cache( /**********************************************************************//** Looks for the foreign constraint from the foreign and referenced lists of a table. -@return foreign constraint */ +@return foreign constraint */ static dict_foreign_t* dict_foreign_find( @@ -3393,7 +3611,7 @@ dict_foreign_find( dict_table_t* table, /*!< in: table object */ dict_foreign_t* foreign) /*!< in: foreign constraint */ { - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(dict_foreign_set_validate(table->foreign_set)); ut_ad(dict_foreign_set_validate(table->referenced_set)); @@ -3418,8 +3636,7 @@ dict_foreign_find( Tries to find an index whose first fields are the columns in the array, in the same order and is not marked for deletion and is not the same as types_idx. 
-@return matching index, NULL if not found */ -UNIV_INTERN +@return matching index, NULL if not found */ dict_index_t* dict_foreign_find_index( /*====================*/ @@ -3462,6 +3679,8 @@ dict_foreign_find_index( while (index != NULL) { if (types_idx != index && !(index->type & DICT_FTS) + && !dict_index_is_spatial(index) + && !dict_index_has_virtual(index) && !index->to_be_dropped && dict_foreign_qualify_index( table, col_names, columns, n_cols, @@ -3537,12 +3756,9 @@ dict_foreign_error_report( fputs(fk_str.c_str(), file); putc('\n', file); if (fk->foreign_index) { - fputs("The index in the foreign key in table is ", file); - ut_print_name(file, NULL, FALSE, fk->foreign_index->name); - fputs("\n" - "See " REFMAN "innodb-foreign-key-constraints.html\n" - "for correct foreign key definition.\n", - file); + fprintf(file, "The index in the foreign key in table is" + " %s\n%s\n", fk->foreign_index->name(), + FOREIGN_KEY_CONSTRAINTS_MSG); } mutex_exit(&dict_foreign_err_mutex); } @@ -3552,8 +3768,7 @@ Adds a foreign key constraint object to the dictionary cache. May free the object if there already is an object with the same identifier in. At least one of the foreign table and the referenced table must already be in the dictionary cache! -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_foreign_add_to_cache( /*======================*/ @@ -3575,7 +3790,10 @@ dict_foreign_add_to_cache( ibool added_to_referenced_list= FALSE; FILE* ef = dict_foreign_err_file; - ut_ad(mutex_own(&(dict_sys->mutex))); + DBUG_ENTER("dict_foreign_add_to_cache"); + DBUG_PRINT("dict_foreign_add_to_cache", ("id: %s", foreign->id)); + + ut_ad(mutex_own(&dict_sys->mutex)); for_table = dict_table_check_if_in_cache_low( foreign->foreign_table_name_lookup); @@ -3594,11 +3812,13 @@ dict_foreign_add_to_cache( if (for_in_cache) { /* Free the foreign object */ - mem_heap_free(foreign->heap); + dict_foreign_free(foreign); } else { for_in_cache = foreign; + } + if (ref_table && !for_in_cache->referenced_table) { ulint index_error; ulint err_col; @@ -3621,11 +3841,12 @@ dict_foreign_add_to_cache( "referenced table do not match" " the ones in table."); - if (for_in_cache == foreign) { - mem_heap_free(foreign->heap); - } + if (for_in_cache == foreign) { + mem_heap_free(foreign->heap); + } - return(DB_CANNOT_ADD_CONSTRAINT); + + DBUG_RETURN(DB_CANNOT_ADD_CONSTRAINT); } for_in_cache->referenced_table = ref_table; @@ -3669,23 +3890,23 @@ dict_foreign_add_to_cache( if (for_in_cache == foreign) { if (added_to_referenced_list) { - const dict_foreign_set::size_type n - = ref_table->referenced_set + const dict_foreign_set::size_type + n = ref_table->referenced_set .erase(for_in_cache); ut_a(n == 1); /* the number of elements removed must be one */ } - mem_heap_free(foreign->heap); } - return(DB_CANNOT_ADD_CONSTRAINT); + DBUG_RETURN(DB_CANNOT_ADD_CONSTRAINT); } for_in_cache->foreign_table = for_table; for_in_cache->foreign_index = index; + std::pair ret = for_table->foreign_set.insert(for_in_cache); @@ -3696,24 +3917,23 @@ dict_foreign_add_to_cache( /* We need to move the table to the non-LRU end of the table LRU list. Otherwise it will be evicted from the cache. 
*/ - if (ref_table != NULL && ref_table->can_be_evicted) { - dict_table_move_from_lru_to_non_lru(ref_table); + if (ref_table != NULL) { + dict_table_prevent_eviction(ref_table); } - if (for_table != NULL && for_table->can_be_evicted) { - dict_table_move_from_lru_to_non_lru(for_table); + if (for_table != NULL) { + dict_table_prevent_eviction(for_table); } ut_ad(dict_lru_validate()); - - return(DB_SUCCESS); + DBUG_RETURN(DB_SUCCESS); } /*********************************************************************//** Scans from pointer onwards. Stops if is at the start of a copy of 'string' where characters are compared without case sensitivity, and only outside `` or "" quotes. Stops also at NUL. -@return scanned up to this */ +@return scanned up to this */ static const char* dict_scan_to( @@ -3773,7 +3993,7 @@ static const char* dict_accept( /*========*/ - struct charset_info_st* cs,/*!< in: the character set of ptr */ + CHARSET_INFO* cs, /*!< in: the character set of ptr */ const char* ptr, /*!< in: scan from this */ const char* string, /*!< in: accept only this string as the next non-whitespace string */ @@ -3804,12 +4024,12 @@ dict_accept( /*********************************************************************//** Scans an id. For the lexical definition of an 'id', see the code below. Strips backquotes or double quotes from around the id. -@return scanned to */ +@return scanned to */ static const char* dict_scan_id( /*=========*/ - struct charset_info_st* cs,/*!< in: the character set of ptr */ + CHARSET_INFO* cs, /*!< in: the character set of ptr */ const char* ptr, /*!< in: scanned to */ mem_heap_t* heap, /*!< in: heap where to allocate the id (NULL=id will not be allocated, but it @@ -3872,7 +4092,7 @@ dict_scan_id( len = ptr - s; } - if (UNIV_UNLIKELY(!heap)) { + if (heap == NULL) { /* no heap given: id will point to source string */ *id = s; return(ptr); @@ -3926,12 +4146,12 @@ convert_id: /*********************************************************************//** Tries to scan a column name. -@return scanned to */ +@return scanned to */ static const char* dict_scan_col( /*==========*/ - struct charset_info_st* cs, /*!< in: the character set of ptr */ + CHARSET_INFO* cs, /*!< in: the character set of ptr */ const char* ptr, /*!< in: scanned to */ ibool* success,/*!< out: TRUE if success */ dict_table_t* table, /*!< in: table in which the column is */ @@ -3981,7 +4201,6 @@ Open a table from its database and table name, this is currently used by foreign constraint parser to get the referenced table. @return complete table name with database and table name, allocated from heap memory passed in */ -UNIV_INTERN char* dict_get_referenced_table( /*======================*/ @@ -4024,13 +4243,13 @@ dict_get_referenced_table( memcpy(ref + database_name_len + 1, table_name, table_name_len + 1); } else { -#ifndef __WIN__ +#ifndef _WIN32 if (innobase_get_lower_case_table_names() == 1) { innobase_casedn_str(ref); } #else innobase_casedn_str(ref); -#endif /* !__WIN__ */ +#endif /* !_WIN32 */ *table = dict_table_get_low(ref); } @@ -4038,12 +4257,12 @@ dict_get_referenced_table( } /*********************************************************************//** Scans a table name from an SQL string. 
-@return scanned to */ +@return scanned to */ static const char* dict_scan_table_name( /*=================*/ - struct charset_info_st* cs,/*!< in: the character set of ptr */ + CHARSET_INFO* cs, /*!< in: the character set of ptr */ const char* ptr, /*!< in: scanned to */ dict_table_t** table, /*!< out: table object or NULL */ const char* name, /*!< in: foreign key table name */ @@ -4113,12 +4332,12 @@ dict_scan_table_name( /*********************************************************************//** Skips one id. The id is allowed to contain also '.'. -@return scanned to */ +@return scanned to */ static const char* dict_skip_word( /*===========*/ - struct charset_info_st* cs,/*!< in: the character set of ptr */ + CHARSET_INFO* cs, /*!< in: the character set of ptr */ const char* ptr, /*!< in: scanned to */ ibool* success)/*!< out: TRUE if success, FALSE if just spaces left in string or a syntax error */ @@ -4143,7 +4362,7 @@ Removes MySQL comments from an SQL string. A comment is either (c) '[slash][asterisk]' till the next '[asterisk][slash]' (like the familiar C comment syntax). @return own: SQL string stripped from comments; the caller must free -this with mem_free()! */ +this with ut_free()! */ static char* dict_strip_comments( @@ -4163,7 +4382,7 @@ dict_strip_comments( DBUG_PRINT("dict_strip_comments", ("%s", sql_string)); - str = static_cast(mem_alloc(sql_length + 1)); + str = static_cast(ut_malloc_nokey(sql_length + 1)); sptr = sql_string; ptr = str; @@ -4252,8 +4471,7 @@ end_of_string: Finds the highest [number] for foreign key constraints of the table. Looks only at the >= 4.0.18-format id's, which are of the form databasename/tablename_ibfk_[number]. -@return highest number, 0 if table has no new format foreign key constraints */ -UNIV_INTERN +@return highest number, 0 if table has no new format foreign key constraints */ ulint dict_table_get_highest_foreign_id( /*==============================*/ @@ -4265,9 +4483,11 @@ dict_table_get_highest_foreign_id( ulint id; ulint len; + DBUG_ENTER("dict_table_get_highest_foreign_id"); + ut_a(table); - len = ut_strlen(table->name); + len = ut_strlen(table->name.m_name); for (dict_foreign_set::iterator it = table->foreign_set.begin(); it != table->foreign_set.end(); @@ -4284,7 +4504,7 @@ dict_table_get_highest_foreign_id( MAX_TABLE_NAME_LEN); if (ut_strlen(fkid) > ((sizeof dict_ibfk) - 1) + len - && 0 == ut_memcmp(fkid, table->name, len) + && 0 == ut_memcmp(fkid, table->name.m_name, len) && 0 == ut_memcmp(fkid + len, dict_ibfk, (sizeof dict_ibfk) - 1) && fkid[len + ((sizeof dict_ibfk) - 1)] != '0') { @@ -4303,7 +4523,10 @@ dict_table_get_highest_foreign_id( } } - return(biggest_id); + DBUG_PRINT("dict_table_get_highest_foreign_id", + ("id: %lu", biggest_id)); + + DBUG_RETURN(biggest_id); } /*********************************************************************//** @@ -4422,32 +4645,16 @@ dict_foreign_push_index_error( } /*********************************************************************//** -Scans a table create SQL string and adds to the data dictionary the foreign -key constraints declared in the string. This function should be called after -the indexes for a table have been created. Each foreign key constraint must -be accompanied with indexes in both participating tables. The indexes are -allowed to contain more fields than mentioned in the constraint. 
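The hunks above add DBUG_ENTER()/DBUG_PRINT()/DBUG_RETURN() instrumentation to dict_foreign_add_to_cache() and dict_table_get_highest_foreign_id(). As a minimal illustration of how that macro family pairs up (every exit from a DBUG_ENTER()'ed function has to go through DBUG_RETURN() or DBUG_VOID_RETURN() so the trace stack stays balanced), here is a standalone sketch; toy_lookup() is an invented example function, not part of the patch:

#include "my_dbug.h"	/* MySQL debug-trace macros */

static int
toy_lookup(int key)			/* hypothetical example function */
{
	DBUG_ENTER("toy_lookup");	/* push a frame onto the trace stack */

	int	value = key * 2;

	DBUG_PRINT("toy_lookup", ("value: %d", value));

	DBUG_RETURN(value);		/* pop the frame; a bare return here
					would unbalance the trace */
}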
-@return error code or DB_SUCCESS */ +@return error code or DB_SUCCESS */ static dberr_t dict_create_foreign_constraints_low( -/*================================*/ - trx_t* trx, /*!< in: transaction */ - mem_heap_t* heap, /*!< in: memory heap */ - struct charset_info_st* cs,/*!< in: the character set of sql_string */ - const char* sql_string, - /*!< in: CREATE TABLE or ALTER TABLE statement - where foreign keys are declared like: - FOREIGN KEY (a, b) REFERENCES table2(c, d), - table2 can be written also with the database - name before it: test.table2; the default - database is the database of parameter name */ - const char* name, /*!< in: table full name in the normalized form - database_name/table_name */ - ibool reject_fks) - /*!< in: if TRUE, fail with error code - DB_CANNOT_ADD_CONSTRAINT if any foreign - keys are found. */ + trx_t* trx, + mem_heap_t* heap, + CHARSET_INFO* cs, + const char* sql_string, + const char* name, + ibool reject_fks) { dict_table_t* table = NULL; dict_table_t* referenced_table = NULL; @@ -4486,7 +4693,7 @@ dict_create_foreign_constraints_low( char operation[8]; ut_ad(!srv_read_only_mode); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); table = dict_table_get_low(name); /* First check if we are actually doing an ALTER TABLE, and in that @@ -4511,14 +4718,14 @@ dict_create_foreign_constraints_low( char *bufend; bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, create_table_name, strlen(create_table_name), - trx->mysql_thd, TRUE); + trx->mysql_thd); create_name[bufend-create_name]='\0'; ptr = orig; } else { char *bufend; ptr = orig; bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, - name, strlen(name), trx->mysql_thd, TRUE); + name, strlen(name), trx->mysql_thd); create_name[bufend-create_name]='\0'; } @@ -4566,26 +4773,21 @@ dict_create_foreign_constraints_low( if (table_to_alter) { char *bufend; bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, - table_to_alter->name, strlen(table_to_alter->name), - trx->mysql_thd, TRUE); + table_to_alter->name.m_name, strlen(table_to_alter->name.m_name), + trx->mysql_thd); create_name[bufend-create_name]='\0'; } else { char *bufend; bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, referenced_table_name, strlen(referenced_table_name), - trx->mysql_thd, TRUE); + trx->mysql_thd); create_name[bufend-create_name]='\0'; } if (!success) { - mutex_enter(&dict_foreign_err_mutex); - dict_foreign_error_report_low(ef, create_name); - fprintf(ef, - "%s table %s with foreign key constraint" - " failed. Table %s not found from data dictionary." - " Error close to %s.\n", - operation, create_name, create_name, orig); + ib::error() << "Could not find the table " << create_name << " being" << operation << " near to " + << orig; mutex_exit(&dict_foreign_err_mutex); ib_push_warning(trx, DB_ERROR, @@ -4846,12 +5048,11 @@ col_loop1: mutex_enter(&dict_foreign_err_mutex); dict_foreign_error_report_low(ef, create_name); fputs("There is no index in table ", ef); - ut_print_name(ef, NULL, TRUE, create_name); + ut_print_name(ef, NULL, create_name); fprintf(ef, " where the columns appear\n" - "as the first columns. Constraint:\n%s\n" - "See " REFMAN "innodb-foreign-key-constraints.html\n" - "for correct foreign key definition.\n", - start_of_latest_foreign); + "as the first columns. 
Constraint:\n%s\n%s", + start_of_latest_foreign, + FOREIGN_KEY_CONSTRAINTS_MSG); dict_foreign_push_index_error(trx, operation, create_name, start_of_latest_foreign, column_names, index_error, err_col, err_index, table, ef); @@ -4877,6 +5078,23 @@ col_loop1: return(DB_CANNOT_ADD_CONSTRAINT); } + /* Don't allow foreign keys on partitioned tables yet. */ + ptr1 = dict_scan_to(ptr, "PARTITION"); + if (ptr1) { + ptr1 = dict_accept(cs, ptr1, "PARTITION", &success); + if (success && my_isspace(cs, *ptr1)) { + ptr2 = dict_accept(cs, ptr1, "BY", &success); + if (success) { + my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); + return(DB_CANNOT_ADD_CONSTRAINT); + } + } + } + if (dict_table_is_partition(table)) { + my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); + return(DB_CANNOT_ADD_CONSTRAINT); + } + /* Let us create a constraint struct */ foreign = dict_mem_foreign_create(); @@ -4889,19 +5107,19 @@ col_loop1: same MySQL 'database' as the table itself. We store the name to foreign->id. */ - db_len = dict_get_db_name_len(table->name); + db_len = dict_get_db_name_len(table->name.m_name); foreign->id = static_cast(mem_heap_alloc( foreign->heap, db_len + strlen(constraint_name) + 2)); - ut_memcpy(foreign->id, table->name, db_len); + ut_memcpy(foreign->id, table->name.m_name, db_len); foreign->id[db_len] = '/'; strcpy(foreign->id + db_len + 1, constraint_name); } if (foreign->id == NULL) { - error = dict_create_add_foreign_id(&number, - table->name, foreign); + error = dict_create_add_foreign_id( + &number, table->name.m_name, foreign); if (error != DB_SUCCESS) { dict_foreign_free(foreign); return(error); @@ -4919,7 +5137,7 @@ col_loop1: foreign->foreign_table = table; foreign->foreign_table_name = mem_heap_strdup( - foreign->heap, table->name); + foreign->heap, table->name.m_name); dict_mem_foreign_table_name_lookup_set(foreign, TRUE); foreign->foreign_index = index; @@ -4947,7 +5165,7 @@ col_loop1: bufend = innobase_convert_name(buf, MAX_TABLE_NAME_LEN, referenced_table_name, strlen(referenced_table_name), - trx->mysql_thd, TRUE); + trx->mysql_thd); buf[bufend - buf] = '\0'; ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, @@ -4966,7 +5184,14 @@ col_loop1: return(DB_CANNOT_ADD_CONSTRAINT); } - orig = ptr; + /* Don't allow foreign keys on partitioned tables yet. */ + if (referenced_table && dict_table_is_partition(referenced_table)) { + /* How could one make a referenced table to be a partition? */ + ut_ad(0); + my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); + return(DB_CANNOT_ADD_CONSTRAINT); + } + ptr = dict_accept(cs, ptr, "(", &success); if (!success) { @@ -5258,11 +5483,9 @@ try_find_index: "tables created with >= InnoDB-4.1.12," " and such columns in old tables\n" "cannot be referenced by such columns" - " in new tables.\n" - "See " REFMAN - "innodb-foreign-key-constraints.html\n" - "for correct foreign key definition.\n", - start_of_latest_foreign); + " in new tables.\n%s\n", + start_of_latest_foreign, + FOREIGN_KEY_CONSTRAINTS_MSG); dict_foreign_push_index_error(trx, operation, create_name, start_of_latest_foreign, column_names, index_error, err_col, err_index, referenced_table, ef); @@ -5296,8 +5519,7 @@ try_find_index: /************************************************************************** Determines whether a string starts with the specified keyword. 
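The hunk above rejects foreign keys on partitioned tables by scanning the CREATE/ALTER string for "PARTITION" and then accepting "BY" right after it, using dict_scan_to()/dict_accept(). The standalone sketch below shows the same scan-then-accept idea in plain C++; it is deliberately simplified (ASCII only, no quote or comment handling, strncasecmp() is the POSIX routine) and toy_has_partition_by() is an invented name:

#include <cctype>
#include <strings.h>	/* strncasecmp(), POSIX */

/* Return true if the SQL text contains "PARTITION" followed, after optional
whitespace, by "BY". Unlike the real parser this ignores quoting. */
static bool
toy_has_partition_by(const char* sql)
{
	for (const char* p = sql; *p != '\0'; p++) {
		if (strncasecmp(p, "PARTITION", 9) != 0) {
			continue;
		}

		const char* q = p + 9;

		while (isspace(static_cast<unsigned char>(*q))) {
			q++;
		}

		if (strncasecmp(q, "BY", 2) == 0) {
			return(true);
		}
	}

	return(false);
}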
-@return TRUE if str starts with keyword */ -UNIV_INTERN +@return TRUE if str starts with keyword */ ibool dict_str_starts_with_keyword( /*=========================*/ @@ -5305,40 +5527,40 @@ dict_str_starts_with_keyword( const char* str, /*!< in: string to scan for keyword */ const char* keyword) /*!< in: keyword to look for */ { - struct charset_info_st* cs = innobase_get_charset(thd); - ibool success; + CHARSET_INFO* cs = innobase_get_charset(thd); + ibool success; dict_accept(cs, str, keyword, &success); return(success); } -/*********************************************************************//** -Scans a table create SQL string and adds to the data dictionary the foreign -key constraints declared in the string. This function should be called after -the indexes for a table have been created. Each foreign key constraint must -be accompanied with indexes in both participating tables. The indexes are -allowed to contain more fields than mentioned in the constraint. -@return error code or DB_SUCCESS */ -UNIV_INTERN +/** Scans a table create SQL string and adds to the data dictionary +the foreign key constraints declared in the string. This function +should be called after the indexes for a table have been created. +Each foreign key constraint must be accompanied with indexes in +bot participating tables. The indexes are allowed to contain more +fields than mentioned in the constraint. + +@param[in] trx transaction +@param[in] sql_string table create statement where + foreign keys are declared like: + FOREIGN KEY (a, b) REFERENCES table2(c, d), + table2 can be written also with the database + name before it: test.table2; the default + database id the database of parameter name +@param[in] sql_length length of sql_string +@param[in] name table full name in normalized form +@param[in] reject_fks if TRUE, fail with error code + DB_CANNOT_ADD_CONSTRAINT if any + foreign keys are found. +@return error code or DB_SUCCESS */ dberr_t dict_create_foreign_constraints( -/*============================*/ - trx_t* trx, /*!< in: transaction */ - const char* sql_string, /*!< in: table create statement where - foreign keys are declared like: - FOREIGN KEY (a, b) REFERENCES - table2(c, d), table2 can be written - also with the database - name before it: test.table2; the - default database id the database of - parameter name */ - size_t sql_length, /*!< in: length of sql_string */ - const char* name, /*!< in: table full name in the - normalized form - database_name/table_name */ - ibool reject_fks) /*!< in: if TRUE, fail with error - code DB_CANNOT_ADD_CONSTRAINT if - any foreign keys are found. */ + trx_t* trx, + const char* sql_string, + size_t sql_length, + const char* name, + ibool reject_fks) { char* str; dberr_t err; @@ -5351,11 +5573,11 @@ dict_create_foreign_constraints( heap = mem_heap_create(10000); err = dict_create_foreign_constraints_low( - trx, heap, innobase_get_charset(trx->mysql_thd), str, name, - reject_fks); + trx, heap, innobase_get_charset(trx->mysql_thd), + str, name, reject_fks); mem_heap_free(heap); - mem_free(str); + ut_free(str); return(err); } @@ -5364,7 +5586,6 @@ dict_create_foreign_constraints( Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement. 
@return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the constraint id does not match */ -UNIV_INTERN dberr_t dict_foreign_parse_drop_constraints( /*================================*/ @@ -5383,7 +5604,7 @@ dict_foreign_parse_drop_constraints( const char* ptr; const char* ptr1; const char* id; - struct charset_info_st* cs; + CHARSET_INFO* cs; ut_a(trx); ut_a(trx->mysql_thd); @@ -5395,18 +5616,18 @@ dict_foreign_parse_drop_constraints( *constraints_to_drop = static_cast( mem_heap_alloc(heap, 1000 * sizeof(char*))); - ptr = innobase_get_stmt(trx->mysql_thd, &len); + ptr = innobase_get_stmt_unsafe(trx->mysql_thd, &len); str = dict_strip_comments(ptr, len); ptr = str; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); loop: ptr = dict_scan_to(ptr, "DROP"); if (*ptr == '\0') { - mem_free(str); + ut_free(str); return(DB_SUCCESS); } @@ -5464,19 +5685,16 @@ loop: mutex_enter(&dict_foreign_err_mutex); rewind(ef); ut_print_timestamp(ef); - fputs(" Error in dropping of a foreign key " - "constraint of table ", ef); - ut_print_name(ef, NULL, TRUE, table->name); - fputs(",\nin SQL command\n", ef); - fputs(str, ef); - fputs("\nCannot find a constraint with the " - "given id ", ef); - ut_print_name(ef, NULL, FALSE, id); - fputs(".\n", ef); + fputs(" Error in dropping of a foreign key" + " constraint of table ", ef); + ut_print_name(ef, NULL, table->name.m_name); + fprintf(ef, ",\nin SQL command\n%s" + "\nCannot find a constraint with the" + " given id %s.\n", str, id); mutex_exit(&dict_foreign_err_mutex); } - mem_free(str); + ut_free(str); return(DB_CANNOT_DROP_CONSTRAINT); } @@ -5492,13 +5710,13 @@ syntax_error: ut_print_timestamp(ef); fputs(" Syntax error in dropping of a" " foreign key constraint of table ", ef); - ut_print_name(ef, NULL, TRUE, table->name); + ut_print_name(ef, NULL, table->name.m_name); fprintf(ef, ",\n" "close to:\n%s\n in SQL command\n%s\n", ptr, str); mutex_exit(&dict_foreign_err_mutex); } - mem_free(str); + ut_free(str); return(DB_CANNOT_DROP_CONSTRAINT); } @@ -5508,14 +5726,13 @@ syntax_error: /**********************************************************************//** Returns an index object if it is found in the dictionary cache. Assumes that dict_sys->mutex is already being held. -@return index, NULL if not found */ -UNIV_INTERN +@return index, NULL if not found */ dict_index_t* dict_index_get_if_in_cache_low( /*===========================*/ index_id_t index_id) /*!< in: index id */ { - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); return(dict_index_find_on_id_low(index_id)); } @@ -5523,8 +5740,7 @@ dict_index_get_if_in_cache_low( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Returns an index object if it is found in the dictionary cache. -@return index, NULL if not found */ -UNIV_INTERN +@return index, NULL if not found */ dict_index_t* dict_index_get_if_in_cache( /*=======================*/ @@ -5536,11 +5752,11 @@ dict_index_get_if_in_cache( return(NULL); } - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); index = dict_index_get_if_in_cache_low(index_id); - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); return(index); } @@ -5550,8 +5766,7 @@ dict_index_get_if_in_cache( /**********************************************************************//** Checks that a tuple has n_fields_cmp value in a sensible range, so that no comparison can occur with the page number field in a node pointer. 
-@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dict_index_check_search_tuple( /*==========================*/ @@ -5567,8 +5782,7 @@ dict_index_check_search_tuple( /**********************************************************************//** Builds a node pointer out of a physical record and a page number. -@return own: node pointer */ -UNIV_INTERN +@return own: node pointer */ dtuple_t* dict_index_build_node_ptr( /*======================*/ @@ -5587,7 +5801,7 @@ dict_index_build_node_ptr( byte* buf; ulint n_unique; - if (dict_index_is_univ(index)) { + if (dict_index_is_ibuf(index)) { /* In a universal index tree, we take the whole record as the node pointer if the record is on the leaf level, on non-leaf levels we remove the last field, which @@ -5601,7 +5815,7 @@ dict_index_build_node_ptr( n_unique--; } } else { - n_unique = dict_index_get_n_unique_in_tree(index); + n_unique = dict_index_get_n_unique_in_tree_nonleaf(index); } tuple = dtuple_create(heap, n_unique + 1); @@ -5637,8 +5851,7 @@ dict_index_build_node_ptr( /**********************************************************************//** Copies an initial segment of a physical record, long enough to specify an index entry uniquely. -@return pointer to the prefix record */ -UNIV_INTERN +@return pointer to the prefix record */ rec_t* dict_index_copy_rec_order_prefix( /*=============================*/ @@ -5654,11 +5867,15 @@ dict_index_copy_rec_order_prefix( UNIV_PREFETCH_R(rec); - if (dict_index_is_univ(index)) { + if (dict_index_is_ibuf(index)) { ut_a(!dict_table_is_comp(index->table)); n = rec_get_n_fields_old(rec); } else { - n = dict_index_get_n_unique_in_tree(index); + if (page_is_leaf(page_align(rec))) { + n = dict_index_get_n_unique_in_tree(index); + } else { + n = dict_index_get_n_unique_in_tree_nonleaf(index); + } } *n_fields = n; @@ -5667,8 +5884,7 @@ dict_index_copy_rec_order_prefix( /**********************************************************************//** Builds a typed data tuple out of a physical record. -@return own: data tuple */ -UNIV_INTERN +@return own: data tuple */ dtuple_t* dict_index_build_data_tuple( /*========================*/ @@ -5695,7 +5911,6 @@ dict_index_build_data_tuple( /*********************************************************************//** Calculates the minimum record length in an index. */ -UNIV_INTERN ulint dict_index_calc_min_rec_len( /*========================*/ @@ -5744,185 +5959,9 @@ dict_index_calc_min_rec_len( return(sum); } -/**********************************************************************//** -Prints info of a foreign key constraint. */ -static -void -dict_foreign_print_low( -/*===================*/ - dict_foreign_t* foreign) /*!< in: foreign key constraint */ -{ - ulint i; - - ut_ad(mutex_own(&(dict_sys->mutex))); - - fprintf(stderr, " FOREIGN KEY CONSTRAINT %s: %s (", - foreign->id, foreign->foreign_table_name); - - for (i = 0; i < foreign->n_fields; i++) { - fprintf(stderr, " %s", foreign->foreign_col_names[i]); - } - - fprintf(stderr, " )\n" - " REFERENCES %s (", - foreign->referenced_table_name); - - for (i = 0; i < foreign->n_fields; i++) { - fprintf(stderr, " %s", foreign->referenced_col_names[i]); - } - - fputs(" )\n", stderr); -} - -/**********************************************************************//** -Prints a table data. 
*/ -UNIV_INTERN -void -dict_table_print( -/*=============*/ - dict_table_t* table) /*!< in: table */ -{ - dict_index_t* index; - ulint i; - - ut_ad(mutex_own(&(dict_sys->mutex))); - - dict_table_stats_lock(table, RW_X_LATCH); - - if (!table->stat_initialized) { - dict_stats_update_transient(table); - } - - fprintf(stderr, - "--------------------------------------\n" - "TABLE: name %s, id %llu, flags %lx, columns %lu," - " indexes %lu, appr.rows " UINT64PF "\n" - " COLUMNS: ", - table->name, - (ullint) table->id, - (ulong) table->flags, - (ulong) table->n_cols, - (ulong) UT_LIST_GET_LEN(table->indexes), - table->stat_n_rows); - - for (i = 0; i < (ulint) table->n_cols; i++) { - dict_col_print_low(table, dict_table_get_nth_col(table, i)); - fputs("; ", stderr); - } - - putc('\n', stderr); - - index = UT_LIST_GET_FIRST(table->indexes); - - while (index != NULL) { - dict_index_print_low(index); - index = UT_LIST_GET_NEXT(indexes, index); - } - - dict_table_stats_unlock(table, RW_X_LATCH); - - std::for_each(table->foreign_set.begin(), - table->foreign_set.end(), - dict_foreign_print_low); - - std::for_each(table->referenced_set.begin(), - table->referenced_set.end(), - dict_foreign_print_low); -} - -/**********************************************************************//** -Prints a column data. */ -static -void -dict_col_print_low( -/*===============*/ - const dict_table_t* table, /*!< in: table */ - const dict_col_t* col) /*!< in: column */ -{ - dtype_t type; - - ut_ad(mutex_own(&(dict_sys->mutex))); - - dict_col_copy_type(col, &type); - fprintf(stderr, "%s: ", dict_table_get_col_name(table, - dict_col_get_no(col))); - - dtype_print(&type); -} - -/**********************************************************************//** -Prints an index data. */ -static -void -dict_index_print_low( -/*=================*/ - dict_index_t* index) /*!< in: index */ -{ - ib_int64_t n_vals; - ulint i; - - ut_a(index->table->stat_initialized); - - ut_ad(mutex_own(&(dict_sys->mutex))); - - if (index->n_user_defined_cols > 0) { - n_vals = index->stat_n_diff_key_vals[ - index->n_user_defined_cols - 1]; - } else { - n_vals = index->stat_n_diff_key_vals[0]; - } - - fprintf(stderr, - " INDEX: name %s, id %llu, fields %lu/%lu," - " uniq %lu, type %lu\n" - " root page %lu, appr.key vals %lu," - " leaf pages %lu, size pages %lu\n" - " FIELDS: ", - index->name, - (ullint) index->id, - (ulong) index->n_user_defined_cols, - (ulong) index->n_fields, - (ulong) index->n_uniq, - (ulong) index->type, - (ulong) index->page, - (ulong) n_vals, - (ulong) index->stat_n_leaf_pages, - (ulong) index->stat_index_size); - - for (i = 0; i < index->n_fields; i++) { - dict_field_print_low(dict_index_get_nth_field(index, i)); - } - - putc('\n', stderr); - -#ifdef UNIV_BTR_PRINT - btr_print_size(index); - - btr_print_index(index, 7); -#endif /* UNIV_BTR_PRINT */ -} - -/**********************************************************************//** -Prints a field data. */ -static -void -dict_field_print_low( -/*=================*/ - const dict_field_t* field) /*!< in: field */ -{ - ut_ad(mutex_own(&(dict_sys->mutex))); - - fprintf(stderr, " %s", field->name); - - if (field->prefix_len != 0) { - fprintf(stderr, "(%lu)", (ulong) field->prefix_len); - } -} - /**********************************************************************//** Outputs info on a foreign key of a table in a format suitable for CREATE TABLE. 
*/ -UNIV_INTERN std::string dict_print_info_on_foreign_key_in_create_format( /*============================================*/ @@ -5958,6 +5997,7 @@ dict_print_info_on_foreign_key_in_create_format( for (i = 0;;) { str.append(ut_get_name(trx, FALSE, foreign->foreign_col_names[i])); + if (++i < foreign->n_fields) { str.append(", "); } else { @@ -6022,7 +6062,6 @@ dict_print_info_on_foreign_key_in_create_format( /**********************************************************************//** Outputs info on foreign keys of a table. */ -UNIV_INTERN std::string dict_print_info_on_foreign_keys( /*============================*/ @@ -6036,7 +6075,7 @@ dict_print_info_on_foreign_keys( dict_foreign_t* foreign; std::string str; - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); for (dict_foreign_set::iterator it = table->foreign_set.begin(); it != table->foreign_set.end(); @@ -6103,37 +6142,21 @@ dict_print_info_on_foreign_keys( } } - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); return str; } -/********************************************************************//** -Displays the names of the index and the table. */ -UNIV_INTERN -void -dict_index_name_print( -/*==================*/ - FILE* file, /*!< in: output stream */ - const trx_t* trx, /*!< in: transaction */ - const dict_index_t* index) /*!< in: index to print */ -{ - fputs("index ", file); - ut_print_name(file, trx, FALSE, index->name); - fputs(" of table ", file); - ut_print_name(file, trx, TRUE, index->table_name); -} - -/**********************************************************************//** -Find a table in dict_sys->table_LRU list with specified space id +/** Given a space_id of a file-per-table tablespace, search the +dict_sys->table_LRU list and return the dict_table_t* pointer for it. +@param space_id Tablespace ID @return table if found, NULL if not */ static dict_table_t* -dict_find_table_by_space( -/*=====================*/ - ulint space_id) /*!< in: space ID */ +dict_find_single_table_by_space( + ulint space_id) { - dict_table_t* table; + dict_table_t* table; ulint num_item; ulint count = 0; @@ -6149,11 +6172,14 @@ dict_find_table_by_space( /* This function intentionally does not acquire mutex as it is used by error handling code in deep call stack as last means to avoid - killing the server, so it worth to risk some consequencies for + killing the server, so it worth to risk some consequences for the action. 
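The comment above explains that dict_find_single_table_by_space() deliberately runs without the dictionary mutex (it is called from error handling deep in the call stack) and therefore bounds its walk over table_LRU by the list length sampled up front. A rough standalone model of that bounded, best-effort scan is sketched below; the template and its names are invented for illustration and, unlike the real code, it does not deal with concurrent list changes at all:

#include <cstddef>
#include <list>

/* Scan at most `limit` elements and give up rather than risk walking a list
that may be changing underneath us. */
template <typename T, typename Pred>
const T*
toy_bounded_find(const std::list<T>& lst, std::size_t limit, Pred pred)
{
	std::size_t	count = 0;

	for (const T& item : lst) {
		if (count++ >= limit) {
			break;		/* best effort only */
		}
		if (pred(item)) {
			return(&item);
		}
	}

	return(nullptr);
}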
*/ while (table && count < num_item) { if (table->space == space_id) { - return(table); + if (dict_table_is_file_per_table(table)) { + return(table); + } + return(NULL); } table = UT_LIST_GET_NEXT(table_LRU, table); @@ -6167,7 +6193,6 @@ dict_find_table_by_space( Flags a table with specified space_id corrupted in the data dictionary cache @return TRUE if successful */ -UNIV_INTERN ibool dict_set_corrupted_by_space( /*========================*/ @@ -6175,7 +6200,7 @@ dict_set_corrupted_by_space( { dict_table_t* table; - table = dict_find_table_by_space(space_id); + table = dict_find_single_table_by_space(space_id); if (!table) { return(FALSE); @@ -6191,7 +6216,6 @@ dict_set_corrupted_by_space( /**********************************************************************//** Flags an index corrupted both in the data dictionary cache and in the SYS_INDEXES */ -UNIV_INTERN void dict_set_corrupted( /*===============*/ @@ -6205,7 +6229,6 @@ dict_set_corrupted( dtuple_t* tuple; dfield_t* dfield; byte* buf; - char* table_name; const char* status; btr_cur_t cursor; bool locked = RW_X_LATCH == trx->dict_operation_lock_mode; @@ -6219,9 +6242,13 @@ dict_set_corrupted( ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(sync_thread_levels_empty_except_dict()); -#endif +#ifdef UNIV_DEBUG + { + dict_sync_check check(true); + + ut_ad(!sync_check_iterate(check)); + } +#endif /* UNIV_DEBUG */ /* Mark the table as corrupted only if the clustered index is corrupted */ @@ -6281,15 +6308,8 @@ fail: mtr_commit(&mtr); mem_heap_empty(heap); - table_name = static_cast(mem_heap_alloc(heap, FN_REFLEN + 1)); - *innobase_convert_name( - table_name, FN_REFLEN, - index->table_name, strlen(index->table_name), - NULL, TRUE) = 0; - - ib_logf(IB_LOG_LEVEL_ERROR, "%s corruption of %s in table %s in %s", - status, index->name, table_name, ctx); - + ib::error() << status << " corruption of " << index->name + << " in table " << index->table->name << " in " << ctx; mem_heap_free(heap); func_exit: @@ -6298,18 +6318,16 @@ func_exit: } } -/**********************************************************************//** -Flags an index corrupted in the data dictionary cache only. This +/** Flags an index corrupted in the data dictionary cache only. 
This is used mostly to mark a corrupted index when index's own dictionary -is corrupted, and we force to load such index for repair purpose */ -UNIV_INTERN +is corrupted, and we force to load such index for repair purpose +@param[in,out] index index which is corrupted */ void dict_set_corrupted_index_cache_only( -/*================================*/ - dict_index_t* index, /*!< in/out: index */ - dict_table_t* table) /*!< in/out: table */ + dict_index_t* index) { ut_ad(index != NULL); + ut_ad(index->table != NULL); ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); @@ -6317,24 +6335,127 @@ dict_set_corrupted_index_cache_only( /* Mark the table as corrupted only if the clustered index is corrupted */ if (dict_index_is_clust(index)) { - dict_table_t* corrupt_table; + index->table->corrupted = TRUE; + } + + index->type |= DICT_CORRUPT; +} + +/** Sets merge_threshold in the SYS_INDEXES +@param[in,out] index index +@param[in] merge_threshold value to set */ +void +dict_index_set_merge_threshold( + dict_index_t* index, + ulint merge_threshold) +{ + mem_heap_t* heap; + mtr_t mtr; + dict_index_t* sys_index; + dtuple_t* tuple; + dfield_t* dfield; + byte* buf; + btr_cur_t cursor; + + ut_ad(index != NULL); + ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); + ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); + + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&(dict_sys->mutex)); + + heap = mem_heap_create(sizeof(dtuple_t) + 2 * (sizeof(dfield_t) + + sizeof(que_fork_t) + sizeof(upd_node_t) + + sizeof(upd_t) + 12)); + + mtr_start(&mtr); + + sys_index = UT_LIST_GET_FIRST(dict_sys->sys_indexes->indexes); + + /* Find the index row in SYS_INDEXES */ + tuple = dtuple_create(heap, 2); + + dfield = dtuple_get_nth_field(tuple, 0); + buf = static_cast(mem_heap_alloc(heap, 8)); + mach_write_to_8(buf, index->table->id); + dfield_set_data(dfield, buf, 8); + + dfield = dtuple_get_nth_field(tuple, 1); + buf = static_cast(mem_heap_alloc(heap, 8)); + mach_write_to_8(buf, index->id); + dfield_set_data(dfield, buf, 8); + + dict_index_copy_types(tuple, sys_index, 2); + + btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE, + BTR_MODIFY_LEAF, + &cursor, 0, __FILE__, __LINE__, &mtr); - corrupt_table = (table != NULL) ? 
table : index->table; - ut_ad((index->table != NULL) || (table != NULL) - || index->table == table); + if (cursor.up_match == dtuple_get_n_fields(tuple) + && rec_get_n_fields_old(btr_cur_get_rec(&cursor)) + == DICT_NUM_FIELDS__SYS_INDEXES) { + ulint len; + byte* field = rec_get_nth_field_old( + btr_cur_get_rec(&cursor), + DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD, &len); - if (corrupt_table) { - corrupt_table->corrupted = TRUE; + ut_ad(len == 4); + + if (len == 4) { + mlog_write_ulint(field, merge_threshold, + MLOG_4BYTES, &mtr); } } - index->type |= DICT_CORRUPT; + mtr_commit(&mtr); + mem_heap_free(heap); + + mutex_exit(&(dict_sys->mutex)); + rw_lock_x_unlock(dict_operation_lock); } + +#ifdef UNIV_DEBUG +/** Sets merge_threshold for all indexes in the list of tables +@param[in] list pointer to the list of tables */ +inline +void +dict_set_merge_threshold_list_debug( + UT_LIST_BASE_NODE_T(dict_table_t)* list, + uint merge_threshold_all) +{ + for (dict_table_t* table = UT_LIST_GET_FIRST(*list); + table != NULL; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + rw_lock_x_lock(dict_index_get_lock(index)); + index->merge_threshold = merge_threshold_all; + rw_lock_x_unlock(dict_index_get_lock(index)); + } + } +} + +/** Sets merge_threshold for all indexes in dictionary cache for debug. +@param[in] merge_threshold_all value to set for all indexes */ +void +dict_set_merge_threshold_all_debug( + uint merge_threshold_all) +{ + mutex_enter(&dict_sys->mutex); + + dict_set_merge_threshold_list_debug( + &dict_sys->table_LRU, merge_threshold_all); + dict_set_merge_threshold_list_debug( + &dict_sys->table_non_LRU, merge_threshold_all); + + mutex_exit(&dict_sys->mutex); +} +#endif /* UNIV_DEBUG */ #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** -Inits dict_ind_redundant and dict_ind_compact. */ -UNIV_INTERN +Inits dict_ind_redundant. */ void dict_ind_init(void) /*===============*/ @@ -6342,7 +6463,7 @@ dict_ind_init(void) dict_table_t* table; /* create dummy table and index for REDUNDANT infimum and supremum */ - table = dict_mem_table_create("SYS_DUMMY1", DICT_HDR_SPACE, 1, 0, 0); + table = dict_mem_table_create("SYS_DUMMY1", DICT_HDR_SPACE, 1, 0, 0, 0); dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR, DATA_ENGLISH | DATA_NOT_NULL, 8); @@ -6351,26 +6472,13 @@ dict_ind_init(void) dict_index_add_col(dict_ind_redundant, table, dict_table_get_nth_col(table, 0), 0); dict_ind_redundant->table = table; - - /* create dummy table and index for COMPACT infimum and supremum */ - table = dict_mem_table_create("SYS_DUMMY2", - DICT_HDR_SPACE, 1, - DICT_TF_COMPACT, 0); - dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR, - DATA_ENGLISH | DATA_NOT_NULL, 8); - dict_ind_compact = dict_mem_index_create("SYS_DUMMY2", "SYS_DUMMY2", - DICT_HDR_SPACE, 0, 1); - dict_index_add_col(dict_ind_compact, table, - dict_table_get_nth_col(table, 0), 0); - dict_ind_compact->table = table; - /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */ - dict_ind_redundant->cached = dict_ind_compact->cached = TRUE; + dict_ind_redundant->cached = TRUE; } #ifndef UNIV_HOTBACKUP /**********************************************************************//** -Frees dict_ind_redundant and dict_ind_compact. */ +Frees dict_ind_redundant. 
*/ static void dict_ind_free(void) @@ -6378,33 +6486,31 @@ dict_ind_free(void) { dict_table_t* table; - table = dict_ind_compact->table; - dict_mem_index_free(dict_ind_compact); - dict_ind_compact = NULL; - dict_mem_table_free(table); - table = dict_ind_redundant->table; dict_mem_index_free(dict_ind_redundant); dict_ind_redundant = NULL; dict_mem_table_free(table); } -/**********************************************************************//** -Get index by name -@return index, NULL if does not exist */ -UNIV_INTERN +/** Get an index by name. +@param[in] table the table where to look for the index +@param[in] name the index name to look for +@param[in] committed true=search for committed, +false=search for uncommitted +@return index, NULL if does not exist */ dict_index_t* dict_table_get_index_on_name( -/*=========================*/ - dict_table_t* table, /*!< in: table */ - const char* name) /*!< in: name of the index to find */ + dict_table_t* table, + const char* name, + bool committed) { dict_index_t* index; index = dict_table_get_first_index(table); while (index != NULL) { - if (innobase_strcasecmp(index->name, name) == 0) { + if (index->is_committed() == committed + && innobase_strcasecmp(index->name, name) == 0) { return(index); } @@ -6419,7 +6525,6 @@ dict_table_get_index_on_name( Replace the index passed in with another equivalent index in the foreign key lists of the table. @return whether all replacements were found */ -UNIV_INTERN bool dict_foreign_replace_index( /*=======================*/ @@ -6490,42 +6595,9 @@ dict_foreign_replace_index( return(found); } -/**********************************************************************//** -In case there is more than one index with the same name return the index -with the min(id). -@return index, NULL if does not exist */ -UNIV_INTERN -dict_index_t* -dict_table_get_index_on_name_and_min_id( -/*=====================================*/ - dict_table_t* table, /*!< in: table */ - const char* name) /*!< in: name of the index to find */ -{ - dict_index_t* index; - dict_index_t* min_index; /* Index with matching name and min(id) */ - - min_index = NULL; - index = dict_table_get_first_index(table); - - while (index != NULL) { - if (ut_strcmp(index->name, name) == 0) { - if (!min_index || index->id < min_index->id) { - - min_index = index; - } - } - - index = dict_table_get_next_index(index); - } - - return(min_index); - -} - #ifdef UNIV_DEBUG /**********************************************************************//** Check for duplicate index entries in a table [using the index name] */ -UNIV_INTERN void dict_table_check_for_dup_indexes( /*=============================*/ @@ -6548,7 +6620,7 @@ dict_table_check_for_dup_indexes( index1 = UT_LIST_GET_FIRST(table->indexes); do { - if (*index1->name == TEMP_INDEX_PREFIX) { + if (!index1->is_committed()) { ut_a(!dict_index_is_clust(index1)); switch (check) { @@ -6573,7 +6645,9 @@ dict_table_check_for_dup_indexes( for (index2 = UT_LIST_GET_NEXT(indexes, index1); index2 != NULL; index2 = UT_LIST_GET_NEXT(indexes, index2)) { - ut_ad(ut_strcmp(index1->name, index2->name)); + ut_ad(index1->is_committed() + != index2->is_committed() + || strcmp(index1->name, index2->name) != 0); } index1 = UT_LIST_GET_NEXT(indexes, index1); @@ -6599,7 +6673,6 @@ types. The order of the columns does not matter. The caller must own the dictionary mutex. 
dict_table_schema_check() @{ @return DB_SUCCESS if the table exists and contains the necessary columns */ -UNIV_INTERN dberr_t dict_table_schema_check( /*====================*/ @@ -6644,7 +6717,7 @@ dict_table_schema_check( ut_snprintf(errstr, errstr_sz, "Table %s not found.", ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + buf, sizeof(buf))); return(DB_TABLE_NOT_FOUND); } else { return(DB_STATS_DO_NOT_EXIST); @@ -6657,20 +6730,19 @@ dict_table_schema_check( ut_snprintf(errstr, errstr_sz, "Tablespace for table %s is missing.", ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf))); + buf, sizeof(buf))); return(DB_TABLE_NOT_FOUND); } - if ((ulint) table->n_def - DATA_N_SYS_COLS != req_schema->n_cols) { - /* the table has a different number of columns than - required */ - + ulint n_sys_cols = dict_table_get_n_sys_cols(table); + if ((ulint) table->n_def - n_sys_cols != req_schema->n_cols) { + /* the table has a different number of columns than required */ ut_snprintf(errstr, errstr_sz, - "%s has %d columns but should have %lu.", + "%s has %lu columns but should have %lu.", ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), - table->n_def - DATA_N_SYS_COLS, + buf, sizeof(buf)), + table->n_def - n_sys_cols, req_schema->n_cols); return(DB_ERROR); @@ -6682,44 +6754,20 @@ dict_table_schema_check( be O(n_cols) if the columns are in the same order in both arrays. */ for (i = 0; i < req_schema->n_cols; i++) { - ulint j; - - /* check if i'th column is the same in both arrays */ - if (innobase_strcasecmp(req_schema->columns[i].name, - dict_table_get_col_name(table, i)) == 0) { - - /* we found the column in table->cols[] quickly */ - j = i; - } else { - - /* columns in both arrays are not in the same order, - do a full scan of the second array */ - for (j = 0; j < table->n_def; j++) { - const char* name; - - name = dict_table_get_col_name(table, j); - - if (innobase_strcasecmp(name, - req_schema->columns[i].name) == 0) { - - /* found the column on j'th - position */ - break; - } - } + ulint j = dict_table_has_column( + table, req_schema->columns[i].name, i); - if (j == table->n_def) { + if (j == table->n_def) { - ut_snprintf(errstr, errstr_sz, - "required column %s " - "not found in table %s.", - req_schema->columns[i].name, - ut_format_name( - req_schema->table_name, - TRUE, buf, sizeof(buf))); + ut_snprintf(errstr, errstr_sz, + "required column %s" + " not found in table %s.", + req_schema->columns[i].name, + ut_format_name( + req_schema->table_name, + buf, sizeof(buf))); - return(DB_ERROR); - } + return(DB_ERROR); } /* we found a column with the same name on j'th position, @@ -6731,11 +6779,11 @@ dict_table_schema_check( CREATE_TYPES_NAMES(); ut_snprintf(errstr, errstr_sz, - "Column %s in table %s is %s " - "but should be %s (length mismatch).", + "Column %s in table %s is %s" + " but should be %s (length mismatch).", req_schema->columns[i].name, ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), + buf, sizeof(buf)), actual_type, req_type); return(DB_ERROR); @@ -6755,11 +6803,11 @@ dict_table_schema_check( CREATE_TYPES_NAMES(); ut_snprintf(errstr, errstr_sz, - "Column %s in table %s is %s " - "but should be %s (type mismatch).", + "Column %s in table %s is %s" + " but should be %s (type mismatch).", req_schema->columns[i].name, ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), + buf, sizeof(buf)), actual_type, req_type); return(DB_ERROR); @@ -6774,11 +6822,11 @@ dict_table_schema_check( CREATE_TYPES_NAMES(); 
ut_snprintf(errstr, errstr_sz, - "Column %s in table %s is %s " - "but should be %s (flags mismatch).", + "Column %s in table %s is %s" + " but should be %s (flags mismatch).", req_schema->columns[i].name, ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), + buf, sizeof(buf)), actual_type, req_type); return(DB_ERROR); @@ -6791,7 +6839,7 @@ dict_table_schema_check( "Table %s has " ULINTPF " foreign key(s) pointing" " to other tables, but it must have %lu.", ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), + buf, sizeof(buf)), static_cast(table->foreign_set.size()), req_schema->n_foreign); return(DB_ERROR); @@ -6804,7 +6852,7 @@ dict_table_schema_check( "but there must be %lu.", static_cast(table->referenced_set.size()), ut_format_name(req_schema->table_name, - TRUE, buf, sizeof(buf)), + buf, sizeof(buf)), req_schema->n_referenced); return(DB_ERROR); } @@ -6818,7 +6866,6 @@ Converts a database and table name from filesystem encoding (e.g. d@i1b/a@q1b@1Kc, same format as used in dict_table_t::name) in two strings in UTF8 encoding (e.g. dцb and aÑŽbØc). The output buffers must be at least MAX_DB_UTF8_LEN and MAX_TABLE_UTF8_LEN bytes. */ -UNIV_INTERN void dict_fs2utf8( /*=========*/ @@ -6842,7 +6889,7 @@ dict_fs2utf8( strconvert( &my_charset_filename, db, db_len, system_charset_info, - db_utf8, static_cast(db_utf8_size), &errors); + db_utf8, db_utf8_size, &errors); /* convert each # to @0023 in table name and store the result in buf */ const char* table = dict_remove_db_name(db_and_table); @@ -6868,7 +6915,7 @@ dict_fs2utf8( errors = 0; strconvert( &my_charset_filename, buf, (uint) (buf_p - buf), system_charset_info, - table_utf8, static_cast(table_utf8_size), + table_utf8, table_utf8_size, &errors); if (errors != 0) { @@ -6877,9 +6924,55 @@ dict_fs2utf8( } } +/** Resize the hash tables besed on the current buffer pool size. */ +void +dict_resize() +{ + dict_table_t* table; + + mutex_enter(&dict_sys->mutex); + + /* all table entries are in table_LRU and table_non_LRU lists */ + hash_table_free(dict_sys->table_hash); + hash_table_free(dict_sys->table_id_hash); + + dict_sys->table_hash = hash_create( + buf_pool_get_curr_size() + / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); + + dict_sys->table_id_hash = hash_create( + buf_pool_get_curr_size() + / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); + + for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + ulint fold = ut_fold_string(table->name.m_name); + ulint id_fold = ut_fold_ull(table->id); + + HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, + fold, table); + + HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, + id_fold, table); + } + + for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); table; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + ulint fold = ut_fold_string(table->name.m_name); + ulint id_fold = ut_fold_ull(table->id); + + HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, + fold, table); + + HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, + id_fold, table); + } + + mutex_exit(&dict_sys->mutex); +} + /**********************************************************************//** Closes the data dictionary module. 
*/ -UNIV_INTERN void dict_close(void) /*============*/ @@ -6899,9 +6992,7 @@ dict_close(void) table = static_cast( HASH_GET_NEXT(name_hash, prev_table)); -#ifdef UNIV_DEBUG - ut_a(prev_table->magic_n == DICT_TABLE_MAGIC_N); -#endif + ut_ad(prev_table->magic_n == DICT_TABLE_MAGIC_N); /* Acquire only because it's a pre-condition. */ mutex_enter(&dict_sys->mutex); @@ -6921,23 +7012,26 @@ dict_close(void) mutex_free(&dict_sys->mutex); - rw_lock_free(&dict_operation_lock); - memset(&dict_operation_lock, 0x0, sizeof(dict_operation_lock)); + rw_lock_free(dict_operation_lock); - if (!srv_read_only_mode) { - mutex_free(&dict_foreign_err_mutex); - } + ut_free(dict_operation_lock); + dict_operation_lock = NULL; + + mutex_free(&dict_foreign_err_mutex); delete dict_sys->autoinc_map; - mem_free(dict_sys); + ut_ad(dict_sys->size == 0); + + ut_free(dict_sys); + dict_sys = NULL; } #ifdef UNIV_DEBUG /**********************************************************************//** Validate the dictionary table LRU list. -@return TRUE if valid */ +@return TRUE if valid */ static ibool dict_lru_validate(void) @@ -7025,7 +7119,6 @@ Check an index to see whether its first fields are the columns in the array, in the same order and is not marked for deletion and is not the same as types_idx. @return true if the index qualifies, otherwise false */ -UNIV_INTERN bool dict_foreign_qualify_index( /*=======================*/ @@ -7068,6 +7161,8 @@ dict_foreign_qualify_index( field = dict_index_get_nth_field(index, i); col_no = dict_col_get_no(field->col); + ut_ad(!dict_col_is_virtual(field->col)); + if (field->prefix_len != 0) { /* We do not accept column prefix indexes here */ @@ -7134,7 +7229,7 @@ dict_index_zip_pad_update( ut_ad(total > 0); - if(zip_threshold == 0) { + if (zip_threshold == 0) { /* User has just disabled the padding. */ return; } @@ -7160,15 +7255,10 @@ dict_index_zip_pad_update( beyond max pad size. */ if (info->pad + ZIP_PAD_INCR < (UNIV_PAGE_SIZE * zip_pad_max) / 100) { -#ifdef HAVE_ATOMIC_BUILTINS /* Use atomics even though we have the mutex. This is to ensure that we are able to read - info->pad atomically where atomics are - supported. */ + info->pad atomically. */ os_atomic_increment_ulint(&info->pad, ZIP_PAD_INCR); -#else /* HAVE_ATOMIC_BUILTINS */ - info->pad += ZIP_PAD_INCR; -#endif /* HAVE_ATOMIC_BUILTINS */ MONITOR_INC(MONITOR_PAD_INCREMENTS); } @@ -7187,15 +7277,10 @@ dict_index_zip_pad_update( && info->pad > 0) { ut_ad(info->pad % ZIP_PAD_INCR == 0); -#ifdef HAVE_ATOMIC_BUILTINS /* Use atomics even though we have the mutex. This is to ensure that we are able to read - info->pad atomically where atomics are - supported. */ + info->pad atomically. */ os_atomic_decrement_ulint(&info->pad, ZIP_PAD_INCR); -#else /* HAVE_ATOMIC_BUILTINS */ - info->pad -= ZIP_PAD_INCR; -#endif /* HAVE_ATOMIC_BUILTINS */ info->n_rounds = 0; @@ -7207,7 +7292,6 @@ dict_index_zip_pad_update( /*********************************************************************//** This function should be called whenever a page is successfully compressed. Updates the compression padding information. */ -UNIV_INTERN void dict_index_zip_success( /*===================*/ @@ -7230,7 +7314,6 @@ dict_index_zip_success( /*********************************************************************//** This function should be called whenever a page compression attempt fails. Updates the compression padding information. 
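dict_index_zip_success() and dict_index_zip_failure() above feed an adaptive padding scheme: compression outcomes are counted per round, a high failure rate grows the pad (up to a percentage of the page), and a run of clean rounds shrinks it again, so dict_index_zip_pad_optimal_page_size() can return UNIV_PAGE_SIZE minus the current pad. The standalone model below sketches that feedback loop with invented names and constants; it is not InnoDB's implementation and leaves out the atomics and monitoring counters:

#include <cstddef>

/* Illustrative only: all names, fields and thresholds are made up. */
struct toy_zip_pad {
	std::size_t	pad = 0;	/* bytes left empty before compressing */
	std::size_t	success = 0;	/* outcomes in the current round */
	std::size_t	failure = 0;
	std::size_t	n_clean_rounds = 0;
};

static void
toy_zip_pad_update(
	toy_zip_pad&	info,
	std::size_t	page_size,	/* uncompressed page size */
	std::size_t	round_len,	/* samples per round */
	std::size_t	fail_pct_max,	/* tolerated failure rate, in % */
	std::size_t	pad_incr,	/* step by which the pad moves */
	std::size_t	pad_max_pct)	/* upper bound for pad, in % of page */
{
	const std::size_t	total = info.success + info.failure;

	if (total < round_len) {
		return;			/* not enough samples yet */
	}

	if (info.failure * 100 > fail_pct_max * total) {
		/* Too many pages refused to compress: reserve more space. */
		if (info.pad + pad_incr < page_size * pad_max_pct / 100) {
			info.pad += pad_incr;
		}
		info.n_clean_rounds = 0;
	} else if (++info.n_clean_rounds >= 5 && info.pad >= pad_incr) {
		/* Several clean rounds in a row: give some space back. */
		info.pad -= pad_incr;
		info.n_clean_rounds = 0;
	}

	info.success = info.failure = 0;	/* start a new round */
}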
*/ -UNIV_INTERN void dict_index_zip_failure( /*===================*/ @@ -7254,7 +7337,6 @@ dict_index_zip_failure( /*********************************************************************//** Return the optimal page size, for which page will likely compress. @return page size beyond which page might not compress */ -UNIV_INTERN ulint dict_index_zip_pad_optimal_page_size( /*=================================*/ @@ -7273,16 +7355,9 @@ dict_index_zip_pad_optimal_page_size( } /* We use atomics to read index->zip_pad.pad. Here we use zero - as increment as are not changing the value of the 'pad'. On - platforms where atomics are not available we grab the mutex. */ + as increment as are not changing the value of the 'pad'. */ -#ifdef HAVE_ATOMIC_BUILTINS pad = os_atomic_increment_ulint(&index->zip_pad.pad, 0); -#else /* HAVE_ATOMIC_BUILTINS */ - dict_index_zip_pad_lock(index); - pad = index->zip_pad.pad; - dict_index_zip_pad_unlock(index); -#endif /* HAVE_ATOMIC_BUILTINS */ ut_ad(pad < UNIV_PAGE_SIZE); sz = UNIV_PAGE_SIZE - pad; @@ -7294,10 +7369,58 @@ dict_index_zip_pad_optimal_page_size( return(ut_max(sz, min_sz)); } +/** Convert a 32 bit integer table flags to the 32 bit FSP Flags. +Fsp Flags are written into the tablespace header at the offset +FSP_SPACE_FLAGS and are also stored in the fil_space_t::flags field. +The following chart shows the translation of the low order bit. +Other bits are the same. + Low order bit + | REDUNDANT | COMPACT | COMPRESSED | DYNAMIC +dict_table_t::flags | 0 | 1 | 1 | 1 +fil_space_t::flags | 0 | 0 | 1 | 1 +@param[in] table_flags dict_table_t::flags +@param[in] is_temp whether the tablespace is temporary +@return tablespace flags (fil_space_t::flags) */ +ulint +dict_tf_to_fsp_flags( + ulint table_flags, + bool is_temp) +{ + DBUG_EXECUTE_IF("dict_tf_to_fsp_flags_failure", + return(ULINT_UNDEFINED);); + + bool has_atomic_blobs = + DICT_TF_HAS_ATOMIC_BLOBS(table_flags); + page_size_t page_size = dict_tf_get_page_size(table_flags); + bool has_data_dir = DICT_TF_HAS_DATA_DIR(table_flags); + bool is_shared = DICT_TF_HAS_SHARED_SPACE(table_flags); + bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(table_flags); + ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(table_flags); + ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(table_flags); + + ut_ad(!page_size.is_compressed() || has_atomic_blobs); + + /* General tablespaces that are not compressed do not get the + flags for dynamic row format (POST_ANTELOPE & ATOMIC_BLOBS) */ + if (is_shared && !page_size.is_compressed()) { + has_atomic_blobs = false; + } + + ulint fsp_flags = fsp_flags_init(page_size, + has_atomic_blobs, + has_data_dir, + is_shared, + is_temp, + page_compression, + page_compression_level, + atomic_writes); + + return(fsp_flags); +} + /*************************************************************//** Convert table flag to row format string. @return row format name. */ -UNIV_INTERN const char* dict_tf_to_row_format_string( /*=========================*/ @@ -7317,4 +7440,93 @@ dict_tf_to_row_format_string( ut_error; return(0); } + +/** Look for any dictionary objects that are found in the given tablespace. +@param[in] space Tablespace ID to search for. +@return true if tablespace is empty. 
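The comment block for dict_tf_to_fsp_flags() above charts how only the low-order bit changes meaning between the two flag words: in dict_table_t::flags it distinguishes REDUNDANT (0) from everything else (1), while in fil_space_t::flags it is only set for COMPRESSED and DYNAMIC. A tiny standalone restatement of that one bit, with an invented enum and function name, could look like this:

/* Illustrative only; enum values and names are not InnoDB's. */
enum toy_row_format {
	TOY_REDUNDANT,
	TOY_COMPACT,
	TOY_COMPRESSED,
	TOY_DYNAMIC
};

static unsigned
toy_low_order_fsp_bit(toy_row_format fmt)
{
	/* In table flags the low bit would simply be (fmt != TOY_REDUNDANT);
	in tablespace flags COMPACT still maps to 0. */
	return (fmt == TOY_COMPRESSED || fmt == TOY_DYNAMIC) ? 1 : 0;
}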
*/ +bool +dict_tablespace_is_empty( + ulint space_id) +{ + btr_pcur_t pcur; + const rec_t* rec; + mtr_t mtr; + bool found = false; + + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + + for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); + rec != NULL; + rec = dict_getnext_system(&pcur, &mtr)) { + const byte* field; + ulint len; + ulint space_id_for_table; + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__SPACE, &len); + ut_ad(len == 4); + space_id_for_table = mach_read_from_4(field); + + if (space_id_for_table == space_id) { + found = true; + } + } + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + return(!found); +} #endif /* !UNIV_HOTBACKUP */ + +/** Determine the extent size (in pages) for the given table +@param[in] table the table whose extent size is being + calculated. +@return extent size in pages (256, 128 or 64) */ +ulint +dict_table_extent_size( + const dict_table_t* table) +{ + const ulint mb_1 = 1024 * 1024; + const ulint mb_2 = 2 * mb_1; + const ulint mb_4 = 4 * mb_1; + + page_size_t page_size = dict_table_page_size(table); + ulint pages_in_extent = FSP_EXTENT_SIZE; + + if (page_size.is_compressed()) { + + ulint disk_page_size = page_size.physical(); + + switch (disk_page_size) { + case 1024: + pages_in_extent = mb_1/1024; + break; + case 2048: + pages_in_extent = mb_1/2048; + break; + case 4096: + pages_in_extent = mb_1/4096; + break; + case 8192: + pages_in_extent = mb_1/8192; + break; + case 16384: + pages_in_extent = mb_1/16384; + break; + case 32768: + pages_in_extent = mb_2/32768; + break; + case 65536: + pages_in_extent = mb_4/65536; + break; + default: + ut_ad(0); + } + } + + return(pages_in_extent); +} diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 04e31aff088..5267cf1a199 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -25,27 +25,32 @@ from dictionary tables Created 4/24/1996 Heikki Tuuri *******************************************************/ -#include "dict0load.h" -#include "mysql_version.h" +#include "ha_prototypes.h" +#include "dict0load.h" #ifdef UNIV_NONINL #include "dict0load.ic" #endif +#include "mysql_version.h" #include "btr0pcur.h" #include "btr0btr.h" -#include "page0page.h" -#include "mach0data.h" -#include "dict0dict.h" #include "dict0boot.h" +#include "dict0crea.h" +#include "dict0dict.h" +#include "dict0mem.h" +#include "dict0priv.h" #include "dict0stats.h" +#include "fsp0file.h" +#include "fsp0sysspace.h" +#include "fts0priv.h" +#include "mach0data.h" +#include "page0page.h" #include "rem0cmp.h" #include "srv0start.h" #include "srv0srv.h" -#include "dict0crea.h" -#include "dict0priv.h" -#include "ha_prototypes.h" /* innobase_casedn_str() */ -#include "fts0priv.h" +#include +#include /** Following are the InnoDB system tables. The positions in this array are referenced by enum dict_system_table_id. */ @@ -57,17 +62,57 @@ static const char* SYSTEM_TABLE_NAME[] = { "SYS_FOREIGN", "SYS_FOREIGN_COLS", "SYS_TABLESPACES", - "SYS_DATAFILES" + "SYS_DATAFILES", + "SYS_VIRTUAL" }; +/** Loads a table definition and also all its index definitions. + +Loads those foreign key constraints whose referenced table is already in +dictionary cache. If a foreign key constraint is not loaded, then the +referenced table is pushed into the output stack (fk_tables), if it is not +NULL. 
These tables must be subsequently loaded so that all the foreign +key constraints are loaded into memory. + +@param[in] name Table name in the db/tablename format +@param[in] cached true=add to cache, false=do not +@param[in] ignore_err Error to be ignored when loading table + and its index definition +@param[out] fk_tables Related table names that must also be + loaded to ensure that all foreign key + constraints are loaded. +@return table, NULL if does not exist; if the table is stored in an +.ibd file, but the file does not exist, then we set the +ibd_file_missing flag TRUE in the table object we return */ +static +dict_table_t* +dict_load_table_one( + table_name_t& name, + bool cached, + dict_err_ignore_t ignore_err, + dict_names_t& fk_tables); + +/** Loads a table definition from a SYS_TABLES record to dict_table_t. +Does not load any columns or indexes. +@param[in] name Table name +@param[in] rec SYS_TABLES record +@param[out,own] table Table, or NULL +@return error message, or NULL on success */ +static +const char* +dict_load_table_low( + table_name_t& name, + const rec_t* rec, + dict_table_t** table); + /* If this flag is TRUE, then we will load the cluster index's (and tables') metadata even if it is marked as "corrupted". */ -UNIV_INTERN my_bool srv_load_corrupted = FALSE; +my_bool srv_load_corrupted = FALSE; #ifdef UNIV_DEBUG /****************************************************************//** Compare the name of an index column. -@return TRUE if the i'th column of index is 'name'. */ +@return TRUE if the i'th column of index is 'name'. */ static ibool name_of_col_is( @@ -89,7 +134,6 @@ name_of_col_is( Finds the first table name in the given database. @return own: table name, NULL if does not exist; the caller must free the memory in the string! */ -UNIV_INTERN char* dict_get_first_table_name_in_db( /*============================*/ @@ -106,7 +150,7 @@ dict_get_first_table_name_in_db( ulint len; mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); heap = mem_heap_create(1000); @@ -169,69 +213,9 @@ loop: goto loop; } -/********************************************************************//** -Prints to the standard output information on all tables found in the data -dictionary system table. 
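The dict_load_table_one() comment above describes a worklist scheme: loading a table may discover foreign key constraints whose referenced tables are not cached yet, and their names are pushed onto fk_tables so the caller can keep loading until everything is resolved. A minimal standalone sketch of that idea, using standard containers and invented names rather than InnoDB's types, is:

#include <deque>
#include <set>
#include <string>

static void
toy_load_with_fk_worklist(
	const std::string&		first_table,
	std::set<std::string>&		cache,		/* "loaded" tables */
	std::deque<std::string>&	fk_tables)	/* names still to load */
{
	fk_tables.push_back(first_table);

	while (!fk_tables.empty()) {
		std::string	name = fk_tables.front();
		fk_tables.pop_front();

		if (cache.count(name) != 0) {
			continue;	/* already loaded */
		}

		cache.insert(name);	/* "load" the table definition */

		/* A real loader would now walk the table's foreign key
		definitions and push every referenced table name that is
		still missing from the cache onto fk_tables. */
	}
}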
*/ -UNIV_INTERN -void -dict_print(void) -/*============*/ -{ - dict_table_t* table; - btr_pcur_t pcur; - const rec_t* rec; - mem_heap_t* heap; - mtr_t mtr; - - /* Enlarge the fatal semaphore wait timeout during the InnoDB table - monitor printout */ - - os_increment_counter_by_amount( - server_mutex, - srv_fatal_semaphore_wait_threshold, - SRV_SEMAPHORE_WAIT_EXTENSION); - - heap = mem_heap_create(1000); - mutex_enter(&(dict_sys->mutex)); - mtr_start(&mtr); - - rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); - - while (rec) { - const char* err_msg; - - err_msg = static_cast( - dict_process_sys_tables_rec_and_mtr_commit( - heap, rec, &table, DICT_TABLE_LOAD_FROM_CACHE, - &mtr)); - - if (!err_msg) { - dict_table_print(table); - } else { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: %s\n", err_msg); - } - - mem_heap_empty(heap); - - mtr_start(&mtr); - rec = dict_getnext_system(&pcur, &mtr); - } - - mtr_commit(&mtr); - mutex_exit(&(dict_sys->mutex)); - mem_heap_free(heap); - - /* Restore the fatal semaphore wait timeout */ - os_decrement_counter_by_amount( - server_mutex, - srv_fatal_semaphore_wait_threshold, - SRV_SEMAPHORE_WAIT_EXTENSION); -} - /********************************************************************//** This function gets the next system table record as it scans the table. -@return the next record if found, NULL if end of scan */ +@return the next record if found, NULL if end of scan */ static const rec_t* dict_getnext_system_low( @@ -263,8 +247,7 @@ dict_getnext_system_low( /********************************************************************//** This function opens a system table, and returns the first record. -@return first record of the system table */ -UNIV_INTERN +@return first record of the system table */ const rec_t* dict_startscan_system( /*==================*/ @@ -293,8 +276,7 @@ dict_startscan_system( /********************************************************************//** This function gets the next system table record as it scans the table. -@return the next record if found, NULL if end of scan */ -UNIV_INTERN +@return the next record if found, NULL if end of scan */ const rec_t* dict_getnext_system( /*================*/ @@ -318,7 +300,6 @@ This function processes one SYS_TABLES record and populate the dict_table_t struct for the table. Extracted out of dict_print() to be used by both monitor table output and information schema innodb_sys_tables output. 
@return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_tables_rec_and_mtr_commit( /*=======================================*/ @@ -335,7 +316,7 @@ dict_process_sys_tables_rec_and_mtr_commit( ulint len; const char* field; const char* err_msg = NULL; - char* table_name; + table_name_t table_name; field = (const char*) rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLES__NAME, &len); @@ -345,7 +326,7 @@ dict_process_sys_tables_rec_and_mtr_commit( ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX)); /* Get the table name */ - table_name = mem_heap_strdupl(heap, field, len); + table_name.m_name = mem_heap_strdupl(heap, field, len); /* If DICT_TABLE_LOAD_FROM_CACHE is set, first check whether there is cached dict_table_t struct */ @@ -354,7 +335,7 @@ dict_process_sys_tables_rec_and_mtr_commit( /* Commit before load the table again */ mtr_commit(mtr); - *table = dict_table_get_low(table_name); + *table = dict_table_get_low(table_name.m_name); if (!(*table)) { err_msg = "Table not found in cache"; @@ -376,7 +357,6 @@ This function parses a SYS_INDEXES record and populate a dict_index_t structure with the information from the record. For detail information about SYS_INDEXES fields, please refer to dict_boot() function. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_indexes_rec( /*=========================*/ @@ -403,7 +383,6 @@ dict_process_sys_indexes_rec( This function parses a SYS_COLUMNS record and populate a dict_column_t structure with the information from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_columns_rec( /*=========================*/ @@ -411,22 +390,47 @@ dict_process_sys_columns_rec( const rec_t* rec, /*!< in: current SYS_COLUMNS rec */ dict_col_t* column, /*!< out: dict_col_t to be filled */ table_id_t* table_id, /*!< out: table id */ - const char** col_name) /*!< out: column name */ + const char** col_name, /*!< out: column name */ + ulint* nth_v_col) /*!< out: if virtual col, this is + record's sequence number */ { const char* err_msg; /* Parse the record, and get "dict_col_t" struct filled */ err_msg = dict_load_column_low(NULL, heap, column, - table_id, col_name, rec); + table_id, col_name, rec, nth_v_col); return(err_msg); } +/** This function parses a SYS_VIRTUAL record and extracts virtual column +information +@param[in,out] heap heap memory +@param[in] rec current SYS_COLUMNS rec +@param[in,out] table_id table id +@param[in,out] pos virtual column position +@param[in,out] base_pos base column position +@return error message, or NULL on success */ +const char* +dict_process_sys_virtual_rec( + mem_heap_t* heap, + const rec_t* rec, + table_id_t* table_id, + ulint* pos, + ulint* base_pos) +{ + const char* err_msg; + + /* Parse the record, and get "dict_col_t" struct filled */ + err_msg = dict_load_virtual_low(NULL, heap, NULL, table_id, + pos, base_pos, rec); + + return(err_msg); +} /********************************************************************//** This function parses a SYS_FIELDS record and populates a dict_field_t structure with the information from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_fields_rec( /*========================*/ @@ -461,7 +465,6 @@ This function parses a SYS_FOREIGN record and populate a dict_foreign_t structure with the information from the record. For detail information about SYS_FOREIGN fields, please refer to dict_load_foreign() function. 
@return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_foreign_rec( /*=========================*/ @@ -542,7 +545,6 @@ err_len: This function parses a SYS_FOREIGN_COLS record and extract necessary information from the record and return to caller. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_foreign_col_rec( /*=============================*/ @@ -612,7 +614,6 @@ err_len: This function parses a SYS_TABLESPACES record, extracts necessary information from the record and returns to caller. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_tablespaces( /*=========================*/ @@ -679,7 +680,6 @@ err_len: This function parses a SYS_DATAFILES record, extracts necessary information from the record and returns it to the caller. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_datafiles( /*=======================*/ @@ -729,65 +729,14 @@ err_len: return(NULL); } -/********************************************************************//** -Determine the flags of a table as stored in SYS_TABLES.TYPE and N_COLS. -@return ULINT_UNDEFINED if error, else a valid dict_table_t::flags. */ +/** Get the first filepath from SYS_DATAFILES for a given space_id. +@param[in] space_id Tablespace ID +@return First filepath (caller must invoke ut_free() on it) +@retval NULL if no SYS_DATAFILES entry was found. */ static -ulint -dict_sys_tables_get_flags( -/*======================*/ - const rec_t* rec) /*!< in: a record of SYS_TABLES */ -{ - const byte* field; - ulint len; - ulint type; - ulint n_cols; - - /* read the 4 byte flags from the TYPE field */ - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__TYPE, &len); - ut_a(len == 4); - type = mach_read_from_4(field); - - /* The low order bit of SYS_TABLES.TYPE is always set to 1. But in - dict_table_t::flags the low order bit is used to determine if the - row format is Redundant or Compact when the format is Antelope. - Read the 4 byte N_COLS field and look at the high order bit. It - should be set for COMPACT and later. It should not be set for - REDUNDANT. */ - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__N_COLS, &len); - ut_a(len == 4); - n_cols = mach_read_from_4(field); - - /* This validation function also combines the DICT_N_COLS_COMPACT - flag in n_cols into the type field to effectively make it a - dict_table_t::flags. */ - - if (ULINT_UNDEFINED == dict_sys_tables_type_validate(type, n_cols)) { - return(ULINT_UNDEFINED); - } - - return(dict_sys_tables_type_to_tf(type, n_cols)); -} - -/********************************************************************//** -Gets the filepath for a spaceid from SYS_DATAFILES and checks it against -the contents of a link file. This function is called when there is no -fil_node_t entry for this space ID so both durable locations on disk -must be checked and compared. -We use a temporary heap here for the table lookup, but not for the path -returned which the caller must free. -This function can return NULL if the space ID is not found in SYS_DATAFILES, -then the caller will assume that the ibd file is in the normal datadir. -@return own: A copy of the first datafile found in SYS_DATAFILES.PATH for -the given space ID. NULL if space ID is zero or not found. 
*/ -UNIV_INTERN char* dict_get_first_path( -/*================*/ - ulint space, /*!< in: space id */ - const char* name) /*!< in: tablespace name */ + ulint space_id) { mtr_t mtr; dict_table_t* sys_datafiles; @@ -799,15 +748,16 @@ dict_get_first_path( const rec_t* rec; const byte* field; ulint len; - char* dict_filepath = NULL; + char* filepath = NULL; mem_heap_t* heap = mem_heap_create(1024); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); mtr_start(&mtr); sys_datafiles = dict_table_get_low("SYS_DATAFILES"); sys_index = UT_LIST_GET_FIRST(sys_datafiles->indexes); + ut_ad(!dict_table_is_comp(sys_datafiles)); ut_ad(name_of_col_is(sys_datafiles, sys_index, DICT_FLD__SYS_DATAFILES__SPACE, "SPACE")); @@ -818,7 +768,7 @@ dict_get_first_path( dfield = dtuple_get_nth_field(tuple, DICT_FLD__SYS_DATAFILES__SPACE); buf = static_cast(mem_heap_alloc(heap, 4)); - mach_write_to_4(buf, space); + mach_write_to_4(buf, space_id); dfield_set_data(dfield, buf, 4); dict_index_copy_types(tuple, sys_index, 1); @@ -828,44 +778,155 @@ dict_get_first_path( rec = btr_pcur_get_rec(&pcur); - /* If the file-per-table tablespace was created with - an earlier version of InnoDB, then this record is not - in SYS_DATAFILES. But a link file still might exist. */ + /* Get the filepath from this SYS_DATAFILES record. */ + if (btr_pcur_is_on_user_rec(&pcur)) { + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_DATAFILES__SPACE, &len); + ut_a(len == 4); + + if (space_id == mach_read_from_4(field)) { + /* A record for this space ID was found. */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_DATAFILES__PATH, &len); + + ut_ad(len > 0); + ut_ad(len < OS_FILE_MAX_PATH); + if (len > 0 && len != UNIV_SQL_NULL) { + filepath = mem_strdupl( + reinterpret_cast(field), + len); + ut_ad(filepath != NULL); + + /* The dictionary may have been written on + another OS. */ + os_normalize_path(filepath); + } + } + } + + btr_pcur_close(&pcur); + mtr_commit(&mtr); + mem_heap_free(heap); + + return(filepath); +} + +/** Gets the space name from SYS_TABLESPACES for a given space ID. +@param[in] space_id Tablespace ID +@param[in] callers_heap A heap to allocate from, may be NULL +@return Tablespace name (caller is responsible to free it) +@retval NULL if no dictionary entry was found. 
*/ +static +char* +dict_get_space_name( + ulint space_id, + mem_heap_t* callers_heap) +{ + mtr_t mtr; + dict_table_t* sys_tablespaces; + dict_index_t* sys_index; + dtuple_t* tuple; + dfield_t* dfield; + byte* buf; + btr_pcur_t pcur; + const rec_t* rec; + const byte* field; + ulint len; + char* space_name = NULL; + mem_heap_t* heap = mem_heap_create(1024); + + ut_ad(mutex_own(&dict_sys->mutex)); + + sys_tablespaces = dict_table_get_low("SYS_TABLESPACES"); + if (sys_tablespaces == NULL) { + ut_a(!srv_sys_tablespaces_open); + return(NULL); + } + + sys_index = UT_LIST_GET_FIRST(sys_tablespaces->indexes); + + ut_ad(!dict_table_is_comp(sys_tablespaces)); + ut_ad(name_of_col_is(sys_tablespaces, sys_index, + DICT_FLD__SYS_TABLESPACES__SPACE, "SPACE")); + ut_ad(name_of_col_is(sys_tablespaces, sys_index, + DICT_FLD__SYS_TABLESPACES__NAME, "NAME")); + + tuple = dtuple_create(heap, 1); + dfield = dtuple_get_nth_field(tuple, DICT_FLD__SYS_TABLESPACES__SPACE); + + buf = static_cast(mem_heap_alloc(heap, 4)); + mach_write_to_4(buf, space_id); + + dfield_set_data(dfield, buf, 4); + dict_index_copy_types(tuple, sys_index, 1); + + mtr_start(&mtr); + + btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, + BTR_SEARCH_LEAF, &pcur, &mtr); + + rec = btr_pcur_get_rec(&pcur); + + /* Get the tablespace name from this SYS_TABLESPACES record. */ if (btr_pcur_is_on_user_rec(&pcur)) { - /* A record for this space ID was found. */ field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_DATAFILES__PATH, &len); - ut_a(len > 0 || len == UNIV_SQL_NULL); - ut_a(len < OS_FILE_MAX_PATH); - dict_filepath = mem_strdupl((char*) field, len); - ut_a(dict_filepath); + rec, DICT_FLD__SYS_TABLESPACES__SPACE, &len); + ut_a(len == 4); + + if (space_id == mach_read_from_4(field)) { + /* A record for this space ID was found. */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLESPACES__NAME, &len); + + ut_ad(len > 0); + ut_ad(len < OS_FILE_MAX_PATH); + + if (len > 0 && len != UNIV_SQL_NULL) { + /* Found a tablespace name. */ + if (callers_heap == NULL) { + space_name = mem_strdupl( + reinterpret_cast< + const char*>(field), + len); + } else { + space_name = mem_heap_strdupl( + callers_heap, + reinterpret_cast< + const char*>(field), + len); + } + ut_ad(space_name); + } + } } btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap); - return(dict_filepath); + return(space_name); } -/********************************************************************//** -Update the record for space_id in SYS_TABLESPACES to this filepath. -@return DB_SUCCESS if OK, dberr_t if the insert failed */ -UNIV_INTERN +/** Update the record for space_id in SYS_TABLESPACES to this filepath. +@param[in] space_id Tablespace ID +@param[in] filepath Tablespace filepath +@return DB_SUCCESS if OK, dberr_t if the insert failed */ dberr_t dict_update_filepath( -/*=================*/ - ulint space_id, /*!< in: space id */ - const char* filepath) /*!< in: filepath */ + ulint space_id, + const char* filepath) { + if (!srv_sys_tablespaces_open) { + /* Startup procedure is not yet ready for updates. 
*/ + return(DB_SUCCESS); + } + dberr_t err = DB_SUCCESS; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); trx = trx_allocate_for_background(); trx->op_info = "update filepath"; @@ -892,39 +953,48 @@ dict_update_filepath( if (err == DB_SUCCESS) { /* We just updated SYS_DATAFILES due to the contents in a link file. Make a note that we did this. */ - ib_logf(IB_LOG_LEVEL_INFO, - "The InnoDB data dictionary table SYS_DATAFILES " - "for tablespace ID %lu was updated to use file %s.", - (ulong) space_id, filepath); + ib::info() << "The InnoDB data dictionary table SYS_DATAFILES" + " for tablespace ID " << space_id + << " was updated to use file " << filepath << "."; } else { - ib_logf(IB_LOG_LEVEL_WARN, - "Problem updating InnoDB data dictionary table " - "SYS_DATAFILES for tablespace ID %lu to file %s.", - (ulong) space_id, filepath); + ib::warn() << "Error occurred while updating InnoDB data" + " dictionary table SYS_DATAFILES for tablespace ID " + << space_id << " to file " << filepath << ": " + << ut_strerr(err) << "."; } return(err); } -/********************************************************************//** -Insert records into SYS_TABLESPACES and SYS_DATAFILES. -@return DB_SUCCESS if OK, dberr_t if the insert failed */ -UNIV_INTERN +/** Replace records in SYS_TABLESPACES and SYS_DATAFILES associated with +the given space_id using an independent transaction. +@param[in] space_id Tablespace ID +@param[in] name Tablespace name +@param[in] filepath First filepath +@param[in] fsp_flags Tablespace flags +@return DB_SUCCESS if OK, dberr_t if the insert failed */ dberr_t -dict_insert_tablespace_and_filepath( -/*================================*/ - ulint space, /*!< in: space id */ - const char* name, /*!< in: talespace name */ - const char* filepath, /*!< in: filepath */ - ulint fsp_flags) /*!< in: tablespace flags */ +dict_replace_tablespace_and_filepath( + ulint space_id, + const char* name, + const char* filepath, + ulint fsp_flags) { + if (!srv_sys_tablespaces_open) { + /* Startup procedure is not yet ready for updates. + Return success since this will likely get updated + later. */ + return(DB_SUCCESS); + } + dberr_t err = DB_SUCCESS; trx_t* trx; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(mutex_own(&(dict_sys->mutex))); + DBUG_EXECUTE_IF("innodb_fail_to_update_tablespace_dict", + return(DB_INTERRUPTED);); + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(filepath); trx = trx_allocate_for_background(); @@ -934,9 +1004,9 @@ dict_insert_tablespace_and_filepath( /* A record for this space ID was not found in SYS_DATAFILES. Assume the record is also missing in - SYS_TABLESPACES. Insert records onto them both. */ - err = dict_create_add_tablespace_to_dictionary( - space, name, fsp_flags, filepath, trx, false); + SYS_TABLESPACES. Insert records into them both. */ + err = dict_replace_tablespace_in_dictionary( + space_id, name, fsp_flags, filepath, trx, false); trx_commit_for_mysql(trx); trx->dict_operation_lock_mode = 0; @@ -945,214 +1015,458 @@ dict_insert_tablespace_and_filepath( return(err); } -/********************************************************************//** -This function looks at each table defined in SYS_TABLES. 
It checks the -tablespace for any table with a space_id > 0. It looks up the tablespace -in SYS_DATAFILES to ensure the correct path. - -In a crash recovery we already have all the tablespace objects created. -This function compares the space id information in the InnoDB data dictionary -to what we already read with fil_load_single_table_tablespaces(). - -In a normal startup, we create the tablespace objects for every table in -InnoDB's data dictionary, if the corresponding .ibd file exists. -We also scan the biggest space id, and store it to fil_system. */ -UNIV_INTERN -void -dict_check_tablespaces_and_store_max_id( -/*====================================*/ - dict_check_t dict_check) /*!< in: how to check */ +/** Check the validity of a SYS_TABLES record +Make sure the fields are the right length and that they +do not contain invalid contents. +@param[in] rec SYS_TABLES record +@return error message, or NULL on success */ +static +const char* +dict_sys_tables_rec_check( + const rec_t* rec) { - dict_table_t* sys_tables; - dict_index_t* sys_index; - btr_pcur_t pcur; - const rec_t* rec; - ulint max_space_id; - mtr_t mtr; + const byte* field; + ulint len; - rw_lock_x_lock(&dict_operation_lock); - mutex_enter(&(dict_sys->mutex)); + ut_ad(mutex_own(&dict_sys->mutex)); - mtr_start(&mtr); + if (rec_get_deleted_flag(rec, 0)) { + return("delete-marked record in SYS_TABLES"); + } - sys_tables = dict_table_get_low("SYS_TABLES"); - sys_index = UT_LIST_GET_FIRST(sys_tables->indexes); - ut_ad(!dict_table_is_comp(sys_tables)); + if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_TABLES) { + return("wrong number of columns in SYS_TABLES record"); + } - max_space_id = mtr_read_ulint(dict_hdr_get(&mtr) - + DICT_HDR_MAX_SPACE_ID, - MLOG_4BYTES, &mtr); - fil_set_max_space_id_if_bigger(max_space_id); + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_TABLES__NAME, &len); + if (len == 0 || len == UNIV_SQL_NULL) { +err_len: + return("incorrect column length in SYS_TABLES"); + } + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_TABLES__DB_TRX_ID, &len); + if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) { + goto err_len; + } + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_TABLES__DB_ROLL_PTR, &len); + if (len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL) { + goto err_len; + } - btr_pcur_open_at_index_side(true, sys_index, BTR_SEARCH_LEAF, &pcur, - true, 0, &mtr); -loop: - btr_pcur_move_to_next_user_rec(&pcur, &mtr); + rec_get_nth_field_offs_old(rec, DICT_FLD__SYS_TABLES__ID, &len); + if (len != 8) { + goto err_len; + } - rec = btr_pcur_get_rec(&pcur); + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__N_COLS, &len); + if (field == NULL || len != 4) { + goto err_len; + } - if (!btr_pcur_is_on_user_rec(&pcur)) { - /* end of index */ + rec_get_nth_field_offs_old(rec, DICT_FLD__SYS_TABLES__TYPE, &len); + if (len != 4) { + goto err_len; + } - btr_pcur_close(&pcur); - mtr_commit(&mtr); + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_TABLES__MIX_ID, &len); + if (len != 8) { + goto err_len; + } - /* We must make the tablespace cache aware of the biggest - known space id */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); + if (field == NULL || len != 4) { + goto err_len; + } - /* printf("Biggest space id in data dictionary %lu\n", - max_space_id); */ - fil_set_max_space_id_if_bigger(max_space_id); + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_TABLES__CLUSTER_ID, &len); + if (len != UNIV_SQL_NULL) { + goto err_len; + } - mutex_exit(&(dict_sys->mutex)); - 
rw_lock_x_unlock(&dict_operation_lock); + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__SPACE, &len); + if (field == NULL || len != 4) { + goto err_len; + } - return; + return(NULL); +} + +/** Read and return the contents of a SYS_TABLESPACES record. +@param[in] rec A record of SYS_TABLESPACES +@param[out] id Pointer to the space_id for this table +@param[in,out] name Buffer for Tablespace Name of length NAME_LEN +@param[out] flags Pointer to tablespace flags +@return true if the record was read correctly, false if not. */ +bool +dict_sys_tablespaces_rec_read( + const rec_t* rec, + ulint* id, + char* name, + ulint* flags) +{ + const byte* field; + ulint len; + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLESPACES__SPACE, &len); + if (len != DICT_FLD_LEN_SPACE) { + ib::error() << "Wrong field length in SYS_TABLESPACES.SPACE: " + << len; + return(false); } + *id = mach_read_from_4(field); - if (!rec_get_deleted_flag(rec, 0)) { + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLESPACES__NAME, &len); + if (len == 0 || len == UNIV_SQL_NULL) { + ib::error() << "Wrong field length in SYS_TABLESPACES.NAME: " + << len; + return(false); + } + strncpy(name, reinterpret_cast(field), NAME_LEN); - /* We found one */ - const byte* field; - ulint len; - ulint space_id; - ulint flags; - char* name; + /* read the 4 byte flags from the TYPE field */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLESPACES__FLAGS, &len); + if (len != 4) { + ib::error() << "Wrong field length in SYS_TABLESPACES.FLAGS: " + << len; + return(false); + } + *flags = mach_read_from_4(field); - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__NAME, &len); + return(true); +} - name = mem_strdupl((char*) field, len); +/** Load and check each general tablespace mentioned in the SYS_TABLESPACES. +Ignore system and file-per-table tablespaces. +If it is valid, add it to the file_system list. +@param[in] validate true when the previous shutdown was not clean +@return the highest space ID found. */ +UNIV_INLINE +ulint +dict_check_sys_tablespaces( + bool validate) +{ + ulint max_space_id = 0; + btr_pcur_t pcur; + const rec_t* rec; + mtr_t mtr; - char table_name[MAX_FULL_NAME_LEN + 1]; + DBUG_ENTER("dict_check_sys_tablespaces"); - innobase_format_name( - table_name, sizeof(table_name), name, FALSE); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); - flags = dict_sys_tables_get_flags(rec); - if (UNIV_UNLIKELY(flags == ULINT_UNDEFINED)) { - /* Read again the 4 bytes from rec. */ - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__TYPE, &len); - ut_ad(len == 4); /* this was checked earlier */ - flags = mach_read_from_4(field); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Table '%s' in InnoDB data dictionary" - " has unknown type %lx", table_name, flags); - mem_free(name); - goto loop; + /* Before traversing it, let's make sure we have + SYS_TABLESPACES and SYS_DATAFILES loaded. */ + dict_table_get_low("SYS_TABLESPACES"); + dict_table_get_low("SYS_DATAFILES"); + + mtr_start(&mtr); + + for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); + rec != NULL; + rec = dict_getnext_system(&pcur, &mtr)) + { + char space_name[NAME_LEN]; + ulint space_id = 0; + ulint fsp_flags; + + if (!dict_sys_tablespaces_rec_read(rec, &space_id, + space_name, &fsp_flags)) { + continue; } - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__SPACE, &len); - ut_a(len == 4); + /* Ignore system and file-per-table tablespaces. 
*/ + if (is_system_tablespace(space_id) + || !fsp_is_shared_tablespace(fsp_flags)) { + continue; + } + + /* Ignore tablespaces that already are in the tablespace + cache. */ + if (fil_space_for_table_exists_in_mem( + space_id, space_name, false, true, NULL, 0, NULL)) { + /* Recovery can open a datafile that does not + match SYS_DATAFILES. If they don't match, update + SYS_DATAFILES. */ + char *dict_path = dict_get_first_path(space_id); + char *fil_path = fil_space_get_first_path(space_id); + if (dict_path && fil_path + && strcmp(dict_path, fil_path)) { + dict_update_filepath(space_id, fil_path); + } + ut_free(dict_path); + ut_free(fil_path); + continue; + } - space_id = mach_read_from_4(field); + /* Set the expected filepath from the data dictionary. + If the file is found elsewhere (from an ISL or the default + location) or this path is the same file but looks different, + fil_ibd_open() will update the dictionary with what is + opened. */ + char* filepath = dict_get_first_path(space_id); + + validate = true; /* Encryption */ + + /* Check that the .ibd file exists. */ + dberr_t err = fil_ibd_open( + validate, + !srv_read_only_mode && srv_log_file_size != 0, + FIL_TYPE_TABLESPACE, + space_id, + fsp_flags, + space_name, + filepath, + NULL); - btr_pcur_store_position(&pcur, &mtr); + if (err != DB_SUCCESS) { + ib::warn() << "Ignoring tablespace " + << id_name_t(space_name) + << " because it could not be opened."; + } - mtr_commit(&mtr); + max_space_id = ut_max(max_space_id, space_id); + + ut_free(filepath); + } + + mtr_commit(&mtr); + + DBUG_RETURN(max_space_id); +} + +/** Read and return 5 integer fields from a SYS_TABLES record. +@param[in] rec A record of SYS_TABLES +@param[in] name Table Name, the same as SYS_TABLES.NAME +@param[out] table_id Pointer to the table_id for this table +@param[out] space_id Pointer to the space_id for this table +@param[out] n_cols Pointer to number of columns for this table. +@param[out] flags Pointer to table flags +@param[out] flags2 Pointer to table flags2 +@return true if the record was read correctly, false if not. */ +static +bool +dict_sys_tables_rec_read( + const rec_t* rec, + const table_name_t& table_name, + table_id_t* table_id, + ulint* space_id, + ulint* n_cols, + ulint* flags, + ulint* flags2) +{ + const byte* field; + ulint len; + ulint type; + + *flags2 = 0; + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__ID, &len); + ut_ad(len == 8); + *table_id = static_cast(mach_read_from_8(field)); + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__SPACE, &len); + ut_ad(len == 4); + *space_id = mach_read_from_4(field); - /* For tables created with old versions of InnoDB, - SYS_TABLES.MIX_LEN may contain garbage. Such tables - would always be in ROW_FORMAT=REDUNDANT. Pretend that - all such tables are non-temporary. That is, do not - suppress error printouts about temporary or discarded - tablespaces not being found. */ + /* Read the 4 byte flags from the TYPE field */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__TYPE, &len); + ut_a(len == 4); + type = mach_read_from_4(field); + /* The low order bit of SYS_TABLES.TYPE is always set to 1. But in + dict_table_t::flags the low order bit is used to determine if the + row format is Redundant (0) or Compact (1) when the format is Antelope. + Read the 4 byte N_COLS field and look at the high order bit. It + should be set for COMPACT and later. It should not be set for + REDUNDANT. 
*/ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__N_COLS, &len); + ut_a(len == 4); + *n_cols = mach_read_from_4(field); + + /* This validation function also combines the DICT_N_COLS_COMPACT + flag in n_cols into the type field to effectively make it a + dict_table_t::flags. */ + + if (ULINT_UNDEFINED == dict_sys_tables_type_validate(type, *n_cols)) { + ib::error() << "Table " << table_name << " in InnoDB" + " data dictionary contains invalid flags." + " SYS_TABLES.TYPE=" << type << + " SYS_TABLES.N_COLS=" << *n_cols; + *flags = ULINT_UNDEFINED; + return(false); + } + + *flags = dict_sys_tables_type_to_tf(type, *n_cols); + + /* For tables created with old versions of InnoDB, there may be + garbage in SYS_TABLES.MIX_LEN where flags2 are found. Such tables + would always be in ROW_FORMAT=REDUNDANT which do not have the + high bit set in n_cols, and flags would be zero. */ + if (*flags != 0 || *n_cols & DICT_N_COLS_COMPACT) { + + /* Get flags2 from SYS_TABLES.MIX_LEN */ field = rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); + *flags2 = mach_read_from_4(field); + + if (!dict_tf2_is_valid(*flags, *flags2)) { + ib::error() << "Table " << table_name << " in InnoDB" + " data dictionary contains invalid flags." + " SYS_TABLES.MIX_LEN=" << *flags2; + *flags2 = ULINT_UNDEFINED; + return(false); + } - bool is_temp = false; - bool discarded = false; - ib_uint32_t flags2 = static_cast( - mach_read_from_4(field)); + /* DICT_TF2_FTS will be set when indexes are being loaded */ + *flags2 &= ~DICT_TF2_FTS; - /* Check that the tablespace (the .ibd file) really - exists; print a warning to the .err log if not. - Do not print warnings for temporary tables or for - tablespaces that have been discarded. */ + /* Now that we have used this bit, unset it. */ + *n_cols &= ~DICT_N_COLS_COMPACT; + } - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__N_COLS, &len); + return(true); +} - /* MIX_LEN valid only for ROW_FORMAT > REDUNDANT. */ - if (mach_read_from_4(field) & DICT_N_COLS_COMPACT) { +/** Load and check each non-predefined tablespace mentioned in SYS_TABLES. +Search SYS_TABLES and check each tablespace mentioned that has not +already been added to the fil_system. If it is valid, add it to the +file_system list. Perform extra validation on the table if recovery from +the REDO log occurred. +@param[in] validate Whether to do validation on the table. +@return the highest space ID found. */ +UNIV_INLINE +ulint +dict_check_sys_tables( + bool validate) +{ + ulint max_space_id = 0; + btr_pcur_t pcur; + const rec_t* rec; + mtr_t mtr; + + DBUG_ENTER("dict_check_sys_tables"); + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(mutex_own(&dict_sys->mutex)); + + mtr_start(&mtr); - is_temp = !!(flags2 & DICT_TF2_TEMPORARY); - discarded = !!(flags2 & DICT_TF2_DISCARDED); + /* Before traversing SYS_TABLES, let's make sure we have + SYS_TABLESPACES and SYS_DATAFILES loaded. 
*/ + dict_table_t* sys_tablespaces; + dict_table_t* sys_datafiles; + sys_tablespaces = dict_table_get_low("SYS_TABLESPACES"); + ut_a(sys_tablespaces != NULL); + sys_datafiles = dict_table_get_low("SYS_DATAFILES"); + ut_a(sys_datafiles != NULL); + + for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); + rec != NULL; + rec = dict_getnext_system(&pcur, &mtr)) { + const byte* field; + ulint len; + char* space_name; + table_name_t table_name; + table_id_t table_id; + ulint space_id; + ulint n_cols; + ulint flags; + ulint flags2; + + /* If a table record is not useable, ignore it and continue + on to the next record. Error messages were logged. */ + if (dict_sys_tables_rec_check(rec) != NULL) { + continue; } - if (space_id == 0) { - /* The system tablespace always exists. */ - ut_ad(!discarded); - goto next_tablespace; + /* Copy the table name from rec */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__NAME, &len); + table_name.m_name = mem_strdupl((char*) field, len); + DBUG_PRINT("dict_check_sys_tables", + ("name: %p, '%s'", table_name.m_name, + table_name.m_name)); + + dict_sys_tables_rec_read(rec, table_name, + &table_id, &space_id, + &n_cols, &flags, &flags2); + if (flags == ULINT_UNDEFINED + || is_system_tablespace(space_id)) { + ut_free(table_name.m_name); + continue; } - switch (dict_check) { - case DICT_CHECK_ALL_LOADED: - /* All tablespaces should have been found in - fil_load_single_table_tablespaces(). */ - if (fil_space_for_table_exists_in_mem( - space_id, name, TRUE, !(is_temp || discarded), - false, NULL, 0) - && !(is_temp || discarded)) { - /* If user changes the path of .ibd files in - *.isl files before doing crash recovery , - then this leads to inconsistency in - SYS_DATAFILES system table because the - tables are loaded from the updated path - but the SYS_DATAFILES still points to the - old path.Therefore after crash recovery - update SYS_DATAFILES with the updated path.*/ - ut_ad(space_id); - ut_ad(recv_needed_recovery); - char *dict_path = dict_get_first_path(space_id, - name); - char *remote_path = fil_read_link_file(name); - if(dict_path && remote_path) { - if(strcmp(dict_path,remote_path)) { - dict_update_filepath(space_id, - remote_path); - } - } - if(dict_path) - mem_free(dict_path); - if(remote_path) - mem_free(remote_path); - } - break; + if (flags2 & DICT_TF2_DISCARDED) { + ib::info() << "Ignoring tablespace " << table_name + << " because the DISCARD flag is set ."; + ut_free(table_name.m_name); + continue; + } - case DICT_CHECK_SOME_LOADED: - /* Some tablespaces may have been opened in - trx_resurrect_table_locks(). */ - if (fil_space_for_table_exists_in_mem( - space_id, name, FALSE, FALSE, - false, NULL, 0)) { - break; - } - /* fall through */ - case DICT_CHECK_NONE_LOADED: - if (discarded) { - ib_logf(IB_LOG_LEVEL_INFO, - "DISCARD flag set for table '%s'," - " ignored.", - table_name); - break; + /* If the table is not a predefined tablespace then it must + be in a file-per-table or shared tablespace. + Note that flags2 is not available for REDUNDANT tables, + so don't check those. */ + ut_ad(DICT_TF_HAS_SHARED_SPACE(flags) + || !DICT_TF_GET_COMPACT(flags) + || flags2 & DICT_TF2_USE_FILE_PER_TABLE); + + /* Look up the tablespace name in the data dictionary if this + is a shared tablespace. For file-per-table, the table_name + and the tablespace_name are the same. + Some hidden tables like FTS AUX tables may not be found in + the dictionary since they can always be found in the default + location. 
If so, then dict_get_space_name() will return NULL, + the space name must be the table_name, and the filepath can be + discovered in the default location.*/ + char* shared_space_name = dict_get_space_name(space_id, NULL); + space_name = shared_space_name == NULL + ? table_name.m_name + : shared_space_name; + + /* Now that we have the proper name for this tablespace, + whether it is a shared tablespace or a single table + tablespace, look to see if it is already in the tablespace + cache. */ + if (fil_space_for_table_exists_in_mem( + space_id, space_name, false, true, NULL, 0, NULL)) { + /* Recovery can open a datafile that does not + match SYS_DATAFILES. If they don't match, update + SYS_DATAFILES. */ + char *dict_path = dict_get_first_path(space_id); + char *fil_path = fil_space_get_first_path(space_id); + if (dict_path && fil_path + && strcmp(dict_path, fil_path)) { + dict_update_filepath(space_id, fil_path); } + ut_free(dict_path); + ut_free(fil_path); + ut_free(table_name.m_name); + ut_free(shared_space_name); + continue; + } - /* It is a normal database startup: create the - space object and check that the .ibd file exists. - If the table uses a remote tablespace, look for the - space_id in SYS_DATAFILES to find the filepath */ - - /* Use the remote filepath if known. */ - char* filepath = NULL; - if (DICT_TF_HAS_DATA_DIR(flags)) { - filepath = dict_get_first_path( - space_id, name); - } + /* Set the expected filepath from the data dictionary. + If the file is found elsewhere (from an ISL or the default + location) or this path is the same file but looks different, + fil_ibd_open() will update the dictionary with what is + opened. */ + char* filepath = dict_get_first_path(space_id); /* We need to read page 0 to get (optional) IV regardless if encryptions is turned on or not, @@ -1160,49 +1474,99 @@ loop: already encrypted table */ bool read_page_0 = true; - /* We set the 2nd param (fix_dict = true) - here because we already have an x-lock on - dict_operation_lock and dict_sys->mutex. Besides, - this is at startup and we are now single threaded. - If the filepath is not known, it will need to - be discovered. */ - dberr_t err = fil_open_single_table_tablespace( - read_page_0, srv_read_only_mode ? false : true, - space_id, dict_tf_to_fsp_flags(flags), - name, filepath, NULL); - - if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace open failed for '%s', " - "ignored.", table_name); - } + /* Check that the .ibd file exists. */ + bool is_temp = flags2 & DICT_TF2_TEMPORARY; + ulint fsp_flags = dict_tf_to_fsp_flags(flags, is_temp); - if (filepath) { - mem_free(filepath); - } + validate = true; - break; - } + dberr_t err = fil_ibd_open( + validate, + !srv_read_only_mode && srv_log_file_size != 0, + FIL_TYPE_TABLESPACE, + space_id, + fsp_flags, + space_name, + filepath, + NULL); - if (space_id > max_space_id) { - max_space_id = space_id; + if (err != DB_SUCCESS) { + ib::warn() << "Ignoring tablespace " + << id_name_t(space_name) + << " because it could not be opened."; } -next_tablespace: - mem_free(name); - mtr_start(&mtr); + max_space_id = ut_max(max_space_id, space_id); - btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr); + ut_free(table_name.m_name); + ut_free(shared_space_name); + ut_free(filepath); } - goto loop; + mtr_commit(&mtr); + + DBUG_RETURN(max_space_id); +} + +/** Check each tablespace found in the data dictionary. +Look at each general tablespace found in SYS_TABLESPACES. 
+Then look at each table defined in SYS_TABLES that has a space_id > 0 +to find all the file-per-table tablespaces. + +In a crash recovery we already have some tablespace objects created from +processing the REDO log. Any other tablespace in SYS_TABLESPACES not +previously used in recovery will be opened here. We will compare the +space_id information in the data dictionary to what we find in the +tablespace file. In addition, more validation will be done if recovery +was needed and force_recovery is not set. + +We also scan the biggest space id, and store it to fil_system. +@param[in] validate true if recovery was needed */ +void +dict_check_tablespaces_and_store_max_id( + bool validate) +{ + mtr_t mtr; + + DBUG_ENTER("dict_check_tablespaces_and_store_max_id"); + + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + + /* Initialize the max space_id from sys header */ + mtr_start(&mtr); + ulint max_space_id = mtr_read_ulint( + dict_hdr_get(&mtr) + DICT_HDR_MAX_SPACE_ID, + MLOG_4BYTES, &mtr); + mtr_commit(&mtr); + + fil_set_max_space_id_if_bigger(max_space_id); + + /* Open all general tablespaces found in SYS_TABLESPACES. */ + ulint max1 = dict_check_sys_tablespaces(validate); + + /* Open all tablespaces referenced in SYS_TABLES. + This will update SYS_TABLESPACES and SYS_DATAFILES if it + finds any file-per-table tablespaces not already there. */ + ulint max2 = dict_check_sys_tables(validate); + + /* Store the max space_id found */ + max_space_id = ut_max(max1, max2); + fil_set_max_space_id_if_bigger(max_space_id); + + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + DBUG_VOID_RETURN; } +/** Error message for a delete-marked record in dict_load_column_low() */ +static const char* dict_load_column_del = "delete-marked record in SYS_COLUMN"; + /********************************************************************//** Loads a table column definition from a SYS_COLUMNS record to dict_table_t. 
@return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_column_low( /*=================*/ @@ -1216,7 +1580,10 @@ dict_load_column_low( or NULL if table != NULL */ table_id_t* table_id, /*!< out: table id */ const char** col_name, /*!< out: column name */ - const rec_t* rec) /*!< in: SYS_COLUMNS record */ + const rec_t* rec, /*!< in: SYS_COLUMNS record */ + ulint* nth_v_col) /*!< out: if not NULL, this + records the "n" of "nth" virtual + column */ { char* name; const byte* field; @@ -1225,11 +1592,12 @@ dict_load_column_low( ulint prtype; ulint col_len; ulint pos; + ulint num_base; ut_ad(table || column); if (rec_get_deleted_flag(rec, 0)) { - return("delete-marked record in SYS_COLUMNS"); + return(dict_load_column_del); } if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_COLUMNS) { @@ -1252,16 +1620,11 @@ err_len: field = rec_get_nth_field_old( rec, DICT_FLD__SYS_COLUMNS__POS, &len); if (len != 4) { - goto err_len; } pos = mach_read_from_4(field); - if (table && table->n_def != pos) { - return("SYS_COLUMNS.POS mismatch"); - } - rec_get_nth_field_offs_old( rec, DICT_FLD__SYS_COLUMNS__DB_TRX_ID, &len); if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) { @@ -1321,6 +1684,10 @@ err_len: } } + if (table && table->n_def != pos && !(prtype & DATA_VIRTUAL)) { + return("SYS_COLUMNS.POS mismatch"); + } + field = rec_get_nth_field_old( rec, DICT_FLD__SYS_COLUMNS__LEN, &len); if (len != 4) { @@ -1332,18 +1699,124 @@ err_len: if (len != 4) { goto err_len; } + num_base = mach_read_from_4(field); - if (!column) { - dict_mem_table_add_col(table, heap, name, mtype, - prtype, col_len); + if (column == NULL) { + if (prtype & DATA_VIRTUAL) { +#ifdef UNIV_DEBUG + dict_v_col_t* vcol = +#endif + dict_mem_table_add_v_col( + table, heap, name, mtype, + prtype, col_len, + dict_get_v_col_mysql_pos(pos), num_base); + ut_ad(vcol->v_pos == dict_get_v_col_pos(pos)); + } else { + ut_ad(num_base == 0); + dict_mem_table_add_col(table, heap, name, mtype, + prtype, col_len); + } } else { dict_mem_fill_column_struct(column, pos, mtype, prtype, col_len); } + /* Report the virtual column number */ + if ((prtype & DATA_VIRTUAL) && nth_v_col != NULL) { + *nth_v_col = dict_get_v_col_pos(pos); + } + return(NULL); } +/** Error message for a delete-marked record in dict_load_virtual_low() */ +static const char* dict_load_virtual_del = "delete-marked record in SYS_VIRTUAL"; + +/** Loads a virtual column "mapping" (to base columns) information +from a SYS_VIRTUAL record +@param[in,out] table table +@param[in,out] heap memory heap +@param[in,out] column mapped base column's dict_column_t +@param[in,out] table_id table id +@param[in,out] pos virtual column position +@param[in,out] base_pos base column position +@param[in] rec SYS_VIRTUAL record +@return error message, or NULL on success */ +const char* +dict_load_virtual_low( + dict_table_t* table, + mem_heap_t* heap, + dict_col_t** column, + table_id_t* table_id, + ulint* pos, + ulint* base_pos, + const rec_t* rec) +{ + const byte* field; + ulint len; + ulint base; + + if (rec_get_deleted_flag(rec, 0)) { + return(dict_load_virtual_del); + } + + if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_VIRTUAL) { + return("wrong number of columns in SYS_VIRTUAL record"); + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_VIRTUAL__TABLE_ID, &len); + if (len != 8) { +err_len: + return("incorrect column length in SYS_VIRTUAL"); + } + + if (table_id != NULL) { + *table_id = mach_read_from_8(field); + } else if (table->id != mach_read_from_8(field)) { + 
return("SYS_VIRTUAL.TABLE_ID mismatch"); + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_VIRTUAL__POS, &len); + if (len != 4) { + goto err_len; + } + + if (pos != NULL) { + *pos = mach_read_from_4(field); + } + + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_VIRTUAL__BASE_POS, &len); + if (len != 4) { + goto err_len; + } + + base = mach_read_from_4(field); + + if (base_pos != NULL) { + *base_pos = base; + } + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_VIRTUAL__DB_TRX_ID, &len); + if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) { + goto err_len; + } + + rec_get_nth_field_offs_old( + rec, DICT_FLD__SYS_VIRTUAL__DB_ROLL_PTR, &len); + if (len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL) { + goto err_len; + } + + if (column != NULL) { + *column = dict_table_get_nth_col(table, base); + } + + return(NULL); +} /********************************************************************//** Loads definitions for table columns. */ static @@ -1363,8 +1836,9 @@ dict_load_columns( byte* buf; ulint i; mtr_t mtr; + ulint n_skipped = 0; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); mtr_start(&mtr); @@ -1388,26 +1862,37 @@ dict_load_columns( btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr); - for (i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) { + + ut_ad(table->n_t_cols == static_cast( + table->n_cols) + static_cast(table->n_v_cols)); + + for (i = 0; + i + DATA_N_SYS_COLS < table->n_t_cols + n_skipped; + i++) { const char* err_msg; const char* name = NULL; + ulint nth_v_col = ULINT_UNDEFINED; rec = btr_pcur_get_rec(&pcur); ut_a(btr_pcur_is_on_user_rec(&pcur)); err_msg = dict_load_column_low(table, heap, NULL, NULL, - &name, rec); + &name, rec, &nth_v_col); - if (err_msg) { - fprintf(stderr, "InnoDB: %s\n", err_msg); - ut_error; + if (err_msg == dict_load_column_del) { + n_skipped++; + goto next_rec; + } else if (err_msg) { + ib::fatal() << err_msg; } /* Note: Currently we have one DOC_ID column that is - shared by all FTS indexes on a table. */ + shared by all FTS indexes on a table. 
And only non-virtual + column can be used for FULLTEXT index */ if (innobase_strcasecmp(name, - FTS_DOC_ID_COL_NAME) == 0) { + FTS_DOC_ID_COL_NAME) == 0 + && nth_v_col == ULINT_UNDEFINED) { dict_col_t* col; /* As part of normal loading of tables the table->flag is not set for tables with FTS @@ -1424,7 +1909,7 @@ dict_load_columns( ut_a(table->fts->doc_col == ULINT_UNDEFINED); - col = dict_table_get_nth_col(table, i); + col = dict_table_get_nth_col(table, i - n_skipped); ut_ad(col->len == sizeof(doc_id_t)); @@ -1435,7 +1920,103 @@ dict_load_columns( table, DICT_TF2_FTS_ADD_DOC_ID); } - table->fts->doc_col = i; + table->fts->doc_col = i - n_skipped; + } +next_rec: + btr_pcur_move_to_next_user_rec(&pcur, &mtr); + } + + btr_pcur_close(&pcur); + mtr_commit(&mtr); +} + +/** Loads SYS_VIRTUAL info for one virtual column +@param[in,out] table table +@param[in] nth_v_col virtual column sequence num +@param[in,out] v_col virtual column +@param[in,out] heap memory heap +*/ +static +void +dict_load_virtual_one_col( + dict_table_t* table, + ulint nth_v_col, + dict_v_col_t* v_col, + mem_heap_t* heap) +{ + dict_table_t* sys_virtual; + dict_index_t* sys_virtual_index; + btr_pcur_t pcur; + dtuple_t* tuple; + dfield_t* dfield; + const rec_t* rec; + byte* buf; + ulint i = 0; + mtr_t mtr; + ulint skipped = 0; + + ut_ad(mutex_own(&dict_sys->mutex)); + + if (v_col->num_base == 0) { + return; + } + + mtr_start(&mtr); + + sys_virtual = dict_table_get_low("SYS_VIRTUAL"); + sys_virtual_index = UT_LIST_GET_FIRST(sys_virtual->indexes); + ut_ad(!dict_table_is_comp(sys_virtual)); + + ut_ad(name_of_col_is(sys_virtual, sys_virtual_index, + DICT_FLD__SYS_VIRTUAL__POS, "POS")); + + tuple = dtuple_create(heap, 2); + + /* table ID field */ + dfield = dtuple_get_nth_field(tuple, 0); + + buf = static_cast(mem_heap_alloc(heap, 8)); + mach_write_to_8(buf, table->id); + + dfield_set_data(dfield, buf, 8); + + /* virtual column pos field */ + dfield = dtuple_get_nth_field(tuple, 1); + + buf = static_cast(mem_heap_alloc(heap, 4)); + ulint vcol_pos = dict_create_v_col_pos(nth_v_col, v_col->m_col.ind); + mach_write_to_4(buf, vcol_pos); + + dfield_set_data(dfield, buf, 4); + + dict_index_copy_types(tuple, sys_virtual_index, 2); + + btr_pcur_open_on_user_rec(sys_virtual_index, tuple, PAGE_CUR_GE, + BTR_SEARCH_LEAF, &pcur, &mtr); + + for (i = 0; i < v_col->num_base + skipped; i++) { + const char* err_msg; + ulint pos; + + ut_ad(btr_pcur_is_on_user_rec(&pcur)); + + rec = btr_pcur_get_rec(&pcur); + + ut_a(btr_pcur_is_on_user_rec(&pcur)); + + err_msg = dict_load_virtual_low(table, heap, + &v_col->base_col[i - skipped], + NULL, + &pos, NULL, rec); + + if (err_msg) { + if (err_msg != dict_load_virtual_del) { + ib::fatal() << err_msg; + } else { + skipped++; + } + } else { + ut_ad(pos == vcol_pos); } btr_pcur_move_to_next_user_rec(&pcur, &mtr); @@ -1445,6 +2026,23 @@ dict_load_columns( mtr_commit(&mtr); } +/** Loads info from SYS_VIRTUAL for virtual columns. 
+@param[in,out] table table +@param[in] heap memory heap +*/ +static +void +dict_load_virtual( + dict_table_t* table, + mem_heap_t* heap) +{ + for (ulint i = 0; i < table->n_v_cols; i++) { + dict_v_col_t* v_col = dict_table_get_nth_v_col(table, i); + + dict_load_virtual_one_col(table, i, v_col, heap); + } +} + /** Error message for a delete-marked record in dict_load_field_low() */ static const char* dict_load_field_del = "delete-marked record in SYS_FIELDS"; @@ -1452,7 +2050,6 @@ static const char* dict_load_field_del = "delete-marked record in SYS_FIELDS"; Loads an index field definition from a SYS_FIELDS record to dict_index_t. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_field_low( /*================*/ @@ -1592,7 +2189,7 @@ dict_load_fields( mtr_t mtr; dberr_t error; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); mtr_start(&mtr); @@ -1630,7 +2227,7 @@ dict_load_fields( goto next_rec; } else if (err_msg) { - fprintf(stderr, "InnoDB: %s\n", err_msg); + ib::error() << err_msg; error = DB_CORRUPTION; goto func_exit; } @@ -1656,7 +2253,6 @@ If allocate=TRUE, we will create a dict_index_t structure and fill it accordingly. If allocated=FALSE, the dict_index_t will be supplied by the caller and filled with information read from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_index_low( /*================*/ @@ -1679,6 +2275,7 @@ dict_load_index_low( ulint n_fields; ulint type; ulint space; + ulint merge_threshold; if (allocate) { /* If allocate=TRUE, no dict_index_t will @@ -1690,7 +2287,27 @@ dict_load_index_low( return(dict_load_index_del); } - if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_INDEXES) { + if (rec_get_n_fields_old(rec) == DICT_NUM_FIELDS__SYS_INDEXES) { + /* MERGE_THRESHOLD exists */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD, &len); + switch (len) { + case 4: + merge_threshold = mach_read_from_4(field); + break; + case UNIV_SQL_NULL: + merge_threshold = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; + break; + default: + return("incorrect MERGE_THRESHOLD length" + " in SYS_INDEXES"); + } + } else if (rec_get_n_fields_old(rec) + == DICT_NUM_FIELDS__SYS_INDEXES - 1) { + /* MERGE_THRESHOLD doesn't exist */ + + merge_threshold = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; + } else { return("wrong number of columns in SYS_INDEXES record"); } @@ -1781,6 +2398,7 @@ err_len: (*index)->id = id; (*index)->page = mach_read_from_4(field); ut_ad((*index)->page); + (*index)->merge_threshold = merge_threshold; return(NULL); } @@ -1810,7 +2428,7 @@ dict_load_indexes( mtr_t mtr; dberr_t error = DB_SUCCESS; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); mtr_start(&mtr); @@ -1845,11 +2463,10 @@ dict_load_indexes( for drop table */ if (dict_table_get_first_index(table) == NULL && !(ignore_err & DICT_ERR_IGNORE_CORRUPT)) { - ib_logf(IB_LOG_LEVEL_WARN, - "Cannot load table %s " - "because it has no indexes in " - "InnoDB internal data dictionary.", - table->name); + ib::warn() << "Cannot load table " + << table->name + << " because it has no indexes in" + " InnoDB internal data dictionary."; error = DB_CORRUPTION; goto func_exit; } @@ -1860,15 +2477,20 @@ dict_load_indexes( rec = btr_pcur_get_rec(&pcur); if ((ignore_err & DICT_ERR_IGNORE_RECOVER_LOCK) - && rec_get_n_fields_old(rec) - == DICT_NUM_FIELDS__SYS_INDEXES) { + && (rec_get_n_fields_old(rec) + == DICT_NUM_FIELDS__SYS_INDEXES + /* a record for older SYS_INDEXES table + (missing 
merge_threshold column) is acceptable. */ + || rec_get_n_fields_old(rec) + == DICT_NUM_FIELDS__SYS_INDEXES - 1)) { const byte* field; ulint len; field = rec_get_nth_field_old( rec, DICT_FLD__SYS_INDEXES__NAME, &len); if (len != UNIV_SQL_NULL - && char(*field) == char(TEMP_INDEX_PREFIX)) { + && static_cast(*field) + == static_cast(*TEMP_INDEX_PREFIX_STR)) { /* Skip indexes whose name starts with TEMP_INDEX_PREFIX, because they will be dropped during crash recovery. */ @@ -1876,8 +2498,8 @@ dict_load_indexes( } } - err_msg = dict_load_index_low(buf, table->name, heap, rec, - TRUE, &index); + err_msg = dict_load_index_low( + buf, table->name.m_name, heap, rec, TRUE, &index); ut_ad((index == NULL && err_msg != NULL) || (index != NULL && err_msg == NULL)); @@ -1887,13 +2509,15 @@ dict_load_indexes( if (dict_table_get_first_index(table) == NULL && !(ignore_err & DICT_ERR_IGNORE_CORRUPT)) { - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to load the " - "clustered index for table %s " - "because of the following error: %s. " - "Refusing to load the rest of the " - "indexes (if any) and the whole table " - "altogether.", table->name, err_msg); + + ib::warn() << "Failed to load the" + " clustered index for table " + << table->name + << " because of the following error: " + << err_msg << "." + " Refusing to load the rest of the" + " indexes (if any) and the whole table" + " altogether."; error = DB_CORRUPTION; goto func_exit; } @@ -1903,7 +2527,7 @@ dict_load_indexes( /* Skip delete-marked records. */ goto next_rec; } else if (err_msg) { - fprintf(stderr, "InnoDB: %s\n", err_msg); + ib::error() << err_msg; if (ignore_err & DICT_ERR_IGNORE_CORRUPT) { goto next_rec; } @@ -1915,10 +2539,10 @@ dict_load_indexes( /* Check whether the index is corrupted */ if (dict_index_is_corrupted(index)) { - ut_print_timestamp(stderr); - fputs(" InnoDB: ", stderr); - dict_index_name_print(stderr, NULL, index); - fputs(" is corrupted\n", stderr); + + ib::error() << "Index " << index->name + << " of table " << table->name + << " is corrupted"; if (!srv_load_corrupted && !(ignore_err & DICT_ERR_IGNORE_CORRUPT) @@ -1934,15 +2558,14 @@ dict_load_indexes( DICT_ERR_IGNORE_CORRUPT 3) if the index corrupted is a secondary index */ - ut_print_timestamp(stderr); - fputs(" InnoDB: load corrupted index ", stderr); - dict_index_name_print(stderr, NULL, index); - putc('\n', stderr); + ib::info() << "Load corrupted index " + << index->name + << " of table " << table->name; } } if (index->type & DICT_FTS - && !DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS)) { + && !dict_table_has_fts_index(table)) { /* This should have been created by now. */ ut_a(table->fts != NULL); DICT_TF2_FLAG_SET(table, DICT_TF2_FTS); @@ -1951,10 +2574,12 @@ dict_load_indexes( /* We check for unsupported types first, so that the subsequent checks are relevant for the supported types. 
*/ if (index->type & ~(DICT_CLUSTERED | DICT_UNIQUE - | DICT_CORRUPT | DICT_FTS)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Unknown type %lu of index %s of table %s", - (ulong) index->type, index->name, table->name); + | DICT_CORRUPT | DICT_FTS + | DICT_SPATIAL | DICT_VIRTUAL)) { + + ib::error() << "Unknown type " << index->type + << " of index " << index->name + << " of table " << table->name; error = DB_UNSUPPORTED; dict_mem_index_free(index); @@ -1963,11 +2588,9 @@ dict_load_indexes( && !table->ibd_file_missing && (!(index->type & DICT_FTS))) { - fprintf(stderr, - "InnoDB: Error: trying to load index %s" - " for table %s\n" - "InnoDB: but the index tree has been freed!\n", - index->name, table->name); + ib::error() << "Trying to load index " << index->name + << " for table " << table->name + << ", but the index tree has been freed!"; if (ignore_err & DICT_ERR_IGNORE_INDEX_ROOT) { /* If caller can tolerate this error, @@ -1978,12 +2601,11 @@ dict_load_indexes( dictionary cache for such metadata corruption, since we would always be able to set it when loading the dictionary cache */ - dict_set_corrupted_index_cache_only( - index, table); + ut_ad(index->table == table); + dict_set_corrupted_index_cache_only(index); - fprintf(stderr, - "InnoDB: Index is corrupt but forcing" - " load into data dictionary\n"); + ib::info() << "Index is corrupt but forcing" + " load into data dictionary"; } else { corrupted: dict_mem_index_free(index); @@ -1993,13 +2615,9 @@ corrupted: } else if (!dict_index_is_clust(index) && NULL == dict_table_get_first_index(table)) { - fputs("InnoDB: Error: trying to load index ", - stderr); - ut_print_name(stderr, NULL, FALSE, index->name); - fputs(" for table ", stderr); - ut_print_name(stderr, NULL, TRUE, table->name); - fputs("\nInnoDB: but the first index" - " is not clustered!\n", stderr); + ib::error() << "Trying to load index " << index->name + << " for table " << table->name + << ", but the first index is not clustered!"; goto corrupted; } else if (dict_is_sys_table(table->id) @@ -2030,8 +2648,16 @@ next_rec: btr_pcur_move_to_next_user_rec(&pcur, &mtr); } + ut_ad(table->fts_doc_id_index == NULL); + + if (table->fts != NULL) { + table->fts_doc_id_index = dict_table_get_index_on_name( + table, FTS_DOC_ID_INDEX_NAME); + } + /* If the table contains FTS indexes, populate table->fts->indexes */ - if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS)) { + if (dict_table_has_fts_index(table)) { + ut_ad(table->fts_doc_id_index != NULL); /* table->fts->indexes should have been created. */ ut_a(table->fts->indexes != NULL); dict_table_get_all_fts_indexes(table, table->fts->indexes); @@ -2044,151 +2670,44 @@ func_exit: return(error); } -/********************************************************************//** -Loads a table definition from a SYS_TABLES record to dict_table_t. +/** Loads a table definition from a SYS_TABLES record to dict_table_t. Does not load any columns or indexes. 
+@param[in] name Table name +@param[in] rec SYS_TABLES record +@param[out,own] table table, or NULL @return error message, or NULL on success */ -UNIV_INTERN +static const char* dict_load_table_low( -/*================*/ - const char* name, /*!< in: table name */ - const rec_t* rec, /*!< in: SYS_TABLES record */ - dict_table_t** table) /*!< out,own: table, or NULL */ + table_name_t& name, + const rec_t* rec, + dict_table_t** table) { - const byte* field; - ulint len; - ulint space; + table_id_t table_id; + ulint space_id; ulint n_cols; - ulint flags = 0; + ulint t_num; + ulint flags; ulint flags2; + ulint n_v_col; - if (rec_get_deleted_flag(rec, 0)) { - return("delete-marked record in SYS_TABLES"); - } - - if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_TABLES) { - return("wrong number of columns in SYS_TABLES record"); - } - - rec_get_nth_field_offs_old( - rec, DICT_FLD__SYS_TABLES__NAME, &len); - if (len == 0 || len == UNIV_SQL_NULL) { -err_len: - return("incorrect column length in SYS_TABLES"); - } - rec_get_nth_field_offs_old( - rec, DICT_FLD__SYS_TABLES__DB_TRX_ID, &len); - if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) { - goto err_len; - } - rec_get_nth_field_offs_old( - rec, DICT_FLD__SYS_TABLES__DB_ROLL_PTR, &len); - if (len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL) { - goto err_len; - } - - rec_get_nth_field_offs_old(rec, DICT_FLD__SYS_TABLES__ID, &len); - if (len != 8) { - goto err_len; - } - - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__N_COLS, &len); - if (len != 4) { - goto err_len; - } - - n_cols = mach_read_from_4(field); - - rec_get_nth_field_offs_old(rec, DICT_FLD__SYS_TABLES__TYPE, &len); - if (len != 4) { - goto err_len; - } - - rec_get_nth_field_offs_old( - rec, DICT_FLD__SYS_TABLES__MIX_ID, &len); - if (len != 8) { - goto err_len; - } - - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); - if (len != 4) { - goto err_len; - } - - /* MIX_LEN may hold additional flags in post-antelope file formats. */ - flags2 = mach_read_from_4(field); - - /* DICT_TF2_FTS will be set when indexes is being loaded */ - flags2 &= ~DICT_TF2_FTS; - - rec_get_nth_field_offs_old( - rec, DICT_FLD__SYS_TABLES__CLUSTER_ID, &len); - if (len != UNIV_SQL_NULL) { - goto err_len; - } - - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__SPACE, &len); - if (len != 4) { - goto err_len; + const char* error_text = dict_sys_tables_rec_check(rec); + if (error_text != NULL) { + return(error_text); } - space = mach_read_from_4(field); - - /* Check if the tablespace exists and has the right name */ - flags = dict_sys_tables_get_flags(rec); + dict_sys_tables_rec_read(rec, name, &table_id, &space_id, + &t_num, &flags, &flags2); - if (UNIV_UNLIKELY(flags == ULINT_UNDEFINED)) { - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__TYPE, &len); - ut_ad(len == 4); /* this was checked earlier */ - flags = mach_read_from_4(field); - - ut_print_timestamp(stderr); - fputs(" InnoDB: Error: table ", stderr); - ut_print_filename(stderr, name); - fprintf(stderr, "\n" - "InnoDB: in InnoDB data dictionary" - " has unknown type %lx.\n", - (ulong) flags); + if (flags == ULINT_UNDEFINED) { return("incorrect flags in SYS_TABLES"); } - /* The high-order bit of N_COLS is the "compact format" flag. - For tables in that format, MIX_LEN may hold additional flags. 
*/ - if (n_cols & DICT_N_COLS_COMPACT) { - ut_ad(flags & DICT_TF_COMPACT); - - if (flags2 & ~DICT_TF2_BIT_MASK) { - ut_print_timestamp(stderr); - fputs(" InnoDB: Warning: table ", stderr); - ut_print_filename(stderr, name); - fprintf(stderr, "\n" - "InnoDB: in InnoDB data dictionary" - " has unknown flags %lx.\n", - (ulong) flags2); - - /* Clean it up and keep going */ - flags2 &= DICT_TF2_BIT_MASK; - } - } else { - /* Do not trust the MIX_LEN field when the - row format is Redundant. */ - flags2 = 0; - } + dict_table_decode_n_col(t_num, &n_cols, &n_v_col); - /* See if the tablespace is available. */ *table = dict_mem_table_create( - name, space, n_cols & ~DICT_N_COLS_COMPACT, flags, flags2); - - field = rec_get_nth_field_old(rec, DICT_FLD__SYS_TABLES__ID, &len); - ut_ad(len == 8); /* this was checked earlier */ - - (*table)->id = mach_read_from_8(field); - + name.m_name, space_id, n_cols + n_v_col, n_v_col, flags, flags2); + (*table)->id = table_id; (*table)->ibd_file_missing = FALSE; return(NULL); @@ -2200,47 +2719,44 @@ table->data_dir_path and replace the 'databasename/tablename.ibd' portion with 'tablename'. This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path. Make this data directory path only if it has not yet been saved. */ -UNIV_INTERN void dict_save_data_dir_path( /*====================*/ dict_table_t* table, /*!< in/out: table */ char* filepath) /*!< in: filepath of tablespace */ { - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); ut_a(DICT_TF_HAS_DATA_DIR(table->flags)); ut_a(!table->data_dir_path); ut_a(filepath); /* Be sure this filepath is not the default filepath. */ - char* default_filepath = fil_make_ibd_name(table->name, false); - if (strcmp(filepath, default_filepath)) { - ulint pathlen = strlen(filepath); - ut_a(pathlen < OS_FILE_MAX_PATH); - ut_a(0 == strcmp(filepath + pathlen - 4, ".ibd")); - - table->data_dir_path = mem_heap_strdup(table->heap, filepath); - os_file_make_data_dir_path(table->data_dir_path); - } else { - /* This does not change SYS_DATAFILES or SYS_TABLES - or FSP_FLAGS on the header page of the tablespace, - but it makes dict_table_t consistent */ - table->flags &= ~DICT_TF_MASK_DATA_DIR; + char* default_filepath = fil_make_filepath( + NULL, table->name.m_name, IBD, false); + if (default_filepath) { + if (0 != strcmp(filepath, default_filepath)) { + ulint pathlen = strlen(filepath); + ut_a(pathlen < OS_FILE_MAX_PATH); + ut_a(0 == strcmp(filepath + pathlen - 4, DOT_IBD)); + + table->data_dir_path = mem_heap_strdup( + table->heap, filepath); + os_file_make_data_dir_path(table->data_dir_path); + } + + ut_free(default_filepath); } - mem_free(default_filepath); } -/*****************************************************************//** -Make sure the data_file_name is saved in dict_table_t if needed. Try to -read it from the file dictionary first, then from SYS_DATAFILES. */ -UNIV_INTERN +/** Make sure the data_dir_path is saved in dict_table_t if DATA DIRECTORY +was used. Try to read it from the fil_system first, then from SYS_DATAFILES. 
+@param[in] table Table object +@param[in] dict_mutex_own true if dict_sys->mutex is owned already */ void dict_get_and_save_data_dir_path( -/*============================*/ - dict_table_t* table, /*!< in/out: table */ - bool dict_mutex_own) /*!< in: true if dict_sys->mutex - is owned already */ + dict_table_t* table, + bool dict_mutex_own) { bool is_temp = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY); @@ -2250,15 +2766,23 @@ dict_get_and_save_data_dir_path( if (!dict_mutex_own) { dict_mutex_enter_for_mysql(); } - if (!path) { - path = dict_get_first_path( - table->space, table->name); + + if (path == NULL) { + path = dict_get_first_path(table->space); } - if (path) { + if (path != NULL) { table->flags |= (1 << DICT_TF_POS_DATA_DIR); dict_save_data_dir_path(table, path); - mem_free(path); + ut_free(path); + } + + if (table->data_dir_path == NULL) { + /* Since we did not set the table data_dir_path, + unset the flag. This does not change SYS_DATAFILES + or SYS_TABLES or FSP_FLAGS on the header page of the + tablespace, but it makes dict_table_t consistent. */ + table->flags &= ~DICT_TF_MASK_DATA_DIR; } if (!dict_mutex_own) { @@ -2267,25 +2791,266 @@ dict_get_and_save_data_dir_path( } } -/********************************************************************//** -Loads a table definition and also all its index definitions, and also +/** Make sure the tablespace name is saved in dict_table_t if the table +uses a general tablespace. +Try to read it from the fil_system_t first, then from SYS_TABLESPACES. +@param[in] table Table object +@param[in] dict_mutex_own) true if dict_sys->mutex is owned already */ +void +dict_get_and_save_space_name( + dict_table_t* table, + bool dict_mutex_own) +{ + /* Do this only for general tablespaces. */ + if (!DICT_TF_HAS_SHARED_SPACE(table->flags)) { + return; + } + + bool use_cache = true; + if (table->tablespace != NULL) { + + if (srv_sys_tablespaces_open + && dict_table_has_temp_general_tablespace_name( + table->tablespace)) { + /* We previous saved the temporary name, + get the real one now. */ + use_cache = false; + } else { + /* Keep and use this name */ + return; + } + } + + if (use_cache) { + fil_space_t* space = fil_space_acquire_silent(table->space); + + if (space != NULL) { + /* Use this name unless it is a temporary general + tablespace name and we can now replace it. */ + if (!srv_sys_tablespaces_open + || !dict_table_has_temp_general_tablespace_name( + space->name)) { + + /* Use this tablespace name */ + table->tablespace = mem_heap_strdup( + table->heap, space->name); + + fil_space_release(space); + return; + } + fil_space_release(space); + } + } + + /* Read it from the dictionary. */ + if (srv_sys_tablespaces_open) { + if (!dict_mutex_own) { + dict_mutex_enter_for_mysql(); + } + + table->tablespace = dict_get_space_name( + table->space, table->heap); + + if (!dict_mutex_own) { + dict_mutex_exit_for_mysql(); + } + } +} + +/** Loads a table definition and also all its index definitions, and also the cluster definition if the table is a member in a cluster. Also loads all foreign key constraints where the foreign key is in the table or where -a foreign key references columns in this table. Adds all these to the data -dictionary cache. +a foreign key references columns in this table. 
+@param[in] name Table name in the dbname/tablename format +@param[in] cached true=add to cache, false=do not +@param[in] ignore_err Error to be ignored when loading + table and its index definition @return table, NULL if does not exist; if the table is stored in an -.ibd file, but the file does not exist, then we set the -ibd_file_missing flag TRUE in the table object we return */ -UNIV_INTERN +.ibd file, but the file does not exist, then we set the ibd_file_missing +flag in the table object we return. */ dict_table_t* dict_load_table( -/*============*/ - const char* name, /*!< in: table name in the - databasename/tablename format */ - ibool cached, /*!< in: TRUE=add to cache, FALSE=do not */ + const char* name, + bool cached, dict_err_ignore_t ignore_err) - /*!< in: error to be ignored when loading - table and its indexes' definition */ +{ + dict_names_t fk_list; + dict_table_t* result; + dict_names_t::iterator i; + table_name_t table_name; + + DBUG_ENTER("dict_load_table"); + DBUG_PRINT("dict_load_table", ("loading table: '%s'", name)); + + ut_ad(mutex_own(&dict_sys->mutex)); + + table_name.m_name = const_cast(name); + + result = dict_table_check_if_in_cache_low(name); + + if (!result) { + result = dict_load_table_one(table_name, cached, ignore_err, + fk_list); + while (!fk_list.empty()) { + table_name_t fk_table_name; + dict_table_t* fk_table; + + fk_table_name.m_name = + const_cast(fk_list.front()); + fk_table = dict_table_check_if_in_cache_low( + fk_table_name.m_name); + if (!fk_table) { + dict_load_table_one(fk_table_name, cached, + ignore_err, fk_list); + } + fk_list.pop_front(); + } + } + + DBUG_RETURN(result); +} + +/** Opens a tablespace for dict_load_table_one() +@param[in,out] table A table that refers to the tablespace to open +@param[in] heap A memory heap +@param[in] ignore_err Whether to ignore an error. */ +UNIV_INLINE +void +dict_load_tablespace( + dict_table_t* table, + mem_heap_t* heap, + dict_err_ignore_t ignore_err) +{ + /* The system tablespace is always available. */ + if (is_system_tablespace(table->space)) { + return; + } + + if (table->flags2 & DICT_TF2_DISCARDED) { + ib::warn() << "Tablespace for table " << table->name + << " is set as discarded."; + table->ibd_file_missing = TRUE; + return; + } + + if (dict_table_is_temporary(table)) { + /* Do not bother to retry opening temporary tables. */ + table->ibd_file_missing = TRUE; + return; + } + + /* A file-per-table table name is also the tablespace name. + A general tablespace name is not the same as the table name. + Use the general tablespace name if it can be read from the + dictionary, if not use 'innodb_general_##. */ + char* shared_space_name = NULL; + char* space_name; + if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + if (srv_sys_tablespaces_open) { + shared_space_name = + dict_get_space_name(table->space, NULL); + + } else { + /* Make the temporary tablespace name. */ + shared_space_name = static_cast( + ut_malloc_nokey( + strlen(general_space_name) + 20)); + + sprintf(shared_space_name, "%s_" ULINTPF, + general_space_name, + static_cast(table->space)); + } + space_name = shared_space_name; + } else { + space_name = table->name.m_name; + } + + /* The tablespace may already be open. */ + if (fil_space_for_table_exists_in_mem( + table->space, space_name, false, + true, heap, table->id, table)) { + ut_free(shared_space_name); + return; + } + + if (!(ignore_err & DICT_ERR_IGNORE_RECOVER_LOCK)) { + ib::error() << "Failed to find tablespace for table " + << table->name << " in the cache. 
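The fk_list loop in dict_load_table() above replaces recursive loading of referenced tables with an explicit work list: dict_load_table_one() appends the names of referenced tables that are not yet cached, and the caller drains the list iteratively, so a cycle of foreign keys cannot recurse. A minimal stand-alone sketch of the same work-list pattern (the dependency map, load_one() helper and table names below are illustrative only, not InnoDB APIs):

	#include <deque>
	#include <iostream>
	#include <map>
	#include <set>
	#include <string>
	#include <vector>

	typedef std::map<std::string, std::vector<std::string> > fk_map_t;

	/* Load one table; referenced tables that are not loaded yet are
	pushed onto the work list instead of being loaded recursively. */
	static void load_one(const std::string& name, const fk_map_t& refs,
			     std::set<std::string>& loaded,
			     std::deque<std::string>& worklist)
	{
		loaded.insert(name);
		fk_map_t::const_iterator it = refs.find(name);
		if (it == refs.end()) {
			return;
		}
		for (size_t i = 0; i < it->second.size(); i++) {
			if (loaded.count(it->second[i]) == 0) {
				worklist.push_back(it->second[i]);
			}
		}
	}

	int main()
	{
		fk_map_t refs;
		refs["t1"].push_back("t2");	/* t1 references t2 */
		refs["t2"].push_back("t3");	/* t2 references t3 */
		refs["t3"].push_back("t1");	/* cycle back to t1: no infinite loop */

		std::set<std::string> loaded;
		std::deque<std::string> worklist;

		load_one("t1", refs, loaded, worklist);
		while (!worklist.empty()) {
			std::string fk = worklist.front();
			if (loaded.count(fk) == 0) {
				load_one(fk, refs, loaded, worklist);
			}
			worklist.pop_front();
		}
		std::cout << loaded.size() << " tables loaded\n";	/* prints 3 */
		return 0;
	}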
Attempting" + " to load the tablespace with space id " + << table->space; + } + + /* Use the remote filepath if needed. This parameter is optional + in the call to fil_ibd_open(). If not supplied, it will be built + from the space_name. */ + char* filepath = NULL; + if (DICT_TF_HAS_DATA_DIR(table->flags)) { + /* This will set table->data_dir_path from either + fil_system or SYS_DATAFILES */ + dict_get_and_save_data_dir_path(table, true); + + if (table->data_dir_path) { + filepath = fil_make_filepath( + table->data_dir_path, + table->name.m_name, IBD, true); + } + + } else if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + /* Set table->tablespace from either + fil_system or SYS_TABLESPACES */ + dict_get_and_save_space_name(table, true); + + /* Set the filepath from either + fil_system or SYS_DATAFILES. */ + filepath = dict_get_first_path(table->space); + if (filepath == NULL) { + ib::warn() << "Could not find the filepath" + " for table " << table->name << + ", space ID " << table->space; + } + } + + /* Try to open the tablespace. We set the 2nd param (fix_dict) to + false because we do not have an x-lock on dict_operation_lock */ + ulint fsp_flags = dict_tf_to_fsp_flags(table->flags, false); + dberr_t err = fil_ibd_open( + true, false, FIL_TYPE_TABLESPACE, table->space, + fsp_flags, space_name, filepath, table); + + if (err != DB_SUCCESS) { + /* We failed to find a sensible tablespace file */ + table->ibd_file_missing = TRUE; + } + + ut_free(shared_space_name); + ut_free(filepath); +} + +/** Loads a table definition and also all its index definitions. + +Loads those foreign key constraints whose referenced table is already in +dictionary cache. If a foreign key constraint is not loaded, then the +referenced table is pushed into the output stack (fk_tables), if it is not +NULL. These tables must be subsequently loaded so that all the foreign +key constraints are loaded into memory. + +@param[in] name Table name in the db/tablename format +@param[in] cached true=add to cache, false=do not +@param[in] ignore_err Error to be ignored when loading table + and its index definition +@param[out] fk_tables Related table names that must also be + loaded to ensure that all foreign key + constraints are loaded. 
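When SYS_TABLESPACES cannot be read yet, dict_load_tablespace() above synthesizes a placeholder tablespace name of the form "<general prefix>_<space id>". A small illustration of that kind of name construction, sized exactly for the formatted result; the prefix constant and helper below are stand-ins for the sketch, not the InnoDB definitions:

	#include <cstdio>
	#include <cstdlib>

	/* Stand-in for the real prefix (general_space_name in the patch). */
	static const char fallback_prefix[] = "innodb_general";

	/* Build "<prefix>_<space_id>" in a heap buffer sized exactly for the
	formatted result. The caller frees the returned string with free(). */
	static char* make_fallback_space_name(unsigned long space_id)
	{
		int len = std::snprintf(NULL, 0, "%s_%lu",
					fallback_prefix, space_id);
		char* name = static_cast<char*>(std::malloc(len + 1));
		if (name != NULL) {
			std::snprintf(name, len + 1, "%s_%lu",
				      fallback_prefix, space_id);
		}
		return name;
	}

	int main()
	{
		char* name = make_fallback_space_name(57);
		if (name != NULL) {
			std::printf("%s\n", name);	/* prints innodb_general_57 */
			std::free(name);
		}
		return 0;
	}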
+@return table, NULL if does not exist; if the table is stored in an +.ibd file, but the file does not exist, then we set the +ibd_file_missing flag TRUE in the table object we return */ +static +dict_table_t* +dict_load_table_one( + table_name_t& name, + bool cached, + dict_err_ignore_t ignore_err, + dict_names_t& fk_tables) { dberr_t err; dict_table_t* table; @@ -2298,11 +3063,13 @@ dict_load_table( const rec_t* rec; const byte* field; ulint len; - char* filepath = NULL; const char* err_msg; mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + DBUG_ENTER("dict_load_table_one"); + DBUG_PRINT("dict_load_table_one", ("table: %s", name.m_name)); + + ut_ad(mutex_own(&dict_sys->mutex)); heap = mem_heap_create(32000); @@ -2325,7 +3092,7 @@ dict_load_table( tuple = dtuple_create(heap, 1); dfield = dtuple_get_nth_field(tuple, 0); - dfield_set_data(dfield, name, ut_strlen(name)); + dfield_set_data(dfield, name.m_name, ut_strlen(name.m_name)); dict_index_copy_types(tuple, sys_index, 1); btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE, @@ -2340,14 +3107,15 @@ err_exit: mtr_commit(&mtr); mem_heap_free(heap); - return(NULL); + DBUG_RETURN(NULL); } field = rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLES__NAME, &len); /* Check if the table name in record is the searched one */ - if (len != ut_strlen(name) || ut_memcmp(name, field, len) != 0) { + if (len != ut_strlen(name.m_name) + || 0 != ut_memcmp(name.m_name, field, len)) { goto err_exit; } @@ -2356,79 +3124,19 @@ err_exit: if (err_msg) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: %s\n", err_msg); + ib::error() << err_msg; goto err_exit; } - char table_name[MAX_FULL_NAME_LEN + 1]; - - innobase_format_name(table_name, sizeof(table_name), name, FALSE); - btr_pcur_close(&pcur); mtr_commit(&mtr); - if (table->space == 0) { - /* The system tablespace is always available. */ - } else if (table->flags2 & DICT_TF2_DISCARDED) { - - ib_logf(IB_LOG_LEVEL_WARN, - "Table '%s' tablespace is set as discarded.", - table_name); - - table->ibd_file_missing = TRUE; - - } else if (!fil_space_for_table_exists_in_mem( - table->space, name, FALSE, FALSE, true, heap, - table->id)) { - - if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)) { - /* Do not bother to retry opening temporary tables. */ - table->ibd_file_missing = TRUE; - - } else { - if (!(ignore_err & DICT_ERR_IGNORE_RECOVER_LOCK)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Failed to find tablespace for " - "table '%s' in the cache. " - "Attempting to load the tablespace " - "with space id %lu.", - table_name, (ulong) table->space); - } - - /* Use the remote filepath if needed. */ - /* This needs to be added to the tablex1 - from SYS_DATAFILES */ - dict_get_and_save_data_dir_path(table, true); - - if (table->data_dir_path) { - filepath = os_file_make_remote_pathname( - table->data_dir_path, - table->name, "ibd"); - } - - /* Try to open the tablespace. 
We set the - 2nd param (fix_dict = false) here because we - do not have an x-lock on dict_operation_lock */ - err = fil_open_single_table_tablespace( - true, false, table->space, - dict_tf_to_fsp_flags(table->flags), - name, filepath, table); - - if (err != DB_SUCCESS) { - /* We failed to find a sensible - tablespace file */ - - table->ibd_file_missing = TRUE; - } - if (filepath) { - mem_free(filepath); - } - } - } + dict_load_tablespace(table, heap, ignore_err); dict_load_columns(table, heap); + dict_load_virtual(table, heap); + if (cached) { dict_table_add_to_cache(table, TRUE, heap); } else { @@ -2453,13 +3161,11 @@ err_exit: /* Refuse to load the table if the table has a corrupted cluster index */ if (!srv_load_corrupted) { - fprintf(stderr, "InnoDB: Error: Load table "); - ut_print_name(stderr, NULL, TRUE, table->name); - fprintf(stderr, " failed, the table has corrupted" - " clustered indexes. Turn on" - " 'innodb_force_load_corrupted'" - " to drop it\n"); + ib::error() << "Load table " << table->name + << " failed, the table has" + " corrupted clustered indexes. Turn on" + " 'innodb_force_load_corrupted' to drop it"; dict_table_remove_from_cache(table); table = NULL; goto func_exit; @@ -2484,15 +3190,15 @@ err_exit: if (!cached || table->ibd_file_missing) { /* Don't attempt to load the indexes from disk. */ } else if (err == DB_SUCCESS) { - err = dict_load_foreigns(table->name, NULL, true, true, - ignore_err); + err = dict_load_foreigns(table->name.m_name, NULL, + true, true, + ignore_err, fk_tables); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Load table '%s' failed, the table has missing " - "foreign key indexes. Turn off " - "'foreign_key_checks' and try again.", - table->name); + ib::warn() << "Load table " << table->name + << " failed, the table has missing" + " foreign key indexes. Turn off" + " 'foreign_key_checks' and try again."; dict_table_remove_from_cache(table); table = NULL; @@ -2547,13 +3253,12 @@ func_exit: ut_ad(err != DB_SUCCESS || dict_foreign_set_validate(*table)); - return(table); + DBUG_RETURN(table); } /***********************************************************************//** Loads a table object based on the table id. -@return table; NULL if table does not exist */ -UNIV_INTERN +@return table; NULL if table does not exist */ dict_table_t* dict_load_table_on_id( /*==================*/ @@ -2574,7 +3279,7 @@ dict_load_table_on_id( dict_table_t* table; mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); table = NULL; @@ -2634,10 +3339,9 @@ check_rec: field = rec_get_nth_field_old(rec, DICT_FLD__SYS_TABLE_IDS__NAME, &len); /* Load the table definition to memory */ - table = dict_load_table( - mem_heap_strdupl( - heap, (char*) field, len), - TRUE, ignore_err); + char* table_name = mem_heap_strdupl( + heap, (char*) field, len); + table = dict_load_table(table_name, true, ignore_err); } } } @@ -2746,7 +3450,6 @@ dict_table_open_on_index_id( This function is called when the database is booted. Loads system table index definitions except for the clustered index which is added to the dictionary cache at booting before calling this function. 
*/ -UNIV_INTERN void dict_load_sys_table( /*================*/ @@ -2754,7 +3457,7 @@ dict_load_sys_table( { mem_heap_t* heap; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); heap = mem_heap_create(1000); @@ -2791,7 +3494,7 @@ dict_load_foreign_cols( mtr_t mtr; size_t id_len; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); id_len = strlen(foreign->id); @@ -2848,20 +3551,21 @@ dict_load_foreign_cols( rec, DICT_FLD__SYS_FOREIGN_COLS__REF_COL_NAME, &ref_col_name_len); - ib_logf(IB_LOG_LEVEL_ERROR, - "Unable to load columns names for foreign " - "key '%s' because it was not found in " - "InnoDB internal table SYS_FOREIGN_COLS. The " - "closest entry we found is: " - "(ID='%.*s', POS=%lu, FOR_COL_NAME='%.*s', " - "REF_COL_NAME='%.*s')", - foreign->id, - (int) len, field, - mach_read_from_4(pos), - (int) for_col_name_len, for_col_name, - (int) ref_col_name_len, ref_col_name); - - ut_error; + ib::fatal sout; + + sout << "Unable to load column names for foreign" + " key '" << foreign->id + << "' because it was not found in" + " InnoDB internal table SYS_FOREIGN_COLS. The" + " closest entry we found is:" + " (ID='"; + sout.write(field, len); + sout << "', POS=" << mach_read_from_4(pos) + << ", FOR_COL_NAME='"; + sout.write(for_col_name, for_col_name_len); + sout << "', REF_COL_NAME='"; + sout.write(ref_col_name, ref_col_name_len); + sout << "')"; } field = rec_get_nth_field_old( @@ -2887,8 +3591,9 @@ dict_load_foreign_cols( } /***********************************************************************//** -Loads a foreign key constraint to the dictionary cache. -@return DB_SUCCESS or error code */ +Loads a foreign key constraint to the dictionary cache. If the referenced +table is not yet loaded, it is added in the output parameter (fk_tables). +@return DB_SUCCESS or error code */ static MY_ATTRIBUTE((nonnull(1), warn_unused_result)) dberr_t dict_load_foreign( @@ -2906,8 +3611,15 @@ dict_load_foreign( bool check_charsets, /*!< in: whether to check charset compatibility */ - dict_err_ignore_t ignore_err) + dict_err_ignore_t ignore_err, /*!< in: error to be ignored */ + dict_names_t& fk_tables) + /*!< out: the foreign key constraint is added + to the dictionary cache only if the referenced + table is already in cache. Otherwise, the + foreign key constraint is not added to cache, + and the referenced table is added to this + stack. 
*/ { dict_foreign_t* foreign; dict_table_t* sys_foreign; @@ -2925,7 +3637,11 @@ dict_load_foreign( dict_table_t* ref_table; size_t id_len; - ut_ad(mutex_own(&(dict_sys->mutex))); + DBUG_ENTER("dict_load_foreign"); + DBUG_PRINT("dict_load_foreign", + ("id: '%s', check_recursive: %d", id, check_recursive)); + + ut_ad(mutex_own(&dict_sys->mutex)); id_len = strlen(id); @@ -2952,16 +3668,15 @@ dict_load_foreign( || rec_get_deleted_flag(rec, 0)) { /* Not found */ - fprintf(stderr, - "InnoDB: Error: cannot load foreign constraint " - "%s: could not find the relevant record in " - "SYS_FOREIGN\n", id); + ib::error() << "Cannot load foreign constraint " << id + << ": could not find the relevant record in " + << "SYS_FOREIGN"; btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap2); - return(DB_ERROR); + DBUG_RETURN(DB_ERROR); } field = rec_get_nth_field_old(rec, DICT_FLD__SYS_FOREIGN__ID, &len); @@ -2969,16 +3684,19 @@ dict_load_foreign( /* Check if the id in record is the searched one */ if (len != id_len || ut_memcmp(id, field, len) != 0) { - fprintf(stderr, - "InnoDB: Error: cannot load foreign constraint " - "%s: found %.*s instead in SYS_FOREIGN\n", - id, (int) len, field); + { + ib::error err; + err << "Cannot load foreign constraint " << id + << ": found "; + err.write(field, len); + err << " instead in SYS_FOREIGN"; + } btr_pcur_close(&pcur); mtr_commit(&mtr); mem_heap_free(heap2); - return(DB_ERROR); + DBUG_RETURN(DB_ERROR); } /* Read the table names and the number of columns associated @@ -3008,6 +3726,8 @@ dict_load_foreign( foreign->heap, (char*) field, len); dict_mem_foreign_table_name_lookup_set(foreign, TRUE); + const ulint foreign_table_name_len = len; + field = rec_get_nth_field_old( rec, DICT_FLD__SYS_FOREIGN__REF_NAME, &len); foreign->referenced_table_name = mem_heap_strdupl( @@ -3020,54 +3740,33 @@ dict_load_foreign( dict_load_foreign_cols(foreign); ref_table = dict_table_check_if_in_cache_low( - foreign->referenced_table_name_lookup); - - /* We could possibly wind up in a deep recursive calls if - we call dict_table_get_low() again here if there - is a chain of tables concatenated together with - foreign constraints. In such case, each table is - both a parent and child of the other tables, and - act as a "link" in such table chains. - To avoid such scenario, we would need to check the - number of ancesters the current table has. If that - exceeds DICT_FK_MAX_CHAIN_LEN, we will stop loading - the child table. - Foreign constraints are loaded in a Breath First fashion, - that is, the index on FOR_NAME is scanned first, and then - index on REF_NAME. So foreign constrains in which - current table is a child (foreign table) are loaded first, - and then those constraints where current table is a - parent (referenced) table. - Thus we could check the parent (ref_table) table's - reference count (fk_max_recusive_level) to know how deep the - recursive call is. If the parent table (ref_table) is already - loaded, and its fk_max_recusive_level is larger than - DICT_FK_MAX_CHAIN_LEN, we will stop the recursive loading - by skipping loading the child table. It will not affect foreign - constraint check for DMLs since child table will be loaded - at that time for the constraint check. */ - if (!ref_table - || ref_table->fk_max_recusive_level < DICT_FK_MAX_RECURSIVE_LOAD) { - - /* If the foreign table is not yet in the dictionary cache, we - have to load it so that we are able to make type comparisons - in the next function call. 
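The hunks above replace fprintf(stderr, ...) calls with stream objects such as ib::error and ib::fatal, which collect a message with operator<< and emit it when the object goes out of scope; that is apparently why one call site wraps "ib::error err;" in its own block, so the message is flushed before btr_pcur_close() and the other cleanup that follows. A rough, self-contained sketch of that RAII logging idea, assuming nothing about the real ib:: classes:

	#include <iostream>
	#include <sstream>
	#include <string>

	namespace demo {

	/* Collects a message and writes one complete line on destruction,
	so a multi-part message cannot be interleaved with other output. */
	class error {
	public:
		~error() { std::cerr << "[ERROR] " << m_oss.str() << std::endl; }

		template <typename T>
		error& operator<<(const T& value)
		{
			m_oss << value;
			return *this;
		}

		/* Counterpart of the write(buf, len) helper used above for
		names read straight from records, without a terminating NUL. */
		error& write(const char* buf, size_t len)
		{
			m_oss.write(buf, static_cast<std::streamsize>(len));
			return *this;
		}

	private:
		std::ostringstream m_oss;
	};

	}	/* namespace demo */

	int main()
	{
		const char raw_name[] = {'t', '1', '_', 'f', 'k'};	/* no NUL */
		{
			demo::error err;
			err << "Cannot load foreign constraint " << 42
			    << ": found ";
			err.write(raw_name, sizeof(raw_name));
			err << " instead in SYS_FOREIGN";
		}	/* the message is printed here, when err is destroyed */
		return 0;
	}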
*/ - - for_table = dict_table_get_low(foreign->foreign_table_name_lookup); - - if (for_table && ref_table && check_recursive) { - /* This is to record the longest chain of ancesters - this table has, if the parent has more ancesters - than this table has, record it after add 1 (for this - parent */ - if (ref_table->fk_max_recusive_level - >= for_table->fk_max_recusive_level) { - for_table->fk_max_recusive_level = - ref_table->fk_max_recusive_level + 1; - } - } + foreign->referenced_table_name_lookup); + for_table = dict_table_check_if_in_cache_low( + foreign->foreign_table_name_lookup); + + if (!for_table) { + /* To avoid recursively loading the tables related through + the foreign key constraints, the child table name is saved + here. The child table will be loaded later, along with its + foreign key constraint. */ + + lint old_size = mem_heap_get_size(ref_table->heap); + + ut_a(ref_table != NULL); + fk_tables.push_back( + mem_heap_strdupl(ref_table->heap, + foreign->foreign_table_name_lookup, + foreign_table_name_len)); + + lint new_size = mem_heap_get_size(ref_table->heap); + dict_sys->size += new_size - old_size; + + dict_foreign_remove_from_cache(foreign); + DBUG_RETURN(DB_SUCCESS); } + ut_a(for_table || ref_table); + /* Note that there may already be a foreign constraint object in the dictionary cache for this constraint: then the following call only sets the pointers in it to point to the appropriate table @@ -3076,18 +3775,21 @@ dict_load_foreign( a new foreign key constraint but loading one from the data dictionary. */ - return(dict_foreign_add_to_cache(foreign, col_names, check_charsets, - ignore_err)); + DBUG_RETURN(dict_foreign_add_to_cache(foreign, col_names, + check_charsets, + ignore_err)); } /***********************************************************************//** Loads foreign key constraints where the table is either the foreign key holder or where the table is referenced by a foreign key. Adds these -constraints to the data dictionary. Note that we know that the dictionary -cache already contains all constraints where the other relevant table is -already in the dictionary cache. -@return DB_SUCCESS or error code */ -UNIV_INTERN +constraints to the data dictionary. + +The foreign key constraint is loaded only if the referenced table is also +in the dictionary cache. If the referenced table is not in dictionary +cache, then it is added to the output parameter (fk_tables). + +@return DB_SUCCESS or error code */ dberr_t dict_load_foreigns( const char* table_name, /*!< in: table name */ @@ -3098,8 +3800,12 @@ dict_load_foreigns( chained by FK */ bool check_charsets, /*!< in: whether to check charset compatibility */ - dict_err_ignore_t ignore_err) /*!< in: error to be ignored */ -/*===============*/ + dict_err_ignore_t ignore_err, /*!< in: error to be ignored */ + dict_names_t& fk_tables) + /*!< out: stack of table + names which must be loaded + subsequently to load all the + foreign key constraints. 
*/ { ulint tuple_buf[(DTUPLE_EST_ALLOC(1) + sizeof(ulint) - 1) / sizeof(ulint)]; @@ -3114,18 +3820,17 @@ dict_load_foreigns( dberr_t err; mtr_t mtr; - ut_ad(mutex_own(&(dict_sys->mutex))); + DBUG_ENTER("dict_load_foreigns"); + + ut_ad(mutex_own(&dict_sys->mutex)); sys_foreign = dict_table_get_low("SYS_FOREIGN"); if (sys_foreign == NULL) { /* No foreign keys defined yet in this database */ - fprintf(stderr, - "InnoDB: Error: no foreign key system tables" - " in the database\n"); - - return(DB_ERROR); + ib::info() << "No foreign key system tables in the database"; + DBUG_RETURN(DB_ERROR); } ut_ad(!dict_table_is_comp(sys_foreign)); @@ -3139,7 +3844,7 @@ dict_load_foreigns( ut_ad(!dict_index_is_clust(sec_index)); start_load: - tuple = dtuple_create_from_mem(tuple_buf, sizeof(tuple_buf), 1); + tuple = dtuple_create_from_mem(tuple_buf, sizeof(tuple_buf), 1, 0); dfield = dtuple_get_nth_field(tuple, 0); dfield_set_data(dfield, table_name, ut_strlen(table_name)); @@ -3211,12 +3916,13 @@ loop: /* Load the foreign constraint definition to the dictionary cache */ err = dict_load_foreign(fk_id, col_names, - check_recursive, check_charsets, ignore_err); + check_recursive, check_charsets, ignore_err, + fk_tables); if (err != DB_SUCCESS) { btr_pcur_close(&pcur); - return(err); + DBUG_RETURN(err); } mtr_start(&mtr); @@ -3245,5 +3951,5 @@ load_next_index: goto start_load; } - return(DB_SUCCESS); + DBUG_RETURN(DB_SUCCESS); } diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index f8ea0005665..89e9861db45 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -24,6 +24,11 @@ Data dictionary memory object creation Created 1/8/1996 Heikki Tuuri ***********************************************************************/ +#ifndef UNIV_HOTBACKUP +#include "ha_prototypes.h" +#include +#endif /* !UNIV_HOTBACKUP */ + #include "dict0mem.h" #ifdef UNIV_NONINL @@ -36,25 +41,17 @@ Created 1/8/1996 Heikki Tuuri #include "dict0dict.h" #include "fts0priv.h" #include "ut0crc32.h" + #ifndef UNIV_HOTBACKUP -# include "ha_prototypes.h" /* innobase_casedn_str(), - innobase_get_lower_case_table_names */ -# include "mysql_com.h" /* NAME_LEN */ # include "lock0lock.h" #endif /* !UNIV_HOTBACKUP */ -#ifdef UNIV_BLOB_DEBUG -# include "ut0rbt.h" -#endif /* UNIV_BLOB_DEBUG */ + +#include "sync0sync.h" #include #define DICT_HEAP_SIZE 100 /*!< initial memory heap size when creating a table or index object */ -#ifdef UNIV_PFS_MUTEX -/* Key to register autoinc_mutex with performance schema */ -UNIV_INTERN mysql_pfs_key_t autoinc_mutex_key; -#endif /* UNIV_PFS_MUTEX */ - /** System databases */ static const char* innobase_system_databases[] = { "mysql/", @@ -64,20 +61,55 @@ static const char* innobase_system_databases[] = { }; /** An interger randomly initialized at startup used to make a temporary -table name as unique as possible. */ +table name as unuique as possible. */ static ib_uint32_t dict_temp_file_num; +/** Display an identifier. +@param[in,out] s output stream +@param[in] id_name SQL identifier (other than table name) +@return the output stream */ +std::ostream& +operator<<( + std::ostream& s, + const id_name_t& id_name) +{ + const char q = '`'; + const char* c = id_name; + s << q; + for (; *c != 0; c++) { + if (*c == q) { + s << *c; + } + s << *c; + } + s << q; + return(s); +} + +/** Display a table name. 
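The operator<< for id_name_t above prints an identifier wrapped in backquotes and doubles any embedded backquote, the usual SQL-identifier escaping. A tiny stand-alone version of the same loop, for illustration only:

	#include <iostream>
	#include <string>

	/* Quote an SQL identifier: wrap in backquotes, double embedded ones. */
	static std::string quote_identifier(const std::string& id)
	{
		std::string out;
		out += '`';
		for (std::string::size_type i = 0; i < id.size(); i++) {
			if (id[i] == '`') {
				out += '`';	/* escape by doubling */
			}
			out += id[i];
		}
		out += '`';
		return out;
	}

	int main()
	{
		/* prints `weird``name` */
		std::cout << quote_identifier("weird`name") << '\n';
		return 0;
	}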
+@param[in,out] s output stream +@param[in] table_name table name +@return the output stream */ +std::ostream& +operator<<( + std::ostream& s, + const table_name_t& table_name) +{ + return(s << ut_get_name(NULL, table_name.m_name)); +} + /**********************************************************************//** Creates a table memory object. -@return own: table object */ -UNIV_INTERN +@return own: table object */ dict_table_t* dict_mem_table_create( /*==================*/ const char* name, /*!< in: table name */ ulint space, /*!< in: space where the clustered index of the table is placed */ - ulint n_cols, /*!< in: number of columns */ + ulint n_cols, /*!< in: total number of columns including + virtual and non-virtual columns */ + ulint n_v_cols,/*!< in: number of virtual columns */ ulint flags, /*!< in: table flags */ ulint flags2) /*!< in: table flags2 */ { @@ -85,30 +117,36 @@ dict_mem_table_create( mem_heap_t* heap; ut_ad(name); - ut_a(dict_tf_is_valid(flags)); - ut_a(!(flags2 & ~DICT_TF2_BIT_MASK)); + ut_a(dict_tf2_is_valid(flags, flags2)); + ut_a(!(flags2 & DICT_TF2_UNUSED_BIT_MASK)); heap = mem_heap_create(DICT_HEAP_SIZE); table = static_cast( - mem_heap_zalloc(heap, sizeof(dict_table_t))); + mem_heap_zalloc(heap, sizeof(*table))); + + lock_table_lock_list_init(&table->locks); + + UT_LIST_INIT(table->indexes, &dict_index_t::indexes); table->heap = heap; + ut_d(table->magic_n = DICT_TABLE_MAGIC_N); + table->flags = (unsigned int) flags; table->flags2 = (unsigned int) flags2; - table->name = static_cast(ut_malloc(strlen(name) + 1)); - memcpy(table->name, name, strlen(name) + 1); - table->is_system_db = dict_mem_table_is_system(table->name); + table->name.m_name = mem_strdup(name); + table->is_system_db = dict_mem_table_is_system(table->name.m_name); table->space = (unsigned int) space; - table->n_cols = (unsigned int) (n_cols + DATA_N_SYS_COLS); + table->n_t_cols = (unsigned int) (n_cols + + dict_table_get_n_sys_cols(table)); + table->n_v_cols = (unsigned int) (n_v_cols); + table->n_cols = table->n_t_cols - table->n_v_cols; table->cols = static_cast( - mem_heap_alloc(heap, - (n_cols + DATA_N_SYS_COLS) - * sizeof(dict_col_t))); - - ut_d(table->magic_n = DICT_TABLE_MAGIC_N); + mem_heap_alloc(heap, table->n_cols * sizeof(dict_col_t))); + table->v_cols = static_cast( + mem_heap_alloc(heap, n_v_cols * sizeof(*table->v_cols))); /* true means that the stats latch will be enabled - dict_table_stats_lock() will not be noop. */ @@ -118,9 +156,12 @@ dict_mem_table_create( table->autoinc_lock = static_cast( mem_heap_alloc(heap, lock_get_size())); + /* lazy creation of table autoinc latch */ dict_table_autoinc_create_lazy(table); table->autoinc = 0; + table->sess_row_id = 0; + table->sess_trx_id = 0; /* The number of transactions that are either waiting on the AUTOINC lock or have been granted the lock. */ @@ -138,6 +179,10 @@ dict_mem_table_create( } #endif /* !UNIV_HOTBACKUP */ + if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + dict_get_and_save_space_name(table, true); + } + new(&table->foreign_set) dict_foreign_set(); new(&table->referenced_set) dict_foreign_set(); @@ -176,7 +221,6 @@ dict_mem_table_is_system( /****************************************************************//** Free a table memory object. 
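In dict_mem_table_create() above, the n_cols argument now counts all user columns (stored and virtual), n_t_cols adds the system columns on top, and n_cols is then reduced to the stored columns only. A worked example of that arithmetic, assuming the usual three system columns (DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR); the figures are illustrative, not taken from the patch:

	#include <cassert>

	int main()
	{
		const unsigned n_sys_cols  = 3;	/* assumed: row id, trx id, roll ptr */
		const unsigned n_user_cols = 5;	/* total user columns, incl. virtual */
		const unsigned n_v_cols    = 2;	/* of which virtual */

		/* Mirrors the assignments in dict_mem_table_create(). */
		unsigned n_t_cols = n_user_cols + n_sys_cols;	/* 8: everything */
		unsigned n_cols   = n_t_cols - n_v_cols;	/* 6: stored only */

		assert(n_t_cols == 8);
		assert(n_cols == 6);
		return 0;
	}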
*/ -UNIV_INTERN void dict_mem_table_free( /*================*/ @@ -186,13 +230,11 @@ dict_mem_table_free( ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_d(table->cached = FALSE); - if (dict_table_has_fts_index(table) - || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID) - || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) { + if (dict_table_has_fts_index(table) + || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID) + || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) { if (table->fts) { - if (table->cached) { - fts_optimize_remove_table(table); - } + fts_optimize_remove_table(table); fts_free(table); } @@ -206,13 +248,24 @@ dict_mem_table_free( table->foreign_set.~dict_foreign_set(); table->referenced_set.~dict_foreign_set(); - ut_free(table->name); + ut_free(table->name.m_name); + table->name.m_name = NULL; + + /* Clean up virtual index info structures that are registered + with virtual columns */ + for (ulint i = 0; i < table->n_v_def; i++) { + dict_v_col_t* vcol + = dict_table_get_nth_v_col(table, i); + + UT_DELETE(vcol->v_indexes); + } + mem_heap_free(table->heap); } /****************************************************************//** Append 'name' to 'col_names'. @see dict_table_t::col_names -@return new column names array */ +@return new column names array */ static const char* dict_add_col_name( @@ -260,7 +313,6 @@ dict_add_col_name( /**********************************************************************//** Adds a column definition to a table. */ -UNIV_INTERN void dict_mem_table_add_col( /*===================*/ @@ -278,13 +330,17 @@ dict_mem_table_add_col( ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); ut_ad(!heap == !name); + ut_ad(!(prtype & DATA_VIRTUAL)); + i = table->n_def++; + table->n_t_def++; + if (name) { - if (UNIV_UNLIKELY(table->n_def == table->n_cols)) { + if (table->n_def == table->n_cols) { heap = table->heap; } - if (UNIV_LIKELY(i != 0) && UNIV_UNLIKELY(table->col_names == NULL)) { + if (i && !table->col_names) { /* All preceding column names are empty. */ char* s = static_cast( mem_heap_zalloc(heap, table->n_def)); @@ -301,6 +357,82 @@ dict_mem_table_add_col( dict_mem_fill_column_struct(col, i, mtype, prtype, len); } +/** Adds a virtual column definition to a table. +@param[in,out] table table +@param[in,out] heap temporary memory heap, or NULL. It is + used to store name when we have not finished + adding all columns. When all columns are + added, the whole name will copy to memory from + table->heap +@param[in] name column name +@param[in] mtype main datatype +@param[in] prtype precise type +@param[in] len length +@param[in] pos position in a table +@param[in] num_base number of base columns +@return the virtual column definition */ +dict_v_col_t* +dict_mem_table_add_v_col( + dict_table_t* table, + mem_heap_t* heap, + const char* name, + ulint mtype, + ulint prtype, + ulint len, + ulint pos, + ulint num_base) +{ + dict_v_col_t* v_col; + ulint i; + + ut_ad(table); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + ut_ad(!heap == !name); + + ut_ad(prtype & DATA_VIRTUAL); + + i = table->n_v_def++; + + table->n_t_def++; + + if (name != NULL) { + if (table->n_v_def == table->n_v_cols) { + heap = table->heap; + } + + if (i && !table->v_col_names) { + /* All preceding column names are empty. 
*/ + char* s = static_cast( + mem_heap_zalloc(heap, table->n_v_def)); + + table->v_col_names = s; + } + + table->v_col_names = dict_add_col_name(table->v_col_names, + i, name, heap); + } + + v_col = dict_table_get_nth_v_col(table, i); + + dict_mem_fill_column_struct(&v_col->m_col, pos, mtype, prtype, len); + v_col->v_pos = i; + + if (num_base != 0) { + v_col->base_col = static_cast(mem_heap_zalloc( + table->heap, num_base * sizeof( + *v_col->base_col))); + } else { + v_col->base_col = NULL; + } + + v_col->num_base = num_base; + + /* Initialize the index list for virtual columns */ + v_col->v_indexes = UT_NEW_NOKEY(dict_v_idx_list()); + + return(v_col); +} + /**********************************************************************//** Renames a column of a table in the data dictionary cache. */ static MY_ATTRIBUTE((nonnull)) @@ -310,17 +442,20 @@ dict_mem_table_col_rename_low( dict_table_t* table, /*!< in/out: table */ unsigned i, /*!< in: column offset corresponding to s */ const char* to, /*!< in: new column name */ - const char* s) /*!< in: pointer to table->col_names */ + const char* s, /*!< in: pointer to table->col_names */ + bool is_virtual) + /*!< in: if this is a virtual column */ { + char* t_col_names = const_cast( + is_virtual ? table->v_col_names : table->col_names); + ulint n_col = is_virtual ? table->n_v_def : table->n_def; + size_t from_len = strlen(s), to_len = strlen(to); ut_ad(i < table->n_def); ut_ad(from_len <= NAME_LEN); ut_ad(to_len <= NAME_LEN); - char from[NAME_LEN]; - strncpy(from, s, NAME_LEN); - if (from_len == to_len) { /* The easy case: simply replace the column name in table->col_names. */ @@ -329,13 +464,13 @@ dict_mem_table_col_rename_low( /* We need to adjust all affected index->field pointers, as in dict_index_add_col(). First, copy table->col_names. */ - ulint prefix_len = s - table->col_names; + ulint prefix_len = s - t_col_names; - for (; i < table->n_def; i++) { + for (; i < n_col; i++) { s += strlen(s) + 1; } - ulint full_len = s - table->col_names; + ulint full_len = s - t_col_names; char* col_names; if (to_len > from_len) { @@ -344,14 +479,14 @@ dict_mem_table_col_rename_low( table->heap, full_len + to_len - from_len)); - memcpy(col_names, table->col_names, prefix_len); + memcpy(col_names, t_col_names, prefix_len); } else { - col_names = const_cast(table->col_names); + col_names = const_cast(t_col_names); } memcpy(col_names + prefix_len, to, to_len); memmove(col_names + prefix_len + to_len, - table->col_names + (prefix_len + from_len), + t_col_names + (prefix_len + from_len), full_len - (prefix_len + from_len)); /* Replace the field names in every index. 
*/ @@ -364,8 +499,16 @@ dict_mem_table_col_rename_low( dict_field_t* field = dict_index_get_nth_field( index, i); + + /* if is_virtual and that in field->col does + not match, continue */ + if ((!is_virtual) != + (!dict_col_is_virtual(field->col))) { + continue; + } + ulint name_ofs - = field->name - table->col_names; + = field->name - t_col_names; if (name_ofs <= prefix_len) { field->name = col_names + name_ofs; } else { @@ -376,7 +519,16 @@ dict_mem_table_col_rename_low( } } - table->col_names = col_names; + if (is_virtual) { + table->v_col_names = col_names; + } else { + table->col_names = col_names; + } + } + + /* Virtual columns are not allowed for foreign key */ + if (is_virtual) { + return; } dict_foreign_t* foreign; @@ -388,54 +540,14 @@ dict_mem_table_col_rename_low( foreign = *it; - if (foreign->foreign_index == NULL) { - /* We may go here when we set foreign_key_checks to 0, - and then try to rename a column and modify the - corresponding foreign key constraint. The index - would have been dropped, we have to find an equivalent - one */ - for (unsigned f = 0; f < foreign->n_fields; f++) { - if (strcmp(foreign->foreign_col_names[f], from) - == 0) { - - char** rc = const_cast( - foreign->foreign_col_names - + f); - - if (to_len <= strlen(*rc)) { - memcpy(*rc, to, to_len + 1); - } else { - *rc = static_cast( - mem_heap_dup( - foreign->heap, - to, - to_len + 1)); - } - } - } - - dict_index_t* new_index = dict_foreign_find_index( - foreign->foreign_table, NULL, - foreign->foreign_col_names, - foreign->n_fields, NULL, true, false, - NULL, NULL, NULL); - /* There must be an equivalent index in this case. */ - ut_ad(new_index != NULL); - - foreign->foreign_index = new_index; - - } else { - - for (unsigned f = 0; f < foreign->n_fields; f++) { - /* These can point straight to - table->col_names, because the foreign key - constraints will be freed at the same time - when the table object is freed. */ - foreign->foreign_col_names[f] - = dict_index_get_nth_field( - foreign->foreign_index, - f)->name; - } + for (unsigned f = 0; f < foreign->n_fields; f++) { + /* These can point straight to + table->col_names, because the foreign key + constraints will be freed at the same time + when the table object is freed. */ + foreign->foreign_col_names[f] + = dict_index_get_nth_field( + foreign->foreign_index, f)->name; } } @@ -445,8 +557,6 @@ dict_mem_table_col_rename_low( foreign = *it; - ut_ad(foreign->referenced_index != NULL); - for (unsigned f = 0; f < foreign->n_fields; f++) { /* foreign->referenced_col_names[] need to be copies, because the constraint may become @@ -478,18 +588,20 @@ dict_mem_table_col_rename_low( /**********************************************************************//** Renames a column of a table in the data dictionary cache. */ -UNIV_INTERN void dict_mem_table_col_rename( /*======================*/ dict_table_t* table, /*!< in/out: table */ unsigned nth_col,/*!< in: column index */ const char* from, /*!< in: old column name */ - const char* to) /*!< in: new column name */ + const char* to, /*!< in: new column name */ + bool is_virtual) + /*!< in: if this is a virtual column */ { - const char* s = table->col_names; + const char* s = is_virtual ? table->v_col_names : table->col_names; - ut_ad(nth_col < table->n_def); + ut_ad((!is_virtual && nth_col < table->n_def) + || (is_virtual && nth_col < table->n_v_def)); for (unsigned i = 0; i < nth_col; i++) { size_t len = strlen(s); @@ -501,13 +613,12 @@ dict_mem_table_col_rename( Proceed with the renaming anyway. 
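dict_mem_table_col_rename_low() above edits table->col_names (or v_col_names for virtual columns), which stores all column names back to back as NUL-terminated strings. When the new name is longer, the packed buffer is reallocated and the tail is shifted. The sketch below shows the same idea on a std::string used as the packed buffer; it is purely illustrative and none of it is the InnoDB code:

	#include <cassert>
	#include <iostream>
	#include <string>

	/* Packed layout: "a\0b\0c\0" for columns a, b, c. */
	static std::string pack(const char* const* names, size_t n)
	{
		std::string buf;
		for (size_t i = 0; i < n; i++) {
			buf += names[i];
			buf += '\0';
		}
		return buf;
	}

	/* Rename the i-th packed name; handles shorter, equal or longer names. */
	static void rename_nth(std::string& buf, size_t i, const std::string& to)
	{
		size_t pos = 0;
		for (size_t k = 0; k < i; k++) {
			pos = buf.find('\0', pos) + 1;	/* skip one packed name */
		}
		size_t end = buf.find('\0', pos);
		buf.replace(pos, end - pos, to);
	}

	int main()
	{
		const char* cols[] = { "a", "b", "c" };
		std::string packed = pack(cols, 3);

		rename_nth(packed, 1, "b_renamed");	/* longer name: tail shifts */

		assert(packed == std::string("a\0b_renamed\0c\0", 14));
		std::cout << "ok\n";
		return 0;
	}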
*/ ut_ad(!strcmp(from, s)); - dict_mem_table_col_rename_low(table, nth_col, to, s); + dict_mem_table_col_rename_low(table, nth_col, to, s, is_virtual); } /**********************************************************************//** This function populates a dict_col_t memory structure with supplied information. */ -UNIV_INTERN void dict_mem_fill_column_struct( /*========================*/ @@ -537,8 +648,7 @@ dict_mem_fill_column_struct( /**********************************************************************//** Creates an index memory object. -@return own: index object */ -UNIV_INTERN +@return own: index object */ dict_index_t* dict_mem_index_create( /*==================*/ @@ -565,20 +675,32 @@ dict_mem_index_create( space, type, n_fields); dict_index_zip_pad_mutex_create_lazy(index); + + if (type & DICT_SPATIAL) { + mutex_create(LATCH_ID_RTR_SSN_MUTEX, &index->rtr_ssn.mutex); + index->rtr_track = static_cast( + mem_heap_alloc( + heap, + sizeof(*index->rtr_track))); + mutex_create(LATCH_ID_RTR_ACTIVE_MUTEX, + &index->rtr_track->rtr_active_mutex); + index->rtr_track->rtr_active = UT_NEW_NOKEY(rtr_info_active()); + } + return(index); } #ifndef UNIV_HOTBACKUP /**********************************************************************//** Creates and initializes a foreign constraint memory object. -@return own: foreign constraint struct */ -UNIV_INTERN +@return own: foreign constraint struct */ dict_foreign_t* dict_mem_foreign_create(void) /*=========================*/ { dict_foreign_t* foreign; mem_heap_t* heap; + DBUG_ENTER("dict_mem_foreign_create"); heap = mem_heap_create(100); @@ -587,7 +709,9 @@ dict_mem_foreign_create(void) foreign->heap = heap; - return(foreign); + DBUG_PRINT("dict_mem_foreign_create", ("heap: %p", heap)); + + DBUG_RETURN(foreign); } /**********************************************************************//** @@ -595,7 +719,6 @@ Sets the foreign_table_name_lookup pointer based on the value of lower_case_table_names. If that is 0 or 1, foreign_table_name_lookup will point to foreign_table_name. If 2, then another string is allocated from foreign->heap and set to lower case. */ -UNIV_INTERN void dict_mem_foreign_table_name_lookup_set( /*===================================*/ @@ -626,7 +749,6 @@ Sets the referenced_table_name_lookup pointer based on the value of lower_case_table_names. If that is 0 or 1, referenced_table_name_lookup will point to referenced_table_name. If 2, then another string is allocated from foreign->heap and set to lower case. */ -UNIV_INTERN void dict_mem_referenced_table_name_lookup_set( /*======================================*/ @@ -657,7 +779,6 @@ dict_mem_referenced_table_name_lookup_set( Adds a field definition to an index. NOTE: does not take a copy of the column name if the field is a column. The memory occupied by the column name may be released only after publishing the index. */ -UNIV_INTERN void dict_mem_index_add_field( /*=====================*/ @@ -682,7 +803,6 @@ dict_mem_index_add_field( /**********************************************************************//** Frees an index memory object. 
*/ -UNIV_INTERN void dict_mem_index_free( /*================*/ @@ -690,15 +810,25 @@ dict_mem_index_free( { ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); -#ifdef UNIV_BLOB_DEBUG - if (index->blobs) { - mutex_free(&index->blobs_mutex); - rbt_free(index->blobs); - } -#endif /* UNIV_BLOB_DEBUG */ dict_index_zip_pad_mutex_destroy(index); + if (dict_index_is_spatial(index)) { + rtr_info_active::iterator it; + rtr_info_t* rtr_info; + + for (it = index->rtr_track->rtr_active->begin(); + it != index->rtr_track->rtr_active->end(); ++it) { + rtr_info = *it; + + rtr_info->index = NULL; + } + + mutex_destroy(&index->rtr_ssn.mutex); + mutex_destroy(&index->rtr_track->rtr_active_mutex); + UT_DELETE(index->rtr_track->rtr_active); + } + mem_heap_free(index->heap); } @@ -714,7 +844,6 @@ reasonably unique temporary file name. @param[in] dbtab Table name in the form database/table name @param[in] id Table id @return A unique temporary tablename suitable for InnoDB use */ -UNIV_INTERN char* dict_mem_create_temporary_tablename( mem_heap_t* heap, @@ -723,18 +852,14 @@ dict_mem_create_temporary_tablename( { size_t size; char* name; - const char* dbend = strchr(dbtab, '/'); + const char* dbend = strchr(dbtab, '/'); ut_ad(dbend); - size_t dblen = dbend - dbtab + 1; + size_t dblen = dbend - dbtab + 1; -#ifdef HAVE_ATOMIC_BUILTINS - /* Increment a randomly initialized number for each temp file. */ + /* Increment a randomly initialized number for each temp file. */ os_atomic_increment_uint32(&dict_temp_file_num, 1); -#else /* HAVE_ATOMIC_BUILTINS */ - dict_temp_file_num++; -#endif /* HAVE_ATOMIC_BUILTINS */ - size = tmp_file_prefix_length + 3 + 20 + 1 + 10 + dblen; + size = dblen + (sizeof(TEMP_FILE_PREFIX) + 3 + 20 + 1 + 10); name = static_cast(mem_heap_alloc(heap, size)); memcpy(name, dbtab, dblen); ut_snprintf(name + dblen, size - dblen, @@ -745,15 +870,13 @@ dict_mem_create_temporary_tablename( } /** Initialize dict memory variables */ - void dict_mem_init(void) { /* Initialize a randomly distributed temporary file number */ - ib_uint32_t now = static_cast(ut_time()); + ib_uint32_t now = static_cast(ut_time()); - const byte* buf = reinterpret_cast(&now); - ut_ad(ut_crc32 != NULL); + const byte* buf = reinterpret_cast(&now); dict_temp_file_num = ut_crc32(buf, sizeof(now)); diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 5c283f693d5..67bf672ab49 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -27,24 +27,17 @@ Created Jan 06, 2010 Vasil Dimov #include "univ.i" -#include "btr0btr.h" /* btr_get_size() */ -#include "btr0cur.h" /* btr_estimate_number_of_different_key_vals() */ -#include "dict0dict.h" /* dict_table_get_first_index(), dict_fs2utf8() */ -#include "dict0mem.h" /* DICT_TABLE_MAGIC_N */ +#include "ut0ut.h" +#include "ut0rnd.h" +#include "dyn0buf.h" +#include "row0sel.h" +#include "trx0trx.h" +#include "pars0pars.h" #include "dict0stats.h" -#include "data0type.h" /* dtype_t */ -#include "db0err.h" /* dberr_t */ -#include "page0page.h" /* page_align() */ -#include "pars0pars.h" /* pars_info_create() */ -#include "pars0types.h" /* pars_info_t */ -#include "que0que.h" /* que_eval_sql() */ -#include "rem0cmp.h" /* REC_MAX_N_FIELDS,cmp_rec_rec_with_match() */ -#include "row0sel.h" /* sel_node_t */ -#include "row0types.h" /* sel_node_t */ -#include "trx0trx.h" /* trx_create() */ -#include "trx0roll.h" /* trx_rollback_to_savepoint() */ -#include "ut0rnd.h" /* ut_rnd_interval() */ -#include "ut0ut.h" /* 
ut_format_name(), ut_time() */ +#include "ha_prototypes.h" +#include "ut0new.h" +#include +#include "btr0btr.h" #include #include @@ -144,18 +137,15 @@ of keys. For example if a btree level is: index: 0,1,2,3,4,5,6,7,8,9,10,11,12 data: b,b,b,b,b,b,g,g,j,j,j, x, y then we would store 5,7,10,11,12 in the array. */ -typedef std::vector boundaries_t; +typedef std::vector > boundaries_t; -/* This is used to arrange the index based on the index name. -@return true if index_name1 is smaller than index_name2. */ -struct index_cmp -{ - bool operator()(const char* index_name1, const char* index_name2) const { - return(strcmp(index_name1, index_name2) < 0); - } -}; +/** Allocator type used for index_map_t. */ +typedef ut_allocator > + index_map_t_allocator; -typedef std::map index_map_t; +/** Auxiliary map used for sorting indexes by name in dict_stats_save(). */ +typedef std::map index_map_t; /*********************************************************************//** Checks whether an index should be ignored in stats manipulations: @@ -171,8 +161,9 @@ dict_stats_should_ignore_index( { return((index->type & DICT_FTS) || dict_index_is_corrupted(index) + || dict_index_is_spatial(index) || index->to_be_dropped - || *index->name == TEMP_INDEX_PREFIX); + || !index->is_committed()); } /*********************************************************************//** @@ -252,7 +243,7 @@ dict_stats_persistent_storage_check( dberr_t ret; if (!caller_has_dict_sys_mutex) { - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); } ut_ad(mutex_own(&dict_sys->mutex)); @@ -267,12 +258,11 @@ dict_stats_persistent_storage_check( } if (!caller_has_dict_sys_mutex) { - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); } if (ret != DB_SUCCESS && ret != DB_STATS_DO_NOT_EXIST) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: %s\n", errstr); + ib::error() << errstr; return(false); } else if (ret == DB_STATS_DO_NOT_EXIST) { return false; @@ -300,9 +290,8 @@ dict_stats_exec_sql( { dberr_t err; bool trx_started = false; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); if (!dict_stats_persistent_storage_check(true)) { @@ -312,8 +301,13 @@ dict_stats_exec_sql( if (trx == NULL) { trx = trx_allocate_for_background(); - trx_start_if_not_started(trx); trx_started = true; + + if (srv_read_only_mode) { + trx_start_internal_read_only(trx); + } else { + trx_start_internal(trx); + } } err = que_eval_sql(pinfo, sql, FALSE, trx); /* pinfo is freed here */ @@ -388,7 +382,7 @@ dict_stats_table_clone_create( heap_size = 0; heap_size += sizeof(dict_table_t); - heap_size += strlen(table->name) + 1; + heap_size += strlen(table->name.m_name) + 1; for (index = dict_table_get_first_index(table); index != NULL; @@ -398,7 +392,7 @@ dict_stats_table_clone_create( continue; } - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); ulint n_uniq = dict_index_get_n_unique(index); @@ -428,8 +422,7 @@ dict_stats_table_clone_create( t->heap = heap; - UNIV_MEM_ASSERT_RW_ABORT(table->name, strlen(table->name) + 1); - t->name = (char*) mem_heap_strdup(heap, table->name); + t->name.m_name = mem_heap_strdup(heap, table->name.m_name); t->corrupted = table->corrupted; @@ -438,7 +431,7 @@ dict_stats_table_clone_create( dict_table_stats_lock()/unlock() routines will do nothing. 
*/ dict_table_stats_latch_create(t, false); - UT_LIST_INIT(t->indexes); + UT_LIST_INIT(t->indexes, &dict_index_t::indexes); for (index = dict_table_get_first_index(table); index != NULL; @@ -448,7 +441,7 @@ dict_stats_table_clone_create( continue; } - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); dict_index_t* idx; @@ -457,10 +450,9 @@ dict_stats_table_clone_create( UNIV_MEM_ASSERT_RW_ABORT(&index->id, sizeof(index->id)); idx->id = index->id; - UNIV_MEM_ASSERT_RW_ABORT(index->name, strlen(index->name) + 1); - idx->name = (char*) mem_heap_strdup(heap, index->name); + idx->name = mem_heap_strdup(heap, index->name); - idx->table_name = t->name; + idx->table_name = t->name.m_name; idx->table = t; @@ -469,6 +461,7 @@ dict_stats_table_clone_create( idx->to_be_dropped = 0; idx->online_status = ONLINE_INDEX_COMPLETE; + idx->set_committed(true); idx->n_uniq = index->n_uniq; @@ -476,13 +469,12 @@ dict_stats_table_clone_create( heap, idx->n_uniq * sizeof(idx->fields[0])); for (ulint i = 0; i < idx->n_uniq; i++) { - UNIV_MEM_ASSERT_RW_ABORT(index->fields[i].name, strlen(index->fields[i].name) + 1); - idx->fields[i].name = (char*) mem_heap_strdup( + idx->fields[i].name = mem_heap_strdup( heap, index->fields[i].name); } /* hook idx into t->indexes */ - UT_LIST_ADD_LAST(indexes, t->indexes, idx); + UT_LIST_ADD_LAST(t->indexes, idx); idx->stat_n_diff_key_vals = (ib_uint64_t*) mem_heap_alloc( heap, @@ -533,7 +525,7 @@ dict_stats_empty_index( /*!< in: whether to empty defrag stats */ { ut_ad(!(index->type & DICT_FTS)); - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); ulint n_uniq = index->n_uniq; @@ -607,7 +599,7 @@ dict_stats_empty_table( continue; } - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); dict_stats_empty_index(index, empty_defrag_stats); } @@ -730,7 +722,7 @@ dict_stats_copy( continue; } - ut_ad(!dict_index_is_univ(dst_idx)); + ut_ad(!dict_index_is_ibuf(dst_idx)); if (!INDEX_EQ(src_idx, dst_idx)) { for (src_idx = dict_table_get_first_index(src); @@ -787,8 +779,7 @@ dict_stats_copy( dst->stat_initialized = TRUE; } -/*********************************************************************//** -Duplicate the stats of a table and its indexes. +/** Duplicate the stats of a table and its indexes. This function creates a dummy dict_table_t object and copies the input table's stats into it. The returned table object is not in the dictionary cache and cannot be accessed by any other threads. In addition to the @@ -810,12 +801,12 @@ dict_index_t::stat_defrag_n_pages_freed dict_index_t::stat_defrag_n_page_split The returned object should be freed with dict_stats_snapshot_free() when no longer needed. 
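index_map_t above is described as an auxiliary map used to sort indexes by name in dict_stats_save(). A std::map keyed by const char* needs an explicit strcmp-style comparator, otherwise it would order by pointer value rather than by name. A rough sketch of such a map; the comparator and typedef names here are illustrative and not the original template arguments:

	#include <cstring>
	#include <iostream>
	#include <map>

	/* Order C strings by their contents, not by pointer value. */
	struct strcmp_less {
		bool operator()(const char* a, const char* b) const
		{
			return std::strcmp(a, b) < 0;
		}
	};

	typedef std::map<const char*, int, strcmp_less> name_map_t;

	int main()
	{
		name_map_t m;
		m["PRIMARY"] = 1;
		m["idx_b"]   = 3;
		m["idx_a"]   = 2;

		/* Iterates in name order: PRIMARY, idx_a, idx_b. */
		for (name_map_t::const_iterator it = m.begin();
		     it != m.end(); ++it) {
			std::cout << it->first << '\n';
		}
		return 0;
	}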
+@param[in] table table whose stats to copy @return incomplete table object */ static dict_table_t* dict_stats_snapshot_create( -/*=======================*/ - dict_table_t* table) /*!< in: table whose stats to copy */ + dict_table_t* table) { mutex_enter(&dict_sys->mutex); @@ -883,7 +874,10 @@ dict_stats_update_transient_for_index( } else { mtr_t mtr; ulint size; + mtr_start(&mtr); + dict_disable_redo_if_temporary(index->table, &mtr); + mtr_s_lock(dict_index_get_lock(index), &mtr); size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr); @@ -908,6 +902,9 @@ dict_stats_update_transient_for_index( index->stat_n_leaf_pages = size; + /* We don't handle the return value since it will be false + only when some thread is dropping the table and we don't + have to empty the statistics of the to be dropped index */ btr_estimate_number_of_different_key_vals(index); } } @@ -918,7 +915,6 @@ is relatively quick and is used to calculate transient statistics that are not saved on disk. This was the only way to calculate statistics before the Persistent Statistics feature was introduced. */ -UNIV_INTERN void dict_stats_update_transient( /*========================*/ @@ -939,20 +935,17 @@ dict_stats_update_transient( } else if (index == NULL) { /* Table definition is corrupt */ - char buf[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: table %s has no indexes. " - "Cannot calculate statistics.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + ib::warn() << "Table " << table->name + << " has no indexes. Cannot calculate statistics."; dict_stats_empty_table(table, false); return; } for (; index != NULL; index = dict_table_get_next_index(index)) { - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); - if (index->type & DICT_FTS) { + if (index->type & DICT_FTS || dict_index_is_spatial(index)) { continue; } @@ -1046,7 +1039,7 @@ dict_stats_analyze_index_level( index->table->name, index->name, level); ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_S_LOCK)); + MTR_MEMO_SX_LOCK)); n_uniq = dict_index_get_n_unique(index); @@ -1080,7 +1073,7 @@ dict_stats_analyze_index_level( on the desired level. 
*/ btr_pcur_open_at_index_side( - true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, + true, index, BTR_SEARCH_TREE | BTR_ALREADY_S_LATCHED, &pcur, true, level, mtr); btr_pcur_move_to_next_on_page(&pcur); @@ -1119,8 +1112,6 @@ dict_stats_analyze_index_level( btr_pcur_is_on_user_rec(&pcur); btr_pcur_move_to_next_user_rec(&pcur, mtr)) { - ulint matched_fields = 0; - ulint matched_bytes = 0; bool rec_is_last_on_page; rec = btr_pcur_get_rec(&pcur); @@ -1180,6 +1171,8 @@ dict_stats_analyze_index_level( (*total_recs)++; if (prev_rec != NULL) { + ulint matched_fields; + prev_rec_offsets = rec_get_offsets( prev_rec, index, prev_rec_offsets, n_uniq, &heap); @@ -1190,8 +1183,7 @@ dict_stats_analyze_index_level( prev_rec_offsets, index, FALSE, - &matched_fields, - &matched_bytes); + &matched_fields); for (i = matched_fields; i < n_uniq; i++) { @@ -1320,12 +1312,7 @@ dict_stats_analyze_index_level( btr_leaf_page_release(btr_pcur_get_block(&pcur), BTR_SEARCH_LEAF, mtr); btr_pcur_close(&pcur); - - if (prev_rec_buf != NULL) { - - mem_free(prev_rec_buf); - } - + ut_free(prev_rec_buf); mem_heap_free(heap); } @@ -1368,7 +1355,7 @@ dict_stats_scan_page( const rec_t** out_rec, ulint* offsets1, ulint* offsets2, - dict_index_t* index, + const dict_index_t* index, const page_t* page, ulint n_prefix, page_scan_method_t scan_method, @@ -1420,8 +1407,7 @@ dict_stats_scan_page( while (!page_rec_is_supremum(next_rec)) { - ulint matched_fields = 0; - ulint matched_bytes = 0; + ulint matched_fields; offsets_next_rec = rec_get_offsets(next_rec, index, offsets_next_rec, @@ -1432,8 +1418,7 @@ dict_stats_scan_page( the first n_prefix fields */ cmp_rec_rec_with_match(rec, next_rec, offsets_rec, offsets_next_rec, - index, FALSE, &matched_fields, - &matched_bytes); + index, FALSE, &matched_fields); if (matched_fields < n_prefix) { /* rec != next_rec, => rec is non-boring */ @@ -1441,7 +1426,7 @@ dict_stats_scan_page( (*n_diff)++; if (scan_method == QUIT_ON_FIRST_NON_BORING) { - goto func_exit; + break; } } @@ -1454,7 +1439,7 @@ dict_stats_scan_page( place where offsets_rec was pointing before because we have just 2 placeholders where data is actually stored: - offsets_onstack1 and offsets_onstack2 and we + offsets1 and offsets2 and we are using them in circular fashion (offsets[_next]_rec are just pointers to those placeholders). 
*/ @@ -1472,7 +1457,6 @@ dict_stats_scan_page( next_rec = get_next(next_rec); } -func_exit: /* offsets1,offsets2 should have been big enough */ ut_a(heap == NULL); *out_rec = rec; @@ -1498,10 +1482,7 @@ dict_stats_analyze_index_below_cur( ib_uint64_t* n_external_pages) { dict_index_t* index; - ulint space; - ulint zip_size; buf_block_t* block; - ulint page_no; const page_t* page; mem_heap_t* heap; const rec_t* rec; @@ -1534,15 +1515,15 @@ dict_stats_analyze_index_below_cur( rec_offs_set_n_alloc(offsets1, size); rec_offs_set_n_alloc(offsets2, size); - space = dict_index_get_space(index); - zip_size = dict_table_zip_size(index->table); - rec = btr_cur_get_rec(cur); offsets_rec = rec_get_offsets(rec, index, offsets1, ULINT_UNDEFINED, &heap); - page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + page_id_t page_id(dict_index_get_space(index), + btr_node_ptr_get_child_page_no( + rec, offsets_rec)); + const page_size_t page_size(dict_table_page_size(index->table)); /* assume no external pages by default - in case we quit from this function without analyzing any leaf pages */ @@ -1553,9 +1534,11 @@ dict_stats_analyze_index_below_cur( /* descend to the leaf level on the B-tree */ for (;;) { - block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, + dberr_t err = DB_SUCCESS; + + block = buf_page_get_gen(page_id, page_size, RW_S_LATCH, NULL /* no guessed block */, - BUF_GET, __FILE__, __LINE__, &mtr); + BUF_GET, __FILE__, __LINE__, &mtr, &err); page = buf_block_get_frame(block); @@ -1599,7 +1582,8 @@ dict_stats_analyze_index_below_cur( /* we have a non-boring record in rec, descend below it */ - page_no = btr_node_ptr_get_child_page_no(rec, offsets_rec); + page_id.set_page_no( + btr_node_ptr_get_child_page_no(rec, offsets_rec)); } /* make sure we got a leaf page as a result from the above loop */ @@ -1693,20 +1677,20 @@ dict_stats_analyze_index_for_n_prefix( ib_uint64_t i; #if 0 - DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu, " - "n_diff_on_level=" UINT64PF ")\n", + DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu, n_prefix=%lu," + " n_diff_on_level=" UINT64PF ")\n", __func__, index->table->name, index->name, level, n_prefix, n_diff_data->n_diff_on_level); #endif ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), - MTR_MEMO_S_LOCK)); + MTR_MEMO_SX_LOCK)); /* Position pcur on the leftmost record on the leftmost page on the desired level. */ btr_pcur_open_at_index_side( - true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED, + true, index, BTR_SEARCH_TREE | BTR_ALREADY_S_LATCHED, &pcur, true, n_diff_data->level, mtr); btr_pcur_move_to_next_on_page(&pcur); @@ -1833,7 +1817,7 @@ dict_stats_analyze_index_for_n_prefix( &n_external_pages); /* We adjust n_diff_on_leaf_page here to avoid counting - one record twice - once as the last on some page and once + one value twice - once as the last on some page and once as the first on another page. 
Consider the following example: Leaf level: page: (2,2,2,2,3,3) @@ -1881,7 +1865,7 @@ dict_stats_index_set_n_diff( ut_ad(data->n_leaf_pages_to_analyze > 0); ut_ad(data->n_recs_on_level > 0); - ulint n_ordinary_leaf_pages; + ib_uint64_t n_ordinary_leaf_pages; if (data->level == 1) { /* If we know the number of records on level 1, then @@ -1953,10 +1937,15 @@ dict_stats_analyze_index( ulint size; DBUG_ENTER("dict_stats_analyze_index"); - DBUG_PRINT("info", ("index: %s, online status: %d", index->name, + DBUG_PRINT("info", ("index: %s, online status: %d", index->name(), dict_index_get_online_status(index))); - DEBUG_PRINTF(" %s(index=%s)\n", __func__, index->name); + /* Disable update statistic for Rtree */ + if (dict_index_is_spatial(index)) { + DBUG_VOID_RETURN; + } + + DEBUG_PRINTF(" %s(index=%s)\n", __func__, index->name()); dict_stats_empty_index(index, false); @@ -1987,7 +1976,7 @@ dict_stats_analyze_index( mtr_start(&mtr); - mtr_s_lock(dict_index_get_lock(index), &mtr); + mtr_sx_lock(dict_index_get_lock(index), &mtr); root_level = btr_height_get(index, &mtr); @@ -2006,11 +1995,11 @@ dict_stats_analyze_index( || N_SAMPLE_PAGES(index) * n_uniq > index->stat_n_leaf_pages) { if (root_level == 0) { - DEBUG_PRINTF(" %s(): just one page, " - "doing full scan\n", __func__); + DEBUG_PRINTF(" %s(): just one page," + " doing full scan\n", __func__); } else { - DEBUG_PRINTF(" %s(): too many pages requested for " - "sampling, doing full scan\n", __func__); + DEBUG_PRINTF(" %s(): too many pages requested for" + " sampling, doing full scan\n", __func__); } /* do full scan of level 0; save results directly @@ -2036,16 +2025,18 @@ dict_stats_analyze_index( /* For each level that is being scanned in the btree, this contains the number of different key values for all possible n-column prefixes. */ - ib_uint64_t* n_diff_on_level = new ib_uint64_t[n_uniq]; + ib_uint64_t* n_diff_on_level = UT_NEW_ARRAY( + ib_uint64_t, n_uniq, mem_key_dict_stats_n_diff_on_level); /* For each level that is being scanned in the btree, this contains the index of the last record from each group of equal records (when comparing only the first n columns, n=1..n_uniq). */ - boundaries_t* n_diff_boundaries = new boundaries_t[n_uniq]; + boundaries_t* n_diff_boundaries = UT_NEW_ARRAY_NOKEY(boundaries_t, + n_uniq); /* For each n-column prefix this array contains the input data that is used to calculate dict_index_t::stat_n_diff_key_vals[]. */ - n_diff_data_t* n_diff_data = new n_diff_data_t[n_uniq]; + n_diff_data_t* n_diff_data = UT_NEW_ARRAY_NOKEY(n_diff_data_t, n_uniq); /* total_recs is also used to estimate the number of pages on one level below, so at the start we have 1 page (the root) */ @@ -2066,15 +2057,15 @@ dict_stats_analyze_index( for (n_prefix = n_uniq; n_prefix >= 1; n_prefix--) { - DEBUG_PRINTF(" %s(): searching level with >=%llu " - "distinct records, n_prefix=%lu\n", + DEBUG_PRINTF(" %s(): searching level with >=%llu" + " distinct records, n_prefix=%lu\n", __func__, N_DIFF_REQUIRED(index), n_prefix); /* Commit the mtr to release the tree S lock to allow other threads to do some work too. */ mtr_commit(&mtr); mtr_start(&mtr); - mtr_s_lock(dict_index_get_lock(index), &mtr); + mtr_sx_lock(dict_index_get_lock(index), &mtr); if (root_level != btr_height_get(index, &mtr)) { /* Just quit if the tree has changed beyond recognition here. 
The old stats from previous @@ -2213,9 +2204,9 @@ found_level: mtr_commit(&mtr); - delete[] n_diff_boundaries; + UT_DELETE_ARRAY(n_diff_boundaries); - delete[] n_diff_on_level; + UT_DELETE_ARRAY(n_diff_on_level); /* n_prefix == 0 means that the above loop did not end up prematurely due to tree being changed and so n_diff_data[] is set up. */ @@ -2223,7 +2214,7 @@ found_level: dict_stats_index_set_n_diff(n_diff_data, index); } - delete[] n_diff_data; + UT_DELETE_ARRAY(n_diff_data); dict_stats_assert_initialized_index(index); DBUG_VOID_RETURN; @@ -2261,7 +2252,7 @@ dict_stats_update_persistent( return(DB_CORRUPTION); } - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); dict_stats_analyze_index(index); @@ -2279,9 +2270,9 @@ dict_stats_update_persistent( index != NULL; index = dict_table_get_next_index(index)) { - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); - if (index->type & DICT_FTS) { + if (index->type & DICT_FTS || dict_index_is_spatial(index)) { continue; } @@ -2336,23 +2327,20 @@ dict_stats_save_index_stat( const char* stat_description, trx_t* trx) { - pars_info_t* pinfo; dberr_t ret; + pars_info_t* pinfo; char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); - dict_fs2utf8(index->table->name, db_utf8, sizeof(db_utf8), + dict_fs2utf8(index->table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); pinfo = pars_info_create(); pars_info_add_str_literal(pinfo, "database_name", db_utf8); pars_info_add_str_literal(pinfo, "table_name", table_utf8); - UNIV_MEM_ASSERT_RW_ABORT(index->name, strlen(index->name)); pars_info_add_str_literal(pinfo, "index_name", index->name); UNIV_MEM_ASSERT_RW_ABORT(&last_update, 4); pars_info_add_int4_literal(pinfo, "last_update", last_update); @@ -2400,17 +2388,11 @@ dict_stats_save_index_stat( if (ret != DB_SUCCESS) { if (innodb_index_stats_not_found == false && index->stats_error_printed == false) { - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save index statistics for table " - "%s, index %s, stat name \"%s\": %s\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index)), - stat_name, ut_strerr(ret)); + ib::error() << "Cannot save index statistics for table " + << index->table->name + << ", index " << index->name + << ", stat name \"" << stat_name << "\": " + << ut_strerr(ret); index->stats_error_printed = true; } } @@ -2419,15 +2401,14 @@ dict_stats_save_index_stat( } /** Save the table's statistics into the persistent statistics storage. 
-@param[in] table_orig table whose stats to save -@param[in] only_for_index if this is non-NULL, then stats for indexes -that are not equal to it will not be saved, if NULL, then all -indexes' stats are saved +@param[in] table_orig table whose stats to save +@param[in] only_for_index if this is non-NULL, then stats for indexes +that are not equal to it will not be saved, if NULL, then all indexes' stats +are saved @return DB_SUCCESS or error code */ static dberr_t dict_stats_save( -/*============*/ dict_table_t* table_orig, const index_id_t* only_for_index) { @@ -2440,10 +2421,10 @@ dict_stats_save( table = dict_stats_snapshot_create(table_orig); - dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8), + dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); /* MySQL's timestamp is 4 byte, so we use @@ -2485,16 +2466,11 @@ dict_stats_save( "END;", NULL); if (ret != DB_SUCCESS) { - char buf[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Cannot save table statistics for table " - "%s: %s\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf)), - ut_strerr(ret)); + ib::error() << "Cannot save table statistics for table " + << table->name << ": " << ut_strerr(ret); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); dict_stats_snapshot_free(table); @@ -2502,10 +2478,17 @@ dict_stats_save( } trx_t* trx = trx_allocate_for_background(); - trx_start_if_not_started(trx); + + if (srv_read_only_mode) { + trx_start_internal_read_only(trx); + } else { + trx_start_internal(trx); + } dict_index_t* index; - index_map_t indexes; + index_map_t indexes( + (ut_strcmp_functor()), + index_map_t_allocator(mem_key_dict_stats_index_map_t)); /* Below we do all the modifications in innodb_index_stats in a single transaction for performance reasons. 
Modifying more than one row in a @@ -2540,7 +2523,7 @@ dict_stats_save( continue; } - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); for (ulint i = 0; i < index->n_uniq; i++) { @@ -2551,10 +2534,10 @@ dict_stats_save( ut_snprintf(stat_name, sizeof(stat_name), "n_diff_pfx%02lu", i + 1); - /* craft a string that contains the columns names */ + /* craft a string that contains the column names */ ut_snprintf(stat_description, sizeof(stat_description), - "%s", index->fields[0].name); + "%s", index->fields[0].name()); for (j = 1; j <= i; j++) { size_t len; @@ -2562,7 +2545,7 @@ dict_stats_save( ut_snprintf(stat_description + len, sizeof(stat_description) - len, - ",%s", index->fields[j].name); + ",%s", index->fields[j].name()); } ret = dict_stats_save_index_stat( @@ -2601,7 +2584,7 @@ end: trx_free_for_background(trx); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); dict_stats_snapshot_free(table); @@ -2760,7 +2743,8 @@ dict_stats_fetch_index_stats_step( index != NULL; index = dict_table_get_next_index(index)) { - if (strlen(index->name) == len + if (index->is_committed() + && strlen(index->name) == len && memcmp(index->name, data, len) == 0) { /* the corresponding index was found */ break; @@ -2849,11 +2833,14 @@ dict_stats_fetch_index_stats_step( #define PFX "n_diff_pfx" #define PFX_LEN 10 + /** JAN: TODO: MySQL 5.7 native_strncasecmp() */ if (stat_name_len == 4 /* strlen("size") */ + // && native_strncasecmp("size", stat_name, stat_name_len) == 0) { && strncasecmp("size", stat_name, stat_name_len) == 0) { index->stat_index_size = (ulint) stat_value; arg->stats_were_modified = true; } else if (stat_name_len == 12 /* strlen("n_leaf_pages") */ + // && native_strncasecmp("n_leaf_pages", stat_name, stat_name_len) && strncasecmp("n_leaf_pages", stat_name, stat_name_len) == 0) { index->stat_n_leaf_pages = (ulint) stat_value; @@ -2869,6 +2856,7 @@ dict_stats_fetch_index_stats_step( index->stat_defrag_n_pages_freed = (ulint) stat_value; arg->stats_were_modified = true; } else if (stat_name_len > PFX_LEN /* e.g. 
stat_name=="n_diff_pfx01" */ + // && native_strncasecmp(PFX, stat_name, PFX_LEN) == 0) { && strncasecmp(PFX, stat_name, PFX_LEN) == 0) { const char* num_ptr; @@ -2886,24 +2874,19 @@ dict_stats_fetch_index_stats_step( char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; - dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8), + dict_fs2utf8(table->name.m_name, + db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Ignoring strange row from " - "%s WHERE " - "database_name = '%s' AND " - "table_name = '%s' AND " - "index_name = '%s' AND " - "stat_name = '%.*s'; because stat_name " - "is malformed\n", - INDEX_STATS_NAME_PRINT, - db_utf8, - table_utf8, - index->name, - (int) stat_name_len, - stat_name); + ib::info out; + out << "Ignoring strange row from " + << INDEX_STATS_NAME_PRINT << " WHERE" + " database_name = '" << db_utf8 + << "' AND table_name = '" << table_utf8 + << "' AND index_name = '" << index->name() + << "' AND stat_name = '"; + out.write(stat_name, stat_name_len); + out << "'; because stat_name is malformed"; return(TRUE); } /* else */ @@ -2919,26 +2902,21 @@ dict_stats_fetch_index_stats_step( char db_utf8[MAX_DB_UTF8_LEN]; char table_utf8[MAX_TABLE_UTF8_LEN]; - dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8), + dict_fs2utf8(table->name.m_name, + db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Ignoring strange row from " - "%s WHERE " - "database_name = '%s' AND " - "table_name = '%s' AND " - "index_name = '%s' AND " - "stat_name = '%.*s'; because stat_name is " - "out of range, the index has %lu unique " - "columns\n", - INDEX_STATS_NAME_PRINT, - db_utf8, - table_utf8, - index->name, - (int) stat_name_len, - stat_name, - n_uniq); + ib::info out; + out << "Ignoring strange row from " + << INDEX_STATS_NAME_PRINT << " WHERE" + " database_name = '" << db_utf8 + << "' AND table_name = '" << table_utf8 + << "' AND index_name = '" << index->name() + << "' AND stat_name = '"; + out.write(stat_name, stat_name_len); + out << "'; because stat_name is out of range, the index" + " has " << n_uniq << " unique columns"; + return(TRUE); } /* else */ @@ -2997,9 +2975,13 @@ dict_stats_fetch_from_ps( trx->isolation_level = TRX_ISO_READ_UNCOMMITTED; - trx_start_if_not_started(trx); + if (srv_read_only_mode) { + trx_start_internal_read_only(trx); + } else { + trx_start_internal(trx); + } - dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8), + dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); pinfo = pars_info_create(); @@ -3106,7 +3088,6 @@ dict_stats_empty_defrag_modified_counter( /*********************************************************************//** Fetches or calculates new estimates for index statistics. */ -UNIV_INTERN void dict_stats_update_for_index( /*========================*/ @@ -3131,18 +3112,13 @@ dict_stats_update_for_index( index->stats_error_printed == false) { /* Fall back to transient stats since the persistent storage is not present or is corrupted */ - char buf_table[MAX_FULL_NAME_LEN]; - char buf_index[MAX_FULL_NAME_LEN]; - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s index %s but the required " - "persistent statistics storage is not present or is " - "corrupted. 
Using transient stats instead.\n", - ut_format_name(index->table->name, TRUE, - buf_table, sizeof(buf_table)), - ut_format_name(index->name, FALSE, - buf_index, sizeof(buf_index))); + + ib::info() << "Recalculation of persistent statistics" + " requested for table " << index->table->name + << " index " << index->name + << " but the required" + " persistent statistics storage is not present or is" + " corrupted. Using transient stats instead."; index->stats_error_printed = false; } } @@ -3158,7 +3134,6 @@ dict_stats_update_for_index( Calculates new estimates for table and index statistics. The statistics are used in query optimization. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_update( /*==============*/ @@ -3169,17 +3144,15 @@ dict_stats_update( the persistent statistics storage */ { - char buf[MAX_FULL_NAME_LEN]; - ut_ad(!mutex_own(&dict_sys->mutex)); if (table->ibd_file_missing) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: cannot calculate statistics for table %s " - "because the .ibd file is missing. For help, please " - "refer to " REFMAN "innodb-troubleshooting.html\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + + ib::warn() << "Cannot calculate statistics for table " + << table->name + << " because the .ibd file is missing. " + << TROUBLESHOOTING_MSG; + dict_stats_empty_table(table, true); return(DB_TABLESPACE_DELETED); } else if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { @@ -3205,7 +3178,7 @@ dict_stats_update( /* InnoDB internal tables (e.g. SYS_TABLES) cannot have persistent stats enabled */ - ut_a(strchr(table->name, '/') != NULL); + ut_a(strchr(table->name.m_name, '/') != NULL); /* check if the persistent statistics storage exists before calling the potentially slow function @@ -3231,13 +3204,12 @@ dict_stats_update( if (innodb_table_stats_not_found == false && table->stats_error_printed == false) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Recalculation of persistent statistics " - "requested for table %s but the required persistent " - "statistics storage is not present or is corrupted. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, buf, sizeof(buf))); + ib::warn() << "Recalculation of persistent statistics" + " requested for table " + << table->name + << " but the required persistent" + " statistics storage is not present or is corrupted." + " Using transient stats instead."; table->stats_error_printed = true; } @@ -3277,7 +3249,7 @@ dict_stats_update( /* InnoDB internal tables (e.g. SYS_TABLES) cannot have persistent stats enabled */ - ut_a(strchr(table->name, '/') != NULL); + ut_a(strchr(table->name.m_name, '/') != NULL); if (!dict_stats_persistent_storage_check(false)) { /* persistent statistics storage does not exist @@ -3285,17 +3257,14 @@ dict_stats_update( if (innodb_table_stats_not_found == false && table->stats_error_printed == false) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Fetch of persistent " - "statistics requested for table %s but the " - "required system tables %s and %s are not " - "present or have unexpected structure. " - "Using transient stats instead.\n", - ut_format_name(table->name, TRUE, - buf, sizeof(buf)), - TABLE_STATS_NAME_PRINT, - INDEX_STATS_NAME_PRINT); + ib::error() << "Fetch of persistent statistics" + " requested for table " + << table->name + << " but the required system tables " + << TABLE_STATS_NAME_PRINT + << " and " << INDEX_STATS_NAME_PRINT + << " are not present or have unexpected" + " structure. 
Using transient stats instead."; table->stats_error_printed = true; } @@ -3348,20 +3317,18 @@ dict_stats_update( DICT_STATS_RECALC_PERSISTENT)); } - ut_format_name(table->name, TRUE, buf, sizeof(buf)); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Trying to use table %s which has " - "persistent statistics enabled, but auto " - "recalculation turned off and the statistics " - "do not exist in %s and %s. Please either run " - "\"ANALYZE TABLE %s;\" manually or enable the " - "auto recalculation with " - "\"ALTER TABLE %s STATS_AUTO_RECALC=1;\". " - "InnoDB will now use transient statistics for " - "%s.\n", - buf, TABLE_STATS_NAME, INDEX_STATS_NAME, buf, - buf, buf); + ib::info() << "Trying to use table " << table->name + << " which has persistent statistics enabled," + " but auto recalculation turned off and the" + " statistics do not exist in " + TABLE_STATS_NAME_PRINT + " and " INDEX_STATS_NAME_PRINT + ". Please either run \"ANALYZE TABLE " + << table->name << ";\" manually or enable the" + " auto recalculation with \"ALTER TABLE " + << table->name << " STATS_AUTO_RECALC=1;\"." + " InnoDB will now use transient statistics for " + << table->name << "."; goto transient; default: @@ -3370,16 +3337,12 @@ dict_stats_update( if (innodb_table_stats_not_found == false && table->stats_error_printed == false) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error fetching persistent statistics " - "for table %s from %s and %s: %s. " - "Using transient stats method instead.\n", - ut_format_name(table->name, TRUE, buf, - sizeof(buf)), - TABLE_STATS_NAME, - INDEX_STATS_NAME, - ut_strerr(err)); + ib::error() << "Error fetching persistent statistics" + " for table " + << table->name + << " from " TABLE_STATS_NAME_PRINT " and " + INDEX_STATS_NAME_PRINT ": " << ut_strerr(err) + << ". Using transient stats method instead."; } goto transient; @@ -3410,7 +3373,6 @@ marko: If ibuf merges are not disabled, we need to scan the *.ibd files. But we shouldn't open *.ibd files before we have rolled back dict transactions and opened the SYS_* records for the *.ibd files. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_drop_index( /*==================*/ @@ -3445,7 +3407,7 @@ dict_stats_drop_index( pars_info_add_str_literal(pinfo, "index_name", iname); - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); ret = dict_stats_exec_sql( @@ -3459,7 +3421,7 @@ dict_stats_drop_index( "END;\n", NULL); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); if (ret == DB_STATS_DO_NOT_EXIST) { ret = DB_SUCCESS; @@ -3467,12 +3429,12 @@ dict_stats_drop_index( if (ret != DB_SUCCESS) { ut_snprintf(errstr, errstr_sz, - "Unable to delete statistics for index %s " - "from %s%s: %s. They can be deleted later using " - "DELETE FROM %s WHERE " - "database_name = '%s' AND " - "table_name = '%s' AND " - "index_name = '%s';", + "Unable to delete statistics for index %s" + " from %s%s: %s. 
They can be deleted later using" + " DELETE FROM %s WHERE" + " database_name = '%s' AND" + " table_name = '%s' AND" + " index_name = '%s';", iname, INDEX_STATS_NAME_PRINT, (ret == DB_LOCK_WAIT_TIMEOUT @@ -3507,9 +3469,7 @@ dict_stats_delete_from_table_stats( pars_info_t* pinfo; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); pinfo = pars_info_create(); @@ -3545,9 +3505,7 @@ dict_stats_delete_from_index_stats( pars_info_t* pinfo; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); pinfo = pars_info_create(); @@ -3572,7 +3530,6 @@ Removes the statistics for a table and all of its indexes from the persistent statistics storage if it exists and if there is data stored for the table. This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_drop_table( /*==================*/ @@ -3585,9 +3542,7 @@ dict_stats_drop_table( char table_utf8[MAX_TABLE_UTF8_LEN]; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); /* skip tables that do not contain a database name @@ -3620,16 +3575,16 @@ dict_stats_drop_table( if (ret != DB_SUCCESS) { ut_snprintf(errstr, errstr_sz, - "Unable to delete statistics for table %s.%s: %s. " - "They can be deleted later using " + "Unable to delete statistics for table %s.%s: %s." + " They can be deleted later using" - "DELETE FROM %s WHERE " - "database_name = '%s' AND " - "table_name = '%s'; " + " DELETE FROM %s WHERE" + " database_name = '%s' AND" + " table_name = '%s';" - "DELETE FROM %s WHERE " - "database_name = '%s' AND " - "table_name = '%s';", + " DELETE FROM %s WHERE" + " database_name = '%s' AND" + " table_name = '%s';", db_utf8, table_utf8, ut_strerr(ret), @@ -3653,8 +3608,8 @@ Creates its own transaction and commits it. @return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_rename_in_table_stats( -/*=============================*/ +dict_stats_rename_table_in_table_stats( +/*===================================*/ const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */ const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */ const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */ @@ -3663,9 +3618,7 @@ dict_stats_rename_in_table_stats( pars_info_t* pinfo; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); pinfo = pars_info_create(); @@ -3677,7 +3630,7 @@ dict_stats_rename_in_table_stats( ret = dict_stats_exec_sql( pinfo, - "PROCEDURE RENAME_IN_TABLE_STATS () IS\n" + "PROCEDURE RENAME_TABLE_IN_TABLE_STATS () IS\n" "BEGIN\n" "UPDATE \"" TABLE_STATS_NAME "\" SET\n" "database_name = :new_dbname_utf8,\n" @@ -3699,8 +3652,8 @@ Creates its own transaction and commits it. 
@return DB_SUCCESS or error code */ UNIV_INLINE dberr_t -dict_stats_rename_in_index_stats( -/*=============================*/ +dict_stats_rename_table_in_index_stats( +/*===================================*/ const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */ const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */ const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */ @@ -3709,9 +3662,7 @@ dict_stats_rename_in_index_stats( pars_info_t* pinfo; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(mutex_own(&dict_sys->mutex)); pinfo = pars_info_create(); @@ -3723,7 +3674,7 @@ dict_stats_rename_in_index_stats( ret = dict_stats_exec_sql( pinfo, - "PROCEDURE RENAME_IN_INDEX_STATS () IS\n" + "PROCEDURE RENAME_TABLE_IN_INDEX_STATS () IS\n" "BEGIN\n" "UPDATE \"" INDEX_STATS_NAME "\" SET\n" "database_name = :new_dbname_utf8,\n" @@ -3740,7 +3691,6 @@ dict_stats_rename_in_index_stats( Renames a table in InnoDB persistent stats storage. This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_rename_table( /*====================*/ @@ -3756,9 +3706,7 @@ dict_stats_rename_table( char new_table_utf8[MAX_TABLE_UTF8_LEN]; dberr_t ret; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_X)); ut_ad(!mutex_own(&dict_sys->mutex)); /* skip innodb_table_stats and innodb_index_stats themselves */ @@ -3776,14 +3724,14 @@ dict_stats_rename_table( dict_fs2utf8(new_name, new_db_utf8, sizeof(new_db_utf8), new_table_utf8, sizeof(new_table_utf8)); - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); ulint n_attempts = 0; do { n_attempts++; - ret = dict_stats_rename_in_table_stats( + ret = dict_stats_rename_table_in_table_stats( old_db_utf8, old_table_utf8, new_db_utf8, new_table_utf8); @@ -3798,9 +3746,9 @@ dict_stats_rename_table( if (ret != DB_SUCCESS) { mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); os_thread_sleep(200000 /* 0.2 sec */); - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); } } while ((ret == DB_DEADLOCK @@ -3810,16 +3758,16 @@ dict_stats_rename_table( if (ret != DB_SUCCESS) { ut_snprintf(errstr, errstr_sz, - "Unable to rename statistics from " - "%s.%s to %s.%s in %s: %s. " - "They can be renamed later using " + "Unable to rename statistics from" + " %s.%s to %s.%s in %s: %s." 
+ " They can be renamed later using" - "UPDATE %s SET " - "database_name = '%s', " - "table_name = '%s' " - "WHERE " - "database_name = '%s' AND " - "table_name = '%s';", + " UPDATE %s SET" + " database_name = '%s'," + " table_name = '%s'" + " WHERE" + " database_name = '%s' AND" + " table_name = '%s';", old_db_utf8, old_table_utf8, new_db_utf8, new_table_utf8, @@ -3830,7 +3778,7 @@ dict_stats_rename_table( new_db_utf8, new_table_utf8, old_db_utf8, old_table_utf8); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); return(ret); } /* else */ @@ -3839,7 +3787,7 @@ dict_stats_rename_table( do { n_attempts++; - ret = dict_stats_rename_in_index_stats( + ret = dict_stats_rename_table_in_index_stats( old_db_utf8, old_table_utf8, new_db_utf8, new_table_utf8); @@ -3854,9 +3802,9 @@ dict_stats_rename_table( if (ret != DB_SUCCESS) { mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); os_thread_sleep(200000 /* 0.2 sec */); - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); } } while ((ret == DB_DEADLOCK @@ -3865,20 +3813,20 @@ dict_stats_rename_table( && n_attempts < 5); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); if (ret != DB_SUCCESS) { ut_snprintf(errstr, errstr_sz, - "Unable to rename statistics from " - "%s.%s to %s.%s in %s: %s. " - "They can be renamed later using " + "Unable to rename statistics from" + " %s.%s to %s.%s in %s: %s." + " They can be renamed later using" - "UPDATE %s SET " - "database_name = '%s', " - "table_name = '%s' " - "WHERE " - "database_name = '%s' AND " - "table_name = '%s';", + " UPDATE %s SET" + " database_name = '%s'," + " table_name = '%s'" + " WHERE" + " database_name = '%s' AND" + " table_name = '%s';", old_db_utf8, old_table_utf8, new_db_utf8, new_table_utf8, @@ -3903,10 +3851,12 @@ dict_stats_save_defrag_summary( { dberr_t ret; lint now = (lint) ut_time(); - if (dict_index_is_univ(index)) { + + if (dict_stats_should_ignore_index(index)) { return DB_SUCCESS; } - rw_lock_x_lock(&dict_operation_lock); + + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); ret = dict_stats_save_index_stat(index, now, "n_pages_freed", index->stat_defrag_n_pages_freed, @@ -3916,7 +3866,7 @@ dict_stats_save_defrag_summary( NULL); mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); return (ret); } @@ -3945,16 +3895,17 @@ dict_stats_save_defrag_stats( return(DB_CORRUPTION); } - if (dict_index_is_univ(index)) { + if (dict_stats_should_ignore_index(index)) { return DB_SUCCESS; } lint now = (lint) ut_time(); mtr_t mtr; - ulint n_leaf_pages; - ulint n_leaf_reserved; + ulint n_leaf_pages=0; + ulint n_leaf_reserved=0; mtr_start(&mtr); mtr_s_lock(dict_index_get_lock(index), &mtr); + n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES, &n_leaf_pages, &mtr); mtr_commit(&mtr); @@ -3966,7 +3917,7 @@ dict_stats_save_defrag_stats( return DB_SUCCESS; } - rw_lock_x_lock(&dict_operation_lock); + rw_lock_x_lock(dict_operation_lock); mutex_enter(&dict_sys->mutex); ret = dict_stats_save_index_stat(index, now, "n_page_split", @@ -3999,13 +3950,70 @@ dict_stats_save_defrag_stats( end: mutex_exit(&dict_sys->mutex); - rw_lock_x_unlock(&dict_operation_lock); + rw_lock_x_unlock(dict_operation_lock); return (ret); } 
+/*********************************************************************//** +Renames an index in InnoDB persistent stats storage. +This function creates its own transaction and commits it. +@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned +if the persistent stats do not exist. */ +dberr_t +dict_stats_rename_index( +/*====================*/ + const dict_table_t* table, /*!< in: table whose index + is renamed */ + const char* old_index_name, /*!< in: old index name */ + const char* new_index_name) /*!< in: new index name */ +{ + rw_lock_x_lock(dict_operation_lock); + mutex_enter(&dict_sys->mutex); + + if (!dict_stats_persistent_storage_check(true)) { + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + return(DB_STATS_DO_NOT_EXIST); + } + + char dbname_utf8[MAX_DB_UTF8_LEN]; + char tablename_utf8[MAX_TABLE_UTF8_LEN]; + + dict_fs2utf8(table->name.m_name, dbname_utf8, sizeof(dbname_utf8), + tablename_utf8, sizeof(tablename_utf8)); + + pars_info_t* pinfo; + + pinfo = pars_info_create(); + + pars_info_add_str_literal(pinfo, "dbname_utf8", dbname_utf8); + pars_info_add_str_literal(pinfo, "tablename_utf8", tablename_utf8); + pars_info_add_str_literal(pinfo, "new_index_name", new_index_name); + pars_info_add_str_literal(pinfo, "old_index_name", old_index_name); + + dberr_t ret; + + ret = dict_stats_exec_sql( + pinfo, + "PROCEDURE RENAME_INDEX_IN_INDEX_STATS () IS\n" + "BEGIN\n" + "UPDATE \"" INDEX_STATS_NAME "\" SET\n" + "index_name = :new_index_name\n" + "WHERE\n" + "database_name = :dbname_utf8 AND\n" + "table_name = :tablename_utf8 AND\n" + "index_name = :old_index_name;\n" + "END;\n", NULL); + + mutex_exit(&dict_sys->mutex); + rw_lock_x_unlock(dict_operation_lock); + + return(ret); +} + /* tests @{ */ -#ifdef UNIV_COMPILE_TEST_FUNCS +#ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS /* The following unit tests test some of the functions in this file individually, such testing cannot be performed by the mysql-test framework @@ -4049,7 +4057,7 @@ test_dict_table_schema_check() /* prevent any data dictionary modifications while we are checking the tables' structure */ - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); /* check that a valid table is reported as valid */ schema.n_cols = 7; @@ -4066,11 +4074,11 @@ test_dict_table_schema_check() schema.columns[1].len = 8; if (dict_table_schema_check(&schema, errstr, sizeof(errstr)) != DB_SUCCESS) { - printf("OK: test.tcheck.c02 has different length and is " - "reported as corrupted\n"); + printf("OK: test.tcheck.c02 has different length and is" + " reported as corrupted\n"); } else { - printf("OK: test.tcheck.c02 has different length but is " - "reported as ok\n"); + printf("OK: test.tcheck.c02 has different length but is" + " reported as ok\n"); goto test_dict_table_schema_check_end; } schema.columns[1].len = 4; @@ -4080,11 +4088,11 @@ test_dict_table_schema_check() schema.columns[1].prtype_mask |= DATA_NOT_NULL; if (dict_table_schema_check(&schema, errstr, sizeof(errstr)) != DB_SUCCESS) { - printf("OK: test.tcheck.c02 does not have NOT NULL while " - "it should and is reported as corrupted\n"); + printf("OK: test.tcheck.c02 does not have NOT NULL while" + " it should and is reported as corrupted\n"); } else { - printf("ERROR: test.tcheck.c02 does not have NOT NULL while " - "it should and is not reported as corrupted\n"); + printf("ERROR: test.tcheck.c02 does not have NOT NULL while" + " it should and is not reported as corrupted\n"); goto test_dict_table_schema_check_end; } 
schema.columns[1].prtype_mask &= ~DATA_NOT_NULL; @@ -4093,23 +4101,23 @@ test_dict_table_schema_check() schema.n_cols = 6; if (dict_table_schema_check(&schema, errstr, sizeof(errstr)) == DB_SUCCESS) { - printf("ERROR: test.tcheck has more columns but is not " - "reported as corrupted\n"); + printf("ERROR: test.tcheck has more columns but is not" + " reported as corrupted\n"); goto test_dict_table_schema_check_end; } else { - printf("OK: test.tcheck has more columns and is " - "reported as corrupted\n"); + printf("OK: test.tcheck has more columns and is" + " reported as corrupted\n"); } /* check a table that has some columns missing */ schema.n_cols = 8; if (dict_table_schema_check(&schema, errstr, sizeof(errstr)) != DB_SUCCESS) { - printf("OK: test.tcheck has missing columns and is " - "reported as corrupted\n"); + printf("OK: test.tcheck has missing columns and is" + " reported as corrupted\n"); } else { - printf("ERROR: test.tcheck has missing columns but is " - "reported as ok\n"); + printf("ERROR: test.tcheck has missing columns but is" + " reported as ok\n"); goto test_dict_table_schema_check_end; } @@ -4125,7 +4133,7 @@ test_dict_table_schema_check() test_dict_table_schema_check_end: - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); } /* @} */ @@ -4177,13 +4185,13 @@ test_dict_stats_save() dberr_t ret; /* craft a dummy dict_table_t */ - table.name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME); + table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME); table.stat_n_rows = TEST_N_ROWS; table.stat_clustered_index_size = TEST_CLUSTERED_INDEX_SIZE; table.stat_sum_of_other_index_sizes = TEST_SUM_OF_OTHER_INDEX_SIZES; - UT_LIST_INIT(table.indexes); - UT_LIST_ADD_LAST(indexes, table.indexes, &index1); - UT_LIST_ADD_LAST(indexes, table.indexes, &index2); + UT_LIST_INIT(table.indexes, &dict_index_t::indexes); + UT_LIST_ADD_LAST(table.indexes, &index1); + UT_LIST_ADD_LAST(table.indexes, &index2); ut_d(table.magic_n = DICT_TABLE_MAGIC_N); ut_d(index1.magic_n = DICT_INDEX_MAGIC_N); @@ -4227,8 +4235,8 @@ test_dict_stats_save() ut_a(ret == DB_SUCCESS); - printf("\nOK: stats saved successfully, now go ahead and read " - "what's inside %s and %s:\n\n", + printf("\nOK: stats saved successfully, now go ahead and read" + " what's inside %s and %s:\n\n", TABLE_STATS_NAME_PRINT, INDEX_STATS_NAME_PRINT); @@ -4329,10 +4337,10 @@ test_dict_stats_fetch_from_ps() dberr_t ret; /* craft a dummy dict_table_t */ - table.name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME); - UT_LIST_INIT(table.indexes); - UT_LIST_ADD_LAST(indexes, table.indexes, &index1); - UT_LIST_ADD_LAST(indexes, table.indexes, &index2); + table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME); + UT_LIST_INIT(table.indexes, &dict_index_t::indexes); + UT_LIST_ADD_LAST(table.indexes, &index1); + UT_LIST_ADD_LAST(table.indexes, &index2); ut_d(table.magic_n = DICT_TABLE_MAGIC_N); index1.name = TEST_IDX1_NAME; @@ -4390,7 +4398,7 @@ test_dict_stats_all() } /* @} */ -#endif /* UNIV_COMPILE_TEST_FUNCS */ +#endif /* UNIV_ENABLE_UNIT_TEST_DICT_STATS */ /* @} */ #endif /* UNIV_HOTBACKUP */ diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 7aefa6a1d4d..eca3756152a 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -23,11 +23,13 @@ Code used for background table and index stats gathering. 
Created Apr 25, 2012 Vasil Dimov *******************************************************/ -#include "row0mysql.h" -#include "srv0start.h" +#include "dict0dict.h" #include "dict0dict.h" #include "dict0stats.h" #include "dict0stats_bg.h" +#include "row0mysql.h" +#include "srv0start.h" +#include "ut0new.h" #ifdef UNIV_NONINL # include "dict0stats_bg.ic" @@ -41,34 +43,47 @@ Created Apr 25, 2012 Vasil Dimov #define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) /** Event to wake up the stats thread */ -UNIV_INTERN os_event_t dict_stats_event = NULL; +os_event_t dict_stats_event = NULL; /** This mutex protects the "recalc_pool" variable. */ static ib_mutex_t recalc_pool_mutex; static ib_mutex_t defrag_pool_mutex; -#ifdef HAVE_PSI_INTERFACE -static mysql_pfs_key_t recalc_pool_mutex_key; static mysql_pfs_key_t defrag_pool_mutex_key; -#endif /* HAVE_PSI_INTERFACE */ /** The number of tables that can be added to "recalc_pool" before it is enlarged */ -static const ulint RECALC_POOL_INITIAL_SLOTS = 128; +static const ulint RECALC_POOL_INITIAL_SLOTS = 128; +static const ulint DEFRAG_POOL_INITIAL_SLOTS = 128; + +/** Allocator type, used by std::vector */ +typedef ut_allocator + recalc_pool_allocator_t; /** The multitude of tables whose stats are to be automatically recalculated - an STL vector */ -typedef std::vector recalc_pool_t; -static recalc_pool_t recalc_pool; +typedef std::vector + recalc_pool_t; + +/** Iterator type for iterating over the elements of objects of type +recalc_pool_t. */ +typedef recalc_pool_t::iterator + recalc_pool_iterator_t; -typedef recalc_pool_t::iterator recalc_pool_iterator_t; +/** Pool where we store information on which tables are to be processed +by background statistics gathering. */ +static recalc_pool_t* recalc_pool; /** Indices whose defrag stats need to be saved to persistent storage.*/ struct defrag_pool_item_t { table_id_t table_id; index_id_t index_id; }; -typedef std::vector defrag_pool_t; -static defrag_pool_t defrag_pool; + +typedef ut_allocator + defrag_pool_allocator_t; +typedef std::vector + defrag_pool_t; +static defrag_pool_t* defrag_pool; typedef defrag_pool_t::iterator defrag_pool_iterator_t; /*****************************************************************//** @@ -79,9 +94,18 @@ dict_stats_pool_init() /*=========================*/ { ut_ad(!srv_read_only_mode); - - recalc_pool.reserve(RECALC_POOL_INITIAL_SLOTS); - defrag_pool.reserve(RECALC_POOL_INITIAL_SLOTS); + /* JAN: TODO: MySQL 5.7 PSI + const PSI_memory_key key = mem_key_dict_stats_bg_recalc_pool_t; + const PSI_memory_key key2 = mem_key_dict_defrag_pool_t; + + recalc_pool = UT_NEW(recalc_pool_t(recalc_pool_allocator_t(key)), key); + defrag_pool = UT_NEW(defrag_pool_t(defrag_pool_allocator_t(key2)), key2); + + defrag_pool->reserve(DEFRAG_POOL_INITIAL_SLOTS); + recalc_pool->reserve(RECALC_POOL_INITIAL_SLOTS); + */ + recalc_pool = new std::vector(); + defrag_pool = new std::vector(); } /*****************************************************************//** @@ -94,22 +118,11 @@ dict_stats_pool_deinit() { ut_ad(!srv_read_only_mode); - recalc_pool.clear(); - defrag_pool.clear(); - /* - recalc_pool may still have its buffer allocated. It will free it when - its destructor is called. - The problem is, memory leak detector is run before the recalc_pool's - destructor is invoked, and will report recalc_pool's buffer as leaked - memory. 
To avoid that, we force recalc_pool to surrender its buffer - to empty_pool object, which will free it when leaving this function: - */ - recalc_pool_t recalc_empty_pool; - defrag_pool_t defrag_empty_pool; - memset(&recalc_empty_pool, 0, sizeof(recalc_pool_t)); - memset(&defrag_empty_pool, 0, sizeof(defrag_pool_t)); - recalc_pool.swap(recalc_empty_pool); - defrag_pool.swap(defrag_empty_pool); + recalc_pool->clear(); + defrag_pool->clear(); + + UT_DELETE(recalc_pool); + UT_DELETE(defrag_pool); } /*****************************************************************//** @@ -118,7 +131,6 @@ background stats gathering thread. Only the table id is added to the list, so the table can be closed after being enqueued and it will be opened when needed. If the table does not exist later (has been DROPped), then it will be removed from the pool and skipped. */ -UNIV_INTERN void dict_stats_recalc_pool_add( /*=======================*/ @@ -129,8 +141,8 @@ dict_stats_recalc_pool_add( mutex_enter(&recalc_pool_mutex); /* quit if already in the list */ - for (recalc_pool_iterator_t iter = recalc_pool.begin(); - iter != recalc_pool.end(); + for (recalc_pool_iterator_t iter = recalc_pool->begin(); + iter != recalc_pool->end(); ++iter) { if (*iter == table->id) { @@ -139,7 +151,7 @@ dict_stats_recalc_pool_add( } } - recalc_pool.push_back(table->id); + recalc_pool->push_back(table->id); mutex_exit(&recalc_pool_mutex); @@ -161,14 +173,14 @@ dict_stats_recalc_pool_get( mutex_enter(&recalc_pool_mutex); - if (recalc_pool.empty()) { + if (recalc_pool->empty()) { mutex_exit(&recalc_pool_mutex); return(false); } - *id = recalc_pool[0]; + *id = recalc_pool->at(0); - recalc_pool.erase(recalc_pool.begin()); + recalc_pool->erase(recalc_pool->begin()); mutex_exit(&recalc_pool_mutex); @@ -178,7 +190,6 @@ dict_stats_recalc_pool_get( /*****************************************************************//** Delete a given table from the auto recalc pool. 
dict_stats_recalc_pool_del() */ -UNIV_INTERN void dict_stats_recalc_pool_del( /*=======================*/ @@ -191,13 +202,13 @@ dict_stats_recalc_pool_del( ut_ad(table->id > 0); - for (recalc_pool_iterator_t iter = recalc_pool.begin(); - iter != recalc_pool.end(); + for (recalc_pool_iterator_t iter = recalc_pool->begin(); + iter != recalc_pool->end(); ++iter) { if (*iter == table->id) { /* erase() invalidates the iterator */ - recalc_pool.erase(iter); + recalc_pool->erase(iter); break; } } @@ -224,8 +235,8 @@ dict_stats_defrag_pool_add( mutex_enter(&defrag_pool_mutex); /* quit if already in the list */ - for (defrag_pool_iterator_t iter = defrag_pool.begin(); - iter != defrag_pool.end(); + for (defrag_pool_iterator_t iter = defrag_pool->begin(); + iter != defrag_pool->end(); ++iter) { if ((*iter).table_id == index->table->id && (*iter).index_id == index->id) { @@ -236,7 +247,7 @@ dict_stats_defrag_pool_add( item.table_id = index->table->id; item.index_id = index->id; - defrag_pool.push_back(item); + defrag_pool->push_back(item); mutex_exit(&defrag_pool_mutex); @@ -260,16 +271,16 @@ dict_stats_defrag_pool_get( mutex_enter(&defrag_pool_mutex); - if (defrag_pool.empty()) { + if (defrag_pool->empty()) { mutex_exit(&defrag_pool_mutex); return(false); } - defrag_pool_item_t& item = defrag_pool.back(); + defrag_pool_item_t& item = defrag_pool->back(); *table_id = item.table_id; *index_id = item.index_id; - defrag_pool.pop_back(); + defrag_pool->pop_back(); mutex_exit(&defrag_pool_mutex); @@ -292,14 +303,14 @@ dict_stats_defrag_pool_del( mutex_enter(&defrag_pool_mutex); - defrag_pool_iterator_t iter = defrag_pool.begin(); - while (iter != defrag_pool.end()) { + defrag_pool_iterator_t iter = defrag_pool->begin(); + while (iter != defrag_pool->end()) { if ((table && (*iter).table_id == table->id) || (index && (*iter).table_id == index->table->id && (*iter).index_id == index->id)) { /* erase() invalidates the iterator */ - iter = defrag_pool.erase(iter); + iter = defrag_pool->erase(iter); if (index) break; } else { @@ -319,7 +330,6 @@ The background stats thread is guaranteed not to start using the specified table after this function returns and before the caller unlocks the data dictionary because it sets the BG_STAT_IN_PROGRESS bit in table->stats_bg_flag under dict_sys->mutex. */ -UNIV_INTERN void dict_stats_wait_bg_to_stop_using_table( /*===================================*/ @@ -335,14 +345,13 @@ dict_stats_wait_bg_to_stop_using_table( /*****************************************************************//** Initialize global variables needed for the operation of dict_stats_thread() Must be called before dict_stats_thread() is started. */ -UNIV_INTERN void dict_stats_thread_init() /*====================*/ { ut_a(!srv_read_only_mode); - dict_stats_event = os_event_create(); + dict_stats_event = os_event_create(0); /* The recalc_pool_mutex is acquired from: 1) the background stats gathering thread before any other latch @@ -357,19 +366,18 @@ dict_stats_thread_init() and dict_operation_lock (SYNC_DICT_OPERATION) have been locked (thus a level size()) dict_stats_process_entry_from_defrag_pool(); os_event_reset(dict_stats_event); diff --git a/storage/innobase/dyn/dyn0dyn.cc b/storage/innobase/dyn/dyn0dyn.cc deleted file mode 100644 index 3ef5297a7c9..00000000000 --- a/storage/innobase/dyn/dyn0dyn.cc +++ /dev/null @@ -1,66 +0,0 @@ -/***************************************************************************** - -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. 
- -This program is free software; you can redistribute it and/or modify it under -the terms of the GNU General Public License as published by the Free Software -Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA - -*****************************************************************************/ - -/**************************************************//** -@file dyn/dyn0dyn.cc -The dynamically allocated array - -Created 2/5/1996 Heikki Tuuri -*******************************************************/ - -#include "dyn0dyn.h" -#ifdef UNIV_NONINL -#include "dyn0dyn.ic" -#endif - -/************************************************************//** -Adds a new block to a dyn array. -@return created block */ -UNIV_INTERN -dyn_block_t* -dyn_array_add_block( -/*================*/ - dyn_array_t* arr) /*!< in/out: dyn array */ -{ - mem_heap_t* heap; - dyn_block_t* block; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - - if (arr->heap == NULL) { - UT_LIST_INIT(arr->base); - UT_LIST_ADD_FIRST(list, arr->base, arr); - - arr->heap = mem_heap_create(sizeof(dyn_block_t)); - } - - block = dyn_array_get_last_block(arr); - block->used = block->used | DYN_BLOCK_FULL_FLAG; - - heap = arr->heap; - - block = static_cast<dyn_block_t*>( - mem_heap_alloc(heap, sizeof(dyn_block_t))); - - block->used = 0; - - UT_LIST_ADD_LAST(list, arr->base, block); - - return(block); -} diff --git a/storage/innobase/eval/eval0eval.cc b/storage/innobase/eval/eval0eval.cc index ccc54781102..a525cb604ea 100644 --- a/storage/innobase/eval/eval0eval.cc +++ b/storage/innobase/eval/eval0eval.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -60,8 +60,7 @@ NOTE that this memory must be explicitly freed when the query graph is freed. If the node already has an allocated buffer, that buffer is freed here. NOTE that this is the only function where dynamic memory should be allocated for a query node val field. -@return pointer to allocated buffer */ -UNIV_INTERN +@return pointer to allocated buffer */ byte* eval_node_alloc_val_buf( /*====================*/ @@ -80,14 +79,14 @@ eval_node_alloc_val_buf( data = static_cast<byte*>(dfield_get_data(dfield)); - if (data && data != &eval_dummy) { - mem_free(data); + if (data != &eval_dummy) { + ut_free(data); } if (size == 0) { data = &eval_dummy; } else { - data = static_cast<byte*>(mem_alloc(size)); + data = static_cast<byte*>(ut_malloc_nokey(size)); } que_node_set_val_buf_size(node, size); @@ -101,7 +100,6 @@ eval_node_alloc_val_buf( Free the buffer from global dynamic memory for a value of a que_node, if it has been allocated in the above function. The freeing for pushed column values is done in sel_col_prefetch_buf_free.
*/ -UNIV_INTERN void eval_node_free_val_buf( /*===================*/ @@ -120,7 +118,7 @@ eval_node_free_val_buf( if (que_node_get_val_buf_size(node) > 0) { ut_a(data); - mem_free(data); + ut_free(data); } } @@ -135,12 +133,9 @@ eval_cmp_like( que_node_t* arg2) /* !< in: right operand */ { ib_like_t op; - int res; que_node_t* arg3; que_node_t* arg4; - dfield_t* dfield; - dtype_t* dtype; - ibool val = TRUE; + const dfield_t* dfield; arg3 = que_node_get_like_node(arg2); @@ -148,51 +143,23 @@ eval_cmp_like( ut_a(arg3); dfield = que_node_get_val(arg3); - dtype = dfield_get_type(dfield); - - ut_a(dtype_get_mtype(dtype) == DATA_INT); - op = static_cast(mach_read_from_4(static_cast(dfield_get_data(dfield)))); + ut_ad(dtype_get_mtype(dfield_get_type(dfield)) == DATA_INT); + op = static_cast( + mach_read_from_4(static_cast( + dfield_get_data(dfield)))); switch (op) { - case IB_LIKE_PREFIX: - - arg4 = que_node_get_next(arg3); - res = cmp_dfield_dfield_like_prefix( - que_node_get_val(arg1), - que_node_get_val(arg4)); - break; - - case IB_LIKE_SUFFIX: - - arg4 = que_node_get_next(arg3); - res = cmp_dfield_dfield_like_suffix( - que_node_get_val(arg1), - que_node_get_val(arg4)); - break; - - case IB_LIKE_SUBSTR: - + case IB_LIKE_PREFIX: arg4 = que_node_get_next(arg3); - res = cmp_dfield_dfield_like_substr( - que_node_get_val(arg1), - que_node_get_val(arg4)); - break; - - case IB_LIKE_EXACT: - res = cmp_dfield_dfield( - que_node_get_val(arg1), - que_node_get_val(arg2)); - break; - - default: - ut_error; - } - - if (res != 0) { - val = FALSE; + return(!cmp_dfield_dfield_like_prefix(que_node_get_val(arg1), + que_node_get_val(arg4))); + case IB_LIKE_EXACT: + return(!cmp_dfield_dfield(que_node_get_val(arg1), + que_node_get_val(arg2))); } - return(val); + ut_error; + return(FALSE); } /********************************************************************* @@ -206,53 +173,47 @@ eval_cmp( que_node_t* arg1; que_node_t* arg2; int res; - int func; - ibool val = TRUE; + ibool val = FALSE; /* remove warning */ ut_ad(que_node_get_type(cmp_node) == QUE_NODE_FUNC); arg1 = cmp_node->args; arg2 = que_node_get_next(arg1); - func = cmp_node->func; - - if (func == PARS_LIKE_TOKEN_EXACT - || func == PARS_LIKE_TOKEN_PREFIX - || func == PARS_LIKE_TOKEN_SUFFIX - || func == PARS_LIKE_TOKEN_SUBSTR) { - - val = eval_cmp_like(arg1, arg2); - } else { + switch (cmp_node->func) { + case '<': + case '=': + case '>': + case PARS_LE_TOKEN: + case PARS_NE_TOKEN: + case PARS_GE_TOKEN: res = cmp_dfield_dfield( que_node_get_val(arg1), que_node_get_val(arg2)); - if (func == '=') { - if (res != 0) { - val = FALSE; - } - } else if (func == '<') { - if (res != -1) { - val = FALSE; - } - } else if (func == PARS_LE_TOKEN) { - if (res == 1) { - val = FALSE; - } - } else if (func == PARS_NE_TOKEN) { - if (res == 0) { - val = FALSE; - } - } else if (func == PARS_GE_TOKEN) { - if (res == -1) { - val = FALSE; - } - } else { - ut_ad(func == '>'); - - if (res != 1) { - val = FALSE; - } + switch (cmp_node->func) { + case '<': + val = (res < 0); + break; + case '=': + val = (res == 0); + break; + case '>': + val = (res > 0); + break; + case PARS_LE_TOKEN: + val = (res <= 0); + break; + case PARS_NE_TOKEN: + val = (res != 0); + break; + case PARS_GE_TOKEN: + val = (res >= 0); + break; } + break; + default: + val = eval_cmp_like(arg1, arg2); + break; } eval_node_set_ibool_val(cmp_node, val); @@ -870,7 +831,6 @@ eval_predefined( /*****************************************************************//** Evaluates a function node. 
*/ -UNIV_INTERN void eval_func( /*======*/ diff --git a/storage/innobase/eval/eval0proc.cc b/storage/innobase/eval/eval0proc.cc index e6f3a32cd48..cdd6fdc2a0a 100644 --- a/storage/innobase/eval/eval0proc.cc +++ b/storage/innobase/eval/eval0proc.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1998, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1998, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -31,8 +31,7 @@ Created 1/20/1998 Heikki Tuuri /**********************************************************************//** Performs an execution step of an if-statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* if_step( /*====*/ @@ -108,8 +107,7 @@ if_step( /**********************************************************************//** Performs an execution step of a while-statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* while_step( /*=======*/ @@ -144,8 +142,7 @@ while_step( /**********************************************************************//** Performs an execution step of an assignment statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* assign_step( /*========*/ @@ -171,8 +168,7 @@ assign_step( /**********************************************************************//** Performs an execution step of a for-loop node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* for_step( /*=====*/ @@ -233,8 +229,7 @@ for_step( /**********************************************************************//** Performs an execution step of an exit statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* exit_step( /*======*/ @@ -265,8 +260,7 @@ exit_step( /**********************************************************************//** Performs an execution step of a return-statement node. 
-@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* return_step( /*========*/ diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 2db3063d6b5..fb872628e4f 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -122,13 +122,11 @@ void fil_space_crypt_init() /*==================*/ { - mutex_create(fil_crypt_key_mutex_key, - &fil_crypt_key_mutex, SYNC_NO_ORDER_CHECK); + mutex_create(LATCH_ID_FIL_CRYPT_MUTEX, &fil_crypt_key_mutex); - fil_crypt_throttle_sleep_event = os_event_create(); + fil_crypt_throttle_sleep_event = os_event_create(0); - mutex_create(fil_crypt_stat_mutex_key, - &crypt_stat_mutex, SYNC_NO_ORDER_CHECK); + mutex_create(LATCH_ID_FIL_CRYPT_STAT_MUTEX, &crypt_stat_mutex); memset(&crypt_stat, 0, sizeof(crypt_stat)); } @@ -139,7 +137,7 @@ void fil_space_crypt_cleanup() /*=====================*/ { - os_event_free(fil_crypt_throttle_sleep_event); + os_event_destroy(fil_crypt_throttle_sleep_event); } /****************************************************************** @@ -204,8 +202,7 @@ fil_space_create_crypt_data( crypt_data->min_key_version = encryption_key_get_latest_version(key_id); } - mutex_create(fil_crypt_data_mutex_key, - &crypt_data->mutex, SYNC_NO_ORDER_CHECK); + mutex_create(LATCH_ID_FIL_CRYPT_DATA_MUTEX, &crypt_data->mutex); crypt_data->locker = crypt_data_scheme_locker; my_random_bytes(crypt_data->iv, sizeof(crypt_data->iv)); crypt_data->encryption = encrypt_mode; @@ -258,20 +255,6 @@ fil_space_read_crypt_data( } if (memcmp(page + offset, CRYPT_MAGIC, MAGIC_SZ) != 0) { -#ifdef UNIV_DEBUG - ib_logf(IB_LOG_LEVEL_WARN, - "Found potentially bogus bytes on " - "page 0 offset %lu for space %lu : " - "[ %.2x %.2x %.2x %.2x %.2x %.2x ]. " - "Assuming space is not encrypted!.", - offset, space, - page[offset + 0], - page[offset + 1], - page[offset + 2], - page[offset + 3], - page[offset + 4], - page[offset + 5]); -#endif /* Crypt data is not stored. */ return NULL; } @@ -280,18 +263,17 @@ fil_space_read_crypt_data( if (! (type == CRYPT_SCHEME_UNENCRYPTED || type == CRYPT_SCHEME_1)) { - - ib_logf(IB_LOG_LEVEL_ERROR, - "Found non sensible crypt scheme: %lu for space %lu " - " offset: %lu bytes: " - "[ %.2x %.2x %.2x %.2x %.2x %.2x ].", - type, space, offset, - page[offset + 0 + MAGIC_SZ], - page[offset + 1 + MAGIC_SZ], - page[offset + 2 + MAGIC_SZ], - page[offset + 3 + MAGIC_SZ], - page[offset + 4 + MAGIC_SZ], - page[offset + 5 + MAGIC_SZ]); + ib::error() << "Found non sensible crypt scheme: " + << type << " for space: " + << space << " offset: " + << offset << " bytes: [" + << page[offset + 0 + MAGIC_SZ] + << page[offset + 1 + MAGIC_SZ] + << page[offset + 2 + MAGIC_SZ] + << page[offset + 3 + MAGIC_SZ] + << page[offset + 4 + MAGIC_SZ] + << page[offset + 5 + MAGIC_SZ] + << "]."; ut_error; } @@ -299,17 +281,18 @@ fil_space_read_crypt_data( ulint iv_length = mach_read_from_1(page + offset + MAGIC_SZ + 1); if (! 
(iv_length == sizeof(crypt_data->iv))) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Found non sensible iv length: %lu for space %lu " - " offset: %lu type: %lu bytes: " - "[ %.2x %.2x %.2x %.2x %.2x %.2x ].", - iv_length, space, offset, type, - page[offset + 0 + MAGIC_SZ], - page[offset + 1 + MAGIC_SZ], - page[offset + 2 + MAGIC_SZ], - page[offset + 3 + MAGIC_SZ], - page[offset + 4 + MAGIC_SZ], - page[offset + 5 + MAGIC_SZ]); + ib::error() << "Found non sensible iv length: " + << iv_length << " for space: " + << space << " offset: " + << offset << " type: " + << type << " bytes: [" + << page[offset + 0 + MAGIC_SZ] + << page[offset + 1 + MAGIC_SZ] + << page[offset + 2 + MAGIC_SZ] + << page[offset + 3 + MAGIC_SZ] + << page[offset + 4 + MAGIC_SZ] + << page[offset + 5 + MAGIC_SZ] + << "]."; ut_error; } @@ -331,8 +314,7 @@ fil_space_read_crypt_data( crypt_data->key_id = key_id; crypt_data->page0_offset = offset; crypt_data->encryption = encryption; - mutex_create(fil_crypt_data_mutex_key, - &crypt_data->mutex, SYNC_NO_ORDER_CHECK); + mutex_create(LATCH_ID_FIL_CRYPT_DATA_MUTEX, &crypt_data->mutex); crypt_data->locker = crypt_data_scheme_locker; crypt_data->inited = true; memcpy(crypt_data->iv, page + offset + MAGIC_SZ + 2, iv_length); @@ -352,13 +334,17 @@ fil_space_destroy_crypt_data( /* Make sure that this thread owns the crypt_data and make it unawailable, this does not fully avoid the race between drop table and crypt thread */ + mutex_enter(&fil_crypt_threads_mutex); mutex_enter(&(*crypt_data)->mutex); (*crypt_data)->inited = false; mutex_exit(&(*crypt_data)->mutex); + /* JAN: TODO: mutex_free(& (*crypt_data)->mutex); memset(*crypt_data, 0, sizeof(fil_space_crypt_t)); free(*crypt_data); (*crypt_data) = NULL; + */ + mutex_exit(&fil_crypt_threads_mutex); } } @@ -550,17 +536,16 @@ fil_encrypt_buf( ulint offset, /*!< in: Page offset */ lsn_t lsn, /*!< in: lsn */ byte* src_frame, /*!< in: Source page to be encrypted */ - ulint zip_size, /*!< in: compressed size if - row format compressed */ + const page_size_t& page_size, /*!< in: page size */ byte* dst_frame) /*!< in: outbut buffer */ { - ulint page_size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; + ulint size = page_size.physical(); uint key_version = fil_crypt_get_latest_key_version(crypt_data); if (key_version == ENCRYPTION_KEY_VERSION_INVALID) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Unknown key id %u. Can't continue!\n", - crypt_data->key_id); + ib::error() << "Unknown key id: " + << crypt_data->key_id + << " Can't continue!"; ut_error; } @@ -580,7 +565,7 @@ fil_encrypt_buf( /* Calculate the start offset in a page */ ulint unencrypted_bytes = header_len + FIL_PAGE_DATA_END; - ulint srclen = page_size - unencrypted_bytes; + ulint srclen = size - unencrypted_bytes; const byte* src = src_frame + header_len; byte* dst = dst_frame + header_len; uint32 dstlen = 0; @@ -594,12 +579,10 @@ fil_encrypt_buf( space, offset, lsn); if (! ((rc == MY_AES_OK) && ((ulint) dstlen == srclen))) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Unable to encrypt data-block " - " src: %p srclen: %ld buf: %p buflen: %d." - " return-code: %d. Can't continue!\n", - src, (long)srclen, - dst, dstlen, rc); + ib::error() << "Unable to encrypt data-block " + << " src: " << src << " srclen: " << srclen + << " buf: " << dst << " buflen: " << dstlen + << " return-code: "<< rc << " Can't continue!"; ut_error; } @@ -609,18 +592,18 @@ fil_encrypt_buf( to sector boundary is written. 
*/ if (!page_compressed) { /* FIL page trailer is also not encrypted */ - memcpy(dst_frame + page_size - FIL_PAGE_DATA_END, - src_frame + page_size - FIL_PAGE_DATA_END, + memcpy(dst_frame + page_size.physical() - FIL_PAGE_DATA_END, + src_frame + page_size.physical() - FIL_PAGE_DATA_END, FIL_PAGE_DATA_END); } else { /* Clean up rest of buffer */ - memset(dst_frame+header_len+srclen, 0, page_size - (header_len+srclen)); + memset(dst_frame+header_len+srclen, 0, page_size.physical() - (header_len+srclen)); } /* handle post encryption checksum */ ib_uint32_t checksum = 0; - checksum = fil_crypt_calculate_checksum(zip_size, dst_frame); + checksum = fil_crypt_calculate_checksum(page_size, dst_frame); // store the post-encryption checksum after the key-version mach_write_to_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4, checksum); @@ -640,8 +623,7 @@ fil_space_encrypt( ulint offset, /*!< in: Page offset */ lsn_t lsn, /*!< in: lsn */ byte* src_frame, /*!< in: Source page to be encrypted */ - ulint zip_size, /*!< in: compressed size if - row_format compressed */ + const page_size_t& page_size, /*!< in: page size */ byte* dst_frame) /*!< in: outbut buffer */ { fil_space_crypt_t* crypt_data = NULL; @@ -664,7 +646,60 @@ fil_space_encrypt( ut_a(crypt_data != NULL && crypt_data->encryption != FIL_SPACE_ENCRYPTION_OFF); - byte* tmp = fil_encrypt_buf(crypt_data, space, offset, lsn, src_frame, zip_size, dst_frame); + byte* tmp = fil_encrypt_buf(crypt_data, space, offset, lsn, src_frame, page_size, dst_frame); + +#ifdef UNIV_DEBUG + if (tmp) { + /* Verify that encrypted buffer is not corrupted */ + byte* tmp_mem = (byte *)malloc(UNIV_PAGE_SIZE); + dberr_t err = DB_SUCCESS; + byte* src = src_frame; + bool page_compressed_encrypted = (mach_read_from_2(tmp+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED); + byte* comp_mem = NULL; + byte* uncomp_mem = NULL; + + if (page_compressed_encrypted) { + comp_mem = (byte *)malloc(UNIV_PAGE_SIZE); + uncomp_mem = (byte *)malloc(UNIV_PAGE_SIZE); + memcpy(comp_mem, src_frame, UNIV_PAGE_SIZE); + fil_decompress_page(uncomp_mem, comp_mem, page_size.physical(), NULL); + src = uncomp_mem; + } + + bool corrupted1 = buf_page_is_corrupted(true, src, page_size, fsp_is_checksum_disabled(space)); + bool ok = fil_space_decrypt(crypt_data, tmp_mem, page_size, tmp, &err); + + /* Need to decompress the page if it was also compressed */ + if (page_compressed_encrypted) { + memcpy(comp_mem, tmp_mem, UNIV_PAGE_SIZE); + fil_decompress_page(tmp_mem, comp_mem, page_size.physical(), NULL); + } + + bool corrupted = buf_page_is_corrupted(true, tmp_mem, page_size, fsp_is_checksum_disabled(space)); + bool different = memcmp(src, tmp_mem, page_size.physical()); + + if (!ok || corrupted || corrupted1 || err != DB_SUCCESS || different) { + fprintf(stderr, "JAN: ok %d corrupted %d corrupted1 %d err %d different %d\n", ok , corrupted, corrupted1, err, different); + fprintf(stderr, "JAN1: src_frame\n"); + buf_page_print(src_frame, page_size, BUF_PAGE_PRINT_NO_CRASH); + fprintf(stderr, "JAN2: encrypted_frame\n"); + buf_page_print(tmp, page_size, BUF_PAGE_PRINT_NO_CRASH); + fprintf(stderr, "JAN1: decrypted_frame\n"); + buf_page_print(tmp_mem, page_size, BUF_PAGE_PRINT_NO_CRASH); + ut_error; + } + + free(tmp_mem); + + if (comp_mem) { + free(comp_mem); + } + + if (uncomp_mem) { + free(uncomp_mem); + } + } +#endif /* UNIV_DEBUG */ return tmp; } @@ -704,7 +739,7 @@ fil_space_decrypt( /*==============*/ fil_space_crypt_t* crypt_data, /*!< in: crypt data */ byte* tmp_frame, /*!< in: temporary 
buffer */ - ulint page_size, /*!< in: page size */ + const page_size_t& page_size, /*!< in: page size */ byte* src_frame, /*!< in: out: page buffer */ dberr_t* err) /*!< in: out: DB_SUCCESS or error code */ @@ -730,13 +765,14 @@ fil_space_decrypt( data file (ibdata*, not *.ibd), if not clear it. */ #ifdef UNIV_DEBUG - ib_logf(IB_LOG_LEVEL_WARN, - "Page on space %lu offset %lu has key_version %u" - " when it shoud be undefined.", - space, offset, key_version); + ib::warn() + << "Page on space "<< space << " offset " << offset + << " has key_version " << key_version + << " when it shoud be undefined."; #endif mach_write_to_4(src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0); } + return false; } @@ -756,7 +792,7 @@ fil_space_decrypt( const byte* src = src_frame + header_len; byte* dst = tmp_frame + header_len; uint32 dstlen = 0; - ulint srclen = page_size - (header_len + FIL_PAGE_DATA_END); + ulint srclen = page_size.physical() - (header_len + FIL_PAGE_DATA_END); if (page_compressed) { srclen = mach_read_from_2(src_frame + FIL_PAGE_DATA); @@ -773,12 +809,11 @@ fil_space_decrypt( return false; } - ib_logf(IB_LOG_LEVEL_FATAL, - "Unable to decrypt data-block " - " src: %p srclen: %ld buf: %p buflen: %d." - " return-code: %d. Can't continue!\n", - src, (long)srclen, - dst, dstlen, rc); + ib::error() << "Unable to decrypt data-block " + << " src: " << src << "srclen: " + << srclen << " buf: " << dst << "buflen: " + << dstlen << " return-code: " << rc + << " Can't continue!"; ut_error; } @@ -788,8 +823,8 @@ fil_space_decrypt( to sector boundary is written. */ if (!page_compressed) { /* Copy FIL trailer */ - memcpy(tmp_frame + page_size - FIL_PAGE_DATA_END, - src_frame + page_size - FIL_PAGE_DATA_END, + memcpy(tmp_frame + page_size.physical() - FIL_PAGE_DATA_END, + src_frame + page_size.physical() - FIL_PAGE_DATA_END, FIL_PAGE_DATA_END); // clear key-version & crypt-checksum from dst @@ -811,7 +846,7 @@ fil_space_decrypt( /*==============*/ ulint space, /*!< in: Fil space id */ byte* tmp_frame, /*!< in: temporary buffer */ - ulint page_size, /*!< in: page size */ + const page_size_t& page_size, /*!< in: page size */ byte* src_frame) /*!< in/out: page buffer */ { dberr_t err = DB_SUCCESS; @@ -828,7 +863,7 @@ fil_space_decrypt( if (encrypted) { /* Copy the decrypted page back to page buffer, not really any other options. 
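The decryption path above keys everything off the 4-byte key-version slot in the FIL page header (FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION): a value of zero is treated as "not encrypted", anything else names the key version to decrypt with, and stray values on system-tablespace pages are simply cleared. A minimal sketch of that check; the offset constant and helpers are assumptions for illustration, not the real InnoDB symbols:

    /* Sketch: KEY_VERSION_OFFSET_SKETCH is a placeholder offset; InnoDB
       stores page header fields big-endian, which read_4_be() mimics. */
    #include <stdint.h>

    static const unsigned KEY_VERSION_OFFSET_SKETCH = 26;   /* assumed */

    static uint32_t
    read_4_be(const unsigned char* b)
    {
            return(((uint32_t) b[0] << 24) | ((uint32_t) b[1] << 16)
                   | ((uint32_t) b[2] << 8) | (uint32_t) b[3]);
    }

    static bool
    page_is_encrypted_sketch(const unsigned char* page_frame)
    {
            return(read_4_be(page_frame + KEY_VERSION_OFFSET_SKETCH) != 0);
    }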
*/ - memcpy(src_frame, tmp_frame, page_size); + memcpy(src_frame, tmp_frame, page_size.physical()); } res = src_frame; @@ -845,14 +880,14 @@ UNIV_INTERN ulint fil_crypt_calculate_checksum( /*=========================*/ - ulint zip_size, /*!< in: zip_size or 0 */ - byte* dst_frame) /*!< in: page where to calculate */ + const page_size_t& page_size, /*!< in: page size */ + byte* dst_frame) /*!< in: page where to calculate */ { ib_uint32_t checksum = 0; srv_checksum_algorithm_t algorithm = static_cast(srv_checksum_algorithm); - if (zip_size == 0) { + if (!page_size.is_compressed()) { switch (algorithm) { case SRV_CHECKSUM_ALGORITHM_CRC32: case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: @@ -871,7 +906,7 @@ fil_crypt_calculate_checksum( * if new enum is added and not handled here */ } } else { - checksum = page_zip_calc_checksum(dst_frame, zip_size, + checksum = page_zip_calc_checksum(dst_frame, page_size.physical(), algorithm); } @@ -887,9 +922,8 @@ UNIV_INTERN bool fil_space_verify_crypt_checksum( /*============================*/ - const byte* src_frame, /*!< in: page the verify */ - ulint zip_size) /*!< in: compressed size if - row_format compressed */ + const byte* src_frame, /*!< in: page the verify */ + const page_size_t& page_size) /*!< in: page size */ { // key version uint key_version = mach_read_from_4( @@ -924,7 +958,7 @@ fil_space_verify_crypt_checksum( srv_checksum_algorithm_t save_checksum_algorithm = (srv_checksum_algorithm_t)srv_checksum_algorithm; - if (zip_size == 0 && + if (!page_size.is_compressed() && (save_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB || save_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_INNODB)) { /* handle ALGORITHM_INNODB specially, @@ -938,7 +972,7 @@ fil_space_verify_crypt_checksum( } /* verify checksums */ - ibool corrupted = buf_page_is_corrupted(false, src_frame, zip_size); + ibool corrupted = buf_page_is_corrupted(false, src_frame, page_size, false); /** restore frame & algorithm */ srv_checksum_algorithm = save_checksum_algorithm; @@ -951,11 +985,7 @@ fil_space_verify_crypt_checksum( UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM, checksum_field2); - if (!corrupted) { - return true; // page was encrypted and checksum matched - } else { - return false; // page was encrypted but checksum didn't match - } + return (!corrupted); } /***********************************************************************/ @@ -986,9 +1016,9 @@ fil_crypt_get_key_state( new_state->rotate_key_age = srv_fil_crypt_rotate_key_age; if (new_state->key_version == ENCRYPTION_KEY_VERSION_INVALID) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Used key_id %u can't be found from key file.", - new_state->key_id); + ib::error() << "Used key_id " + << new_state->key_id + << " can't be found from key file."; } ut_a(new_state->key_version != ENCRYPTION_KEY_VERSION_INVALID); @@ -1133,26 +1163,30 @@ fil_crypt_start_encrypting_space( /* 2 - get page 0 */ ulint offset = 0; - ulint zip_size = fil_space_get_zip_size(space); - buf_block_t* block = buf_page_get_gen(space, zip_size, offset, + const page_id_t page_id(space, offset); + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(space, &tsfound); + dberr_t err = DB_SUCCESS; + buf_block_t* block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL, BUF_GET, __FILE__, __LINE__, - &mtr); + &mtr, &err); if (fil_crypt_is_closing(space) || - fil_space_found_by_id(space) == NULL) { + fil_space_found_by_id(space) == NULL || + err != DB_SUCCESS) { mtr_commit(&mtr); break; } /* 3 - compute location to store crypt data */ byte* frame = 
buf_block_get_frame(block); - ulint maxsize; + ulint maxsize = 0; ut_ad(crypt_data); crypt_data->page0_offset = - fsp_header_get_crypt_offset(zip_size, &maxsize); + fsp_header_get_crypt_offset(page_size, &maxsize); /* 4 - write crypt data to page 0 */ fil_space_write_crypt_data_low(crypt_data, @@ -1169,7 +1203,7 @@ fil_crypt_start_encrypting_space( } /* record lsn of update */ - lsn_t end_lsn = mtr.end_lsn; + lsn_t end_lsn = mtr.commit_lsn(); /* 4 - sync tablespace before publishing crypt data */ @@ -1181,7 +1215,7 @@ fil_crypt_start_encrypting_space( ulint n_pages = 0; ulint sum_pages = 0; do { - success = buf_flush_list(ULINT_MAX, end_lsn, &n_pages); + success = buf_flush_lists(ULINT_MAX, end_lsn, &n_pages); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); sum_pages += n_pages; } while (!success && @@ -1279,7 +1313,7 @@ fil_crypt_space_needs_rotation( /* Make sure that tablespace is found and it is normal tablespace */ if (fil_space_found_by_id(space) == NULL || - fil_space_get_type(space) != FIL_TABLESPACE) { + fil_space_get_type(space) != FIL_TYPE_TABLESPACE) { return false; } @@ -1702,11 +1736,15 @@ static bool fil_crypt_is_page_uninitialized( /*============================*/ - const byte *frame, /*!< in: Page */ - uint zip_size) /*!< in: compressed size if - row_format compressed */ + const byte* frame, /*!< in: Page */ + const page_size_t& page_size) /*!< in: page size */ { - if (zip_size) { + if (fil_page_get_type(frame) == FIL_PAGE_TYPE_ALLOCATED) { + /* empty pages aren't encrypted */ + return true; + } + + if (page_size.is_compressed()) { ulint stored_checksum = mach_read_from_4( frame + FIL_PAGE_SPACE_OR_CHKSUM); /* empty pages aren't encrypted */ @@ -1714,7 +1752,7 @@ fil_crypt_is_page_uninitialized( return true; } } else { - ulint size = UNIV_PAGE_SIZE; + ulint size = page_size.logical(); ulint checksum_field1 = mach_read_from_4( frame + FIL_PAGE_SPACE_OR_CHKSUM); ulint checksum_field2 = mach_read_from_4( @@ -1728,8 +1766,8 @@ fil_crypt_is_page_uninitialized( return false; } -#define fil_crypt_get_page_throttle(state,space,zip_size,offset,mtr,sleeptime_ms) \ - fil_crypt_get_page_throttle_func(state, space, zip_size, offset, mtr, \ +#define fil_crypt_get_page_throttle(state,space,page_size,offset,mtr,sleeptime_ms) \ + fil_crypt_get_page_throttle_func(state, space, page_size, offset, mtr, \ sleeptime_ms, __FILE__, __LINE__) /*********************************************************************** @@ -1741,17 +1779,20 @@ fil_crypt_get_page_throttle_func( /*=============================*/ rotate_thread_t* state, /*!< in/out: Key rotation state */ ulint space, /*!< in: FIL space id */ - uint zip_size, /*!< in: compressed size if - row_format compressed */ + const page_size_t& page_size, /*!< in: page size */ ulint offset, /*!< in: page offsett */ mtr_t* mtr, /*!< in/out: minitransaction */ ulint* sleeptime_ms, /*!< out: sleep time */ const char* file, /*!< in: file name */ ulint line) /*!< in: file line */ { - buf_block_t* block = buf_page_try_get_func(space, offset, RW_X_LATCH, - true, - file, line, mtr); + const page_id_t& page_id = page_id_t(space, offset); + dberr_t err = DB_SUCCESS; + buf_block_t* block = NULL; + + // JAN: TODO: + // buf_block_t* block = buf_page_try_get_func(page_id, file, line, mtr); + if (block != NULL) { /* page was in buffer pool */ state->crypt_stat.pages_read_from_cache++; @@ -1768,12 +1809,12 @@ fil_crypt_get_page_throttle_func( state->crypt_stat.pages_read_from_disk++; - ullint start = ut_time_us(NULL); - block = buf_page_get_gen(space, zip_size, 
offset, + uintmax_t start = ut_time_us(NULL); + block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL, BUF_GET_POSSIBLY_FREED, - file, line, mtr); - ullint end = ut_time_us(NULL); + file, line, mtr, &err); + uintmax_t end = ut_time_us(NULL); if (end < start) { end = start; // safety... @@ -1812,8 +1853,7 @@ btr_scrub_get_block_and_allocation_status( /*======================================*/ rotate_thread_t* state, /*!< in/out: Key rotation state */ ulint space, /*!< in: FIL space id */ - uint zip_size, /*!< in: compressed size if - row_format compressed */ + const page_size_t& page_size, /*!< in: page size */ ulint offset, /*!< in: page offsett */ mtr_t* mtr, /*!< in/out: minitransaction */ @@ -1832,7 +1872,7 @@ btr_scrub_get_block_and_allocation_status( /* this is easy case, we lock fil_space_latch first and then block */ block = fil_crypt_get_page_throttle(state, - space, zip_size, + space, page_size, offset, mtr, sleeptime_ms); mtr_commit(&local_mtr); @@ -1849,7 +1889,7 @@ btr_scrub_get_block_and_allocation_status( */ block = fil_crypt_get_page_throttle(state, - space, zip_size, + space, page_size, offset, mtr, sleeptime_ms); } @@ -1869,7 +1909,8 @@ fil_crypt_rotate_page( { ulint space = state->space; ulint offset = state->offset; - const uint zip_size = fil_space_get_zip_size(space); + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(space, &tsfound); ulint sleeptime_ms = 0; /* check if tablespace is closing before reading page */ @@ -1885,7 +1926,7 @@ fil_crypt_rotate_page( mtr_t mtr; mtr_start(&mtr); buf_block_t* block = fil_crypt_get_page_throttle(state, - space, zip_size, + space, page_size, offset, &mtr, &sleeptime_ms); @@ -1902,7 +1943,7 @@ fil_crypt_rotate_page( fil_space_crypt_t *crypt_data = fil_space_get_crypt_data(space); if (kv == 0 && - fil_crypt_is_page_uninitialized(frame, zip_size)) { + fil_crypt_is_page_uninitialized(frame, page_size)) { ; } else if (fil_crypt_needs_rotation( crypt_data->encryption, @@ -1943,7 +1984,7 @@ fil_crypt_rotate_page( } mtr_commit(&mtr); - lsn_t end_lsn = mtr.end_lsn; + lsn_t end_lsn = mtr.commit_lsn(); if (needs_scrubbing == BTR_SCRUB_PAGE) { mtr_start(&mtr); @@ -1951,8 +1992,9 @@ fil_crypt_rotate_page( * refetch page and allocation status */ btr_scrub_page_allocation_status_t allocated; + block = btr_scrub_get_block_and_allocation_status( - state, space, zip_size, offset, &mtr, + state, space, page_size, offset, &mtr, &allocated, &sleeptime_ms); @@ -1966,7 +2008,7 @@ fil_crypt_rotate_page( /* we need to refetch it once more now that we have * index locked */ block = btr_scrub_get_block_and_allocation_status( - state, space, zip_size, offset, &mtr, + state, space, page_size, offset, &mtr, &allocated, &sleeptime_ms); @@ -2073,15 +2115,15 @@ fil_crypt_flush_space( bool success = false; ulint n_pages = 0; ulint sum_pages = 0; - ullint start = ut_time_us(NULL); + uintmax_t start = ut_time_us(NULL); do { - success = buf_flush_list(ULINT_MAX, end_lsn, &n_pages); + success = buf_flush_lists(ULINT_MAX, end_lsn, &n_pages); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); sum_pages += n_pages; } while (!success && !fil_crypt_is_closing(space)); - ullint end = ut_time_us(NULL); + uintmax_t end = ut_time_us(NULL); if (sum_pages && end > start) { state->cnt_waited += sum_pages; @@ -2101,18 +2143,27 @@ fil_crypt_flush_space( mtr_t mtr; mtr_start(&mtr); ulint offset = 0; // page 0 - const uint zip_size = fil_space_get_zip_size(space); - buf_block_t* block = buf_page_get_gen(space, zip_size, offset, + const page_id_t 
page_id(space, offset); + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(space, &tsfound); + dberr_t err = DB_SUCCESS; + + buf_block_t* block = buf_page_get_gen(page_id, page_size, RW_X_LATCH, NULL, BUF_GET, - __FILE__, __LINE__, &mtr); - byte* frame = buf_block_get_frame(block); - ulint maxsize; - crypt_data->page0_offset = - fsp_header_get_crypt_offset(zip_size, &maxsize); + __FILE__, __LINE__, &mtr, &err); + + if (block && err == DB_SUCCESS) { + byte* frame = buf_block_get_frame(block); + ulint maxsize=0; + + crypt_data->page0_offset = + fsp_header_get_crypt_offset(page_size, &maxsize); + + fil_space_write_crypt_data(space, frame, + crypt_data->page0_offset, + ULINT_MAX, &mtr); + } - fil_space_write_crypt_data(space, frame, - crypt_data->page0_offset, - ULINT_MAX, &mtr); mtr_commit(&mtr); } } @@ -2325,9 +2376,10 @@ fil_crypt_set_thread_cnt( for (uint i = 0; i < add; i++) { os_thread_id_t rotation_thread_id; os_thread_create(fil_crypt_thread, NULL, &rotation_thread_id); - ib_logf(IB_LOG_LEVEL_INFO, - "Creating #%d thread id %lu total threads %u.", - i+1, os_thread_pf(rotation_thread_id), new_cnt); + ib::info() << "Creating " + << i+1 << " encryption thread id " + << os_thread_pf(rotation_thread_id) + << " total threads " << new_cnt << "."; } } else if (new_cnt < srv_n_fil_crypt_threads) { srv_n_fil_crypt_threads = new_cnt; @@ -2383,12 +2435,11 @@ void fil_crypt_threads_init() /*====================*/ { - ut_ad(mutex_own(&fil_system->mutex)); if (!fil_crypt_threads_inited) { - fil_crypt_event = os_event_create(); - fil_crypt_threads_event = os_event_create(); - mutex_create(fil_crypt_threads_mutex_key, - &fil_crypt_threads_mutex, SYNC_NO_ORDER_CHECK); + fil_crypt_event = os_event_create(0); + fil_crypt_threads_event = os_event_create(0); + mutex_create(LATCH_ID_FIL_CRYPT_THREADS_MUTEX, + &fil_crypt_threads_mutex); uint cnt = srv_n_fil_crypt_threads; srv_n_fil_crypt_threads = 0; @@ -2415,8 +2466,8 @@ void fil_crypt_threads_cleanup() /*=======================*/ { - os_event_free(fil_crypt_event); - os_event_free(fil_crypt_threads_event); + os_event_destroy(fil_crypt_event); + os_event_destroy(fil_crypt_threads_event); fil_crypt_threads_inited = false; } @@ -2494,9 +2545,10 @@ fil_space_crypt_close_tablespace( uint now = time(0); if (now >= last + 30) { - ib_logf(IB_LOG_LEVEL_WARN, - "Waited %u seconds to drop space: %lu.", - now - start, space); + ib::warn() << "Waited " + << now - start + << " seconds to drop space: " + << space << "."; last = now; } } @@ -2581,8 +2633,10 @@ fil_space_get_scrub_status( memset(status, 0, sizeof(*status)); if (crypt_data != NULL) { + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(id, &tsfound); status->space = id; - status->compressed = fil_space_get_zip_size(id) > 0; + status->compressed = page_size.is_compressed(); mutex_enter(&crypt_data->mutex); status->last_scrub_completed = crypt_data->rotate_state.scrubbing.last_scrub_completed; diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index e9e164e5e1a..a28be694f94 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -24,50 +24,40 @@ The tablespace memory cache Created 10/25/1995 Heikki Tuuri *******************************************************/ -#include "fil0fil.h" +#include "ha_prototypes.h" #include "fil0pagecompress.h" #include "fsp0pagecompress.h" #include "fil0crypt.h" -#include -#include - -#include "mem0mem.h" -#include "hash0hash.h" -#include "os0file.h" -#include "mach0data.h" +#include 
"btr0btr.h" #include "buf0buf.h" #include "buf0flu.h" -#include "log0recv.h" +#include "dict0boot.h" +#include "dict0dict.h" +#include "fsp0file.h" #include "fsp0fsp.h" -#include "srv0srv.h" -#include "srv0start.h" -#include "mtr0mtr.h" +#include "fsp0space.h" +#include "fsp0sysspace.h" +#include "hash0hash.h" +#include "log0recv.h" +#include "mach0data.h" +#include "mem0mem.h" #include "mtr0log.h" -#include "dict0dict.h" -#include "page0page.h" +#include "os0file.h" #include "page0zip.h" -#include "trx0sys.h" #include "row0mysql.h" -#include "os0file.h" +#include "row0trunc.h" +#include "srv0start.h" +#include "trx0purge.h" +#include "ut0new.h" #ifndef UNIV_HOTBACKUP # include "buf0lru.h" # include "ibuf0ibuf.h" +# include "os0event.h" # include "sync0sync.h" -# include "os0sync.h" #else /* !UNIV_HOTBACKUP */ # include "srv0srv.h" -static ulint srv_data_read, srv_data_written; #endif /* !UNIV_HOTBACKUP */ -#include "zlib.h" -#ifdef __linux__ -#include -#include -#include -#endif -#include "row0mysql.h" - -MYSQL_PLUGIN_IMPORT extern my_bool lower_case_file_system; /* IMPLEMENTATION OF THE TABLESPACE MEMORY CACHE @@ -125,51 +115,63 @@ out of the LRU-list and keep a count of pending operations. When an operation completes, we decrement the count and return the file node to the LRU-list if the count drops to zero. */ -/** When mysqld is run, the default directory "." is the mysqld datadir, -but in the MySQL Embedded Server Library and mysqlbackup it is not the default -directory, and we must set the base file path explicitly */ -UNIV_INTERN const char* fil_path_to_mysql_datadir = "."; +/** This tablespace name is used internally during recovery to open a +general tablespace before the data dictionary are recovered and available. */ +const char general_space_name[] = "innodb_general"; + +/** Reference to the server data directory. Usually it is the +current working directory ".", but in the MySQL Embedded Server Library +it is an absolute path. */ +const char* fil_path_to_mysql_datadir; +Folder folder_mysql_datadir; + +/** Common InnoDB file extentions */ +const char* dot_ext[] = { "", ".ibd", ".isl", ".cfg" }; /** The number of fsyncs done to the log */ -UNIV_INTERN ulint fil_n_log_flushes = 0; +ulint fil_n_log_flushes = 0; /** Number of pending redo log flushes */ -UNIV_INTERN ulint fil_n_pending_log_flushes = 0; +ulint fil_n_pending_log_flushes = 0; /** Number of pending tablespace flushes */ -UNIV_INTERN ulint fil_n_pending_tablespace_flushes = 0; +ulint fil_n_pending_tablespace_flushes = 0; /** Number of files currently open */ -UNIV_INTERN ulint fil_n_file_opened = 0; +ulint fil_n_file_opened = 0; /** The null file address */ -UNIV_INTERN fil_addr_t fil_addr_null = {FIL_NULL, 0}; - -#ifdef UNIV_PFS_MUTEX -/* Key to register fil_system_mutex with performance schema */ -UNIV_INTERN mysql_pfs_key_t fil_system_mutex_key; -#endif /* UNIV_PFS_MUTEX */ - -#ifdef UNIV_PFS_RWLOCK -/* Key to register file space latch with performance schema */ -UNIV_INTERN mysql_pfs_key_t fil_space_latch_key; -#endif /* UNIV_PFS_RWLOCK */ +fil_addr_t fil_addr_null = {FIL_NULL, 0}; /** The tablespace memory cache. This variable is NULL before the module is initialized. */ -fil_system_t* fil_system = NULL; +static fil_system_t* fil_system = NULL; -/** Determine if (i) is a user tablespace id or not. 
*/ -# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open) +#ifdef UNIV_HOTBACKUP +static ulint srv_data_read; +static ulint srv_data_written; +#endif /* UNIV_HOTBACKUP */ /** Determine if user has explicitly disabled fsync(). */ -#ifndef __WIN__ +#ifndef _WIN32 # define fil_buffering_disabled(s) \ - ((s)->purpose == FIL_TABLESPACE \ + ((s)->purpose == FIL_TYPE_TABLESPACE \ && srv_unix_file_flush_method \ == SRV_UNIX_O_DIRECT_NO_FSYNC) -#else /* __WIN__ */ +#else /* _WIN32 */ # define fil_buffering_disabled(s) (0) -#endif /* __WIN__ */ +#endif /* __WIN32 */ + +/** Determine if the space id is a user tablespace id or not. +@param[in] space_id Space ID to check +@return true if it is a user tablespace ID */ +UNIV_INLINE +bool +fil_is_user_tablespace_id( + ulint space_id) +{ + return(space_id > srv_undo_tablespaces_open + && space_id != srv_tmp_space.space_id()); +} #ifdef UNIV_DEBUG /** Try fil_validate() every this many times */ @@ -177,9 +179,9 @@ fil_system_t* fil_system = NULL; /******************************************************************//** Checks the consistency of the tablespace cache some of the time. -@return TRUE if ok or the check was skipped */ +@return true if ok or the check was skipped */ static -ibool +bool fil_validate_skip(void) /*===================*/ { @@ -192,7 +194,7 @@ fil_validate_skip(void) reduce the call frequency of the costly fil_validate() check in debug builds. */ if (--fil_validate_count > 0) { - return(TRUE); + return(true); } fil_validate_count = FIL_VALIDATE_SKIP; @@ -202,15 +204,24 @@ fil_validate_skip(void) /********************************************************************//** Determines if a file node belongs to the least-recently-used list. -@return TRUE if the file belongs to fil_system->LRU mutex. */ +@return true if the file belongs to fil_system->LRU mutex. */ UNIV_INLINE -ibool +bool fil_space_belongs_in_lru( /*=====================*/ const fil_space_t* space) /*!< in: file space */ { - return(space->purpose == FIL_TABLESPACE - && fil_is_user_tablespace_id(space->id)); + switch (space->purpose) { + case FIL_TYPE_LOG: + return(false); + case FIL_TYPE_TABLESPACE: + case FIL_TYPE_TEMPORARY: + case FIL_TYPE_IMPORT: + return(fil_is_user_tablespace_id(space->id)); + } + + ut_ad(0); + return(false); } /********************************************************************//** @@ -228,98 +239,72 @@ fil_node_prepare_for_io( fil_node_t* node, /*!< in: file node */ fil_system_t* system, /*!< in: tablespace memory cache */ fil_space_t* space); /*!< in: space */ -/********************************************************************//** + +/** Updates the data structures when an i/o operation finishes. Updates the -pending i/o's field in the node appropriately. */ +pending i/o's field in the node appropriately. +@param[in,out] node file node +@param[in,out] system tablespace instance +@param[in] type IO context */ static void fil_node_complete_io( -/*=================*/ - fil_node_t* node, /*!< in: file node */ - fil_system_t* system, /*!< in: tablespace memory cache */ - ulint type); /*!< in: OS_FILE_WRITE or OS_FILE_READ; marks - the node as modified if - type == OS_FILE_WRITE */ -/*******************************************************************//** -Frees a space object from the tablespace memory cache. Closes the files in -the chain but does not delete them. There must not be any pending i/o's or -flushes on the files. 
-@return TRUE on success */ -static -ibool -fil_space_free( -/*===========*/ - ulint id, /* in: space id */ - ibool x_latched); /* in: TRUE if caller has space->latch - in X mode */ -/********************************************************************//** -Reads data from a space to a buffer. Remember that the possible incomplete + fil_node_t* node, + fil_system_t* system, + const IORequest& type); + +/** Reads data from a space to a buffer. Remember that the possible incomplete blocks at the end of file are ignored: they are not taken into account when calculating the byte offset within a space. +@param[in] page_id page id +@param[in] page_size page size +@param[in] byte_offset remainder of offset in bytes; in aio this +must be divisible by the OS block size +@param[in] len how many bytes to read; this must not cross a +file boundary; in aio this must be a block size multiple +@param[in,out] buf buffer where to store data read; in aio this +must be appropriately aligned @return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do i/o on a tablespace which does not exist */ UNIV_INLINE dberr_t fil_read( -/*=====*/ - bool sync, /*!< in: true if synchronous aio is desired */ - ulint space_id, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint block_offset, /*!< in: offset in number of blocks */ - ulint byte_offset, /*!< in: remainder of offset in bytes; in aio - this must be divisible by the OS block size */ - ulint len, /*!< in: how many bytes to read; this must not - cross a file boundary; in aio this must be a - block size multiple */ - void* buf, /*!< in/out: buffer where to store data read; - in aio this must be appropriately aligned */ - void* message, /*!< in: message for aio handler if non-sync - aio used, else ignored */ - ulint* write_size) /*!< in/out: Actual write size initialized - after fist successfull trim - operation for this page and if - initialized we do not trim again if - actual page size does not decrease. */ -{ - return(fil_io(OS_FILE_READ, sync, space_id, zip_size, block_offset, - byte_offset, len, buf, message, write_size)); + const page_id_t& page_id, + const page_size_t& page_size, + ulint byte_offset, + ulint len, + void* buf) +{ + return(fil_io(IORequestRead, true, page_id, page_size, + byte_offset, len, buf, NULL, NULL)); } -/********************************************************************//** -Writes data to a space from a buffer. Remember that the possible incomplete +/** Writes data to a space from a buffer. Remember that the possible incomplete blocks at the end of file are ignored: they are not taken into account when calculating the byte offset within a space. 
+@param[in] page_id page id +@param[in] page_size page size +@param[in] byte_offset remainder of offset in bytes; in aio this +must be divisible by the OS block size +@param[in] len how many bytes to write; this must not cross +a file boundary; in aio this must be a block size multiple +@param[in] buf buffer from which to write; in aio this must +be appropriately aligned @return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do i/o on a tablespace which does not exist */ UNIV_INLINE dberr_t fil_write( -/*======*/ - bool sync, /*!< in: true if synchronous aio is desired */ - ulint space_id, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint block_offset, /*!< in: offset in number of blocks */ - ulint byte_offset, /*!< in: remainder of offset in bytes; in aio - this must be divisible by the OS block size */ - ulint len, /*!< in: how many bytes to write; this must - not cross a file boundary; in aio this must - be a block size multiple */ - void* buf, /*!< in: buffer from which to write; in aio - this must be appropriately aligned */ - void* message, /*!< in: message for aio handler if non-sync - aio used, else ignored */ - ulint* write_size) /*!< in/out: Actual write size initialized - after fist successfull trim - operation for this page and if - initialized we do not trim again if - actual page size does not decrease. */ + const page_id_t& page_id, + const page_size_t& page_size, + ulint byte_offset, + ulint len, + void* buf) { ut_ad(!srv_read_only_mode); - return(fil_io(OS_FILE_WRITE, sync, space_id, zip_size, block_offset, - byte_offset, len, buf, message, write_size)); + return(fil_io(IORequestWrite, true, page_id, page_size, + byte_offset, len, buf, NULL, NULL)); } /*******************************************************************//** @@ -341,39 +326,6 @@ fil_space_get_by_id( return(space); } -/*******************************************************************//** -Returns the table space by a given id, NULL if not found. */ -fil_space_t* -fil_space_found_by_id( -/*==================*/ - ulint id) /*!< in: space id */ -{ - fil_space_t* space = NULL; - mutex_enter(&fil_system->mutex); - space = fil_space_get_by_id(id); - - /* Not found if space is being deleted */ - if (space && space->stop_new_ops) { - space = NULL; - } - - mutex_exit(&fil_system->mutex); - return space; -} - -/****************************************************************//** -Get space id from fil node */ -ulint -fil_node_get_space_id( -/*==================*/ - fil_node_t* node) /*!< in: Compressed node*/ -{ - ut_ad(node); - ut_ad(node->space); - - return (node->space->id); -} - /*******************************************************************//** Returns the table space by a given name, NULL if not found. */ UNIV_INLINE @@ -398,18 +350,37 @@ fil_space_get_by_name( } #ifndef UNIV_HOTBACKUP +/** Look up a tablespace. +The caller should hold an InnoDB table lock or a MDL that prevents +the tablespace from being dropped during the operation, +or the caller should be in single-threaded crash recovery mode +(no user connections that could drop tablespaces). +If this is not the case, fil_space_acquire() and fil_space_release() +should be used instead. 
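The fil_read() and fil_write() helpers above have become thin synchronous wrappers that differ only in the IORequest passed down to fil_io(). The shape of that pattern, with invented names (do_io_sketch is not the real fil_io() signature):

    /* Sketch: one worker takes the direction, the public helpers fix it. */
    #include <stddef.h>

    enum io_dir_sketch { IO_READ_SKETCH, IO_WRITE_SKETCH };

    static int
    do_io_sketch(io_dir_sketch dir, unsigned long page_no, size_t len, void* buf)
    {
            (void) dir; (void) page_no; (void) len; (void) buf;
            return(0);      /* the real fil_io() performs or queues the I/O */
    }

    static int
    read_sketch(unsigned long page_no, size_t len, void* buf)
    {
            return(do_io_sketch(IO_READ_SKETCH, page_no, len, buf));
    }

    static int
    write_sketch(unsigned long page_no, size_t len, void* buf)
    {
            /* fil_write() additionally asserts the server is not read-only. */
            return(do_io_sketch(IO_WRITE_SKETCH, page_no, len, buf));
    }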
+@param[in] id tablespace ID +@return tablespace, or NULL if not found */ +fil_space_t* +fil_space_get( + ulint id) +{ + mutex_enter(&fil_system->mutex); + fil_space_t* space = fil_space_get_by_id(id); + mutex_exit(&fil_system->mutex); + ut_ad(space == NULL || space->purpose != FIL_TYPE_LOG); + return(space); +} /*******************************************************************//** Returns the version number of a tablespace, -1 if not found. @return version number, -1 if the tablespace does not exist in the memory cache */ UNIV_INTERN -ib_int64_t +ib_uint64_t fil_space_get_version( /*==================*/ ulint id) /*!< in: space id */ { fil_space_t* space; - ib_int64_t version = -1; + ib_uint64_t version = -1; ut_ad(fil_system); @@ -425,16 +396,14 @@ fil_space_get_version( return(version); } - -/*******************************************************************//** -Returns the latch of a file space. -@return latch protecting storage allocation */ -UNIV_INTERN +/** Returns the latch of a file space. +@param[in] id space id +@param[out] flags tablespace flags +@return latch protecting storage allocation */ rw_lock_t* fil_space_get_latch( -/*================*/ - ulint id, /*!< in: space id */ - ulint* flags) /*!< out: tablespace flags */ + ulint id, + ulint* flags) { fil_space_t* space; @@ -455,17 +424,14 @@ fil_space_get_latch( return(&(space->latch)); } -/*******************************************************************//** -Returns the type of a file space. -@return ULINT_UNDEFINED, or FIL_TABLESPACE or FIL_LOG */ -UNIV_INTERN -ulint +/** Gets the type of a file space. +@param[in] id tablespace identifier +@return file type */ +fil_type_t fil_space_get_type( -/*===============*/ - ulint id) /*!< in: space id */ + ulint id) { fil_space_t* space; - ulint type = ULINT_UNDEFINED; ut_ad(fil_system); @@ -473,160 +439,253 @@ fil_space_get_type( space = fil_space_get_by_id(id); + ut_a(space); + mutex_exit(&fil_system->mutex); - if (space) { - type = space->purpose; - } + return(space->purpose); +} + +/** Note that a tablespace has been imported. +It is initially marked as FIL_TYPE_IMPORT so that no logging is +done during the import process when the space ID is stamped to each page. +Now we change it to FIL_SPACE_TABLESPACE to start redo and undo logging. +NOTE: temporary tablespaces are never imported. +@param[in] id tablespace identifier */ +void +fil_space_set_imported( + ulint id) +{ + ut_ad(fil_system != NULL); + + mutex_enter(&fil_system->mutex); + + fil_space_t* space = fil_space_get_by_id(id); + + ut_ad(space->purpose == FIL_TYPE_IMPORT); + space->purpose = FIL_TYPE_TABLESPACE; - return(type); + mutex_exit(&fil_system->mutex); } #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Checks if all the file nodes in a space are flushed. The caller must hold the fil_system mutex. 
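fil_space_get() and the accessors above all share one pattern: the fil_system mutex is held only for the duration of the hash probe, so it protects the cache structure rather than the lifetime of the returned tablespace, which is why the comment asks the caller to hold a table lock or MDL (or to use fil_space_acquire()). A stripped-down illustration, with a linear scan standing in for the real hash table and all names invented:

    #include <mutex>

    struct space_sketch_t { unsigned long id; int purpose; };

    static std::mutex     fil_mutex_sketch;
    static space_sketch_t spaces_sketch[16];
    static unsigned       n_spaces_sketch = 0;

    static space_sketch_t*
    space_get_sketch(unsigned long id)
    {
            std::lock_guard<std::mutex> guard(fil_mutex_sketch);

            for (unsigned i = 0; i < n_spaces_sketch; i++) {
                    if (spaces_sketch[i].id == id) {
                            /* The caller must already hold something
                            (table lock, MDL) that keeps this object
                            from being dropped after we return. */
                            return(&spaces_sketch[i]);
                    }
            }

            return(nullptr);
    }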
-@return true if all are flushed */ +@return true if all are flushed */ static bool fil_space_is_flushed( /*=================*/ fil_space_t* space) /*!< in: space */ { - fil_node_t* node; - ut_ad(mutex_own(&fil_system->mutex)); - node = UT_LIST_GET_FIRST(space->chain); + for (const fil_node_t* node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { - while (node) { if (node->modification_counter > node->flush_counter) { ut_ad(!fil_buffering_disabled(space)); return(false); } - - node = UT_LIST_GET_NEXT(chain, node); } return(true); } -/*******************************************************************//** -Appends a new file to the chain of files of a space. File must be closed. -@return pointer to the file name, or NULL on error */ -UNIV_INTERN -char* -fil_node_create( -/*============*/ - const char* name, /*!< in: file name (file must be closed) */ - ulint size, /*!< in: file size in database blocks, rounded - downwards to an integer */ - ulint id, /*!< in: space id where to append */ - ibool is_raw) /*!< in: TRUE if a raw device or - a raw disk partition */ +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) + +#include +/** FusionIO atomic write control info */ +#define DFS_IOCTL_ATOMIC_WRITE_SET _IOW(0x95, 2, uint) + +/** +Try and enable FusionIO atomic writes. +@param[in] file OS file handle +@return true if successful */ +bool +fil_fusionio_enable_atomic_write(os_file_t file) +{ + if (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT) { + + uint atomic = 1; + + ut_a(file != -1); + + if (ioctl(file, DFS_IOCTL_ATOMIC_WRITE_SET, &atomic) != -1) { + + return(true); + } + } + + return(false); +} +#endif /* !NO_FALLOCATE && UNIV_LINUX */ + +/** Append a file to the chain of files of a space. +@param[in] name file name of a file that is not open +@param[in] size file size in entire database blocks +@param[in,out] space tablespace from fil_space_create() +@param[in] is_raw whether this is a raw device or partition +@param[in] punch_hole true if supported for this node +@param[in] atomic_write true if the file has atomic write enabled +@param[in] max_pages maximum number of pages in file, +ULINT_MAX means the file size is unlimited. 
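The rewritten fil_space_is_flushed() loop above reduces to comparing two monotonically increasing counters per file node. A self-contained model of that check; node_sketch_t abbreviates the relevant fields of fil_node_t:

    /* A node is "flushed" once everything counted as modified has also
       been counted as flushed to disk. */
    struct node_sketch_t {
            long long modification_counter;    /* writes issued  */
            long long flush_counter;           /* writes fsynced */
    };

    static bool
    space_is_flushed_sketch(const node_sketch_t* nodes, unsigned n)
    {
            for (unsigned i = 0; i < n; i++) {
                    if (nodes[i].modification_counter > nodes[i].flush_counter) {
                            return(false);     /* at least one pending flush */
                    }
            }

            return(true);
    }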
+@return pointer to the file name +@retval NULL if error */ +static +fil_node_t* +fil_node_create_low( + const char* name, + ulint size, + fil_space_t* space, + bool is_raw, + bool punch_hole, + bool atomic_write, + ulint max_pages = ULINT_MAX) { fil_node_t* node; - fil_space_t* space; - ut_a(fil_system); - ut_a(name); + ut_ad(name != NULL); + ut_ad(fil_system != NULL); - mutex_enter(&fil_system->mutex); + if (space == NULL) { + return(NULL); + } - node = static_cast(mem_zalloc(sizeof(fil_node_t))); + node = reinterpret_cast(ut_zalloc_nokey(sizeof(*node))); node->name = mem_strdup(name); ut_a(!is_raw || srv_start_raw_disk_in_use); - node->sync_event = os_event_create(); + node->sync_event = os_event_create("fsync_event"); + node->is_raw_disk = is_raw; + node->size = size; + node->magic_n = FIL_NODE_MAGIC_N; - space = fil_space_get_by_id(id); + node->init_size = size; + node->max_size = max_pages; - if (!space) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: Could not find tablespace %lu for\n" - "InnoDB: file ", (ulong) id); - ut_print_filename(stderr, name); - fputs(" in the tablespace memory cache.\n", stderr); - mem_free(node->name); + mutex_enter(&fil_system->mutex); - mem_free(node); + space->size += size; - mutex_exit(&fil_system->mutex); + node->space = space; - return(NULL); - } + os_file_stat_t stat_info; - space->size += size; +#ifdef UNIV_DEBUG + dberr_t err = +#endif /* UNIV_DEBUG */ - node->space = space; + os_file_get_status( + node->name, &stat_info, false, + fsp_is_system_temporary(space->id) ? true : srv_read_only_mode); - UT_LIST_ADD_LAST(chain, space->chain, node); + ut_ad(err == DB_SUCCESS); - if (id < SRV_LOG_SPACE_FIRST_ID && fil_system->max_assigned_id < id) { + node->block_size = stat_info.block_size; - fil_system->max_assigned_id = id; + if (!(IORequest::is_punch_hole_supported() && punch_hole) + || node->block_size >= srv_page_size) { + + fil_no_punch_hole(node); + } else { + node->punch_hole = punch_hole; } + node->atomic_write = atomic_write; + + UT_LIST_ADD_LAST(space->chain, node); mutex_exit(&fil_system->mutex); - return(node->name); + return(node); } -/********************************************************************//** -Opens a file of a node of a tablespace. The caller must own the fil_system -mutex. +/** Appends a new file to the chain of files of a space. File must be closed. +@param[in] name file name (file must be closed) +@param[in] size file size in database blocks, rounded downwards to + an integer +@param[in,out] space space where to append +@param[in] is_raw true if a raw device or a raw disk partition +@param[in] atomic_write true if the file has atomic write enabled +@param[in] max_pages maximum number of pages in file, +ULINT_MAX means the file size is unlimited. +@return pointer to the file name +@retval NULL if error */ +char* +fil_node_create( + const char* name, + ulint size, + fil_space_t* space, + bool is_raw, + bool atomic_write, + ulint max_pages) +{ + fil_node_t* node; + + node = fil_node_create_low( + name, size, space, is_raw, IORequest::is_punch_hole_supported(), + atomic_write, max_pages); + + return(node == NULL ? NULL : node->name); +} + +/** Open a file node of a tablespace. +The caller must own the fil_system mutex. 
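fil_node_create_low() above also settles, per file, whether hole punching may be used: it requires OS support, the caller asking for it, and a filesystem block size smaller than the InnoDB page size, since otherwise punching a hole inside a page cannot free any filesystem blocks. The decision condensed into one predicate (names invented):

    /* Sketch of the punch-hole gate; the real code records the outcome
       in the fil_node_t instead of returning it. */
    static bool
    can_punch_hole_sketch(
            bool            os_supports_punch_hole,
            bool            requested,
            unsigned long   fs_block_size,
            unsigned long   page_size)
    {
            return(os_supports_punch_hole
                   && requested
                   && fs_block_size < page_size);
    }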
+@param[in,out] node File node @return false if the file can't be opened, otherwise true */ static bool fil_node_open_file( -/*===============*/ - fil_node_t* node, /*!< in: file node */ - fil_system_t* system, /*!< in: tablespace memory cache */ - fil_space_t* space) /*!< in: space */ + fil_node_t* node) { os_offset_t size_bytes; - ibool ret; - ibool success; + bool success; byte* buf2; byte* page; ulint space_id; - ulint flags=0; - ulint page_size; - ulint atomic_writes=0; + ulint flags; + ulint min_size; + bool read_only_mode; + fil_space_t* space = node->space; - ut_ad(mutex_own(&(system->mutex))); + ut_ad(mutex_own(&fil_system->mutex)); ut_a(node->n_pending == 0); - ut_a(node->open == FALSE); - - if (node->size == 0) { - /* It must be a single-table tablespace and we do not know the - size of the file yet. First we open the file in the normal - mode, no async I/O here, for simplicity. Then do some checks, - and close the file again. - NOTE that we could not use the simple file read function - os_file_read() in Windows to read from a file opened for - async I/O! */ + ut_a(!node->is_open); + + read_only_mode = !fsp_is_system_temporary(space->id) + && srv_read_only_mode; + + if (node->size == 0 + || (space->purpose == FIL_TYPE_TABLESPACE + && node == UT_LIST_GET_FIRST(space->chain) + && !undo::Truncate::was_tablespace_truncated(space->id) + && srv_startup_is_before_trx_rollback_phase)) { + /* We do not know the size of the file yet. First we + open the file in the normal mode, no async I/O here, + for simplicity. Then do some checks, and close the + file again. NOTE that we could not use the simple + file read function os_file_read() in Windows to read + from a file opened for async I/O! */ node->handle = os_file_create_simple_no_error_handling( - innodb_file_data_key, node->name, OS_FILE_OPEN, - OS_FILE_READ_ONLY, &success, 0); + innodb_data_file_key, node->name, OS_FILE_OPEN, + OS_FILE_READ_ONLY, read_only_mode, &success); + if (!success) { /* The following call prints an error message */ os_file_get_last_error(true); - ut_print_timestamp(stderr); - - ib_logf(IB_LOG_LEVEL_WARN, "InnoDB: Error: cannot " - "open %s\n. InnoDB: Have you deleted .ibd " - "files under a running mysqld server?\n", - node->name); + ib::warn() << "Cannot open '" << node->name << "'." 
+ " Have you deleted .ibd files under a" + " running mysqld server?"; return(false); } @@ -634,9 +693,6 @@ fil_node_open_file( size_bytes = os_file_get_size(node->handle); ut_a(size_bytes != (os_offset_t) -1); - node->file_block_size = os_file_get_block_size(node->handle, node->name); - space->file_block_size = node->file_block_size; - #ifdef UNIV_HOTBACKUP if (space->id == 0) { node->size = (ulint) (size_bytes / UNIV_PAGE_SIZE); @@ -644,79 +700,61 @@ fil_node_open_file( goto add_size; } #endif /* UNIV_HOTBACKUP */ - ut_a(space->purpose != FIL_LOG); - ut_a(fil_is_user_tablespace_id(space->id)); - - if (size_bytes < FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) { - fprintf(stderr, - "InnoDB: Error: the size of single-table" - " tablespace file %s\n" - "InnoDB: is only " UINT64PF "," - " should be at least %lu!\n", - node->name, - size_bytes, - (ulong) (FIL_IBD_FILE_INITIAL_SIZE - * UNIV_PAGE_SIZE)); - - ut_a(0); - } + ut_a(space->purpose != FIL_TYPE_LOG); /* Read the first page of the tablespace */ - buf2 = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); + buf2 = static_cast(ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); + /* Align the memory for file i/o if we might have O_DIRECT set */ page = static_cast(ut_align(buf2, UNIV_PAGE_SIZE)); + ut_ad(page == page_align(page)); - success = os_file_read(node->handle, page, 0, UNIV_PAGE_SIZE); + IORequest request(IORequest::READ); + + success = os_file_read( + request, + node->handle, page, 0, UNIV_PAGE_SIZE); space_id = fsp_header_get_space_id(page); flags = fsp_header_get_flags(page); - page_size = fsp_flags_get_page_size(flags); - atomic_writes = fsp_flags_get_atomic_writes(flags); + /* Close the file now that we have read the space id from it */ + os_file_close(node->handle); - ut_free(buf2); + const page_size_t page_size(flags); - /* Close the file now that we have read the space id from it */ + min_size = FIL_IBD_FILE_INITIAL_SIZE * page_size.physical(); - os_file_close(node->handle); + if (size_bytes < min_size) { - if (UNIV_UNLIKELY(space_id != space->id)) { - fprintf(stderr, - "InnoDB: Error: tablespace id is %lu" - " in the data dictionary\n" - "InnoDB: but in file %s it is %lu!\n", - space->id, node->name, space_id); + ib::error() << "The size of tablespace file " + << node->name << " is only " << size_bytes + << ", should be at least " << min_size << "!"; ut_error; } - if (UNIV_UNLIKELY(space_id == ULINT_UNDEFINED - || space_id == 0)) { - fprintf(stderr, - "InnoDB: Error: tablespace id %lu" - " in file %s is not sensible\n", - (ulong) space_id, node->name); - - ut_error; + if (space_id != space->id) { + ib::fatal() << "Tablespace id is " << space->id + << " in the data dictionary but in file " + << node->name << " it is " << space_id << "!"; } - if (UNIV_UNLIKELY(fsp_flags_get_page_size(space->flags) - != page_size)) { - fprintf(stderr, - "InnoDB: Error: tablespace file %s" - " has page size 0x%lx\n" - "InnoDB: but the data dictionary" - " expects page size 0x%lx!\n", - node->name, flags, - fsp_flags_get_page_size(space->flags)); + const page_size_t space_page_size(space->flags); - ut_error; + if (!page_size.equals_to(space_page_size)) { + ib::fatal() << "Tablespace file " << node->name + << " has page size " << page_size + << " (flags=" << ib::hex(flags) << ") but the" + " data dictionary expects page size " + << space_page_size << " (flags=" + << ib::hex(space->flags) << ")!"; } - if (UNIV_UNLIKELY(space->flags != flags)) { + if (space->flags != flags) { ulint sflags = (space->flags & ~FSP_FLAGS_MASK_DATA_DIR); ulint fflags = (flags & 
~FSP_FLAGS_MASK_DATA_DIR_ORACLE); @@ -732,103 +770,110 @@ fil_node_open_file( space->flags, node->name, flags, space->flags); flags = space->flags; - } - - if (!dict_tf_verify_flags(space->flags, flags)) { - fprintf(stderr, - "InnoDB: Error: table flags are 0x%lx" - " in the data dictionary\n" - "InnoDB: but the flags in file %s are 0x%lx!\n", - space->flags, node->name, flags); - ut_error; + } else { + ib::fatal() + << "Table flags are " + << ib::hex(space->flags) << " in the data" + " dictionary but the flags in file " + << node->name << " are " << ib::hex(flags) + << "!"; } } - if (size_bytes >= (1024*1024)) { - /* Truncate the size to whole extent size. */ - size_bytes = ut_2pow_round(size_bytes, (1024*1024)); + + { + ulint size = fsp_header_get_field( + page, FSP_SIZE); + ulint free_limit = fsp_header_get_field( + page, FSP_FREE_LIMIT); + ulint free_len = flst_get_len( + FSP_HEADER_OFFSET + FSP_FREE + page); + ut_ad(space->size_in_header == 0 + || space->size_in_header == size); + ut_ad(space->free_limit == 0 + || space->free_limit == free_limit); + ut_ad(space->free_len == 0 + || space->free_len == free_len); + space->size_in_header = size; + space->free_limit = free_limit; + space->free_len = free_len; } - if (!fsp_flags_is_compressed(flags)) { - node->size = (ulint) (size_bytes / UNIV_PAGE_SIZE); - } else { + ut_free(buf2); + + if (node->size == 0) { + ulint extent_size; + + extent_size = page_size.physical() * FSP_EXTENT_SIZE; + /* Truncate the size to a multiple of extent size. */ + if (size_bytes >= extent_size) { + size_bytes = ut_2pow_round(size_bytes, + extent_size); + } + node->size = (ulint) - (size_bytes - / fsp_flags_get_zip_size(flags)); - } + (size_bytes / page_size.physical()); #ifdef UNIV_HOTBACKUP add_size: #endif /* UNIV_HOTBACKUP */ - space->size += node->size; + space->size += node->size; + } } - atomic_writes = fsp_flags_get_atomic_writes(space->flags); - /* printf("Opening file %s\n", node->name); */ /* Open the file for reading and writing, in Windows normally in the unbuffered async I/O mode, though global variables may make os_file_create() to fall back to the normal file I/O mode. 
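When fil_node_open_file() above derives the node size from the file itself, the byte count is first truncated to a whole extent with ut_2pow_round(). A tiny demonstration of that rounding, assuming (as the removed code did) a 1 MiB extent, i.e. 64 pages of 16 KiB:

    /* Sketch of ut_2pow_round(): truncate n down to a multiple of a
       power-of-two alignment, so a partial trailing extent is ignored. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t
    round_down_pow2_sketch(uint64_t n, uint64_t align)   /* align == 2^k */
    {
            return(n & ~(align - 1));
    }

    int main()
    {
            const uint64_t extent = 1024 * 1024;

            /* A file of 5 MiB + 300 KiB counts as 5 MiB worth of extents. */
            assert(round_down_pow2_sketch(5 * extent + 300 * 1024, extent)
                   == 5 * extent);
            return(0);
    }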
*/ - if (space->purpose == FIL_LOG) { - node->handle = os_file_create(innodb_file_log_key, - node->name, OS_FILE_OPEN, - OS_FILE_AIO, OS_LOG_FILE, - &ret, atomic_writes); + if (space->purpose == FIL_TYPE_LOG) { + node->handle = os_file_create( + innodb_log_file_key, node->name, OS_FILE_OPEN, + OS_FILE_AIO, OS_LOG_FILE, read_only_mode, &success); } else if (node->is_raw_disk) { - node->handle = os_file_create(innodb_file_data_key, - node->name, - OS_FILE_OPEN_RAW, - OS_FILE_AIO, OS_DATA_FILE, - &ret, atomic_writes); + node->handle = os_file_create( + innodb_data_file_key, node->name, OS_FILE_OPEN_RAW, + OS_FILE_AIO, OS_DATA_FILE, read_only_mode, &success); } else { - node->handle = os_file_create(innodb_file_data_key, - node->name, OS_FILE_OPEN, - OS_FILE_AIO, OS_DATA_FILE, - &ret, atomic_writes); - } - - if (node->file_block_size == 0) { - node->file_block_size = os_file_get_block_size(node->handle, node->name); - space->file_block_size = node->file_block_size; + node->handle = os_file_create( + innodb_data_file_key, node->name, OS_FILE_OPEN, + OS_FILE_AIO, OS_DATA_FILE, read_only_mode, &success); } - ut_a(ret); + ut_a(success); - node->open = TRUE; + node->is_open = true; - system->n_open++; + fil_system->n_open++; fil_n_file_opened++; if (fil_space_belongs_in_lru(space)) { /* Put the node to the LRU list */ - UT_LIST_ADD_FIRST(LRU, system->LRU, node); + UT_LIST_ADD_FIRST(fil_system->LRU, node); } return(true); } -/**********************************************************************//** -Closes a file. */ +/** Close a file node. +@param[in,out] node File node */ static void fil_node_close_file( -/*================*/ - fil_node_t* node, /*!< in: file node */ - fil_system_t* system) /*!< in: tablespace memory cache */ + fil_node_t* node) { - ibool ret; + bool ret; - ut_ad(node && system); - ut_ad(mutex_own(&(system->mutex))); - ut_a(node->open); + ut_ad(mutex_own(&(fil_system->mutex))); + ut_a(node->is_open); ut_a(node->n_pending == 0); ut_a(node->n_pending_flushes == 0); ut_a(!node->being_extended); #ifndef UNIV_HOTBACKUP ut_a(node->modification_counter == node->flush_counter + || node->space->purpose == FIL_TYPE_TEMPORARY || srv_fast_shutdown == 2); #endif /* !UNIV_HOTBACKUP */ @@ -837,33 +882,33 @@ fil_node_close_file( /* printf("Closing file %s\n", node->name); */ - node->open = FALSE; - ut_a(system->n_open > 0); - system->n_open--; + node->is_open = false; + ut_a(fil_system->n_open > 0); + fil_system->n_open--; fil_n_file_opened--; if (fil_space_belongs_in_lru(node->space)) { - ut_a(UT_LIST_GET_LEN(system->LRU) > 0); + ut_a(UT_LIST_GET_LEN(fil_system->LRU) > 0); /* The node is in the LRU list, remove it */ - UT_LIST_REMOVE(LRU, system->LRU, node); + UT_LIST_REMOVE(fil_system->LRU, node); } } /********************************************************************//** Tries to close a file in the LRU list. The caller must hold the fil_sys mutex. 
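The LRU close logic below (fil_try_to_close_file_in_LRU) scans the cold end of the list and closes the first node with no pending i/o, no pending flushes and no extension in progress. A minimal standalone sketch of that scan, with simplified stand-ins for fil_node_t and the fil_system list:

#include <cstdio>
#include <iterator>
#include <list>
#include <string>

struct FileNode {
    std::string name;
    int  n_pending;
    int  n_pending_flushes;
    bool being_extended;
    bool is_open;
};

/* front = most recently used, back = least recently used */
static bool try_to_close_one(std::list<FileNode>& lru, bool print_info)
{
    for (auto it = lru.rbegin(); it != lru.rend(); ++it) {
        if (it->n_pending == 0
            && it->n_pending_flushes == 0
            && !it->being_extended) {
            it->is_open = false;                  /* "close" the file */
            lru.erase(std::next(it).base());      /* drop it from the LRU */
            return true;
        }
        if (print_info) {
            std::printf("cannot close %s: pending=%d flushes=%d extending=%d\n",
                        it->name.c_str(), it->n_pending,
                        it->n_pending_flushes, (int) it->being_extended);
        }
    }
    return false;    /* nothing closable; the caller retries later */
}

int main()
{
    std::list<FileNode> lru = {{"a.ibd", 1, 0, false, true},
                               {"b.ibd", 0, 0, false, true}};
    std::printf("closed one file: %d\n", (int) try_to_close_one(lru, true));
    return 0;
}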
-@return TRUE if success, FALSE if should retry later; since i/o's +@return true if success, false if should retry later; since i/o's generally complete in < 100 ms, and as InnoDB writes at most 128 pages from the buffer pool in a batch, and then immediately flushes the files, there is a good chance that the next time we find a suitable node from the LRU list */ static -ibool +bool fil_try_to_close_file_in_LRU( /*=========================*/ - ibool print_info) /*!< in: if TRUE, prints information why it + bool print_info) /*!< in: if true, prints information why it cannot close a file */ { fil_node_t* node; @@ -871,9 +916,8 @@ fil_try_to_close_file_in_LRU( ut_ad(mutex_own(&fil_system->mutex)); if (print_info) { - fprintf(stderr, - "InnoDB: fil_sys open file LRU len %lu\n", - (ulong) UT_LIST_GET_LEN(fil_system->LRU)); + ib::info() << "fil_sys open file LRU len " + << UT_LIST_GET_LEN(fil_system->LRU); } for (node = UT_LIST_GET_LAST(fil_system->LRU); @@ -884,9 +928,9 @@ fil_try_to_close_file_in_LRU( && node->n_pending_flushes == 0 && !node->being_extended) { - fil_node_close_file(node, fil_system); + fil_node_close_file(node); - return(TRUE); + return(true); } if (!print_info) { @@ -894,30 +938,26 @@ fil_try_to_close_file_in_LRU( } if (node->n_pending_flushes > 0) { - fputs("InnoDB: cannot close file ", stderr); - ut_print_filename(stderr, node->name); - fprintf(stderr, ", because n_pending_flushes %lu\n", - (ulong) node->n_pending_flushes); + + ib::info() << "Cannot close file " << node->name + << ", because n_pending_flushes " + << node->n_pending_flushes; } if (node->modification_counter != node->flush_counter) { - fputs("InnoDB: cannot close file ", stderr); - ut_print_filename(stderr, node->name); - fprintf(stderr, - ", because mod_count %ld != fl_count %ld\n", - (long) node->modification_counter, - (long) node->flush_counter); - + ib::warn() << "Cannot close file " << node->name + << ", because modification count " + << node->modification_counter << + " != flush count " << node->flush_counter; } if (node->being_extended) { - fputs("InnoDB: cannot close file ", stderr); - ut_print_filename(stderr, node->name); - fprintf(stderr, ", because it is being extended\n"); + ib::info() << "Cannot close file " << node->name + << ", because it is being extended"; } } - return(FALSE); + return(false); } /*******************************************************************//** @@ -931,150 +971,138 @@ fil_mutex_enter_and_prepare_for_io( ulint space_id) /*!< in: space id */ { fil_space_t* space; - ibool success; - ibool print_info = FALSE; + bool success; + bool print_info = false; ulint count = 0; ulint count2 = 0; -retry: - mutex_enter(&fil_system->mutex); - - if (space_id == 0 || space_id >= SRV_LOG_SPACE_FIRST_ID) { - /* We keep log files and system tablespace files always open; - this is important in preventing deadlocks in this module, as - a page read completion often performs another read from the - insert buffer. The insert buffer is in tablespace 0, and we - cannot end up waiting in this function. */ + for (;;) { + mutex_enter(&fil_system->mutex); - return; - } + if (space_id == 0 || space_id >= SRV_LOG_SPACE_FIRST_ID) { + /* We keep log files and system tablespace files always + open; this is important in preventing deadlocks in this + module, as a page read completion often performs + another read from the insert buffer. The insert buffer + is in tablespace 0, and we cannot end up waiting in + this function. 
*/ + return; + } - space = fil_space_get_by_id(space_id); + space = fil_space_get_by_id(space_id); - if (space != NULL && space->stop_ios) { - /* We are going to do a rename file and want to stop new i/o's - for a while */ + if (space != NULL && space->stop_ios) { + /* We are going to do a rename file and want to stop + new i/o's for a while. */ - if (count2 > 20000) { - fputs("InnoDB: Warning: tablespace ", stderr); - ut_print_filename(stderr, space->name); - fprintf(stderr, - " has i/o ops stopped for a long time %lu\n", - (ulong) count2); - } + if (count2 > 20000) { + ib::warn() << "Tablespace " << space->name + << " has i/o ops stopped for a long" + " time " << count2; + } - mutex_exit(&fil_system->mutex); + mutex_exit(&fil_system->mutex); #ifndef UNIV_HOTBACKUP - /* Wake the i/o-handler threads to make sure pending - i/o's are performed */ - os_aio_simulated_wake_handler_threads(); + /* Wake the i/o-handler threads to make sure pending + i/o's are performed */ + os_aio_simulated_wake_handler_threads(); - /* The sleep here is just to give IO helper threads a - bit of time to do some work. It is not required that - all IO related to the tablespace being renamed must - be flushed here as we do fil_flush() in - fil_rename_tablespace() as well. */ - os_thread_sleep(20000); + /* The sleep here is just to give IO helper threads a + bit of time to do some work. It is not required that + all IO related to the tablespace being renamed must + be flushed here as we do fil_flush() in + fil_rename_tablespace() as well. */ + os_thread_sleep(20000); #endif /* UNIV_HOTBACKUP */ - /* Flush tablespaces so that we can close modified - files in the LRU list */ - fil_flush_file_spaces(FIL_TABLESPACE); + /* Flush tablespaces so that we can close modified + files in the LRU list */ + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); - os_thread_sleep(20000); - - count2++; - - goto retry; - } + os_thread_sleep(20000); - if (fil_system->n_open < fil_system->max_n_open) { + count2++; - return; - } + continue; + } - /* If the file is already open, no need to do anything; if the space - does not exist, we handle the situation in the function which called - this function */ + if (fil_system->n_open < fil_system->max_n_open) { - if (!space || UT_LIST_GET_FIRST(space->chain)->open) { + return; + } - return; - } + /* If the file is already open, no need to do anything; if the + space does not exist, we handle the situation in the function + which called this function. 
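The surrounding hunk replaces the old "goto retry" with an explicit loop: take the cache mutex, try to make room in the open-file table, otherwise drop the mutex, let the i/o helper threads run, and go around again. A standalone sketch of that retry shape, with illustrative names and limits rather than the InnoDB ones:

#include <chrono>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex cache_mutex;
static int n_open     = 0;
static int max_n_open = 300;

static bool try_to_close_one_file()      /* stand-in for the LRU scan */
{
    if (n_open > 0) { --n_open; return true; }
    return false;
}

/* Returns with cache_mutex held once there is room for one more open file,
   or after giving up with a warning, mirroring the structure in the patch. */
static void enter_and_prepare_for_io()
{
    for (int count = 0;; ++count) {
        cache_mutex.lock();

        while (n_open >= max_n_open && try_to_close_one_file()) {
            /* keep closing cold files until there is room */
        }

        if (n_open < max_n_open) {
            return;                      /* caller proceeds, mutex held */
        }

        if (count >= 2) {
            std::printf("too many (%d) files stay open, max is %d\n",
                        n_open, max_n_open);
            return;
        }

        cache_mutex.unlock();            /* let i/o helper threads work */
        std::this_thread::sleep_for(std::chrono::milliseconds(20));
    }
}

int main()
{
    n_open = 299;
    enter_and_prepare_for_io();
    std::printf("n_open=%d\n", n_open);
    cache_mutex.unlock();
    return 0;
}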
*/ - if (count > 1) { - print_info = TRUE; - } + if (space == NULL || UT_LIST_GET_FIRST(space->chain)->is_open) { - /* Too many files are open, try to close some */ -close_more: - success = fil_try_to_close_file_in_LRU(print_info); + return; + } - if (success && fil_system->n_open >= fil_system->max_n_open) { + if (count > 1) { + print_info = true; + } - goto close_more; - } + /* Too many files are open, try to close some */ + do { + success = fil_try_to_close_file_in_LRU(print_info); - if (fil_system->n_open < fil_system->max_n_open) { - /* Ok */ + } while (success + && fil_system->n_open >= fil_system->max_n_open); - return; - } + if (fil_system->n_open < fil_system->max_n_open) { + /* Ok */ + return; + } - if (count >= 2) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: too many (%lu) files stay open" - " while the maximum\n" - "InnoDB: allowed value would be %lu.\n" - "InnoDB: You may need to raise the value of" - " innodb_open_files in\n" - "InnoDB: my.cnf.\n", - (ulong) fil_system->n_open, - (ulong) fil_system->max_n_open); + if (count >= 2) { + ib::warn() << "Too many (" << fil_system->n_open + << ") files stay open while the maximum" + " allowed value would be " + << fil_system->max_n_open << ". You may need" + " to raise the value of innodb_open_files in" + " my.cnf."; - return; - } + return; + } - mutex_exit(&fil_system->mutex); + mutex_exit(&fil_system->mutex); #ifndef UNIV_HOTBACKUP - /* Wake the i/o-handler threads to make sure pending i/o's are - performed */ - os_aio_simulated_wake_handler_threads(); + /* Wake the i/o-handler threads to make sure pending i/o's are + performed */ + os_aio_simulated_wake_handler_threads(); - os_thread_sleep(20000); + os_thread_sleep(20000); #endif - /* Flush tablespaces so that we can close modified files in the LRU - list */ - - fil_flush_file_spaces(FIL_TABLESPACE); + /* Flush tablespaces so that we can close modified files in + the LRU list. */ - count++; + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); - goto retry; + count++; + } } -/*******************************************************************//** -Frees a file node object from a tablespace memory cache. */ +/** Prepare to free a file node object from a tablespace memory cache. 
+@param[in,out] node file node +@param[in] space tablespace */ static void -fil_node_free( -/*==========*/ - fil_node_t* node, /*!< in, own: file node */ - fil_system_t* system, /*!< in: tablespace memory cache */ - fil_space_t* space) /*!< in: space where the file node is chained */ +fil_node_close_to_free( + fil_node_t* node, + fil_space_t* space) { - ut_ad(node && system && space); - ut_ad(mutex_own(&(system->mutex))); + ut_ad(mutex_own(&fil_system->mutex)); ut_a(node->magic_n == FIL_NODE_MAGIC_N); ut_a(node->n_pending == 0); ut_a(!node->being_extended); - if (node->open) { + if (node->is_open) { /* We fool the assertion in fil_node_close_file() to think there are no unflushed modifications in the file */ @@ -1091,145 +1119,200 @@ fil_node_free( space->is_in_unflushed_spaces = false; - UT_LIST_REMOVE(unflushed_spaces, - system->unflushed_spaces, - space); + UT_LIST_REMOVE(fil_system->unflushed_spaces, space); } - fil_node_close_file(node, system); + fil_node_close_file(node); } - - space->size -= node->size; - - UT_LIST_REMOVE(chain, space->chain, node); - - os_event_free(node->sync_event); - mem_free(node->name); - mem_free(node); } -#ifdef UNIV_LOG_ARCHIVE -/****************************************************************//** -Drops files from the start of a file space, so that its size is cut by -the amount given. */ -UNIV_INTERN +/** Detach a space object from the tablespace memory cache. +Closes the files in the chain but does not delete them. +There must not be any pending i/o's or flushes on the files. +@param[in,out] space tablespace */ +static void -fil_space_truncate_start( -/*=====================*/ - ulint id, /*!< in: space id */ - ulint trunc_len) /*!< in: truncate by this much; it is an error - if this does not equal to the combined size of - some initial files in the space */ +fil_space_detach( + fil_space_t* space) { - fil_node_t* node; - fil_space_t* space; + ut_ad(mutex_own(&fil_system->mutex)); - mutex_enter(&fil_system->mutex); + HASH_DELETE(fil_space_t, hash, fil_system->spaces, space->id, space); - space = fil_space_get_by_id(id); + fil_space_t* fnamespace = fil_space_get_by_name(space->name); - ut_a(space); + ut_a(space == fnamespace); - while (trunc_len > 0) { - node = UT_LIST_GET_FIRST(space->chain); + HASH_DELETE(fil_space_t, name_hash, fil_system->name_hash, + ut_fold_string(space->name), space); - ut_a(node->size * UNIV_PAGE_SIZE <= trunc_len); + if (space->is_in_unflushed_spaces) { - trunc_len -= node->size * UNIV_PAGE_SIZE; + ut_ad(!fil_buffering_disabled(space)); + space->is_in_unflushed_spaces = false; - fil_node_free(node, fil_system, space); + UT_LIST_REMOVE(fil_system->unflushed_spaces, space); } - mutex_exit(&fil_system->mutex); + UT_LIST_REMOVE(fil_system->space_list, space); + + ut_a(space->magic_n == FIL_SPACE_MAGIC_N); + ut_a(space->n_pending_flushes == 0); + + for (fil_node_t* fil_node = UT_LIST_GET_FIRST(space->chain); + fil_node != NULL; + fil_node = UT_LIST_GET_NEXT(chain, fil_node)) { + + fil_node_close_to_free(fil_node, space); + } } -#endif /* UNIV_LOG_ARCHIVE */ -/*******************************************************************//** -Creates a space memory object and puts it to the 'fil system' hash table. -If there is an error, prints an error message to the .err log. 
-@return TRUE if success */ -UNIV_INTERN -ibool -fil_space_create( -/*=============*/ - const char* name, /*!< in: space name */ - ulint id, /*!< in: space id */ - ulint flags, /*!< in: tablespace flags */ - ulint purpose,/*!< in: FIL_TABLESPACE, or FIL_LOG if log */ - fil_space_crypt_t* crypt_data) /*!< in: crypt data */ +/** Free a tablespace object on which fil_space_detach() was invoked. +There must not be any pending i/o's or flushes on the files. +@param[in,out] space tablespace */ +static +void +fil_space_free_low( + fil_space_t* space) { - fil_space_t* space; + /* The tablespace must not be in fil_system->named_spaces. */ + ut_ad(srv_fast_shutdown == 2 || space->max_lsn == 0); + + for (fil_node_t* node = UT_LIST_GET_FIRST(space->chain); + node != NULL; ) { + ut_d(space->size -= node->size); + os_event_destroy(node->sync_event); + ut_free(node->name); + fil_node_t* old_node = node; + node = UT_LIST_GET_NEXT(chain, node); + ut_free(old_node); + } - DBUG_EXECUTE_IF("fil_space_create_failure", return(false);); + ut_ad(space->size == 0); - ut_a(fil_system); + rw_lock_free(&space->latch); - /* Look for a matching tablespace and if found free it. */ - do { - mutex_enter(&fil_system->mutex); + ut_free(space->name); + ut_free(space); +} - space = fil_space_get_by_name(name); +/** Frees a space object from the tablespace memory cache. +Closes the files in the chain but does not delete them. +There must not be any pending i/o's or flushes on the files. +@param[in] id tablespace identifier +@param[in] x_latched whether the caller holds X-mode space->latch +@return true if success */ +bool +fil_space_free( + ulint id, + bool x_latched) +{ + ut_ad(id != TRX_SYS_SPACE); - if (space != 0) { - ib_logf(IB_LOG_LEVEL_WARN, - "Tablespace '%s' exists in the cache " - "with id %lu != %lu", - name, (ulong) space->id, (ulong) id); + mutex_enter(&fil_system->mutex); + fil_space_t* space = fil_space_get_by_id(id); - if (id == 0 || purpose != FIL_TABLESPACE) { + if (space != NULL) { + fil_space_detach(space); + } - mutex_exit(&fil_system->mutex); + mutex_exit(&fil_system->mutex); - return(FALSE); - } + if (space != NULL) { + if (x_latched) { + rw_lock_x_unlock(&space->latch); + } - ib_logf(IB_LOG_LEVEL_WARN, - "Freeing existing tablespace '%s' entry " - "from the cache with id %lu", - name, (ulong) id); + bool need_mutex = !recv_recovery_on; - ibool success = fil_space_free(space->id, FALSE); - ut_a(success); + if (need_mutex) { + log_mutex_enter(); + } - mutex_exit(&fil_system->mutex); + ut_ad(log_mutex_own()); + + if (space->max_lsn != 0) { + ut_d(space->max_lsn = 0); + UT_LIST_REMOVE(fil_system->named_spaces, space); } - } while (space != 0); + if (need_mutex) { + log_mutex_exit(); + } - space = fil_space_get_by_id(id); + fil_space_free_low(space); + } + + return(space != NULL); +} + +/** Create a space memory object and put it to the fil_system hash table. +The tablespace name is independent from the tablespace file-name. +Error messages are issued to the server log. 
+@param[in] name Tablespace name +@param[in] id Tablespace identifier +@param[in] flags Tablespace flags +@param[in] purpose Tablespace purpose +@return pointer to created tablespace, to be filled in with fil_node_create() +@retval NULL on failure (such as when the same tablespace exists) */ +fil_space_t* +fil_space_create( + const char* name, + ulint id, + ulint flags, + fil_type_t purpose, + fil_space_crypt_t* crypt_data) /*!< in: crypt data */ +{ + fil_space_t* space; + + ut_ad(fil_system); + ut_ad(fsp_flags_is_valid(flags)); + ut_ad(srv_page_size == UNIV_PAGE_SIZE_ORIG || flags != 0); - if (space != 0) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to add tablespace '%s' with id %lu " - "to the tablespace memory cache, but tablespace '%s' " - "with id %lu already exists in the cache!", - name, (ulong) id, space->name, (ulong) space->id); + DBUG_EXECUTE_IF("fil_space_create_failure", return(NULL);); + mutex_enter(&fil_system->mutex); + + /* Look for a matching tablespace. */ + space = fil_space_get_by_name(name); + + if (space != NULL) { mutex_exit(&fil_system->mutex); - return(FALSE); + ib::warn() << "Tablespace '" << name << "' exists in the" + " cache with id " << space->id << " != " << id; + + return(NULL); } - space = static_cast(mem_zalloc(sizeof(*space))); + space = fil_space_get_by_id(id); + + if (space != NULL) { + ib::error() << "Trying to add tablespace '" << name + << "' with id " << id + << " to the tablespace memory cache, but tablespace '" + << space->name << "' already exists in the cache!"; + mutex_exit(&fil_system->mutex); + return(NULL); + } + + space = static_cast(ut_zalloc_nokey(sizeof(*space))); - space->name = mem_strdup(name); space->id = id; + space->name = mem_strdup(name); - fil_system->tablespace_version++; - space->tablespace_version = fil_system->tablespace_version; - space->mark = FALSE; + UT_LIST_INIT(space->chain, &fil_node_t::chain); - if (purpose == FIL_TABLESPACE && !recv_recovery_on + if (fil_type_is_data(purpose) + && !recv_recovery_on && id > fil_system->max_assigned_id) { if (!fil_system->space_id_reuse_warned) { - fil_system->space_id_reuse_warned = TRUE; + fil_system->space_id_reuse_warned = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Allocated tablespace %lu, old maximum " - "was %lu", - (ulong) id, - (ulong) fil_system->max_assigned_id); + ib::warn() << "Allocated tablespace ID " << id + << " for " << name << ", old maximum was " + << fil_system->max_assigned_id; } fil_system->max_assigned_id = id; @@ -1239,38 +1322,52 @@ fil_space_create( space->flags = flags; space->magic_n = FIL_SPACE_MAGIC_N; - space->printed_compression_failure = false; rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP); + if (space->purpose == FIL_TYPE_TEMPORARY) { + ut_d(space->latch.set_temp_fsp()); + } + HASH_INSERT(fil_space_t, hash, fil_system->spaces, id, space); HASH_INSERT(fil_space_t, name_hash, fil_system->name_hash, ut_fold_string(name), space); - space->is_in_unflushed_spaces = false; - UT_LIST_ADD_LAST(space_list, fil_system->space_list, space); + UT_LIST_ADD_LAST(fil_system->space_list, space); + + if (id < SRV_LOG_SPACE_FIRST_ID && id > fil_system->max_assigned_id) { + + fil_system->max_assigned_id = id; + } space->crypt_data = crypt_data; + if (crypt_data) { + space->read_page0 = true; + /* If table could be encrypted print info */ + ib::info() << "Tablespace ID " << id << " name " << space->name + << ":" << fil_crypt_get_mode(crypt_data) + << " " << fil_crypt_get_type(crypt_data); + } + mutex_exit(&fil_system->mutex); - return(TRUE); + return(space); } 
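The rewritten fil_space_create() above now returns a pointer instead of a boolean and refuses to register a tablespace whose name or id is already in the cache. The duplicate checks can be sketched in isolation like this (simplified maps in place of the fil_system hash tables, not the real API):

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Space {
    std::string   name;
    unsigned long id;
    unsigned long flags;
};

struct SpaceRegistry {
    std::map<unsigned long, std::shared_ptr<Space>> by_id;
    std::map<std::string, std::shared_ptr<Space>>   by_name;

    /* Mirrors the checks: NULL on any duplicate, otherwise register
       the space under both its id and its name. */
    Space* create(const std::string& name, unsigned long id,
                  unsigned long flags)
    {
        if (by_name.count(name)) {
            std::printf("Tablespace '%s' exists in the cache with id %lu != %lu\n",
                        name.c_str(), by_name[name]->id, id);
            return nullptr;
        }
        if (by_id.count(id)) {
            std::printf("Tablespace id %lu already exists in the cache\n", id);
            return nullptr;
        }
        auto space = std::make_shared<Space>(Space{name, id, flags});
        by_id[id]     = space;
        by_name[name] = space;
        return space.get();
    }
};

int main()
{
    SpaceRegistry reg;
    reg.create("test/t1", 10, 0);
    Space* dup = reg.create("test/t1", 11, 0);   /* rejected: name clash */
    std::printf("duplicate accepted: %s\n", dup ? "yes" : "no");
    return 0;
}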
/*******************************************************************//** Assigns a new space id for a new single-table tablespace. This works simply by incrementing the global counter. If 4 billion id's is not enough, we may need to recycle id's. -@return TRUE if assigned, FALSE if not */ -UNIV_INTERN -ibool +@return true if assigned, false if not */ +bool fil_assign_new_space_id( /*====================*/ ulint* space_id) /*!< in/out: space id */ { ulint id; - ibool success; + bool success; mutex_enter(&fil_system->mutex); @@ -1283,17 +1380,12 @@ fil_assign_new_space_id( id++; if (id > (SRV_LOG_SPACE_FIRST_ID / 2) && (id % 1000000UL == 0)) { - ut_print_timestamp(stderr); - fprintf(stderr, - "InnoDB: Warning: you are running out of new" - " single-table tablespace id's.\n" - "InnoDB: Current counter is %lu and it" - " must not exceed %lu!\n" - "InnoDB: To reset the counter to zero" - " you have to dump all your tables and\n" - "InnoDB: recreate the whole InnoDB installation.\n", - (ulong) id, - (ulong) SRV_LOG_SPACE_FIRST_ID); + ib::warn() << "You are running out of new single-table" + " tablespace id's. Current counter is " << id + << " and it must not exceed" << SRV_LOG_SPACE_FIRST_ID + << "! To reset the counter to zero you have to dump" + " all your tables and recreate the whole InnoDB" + " installation."; } success = (id < SRV_LOG_SPACE_FIRST_ID); @@ -1301,15 +1393,11 @@ fil_assign_new_space_id( if (success) { *space_id = fil_system->max_assigned_id = id; } else { - ut_print_timestamp(stderr); - fprintf(stderr, - "InnoDB: You have run out of single-table" - " tablespace id's!\n" - "InnoDB: Current counter is %lu.\n" - "InnoDB: To reset the counter to zero you" - " have to dump all your tables and\n" - "InnoDB: recreate the whole InnoDB installation.\n", - (ulong) id); + ib::warn() << "You have run out of single-table tablespace" + " id's! Current counter is " << id + << ". To reset the counter to zero" + " you have to dump all your tables and" + " recreate the whole InnoDB installation."; *space_id = ULINT_UNDEFINED; } @@ -1319,86 +1407,9 @@ fil_assign_new_space_id( } /*******************************************************************//** -Frees a space object from the tablespace memory cache. Closes the files in -the chain but does not delete them. There must not be any pending i/o's or -flushes on the files. 
-@return TRUE if success */ -static -ibool -fil_space_free( -/*===========*/ - /* out: TRUE if success */ - ulint id, /* in: space id */ - ibool x_latched) /* in: TRUE if caller has space->latch - in X mode */ -{ - fil_space_t* space; - fil_space_t* fnamespace; - - ut_ad(mutex_own(&fil_system->mutex)); - - space = fil_space_get_by_id(id); - - if (!space) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: trying to remove tablespace %lu" - " from the cache but\n" - "InnoDB: it is not there.\n", (ulong) id); - - return(FALSE); - } - - HASH_DELETE(fil_space_t, hash, fil_system->spaces, id, space); - - fnamespace = fil_space_get_by_name(space->name); - ut_a(fnamespace); - ut_a(space == fnamespace); - - HASH_DELETE(fil_space_t, name_hash, fil_system->name_hash, - ut_fold_string(space->name), space); - - if (space->is_in_unflushed_spaces) { - - ut_ad(!fil_buffering_disabled(space)); - space->is_in_unflushed_spaces = false; - - UT_LIST_REMOVE(unflushed_spaces, fil_system->unflushed_spaces, - space); - } - - UT_LIST_REMOVE(space_list, fil_system->space_list, space); - - ut_a(space->magic_n == FIL_SPACE_MAGIC_N); - ut_a(0 == space->n_pending_flushes); - - for (fil_node_t* fil_node = UT_LIST_GET_FIRST(space->chain); - fil_node != NULL; - fil_node = UT_LIST_GET_FIRST(space->chain)) { - - fil_node_free(fil_node, fil_system, space); - } - - ut_a(0 == UT_LIST_GET_LEN(space->chain)); - - if (x_latched) { - rw_lock_x_unlock(&space->latch); - } - - rw_lock_free(&(space->latch)); - - fil_space_destroy_crypt_data(&(space->crypt_data)); - - mem_free(space->name); - mem_free(space); - - return(TRUE); -} - -/*******************************************************************//** -Returns a pointer to the file_space_t that is in the memory cache +Returns a pointer to the fil_space_t that is in the memory cache associated with a space id. The caller must lock fil_system->mutex. -@return file_space_t pointer, NULL if space not found */ +@return file_space_t pointer, NULL if space not found */ UNIV_INLINE fil_space_t* fil_space_get_space( @@ -1411,11 +1422,16 @@ fil_space_get_space( ut_ad(fil_system); space = fil_space_get_by_id(id); - if (space == NULL) { - return(NULL); + if (space == NULL || space->size != 0) { + return(space); } - if (space->size == 0 && space->purpose == FIL_TABLESPACE) { + switch (space->purpose) { + case FIL_TYPE_LOG: + break; + case FIL_TYPE_TEMPORARY: + case FIL_TYPE_TABLESPACE: + case FIL_TYPE_IMPORT: ut_a(id != 0); mutex_exit(&fil_system->mutex); @@ -1434,42 +1450,36 @@ fil_space_get_space( } /* The following code must change when InnoDB supports - multiple datafiles per tablespace. Note that there is small - change that space is found from tablespace list but - we have not yet created node for it and as we hold - fil_system mutex here fil_node_create can't continue. */ - ut_a(UT_LIST_GET_LEN(space->chain) == 1 || UT_LIST_GET_LEN(space->chain) == 0); + multiple datafiles per tablespace. */ + ut_a(1 == UT_LIST_GET_LEN(space->chain)); node = UT_LIST_GET_FIRST(space->chain); - if (node) { - /* It must be a single-table tablespace and we have not opened - the file yet; the following calls will open it and update the - size fields */ - - if (!fil_node_prepare_for_io(node, fil_system, space)) { - /* The single-table tablespace can't be opened, - because the ibd file is missing. 
*/ - return(NULL); - } - fil_node_complete_io(node, fil_system, OS_FILE_READ); + /* It must be a single-table tablespace and we have not opened + the file yet; the following calls will open it and update the + size fields */ + + if (!fil_node_prepare_for_io(node, fil_system, space)) { + /* The single-table tablespace can't be opened, + because the ibd file is missing. */ + return(NULL); } + + fil_node_complete_io(node, fil_system, IORequestRead); } return(space); } -/*******************************************************************//** -Returns the path from the first fil_node_t found for the space ID sent. +/** Returns the path from the first fil_node_t found with this space ID. The caller is responsible for freeing the memory allocated here for the value returned. -@return own: A copy of fil_node_t::path, NULL if space ID is zero +@param[in] id Tablespace ID +@return own: A copy of fil_node_t::path, NULL if space ID is zero or not found. */ -UNIV_INTERN char* fil_space_get_first_path( -/*=====================*/ - ulint id) /*!< in: space id */ + ulint id) { fil_space_t* space; fil_node_t* node; @@ -1502,8 +1512,7 @@ fil_space_get_first_path( /*******************************************************************//** Returns the size of the space in pages. The tablespace must be cached in the memory cache. -@return space size, 0 if space not found */ -UNIV_INTERN +@return space size, 0 if space not found */ ulint fil_space_get_size( /*===============*/ @@ -1527,8 +1536,7 @@ fil_space_get_size( /*******************************************************************//** Returns the flags of the space. The tablespace must be cached in the memory cache. -@return flags, ULINT_UNDEFINED if space not found */ -UNIV_INTERN +@return flags, ULINT_UNDEFINED if space not found */ ulint fil_space_get_flags( /*================*/ @@ -1539,10 +1547,6 @@ fil_space_get_flags( ut_ad(fil_system); - if (!id) { - return(0); - } - mutex_enter(&fil_system->mutex); space = fil_space_get_space(id); @@ -1560,71 +1564,128 @@ fil_space_get_flags( return(flags); } -/*******************************************************************//** -Returns the compressed page size of the space, or 0 if the space -is not compressed. The tablespace must be cached in the memory cache. -@return compressed page size, ULINT_UNDEFINED if space not found */ -UNIV_INTERN -ulint -fil_space_get_zip_size( -/*===================*/ - ulint id) /*!< in: space id */ +/** Check if table is mark for truncate. +@param[in] id space id +@return true if tablespace is marked for truncate. */ +bool +fil_space_is_being_truncated( + ulint id) { - ulint flags; - - flags = fil_space_get_flags(id); + bool mark_for_truncate; + mutex_enter(&fil_system->mutex); + mark_for_truncate = fil_space_get_by_id(id)->is_being_truncated; + mutex_exit(&fil_system->mutex); + return(mark_for_truncate); +} - if (flags && flags != ULINT_UNDEFINED) { +/** Open each fil_node_t of a named fil_space_t if not already open. +@param[in] name Tablespace name +@return true if all nodes are open */ +bool +fil_space_open( + const char* name) +{ + ut_ad(fil_system != NULL); - return(fsp_flags_get_zip_size(flags)); - } + mutex_enter(&fil_system->mutex); - return(flags); -} + fil_space_t* space = fil_space_get_by_name(name); + fil_node_t* node; -/*******************************************************************//** -Checks if the pair space, page_no refers to an existing page in a tablespace -file space. The tablespace must be cached in the memory cache. 
-@return TRUE if the address is meaningful */ -UNIV_INTERN -ibool -fil_check_adress_in_tablespace( -/*===========================*/ - ulint id, /*!< in: space id */ - ulint page_no)/*!< in: page number */ -{ - if (fil_space_get_size(id) > page_no) { + for (node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { - return(TRUE); + if (!node->is_open + && !fil_node_open_file(node)) { + mutex_exit(&fil_system->mutex); + return(false); + } } - return(FALSE); + mutex_exit(&fil_system->mutex); + + return(true); } -/****************************************************************//** -Initializes the tablespace memory cache. */ -UNIV_INTERN +/** Close each fil_node_t of a named fil_space_t if open. +@param[in] name Tablespace name */ void -fil_init( -/*=====*/ - ulint hash_size, /*!< in: hash table size */ - ulint max_n_open) /*!< in: max number of open files */ +fil_space_close( + const char* name) { - ut_a(fil_system == NULL); + if (fil_system == NULL) { + return; + } - ut_a(hash_size > 0); - ut_a(max_n_open > 0); + mutex_enter(&fil_system->mutex); - fil_system = static_cast( - mem_zalloc(sizeof(fil_system_t))); + fil_space_t* space = fil_space_get_by_name(name); + if (space == NULL) { + mutex_exit(&fil_system->mutex); + return; + } - mutex_create(fil_system_mutex_key, - &fil_system->mutex, SYNC_ANY_LATCH); + for (fil_node_t* node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { + + if (node->is_open) { + fil_node_close_file(node); + } + } + + mutex_exit(&fil_system->mutex); +} + +/** Returns the page size of the space and whether it is compressed or not. +The tablespace must be cached in the memory cache. +@param[in] id space id +@param[out] found true if tablespace was found +@return page size */ +const page_size_t +fil_space_get_page_size( + ulint id, + bool* found) +{ + const ulint flags = fil_space_get_flags(id); + + if (flags == ULINT_UNDEFINED) { + *found = false; + return(univ_page_size); + } + + *found = true; + + return(page_size_t(flags)); +} + +/****************************************************************//** +Initializes the tablespace memory cache. */ +void +fil_init( +/*=====*/ + ulint hash_size, /*!< in: hash table size */ + ulint max_n_open) /*!< in: max number of open files */ +{ + ut_a(fil_system == NULL); + + ut_a(hash_size > 0); + ut_a(max_n_open > 0); + + fil_system = static_cast( + ut_zalloc_nokey(sizeof(*fil_system))); + + mutex_create(LATCH_ID_FIL_SYSTEM, &fil_system->mutex); fil_system->spaces = hash_create(hash_size); fil_system->name_hash = hash_create(hash_size); - UT_LIST_INIT(fil_system->LRU); + UT_LIST_INIT(fil_system->LRU, &fil_node_t::LRU); + UT_LIST_INIT(fil_system->space_list, &fil_space_t::space_list); + UT_LIST_INIT(fil_system->unflushed_spaces, + &fil_space_t::unflushed_spaces); + UT_LIST_INIT(fil_system->named_spaces, &fil_space_t::named_spaces); fil_system->max_n_open = max_n_open; @@ -1637,7 +1698,6 @@ database server shutdown. This should be called at a server startup after the space objects for the log and the system tablespace have been created. The purpose of this operation is to make sure we never run out of file descriptors if we need to read from the insert buffer or to write to the log. 
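The new fil_space_get_page_size() below reconstructs a page_size_t from the tablespace flags instead of returning a raw zip size. The convention it relies on is that page sizes are stored as small shift values, 512 << ssize, with 0 meaning the original 16 KiB size; the sketch below models only that mapping and deliberately does not reproduce the FSP_FLAGS_* bit layout:

#include <cstdio>

static const unsigned ORIGINAL_PAGE_SIZE = 16384;   /* like UNIV_PAGE_SIZE_ORIG */

struct PageSize {
    unsigned physical;   /* on-disk size, smaller when compressed */
    unsigned logical;    /* in-memory (uncompressed) size */
    bool     compressed;
};

static unsigned ssize_to_bytes(unsigned ssize)
{
    return ssize == 0 ? ORIGINAL_PAGE_SIZE : (512u << ssize);
}

static PageSize make_page_size(unsigned page_ssize, unsigned zip_ssize)
{
    PageSize p;
    p.logical    = ssize_to_bytes(page_ssize);
    p.compressed = (zip_ssize != 0);
    p.physical   = p.compressed ? (512u << zip_ssize) : p.logical;
    return p;
}

int main()
{
    PageSize p = make_page_size(0, 3);   /* 16 KiB logical, 4 KiB compressed */
    std::printf("logical=%u physical=%u compressed=%d\n",
                p.logical, p.physical, (int) p.compressed);
    return 0;
}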
*/ -UNIV_INTERN void fil_open_log_and_system_tablespace_files(void) /*==========================================*/ @@ -1661,9 +1721,8 @@ fil_open_log_and_system_tablespace_files(void) node != NULL; node = UT_LIST_GET_NEXT(chain, node)) { - if (!node->open) { - if (!fil_node_open_file(node, fil_system, - space)) { + if (!node->is_open) { + if (!fil_node_open_file(node)) { /* This func is called during server's startup. If some file of log or system tablespace is missing, the server @@ -1675,25 +1734,20 @@ fil_open_log_and_system_tablespace_files(void) if (fil_system->max_n_open < 10 + fil_system->n_open) { - fprintf(stderr, - "InnoDB: Warning: you must" - " raise the value of" - " innodb_open_files in\n" - "InnoDB: my.cnf! Remember that" - " InnoDB keeps all log files" - " and all system\n" - "InnoDB: tablespace files open" + ib::warn() << "You must raise the value of" + " innodb_open_files in my.cnf!" + " Remember that InnoDB keeps all" + " log files and all system" + " tablespace files open" " for the whole time mysqld is" - " running, and\n" - "InnoDB: needs to open also" + " running, and needs to open also" " some .ibd files if the" - " file-per-table storage\n" - "InnoDB: model is used." - " Current open files %lu," - " max allowed" - " open files %lu.\n", - (ulong) fil_system->n_open, - (ulong) fil_system->max_n_open); + " file-per-table storage model is used." + " Current open files " + << fil_system->n_open + << ", max allowed open files " + << fil_system->max_n_open + << "."; } } } @@ -1704,18 +1758,20 @@ fil_open_log_and_system_tablespace_files(void) /*******************************************************************//** Closes all open files. There must not be any pending i/o's or not flushed modifications in the files. */ -UNIV_INTERN void fil_close_all_files(void) /*=====================*/ { fil_space_t* space; - mutex_enter(&fil_system->mutex); + /* At shutdown, we should not have any files in this list. */ + ut_ad(srv_fast_shutdown == 2 + || UT_LIST_GET_LEN(fil_system->named_spaces) == 0); - space = UT_LIST_GET_FIRST(fil_system->space_list); + mutex_enter(&fil_system->mutex); - while (space != NULL) { + for (space = UT_LIST_GET_FIRST(fil_system->space_list); + space != NULL; ) { fil_node_t* node; fil_space_t* prev_space = space; @@ -1723,23 +1779,25 @@ fil_close_all_files(void) node != NULL; node = UT_LIST_GET_NEXT(chain, node)) { - if (node->open) { - fil_node_close_file(node, fil_system); + if (node->is_open) { + fil_node_close_file(node); } } space = UT_LIST_GET_NEXT(space_list, space); - - fil_space_free(prev_space->id, FALSE); + fil_space_detach(prev_space); + fil_space_free_low(prev_space); } mutex_exit(&fil_system->mutex); + + ut_ad(srv_fast_shutdown == 2 + || UT_LIST_GET_LEN(fil_system->named_spaces) == 0); } /*******************************************************************//** Closes the redo log files. There must not be any pending i/o's or not flushed modifications in the files. */ -UNIV_INTERN void fil_close_log_files( /*================*/ @@ -1755,24 +1813,28 @@ fil_close_log_files( fil_node_t* node; fil_space_t* prev_space = space; - if (space->purpose != FIL_LOG) { + if (space->purpose != FIL_TYPE_LOG) { space = UT_LIST_GET_NEXT(space_list, space); continue; } + /* Log files are not in the fil_system->named_spaces list. 
*/ + ut_ad(space->max_lsn == 0); + for (node = UT_LIST_GET_FIRST(space->chain); node != NULL; node = UT_LIST_GET_NEXT(chain, node)) { - if (node->open) { - fil_node_close_file(node, fil_system); + if (node->is_open) { + fil_node_close_file(node); } } space = UT_LIST_GET_NEXT(space_list, space); if (free) { - fil_space_free(prev_space->id, FALSE); + fil_space_detach(prev_space); + fil_space_free_low(prev_space); } } @@ -1782,17 +1844,13 @@ fil_close_log_files( /*******************************************************************//** Sets the max tablespace id counter if the given number is bigger than the previous value. */ -UNIV_INTERN void fil_set_max_space_id_if_bigger( /*===========================*/ ulint max_id) /*!< in: maximum known id */ { if (max_id >= SRV_LOG_SPACE_FIRST_ID) { - fprintf(stderr, - "InnoDB: Fatal error: max tablespace id" - " is too high, %lu\n", (ulong) max_id); - ut_error; + ib::fatal() << "Max tablespace id is too high, " << max_id; } mutex_enter(&fil_system->mutex); @@ -1805,348 +1863,115 @@ fil_set_max_space_id_if_bigger( mutex_exit(&fil_system->mutex); } -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page header -of the first page of a data file of the system tablespace (space 0), -which is uncompressed. */ -static MY_ATTRIBUTE((warn_unused_result)) +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_lsn_and_arch_no_to_file( -/*==============================*/ - ulint space, /*!< in: space to write to */ - ulint sum_of_sizes, /*!< in: combined size of previous files - in space, in database pages */ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no MY_ATTRIBUTE((unused))) - /*!< in: archived log number to write */ +fil_write_flushed_lsn( + lsn_t lsn) { byte* buf1; byte* buf; dberr_t err; - buf1 = static_cast(mem_alloc(2 * UNIV_PAGE_SIZE)); + buf1 = static_cast(ut_malloc_nokey(2 * UNIV_PAGE_SIZE)); buf = static_cast(ut_align(buf1, UNIV_PAGE_SIZE)); - err = fil_read(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); + const page_id_t page_id(TRX_SYS_SPACE, 0); + + err = fil_read(page_id, univ_page_size, 0, univ_page_size.physical(), + buf); + if (err == DB_SUCCESS) { - mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, - lsn); + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, lsn); + + err = fil_write(page_id, univ_page_size, 0, + univ_page_size.physical(), buf); - err = fil_write(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); + fil_flush_file_spaces(FIL_TYPE_TABLESPACE); } - mem_free(buf1); + ut_free(buf1); return(err); } -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no) /*!< in: latest archived log file number */ +#ifndef UNIV_HOTBACKUP +/** Acquire a tablespace when it could be dropped concurrently. +Used by background threads that do not necessarily hold proper locks +for concurrency control. 
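The acquire/release pair introduced below pins a tablespace for background threads by bumping n_pending_ops under the cache mutex and refusing spaces that are being dropped or truncated. A minimal standalone sketch of that protocol, with simplified stand-ins for fil_space_t and the fil_system mutex:

#include <cstdio>
#include <mutex>

struct Space {
    unsigned long id;
    bool stop_new_ops;     /* set while the space is being dropped */
    int  n_pending_ops;
};

static std::mutex cache_mutex;

/* Returns the space with one pending op added, or nullptr if it is
   missing or being deleted -- the shape of fil_space_acquire(). */
static Space* space_acquire(Space* space)
{
    std::lock_guard<std::mutex> guard(cache_mutex);
    if (space == nullptr || space->stop_new_ops) {
        return nullptr;
    }
    ++space->n_pending_ops;
    return space;
}

/* Must pair with a successful acquire, like fil_space_release(). */
static void space_release(Space* space)
{
    std::lock_guard<std::mutex> guard(cache_mutex);
    --space->n_pending_ops;
}

int main()
{
    Space s = {42, false, 0};

    if (Space* pinned = space_acquire(&s)) {
        /* ... background work on the tablespace ... */
        space_release(pinned);
    }

    std::printf("pending ops now %d\n", s.n_pending_ops);
    return 0;
}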
+@param[in] id tablespace ID +@param[in] silent whether to silently ignore missing tablespaces +@return the tablespace, or NULL if missing or being deleted */ +inline +fil_space_t* +fil_space_acquire_low( + ulint id, + bool silent) { fil_space_t* space; - fil_node_t* node; - dberr_t err; mutex_enter(&fil_system->mutex); - for (space = UT_LIST_GET_FIRST(fil_system->space_list); - space != NULL; - space = UT_LIST_GET_NEXT(space_list, space)) { - - /* We only write the lsn to all existing data files which have - been open during the lifetime of the mysqld process; they are - represented by the space objects in the tablespace memory - cache. Note that all data files in the system tablespace 0 - and the UNDO log tablespaces (if separate) are always open. */ - - if (space->purpose == FIL_TABLESPACE - && !fil_is_user_tablespace_id(space->id)) { - ulint sum_of_sizes = 0; - - for (node = UT_LIST_GET_FIRST(space->chain); - node != NULL; - node = UT_LIST_GET_NEXT(chain, node)) { - - mutex_exit(&fil_system->mutex); - - err = fil_write_lsn_and_arch_no_to_file( - space->id, sum_of_sizes, lsn, - arch_log_no); - - if (err != DB_SUCCESS) { - - return(err); - } - - mutex_enter(&fil_system->mutex); + space = fil_space_get_by_id(id); - sum_of_sizes += node->size; - } + if (space == NULL) { + if (!silent) { + ib::warn() << "Trying to access missing" + " tablespace " << id; } + } else if (space->stop_new_ops || space->is_being_truncated) { + space = NULL; + } else { + space->n_pending_ops++; } mutex_exit(&fil_system->mutex); - return(DB_SUCCESS); -} - -/*******************************************************************//** -Checks the consistency of the first data page of a tablespace -at database startup. -@retval NULL on success, or if innodb_force_recovery is set -@return pointer to an error message string */ -static MY_ATTRIBUTE((warn_unused_result)) -const char* -fil_check_first_page( -/*=================*/ - const page_t* page) /*!< in: data page */ -{ - ulint space_id; - ulint flags; - - if (srv_force_recovery >= SRV_FORCE_IGNORE_CORRUPT) { - return(NULL); - } - - space_id = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_ID + page); - flags = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + page); - - if (UNIV_PAGE_SIZE != fsp_flags_get_page_size(flags)) { - fprintf(stderr, - "InnoDB: Error: Current page size %lu != " - " page size on page %lu\n", - UNIV_PAGE_SIZE, fsp_flags_get_page_size(flags)); - - return("innodb-page-size mismatch"); - } - - if (!space_id && !flags) { - ulint nonzero_bytes = UNIV_PAGE_SIZE; - const byte* b = page; - - while (!*b && --nonzero_bytes) { - b++; - } - - if (!nonzero_bytes) { - return("space header page consists of zero bytes"); - } - } - - if (buf_page_is_corrupted( - false, page, fsp_flags_get_zip_size(flags))) { - return("checksum mismatch"); - } - - if (page_get_space_id(page) == space_id - && page_get_page_no(page) == 0) { - return(NULL); - } - - return("inconsistent data in space header"); + return(space); } -/*******************************************************************//** -Reads the flushed lsn, arch no, space_id and tablespace flag fields from -the first page of a data file at database startup. 
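fil_write_flushed_lsn() in the hunk above patches an 8-byte LSN, big-endian, into the FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION slot of the first system-tablespace page and writes the page back. The byte-level part can be sketched on a plain buffer; the offset 26 reflects the FIL page header layout used here, and the buffer merely stands in for the page read by fil_read():

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static const size_t PAGE_SIZE        = 16384;
static const size_t FLUSH_LSN_OFFSET = 26;   /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION */

/* Equivalent of mach_write_to_8(): most significant byte first. */
static void write_u64_be(uint8_t* dest, uint64_t value)
{
    for (int i = 7; i >= 0; --i) {
        dest[i] = (uint8_t)(value & 0xff);
        value >>= 8;
    }
}

static uint64_t read_u64_be(const uint8_t* src)
{
    uint64_t value = 0;
    for (int i = 0; i < 8; ++i) {
        value = (value << 8) | src[i];
    }
    return value;
}

int main()
{
    std::vector<uint8_t> page(PAGE_SIZE, 0);    /* pretend page 0 was read */

    write_u64_be(page.data() + FLUSH_LSN_OFFSET, 0x1122334455667788ULL);
    /* ... the real code now writes the page back and flushes the space */

    std::printf("flushed lsn stored: %llx\n",
                (unsigned long long) read_u64_be(page.data() + FLUSH_LSN_OFFSET));
    return 0;
}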
-@retval NULL on success, or if innodb_force_recovery is set -@return pointer to an error message string */ -UNIV_INTERN -const char* -fil_read_first_page( -/*================*/ - os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: tablespace flags */ - ulint* space_id, /*!< out: tablespace ID */ -#ifdef UNIV_LOG_ARCHIVE - ulint* min_arch_log_no, /*!< out: min of archived - log numbers in data files */ - ulint* max_arch_log_no, /*!< out: max of archived - log numbers in data files */ -#endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*< out: crypt data */ +/** Acquire a tablespace when it could be dropped concurrently. +Used by background threads that do not necessarily hold proper locks +for concurrency control. +@param[in] id tablespace ID +@return the tablespace, or NULL if missing or being deleted */ +fil_space_t* +fil_space_acquire( + ulint id) { - byte* buf; - byte* page; - lsn_t flushed_lsn; - const char* check_msg = NULL; - fil_space_crypt_t* cdata; - - - buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); - - /* Align the memory for a possible read from a raw device */ - - page = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); - - os_file_read(data_file, page, 0, UNIV_PAGE_SIZE); - - /* The FSP_HEADER on page 0 is only valid for the first file - in a tablespace. So if this is not the first datafile, leave - *flags and *space_id as they were read from the first file and - do not validate the first page. */ - if (!one_read_already) { - *flags = fsp_header_get_flags(page); - *space_id = fsp_header_get_space_id(page); - } - - if (!one_read_already) { - check_msg = fil_check_first_page(page); - } - - flushed_lsn = mach_read_from_8(page + - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - - ulint space = fsp_header_get_space_id(page); - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags), NULL); - cdata = fil_space_read_crypt_data(space, page, offset); - - if (crypt_data) { - *crypt_data = cdata; - } - - /* If file space is encrypted we need to have at least some - encryption service available where to get keys */ - if ((cdata && cdata->encryption == FIL_SPACE_ENCRYPTION_ON) || - (srv_encrypt_tables && - cdata && cdata->encryption == FIL_SPACE_ENCRYPTION_DEFAULT)) { - - if (!encryption_key_id_exists(cdata->key_id)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace id %ld is encrypted but encryption service" - " or used key_id %u is not available. 
Can't continue opening tablespace.", - space, cdata->key_id); - - return ("table encrypted but encryption service not available."); - - } - } - - ut_free(buf); - - if (check_msg) { - return(check_msg); - } - - if (!one_read_already) { - *min_flushed_lsn = flushed_lsn; - *max_flushed_lsn = flushed_lsn; -#ifdef UNIV_LOG_ARCHIVE - *min_arch_log_no = arch_log_no; - *max_arch_log_no = arch_log_no; -#endif /* UNIV_LOG_ARCHIVE */ - return(NULL); - } - - if (*min_flushed_lsn > flushed_lsn) { - *min_flushed_lsn = flushed_lsn; - } - if (*max_flushed_lsn < flushed_lsn) { - *max_flushed_lsn = flushed_lsn; - } -#ifdef UNIV_LOG_ARCHIVE - if (*min_arch_log_no > arch_log_no) { - *min_arch_log_no = arch_log_no; - } - if (*max_arch_log_no < arch_log_no) { - *max_arch_log_no = arch_log_no; - } -#endif /* UNIV_LOG_ARCHIVE */ - - return(NULL); + return(fil_space_acquire_low(id, false)); } -/*================ SINGLE-TABLE TABLESPACES ==========================*/ - -#ifndef UNIV_HOTBACKUP -/*******************************************************************//** -Increments the count of pending operation, if space is not being deleted. -@return TRUE if being deleted, and operation should be skipped */ -UNIV_INTERN -ibool -fil_inc_pending_ops( -/*================*/ - ulint id, /*!< in: space id */ - ibool print_err) /*!< in: need to print error or not */ +/** Acquire a tablespace that may not exist. +Used by background threads that do not necessarily hold proper locks +for concurrency control. +@param[in] id tablespace ID +@return the tablespace, or NULL if missing or being deleted */ +fil_space_t* +fil_space_acquire_silent( + ulint id) { - fil_space_t* space; - - mutex_enter(&fil_system->mutex); - - space = fil_space_get_by_id(id); - - if (space == NULL) { - if (print_err) { - fprintf(stderr, - "InnoDB: Error: trying to do an operation on a" - " dropped tablespace %lu\n", - (ulong) id); - } - } - - if (space == NULL || space->stop_new_ops) { - mutex_exit(&fil_system->mutex); - - return(TRUE); - } - - space->n_pending_ops++; - - mutex_exit(&fil_system->mutex); - - return(FALSE); + return(fil_space_acquire_low(id, true)); } -/*******************************************************************//** -Decrements the count of pending operations. */ -UNIV_INTERN +/** Release a tablespace acquired with fil_space_acquire(). +@param[in,out] space tablespace to release */ void -fil_decr_pending_ops( -/*=================*/ - ulint id) /*!< in: space id */ +fil_space_release( + fil_space_t* space) { - fil_space_t* space; - mutex_enter(&fil_system->mutex); - - space = fil_space_get_by_id(id); - - if (space == NULL) { - fprintf(stderr, - "InnoDB: Error: decrementing pending operation" - " of a dropped tablespace %lu\n", - (ulong) id); - } - - if (space != NULL) { - space->n_pending_ops--; - } - + ut_ad(space->magic_n == FIL_SPACE_MAGIC_N); + ut_ad(space->n_pending_ops > 0); + space->n_pending_ops--; mutex_exit(&fil_system->mutex); } #endif /* !UNIV_HOTBACKUP */ /********************************************************//** Creates the database directory for a table if it does not exist yet. 
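fil_create_directory_for_tablename(), updated in the next hunk, builds "<datadir>/<database>" from a table name in "database/table" form and makes sure that directory exists. A standalone sketch of that path handling, using std::string in place of the manual buffer arithmetic and POSIX mkdir() as a stand-in for os_file_create_directory() (the "./data" datadir is illustrative):

#include <cerrno>
#include <cstdio>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

static std::string datadir = "./data";    /* stands in for fil_path_to_mysql_datadir */

static bool create_directory_for_tablename(const std::string& name)
{
    std::string::size_type slash = name.find('/');
    if (slash == std::string::npos) {
        return false;                     /* the original asserts on this */
    }

    std::string path = datadir + "/" + name.substr(0, slash);

    /* Like os_file_create_directory(path, false): an existing directory is fine. */
    return mkdir(path.c_str(), 0750) == 0 || errno == EEXIST;
}

int main()
{
    std::printf("created: %d\n",
                (int) create_directory_for_tablename("test/t1"));
    return 0;
}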
*/ -static void fil_create_directory_for_tablename( /*===============================*/ @@ -2160,305 +1985,522 @@ fil_create_directory_for_tablename( len = strlen(fil_path_to_mysql_datadir); namend = strchr(name, '/'); ut_a(namend); - path = static_cast(mem_alloc(len + (namend - name) + 2)); + path = static_cast(ut_malloc_nokey(len + (namend - name) + 2)); memcpy(path, fil_path_to_mysql_datadir, len); path[len] = '/'; memcpy(path + len + 1, name, namend - name); path[len + (namend - name) + 1] = 0; - srv_normalize_path_for_win(path); + os_normalize_path_for_win(path); - ut_a(os_file_create_directory(path, FALSE)); - mem_free(path); + bool success = os_file_create_directory(path, false); + ut_a(success); + + ut_free(path); } #ifndef UNIV_HOTBACKUP -/********************************************************//** -Writes a log record about an .ibd file create/rename/delete. */ +/** Write a log record about an operation on a tablespace file. +@param[in] type MLOG_FILE_NAME or MLOG_FILE_DELETE +or MLOG_FILE_CREATE2 or MLOG_FILE_RENAME2 +@param[in] space_id tablespace identifier +@param[in] first_page_no first page number in the file +@param[in] path file path +@param[in] new_path if type is MLOG_FILE_RENAME2, the new name +@param[in] flags if type is MLOG_FILE_CREATE2, the space flags +@param[in,out] mtr mini-transaction */ static void fil_op_write_log( -/*=============*/ - ulint type, /*!< in: MLOG_FILE_CREATE, - MLOG_FILE_CREATE2, - MLOG_FILE_DELETE, or - MLOG_FILE_RENAME */ - ulint space_id, /*!< in: space id */ - ulint log_flags, /*!< in: redo log flags (stored - in the page number field) */ - ulint flags, /*!< in: compressed page size - and file format - if type==MLOG_FILE_CREATE2, or 0 */ - const char* name, /*!< in: table name in the familiar - 'databasename/tablename' format, or - the file path in the case of - MLOG_FILE_DELETE */ - const char* new_name, /*!< in: if type is MLOG_FILE_RENAME, - the new table name in the - 'databasename/tablename' format */ - mtr_t* mtr) /*!< in: mini-transaction handle */ -{ - byte* log_ptr; - ulint len; - - log_ptr = mlog_open(mtr, 11 + 2 + 1); - - if (!log_ptr) { + mlog_id_t type, + ulint space_id, + ulint first_page_no, + const char* path, + const char* new_path, + ulint flags, + mtr_t* mtr) +{ + byte* log_ptr; + ulint len; + + ut_ad(first_page_no == 0); + + /* fil_name_parse() requires that there be at least one path + separator and that the file path end with ".ibd". 
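The rewritten fil_op_write_log() below serializes its payload as: an optional 4-byte flags field for MLOG_FILE_CREATE2, then a 2-byte length followed by the NUL-terminated path, and for MLOG_FILE_RENAME2 a second length-prefixed NUL-terminated path. A standalone sketch of just that body layout, leaving out the mlog_open()/mlog_catenate_string() plumbing and the initial (type, space id, page number) header; byte order follows the big-endian mach_write_to_2/4 convention:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

enum RecordType { FILE_NAME, FILE_DELETE, FILE_CREATE2, FILE_RENAME2 };

static void put_be16(std::vector<uint8_t>& out, uint16_t v)
{
    out.push_back((uint8_t)(v >> 8));
    out.push_back((uint8_t)(v & 0xff));
}

static void put_be32(std::vector<uint8_t>& out, uint32_t v)
{
    for (int shift = 24; shift >= 0; shift -= 8) {
        out.push_back((uint8_t)(v >> shift));
    }
}

/* 2-byte length (including the NUL) followed by the NUL-terminated string. */
static void put_path(std::vector<uint8_t>& out, const std::string& path)
{
    uint16_t len = (uint16_t)(path.size() + 1);
    put_be16(out, len);
    out.insert(out.end(), path.begin(), path.end());
    out.push_back(0);
}

static std::vector<uint8_t> make_file_op_body(RecordType type,
                                              const std::string& path,
                                              const std::string& new_path,
                                              uint32_t flags)
{
    std::vector<uint8_t> body;
    if (type == FILE_CREATE2) put_be32(body, flags);
    put_path(body, path);
    if (type == FILE_RENAME2) put_path(body, new_path);
    return body;
}

int main()
{
    std::vector<uint8_t> body =
        make_file_op_body(FILE_RENAME2, "test/t1.ibd", "test/t2.ibd", 0);
    std::printf("record body is %zu bytes\n", body.size());
    return 0;
}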
*/ + ut_ad(strchr(path, OS_PATH_SEPARATOR) != NULL); + ut_ad(strcmp(&path[strlen(path) - strlen(DOT_IBD)], DOT_IBD) == 0); + + log_ptr = mlog_open(mtr, 11 + 4 + 2 + 1); + + if (log_ptr == NULL) { /* Logging in mtr is switched off during crash recovery: in that case mlog_open returns NULL */ return; } - log_ptr = mlog_write_initial_log_record_for_file_op( - type, space_id, log_flags, log_ptr, mtr); + log_ptr = mlog_write_initial_log_record_low( + type, space_id, first_page_no, log_ptr, mtr); + if (type == MLOG_FILE_CREATE2) { mach_write_to_4(log_ptr, flags); log_ptr += 4; } + /* Let us store the strings as null-terminated for easier readability and handling */ - len = strlen(name) + 1; + len = strlen(path) + 1; mach_write_to_2(log_ptr, len); log_ptr += 2; mlog_close(mtr, log_ptr); - mlog_catenate_string(mtr, (byte*) name, len); + mlog_catenate_string( + mtr, reinterpret_cast(path), len); - if (type == MLOG_FILE_RENAME) { - len = strlen(new_name) + 1; + switch (type) { + case MLOG_FILE_RENAME2: + ut_ad(strchr(new_path, OS_PATH_SEPARATOR) != NULL); + len = strlen(new_path) + 1; log_ptr = mlog_open(mtr, 2 + len); ut_a(log_ptr); mach_write_to_2(log_ptr, len); log_ptr += 2; mlog_close(mtr, log_ptr); - mlog_catenate_string(mtr, (byte*) new_name, len); + mlog_catenate_string( + mtr, reinterpret_cast(new_path), len); + break; + case MLOG_FILE_NAME: + case MLOG_FILE_DELETE: + case MLOG_FILE_CREATE2: + break; + default: + ut_ad(0); } } -#endif -/*******************************************************************//** -Parses the body of a log record written about an .ibd file operation. That is, -the log record part after the standard (type, space id, page no) header of the -log record. +/** Write redo log for renaming a file. +@param[in] space_id tablespace id +@param[in] first_page_no first page number in the file +@param[in] old_name tablespace file name +@param[in] new_name tablespace file name after renaming +@param[in,out] mtr mini-transaction */ +static +void +fil_name_write_rename( + ulint space_id, + ulint first_page_no, + const char* old_name, + const char* new_name, + mtr_t* mtr) +{ + ut_ad(!is_predefined_tablespace(space_id)); -If desired, also replays the delete or rename operation if the .ibd file -exists and the space id in it matches. Replays the create operation if a file -at that path does not exist yet. If the database directory for the file to be -created does not exist, then we create the directory, too. + fil_op_write_log( + MLOG_FILE_RENAME2, + space_id, first_page_no, old_name, new_name, 0, mtr); +} -Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to -the datadir that we should use in replaying the file operations. +/** Write MLOG_FILE_NAME for a file. +@param[in] space_id tablespace id +@param[in] first_page_no first page number in the file +@param[in] name tablespace file name +@param[in,out] mtr mini-transaction */ +static +void +fil_name_write( + ulint space_id, + ulint first_page_no, + const char* name, + mtr_t* mtr) +{ + fil_op_write_log( + MLOG_FILE_NAME, space_id, first_page_no, name, NULL, 0, mtr); +} -InnoDB recovery does not replay these fully since it always sets the space id -to zero. But mysqlbackup does replay them. TODO: If remote tablespaces are -used, mysqlbackup will only create tables in the default directory since -MLOG_FILE_CREATE and MLOG_FILE_CREATE2 only know the tablename, not the path. +/** Write MLOG_FILE_NAME for a file. 
+@param[in] space tablespace +@param[in] first_page_no first page number in the file +@param[in] file tablespace file +@param[in,out] mtr mini-transaction */ +static +void +fil_name_write( + const fil_space_t* space, + ulint first_page_no, + const fil_node_t* file, + mtr_t* mtr) +{ + fil_name_write(space->id, first_page_no, file->name, mtr); +} +#endif -@return end of log record, or NULL if the record was not completely -contained between ptr and end_ptr */ -UNIV_INTERN -byte* -fil_op_log_parse_or_replay( -/*=======================*/ - byte* ptr, /*!< in: buffer containing the log record body, - or an initial segment of it, if the record does - not fir completely between ptr and end_ptr */ - byte* end_ptr, /*!< in: buffer end */ - ulint type, /*!< in: the type of this log record */ - ulint space_id, /*!< in: the space id of the tablespace in - question, or 0 if the log record should - only be parsed but not replayed */ - ulint log_flags) /*!< in: redo log flags - (stored in the page number parameter) */ -{ - ulint name_len; - ulint new_name_len; - const char* name; - const char* new_name = NULL; - ulint flags = 0; +/********************************************************//** +Recreates table indexes by applying +TRUNCATE log record during recovery. +@return DB_SUCCESS or error code */ +dberr_t +fil_recreate_table( +/*===============*/ + ulint space_id, /*!< in: space id */ + ulint format_flags, /*!< in: page format */ + ulint flags, /*!< in: tablespace flags */ + const char* name, /*!< in: table name */ + truncate_t& truncate) /*!< in: The information of + TRUNCATE log record */ +{ + dberr_t err = DB_SUCCESS; + bool found; + const page_size_t page_size(fil_space_get_page_size(space_id, + &found)); + + if (!found) { + ib::info() << "Missing .ibd file for table '" << name + << "' with tablespace " << space_id; + return(DB_ERROR); + } - if (type == MLOG_FILE_CREATE2) { - if (end_ptr < ptr + 4) { + ut_ad(!truncate_t::s_fix_up_active); + truncate_t::s_fix_up_active = true; - return(NULL); - } + /* Step-1: Scan for active indexes from REDO logs and drop + all the indexes using low level function that take root_page_no + and space-id. */ + truncate.drop_indexes(space_id); - flags = mach_read_from_4(ptr); - ptr += 4; + /* Step-2: Scan for active indexes and re-create them. */ + err = truncate.create_indexes( + name, space_id, page_size, flags, format_flags); + if (err != DB_SUCCESS) { + ib::info() << "Failed to create indexes for the table '" + << name << "' with tablespace " << space_id + << " while fixing up truncate action"; + return(err); } - if (end_ptr < ptr + 2) { + truncate_t::s_fix_up_active = false; - return(NULL); - } + return(err); +} - name_len = mach_read_from_2(ptr); +/********************************************************//** +Recreates the tablespace and table indexes by applying +TRUNCATE log record during recovery. +@return DB_SUCCESS or error code */ +dberr_t +fil_recreate_tablespace( +/*====================*/ + ulint space_id, /*!< in: space id */ + ulint format_flags, /*!< in: page format */ + ulint flags, /*!< in: tablespace flags */ + const char* name, /*!< in: table name */ + truncate_t& truncate, /*!< in: The information of + TRUNCATE log record */ + lsn_t recv_lsn) /*!< in: the end LSN of + the log record */ +{ + dberr_t err = DB_SUCCESS; + mtr_t mtr; - ptr += 2; + ut_ad(!truncate_t::s_fix_up_active); + truncate_t::s_fix_up_active = true; - if (end_ptr < ptr + name_len) { + /* Step-1: Invalidate buffer pool pages belonging to the tablespace + to re-create. 
*/ + buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_ALL_NO_WRITE, 0); - return(NULL); + /* Remove all insert buffer entries for the tablespace */ + ibuf_delete_for_discarded_space(space_id); + + /* Step-2: truncate tablespace (reset the size back to original or + default size) of tablespace. */ + err = truncate.truncate( + space_id, truncate.get_dir_path(), name, flags, true); + + if (err != DB_SUCCESS) { + + ib::info() << "Cannot access .ibd file for table '" + << name << "' with tablespace " << space_id + << " while truncating"; + return(DB_ERROR); } - name = (const char*) ptr; + bool found; + const page_size_t& page_size = + fil_space_get_page_size(space_id, &found); - ptr += name_len; + if (!found) { + ib::info() << "Missing .ibd file for table '" << name + << "' with tablespace " << space_id; + return(DB_ERROR); + } - if (type == MLOG_FILE_RENAME) { - if (end_ptr < ptr + 2) { + /* Step-3: Initialize Header. */ + if (page_size.is_compressed()) { + byte* buf; + page_t* page; - return(NULL); - } + buf = static_cast(ut_zalloc_nokey(3 * UNIV_PAGE_SIZE)); - new_name_len = mach_read_from_2(ptr); + /* Align the memory for file i/o */ + page = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); - ptr += 2; + flags = fsp_flags_set_page_size(flags, univ_page_size); - if (end_ptr < ptr + new_name_len) { + fsp_header_init_fields(page, space_id, flags); - return(NULL); - } + mach_write_to_4( + page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id); - new_name = (const char*) ptr; + page_zip_des_t page_zip; + page_zip_set_size(&page_zip, page_size.physical()); + page_zip.data = page + UNIV_PAGE_SIZE; - ptr += new_name_len; - } +#ifdef UNIV_DEBUG + page_zip.m_start = +#endif /* UNIV_DEBUG */ + page_zip.m_end = page_zip.m_nonempty = page_zip.n_blobs = 0; + buf_flush_init_for_writing( + NULL, page, &page_zip, 0, + fsp_is_checksum_disabled(space_id)); - /* We managed to parse a full log record body */ - /* - printf("Parsed log rec of type %lu space %lu\n" - "name %s\n", type, space_id, name); + err = fil_write(page_id_t(space_id, 0), page_size, 0, + page_size.physical(), page_zip.data); - if (type == MLOG_FILE_RENAME) { - printf("new name %s\n", new_name); + ut_free(buf); + + if (err != DB_SUCCESS) { + ib::info() << "Failed to clean header of the" + " table '" << name << "' with tablespace " + << space_id; + return(err); + } } - */ - if (!space_id) { - return(ptr); + + mtr_start(&mtr); + /* Don't log the operation while fixing up table truncate operation + as crash at this level can still be sustained with recovery restarting + from last checkpoint. */ + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); + + /* Initialize the first extent descriptor page and + the second bitmap page for the new tablespace. */ + fsp_header_init(space_id, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + mtr_commit(&mtr); + + /* Step-4: Re-Create Indexes to newly re-created tablespace. + This operation will restore tablespace back to what it was + when it was created during CREATE TABLE. */ + err = truncate.create_indexes( + name, space_id, page_size, flags, format_flags); + if (err != DB_SUCCESS) { + return(err); } - /* Let us try to perform the file operation, if sensible. Note that - mysqlbackup has at this stage already read in all space id info to the - fil0fil.cc data structures. + /* Step-5: Write new created pages into ibd file handle and + flush it to disk for the tablespace, in case i/o-handler thread + deletes the bitmap page from buffer. 
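Step-3 above builds the compressed first page in memory: it over-allocates three page-sized chunks with ut_zalloc_nokey() and aligns the working frame to a page boundary with ut_align() so it is safe for direct I/O, keeping the zip frame right behind it. A minimal sketch of that over-allocate-and-align idiom using only the C library; the 16 KB constant is an assumption standing in for UNIV_PAGE_SIZE:

    #include <cstdint>
    #include <cstdlib>

    static const std::size_t kPageSize = 16384;   // assumed stand-in for UNIV_PAGE_SIZE

    // Allocate three pages worth of zeroed memory and return a pointer aligned
    // up to a page boundary; the page right after it can serve as the zip frame.
    // The caller frees *raw_out, not the returned pointer.
    unsigned char* alloc_aligned_page(void** raw_out)
    {
        void* raw = std::calloc(3, kPageSize);
        *raw_out = raw;
        if (raw == nullptr) {
            return nullptr;
        }
        std::uintptr_t p = reinterpret_cast<std::uintptr_t>(raw);
        p = (p + kPageSize - 1) & ~(std::uintptr_t(kPageSize) - 1);
        return reinterpret_cast<unsigned char*>(p);
    }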
*/ + mtr_start(&mtr); - NOTE that our algorithm is not guaranteed to work correctly if there - were renames of tables during the backup. See mysqlbackup code for more - on the problem. */ + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); - switch (type) { - case MLOG_FILE_DELETE: - if (fil_tablespace_exists_in_mem(space_id)) { - dberr_t err = fil_delete_tablespace( - space_id, BUF_REMOVE_FLUSH_NO_WRITE); - ut_a(err == DB_SUCCESS); - } + mutex_enter(&fil_system->mutex); - break; + fil_space_t* space = fil_space_get_by_id(space_id); - case MLOG_FILE_RENAME: - /* In order to replay the rename, the following must hold: - * The new name is not already used. - * A tablespace is open in memory with the old name. - * The space ID for that tablepace matches this log entry. - This will prevent unintended renames during recovery. */ - - if (fil_get_space_id_for_table(new_name) == ULINT_UNDEFINED - && space_id == fil_get_space_id_for_table(name)) { - /* Create the database directory for the new name, if - it does not exist yet */ - fil_create_directory_for_tablename(new_name); - - if (!fil_rename_tablespace(name, space_id, - new_name, NULL)) { - ut_error; - } - } + mutex_exit(&fil_system->mutex); - break; + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); - case MLOG_FILE_CREATE: - case MLOG_FILE_CREATE2: - if (fil_tablespace_exists_in_mem(space_id)) { - /* Do nothing */ - } else if (fil_get_space_id_for_table(name) - != ULINT_UNDEFINED) { - /* Do nothing */ - } else if (log_flags & MLOG_FILE_FLAG_TEMP) { - /* Temporary table, do nothing */ + for (ulint page_no = 0; page_no < node->size; ++page_no) { + + const page_id_t cur_page_id(space_id, page_no); + + buf_block_t* block = buf_page_get(cur_page_id, page_size, + RW_X_LATCH, &mtr); + + byte* page = buf_block_get_frame(block); + + if (!fsp_flags_is_compressed(flags)) { + + ut_ad(!page_size.is_compressed()); + + buf_flush_init_for_writing( + block, page, NULL, recv_lsn, + fsp_is_checksum_disabled(space_id)); + + err = fil_write(cur_page_id, page_size, 0, + page_size.physical(), page); } else { - const char* path = NULL; - - /* Create the database directory for name, if it does - not exist yet */ - fil_create_directory_for_tablename(name); - - if (fil_create_new_single_table_tablespace( - space_id, name, path, flags, - DICT_TF2_USE_TABLESPACE, - FIL_IBD_FILE_INITIAL_SIZE, - FIL_SPACE_ENCRYPTION_DEFAULT, - FIL_DEFAULT_ENCRYPTION_KEY) != DB_SUCCESS) { - ut_error; + ut_ad(page_size.is_compressed()); + + /* We don't want to rewrite empty pages. */ + + if (fil_page_get_type(page) != 0) { + page_zip_des_t* page_zip = + buf_block_get_page_zip(block); + + buf_flush_init_for_writing( + block, page, page_zip, recv_lsn, + fsp_is_checksum_disabled(space_id)); + + err = fil_write(cur_page_id, page_size, 0, + page_size.physical(), + page_zip->data); + } else { +#ifdef UNIV_DEBUG + const byte* data = block->page.zip.data; + + /* Make sure that the page is really empty */ + for (ulint i = 0; + i < page_size.physical(); + ++i) { + + ut_a(data[i] == 0); + } +#endif /* UNIV_DEBUG */ } } - break; - - default: - ut_error; + if (err != DB_SUCCESS) { + ib::info() << "Cannot write page " << page_no + << " into a .ibd file for table '" + << name << "' with tablespace " << space_id; + } } - return(ptr); + mtr_commit(&mtr); + + truncate_t::s_fix_up_active = false; + + return(err); } -/*******************************************************************//** -Allocates a file name for the EXPORT/IMPORT config file name. The -string must be freed by caller with mem_free(). 
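Step-5 above walks every page of the single datafile, re-stamps it via buf_flush_init_for_writing() with the recovery LSN, and writes either the uncompressed or the compressed frame, skipping empty compressed pages. A simplified, self-contained sketch of that loop shape; the page accessor and writer callbacks are placeholders, not InnoDB APIs:

    #include <cstddef>
    #include <functional>

    struct page_image {
        const unsigned char* frame;       // uncompressed frame
        const unsigned char* zip_frame;   // compressed frame, or nullptr
    };

    // Rewrite pages 0..n_pages-1, preferring the compressed frame when present
    // and skipping pages that have nothing to persist.
    bool rewrite_all_pages(std::size_t n_pages,
                           const std::function<page_image(std::size_t)>& get_page,
                           const std::function<bool(std::size_t, const unsigned char*)>& write_page)
    {
        for (std::size_t page_no = 0; page_no < n_pages; ++page_no) {
            page_image img = get_page(page_no);
            const unsigned char* src = img.zip_frame ? img.zip_frame : img.frame;
            if (src == nullptr) {
                continue;                 // empty page, nothing to write
            }
            if (!write_page(page_no, src)) {
                return false;             // caller reports which page failed
            }
        }
        return true;
    }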
-@return own: file name */ -static -char* -fil_make_cfg_name( -/*==============*/ - const char* filepath) /*!< in: .ibd file name */ +/** Replay a file rename operation if possible. +@param[in] space_id tablespace identifier +@param[in] first_page_no first page number in the file +@param[in] name old file name +@param[in] new_name new file name +@return whether the operation was successfully applied +(the name did not exist, or new_name did not exist and +name was successfully renamed to new_name) */ +bool +fil_op_replay_rename( + ulint space_id, + ulint first_page_no, + const char* name, + const char* new_name) { - char* cfg_name; +#ifdef UNIV_HOTBACKUP + ut_ad(recv_replay_file_ops); +#endif /* UNIV_HOTBACKUP */ + ut_ad(first_page_no == 0); + + /* In order to replay the rename, the following must hold: + * The new name is not already used. + * A tablespace exists with the old name. + * The space ID for that tablepace matches this log entry. + This will prevent unintended renames during recovery. */ + fil_space_t* space = fil_space_get(space_id); + + if (space == NULL) { + return(true); + } + + const bool name_match + = strcmp(name, UT_LIST_GET_FIRST(space->chain)->name) == 0; + + if (!name_match) { + return(true); + } + + /* Create the database directory for the new name, if + it does not exist yet */ - /* Create a temporary file path by replacing the .ibd suffix - with .cfg. */ + const char* namend = strrchr(new_name, OS_PATH_SEPARATOR); + ut_a(namend != NULL); - ut_ad(strlen(filepath) > 4); + char* dir = static_cast( + ut_malloc_nokey(namend - new_name + 1)); - cfg_name = mem_strdup(filepath); - ut_snprintf(cfg_name + strlen(cfg_name) - 3, 4, "cfg"); - return(cfg_name); + memcpy(dir, new_name, namend - new_name); + dir[namend - new_name] = '\0'; + + bool success = os_file_create_directory(dir, false); + ut_a(success); + + ulint dirlen = 0; + + if (const char* dirend = strrchr(dir, OS_PATH_SEPARATOR)) { + dirlen = dirend - dir + 1; + } + + ut_free(dir); + + /* New path must not exist. */ + bool exists; + os_file_type_t ftype; + + if (!os_file_status(new_name, &exists, &ftype) + || exists) { + ib::error() << "Cannot replay rename '" << name + << "' to '" << new_name << "'" + " for space ID " << space_id + << " because the target file exists." + " Remove either file and try again."; + return(false); + } + + char* new_table = mem_strdupl( + new_name + dirlen, + strlen(new_name + dirlen) + - 4 /* remove ".ibd" */); + + ut_ad(new_table[namend - new_name - dirlen] + == OS_PATH_SEPARATOR); +#if OS_PATH_SEPARATOR != '/' + new_table[namend - new_name - dirlen] = '/'; +#endif + + if (!fil_rename_tablespace( + space_id, name, new_table, new_name)) { + ut_error; + } + + ut_free(new_table); + return(true); } -/*******************************************************************//** -Check for change buffer merges. -@return 0 if no merges else count + 1. */ +/** File operations for tablespace */ +enum fil_operation_t { + FIL_OPERATION_DELETE, /*!< delete a single-table tablespace */ + FIL_OPERATION_CLOSE, /*!< close a single-table tablespace */ + FIL_OPERATION_TRUNCATE /*!< truncate a single-table tablespace */ +}; + +/** Check for pending operations. +@param[in] space tablespace +@param[in] count number of attempts so far +@return 0 if no operations else count + 1. 
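fil_op_replay_rename() above rebuilds the databasename/tablename string from the target filepath: it keeps the last two path components, strips the ".ibd" suffix, and normalizes the separator to '/'. A hedged std::string version of that conversion, assuming '/' separators and a well-formed *.ibd path:

    #include <string>

    // "/data/db1/t1.ibd" -> "db1/t1"; returns an empty string if the shape is unexpected.
    std::string ibd_path_to_table_name(const std::string& path)
    {
        const std::string suffix = ".ibd";
        if (path.size() <= suffix.size()
            || path.compare(path.size() - suffix.size(), suffix.size(), suffix) != 0) {
            return "";
        }
        std::string stem = path.substr(0, path.size() - suffix.size());
        std::string::size_type file_sep = stem.rfind('/');
        if (file_sep == std::string::npos) {
            return "";
        }
        std::string::size_type db_sep = stem.rfind('/', file_sep - 1);
        std::string::size_type start = (db_sep == std::string::npos) ? 0 : db_sep + 1;
        return stem.substr(start);        // "databasename/tablename"
    }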
*/
 static
 ulint
-fil_ibuf_check_pending_ops(
-/*=======================*/
-        fil_space_t*    space,  /*!< in/out: Tablespace to check */
-        ulint           count)  /*!< in: number of attempts so far */
+fil_check_pending_ops(
+        fil_space_t*    space,
+        ulint           count)
 {
         ut_ad(mutex_own(&fil_system->mutex));

-        if (space != 0 && space->n_pending_ops != 0) {
+        const ulint     n_pending_ops = space ? space->n_pending_ops : 0;
+
+        if (n_pending_ops) {

                 if (count > 5000) {
-                        ib_logf(IB_LOG_LEVEL_WARN,
-                                "Trying to close/delete tablespace "
-                                "'%s' but there are %lu pending change "
-                                "buffer merges on it.",
-                                space->name,
-                                (ulong) space->n_pending_ops);
+                        ib::warn() << "Trying to close/delete/truncate"
+                                " tablespace '" << space->name
+                                << "' but there are " << n_pending_ops
+                                << " pending operations on it.";
                 }

                 return(count + 1);
@@ -2474,13 +2516,23 @@ static
 ulint
 fil_check_pending_io(
 /*=================*/
-        fil_space_t*    space,  /*!< in/out: Tablespace to check */
-        fil_node_t**    node,   /*!< out: Node in space list */
-        ulint           count)  /*!< in: number of attempts so far */
+        fil_operation_t operation,      /*!< in: File operation */
+        fil_space_t*    space,          /*!< in/out: Tablespace to check */
+        fil_node_t**    node,           /*!< out: Node in space list */
+        ulint           count)          /*!< in: number of attempts so far */
 {
         ut_ad(mutex_own(&fil_system->mutex));
         ut_a(space->n_pending_ops == 0);

+        switch (operation) {
+        case FIL_OPERATION_DELETE:
+        case FIL_OPERATION_CLOSE:
+                break;
+        case FIL_OPERATION_TRUNCATE:
+                space->is_being_truncated = true;
+                break;
+        }
+
         /* The following code must change when InnoDB supports
         multiple datafiles per tablespace. */
         ut_a(UT_LIST_GET_LEN(space->chain) == 1);
@@ -2492,13 +2544,12 @@ fil_check_pending_io(
         ut_a(!(*node)->being_extended);

         if (count > 1000) {
-                ib_logf(IB_LOG_LEVEL_WARN,
-                        "Trying to close/delete tablespace '%s' "
-                        "but there are %lu flushes "
-                        " and %lu pending i/o's on it.",
-                        space->name,
-                        (ulong) space->n_pending_flushes,
-                        (ulong) (*node)->n_pending);
+                ib::warn() << "Trying to delete/close/truncate"
+                        " tablespace '" << space->name
+                        << "' but there are "
+                        << space->n_pending_flushes
+                        << " flushes and " << (*node)->n_pending
+                        << " pending i/o's on it.";
         }

         return(count + 1);
@@ -2514,35 +2565,34 @@ static
 dberr_t
 fil_check_pending_operations(
 /*=========================*/
-        ulint           id,     /*!< in: space id */
-        fil_space_t**   space,  /*!< out: tablespace instance in memory */
-        char**          path)   /*!< out/own: tablespace path */
+        ulint           id,             /*!< in: space id */
+        fil_operation_t operation,      /*!< in: File operation */
+        fil_space_t**   space,          /*!< out: tablespace instance
+                                        in memory */
+        char**          path)           /*!< out/own: tablespace path */
 {
         ulint           count = 0;

-        ut_a(id != TRX_SYS_SPACE);
+        ut_a(!is_system_tablespace(id));
         ut_ad(space);

         *space = 0;

-        /* Wait for crypt threads to stop accessing space */
-        fil_space_crypt_close_tablespace(id);
-
         mutex_enter(&fil_system->mutex);
         fil_space_t* sp = fil_space_get_by_id(id);
         if (sp) {
-                sp->stop_new_ops = TRUE;
+                sp->stop_new_ops = true;
         }
         mutex_exit(&fil_system->mutex);

-        /* Check for pending change buffer merges. */
+        /* Check for pending operations.
*/ do { mutex_enter(&fil_system->mutex); sp = fil_space_get_by_id(id); - count = fil_ibuf_check_pending_ops(sp, count); + count = fil_check_pending_ops(sp, count); mutex_exit(&fil_system->mutex); @@ -2568,7 +2618,7 @@ fil_check_pending_operations( fil_node_t* node; - count = fil_check_pending_io(sp, &node, count); + count = fil_check_pending_io(operation, sp, &node, count); if (count == 0) { *path = mem_strdup(node->name); @@ -2591,8 +2641,7 @@ fil_check_pending_operations( /*******************************************************************//** Closes a single-table tablespace. The tablespace must be cached in the memory cache. Free all pages used by the tablespace. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ dberr_t fil_close_tablespace( /*=================*/ @@ -2601,10 +2650,12 @@ fil_close_tablespace( { char* path = 0; fil_space_t* space = 0; + dberr_t err; - ut_a(id != TRX_SYS_SPACE); + ut_a(!is_system_tablespace(id)); - dberr_t err = fil_check_pending_operations(id, &space, &path); + err = fil_check_pending_operations(id, FIL_OPERATION_CLOSE, + &space, &path); if (err != DB_SUCCESS) { return(err); @@ -2615,69 +2666,64 @@ fil_close_tablespace( rw_lock_x_lock(&space->latch); -#ifndef UNIV_HOTBACKUP /* Invalidate in the buffer pool all pages belonging to the - tablespace. Since we have set space->stop_new_ops = TRUE, readahead + tablespace. Since we have set space->stop_new_ops = true, readahead or ibuf merge can no longer read more pages of this tablespace to the buffer pool. Thus we can clean the tablespace out of the buffer pool completely and permanently. The flag stop_new_ops also prevents fil_flush() from being applied to this tablespace. */ buf_LRU_flush_or_remove_pages(id, BUF_REMOVE_FLUSH_WRITE, trx); -#endif - mutex_enter(&fil_system->mutex); /* If the free is successful, the X lock will be released before the space memory data structure is freed. */ - if (!fil_space_free(id, TRUE)) { + if (!fil_space_free(id, true)) { rw_lock_x_unlock(&space->latch); err = DB_TABLESPACE_NOT_FOUND; } else { err = DB_SUCCESS; } - mutex_exit(&fil_system->mutex); - /* If it is a delete then also delete any generated files, otherwise when we drop the database the remove directory will fail. */ - char* cfg_name = fil_make_cfg_name(path); - - os_file_delete_if_exists(innodb_file_data_key, cfg_name); + char* cfg_name = fil_make_filepath(path, NULL, CFG, false); + if (cfg_name != NULL) { + os_file_delete_if_exists(innodb_data_file_key, cfg_name, NULL); + ut_free(cfg_name); + } - mem_free(path); - mem_free(cfg_name); + ut_free(path); return(err); } -/*******************************************************************//** -Deletes a single-table tablespace. The tablespace must be cached in the -memory cache. -@return DB_SUCCESS or error */ -UNIV_INTERN +/** Deletes an IBD tablespace, either general or single-table. +The tablespace must be cached in the memory cache. This will delete the +datafile, fil_space_t & fil_node_t entries from the file_system_t cache. +@param[in] space_id Tablespace id +@param[in] buf_remove Specify the action to take on the pages +for this table in the buffer pool. 
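fil_check_pending_operations() polls those counters in a loop: lock fil_system->mutex, read the pending count, unlock, and sleep roughly 20 ms before retrying, with the warning throttled by the attempt counter. A minimal standalone sketch of that polling pattern; the pending() callback and the exact throttle are assumptions:

    #include <chrono>
    #include <cstdio>
    #include <functional>
    #include <thread>

    // Poll pending() until it reports zero, warning every `warn_every` attempts.
    // Mirrors the shape of the loop around fil_check_pending_ops() above.
    void wait_until_idle(const std::function<unsigned long()>& pending,
                         unsigned long warn_every = 5000)
    {
        unsigned long count = 0;
        for (;;) {
            unsigned long n = pending();   // the real code re-looks up the space under its mutex
            if (n == 0) {
                return;
            }
            if (++count % warn_every == 0) {
                std::fprintf(stderr, "still waiting: %lu pending operations\n", n);
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(20));
        }
    }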
+@return DB_SUCCESS or error */ dberr_t fil_delete_tablespace( -/*==================*/ - ulint id, /*!< in: space id */ - buf_remove_t buf_remove) /*!< in: specify the action to take - on the tables pages in the buffer - pool */ + ulint id, + buf_remove_t buf_remove) { char* path = 0; fil_space_t* space = 0; - ut_a(id != TRX_SYS_SPACE); + ut_a(!is_system_tablespace(id)); - dberr_t err = fil_check_pending_operations(id, &space, &path); + dberr_t err = fil_check_pending_operations( + id, FIL_OPERATION_DELETE, &space, &path); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot delete tablespace %lu because it is not " - "found in the tablespace memory cache.", - (ulong) id); + ib::error() << "Cannot delete tablespace " << id + << " because it is not found in the tablespace" + " memory cache."; return(err); } @@ -2685,18 +2731,6 @@ fil_delete_tablespace( ut_a(space); ut_a(path != 0); - /* Important: We rely on the data dictionary mutex to ensure - that a race is not possible here. It should serialize the tablespace - drop/free. We acquire an X latch only to avoid a race condition - when accessing the tablespace instance via: - - fsp_get_available_space_in_free_extents(). - - There our main motivation is to reduce the contention on the - dictionary mutex. */ - - rw_lock_x_lock(&space->latch); - #ifndef UNIV_HOTBACKUP /* IMPORTANT: Because we have set space::stop_new_ops there can't be any new ibuf merges, reads or flushes. We are here @@ -2713,11 +2747,11 @@ fil_delete_tablespace( We deal with pending write requests in the following function where we'd minimally evict all dirty pages belonging to this - space from the flush_list. Not that if a block is IO-fixed + space from the flush_list. Note that if a block is IO-fixed we'll wait for IO to complete. - To deal with potential read requests by checking the - ::stop_new_ops flag in fil_io() */ + To deal with potential read requests, we will check the + ::stop_new_ops flag in fil_io(). */ buf_LRU_flush_or_remove_pages(id, buf_remove, 0); @@ -2726,364 +2760,538 @@ fil_delete_tablespace( /* If it is a delete then also delete any generated files, otherwise when we drop the database the remove directory will fail. */ { - char* cfg_name = fil_make_cfg_name(path); - os_file_delete_if_exists(innodb_file_data_key, cfg_name); - mem_free(cfg_name); +#ifdef UNIV_HOTBACKUP + /* When replaying the operation in MySQL Enterprise + Backup, we do not try to write any log record. */ +#else /* UNIV_HOTBACKUP */ + /* Before deleting the file, write a log record about + it, so that InnoDB crash recovery will expect the file + to be gone. */ + mtr_t mtr; + + mtr_start(&mtr); + fil_op_write_log(MLOG_FILE_DELETE, id, 0, path, NULL, 0, &mtr); + mtr_commit(&mtr); + /* Even if we got killed shortly after deleting the + tablespace file, the record must have already been + written to the redo log. */ + log_write_up_to(mtr.commit_lsn(), true); +#endif /* UNIV_HOTBACKUP */ + + char* cfg_name = fil_make_filepath(path, NULL, CFG, false); + if (cfg_name != NULL) { + os_file_delete_if_exists(innodb_data_file_key, cfg_name, NULL); + ut_free(cfg_name); + } } /* Delete the link file pointing to the ibd file we are deleting. 
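Note the ordering the new deletion path enforces: the MLOG_FILE_DELETE record is written and the redo log is flushed up to its commit LSN before the datafile is unlinked, so crash recovery always learns about the delete even if the server dies between the two steps. A generic sketch of that write-ahead-then-delete ordering; the durable logger here is a placeholder, not the InnoDB mtr/log API:

    #include <cstdio>
    #include <string>

    // Stand-in for "write MLOG_FILE_DELETE, then log_write_up_to(commit_lsn, true)":
    // append a record to a log file and flush it before returning.
    static bool log_delete_durably(const std::string& path)
    {
        std::FILE* log = std::fopen("delete.log", "a");
        if (log == nullptr) {
            return false;
        }
        bool ok = std::fprintf(log, "DELETE %s\n", path.c_str()) > 0
                  && std::fflush(log) == 0;
        std::fclose(log);
        return ok;
    }

    // Only unlink the datafile once the delete record is durable; a crash after
    // logging is recoverable, a crash after an unlogged unlink would not be.
    bool delete_datafile(const std::string& path)
    {
        return log_delete_durably(path) && std::remove(path.c_str()) == 0;
    }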
*/ if (FSP_FLAGS_HAS_DATA_DIR(space->flags)) { - fil_delete_link_file(space->name); + + RemoteDatafile::delete_link_file(space->name); + + } else if (FSP_FLAGS_GET_SHARED(space->flags)) { + + RemoteDatafile::delete_link_file(base_name(path)); + } mutex_enter(&fil_system->mutex); /* Double check the sanity of pending ops after reacquiring the fil_system::mutex. */ - if (fil_space_get_by_id(id)) { + if (const fil_space_t* s = fil_space_get_by_id(id)) { + ut_a(s == space); ut_a(space->n_pending_ops == 0); ut_a(UT_LIST_GET_LEN(space->chain) == 1); fil_node_t* node = UT_LIST_GET_FIRST(space->chain); ut_a(node->n_pending == 0); - } - - if (!fil_space_free(id, TRUE)) { - err = DB_TABLESPACE_NOT_FOUND; - } - mutex_exit(&fil_system->mutex); + fil_space_detach(space); + mutex_exit(&fil_system->mutex); - if (err != DB_SUCCESS) { - rw_lock_x_unlock(&space->latch); - } else if (!os_file_delete(innodb_file_data_key, path) - && !os_file_delete_if_exists(innodb_file_data_key, path)) { + log_mutex_enter(); - /* Note: This is because we have removed the - tablespace instance from the cache. */ + if (space->max_lsn != 0) { + ut_d(space->max_lsn = 0); + UT_LIST_REMOVE(fil_system->named_spaces, space); + } - err = DB_IO_ERROR; - } + log_mutex_exit(); + fil_space_free_low(space); - if (err == DB_SUCCESS) { -#ifndef UNIV_HOTBACKUP - /* Write a log record about the deletion of the .ibd - file, so that mysqlbackup can replay it in the - --apply-log phase. We use a dummy mtr and the familiar - log write mechanism. */ - mtr_t mtr; + if (!os_file_delete(innodb_data_file_key, path) + && !os_file_delete_if_exists( + innodb_data_file_key, path, NULL)) { - /* When replaying the operation in mysqlbackup, do not try - to write any log record */ - mtr_start(&mtr); + /* Note: This is because we have removed the + tablespace instance from the cache. */ - fil_op_write_log(MLOG_FILE_DELETE, id, 0, 0, path, NULL, &mtr); - mtr_commit(&mtr); -#endif - err = DB_SUCCESS; + err = DB_IO_ERROR; + } + } else { + mutex_exit(&fil_system->mutex); + err = DB_TABLESPACE_NOT_FOUND; } - mem_free(path); + ut_free(path); return(err); } -/*******************************************************************//** -Returns TRUE if a single-table tablespace is being deleted. -@return TRUE if being deleted */ -UNIV_INTERN -ibool -fil_tablespace_is_being_deleted( -/*============================*/ - ulint id) /*!< in: space id */ +/** Truncate the tablespace to needed size. +@param[in] space_id id of tablespace to truncate +@param[in] size_in_pages truncate size. +@return true if truncate was successful. */ +bool +fil_truncate_tablespace( + ulint space_id, + ulint size_in_pages) { - fil_space_t* space; - ibool is_being_deleted; + /* Step-1: Prepare tablespace for truncate. This involves + stopping all the new operations + IO on that tablespace + and ensuring that related pages are flushed to disk. */ + if (fil_prepare_for_truncate(space_id) != DB_SUCCESS) { + return(false); + } + /* Step-2: Invalidate buffer pool pages belonging to the tablespace + to re-create. Remove all insert buffer entries for the tablespace */ + buf_LRU_flush_or_remove_pages(space_id, BUF_REMOVE_ALL_NO_WRITE, 0); + + /* Step-3: Truncate the tablespace and accordingly update + the fil_space_t handler that is used to access this tablespace. */ mutex_enter(&fil_system->mutex); + fil_space_t* space = fil_space_get_by_id(space_id); - space = fil_space_get_by_id(id); + /* The following code must change when InnoDB supports + multiple datafiles per tablespace. 
*/ + ut_a(UT_LIST_GET_LEN(space->chain) == 1); - ut_a(space != NULL); + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); - is_being_deleted = space->stop_new_ops; + ut_ad(node->is_open); - mutex_exit(&fil_system->mutex); + space->size = node->size = size_in_pages; - return(is_being_deleted); -} + bool success = os_file_truncate(node->name, node->handle, 0); + if (success) { -#ifndef UNIV_HOTBACKUP -/*******************************************************************//** -Discards a single-table tablespace. The tablespace must be cached in the -memory cache. Discarding is like deleting a tablespace, but + os_offset_t size = size_in_pages * UNIV_PAGE_SIZE; - 1. We do not drop the table from the data dictionary; + success = os_file_set_size( + node->name, node->handle, size, srv_read_only_mode); - 2. We remove all insert buffer entries for the tablespace immediately; - in DROP TABLE they are only removed gradually in the background; + if (success) { + space->stop_new_ops = false; + space->is_being_truncated = false; + } + } - 3. Free all the pages in use by the tablespace. -@return DB_SUCCESS or error */ -UNIV_INTERN + mutex_exit(&fil_system->mutex); + + return(success); +} + +/*******************************************************************//** +Prepare for truncating a single-table tablespace. +1) Check pending operations on a tablespace; +2) Remove all insert buffer entries for the tablespace; +@return DB_SUCCESS or error */ dberr_t -fil_discard_tablespace( -/*===================*/ - ulint id) /*!< in: space id */ +fil_prepare_for_truncate( +/*=====================*/ + ulint id) /*!< in: space id */ { - dberr_t err; + char* path = 0; + fil_space_t* space = 0; - switch (err = fil_delete_tablespace(id, BUF_REMOVE_ALL_NO_WRITE)) { - case DB_SUCCESS: - break; + ut_a(!is_system_tablespace(id)); - case DB_IO_ERROR: - ib_logf(IB_LOG_LEVEL_WARN, - "While deleting tablespace %lu in DISCARD TABLESPACE." - " File rename/delete failed: %s", - (ulong) id, ut_strerr(err)); - break; + dberr_t err = fil_check_pending_operations( + id, FIL_OPERATION_TRUNCATE, &space, &path); - case DB_TABLESPACE_NOT_FOUND: - ib_logf(IB_LOG_LEVEL_WARN, - "Cannot delete tablespace %lu in DISCARD " - "TABLESPACE. %s", - (ulong) id, ut_strerr(err)); - break; + ut_free(path); - default: - ut_error; + if (err == DB_TABLESPACE_NOT_FOUND) { + ib::error() << "Cannot truncate tablespace " << id + << " because it is not found in the tablespace" + " memory cache."; } - /* Remove all insert buffer entries for the tablespace */ - - ibuf_delete_for_discarded_space(id); - return(err); } -#endif /* !UNIV_HOTBACKUP */ -/*******************************************************************//** -Renames the memory cache structures of a single-table tablespace. 
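fil_truncate_tablespace() above shrinks the datafile to zero with os_file_truncate() and then grows it back to the requested number of pages with os_file_set_size(), clearing stop_new_ops and is_being_truncated only when both steps succeed. A POSIX-level sketch of the same truncate-then-extend step (file-descriptor based; the 16 KB constant stands in for UNIV_PAGE_SIZE):

    #include <sys/types.h>
    #include <unistd.h>

    static const off_t kPageSize = 16384;   // assumed stand-in for UNIV_PAGE_SIZE

    // Shrink the file to zero, then grow it back to size_in_pages pages.
    // Growing with ftruncate() yields a sparse zero-filled file, which is enough
    // for this sketch; the real code re-extends with os_file_set_size().
    bool truncate_and_extend(int fd, off_t size_in_pages)
    {
        if (ftruncate(fd, 0) != 0) {
            return false;
        }
        return ftruncate(fd, size_in_pages * kPageSize) == 0;
    }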
-@return TRUE if success */ -static -ibool -fil_rename_tablespace_in_mem( -/*=========================*/ - fil_space_t* space, /*!< in: tablespace memory object */ - fil_node_t* node, /*!< in: file node of that tablespace */ - const char* new_name, /*!< in: new name */ - const char* new_path) /*!< in: new file path */ +/**********************************************************************//** +Reinitialize the original tablespace header with the same space id +for single tablespace */ +void +fil_reinit_space_header( +/*====================*/ + ulint id, /*!< in: space id */ + ulint size) /*!< in: size in blocks */ { - fil_space_t* space2; - const char* old_name = space->name; + ut_a(!is_system_tablespace(id)); - ut_ad(mutex_own(&fil_system->mutex)); + /* Invalidate in the buffer pool all pages belonging + to the tablespace */ + buf_LRU_flush_or_remove_pages(id, BUF_REMOVE_ALL_NO_WRITE, 0); + + /* Remove all insert buffer entries for the tablespace */ + ibuf_delete_for_discarded_space(id); + + mutex_enter(&fil_system->mutex); + + fil_space_t* space = fil_space_get_by_id(id); + + /* The following code must change when InnoDB supports + multiple datafiles per tablespace. */ + ut_a(UT_LIST_GET_LEN(space->chain) == 1); + + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); + + space->size = node->size = size; + + mutex_exit(&fil_system->mutex); + + mtr_t mtr; + + mtr_start(&mtr); + mtr.set_named_space(id); + + fsp_header_init(id, size, &mtr); + + mtr_commit(&mtr); +} + +#ifdef UNIV_DEBUG +/** Increase redo skipped count for a tablespace. +@param[in] id space id */ +void +fil_space_inc_redo_skipped_count( + ulint id) +{ + fil_space_t* space; + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + ut_a(space != NULL); + + space->redo_skipped_count++; + + mutex_exit(&fil_system->mutex); +} + +/** Decrease redo skipped count for a tablespace. +@param[in] id space id */ +void +fil_space_dec_redo_skipped_count( + ulint id) +{ + fil_space_t* space; + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + ut_a(space != NULL); + ut_a(space->redo_skipped_count > 0); + + space->redo_skipped_count--; + + mutex_exit(&fil_system->mutex); +} + +/** +Check whether a single-table tablespace is redo skipped. +@param[in] id space id +@return true if redo skipped */ +bool +fil_space_is_redo_skipped( + ulint id) +{ + fil_space_t* space; + bool is_redo_skipped; + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + ut_a(space != NULL); + + is_redo_skipped = space->redo_skipped_count > 0; + + mutex_exit(&fil_system->mutex); + + return(is_redo_skipped); +} +#endif + +#ifndef UNIV_HOTBACKUP +/*******************************************************************//** +Discards a single-table tablespace. The tablespace must be cached in the +memory cache. Discarding is like deleting a tablespace, but + + 1. We do not drop the table from the data dictionary; + + 2. We remove all insert buffer entries for the tablespace immediately; + in DROP TABLE they are only removed gradually in the background; + + 3. Free all the pages in use by the tablespace. +@return DB_SUCCESS or error */ +dberr_t +fil_discard_tablespace( +/*===================*/ + ulint id) /*!< in: space id */ +{ + dberr_t err; + + switch (err = fil_delete_tablespace(id, BUF_REMOVE_ALL_NO_WRITE)) { + case DB_SUCCESS: + break; + + case DB_IO_ERROR: + ib::warn() << "While deleting tablespace " << id + << " in DISCARD TABLESPACE. 
File rename/delete" + " failed: " << ut_strerr(err); + break; + + case DB_TABLESPACE_NOT_FOUND: + ib::warn() << "Cannot delete tablespace " << id + << " in DISCARD TABLESPACE: " << ut_strerr(err); + break; + + default: + ut_error; + } + + /* Remove all insert buffer entries for the tablespace */ + + ibuf_delete_for_discarded_space(id); + + return(err); +} +#endif /* !UNIV_HOTBACKUP */ + +/*******************************************************************//** +Renames the memory cache structures of a single-table tablespace. +@return true if success */ +static +bool +fil_rename_tablespace_in_mem( +/*=========================*/ + fil_space_t* space, /*!< in: tablespace memory object */ + fil_node_t* node, /*!< in: file node of that tablespace */ + const char* new_name, /*!< in: new name */ + const char* new_path) /*!< in: new file path */ +{ + fil_space_t* space2; + const char* old_name = space->name; + + ut_ad(mutex_own(&fil_system->mutex)); space2 = fil_space_get_by_name(old_name); if (space != space2) { - fputs("InnoDB: Error: cannot find ", stderr); - ut_print_filename(stderr, old_name); - fputs(" in tablespace memory cache\n", stderr); - - return(FALSE); + ib::error() << "Cannot find " << old_name + << " in tablespace memory cache"; + return(false); } space2 = fil_space_get_by_name(new_name); if (space2 != NULL) { - fputs("InnoDB: Error: ", stderr); - ut_print_filename(stderr, new_name); - fputs(" is already in tablespace memory cache\n", stderr); + ib::error() << new_name + << " is already in tablespace memory cache"; - return(FALSE); + return(false); } HASH_DELETE(fil_space_t, name_hash, fil_system->name_hash, ut_fold_string(space->name), space); - mem_free(space->name); - mem_free(node->name); + ut_free(space->name); + ut_free(node->name); space->name = mem_strdup(new_name); node->name = mem_strdup(new_path); HASH_INSERT(fil_space_t, name_hash, fil_system->name_hash, - ut_fold_string(new_name), space); - return(TRUE); + ut_fold_string(space->name), space); + return(true); } /*******************************************************************//** -Allocates a file name for a single-table tablespace. The string must be freed -by caller with mem_free(). -@return own: file name */ -UNIV_INTERN +Allocates and builds a file name from a path, a table or tablespace name +and a suffix. The string must be freed by caller with ut_free(). +@param[in] path NULL or the direcory path or the full path and filename. +@param[in] name NULL if path is full, or Table/Tablespace name +@param[in] suffix NULL or the file extention to use. +@param[in] trim_name true if the last name on the path should be trimmed. +@return own: file name */ char* -fil_make_ibd_name( -/*==============*/ - const char* name, /*!< in: table name or a dir path */ - bool is_full_path) /*!< in: TRUE if it is a dir path */ +fil_make_filepath( + const char* path, + const char* name, + ib_extention ext, + bool trim_name) { - char* filename; - ulint namelen = strlen(name); - ulint dirlen = strlen(fil_path_to_mysql_datadir); - ulint pathlen = dirlen + namelen + sizeof "/.ibd"; - - filename = static_cast(mem_alloc(pathlen)); + /* The path may contain the basename of the file, if so we do not + need the name. If the path is NULL, we can use the default path, + but there needs to be a name. 
*/ + ut_ad(path != NULL || name != NULL); - if (is_full_path) { - memcpy(filename, name, namelen); - memcpy(filename + namelen, ".ibd", sizeof ".ibd"); - } else { - ut_snprintf(filename, pathlen, "%s/%s.ibd", - fil_path_to_mysql_datadir, name); + /* If we are going to strip a name off the path, there better be a + path and a new name to put back on. */ + ut_ad(!trim_name || (path != NULL && name != NULL)); + if (path == NULL) { + path = fil_path_to_mysql_datadir; } - srv_normalize_path_for_win(filename); - - return(filename); -} - -/*******************************************************************//** -Allocates a file name for a tablespace ISL file (InnoDB Symbolic Link). -The string must be freed by caller with mem_free(). -@return own: file name */ -UNIV_INTERN -char* -fil_make_isl_name( -/*==============*/ - const char* name) /*!< in: table name */ -{ - char* filename; - ulint namelen = strlen(name); - ulint dirlen = strlen(fil_path_to_mysql_datadir); - ulint pathlen = dirlen + namelen + sizeof "/.isl"; - - filename = static_cast(mem_alloc(pathlen)); + ulint len = 0; /* current length */ + ulint path_len = strlen(path); + ulint name_len = (name ? strlen(name) : 0); + const char* suffix = dot_ext[ext]; + ulint suffix_len = strlen(suffix); + ulint full_len = path_len + 1 + name_len + suffix_len + 1; - ut_snprintf(filename, pathlen, "%s/%s.isl", - fil_path_to_mysql_datadir, name); + char* full_name = static_cast(ut_malloc_nokey(full_len)); + if (full_name == NULL) { + return NULL; + } - srv_normalize_path_for_win(filename); + /* If the name is a relative path, do not prepend "./". */ + if (path[0] == '.' + && (path[1] == '\0' || path[1] == OS_PATH_SEPARATOR) + && name != NULL && name[0] == '.') { + path = NULL; + path_len = 0; + } - return(filename); -} + if (path != NULL) { + memcpy(full_name, path, path_len); + len = path_len; + full_name[len] = '\0'; + os_normalize_path(full_name); + } -/** Test if a tablespace file can be renamed to a new filepath by checking -if that the old filepath exists and the new filepath does not exist. -@param[in] space_id tablespace id -@param[in] old_path old filepath -@param[in] new_path new filepath -@param[in] is_discarded whether the tablespace is discarded -@return innodb error code */ -dberr_t -fil_rename_tablespace_check( - ulint space_id, - const char* old_path, - const char* new_path, - bool is_discarded) -{ - ulint exists = false; - os_file_type_t ftype; + if (trim_name) { + /* Find the offset of the last DIR separator and set it to + null in order to strip off the old basename from this path. */ + char* last_dir_sep = strrchr(full_name, OS_PATH_SEPARATOR); + if (last_dir_sep) { + last_dir_sep[0] = '\0'; + len = strlen(full_name); + } + } - if (!is_discarded - && os_file_status(old_path, &exists, &ftype) - && !exists) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot rename '%s' to '%s' for space ID %lu" - " because the source file does not exist.", - old_path, new_path, space_id); + if (name != NULL) { + if (len && full_name[len - 1] != OS_PATH_SEPARATOR) { + /* Add a DIR separator */ + full_name[len] = OS_PATH_SEPARATOR; + full_name[++len] = '\0'; + } - return(DB_TABLESPACE_NOT_FOUND); + char* ptr = &full_name[len]; + memcpy(ptr, name, name_len); + len += name_len; + full_name[len] = '\0'; + os_normalize_path(ptr); } - exists = false; - if (!os_file_status(new_path, &exists, &ftype) || exists) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot rename '%s' to '%s' for space ID %lu" - " because the target file exists." 
- " Remove the target file and try again.", - old_path, new_path, space_id); + /* Make sure that the specified suffix is at the end of the filepath + string provided. This assumes that the suffix starts with '.'. + If the first char of the suffix is found in the filepath at the same + length as the suffix from the end, then we will assume that there is + a previous suffix that needs to be replaced. */ + if (suffix != NULL) { + /* Need room for the trailing null byte. */ + ut_ad(len < full_len); + + if ((len > suffix_len) + && (full_name[len - suffix_len] == suffix[0])) { + /* Another suffix exists, make it the one requested. */ + memcpy(&full_name[len - suffix_len], suffix, suffix_len); - return(DB_TABLESPACE_EXISTS); + } else { + /* No previous suffix, add it. */ + ut_ad(len + suffix_len < full_len); + memcpy(&full_name[len], suffix, suffix_len); + full_name[len + suffix_len] = '\0'; + } } - return(DB_SUCCESS); + return(full_name); } -/*******************************************************************//** -Renames a single-table tablespace. The tablespace must be cached in the -tablespace memory cache. -@return TRUE if success */ -UNIV_INTERN -ibool +/** Rename a single-table tablespace. +The tablespace must exist in the memory cache. +@param[in] id tablespace identifier +@param[in] old_path old file name +@param[in] new_name new table name in the +databasename/tablename format +@param[in] new_path_in new file name, +or NULL if it is located in the normal data directory +@return true if success */ +bool fil_rename_tablespace( -/*==================*/ - const char* old_name_in, /*!< in: old table name in the - standard databasename/tablename - format of InnoDB, or NULL if we - do the rename based on the space - id only */ - ulint id, /*!< in: space id */ - const char* new_name, /*!< in: new table name in the - standard databasename/tablename - format of InnoDB */ - const char* new_path_in) /*!< in: new full datafile path - if the tablespace is remotely - located, or NULL if it is located - in the normal data directory. */ -{ - ibool success; + ulint id, + const char* old_path, + const char* new_name, + const char* new_path_in) +{ + bool sleep = false; + bool flush = false; fil_space_t* space; fil_node_t* node; ulint count = 0; - char* new_path; - char* old_name; - char* old_path; - const char* not_given = "(name not specified)"; - + char* old_name = NULL; + const char* new_path = new_path_in; ut_a(id != 0); + if (new_path == NULL) { + new_path = fil_make_filepath(NULL, new_name, IBD, false); + } + + ut_ad(strchr(new_name, '/') != NULL); + ut_ad(strchr(new_path, OS_PATH_SEPARATOR) != NULL); retry: count++; if (!(count % 1000)) { - ut_print_timestamp(stderr); - fputs(" InnoDB: Warning: problems renaming ", stderr); - ut_print_filename(stderr, - old_name_in ? old_name_in : not_given); - fputs(" to ", stderr); - ut_print_filename(stderr, new_name); - fprintf(stderr, ", %lu iterations\n", (ulong) count); + ib::warn() << "Cannot rename " << old_path << " to " + << new_path << ", retried " << count << " times." + " There are either pending IOs or flushes or" + " the file is being extended."; } mutex_enter(&fil_system->mutex); space = fil_space_get_by_id(id); + bool success = false; + DBUG_EXECUTE_IF("fil_rename_tablespace_failure_1", space = NULL; ); if (space == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot find space id %lu in the tablespace " - "memory cache, though the table '%s' in a " - "rename operation should have that id.", - (ulong) id, old_name_in ? 
old_name_in : not_given); - mutex_exit(&fil_system->mutex); + ib::error() << "Cannot find space id " << id + << " in the tablespace memory cache, though the file '" + << old_path + << "' in a rename operation should have that id."; - return(FALSE); + goto func_exit; } if (count > 25000) { - space->stop_ios = FALSE; - mutex_exit(&fil_system->mutex); - - return(FALSE); + space->stop_ios = false; + goto func_exit; } /* We temporarily close the .ibd file because we do not trust that operating systems can rename an open file. For the closing we have to wait until there are no pending i/o's or flushes on the file. */ - space->stop_ios = TRUE; + space->stop_ios = true; /* The following code must change when InnoDB supports multiple datafiles per tablespace. */ @@ -3096,44 +3304,35 @@ retry: /* There are pending i/o's or flushes or the file is currently being extended, sleep for a while and retry */ + sleep = true; - mutex_exit(&fil_system->mutex); + } else if (node->modification_counter > node->flush_counter) { + /* Flush the space */ + sleep = flush = true; - os_thread_sleep(20000); + } else if (node->is_open) { + /* Close the file */ - goto retry; + fil_node_close_file(node); + } - } else if (node->modification_counter > node->flush_counter) { - /* Flush the space */ + if (sleep) { mutex_exit(&fil_system->mutex); os_thread_sleep(20000); - fil_flush(id); + if (flush) { + fil_flush(id); + } + sleep = flush = false; goto retry; - - } else if (node->open) { - /* Close the file */ - - fil_node_close_file(node, fil_system); } - /* Check that the old name in the space is right */ - - if (old_name_in) { - old_name = mem_strdup(old_name_in); - ut_a(strcmp(space->name, old_name) == 0); - } else { - old_name = mem_strdup(space->name); - } - old_path = mem_strdup(node->name); + old_name = mem_strdup(space->name); /* Rename the tablespace and the node in the memory cache */ - new_path = new_path_in ? mem_strdup(new_path_in) - : fil_make_ibd_name(new_name, false); - success = fil_rename_tablespace_in_mem( space, node, new_name, new_path); @@ -3143,23 +3342,27 @@ retry: goto skip_second_rename; ); success = os_file_rename( - innodb_file_data_key, old_path, new_path); + innodb_data_file_key, old_path, new_path); DBUG_EXECUTE_IF("fil_rename_tablespace_failure_2", skip_second_rename: - success = FALSE; ); + success = false; ); if (!success) { /* We have to revert the changes we made to the tablespace memory cache */ - ut_a(fil_rename_tablespace_in_mem( - space, node, old_name, old_path)); + bool reverted = fil_rename_tablespace_in_mem( + space, node, old_name, old_path); + + ut_a(reverted); } } - space->stop_ios = FALSE; + ut_free(old_name); + space->stop_ios = false; +func_exit: mutex_exit(&fil_system->mutex); #ifndef UNIV_HOTBACKUP @@ -3167,330 +3370,165 @@ skip_second_rename: mtr_t mtr; mtr_start(&mtr); - - fil_op_write_log(MLOG_FILE_RENAME, id, 0, 0, old_name, new_name, - &mtr); + fil_name_write_rename(id, 0, old_path, new_path, &mtr); mtr_commit(&mtr); } #endif /* !UNIV_HOTBACKUP */ - mem_free(new_path); - mem_free(old_path); - mem_free(old_name); + if (new_path != new_path_in) { + ut_free(const_cast(new_path)); + } return(success); } -/*******************************************************************//** -Creates a new InnoDB Symbolic Link (ISL) file. It is always created -under the 'datadir' of MySQL. The datadir is the directory of a -running mysqld program. We can refer to it by simply using the path '.'. 
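fil_rename_tablespace() above renames the in-memory fil_space_t/fil_node_t entries first and only then renames the file on disk; if the OS rename fails, the in-memory rename is reverted (and the revert is asserted to succeed), so the cache and the filesystem never disagree. A generic sketch of that rename-with-rollback shape, with the three steps passed in as callbacks:

    #include <cassert>
    #include <functional>

    // Apply the in-memory rename, then the on-disk rename; if the latter fails,
    // undo the former so the cache and the filesystem stay consistent.
    bool rename_with_rollback(const std::function<bool()>& rename_in_memory,
                              const std::function<bool()>& rename_on_disk,
                              const std::function<bool()>& revert_in_memory)
    {
        if (!rename_in_memory()) {
            return false;
        }
        if (!rename_on_disk()) {
            bool reverted = revert_in_memory();
            assert(reverted);           // the original asserts this with ut_a()
            (void) reverted;
            return false;
        }
        return true;
    }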
-@return DB_SUCCESS or error code */ -UNIV_INTERN +/** Create a new General or Single-Table tablespace +@param[in] space_id Tablespace ID +@param[in] name Tablespace name in dbname/tablename format. +For general tablespaces, the 'dbname/' part may be missing. +@param[in] path Path and filename of the datafile to create. +@param[in] flags Tablespace flags +@param[in] size Initial size of the tablespace file in pages, +must be >= FIL_IBD_FILE_INITIAL_SIZE +@return DB_SUCCESS or error code */ dberr_t -fil_create_link_file( -/*=================*/ - const char* tablename, /*!< in: tablename */ - const char* filepath) /*!< in: pathname of tablespace */ +fil_ibd_create( + ulint space_id, + const char* name, + const char* path, + ulint flags, + ulint size, + fil_encryption_t mode, /*!< in: encryption mode */ + ulint key_id) /*!< in: encryption key_id */ { - dberr_t err = DB_SUCCESS; - char* link_filepath; - char* prev_filepath = fil_read_link_file(tablename); - + os_file_t file; + dberr_t err; + byte* buf2; + byte* page; + bool success; + bool is_temp = FSP_FLAGS_GET_TEMPORARY(flags); + bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags); + bool has_shared_space = FSP_FLAGS_GET_SHARED(flags); + fil_space_t* space = NULL; + fil_space_crypt_t *crypt_data = NULL; + + ut_ad(!is_system_tablespace(space_id)); ut_ad(!srv_read_only_mode); + ut_a(space_id < SRV_LOG_SPACE_FIRST_ID); + ut_a(size >= FIL_IBD_FILE_INITIAL_SIZE); + ut_a(fsp_flags_is_valid(flags)); - if (prev_filepath) { - /* Truncate will call this with an existing - link file which contains the same filepath. */ - if (0 == strcmp(prev_filepath, filepath)) { - mem_free(prev_filepath); - return(DB_SUCCESS); + /* Create the subdirectories in the path, if they are + not there already. */ + if (!has_shared_space) { + err = os_file_create_subdirs_if_needed(path); + if (err != DB_SUCCESS) { + return(err); } - mem_free(prev_filepath); } - link_filepath = fil_make_isl_name(tablename); + file = os_file_create( + innodb_data_file_key, path, + OS_FILE_CREATE | OS_FILE_ON_ERROR_NO_EXIT, + OS_FILE_NORMAL, + OS_DATA_FILE, + srv_read_only_mode, + &success); - /** Check if the file already exists. */ - FILE* file = NULL; - ibool exists; - os_file_type_t ftype; + if (!success) { + /* The following call will print an error message */ + ulint error = os_file_get_last_error(true); - bool success = os_file_status(link_filepath, &exists, &ftype); + ib::error() << "Cannot create file '" << path << "'"; - ulint error = 0; - if (success && !exists) { - file = fopen(link_filepath, "w"); - if (file == NULL) { - /* This call will print its own error message */ - error = os_file_get_last_error(true); + if (error == OS_FILE_ALREADY_EXISTS) { + ib::error() << "The file '" << path << "'" + " already exists though the" + " corresponding table did not exist" + " in the InnoDB data dictionary." + " Have you moved InnoDB .ibd files" + " around without using the SQL commands" + " DISCARD TABLESPACE and IMPORT TABLESPACE," + " or did mysqld crash in the middle of" + " CREATE TABLE?" 
+ " You can resolve the problem by removing" + " the file '" << path + << "' under the 'datadir' of MySQL."; + + return(DB_TABLESPACE_EXISTS); } - } else { - error = OS_FILE_ALREADY_EXISTS; - } - if (error != 0) { - - ut_print_timestamp(stderr); - fputs(" InnoDB: Cannot create file ", stderr); - ut_print_filename(stderr, link_filepath); - fputs(".\n", stderr); - if (error == OS_FILE_ALREADY_EXISTS) { - fputs("InnoDB: The link file: ", stderr); - ut_print_filename(stderr, filepath); - fputs(" already exists.\n", stderr); - err = DB_TABLESPACE_EXISTS; - } else if (error == OS_FILE_DISK_FULL) { - err = DB_OUT_OF_FILE_SPACE; - } else if (error == OS_FILE_OPERATION_NOT_SUPPORTED) { - err = DB_UNSUPPORTED; - } else { - err = DB_ERROR; + if (error == OS_FILE_DISK_FULL) { + return(DB_OUT_OF_FILE_SPACE); } - /* file is not open, no need to close it. */ - mem_free(link_filepath); - return(err); + return(DB_ERROR); } - ulint rbytes = fwrite(filepath, 1, strlen(filepath), file); - if (rbytes != strlen(filepath)) { - os_file_get_last_error(true); - ib_logf(IB_LOG_LEVEL_ERROR, - "cannot write link file " - "%s",filepath); - err = DB_ERROR; - } + bool atomic_write; - /* Close the file, we only need it at startup */ - fclose(file); +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) + if (fil_fusionio_enable_atomic_write(file)) { - mem_free(link_filepath); + /* This is required by FusionIO HW/Firmware */ + int ret = posix_fallocate(file, 0, size * UNIV_PAGE_SIZE); - return(err); -} + if (ret != 0) { -/*******************************************************************//** -Deletes an InnoDB Symbolic Link (ISL) file. */ -UNIV_INTERN -void -fil_delete_link_file( -/*=================*/ - const char* tablename) /*!< in: name of table */ -{ - char* link_filepath = fil_make_isl_name(tablename); + ib::error() << + "posix_fallocate(): Failed to preallocate" + " data for file " << path + << ", desired size " + << size * UNIV_PAGE_SIZE + << " Operating system error number " << ret + << ". Check" + " that the disk is not full or a disk quota" + " exceeded. Make sure the file system supports" + " this function. Some operating system error" + " numbers are described at " REFMAN + " operating-system-error-codes.html"; - os_file_delete_if_exists(innodb_file_data_key, link_filepath); + success = false; + } else { + success = true; + } - mem_free(link_filepath); -} + atomic_write = true; + } else { + atomic_write = false; -/*******************************************************************//** -Reads an InnoDB Symbolic Link (ISL) file. -It is always created under the 'datadir' of MySQL. The name is of the -form {databasename}/{tablename}. and the isl file is expected to be in a -'{databasename}' directory called '{tablename}.isl'. The caller must free -the memory of the null-terminated path returned if it is not null. -@return own: filepath found in link file, NULL if not found. */ -UNIV_INTERN -char* -fil_read_link_file( -/*===============*/ - const char* name) /*!< in: tablespace name */ -{ - char* filepath = NULL; - char* link_filepath; - FILE* file = NULL; + success = os_file_set_size( + path, file, size * UNIV_PAGE_SIZE, srv_read_only_mode); + } +#else + atomic_write = false; - /* The .isl file is in the 'normal' tablespace location. 
*/ - link_filepath = fil_make_isl_name(name); + success = os_file_set_size( + path, file, size * UNIV_PAGE_SIZE, srv_read_only_mode); - file = fopen(link_filepath, "r+b"); +#endif /* !NO_FALLOCATE && UNIV_LINUX */ - mem_free(link_filepath); + if (!success) { + os_file_close(file); + os_file_delete(innodb_data_file_key, path); + return(DB_OUT_OF_FILE_SPACE); + } - if (file) { - filepath = static_cast(mem_alloc(OS_FILE_MAX_PATH)); + /* Note: We are actually punching a hole, previous contents will + be lost after this call, if it succeeds. In this case the file + should be full of NULs. */ - os_file_read_string(file, filepath, OS_FILE_MAX_PATH); - fclose(file); + bool punch_hole = os_is_sparse_file_supported(path, file); - if (strlen(filepath)) { - /* Trim whitespace from end of filepath */ - ulint lastch = strlen(filepath) - 1; - while (lastch > 4 && filepath[lastch] <= 0x20) { - filepath[lastch--] = 0x00; - } - srv_normalize_path_for_win(filepath); - } - } - - return(filepath); -} - -/*******************************************************************//** -Opens a handle to the file linked to in an InnoDB Symbolic Link file. -@return TRUE if remote linked tablespace file is found and opened. */ -UNIV_INTERN -ibool -fil_open_linked_file( -/*===============*/ - const char* tablename, /*!< in: database/tablename */ - char** remote_filepath,/*!< out: remote filepath */ - os_file_t* remote_file, /*!< out: remote file handle */ - ulint atomic_writes) /*!< in: atomic writes table option - value */ -{ - ibool success; - - *remote_filepath = fil_read_link_file(tablename); - if (*remote_filepath == NULL) { - return(FALSE); - } - - /* The filepath provided is different from what was - found in the link file. */ - *remote_file = os_file_create_simple_no_error_handling( - innodb_file_data_key, *remote_filepath, - OS_FILE_OPEN, OS_FILE_READ_ONLY, - &success, atomic_writes); - - if (!success) { - char* link_filepath = fil_make_isl_name(tablename); - - /* The following call prints an error message */ - os_file_get_last_error(true); - - ib_logf(IB_LOG_LEVEL_ERROR, - "A link file was found named '%s' " - "but the linked tablespace '%s' " - "could not be opened.", - link_filepath, *remote_filepath); - - mem_free(link_filepath); - mem_free(*remote_filepath); - *remote_filepath = NULL; - } - - return(success); -} - -/*******************************************************************//** -Creates a new single-table tablespace to a database directory of MySQL. -Database directories are under the 'datadir' of MySQL. The datadir is the -directory of a running mysqld program. We can refer to it by simply the -path '.'. Tables created with CREATE TEMPORARY TABLE we place in the temp -dir of the mysqld server. 
- -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_create_new_single_table_tablespace( -/*===================================*/ - ulint space_id, /*!< in: space id */ - const char* tablename, /*!< in: the table name in the usual - databasename/tablename format - of InnoDB */ - const char* dir_path, /*!< in: NULL or a dir path */ - ulint flags, /*!< in: tablespace flags */ - ulint flags2, /*!< in: table flags2 */ - ulint size, /*!< in: the initial size of the - tablespace file in pages, - must be >= FIL_IBD_FILE_INITIAL_SIZE */ - fil_encryption_t mode, /*!< in: encryption mode */ - ulint key_id) /*!< in: encryption key_id */ -{ - os_file_t file; - ibool ret; - dberr_t err; - byte* buf2; - byte* page; - char* path; - ibool success; - /* TRUE if a table is created with CREATE TEMPORARY TABLE */ - bool is_temp = !!(flags2 & DICT_TF2_TEMPORARY); - bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags); - ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags); - fil_space_crypt_t *crypt_data = NULL; - - ut_a(space_id > 0); - ut_ad(!srv_read_only_mode); - ut_a(space_id < SRV_LOG_SPACE_FIRST_ID); - ut_a(size >= FIL_IBD_FILE_INITIAL_SIZE); - ut_a(fsp_flags_is_valid(flags)); - - if (is_temp) { - /* Temporary table filepath */ - ut_ad(dir_path); - path = fil_make_ibd_name(dir_path, true); - } else if (has_data_dir) { - ut_ad(dir_path); - path = os_file_make_remote_pathname(dir_path, tablename, "ibd"); - - /* Since this tablespace file will be created in a - remote directory, let's create the subdirectories - in the path, if they are not there already. */ - success = os_file_create_subdirs_if_needed(path); - if (!success) { - err = DB_ERROR; - goto error_exit_3; - } - } else { - path = fil_make_ibd_name(tablename, false); - } - - file = os_file_create( - innodb_file_data_key, path, - OS_FILE_CREATE | OS_FILE_ON_ERROR_NO_EXIT, - OS_FILE_NORMAL, - OS_DATA_FILE, - &ret, - atomic_writes); - - if (ret == FALSE) { - /* The following call will print an error message */ - ulint error = os_file_get_last_error(true); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create file '%s'\n", path); - - if (error == OS_FILE_ALREADY_EXISTS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "The file '%s' already exists though the " - "corresponding table did not exist " - "in the InnoDB data dictionary. " - "Have you moved InnoDB .ibd files " - "around without using the SQL commands " - "DISCARD TABLESPACE and IMPORT TABLESPACE, " - "or did mysqld crash in the middle of " - "CREATE TABLE? " - "You can resolve the problem by removing " - "the file '%s' under the 'datadir' of MySQL.", - path, path); + if (punch_hole) { - err = DB_TABLESPACE_EXISTS; - goto error_exit_3; - } + dberr_t punch_err; - if (error == OS_FILE_OPERATION_NOT_SUPPORTED) { - err = DB_UNSUPPORTED; - goto error_exit_3; - } + punch_err = os_file_punch_hole(file, 0, size * UNIV_PAGE_SIZE); - if (error == OS_FILE_DISK_FULL) { - err = DB_OUT_OF_FILE_SPACE; - goto error_exit_3; + if (punch_err != DB_SUCCESS) { + punch_hole = false; } - - err = DB_ERROR; - goto error_exit_3; - } - - ret = os_file_set_size(path, file, size * UNIV_PAGE_SIZE); - - if (!ret) { - err = DB_OUT_OF_FILE_SPACE; - goto error_exit_2; } /* printf("Creating tablespace %s id %lu\n", path, space_id); */ @@ -3504,7 +3542,7 @@ fil_create_new_single_table_tablespace( with zeros from the call of os_file_set_size(), until a buffer pool flush would write to it. 
*/ - buf2 = static_cast(ut_malloc(3 * UNIV_PAGE_SIZE)); + buf2 = static_cast(ut_malloc_nokey(3 * UNIV_PAGE_SIZE)); /* Align the memory for file i/o if we might have O_DIRECT set */ page = static_cast(ut_align(buf2, UNIV_PAGE_SIZE)); @@ -3512,56 +3550,81 @@ fil_create_new_single_table_tablespace( /* Add the UNIV_PAGE_SIZE to the table flags and write them to the tablespace header. */ - flags = fsp_flags_set_page_size(flags, UNIV_PAGE_SIZE); + flags = fsp_flags_set_page_size(flags, univ_page_size); fsp_header_init_fields(page, space_id, flags); mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id); - ut_ad(fsp_flags_is_valid(flags)); - if (!(fsp_flags_is_compressed(flags))) { - buf_flush_init_for_writing(page, NULL, 0); - ret = os_file_write(path, file, page, 0, UNIV_PAGE_SIZE); + const page_size_t page_size(flags); + IORequest request(IORequest::WRITE); + + if (!page_size.is_compressed()) { + + buf_flush_init_for_writing( + NULL, page, NULL, 0, + fsp_is_checksum_disabled(space_id)); + + err = os_file_write( + request, path, file, page, 0, page_size.physical()); + + ut_ad(err != DB_IO_NO_PUNCH_HOLE); + } else { page_zip_des_t page_zip; - ulint zip_size; - - zip_size = fsp_flags_get_zip_size(flags); - page_zip_set_size(&page_zip, zip_size); + page_zip_set_size(&page_zip, page_size.physical()); page_zip.data = page + UNIV_PAGE_SIZE; #ifdef UNIV_DEBUG page_zip.m_start = #endif /* UNIV_DEBUG */ page_zip.m_end = page_zip.m_nonempty = page_zip.n_blobs = 0; - buf_flush_init_for_writing(page, &page_zip, 0); - ret = os_file_write(path, file, page_zip.data, 0, zip_size); + + buf_flush_init_for_writing( + NULL, page, &page_zip, 0, + fsp_is_checksum_disabled(space_id)); + + err = os_file_write( + request, path, file, page_zip.data, 0, + page_size.physical()); + + ut_a(err != DB_IO_NO_PUNCH_HOLE); + + punch_hole = false; } ut_free(buf2); - if (!ret) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Could not write the first page to tablespace " - "'%s'", path); + if (err != DB_SUCCESS) { - err = DB_ERROR; - goto error_exit_2; + ib::error() + << "Could not write the first page to" + << " tablespace '" << path << "'"; + + os_file_close(file); + os_file_delete(innodb_data_file_key, path); + + return(DB_ERROR); } - ret = os_file_flush(file); + success = os_file_flush(file); - if (!ret) { - ib_logf(IB_LOG_LEVEL_ERROR, - "File flush of tablespace '%s' failed", path); - err = DB_ERROR; - goto error_exit_2; + if (!success) { + ib::error() << "File flush of tablespace '" + << path << "' failed"; + os_file_close(file); + os_file_delete(innodb_data_file_key, path); + return(DB_ERROR); } - if (has_data_dir) { - /* Now that the IBD file is created, make the ISL file. */ - err = fil_create_link_file(tablename, path); + if (has_data_dir || has_shared_space) { + /* Make the ISL file if the IBD file is not + in the default location. */ + err = RemoteDatafile::create_link_file(name, path, + has_shared_space); if (err != DB_SUCCESS) { - goto error_exit_2; + os_file_close(file); + os_file_delete(innodb_data_file_key, path); + return(err); } } @@ -3572,31 +3635,26 @@ fil_create_new_single_table_tablespace( crypt_data = fil_space_create_crypt_data(mode, key_id); } - success = fil_space_create(tablename, space_id, flags, FIL_TABLESPACE, - crypt_data); + space = fil_space_create(name, space_id, flags, is_temp + ? 
FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, crypt_data); + + if (!fil_node_create_low( + path, size, space, false, punch_hole, atomic_write)) { - if (!success || !fil_node_create(path, size, space_id, FALSE)) { err = DB_ERROR; goto error_exit_1; } #ifndef UNIV_HOTBACKUP - { - mtr_t mtr; - ulint mlog_file_flag = 0; - - if (is_temp) { - mlog_file_flag |= MLOG_FILE_FLAG_TEMP; - } + if (!is_temp) { + mtr_t mtr; + const fil_node_t* file = UT_LIST_GET_FIRST(space->chain); mtr_start(&mtr); - - fil_op_write_log(flags - ? MLOG_FILE_CREATE2 - : MLOG_FILE_CREATE, - space_id, mlog_file_flag, flags, - tablename, NULL, &mtr); - + fil_op_write_log( + MLOG_FILE_CREATE2, space_id, 0, file->name, + NULL, space->flags, &mtr); + fil_name_write(space, 0, file, &mtr); mtr_commit(&mtr); } #endif @@ -3606,60 +3664,20 @@ fil_create_new_single_table_tablespace( These labels reflect the order in which variables are assigned or actions are done. */ error_exit_1: - if (has_data_dir && err != DB_SUCCESS) { - fil_delete_link_file(tablename); + if (err != DB_SUCCESS && (has_data_dir || has_shared_space)) { + RemoteDatafile::delete_link_file(name); } -error_exit_2: + os_file_close(file); if (err != DB_SUCCESS) { - os_file_delete(innodb_file_data_key, path); + os_file_delete(innodb_data_file_key, path); } -error_exit_3: - mem_free(path); return(err); } #ifndef UNIV_HOTBACKUP -/********************************************************************//** -Report information about a bad tablespace. */ -static -void -fil_report_bad_tablespace( -/*======================*/ - const char* filepath, /*!< in: filepath */ - const char* check_msg, /*!< in: fil_check_first_page() */ - ulint found_id, /*!< in: found space ID */ - ulint found_flags, /*!< in: found flags */ - ulint expected_id, /*!< in: expected space id */ - ulint expected_flags) /*!< in: expected flags */ -{ - if (check_msg) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Error %s in file '%s'," - "tablespace id=%lu, flags=%lu. " - "Please refer to " - REFMAN "innodb-troubleshooting-datadict.html " - "for how to resolve the issue.", - check_msg, filepath, - (ulong) expected_id, (ulong) expected_flags); - return; - } - - ib_logf(IB_LOG_LEVEL_ERROR, - "In file '%s', tablespace id and flags are %lu and %lu, " - "but in the InnoDB data dictionary they are %lu and %lu. " - "Have you moved InnoDB .ibd files around without using the " - "commands DISCARD TABLESPACE and IMPORT TABLESPACE? " - "Please refer to " - REFMAN "innodb-troubleshooting-datadict.html " - "for how to resolve the issue.", - filepath, (ulong) found_id, (ulong) found_flags, - (ulong) expected_id, (ulong) expected_flags); -} - -/********************************************************************//** -Tries to open a single-table tablespace and optionally checks that the +/** Try to open a single-table tablespace and optionally check that the space id in it is correct. If this does not succeed, print an error message to the .err log. This function is used to open a tablespace when we start mysqld after the dictionary has been booted, and also in IMPORT TABLESPACE. @@ -3673,290 +3691,216 @@ If the validate boolean is set, we read the first page of the file and check that the space id in the file is what we expect. We assume that this function runs much faster if no check is made, since accessing the file inode probably is much faster (the OS caches them) than accessing -the first page of the file. This boolean may be initially FALSE, but if +the first page of the file. 
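The rewritten failure paths above repeat the same two steps, close the handle and delete the half-created file, at every early return, while the old code funneled them through error_exit labels. The sketch below shows one hedged alternative, a small scope guard; the helper names are invented for illustration and the real function keeps its explicit cleanup.

        #include <cstdio>
        #include <functional>

        /* Tiny scope guard: runs its cleanup unless release() is called. */
        class cleanup_guard {
        public:
                explicit cleanup_guard(std::function<void()> fn) : m_fn(fn) {}
                ~cleanup_guard() { if (m_fn) m_fn(); }
                void release() { m_fn = nullptr; }
        private:
                std::function<void()> m_fn;
        };

        /* Illustrative stand-ins for the real write/flush primitives. */
        static bool write_first_page() { return(true); }
        static bool flush_file()       { return(true); }

        static int create_tablespace_sketch(const char* path)
        {
                FILE* f = fopen(path, "wb");
                if (f == NULL) return(-1);

                /* On any early return below, close and remove the partial file. */
                cleanup_guard guard([&]() { fclose(f); remove(path); });

                if (!write_first_page()) return(-1);
                if (!flush_file())       return(-1);

                guard.release();        /* success: keep the file */
                fclose(f);
                return(0);
        }

        int main() { return(create_tablespace_sketch("demo.ibd") == 0 ? 0 : 1); }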
This boolean may be initially false, but if a remote tablespace is found it will be changed to true. If the fix_dict boolean is set, then it is safe to use an internal SQL statement to update the dictionary tables if they are incorrect. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@param[in] validate true if we should validate the tablespace +@param[in] fix_dict true if the dictionary is available to be fixed +@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_TEMPORARY +@param[in] id tablespace ID +@param[in] flags tablespace flags +@param[in] space_name tablespace name of the datafile +If file-per-table, it is the table name in the databasename/tablename format +@param[in] path_in expected filepath, usually read from dictionary +@return DB_SUCCESS or error code */ dberr_t -fil_open_single_table_tablespace( -/*=============================*/ - bool validate, /*!< in: Do we validate tablespace? */ - bool fix_dict, /*!< in: Can we fix the dictionary? */ - ulint id, /*!< in: space id */ - ulint flags, /*!< in: tablespace flags */ - const char* tablename, /*!< in: table name in the - databasename/tablename format */ - const char* path_in, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ +fil_ibd_open( + bool validate, + bool fix_dict, + fil_type_t purpose, + ulint id, + ulint flags, + const char* space_name, + const char* path_in, + dict_table_t* table) { dberr_t err = DB_SUCCESS; bool dict_filepath_same_as_default = false; bool link_file_found = false; bool link_file_is_bad = false; - fsp_open_info def; - fsp_open_info dict; - fsp_open_info remote; + bool is_shared = FSP_FLAGS_GET_SHARED(flags); + Datafile df_default; /* default location */ + Datafile df_dict; /* dictionary location */ + RemoteDatafile df_remote; /* remote location */ ulint tablespaces_found = 0; ulint valid_tablespaces_found = 0; - ulint atomic_writes = 0; - fil_space_crypt_t* crypt_data = NULL; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!fix_dict || rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - ut_ad(!fix_dict || mutex_own(&(dict_sys->mutex))); + ut_ad(!fix_dict || rw_lock_own(dict_operation_lock, RW_LOCK_X)); - /* Table flags can be ULINT_UNDEFINED if - dict_tf_to_fsp_flags_failure is set. */ - if (flags != ULINT_UNDEFINED) { - if (!fsp_flags_is_valid(flags)) { - return(DB_CORRUPTION); - } - } else { + ut_ad(!fix_dict || mutex_own(&dict_sys->mutex)); + ut_ad(!fix_dict || !srv_read_only_mode); + ut_ad(!fix_dict || srv_log_file_size != 0); + ut_ad(fil_type_is_data(purpose)); + + if (!fsp_flags_is_valid(flags)) { return(DB_CORRUPTION); } - atomic_writes = fsp_flags_get_atomic_writes(flags); + df_default.init(space_name, flags); + df_dict.init(space_name, flags); + df_remote.init(space_name, flags); - /* If the tablespace was relocated, we do not - compare the DATA_DIR flag */ - ulint mod_flags = flags & ~FSP_FLAGS_MASK_DATA_DIR; + /* Discover the correct file by looking in three possible locations + while avoiding unecessary effort. */ - memset(&def, 0, sizeof(def)); - memset(&dict, 0, sizeof(dict)); - memset(&remote, 0, sizeof(remote)); + if (is_shared) { + /* Shared tablespaces will have a path_in since the filename + is not generated from the tablespace name. Use the basename + from this path_in with the default datadir as a filepath to + the default location */ + ut_a(path_in); + const char* sep = strrchr(path_in, OS_PATH_SEPARATOR); + const char* basename = (sep == NULL) ? 
path_in : &sep[1]; + df_default.make_filepath(NULL, basename, IBD); - /* Discover the correct filepath. We will always look for an ibd - in the default location. If it is remote, it should not be here. */ - def.filepath = fil_make_ibd_name(tablename, false); + /* Always validate shared tablespaces. */ + validate = true; - /* The path_in was read from SYS_DATAFILES. */ - if (path_in) { - if (strcmp(def.filepath, path_in)) { - dict.filepath = mem_strdup(path_in); - /* possibility of multiple files. */ - validate = true; - } else { - dict_filepath_same_as_default = true; - } + /* Set the ISL filepath in the default location. */ + df_remote.set_link_filepath(path_in); + } else { + /* We will always look for an ibd in the default location. */ + df_default.make_filepath(NULL, space_name, IBD); } - link_file_found = fil_open_linked_file( - tablename, &remote.filepath, &remote.file, atomic_writes); - remote.success = link_file_found; - if (remote.success) { - /* possibility of multiple files. */ + /* Look for a filepath embedded in an ISL where the default file + would be. */ + if (df_remote.open_read_only(true) == DB_SUCCESS) { + ut_ad(df_remote.is_open()); + + /* Always validate a file opened from an ISL pointer */ validate = true; - tablespaces_found++; - - /* A link file was found. MySQL does not allow a DATA - DIRECTORY to be be the same as the default filepath. */ - ut_a(strcmp(def.filepath, remote.filepath)); - - /* If there was a filepath found in SYS_DATAFILES, - we hope it was the same as this remote.filepath found - in the ISL file. */ - if (dict.filepath - && (0 == strcmp(dict.filepath, remote.filepath))) { - remote.success = FALSE; - os_file_close(remote.file); - mem_free(remote.filepath); - remote.filepath = NULL; - tablespaces_found--; + ++tablespaces_found; + link_file_found = true; + if (table) { + table->crypt_data = df_remote.get_crypt_info(); } + + } else if (df_remote.filepath() != NULL) { + /* An ISL file was found but contained a bad filepath in it. + Better validate anything we do find. */ + validate = true; } - /* Attempt to open the tablespace at other possible filepaths. */ - if (dict.filepath) { - dict.file = os_file_create_simple_no_error_handling( - innodb_file_data_key, dict.filepath, OS_FILE_OPEN, - OS_FILE_READ_ONLY, &dict.success, atomic_writes); - if (dict.success) { - /* possibility of multiple files. */ + /* Attempt to open the tablespace at the dictionary filepath. */ + if (path_in) { + if (df_default.same_filepath_as(path_in)) { + dict_filepath_same_as_default = true; + } else { + /* Dict path is not the default path. Always validate + remote files. If default is opened, it was moved. */ validate = true; - tablespaces_found++; - } - } - /* Always look for a file at the default location. */ - ut_a(def.filepath); - def.file = os_file_create_simple_no_error_handling( - innodb_file_data_key, def.filepath, OS_FILE_OPEN, - OS_FILE_READ_ONLY, &def.success, atomic_writes); - if (def.success) { - tablespaces_found++; - } + df_dict.set_filepath(path_in); + if (df_dict.open_read_only(true) == DB_SUCCESS) { + ut_ad(df_dict.is_open()); + ++tablespaces_found; - /* We have now checked all possible tablespace locations and - have a count of how many we found. If things are normal, we - only found 1. */ - if (!validate && tablespaces_found == 1) { - goto skip_validate; + if (table) { + table->crypt_data = df_dict.get_crypt_info(); + } + } + } } - /* Read the first page of the datadir tablespace, if found. 
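df_default.make_filepath(NULL, space_name, IBD) amounts to composing the datadir, the dbname/tablename space name and the .ibd suffix, or, for a shared tablespace, the basename taken from path_in. A short sketch of that composition with std::filesystem follows; the datadir value and helper names are placeholders, not the Datafile API.

        #include <filesystem>
        #include <iostream>
        #include <string>

        namespace fs = std::filesystem;

        /* Default location of a file-per-table datafile:
        <datadir>/<dbname>/<tablename>.ibd, where 'space_name' has the
        dbname/tablename form used by the dictionary. */
        static fs::path default_ibd_path(const fs::path& datadir,
                                         const std::string& space_name)
        {
                return(datadir / (space_name + ".ibd"));
        }

        /* For a shared tablespace the file name is not derivable from the
        space name, so fall back to the basename of the dictionary path. */
        static fs::path default_shared_path(const fs::path& datadir,
                                            const fs::path& path_in)
        {
                return(datadir / path_in.filename());
        }

        int main()
        {
                std::cout << default_ibd_path("/var/lib/mysql", "test/t1") << "\n";
                std::cout << default_shared_path("/var/lib/mysql",
                                                 "/mnt/fast/ts1.ibd") << "\n";
                return(0);
        }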
*/ - if (def.success) { - def.check_msg = fil_read_first_page( - def.file, FALSE, &def.flags, &def.id, -#ifdef UNIV_LOG_ARCHIVE - &space_arch_log_no, &space_arch_log_no, -#endif /* UNIV_LOG_ARCHIVE */ - &def.lsn, &def.lsn, &def.crypt_data); - def.valid = !def.check_msg; - + /* Always look for a file at the default location. But don't log + an error if the tablespace is already open in remote or dict. */ + ut_a(df_default.filepath()); + const bool strict = (tablespaces_found == 0); + if (df_default.open_read_only(strict) == DB_SUCCESS) { + ut_ad(df_default.is_open()); + ++tablespaces_found; if (table) { - table->crypt_data = def.crypt_data; - } - - /* Validate this single-table-tablespace with SYS_TABLES, - but do not compare the DATA_DIR flag, in case the - tablespace was relocated. */ - - ulint newf = def.flags; - if (newf != mod_flags) { - if (FSP_FLAGS_HAS_DATA_DIR(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR); - } else if(FSP_FLAGS_HAS_DATA_DIR_ORACLE(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR_ORACLE); - } + table->crypt_data = df_default.get_crypt_info(); } + } - if (def.valid && def.id == id - && newf == mod_flags) { - valid_tablespaces_found++; - } else { - def.valid = false; - /* Do not use this tablespace. */ - fil_report_bad_tablespace( - def.filepath, def.check_msg, def.id, - def.flags, id, flags); - } + /* Check if multiple locations point to the same file. */ + if (tablespaces_found > 1 && df_default.same_as(df_remote)) { + /* A link file was found with the default path in it. + Use the default path and delete the link file. */ + --tablespaces_found; + df_remote.delete_link_file(); + df_remote.close(); + } + if (tablespaces_found > 1 && df_default.same_as(df_dict)) { + --tablespaces_found; + df_dict.close(); + } + if (tablespaces_found > 1 && df_remote.same_as(df_dict)) { + --tablespaces_found; + df_dict.close(); } - /* Read the first page of the remote tablespace */ - if (remote.success) { - remote.check_msg = fil_read_first_page( - remote.file, FALSE, &remote.flags, &remote.id, -#ifdef UNIV_LOG_ARCHIVE - &remote.arch_log_no, &remote.arch_log_no, -#endif /* UNIV_LOG_ARCHIVE */ - &remote.lsn, &remote.lsn, &remote.crypt_data); - remote.valid = !remote.check_msg; + bool atomic_write; - if (table) { - table->crypt_data = remote.crypt_data; - } - - /* Validate this single-table-tablespace with SYS_TABLES, - but do not compare the DATA_DIR flag, in case the - tablespace was relocated. */ - ulint newf = remote.flags; - if (newf != mod_flags) { - if (FSP_FLAGS_HAS_DATA_DIR(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR); - } else if(FSP_FLAGS_HAS_DATA_DIR_ORACLE(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR_ORACLE); - } - } +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) + if (!srv_use_doublewrite_buf && df_default.is_open()) { - if (remote.valid && remote.id == id - && newf == mod_flags) { - valid_tablespaces_found++; - } else { - remote.valid = false; - /* Do not use this linked tablespace. */ - fil_report_bad_tablespace( - remote.filepath, remote.check_msg, remote.id, - remote.flags, id, flags); - link_file_is_bad = true; - } + atomic_write = fil_fusionio_enable_atomic_write( + df_default.handle()); + } else { + atomic_write = false; + } +#else + atomic_write = false; +#endif /* !NO_FALLOCATE && UNIV_LINUX */ + + /* We have now checked all possible tablespace locations and + have a count of how many unique files we found. If things are + normal, we only found 1. 
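Before trusting tablespaces_found, the code above collapses candidates that turn out to be the same underlying file, for example an ISL that points back at the default location. A sketch of that de-duplication is below, using std::filesystem::equivalent so the comparison is by inode rather than by path string; the candidate paths are made up.

        #include <filesystem>
        #include <iostream>
        #include <vector>

        namespace fs = std::filesystem;

        /* Keep only candidates that exist, and drop the ones that refer to
        the same underlying file as an earlier candidate. */
        static std::vector<fs::path> unique_candidates(const std::vector<fs::path>& in)
        {
                std::vector<fs::path> out;

                for (const fs::path& p : in) {
                        std::error_code ec;
                        if (!fs::exists(p, ec)) continue;

                        bool dup = false;
                        for (const fs::path& q : out) {
                                if (fs::equivalent(p, q, ec)) { dup = true; break; }
                        }
                        if (!dup) out.push_back(p);
                }
                return(out);
        }

        int main()
        {
                std::vector<fs::path> found = unique_candidates({
                        "/var/lib/mysql/test/t1.ibd",   /* default location */
                        "/mnt/fast/test/t1.ibd",        /* path from SYS_DATAFILES */
                        "/mnt/fast/test/t1.ibd"});      /* path from the ISL file */

                std::cout << found.size() << " unique candidate(s)\n";
                return(0);
        }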
*/ + if (!validate && tablespaces_found == 1) { + goto skip_validate; } - /* Read the first page of the datadir tablespace, if found. */ - if (dict.success) { - dict.check_msg = fil_read_first_page( - dict.file, FALSE, &dict.flags, &dict.id, -#ifdef UNIV_LOG_ARCHIVE - &dict.arch_log_no, &dict.arch_log_no, -#endif /* UNIV_LOG_ARCHIVE */ - &dict.lsn, &dict.lsn, &dict.crypt_data); - dict.valid = !dict.check_msg; + /* Read and validate the first page of these three tablespace + locations, if found. */ + valid_tablespaces_found += + (df_remote.validate_to_dd(id, flags) == DB_SUCCESS) ? 1 : 0; - if (table) { - table->crypt_data = dict.crypt_data; - } - - /* Validate this single-table-tablespace with SYS_TABLES, - but do not compare the DATA_DIR flag, in case the - tablespace was relocated. */ - ulint newf = dict.flags; - if (newf != mod_flags) { - if (FSP_FLAGS_HAS_DATA_DIR(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR); - } else if(FSP_FLAGS_HAS_DATA_DIR_ORACLE(newf)) { - newf = (newf & ~FSP_FLAGS_MASK_DATA_DIR_ORACLE); - } - } + valid_tablespaces_found += + (df_default.validate_to_dd(id, flags) == DB_SUCCESS) ? 1 : 0; - if (dict.valid && dict.id == id - && newf == mod_flags) { - valid_tablespaces_found++; - } else { - dict.valid = false; - /* Do not use this tablespace. */ - fil_report_bad_tablespace( - dict.filepath, dict.check_msg, dict.id, - dict.flags, id, flags); - } - } + valid_tablespaces_found += + (df_dict.validate_to_dd(id, flags) == DB_SUCCESS) ? 1 : 0; /* Make sense of these three possible locations. First, bail out if no tablespace files were found. */ if (valid_tablespaces_found == 0) { /* The following call prints an error message */ os_file_get_last_error(true); + ib::error() << "Could not find a valid tablespace file for `" + << space_name << "`. " << TROUBLESHOOT_DATADICT_MSG; - ib_logf(IB_LOG_LEVEL_ERROR, - "Could not find a valid tablespace file for '%s'. " - "See " REFMAN "innodb-troubleshooting-datadict.html " - "for how to resolve the issue.", - tablename); - - err = DB_CORRUPTION; - - goto cleanup_and_exit; + return(DB_CORRUPTION); } /* Do not open any tablespaces if more than one tablespace with the correct space ID and flags were found. 
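Each validate_to_dd() call boils down to reading page 0 and comparing the stored space id (and flags) with what the data dictionary expects. The standalone sketch below shows only the space id half of that check; the 16 KiB page and the byte offset of the id are assumptions for illustration, not normative constants.

        #include <cstdint>
        #include <cstdio>

        /* Read page 0 of an .ibd file and check that the space id stored in
        it matches the id the data dictionary expects. */
        static bool first_page_matches(const char* path, std::uint32_t expected_id)
        {
                const size_t    page_size = 16384;
                const size_t    id_offset = 34;         /* assumed header offset */
                unsigned char   page[16384];

                FILE* f = fopen(path, "rb");
                if (f == NULL) return(false);

                bool ok = fread(page, 1, page_size, f) == page_size;
                fclose(f);
                if (!ok) return(false);

                std::uint32_t stored = (std::uint32_t(page[id_offset]) << 24)
                                     | (std::uint32_t(page[id_offset + 1]) << 16)
                                     | (std::uint32_t(page[id_offset + 2]) << 8)
                                     |  std::uint32_t(page[id_offset + 3]);

                return(stored == expected_id);
        }

        int main()
        {
                printf("%s\n", first_page_matches("demo.ibd", 42)
                       ? "valid for this id" : "not valid");
                return(0);
        }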
*/ if (tablespaces_found > 1) { - ib_logf(IB_LOG_LEVEL_ERROR, - "A tablespace for %s has been found in " - "multiple places;", tablename); - if (def.success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Default location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - def.filepath, def.lsn, - (ulong) def.id, (ulong) def.flags); - } - if (remote.success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Remote location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - remote.filepath, remote.lsn, - (ulong) remote.id, (ulong) remote.flags); - } - if (dict.success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Dictionary location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - dict.filepath, dict.lsn, - (ulong) dict.id, (ulong) dict.flags); + ib::error() << "A tablespace for `" << space_name + << "` has been found in multiple places;"; + + if (df_default.is_open()) { + ib::error() << "Default location: " + << df_default.filepath() + << ", Space ID=" << df_default.space_id() + << ", Flags=" << df_default.flags(); + } + if (df_remote.is_open()) { + ib::error() << "Remote location: " + << df_remote.filepath() + << ", Space ID=" << df_remote.space_id() + << ", Flags=" << df_remote.flags(); + } + if (df_dict.is_open()) { + ib::error() << "Dictionary location: " + << df_dict.filepath() + << ", Space ID=" << df_dict.space_id() + << ", Flags=" << df_dict.flags(); } /* Force-recovery will allow some tablespaces to be @@ -3967,41 +3911,41 @@ fil_open_single_table_tablespace( recovery and there is only one good tablespace, ignore any bad tablespaces. */ if (valid_tablespaces_found > 1 || srv_force_recovery > 0) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Will not open the tablespace for '%s'", - tablename); - - if (def.success != def.valid - || dict.success != dict.valid - || remote.success != remote.valid) { - err = DB_CORRUPTION; - } else { - err = DB_ERROR; + ib::error() << "Will not open tablespace `" + << space_name << "`"; + + /* If the file is not open it cannot be valid. */ + ut_ad(df_default.is_open() || !df_default.is_valid()); + ut_ad(df_dict.is_open() || !df_dict.is_valid()); + ut_ad(df_remote.is_open() || !df_remote.is_valid()); + + /* Having established that, this is an easy way to + look for corrupted data files. */ + if (df_default.is_open() != df_default.is_valid() + || df_dict.is_open() != df_dict.is_valid() + || df_remote.is_open() != df_remote.is_valid()) { + return(DB_CORRUPTION); } - goto cleanup_and_exit; + return(DB_ERROR); } /* There is only one valid tablespace found and we did not use srv_force_recovery during REDO. Use this one tablespace and clean up invalid tablespace pointers */ - if (def.success && !def.valid) { - def.success = false; - os_file_close(def.file); + if (df_default.is_open() && !df_default.is_valid()) { + df_default.close(); tablespaces_found--; } - if (dict.success && !dict.valid) { - dict.success = false; - os_file_close(dict.file); + if (df_dict.is_open() && !df_dict.is_valid()) { + df_dict.close(); /* Leave dict.filepath so that SYS_DATAFILES can be corrected below. */ tablespaces_found--; } - if (remote.success && !remote.valid) { - remote.success = false; - os_file_close(remote.file); - mem_free(remote.filepath); - remote.filepath = NULL; + if (df_remote.is_open() && !df_remote.is_valid()) { + df_remote.close(); tablespaces_found--; + link_file_is_bad = true; } } @@ -4015,95 +3959,89 @@ fil_open_single_table_tablespace( goto skip_validate; } - /* We may need to change what is stored in SYS_DATAFILES or - SYS_TABLESPACES or adjust the link file. 
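The block above implements a small decision rule: zero valid candidates is corruption, several found locations are refused when more than one validates or when force recovery is running, and otherwise the single valid file wins. A simplified restatement of that rule as a standalone function follows; the candidate struct is invented for the sketch.

        #include <iostream>
        #include <string>
        #include <vector>

        /* One probed datafile location and whether its first page validated. */
        struct candidate {
                std::string     path;
                bool            valid;
        };

        static const candidate* choose_datafile(const std::vector<candidate>& found,
                                                bool force_recovery)
        {
                const candidate*        chosen = NULL;
                int                     valid_count = 0;

                for (const candidate& c : found) {
                        if (c.valid) {
                                ++valid_count;
                                chosen = &c;
                        }
                }

                if (valid_count == 0) {
                        return(NULL);           /* nothing usable */
                }
                if (found.size() > 1 && (valid_count > 1 || force_recovery)) {
                        return(NULL);           /* ambiguous: refuse to guess */
                }
                return(chosen);                 /* the single valid candidate */
        }

        int main()
        {
                std::vector<candidate> found = {
                        {"/var/lib/mysql/test/t1.ibd", true},
                        {"/mnt/fast/test/t1.ibd",      false}};

                const candidate* c = choose_datafile(found, false);
                std::cout << (c != NULL ? c->path : "no usable datafile") << "\n";
                return(0);
        }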
- Since a failure to update SYS_TABLESPACES or SYS_DATAFILES does - not prevent opening and using the single_table_tablespace either - this time or the next, we do not check the return code or fail - to open the tablespace. But dict_update_filepath() will issue a - warning to the log. */ - if (dict.filepath) { - if (remote.success) { - dict_update_filepath(id, remote.filepath); - } else if (def.success) { - dict_update_filepath(id, def.filepath); + /* We may need to update what is stored in SYS_DATAFILES or + SYS_TABLESPACES or adjust the link file. Since a failure to + update SYS_TABLESPACES or SYS_DATAFILES does not prevent opening + and using the tablespace either this time or the next, we do not + check the return code or fail to open the tablespace. But if it + fails, dict_update_filepath() will issue a warning to the log. */ + if (df_dict.filepath()) { + ut_ad(path_in != NULL); + ut_ad(df_dict.same_filepath_as(path_in)); + + if (df_remote.is_open()) { + if (!df_remote.same_filepath_as(path_in)) { + dict_update_filepath(id, df_remote.filepath()); + } + + } else if (df_default.is_open()) { + ut_ad(!dict_filepath_same_as_default); + dict_update_filepath(id, df_default.filepath()); if (link_file_is_bad) { - fil_delete_link_file(tablename); + RemoteDatafile::delete_link_file(space_name); } - } else if (!link_file_found || link_file_is_bad) { - ut_ad(dict.success); + + } else if (!is_shared + && (!link_file_found || link_file_is_bad)) { + ut_ad(df_dict.is_open()); /* Fix the link file if we got our filepath from the dictionary but a link file did not exist or it did not point to a valid file. */ - fil_delete_link_file(tablename); - fil_create_link_file(tablename, dict.filepath); + RemoteDatafile::delete_link_file(space_name); + RemoteDatafile::create_link_file( + space_name, df_dict.filepath()); } - } else if (remote.success && dict_filepath_same_as_default) { - dict_update_filepath(id, remote.filepath); + } else if (df_remote.is_open()) { + if (dict_filepath_same_as_default) { + dict_update_filepath(id, df_remote.filepath()); + + } else if (path_in == NULL) { + /* SYS_DATAFILES record for this space ID + was not found. */ + dict_replace_tablespace_and_filepath( + id, space_name, df_remote.filepath(), flags); + } - } else if (remote.success && path_in == NULL) { - /* SYS_DATAFILES record for this space ID was not found. */ - dict_insert_tablespace_and_filepath( - id, tablename, remote.filepath, flags); + } else if (df_default.is_open()) { + /* We opened the tablespace in the default location. + SYS_DATAFILES.PATH needs to be updated if it is different + from this default path or if the SYS_DATAFILES.PATH was not + supplied and it should have been. Also update the dictionary + if we found an ISL file (since !df_remote.is_open). Since + path_in is not suppled for file-per-table, we must assume + that it matched the ISL. */ + if ((path_in != NULL && !dict_filepath_same_as_default) + || (path_in == NULL + && (DICT_TF_HAS_DATA_DIR(flags) + || DICT_TF_HAS_SHARED_SPACE(flags))) + || df_remote.filepath() != NULL) { + dict_replace_tablespace_and_filepath( + id, space_name, df_default.filepath(), flags); + } } skip_validate: - if (remote.success) - crypt_data = remote.crypt_data; - else if (dict.success) - crypt_data = dict.crypt_data; - else if (def.success) - crypt_data = def.crypt_data; + if (err == DB_SUCCESS) { + fil_space_t* space = fil_space_create( + space_name, id, flags, purpose, + df_remote.is_open() ? df_remote.get_crypt_info() : + df_dict.is_open() ? 
df_dict.get_crypt_info() : + df_default.get_crypt_info()); - if (err != DB_SUCCESS) { - ; // Don't load the tablespace into the cache - } else if (!fil_space_create(tablename, id, flags, FIL_TABLESPACE, - crypt_data)) { - err = DB_ERROR; - } else { /* We do not measure the size of the file, that is why we pass the 0 below */ - if (!fil_node_create(remote.success ? remote.filepath : - dict.success ? dict.filepath : - def.filepath, 0, id, FALSE)) { - err = DB_ERROR; - } - } + if (fil_node_create_low( + df_remote.is_open() ? df_remote.filepath() : + df_dict.is_open() ? df_dict.filepath() : + df_default.filepath(), 0, space, false, + true, atomic_write) == NULL) { -cleanup_and_exit: - if (remote.success) { - os_file_close(remote.file); - } - if (remote.filepath) { - mem_free(remote.filepath); - } - if (remote.crypt_data && remote.crypt_data != crypt_data) { - if (err == DB_SUCCESS) { - fil_space_destroy_crypt_data(&remote.crypt_data); - } - } - if (dict.success) { - os_file_close(dict.file); - } - if (dict.filepath) { - mem_free(dict.filepath); - } - if (dict.crypt_data && dict.crypt_data != crypt_data) { - fil_space_destroy_crypt_data(&dict.crypt_data); - } - if (def.success) { - os_file_close(def.file); - } - if (def.crypt_data && def.crypt_data != crypt_data) { - if (err == DB_SUCCESS) { - fil_space_destroy_crypt_data(&def.crypt_data); + err = DB_ERROR; } } - mem_free(def.filepath); - return(err); } #endif /* !UNIV_HOTBACKUP */ @@ -4111,527 +4049,399 @@ cleanup_and_exit: #ifdef UNIV_HOTBACKUP /*******************************************************************//** Allocates a file name for an old version of a single-table tablespace. -The string must be freed by caller with mem_free()! -@return own: file name */ +The string must be freed by caller with ut_free()! +@return own: file name */ static char* fil_make_ibbackup_old_name( /*=======================*/ const char* name) /*!< in: original file name */ { - static const char suffix[] = "_ibbackup_old_vers_"; - char* path; - ulint len = strlen(name); + static const char suffix[] = "_ibbackup_old_vers_"; + char* path; + ulint len = strlen(name); - path = static_cast(mem_alloc(len + (15 + sizeof suffix))); + path = static_cast(ut_malloc_nokey(len + 15 + sizeof(suffix))); memcpy(path, name, len); - memcpy(path + len, suffix, (sizeof suffix) - 1); + memcpy(path + len, suffix, sizeof(suffix) - 1); ut_sprintf_timestamp_without_extra_chars( - path + len + ((sizeof suffix) - 1)); + path + len + sizeof(suffix) - 1); return(path); } #endif /* UNIV_HOTBACKUP */ - -/*******************************************************************//** -Determine the space id of the given file descriptor by reading a few -pages from the beginning of the .ibd file. -@return true if space id was successfully identified, or false. */ -static +/** Looks for a pre-existing fil_space_t with the given tablespace ID +and, if found, returns the name and filepath in newly allocated buffers +that the caller must free. +@param[in] space_id The tablespace ID to search for. +@param[out] name Name of the tablespace found. +@param[out] filepath The filepath of the first datafile for the +tablespace. +@return true if tablespace is found, false if not. */ bool -fil_user_tablespace_find_space_id( -/*==============================*/ - fsp_open_info* fsp) /* in/out: contains file descriptor, which is - used as input. 
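fil_make_ibbackup_old_name() builds a "<name>_ibbackup_old_vers_<timestamp>" string so a stale datafile can be renamed out of the way rather than deleted. The sketch below reproduces the idea with std::strftime; the exact timestamp format of ut_sprintf_timestamp_without_extra_chars() is only approximated.

        #include <ctime>
        #include <iostream>
        #include <string>

        static std::string make_backup_old_name(const std::string& name)
        {
                char            stamp[32];
                std::time_t     now = std::time(NULL);
                std::tm         tm_buf;

        #if defined(_WIN32)
                localtime_s(&tm_buf, &now);
        #else
                localtime_r(&now, &tm_buf);
        #endif
                std::strftime(stamp, sizeof(stamp), "%y%m%d_%H%M%S", &tm_buf);

                return(name + "_ibbackup_old_vers_" + stamp);
        }

        int main()
        {
                std::cout << make_backup_old_name("test/t1.ibd") << "\n";
                return(0);
        }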
contains space_id, which is - the output */ +fil_space_read_name_and_filepath( + ulint space_id, + char** name, + char** filepath) { - bool st; - os_offset_t file_size; - - file_size = os_file_get_size(fsp->file); + bool success = false; + *name = NULL; + *filepath = NULL; - if (file_size == (os_offset_t) -1) { - ib_logf(IB_LOG_LEVEL_ERROR, "Could not get file size: %s", - fsp->filepath); - return(false); - } - - /* Assuming a page size, read the space_id from each page and store it - in a map. Find out which space_id is agreed on by majority of the - pages. Choose that space_id. */ - for (ulint page_size = UNIV_ZIP_SIZE_MIN; - page_size <= UNIV_PAGE_SIZE_MAX; page_size <<= 1) { + mutex_enter(&fil_system->mutex); - /* map[space_id] = count of pages */ - std::map verify; + fil_space_t* space = fil_space_get_by_id(space_id); - ulint page_count = 64; - ulint valid_pages = 0; + if (space != NULL) { + *name = mem_strdup(space->name); - /* Adjust the number of pages to analyze based on file size */ - while ((page_count * page_size) > file_size) { - --page_count; - } + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); + *filepath = mem_strdup(node->name); - ib_logf(IB_LOG_LEVEL_INFO, "Page size:%lu Pages to analyze:" - "%lu", page_size, page_count); + success = true; + } - byte* buf = static_cast(ut_malloc(2*page_size)); - byte* page = static_cast(ut_align(buf, page_size)); + mutex_exit(&fil_system->mutex); - for (ulint j = 0; j < page_count; ++j) { + return(success); +} - st = os_file_read(fsp->file, page, (j* page_size), page_size); +/** Convert a file name to a tablespace name. +@param[in] filename directory/databasename/tablename.ibd +@return database/tablename string, to be freed with ut_free() */ +char* +fil_path_to_space_name( + const char* filename) +{ + /* Strip the file name prefix and suffix, leaving + only databasename/tablename. */ + ulint filename_len = strlen(filename); + const char* end = filename + filename_len; +#ifdef HAVE_MEMRCHR + const char* tablename = 1 + static_cast( + memrchr(filename, OS_PATH_SEPARATOR, + filename_len)); + const char* dbname = 1 + static_cast( + memrchr(filename, OS_PATH_SEPARATOR, + tablename - filename - 1)); +#else /* HAVE_MEMRCHR */ + const char* tablename = filename; + const char* dbname = NULL; + + while (const char* t = static_cast( + memchr(tablename, OS_PATH_SEPARATOR, + end - tablename))) { + dbname = tablename; + tablename = t + 1; + } +#endif /* HAVE_MEMRCHR */ + + ut_ad(dbname != NULL); + ut_ad(tablename > dbname); + ut_ad(tablename < end); + ut_ad(end - tablename > 4); + ut_ad(memcmp(end - 4, DOT_IBD, 4) == 0); + + char* name = mem_strdupl(dbname, end - dbname - 4); + + ut_ad(name[tablename - dbname - 1] == OS_PATH_SEPARATOR); +#if OS_PATH_SEPARATOR != '/' + /* space->name uses '/', not OS_PATH_SEPARATOR. */ + name[tablename - dbname - 1] = '/'; +#endif - if (!st) { - ib_logf(IB_LOG_LEVEL_INFO, - "READ FAIL: page_no:%lu", j); - continue; - } + return(name); +} - bool uncompressed_ok = false; +/** Discover the correct IBD file to open given a remote or missing +filepath from the REDO log. MEB and administrators can move a crashed +database to another location on the same machine and try to recover it. +Remote IBD files might be moved as well to the new location. + The problem with this is that the REDO log contains the old location +which may be still accessible. During recovery, if files are found in +both locations, we can chose on based on these priorities; +1. Default location +2. ISL location +3. 
REDO location +@param[in] space_id tablespace ID +@param[in] df Datafile object with path from redo +@return true if a valid datafile was found, false if not */ +bool +fil_ibd_discover( + ulint space_id, + Datafile& df) +{ + Datafile df_def_gen; /* default general datafile */ + Datafile df_def_per; /* default file-per-table datafile */ + RemoteDatafile df_rem_gen; /* remote general datafile*/ + RemoteDatafile df_rem_per; /* remote file-per-table datafile */ + + /* Look for the datafile in the default location. If it is + a general tablespace, it will be in the datadir. */ + const char* filename = df.filepath(); + const char* basename = base_name(filename); + df_def_gen.init(basename, 0); + df_def_gen.make_filepath(NULL, basename, IBD); + if (df_def_gen.open_read_only(false) == DB_SUCCESS + && df_def_gen.validate_for_recovery() == DB_SUCCESS + && df_def_gen.space_id() == space_id) { + df.set_filepath(df_def_gen.filepath()); + df.open_read_only(false); + return(true); + } + + /* If this datafile is file-per-table it will have a schema dir. */ + ulint sep_found = 0; + const char* db = basename; + for (; db > filename && sep_found < 2; db--) { + if (db[0] == OS_PATH_SEPARATOR) { + sep_found++; + } + } + if (sep_found == 2) { + db += 2; + df_def_per.init(db, 0); + df_def_per.make_filepath(NULL, db, IBD); + if (df_def_per.open_read_only(false) == DB_SUCCESS + && df_def_per.validate_for_recovery() == DB_SUCCESS + && df_def_per.space_id() == space_id) { + df.set_filepath(df_def_per.filepath()); + df.open_read_only(false); + return(true); + } + } + + /* Did not find a general or file-per-table datafile in the + default location. Look for a remote general tablespace. */ + df_rem_gen.set_name(basename); + if (df_rem_gen.open_link_file() == DB_SUCCESS) { + + /* An ISL file was found with contents. */ + if (df_rem_gen.open_read_only(false) != DB_SUCCESS + || df_rem_gen.validate_for_recovery() != DB_SUCCESS) { + + /* Assume that this ISL file is intended to be used. + Do not continue looking for another if this file + cannot be opened or is not a valid IBD file. */ + ib::error() << "ISL file '" + << df_rem_gen.link_filepath() + << "' was found but the linked file '" + << df_rem_gen.filepath() + << "' could not be opened or is not correct."; + return(false); + } - /* For uncompressed pages, the page size must be equal - to UNIV_PAGE_SIZE. */ - if (page_size == UNIV_PAGE_SIZE) { - uncompressed_ok = !buf_page_is_corrupted( - false, page, 0); + /* Use this file if it has the space_id from the MLOG + record. */ + if (df_rem_gen.space_id() == space_id) { + df.set_filepath(df_rem_gen.filepath()); + df.open_read_only(false); + return(true); + } + + /* Since old MLOG records can use the same basename in + multiple CREATE/DROP sequences, this ISL file could be + pointing to a later version of this basename.ibd file + which has a different space_id. Keep looking. */ + } + + /* Look for a remote file-per-table tablespace. */ + if (sep_found == 2) { + df_rem_per.set_name(db); + if (df_rem_per.open_link_file() == DB_SUCCESS) { + + /* An ISL file was found with contents. */ + if (df_rem_per.open_read_only(false) != DB_SUCCESS + || df_rem_per.validate_for_recovery() + != DB_SUCCESS) { + + /* Assume that this ISL file is intended to + be used. Do not continue looking for another + if this file cannot be opened or is not + a valid IBD file. 
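fil_ibd_discover() works through those locations in a fixed priority, default location first, then the ISL target, then the path recorded in the redo log, and accepts the first file whose first page carries the expected space id. The sketch below restates that first-match-wins loop; page0_has_space_id() is a stand-in stub, not an InnoDB symbol.

        #include <cstdint>
        #include <iostream>
        #include <string>
        #include <vector>

        /* Stand-in for "read page 0 and compare the stored space id"; a real
        check would open the file as sketched earlier. */
        static bool page0_has_space_id(const std::string& path, std::uint32_t id)
        {
                (void) path; (void) id;
                return(false);                  /* nothing matches in this demo */
        }

        /* Probe candidates in priority order. First match wins. */
        static std::string discover_datafile(const std::vector<std::string>& candidates,
                                             std::uint32_t space_id)
        {
                for (const std::string& path : candidates) {
                        if (page0_has_space_id(path, space_id)) {
                                return(path);
                        }
                }
                return(std::string());          /* not discovered */
        }

        int main()
        {
                std::string hit = discover_datafile(
                        {"/var/lib/mysql/test/t1.ibd",  /* 1. default location */
                         "/mnt/fast/test/t1.ibd",       /* 2. ISL target */
                         "/old/datadir/test/t1.ibd"},   /* 3. redo log path */
                        42);

                std::cout << (hit.empty() ? "not found" : hit) << "\n";
                return(0);
        }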
*/ + ib::error() << "ISL file '" + << df_rem_per.link_filepath() + << "' was found but the linked file '" + << df_rem_per.filepath() + << "' could not be opened or is" + " not correct."; + return(false); } - bool compressed_ok = false; - if (page_size <= UNIV_PAGE_SIZE_DEF) { - compressed_ok = !buf_page_is_corrupted( - false, page, page_size); + /* Use this file if it has the space_id from the + MLOG record. */ + if (df_rem_per.space_id() == space_id) { + df.set_filepath(df_rem_per.filepath()); + df.open_read_only(false); + return(true); } - if (uncompressed_ok || compressed_ok) { - - ulint space_id = mach_read_from_4(page - + FIL_PAGE_SPACE_ID); - - if (space_id > 0) { - ib_logf(IB_LOG_LEVEL_INFO, - "VALID: space:%lu " - "page_no:%lu page_size:%lu", - space_id, j, page_size); - verify[space_id]++; - ++valid_pages; - } - } + /* Since old MLOG records can use the same basename + in multiple CREATE/DROP TABLE sequences, this ISL + file could be pointing to a later version of this + basename.ibd file which has a different space_id. + Keep looking. */ } + } - ut_free(buf); - - ib_logf(IB_LOG_LEVEL_INFO, "Page size: %lu, Possible space_id " - "count:%lu", page_size, (ulint) verify.size()); - - const ulint pages_corrupted = 3; - for (ulint missed = 0; missed <= pages_corrupted; ++missed) { - - for (std::map::iterator - m = verify.begin(); m != verify.end(); ++m ) { - - ib_logf(IB_LOG_LEVEL_INFO, "space_id:%lu, " - "Number of pages matched: %lu/%lu " - "(%lu)", m->first, m->second, - valid_pages, page_size); - - if (m->second == (valid_pages - missed)) { - - ib_logf(IB_LOG_LEVEL_INFO, - "Chosen space:%lu\n", m->first); - - fsp->id = m->first; - return(true); - } - } - - } + /* No ISL files were found in the default location. Use the location + given in the redo log. */ + if (df.open_read_only(false) == DB_SUCCESS + && df.validate_for_recovery() == DB_SUCCESS + && df.space_id() == space_id) { + return(true); } + /* A datafile was not discovered for the filename given. */ return(false); } -/*******************************************************************//** -Finds the given page_no of the given space id from the double write buffer, -and copies it to the corresponding .ibd file. -@return true if copy was successful, or false. */ -bool -fil_user_tablespace_restore_page( -/*==============================*/ - fsp_open_info* fsp, /* in: contains space id and .ibd - file information */ - ulint page_no) /* in: page_no to obtain from double - write buffer */ +/** Open an ibd tablespace and add it to the InnoDB data structures. +This is similar to fil_ibd_open() except that it is used while processing +the REDO log, so the data dictionary is not available and very little +validation is done. The tablespace name is extracred from the +dbname/tablename.ibd portion of the filename, which assumes that the file +is a file-per-table tablespace. Any name will do for now. General +tablespace names will be read from the dictionary after it has been +recovered. The tablespace flags are read at this time from the first page +of the file in validate_for_recovery(). 
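As noted further down, during redo processing the tablespace name is taken from the dbname/tablename.ibd tail of the filename. A hedged sketch of that conversion with std::filesystem follows; it is a portable restatement, not the memrchr-based fil_path_to_space_name() added by the patch.

        #include <filesystem>
        #include <iostream>
        #include <string>

        namespace fs = std::filesystem;

        /* "/data/dir/dbname/tablename.ibd" -> "dbname/tablename". The space
        name always uses '/', regardless of the platform path separator. */
        static std::string path_to_space_name(const fs::path& filepath)
        {
                std::string     table = filepath.stem().string();
                std::string     db = filepath.parent_path().filename().string();

                return(db + "/" + table);
        }

        int main()
        {
                std::cout << path_to_space_name("/var/lib/mysql/test/t1.ibd") << "\n";
                return(0);
        }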
+@param[in] space_id tablespace ID +@param[in] filename path/to/databasename/tablename.ibd +@param[out] space the tablespace, or NULL on error +@return status of the operation */ +enum fil_load_status +fil_ibd_load( + ulint space_id, + const char* filename, + fil_space_t*& space) { - bool err; - ulint flags; - ulint zip_size; - ulint page_size; - ulint buflen; - byte* page; + /* If the a space is already in the file system cache with this + space ID, then there is nothing to do. */ + mutex_enter(&fil_system->mutex); + space = fil_space_get_by_id(space_id); + mutex_exit(&fil_system->mutex); - ib_logf(IB_LOG_LEVEL_INFO, "Restoring page %lu of tablespace %lu", - page_no, fsp->id); + if (space != NULL) { + /* Compare the filename we are trying to open with the + filename from the first node of the tablespace we opened + previously. Fail if it is different. */ + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); - // find if double write buffer has page_no of given space id - page = recv_sys->dblwr.find_page(fsp->id, page_no); + if (0 != strcmp(innobase_basename(filename), + innobase_basename(node->name))) { + ib::info() << "Ignoring data file '" << filename + << "' with space ID " << space->id + << ". Another data file called " << node->name + << " exists with the same space ID."; - if (!page) { - ib_logf(IB_LOG_LEVEL_WARN, "Doublewrite does not have " - "page_no=%lu of space: %lu", page_no, fsp->id); - err = false; - goto out; + space = NULL; + return(FIL_LOAD_ID_CHANGED); + } + return(FIL_LOAD_OK); } - flags = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + page); - zip_size = fsp_flags_get_zip_size(flags); - page_size = fsp_flags_get_page_size(flags); + /* If the filepath in the redo log is a default location in or + under the datadir, then just try to open it there. */ + Datafile file; + file.set_filepath(filename); - ut_ad(page_no == page_get_page_no(page)); + Folder folder(filename, dirname_length(filename)); + if (folder_mysql_datadir >= folder) { + file.open_read_only(false); + } - buflen = zip_size ? zip_size: page_size; - - ib_logf(IB_LOG_LEVEL_INFO, "Writing %lu bytes into file: %s", - buflen, fsp->filepath); - - err = os_file_write(fsp->filepath, fsp->file, page, - (zip_size ? zip_size : page_size) * page_no, - buflen); - - os_file_flush(fsp->file); -out: - return(err); -} - -/********************************************************************//** -Opens an .ibd file and adds the associated single-table tablespace to the -InnoDB fil0fil.cc data structures. -Set fsp->success to TRUE if tablespace is valid, FALSE if not. 
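fil_ibd_load() later rejects any datafile smaller than the initial allocation, since every .ibd starts out with at least four pages. The sketch below shows that size gate with std::filesystem::file_size; the 16 KiB page size is an assumed default.

        #include <cstdint>
        #include <filesystem>
        #include <iostream>

        namespace fs = std::filesystem;

        /* Anything smaller than four pages cannot be a usable tablespace. */
        static bool big_enough(const fs::path& ibd, std::uintmax_t page_size = 16384)
        {
                std::error_code ec;
                std::uintmax_t  size = fs::file_size(ibd, ec);

                if (ec) return(false);          /* could not stat the file */
                return(size >= 4 * page_size);
        }

        int main()
        {
                std::cout << (big_enough("demo.ibd") ? "ok" : "too small or missing")
                          << "\n";
                return(0);
        }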
*/ -static -void -fil_validate_single_table_tablespace( -/*=================================*/ - const char* tablename, /*!< in: database/tablename */ - fsp_open_info* fsp) /*!< in/out: tablespace info */ -{ - bool restore_attempted = false; - -check_first_page: - fsp->success = TRUE; - fsp->encryption_error = 0; - if (const char* check_msg = fil_read_first_page( - fsp->file, FALSE, &fsp->flags, &fsp->id, -#ifdef UNIV_LOG_ARCHIVE - &fsp->arch_log_no, &fsp->arch_log_no, -#endif /* UNIV_LOG_ARCHIVE */ - &fsp->lsn, &fsp->lsn, &fsp->crypt_data)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "%s in tablespace %s (table %s)", - check_msg, fsp->filepath, tablename); - fsp->success = FALSE; - } - - if (!fsp->success) { - if (!restore_attempted) { - if (!fil_user_tablespace_find_space_id(fsp)) { - return; - } - restore_attempted = true; - - if (fsp->id > 0 - && !fil_user_tablespace_restore_page(fsp, 0)) { - return; - } - goto check_first_page; + if (!file.is_open()) { + /* The file has been moved or it is a remote datafile. */ + if (!fil_ibd_discover(space_id, file) + || !file.is_open()) { + return(FIL_LOAD_NOT_FOUND); } - return; - } - - if (fsp->id == ULINT_UNDEFINED || fsp->id == 0) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace is not sensible;" - " Table: %s Space ID: %lu Filepath: %s\n", - tablename, (ulong) fsp->id, fsp->filepath); - fsp->success = FALSE; - return; } - mutex_enter(&fil_system->mutex); - fil_space_t* space = fil_space_get_by_id(fsp->id); - mutex_exit(&fil_system->mutex); - if (space != NULL) { - char* prev_filepath = fil_space_get_first_path(fsp->id); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Attempted to open a previously opened tablespace. " - "Previous tablespace %s uses space ID: %lu at " - "filepath: %s. Cannot open tablespace %s which uses " - "space ID: %lu at filepath: %s", - space->name, (ulong) space->id, prev_filepath, - tablename, (ulong) fsp->id, fsp->filepath); - - mem_free(prev_filepath); - fsp->success = FALSE; - return; - } - - fsp->success = TRUE; -} - - -/********************************************************************//** -Opens an .ibd file and adds the associated single-table tablespace to the -InnoDB fil0fil.cc data structures. */ -static -void -fil_load_single_table_tablespace( -/*=============================*/ - const char* dbname, /*!< in: database name */ - const char* filename) /*!< in: file name (not a path), - including the .ibd or .isl extension */ -{ - char* tablename; - ulint tablename_len; - ulint dbname_len = strlen(dbname); - ulint filename_len = strlen(filename); - fsp_open_info def; - fsp_open_info remote; os_offset_t size; - fil_space_t* space; - - memset(&def, 0, sizeof(def)); - memset(&remote, 0, sizeof(remote)); - - /* The caller assured that the extension is ".ibd" or ".isl". */ - ut_ad(0 == memcmp(filename + filename_len - 4, ".ibd", 4) - || 0 == memcmp(filename + filename_len - 4, ".isl", 4)); - /* Build up the tablename in the standard form database/table. */ - tablename = static_cast( - mem_alloc(dbname_len + filename_len + 2)); - - /* When lower_case_table_names = 2 it is possible that the - dbname is in upper case ,but while storing it in fil_space_t - we must convert it into lower case */ - sprintf(tablename, "%s" , dbname); - tablename[dbname_len] = '\0'; - - if (lower_case_file_system) { - dict_casedn_str(tablename); - } - - sprintf(tablename+dbname_len,"/%s",filename); - tablename_len = strlen(tablename) - strlen(".ibd"); - tablename[tablename_len] = '\0'; - - /* There may be both .ibd and .isl file in the directory. 
- And it is possible that the .isl file refers to a different - .ibd file. If so, we open and compare them the first time - one of them is sent to this function. So if this table has - already been loaded, there is nothing to do.*/ - mutex_enter(&fil_system->mutex); - space = fil_space_get_by_name(tablename); - if (space) { - mem_free(tablename); - mutex_exit(&fil_system->mutex); - return; - } - mutex_exit(&fil_system->mutex); - - /* Build up the filepath of the .ibd tablespace in the datadir. - This must be freed independent of def.success. */ - def.filepath = fil_make_ibd_name(tablename, false); - -#ifdef __WIN__ -# ifndef UNIV_HOTBACKUP - /* If lower_case_table_names is 0 or 2, then MySQL allows database - directory names with upper case letters. On Windows, all table and - database names in InnoDB are internally always in lower case. Put the - file path to lower case, so that we are consistent with InnoDB's - internal data dictionary. */ - - dict_casedn_str(def.filepath); -# endif /* !UNIV_HOTBACKUP */ -#endif - - /* Check for a link file which locates a remote tablespace. */ - remote.success = fil_open_linked_file( - tablename, &remote.filepath, &remote.file, FALSE); - - /* Read the first page of the remote tablespace */ - if (remote.success) { - fil_validate_single_table_tablespace(tablename, &remote); - if (!remote.success) { - os_file_close(remote.file); - mem_free(remote.filepath); + /* Read and validate the first page of the tablespace. + Assign a tablespace name based on the tablespace type. */ + switch (file.validate_for_recovery()) { + os_offset_t minimum_size; + case DB_SUCCESS: + if (file.space_id() != space_id) { + ib::info() << "Ignoring data file '" + << file.filepath() + << "' with space ID " << file.space_id() + << ", since the redo log references " + << file.filepath() << " with space ID " + << space_id << "."; + return(FIL_LOAD_ID_CHANGED); } - } + /* Get and test the file size. */ + size = os_file_get_size(file.handle()); - /* Try to open the tablespace in the datadir. */ - def.file = os_file_create_simple_no_error_handling( - innodb_file_data_key, def.filepath, OS_FILE_OPEN, - OS_FILE_READ_WRITE, &def.success, FALSE); - - /* Read the first page of the remote tablespace */ - if (def.success) { - fil_validate_single_table_tablespace(tablename, &def); - if (!def.success) { - os_file_close(def.file); - } - } + /* Every .ibd file is created >= 4 pages in size. + Smaller files cannot be OK. */ + minimum_size = FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE; - if (!def.success && !remote.success) { + if (size == static_cast(-1)) { + /* The following call prints an error message */ + os_file_get_last_error(true); - if (def.encryption_error || remote.encryption_error) { - fprintf(stderr, - "InnoDB: Error: could not open single-table" - " tablespace file %s. Encryption error!\n", def.filepath); - return; - } + ib::error() << "Could not measure the size of" + " single-table tablespace file '" + << file.filepath() << "'"; - /* The following call prints an error message */ - os_file_get_last_error(true); - fprintf(stderr, - "InnoDB: Error: could not open single-table" - " tablespace file %s\n", def.filepath); - - if (!strncmp(filename, - tmp_file_prefix, tmp_file_prefix_length)) { - /* Ignore errors for #sql tablespaces. 
*/ - mem_free(tablename); - if (remote.filepath) { - mem_free(remote.filepath); - } - if (def.filepath) { - mem_free(def.filepath); - } - return; - } -no_good_file: - fprintf(stderr, - "InnoDB: We do not continue the crash recovery," - " because the table may become\n" - "InnoDB: corrupt if we cannot apply the log" - " records in the InnoDB log to it.\n" - "InnoDB: To fix the problem and start mysqld:\n" - "InnoDB: 1) If there is a permission problem" - " in the file and mysqld cannot\n" - "InnoDB: open the file, you should" - " modify the permissions.\n" - "InnoDB: 2) If the table is not needed, or you" - " can restore it from a backup,\n" - "InnoDB: then you can remove the .ibd file," - " and InnoDB will do a normal\n" - "InnoDB: crash recovery and ignore that table.\n" - "InnoDB: 3) If the file system or the" - " disk is broken, and you cannot remove\n" - "InnoDB: the .ibd file, you can set" - " innodb_force_recovery > 0 in my.cnf\n" - "InnoDB: and force InnoDB to continue crash" - " recovery here.\n"); -will_not_choose: - mem_free(tablename); - if (remote.filepath) { - mem_free(remote.filepath); - } - if (def.filepath) { - mem_free(def.filepath); - } - - if (srv_force_recovery > 0) { - ib_logf(IB_LOG_LEVEL_INFO, - "innodb_force_recovery was set to %lu. " - "Continuing crash recovery even though we " - "cannot access the .ibd file of this table.", - srv_force_recovery); - return; + } else if (size < minimum_size) { +#ifndef UNIV_HOTBACKUP + ib::error() << "The size of tablespace file '" + << file.filepath() << "' is only " << size + << ", should be at least " << minimum_size + << "!"; +#else + /* In MEB, we work around this error. */ + file.set_space_id(ULINT_UNDEFINED); + file.set_flags(0); + break; +#endif /* !UNIV_HOTBACKUP */ + } else { + /* Everything is fine so far. */ + break; } - exit(1); - } - - if (def.success && remote.success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespaces for %s have been found in two places;\n" - "Location 1: SpaceID: %lu LSN: %lu File: %s\n" - "Location 2: SpaceID: %lu LSN: %lu File: %s\n" - "You must delete one of them.", - tablename, (ulong) def.id, (ulong) def.lsn, - def.filepath, (ulong) remote.id, (ulong) remote.lsn, - remote.filepath); - - def.success = FALSE; - os_file_close(def.file); - os_file_close(remote.file); - goto will_not_choose; - } - - /* At this point, only one tablespace is open */ - ut_a(def.success == !remote.success); - - fsp_open_info* fsp = def.success ? &def : &remote; - - /* Get and test the file size. */ - size = os_file_get_size(fsp->file); + /* Fall through to error handling */ - if (size == (os_offset_t) -1) { - /* The following call prints an error message */ - os_file_get_last_error(true); + case DB_TABLESPACE_EXISTS: + return(FIL_LOAD_INVALID); - ib_logf(IB_LOG_LEVEL_ERROR, - "could not measure the size of single-table " - "tablespace file %s", fsp->filepath); - - os_file_close(fsp->file); - goto no_good_file; + default: + return(FIL_LOAD_NOT_FOUND); } - /* Every .ibd file is created >= 4 pages in size. Smaller files - cannot be ok. 
*/ - ulong minimum_size = FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE; - if (size < minimum_size) { -#ifndef UNIV_HOTBACKUP - ib_logf(IB_LOG_LEVEL_ERROR, - "The size of single-table tablespace file %s " - "is only " UINT64PF ", should be at least %lu!", - fsp->filepath, size, minimum_size); - os_file_close(fsp->file); - goto no_good_file; -#else - fsp->id = ULINT_UNDEFINED; - fsp->flags = 0; -#endif /* !UNIV_HOTBACKUP */ - } + ut_ad(space == NULL); #ifdef UNIV_HOTBACKUP - if (fsp->id == ULINT_UNDEFINED || fsp->id == 0) { + if (file.space_id() == ULINT_UNDEFINED || file.space_id() == 0) { char* new_path; - fprintf(stderr, - "InnoDB: Renaming tablespace %s of id %lu,\n" - "InnoDB: to %s_ibbackup_old_vers_\n" - "InnoDB: because its size %" PRId64 " is too small" - " (< 4 pages 16 kB each),\n" - "InnoDB: or the space id in the file header" - " is not sensible.\n" - "InnoDB: This can happen in an mysqlbackup run," - " and is not dangerous.\n", - fsp->filepath, fsp->id, fsp->filepath, size); - os_file_close(fsp->file); - - new_path = fil_make_ibbackup_old_name(fsp->filepath); + ib::info() << "Renaming tablespace file '" << file.filepath() + << "' with space ID " << file.space_id() << " to " + << file.name() << "_ibbackup_old_vers_" + " because its size " << size() << " is too small" + " (< 4 pages 16 kB each), or the space id in the" + " file header is not sensible. This can happen in" + " an mysqlbackup run, and is not dangerous."; + file.close(); + + new_path = fil_make_ibbackup_old_name(file.filepath()); bool success = os_file_rename( - innodb_file_data_key, fsp->filepath, new_path); + innodb_data_file_key, file.filepath(), new_path); ut_a(success); - mem_free(new_path); + ut_free(new_path); - goto func_exit_after_close; + return(FIL_LOAD_ID_CHANGED); } /* A backup may contain the same space several times, if the space got @@ -4642,78 +4452,54 @@ will_not_choose: destroy valuable data. */ mutex_enter(&fil_system->mutex); + space = fil_space_get_by_id(space_id); + mutex_exit(&fil_system->mutex); - space = fil_space_get_by_id(fsp->id); - - if (space) { - char* new_path; - - fprintf(stderr, - "InnoDB: Renaming tablespace %s of id %lu,\n" - "InnoDB: to %s_ibbackup_old_vers_\n" - "InnoDB: because space %s with the same id\n" - "InnoDB: was scanned earlier. This can happen" - " if you have renamed tables\n" - "InnoDB: during an mysqlbackup run.\n", - fsp->filepath, fsp->id, fsp->filepath, - space->name); - os_file_close(fsp->file); - - new_path = fil_make_ibbackup_old_name(fsp->filepath); + if (space != NULL) { + ib::info() << "Renaming data file '" << file.filepath() + << "' with space ID " << space_id << " to " + << file.name() + << "_ibbackup_old_vers_ because space " + << space->name << " with the same id was scanned" + " earlier. This can happen if you have renamed tables" + " during an mysqlbackup run."; + file.close(); - mutex_exit(&fil_system->mutex); + char* new_path = fil_make_ibbackup_old_name(file.filepath()); bool success = os_file_rename( - innodb_file_data_key, fsp->filepath, new_path); + innodb_data_file_key, file.filepath(), new_path); ut_a(success); - mem_free(new_path); - - goto func_exit_after_close; + ut_free(new_path); + return(FIL_LOAD_OK); } - mutex_exit(&fil_system->mutex); #endif /* UNIV_HOTBACKUP */ - ibool file_space_create_success = fil_space_create( - tablename, fsp->id, fsp->flags, FIL_TABLESPACE, - fsp->crypt_data); - if (!file_space_create_success) { - if (srv_force_recovery > 0) { - fprintf(stderr, - "InnoDB: innodb_force_recovery was set" - " to %lu. 
Continuing crash recovery\n" - "InnoDB: even though the tablespace" - " creation of this table failed.\n", - srv_force_recovery); - goto func_exit; - } + bool is_temp = FSP_FLAGS_GET_TEMPORARY(file.flags()); + space = fil_space_create( + file.name(), space_id, file.flags(), + is_temp ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, + file.get_crypt_info()); - /* Exit here with a core dump, stack, etc. */ - ut_a(file_space_create_success); + if (space == NULL) { + return(FIL_LOAD_INVALID); } + ut_ad(space->id == file.space_id()); + ut_ad(space->id == space_id); + /* We do not use the size information we have about the file, because the rounding formula for extents and pages is somewhat complex; we let fil_node_open() do that task. */ - if (!fil_node_create(fsp->filepath, 0, fsp->id, FALSE)) { + if (!fil_node_create_low(file.filepath(), 0, space, + false, true, false)) { ut_error; } -func_exit: - os_file_close(fsp->file); - -#ifdef UNIV_HOTBACKUP -func_exit_after_close: -#else - ut_ad(!mutex_own(&fil_system->mutex)); -#endif - mem_free(tablename); - if (remote.success) { - mem_free(remote.filepath); - } - mem_free(def.filepath); + return(FIL_LOAD_OK); } /***********************************************************************//** @@ -4722,7 +4508,6 @@ directory. We retry 100 times if os_file_readdir_next_file() returns -1. The idea is to read as much good data as we can and jump over bad data. @return 0 if ok, -1 if error even after the retries, 1 if at the end of the directory */ -static int fil_file_readdir_next_file( /*=======================*/ @@ -4741,10 +4526,10 @@ fil_file_readdir_next_file( return(ret); } - ib_logf(IB_LOG_LEVEL_ERROR, - "os_file_readdir_next_file() returned -1 in " - "directory %s, crash recovery may have failed " - "for some .ibd files!", dirname); + ib::error() << "os_file_readdir_next_file() returned -1 in" + " directory " << dirname + << ", crash recovery may have failed" + " for some .ibd files!"; *err = DB_ERROR; } @@ -4752,196 +4537,6 @@ fil_file_readdir_next_file( return(-1); } -/********************************************************************//** -At the server startup, if we need crash recovery, scans the database -directories under the MySQL datadir, looking for .ibd files. Those files are -single-table tablespaces. We need to know the space id in each of them so that -we know into which file we should look to check the contents of a page stored -in the doublewrite buffer, also to know where to apply log records where the -space id is != 0. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_load_single_table_tablespaces(void) -/*===================================*/ -{ - int ret; - char* dbpath = NULL; - ulint dbpath_len = 100; - os_file_dir_t dir; - os_file_dir_t dbdir; - os_file_stat_t dbinfo; - os_file_stat_t fileinfo; - dberr_t err = DB_SUCCESS; - - /* The datadir of MySQL is always the default directory of mysqld */ - - dir = os_file_opendir(fil_path_to_mysql_datadir, TRUE); - - if (dir == NULL) { - - return(DB_ERROR); - } - - dbpath = static_cast(mem_alloc(dbpath_len)); - - /* Scan all directories under the datadir. They are the database - directories of MySQL. 
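fil_file_readdir_next_file() retries a failing directory read up to 100 times so that one bad entry does not abort the whole recovery scan. The POSIX sketch below shows the same bounded-retry idea around readdir(); the retry count and error handling are illustrative.

        #include <cerrno>
        #include <cstdio>
        #include <dirent.h>

        /* Read the next directory entry, retrying a bounded number of times
        so that a single bad entry does not stop the whole scan. */
        static struct dirent* next_entry_with_retry(DIR* dir, int max_retries = 100)
        {
                for (int i = 0; i <= max_retries; ++i) {
                        errno = 0;
                        struct dirent* e = readdir(dir);
                        if (e != NULL || errno == 0) {
                                return(e);      /* an entry, or clean end of dir */
                        }
                        perror("readdir");      /* transient error: try again */
                }
                return(NULL);                   /* give up after max_retries */
        }

        int main()
        {
                DIR* dir = opendir(".");
                if (dir == NULL) { perror("opendir"); return(1); }

                while (struct dirent* e = next_entry_with_retry(dir)) {
                        printf("%s\n", e->d_name);
                }
                closedir(dir);
                return(0);
        }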
*/ - - ret = fil_file_readdir_next_file(&err, fil_path_to_mysql_datadir, dir, - &dbinfo); - while (ret == 0) { - ulint len; - /* printf("Looking at %s in datadir\n", dbinfo.name); */ - - if (dbinfo.type == OS_FILE_TYPE_FILE - || dbinfo.type == OS_FILE_TYPE_UNKNOWN) { - - goto next_datadir_item; - } - - /* We found a symlink or a directory; try opening it to see - if a symlink is a directory */ - - len = strlen(fil_path_to_mysql_datadir) - + strlen (dbinfo.name) + 2; - if (len > dbpath_len) { - dbpath_len = len; - - if (dbpath) { - mem_free(dbpath); - } - - dbpath = static_cast(mem_alloc(dbpath_len)); - } - ut_snprintf(dbpath, dbpath_len, - "%s/%s", fil_path_to_mysql_datadir, dbinfo.name); - srv_normalize_path_for_win(dbpath); - - dbdir = os_file_opendir(dbpath, FALSE); - - if (dbdir != NULL) { - - /* We found a database directory; loop through it, - looking for possible .ibd files in it */ - - ret = fil_file_readdir_next_file(&err, dbpath, dbdir, - &fileinfo); - while (ret == 0) { - - if (fileinfo.type == OS_FILE_TYPE_DIR) { - - goto next_file_item; - } - - /* We found a symlink or a file */ - if (strlen(fileinfo.name) > 4 - && (0 == strcmp(fileinfo.name - + strlen(fileinfo.name) - 4, - ".ibd") - || 0 == strcmp(fileinfo.name - + strlen(fileinfo.name) - 4, - ".isl"))) { - /* The name ends in .ibd or .isl; - try opening the file */ - fil_load_single_table_tablespace( - dbinfo.name, fileinfo.name); - } -next_file_item: - ret = fil_file_readdir_next_file(&err, - dbpath, dbdir, - &fileinfo); - } - - if (0 != os_file_closedir(dbdir)) { - fputs("InnoDB: Warning: could not" - " close database directory ", stderr); - ut_print_filename(stderr, dbpath); - putc('\n', stderr); - - err = DB_ERROR; - } - } - -next_datadir_item: - ret = fil_file_readdir_next_file(&err, - fil_path_to_mysql_datadir, - dir, &dbinfo); - } - - mem_free(dbpath); - - if (0 != os_file_closedir(dir)) { - fprintf(stderr, - "InnoDB: Error: could not close MySQL datadir\n"); - - return(DB_ERROR); - } - - return(err); -} - -/*******************************************************************//** -Returns TRUE if a single-table tablespace does not exist in the memory cache, -or is being deleted there. -@return TRUE if does not exist or is being deleted */ -UNIV_INTERN -ibool -fil_tablespace_deleted_or_being_deleted_in_mem( -/*===========================================*/ - ulint id, /*!< in: space id */ - ib_int64_t version)/*!< in: tablespace_version should be this; if - you pass -1 as the value of this, then this - parameter is ignored */ -{ - fil_space_t* space; - - ut_ad(fil_system); - - mutex_enter(&fil_system->mutex); - - space = fil_space_get_by_id(id); - - if (space == NULL || space->stop_new_ops) { - mutex_exit(&fil_system->mutex); - - return(TRUE); - } - - if (version != ((ib_int64_t)-1) - && space->tablespace_version != version) { - mutex_exit(&fil_system->mutex); - - return(TRUE); - } - - mutex_exit(&fil_system->mutex); - - return(FALSE); -} - -/*******************************************************************//** -Returns TRUE if a single-table tablespace exists in the memory cache. 
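The removed fil_load_single_table_tablespaces() above walks every database directory under the datadir and feeds each *.ibd or *.isl name to the loader. A compact sketch of that two-level walk with std::filesystem follows; error handling is reduced to skipping entries that cannot be read.

        #include <filesystem>
        #include <iostream>

        namespace fs = std::filesystem;

        /* Scan <datadir>/<db>/ for .ibd and .isl files, the way crash recovery
        used to discover file-per-table tablespaces. */
        static void scan_datadir(const fs::path& datadir)
        {
                std::error_code ec;

                for (const fs::directory_entry& db :
                     fs::directory_iterator(datadir, ec)) {
                        if (!db.is_directory(ec)) continue;     /* skip plain files */

                        for (const fs::directory_entry& f :
                             fs::directory_iterator(db.path(), ec)) {
                                fs::path ext = f.path().extension();
                                if (ext == ".ibd" || ext == ".isl") {
                                        std::cout << db.path().filename().string()
                                                  << "/"
                                                  << f.path().filename().string()
                                                  << "\n";
                                }
                        }
                }
        }

        int main()
        {
                scan_datadir(".");      /* illustrative; the server uses its datadir */
                return(0);
        }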
-@return TRUE if exists */ -UNIV_INTERN -ibool -fil_tablespace_exists_in_mem( -/*=========================*/ - ulint id) /*!< in: space id */ -{ - fil_space_t* space; - - ut_ad(fil_system); - - mutex_enter(&fil_system->mutex); - - space = fil_space_get_by_id(id); - - mutex_exit(&fil_system->mutex); - - return(space != NULL); -} - /*******************************************************************//** Report that a tablespace for a table was not found. */ static @@ -4951,51 +4546,39 @@ fil_report_missing_tablespace( const char* name, /*!< in: table name */ ulint space_id) /*!< in: table's space id */ { - char index_name[MAX_FULL_NAME_LEN + 1]; - - innobase_format_name(index_name, sizeof(index_name), name, TRUE); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Table %s in the InnoDB data dictionary has tablespace id %lu, " - "but tablespace with that id or name does not exist. Have " - "you deleted or moved .ibd files? This may also be a table " - "created with CREATE TEMPORARY TABLE whose .ibd and .frm " - "files MySQL automatically removed, but the table still " - "exists in the InnoDB internal data dictionary.", - name, space_id); + ib::error() << "Table " << name + << " in the InnoDB data dictionary has tablespace id " + << space_id << "," + " but tablespace with that id or name does not exist. Have" + " you deleted or moved .ibd files? This may also be a table" + " created with CREATE TEMPORARY TABLE whose .ibd and .frm" + " files MySQL automatically removed, but the table still" + " exists in the InnoDB internal data dictionary."; } -/*******************************************************************//** -Returns TRUE if a matching tablespace exists in the InnoDB tablespace memory -cache. Note that if we have not done a crash recovery at the database startup, -there may be many tablespaces which are not yet in the memory cache. -@return TRUE if a matching tablespace exists in the memory cache */ -UNIV_INTERN -ibool +/** Returns true if a matching tablespace exists in the InnoDB tablespace +memory cache. Note that if we have not done a crash recovery at the database +startup, there may be many tablespaces which are not yet in the memory cache. +@param[in] id Tablespace ID +@param[in] name Tablespace name used in fil_space_create(). +@param[in] print_error_if_does_not_exist + Print detailed error information to the +error log if a matching tablespace is not found from memory. +@param[in] adjust_space Whether to adjust space id on mismatch +@param[in] heap Heap memory +@param[in] table_id table id +@return true if a matching tablespace exists in the memory cache */ +bool fil_space_for_table_exists_in_mem( -/*==============================*/ - ulint id, /*!< in: space id */ - const char* name, /*!< in: table name used in - fil_space_create(). 
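The dominant mechanical change in this hunk, and in most of the file, is replacing fprintf()/ib_logf() reporting with the stream-style ib::error()/ib::warn() objects, which collect one message from << pieces and emit it when the temporary object is destroyed at the end of the statement. The following is only a minimal analogue of that idiom to show the shape of the API, not InnoDB's real logger (which also carries severity and error-code handling):

// Illustrative sketch of a stream-style logger: the message is accumulated
// into an ostringstream and written as a single line when the temporary
// object goes out of scope at the end of the statement.
#include <iostream>
#include <sstream>
#include <string>

class error_logger {
public:
    error_logger() = default;
    ~error_logger() { std::cerr << "[ERROR] " << m_oss.str() << std::endl; }

    template <typename T>
    error_logger& operator<<(const T& value)
    {
        m_oss << value;
        return *this;
    }

private:
    std::ostringstream m_oss;
};

int main()
{
    int space_id = 42;
    /* Usage mirrors: ib::error() << "..." << value << "..."; */
    error_logger() << "Table test/t1 in the InnoDB data dictionary has"
                      " tablespace id " << space_id
                   << ", but tablespace with that id does not exist.";
}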
Either the - standard 'dbname/tablename' format - or table->dir_path_of_temp_table */ - ibool mark_space, /*!< in: in crash recovery, at database - startup we mark all spaces which have - an associated table in the InnoDB - data dictionary, so that - we can print a warning about orphaned - tablespaces */ - ibool print_error_if_does_not_exist, - /*!< in: print detailed error - information to the .err log if a - matching tablespace is not found from - memory */ - bool adjust_space, /*!< in: whether to adjust space id - when find table space mismatch */ - mem_heap_t* heap, /*!< in: heap memory */ - table_id_t table_id) /*!< in: table id */ -{ - fil_space_t* fnamespace; + ulint id, + const char* name, + bool print_error_if_does_not_exist, + bool adjust_space, + mem_heap_t* heap, + table_id_t table_id, + dict_table_t* table) +{ + fil_space_t* fnamespace = NULL; fil_space_t* space; ut_ad(fil_system); @@ -5006,27 +4589,60 @@ fil_space_for_table_exists_in_mem( space = fil_space_get_by_id(id); - /* Look if there is a space with the same name; the name is the - directory path from the datadir to the file */ + /* If tablespace contains encryption information + copy it also to table. */ + if (space && space->crypt_data && + table && !table->crypt_data) { + table->crypt_data = space->crypt_data; + } + + if (space != NULL + && FSP_FLAGS_GET_SHARED(space->flags) + && adjust_space + && srv_sys_tablespaces_open + && 0 == strncmp(space->name, general_space_name, + strlen(general_space_name))) { + /* This name was assigned during recovery in fil_ibd_load(). + This general tablespace was opened from an MLOG_FILE_NAME log + entry where the tablespace name does not exist. Replace the + temporary name with this name and return this space. */ + HASH_DELETE(fil_space_t, name_hash, fil_system->name_hash, + ut_fold_string(space->name), space); + ut_free(space->name); + space->name = mem_strdup(name); + HASH_INSERT(fil_space_t, name_hash, fil_system->name_hash, + ut_fold_string(space->name), space); + + mutex_exit(&fil_system->mutex); + + return(true); + } - fnamespace = fil_space_get_by_name(name); - if (space && space == fnamespace) { - /* Found */ + if (space != NULL) { + if (FSP_FLAGS_GET_SHARED(space->flags) + && !srv_sys_tablespaces_open) { - if (mark_space) { - space->mark = TRUE; + /* No need to check the name */ + mutex_exit(&fil_system->mutex); + return(true); } - mutex_exit(&fil_system->mutex); + /* If this space has the expected name, use it. */ + fnamespace = fil_space_get_by_name(name); + if (space == fnamespace) { + /* Found */ - return(TRUE); + mutex_exit(&fil_system->mutex); + + return(true); + } } /* Info from "fnamespace" comes from the ibd file itself, it can - be different from data obtained from System tables since it is - not transactional. If adjust_space is set, and the mismatching - space are between a user table and its temp table, we shall - adjust the ibd file name according to system table info */ + be different from data obtained from System tables since file + operations are not transactional. 
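In the recovery branch added above, a general tablespace registered under a temporary name is renamed in place: the space is removed from fil_system->name_hash under its old name, space->name is freed and replaced, and the space is re-inserted under the new key, all while fil_system->mutex is held. A standalone sketch of that "rename a key = erase + reinsert under one lock" pattern with std::unordered_map (the struct and names below are illustrative only, not InnoDB types):

#include <cassert>
#include <mutex>
#include <string>
#include <unordered_map>

struct space {
    std::string name;
    unsigned    id;
};

std::mutex                              sys_mutex;
std::unordered_map<std::string, space*> name_hash;

// Illustrative sketch: rename 'old_name' to 'new_name' while keeping the
// hash consistent. Both the map update and the object update happen under
// the same lock, so no reader can observe the space under a stale key.
bool rename_space(const std::string& old_name, const std::string& new_name)
{
    std::lock_guard<std::mutex> guard(sys_mutex);

    auto it = name_hash.find(old_name);
    if (it == name_hash.end()) {
        return false;
    }
    space* s = it->second;
    name_hash.erase(it);            /* HASH_DELETE analogue */
    s->name = new_name;             /* replace the name owned by the object */
    name_hash.emplace(s->name, s);  /* HASH_INSERT analogue, new key */
    return true;
}

int main()
{
    space s{"#sql-ib21", 7};
    name_hash.emplace(s.name, &s);
    assert(rename_space("#sql-ib21", "test/t1"));
    assert(name_hash.count("test/t1") == 1);
}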
If adjust_space is set, and the + mismatching space are between a user table and its temp table, we + shall adjust the ibd file name according to system table info */ if (adjust_space && space != NULL && row_is_mysql_tmp_table_name(space->name) @@ -5038,19 +4654,23 @@ fil_space_for_table_exists_in_mem( DBUG_SUICIDE();); if (fnamespace) { - char* tmp_name; + const char* tmp_name; tmp_name = dict_mem_create_temporary_tablename( heap, name, table_id); - fil_rename_tablespace(fnamespace->name, fnamespace->id, - tmp_name, NULL); + fil_rename_tablespace( + fnamespace->id, + UT_LIST_GET_FIRST(fnamespace->chain)->name, + tmp_name, NULL); } DBUG_EXECUTE_IF("ib_crash_after_adjust_one_fil_space", DBUG_SUICIDE();); - fil_rename_tablespace(space->name, id, name, NULL); + fil_rename_tablespace( + id, UT_LIST_GET_FIRST(space->chain)->name, + name, NULL); DBUG_EXECUTE_IF("ib_crash_after_adjust_fil_space", DBUG_SUICIDE();); @@ -5060,14 +4680,14 @@ fil_space_for_table_exists_in_mem( ut_ad(space == fnamespace); mutex_exit(&fil_system->mutex); - return(TRUE); + return(true); } if (!print_error_if_does_not_exist) { mutex_exit(&fil_system->mutex); - return(FALSE); + return(false); } if (space == NULL) { @@ -5076,49 +4696,33 @@ fil_space_for_table_exists_in_mem( fil_report_missing_tablespace(name, id); } } else { - ut_print_timestamp(stderr); - fputs(" InnoDB: Error: table ", stderr); - ut_print_filename(stderr, name); - fprintf(stderr, "\n" - "InnoDB: in InnoDB data dictionary has" - " tablespace id %lu,\n" - "InnoDB: but a tablespace with that id" - " does not exist. There is\n" - "InnoDB: a tablespace of name %s and id %lu," - " though. Have\n" - "InnoDB: you deleted or moved .ibd files?\n", - (ulong) id, fnamespace->name, - (ulong) fnamespace->id); + ib::error() << "Table " << name << " in InnoDB data" + " dictionary has tablespace id " << id + << ", but a tablespace with that id does not" + " exist. There is a tablespace of name " + << fnamespace->name << " and id " + << fnamespace->id << ", though. Have you" + " deleted or moved .ibd files?"; } error_exit: - fputs("InnoDB: Please refer to\n" - "InnoDB: " REFMAN "innodb-troubleshooting-datadict.html\n" - "InnoDB: for how to resolve the issue.\n", stderr); + ib::warn() << TROUBLESHOOT_DATADICT_MSG; mutex_exit(&fil_system->mutex); - return(FALSE); + return(false); } if (0 != strcmp(space->name, name)) { - ut_print_timestamp(stderr); - fputs(" InnoDB: Error: table ", stderr); - ut_print_filename(stderr, name); - fprintf(stderr, "\n" - "InnoDB: in InnoDB data dictionary has" - " tablespace id %lu,\n" - "InnoDB: but the tablespace with that id" - " has name %s.\n" - "InnoDB: Have you deleted or moved .ibd files?\n", - (ulong) id, space->name); + + ib::error() << "Table " << name << " in InnoDB data dictionary" + " has tablespace id " << id << ", but the tablespace" + " with that id has name " << space->name << "." 
+ " Have you deleted or moved .ibd files?"; if (fnamespace != NULL) { - fputs("InnoDB: There is a tablespace" - " with the right name\n" - "InnoDB: ", stderr); - ut_print_filename(stderr, fnamespace->name); - fprintf(stderr, ", but its id is %lu.\n", - (ulong) fnamespace->id); + ib::error() << "There is a tablespace with the right" + " name: " << fnamespace->name << ", but its id" + " is " << fnamespace->id << "."; } goto error_exit; @@ -5126,107 +4730,129 @@ error_exit: mutex_exit(&fil_system->mutex); - return(FALSE); + return(false); } -/*******************************************************************//** -Checks if a single-table tablespace for a given table name exists in the -tablespace memory cache. -@return space id, ULINT_UNDEFINED if not found */ -UNIV_INTERN +/** Return the space ID based on the tablespace name. +The tablespace must be found in the tablespace memory cache. +This call is made from external to this module, so the mutex is not owned. +@param[in] tablespace Tablespace name +@return space ID if tablespace found, ULINT_UNDEFINED if space not. */ ulint -fil_get_space_id_for_table( -/*=======================*/ - const char* tablename) /*!< in: table name in the standard - 'databasename/tablename' format */ +fil_space_get_id_by_name( + const char* tablespace) { - fil_space_t* fnamespace; - ulint id = ULINT_UNDEFINED; - - ut_ad(fil_system); - mutex_enter(&fil_system->mutex); - /* Look if there is a space with the same name. */ - - fnamespace = fil_space_get_by_name(tablename); - - if (fnamespace) { - id = fnamespace->id; - } + /* Search for a space with the same name. */ + fil_space_t* space = fil_space_get_by_name(tablespace); + ulint id = (space == NULL) ? ULINT_UNDEFINED : space->id; mutex_exit(&fil_system->mutex); return(id); } -/**********************************************************************//** -Tries to extend a data file so that it would accommodate the number of pages -given. The tablespace must be cached in the memory cache. If the space is big -enough already, does nothing. -@return TRUE if success */ -UNIV_INTERN -ibool -fil_extend_space_to_desired_size( -/*=============================*/ - ulint* actual_size, /*!< out: size of the space after extension; - if we ran out of disk space this may be lower - than the desired size */ - ulint space_id, /*!< in: space id */ - ulint size_after_extend)/*!< in: desired size in pages after the - extension; if the current space size is bigger - than this already, the function does nothing */ +/** +Fill the pages with NULs +@param[in] node File node +@param[in] page_size physical page size +@param[in] start Offset from the start of the file in bytes +@param[in] len Length in bytes +@param[in] read_only_mode + if true, then read only mode checks are enforced. 
+@return DB_SUCCESS or error code */ +static +dberr_t +fil_write_zeros( + const fil_node_t* node, + ulint page_size, + os_offset_t start, + ulint len, + bool read_only_mode) { - fil_node_t* node; - fil_space_t* space; - byte* buf2; - byte* buf; - ulint buf_size; - ulint start_page_no; - ulint file_start_page_no; - ulint page_size; - ulint pages_added; - ibool success; + ut_a(len > 0); - ut_ad(!srv_read_only_mode); + /* Extend at most 1M at a time */ + ulint n_bytes = ut_min(static_cast(1024 * 1024), len); + byte* ptr = reinterpret_cast(ut_zalloc_nokey(n_bytes + + page_size)); + byte* buf = reinterpret_cast(ut_align(ptr, page_size)); -retry: - pages_added = 0; - success = TRUE; + os_offset_t offset = start; + dberr_t err = DB_SUCCESS; + const os_offset_t end = start + len; + IORequest request(IORequest::WRITE); - fil_mutex_enter_and_prepare_for_io(space_id); + while (offset < end) { - space = fil_space_get_by_id(space_id); - ut_a(space); +#ifdef UNIV_HOTBACKUP + err = = os_file_write( + request, node->name, node->handle, buf, offset, + n_bytes); +#else + err = os_aio( + request, OS_AIO_SYNC, node->name, + node->handle, buf, offset, n_bytes, read_only_mode, + NULL, NULL, NULL); +#endif /* UNIV_HOTBACKUP */ - if (space->size >= size_after_extend) { - /* Space already big enough */ + if (err != DB_SUCCESS) { + break; + } - *actual_size = space->size; + offset += n_bytes; - mutex_exit(&fil_system->mutex); + n_bytes = ut_min(n_bytes, static_cast(end - offset)); - return(TRUE); + DBUG_EXECUTE_IF("ib_crash_during_tablespace_extension", + DBUG_SUICIDE();); } - page_size = fsp_flags_get_zip_size(space->flags); + ut_free(ptr); + + return(err); +} + +/** Try to extend a tablespace if it is smaller than the specified size. +@param[in,out] space tablespace +@param[in] size desired size in pages +@return whether the tablespace is at least as big as requested */ +bool +fil_space_extend( + fil_space_t* space, + ulint size) +{ + /* In read-only mode we allow write to shared temporary tablespace + as intrinsic table created by Optimizer reside in this tablespace. */ + ut_ad(!srv_read_only_mode || fsp_is_system_temporary(space->id)); + +retry: + bool success = true; + + fil_mutex_enter_and_prepare_for_io(space->id); - if (!page_size) { - page_size = UNIV_PAGE_SIZE; + if (space->size >= size) { + /* Space already big enough */ + mutex_exit(&fil_system->mutex); + return(true); } - node = UT_LIST_GET_LAST(space->chain); + page_size_t pageSize(space->flags); + const ulint page_size = pageSize.physical(); + fil_node_t* node = UT_LIST_GET_LAST(space->chain); if (!node->being_extended) { /* Mark this node as undergoing extension. This flag is used by other threads to wait for the extension opereation to finish. */ - node->being_extended = TRUE; + node->being_extended = true; } else { /* Another thread is currently extending the file. Wait - for it to finish. - It'd have been better to use event driven mechanism but - the entire module is peppered with polling stuff. */ + for it to finish. It'd have been better to use an event + driven mechanism but the entire module is peppered with + polling code. */ + mutex_exit(&fil_system->mutex); os_thread_sleep(100000); goto retry; @@ -5245,142 +4871,131 @@ retry: we have set the node->being_extended flag. 
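fil_write_zeros() above grows a file by repeatedly writing a zero-filled buffer of at most 1 MiB until the range [start, start + len) is covered. A standalone POSIX sketch of the same chunking (plain pwrite() instead of the os_aio()/IORequest machinery, and with no O_DIRECT alignment handling):

// Illustrative sketch: fill [start, start + len) of an open file with zeros,
// writing at most 1 MiB per call, as fil_write_zeros() does.
#include <fcntl.h>
#include <unistd.h>
#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <vector>

int write_zeros(int fd, off_t start, size_t len)
{
    const size_t      max_chunk = 1024 * 1024;        /* 1 MiB per write */
    std::vector<char> buf(std::min(max_chunk, len), 0);

    off_t       offset = start;
    const off_t end    = start + static_cast<off_t>(len);

    while (offset < end) {
        size_t  n_bytes = std::min(buf.size(),
                                   static_cast<size_t>(end - offset));
        ssize_t written = pwrite(fd, buf.data(), n_bytes, offset);

        if (written < 0) {
            return errno;           /* stop on the first I/O error */
        }
        offset += written;          /* pwrite may write less than asked */
    }
    return 0;
}

int main()
{
    int fd = open("zeros.tmp", O_CREAT | O_RDWR, 0600);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    int err = write_zeros(fd, 0, 3 * 1024 * 1024 + 123);
    close(fd);
    return err;
}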
*/ mutex_exit(&fil_system->mutex); - start_page_no = space->size; - file_start_page_no = space->size - node->size; + ulint pages_added; - /* Determine correct file block size */ - if (node->file_block_size == 0) { - node->file_block_size = os_file_get_block_size(node->handle, node->name); - space->file_block_size = node->file_block_size; - } + /* Note: This code is going to be executed independent of FusionIO HW + if the OS supports posix_fallocate() */ -#ifdef HAVE_POSIX_FALLOCATE - if (srv_use_posix_fallocate) { - os_offset_t start_offset = start_page_no * page_size; - os_offset_t n_pages = (size_after_extend - start_page_no); - os_offset_t len = n_pages * page_size; + ut_ad(size > space->size); - if (posix_fallocate(node->handle, start_offset, len) == -1) { - ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file " - "space for file \'%s\' failed. Current size " - INT64PF ", desired size " INT64PF "\n", - node->name, start_offset, len+start_offset); - os_file_handle_error_no_exit(node->name, "posix_fallocate", FALSE, __FILE__, __LINE__); - success = FALSE; - } else { - success = TRUE; - } + os_offset_t node_start = os_file_get_size(node->handle); + ut_a(node_start != (os_offset_t) -1); - DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", - success = FALSE; errno = 28; os_has_said_disk_full = TRUE;); + /* Node first page number */ + ulint node_first_page = space->size - node->size; - mutex_enter(&fil_system->mutex); + /* Number of physical pages in the node/file */ + ulint n_node_physical_pages + = static_cast(node_start) / page_size; - if (success) { - node->size += (size_after_extend - start_page_no); - space->size += (size_after_extend - start_page_no); + /* Number of pages to extend in the node/file */ + lint n_node_extend; - os_has_said_disk_full = FALSE; - } + n_node_extend = size - (node_first_page + node->size); - /* If posix_fallocate was used to extent the file space - we need to complete the io. Because no actual writes were - dispatched read operation is enough here. Without this - there will be assertion at shutdown indicating that - all IO is not completed. */ - fil_node_complete_io(node, fil_system, OS_FILE_READ); - goto file_extended; - } -#endif + /* If we already have enough physical pages to satisfy the + extend request on the node then ignore it */ + if (node->size + n_node_extend > n_node_physical_pages) { - /* Extend at most 64 pages at a time */ - buf_size = ut_min(64, size_after_extend - start_page_no) * page_size; - buf2 = static_cast(mem_alloc(buf_size + page_size)); - buf = static_cast(ut_align(buf2, page_size)); + DBUG_EXECUTE_IF("ib_crash_during_tablespace_extension", + DBUG_SUICIDE();); - memset(buf, 0, buf_size); + os_offset_t len; + dberr_t err = DB_SUCCESS; - while (start_page_no < size_after_extend) { - ulint n_pages - = ut_min(buf_size / page_size, - size_after_extend - start_page_no); + len = ((node->size + n_node_extend) * page_size) - node_start; + ut_ad(len > 0); - os_offset_t offset - = ((os_offset_t) (start_page_no - file_start_page_no)) - * page_size; +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) + /* This is required by FusionIO HW/Firmware */ + int ret = posix_fallocate(node->handle, node_start, len); const char* name = node->name == NULL ? 
space->name : node->name; -#ifdef UNIV_HOTBACKUP - success = os_file_write(name, node->handle, buf, - offset, page_size * n_pages); -#else - success = os_aio(OS_FILE_WRITE, 0, OS_AIO_SYNC, - name, node->handle, buf, - offset, page_size * n_pages, page_size, - node, NULL, 0); -#endif /* UNIV_HOTBACKUP */ + /* We already pass the valid offset and len in, if EINVAL + is returned, it could only mean that the file system doesn't + support fallocate(), currently one known case is + ext3 FS with O_DIRECT. We ignore EINVAL here so that the + error message won't flood. */ + if (ret != 0 && ret != EINVAL) { + ib::error() + << "posix_fallocate(): Failed to preallocate" + " data for file " + << node->name << ", desired size " + << len << " bytes." + " Operating system error number " + << ret << ". Check" + " that the disk is not full or a disk quota" + " exceeded. Make sure the file system supports" + " this function. Some operating system error" + " numbers are described at " REFMAN + " operating-system-error-codes.html"; - DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28", - success = FALSE; errno = 28; os_has_said_disk_full = TRUE;); + err = DB_IO_ERROR; + } +#endif /* NO_FALLOCATE || !UNIV_LINUX */ - if (success) { - os_has_said_disk_full = FALSE; - } else { - /* Let us measure the size of the file to determine - how much we were able to extend it */ - os_offset_t size; + if (!node->atomic_write || err == DB_IO_ERROR) { - size = os_file_get_size(node->handle); - ut_a(size != (os_offset_t) -1); + bool read_only_mode; - n_pages = ((ulint) (size / page_size)) - - node->size - pages_added; + read_only_mode = (space->purpose != FIL_TYPE_TEMPORARY + ? false : srv_read_only_mode); - pages_added += n_pages; - break; + err = fil_write_zeros( + node, page_size, node_start, + static_cast(len), read_only_mode); + + if (err != DB_SUCCESS) { + + ib::warn() + << "Error while writing " << len + << " zeroes to " << node->name + << " starting at offset " << node_start; + } } - start_page_no += n_pages; - pages_added += n_pages; - } + /* Check how many pages actually added */ + os_offset_t end = os_file_get_size(node->handle); + ut_a(end != static_cast(-1) && end >= node_start); + + os_has_said_disk_full = !(success = (end == node_start + len)); + + pages_added = static_cast(end - node_start) / page_size; - mem_free(buf2); + } else { + success = true; + pages_added = n_node_extend; + os_has_said_disk_full = FALSE; + } mutex_enter(&fil_system->mutex); ut_a(node->being_extended); - space->size += pages_added; node->size += pages_added; + space->size += pages_added; + node->being_extended = false; - fil_node_complete_io(node, fil_system, OS_FILE_WRITE); - - /* At this point file has been extended */ -file_extended: - - node->being_extended = FALSE; - *actual_size = space->size; + fil_node_complete_io(node, fil_system, IORequestWrite); #ifndef UNIV_HOTBACKUP - if (space_id == 0) { - ulint pages_per_mb = (1024 * 1024) / page_size; + /* Keep the last data file size info up to date, rounded to + full megabytes */ + ulint pages_per_mb = (1024 * 1024) / page_size; + ulint size_in_pages = ((node->size / pages_per_mb) * pages_per_mb); - /* Keep the last data file size info up to date, rounded to - full megabytes */ - - srv_data_file_sizes[srv_n_data_files - 1] - = (node->size / pages_per_mb) * pages_per_mb; + if (space->id == srv_sys_space.space_id()) { + srv_sys_space.set_last_file_size(size_in_pages); + } else if (space->id == srv_tmp_space.space_id()) { + srv_tmp_space.set_last_file_size(size_in_pages); } #endif /* 
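The extension path above first tries posix_fallocate(), treats a failure such as EINVAL as "this file system cannot preallocate" and falls back to writing explicit zeros, and finally derives how many pages were really added from the file size reported by the OS rather than from its own bookkeeping. A compressed standalone sketch of that try/fall-back/verify sequence (POSIX only; the page size and file name are arbitrary, and the zero-fill fallback is simplified to one page per write):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <vector>

// Illustrative sketch: extend an open file to 'new_pages' pages of
// 'page_size' bytes. Prefer posix_fallocate(); if the file system cannot
// preallocate, fall back to writing zeros, then measure what was reached.
long extend_file(int fd, size_t page_size, long new_pages)
{
    struct stat st;
    if (fstat(fd, &st) != 0) {
        return -1;
    }
    const off_t cur  = st.st_size;
    const off_t want = static_cast<off_t>(new_pages) * page_size;

    if (want > cur && posix_fallocate(fd, cur, want - cur) != 0) {
        /* Fallback: append zero-filled pages one page at a time. */
        std::vector<char> zeros(page_size, 0);
        for (off_t offset = cur; offset < want; offset += page_size) {
            if (pwrite(fd, zeros.data(), page_size, offset)
                != static_cast<ssize_t>(page_size)) {
                break;  /* ran out of space; report what was reached */
            }
        }
    }

    /* Trust the OS, not our own bookkeeping, for the final size. */
    return fstat(fd, &st) == 0 ? static_cast<long>(st.st_size / page_size)
                               : -1;
}

int main()
{
    int  fd    = open("extend.tmp", O_CREAT | O_RDWR | O_TRUNC, 0600);
    long pages = extend_file(fd, 16384, 4);   /* grow to 4 pages of 16 KiB */
    close(fd);
    return pages == 4 ? 0 : 1;
}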
!UNIV_HOTBACKUP */ - /* - printf("Extended %s to %lu, actual size %lu pages\n", space->name, - size_after_extend, *actual_size); */ mutex_exit(&fil_system->mutex); - fil_flush(space_id); + fil_flush(space->id); return(success); } @@ -5391,60 +5006,55 @@ Extends all tablespaces to the size stored in the space header. During the mysqlbackup --apply-log phase we extended the spaces on-demand so that log records could be applied, but that may have left spaces still too small compared to the size stored in the space header. */ -UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void) /*======================================*/ { - fil_space_t* space; byte* buf; ulint actual_size; ulint size_in_header; dberr_t error; - ibool success; + bool success; - buf = mem_alloc(UNIV_PAGE_SIZE); + buf = ut_malloc_nokey(UNIV_PAGE_SIZE); mutex_enter(&fil_system->mutex); - space = UT_LIST_GET_FIRST(fil_system->space_list); + for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system->space_list); + space != NULL; + space = UT_LIST_GET_NEXT(space_list, space)) { - while (space) { - ut_a(space->purpose == FIL_TABLESPACE); + ut_a(space->purpose == FIL_TYPE_TABLESPACE); mutex_exit(&fil_system->mutex); /* no need to protect with a mutex, because this is a single-threaded operation */ - error = fil_read(TRUE, space->id, - fsp_flags_get_zip_size(space->flags), - 0, 0, UNIV_PAGE_SIZE, buf, NULL, 0); + error = fil_read( + page_id_t(space->id, 0), + page_size_t(space->flags), + 0, univ_page_size.physical(), buf); + ut_a(error == DB_SUCCESS); - size_in_header = fsp_get_size_low(buf); + size_in_header = fsp_header_get_field(buf, FSP_SIZE); - success = fil_extend_space_to_desired_size( - &actual_size, space->id, size_in_header); + success = fil_space_extend(space, size_in_header); if (!success) { - fprintf(stderr, - "InnoDB: Error: could not extend the" - " tablespace of %s\n" - "InnoDB: to the size stored in header," - " %lu pages;\n" - "InnoDB: size after extension %lu pages\n" - "InnoDB: Check that you have free disk space" - " and retry!\n", - space->name, size_in_header, actual_size); + ib::error() << "Could not extend the tablespace of " + << space->name << " to the size stored in" + " header, " << size_in_header << " pages;" + " size after extension " << actual_size + << " pages. Check that you have free disk" + " space and retry!"; ut_a(success); } mutex_enter(&fil_system->mutex); - - space = UT_LIST_GET_NEXT(space_list, space); } mutex_exit(&fil_system->mutex); - mem_free(buf); + ut_free(buf); } #endif @@ -5452,9 +5062,8 @@ fil_extend_tablespaces_to_stored_len(void) /*******************************************************************//** Tries to reserve free extents in a file space. -@return TRUE if succeed */ -UNIV_INTERN -ibool +@return true if succeed */ +bool fil_space_reserve_free_extents( /*===========================*/ ulint id, /*!< in: space id */ @@ -5462,7 +5071,7 @@ fil_space_reserve_free_extents( ulint n_to_reserve) /*!< in: how many one wants to reserve */ { fil_space_t* space; - ibool success; + bool success; ut_ad(fil_system); @@ -5473,10 +5082,10 @@ fil_space_reserve_free_extents( ut_a(space); if (space->n_reserved_extents + n_to_reserve > n_free_now) { - success = FALSE; + success = false; } else { space->n_reserved_extents += n_to_reserve; - success = TRUE; + success = true; } mutex_exit(&fil_system->mutex); @@ -5486,7 +5095,6 @@ fil_space_reserve_free_extents( /*******************************************************************//** Releases free extents in a file space. 
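fil_extend_tablespaces_to_stored_len() above reads page 0 of each tablespace and takes the on-disk size from the FSP header via fsp_header_get_field(buf, FSP_SIZE). Header fields like this are stored big-endian; a tiny standalone sketch of that decoding (the 4-byte width matches mach_read_from_4(), but the offset below is a placeholder, not the real FSP_SIZE position):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative sketch: decode a 4-byte big-endian field from a page buffer,
// the way mach_read_from_4()/fsp_header_get_field() do.
static uint32_t read_be32(const unsigned char* page, size_t offset)
{
    return (static_cast<uint32_t>(page[offset])     << 24)
         | (static_cast<uint32_t>(page[offset + 1]) << 16)
         | (static_cast<uint32_t>(page[offset + 2]) <<  8)
         |  static_cast<uint32_t>(page[offset + 3]);
}

int main()
{
    unsigned char page[64] = {0};
    const size_t  FIELD_OFFSET = 8;        /* placeholder offset */

    page[FIELD_OFFSET + 2] = 0x01;         /* encodes the value 256 */
    std::printf("size in header: %u pages\n", read_be32(page, FIELD_OFFSET));
}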
*/ -UNIV_INTERN void fil_space_release_free_extents( /*===========================*/ @@ -5512,7 +5120,6 @@ fil_space_release_free_extents( /*******************************************************************//** Gets the number of reserved extents. If the database is silent, this number should be zero. */ -UNIV_INTERN ulint fil_space_get_n_reserved_extents( /*=============================*/ @@ -5558,19 +5165,15 @@ fil_node_prepare_for_io( ut_ad(mutex_own(&(system->mutex))); if (system->n_open > system->max_n_open + 5) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: open files %lu" - " exceeds the limit %lu\n", - (ulong) system->n_open, - (ulong) system->max_n_open); + ib::warn() << "Open files " << system->n_open + << " exceeds the limit " << system->max_n_open; } - if (node->open == FALSE) { + if (!node->is_open) { /* File is closed: open it */ ut_a(node->n_pending == 0); - if (!fil_node_open_file(node, system, space)) { + if (!fil_node_open_file(node)) { return(false); } } @@ -5580,7 +5183,7 @@ fil_node_prepare_for_io( ut_a(UT_LIST_GET_LEN(system->LRU) > 0); - UT_LIST_REMOVE(LRU, system->LRU, node); + UT_LIST_REMOVE(system->LRU, node); } node->n_pending++; @@ -5597,21 +5200,23 @@ fil_node_complete_io( /*=================*/ fil_node_t* node, /*!< in: file node */ fil_system_t* system, /*!< in: tablespace memory cache */ - ulint type) /*!< in: OS_FILE_WRITE or OS_FILE_READ; marks - the node as modified if - type == OS_FILE_WRITE */ + const IORequest&type) /*!< in: IO_TYPE_*, marks the node as + modified if TYPE_IS_WRITE() */ { - ut_ad(node); - ut_ad(system); - ut_ad(mutex_own(&(system->mutex))); - + ut_ad(mutex_own(&system->mutex)); ut_a(node->n_pending > 0); - node->n_pending--; + --node->n_pending; + + ut_ad(type.validate()); + + if (type.is_write()) { + + ut_ad(!srv_read_only_mode + || fsp_is_system_temporary(node->space->id)); + + ++system->modification_counter; - if (type == OS_FILE_WRITE) { - ut_ad(!srv_read_only_mode); - system->modification_counter++; node->modification_counter = system->modification_counter; if (fil_buffering_disabled(node->space)) { @@ -5625,179 +5230,88 @@ fil_node_complete_io( } else if (!node->space->is_in_unflushed_spaces) { node->space->is_in_unflushed_spaces = true; - UT_LIST_ADD_FIRST(unflushed_spaces, - system->unflushed_spaces, - node->space); + + UT_LIST_ADD_FIRST( + system->unflushed_spaces, node->space); } } if (node->n_pending == 0 && fil_space_belongs_in_lru(node->space)) { /* The node must be put back to the LRU list */ - UT_LIST_ADD_FIRST(LRU, system->LRU, node); + UT_LIST_ADD_FIRST(system->LRU, node); } } -/********************************************************************//** -Report information about an invalid page access. */ +/** Report information about an invalid page access. 
*/ static void fil_report_invalid_page_access( -/*===========================*/ ulint block_offset, /*!< in: block offset */ ulint space_id, /*!< in: space id */ const char* space_name, /*!< in: space name */ ulint byte_offset, /*!< in: byte offset */ ulint len, /*!< in: I/O length */ - ulint type) /*!< in: I/O type */ -{ - fprintf(stderr, - "InnoDB: Error: trying to access page number %lu" - " in space %lu,\n" - "InnoDB: space name %s,\n" - "InnoDB: which is outside the tablespace bounds.\n" - "InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n" - "InnoDB: If you get this error at mysqld startup," - " please check that\n" - "InnoDB: your my.cnf matches the ibdata files" - " that you have in the\n" - "InnoDB: MySQL server.\n", - (ulong) block_offset, (ulong) space_id, space_name, - (ulong) byte_offset, (ulong) len, (ulong) type); -} - -/********************************************************************//** -Find correct node from file space -@return node */ -static -fil_node_t* -fil_space_get_node( - fil_space_t* space, /*!< in: file spage */ - ulint space_id, /*!< in: space id */ - ulint* block_offset, /*!< in/out: offset in number of blocks */ - ulint byte_offset, /*!< in: remainder of offset in bytes; in - aio this must be divisible by the OS block - size */ - ulint len) /*!< in: how many bytes to read or write; this - must not cross a file boundary; in aio this - must be a block size multiple */ -{ - fil_node_t* node; - ut_ad(mutex_own(&fil_system->mutex)); - - node = UT_LIST_GET_FIRST(space->chain); - - for (;;) { - if (node == NULL) { - return(NULL); - } else if (fil_is_user_tablespace_id(space->id) - && node->size == 0) { - - /* We do not know the size of a single-table tablespace - before we open the file */ - break; - } else if (node->size > *block_offset) { - /* Found! */ - break; - } else { - *block_offset -= node->size; - node = UT_LIST_GET_NEXT(chain, node); - } - } - - return (node); -} -/********************************************************************//** -Return block size of node in file space -@return file block size */ -UNIV_INTERN -ulint -fil_space_get_block_size( -/*=====================*/ - ulint space_id, - ulint block_offset, - ulint len) + bool is_read) /*!< in: I/O type */ { - ulint block_size = 512; - ut_ad(!mutex_own(&fil_system->mutex)); - - mutex_enter(&fil_system->mutex); - fil_space_t* space = fil_space_get_space(space_id); - - if (space) { - fil_node_t* node = fil_space_get_node(space, space_id, &block_offset, 0, len); - - if (node) { - block_size = node->file_block_size; - } - } - mutex_exit(&fil_system->mutex); + ib::error() + << "Trying to access page number " << block_offset << " in" + " space " << space_id << ", space name " << space_name << "," + " which is outside the tablespace bounds. Byte offset " + << byte_offset << ", len " << len << ", i/o type " << + (is_read ? "read" : "write") + << ". If you get this error at mysqld startup, please check" + " that your my.cnf matches the ibdata files that you have in" + " the MySQL server."; + + ib::error() << "Server exits" +#ifdef UNIV_DEBUG + << " at " << __FILE__ << "[" << __LINE__ << "]" +#endif + << "."; - return block_size; + _exit(1); } -/********************************************************************//** -Reads or writes data. This operation is asynchronous (aio). -@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do -i/o on a tablespace which does not exist */ -UNIV_INTERN +/** Reads or writes data. This operation could be asynchronous (aio). 
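fil_node_prepare_for_io() and fil_node_complete_io() above bracket every I/O on a file node: prepare opens the file if needed, pulls it out of the LRU list so it cannot be closed while I/O is outstanding, and bumps n_pending; complete decrements the counter and, once it reaches zero, puts the node back on the LRU. A minimal standalone sketch of that pairing (one std::list plus a mutex instead of fil_system, and without the write/flush bookkeeping):

#include <cassert>
#include <list>
#include <mutex>

struct file_node {
    int  n_pending = 0;     /* outstanding I/Os on this file */
    bool in_lru    = false; /* currently eligible for closing */
};

std::mutex            sys_mutex;
std::list<file_node*> lru;      /* files with no pending I/O, closable */

void prepare_for_io(file_node* node)
{
    std::lock_guard<std::mutex> guard(sys_mutex);
    if (node->n_pending == 0 && node->in_lru) {
        lru.remove(node);       /* must not be closed while I/O is pending */
        node->in_lru = false;
    }
    ++node->n_pending;
}

void complete_io(file_node* node)
{
    std::lock_guard<std::mutex> guard(sys_mutex);
    assert(node->n_pending > 0);
    if (--node->n_pending == 0) {
        lru.push_front(node);   /* closable again */
        node->in_lru = true;
    }
}

int main()
{
    file_node f;
    lru.push_front(&f);
    f.in_lru = true;

    prepare_for_io(&f);         /* removed from the LRU, n_pending == 1 */
    complete_io(&f);            /* back on the LRU */
    assert(f.in_lru && lru.size() == 1);
}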
+ +@param[in,out] type IO context +@param[in] sync true if synchronous aio is desired +@param[in] page_id page id +@param[in] page_size page size +@param[in] byte_offset remainder of offset in bytes; in aio this + must be divisible by the OS block size +@param[in] len how many bytes to read or write; this must + not cross a file boundary; in aio this must + be a block size multiple +@param[in,out] buf buffer where to store read data or from where + to write; in aio this must be appropriately + aligned +@param[in] message message for aio handler if non-sync aio + used, else ignored + +@return DB_SUCCESS, DB_TABLESPACE_DELETED or DB_TABLESPACE_TRUNCATED + if we are trying to do i/o on a tablespace which does not exist */ dberr_t fil_io( -/*===*/ - ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE, - ORed to OS_FILE_LOG, if a log i/o - and ORed to OS_AIO_SIMULATED_WAKE_LATER - if simulated aio and we want to post a - batch of i/os; NOTE that a simulated batch - may introduce hidden chances of deadlocks, - because i/os are not actually handled until - all have been posted: use with great - caution! */ - bool sync, /*!< in: true if synchronous aio is desired */ - ulint space_id, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint block_offset, /*!< in: offset in number of blocks */ - ulint byte_offset, /*!< in: remainder of offset in bytes; in - aio this must be divisible by the OS block - size */ - ulint len, /*!< in: how many bytes to read or write; this - must not cross a file boundary; in aio this - must be a block size multiple */ - void* buf, /*!< in/out: buffer where to store read data - or from where to write; in aio this must be - appropriately aligned */ - void* message, /*!< in: message for aio handler if non-sync - aio used, else ignored */ - ulint* write_size) /*!< in/out: Actual write size initialized - after fist successfull trim - operation for this page and if - initialized we do not trim again if - actual page size does not decrease. 
*/ -{ - ulint mode; - fil_space_t* space; - fil_node_t* node; - ibool ret; - ulint is_log; - ulint wake_later; - os_offset_t offset; - bool ignore_nonexistent_pages; - - is_log = type & OS_FILE_LOG; - type = type & ~OS_FILE_LOG; - - wake_later = type & OS_AIO_SIMULATED_WAKE_LATER; - type = type & ~OS_AIO_SIMULATED_WAKE_LATER; + const IORequest& type, + bool sync, + const page_id_t& page_id, + const page_size_t& page_size, + ulint byte_offset, + ulint len, + void* buf, + void* message, + ulint* write_size) +{ + os_offset_t offset; + IORequest req_type(type); - ignore_nonexistent_pages = type & BUF_READ_IGNORE_NONEXISTENT_PAGES; - type &= ~BUF_READ_IGNORE_NONEXISTENT_PAGES; + ut_ad(req_type.validate()); - ut_ad(byte_offset < UNIV_PAGE_SIZE); - ut_ad(!zip_size || !byte_offset); - ut_ad(ut_is_2pow(zip_size)); - ut_ad(buf); ut_ad(len > 0); + ut_ad(byte_offset < UNIV_PAGE_SIZE); + ut_ad(!page_size.is_compressed() || byte_offset == 0); ut_ad(UNIV_PAGE_SIZE == (ulong)(1 << UNIV_PAGE_SIZE_SHIFT)); #if (1 << UNIV_PAGE_SIZE_SHIFT_MAX) != UNIV_PAGE_SIZE_MAX # error "(1 << UNIV_PAGE_SIZE_SHIFT_MAX) != UNIV_PAGE_SIZE_MAX" @@ -5806,23 +5320,36 @@ fil_io( # error "(1 << UNIV_PAGE_SIZE_SHIFT_MIN) != UNIV_PAGE_SIZE_MIN" #endif ut_ad(fil_validate_skip()); + #ifndef UNIV_HOTBACKUP -# ifndef UNIV_LOG_DEBUG - /* ibuf bitmap pages must be read in the sync aio mode: */ + + /* ibuf bitmap pages must be read in the sync AIO mode: */ ut_ad(recv_no_ibuf_operations - || type == OS_FILE_WRITE - || !ibuf_bitmap_page(zip_size, block_offset) + || req_type.is_write() + || !ibuf_bitmap_page(page_id, page_size) || sync - || is_log); -# endif /* UNIV_LOG_DEBUG */ + || req_type.is_log()); + + ulint mode; + if (sync) { + mode = OS_AIO_SYNC; - } else if (is_log) { + + } else if (req_type.is_log()) { + mode = OS_AIO_LOG; - } else if (type == OS_FILE_READ + + } else if (req_type.is_read() && !recv_no_ibuf_operations - && ibuf_page(space_id, zip_size, block_offset, NULL)) { + && ibuf_page(page_id, page_size, NULL)) { + mode = OS_AIO_IBUF; + + /* Reduce probability of deadlock bugs in connection with ibuf: + do not let the ibuf i/o handler sleep */ + + req_type.clear_do_not_wake(); } else { mode = OS_AIO_NORMAL; } @@ -5831,70 +5358,116 @@ fil_io( mode = OS_AIO_SYNC; #endif /* !UNIV_HOTBACKUP */ - if (type == OS_FILE_READ) { + if (req_type.is_read()) { + srv_stats.data_read.add(len); - } else if (type == OS_FILE_WRITE) { - ut_ad(!srv_read_only_mode); + + } else if (req_type.is_write()) { + + ut_ad(!srv_read_only_mode + || fsp_is_system_temporary(page_id.space())); + srv_stats.data_written.add(len); - if (fil_page_is_index_page((byte *)buf)) { - srv_stats.index_pages_written.inc(); - } else { - srv_stats.non_index_pages_written.inc(); - } } /* Reserve the fil_system mutex and make sure that we can open at least one file while holding it, if the file is not already open */ - fil_mutex_enter_and_prepare_for_io(space_id); + fil_mutex_enter_and_prepare_for_io(page_id.space()); - space = fil_space_get_by_id(space_id); + fil_space_t* space = fil_space_get_by_id(page_id.space()); /* If we are deleting a tablespace we don't allow async read operations - on that. However, we do allow write and sync read operations */ - if (space == 0 - || (type == OS_FILE_READ && !sync && space->stop_new_ops)) { + on that. However, we do allow write operations and sync read operations. 
*/ + if (space == NULL + || (req_type.is_read() + && !sync + && space->stop_new_ops + && !space->is_being_truncated)) { + mutex_exit(&fil_system->mutex); - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to do i/o to a tablespace which does " - "not exist. i/o type %lu, space id %lu, " - "page no. %lu, i/o length %lu bytes", - (ulong) type, (ulong) space_id, (ulong) block_offset, - (ulong) len); + if (!req_type.ignore_missing()) { + ib::error() + << "Trying to do I/O to a tablespace which" + " does not exist. I/O type: " + << (req_type.is_read() ? "read" : "write") + << ", page: " << page_id + << ", I/O length: " << len << " bytes"; + } return(DB_TABLESPACE_DELETED); } - ut_ad(mode != OS_AIO_IBUF || space->purpose == FIL_TABLESPACE); + ut_ad(mode != OS_AIO_IBUF || fil_type_is_data(space->purpose)); - node = fil_space_get_node(space, space_id, &block_offset, byte_offset, len); + ulint cur_page_no = page_id.page_no(); + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); - if (!node) { - if (ignore_nonexistent_pages) { - mutex_exit(&fil_system->mutex); - return(DB_ERROR); - } - fil_report_invalid_page_access( - block_offset, space_id, space->name, - byte_offset, len, type); + for (;;) { - ut_error; + if (node == NULL) { + + if (req_type.ignore_missing()) { + mutex_exit(&fil_system->mutex); + return(DB_ERROR); + } + + fil_report_invalid_page_access( + page_id.page_no(), page_id.space(), + space->name, byte_offset, len, + req_type.is_read()); + + } else if (fil_is_user_tablespace_id(space->id) + && node->size == 0) { + + /* We do not know the size of a single-table tablespace + before we open the file */ + break; + + } else if (node->size > cur_page_no) { + /* Found! */ + break; + + } else { + if (space->id != srv_sys_space.space_id() + && UT_LIST_GET_LEN(space->chain) == 1 + && (srv_is_tablespace_truncated(space->id) + || space->is_being_truncated + || srv_was_tablespace_truncated(space->id)) + && req_type.is_read()) { + + /* Handle page which is outside the truncated + tablespace bounds when recovering from a crash + happened during a truncation */ + mutex_exit(&fil_system->mutex); + return(DB_TABLESPACE_TRUNCATED); + } + + cur_page_no -= node->size; + + node = UT_LIST_GET_NEXT(chain, node); + } } /* Open file if closed */ if (!fil_node_prepare_for_io(node, fil_system, space)) { - if (space->purpose == FIL_TABLESPACE + if (fil_type_is_data(space->purpose) && fil_is_user_tablespace_id(space->id)) { mutex_exit(&fil_system->mutex); - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to do i/o to a tablespace which " - "exists without .ibd data file. " - "i/o type %lu, space id %lu, page no %lu, " - "i/o length %lu bytes", - (ulong) type, (ulong) space_id, - (ulong) block_offset, (ulong) len); + if (!req_type.ignore_missing()) { + ib::error() + << "Trying to do I/O to a tablespace" + " which exists without .ibd data file." + " I/O type: " + << (req_type.is_read() + ? "read" : "write") + << ", page: " + << page_id_t(page_id.space(), + cur_page_no) + << ", I/O length: " << len << " bytes"; + } return(DB_TABLESPACE_DELETED); } @@ -5908,14 +5481,22 @@ fil_io( /* Check that at least the start offset is within the bounds of a single-table tablespace, including rollback tablespaces. 
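To route a request, fil_io() above walks the space's chain of data files and keeps subtracting each node's size in pages from the requested page number until it reaches the node that contains the page. The same lookup as a standalone sketch over a plain vector of per-file sizes:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Illustrative sketch: map a space-relative page number to
// (file index, page number within that file), or {npos, 0} if the page
// lies beyond the last file.
std::pair<size_t, size_t>
find_node(const std::vector<size_t>& node_sizes, size_t page_no)
{
    const size_t npos = static_cast<size_t>(-1);

    for (size_t i = 0; i < node_sizes.size(); ++i) {
        if (page_no < node_sizes[i]) {
            return {i, page_no};        /* found the containing file */
        }
        page_no -= node_sizes[i];       /* skip this file entirely */
    }
    return {npos, 0};                   /* outside the tablespace bounds */
}

int main()
{
    std::vector<size_t> sizes = {100, 100, 50};   /* pages per data file */

    assert(find_node(sizes, 37)  == (std::pair<size_t, size_t>(0, 37)));
    assert(find_node(sizes, 150) == (std::pair<size_t, size_t>(1, 50)));
    assert(find_node(sizes, 260).first == static_cast<size_t>(-1));
}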
*/ - if (UNIV_UNLIKELY(node->size <= block_offset) - && space->id != 0 && space->purpose == FIL_TABLESPACE) { + if (node->size <= cur_page_no + && space->id != srv_sys_space.space_id() + && fil_type_is_data(space->purpose)) { + + if (req_type.ignore_missing()) { + /* If we can tolerate the non-existent pages, we + should return with DB_ERROR and let caller decide + what to do. */ + fil_node_complete_io(node, fil_system, req_type); + mutex_exit(&fil_system->mutex); + return(DB_ERROR); + } fil_report_invalid_page_access( - block_offset, space_id, space->name, byte_offset, - len, type); - - ut_error; + page_id.page_no(), page_id.space(), + space->name, byte_offset, len, req_type.is_read()); } /* Now we have made the changes in the data structures of fil_system */ @@ -5923,74 +5504,126 @@ fil_io( /* Calculate the low 32 bits and the high 32 bits of the file offset */ - if (!zip_size) { - offset = ((os_offset_t) block_offset << UNIV_PAGE_SIZE_SHIFT) - + byte_offset; + if (!page_size.is_compressed()) { + + offset = ((os_offset_t) cur_page_no + << UNIV_PAGE_SIZE_SHIFT) + byte_offset; - ut_a(node->size - block_offset + ut_a(node->size - cur_page_no >= ((byte_offset + len + (UNIV_PAGE_SIZE - 1)) / UNIV_PAGE_SIZE)); } else { - ulint zip_size_shift; - switch (zip_size) { - case 1024: zip_size_shift = 10; break; - case 2048: zip_size_shift = 11; break; - case 4096: zip_size_shift = 12; break; - case 8192: zip_size_shift = 13; break; - case 16384: zip_size_shift = 14; break; - case 32768: zip_size_shift = 15; break; - case 65536: zip_size_shift = 16; break; + ulint size_shift; + + switch (page_size.physical()) { + case 1024: size_shift = 10; break; + case 2048: size_shift = 11; break; + case 4096: size_shift = 12; break; + case 8192: size_shift = 13; break; + case 16384: size_shift = 14; break; + case 32768: size_shift = 15; break; + case 65536: size_shift = 16; break; default: ut_error; } - offset = ((os_offset_t) block_offset << zip_size_shift) + + offset = ((os_offset_t) cur_page_no << size_shift) + byte_offset; - ut_a(node->size - block_offset - >= (len + (zip_size - 1)) / zip_size); + + ut_a(node->size - cur_page_no + >= (len + (page_size.physical() - 1)) + / page_size.physical()); } - /* Do aio */ + /* Do AIO */ ut_a(byte_offset % OS_FILE_LOG_BLOCK_SIZE == 0); ut_a((len % OS_FILE_LOG_BLOCK_SIZE) == 0); const char* name = node->name == NULL ? space->name : node->name; + /* Don't compress the log, page 0 of all tablespaces, tables + compresssed with the old scheme and all pages from the system + tablespace. 
*/ + + if (req_type.is_write() + && !req_type.is_log() + && !page_size.is_compressed() + && page_id.page_no() > 0 + && IORequest::is_punch_hole_supported() + && node->punch_hole) { + + ut_ad(!req_type.is_log()); + + req_type.set_punch_hole(); + + req_type.compression_algorithm(space->compression_type); + + } else { + req_type.clear_compressed(); + } + + req_type.block_size(node->block_size); + + dberr_t err; + #ifdef UNIV_HOTBACKUP /* In mysqlbackup do normal i/o, not aio */ - if (type == OS_FILE_READ) { - ret = os_file_read(node->handle, buf, offset, len); + if (req_type.is_read()) { + + err = os_file_read(req_type, node->handle, buf, offset, len); + } else { - ut_ad(!srv_read_only_mode); - ret = os_file_write(name, node->handle, buf, - offset, len); + + ut_ad(!srv_read_only_mode + || fsp_is_system_temporary(page_id.space())); + + err = os_file_write( + req_type, node->name, node->handle, buf, offset, len); } #else /* Queue the aio request */ - ret = os_aio(type, is_log, mode | wake_later, name, node->handle, buf, - offset, len, zip_size ? zip_size : UNIV_PAGE_SIZE, node, - message, write_size); + err = os_aio( + req_type, + mode, node->name, node->handle, buf, offset, len, + fsp_is_system_temporary(page_id.space()) + ? false : srv_read_only_mode, + node, message, NULL); #endif /* UNIV_HOTBACKUP */ + if (err == DB_IO_NO_PUNCH_HOLE) { + + err = DB_SUCCESS; + + if (node->punch_hole) { + + ib::warn() + << "Punch hole failed for '" + << node->name << "'"; + } + + fil_no_punch_hole(node); + } + + /* We an try to recover the page from the double write buffer if + the decompression fails or the page is corrupt. */ + + ut_a(req_type.is_dblwr_recover() || err == DB_SUCCESS); - if (mode == OS_AIO_SYNC) { + if (sync) { /* The i/o operation is already completed when we return from os_aio: */ mutex_enter(&fil_system->mutex); - fil_node_complete_io(node, fil_system, type); + fil_node_complete_io(node, fil_system, req_type); mutex_exit(&fil_system->mutex); ut_ad(fil_validate_skip()); } - if (!ret) { - return(DB_OUT_OF_FILE_SPACE); - } - - return(DB_SUCCESS); + return(err); } #ifndef UNIV_HOTBACKUP @@ -5999,41 +5632,23 @@ Waits for an aio operation to complete. This function is used to write the handler for completed requests. The aio array of pending requests is divided into segments (see os0file.cc for more info). The thread specifies which segment it wants to wait for. 
*/ -UNIV_INTERN void fil_aio_wait( /*=========*/ ulint segment) /*!< in: the number of the segment in the aio array to wait for */ { - ibool ret; - fil_node_t* fil_node; + fil_node_t* node; + IORequest type; void* message; - ulint type; ut_ad(fil_validate_skip()); - if (srv_use_native_aio) { - srv_set_io_thread_op_info(segment, "native aio handle"); -#ifdef WIN_ASYNC_IO - ret = os_aio_windows_handle( - segment, 0, &fil_node, &message, &type); -#elif defined(LINUX_NATIVE_AIO) - ret = os_aio_linux_handle( - segment, &fil_node, &message, &type); -#else - ut_error; - ret = 0; /* Eliminate compiler warning */ -#endif /* WIN_ASYNC_IO */ - } else { - srv_set_io_thread_op_info(segment, "simulated aio handle"); + dberr_t err = os_aio_handler(segment, &node, &message, &type); - ret = os_aio_simulated_handle( - segment, &fil_node, &message, &type); - } + ut_a(err == DB_SUCCESS); - ut_a(ret); - if (fil_node == NULL) { + if (node == NULL) { ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS); return; } @@ -6042,7 +5657,7 @@ fil_aio_wait( mutex_enter(&fil_system->mutex); - fil_node_complete_io(fil_node, fil_system, type); + fil_node_complete_io(node, fil_system, type); mutex_exit(&fil_system->mutex); @@ -6054,36 +5669,48 @@ fil_aio_wait( deadlocks in the i/o system. We keep tablespace 0 data files always open, and use a special i/o thread to serve insert buffer requests. */ - if (fil_node->space->purpose == FIL_TABLESPACE) { + switch (node->space->purpose) { + case FIL_TYPE_TABLESPACE: + case FIL_TYPE_TEMPORARY: + case FIL_TYPE_IMPORT: srv_set_io_thread_op_info(segment, "complete io for buf page"); - buf_page_io_complete(static_cast(message)); - } else { + + /* async single page writes from the dblwr buffer don't have + access to the page */ + if (message != NULL) { + buf_page_io_complete(static_cast(message)); + } + return; + case FIL_TYPE_LOG: srv_set_io_thread_op_info(segment, "complete io for log"); log_io_complete(static_cast(message)); + return; } + + ut_ad(0); } #endif /* UNIV_HOTBACKUP */ /**********************************************************************//** Flushes to disk possible writes cached by the OS. If the space does not exist or is being dropped, does not do anything. 
*/ -UNIV_INTERN void fil_flush( /*======*/ ulint space_id) /*!< in: file space id (this can be a group of log files or a tablespace of the database) */ { - fil_space_t* space; fil_node_t* node; os_file_t file; - mutex_enter(&fil_system->mutex); - space = fil_space_get_by_id(space_id); + fil_space_t* space = fil_space_get_by_id(space_id); - if (!space || space->stop_new_ops) { + if (space == NULL + || space->purpose == FIL_TYPE_TEMPORARY + || space->stop_new_ops + || space->is_being_truncated) { mutex_exit(&fil_system->mutex); return; @@ -6117,26 +5744,32 @@ fil_flush( node != NULL; node = UT_LIST_GET_NEXT(chain, node)) { - ib_int64_t old_mod_counter = node->modification_counter; + int64_t old_mod_counter = node->modification_counter; if (old_mod_counter <= node->flush_counter) { continue; } - ut_a(node->open); + ut_a(node->is_open); - if (space->purpose == FIL_TABLESPACE) { + switch (space->purpose) { + case FIL_TYPE_TEMPORARY: + ut_ad(0); // we already checked for this + case FIL_TYPE_TABLESPACE: + case FIL_TYPE_IMPORT: fil_n_pending_tablespace_flushes++; - } else { + break; + case FIL_TYPE_LOG: fil_n_pending_log_flushes++; fil_n_log_flushes++; + break; } -#ifdef __WIN__ +#ifdef _WIN32 if (node->is_raw_disk) { goto skip_flush; } -#endif /* __WIN__ */ +#endif /* _WIN32 */ retry: if (node->n_pending_flushes > 0) { /* We want to avoid calling os_file_flush() on @@ -6144,8 +5777,7 @@ retry: not know what bugs OS's may contain in file i/o */ - ib_int64_t sig_count = - os_event_reset(node->sync_event); + int64_t sig_count = os_event_reset(node->sync_event); mutex_exit(&fil_system->mutex); @@ -6161,7 +5793,7 @@ retry: goto retry; } - ut_a(node->open); + ut_a(node->is_open); file = node->handle; node->n_pending_flushes++; @@ -6184,17 +5816,24 @@ skip_flush: space->is_in_unflushed_spaces = false; UT_LIST_REMOVE( - unflushed_spaces, fil_system->unflushed_spaces, space); } } - if (space->purpose == FIL_TABLESPACE) { + switch (space->purpose) { + case FIL_TYPE_TEMPORARY: + ut_ad(0); // we already checked for this + case FIL_TYPE_TABLESPACE: + case FIL_TYPE_IMPORT: fil_n_pending_tablespace_flushes--; - } else { + continue; + case FIL_TYPE_LOG: fil_n_pending_log_flushes--; + continue; } + + ut_ad(0); } space->n_pending_flushes--; @@ -6202,19 +5841,18 @@ skip_flush: mutex_exit(&fil_system->mutex); } -/**********************************************************************//** -Flushes to disk the writes in file spaces of the given type possibly cached by -the OS. */ -UNIV_INTERN +/** Flush to disk the writes in file spaces of the given type +possibly cached by the OS. +@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_LOG */ void fil_flush_file_spaces( -/*==================*/ - ulint purpose) /*!< in: FIL_TABLESPACE, FIL_LOG */ + fil_type_t purpose) { fil_space_t* space; ulint* space_ids; ulint n_space_ids; - ulint i; + + ut_ad(purpose == FIL_TYPE_TABLESPACE || purpose == FIL_TYPE_LOG); mutex_enter(&fil_system->mutex); @@ -6230,7 +5868,7 @@ fil_flush_file_spaces( on a space that was just removed from the list by fil_flush(). Thus, the space could be dropped and the memory overwritten. 
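fil_flush() above avoids issuing overlapping flush system calls on one file: when another thread already has a flush in flight it waits on the node's sync event and re-checks, and it skips the call entirely when the flush counter already covers the most recent modification. A standalone sketch of that "one flusher at a time, late arrivals wait and may piggyback" idea using std::condition_variable (plain counters stand in for modification_counter/flush_counter, and do_fsync() is only a placeholder for os_file_flush()):

#include <condition_variable>
#include <mutex>

void do_fsync() {}      /* placeholder for the real flush system call */

struct flushable_file {
    std::mutex              m;
    std::condition_variable cv;
    bool                    flush_in_progress    = false;
    long                    modification_counter = 0;  /* last write */
    long                    flush_counter        = 0;  /* last flushed write */
};

void flush(flushable_file& f)
{
    std::unique_lock<std::mutex> lock(f.m);

    /* Wait while someone else is flushing; their flush may already
    cover our writes, which the re-check below will notice. */
    f.cv.wait(lock, [&] { return !f.flush_in_progress; });

    if (f.flush_counter >= f.modification_counter) {
        return;                         /* nothing new to flush */
    }

    long target = f.modification_counter;
    f.flush_in_progress = true;
    lock.unlock();

    do_fsync();                         /* no mutex held across the call */

    lock.lock();
    f.flush_in_progress = false;
    if (f.flush_counter < target) {
        f.flush_counter = target;
    }
    f.cv.notify_all();
}

int main()
{
    flushable_file f;
    f.modification_counter = 1;
    flush(f);
    return f.flush_counter == 1 ? 0 : 1;
}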
*/ space_ids = static_cast( - mem_alloc(n_space_ids * sizeof *space_ids)); + ut_malloc_nokey(n_space_ids * sizeof(*space_ids))); n_space_ids = 0; @@ -6238,7 +5876,9 @@ fil_flush_file_spaces( space; space = UT_LIST_GET_NEXT(unflushed_spaces, space)) { - if (space->purpose == purpose && !space->stop_new_ops) { + if (space->purpose == purpose + && !space->stop_new_ops + && !space->is_being_truncated) { space_ids[n_space_ids++] = space->id; } @@ -6248,68 +5888,76 @@ fil_flush_file_spaces( /* Flush the spaces. It will not hurt to call fil_flush() on a non-existing space id. */ - for (i = 0; i < n_space_ids; i++) { + for (ulint i = 0; i < n_space_ids; i++) { fil_flush(space_ids[i]); } - mem_free(space_ids); + ut_free(space_ids); } -/** Functor to validate the space list. */ +/** Functor to validate the file node list of a tablespace. */ struct Check { + /** Total size of file nodes visited so far */ + ulint size; + /** Total number of open files visited so far */ + ulint n_open; + + /** Constructor */ + Check() : size(0), n_open(0) {} + + /** Visit a file node + @param[in] elem file node to visit */ void operator()(const fil_node_t* elem) { - ut_a(elem->open || !elem->n_pending); + ut_a(elem->is_open || !elem->n_pending); + n_open += elem->is_open; + size += elem->size; + } + + /** Validate a tablespace. + @param[in] space tablespace to validate + @return number of open file nodes */ + static ulint validate(const fil_space_t* space) + { + ut_ad(mutex_own(&fil_system->mutex)); + Check check; + ut_list_validate(space->chain, check); + ut_a(space->size == check.size); + return(check.n_open); } }; /******************************************************************//** Checks the consistency of the tablespace cache. -@return TRUE if ok */ -UNIV_INTERN -ibool +@return true if ok */ +bool fil_validate(void) /*==============*/ { fil_space_t* space; fil_node_t* fil_node; ulint n_open = 0; - ulint i; mutex_enter(&fil_system->mutex); /* Look for spaces in the hash table */ - for (i = 0; i < hash_get_n_cells(fil_system->spaces); i++) { + for (ulint i = 0; i < hash_get_n_cells(fil_system->spaces); i++) { for (space = static_cast( HASH_GET_FIRST(fil_system->spaces, i)); space != 0; space = static_cast( - HASH_GET_NEXT(hash, space))) { - - UT_LIST_VALIDATE( - chain, fil_node_t, space->chain, Check()); + HASH_GET_NEXT(hash, space))) { - for (fil_node = UT_LIST_GET_FIRST(space->chain); - fil_node != 0; - fil_node = UT_LIST_GET_NEXT(chain, fil_node)) { - - if (fil_node->n_pending > 0) { - ut_a(fil_node->open); - } - - if (fil_node->open) { - n_open++; - } - } + n_open += Check::validate(space); } } ut_a(fil_system->n_open == n_open); - UT_LIST_CHECK(LRU, fil_node_t, fil_system->LRU); + UT_LIST_CHECK(fil_system->LRU); for (fil_node = UT_LIST_GET_FIRST(fil_system->LRU); fil_node != 0; @@ -6317,20 +5965,19 @@ fil_validate(void) ut_a(fil_node->n_pending == 0); ut_a(!fil_node->being_extended); - ut_a(fil_node->open); + ut_a(fil_node->is_open); ut_a(fil_space_belongs_in_lru(fil_node->space)); } mutex_exit(&fil_system->mutex); - return(TRUE); + return(true); } /********************************************************************//** -Returns TRUE if file address is undefined. -@return TRUE if undefined */ -UNIV_INTERN -ibool +Returns true if file address is undefined. 
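fil_flush_file_spaces() above cannot iterate unflushed_spaces while invoking fil_flush(), since fil_flush() acquires fil_system->mutex itself and may remove the space from that list, so the ids are first copied into a plain array under the mutex and each flush then looks the space up again by id. The same snapshot-then-operate pattern as a standalone sketch:

#include <mutex>
#include <set>
#include <vector>

std::mutex         sys_mutex;
std::set<unsigned> unflushed_ids;    /* protected by sys_mutex */

// Illustrative sketch: flush_one() re-checks existence under the mutex,
// so flushing an id that was removed in the meantime is harmless.
void flush_one(unsigned id)
{
    std::lock_guard<std::mutex> guard(sys_mutex);
    unflushed_ids.erase(id);        /* stand-in for the real flush work */
}

void flush_all()
{
    std::vector<unsigned> snapshot;
    {
        /* Copy the ids while holding the mutex ... */
        std::lock_guard<std::mutex> guard(sys_mutex);
        snapshot.assign(unflushed_ids.begin(), unflushed_ids.end());
    }
    /* ... and work from the copy once it is released, so the set can
    change underneath without invalidating our iteration. */
    for (unsigned id : snapshot) {
        flush_one(id);
    }
}

int main()
{
    unflushed_ids = {1, 2, 3};
    flush_all();
    return unflushed_ids.empty() ? 0 : 1;
}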
+@return true if undefined */ +bool fil_addr_is_null( /*=============*/ fil_addr_t addr) /*!< in: address */ @@ -6340,8 +5987,7 @@ fil_addr_is_null( /********************************************************************//** Get the predecessor of a file page. -@return FIL_PAGE_PREV */ -UNIV_INTERN +@return FIL_PAGE_PREV */ ulint fil_page_get_prev( /*==============*/ @@ -6352,8 +5998,7 @@ fil_page_get_prev( /********************************************************************//** Get the successor of a file page. -@return FIL_PAGE_NEXT */ -UNIV_INTERN +@return FIL_PAGE_NEXT */ ulint fil_page_get_next( /*==============*/ @@ -6364,7 +6009,6 @@ fil_page_get_next( /*********************************************************************//** Sets the file page type. */ -UNIV_INTERN void fil_page_set_type( /*==============*/ @@ -6376,35 +6020,33 @@ fil_page_set_type( mach_write_to_2(page + FIL_PAGE_TYPE, type); } -/*********************************************************************//** -Gets the file page type. -@return type; NOTE that if the type has not been written to page, the -return value not defined */ -UNIV_INTERN -ulint -fil_page_get_type( -/*==============*/ - const byte* page) /*!< in: file page */ +/** Reset the page type. +Data files created before MySQL 5.1 may contain garbage in FIL_PAGE_TYPE. +In MySQL 3.23.53, only undo log pages and index pages were tagged. +Any other pages were written with uninitialized bytes in FIL_PAGE_TYPE. +@param[in] page_id page number +@param[in,out] page page with invalid FIL_PAGE_TYPE +@param[in] type expected page type +@param[in,out] mtr mini-transaction */ +void +fil_page_reset_type( + const page_id_t& page_id, + byte* page, + ulint type, + mtr_t* mtr) { - ut_ad(page); - - return(mach_read_from_2(page + FIL_PAGE_TYPE)); + ib::info() + << "Resetting invalid page " << page_id << " type " + << fil_page_get_type(page) << " to " << type << "."; + mlog_write_ulint(page + FIL_PAGE_TYPE, type, MLOG_2BYTES, mtr); } /****************************************************************//** Closes the tablespace memory cache. */ -UNIV_INTERN void fil_close(void) /*===========*/ { - fil_space_crypt_cleanup(); - -#ifndef UNIV_HOTBACKUP - /* The mutex should already have been freed. */ - ut_ad(fil_system->mutex.magic_n == 0); -#endif /* !UNIV_HOTBACKUP */ - hash_table_free(fil_system->spaces); hash_table_free(fil_system->name_hash); @@ -6413,8 +6055,9 @@ fil_close(void) ut_a(UT_LIST_GET_LEN(fil_system->unflushed_spaces) == 0); ut_a(UT_LIST_GET_LEN(fil_system->space_list) == 0); - mem_free(fil_system); + mutex_free(&fil_system->mutex); + ut_free(fil_system); fil_system = NULL; } @@ -6455,15 +6098,15 @@ struct fil_iterator_t { /********************************************************************//** TODO: This can be made parallel trivially by chunking up the file and creating -a callback per thread. . Main benefit will be to use multiple CPUs for +a callback per thread. Main benefit will be to use multiple CPUs for checksums and compressed tables. We have to do compressed tables block by block right now. Secondly we need to decompress/compress and copy too much of data. These are CPU intensive. Iterate over all the pages in the tablespace. 
-@param iter - Tablespace iterator -@param block - block to use for IO -@param callback - Callback to inspect and update page contents +@param iter Tablespace iterator +@param block block to use for IO +@param callback Callback to inspect and update page contents @retval DB_SUCCESS or error code */ static dberr_t @@ -6480,22 +6123,32 @@ fil_iterate( ut_ad(!srv_read_only_mode); - /* TODO: For compressed tables we do a lot of useless - copying for non-index pages. Unfortunately, it is - required by buf_zip_decompress() */ + /* For old style compressed tables we do a lot of useless copying + for non-index pages. Unfortunately, it is required by + buf_zip_decompress() */ + + ulint read_type = IORequest::READ; + ulint write_type = IORequest::WRITE; for (offset = iter.start; offset < iter.end; offset += n_bytes) { - byte* io_buffer = iter.io_buffer; + byte* io_buffer = iter.io_buffer; block->frame = io_buffer; - if (callback.get_zip_size() > 0) { + if (callback.get_page_size().is_compressed()) { page_zip_des_init(&block->page.zip); page_zip_set_size(&block->page.zip, iter.page_size); + + block->page.size.copy_from( + page_size_t(iter.page_size, + univ_page_size.logical(), + true)); + block->page.zip.data = block->frame + UNIV_PAGE_SIZE; ut_d(block->page.zip.m_external = true); - ut_ad(iter.page_size == callback.get_zip_size()); + ut_ad(iter.page_size + == callback.get_page_size().physical()); /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; @@ -6513,6 +6166,9 @@ fil_iterate( ut_ad(n_bytes > 0); ut_ad(!(n_bytes % iter.page_size)); + dberr_t err; + IORequest read_request(read_type); + byte* readptr = io_buffer; byte* writeptr = io_buffer; bool encrypted = false; @@ -6527,11 +6183,15 @@ fil_iterate( writeptr = iter.crypt_io_buffer; } - if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { + err = os_file_read( + read_request, iter.file, readptr, offset, + (ulint) n_bytes); + + if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); + ib::error() << "os_file_read() failed"; - return(DB_IO_ERROR); + return(err); } bool updated = false; @@ -6540,8 +6200,7 @@ fil_iterate( bool decrypted = false; for (ulint i = 0; i < n_pages_read; ++i) { - ulint size = iter.page_size; - dberr_t err = DB_SUCCESS; + ulint size = iter.page_size; byte* src = (readptr + (i * size)); byte* dst = (io_buffer + (i * size)); @@ -6556,7 +6215,7 @@ fil_iterate( decrypted = fil_space_decrypt( iter.crypt_data, dst, //dst - iter.page_size, + callback.get_page_size(), src, // src &err); // src @@ -6568,7 +6227,7 @@ fil_iterate( updated = true; } else { /* TODO: remove unnecessary memcpy's */ - memcpy(dst, src, size); + memcpy(dst, src, iter.page_size); } } @@ -6579,7 +6238,8 @@ fil_iterate( updated = true; } - buf_block_set_file_page(block, space_id, page_no++); + buf_block_set_file_page( + block, page_id_t(space_id, page_no++)); if ((err = callback(page_off, block)) != DB_SUCCESS) { @@ -6626,12 +6286,12 @@ fil_iterate( offset, lsn, src, - iter.page_size == UNIV_PAGE_SIZE ? 0 : iter.page_size, + callback.get_page_size(), dest); if (tmp == src) { /* TODO: remove unnecessary memcpy's */ - memcpy(dest, src, size); + memcpy(dest, src, iter.page_size); } updated = true; @@ -6641,15 +6301,29 @@ fil_iterate( block->frame += iter.page_size; } - /* A page was updated in the set, write back to disk. */ + IORequest write_request(write_type); + + /* A page was updated in the set, write back to disk. 
+ Note: We don't have the compression algorithm, we write + out the imported file as uncompressed. */ + if (updated - && !os_file_write( + && (err = os_file_write( + write_request, iter.filepath, iter.file, writeptr, - offset, (ulint) n_bytes)) { + offset, (ulint) n_bytes)) != DB_SUCCESS) { + + /* This is not a hard error */ + if (err == DB_IO_NO_PUNCH_HOLE) { - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); + err = DB_SUCCESS; + write_type &= ~IORequest::PUNCH_HOLE; - return(DB_IO_ERROR); + } else { + ib::error() << "os_file_write() failed"; + + return(err); + } } } @@ -6658,11 +6332,10 @@ fil_iterate( /********************************************************************//** Iterate over all the pages in the tablespace. -@param table - the table definiton in the server -@param n_io_buffers - number of blocks to read and write together -@param callback - functor that will do the page updates -@return DB_SUCCESS or error code */ -UNIV_INTERN +@param table the table definiton in the server +@param n_io_buffers number of blocks to read and write together +@param callback functor that will do the page updates +@return DB_SUCCESS or error code */ dberr_t fil_tablespace_iterate( /*===================*/ @@ -6673,6 +6346,7 @@ fil_tablespace_iterate( dberr_t err; os_file_t file; char* filepath; + bool success; ut_a(n_io_buffers > 0); ut_ad(!srv_read_only_mode); @@ -6680,49 +6354,51 @@ fil_tablespace_iterate( DBUG_EXECUTE_IF("ib_import_trigger_corruption_1", return(DB_CORRUPTION);); + /* Make sure the data_dir_path is set. */ + dict_get_and_save_data_dir_path(table, false); + if (DICT_TF_HAS_DATA_DIR(table->flags)) { - dict_get_and_save_data_dir_path(table, false); ut_a(table->data_dir_path); - filepath = os_file_make_remote_pathname( - table->data_dir_path, table->name, "ibd"); + filepath = fil_make_filepath( + table->data_dir_path, table->name.m_name, IBD, true); } else { - filepath = fil_make_ibd_name(table->name, false); + filepath = fil_make_filepath( + NULL, table->name.m_name, IBD, false); } - { - ibool success; + if (filepath == NULL) { + return(DB_OUT_OF_MEMORY); + } - file = os_file_create_simple_no_error_handling( - innodb_file_data_key, filepath, - OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE); + file = os_file_create_simple_no_error_handling( + innodb_data_file_key, filepath, + OS_FILE_OPEN, OS_FILE_READ_WRITE, srv_read_only_mode, &success); - DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", - { - static bool once; + DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", + { + static bool once; - if (!once || ut_rnd_interval(0, 10) == 5) { - once = true; - success = FALSE; - os_file_close(file); - } - }); + if (!once || ut_rnd_interval(0, 10) == 5) { + once = true; + success = false; + os_file_close(file); + } + }); - if (!success) { - /* The following call prints an error message */ - os_file_get_last_error(true); + if (!success) { + /* The following call prints an error message */ + os_file_get_last_error(true); - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to import a tablespace, but could not " - "open the tablespace file %s", filepath); + ib::error() << "Trying to import a tablespace, but could not" + " open the tablespace file " << filepath; - mem_free(filepath); + ut_free(filepath); - return(DB_TABLESPACE_NOT_FOUND); + return(DB_TABLESPACE_NOT_FOUND); - } else { - err = DB_SUCCESS; - } + } else { + err = DB_SUCCESS; } callback.set_file(filepath, file); @@ -6731,27 +6407,33 @@ fil_tablespace_iterate( ut_a(file_size != (os_offset_t) -1); /* The block we will use for every 
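fil_iterate() above reads the tablespace in batches of n_io_buffers pages, hands each page to the import callback, and writes a batch back to disk only when the callback (or the decryption/LSN fixup) actually changed something. The following is a much simplified, self-contained analogue of that read/modify/conditionally-write loop, using plain std::fstream instead of os_file_read()/os_file_write() and ignoring compression, encryption and batching (a sketch of the pattern, not the InnoDB code):

#include <cstddef>
#include <fstream>
#include <functional>
#include <vector>

/* Walk a file page by page, let a callback edit each page in place, and
   write a page back only if the callback reports that it modified it. */
bool iterate_pages(const char* path, std::size_t page_size,
                   const std::function<bool(std::vector<char>&)>& callback)
{
    std::fstream f(path, std::ios::in | std::ios::out | std::ios::binary);
    if (!f) {
        return false;
    }

    std::vector<char> page(page_size);

    for (std::streamoff offset = 0;; offset += page_size) {
        f.seekg(offset);
        if (!f.read(page.data(), static_cast<std::streamsize>(page_size))) {
            break;                        /* EOF or short read: stop */
        }
        if (callback(page)) {             /* page was updated */
            f.seekp(offset);
            f.write(page.data(), static_cast<std::streamsize>(page_size));
        }
    }

    return true;
}

A caller passes a callback that returns true only when it changed the page, which is what keeps untouched pages from being rewritten; the real fil_iterate() applies the same idea to whole batches via its "updated" flag.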
physical page */ - buf_block_t block; + buf_block_t* block; + + block = reinterpret_cast(ut_zalloc_nokey(sizeof(*block))); - memset(&block, 0x0, sizeof(block)); + mutex_create(LATCH_ID_BUF_BLOCK_MUTEX, &block->mutex); /* Allocate a page to read in the tablespace header, so that we - can determine the page size and zip_size (if it is compressed). + can determine the page size and zip size (if it is compressed). We allocate an extra page in case it is a compressed table. One page is to ensure alignement. */ - void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE); + void* page_ptr = ut_malloc_nokey(3 * UNIV_PAGE_SIZE); byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); - fil_buf_block_init(&block, page); + fil_buf_block_init(block, page); /* Read the first page and determine the page and zip size. */ - if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { + IORequest request(IORequest::READ); + + err = os_file_read(request, file, page, 0, UNIV_PAGE_SIZE); + + if (err != DB_SUCCESS) { err = DB_IO_ERROR; - } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { + } else if ((err = callback.init(file_size, block)) == DB_SUCCESS) { fil_iterator_t iter; iter.file = file; @@ -6760,10 +6442,10 @@ fil_tablespace_iterate( iter.filepath = filepath; iter.file_size = file_size; iter.n_io_buffers = n_io_buffers; - iter.page_size = callback.get_page_size(); + iter.page_size = callback.get_page_size().physical(); ulint crypt_data_offset = fsp_header_get_crypt_offset( - callback.get_zip_size(), 0); + callback.get_page_size(), 0); /* read (optional) crypt data */ iter.crypt_data = fil_space_read_crypt_data( @@ -6772,22 +6454,15 @@ fil_tablespace_iterate( /* Compressed pages can't be optimised for block IO for now. We do the IMPORT page by page. */ - if (callback.get_zip_size() > 0) { + if (callback.get_page_size().is_compressed()) { iter.n_io_buffers = 1; - ut_a(iter.page_size == callback.get_zip_size()); - } - - /** If tablespace is encrypted, it needs extra buffers */ - if (iter.crypt_data != NULL) { - /* decrease io buffers so that memory - * consumption doesnt double - * note: the +1 is to avoid n_io_buffers getting down to 0 */ - iter.n_io_buffers = (iter.n_io_buffers + 1) / 2; + ut_a(iter.page_size + == callback.get_page_size().physical()); } /** Add an extra page for compressed page scratch area. 
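fil_tablespace_iterate() above allocates three pages worth of memory and rounds the resulting pointer up with ut_align() so that the buffer actually used for reading the header starts on a page boundary, which is what the alignment comment refers to. A standalone sketch of that over-allocate-and-align trick (std::malloc plus pointer arithmetic; the real code uses ut_malloc_nokey()/ut_align() and keeps the raw pointer, page_ptr, so it can be freed afterwards):

#include <cassert>
#include <cstdint>
#include <cstdlib>

/* A buffer aligned to a given power-of-two boundary: keep the pointer
   returned by malloc() for free(), and hand out the rounded-up address. */
struct aligned_buf {
    void* base;       /* pass this to std::free() */
    void* aligned;    /* first address >= base that is a multiple of alignment */
};

static aligned_buf alloc_aligned(std::size_t size, std::size_t alignment)
{
    assert((alignment & (alignment - 1)) == 0);       /* power of two */

    aligned_buf buf;
    buf.base = std::malloc(size + alignment);
    if (buf.base == nullptr) {
        buf.aligned = nullptr;
        return buf;
    }
    std::uintptr_t p = reinterpret_cast<std::uintptr_t>(buf.base);
    buf.aligned = reinterpret_cast<void*>(
        (p + alignment - 1) & ~(static_cast<std::uintptr_t>(alignment) - 1));
    return buf;
}

int main()
{
    aligned_buf buf = alloc_aligned(16384, 16384);    /* one 16 KiB page */
    assert(buf.aligned != nullptr
           && reinterpret_cast<std::uintptr_t>(buf.aligned) % 16384 == 0);
    std::free(buf.base);
    return 0;
}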
*/ - void* io_buffer = mem_alloc( + void* io_buffer = ut_malloc_nokey( (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); iter.io_buffer = static_cast( @@ -6795,79 +6470,72 @@ fil_tablespace_iterate( void* crypt_io_buffer = NULL; if (iter.crypt_data != NULL) { - crypt_io_buffer = mem_alloc( + crypt_io_buffer = ut_malloc_nokey( iter.n_io_buffers * UNIV_PAGE_SIZE); iter.crypt_io_buffer = static_cast( crypt_io_buffer); } - err = fil_iterate(iter, &block, callback); + err = fil_iterate(iter, block, callback); - mem_free(io_buffer); - - if (iter.crypt_data != NULL) { - mem_free(crypt_io_buffer); - iter.crypt_io_buffer = NULL; - fil_space_destroy_crypt_data(&iter.crypt_data); - } + ut_free(io_buffer); } if (err == DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk"); + ib::info() << "Sync to disk"; if (!os_file_flush(file)) { - ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!"); + ib::info() << "os_file_flush() failed!"; err = DB_IO_ERROR; } else { - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!"); + ib::info() << "Sync to disk - done!"; } } os_file_close(file); - mem_free(page_ptr); - mem_free(filepath); + ut_free(page_ptr); + ut_free(filepath); + + mutex_free(&block->mutex); + + ut_free(block); return(err); } -/** -Set the tablespace compressed table size. -@return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ -dberr_t -PageCallback::set_zip_size(const buf_frame_t* page) UNIV_NOTHROW +/** Set the tablespace table size. +@param[in] page a page belonging to the tablespace */ +void +PageCallback::set_page_size( + const buf_frame_t* page) UNIV_NOTHROW { - m_zip_size = fsp_header_get_zip_size(page); - - if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) { - return(DB_CORRUPTION); - } - - return(DB_SUCCESS); + m_page_size.copy_from(fsp_header_get_page_size(page)); } /********************************************************************//** Delete the tablespace file and any related files like .cfg. -This should not be called for temporary tables. */ -UNIV_INTERN +This should not be called for temporary tables. +@param[in] ibd_filepath File path of the IBD tablespace */ void fil_delete_file( /*============*/ - const char* ibd_name) /*!< in: filepath of the ibd - tablespace */ + const char* ibd_filepath) { /* Force a delete of any stale .ibd files that are lying around. */ - ib_logf(IB_LOG_LEVEL_INFO, "Deleting %s", ibd_name); - - os_file_delete_if_exists(innodb_file_data_key, ibd_name); - - char* cfg_name = fil_make_cfg_name(ibd_name); + ib::info() << "Deleting " << ibd_filepath; - os_file_delete_if_exists(innodb_file_data_key, cfg_name); + os_file_delete_if_exists(innodb_data_file_key, ibd_filepath, NULL); - mem_free(cfg_name); + char* cfg_filepath = fil_make_filepath( + ibd_filepath, NULL, CFG, false); + if (cfg_filepath != NULL) { + os_file_delete_if_exists( + innodb_data_file_key, cfg_filepath, NULL); + ut_free(cfg_filepath); + } } /** @@ -6875,7 +6543,6 @@ Iterate over all the spaces in the space list and fetch the tablespace names. It will return a copy of the name that must be freed by the caller using: delete[]. @return DB_SUCCESS if all OK. 
*/ -UNIV_INTERN dberr_t fil_get_space_names( /*================*/ @@ -6891,12 +6558,12 @@ fil_get_space_names( space != NULL; space = UT_LIST_GET_NEXT(space_list, space)) { - if (space->purpose == FIL_TABLESPACE) { + if (space->purpose == FIL_TYPE_TABLESPACE) { ulint len; char* name; - len = strlen(space->name); - name = new(std::nothrow) char[len + 1]; + len = ::strlen(space->name); + name = UT_NEW_ARRAY_NOKEY(char, len + 1); if (name == 0) { /* Caller to free elements allocated so far. */ @@ -6916,272 +6583,713 @@ fil_get_space_names( return(err); } +/** Return the next fil_node_t in the current or next fil_space_t. +Once started, the caller must keep calling this until it returns NULL. +fil_space_acquire() and fil_space_release() are invoked here which +blocks a concurrent operation from dropping the tablespace. +@param[in] prev_node Pointer to the previous fil_node_t. +If NULL, use the first fil_space_t on fil_system->space_list. +@return pointer to the next fil_node_t. +@retval NULL if this was the last file node */ +const fil_node_t* +fil_node_next( + const fil_node_t* prev_node) +{ + fil_space_t* space; + const fil_node_t* node = prev_node; + + mutex_enter(&fil_system->mutex); + + if (node == NULL) { + space = UT_LIST_GET_FIRST(fil_system->space_list); + + /* We can trust that space is not NULL because at least the + system tablespace is always present and loaded first. */ + space->n_pending_ops++; + + node = UT_LIST_GET_FIRST(space->chain); + ut_ad(node != NULL); + } else { + space = node->space; + ut_ad(space->n_pending_ops > 0); + node = UT_LIST_GET_NEXT(chain, node); + + if (node == NULL) { + /* Move on to the next fil_space_t */ + space->n_pending_ops--; + space = UT_LIST_GET_NEXT(space_list, space); + + /* Skip spaces that are being dropped or truncated. */ + while (space != NULL + && (space->stop_new_ops + || space->is_being_truncated)) { + space = UT_LIST_GET_NEXT(space_list, space); + } + + if (space != NULL) { + space->n_pending_ops++; + node = UT_LIST_GET_FIRST(space->chain); + ut_ad(node != NULL); + } + } + } + + mutex_exit(&fil_system->mutex); + + return(node); +} + /** Generate redo log for swapping two .ibd files @param[in] old_table old table @param[in] new_table new table @param[in] tmp_name temporary table name @param[in,out] mtr mini-transaction -@return innodb error code */ -UNIV_INTERN -dberr_t +@return whether the operation succeeded */ +bool fil_mtr_rename_log( const dict_table_t* old_table, const dict_table_t* new_table, const char* tmp_name, mtr_t* mtr) { - dberr_t err = DB_SUCCESS; - char* old_path; - - /* If neither table is file-per-table, - there will be no renaming of files. */ - if (old_table->space == TRX_SYS_SPACE - && new_table->space == TRX_SYS_SPACE) { - return(DB_SUCCESS); + const char* old_dir = DICT_TF_HAS_DATA_DIR(old_table->flags) + ? old_table->data_dir_path + : NULL; + const char* new_dir = DICT_TF_HAS_DATA_DIR(new_table->flags) + ? 
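fil_node_next() above walks every file node of every tablespace, incrementing n_pending_ops on the space it is currently handing out so that a concurrent DROP cannot free it, and skipping spaces whose stop_new_ops or is_being_truncated flag is set. A single-threaded sketch of that two-level walk over simplified containers (hypothetical Space/Node types; the pin counter stands in for n_pending_ops, and the real function does all of this under fil_system->mutex, one node per call):

#include <iostream>
#include <list>
#include <string>

struct Node  { std::string file; };

struct Space {
    std::string     name;
    bool            stop_new_ops = false;   /* space is being dropped/truncated */
    int             n_pending    = 0;       /* stands in for n_pending_ops */
    std::list<Node> chain;                  /* data files of this tablespace */
};

/* Visit every node of every space that is not being dropped, keeping the
   space "pinned" (n_pending > 0) while its nodes are handed out. */
static void visit_all_nodes(std::list<Space>& spaces)
{
    for (Space& space : spaces) {
        if (space.stop_new_ops) {
            continue;                        /* skip spaces being dropped */
        }
        ++space.n_pending;                   /* pin: blocks a concurrent drop */
        for (const Node& node : space.chain) {
            std::cout << space.name << ": " << node.file << '\n';
        }
        --space.n_pending;                   /* unpin */
    }
}

int main()
{
    std::list<Space> spaces(2);
    spaces.front().name = "ibdata1";
    spaces.front().chain.push_back({"./ibdata1"});
    spaces.back().name = "test/t1";
    spaces.back().chain.push_back({"./test/t1.ibd"});
    visit_all_nodes(spaces);
    return 0;
}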
new_table->data_dir_path + : NULL; + + char* old_path = fil_make_filepath( + new_dir, old_table->name.m_name, IBD, false); + char* new_path = fil_make_filepath( + new_dir, new_table->name.m_name, IBD, false); + char* tmp_path = fil_make_filepath( + old_dir, tmp_name, IBD, false); + + if (!old_path || !new_path || !tmp_path) { + ut_free(old_path); + ut_free(new_path); + ut_free(tmp_path); + return(false); } - if (DICT_TF_HAS_DATA_DIR(old_table->flags)) { - old_path = os_file_make_remote_pathname( - old_table->data_dir_path, old_table->name, "ibd"); - } else { - old_path = fil_make_ibd_name(old_table->name, false); + if (!is_system_tablespace(old_table->space)) { + fil_name_write_rename( + old_table->space, 0, old_path, tmp_path, mtr); } - if (old_path == NULL) { - return(DB_OUT_OF_MEMORY); + + if (!is_system_tablespace(new_table->space)) { + fil_name_write_rename( + new_table->space, 0, new_path, old_path, mtr); } - if (old_table->space != TRX_SYS_SPACE) { - char* tmp_path; + ut_free(old_path); + ut_free(new_path); + ut_free(tmp_path); + return(true); +} - if (DICT_TF_HAS_DATA_DIR(old_table->flags)) { - tmp_path = os_file_make_remote_pathname( - old_table->data_dir_path, tmp_name, "ibd"); - } - else { - tmp_path = fil_make_ibd_name(tmp_name, false); - } +#ifdef UNIV_DEBUG +/** Check that a tablespace is valid for mtr_commit(). +@param[in] space persistent tablespace that has been changed */ +static +void +fil_space_validate_for_mtr_commit( + const fil_space_t* space) +{ + ut_ad(!mutex_own(&fil_system->mutex)); + ut_ad(space != NULL); + ut_ad(space->purpose == FIL_TYPE_TABLESPACE); + ut_ad(!is_predefined_tablespace(space->id)); + + /* We are serving mtr_commit(). While there is an active + mini-transaction, we should have !space->stop_new_ops. This is + guaranteed by meta-data locks or transactional locks, or + dict_operation_lock (X-lock in DROP, S-lock in purge). + + However, a file I/O thread can invoke change buffer merge + while fil_check_pending_operations() is waiting for operations + to quiesce. This is not a problem, because + ibuf_merge_or_delete_for_page() would call + fil_space_acquire() before mtr_start() and + fil_space_release() after mtr_commit(). This is why + n_pending_ops should not be zero if stop_new_ops is set. */ + ut_ad(!space->stop_new_ops + || space->is_being_truncated /* TRUNCATE sets stop_new_ops */ + || space->n_pending_ops > 0); +} +#endif /* UNIV_DEBUG */ - if (tmp_path == NULL) { - mem_free(old_path); - return(DB_OUT_OF_MEMORY); - } +/** Write a MLOG_FILE_NAME record for a persistent tablespace. +@param[in] space tablespace +@param[in,out] mtr mini-transaction */ +static +void +fil_names_write( + const fil_space_t* space, + mtr_t* mtr) +{ + ut_ad(UT_LIST_GET_LEN(space->chain) == 1); + fil_name_write(space, 0, UT_LIST_GET_FIRST(space->chain), mtr); +} - /* Temp filepath must not exist. */ - err = fil_rename_tablespace_check( - old_table->space, old_path, tmp_path, - dict_table_is_discarded(old_table)); - mem_free(tmp_path); - if (err != DB_SUCCESS) { - mem_free(old_path); - return(err); - } +/** Note that a non-predefined persistent tablespace has been modified +by redo log. 
+@param[in,out] space tablespace */ +void +fil_names_dirty( + fil_space_t* space) +{ + ut_ad(log_mutex_own()); + ut_ad(recv_recovery_is_on()); + ut_ad(log_sys->lsn != 0); + ut_ad(space->max_lsn == 0); + ut_d(fil_space_validate_for_mtr_commit(space)); + + UT_LIST_ADD_LAST(fil_system->named_spaces, space); + space->max_lsn = log_sys->lsn; +} - fil_op_write_log(MLOG_FILE_RENAME, old_table->space, - 0, 0, old_table->name, tmp_name, mtr); - } +/** Write MLOG_FILE_NAME records when a non-predefined persistent +tablespace was modified for the first time since the latest +fil_names_clear(). +@param[in,out] space tablespace +@param[in,out] mtr mini-transaction */ +void +fil_names_dirty_and_write( + fil_space_t* space, + mtr_t* mtr) +{ + ut_ad(log_mutex_own()); + ut_d(fil_space_validate_for_mtr_commit(space)); + ut_ad(space->max_lsn == log_sys->lsn); + + UT_LIST_ADD_LAST(fil_system->named_spaces, space); + fil_names_write(space, mtr); + + DBUG_EXECUTE_IF("fil_names_write_bogus", + { + char bogus_name[] = "./test/bogus file.ibd"; + os_normalize_path_for_win(bogus_name); + fil_name_write( + SRV_LOG_SPACE_FIRST_ID, 0, + bogus_name, mtr); + }); +} - if (new_table->space != TRX_SYS_SPACE) { +/** On a log checkpoint, reset fil_names_dirty_and_write() flags +and write out MLOG_FILE_NAME and MLOG_CHECKPOINT if needed. +@param[in] lsn checkpoint LSN +@param[in] do_write whether to always write MLOG_CHECKPOINT +@return whether anything was written to the redo log +@retval false if no flags were set and nothing written +@retval true if anything was written to the redo log */ +bool +fil_names_clear( + lsn_t lsn, + bool do_write) +{ + mtr_t mtr; - /* Destination filepath must not exist unless this ALTER - TABLE starts and ends with a file_per-table tablespace. */ - if (old_table->space == TRX_SYS_SPACE) { - char* new_path = NULL; + ut_ad(log_mutex_own()); - if (DICT_TF_HAS_DATA_DIR(new_table->flags)) { - new_path = os_file_make_remote_pathname( - new_table->data_dir_path, - new_table->name, "ibd"); - } - else { - new_path = fil_make_ibd_name( - new_table->name, false); - } + if (log_sys->append_on_checkpoint) { + mtr_write_log(log_sys->append_on_checkpoint); + do_write = true; + } - if (new_path == NULL) { - mem_free(old_path); - return(DB_OUT_OF_MEMORY); - } + mtr.start(); - err = fil_rename_tablespace_check( - new_table->space, new_path, old_path, - dict_table_is_discarded(new_table)); - mem_free(new_path); - if (err != DB_SUCCESS) { - mem_free(old_path); - return(err); - } + for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system->named_spaces); + space != NULL; ) { + fil_space_t* next = UT_LIST_GET_NEXT(named_spaces, space); + + ut_ad(space->max_lsn > 0); + if (space->max_lsn < lsn) { + /* The tablespace was last dirtied before the + checkpoint LSN. Remove it from the list, so + that if the tablespace is not going to be + modified any more, subsequent checkpoints will + avoid calling fil_names_write() on it. */ + space->max_lsn = 0; + UT_LIST_REMOVE(fil_system->named_spaces, space); } - fil_op_write_log(MLOG_FILE_RENAME, new_table->space, - 0, 0, new_table->name, old_table->name, mtr); + /* max_lsn is the last LSN where fil_names_dirty_and_write() + was called. If we kept track of "min_lsn" (the first LSN + where max_lsn turned nonzero), we could avoid the + fil_names_write() call if min_lsn > lsn. 
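fil_names_clear(), whose body continues just below, walks fil_system->named_spaces at checkpoint time: every space whose max_lsn is older than the checkpoint LSN is taken off the list so that later checkpoints can skip it, while an MLOG_FILE_NAME record is still written for each space that was on the list. A sketch of that prune-while-iterating pattern on a std::list (hypothetical NamedSpace type; the actual redo logging is only marked with a comment):

#include <cstdint>
#include <list>

typedef std::uint64_t lsn_t;

struct NamedSpace {
    lsn_t max_lsn = 0;    /* last LSN at which this space was marked dirty */
};

/* On a checkpoint at lsn, write a name record for every space still on the
   dirty list and drop the entries that were last dirtied before the
   checkpoint, so future checkpoints no longer revisit them. */
static bool names_clear(std::list<NamedSpace*>& named_spaces, lsn_t lsn)
{
    bool wrote = false;

    for (std::list<NamedSpace*>::iterator it = named_spaces.begin();
         it != named_spaces.end(); ) {
        NamedSpace* space = *it;

        if (space->max_lsn < lsn) {
            space->max_lsn = 0;              /* clean again */
            it = named_spaces.erase(it);
        } else {
            ++it;
        }

        /* ...here the real code calls fil_names_write(space, &mtr)... */
        wrote = true;
    }

    return wrote;
}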
*/ - } + fil_names_write(space, &mtr); + do_write = true; - mem_free(old_path); + space = next; + } - return(err); -} + if (do_write) { + mtr.commit_checkpoint(lsn); + } else { + ut_ad(!mtr.has_modifications()); + } -/****************************************************************//** -Acquire fil_system mutex */ -void -fil_system_enter(void) -/*==================*/ -{ - ut_ad(!mutex_own(&fil_system->mutex)); - mutex_enter(&fil_system->mutex); + return(do_write); } -/****************************************************************//** -Release fil_system mutex */ -void -fil_system_exit(void) +/** Truncate a single-table tablespace. The tablespace must be cached +in the memory cache. +@param space_id space id +@param dir_path directory path +@param tablename the table name in the usual + databasename/tablename format of InnoDB +@param flags tablespace flags +@param trunc_to_default truncate to default size if tablespace + is being newly re-initialized. +@return DB_SUCCESS or error */ +dberr_t +truncate_t::truncate( /*=================*/ + ulint space_id, + const char* dir_path, + const char* tablename, + ulint flags, + bool trunc_to_default) { - ut_ad(mutex_own(&fil_system->mutex)); - mutex_exit(&fil_system->mutex); -} + dberr_t err = DB_SUCCESS; + char* path; + bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags); + ut_a(!is_system_tablespace(space_id)); -/****************************************************************** -Get id of first tablespace or ULINT_UNDEFINED if none */ -UNIV_INTERN -ulint -fil_get_first_space() -/*=================*/ -{ - ulint out_id = ULINT_UNDEFINED; - fil_space_t* space; + if (has_data_dir) { + ut_ad(dir_path != NULL); - mutex_enter(&fil_system->mutex); + path = fil_make_filepath(dir_path, tablename, IBD, true); - space = UT_LIST_GET_FIRST(fil_system->space_list); - if (space != NULL) { - do - { - if (!space->stop_new_ops) { - out_id = space->id; - break; - } - space = UT_LIST_GET_NEXT(space_list, space); - } while (space != NULL); + } else { + path = fil_make_filepath(NULL, tablename, IBD, false); } - mutex_exit(&fil_system->mutex); + if (path == NULL) { + return(DB_OUT_OF_MEMORY); + } - return out_id; -} + mutex_enter(&fil_system->mutex); -/****************************************************************** -Get id of first tablespace that has node or ULINT_UNDEFINED if none */ -UNIV_INTERN -ulint -fil_get_first_space_safe() -/*======================*/ -{ - ulint out_id = ULINT_UNDEFINED; - fil_space_t* space; + fil_space_t* space = fil_space_get_by_id(space_id); - mutex_enter(&fil_system->mutex); + /* The following code must change when InnoDB supports + multiple datafiles per tablespace. */ + ut_a(UT_LIST_GET_LEN(space->chain) == 1); - space = UT_LIST_GET_FIRST(fil_system->space_list); - if (space != NULL) { - do - { - if (!space->stop_new_ops && UT_LIST_GET_LEN(space->chain) > 0) { - out_id = space->id; - break; - } + fil_node_t* node = UT_LIST_GET_FIRST(space->chain); - space = UT_LIST_GET_NEXT(space_list, space); - } while (space != NULL); + if (trunc_to_default) { + space->size = node->size = FIL_IBD_FILE_INITIAL_SIZE; + } + + const bool already_open = node->is_open; + + if (!already_open) { + + bool ret; + + node->handle = os_file_create_simple_no_error_handling( + innodb_data_file_key, path, OS_FILE_OPEN, + OS_FILE_READ_WRITE, + fsp_is_system_temporary(space_id) + ? 
false : srv_read_only_mode, &ret); + + if (!ret) { + ib::error() << "Failed to open tablespace file " + << path << "."; + + ut_free(path); + + return(DB_ERROR); + } + + node->is_open = true; + } + + os_offset_t trunc_size = trunc_to_default + ? FIL_IBD_FILE_INITIAL_SIZE + : space->size; + + const bool success = os_file_truncate( + path, node->handle, trunc_size * UNIV_PAGE_SIZE); + + if (!success) { + ib::error() << "Cannot truncate file " << path + << " in TRUNCATE TABLESPACE."; + err = DB_ERROR; + } + + space->stop_new_ops = false; + space->is_being_truncated = false; + + /* If we opened the file in this function, close it. */ + if (!already_open) { + bool closed = os_file_close(node->handle); + + if (!closed) { + + ib::error() << "Failed to close tablespace file " + << path << "."; + + err = DB_ERROR; + } else { + node->is_open = false; + } } mutex_exit(&fil_system->mutex); - return out_id; + ut_free(path); + + return(err); } -/****************************************************************** -Get id of next tablespace or ULINT_UNDEFINED if none */ -UNIV_INTERN -ulint -fil_get_next_space( -/*===============*/ - ulint id) /*!< in: previous space id */ +/** +Note that the file system where the file resides doesn't support PUNCH HOLE. +Called from AIO handlers when IO returns DB_IO_NO_PUNCH_HOLE +@param[in,out] node Node to set */ +void +fil_no_punch_hole(fil_node_t* node) { - bool found; - fil_space_t* space; - ulint out_id = ULINT_UNDEFINED; + node->punch_hole = false; +} - mutex_enter(&fil_system->mutex); +/** Set the compression type for the tablespace +@param[in] space Space ID of tablespace for which to set +@param[in] algorithm Text representation of the algorithm +@return DB_SUCCESS or error code */ +dberr_t +fil_set_compression( + ulint space_id, + const char* algorithm) +{ + ut_ad(!is_system_or_undo_tablespace(space_id)); + + if (is_shared_tablespace(space_id)) { + + return(DB_IO_NO_PUNCH_HOLE_TABLESPACE); + } + + dberr_t err; + Compression compression; + + if (algorithm == NULL || strlen(algorithm) == 0) { + +#ifndef UNIV_DEBUG + compression.m_type = Compression::NONE; +#else + compression.m_type = static_cast( + srv_debug_compress); + + switch (compression.m_type) { + case Compression::LZ4: + case Compression::NONE: + case Compression::ZLIB: + break; + + default: + ut_error; + } + +#endif /* UNIV_DEBUG */ + + err = DB_SUCCESS; + + } else { + + err = Compression::check(algorithm, &compression); + + ut_ad(err == DB_SUCCESS || err == DB_UNSUPPORTED); + } + + fil_space_t* space = fil_space_get(space_id); - space = fil_space_get_by_id(id); if (space == NULL) { - /* we didn't find it...search for space with space->id > id */ - found = false; - space = UT_LIST_GET_FIRST(fil_system->space_list); + + err = DB_NOT_FOUND; + } else { - /* we found it, take next available space */ - found = true; + + space->compression_type = compression.m_type; + + if (space->compression_type != Compression::NONE + && err == DB_SUCCESS) { + + const fil_node_t* node; + + node = UT_LIST_GET_FIRST(space->chain); + + if (!node->punch_hole) { + + return(DB_IO_NO_PUNCH_HOLE_FS); + } + } } - while ((space = UT_LIST_GET_NEXT(space_list, space)) != NULL) { + return(err); +} - if (!found && space->id <= id) - continue; +/** Get the compression algorithm for a tablespace. 
+@param[in] space_id Space ID to check +@return the compression algorithm */ +Compression::Type +fil_get_compression( + ulint space_id) +{ + fil_space_t* space = fil_space_get(space_id); - if (!space->stop_new_ops && UT_LIST_GET_LEN(space->chain) > 0) { - /* inc reference to prevent drop */ - out_id = space->id; - break; + return(space == NULL ? Compression::NONE : space->compression_type); +} + +/** Build the basic folder name from the path and length provided +@param[in] path pathname (may also include the file basename) +@param[in] len length of the path, in bytes */ +void +Folder::make_path(const char* path, size_t len) +{ + if (is_absolute_path(path)) { + m_folder = mem_strdupl(path, len); + m_folder_len = len; + } + else { + size_t n = 2 + len + strlen(fil_path_to_mysql_datadir); + m_folder = static_cast(ut_malloc_nokey(n)); + m_folder_len = 0; + + if (path != fil_path_to_mysql_datadir) { + /* Put the mysqld datadir into m_folder first. */ + ut_ad(fil_path_to_mysql_datadir[0] != '\0'); + m_folder_len = strlen(fil_path_to_mysql_datadir); + memcpy(m_folder, fil_path_to_mysql_datadir, + m_folder_len); + if (m_folder[m_folder_len - 1] != OS_PATH_SEPARATOR) { + m_folder[m_folder_len++] = OS_PATH_SEPARATOR; + } } + + /* Append the path. */ + memcpy(m_folder + m_folder_len, path, len); + m_folder_len += len; + m_folder[m_folder_len] = '\0'; } - mutex_exit(&fil_system->mutex); + os_normalize_path(m_folder); +} - return out_id; +/** Resolve a relative path in m_folder to an absolute path +in m_abs_path setting m_abs_len. */ +void +Folder::make_abs_path() +{ + my_realpath(m_abs_path, m_folder, MYF(0)); + m_abs_len = strlen(m_abs_path); + + ut_ad(m_abs_len + 1 < sizeof(m_abs_path)); + + /* Folder::related_to() needs a trailing separator. */ + if (m_abs_path[m_abs_len - 1] != OS_PATH_SEPARATOR) { + m_abs_path[m_abs_len] = OS_PATH_SEPARATOR; + m_abs_path[++m_abs_len] = '\0'; + } } -/****************************************************************** -Get id of next tablespace that has node or ULINT_UNDEFINED if none */ +/** Constructor +@param[in] path pathname (may also include the file basename) +@param[in] len length of the path, in bytes */ +Folder::Folder(const char* path, size_t len) +{ + make_path(path, len); + make_abs_path(); +} + +/** Assignment operator +@param[in] folder folder string provided */ +class Folder& +Folder::operator=(const char* path) +{ + ut_free(m_folder); + make_path(path, strlen(path)); + make_abs_path(); + + return(*this); +} + +/** Determine if two folders are equal +@param[in] other folder to compare to +@return whether the folders are equal */ +bool Folder::operator==(const Folder& other) const +{ + return(m_abs_len == other.m_abs_len + && !memcmp(m_abs_path, other.m_abs_path, m_abs_len)); +} + +/** Determine if the left folder is the same or an ancestor of +(contains) the right folder. +@param[in] other folder to compare to +@return whether this is the same or an ancestor of the other folder. */ +bool Folder::operator>=(const Folder& other) const +{ + return(m_abs_len <= other.m_abs_len + && (!memcmp(other.m_abs_path, m_abs_path, m_abs_len))); +} + +/** Determine if the left folder is an ancestor of (contains) +the right folder. +@param[in] other folder to compare to +@return whether this is an ancestor of the other folder */ +bool Folder::operator>(const Folder& other) const +{ + return(m_abs_len < other.m_abs_len + && (!memcmp(other.m_abs_path, m_abs_path, m_abs_len))); +} + +/** Determine if the directory referenced by m_folder exists. 
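The Folder comparison operators above decide whether one directory is the same as, or an ancestor of, another by resolving both to absolute paths that end in a separator and then doing a length-limited prefix comparison. A small standalone illustration with std::string (it assumes the inputs are already normalized absolute paths ending in '/'; the real code gets there via my_realpath() and compares with memcmp()):

#include <iostream>
#include <string>

/* True when parent is the same directory as child or an ancestor of it.
   Both arguments must be normalized absolute paths ending in a separator,
   which is exactly what makes a plain prefix test sufficient. */
static bool contains(const std::string& parent, const std::string& child)
{
    return parent.size() <= child.size()
        && child.compare(0, parent.size(), parent) == 0;
}

int main()
{
    std::cout << contains("/var/lib/mysql/", "/var/lib/mysql/test/")   /* 1 */
              << contains("/var/lib/mysql/", "/var/lib/mysql-files/")  /* 0 */
              << '\n';
    return 0;
}

The trailing separator is what keeps "/var/lib/mysql/" from being treated as an ancestor of "/var/lib/mysql-files/", and it is why Folder::make_abs_path() appends one.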
+@return whether the directory exists */ +bool +Folder::exists() +{ + bool exists; + os_file_type_t type; + +#ifdef _WIN32 + /* Temporarily strip the trailing_separator since it will cause + _stat64() to fail on Windows unless the path is the root of some + drive; like "c:\". _stat64() will fail if it is "c:". */ + size_t len = strlen(m_abs_path); + if (m_abs_path[m_abs_len - 1] == OS_PATH_SEPARATOR + && m_abs_path[m_abs_len - 2] != ':') { + m_abs_path[m_abs_len - 1] = '\0'; + } +#endif /* WIN32 */ + + bool ret = os_file_status(m_abs_path, &exists, &type); + +#ifdef _WIN32 + /* Put the separator back on. */ + if (m_abs_path[m_abs_len - 1] == '\0') { + m_abs_path[m_abs_len - 1] = OS_PATH_SEPARATOR; + } +#endif /* WIN32 */ + + return(ret && exists && type == OS_FILE_TYPE_DIR); +} + +/* Unit Tests */ +#ifdef UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH +#define MF fil_make_filepath +#define DISPLAY ib::info() << path +void +test_make_filepath() +{ + char* path; + const char* long_path = + "this/is/a/very/long/path/including/a/very/" + "looooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooong" + "/folder/name"; + path = MF("/this/is/a/path/with/a/filename", NULL, IBD, false); DISPLAY; + path = MF("/this/is/a/path/with/a/filename", NULL, ISL, false); DISPLAY; + path = MF("/this/is/a/path/with/a/filename", NULL, CFG, false); DISPLAY; + path = MF("/this/is/a/path/with/a/filename.ibd", NULL, IBD, false); DISPLAY; + path = MF("/this/is/a/path/with/a/filename.ibd", NULL, IBD, false); DISPLAY; + path = MF("/this/is/a/path/with/a/filename.dat", NULL, IBD, false); DISPLAY; + path = MF(NULL, "tablespacename", NO_EXT, false); DISPLAY; + path = MF(NULL, "tablespacename", IBD, false); DISPLAY; + path = MF(NULL, "dbname/tablespacename", NO_EXT, false); DISPLAY; + path = MF(NULL, "dbname/tablespacename", IBD, false); DISPLAY; + path = MF(NULL, "dbname/tablespacename", ISL, false); DISPLAY; + path = MF(NULL, "dbname/tablespacename", CFG, false); DISPLAY; + path = MF(NULL, "dbname\\tablespacename", NO_EXT, false); DISPLAY; + path = MF(NULL, "dbname\\tablespacename", IBD, false); DISPLAY; + path = MF("/this/is/a/path", "dbname/tablespacename", IBD, false); DISPLAY; + path = MF("/this/is/a/path", "dbname/tablespacename", IBD, true); DISPLAY; + path = MF("./this/is/a/path", "dbname/tablespacename.ibd", IBD, true); DISPLAY; + path = MF("this\\is\\a\\path", "dbname/tablespacename", IBD, true); DISPLAY; + path = MF("/this/is/a/path", "dbname\\tablespacename", IBD, true); DISPLAY; + path = MF(long_path, NULL, IBD, false); DISPLAY; + path = MF(long_path, "tablespacename", IBD, false); DISPLAY; + path = MF(long_path, "tablespacename", IBD, true); DISPLAY; +} +#endif /* UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH */ +/* @} */ + +/*******************************************************************//** +Increments the count of pending operation, if space is not being deleted. 
+@return TRUE if being deleted, and operation should be skipped */ UNIV_INTERN -ulint -fil_get_next_space_safe( -/*====================*/ - ulint id) /*!< in: previous space id */ +ibool +fil_inc_pending_ops( +/*================*/ + ulint id, /*!< in: space id */ + ibool print_err) /*!< in: need to print error or not */ { - bool found; - fil_space_t* space; - ulint out_id = ULINT_UNDEFINED; + fil_space_t* space; mutex_enter(&fil_system->mutex); space = fil_space_get_by_id(id); + if (space == NULL) { - /* we didn't find it...search for space with space->id > id */ - found = false; - space = UT_LIST_GET_FIRST(fil_system->space_list); - } else { - /* we found it, take next available space */ - found = true; + if (print_err) { + fprintf(stderr, + "InnoDB: Error: trying to do an operation on a" + " dropped tablespace %lu\n", + (ulong) id); + } } - while ((space = UT_LIST_GET_NEXT(space_list, space)) != NULL) { - - if (!found && space->id <= id) - continue; + if (space == NULL || space->stop_new_ops) { + mutex_exit(&fil_system->mutex); - if (!space->stop_new_ops) { - /* inc reference to prevent drop */ - out_id = space->id; - break; - } + return(TRUE); } + space->n_pending_ops++; + mutex_exit(&fil_system->mutex); - return out_id; + return(FALSE); +} + +/*******************************************************************//** +Decrements the count of pending operations. */ +UNIV_INTERN +void +fil_decr_pending_ops( +/*=================*/ + ulint id) /*!< in: space id */ +{ + fil_space_t* space; + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + + if (space == NULL) { + fprintf(stderr, + "InnoDB: Error: decrementing pending operation" + " of a dropped tablespace %lu\n", + (ulong) id); + } + + if (space != NULL) { + space->n_pending_ops--; + } + + mutex_exit(&fil_system->mutex); } /****************************************************************** @@ -7263,3 +7371,193 @@ fil_space_set_crypt_data( return ret_crypt_data; } + +/****************************************************************** +Get id of first tablespace that has node or ULINT_UNDEFINED if none */ +UNIV_INTERN +ulint +fil_get_first_space_safe() +/*======================*/ +{ + ulint out_id = ULINT_UNDEFINED; + fil_space_t* space; + + mutex_enter(&fil_system->mutex); + + space = UT_LIST_GET_FIRST(fil_system->space_list); + if (space != NULL) { + do + { + if (!space->stop_new_ops && UT_LIST_GET_LEN(space->chain) > 0) { + out_id = space->id; + break; + } + + space = UT_LIST_GET_NEXT(space_list, space); + } while (space != NULL); + } + + mutex_exit(&fil_system->mutex); + + return out_id; +} + +/****************************************************************** +Get id of next tablespace that has node or ULINT_UNDEFINED if none */ +UNIV_INTERN +ulint +fil_get_next_space_safe( +/*====================*/ + ulint id) /*!< in: previous space id */ +{ + bool found; + fil_space_t* space; + ulint out_id = ULINT_UNDEFINED; + + mutex_enter(&fil_system->mutex); + + space = fil_space_get_by_id(id); + if (space == NULL) { + /* we didn't find it...search for space with space->id > id */ + found = false; + space = UT_LIST_GET_FIRST(fil_system->space_list); + } else { + /* we found it, take next available space */ + found = true; + } + + while ((space = UT_LIST_GET_NEXT(space_list, space)) != NULL) { + + if (!found && space->id <= id) + continue; + + if (!space->stop_new_ops) { + /* inc reference to prevent drop */ + out_id = space->id; + break; + } + } + + mutex_exit(&fil_system->mutex); + + return out_id; +} + + 
+/********************************************************************//** +Find correct node from file space +@return node */ +static +fil_node_t* +fil_space_get_node( + fil_space_t* space, /*!< in: file spage */ + ulint space_id, /*!< in: space id */ + ulint* block_offset, /*!< in/out: offset in number of blocks */ + ulint byte_offset, /*!< in: remainder of offset in bytes; in + aio this must be divisible by the OS block + size */ + ulint len) /*!< in: how many bytes to read or write; this + must not cross a file boundary; in aio this + must be a block size multiple */ +{ + fil_node_t* node; + ut_ad(mutex_own(&fil_system->mutex)); + + node = UT_LIST_GET_FIRST(space->chain); + + for (;;) { + if (node == NULL) { + return(NULL); + } else if (fil_is_user_tablespace_id(space->id) + && node->size == 0) { + + /* We do not know the size of a single-table tablespace + before we open the file */ + break; + } else if (node->size > *block_offset) { + /* Found! */ + break; + } else { + *block_offset -= node->size; + node = UT_LIST_GET_NEXT(chain, node); + } + } + + return (node); +} + +/********************************************************************//** +Return block size of node in file space +@return file block size */ +UNIV_INTERN +ulint +fil_space_get_block_size( +/*=====================*/ + ulint space_id, + ulint block_offset, + ulint len) +{ + ulint block_size = 512; + ut_ad(!mutex_own(&fil_system->mutex)); + + mutex_enter(&fil_system->mutex); + fil_space_t* space = fil_space_get_space(space_id); + + if (space) { + fil_node_t* node = fil_space_get_node(space, space_id, &block_offset, 0, len); + + if (node) { + block_size = node->block_size; + } + } + + /* Currently supporting block size up to 4K, + fall back to default if bigger requested. */ + if (block_size > 4096) { + block_size = 512; + } + + mutex_exit(&fil_system->mutex); + + return block_size; +} + +/*******************************************************************//** +Returns the table space by a given id, NULL if not found. 
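fil_space_get_node() above locates the file that holds a given block by walking the space's node chain and subtracting each node's size from the block offset until the offset fits inside a node; fil_space_get_block_size() then takes the block size from that node. A simplified sketch of that offset-to-file mapping over a vector of per-file sizes (sizes in pages; it ignores the special case of a single-table file whose size is not yet known):

#include <cstddef>
#include <iostream>
#include <vector>

/* Given the per-file sizes (in pages) of a multi-file tablespace, find which
   file a page number falls into and the page offset relative to that file.
   Returns false when the page is past the end of the last file. */
static bool locate_page(const std::vector<std::size_t>& file_sizes,
                        std::size_t page_no,
                        std::size_t* file_index,
                        std::size_t* offset_in_file)
{
    for (std::size_t i = 0; i < file_sizes.size(); ++i) {
        if (page_no < file_sizes[i]) {
            *file_index = i;
            *offset_in_file = page_no;
            return true;
        }
        page_no -= file_sizes[i];
    }
    return false;
}

int main()
{
    std::vector<std::size_t> sizes;
    sizes.push_back(64);                   /* e.g. three data files */
    sizes.push_back(128);
    sizes.push_back(128);

    std::size_t file, off;
    if (locate_page(sizes, 100, &file, &off)) {
        std::cout << "page 100 -> file " << file << ", offset " << off << '\n';
    }
    return 0;
}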
*/ +fil_space_t* +fil_space_found_by_id( +/*==================*/ + ulint id) /*!< in: space id */ +{ + fil_space_t* space = NULL; + mutex_enter(&fil_system->mutex); + space = fil_space_get_by_id(id); + + /* Not found if space is being deleted */ + if (space && space->stop_new_ops) { + space = NULL; + } + + mutex_exit(&fil_system->mutex); + return space; +} + +/****************************************************************//** +Acquire fil_system mutex */ +void +fil_system_enter(void) +/*==================*/ +{ + ut_ad(!mutex_own(&fil_system->mutex)); + mutex_enter(&fil_system->mutex); +} + +/****************************************************************//** +Release fil_system mutex */ +void +fil_system_exit(void) +/*=================*/ +{ + ut_ad(mutex_own(&fil_system->mutex)); + mutex_exit(&fil_system->mutex); +} diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index 5c6ef3bfd0d..4479c06f1b2 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -52,7 +52,6 @@ Updated 14/02/2015 # include "buf0lru.h" # include "ibuf0ibuf.h" # include "sync0sync.h" -# include "os0sync.h" #else /* !UNIV_HOTBACKUP */ # include "srv0srv.h" static ulint srv_data_read, srv_data_written; @@ -119,10 +118,10 @@ fil_compress_page( if (!out_buf) { allocated = true; - out_buf = static_cast(ut_malloc(UNIV_PAGE_SIZE)); + out_buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); #ifdef HAVE_LZO if (comp_method == PAGE_LZO_ALGORITHM) { - lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); + lzo_mem = static_cast(ut_malloc_nokey(LZO1X_1_15_MEM_COMPRESS)); memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); } #endif @@ -173,20 +172,6 @@ fil_compress_page( write_size = err; if (err == 0) { - /* If error we leave the actual page as it was */ - -#ifndef UNIV_PAGECOMPRESS_DEBUG - if (space->printed_compression_failure == false) { -#endif - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu rt %d write %lu.", - space_id, fil_space_name(space), len, err, write_size); - space->printed_compression_failure = true; -#ifndef UNIV_PAGECOMPRESS_DEBUG - } -#endif - srv_stats.pages_page_compression_error.inc(); - *out_len = len; goto err_exit; } break; @@ -197,15 +182,6 @@ fil_compress_page( buf, len, out_buf+header_len, &write_size, lzo_mem); if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space->printed_compression_failure == false) { - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu err %d write_size %lu.", - space_id, fil_space_name(space), len, err, write_size); - space->printed_compression_failure = true; - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; goto err_exit; } @@ -226,15 +202,7 @@ fil_compress_page( (size_t)write_size); if (err != LZMA_OK || out_pos > UNIV_PAGE_SIZE-header_len) { - if (space->printed_compression_failure == false) { - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu err %d write_size %lu", - space_id, fil_space_name(space), len, err, out_pos); - space->printed_compression_failure = true; - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; + write_size = out_pos; goto err_exit; } @@ -257,15 +225,6 @@ fil_compress_page( 0); if (err != BZ_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space->printed_compression_failure == false) { - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu err %d write_size %lu.", - space_id, 
fil_space_name(space), len, err, write_size); - space->printed_compression_failure = true; - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; goto err_exit; } break; @@ -284,15 +243,7 @@ fil_compress_page( (size_t*)&write_size); if (cstatus != SNAPPY_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space->printed_compression_failure == false) { - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu err %d write_size %lu.", - space_id, fil_space_name(space), len, (int)cstatus, write_size); - space->printed_compression_failure = true; - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; + err = (int)cstatus; goto err_exit; } break; @@ -303,17 +254,6 @@ fil_compress_page( err = compress2(out_buf+header_len, (ulong*)&write_size, buf, len, comp_level); if (err != Z_OK) { - /* If error we leave the actual page as it was */ - - if (space->printed_compression_failure == false) { - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space %lu name %s len %lu rt %d write %lu.", - space_id, fil_space_name(space), len, err, write_size); - space->printed_compression_failure = true; - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; goto err_exit; } break; @@ -360,14 +300,16 @@ fil_compress_page( byte *comp_page; byte *uncomp_page; - comp_page = static_cast(ut_malloc(UNIV_PAGE_SIZE)); - uncomp_page = static_cast(ut_malloc(UNIV_PAGE_SIZE)); + comp_page = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); + uncomp_page = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); memcpy(comp_page, out_buf, UNIV_PAGE_SIZE); + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(space_id, &tsfound); fil_decompress_page(uncomp_page, comp_page, len, NULL); - if(buf_page_is_corrupted(false, uncomp_page, 0)) { - buf_page_print(uncomp_page, 0, BUF_PAGE_PRINT_NO_CRASH); + if(buf_page_is_corrupted(false, uncomp_page, page_size, false)) { + buf_page_print(uncomp_page, page_size, BUF_PAGE_PRINT_NO_CRASH); ut_error; } @@ -423,6 +365,26 @@ fil_compress_page( } err_exit: + /* If error we leave the actual page as it was */ + +#ifndef UNIV_PAGECOMPRESS_DEBUG + if (space && space->printed_compression_failure == false) { +#endif + ib::warn() << "Compression failed for space: " + << space_id << " name: " + << fil_space_name(space) << " len: " + << len << " err: " << err << " write_size: " + << write_size + << " compression method: " + << fil_get_compression_alg_name(comp_method) + << "."; + space->printed_compression_failure = true; +#ifndef UNIV_PAGECOMPRESS_DEBUG + } +#endif + srv_stats.pages_page_compression_error.inc(); + *out_len = len; + if (allocated) { ut_free(out_buf); #ifdef HAVE_LZO @@ -472,13 +434,13 @@ fil_decompress_page( /* Do not try to uncompressed pages that are not compressed */ if (ptype != FIL_PAGE_PAGE_COMPRESSED && ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED && - ptype != FIL_PAGE_TYPE_COMPRESSED) { + ptype != FIL_PAGE_COMPRESSED) { return; } // If no buffer was given, we need to allocate temporal buffer if (page_buf == NULL) { - in_buf = static_cast(ut_malloc(UNIV_PAGE_SIZE)); + in_buf = static_cast(ut_malloc_nokey(UNIV_PAGE_SIZE)); memset(in_buf, 0, UNIV_PAGE_SIZE); } else { in_buf = page_buf; @@ -489,13 +451,13 @@ fil_decompress_page( if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) != BUF_NO_CHECKSUM_MAGIC || (ptype != FIL_PAGE_PAGE_COMPRESSED && ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: We try to uncompress corrupted page" - " CRC %lu type %lu len %lu.", 
- mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM), - mach_read_from_2(buf+FIL_PAGE_TYPE), len); + ib::error() << "Corruption: We try to uncompress corrupted page:" + << " CRC " + << mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) + << " page_type " + << mach_read_from_2(buf+FIL_PAGE_TYPE) + << " page len " << len << "."; - fflush(stderr); if (return_error) { goto error_return; } @@ -513,11 +475,11 @@ fil_decompress_page( actual_size = mach_read_from_2(buf+FIL_PAGE_DATA); /* Check if payload size is corrupted */ if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: We try to uncompress corrupted page" - " actual size %lu compression %s.", - actual_size, fil_get_compression_alg_name(compression_alg)); - fflush(stderr); + ib::error() << "Corruption: We try to uncompress corrupted page" + << " actual size: " << actual_size + << " compression method: " + << fil_get_compression_alg_name(compression_alg) + << "."; if (return_error) { goto error_return; } @@ -531,31 +493,20 @@ fil_decompress_page( } #ifdef UNIV_PAGECOMPRESS_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Preparing for decompress for len %lu\n", - actual_size); + ib::info() << "Preparing for decompress for len " + << actual_size << "."; #endif /* UNIV_PAGECOMPRESS_DEBUG */ - switch(compression_alg) { case PAGE_ZLIB_ALGORITHM: err= uncompress(in_buf, &len, buf+header_len, (unsigned long)actual_size); /* If uncompress fails it means that page is corrupted */ if (err != Z_OK) { - - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but uncompress failed with error %d " - " size %lu len %lu.", - err, actual_size, len); - - fflush(stderr); - + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; @@ -564,17 +515,10 @@ fil_decompress_page( err = LZ4_decompress_fast((const char *)buf+header_len, (char *)in_buf, len); if (err != (int)actual_size) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %d bytes " - " size %lu len %lu.", - err, actual_size, len); - fflush(stderr); - + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; #endif /* HAVE_LZ4 */ @@ -585,17 +529,11 @@ fil_decompress_page( actual_size,(unsigned char *)in_buf, &olen, NULL); if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size %lu len %lu.", - olen, actual_size, len); - fflush(stderr); - + len = olen; + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; } @@ -621,17 +559,11 @@ fil_decompress_page( if (ret != LZMA_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size %lu len %lu.", - dst_pos, actual_size, len); - fflush(stderr); - + len = dst_pos; + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; @@ -650,17 +582,11 @@ fil_decompress_page( 0); if (err != BZ_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %du bytes" - " size %lu len %lu err %d.", - dst_pos, actual_size, len, err); - fflush(stderr); - + len = dst_pos; + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; } @@ -678,33 +604,21 @@ fil_decompress_page( (size_t*)&olen); if (cstatus != SNAPPY_OK || (olen == 
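The PAGE_ZLIB_ALGORITHM branches in this file compress a page with zlib's compress2() and reverse it later with uncompress(); the compressed payload length is carried in the page header so the decompressor knows how many bytes to feed back in. A minimal, self-contained round trip through those same two zlib calls (plain buffers instead of page frames, fixed compression level; build with -lz):

#include <zlib.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main()
{
    const char src[] = "example page payload, example page payload, padded";
    uLong src_len = sizeof(src);

    /* compress2(): the destination length is in/out, capacity going in and
       the compressed size coming out. */
    std::vector<Bytef> comp(compressBound(src_len));
    uLongf comp_len = static_cast<uLongf>(comp.size());
    if (compress2(comp.data(), &comp_len,
                  reinterpret_cast<const Bytef*>(src), src_len,
                  6 /* compression level */) != Z_OK) {
        return 1;
    }

    /* uncompress(): the destination must be at least the original size. */
    std::vector<Bytef> plain(src_len);
    uLongf plain_len = static_cast<uLongf>(plain.size());
    if (uncompress(plain.data(), &plain_len, comp.data(), comp_len) != Z_OK
        || plain_len != src_len
        || std::memcmp(plain.data(), src, src_len) != 0) {
        return 1;
    }

    std::printf("%lu bytes -> %lu compressed\n",
                static_cast<unsigned long>(src_len),
                static_cast<unsigned long>(comp_len));
    return 0;
}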
0 || olen > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %lu bytes" - " size %lu len %lu err %d.", - olen, actual_size, len, (int)cstatus); - fflush(stderr); - + err = (int)cstatus; + len = olen; + goto err_exit; if (return_error) { goto error_return; } - ut_error; } break; } #endif /* HAVE_SNAPPY */ default: - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but compression algorithm %s" - " is not known." - ,fil_get_compression_alg_name(compression_alg)); - - fflush(stderr); + goto err_exit; if (return_error) { goto error_return; } - ut_error; break; } @@ -719,4 +633,30 @@ error_return: if (page_buf == NULL) { ut_free(in_buf); } + + return; + +err_exit: + /* Note that as we have found the page is corrupted, so + all this could be incorrect. */ + ulint space_id = mach_read_from_4(buf+FIL_PAGE_SPACE_ID); + fil_system_enter(); + fil_space_t* space = fil_space_get_by_id(space_id); + fil_system_exit(); + + bool tsfound; + const page_size_t page_size = fil_space_get_page_size(space_id, &tsfound); + + ib::error() << "Corruption: Page is marked as compressed" + << " space: " << space_id << " name: " + << (space ? fil_space_name(space) : "NULL") + << " but uncompress failed with error: " << err + << " size: " << actual_size + << " len: " << len + << " compression method: " + << fil_get_compression_alg_name(compression_alg) << "."; + + buf_page_print(buf, page_size, BUF_PAGE_PRINT_NO_CRASH); + + ut_error; } diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc new file mode 100644 index 00000000000..b3e85c58e42 --- /dev/null +++ b/storage/innobase/fsp/fsp0file.cc @@ -0,0 +1,1130 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file fsp/fsp0file.cc +Tablespace data file implementation + +Created 2013-7-26 by Kevin Lewis +*******************************************************/ + +#include "ha_prototypes.h" + +#include "fil0fil.h" +#include "fsp0types.h" +#include "fsp0sysspace.h" +#include "os0file.h" +#include "page0page.h" +#include "srv0start.h" +#include "ut0new.h" +#include "fil0crypt.h" + +/** Initialize the name and flags of this datafile. +@param[in] name tablespace name, will be copied +@param[in] flags tablespace flags */ +void +Datafile::init( + const char* name, + ulint flags) +{ + ut_ad(m_name == NULL); + ut_ad(name != NULL); + + m_name = mem_strdup(name); + m_flags = flags; +} + +/** Release the resources. 
*/ +void +Datafile::shutdown() +{ + close(); + + ut_free(m_name); + m_name = NULL; + + free_filepath(); + + if (m_crypt_info) { + fil_space_destroy_crypt_data(&m_crypt_info); + } + + free_first_page(); +} + +/** Create/open a data file. +@param[in] read_only_mode if true, then readonly mode checks are enforced. +@return DB_SUCCESS or error code */ +dberr_t +Datafile::open_or_create(bool read_only_mode) +{ + bool success; + ut_a(m_filepath != NULL); + ut_ad(m_handle == OS_FILE_CLOSED); + + m_handle = os_file_create( + innodb_data_file_key, m_filepath, m_open_flags, + OS_FILE_NORMAL, OS_DATA_FILE, read_only_mode, &success); + + if (!success) { + m_last_os_error = os_file_get_last_error(true); + ib::error() << "Cannot open datafile '" << m_filepath << "'"; + return(DB_CANNOT_OPEN_FILE); + } + + return(DB_SUCCESS); +} + +/** Open a data file in read-only mode to check if it exists so that it +can be validated. +@param[in] strict whether to issue error messages +@return DB_SUCCESS or error code */ +dberr_t +Datafile::open_read_only(bool strict) +{ + bool success = false; + ut_ad(m_handle == OS_FILE_CLOSED); + + /* This function can be called for file objects that do not need + to be opened, which is the case when the m_filepath is NULL */ + if (m_filepath == NULL) { + return(DB_ERROR); + } + + set_open_flags(OS_FILE_OPEN); + m_handle = os_file_create_simple_no_error_handling( + innodb_data_file_key, m_filepath, m_open_flags, + OS_FILE_READ_ONLY, true, &success); + + if (success) { + m_exists = true; + init_file_info(); + + return(DB_SUCCESS); + } + + if (strict) { + m_last_os_error = os_file_get_last_error(true); + ib::error() << "Cannot open datafile for read-only: '" + << m_filepath << "' OS error: " << m_last_os_error; + } + + return(DB_CANNOT_OPEN_FILE); +} + +/** Open a data file in read-write mode during start-up so that +doublewrite pages can be restored and then it can be validated.* +@param[in] read_only_mode if true, then readonly mode checks are enforced. +@return DB_SUCCESS or error code */ +dberr_t +Datafile::open_read_write(bool read_only_mode) +{ + bool success = false; + ut_ad(m_handle == OS_FILE_CLOSED); + + /* This function can be called for file objects that do not need + to be opened, which is the case when the m_filepath is NULL */ + if (m_filepath == NULL) { + return(DB_ERROR); + } + + set_open_flags(OS_FILE_OPEN); + m_handle = os_file_create_simple_no_error_handling( + innodb_data_file_key, m_filepath, m_open_flags, + OS_FILE_READ_WRITE, read_only_mode, &success); + + if (!success) { + m_last_os_error = os_file_get_last_error(true); + ib::error() << "Cannot open datafile for read-write: '" + << m_filepath << "'"; + return(DB_CANNOT_OPEN_FILE); + } + + m_exists = true; + + init_file_info(); + + return(DB_SUCCESS); +} + +/** Initialize OS specific file info. */ +void +Datafile::init_file_info() +{ +#ifdef _WIN32 + GetFileInformationByHandle(m_handle, &m_file_info); +#else + fstat(m_handle, &m_file_info); +#endif /* WIN32 */ +} + +/** Close a data file. +@return DB_SUCCESS or error code */ +dberr_t +Datafile::close() +{ + if (m_handle != OS_FILE_CLOSED) { + ibool success = os_file_close(m_handle); + ut_a(success); + + m_handle = OS_FILE_CLOSED; + } + + return(DB_SUCCESS); +} + +/** Make a full filepath from a directory path and a filename. +Prepend the dirpath to filename using the extension given. +If dirpath is NULL, prepend the default datadir to filepath. +Store the result in m_filepath. 
+@param[in] dirpath directory path +@param[in] filename filename or filepath +@param[in] ext filename extension */ +void +Datafile::make_filepath( + const char* dirpath, + const char* filename, + ib_extention ext) +{ + ut_ad(dirpath != NULL || filename != NULL); + + free_filepath(); + + m_filepath = fil_make_filepath(dirpath, filename, ext, false); + + ut_ad(m_filepath != NULL); + + set_filename(); +} + +/** Set the filepath by duplicating the filepath sent in. This is the +name of the file with its extension and absolute or relative path. +@param[in] filepath filepath to set */ +void +Datafile::set_filepath(const char* filepath) +{ + free_filepath(); + m_filepath = static_cast(ut_malloc_nokey(strlen(filepath) + 1)); + ::strcpy(m_filepath, filepath); + set_filename(); +} + +/** Free the filepath buffer. */ +void +Datafile::free_filepath() +{ + if (m_filepath != NULL) { + ut_free(m_filepath); + m_filepath = NULL; + m_filename = NULL; + } +} + +/** Do a quick test if the filepath provided looks the same as this filepath +byte by byte. If they are two different looking paths to the same file, +same_as() will be used to show that after the files are opened. +@param[in] other filepath to compare with +@retval true if it is the same filename by byte comparison +@retval false if it looks different */ +bool +Datafile::same_filepath_as( + const char* other) const +{ + return(0 == strcmp(m_filepath, other)); +} + +/** Test if another opened datafile is the same file as this object. +@param[in] other Datafile to compare with +@return true if it is the same file, else false */ +bool +Datafile::same_as( + const Datafile& other) const +{ +#ifdef _WIN32 + return(m_file_info.dwVolumeSerialNumber + == other.m_file_info.dwVolumeSerialNumber + && m_file_info.nFileIndexHigh + == other.m_file_info.nFileIndexHigh + && m_file_info.nFileIndexLow + == other.m_file_info.nFileIndexLow); +#else + return(m_file_info.st_ino == other.m_file_info.st_ino + && m_file_info.st_dev == other.m_file_info.st_dev); +#endif /* WIN32 */ +} + +/** Allocate and set the datafile or tablespace name in m_name. +If a name is provided, use it; else if the datafile is file-per-table, +extract a file-per-table tablespace name from m_filepath; else it is a +general tablespace, so just call it that for now. The value of m_name +will be freed in the destructor. +@param[in] name tablespace name if known, NULL if not */ +void +Datafile::set_name(const char* name) +{ + ut_free(m_name); + + if (name != NULL) { + m_name = mem_strdup(name); + } else if (fsp_is_file_per_table(m_space_id, m_flags)) { + m_name = fil_path_to_space_name(m_filepath); + } else { + /* Give this general tablespace a temporary name. */ + m_name = static_cast( + ut_malloc_nokey(strlen(general_space_name) + 20)); + + sprintf(m_name, "%s_" ULINTPF, general_space_name, m_space_id); + } +} + +/** Reads a few significant fields from the first page of the first +datafile. The Datafile must already be open. +@param[in] read_only_mode If true, then readonly mode checks are enforced. 
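Datafile::same_as() above decides whether two opened datafiles are really the same file by comparing OS-level identity (device and inode on POSIX, volume serial number and file index on Windows) rather than the path strings, which same_filepath_as() only compares byte by byte. A minimal POSIX-only sketch of the same idea using stat(2) on paths (the patch works on already-open handles via fstat(); this only illustrates the identity check):

#include <sys/stat.h>
#include <cstdio>

/* True when both paths resolve to the same underlying file, even if the
   paths look different (symlinks, "./" prefixes, hard links, ...). */
static bool same_file(const char* path_a, const char* path_b)
{
    struct stat sa, sb;

    if (stat(path_a, &sa) != 0 || stat(path_b, &sb) != 0) {
        return false;          /* treat unreadable paths as "not the same" */
    }
    return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
}

int main(int argc, char** argv)
{
    if (argc == 3) {
        std::printf("%s\n", same_file(argv[1], argv[2]) ? "same" : "different");
    }
    return 0;
}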
+@return DB_SUCCESS or DB_IO_ERROR if page cannot be read */ +dberr_t +Datafile::read_first_page(bool read_only_mode) +{ + if (m_handle == OS_FILE_CLOSED) { + + dberr_t err = open_or_create(read_only_mode); + + if (err != DB_SUCCESS) { + return(err); + } + } + + m_first_page_buf = static_cast( + ut_malloc_nokey(2 * UNIV_PAGE_SIZE_MAX)); + + /* Align the memory for a possible read from a raw device */ + + m_first_page = static_cast( + ut_align(m_first_page_buf, UNIV_PAGE_SIZE)); + + IORequest request; + dberr_t err = DB_ERROR; + size_t page_size = UNIV_PAGE_SIZE_MAX; + + /* Don't want unnecessary complaints about partial reads. */ + + request.disable_partial_io_warnings(); + + while (page_size >= UNIV_PAGE_SIZE_MIN) { + + ulint n_read = 0; + + err = os_file_read_no_error_handling( + request, m_handle, m_first_page, 0, page_size, &n_read); + + if (err == DB_IO_ERROR && n_read >= UNIV_PAGE_SIZE_MIN) { + + page_size >>= 1; + + } else if (err == DB_SUCCESS) { + + ut_a(n_read == page_size); + + break; + + } else { + + ib::error() + << "Cannot read first page of '" + << m_filepath << "' " + << ut_strerr(err); + break; + } + } + + if (err == DB_SUCCESS && m_order == 0) { + + m_flags = fsp_header_get_flags(m_first_page); + + m_space_id = fsp_header_get_space_id(m_first_page); + } + + const page_size_t page_sz = fsp_header_get_page_size(m_first_page); + ulint offset = fsp_header_get_crypt_offset(page_sz, NULL); + m_crypt_info = fil_space_read_crypt_data(m_space_id, m_first_page, offset); + + return(err); +} + +/** Free the first page from memory when it is no longer needed. */ +void +Datafile::free_first_page() +{ + if (m_first_page_buf) { + ut_free(m_first_page_buf); + m_first_page_buf = NULL; + m_first_page = NULL; + } +} + +/** Validates the datafile and checks that it conforms with the expected +space ID and flags. The file should exist and be successfully opened +in order for this function to validate it. +@param[in] space_id The expected tablespace ID. +@param[in] flags The expected tablespace flags. +@retval DB_SUCCESS if tablespace is valid, DB_ERROR if not. +m_is_valid is also set true on success, else false. */ +dberr_t +Datafile::validate_to_dd( + ulint space_id, + ulint flags) +{ + dberr_t err; + + if (!is_open()) { + return DB_ERROR; + } + + /* Validate this single-table-tablespace with the data dictionary, + but do not compare the DATA_DIR flag, in case the tablespace was + remotely located. */ + err = validate_first_page(); + if (err != DB_SUCCESS) { + return(err); + } + + /* Make sure the datafile we found matched the space ID. + If the datafile is a file-per-table tablespace then also match + the row format and zip page size. */ + if (m_space_id == space_id + && (m_flags & FSP_FLAGS_MASK_SHARED + || (m_flags & ~FSP_FLAGS_MASK_DATA_DIR) + == (flags & ~FSP_FLAGS_MASK_DATA_DIR))) { + /* Datafile matches the tablespace expected. */ + return(DB_SUCCESS); + } + + /* else do not use this tablespace. */ + m_is_valid = false; + + ib::error() << "In file '" << m_filepath << "', tablespace id and" + " flags are " << m_space_id << " and " << m_flags << ", but in" + " the InnoDB data dictionary they are " << space_id << " and " + << flags << ". Have you moved InnoDB .ibd files around without" + " using the commands DISCARD TABLESPACE and IMPORT TABLESPACE?" + " " << TROUBLESHOOT_DATADICT_MSG; + + return(DB_ERROR); +} + +/** Validates this datafile for the purpose of recovery. The file should +exist and be successfully opened. 
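As an aside, the space id / flags comparison done by validate_to_dd() above
reduces to the following sketch (shared-tablespace shortcut omitted;
'file_flags' and 'dict_flags' are hypothetical names for the header and
dictionary values):
@code
	// Ignore the DATA_DIR bit on both sides, since the tablespace may
	// have been created with DATA DIRECTORY and found via an ISL file.
	bool	flags_match = (file_flags & ~FSP_FLAGS_MASK_DATA_DIR)
		== (dict_flags & ~FSP_FLAGS_MASK_DATA_DIR);
@endcode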
We initially open it in read-only mode +because we just want to read the SpaceID. However, if the first page is +corrupt and needs to be restored from the doublewrite buffer, we will +reopen it in write mode and ry to restore that page. +@retval DB_SUCCESS if tablespace is valid, DB_ERROR if not. +m_is_valid is also set true on success, else false. */ +dberr_t +Datafile::validate_for_recovery() +{ + dberr_t err; + + ut_ad(is_open()); + ut_ad(!srv_read_only_mode); + + err = validate_first_page(); + + switch (err) { + case DB_SUCCESS: + case DB_TABLESPACE_EXISTS: + break; + + default: + /* Re-open the file in read-write mode Attempt to restore + page 0 from doublewrite and read the space ID from a survey + of the first few pages. */ + close(); + err = open_read_write(srv_read_only_mode); + if (err != DB_SUCCESS) { + ib::error() << "Datafile '" << m_filepath << "' could not" + " be opened in read-write mode so that the" + " doublewrite pages could be restored."; + return(err); + }; + + err = find_space_id(); + if (err != DB_SUCCESS || m_space_id == 0) { + ib::error() << "Datafile '" << m_filepath << "' is" + " corrupted. Cannot determine the space ID from" + " the first 64 pages."; + return(err); + } + + err = restore_from_doublewrite(0); + if (err != DB_SUCCESS) { + return(err); + } + + /* Free the previously read first page and then re-validate. */ + free_first_page(); + err = validate_first_page(); + } + + if (err == DB_SUCCESS) { + set_name(NULL); + } + + return(err); +} + +/** Check the consistency of the first page of a datafile when the +tablespace is opened. This occurs before the fil_space_t is created +so the Space ID found here must not already be open. +m_is_valid is set true on success, else false. +@param[out] flush_lsn contents of FIL_PAGE_FILE_FLUSH_LSN +(only valid for the first file of the system tablespace) +@retval DB_SUCCESS on if the datafile is valid +@retval DB_CORRUPTION if the datafile is not readable +@retval DB_TABLESPACE_EXISTS if there is a duplicate space_id */ +dberr_t +Datafile::validate_first_page(lsn_t* flush_lsn) +{ + char* prev_name; + char* prev_filepath; + const char* error_txt = NULL; + + m_is_valid = true; + + if (m_first_page == NULL + && read_first_page(srv_read_only_mode) != DB_SUCCESS) { + + error_txt = "Cannot read first page"; + } else { + ut_ad(m_first_page_buf); + ut_ad(m_first_page); + + if (flush_lsn != NULL) { + + *flush_lsn = mach_read_from_8( + m_first_page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + } + } + + /* Check if the whole page is blank. */ + if (error_txt == NULL + && m_space_id == srv_sys_space.space_id() + && !m_flags) { + const byte* b = m_first_page; + ulint nonzero_bytes = UNIV_PAGE_SIZE; + + while (*b == '\0' && --nonzero_bytes != 0) { + + b++; + } + + if (nonzero_bytes == 0) { + error_txt = "Header page consists of zero bytes"; + } + } + + const page_size_t page_size(m_flags); + + if (error_txt != NULL) { + + /* skip the next few tests */ + } else if (univ_page_size.logical() != page_size.logical()) { + + /* Page size must be univ_page_size. */ + + ib::error() + << "Data file '" << m_filepath << "' uses page size " + << page_size.logical() << ", but the innodb_page_size" + " start-up parameter is " + << univ_page_size.logical(); + + free_first_page(); + + return(DB_ERROR); + + } else if (page_get_page_no(m_first_page) != 0) { + + /* First page must be number 0 */ + error_txt = "Header page contains inconsistent data"; + + } else if (m_space_id == ULINT_UNDEFINED) { + + /* The space_id can be most anything, except -1. 
*/ + error_txt = "A bad Space ID was found"; + + } else if (buf_page_is_corrupted( + false, m_first_page, page_size, + fsp_is_checksum_disabled(m_space_id))) { + + /* Look for checksum and other corruptions. */ + error_txt = "Checksum mismatch"; + } + + if (error_txt != NULL) { + ib::error() << error_txt << " in datafile: " << m_filepath + << ", Space ID:" << m_space_id << ", Flags: " + << m_flags << ". " << TROUBLESHOOT_DATADICT_MSG; + m_is_valid = false; + + free_first_page(); + + return(DB_CORRUPTION); + + } + + if (fil_space_read_name_and_filepath( + m_space_id, &prev_name, &prev_filepath)) { + + if (0 == strcmp(m_filepath, prev_filepath)) { + ut_free(prev_name); + ut_free(prev_filepath); + return(DB_SUCCESS); + } + + /* Make sure the space_id has not already been opened. */ + ib::error() << "Attempted to open a previously opened" + " tablespace. Previous tablespace " << prev_name + << " at filepath: " << prev_filepath + << " uses space ID: " << m_space_id + << ". Cannot open filepath: " << m_filepath + << " which uses the same space ID."; + + ut_free(prev_name); + ut_free(prev_filepath); + + m_is_valid = false; + + free_first_page(); + + return(is_predefined_tablespace(m_space_id) + ? DB_CORRUPTION + : DB_TABLESPACE_EXISTS); + } + + return(DB_SUCCESS); +} + +/** Determine the space id of the given file descriptor by reading a few +pages from the beginning of the .ibd file. +@return DB_SUCCESS if space id was successfully identified, else DB_ERROR. */ +dberr_t +Datafile::find_space_id() +{ + os_offset_t file_size; + + ut_ad(m_handle != OS_FILE_CLOSED); + + file_size = os_file_get_size(m_handle); + + if (file_size == (os_offset_t) -1) { + ib::error() << "Could not get file size of datafile '" + << m_filepath << "'"; + return(DB_CORRUPTION); + } + + /* Assuming a page size, read the space_id from each page and store it + in a map. Find out which space_id is agreed on by majority of the + pages. Choose that space_id. */ + for (ulint page_size = UNIV_ZIP_SIZE_MIN; + page_size <= UNIV_PAGE_SIZE_MAX; + page_size <<= 1) { + + /* map[space_id] = count of pages */ + typedef std::map< + ulint, + ulint, + std::less, + ut_allocator > > + Pages; + + Pages verify; + ulint page_count = 64; + ulint valid_pages = 0; + + /* Adjust the number of pages to analyze based on file size */ + while ((page_count * page_size) > file_size) { + --page_count; + } + + ib::info() + << "Page size:" << page_size + << ". Pages to analyze:" << page_count; + + byte* buf = static_cast( + ut_malloc_nokey(2 * UNIV_PAGE_SIZE_MAX)); + + byte* page = static_cast( + ut_align(buf, UNIV_SECTOR_SIZE)); + + for (ulint j = 0; j < page_count; ++j) { + + dberr_t err; + ulint n_bytes = j * page_size; + IORequest request(IORequest::READ); + + err = os_file_read( + request, m_handle, page, n_bytes, page_size); + + if (err == DB_IO_DECOMPRESS_FAIL) { + + /* If the page was compressed on the fly then + try and decompress the page */ + + n_bytes = os_file_compressed_page_size(page); + + if (n_bytes != ULINT_UNDEFINED) { + + err = os_file_read( + request, + m_handle, page, page_size, + UNIV_PAGE_SIZE_MAX); + + if (err != DB_SUCCESS) { + + ib::info() + << "READ FAIL: " + << "page_no:" << j; + continue; + } + } + + } else if (err != DB_SUCCESS) { + + ib::info() + << "READ FAIL: page_no:" << j; + + continue; + } + + bool noncompressed_ok = false; + + /* For noncompressed pages, the page size must be + equal to univ_page_size.physical(). 
*/ + if (page_size == univ_page_size.physical()) { + noncompressed_ok = !buf_page_is_corrupted( + false, page, univ_page_size, false); + } + + bool compressed_ok = false; + + /* file-per-table tablespaces can be compressed with + the same physical and logical page size. General + tablespaces must have different physical and logical + page sizes in order to be compressed. For this check, + assume the page is compressed if univ_page_size. + logical() is equal to or less than 16k and the + page_size we are checking is equal to or less than + univ_page_size.logical(). */ + if (univ_page_size.logical() <= UNIV_PAGE_SIZE_DEF + && page_size <= univ_page_size.logical()) { + const page_size_t compr_page_size( + page_size, univ_page_size.logical(), + true); + + compressed_ok = !buf_page_is_corrupted( + false, page, compr_page_size, false); + } + + if (noncompressed_ok || compressed_ok) { + + ulint space_id = mach_read_from_4(page + + FIL_PAGE_SPACE_ID); + + if (space_id > 0) { + + ib::info() + << "VALID: space:" + << space_id << " page_no:" << j + << " page_size:" << page_size; + + ++valid_pages; + + ++verify[space_id]; + } + } + } + + ut_free(buf); + + ib::info() + << "Page size: " << page_size + << ". Possible space_id count:" << verify.size(); + + const ulint pages_corrupted = 3; + + for (ulint missed = 0; missed <= pages_corrupted; ++missed) { + + for (Pages::const_iterator it = verify.begin(); + it != verify.end(); + ++it) { + + ib::info() << "space_id:" << it->first + << ", Number of pages matched: " + << it->second << "/" << valid_pages + << " (" << page_size << ")"; + + if (it->second == (valid_pages - missed)) { + ib::info() << "Chosen space:" + << it->first; + + m_space_id = it->first; + return(DB_SUCCESS); + } + } + + } + } + + return(DB_CORRUPTION); +} + + +/** Finds a given page of the given space id from the double write buffer +and copies it to the corresponding .ibd file. +@param[in] page_no Page number to restore +@return DB_SUCCESS if page was restored from doublewrite, else DB_ERROR */ +dberr_t +Datafile::restore_from_doublewrite( + ulint restore_page_no) +{ + /* Find if double write buffer contains page_no of given space id. */ + const byte* page = recv_sys->dblwr.find_page( + m_space_id, restore_page_no); + + if (page == NULL) { + /* If the first page of the given user tablespace is not there + in the doublewrite buffer, then the recovery is going to fail + now. Hence this is treated as an error. */ + + ib::error() + << "Corrupted page " + << page_id_t(m_space_id, restore_page_no) + << " of datafile '" << m_filepath + << "' could not be found in the doublewrite buffer."; + + return(DB_CORRUPTION); + } + + const ulint flags = mach_read_from_4( + FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + page); + + const page_size_t page_size(flags); + + ut_a(page_get_page_no(page) == restore_page_no); + + ib::info() << "Restoring page " + << page_id_t(m_space_id, restore_page_no) + << " of datafile '" << m_filepath + << "' from the doublewrite buffer. Writing " + << page_size.physical() << " bytes into file '" + << m_filepath << "'"; + + IORequest request(IORequest::WRITE); + + /* Note: The pages are written out as uncompressed because we don't + have the compression algorithm information at this point. */ + + request.disable_compression(); + + return(os_file_write( + request, + m_filepath, m_handle, page, 0, page_size.physical())); +} + +/** Create a link filename based on the contents of m_name, +open that file, and read the contents into m_filepath. 
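For reference, the voting step at the end of find_space_id() above condenses
to the following sketch (standard std::map instead of the ut_allocator-backed
map; 'votes' maps a candidate space id to the number of readable pages that
carried it):
@code
	static bool choose_space_id(
		const std::map<ulint, ulint>&	votes,
		ulint				valid_pages,
		ulint*				chosen)
	{
		// Accept the id agreed on by all but at most 3 corrupt pages.
		for (ulint missed = 0; missed <= 3; ++missed) {
			std::map<ulint, ulint>::const_iterator	it;

			for (it = votes.begin(); it != votes.end(); ++it) {
				if (it->second == valid_pages - missed) {
					*chosen = it->first;
					return(true);
				}
			}
		}

		return(false);
	}
@endcode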
+@retval DB_SUCCESS if remote linked tablespace file is opened and read. +@retval DB_CANNOT_OPEN_FILE if the link file does not exist. */ +dberr_t +RemoteDatafile::open_link_file() +{ + set_link_filepath(NULL); + m_filepath = read_link_file(m_link_filepath); + + return(m_filepath == NULL ? DB_CANNOT_OPEN_FILE : DB_SUCCESS); +} + +/** Opens a handle to the file linked to in an InnoDB Symbolic Link file +in read-only mode so that it can be validated. +@param[in] strict whether to issue error messages +@return DB_SUCCESS if remote linked tablespace file is found and opened. */ +dberr_t +RemoteDatafile::open_read_only(bool strict) +{ + if (m_filepath == NULL && open_link_file() == DB_CANNOT_OPEN_FILE) { + return(DB_ERROR); + } + + dberr_t err = Datafile::open_read_only(strict); + + if (err != DB_SUCCESS && strict) { + /* The following call prints an error message */ + os_file_get_last_error(true); + ib::error() << "A link file was found named '" + << m_link_filepath << "' but the linked tablespace '" + << m_filepath << "' could not be opened read-only."; + } + + return(err); +} + +/** Opens a handle to the file linked to in an InnoDB Symbolic Link file +in read-write mode so that it can be restored from doublewrite and validated. +@param[in] read_only_mode If true, then readonly mode checks are enforced. +@return DB_SUCCESS if remote linked tablespace file is found and opened. */ +dberr_t +RemoteDatafile::open_read_write(bool read_only_mode) +{ + if (m_filepath == NULL && open_link_file() == DB_CANNOT_OPEN_FILE) { + return(DB_ERROR); + } + + dberr_t err = Datafile::open_read_write(read_only_mode); + + if (err != DB_SUCCESS) { + /* The following call prints an error message */ + m_last_os_error = os_file_get_last_error(true); + ib::error() << "A link file was found named '" + << m_link_filepath << "' but the linked data file '" + << m_filepath << "' could not be opened for writing."; + } + + return(err); +} + +/** Release the resources. */ +void +RemoteDatafile::shutdown() +{ + Datafile::shutdown(); + + if (m_link_filepath != 0) { + ut_free(m_link_filepath); + m_link_filepath = 0; + } +} + +/** Set the link filepath. Use default datadir, the base name of +the path provided without its suffix, plus DOT_ISL. +@param[in] path filepath which contains a basename to use. + If NULL, use m_name as the basename. */ +void +RemoteDatafile::set_link_filepath(const char* path) +{ + if (m_link_filepath != NULL) { + return; + } + + if (path != NULL && FSP_FLAGS_GET_SHARED(flags())) { + /* Make the link_filepath based on the basename. */ + ut_ad(strcmp(&path[strlen(path) - strlen(DOT_IBD)], + DOT_IBD) == 0); + + m_link_filepath = fil_make_filepath(NULL, base_name(path), + ISL, false); + } else { + /* Make the link_filepath based on the m_name. */ + m_link_filepath = fil_make_filepath(NULL, name(), ISL, false); + } +} + +/** Creates a new InnoDB Symbolic Link (ISL) file. It is always created +under the 'datadir' of MySQL. The datadir is the directory of a +running mysqld program. We can refer to it by simply using the path ".". 
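To illustrate the file format (made-up paths, not taken from this patch): an
ISL file such as '<datadir>/test/t1.isl' holds a single line with the full
path of the remote data file, and writing one amounts to:
@code
	FILE*		f = fopen("test/t1.isl", "w");
	const char*	ibd_path = "/ssd1/data/test/t1.ibd";

	if (f != NULL) {
		fwrite(ibd_path, 1, strlen(ibd_path), f);
		fclose(f);
	}
@endcode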
+@param[in] name tablespace name +@param[in] filepath remote filepath of tablespace datafile +@param[in] is_shared true for general tablespace, + false for file-per-table +@return DB_SUCCESS or error code */ +dberr_t +RemoteDatafile::create_link_file( + const char* name, + const char* filepath, + bool is_shared) +{ + bool success; + dberr_t err = DB_SUCCESS; + char* link_filepath = NULL; + char* prev_filepath = NULL; + + ut_ad(!srv_read_only_mode); + ut_ad(0 == strcmp(&filepath[strlen(filepath) - 4], DOT_IBD)); + + if (is_shared) { + /* The default location for a shared tablespace is the + datadir. We previously made sure that this filepath is + not under the datadir. If it is in the datadir there + is no need for a link file. */ + + size_t len = dirname_length(filepath); + if (len == 0) { + /* File is in the datadir. */ + return(DB_SUCCESS); + } + + Folder folder(filepath, len); + + if (folder_mysql_datadir == folder) { + /* File is in the datadir. */ + return(DB_SUCCESS); + } + + /* Use the file basename to build the ISL filepath. */ + link_filepath = fil_make_filepath(NULL, base_name(filepath), + ISL, false); + } else { + link_filepath = fil_make_filepath(NULL, name, ISL, false); + } + + if (link_filepath == NULL) { + return(DB_ERROR); + } + + prev_filepath = read_link_file(link_filepath); + + if (prev_filepath) { + /* Truncate will call this with an existing + link file which contains the same filepath. */ + bool same = !strcmp(prev_filepath, filepath); + ut_free(prev_filepath); + if (same) { + ut_free(link_filepath); + return(DB_SUCCESS); + } + } + + /** Check if the file already exists. */ + FILE* file = NULL; + bool exists; + os_file_type_t ftype; + + success = os_file_status(link_filepath, &exists, &ftype); + ulint error = 0; + + if (success && !exists) { + file = fopen(link_filepath, "w"); + if (file == NULL) { + /* This call will print its own error message */ + error = os_file_get_last_error(true); + } + } else { + error = OS_FILE_ALREADY_EXISTS; + } + + if (error != 0) { + ib::error() << "Cannot create file " << link_filepath << "."; + + if (error == OS_FILE_ALREADY_EXISTS) { + ib::error() << "The link file: " << link_filepath + << " already exists."; + err = DB_TABLESPACE_EXISTS; + + } else if (error == OS_FILE_DISK_FULL) { + err = DB_OUT_OF_FILE_SPACE; + + } else { + err = DB_ERROR; + } + + /* file is not open, no need to close it. */ + ut_free(link_filepath); + return(err); + } + + ulint rbytes = fwrite(filepath, 1, strlen(filepath), file); + + if (rbytes != strlen(filepath)) { + error = os_file_get_last_error(true); + ib::error() << + "Cannot write link file: " + << filepath; + err = DB_ERROR; + } + + /* Close the file, we only need it at startup */ + fclose(file); + + ut_free(link_filepath); + + return(err); +} + +/** Delete an InnoDB Symbolic Link (ISL) file. */ +void +RemoteDatafile::delete_link_file(void) +{ + ut_ad(m_link_filepath != NULL); + + if (m_link_filepath != NULL) { + os_file_delete_if_exists(innodb_data_file_key, + m_link_filepath, NULL); + } +} + +/** Delete an InnoDB Symbolic Link (ISL) file by name. +@param[in] name tablespace name */ +void +RemoteDatafile::delete_link_file( + const char* name) +{ + char* link_filepath = fil_make_filepath(NULL, name, ISL, false); + + if (link_filepath != NULL) { + os_file_delete_if_exists( + innodb_data_file_key, link_filepath, NULL); + + ut_free(link_filepath); + } +} + +/** Read an InnoDB Symbolic Link (ISL) file by name. +It is always created under the datadir of MySQL. 
+For file-per-table tablespaces, the isl file is expected to be +in a 'database' directory and called 'tablename.isl'. +For general tablespaces, there will be no 'database' directory. +The 'basename.isl' will be in the datadir. +The caller must free the memory returned if it is not null. +@param[in] link_filepath filepath of the ISL file +@return Filepath of the IBD file read from the ISL file */ +char* +RemoteDatafile::read_link_file( + const char* link_filepath) +{ + char* filepath = NULL; + FILE* file = NULL; + + file = fopen(link_filepath, "r+b"); + if (file == NULL) { + return(NULL); + } + + filepath = static_cast( + ut_malloc_nokey(OS_FILE_MAX_PATH)); + + os_file_read_string(file, filepath, OS_FILE_MAX_PATH); + fclose(file); + + if (filepath[0] != '\0') { + /* Trim whitespace from end of filepath */ + ulint last_ch = strlen(filepath) - 1; + while (last_ch > 4 && filepath[last_ch] <= 0x20) { + filepath[last_ch--] = 0x00; + } + os_normalize_path(filepath); + } + + return(filepath); +} diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index d51e36bc0ba..109299a502b 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -23,6 +23,8 @@ File space management Created 11/29/1995 Heikki Tuuri ***********************************************************************/ +#include "ha_prototypes.h" + #include "fsp0fsp.h" #ifdef UNIV_NONINL @@ -39,9 +41,9 @@ Created 11/29/1995 Heikki Tuuri #ifdef UNIV_HOTBACKUP # include "fut0lst.h" #else /* UNIV_HOTBACKUP */ -# include "sync0sync.h" # include "fut0fut.h" # include "srv0srv.h" +# include "srv0start.h" # include "ibuf0ibuf.h" # include "btr0btr.h" # include "btr0sea.h" @@ -49,48 +51,22 @@ Created 11/29/1995 Heikki Tuuri # include "log0log.h" #endif /* UNIV_HOTBACKUP */ #include "dict0mem.h" -#include "srv0start.h" - +#include "fsp0sysspace.h" +#include "fsp0types.h" #ifndef UNIV_HOTBACKUP -/** Flag to indicate if we have printed the tablespace full error. */ -static ibool fsp_tbs_full_error_printed = FALSE; -/**********************************************************************//** -Returns an extent to the free list of a space. */ +/** Returns an extent to the free list of a space. +@param[in] page_id page id in the extent +@param[in] page_size page size +@param[in,out] mtr mini-transaction */ static void fsp_free_extent( -/*============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page, /*!< in: page offset in the extent */ - mtr_t* mtr); /*!< in/out: mini-transaction */ -/**********************************************************************//** -Frees an extent of a segment to the space free list. */ -static -void -fseg_free_extent( -/*=============*/ - fseg_inode_t* seg_inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page, /*!< in: page offset in the extent */ - mtr_t* mtr); /*!< in/out: mini-transaction */ -/**********************************************************************//** -Calculates the number of pages reserved by a segment, and how -many pages are currently used. 
-@return number of reserved pages */ -static -ulint -fseg_n_reserved_pages_low( -/*======================*/ - fseg_inode_t* header, /*!< in: segment inode */ - ulint* used, /*!< out: number of pages used (not - more than reserved) */ - mtr_t* mtr); /*!< in/out: mini-transaction */ + const page_id_t& page_id, + const page_size_t& page_size, + mtr_t* mtr); + /********************************************************************//** Marks a page used. The page must reside within the extents of the given segment. */ @@ -102,115 +78,298 @@ fseg_mark_page_used( ulint page, /*!< in: page offset */ xdes_t* descr, /*!< in: extent descriptor */ mtr_t* mtr); /*!< in/out: mini-transaction */ -/**********************************************************************//** -Returns the first extent descriptor for a segment. We think of the extent -lists of the segment catenated in the order FSEG_FULL -> FSEG_NOT_FULL --> FSEG_FREE. -@return the first extent descriptor, or NULL if none */ + +/** Returns the first extent descriptor for a segment. +We think of the extent lists of the segment catenated in the order +FSEG_FULL -> FSEG_NOT_FULL -> FSEG_FREE. +@param[in] inode segment inode +@param[in] space_id space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return the first extent descriptor, or NULL if none */ static xdes_t* fseg_get_first_extent( -/*==================*/ - fseg_inode_t* inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr); /*!< in/out: mini-transaction */ -/**********************************************************************//** -Puts new extents to the free list if -there are free extents above the free limit. If an extent happens -to contain an extent descriptor page, the extent is put to -the FSP_FREE_FRAG list with the page marked as used. */ -static + fseg_inode_t* inode, + ulint space_id, + const page_size_t& page_size, + mtr_t* mtr); + +/** Put new extents to the free list if there are free extents above the free +limit. If an extent happens to contain an extent descriptor page, the extent +is put to the FSP_FREE_FRAG list with the page marked as used. +@param[in] init_space true if this is a single-table tablespace +and we are only initializing the first extent and the first bitmap pages; +then we will not allocate more extents +@param[in,out] space tablespace +@param[in,out] header tablespace header +@param[in,out] mtr mini-transaction */ +static UNIV_COLD void fsp_fill_free_list( -/*===============*/ - ibool init_space, /*!< in: TRUE if this is a single-table - tablespace and we are only initing - the tablespace's first extent - descriptor page and ibuf bitmap page; - then we do not allocate more extents */ - ulint space, /*!< in: space */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ - UNIV_COLD MY_ATTRIBUTE((nonnull)); -/**********************************************************************//** -Allocates a single free page from a segment. This function implements -the intelligent allocation strategy which tries to minimize file space -fragmentation. -@retval NULL if no page could be allocated -@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded + bool init_space, + fil_space_t* space, + fsp_header_t* header, + mtr_t* mtr); + +/** Allocates a single free page from a segment. 
+This function implements the intelligent allocation strategy which tries +to minimize file space fragmentation. +@param[in,out] space tablespace +@param[in] page_size page size +@param[in,out] seg_inode segment inode +@param[in] hint hint of which page would be desirable +@param[in] direction if the new page is needed because of +an index page split, and records are inserted there in order, into which +direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR +@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH +@param[in,out] mtr mini-transaction +@param[in,out] init_mtr mtr or another mini-transaction in +which the page should be initialized. If init_mtr != mtr, but the page is +already latched in mtr, do not initialize the page +@param[in] has_done_reservation TRUE if the space has already been +reserved, in this case we will never return NULL +@retval NULL if no page could be allocated +@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) -@retval block (not allocated or initialized) otherwise */ +@retval block (not allocated or initialized) otherwise */ static buf_block_t* fseg_alloc_free_page_low( -/*=====================*/ - ulint space, /*!< in: space */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - fseg_inode_t* seg_inode, /*!< in/out: segment inode */ - ulint hint, /*!< in: hint of which page would be - desirable */ - byte direction, /*!< in: if the new page is needed because - of an index page split, and records are - inserted there in order, into which - direction they go alphabetically: FSP_DOWN, - FSP_UP, FSP_NO_DIR */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction - in which the page should be initialized. - If init_mtr!=mtr, but the page is already - latched in mtr, do not initialize the page. */ - MY_ATTRIBUTE((warn_unused_result, nonnull)); -#endif /* !UNIV_HOTBACKUP */ - -/**********************************************************************//** -Reads the file space size stored in the header page. -@return tablespace size stored in the space header */ -UNIV_INTERN -ulint -fsp_get_size_low( -/*=============*/ - page_t* page) /*!< in: header page (page 0 in the tablespace) */ -{ - return(mach_read_from_4(page + FSP_HEADER_OFFSET + FSP_SIZE)); -} - -#ifndef UNIV_HOTBACKUP -/**********************************************************************//** -Gets a pointer to the space header and x-locks its page. -@return pointer to the space header, page x-locked */ + fil_space_t* space, + const page_size_t& page_size, + fseg_inode_t* seg_inode, + ulint hint, + byte direction, + rw_lock_type_t rw_latch, + mtr_t* mtr, + mtr_t* init_mtr +#ifdef UNIV_DEBUG + , ibool has_done_reservation +#endif /* UNIV_DEBUG */ +) + __attribute__((warn_unused_result)); + +/** Gets a pointer to the space header and x-locks its page. 
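A hypothetical usage sketch (not taken from this patch), reading the free
limit of a tablespace under the proper latch:
@code
	mtr_t	mtr;

	mtr_start(&mtr);

	fil_space_t*	space = mtr_x_lock_space(space_id, &mtr);
	fsp_header_t*	header = fsp_get_space_header(
		space_id, page_size_t(space->flags), &mtr);
	ulint	free_limit = mach_read_from_4(header + FSP_FREE_LIMIT);

	mtr_commit(&mtr);
@endcode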
+@param[in] id space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return pointer to the space header, page x-locked */ UNIV_INLINE fsp_header_t* fsp_get_space_header( -/*=================*/ - ulint id, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint id, + const page_size_t& page_size, + mtr_t* mtr) { buf_block_t* block; fsp_header_t* header; - ut_ad(ut_is_2pow(zip_size)); - ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); - ut_ad(!zip_size || zip_size >= UNIV_ZIP_SIZE_MIN); - ut_ad(id || !zip_size); + ut_ad(id != 0 || !page_size.is_compressed()); - block = buf_page_get(id, zip_size, 0, RW_X_LATCH, mtr); + block = buf_page_get(page_id_t(id, 0), page_size, RW_SX_LATCH, mtr); header = FSP_HEADER_OFFSET + buf_block_get_frame(block); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); ut_ad(id == mach_read_from_4(FSP_SPACE_ID + header)); - ut_ad(zip_size == fsp_flags_get_zip_size( - mach_read_from_4(FSP_SPACE_FLAGS + header))); +#ifdef UNIV_DEBUG + const ulint flags = mach_read_from_4(FSP_SPACE_FLAGS + header); + ut_ad(page_size_t(flags).equals_to(page_size)); +#endif /* UNIV_DEBUG */ return(header); } +/** Convert a 32 bit integer tablespace flags to the 32 bit table flags. +This can only be done for a tablespace that was built as a file-per-table +tablespace. Note that the fsp_flags cannot show the difference between a +Compact and Redundant table, so an extra Compact boolean must be supplied. + Low order bit + | REDUNDANT | COMPACT | COMPRESSED | DYNAMIC +fil_space_t::flags | 0 | 0 | 1 | 1 +dict_table_t::flags | 0 | 1 | 1 | 1 +@param[in] fsp_flags fil_space_t::flags +@param[in] compact true if not Redundant row format +@return tablespace flags (fil_space_t::flags) */ +ulint +fsp_flags_to_dict_tf( + ulint fsp_flags, + bool compact) +{ + /* If the table in this file-per-table tablespace is Compact + row format, the low order bit will not indicate Compact. */ + bool post_antelope = FSP_FLAGS_GET_POST_ANTELOPE(fsp_flags); + ulint zip_ssize = FSP_FLAGS_GET_ZIP_SSIZE(fsp_flags); + bool atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(fsp_flags); + bool data_dir = FSP_FLAGS_HAS_DATA_DIR(fsp_flags); + bool shared_space = FSP_FLAGS_GET_SHARED(fsp_flags); + bool page_compressed = FSP_FLAGS_GET_PAGE_COMPRESSION(fsp_flags); + ulint comp_level = FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(fsp_flags); + bool atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(fsp_flags); + /* FSP_FLAGS_GET_TEMPORARY(fsp_flags) does not have an equivalent + flag position in the table flags. But it would go into flags2 if + any code is created where that is needed. */ + + ulint flags = dict_tf_init(post_antelope | compact, zip_ssize, + atomic_blobs, data_dir, shared_space, + page_compressed, comp_level, atomic_writes); + + return(flags); +} + +/** Validate the tablespace flags. +These flags are stored in the tablespace header at offset FSP_SPACE_FLAGS. +They should be 0 for ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. +The newer row formats, COMPRESSED and DYNAMIC, use a file format > Antelope +so they should have a file format number plus the DICT_TF_COMPACT bit set. 
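A hypothetical caller of fsp_flags_to_dict_tf() above; the Compact property
must be supplied separately because the low order bit of the tablespace flags
is zero for both REDUNDANT and COMPACT (see the table above):
@code
	const bool	is_compact = true;	// known from the dictionary
	ulint		table_flags = fsp_flags_to_dict_tf(
		space->flags, is_compact);
@endcode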
+@param[in] flags Tablespace flags +@return true if valid, false if not */ +bool +fsp_flags_is_valid( + ulint flags) +{ + bool post_antelope = FSP_FLAGS_GET_POST_ANTELOPE(flags); + ulint zip_ssize = FSP_FLAGS_GET_ZIP_SSIZE(flags); + bool atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(flags); + ulint page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); + bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags); + bool is_shared = FSP_FLAGS_GET_SHARED(flags); + bool is_temp = FSP_FLAGS_GET_TEMPORARY(flags); + ulint unused = FSP_FLAGS_GET_UNUSED(flags); + bool page_compression = FSP_FLAGS_GET_PAGE_COMPRESSION(flags); + ulint page_compression_level = FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags); + ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags); + + DBUG_EXECUTE_IF("fsp_flags_is_valid_failure", return(false);); + + /* The Antelope row formats REDUNDANT and COMPACT did + not use tablespace flags, so the entire 4-byte field + is zero for Antelope row formats. */ + if (flags == 0) { + return(true); + } + + /* Barracuda row formats COMPRESSED and DYNAMIC use a feature called + ATOMIC_BLOBS which builds on the page structure introduced for the + COMPACT row format by allowing long fields to be broken into prefix + and externally stored parts. So if it is Post_antelope, it uses + Atomic BLOBs. */ + if (post_antelope != atomic_blobs) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_blobs %d\n", + flags, atomic_blobs); + return(false); + } + + /* Make sure there are no bits that we do not know about. */ + if (unused != 0) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted unused %lu\n", + flags, unused); + return(false); + } + + /* The zip ssize can be zero if it is other than compressed row format, + or it could be from 1 to the max. */ + if (zip_ssize > PAGE_ZIP_SSIZE_MAX) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted zip_ssize %lu max %d\n", + flags, zip_ssize, PAGE_ZIP_SSIZE_MAX); + return(false); + } + + /* The actual page size must be within 4k and 16K (3 =< ssize =< 5). */ + if (page_ssize != 0 + && (page_ssize < UNIV_PAGE_SSIZE_MIN + || page_ssize > UNIV_PAGE_SSIZE_MAX)) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_ssize %lu min:%lu:max:%lu\n", + flags, page_ssize, UNIV_PAGE_SSIZE_MIN, UNIV_PAGE_SSIZE_MAX); + return(false); + } + + /* Only single-table tablespaces use the DATA DIRECTORY clause. + It is not compatible with the TABLESPACE clause. Nor is it + compatible with the TEMPORARY clause. */ + if (has_data_dir && (is_shared || is_temp)) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted has_data_dir %d is_shared %d is_temp %d\n", + flags, has_data_dir, is_shared, is_temp); + return(false); + } + + /* Page compression level requires page compression and atomic blobs + to be set */ + if (page_compression_level || page_compression) { + if (!page_compression || !atomic_blobs) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_compression %d\n" + "InnoDB: Error: page_compression_level %lu atomic_blobs %d\n", + flags, page_compression, page_compression_level, atomic_blobs); + return(false); + } + } + + if (atomic_writes > ATOMIC_WRITES_OFF) { + fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_writes %lu\n", + flags, atomic_writes); + return (false); + } +#if UNIV_FORMAT_MAX != UNIV_FORMAT_B +# error UNIV_FORMAT_MAX != UNIV_FORMAT_B, Add more validations. 
+#endif +#if FSP_FLAGS_POS_UNUSED != 13 + //# error You have added a new FSP_FLAG without adding a validation check. +#endif + + return(true); +} + +/** Check if tablespace is system temporary. +@param[in] space_id tablespace ID +@return true if tablespace is system temporary. */ +bool +fsp_is_system_temporary( + ulint space_id) +{ + return(space_id == srv_tmp_space.space_id()); +} + +/** Check if checksum is disabled for the given space. +@param[in] space_id tablespace ID +@return true if checksum is disabled for given space. */ +bool +fsp_is_checksum_disabled( + ulint space_id) +{ + return(fsp_is_system_temporary(space_id)); +} + +/** Check if tablespace is file-per-table. +@param[in] space_id tablespace ID +@param[in] fsp_flags tablespace flags +@return true if tablespace is file-per-table. */ +bool +fsp_is_file_per_table( + ulint space_id, + ulint fsp_flags) +{ + return(!is_system_tablespace(space_id) + && !fsp_is_shared_tablespace(fsp_flags)); +} + +#ifdef UNIV_DEBUG + +/** Skip some of the sanity checks that are time consuming even in debug mode +and can affect frequent verification runs that are done to ensure stability of +the product. +@return true if check should be skipped for given space. */ +bool +fsp_skip_sanity_check( + ulint space_id) +{ + return(srv_skip_temp_table_checks_debug + && fsp_is_system_temporary(space_id)); +} + +#endif /* UNIV_DEBUG */ + /**********************************************************************//** Gets a descriptor bit of a page. -@return TRUE if free */ +@return TRUE if free */ UNIV_INLINE ibool xdes_mtr_get_bit( @@ -221,8 +380,8 @@ xdes_mtr_get_bit( 0 ... FSP_EXTENT_SIZE - 1 */ mtr_t* mtr) /*!< in: mini-transaction */ { - ut_ad(mtr->state == MTR_ACTIVE); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr->is_active()); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); return(xdes_get_bit(descr, bit, offset)); } @@ -245,7 +404,7 @@ xdes_set_bit( ulint bit_index; ulint descr_byte; - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); ut_ad((bit == XDES_FREE_BIT) || (bit == XDES_CLEAN_BIT)); ut_ad(offset < FSP_EXTENT_SIZE); @@ -254,8 +413,7 @@ xdes_set_bit( byte_index = index / 8; bit_index = index % 8; - descr_byte = mtr_read_ulint(descr + XDES_BITMAP + byte_index, - MLOG_1BYTE, mtr); + descr_byte = mach_read_from_1(descr + XDES_BITMAP + byte_index); descr_byte = ut_bit_set_nth(descr_byte, bit_index, val); mlog_write_ulint(descr + XDES_BITMAP + byte_index, descr_byte, @@ -266,7 +424,7 @@ xdes_set_bit( Looks for a descriptor bit having the desired value. Starts from hint and scans upward; at the end of the extent the search is wrapped to the start of the extent. -@return bit index of the bit, ULINT_UNDEFINED if not found */ +@return bit index of the bit, ULINT_UNDEFINED if not found */ UNIV_INLINE ulint xdes_find_bit( @@ -283,7 +441,7 @@ xdes_find_bit( ut_ad(descr && mtr); ut_ad(val <= TRUE); ut_ad(hint < FSP_EXTENT_SIZE); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); for (i = hint; i < FSP_EXTENT_SIZE; i++) { if (val == xdes_mtr_get_bit(descr, bit, i, mtr)) { @@ -303,7 +461,7 @@ xdes_find_bit( /**********************************************************************//** Returns the number of used pages in a descriptor. 
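In other words, one XDES_FREE_BIT is kept per page of the extent and the used
count is the number of cleared bits, as in this simplified sketch (a plain
bool array standing in for the on-page bitmap and the mini-transaction
accessors):
@code
	ulint	used = 0;

	for (ulint i = 0; i < FSP_EXTENT_SIZE; ++i) {
		if (!page_is_free[i]) {	// stand-in for xdes_mtr_get_bit()
			++used;
		}
	}
@endcode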
-@return number of pages used */ +@return number of pages used */ UNIV_INLINE ulint xdes_get_n_used( @@ -314,7 +472,7 @@ xdes_get_n_used( ulint count = 0; ut_ad(descr && mtr); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); for (ulint i = 0; i < FSP_EXTENT_SIZE; ++i) { if (FALSE == xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) { count++; @@ -326,7 +484,7 @@ xdes_get_n_used( /**********************************************************************//** Returns true if extent contains no used pages. -@return TRUE if totally free */ +@return TRUE if totally free */ UNIV_INLINE ibool xdes_is_free( @@ -344,7 +502,7 @@ xdes_is_free( /**********************************************************************//** Returns true if extent contains no free pages. -@return TRUE if full */ +@return TRUE if full */ UNIV_INLINE ibool xdes_is_full( @@ -373,14 +531,14 @@ xdes_set_state( ut_ad(descr && mtr); ut_ad(state >= XDES_FREE); ut_ad(state <= XDES_FSEG); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); mlog_write_ulint(descr + XDES_STATE, state, MLOG_4BYTES, mtr); } /**********************************************************************//** Gets the state of an xdes. -@return state */ +@return state */ UNIV_INLINE ulint xdes_get_state( @@ -391,9 +549,9 @@ xdes_get_state( ulint state; ut_ad(descr && mtr); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); - state = mtr_read_ulint(descr + XDES_STATE, MLOG_4BYTES, mtr); + state = mach_read_from_4(descr + XDES_STATE); ut_ad(state - 1 < XDES_FSEG); return(state); } @@ -410,7 +568,7 @@ xdes_init( ulint i; ut_ad(descr && mtr); - ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX)); ut_ad((XDES_SIZE - XDES_BITMAP) % 4 == 0); for (i = XDES_BITMAP; i < XDES_SIZE; i += 4) { @@ -420,93 +578,114 @@ xdes_init( xdes_set_state(descr, XDES_FREE, mtr); } -/********************************************************************//** -Gets pointer to a the extent descriptor of a page. The page where the extent -descriptor resides is x-locked. This function no longer extends the data -file. +/** Get pointer to a the extent descriptor of a page. 
+@param[in,out] sp_header tablespace header page, x-latched +@param[in] space tablespace identifier +@param[in] offset page offset +@param[in,out] mtr mini-transaction +@param[in] init_space whether the tablespace is being initialized +@param[out] desc_block descriptor block, or NULL if it is +the same as the tablespace header @return pointer to the extent descriptor, NULL if the page does not -exist in the space or if the offset is >= the free limit */ -UNIV_INLINE MY_ATTRIBUTE((nonnull, warn_unused_result)) +exist in the space or if the offset exceeds free limit */ +UNIV_INLINE MY_ATTRIBUTE((nonnull(1,4), warn_unused_result)) xdes_t* xdes_get_descriptor_with_space_hdr( -/*===============================*/ - fsp_header_t* sp_header, /*!< in/out: space header, x-latched - in mtr */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page offset; if equal - to the free limit, we try to - add new extents to the space - free list */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fsp_header_t* sp_header, + ulint space, + ulint offset, + mtr_t* mtr, + bool init_space = false, + buf_block_t** desc_block = NULL) { ulint limit; ulint size; - ulint zip_size; ulint descr_page_no; + ulint flags; page_t* descr_page; - - ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space, NULL), - MTR_MEMO_X_LOCK)); - ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX)); +#ifdef UNIV_DEBUG + const fil_space_t* fspace = fil_space_get(space); + ut_ad(fspace != NULL); +#endif /* UNIV_DEBUG */ + ut_ad(mtr_memo_contains(mtr, &fspace->latch, MTR_MEMO_X_LOCK)); + ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX)); ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET); /* Read free limit and space size */ limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT); size = mach_read_from_4(sp_header + FSP_SIZE); - zip_size = fsp_flags_get_zip_size( - mach_read_from_4(sp_header + FSP_SPACE_FLAGS)); + flags = mach_read_from_4(sp_header + FSP_SPACE_FLAGS); + ut_ad(limit == fspace->free_limit + || (fspace->free_limit == 0 + && (init_space + || fspace->purpose == FIL_TYPE_TEMPORARY + || (srv_startup_is_before_trx_rollback_phase + && fspace->id <= srv_undo_tablespaces)))); + ut_ad(size == fspace->size_in_header); + ut_ad(flags == fspace->flags); if ((offset >= size) || (offset >= limit)) { return(NULL); } - descr_page_no = xdes_calc_descriptor_page(zip_size, offset); + const page_size_t page_size(flags); + + descr_page_no = xdes_calc_descriptor_page(page_size, offset); + + buf_block_t* block; if (descr_page_no == 0) { /* It is on the space header page */ descr_page = page_align(sp_header); + block = NULL; } else { - buf_block_t* block; + block = buf_page_get( + page_id_t(space, descr_page_no), page_size, + RW_SX_LATCH, mtr); - block = buf_page_get(space, zip_size, descr_page_no, - RW_X_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); descr_page = buf_block_get_frame(block); } + if (desc_block != NULL) { + *desc_block = block; + } + return(descr_page + XDES_ARR_OFFSET - + XDES_SIZE * xdes_calc_descriptor_index(zip_size, offset)); + + XDES_SIZE * xdes_calc_descriptor_index(page_size, offset)); } -/********************************************************************//** -Gets pointer to a the extent descriptor of a page. The page where the -extent descriptor resides is x-locked. If the page offset is equal to -the free limit of the space, adds new extents from above the free limit -to the space free list, if not free limit == space size. 
This adding +/** Gets pointer to a the extent descriptor of a page. +The page where the extent descriptor resides is x-locked. If the page offset +is equal to the free limit of the space, adds new extents from above the free +limit to the space free list, if not free limit == space size. This adding is necessary to make the descriptor defined, as they are uninitialized above the free limit. +@param[in] space_id space id +@param[in] offset page offset; if equal to the free limit, we +try to add new extents to the space free list +@param[in] page_size page size +@param[in,out] mtr mini-transaction @return pointer to the extent descriptor, NULL if the page does not exist in the space or if the offset exceeds the free limit */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) xdes_t* xdes_get_descriptor( -/*================*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint offset, /*!< in: page offset; if equal to the free limit, - we try to add new extents to the space free list */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space_id, + ulint offset, + const page_size_t& page_size, + mtr_t* mtr) { buf_block_t* block; fsp_header_t* sp_header; - block = buf_page_get(space, zip_size, 0, RW_X_LATCH, mtr); + block = buf_page_get(page_id_t(space_id, 0), page_size, + RW_SX_LATCH, mtr); + buf_block_dbg_add_level(block, SYNC_FSP_PAGE); sp_header = FSP_HEADER_OFFSET + buf_block_get_frame(block); - return(xdes_get_descriptor_with_space_hdr(sp_header, space, offset, + return(xdes_get_descriptor_with_space_hdr(sp_header, space_id, offset, mtr)); } @@ -514,14 +693,13 @@ xdes_get_descriptor( Gets pointer to a the extent descriptor if the file address of the descriptor list node is known. The page where the extent descriptor resides is x-locked. -@return pointer to the extent descriptor */ +@return pointer to the extent descriptor */ UNIV_INLINE xdes_t* xdes_lst_get_descriptor( /*====================*/ ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ + const page_size_t& page_size, fil_addr_t lst_node,/*!< in: file address of the list node contained in the descriptor */ mtr_t* mtr) /*!< in/out: mini-transaction */ @@ -531,7 +709,7 @@ xdes_lst_get_descriptor( ut_ad(mtr); ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space, NULL), MTR_MEMO_X_LOCK)); - descr = fut_get_ptr(space, zip_size, lst_node, RW_X_LATCH, mtr) + descr = fut_get_ptr(space, page_size, lst_node, RW_SX_LATCH, mtr) - XDES_FLST_NODE; return(descr); @@ -539,7 +717,7 @@ xdes_lst_get_descriptor( /********************************************************************//** Returns page offset of the first page in extent described by a descriptor. 
-@return offset of the first page in extent */ +@return offset of the first page in extent */ UNIV_INLINE ulint xdes_get_offset( @@ -565,52 +743,89 @@ fsp_init_file_page_low( page_t* page = buf_block_get_frame(block); page_zip_des_t* page_zip= buf_block_get_page_zip(block); -#ifndef UNIV_HOTBACKUP - block->check_index_page_at_flush = FALSE; -#endif /* !UNIV_HOTBACKUP */ + if (!fsp_is_system_temporary(block->page.id.space())) { + memset(page, 0, UNIV_PAGE_SIZE); + } + + mach_write_to_4(page + FIL_PAGE_OFFSET, block->page.id.page_no()); + mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, + block->page.id.space()); if (page_zip) { - memset(page, 0, UNIV_PAGE_SIZE); memset(page_zip->data, 0, page_zip_get_size(page_zip)); - mach_write_to_4(page + FIL_PAGE_OFFSET, - buf_block_get_page_no(block)); - mach_write_to_4(page - + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, - buf_block_get_space(block)); memcpy(page_zip->data + FIL_PAGE_OFFSET, page + FIL_PAGE_OFFSET, 4); memcpy(page_zip->data + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, 4); + } +} + +#ifndef UNIV_HOTBACKUP +# ifdef UNIV_DEBUG +/** Assert that the mini-transaction is compatible with +updating an allocation bitmap page. +@param[in] id tablespace identifier +@param[in] mtr mini-transaction */ +static +void +fsp_space_modify_check( + ulint id, + const mtr_t* mtr) +{ + switch (mtr->get_log_mode()) { + case MTR_LOG_SHORT_INSERTS: + case MTR_LOG_NONE: + /* These modes are only allowed within a non-bitmap page + when there is a higher-level redo log record written. */ + break; + case MTR_LOG_NO_REDO: +#ifdef UNIV_DEBUG + { + const fil_type_t type = fil_space_get_type(id); + ut_a(id == srv_tmp_space.space_id() + || srv_is_tablespace_truncated(id) + || fil_space_is_being_truncated(id) + || fil_space_get_flags(id) == ULINT_UNDEFINED + || type == FIL_TYPE_TEMPORARY + || type == FIL_TYPE_IMPORT + || fil_space_is_redo_skipped(id)); + } +#endif /* UNIV_DEBUG */ + return; + case MTR_LOG_ALL: + /* We must not write redo log for the shared temporary + tablespace. */ + ut_ad(id != srv_tmp_space.space_id()); + /* If we write redo log, the tablespace must exist. */ + ut_ad(fil_space_get_type(id) == FIL_TYPE_TABLESPACE); + ut_ad(mtr->is_named_space(id)); return; } - memset(page, 0, UNIV_PAGE_SIZE); - mach_write_to_4(page + FIL_PAGE_OFFSET, buf_block_get_page_no(block)); - mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, - buf_block_get_space(block)); + ut_ad(0); } +# endif /* UNIV_DEBUG */ -#ifndef UNIV_HOTBACKUP -/***********************************************************//** -Inits a file page whose prior contents should be ignored. */ +/** Initialize a file page. +@param[in,out] block file page +@param[in,out] mtr mini-transaction */ static void fsp_init_file_page( -/*===============*/ - buf_block_t* block, /*!< in: pointer to a page */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + buf_block_t* block, + mtr_t* mtr) { fsp_init_file_page_low(block); + ut_d(fsp_space_modify_check(block->page.id.space(), mtr)); mlog_write_initial_log_record(buf_block_get_frame(block), - MLOG_INIT_FILE_PAGE, mtr); + MLOG_INIT_FILE_PAGE2, mtr); } #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses a redo log record of a file page init. 
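A hypothetical caller-side sketch of the MTR_LOG_NO_REDO case accepted by
fsp_space_modify_check() above, assuming the usual mtr_t::set_log_mode()
setter: changes to the shared temporary tablespace are made without writing
redo log.
@code
	mtr_t	mtr;

	mtr_start(&mtr);
	mtr.set_log_mode(MTR_LOG_NO_REDO);

	// ... modify pages belonging to srv_tmp_space ...

	mtr_commit(&mtr);
@endcode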
-@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* fsp_parse_init_file_page( /*=====================*/ @@ -618,7 +833,8 @@ fsp_parse_init_file_page( byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */ buf_block_t* block) /*!< in: block or NULL */ { - ut_ad(ptr && end_ptr); + ut_ad(ptr != NULL); + ut_ad(end_ptr != NULL); if (block) { fsp_init_file_page_low(block); @@ -629,7 +845,6 @@ fsp_parse_init_file_page( /**********************************************************************//** Initializes the fsp system. */ -UNIV_INTERN void fsp_init(void) /*==========*/ @@ -652,7 +867,6 @@ fsp_init(void) Writes the space id and flags to a tablespace header. The flags contain row type, physical/compressed page size, and logical/uncompressed page size of the tablespace. */ -UNIV_INTERN void fsp_header_init_fields( /*===================*/ @@ -669,32 +883,37 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ -UNIV_INTERN -void +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. */ +bool fsp_header_init( -/*============*/ - ulint space, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; page_t* page; - ulint flags; - ulint zip_size; ut_ad(mtr); - mtr_x_lock(fil_space_get_latch(space, &flags), mtr); + fil_space_t* space = mtr_x_lock_space(space_id, mtr); + + const page_id_t page_id(space_id, 0); + const page_size_t page_size(space->flags); - zip_size = fsp_flags_get_zip_size(flags); - block = buf_page_create(space, 0, zip_size, mtr); - buf_page_get(space, zip_size, 0, RW_X_LATCH, mtr); + block = buf_page_create(page_id, page_size, mtr); + buf_page_get(page_id, page_size, RW_SX_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); + space->size_in_header = size; + space->free_len = 0; + space->free_limit = 0; + /* The prior contents of the file page should be ignored */ fsp_init_file_page(block, mtr); @@ -705,12 +924,12 @@ fsp_header_init( header = FSP_HEADER_OFFSET + page; - mlog_write_ulint(header + FSP_SPACE_ID, space, MLOG_4BYTES, mtr); + mlog_write_ulint(header + FSP_SPACE_ID, space_id, MLOG_4BYTES, mtr); mlog_write_ulint(header + FSP_NOT_USED, 0, MLOG_4BYTES, mtr); mlog_write_ulint(header + FSP_SIZE, size, MLOG_4BYTES, mtr); mlog_write_ulint(header + FSP_FREE_LIMIT, 0, MLOG_4BYTES, mtr); - mlog_write_ulint(header + FSP_SPACE_FLAGS, flags, + mlog_write_ulint(header + FSP_SPACE_FLAGS, space->flags, MLOG_4BYTES, mtr); mlog_write_ulint(header + FSP_FRAG_N_USED, 0, MLOG_4BYTES, mtr); @@ -721,26 +940,29 @@ fsp_header_init( flst_init(header + FSP_SEG_INODES_FREE, mtr); mlog_write_ull(header + FSP_SEG_ID, 1, mtr); - if (space == 0) { - fsp_fill_free_list(FALSE, space, header, mtr); - btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, - 0, 0, DICT_IBUF_ID_MIN + space, - dict_ind_redundant, mtr); - } else { - fsp_fill_free_list(TRUE, space, header, mtr); + + fsp_fill_free_list(!is_system_tablespace(space_id), + space, header, mtr); + + if (space_id == srv_sys_space.space_id()) { + if 
(btr_create(DICT_CLUSTERED | DICT_IBUF, + 0, univ_page_size, DICT_IBUF_ID_MIN + space_id, + dict_ind_redundant, NULL, mtr) == FIL_NULL) { + return(false); + } } ulint maxsize = 0; - ulint offset = fsp_header_get_crypt_offset(zip_size, &maxsize); - fil_space_write_crypt_data(space, page, offset, maxsize, mtr); -} + ulint offset = fsp_header_get_crypt_offset(page_size, &maxsize); + fil_space_write_crypt_data(space_id, page, offset, maxsize, mtr); + return(true); +} #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Reads the space id from the first page of a tablespace. -@return space id, ULINT UNDEFINED if error */ -UNIV_INTERN +@return space id, ULINT UNDEFINED if error */ ulint fsp_header_get_space_id( /*====================*/ @@ -757,71 +979,52 @@ fsp_header_get_space_id( id = ULINT_UNDEFINED;); if (id != fsp_id) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Space id in fsp header %lu,but in the page header " - "%lu", fsp_id, id); - + ib::error() << "Space ID in fsp header is " << fsp_id + << ", but in the page header it is " << id << "."; return(ULINT_UNDEFINED); } return(id); } -/**********************************************************************//** -Reads the space flags from the first page of a tablespace. -@return flags */ -UNIV_INTERN -ulint -fsp_header_get_flags( -/*=================*/ - const page_t* page) /*!< in: first page of a tablespace */ -{ - ut_ad(!page_offset(page)); - - return(mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + page)); -} - -/**********************************************************************//** -Reads the compressed page size from the first page of a tablespace. -@return compressed page size in bytes, or 0 if uncompressed */ -UNIV_INTERN -ulint -fsp_header_get_zip_size( -/*====================*/ - const page_t* page) /*!< in: first page of a tablespace */ +/** Reads the page size from the first page of a tablespace. +@param[in] page first page of a tablespace +@return page size */ +page_size_t +fsp_header_get_page_size( + const page_t* page) { - ulint flags = fsp_header_get_flags(page); - - return(fsp_flags_get_zip_size(flags)); + return(page_size_t(fsp_header_get_flags(page))); } #ifndef UNIV_HOTBACKUP /**********************************************************************//** Increases the space size field of a space. */ -UNIV_INTERN void fsp_header_inc_size( /*================*/ - ulint space, /*!< in: space id */ + ulint space_id, /*!< in: space id */ ulint size_inc, /*!< in: size increment in pages */ mtr_t* mtr) /*!< in/out: mini-transaction */ { fsp_header_t* header; ulint size; - ulint flags; ut_ad(mtr); - mtr_x_lock(fil_space_get_latch(space, &flags), mtr); + fil_space_t* space = mtr_x_lock_space(space_id, mtr); + ut_d(fsp_space_modify_check(space_id, mtr)); - header = fsp_get_space_header(space, - fsp_flags_get_zip_size(flags), - mtr); + header = fsp_get_space_header( + space_id, page_size_t(space->flags), mtr); - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); + size = mach_read_from_4(header + FSP_SIZE); + ut_ad(size == space->size_in_header); - mlog_write_ulint(header + FSP_SIZE, size + size_inc, MLOG_4BYTES, - mtr); + size += size_inc; + + mlog_write_ulint(header + FSP_SIZE, size, MLOG_4BYTES, mtr); + space->size_in_header = size; } /**********************************************************************//** @@ -829,8 +1032,7 @@ Gets the size of the system tablespace from the tablespace header. 
If we do not have an auto-extending data file, this should be equal to the size of the data files. If there is an auto-extending data file, this can be smaller. -@return size in pages */ -UNIV_INTERN +@return size in pages */ ulint fsp_header_get_tablespace_size(void) /*================================*/ @@ -841,263 +1043,250 @@ fsp_header_get_tablespace_size(void) mtr_start(&mtr); - mtr_x_lock(fil_space_get_latch(0, NULL), &mtr); +#ifdef UNIV_DEBUG + fil_space_t* space = +#endif /* UNIV_DEBUG */ + mtr_x_lock_space(TRX_SYS_SPACE, &mtr); - header = fsp_get_space_header(0, 0, &mtr); + header = fsp_get_space_header(TRX_SYS_SPACE, univ_page_size, &mtr); - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, &mtr); + size = mach_read_from_4(header + FSP_SIZE); + ut_ad(space->size_in_header == size); mtr_commit(&mtr); return(size); } -/***********************************************************************//** -Tries to extend a single-table tablespace so that a page would fit in the +/** Try to extend a single-table tablespace so that a page would fit in the data file. -@return TRUE if success */ -static UNIV_COLD MY_ATTRIBUTE((nonnull, warn_unused_result)) -ibool +@param[in,out] space tablespace +@param[in] page_no page number +@param[in,out] header tablespace header +@param[in,out] mtr mini-transaction +@return true if success */ +static UNIV_COLD __attribute__((warn_unused_result)) +bool fsp_try_extend_data_file_with_pages( -/*================================*/ - ulint space, /*!< in: space */ - ulint page_no, /*!< in: page number */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fil_space_t* space, + ulint page_no, + fsp_header_t* header, + mtr_t* mtr) { - ibool success; - ulint actual_size; + bool success; ulint size; - ut_a(space != 0); + ut_a(!is_system_tablespace(space->id)); + ut_d(fsp_space_modify_check(space->id, mtr)); - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); + size = mach_read_from_4(header + FSP_SIZE); + ut_ad(size == space->size_in_header); ut_a(page_no >= size); - success = fil_extend_space_to_desired_size(&actual_size, space, - page_no + 1); - /* actual_size now has the space size in pages; it may be less than - we wanted if we ran out of disk space */ - - mlog_write_ulint(header + FSP_SIZE, actual_size, MLOG_4BYTES, mtr); + success = fil_space_extend(space, page_no + 1); + /* The size may be less than we wanted if we ran out of disk space. */ + mlog_write_ulint(header + FSP_SIZE, space->size, MLOG_4BYTES, mtr); + space->size_in_header = space->size; return(success); } -/***********************************************************************//** -Tries to extend the last data file of a tablespace if it is auto-extending. -@return FALSE if not auto-extending */ -static UNIV_COLD MY_ATTRIBUTE((nonnull)) -ibool +/** Try to extend the last data file of a tablespace if it is auto-extending. 
+@param[in,out] space tablespace +@param[in,out] header tablespace header +@param[in,out] mtr mini-transaction +@return whether the tablespace was extended */ +ulint fsp_try_extend_data_file( -/*=====================*/ - ulint* actual_increase,/*!< out: actual increase in pages, where - we measure the tablespace size from - what the header field says; it may be - the actual file size rounded down to - megabyte */ - ulint space, /*!< in: space */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fil_space_t* space, + fsp_header_t* header, + mtr_t* mtr, + ulint* n_pages_added) { - ulint size; - ulint zip_size; - ulint new_size; - ulint old_size; - ulint size_increase; - ulint actual_size; - ibool success; + ulint size; /* current number of pages in the datafile */ + ulint size_increase; /* number of pages to extend this file */ + const char* OUT_OF_SPACE_MSG = + "ran out of space. Please add another file or use" + " 'autoextend' for the last file in setting"; - *actual_increase = 0; + ut_d(fsp_space_modify_check(space->id, mtr)); - if (space == 0 && !srv_auto_extend_last_data_file) { + if (space->id == srv_sys_space.space_id() + && !srv_sys_space.can_auto_extend_last_file()) { /* We print the error message only once to avoid spamming the error log. Note that we don't need - to reset the flag to FALSE as dealing with this + to reset the flag to false as dealing with this error requires server restart. */ - if (fsp_tbs_full_error_printed == FALSE) { - fprintf(stderr, - "InnoDB: Error: Data file(s) ran" - " out of space.\n" - "Please add another data file or" - " use \'autoextend\' for the last" - " data file.\n"); - fsp_tbs_full_error_printed = TRUE; + if (!srv_sys_space.get_tablespace_full_status()) { + ib::error() << "Tablespace " << srv_sys_space.name() + << " " << OUT_OF_SPACE_MSG + << " innodb_data_file_path."; + srv_sys_space.set_tablespace_full_status(true); } - return(FALSE); + return(false); + } else if (fsp_is_system_temporary(space->id) + && !srv_tmp_space.can_auto_extend_last_file()) { + + /* We print the error message only once to avoid + spamming the error log. Note that we don't need + to reset the flag to false as dealing with this + error requires server restart. 
*/ + if (!srv_tmp_space.get_tablespace_full_status()) { + ib::error() << "Tablespace " << srv_tmp_space.name() + << " " << OUT_OF_SPACE_MSG + << " innodb_temp_data_file_path."; + srv_tmp_space.set_tablespace_full_status(true); + } + return(false); } - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); - zip_size = fsp_flags_get_zip_size( + size = mach_read_from_4(header + FSP_SIZE); + ut_ad(size == space->size_in_header); + + const page_size_t page_size( mach_read_from_4(header + FSP_SPACE_FLAGS)); - old_size = size; + if (space->id == srv_sys_space.space_id()) { - if (space == 0) { - if (!srv_last_file_size_max) { - size_increase = SRV_AUTO_EXTEND_INCREMENT; - } else { - if (srv_last_file_size_max - < srv_data_file_sizes[srv_n_data_files - 1]) { - - fprintf(stderr, - "InnoDB: Error: Last data file size" - " is %lu, max size allowed %lu\n", - (ulong) srv_data_file_sizes[ - srv_n_data_files - 1], - (ulong) srv_last_file_size_max); - } + size_increase = srv_sys_space.get_increment(); - size_increase = srv_last_file_size_max - - srv_data_file_sizes[srv_n_data_files - 1]; - if (size_increase > SRV_AUTO_EXTEND_INCREMENT) { - size_increase = SRV_AUTO_EXTEND_INCREMENT; - } - } - } else { - /* We extend single-table tablespaces first one extent - at a time, but 4 at a time for bigger tablespaces. It is - not enough to extend always by one extent, because we need - to add at least one extent to FSP_FREE. - A single extent descriptor page will track many extents. - And the extent that uses its extent descriptor page is - put onto the FSP_FREE_FRAG list. Extents that do not - use their extent descriptor page are added to FSP_FREE. - The physical page size is used to determine how many - extents are tracked on one extent descriptor page. */ - ulint extent_size; /*!< one megabyte, in pages */ - ulint threshold; /*!< The size of the tablespace - (in number of pages) where we - start allocating more than one - extent at a time. */ - - if (!zip_size) { - extent_size = FSP_EXTENT_SIZE; - } else { - extent_size = FSP_EXTENT_SIZE - * UNIV_PAGE_SIZE / zip_size; - } + } else if (space->id == srv_tmp_space.space_id()) { - /* Threshold is set at 32mb except when the page - size is small enough that it must be done sooner. - For page size less than 4k, we may reach the - extent contains extent descriptor page before - 32 mb. */ - threshold = ut_min((32 * extent_size), - (zip_size ? 
zip_size : UNIV_PAGE_SIZE)); + size_increase = srv_tmp_space.get_increment(); - if (size < extent_size) { + } else { + ulint extent_pages + = fsp_get_extent_size_in_pages(page_size); + if (size < extent_pages) { /* Let us first extend the file to extent_size */ - success = fsp_try_extend_data_file_with_pages( - space, extent_size - 1, header, mtr); - if (!success) { - new_size = mtr_read_ulint(header + FSP_SIZE, - MLOG_4BYTES, mtr); - - *actual_increase = new_size - old_size; - - return(FALSE); + if (!fsp_try_extend_data_file_with_pages( + space, extent_pages - 1, header, mtr)) { + return(false); } - size = extent_size; + size = extent_pages; } - if (size < threshold) { - size_increase = extent_size; - } else { - /* Below in fsp_fill_free_list() we assume - that we add at most FSP_FREE_ADD extents at - a time */ - size_increase = FSP_FREE_ADD * extent_size; - } + size_increase = fsp_get_pages_to_extend_ibd(page_size, size); } if (size_increase == 0) { - return(TRUE); + return(false); } - success = fil_extend_space_to_desired_size(&actual_size, space, - size + size_increase); - if (!success) { - + if (!fil_space_extend(space, size + size_increase)) { return(false); } + *n_pages_added = size_increase; + /* We ignore any fragments of a full megabyte when storing the size to the space header */ - if (!zip_size) { - new_size = ut_calc_align_down(actual_size, - (1024 * 1024) / UNIV_PAGE_SIZE); + space->size_in_header = ut_calc_align_down( + space->size, (1024 * 1024) / page_size.physical()); + + mlog_write_ulint( + header + FSP_SIZE, space->size_in_header, MLOG_4BYTES, mtr); + + return(true); +} + +/** Calculate the number of pages to extend a datafile. +We extend single-table and general tablespaces first one extent at a time, +but 4 at a time for bigger tablespaces. It is not enough to extend always +by one extent, because we need to add at least one extent to FSP_FREE. +A single extent descriptor page will track many extents. And the extent +that uses its extent descriptor page is put onto the FSP_FREE_FRAG list. +Extents that do not use their extent descriptor page are added to FSP_FREE. +The physical page size is used to determine how many extents are tracked +on one extent descriptor page. See xdes_calc_descriptor_page(). +@param[in] page_size page_size of the datafile +@param[in] size current number of pages in the datafile +@return number of pages to extend the file. */ +ulint +fsp_get_pages_to_extend_ibd( + const page_size_t& page_size, + ulint size) +{ + ulint size_increase; /* number of pages to extend this file */ + ulint extent_size; /* one megabyte, in pages */ + ulint threshold; /* The size of the tablespace (in number + of pages) where we start allocating more + than one extent at a time. */ + + extent_size = fsp_get_extent_size_in_pages(page_size); + + /* The threshold is set at 32MiB except when the physical page + size is small enough that it must be done sooner. 
*/ + threshold = ut_min(32 * extent_size, page_size.physical()); + + if (size < threshold) { + size_increase = extent_size; } else { - new_size = ut_calc_align_down(actual_size, - (1024 * 1024) / zip_size); + /* Below in fsp_fill_free_list() we assume + that we add at most FSP_FREE_ADD extents at + a time */ + size_increase = FSP_FREE_ADD * extent_size; } - mlog_write_ulint(header + FSP_SIZE, new_size, MLOG_4BYTES, mtr); - - *actual_increase = new_size - old_size; - return(TRUE); + return(size_increase); } -/**********************************************************************//** -Puts new extents to the free list if there are free extents above the free +/** Put new extents to the free list if there are free extents above the free limit. If an extent happens to contain an extent descriptor page, the extent -is put to the FSP_FREE_FRAG list with the page marked as used. */ +is put to the FSP_FREE_FRAG list with the page marked as used. +@param[in] init_space true if this is a single-table tablespace +and we are only initializing the first extent and the first bitmap pages; +then we will not allocate more extents +@param[in,out] space tablespace +@param[in,out] header tablespace header +@param[in,out] mtr mini-transaction */ static void fsp_fill_free_list( -/*===============*/ - ibool init_space, /*!< in: TRUE if this is a single-table - tablespace and we are only initing - the tablespace's first extent - descriptor page and ibuf bitmap page; - then we do not allocate more extents */ - ulint space, /*!< in: space */ - fsp_header_t* header, /*!< in/out: space header */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + bool init_space, + fil_space_t* space, + fsp_header_t* header, + mtr_t* mtr) { ulint limit; ulint size; - ulint zip_size; + ulint flags; xdes_t* descr; ulint count = 0; ulint frag_n_used; - ulint actual_increase; ulint i; - mtr_t ibuf_mtr; ut_ad(header != NULL); ut_ad(mtr != NULL); ut_ad(page_offset(header) == FSP_HEADER_OFFSET); + ut_d(fsp_space_modify_check(space->id, mtr)); /* Check if we can fill free list from above the free list limit */ - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); - limit = mtr_read_ulint(header + FSP_FREE_LIMIT, MLOG_4BYTES, mtr); - - zip_size = fsp_flags_get_zip_size( - mach_read_from_4(FSP_SPACE_FLAGS + header)); - ut_a(ut_is_2pow(zip_size)); - ut_a(zip_size <= UNIV_ZIP_SIZE_MAX); - ut_a(!zip_size || zip_size >= UNIV_ZIP_SIZE_MIN); - - if (space == 0 && srv_auto_extend_last_data_file - && size < limit + FSP_EXTENT_SIZE * FSP_FREE_ADD) { - - /* Try to increase the last data file size */ - fsp_try_extend_data_file(&actual_increase, space, header, mtr); - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); - } - - if (space != 0 && !init_space - && size < limit + FSP_EXTENT_SIZE * FSP_FREE_ADD) { - - /* Try to increase the .ibd file size */ - fsp_try_extend_data_file(&actual_increase, space, header, mtr); - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); + size = mach_read_from_4(header + FSP_SIZE); + limit = mach_read_from_4(header + FSP_FREE_LIMIT); + flags = mach_read_from_4(header + FSP_SPACE_FLAGS); + + ut_ad(size == space->size_in_header); + ut_ad(limit == space->free_limit); + ut_ad(flags == space->flags); + + const page_size_t page_size(flags); + + if (size < limit + FSP_EXTENT_SIZE * FSP_FREE_ADD) { + if ((!init_space && !is_system_tablespace(space->id)) + || (space->id == srv_sys_space.space_id() + && srv_sys_space.can_auto_extend_last_file()) + || (space->id == srv_tmp_space.space_id() + && 
srv_tmp_space.can_auto_extend_last_file())) { + ulint n_pages = 0; + fsp_try_extend_data_file(space, header, mtr, &n_pages); + size = space->size_in_header; + } } i = limit; @@ -1105,17 +1294,14 @@ fsp_fill_free_list( while ((init_space && i < 1) || ((i + FSP_EXTENT_SIZE <= size) && (count < FSP_FREE_ADD))) { - ibool init_xdes; - if (zip_size) { - init_xdes = ut_2pow_remainder(i, zip_size) == 0; - } else { - init_xdes = ut_2pow_remainder(i, UNIV_PAGE_SIZE) == 0; - } + bool init_xdes + = (ut_2pow_remainder(i, page_size.physical()) == 0); + space->free_limit = i + FSP_EXTENT_SIZE; mlog_write_ulint(header + FSP_FREE_LIMIT, i + FSP_EXTENT_SIZE, MLOG_4BYTES, mtr); - if (UNIV_UNLIKELY(init_xdes)) { + if (init_xdes) { buf_block_t* block; @@ -1124,12 +1310,15 @@ fsp_fill_free_list( pages should be ignored. */ if (i > 0) { + const page_id_t page_id(space->id, i); + block = buf_page_create( - space, i, zip_size, mtr); - buf_page_get(space, zip_size, i, - RW_X_LATCH, mtr); - buf_block_dbg_add_level(block, - SYNC_FSP_PAGE); + page_id, page_size, mtr); + + buf_page_get( + page_id, page_size, RW_SX_LATCH, mtr); + + buf_block_dbg_add_level(block, SYNC_FSP_PAGE); fsp_init_file_page(block, mtr); mlog_write_ulint(buf_block_get_frame(block) @@ -1140,28 +1329,52 @@ fsp_fill_free_list( /* Initialize the ibuf bitmap page in a separate mini-transaction because it is low in the latching - order, and we must be able to release its latch - before returning from the fsp routine */ + order, and we must be able to release its latch. + Note: Insert-Buffering is disabled for tables that + reside in the temp-tablespace. */ + if (space->id != srv_tmp_space.space_id()) { + mtr_t ibuf_mtr; + + mtr_start(&ibuf_mtr); + ibuf_mtr.set_named_space(space); + + /* Avoid logging while truncate table + fix-up is active. 
*/ + if (space->purpose == FIL_TYPE_TEMPORARY + || srv_is_tablespace_truncated( + space->id)) { + mtr_set_log_mode( + &ibuf_mtr, MTR_LOG_NO_REDO); + } + + const page_id_t page_id( + space->id, + i + FSP_IBUF_BITMAP_OFFSET); + + block = buf_page_create( + page_id, page_size, &ibuf_mtr); - mtr_start(&ibuf_mtr); + buf_page_get( + page_id, page_size, RW_SX_LATCH, + &ibuf_mtr); - block = buf_page_create(space, - i + FSP_IBUF_BITMAP_OFFSET, - zip_size, &ibuf_mtr); - buf_page_get(space, zip_size, - i + FSP_IBUF_BITMAP_OFFSET, - RW_X_LATCH, &ibuf_mtr); - buf_block_dbg_add_level(block, SYNC_FSP_PAGE); + buf_block_dbg_add_level(block, SYNC_FSP_PAGE); - fsp_init_file_page(block, &ibuf_mtr); + fsp_init_file_page(block, &ibuf_mtr); - ibuf_bitmap_page_init(block, &ibuf_mtr); + ibuf_bitmap_page_init(block, &ibuf_mtr); - mtr_commit(&ibuf_mtr); + mtr_commit(&ibuf_mtr); + } } - descr = xdes_get_descriptor_with_space_hdr(header, space, i, - mtr); + buf_block_t* desc_block = NULL; + descr = xdes_get_descriptor_with_space_hdr( + header, space->id, i, mtr, init_space, &desc_block); + if (desc_block != NULL) { + fil_block_check_type( + desc_block, FIL_PAGE_TYPE_XDES, mtr); + } xdes_init(descr, mtr); if (UNIV_UNLIKELY(init_xdes)) { @@ -1177,8 +1390,8 @@ fsp_fill_free_list( flst_add_last(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE, mtr); - frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, - MLOG_4BYTES, mtr); + frag_n_used = mach_read_from_4( + header + FSP_FRAG_N_USED); mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used + 2, MLOG_4BYTES, mtr); } else { @@ -1189,32 +1402,41 @@ fsp_fill_free_list( i += FSP_EXTENT_SIZE; } + + space->free_len += count; } -/**********************************************************************//** -Allocates a new free extent. -@return extent descriptor, NULL if cannot be allocated */ +/** Allocates a new free extent. 
+@param[in] space_id tablespace identifier +@param[in] page_size page size +@param[in] hint hint of which extent would be desirable: any +page offset in the extent goes; the hint must not be > FSP_FREE_LIMIT +@param[in,out] mtr mini-transaction +@return extent descriptor, NULL if cannot be allocated */ static xdes_t* fsp_alloc_free_extent( -/*==================*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint hint, /*!< in: hint of which extent would be desirable: any - page offset in the extent goes; the hint must not - be > FSP_FREE_LIMIT */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space_id, + const page_size_t& page_size, + ulint hint, + mtr_t* mtr) { fsp_header_t* header; fil_addr_t first; xdes_t* descr; + buf_block_t* desc_block = NULL; - ut_ad(mtr); + header = fsp_get_space_header(space_id, page_size, mtr); - header = fsp_get_space_header(space, zip_size, mtr); + descr = xdes_get_descriptor_with_space_hdr( + header, space_id, hint, mtr, false, &desc_block); - descr = xdes_get_descriptor_with_space_hdr(header, space, hint, mtr); + fil_space_t* space = fil_space_get(space_id); + ut_a(space != NULL); + + if (desc_block != NULL) { + fil_block_check_type(desc_block, FIL_PAGE_TYPE_XDES, mtr); + } if (descr && (xdes_get_state(descr, mtr) == XDES_FREE)) { /* Ok, we can take this extent */ @@ -1223,7 +1445,7 @@ fsp_alloc_free_extent( first = flst_get_first(header + FSP_FREE, mtr); if (fil_addr_is_null(first)) { - fsp_fill_free_list(FALSE, space, header, mtr); + fsp_fill_free_list(false, space, header, mtr); first = flst_get_first(header + FSP_FREE, mtr); } @@ -1233,10 +1455,12 @@ fsp_alloc_free_extent( return(NULL); /* No free extents left */ } - descr = xdes_lst_get_descriptor(space, zip_size, first, mtr); + descr = xdes_lst_get_descriptor( + space_id, page_size, first, mtr); } flst_remove(header + FSP_FREE, descr + XDES_FLST_NODE, mtr); + space->free_len--; return(descr); } @@ -1259,8 +1483,7 @@ fsp_alloc_from_free_frag( xdes_set_bit(descr, XDES_FREE_BIT, bit, FALSE, mtr); /* Update the FRAG_N_USED field */ - frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES, - mtr); + frag_n_used = mach_read_from_4(header + FSP_FRAG_N_USED); frag_n_used++; mlog_write_ulint(header + FSP_FRAG_N_USED, frag_n_used, MLOG_4BYTES, mtr); @@ -1278,49 +1501,62 @@ fsp_alloc_from_free_frag( } } -/**********************************************************************//** -Gets a buffer block for an allocated page. - +/** Gets a buffer block for an allocated page. NOTE: If init_mtr != mtr, the block will only be initialized if it was not previously x-latched. It is assumed that the block has been x-latched only by mtr, and freed in mtr in that case. 
- +@param[in] page_id page id of the allocated page +@param[in] page_size page size of the allocated page +@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH +@param[in,out] mtr mini-transaction of the allocation +@param[in,out] init_mtr mini-transaction for initializing the page @return block, initialized if init_mtr==mtr or rw_lock_x_lock_count(&block->lock) == 1 */ static buf_block_t* fsp_page_create( -/*============*/ - ulint space, /*!< in: space id of the allocated page */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no, /*!< in: page number of the allocated page */ - mtr_t* mtr, /*!< in: mini-transaction of the allocation */ - mtr_t* init_mtr) /*!< in: mini-transaction for initializing - the page */ + const page_id_t& page_id, + const page_size_t& page_size, + rw_lock_type_t rw_latch, + mtr_t* mtr, + mtr_t* init_mtr) { - buf_block_t* block - = buf_page_create(space, page_no, zip_size, init_mtr); -#ifdef UNIV_SYNC_DEBUG + buf_block_t* block = buf_page_create(page_id, page_size, init_mtr); + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX) - == rw_lock_own(&block->lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + == rw_lock_own(&block->lock, RW_LOCK_X)); + + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_SX_FIX) + == rw_lock_own(&block->lock, RW_LOCK_SX)); + + ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH); /* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */ - rw_lock_x_lock(&block->lock); + if (rw_latch == RW_X_LATCH) { + rw_lock_x_lock(&block->lock); + } else { + rw_lock_sx_lock(&block->lock); + } mutex_enter(&block->mutex); + buf_block_buf_fix_inc(block, __FILE__, __LINE__); + mutex_exit(&block->mutex); - mtr_memo_push(init_mtr, block, MTR_MEMO_PAGE_X_FIX); + mtr_memo_push(init_mtr, block, rw_latch == RW_X_LATCH + ? MTR_MEMO_PAGE_X_FIX : MTR_MEMO_PAGE_SX_FIX); if (init_mtr == mtr - || rw_lock_get_x_lock_count(&block->lock) == 1) { + || (rw_latch == RW_X_LATCH + ? rw_lock_get_x_lock_count(&block->lock) == 1 + : rw_lock_get_sx_lock_count(&block->lock) == 1)) { /* Initialize the page, unless it was already - X-latched in mtr. (In this case, we would want to + SX-latched in mtr. (In this case, we would want to allocate another page that has not been freed in mtr.) */ ut_ad(init_mtr == mtr - || !mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + || !mtr_memo_contains_flagged(mtr, block, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); fsp_init_file_page(block, init_mtr); } @@ -1328,24 +1564,28 @@ fsp_page_create( return(block); } -/**********************************************************************//** -Allocates a single free page from a space. The page is marked as used. -@retval NULL if no page could be allocated -@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded +/** Allocates a single free page from a space. +The page is marked as used. 
+@param[in] space space id +@param[in] page_size page size +@param[in] hint hint of which page would be desirable +@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH +@param[in,out] mtr mini-transaction +@param[in,out] init_mtr mini-transaction in which the page should be +initialized (may be the same as mtr) +@retval NULL if no page could be allocated +@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) -@retval block (not allocated or initialized) otherwise */ +@retval block (not allocated or initialized) otherwise */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) buf_block_t* fsp_alloc_free_page( -/*================*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint hint, /*!< in: hint of which page would be desirable */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mini-transaction in which the - page should be initialized - (may be the same as mtr) */ + ulint space, + const page_size_t& page_size, + ulint hint, + rw_lock_type_t rw_latch, + mtr_t* mtr, + mtr_t* init_mtr) { fsp_header_t* header; fil_addr_t first; @@ -1357,7 +1597,8 @@ fsp_alloc_free_page( ut_ad(mtr); ut_ad(init_mtr); - header = fsp_get_space_header(space, zip_size, mtr); + ut_d(fsp_space_modify_check(space, mtr)); + header = fsp_get_space_header(space, page_size, mtr); /* Get the hinted descriptor */ descr = xdes_get_descriptor_with_space_hdr(header, space, hint, mtr); @@ -1376,7 +1617,7 @@ fsp_alloc_free_page( FREE_FRAG list. But we will allocate our page from the the free extent anyway. */ - descr = fsp_alloc_free_extent(space, zip_size, + descr = fsp_alloc_free_extent(space, page_size, hint, mtr); if (descr == NULL) { @@ -1389,7 +1630,7 @@ fsp_alloc_free_page( flst_add_last(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE, mtr); } else { - descr = xdes_lst_get_descriptor(space, zip_size, + descr = xdes_lst_get_descriptor(space, page_size, first, mtr); } @@ -1412,24 +1653,27 @@ fsp_alloc_free_page( page_no = xdes_get_offset(descr) + free; - space_size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr); + space_size = mach_read_from_4(header + FSP_SIZE); + ut_ad(space_size == fil_space_get(space)->size_in_header + || (space == TRX_SYS_SPACE + && srv_startup_is_before_trx_rollback_phase)); if (space_size <= page_no) { /* It must be that we are extending a single-table tablespace whose size is still < 64 pages */ - ut_a(space != 0); + ut_a(!is_system_tablespace(space)); if (page_no >= FSP_EXTENT_SIZE) { - fprintf(stderr, - "InnoDB: Error: trying to extend a" - " single-table tablespace %lu\n" - "InnoDB: by single page(s) though the" - " space size %lu. Page no %lu.\n", - (ulong) space, (ulong) space_size, - (ulong) page_no); + ib::error() << "Trying to extend a single-table" + " tablespace " << space << " , by single" + " page(s) though the space size " << space_size + << ". 
Page no " << page_no << "."; return(NULL); } - if (!fsp_try_extend_data_file_with_pages(space, page_no, + + fil_space_t* fspace = fil_space_get(space); + + if (!fsp_try_extend_data_file_with_pages(fspace, page_no, header, mtr)) { /* No disk space left */ return(NULL); @@ -1437,20 +1681,21 @@ fsp_alloc_free_page( } fsp_alloc_from_free_frag(header, descr, free, mtr); - return(fsp_page_create(space, zip_size, page_no, mtr, init_mtr)); + return(fsp_page_create(page_id_t(space, page_no), page_size, + rw_latch, mtr, init_mtr)); } -/**********************************************************************//** -Frees a single page of a space. The page is marked as free and clean. */ +/** Frees a single page of a space. +The page is marked as free and clean. +@param[in] page_id page id +@param[in] page_size page size +@param[in,out] mtr mini-transaction */ static void fsp_free_page( -/*==========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page, /*!< in: page offset */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const page_id_t& page_id, + const page_size_t& page_size, + mtr_t* mtr) { fsp_header_t* header; xdes_t* descr; @@ -1458,21 +1703,21 @@ fsp_free_page( ulint frag_n_used; ut_ad(mtr); + ut_d(fsp_space_modify_check(page_id.space(), mtr)); /* fprintf(stderr, "Freeing page %lu in space %lu\n", page, space); */ - header = fsp_get_space_header(space, zip_size, mtr); + header = fsp_get_space_header( + page_id.space(), page_size, mtr); - descr = xdes_get_descriptor_with_space_hdr(header, space, page, mtr); + descr = xdes_get_descriptor_with_space_hdr( + header, page_id.space(), page_id.page_no(), mtr); state = xdes_get_state(descr, mtr); if (state != XDES_FREE_FRAG && state != XDES_FULL_FRAG) { - fprintf(stderr, - "InnoDB: Error: File space extent descriptor" - " of page %lu has state %lu\n", - (ulong) page, - (ulong) state); + ib::error() << "File space extent descriptor of page " + << page_id << " has state " << state; fputs("InnoDB: Dump of descriptor: ", stderr); ut_print_buf(stderr, ((byte*) descr) - 50, 200); putc('\n', stderr); @@ -1491,12 +1736,10 @@ fsp_free_page( } if (xdes_mtr_get_bit(descr, XDES_FREE_BIT, - page % FSP_EXTENT_SIZE, mtr)) { + page_id.page_no() % FSP_EXTENT_SIZE, mtr)) { - fprintf(stderr, - "InnoDB: Error: File space extent descriptor" - " of page %lu says it is free\n" - "InnoDB: Dump of descriptor: ", (ulong) page); + ib::error() << "File space extent descriptor of page " + << page_id << " says it is free. Dump of descriptor: "; ut_print_buf(stderr, ((byte*) descr) - 50, 200); putc('\n', stderr); /* Crash in debug version, so that we get a core dump @@ -1509,8 +1752,10 @@ fsp_free_page( return; } - xdes_set_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr); - xdes_set_bit(descr, XDES_CLEAN_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr); + const ulint bit = page_id.page_no() % FSP_EXTENT_SIZE; + + xdes_set_bit(descr, XDES_FREE_BIT, bit, TRUE, mtr); + xdes_set_bit(descr, XDES_CLEAN_BIT, bit, TRUE, mtr); frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES, mtr); @@ -1534,84 +1779,78 @@ fsp_free_page( /* The extent has become free: move it to another list */ flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE, mtr); - fsp_free_extent(space, zip_size, page, mtr); + fsp_free_extent(page_id, page_size, mtr); } - - mtr->n_freed_pages++; } -/**********************************************************************//** -Returns an extent to the free list of a space. 
*/ +/** Returns an extent to the free list of a space. +@param[in] page_id page id in the extent +@param[in] page_size page size +@param[in,out] mtr mini-transaction */ static void fsp_free_extent( -/*============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page, /*!< in: page offset in the extent */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + const page_id_t& page_id, + const page_size_t& page_size, + mtr_t* mtr) { fsp_header_t* header; xdes_t* descr; ut_ad(mtr); - header = fsp_get_space_header(space, zip_size, mtr); - - descr = xdes_get_descriptor_with_space_hdr(header, space, page, mtr); - - if (xdes_get_state(descr, mtr) == XDES_FREE) { + header = fsp_get_space_header(page_id.space(), page_size, mtr); - ut_print_buf(stderr, (byte*) descr - 500, 1000); - putc('\n', stderr); + descr = xdes_get_descriptor_with_space_hdr( + header, page_id.space(), page_id.page_no(), mtr); - ut_error; - } + ut_a(xdes_get_state(descr, mtr) != XDES_FREE); xdes_init(descr, mtr); flst_add_last(header + FSP_FREE, descr + XDES_FLST_NODE, mtr); + fil_space_get(page_id.space())->free_len++; } -/**********************************************************************//** -Returns the nth inode slot on an inode page. -@return segment inode */ +/** Returns the nth inode slot on an inode page. +@param[in] page segment inode page +@param[in] i inode index on page +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return segment inode */ UNIV_INLINE fseg_inode_t* fsp_seg_inode_page_get_nth_inode( -/*=============================*/ - page_t* page, /*!< in: segment inode page */ - ulint i, /*!< in: inode index on page */ - ulint zip_size MY_ATTRIBUTE((unused)), - /*!< in: compressed page size, or 0 */ - mtr_t* mtr MY_ATTRIBUTE((unused))) - /*!< in/out: mini-transaction */ + page_t* page, + ulint i, + const page_size_t& page_size, + mtr_t* mtr) { - ut_ad(i < FSP_SEG_INODES_PER_PAGE(zip_size)); - ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); + ut_ad(i < FSP_SEG_INODES_PER_PAGE(page_size)); + ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_SX_FIX)); return(page + FSEG_ARR_OFFSET + FSEG_INODE_SIZE * i); } -/**********************************************************************//** -Looks for a used segment inode on a segment inode page. -@return segment inode index, or ULINT_UNDEFINED if not found */ +/** Looks for a used segment inode on a segment inode page. +@param[in] page segment inode page +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return segment inode index, or ULINT_UNDEFINED if not found */ static ulint fsp_seg_inode_page_find_used( -/*=========================*/ - page_t* page, /*!< in: segment inode page */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + page_t* page, + const page_size_t& page_size, + mtr_t* mtr) { ulint i; fseg_inode_t* inode; - for (i = 0; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) { + for (i = 0; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) { inode = fsp_seg_inode_page_get_nth_inode( - page, i, zip_size, mtr); + page, i, page_size, mtr); if (mach_read_from_8(inode + FSEG_ID)) { /* This is used */ @@ -1625,24 +1864,26 @@ fsp_seg_inode_page_find_used( return(ULINT_UNDEFINED); } -/**********************************************************************//** -Looks for an unused segment inode on a segment inode page. 
-@return segment inode index, or ULINT_UNDEFINED if not found */ +/** Looks for an unused segment inode on a segment inode page. +@param[in] page segment inode page +@param[in] i search forward starting from this index +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return segment inode index, or ULINT_UNDEFINED if not found */ static ulint fsp_seg_inode_page_find_free( -/*=========================*/ - page_t* page, /*!< in: segment inode page */ - ulint i, /*!< in: search forward starting from this index */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + page_t* page, + ulint i, + const page_size_t& page_size, + mtr_t* mtr) { - for (; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) { + for (; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) { fseg_inode_t* inode; inode = fsp_seg_inode_page_get_nth_inode( - page, i, zip_size, mtr); + page, i, page_size, mtr); if (!mach_read_from_8(inode + FSEG_ID)) { /* This is unused */ @@ -1658,7 +1899,7 @@ fsp_seg_inode_page_find_free( /**********************************************************************//** Allocates a new file segment inode page. -@return TRUE if could be allocated */ +@return TRUE if could be allocated */ static ibool fsp_alloc_seg_inode_page( @@ -1670,16 +1911,15 @@ fsp_alloc_seg_inode_page( buf_block_t* block; page_t* page; ulint space; - ulint zip_size; ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET); space = page_get_space_id(page_align(space_header)); - zip_size = fsp_flags_get_zip_size( - mach_read_from_4(FSP_SPACE_FLAGS + space_header)); + const page_size_t page_size(mach_read_from_4(FSP_SPACE_FLAGS + + space_header)); - block = fsp_alloc_free_page(space, zip_size, 0, mtr, mtr); + block = fsp_alloc_free_page(space, page_size, 0, RW_SX_LATCH, mtr, mtr); if (block == NULL) { @@ -1687,19 +1927,17 @@ fsp_alloc_seg_inode_page( } buf_block_dbg_add_level(block, SYNC_FSP_PAGE); - ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1); - - block->check_index_page_at_flush = FALSE; + ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1); page = buf_block_get_frame(block); mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_INODE, MLOG_2BYTES, mtr); - for (ulint i = 0; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) { + for (ulint i = 0; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) { inode = fsp_seg_inode_page_get_nth_inode( - page, i, zip_size, mtr); + page, i, page_size, mtr); mlog_write_ull(inode + FSEG_ID, 0, mtr); } @@ -1713,7 +1951,7 @@ fsp_alloc_seg_inode_page( /**********************************************************************//** Allocates a new file segment inode. -@return segment inode, or NULL if not enough space */ +@return segment inode, or NULL if not enough space */ static fseg_inode_t* fsp_alloc_seg_inode( @@ -1721,45 +1959,40 @@ fsp_alloc_seg_inode( fsp_header_t* space_header, /*!< in: space header */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ulint page_no; buf_block_t* block; page_t* page; fseg_inode_t* inode; - ibool success; - ulint zip_size; ulint n; ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET); - if (flst_get_len(space_header + FSP_SEG_INODES_FREE, mtr) == 0) { - /* Allocate a new segment inode page */ - - success = fsp_alloc_seg_inode_page(space_header, mtr); - - if (!success) { - - return(NULL); - } + /* Allocate a new segment inode page if needed. 
*/ + if (flst_get_len(space_header + FSP_SEG_INODES_FREE) == 0 + && !fsp_alloc_seg_inode_page(space_header, mtr)) { + return(NULL); } - page_no = flst_get_first(space_header + FSP_SEG_INODES_FREE, mtr).page; - - zip_size = fsp_flags_get_zip_size( + const page_size_t page_size( mach_read_from_4(FSP_SPACE_FLAGS + space_header)); - block = buf_page_get(page_get_space_id(page_align(space_header)), - zip_size, page_no, RW_X_LATCH, mtr); + + const page_id_t page_id( + page_get_space_id(page_align(space_header)), + flst_get_first(space_header + FSP_SEG_INODES_FREE, mtr).page); + + block = buf_page_get(page_id, page_size, RW_SX_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_FSP_PAGE); + fil_block_check_type(block, FIL_PAGE_INODE, mtr); page = buf_block_get_frame(block); - n = fsp_seg_inode_page_find_free(page, 0, zip_size, mtr); + n = fsp_seg_inode_page_find_free(page, 0, page_size, mtr); ut_a(n != ULINT_UNDEFINED); - inode = fsp_seg_inode_page_get_nth_inode(page, n, zip_size, mtr); + inode = fsp_seg_inode_page_get_nth_inode(page, n, page_size, mtr); if (ULINT_UNDEFINED == fsp_seg_inode_page_find_free(page, n + 1, - zip_size, mtr)) { + page_size, mtr)) { /* There are no other unused headers left on the page: move it to another list */ @@ -1775,29 +2008,32 @@ fsp_alloc_seg_inode( return(inode); } -/**********************************************************************//** -Frees a file segment inode. */ +/** Frees a file segment inode. +@param[in] space space id +@param[in] page_size page size +@param[in,out] inode segment inode +@param[in,out] mtr mini-transaction */ static void fsp_free_seg_inode( -/*===============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - fseg_inode_t* inode, /*!< in: segment inode */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space, + const page_size_t& page_size, + fseg_inode_t* inode, + mtr_t* mtr) { page_t* page; fsp_header_t* space_header; + ut_d(fsp_space_modify_check(space, mtr)); + page = page_align(inode); - space_header = fsp_get_space_header(space, zip_size, mtr); + space_header = fsp_get_space_header(space, page_size, mtr); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); if (ULINT_UNDEFINED - == fsp_seg_inode_page_find_free(page, 0, zip_size, mtr)) { + == fsp_seg_inode_page_find_free(page, 0, page_size, mtr)) { /* Move the page to another list */ @@ -1812,29 +2048,33 @@ fsp_free_seg_inode( mlog_write_ulint(inode + FSEG_MAGIC_N, 0xfa051ce3, MLOG_4BYTES, mtr); if (ULINT_UNDEFINED - == fsp_seg_inode_page_find_used(page, zip_size, mtr)) { + == fsp_seg_inode_page_find_used(page, page_size, mtr)) { /* There are no other used headers left on the page: free it */ flst_remove(space_header + FSP_SEG_INODES_FREE, page + FSEG_INODE_PAGE_NODE, mtr); - fsp_free_page(space, zip_size, page_get_page_no(page), mtr); + fsp_free_page(page_id_t(space, page_get_page_no(page)), + page_size, mtr); } } -/**********************************************************************//** -Returns the file segment inode, page x-latched. -@return segment inode, page x-latched; NULL if the inode is free */ +/** Returns the file segment inode, page x-latched. 
+@param[in] header segment header +@param[in] space space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@param[out] block inode block, or NULL to ignore +@return segment inode, page x-latched; NULL if the inode is free */ static fseg_inode_t* fseg_inode_try_get( -/*===============*/ - fseg_header_t* header, /*!< in: segment header */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_header_t* header, + ulint space, + const page_size_t& page_size, + mtr_t* mtr, + buf_block_t** block) { fil_addr_t inode_addr; fseg_inode_t* inode; @@ -1843,7 +2083,8 @@ fseg_inode_try_get( inode_addr.boffset = mach_read_from_2(header + FSEG_HDR_OFFSET); ut_ad(space == mach_read_from_4(header + FSEG_HDR_SPACE)); - inode = fut_get_ptr(space, zip_size, inode_addr, RW_X_LATCH, mtr); + inode = fut_get_ptr(space, page_size, inode_addr, RW_SX_LATCH, mtr, + block); if (UNIV_UNLIKELY(!mach_read_from_8(inode + FSEG_ID))) { @@ -1856,28 +2097,31 @@ fseg_inode_try_get( return(inode); } -/**********************************************************************//** -Returns the file segment inode, page x-latched. -@return segment inode, page x-latched */ +/** Returns the file segment inode, page x-latched. +@param[in] header segment header +@param[in] space space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@param[out] block inode block +@return segment inode, page x-latched */ static fseg_inode_t* fseg_inode_get( -/*===========*/ - fseg_header_t* header, /*!< in: segment header */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_header_t* header, + ulint space, + const page_size_t& page_size, + mtr_t* mtr, + buf_block_t** block = NULL) { fseg_inode_t* inode - = fseg_inode_try_get(header, space, zip_size, mtr); + = fseg_inode_try_get(header, space, page_size, mtr, block); ut_a(inode); return(inode); } /**********************************************************************//** Gets the page number from the nth fragment page slot. -@return page number, FIL_NULL if not in use */ +@return page number, FIL_NULL if not in use */ UNIV_INLINE ulint fseg_get_nth_frag_page_no( @@ -1889,7 +2133,7 @@ fseg_get_nth_frag_page_no( { ut_ad(inode && mtr); ut_ad(n < FSEG_FRAG_ARR_N_SLOTS); - ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX)); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); return(mach_read_from_4(inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE)); @@ -1908,7 +2152,7 @@ fseg_set_nth_frag_page_no( { ut_ad(inode && mtr); ut_ad(n < FSEG_FRAG_ARR_N_SLOTS); - ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX)); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); mlog_write_ulint(inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE, @@ -1917,7 +2161,7 @@ fseg_set_nth_frag_page_no( /**********************************************************************//** Finds a fragment page slot which is free. 
-@return slot index; ULINT_UNDEFINED if none found */ +@return slot index; ULINT_UNDEFINED if none found */ static ulint fseg_find_free_frag_page_slot( @@ -1944,7 +2188,7 @@ fseg_find_free_frag_page_slot( /**********************************************************************//** Finds a fragment page slot which is used and last in the array. -@return slot index; ULINT_UNDEFINED if none found */ +@return slot index; ULINT_UNDEFINED if none found */ static ulint fseg_find_last_used_frag_page_slot( @@ -1972,7 +2216,7 @@ fseg_find_last_used_frag_page_slot( /**********************************************************************//** Calculates reserved fragment page slots. -@return number of fragment pages */ +@return number of fragment pages */ static ulint fseg_get_n_frag_pages( @@ -1998,11 +2242,10 @@ fseg_get_n_frag_pages( Creates a new segment. @return the block where the segment header is placed, x-latched, NULL if could not create segment because of lack of space */ -UNIV_INTERN buf_block_t* fseg_create_general( /*================*/ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint page, /*!< in: page where the segment header is placed: if this is != 0, the page must belong to another segment, if this is 0, a new page will be allocated and it @@ -2017,50 +2260,54 @@ fseg_create_general( operation */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ulint flags; - ulint zip_size; fsp_header_t* space_header; fseg_inode_t* inode; ib_id_t seg_id; buf_block_t* block = 0; /* remove warning */ fseg_header_t* header = 0; /* remove warning */ - rw_lock_t* latch; - ibool success; ulint n_reserved; ulint i; + DBUG_ENTER("fseg_create_general"); + ut_ad(mtr); ut_ad(byte_offset + FSEG_HEADER_SIZE <= UNIV_PAGE_SIZE - FIL_PAGE_DATA_END); + ut_d(fsp_space_modify_check(space_id, mtr)); - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); if (page != 0) { - block = buf_page_get(space, zip_size, page, RW_X_LATCH, mtr); + block = buf_page_get(page_id_t(space_id, page), page_size, + RW_SX_LATCH, mtr); + header = byte_offset + buf_block_get_frame(block); - } - mtr_x_lock(latch, mtr); + const ulint type = space_id == TRX_SYS_SPACE + && page == TRX_SYS_PAGE_NO + ? 
FIL_PAGE_TYPE_TRX_SYS + : FIL_PAGE_TYPE_SYS; + + fil_block_check_type(block, type, mtr); + } - if (rw_lock_get_x_lock_count(latch) == 1) { + if (rw_lock_get_x_lock_count(&space->latch) == 1) { /* This thread did not own the latch before this call: free excess pages from the insert buffer free list */ - if (space == IBUF_SPACE_ID) { + if (space_id == IBUF_SPACE_ID) { ibuf_free_excess_pages(); } } - if (!has_done_reservation) { - success = fsp_reserve_free_extents(&n_reserved, space, 2, - FSP_NORMAL, mtr); - if (!success) { - return(NULL); - } + if (!has_done_reservation + && !fsp_reserve_free_extents(&n_reserved, space_id, 2, + FSP_NORMAL, mtr)) { + DBUG_RETURN(NULL); } - space_header = fsp_get_space_header(space, zip_size, mtr); + space_header = fsp_get_space_header(space_id, page_size, mtr); inode = fsp_alloc_seg_inode(space_header, mtr); @@ -2090,17 +2337,26 @@ fseg_create_general( } if (page == 0) { - block = fseg_alloc_free_page_low(space, zip_size, - inode, 0, FSP_UP, mtr, mtr); + block = fseg_alloc_free_page_low(space, page_size, + inode, 0, FSP_UP, RW_SX_LATCH, + mtr, mtr +#ifdef UNIV_DEBUG + , has_done_reservation +#endif /* UNIV_DEBUG */ + ); + + /* The allocation cannot fail if we have already reserved a + space for the page. */ + ut_ad(!has_done_reservation || block != NULL); if (block == NULL) { - fsp_free_seg_inode(space, zip_size, inode, mtr); + fsp_free_seg_inode(space_id, page_size, inode, mtr); goto funct_exit; } - ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1); + ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1); header = byte_offset + buf_block_get_frame(block); mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE, @@ -2114,22 +2370,21 @@ fseg_create_general( page_get_page_no(page_align(inode)), MLOG_4BYTES, mtr); - mlog_write_ulint(header + FSEG_HDR_SPACE, space, MLOG_4BYTES, mtr); + mlog_write_ulint(header + FSEG_HDR_SPACE, space_id, MLOG_4BYTES, mtr); funct_exit: if (!has_done_reservation) { - fil_space_release_free_extents(space, n_reserved); + fil_space_release_free_extents(space_id, n_reserved); } - return(block); + DBUG_RETURN(block); } /**********************************************************************//** Creates a new segment. @return the block where the segment header is placed, x-latched, NULL if could not create segment because of lack of space */ -UNIV_INTERN buf_block_t* fseg_create( /*========*/ @@ -2148,7 +2403,7 @@ fseg_create( /**********************************************************************//** Calculates the number of pages reserved by a segment, and how many pages are currently used. 
-@return number of reserved pages */ +@return number of reserved pages */ static ulint fseg_n_reserved_pages_low( @@ -2161,16 +2416,16 @@ fseg_n_reserved_pages_low( ulint ret; ut_ad(inode && used && mtr); - ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX)); - *used = mtr_read_ulint(inode + FSEG_NOT_FULL_N_USED, MLOG_4BYTES, mtr) - + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL, mtr) + *used = mach_read_from_4(inode + FSEG_NOT_FULL_N_USED) + + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL) + fseg_get_n_frag_pages(inode, mtr); ret = fseg_get_n_frag_pages(inode, mtr) - + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FREE, mtr) - + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_NOT_FULL, mtr) - + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL, mtr); + + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FREE) + + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_NOT_FULL) + + FSP_EXTENT_SIZE * flst_get_len(inode + FSEG_FULL); return(ret); } @@ -2178,8 +2433,7 @@ fseg_n_reserved_pages_low( /**********************************************************************//** Calculates the number of pages reserved by a segment, and how many pages are currently used. -@return number of reserved pages */ -UNIV_INTERN +@return number of reserved pages */ ulint fseg_n_reserved_pages( /*==================*/ @@ -2189,40 +2443,39 @@ fseg_n_reserved_pages( { ulint ret; fseg_inode_t* inode; - ulint space; - ulint flags; - ulint zip_size; - rw_lock_t* latch; + ulint space_id; + fil_space_t* space; - space = page_get_space_id(page_align(header)); - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + space_id = page_get_space_id(page_align(header)); + space = mtr_x_lock_space(space_id, mtr); - mtr_x_lock(latch, mtr); + const page_size_t page_size(space->flags); - inode = fseg_inode_get(header, space, zip_size, mtr); + inode = fseg_inode_get(header, space_id, page_size, mtr); ret = fseg_n_reserved_pages_low(inode, used, mtr); return(ret); } -/*********************************************************************//** -Tries to fill the free list of a segment with consecutive free extents. +/** Tries to fill the free list of a segment with consecutive free extents. This happens if the segment is big enough to allow extents in the free list, the free list is empty, and the extents can be allocated consecutively from -the hint onward. */ +the hint onward. 
+@param[in] inode segment inode +@param[in] space space id +@param[in] page_size page size +@param[in] hint hint which extent would be good as the first +extent +@param[in,out] mtr mini-transaction */ static void fseg_fill_free_list( -/*================*/ - fseg_inode_t* inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint hint, /*!< in: hint which extent would be good as - the first extent */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_inode_t* inode, + ulint space, + const page_size_t& page_size, + ulint hint, + mtr_t* mtr) { xdes_t* descr; ulint i; @@ -2232,6 +2485,7 @@ fseg_fill_free_list( ut_ad(inode && mtr); ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); + ut_d(fsp_space_modify_check(space, mtr)); reserved = fseg_n_reserved_pages_low(inode, &used, mtr); @@ -2242,14 +2496,14 @@ fseg_fill_free_list( return; } - if (flst_get_len(inode + FSEG_FREE, mtr) > 0) { + if (flst_get_len(inode + FSEG_FREE) > 0) { /* Free list is not empty */ return; } for (i = 0; i < FSEG_FREE_LIST_MAX_LEN; i++) { - descr = xdes_get_descriptor(space, zip_size, hint, mtr); + descr = xdes_get_descriptor(space, hint, page_size, mtr); if ((descr == NULL) || (XDES_FREE != xdes_get_state(descr, mtr))) { @@ -2259,7 +2513,7 @@ fseg_fill_free_list( return; } - descr = fsp_alloc_free_extent(space, zip_size, hint, mtr); + descr = fsp_alloc_free_extent(space, page_size, hint, mtr); xdes_set_state(descr, XDES_FSEG, mtr); @@ -2273,23 +2527,25 @@ fseg_fill_free_list( } } -/*********************************************************************//** -Allocates a free extent for the segment: looks first in the free list of the -segment, then tries to allocate from the space free list. NOTE that the extent -returned still resides in the segment free list, it is not yet taken off it! -@retval NULL if no page could be allocated -@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded +/** Allocates a free extent for the segment: looks first in the free list of +the segment, then tries to allocate from the space free list. +NOTE that the extent returned still resides in the segment free list, it is +not yet taken off it! 
+@param[in] inode segment inode +@param[in] space space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@retval NULL if no page could be allocated +@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) -@retval block (not allocated or initialized) otherwise */ +@retval block (not allocated or initialized) otherwise */ static xdes_t* fseg_alloc_free_extent( -/*===================*/ - fseg_inode_t* inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_inode_t* inode, + ulint space, + const page_size_t& page_size, + mtr_t* mtr) { xdes_t* descr; ib_id_t seg_id; @@ -2297,16 +2553,17 @@ fseg_alloc_free_extent( ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); + ut_d(fsp_space_modify_check(space, mtr)); - if (flst_get_len(inode + FSEG_FREE, mtr) > 0) { + if (flst_get_len(inode + FSEG_FREE) > 0) { /* Segment free list is not empty, allocate from it */ first = flst_get_first(inode + FSEG_FREE, mtr); - descr = xdes_lst_get_descriptor(space, zip_size, first, mtr); + descr = xdes_lst_get_descriptor(space, page_size, first, mtr); } else { /* Segment free list was empty, allocate from space */ - descr = fsp_alloc_free_extent(space, zip_size, 0, mtr); + descr = fsp_alloc_free_extent(space, page_size, 0, mtr); if (descr == NULL) { @@ -2320,7 +2577,7 @@ fseg_alloc_free_extent( flst_add_last(inode + FSEG_FREE, descr + XDES_FLST_NODE, mtr); /* Try to fill the segment free list */ - fseg_fill_free_list(inode, space, zip_size, + fseg_fill_free_list(inode, space, page_size, xdes_get_offset(descr) + FSP_EXTENT_SIZE, mtr); } @@ -2328,37 +2585,44 @@ fseg_alloc_free_extent( return(descr); } -/**********************************************************************//** -Allocates a single free page from a segment. This function implements -the intelligent allocation strategy which tries to minimize file space -fragmentation. -@retval NULL if no page could be allocated -@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded +/** Allocates a single free page from a segment. +This function implements the intelligent allocation strategy which tries to +minimize file space fragmentation. +@param[in,out] space tablespace +@param[in] page_size page size +@param[in,out] seg_inode segment inode +@param[in] hint hint of which page would be desirable +@param[in] direction if the new page is needed because of +an index page split, and records are inserted there in order, into which +direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR +@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH +@param[in,out] mtr mini-transaction +@param[in,out] init_mtr mtr or another mini-transaction in +which the page should be initialized. 
If init_mtr != mtr, but the page is +already latched in mtr, do not initialize the page +@param[in] has_done_reservation TRUE if the space has already been +reserved, in this case we will never return NULL +@retval NULL if no page could be allocated +@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) -@retval block (not allocated or initialized) otherwise */ +@retval block (not allocated or initialized) otherwise */ static buf_block_t* fseg_alloc_free_page_low( -/*=====================*/ - ulint space, /*!< in: space */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - fseg_inode_t* seg_inode, /*!< in/out: segment inode */ - ulint hint, /*!< in: hint of which page would be - desirable */ - byte direction, /*!< in: if the new page is needed because - of an index page split, and records are - inserted there in order, into which - direction they go alphabetically: FSP_DOWN, - FSP_UP, FSP_NO_DIR */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction - in which the page should be initialized. - If init_mtr!=mtr, but the page is already - latched in mtr, do not initialize the page. */ + fil_space_t* space, + const page_size_t& page_size, + fseg_inode_t* seg_inode, + ulint hint, + byte direction, + rw_lock_type_t rw_latch, + mtr_t* mtr, + mtr_t* init_mtr +#ifdef UNIV_DEBUG + , ibool has_done_reservation +#endif /* UNIV_DEBUG */ +) { fsp_header_t* space_header; - ulint space_size; ib_id_t seg_id; ulint used; ulint reserved; @@ -2366,30 +2630,34 @@ fseg_alloc_free_page_low( ulint ret_page; /*!< the allocated page offset, FIL_NULL if could not be allocated */ xdes_t* ret_descr; /*!< the extent of the allocated page */ - ibool success; ulint n; + const ulint space_id = space->id; ut_ad(mtr); ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR)); ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); + ut_ad(space->purpose == FIL_TYPE_TEMPORARY + || space->purpose == FIL_TYPE_TABLESPACE); seg_id = mach_read_from_8(seg_inode + FSEG_ID); ut_ad(seg_id); + ut_d(fsp_space_modify_check(space_id, mtr)); + ut_ad(fil_page_get_type(page_align(seg_inode)) == FIL_PAGE_INODE); reserved = fseg_n_reserved_pages_low(seg_inode, &used, mtr); - space_header = fsp_get_space_header(space, zip_size, mtr); + space_header = fsp_get_space_header(space_id, page_size, mtr); - descr = xdes_get_descriptor_with_space_hdr(space_header, space, + descr = xdes_get_descriptor_with_space_hdr(space_header, space_id, hint, mtr); if (descr == NULL) { /* Hint outside space or too high above free limit: reset hint */ /* The file space header page is always allocated. 
*/ hint = 0; - descr = xdes_get_descriptor(space, zip_size, hint, mtr); + descr = xdes_get_descriptor(space_id, hint, page_size, mtr); } /* In the big if-else below we look for ret_page and ret_descr */ @@ -2416,7 +2684,8 @@ take_hinted_page: ========================================================= the hinted page ===============*/ - ret_descr = fsp_alloc_free_extent(space, zip_size, hint, mtr); + ret_descr = fsp_alloc_free_extent( + space_id, page_size, hint, mtr); ut_a(ret_descr == descr); @@ -2426,7 +2695,7 @@ take_hinted_page: ret_descr + XDES_FLST_NODE, mtr); /* Try to fill the segment free list */ - fseg_fill_free_list(seg_inode, space, zip_size, + fseg_fill_free_list(seg_inode, space_id, page_size, hint + FSP_EXTENT_SIZE, mtr); goto take_hinted_page; /*-----------------------------------------------------------*/ @@ -2434,8 +2703,8 @@ take_hinted_page: && ((reserved - used) < reserved / FSEG_FILLFACTOR) && (used >= FSEG_FRAG_LIMIT) && (!!(ret_descr - = fseg_alloc_free_extent(seg_inode, - space, zip_size, mtr)))) { + = fseg_alloc_free_extent( + seg_inode, space_id, page_size, mtr)))) { /* 3. We take any free extent (which was already assigned above =============================================================== @@ -2448,6 +2717,7 @@ take_hinted_page: if (direction == FSP_DOWN) { ret_page += FSP_EXTENT_SIZE - 1; } + ut_ad(!has_done_reservation || ret_page != FIL_NULL); /*-----------------------------------------------------------*/ } else if ((xdes_get_state(descr, mtr) == XDES_FSEG) && mach_read_from_8(descr + XDES_ID) == seg_id @@ -2463,33 +2733,37 @@ take_hinted_page: ret_page = xdes_get_offset(ret_descr) + xdes_find_bit(ret_descr, XDES_FREE_BIT, TRUE, hint % FSP_EXTENT_SIZE, mtr); + ut_ad(!has_done_reservation || ret_page != FIL_NULL); /*-----------------------------------------------------------*/ } else if (reserved - used > 0) { /* 5. We take any unused page from the segment ==============================================*/ fil_addr_t first; - if (flst_get_len(seg_inode + FSEG_NOT_FULL, mtr) > 0) { + if (flst_get_len(seg_inode + FSEG_NOT_FULL) > 0) { first = flst_get_first(seg_inode + FSEG_NOT_FULL, mtr); - } else if (flst_get_len(seg_inode + FSEG_FREE, mtr) > 0) { + } else if (flst_get_len(seg_inode + FSEG_FREE) > 0) { first = flst_get_first(seg_inode + FSEG_FREE, mtr); } else { - ut_error; + ut_ad(!has_done_reservation); return(NULL); } - ret_descr = xdes_lst_get_descriptor(space, zip_size, + ret_descr = xdes_lst_get_descriptor(space_id, page_size, first, mtr); ret_page = xdes_get_offset(ret_descr) + xdes_find_bit(ret_descr, XDES_FREE_BIT, TRUE, 0, mtr); + ut_ad(!has_done_reservation || ret_page != FIL_NULL); /*-----------------------------------------------------------*/ } else if (used < FSEG_FRAG_LIMIT) { /* 6. We allocate an individual page from the space ===================================================*/ buf_block_t* block = fsp_alloc_free_page( - space, zip_size, hint, mtr, init_mtr); + space_id, page_size, hint, rw_latch, mtr, init_mtr); + + ut_ad(!has_done_reservation || block != NULL); if (block != NULL) { /* Put the page in the fragment page array of the @@ -2498,7 +2772,7 @@ take_hinted_page: ut_a(n != ULINT_UNDEFINED); fseg_set_nth_frag_page_no( - seg_inode, n, buf_block_get_page_no(block), + seg_inode, n, block->page.id.page_no(), mtr); } @@ -2510,45 +2784,43 @@ take_hinted_page: /* 7. 
We allocate a new extent and take its first page ======================================================*/ ret_descr = fseg_alloc_free_extent(seg_inode, - space, zip_size, mtr); + space_id, page_size, mtr); if (ret_descr == NULL) { ret_page = FIL_NULL; + ut_ad(!has_done_reservation); } else { ret_page = xdes_get_offset(ret_descr); + ut_ad(!has_done_reservation || ret_page != FIL_NULL); } } if (ret_page == FIL_NULL) { /* Page could not be allocated */ + ut_ad(!has_done_reservation); return(NULL); } - if (space != 0) { - space_size = fil_space_get_size(space); - - if (space_size <= ret_page) { - /* It must be that we are extending a single-table - tablespace whose size is still < 64 pages */ - - if (ret_page >= FSP_EXTENT_SIZE) { - fprintf(stderr, - "InnoDB: Error (2): trying to extend" - " a single-table tablespace %lu\n" - "InnoDB: by single page(s) though" - " the space size %lu. Page no %lu.\n", - (ulong) space, (ulong) space_size, - (ulong) ret_page); - return(NULL); - } + if (space->size <= ret_page && !is_system_tablespace(space_id)) { + /* It must be that we are extending a single-table + tablespace whose size is still < 64 pages */ - success = fsp_try_extend_data_file_with_pages( - space, ret_page, space_header, mtr); - if (!success) { - /* No disk space left */ - return(NULL); - } + if (ret_page >= FSP_EXTENT_SIZE) { + ib::error() << "Error (2): trying to extend" + " a single-table tablespace " << space_id + << " by single page(s) though the" + << " space size " << space->size + << ". Page no " << ret_page << "."; + ut_ad(!has_done_reservation); + return(NULL); + } + + if (!fsp_try_extend_data_file_with_pages( + space, ret_page, space_header, mtr)) { + /* No disk space left */ + ut_ad(!has_done_reservation); + return(NULL); } } @@ -2560,7 +2832,7 @@ got_hinted_page: The extent is still in the appropriate list (FSEG_NOT_FULL or FSEG_FREE), and the page is not yet marked as used. */ - ut_ad(xdes_get_descriptor(space, zip_size, ret_page, mtr) + ut_ad(xdes_get_descriptor(space_id, ret_page, page_size, mtr) == ret_descr); ut_ad(xdes_mtr_get_bit( @@ -2570,11 +2842,10 @@ got_hinted_page: fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr); } - return(fsp_page_create( - space, fsp_flags_get_zip_size( - mach_read_from_4(FSP_SPACE_FLAGS - + space_header)), - ret_page, mtr, init_mtr)); + ut_ad(space->flags + == mach_read_from_4(FSP_SPACE_FLAGS + space_header)); + return(fsp_page_create(page_id_t(space_id, ret_page), page_size, + rw_latch, mtr, init_mtr)); } /**********************************************************************//** @@ -2585,7 +2856,6 @@ fragmentation. @retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -UNIV_INTERN buf_block_t* fseg_alloc_free_page_general( /*=========================*/ @@ -2609,85 +2879,87 @@ fseg_alloc_free_page_general( latched in mtr, do not initialize the page. 
*/ { fseg_inode_t* inode; - ulint space; - ulint flags; - ulint zip_size; - rw_lock_t* latch; + ulint space_id; + fil_space_t* space; + buf_block_t* iblock; buf_block_t* block; ulint n_reserved; - space = page_get_space_id(page_align(seg_header)); + space_id = page_get_space_id(page_align(seg_header)); + space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - latch = fil_space_get_latch(space, &flags); - - zip_size = fsp_flags_get_zip_size(flags); - - mtr_x_lock(latch, mtr); - - if (rw_lock_get_x_lock_count(latch) == 1) { + if (rw_lock_get_x_lock_count(&space->latch) == 1) { /* This thread did not own the latch before this call: free excess pages from the insert buffer free list */ - if (space == IBUF_SPACE_ID) { + if (space_id == IBUF_SPACE_ID) { ibuf_free_excess_pages(); } } - inode = fseg_inode_get(seg_header, space, zip_size, mtr); + inode = fseg_inode_get(seg_header, space_id, page_size, mtr, &iblock); + fil_block_check_type(iblock, FIL_PAGE_INODE, mtr); if (!has_done_reservation - && !fsp_reserve_free_extents(&n_reserved, space, 2, + && !fsp_reserve_free_extents(&n_reserved, space_id, 2, FSP_NORMAL, mtr)) { return(NULL); } - block = fseg_alloc_free_page_low(space, zip_size, + block = fseg_alloc_free_page_low(space, page_size, inode, hint, direction, - mtr, init_mtr); + RW_X_LATCH, mtr, init_mtr +#ifdef UNIV_DEBUG + , has_done_reservation +#endif /* UNIV_DEBUG */ + ); + + /* The allocation cannot fail if we have already reserved a + space for the page. */ + ut_ad(!has_done_reservation || block != NULL); + if (!has_done_reservation) { - fil_space_release_free_extents(space, n_reserved); + fil_space_release_free_extents(space_id, n_reserved); } return(block); } -/**********************************************************************//** -Checks that we have at least 2 frag pages free in the first extent of a +/** Check that we have at least 2 frag pages free in the first extent of a single-table tablespace, and they are also physically initialized to the data file. That is we have already extended the data file so that those pages are inside the data file. If not, this function extends the tablespace with pages. 
-@return TRUE if there were >= 3 free pages, or we were able to extend */ +@param[in,out] space tablespace +@param[in,out] space_header tablespace header, x-latched +@param[in] size size of the tablespace in pages, +must be less than FSP_EXTENT_SIZE/2 +@param[in,out] mtr mini-transaction +@return true if there were at least 3 free pages, or we were able to extend */ static -ibool +bool fsp_reserve_free_pages( -/*===================*/ - ulint space, /*!< in: space id, must be != 0 */ - fsp_header_t* space_header, /*!< in: header of that space, - x-latched */ - ulint size, /*!< in: size of the tablespace in - pages, must be < FSP_EXTENT_SIZE/2 */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fil_space_t* space, + fsp_header_t* space_header, + ulint size, + mtr_t* mtr) { xdes_t* descr; ulint n_used; - ut_a(space != 0); - ut_a(size < FSP_EXTENT_SIZE / 2); + ut_a(!is_system_tablespace(space->id)); + ut_a(size < FSP_EXTENT_SIZE); - descr = xdes_get_descriptor_with_space_hdr(space_header, space, 0, - mtr); + descr = xdes_get_descriptor_with_space_hdr( + space_header, space->id, 0, mtr); n_used = xdes_get_n_used(descr, mtr); ut_a(n_used <= size); - if (size >= n_used + 2) { - - return(TRUE); - } - - return(fsp_try_extend_data_file_with_pages(space, n_used + 1, - space_header, mtr)); + return(size >= n_used + 2 + || fsp_try_extend_data_file_with_pages( + space, n_used + 1, space_header, mtr)); } /**********************************************************************//** @@ -2715,77 +2987,75 @@ function we would liberally reserve several 64 page extents for every page split or merge in a B-tree. But we do not want to waste disk space if the table only occupies < 32 pages. That is why we apply different rules in that special case, just ensuring that there are 3 free pages available. 
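As a rough standalone sketch of the small-tablespace rule described above (illustrative only: the 64-page extent size and 7-page tablespace are assumptions, and the real fsp_try_extend_data_file_with_pages() is replaced by a stub):

#include <iostream>

/* Stand-in for fsp_try_extend_data_file_with_pages(): grow the file so
that page number page_no exists, and report success. */
static bool
try_extend_with_pages(unsigned long& size, unsigned long page_no)
{
        if (size <= page_no) {
                size = page_no + 1;
        }
        return(true);
}

/* Mirrors the rule in fsp_reserve_free_pages() above: succeed outright if
two pages beyond the used ones already exist in the data file, otherwise
extend the file by a single page. */
static bool
reserve_free_pages(unsigned long& size, unsigned long n_used)
{
        return(size >= n_used + 2
               || try_extend_with_pages(size, n_used + 1));
}

int main()
{
        unsigned long size = 7;         /* a 7-page tablespace, still < 64 */
        bool ok = reserve_free_pages(size, 6);
        std::cout << ok << " new size " << size << std::endl;  /* 1, 8 */
}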
-@return TRUE if we were able to make the reservation */ -UNIV_INTERN -ibool +@return TRUE if we were able to make the reservation */ +bool fsp_reserve_free_extents( /*=====================*/ ulint* n_reserved,/*!< out: number of extents actually reserved; if we return TRUE and the tablespace size is < 64 pages, then this can be 0, otherwise it is n_ext */ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint n_ext, /*!< in: number of extents to reserve */ - ulint alloc_type,/*!< in: FSP_NORMAL, FSP_UNDO, or FSP_CLEANING */ + fsp_reserve_t alloc_type, + /*!< in: page reservation type */ mtr_t* mtr) /*!< in/out: mini-transaction */ { fsp_header_t* space_header; - rw_lock_t* latch; ulint n_free_list_ext; ulint free_limit; ulint size; - ulint flags; - ulint zip_size; ulint n_free; ulint n_free_up; ulint reserve= 0; - ibool success; - ulint n_pages_added; size_t total_reserved = 0; ulint rounds = 0; + ulint n_pages_added; ut_ad(mtr); *n_reserved = n_ext; - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - mtr_x_lock(latch, mtr); - - space_header = fsp_get_space_header(space, zip_size, mtr); + space_header = fsp_get_space_header(space_id, page_size, mtr); try_again: - size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, mtr); + size = mach_read_from_4(space_header + FSP_SIZE); + ut_ad(size == space->size_in_header); - if (size < FSP_EXTENT_SIZE / 2) { + if (alloc_type != FSP_BLOB && size < FSP_EXTENT_SIZE) { /* Use different rules for small single-table tablespaces */ *n_reserved = 0; return(fsp_reserve_free_pages(space, space_header, size, mtr)); } - n_free_list_ext = flst_get_len(space_header + FSP_FREE, mtr); + n_free_list_ext = flst_get_len(space_header + FSP_FREE); + ut_ad(space->free_len == n_free_list_ext); free_limit = mtr_read_ulint(space_header + FSP_FREE_LIMIT, MLOG_4BYTES, mtr); + ut_ad(space->free_limit == free_limit); /* Below we play safe when counting free extents above the free limit: some of them will contain extent descriptor pages, and therefore will not be free extents */ - n_free_up = (size - free_limit) / FSP_EXTENT_SIZE; + if (size >= free_limit) { + n_free_up = (size - free_limit) / FSP_EXTENT_SIZE; + } else { + ut_ad(alloc_type == FSP_BLOB); + n_free_up = 0; + } if (n_free_up > 0) { n_free_up--; - if (!zip_size) { - n_free_up -= n_free_up - / (UNIV_PAGE_SIZE / FSP_EXTENT_SIZE); - } else { - n_free_up -= n_free_up - / (zip_size / FSP_EXTENT_SIZE); - } + n_free_up -= n_free_up / (page_size.physical() + / FSP_EXTENT_SIZE); } n_free = n_free_list_ext + n_free_up; - if (alloc_type == FSP_NORMAL) { + switch (alloc_type) { + case FSP_NORMAL: /* We reserve 1 extent + 0.5 % of the space size to undo logs and 1 extent + 0.5 % to cleaning operations; NOTE: this source code is duplicated in the function below! 
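A small worked example of the reservation sizes described in the comments above, matching the FSP_UNDO assignment visible later in this hunk and the duplicated FSP_NORMAL computation in fsp_get_available_space_in_free_extents; the 64-page extent size and 640000-page tablespace are assumptions for illustration:

#include <iostream>

int main()
{
        const unsigned long extent = 64;        /* assumed FSP_EXTENT_SIZE */
        const unsigned long size = 640000;      /* tablespace size in pages */
        const unsigned long n_extents = size / extent;          /* 10000 */

        /* FSP_NORMAL: 1 extent + 0.5% for undo logs and 1 extent + 0.5%
        for cleaning, i.e. 2 extents plus 1% of the extents in total. */
        unsigned long reserve_normal = 2 + (n_extents * 2) / 200;  /* 102 */

        /* FSP_UNDO: 1 extent + 0.5% for cleaning operations. */
        unsigned long reserve_undo = 1 + (n_extents * 1) / 200;    /* 51 */

        std::cout << reserve_normal << " " << reserve_undo << std::endl;
}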
*/ @@ -2796,7 +3066,8 @@ try_again: goto try_to_extend; } - } else if (alloc_type == FSP_UNDO) { + break; + case FSP_UNDO: /* We reserve 0.5 % of the space size to cleaning operations */ reserve = 1 + ((size / FSP_EXTENT_SIZE) * 1) / 200; @@ -2805,133 +3076,72 @@ try_again: goto try_to_extend; } - } else { - ut_a(alloc_type == FSP_CLEANING); + break; + case FSP_CLEANING: + case FSP_BLOB: + break; + default: + ut_error; } - success = fil_space_reserve_free_extents(space, n_free, n_ext); - *n_reserved = n_ext; - - if (success) { - return(TRUE); + if (fil_space_reserve_free_extents(space_id, n_free, n_ext)) { + return(true); } try_to_extend: - success = fsp_try_extend_data_file(&n_pages_added, space, - space_header, mtr); + n_pages_added = 0; - if (success && n_pages_added > 0) { + if (fsp_try_extend_data_file(space, space_header, mtr, &n_pages_added)) { rounds++; total_reserved += n_pages_added; - if (rounds > 50) { - ib_logf(IB_LOG_LEVEL_INFO, - "Space id %lu trying to reserve %lu extents actually reserved %lu " - " reserve %lu free %lu size %lu rounds %lu total_reserved %llu", - space, n_ext, n_pages_added, reserve, n_free, size, rounds, (ullint) total_reserved); + if (rounds > 10) { + ib::info() << "Space id: " + << space << " trying to reserve: " + << n_ext << " extents actually reserved: " + << n_pages_added << " reserve: " + << reserve << " free: " << n_free + << " size: " << size + << " rounds: " << rounds + << " total_reserved: " << total_reserved << "."; } goto try_again; } - return(FALSE); + return(false); } -/**********************************************************************//** -This function should be used to get information on how much we still -will be able to insert new data to the database without running out the -tablespace. Only free extents are taken into account and we also subtract -the safety margin required by the above function fsp_reserve_free_extents. -@return available space in kB */ -UNIV_INTERN -ullint +/** Calculate how many KiB of new data we will be able to insert to the +tablespace without running out of space. +@param[in] space_id tablespace ID +@return available space in KiB +@retval UINTMAX_MAX if unknown */ +uintmax_t fsp_get_available_space_in_free_extents( -/*====================================*/ - ulint space) /*!< in: space id */ + ulint space_id) { - fsp_header_t* space_header; - ulint n_free_list_ext; - ulint free_limit; - ulint size; - ulint flags; - ulint zip_size; - ulint n_free; - ulint n_free_up; - ulint reserve; - rw_lock_t* latch; - mtr_t mtr; - - /* The convoluted mutex acquire is to overcome latching order - issues: The problem is that the fil_mutex is at a lower level - than the tablespace latch and the buffer pool mutex. We have to - first prevent any operations on the file system by acquiring the - dictionary mutex. Then acquire the tablespace latch to obey the - latching order and then release the dictionary mutex. That way we - ensure that the tablespace instance can't be freed while we are - examining its contents (see fil_space_free()). - - However, there is one further complication, we release the fil_mutex - when we need to invalidate the the pages in the buffer pool and we - reacquire the fil_mutex when deleting and freeing the tablespace - instance in fil0fil.cc. Here we need to account for that situation - too. */ - - mutex_enter(&dict_sys->mutex); - - /* At this stage there is no guarantee that the tablespace even - exists in the cache. 
*/ - - if (fil_tablespace_deleted_or_being_deleted_in_mem(space, -1)) { - - mutex_exit(&dict_sys->mutex); - - return(ULLINT_UNDEFINED); - } - - mtr_start(&mtr); - - latch = fil_space_get_latch(space, &flags); - - /* This should ensure that the tablespace instance can't be freed - by another thread. However, the tablespace pages can still be freed - from the buffer pool. We need to check for that again. */ - - zip_size = fsp_flags_get_zip_size(flags); - - mtr_x_lock(latch, &mtr); - - mutex_exit(&dict_sys->mutex); - - /* At this point it is possible for the tablespace to be deleted and - its pages removed from the buffer pool. We need to check for that - situation. However, the tablespace instance can't be deleted because - our latching above should ensure that. */ - - if (fil_tablespace_is_being_deleted(space)) { - - mtr_commit(&mtr); - - return(ULLINT_UNDEFINED); + FilSpace space(space_id); + if (space() == NULL) { + return(UINTMAX_MAX); } - /* From here on even if the user has dropped the tablespace, the - pages _must_ still exist in the buffer pool and the tablespace - instance _must_ be in the file system hash table. */ - - space_header = fsp_get_space_header(space, zip_size, &mtr); - - size = mtr_read_ulint(space_header + FSP_SIZE, MLOG_4BYTES, &mtr); - - n_free_list_ext = flst_get_len(space_header + FSP_FREE, &mtr); - - free_limit = mtr_read_ulint(space_header + FSP_FREE_LIMIT, - MLOG_4BYTES, &mtr); - mtr_commit(&mtr); + return(fsp_get_available_space_in_free_extents(space)); +} - if (size < FSP_EXTENT_SIZE) { - ut_a(space != 0); /* This must be a single-table - tablespace */ +/** Calculate how many KiB of new data we will be able to insert to the +tablespace without running out of space. Start with a space object that has +been acquired by the caller who holds it for the calculation, +@param[in] space tablespace object from fil_space_acquire() +@return available space in KiB */ +uintmax_t +fsp_get_available_space_in_free_extents( + const fil_space_t* space) +{ + ut_ad(space->n_pending_ops > 0); + ulint size_in_header = space->size_in_header; + if (size_in_header < FSP_EXTENT_SIZE) { return(0); /* TODO: count free frag pages and return a value based on that */ } @@ -2939,41 +3149,30 @@ fsp_get_available_space_in_free_extents( /* Below we play safe when counting free extents above the free limit: some of them will contain extent descriptor pages, and therefore will not be free extents */ + ut_ad(size_in_header >= space->free_limit); + ulint n_free_up = + (size_in_header - space->free_limit) / FSP_EXTENT_SIZE; - n_free_up = (size - free_limit) / FSP_EXTENT_SIZE; - + page_size_t page_size(space->flags); if (n_free_up > 0) { n_free_up--; - if (!zip_size) { - n_free_up -= n_free_up - / (UNIV_PAGE_SIZE / FSP_EXTENT_SIZE); - } else { - n_free_up -= n_free_up - / (zip_size / FSP_EXTENT_SIZE); - } + n_free_up -= n_free_up / (page_size.physical() + / FSP_EXTENT_SIZE); } - n_free = n_free_list_ext + n_free_up; - /* We reserve 1 extent + 0.5 % of the space size to undo logs and 1 extent + 0.5 % to cleaning operations; NOTE: this source code is duplicated in the function above! 
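The estimate that follows counts free extents (the FSP_FREE list plus extents above the free limit, minus the share that will hold descriptor pages), subtracts the same reserve as above, and converts extents to KiB using the physical page size. A worked sketch of that arithmetic, with 16 KiB pages, 64-page extents and the sample sizes below all being assumptions:

#include <iostream>

int main()
{
        const unsigned long long page_size = 16384;  /* physical page, bytes */
        const unsigned long long extent = 64;        /* assumed extent size */

        unsigned long long size_in_header = 640000;  /* pages in tablespace */
        unsigned long long free_limit = 512000;      /* FSP_FREE_LIMIT */
        unsigned long long free_len = 100;           /* FSP_FREE list length */

        /* Extents above the free limit, minus one and minus the fraction
        that will be taken by extent descriptor (XDES) pages. */
        unsigned long long n_free_up = (size_in_header - free_limit) / extent;
        if (n_free_up > 0) {
                n_free_up--;
                n_free_up -= n_free_up / (page_size / extent);
        }

        unsigned long long reserve
                = 2 + ((size_in_header / extent) * 2) / 200;
        unsigned long long n_free = free_len + n_free_up;

        unsigned long long avail_kib = n_free <= reserve
                ? 0
                : (n_free - reserve) * extent * (page_size / 1024);

        std::cout << avail_kib << " KiB available" << std::endl;
}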
*/ - reserve = 2 + ((size / FSP_EXTENT_SIZE) * 2) / 200; + ulint reserve = 2 + ((size_in_header / FSP_EXTENT_SIZE) * 2) / 200; + ulint n_free = space->free_len + n_free_up; if (reserve > n_free) { return(0); } - if (!zip_size) { - return((ullint) (n_free - reserve) - * FSP_EXTENT_SIZE - * (UNIV_PAGE_SIZE / 1024)); - } else { - return((ullint) (n_free - reserve) - * FSP_EXTENT_SIZE - * (zip_size / 1024)); - } + return(static_cast(n_free - reserve) + * FSP_EXTENT_SIZE * (page_size.physical() / 1024)); } /********************************************************************//** @@ -2990,6 +3189,7 @@ fseg_mark_page_used( { ulint not_full_n_used; + ut_ad(fil_page_get_type(page_align(seg_inode)) == FIL_PAGE_INODE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); @@ -3031,18 +3231,21 @@ fseg_mark_page_used( } } -/**********************************************************************//** -Frees a single page of a segment. */ +/** Frees a single page of a segment. +@param[in] seg_inode segment inode +@param[in] page_id page id +@param[in] page_size page size +@param[in] ahi whether we may need to drop the adaptive +hash index +@param[in,out] mtr mini-transaction */ static void fseg_free_page_low( -/*===============*/ - fseg_inode_t* seg_inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page, /*!< in: page offset */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_inode_t* seg_inode, + const page_id_t& page_id, + const page_size_t& page_size, + bool ahi, + mtr_t* mtr) { xdes_t* descr; ulint not_full_n_used; @@ -3056,34 +3259,30 @@ fseg_free_page_low( ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); + ut_d(fsp_space_modify_check(page_id.space(), mtr)); /* Drop search system page hash index if the page is found in the pool and is hashed */ - btr_search_drop_page_hash_when_freed(space, zip_size, page); + if (ahi) { + btr_search_drop_page_hash_when_freed(page_id, page_size); + } - descr = xdes_get_descriptor(space, zip_size, page, mtr); + descr = xdes_get_descriptor(page_id.space(), page_id.page_no(), + page_size, mtr); if (xdes_mtr_get_bit(descr, XDES_FREE_BIT, - page % FSP_EXTENT_SIZE, mtr)) { + page_id.page_no() % FSP_EXTENT_SIZE, mtr)) { fputs("InnoDB: Dump of the tablespace extent descriptor: ", stderr); ut_print_buf(stderr, descr, 40); - - fprintf(stderr, "\n" - "InnoDB: Serious error! InnoDB is trying to" - " free page %lu\n" - "InnoDB: though it is already marked as free" - " in the tablespace!\n" - "InnoDB: The tablespace free space info is corrupt.\n" - "InnoDB: You may need to dump your" - " InnoDB tables and recreate the whole\n" - "InnoDB: database!\n", (ulong) page); + ib::error() << "InnoDB is trying to free page " << page_id + << " though it is already marked as free in the" + " tablespace! The tablespace free space info is" + " corrupt. 
You may need to dump your tables and" + " recreate the whole database!"; crash: - fputs("InnoDB: Please refer to\n" - "InnoDB: " REFMAN "forcing-innodb-recovery.html\n" - "InnoDB: about forcing recovery.\n", stderr); - ut_error; + ib::fatal() << FORCE_RECOVERY_MSG; } state = xdes_get_state(descr, mtr); @@ -3093,7 +3292,7 @@ crash: for (i = 0;; i++) { if (fseg_get_nth_frag_page_no(seg_inode, i, mtr) - == page) { + == page_id.page_no()) { fseg_set_nth_frag_page_no(seg_inode, i, FIL_NULL, mtr); @@ -3101,7 +3300,7 @@ crash: } } - fsp_free_page(space, zip_size, page, mtr); + fsp_free_page(page_id, page_size, mtr); return; } @@ -3110,15 +3309,7 @@ crash: descr_id = mach_read_from_8(descr + XDES_ID); seg_id = mach_read_from_8(seg_inode + FSEG_ID); -#if 0 - fprintf(stderr, - "InnoDB: InnoDB is freeing space %lu page %lu,\n" - "InnoDB: which belongs to descr seg %llu\n" - "InnoDB: segment %llu.\n", - (ulong) space, (ulong) page, - (ullint) descr_id, - (ullint) seg_id); -#endif /* 0 */ + if (UNIV_UNLIKELY(descr_id != seg_id)) { fputs("InnoDB: Dump of the tablespace extent descriptor: ", stderr); @@ -3127,15 +3318,9 @@ crash: ut_print_buf(stderr, seg_inode, 40); putc('\n', stderr); - fprintf(stderr, - "InnoDB: Serious error: InnoDB is trying to" - " free space %lu page %lu,\n" - "InnoDB: which does not belong to" - " segment %llu but belongs\n" - "InnoDB: to segment %llu.\n", - (ulong) space, (ulong) page, - (ullint) descr_id, - (ullint) seg_id); + ib::error() << "InnoDB is trying to free page " << page_id + << ", which does not belong to segment " << descr_id + << " but belongs to segment " << seg_id << "."; goto crash; } @@ -3156,82 +3341,74 @@ crash: not_full_n_used - 1, MLOG_4BYTES, mtr); } - xdes_set_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr); - xdes_set_bit(descr, XDES_CLEAN_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr); + const ulint bit = page_id.page_no() % FSP_EXTENT_SIZE; + + xdes_set_bit(descr, XDES_FREE_BIT, bit, TRUE, mtr); + xdes_set_bit(descr, XDES_CLEAN_BIT, bit, TRUE, mtr); if (xdes_is_free(descr, mtr)) { /* The extent has become free: free it to space */ flst_remove(seg_inode + FSEG_NOT_FULL, descr + XDES_FLST_NODE, mtr); - fsp_free_extent(space, zip_size, page, mtr); + fsp_free_extent(page_id, page_size, mtr); } - - mtr->n_freed_pages++; } /**********************************************************************//** Frees a single page of a segment. 
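The bitmap bookkeeping above is plain modular arithmetic: each extent descriptor covers FSP_EXTENT_SIZE consecutive pages, so the XDES bit of a page and the first page of its extent follow from the page number. A tiny illustration, with the 64-page extent size assumed:

#include <iostream>

int main()
{
        const unsigned long extent = 64;        /* assumed FSP_EXTENT_SIZE */
        const unsigned long page_no = 1027;

        unsigned long bit = page_no % extent;                    /* 3 */
        unsigned long first_page_in_extent
                = page_no - (page_no % extent);                  /* 1024 */

        std::cout << "XDES bit " << bit
                  << ", extent starts at page " << first_page_in_extent
                  << std::endl;
}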
*/ -UNIV_INTERN void fseg_free_page( /*===========*/ fseg_header_t* seg_header, /*!< in: segment header */ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint page, /*!< in: page offset */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ulint flags; - ulint zip_size; - fseg_inode_t* seg_inode; - rw_lock_t* latch; - - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + fseg_inode_t* seg_inode; + buf_block_t* iblock; + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - mtr_x_lock(latch, mtr); + seg_inode = fseg_inode_get(seg_header, space_id, page_size, mtr, + &iblock); + fil_block_check_type(iblock, FIL_PAGE_INODE, mtr); - seg_inode = fseg_inode_get(seg_header, space, zip_size, mtr); + const page_id_t page_id(space_id, page); - fseg_free_page_low(seg_inode, space, zip_size, page, mtr); + fseg_free_page_low(seg_inode, page_id, page_size, ahi, mtr); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - buf_page_set_file_page_was_freed(space, page); -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + ut_d(buf_page_set_file_page_was_freed(page_id)); } /**********************************************************************//** Checks if a single page of a segment is free. -@return true if free */ -UNIV_INTERN +@return true if free */ bool fseg_page_is_free( /*==============*/ fseg_header_t* seg_header, /*!< in: segment header */ - ulint space, /*!< in: space id */ + ulint space_id, /*!< in: space id */ ulint page) /*!< in: page offset */ { mtr_t mtr; ibool is_free; - ulint flags; - rw_lock_t* latch; xdes_t* descr; - ulint zip_size; fseg_inode_t* seg_inode; - latch = fil_space_get_latch(space, &flags); - zip_size = dict_tf_get_zip_size(flags); - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); + const fil_space_t* space = mtr_x_lock_space(space_id, &mtr); + const page_size_t page_size(space->flags); - seg_inode = fseg_inode_get(seg_header, space, zip_size, &mtr); + seg_inode = fseg_inode_get(seg_header, space_id, page_size, &mtr); ut_a(seg_inode); ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); - descr = xdes_get_descriptor(space, zip_size, page, &mtr); + descr = xdes_get_descriptor(space_id, page, page_size, &mtr); ut_a(descr); is_free = xdes_mtr_get_bit( @@ -3242,17 +3419,43 @@ fseg_page_is_free( return(is_free); } +/**********************************************************************//** +Checks if a single page is free. +@return true if free */ +UNIV_INTERN +bool +fsp_page_is_free_func( +/*==============*/ + ulint space_id, /*!< in: space id */ + ulint page_no, /*!< in: page offset */ + mtr_t* mtr, /*!< in/out: mini-transaction */ + const char *file, + ulint line) +{ + ut_ad(mtr); + + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); + + xdes_t* descr = xdes_get_descriptor(space_id, page_no, page_size, mtr); + ut_a(descr); + + return xdes_mtr_get_bit( + descr, XDES_FREE_BIT, page_no % FSP_EXTENT_SIZE, mtr); +} + /**********************************************************************//** Frees an extent of a segment to the space free list. 
*/ -static +static __attribute__((nonnull)) void fseg_free_extent( /*=============*/ fseg_inode_t* seg_inode, /*!< in: segment inode */ ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ + const page_size_t& page_size, ulint page, /*!< in: a page in the extent */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ mtr_t* mtr) /*!< in/out: mini-transaction */ { ulint first_page_in_extent; @@ -3264,23 +3467,29 @@ fseg_free_extent( ut_ad(seg_inode != NULL); ut_ad(mtr != NULL); - descr = xdes_get_descriptor(space, zip_size, page, mtr); + descr = xdes_get_descriptor(space, page, page_size, mtr); ut_a(xdes_get_state(descr, mtr) == XDES_FSEG); ut_a(!memcmp(descr + XDES_ID, seg_inode + FSEG_ID, 8)); ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); + ut_d(fsp_space_modify_check(space, mtr)); first_page_in_extent = page - (page % FSP_EXTENT_SIZE); - for (i = 0; i < FSP_EXTENT_SIZE; i++) { - if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) { + if (ahi) { + for (i = 0; i < FSP_EXTENT_SIZE; i++) { + if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) { - /* Drop search system page hash index if the page is - found in the pool and is hashed */ + /* Drop search system page hash index + if the page is found in the pool and + is hashed */ - btr_search_drop_page_hash_when_freed( - space, zip_size, first_page_in_extent + i); + btr_search_drop_page_hash_when_freed( + page_id_t(space, + first_page_in_extent + i), + page_size); + } } } @@ -3304,15 +3513,15 @@ fseg_free_extent( MLOG_4BYTES, mtr); } - fsp_free_extent(space, zip_size, page, mtr); + fsp_free_extent(page_id_t(space, page), page_size, mtr); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG +#ifdef UNIV_DEBUG for (i = 0; i < FSP_EXTENT_SIZE; i++) { - buf_page_set_file_page_was_freed(space, - first_page_in_extent + i); + buf_page_set_file_page_was_freed( + page_id_t(space, first_page_in_extent + i)); } -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ +#endif /* UNIV_DEBUG */ } /**********************************************************************//** @@ -3320,8 +3529,7 @@ Frees part of a segment. This function can be used to free a segment by repeatedly calling this function in different mini-transactions. Doing the freeing in a single mini-transaction might result in too big a mini-transaction. 
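Since each call frees at most one extent or one fragment page, a caller drives fseg_free_step (declared just below) in a loop, one mini-transaction per step, until it reports completion. A hedged sketch of that calling pattern follows; the stub types and the three-step counter only stand in for the real server declarations so the fragment is self-contained, they are not InnoDB code:

#include <iostream>

struct mtr_t { };                       /* stub mini-transaction */
typedef unsigned char fseg_header_t;    /* stub segment header */

static void mtr_start(mtr_t*) { }
static void mtr_commit(mtr_t*) { }

/* Stub: pretend the segment needs three steps before freeing completes. */
static bool
fseg_free_step(fseg_header_t*, bool /* ahi */, mtr_t*)
{
        static int steps_left = 3;
        return(--steps_left == 0);
}

int main()
{
        fseg_header_t   header = 0;
        mtr_t           mtr;
        bool            finished;

        do {
                /* One small mini-transaction per freeing step, so that no
                single mtr grows too large. */
                mtr_start(&mtr);
                finished = fseg_free_step(&header, true, &mtr);
                mtr_commit(&mtr);
        } while (!finished);

        std::cout << "segment freed" << std::endl;
}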
-@return TRUE if freeing completed */ -UNIV_INTERN +@return TRUE if freeing completed */ ibool fseg_free_step( /*===========*/ @@ -3329,51 +3537,52 @@ fseg_free_step( resides on the first page of the frag list of the segment, this pointer becomes obsolete after the last freeing step */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ mtr_t* mtr) /*!< in/out: mini-transaction */ { ulint n; ulint page; xdes_t* descr; fseg_inode_t* inode; - ulint space; - ulint flags; - ulint zip_size; + ulint space_id; ulint header_page; - rw_lock_t* latch; - space = page_get_space_id(page_align(header)); - header_page = page_get_page_no(page_align(header)); + DBUG_ENTER("fseg_free_step"); - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + space_id = page_get_space_id(page_align(header)); + header_page = page_get_page_no(page_align(header)); - mtr_x_lock(latch, mtr); + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - descr = xdes_get_descriptor(space, zip_size, header_page, mtr); + descr = xdes_get_descriptor(space_id, header_page, page_size, mtr); /* Check that the header resides on a page which has not been freed yet */ ut_a(xdes_mtr_get_bit(descr, XDES_FREE_BIT, header_page % FSP_EXTENT_SIZE, mtr) == FALSE); + buf_block_t* iblock; - inode = fseg_inode_try_get(header, space, zip_size, mtr); + inode = fseg_inode_try_get(header, space_id, page_size, mtr, &iblock); - if (UNIV_UNLIKELY(inode == NULL)) { - fprintf(stderr, "double free of inode from %u:%u\n", - (unsigned) space, (unsigned) header_page); - return(TRUE); + if (inode == NULL) { + ib::info() << "Double free of inode from " + << page_id_t(space_id, header_page); + DBUG_RETURN(TRUE); } - descr = fseg_get_first_extent(inode, space, zip_size, mtr); + fil_block_check_type(iblock, FIL_PAGE_INODE, mtr); + descr = fseg_get_first_extent(inode, space_id, page_size, mtr); if (descr != NULL) { /* Free the extent held by the segment */ page = xdes_get_offset(descr); - fseg_free_extent(inode, space, zip_size, page, mtr); + fseg_free_extent(inode, space_id, page_size, page, ahi, mtr); - return(FALSE); + DBUG_RETURN(FALSE); } /* Free a frag page */ @@ -3381,64 +3590,65 @@ fseg_free_step( if (n == ULINT_UNDEFINED) { /* Freeing completed: free the segment inode */ - fsp_free_seg_inode(space, zip_size, inode, mtr); + fsp_free_seg_inode(space_id, page_size, inode, mtr); - return(TRUE); + DBUG_RETURN(TRUE); } - fseg_free_page_low(inode, space, zip_size, - fseg_get_nth_frag_page_no(inode, n, mtr), mtr); + fseg_free_page_low( + inode, + page_id_t(space_id, fseg_get_nth_frag_page_no(inode, n, mtr)), + page_size, ahi, mtr); n = fseg_find_last_used_frag_page_slot(inode, mtr); if (n == ULINT_UNDEFINED) { /* Freeing completed: free the segment inode */ - fsp_free_seg_inode(space, zip_size, inode, mtr); + fsp_free_seg_inode(space_id, page_size, inode, mtr); - return(TRUE); + DBUG_RETURN(TRUE); } - return(FALSE); + DBUG_RETURN(FALSE); } /**********************************************************************//** Frees part of a segment. Differs from fseg_free_step because this function leaves the header page unfreed. 
-@return TRUE if freeing completed, except the header page */ -UNIV_INTERN +@return TRUE if freeing completed, except the header page */ ibool fseg_free_step_not_header( /*======================*/ fseg_header_t* header, /*!< in: segment header which must reside on the first fragment page of the segment */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ mtr_t* mtr) /*!< in/out: mini-transaction */ { ulint n; ulint page; xdes_t* descr; fseg_inode_t* inode; - ulint space; - ulint flags; - ulint zip_size; + ulint space_id; ulint page_no; - rw_lock_t* latch; - - space = page_get_space_id(page_align(header)); - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + space_id = page_get_space_id(page_align(header)); + ut_ad(mtr->is_named_space(space_id)); - mtr_x_lock(latch, mtr); + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); + buf_block_t* iblock; - inode = fseg_inode_get(header, space, zip_size, mtr); + inode = fseg_inode_get(header, space_id, page_size, mtr, &iblock); + fil_block_check_type(iblock, FIL_PAGE_INODE, mtr); - descr = fseg_get_first_extent(inode, space, zip_size, mtr); + descr = fseg_get_first_extent(inode, space_id, page_size, mtr); if (descr != NULL) { /* Free the extent held by the segment */ page = xdes_get_offset(descr); - fseg_free_extent(inode, space, zip_size, page, mtr); + fseg_free_extent(inode, space_id, page_size, page, ahi, mtr); return(FALSE); } @@ -3458,45 +3668,47 @@ fseg_free_step_not_header( return(TRUE); } - fseg_free_page_low(inode, space, zip_size, page_no, mtr); + fseg_free_page_low(inode, page_id_t(space_id, page_no), page_size, ahi, + mtr); return(FALSE); } -/**********************************************************************//** -Returns the first extent descriptor for a segment. We think of the extent -lists of the segment catenated in the order FSEG_FULL -> FSEG_NOT_FULL --> FSEG_FREE. -@return the first extent descriptor, or NULL if none */ +/** Returns the first extent descriptor for a segment. +We think of the extent lists of the segment catenated in the order +FSEG_FULL -> FSEG_NOT_FULL -> FSEG_FREE. 
+@param[in] inode segment inode +@param[in] space_id space id +@param[in] page_size page size +@param[in,out] mtr mini-transaction +@return the first extent descriptor, or NULL if none */ static xdes_t* fseg_get_first_extent( -/*==================*/ - fseg_inode_t* inode, /*!< in: segment inode */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + fseg_inode_t* inode, + ulint space_id, + const page_size_t& page_size, + mtr_t* mtr) { fil_addr_t first; xdes_t* descr; ut_ad(inode && mtr); - ut_ad(space == page_get_space_id(page_align(inode))); + ut_ad(space_id == page_get_space_id(page_align(inode))); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); first = fil_addr_null; - if (flst_get_len(inode + FSEG_FULL, mtr) > 0) { + if (flst_get_len(inode + FSEG_FULL) > 0) { first = flst_get_first(inode + FSEG_FULL, mtr); - } else if (flst_get_len(inode + FSEG_NOT_FULL, mtr) > 0) { + } else if (flst_get_len(inode + FSEG_NOT_FULL) > 0) { first = flst_get_first(inode + FSEG_NOT_FULL, mtr); - } else if (flst_get_len(inode + FSEG_FREE, mtr) > 0) { + } else if (flst_get_len(inode + FSEG_FREE) > 0) { first = flst_get_first(inode + FSEG_FREE, mtr); } @@ -3505,14 +3717,15 @@ fseg_get_first_extent( return(NULL); } - descr = xdes_lst_get_descriptor(space, zip_size, first, mtr); + descr = xdes_lst_get_descriptor(space_id, page_size, first, mtr); return(descr); } +#ifdef UNIV_DEBUG /*******************************************************************//** Validates a segment. -@return TRUE if ok */ +@return TRUE if ok */ static ibool fseg_validate_low( @@ -3520,7 +3733,7 @@ fseg_validate_low( fseg_inode_t* inode, /*!< in: segment inode */ mtr_t* mtr2) /*!< in/out: mini-transaction */ { - ulint space; + ulint space_id; ib_id_t seg_id; mtr_t mtr; xdes_t* descr; @@ -3528,10 +3741,10 @@ fseg_validate_low( ulint n_used = 0; ulint n_used2 = 0; - ut_ad(mtr_memo_contains_page(mtr2, inode, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr2, inode, MTR_MEMO_PAGE_SX_FIX)); ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); - space = page_get_space_id(page_align(inode)); + space_id = page_get_space_id(page_align(inode)); seg_id = mach_read_from_8(inode + FSEG_ID); n_used = mtr_read_ulint(inode + FSEG_NOT_FULL_N_USED, @@ -3544,14 +3757,13 @@ fseg_validate_low( node_addr = flst_get_first(inode + FSEG_FREE, mtr2); while (!fil_addr_is_null(node_addr)) { - ulint flags; - ulint zip_size; - mtr_start(&mtr); - mtr_x_lock(fil_space_get_latch(space, &flags), &mtr); - zip_size = fsp_flags_get_zip_size(flags); + const fil_space_t* space = mtr_x_lock_space( + space_id, &mtr); + + const page_size_t page_size(space->flags); - descr = xdes_lst_get_descriptor(space, zip_size, + descr = xdes_lst_get_descriptor(space_id, page_size, node_addr, &mtr); ut_a(xdes_get_n_used(descr, &mtr) == 0); @@ -3567,14 +3779,12 @@ fseg_validate_low( node_addr = flst_get_first(inode + FSEG_NOT_FULL, mtr2); while (!fil_addr_is_null(node_addr)) { - ulint flags; - ulint zip_size; - mtr_start(&mtr); - mtr_x_lock(fil_space_get_latch(space, &flags), &mtr); - zip_size = fsp_flags_get_zip_size(flags); + const fil_space_t* space = mtr_x_lock_space( + space_id, &mtr); + const page_size_t page_size(space->flags); - descr = xdes_lst_get_descriptor(space, zip_size, + descr = xdes_lst_get_descriptor(space_id, page_size, node_addr, &mtr); ut_a(xdes_get_n_used(descr, &mtr) > 0); @@ -3593,14 +3803,12 @@ 
fseg_validate_low( node_addr = flst_get_first(inode + FSEG_FULL, mtr2); while (!fil_addr_is_null(node_addr)) { - ulint flags; - ulint zip_size; - mtr_start(&mtr); - mtr_x_lock(fil_space_get_latch(space, &flags), &mtr); - zip_size = fsp_flags_get_zip_size(flags); + const fil_space_t* space = mtr_x_lock_space( + space_id, &mtr); + const page_size_t page_size(space->flags); - descr = xdes_lst_get_descriptor(space, zip_size, + descr = xdes_lst_get_descriptor(space_id, page_size, node_addr, &mtr); ut_a(xdes_get_n_used(descr, &mtr) == FSP_EXTENT_SIZE); @@ -3616,11 +3824,9 @@ fseg_validate_low( return(TRUE); } -#ifdef UNIV_DEBUG /*******************************************************************//** Validates a segment. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool fseg_validate( /*==========*/ @@ -3629,16 +3835,14 @@ fseg_validate( { fseg_inode_t* inode; ibool ret; - ulint space; - ulint flags; - ulint zip_size; + ulint space_id; - space = page_get_space_id(page_align(header)); + space_id = page_get_space_id(page_align(header)); - mtr_x_lock(fil_space_get_latch(space, &flags), mtr); - zip_size = fsp_flags_get_zip_size(flags); + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - inode = fseg_inode_get(header, space, zip_size, mtr); + inode = fseg_inode_get(header, space_id, page_size, mtr); ret = fseg_validate_low(inode, mtr); @@ -3646,6 +3850,7 @@ fseg_validate( } #endif /* UNIV_DEBUG */ +#ifdef UNIV_BTR_PRINT /*******************************************************************//** Writes info of a segment. */ static @@ -3666,7 +3871,7 @@ fseg_print_low( ulint page_no; ib_id_t seg_id; - ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX)); space = page_get_space_id(page_align(inode)); page_no = page_get_page_no(page_align(inode)); @@ -3677,27 +3882,24 @@ fseg_print_low( n_used = mtr_read_ulint(inode + FSEG_NOT_FULL_N_USED, MLOG_4BYTES, mtr); n_frag = fseg_get_n_frag_pages(inode, mtr); - n_free = flst_get_len(inode + FSEG_FREE, mtr); - n_not_full = flst_get_len(inode + FSEG_NOT_FULL, mtr); - n_full = flst_get_len(inode + FSEG_FULL, mtr); - - fprintf(stderr, - "SEGMENT id %llu space %lu; page %lu;" - " res %lu used %lu; full ext %lu\n" - "fragm pages %lu; free extents %lu;" - " not full extents %lu: pages %lu\n", - (ullint) seg_id, - (ulong) space, (ulong) page_no, - (ulong) reserved, (ulong) used, (ulong) n_full, - (ulong) n_frag, (ulong) n_free, (ulong) n_not_full, - (ulong) n_used); + n_free = flst_get_len(inode + FSEG_FREE); + n_not_full = flst_get_len(inode + FSEG_NOT_FULL); + n_full = flst_get_len(inode + FSEG_FULL); + + ib::info() << "SEGMENT id " << seg_id + << " space " << space << ";" + << " page " << page_no << ";" + << " res " << reserved << " used " << used << ";" + << " full ext " << n_full << ";" + << " fragm pages " << n_frag << ";" + << " free extents " << n_free << ";" + << " not full extents " << n_not_full << ": pages " << n_used; + ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE); } -#ifdef UNIV_BTR_PRINT /*******************************************************************//** Writes info of a segment. 
*/ -UNIV_INTERN void fseg_print( /*=======*/ @@ -3705,425 +3907,41 @@ fseg_print( mtr_t* mtr) /*!< in/out: mini-transaction */ { fseg_inode_t* inode; - ulint space; - ulint flags; - ulint zip_size; + ulint space_id; - space = page_get_space_id(page_align(header)); + space_id = page_get_space_id(page_align(header)); + const fil_space_t* space = mtr_x_lock_space(space_id, mtr); + const page_size_t page_size(space->flags); - mtr_x_lock(fil_space_get_latch(space, &flags), mtr); - zip_size = fsp_flags_get_zip_size(flags); - - inode = fseg_inode_get(header, space, zip_size, mtr); + inode = fseg_inode_get(header, space_id, page_size, mtr); fseg_print_low(inode, mtr); } #endif /* UNIV_BTR_PRINT */ +#endif /* !UNIV_HOTBACKUP */ -/*******************************************************************//** -Validates the file space system and its segments. -@return TRUE if ok */ -UNIV_INTERN -ibool -fsp_validate( -/*=========*/ - ulint space) /*!< in: space id */ -{ - fsp_header_t* header; - fseg_inode_t* seg_inode; - page_t* seg_inode_page; - rw_lock_t* latch; - ulint size; - ulint flags; - ulint zip_size; - ulint free_limit; - ulint frag_n_used; - mtr_t mtr; - mtr_t mtr2; - xdes_t* descr; - fil_addr_t node_addr; - fil_addr_t next_node_addr; - ulint descr_count = 0; - ulint n_used = 0; - ulint n_used2 = 0; - ulint n_full_frag_pages; - ulint n; - ulint seg_inode_len_free; - ulint seg_inode_len_full; - - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); - ut_a(ut_is_2pow(zip_size)); - ut_a(zip_size <= UNIV_ZIP_SIZE_MAX); - ut_a(!zip_size || zip_size >= UNIV_ZIP_SIZE_MIN); - - /* Start first a mini-transaction mtr2 to lock out all other threads - from the fsp system */ - mtr_start(&mtr2); - mtr_x_lock(latch, &mtr2); - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, &mtr); - free_limit = mtr_read_ulint(header + FSP_FREE_LIMIT, - MLOG_4BYTES, &mtr); - frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, - MLOG_4BYTES, &mtr); - - n_full_frag_pages = FSP_EXTENT_SIZE - * flst_get_len(header + FSP_FULL_FRAG, &mtr); - - if (UNIV_UNLIKELY(free_limit > size)) { - - ut_a(space != 0); - ut_a(size < FSP_EXTENT_SIZE); - } - - flst_validate(header + FSP_FREE, &mtr); - flst_validate(header + FSP_FREE_FRAG, &mtr); - flst_validate(header + FSP_FULL_FRAG, &mtr); - - mtr_commit(&mtr); - - /* Validate FSP_FREE list */ - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - node_addr = flst_get_first(header + FSP_FREE, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - descr_count++; - descr = xdes_lst_get_descriptor(space, zip_size, - node_addr, &mtr); - - ut_a(xdes_get_n_used(descr, &mtr) == 0); - ut_a(xdes_get_state(descr, &mtr) == XDES_FREE); - - node_addr = flst_get_next_addr(descr + XDES_FLST_NODE, &mtr); - mtr_commit(&mtr); - } - - /* Validate FSP_FREE_FRAG list */ - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - node_addr = flst_get_first(header + FSP_FREE_FRAG, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - descr_count++; - descr = xdes_lst_get_descriptor(space, zip_size, - node_addr, &mtr); - - ut_a(xdes_get_n_used(descr, &mtr) > 0); - ut_a(xdes_get_n_used(descr, &mtr) < FSP_EXTENT_SIZE); - ut_a(xdes_get_state(descr, 
&mtr) == XDES_FREE_FRAG); - - n_used += xdes_get_n_used(descr, &mtr); - node_addr = flst_get_next_addr(descr + XDES_FLST_NODE, &mtr); - - mtr_commit(&mtr); - } - - /* Validate FSP_FULL_FRAG list */ - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - node_addr = flst_get_first(header + FSP_FULL_FRAG, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - descr_count++; - descr = xdes_lst_get_descriptor(space, zip_size, - node_addr, &mtr); - - ut_a(xdes_get_n_used(descr, &mtr) == FSP_EXTENT_SIZE); - ut_a(xdes_get_state(descr, &mtr) == XDES_FULL_FRAG); - - node_addr = flst_get_next_addr(descr + XDES_FLST_NODE, &mtr); - mtr_commit(&mtr); - } - - /* Validate segments */ - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - node_addr = flst_get_first(header + FSP_SEG_INODES_FULL, &mtr); - - seg_inode_len_full = flst_get_len(header + FSP_SEG_INODES_FULL, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - - n = 0; - do { - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - seg_inode_page = fut_get_ptr( - space, zip_size, node_addr, RW_X_LATCH, &mtr) - - FSEG_INODE_PAGE_NODE; - - seg_inode = fsp_seg_inode_page_get_nth_inode( - seg_inode_page, n, zip_size, &mtr); - ut_a(mach_read_from_8(seg_inode + FSEG_ID) != 0); - fseg_validate_low(seg_inode, &mtr); - - descr_count += flst_get_len(seg_inode + FSEG_FREE, - &mtr); - descr_count += flst_get_len(seg_inode + FSEG_FULL, - &mtr); - descr_count += flst_get_len(seg_inode + FSEG_NOT_FULL, - &mtr); - - n_used2 += fseg_get_n_frag_pages(seg_inode, &mtr); - - next_node_addr = flst_get_next_addr( - seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); - mtr_commit(&mtr); - } while (++n < FSP_SEG_INODES_PER_PAGE(zip_size)); - - node_addr = next_node_addr; - } - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - node_addr = flst_get_first(header + FSP_SEG_INODES_FREE, &mtr); - - seg_inode_len_free = flst_get_len(header + FSP_SEG_INODES_FREE, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - - n = 0; - - do { - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - seg_inode_page = fut_get_ptr( - space, zip_size, node_addr, RW_X_LATCH, &mtr) - - FSEG_INODE_PAGE_NODE; - - seg_inode = fsp_seg_inode_page_get_nth_inode( - seg_inode_page, n, zip_size, &mtr); - if (mach_read_from_8(seg_inode + FSEG_ID)) { - fseg_validate_low(seg_inode, &mtr); - - descr_count += flst_get_len( - seg_inode + FSEG_FREE, &mtr); - descr_count += flst_get_len( - seg_inode + FSEG_FULL, &mtr); - descr_count += flst_get_len( - seg_inode + FSEG_NOT_FULL, &mtr); - n_used2 += fseg_get_n_frag_pages( - seg_inode, &mtr); - } - - next_node_addr = flst_get_next_addr( - seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); - mtr_commit(&mtr); - } while (++n < FSP_SEG_INODES_PER_PAGE(zip_size)); - - node_addr = next_node_addr; - } - - ut_a(descr_count * FSP_EXTENT_SIZE == free_limit); - if (!zip_size) { - ut_a(n_used + n_full_frag_pages - == n_used2 + 2 * ((free_limit + (UNIV_PAGE_SIZE - 1)) - / UNIV_PAGE_SIZE) - + seg_inode_len_full + seg_inode_len_free); - } else { - ut_a(n_used + n_full_frag_pages - == n_used2 + 2 * ((free_limit + (zip_size - 1)) - / zip_size) - + seg_inode_len_full + seg_inode_len_free); - } - ut_a(frag_n_used == n_used); - - mtr_commit(&mtr2); - - return(TRUE); -} - 
-/*******************************************************************//** -Prints info of a file space. */ -UNIV_INTERN -void -fsp_print( -/*======*/ - ulint space) /*!< in: space id */ +#ifdef UNIV_DEBUG +/** Print the file segment header to the given output stream. +@param[in] out the output stream into which the object is printed. +@retval the output stream into which the object was printed. */ +std::ostream& +fseg_header::to_stream(std::ostream& out) const { - fsp_header_t* header; - fseg_inode_t* seg_inode; - page_t* seg_inode_page; - rw_lock_t* latch; - ulint flags; - ulint zip_size; - ulint size; - ulint free_limit; - ulint frag_n_used; - fil_addr_t node_addr; - fil_addr_t next_node_addr; - ulint n_free; - ulint n_free_frag; - ulint n_full_frag; - ib_id_t seg_id; - ulint n; - ulint n_segs = 0; - mtr_t mtr; - mtr_t mtr2; - - latch = fil_space_get_latch(space, &flags); - zip_size = fsp_flags_get_zip_size(flags); + const ulint space = mtr_read_ulint(m_header + FSEG_HDR_SPACE, + MLOG_4BYTES, m_mtr); - /* Start first a mini-transaction mtr2 to lock out all other threads - from the fsp system */ + const ulint page_no = mtr_read_ulint(m_header + FSEG_HDR_PAGE_NO, + MLOG_4BYTES, m_mtr); - mtr_start(&mtr2); + const ulint offset = mtr_read_ulint(m_header + FSEG_HDR_OFFSET, + MLOG_2BYTES, m_mtr); - mtr_x_lock(latch, &mtr2); + out << "[fseg_header_t: space=" << space << ", page=" + << page_no << ", offset=" << offset << "]"; - mtr_start(&mtr); - - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, &mtr); - - free_limit = mtr_read_ulint(header + FSP_FREE_LIMIT, MLOG_4BYTES, - &mtr); - frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED, MLOG_4BYTES, - &mtr); - n_free = flst_get_len(header + FSP_FREE, &mtr); - n_free_frag = flst_get_len(header + FSP_FREE_FRAG, &mtr); - n_full_frag = flst_get_len(header + FSP_FULL_FRAG, &mtr); - - seg_id = mach_read_from_8(header + FSP_SEG_ID); - - fprintf(stderr, - "FILE SPACE INFO: id %lu\n" - "size %lu, free limit %lu, free extents %lu\n" - "not full frag extents %lu: used pages %lu," - " full frag extents %lu\n" - "first seg id not used %llu\n", - (ulong) space, - (ulong) size, (ulong) free_limit, (ulong) n_free, - (ulong) n_free_frag, (ulong) frag_n_used, (ulong) n_full_frag, - (ullint) seg_id); - - mtr_commit(&mtr); - - /* Print segments */ - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - node_addr = flst_get_first(header + FSP_SEG_INODES_FULL, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - - n = 0; - - do { - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - seg_inode_page = fut_get_ptr( - space, zip_size, node_addr, RW_X_LATCH, &mtr) - - FSEG_INODE_PAGE_NODE; - - seg_inode = fsp_seg_inode_page_get_nth_inode( - seg_inode_page, n, zip_size, &mtr); - ut_a(mach_read_from_8(seg_inode + FSEG_ID) != 0); - fseg_print_low(seg_inode, &mtr); - - n_segs++; - - next_node_addr = flst_get_next_addr( - seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); - mtr_commit(&mtr); - } while (++n < FSP_SEG_INODES_PER_PAGE(zip_size)); - - node_addr = next_node_addr; - } - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - header = fsp_get_space_header(space, zip_size, &mtr); - - node_addr = flst_get_first(header + FSP_SEG_INODES_FREE, &mtr); - - mtr_commit(&mtr); - - while (!fil_addr_is_null(node_addr)) { - - n = 0; - - do { - - mtr_start(&mtr); - mtr_x_lock(latch, &mtr); - - seg_inode_page = fut_get_ptr( - 
space, zip_size, node_addr, RW_X_LATCH, &mtr) - - FSEG_INODE_PAGE_NODE; - - seg_inode = fsp_seg_inode_page_get_nth_inode( - seg_inode_page, n, zip_size, &mtr); - if (mach_read_from_8(seg_inode + FSEG_ID)) { - - fseg_print_low(seg_inode, &mtr); - n_segs++; - } - - next_node_addr = flst_get_next_addr( - seg_inode_page + FSEG_INODE_PAGE_NODE, &mtr); - mtr_commit(&mtr); - } while (++n < FSP_SEG_INODES_PER_PAGE(zip_size)); - - node_addr = next_node_addr; - } - - mtr_commit(&mtr2); - - fprintf(stderr, "NUMBER of file segments: %lu\n", (ulong) n_segs); + return(out); } -#endif /* !UNIV_HOTBACKUP */ +#endif /* UNIV_DEBUG */ /**********************************************************************//** Compute offset after xdes where crypt data can be stored @@ -4131,13 +3949,14 @@ Compute offset after xdes where crypt data can be stored ulint fsp_header_get_crypt_offset( /*========================*/ - ulint zip_size, /*!< in: zip_size */ - ulint* max_size) /*!< out: free space available for crypt data */ + const page_size_t& page_size,/*!< in: page size */ + ulint* max_size) /*!< out: free space available for crypt data */ { ulint pageno = 0; /* compute first page_no that will have xdes stored on page != 0*/ + for (ulint i = 0; - (pageno = xdes_calc_descriptor_page(zip_size, i)) == 0; ) + (pageno = xdes_calc_descriptor_page(page_size, i)) == 0; ) i++; /* use pageno prior to this...i.e last page on page 0 */ @@ -4145,40 +3964,13 @@ fsp_header_get_crypt_offset( pageno--; ulint iv_offset = XDES_ARR_OFFSET + - XDES_SIZE * (1 + xdes_calc_descriptor_index(zip_size, pageno)); + XDES_SIZE * (1 + xdes_calc_descriptor_index(page_size, pageno)); if (max_size != NULL) { /* return how much free space there is available on page */ - *max_size = (zip_size ? zip_size : UNIV_PAGE_SIZE) - + *max_size = (page_size.logical() ? page_size.logical() : UNIV_PAGE_SIZE) - (FSP_HEADER_OFFSET + iv_offset + FIL_PAGE_DATA_END); } return FSP_HEADER_OFFSET + iv_offset; } - -/**********************************************************************//** -Checks if a single page is free. -@return true if free */ -UNIV_INTERN -bool -fsp_page_is_free_func( -/*==============*/ - ulint space, /*!< in: space id */ - ulint page_no, /*!< in: page offset */ - mtr_t* mtr, /*!< in/out: mini-transaction */ - const char *file, - ulint line) -{ - ulint flags; - - ut_ad(mtr); - - mtr_x_lock_func(fil_space_get_latch(space, &flags), file, line, mtr); - ulint zip_size = fsp_flags_get_zip_size(flags); - - xdes_t* descr = xdes_get_descriptor(space, zip_size, page_no, mtr); - ut_a(descr); - - return xdes_mtr_get_bit( - descr, XDES_FREE_BIT, page_no % FSP_EXTENT_SIZE, mtr); -} diff --git a/storage/innobase/fsp/fsp0space.cc b/storage/innobase/fsp/fsp0space.cc new file mode 100644 index 00000000000..6f1ef8ceb9d --- /dev/null +++ b/storage/innobase/fsp/fsp0space.cc @@ -0,0 +1,291 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file fsp/fsp0space.cc +General shared tablespace implementation. + +Created 2012-11-16 by Sunny Bains as srv/srv0space.cc +*******************************************************/ + +#include "ha_prototypes.h" + +#include "fsp0space.h" +#include "fsp0sysspace.h" +#include "fsp0fsp.h" +#include "os0file.h" + +#include "my_sys.h" + + +/** Check if two tablespaces have common data file names. +@param other_space Tablespace to check against this. +@return true if they have the same data filenames and paths */ +bool +Tablespace::intersection( + const Tablespace* other_space) +{ + files_t::const_iterator end = other_space->m_files.end(); + + for (files_t::const_iterator it = other_space->m_files.begin(); + it != end; + ++it) { + + if (find(it->m_filename)) { + + return(true); + } + } + + return(false); +} + +/** Frees the memory allocated by the SysTablespace object. */ +void +Tablespace::shutdown() +{ + files_t::iterator end = m_files.end(); + + for (files_t::iterator it = m_files.begin(); it != end; ++it) { + it->shutdown(); + } + + m_files.clear(); + + m_space_id = ULINT_UNDEFINED; +} + +/** Get the sum of the file sizes of each Datafile in a tablespace +@return ULINT_UNDEFINED if the size is invalid else the sum of sizes */ +ulint +Tablespace::get_sum_of_sizes() const +{ + ulint sum = 0; + + files_t::const_iterator end = m_files.end(); + + for (files_t::const_iterator it = m_files.begin(); it != end; ++it) { + +#ifndef _WIN32 + if (sizeof(off_t) < 5 + && it->m_size >= (1UL << (32UL - UNIV_PAGE_SIZE_SHIFT))) { + + ib::error() << "File size must be < 4 GB with this" + " MySQL binary-operating system combination." + " In some OS's < 2 GB"; + + return(ULINT_UNDEFINED); + } +#endif /* _WIN32 */ + sum += it->m_size; + } + + return(sum); +} + +/** Note that the data file was found. +@param[in,out] file Data file object to set */ +void +Tablespace::file_found(Datafile& file) +{ + /* Note that the file exists and can be opened + in the appropriate mode. */ + file.m_exists = true; + + file.set_open_flags( + &file == &m_files.front() + ? OS_FILE_OPEN_RETRY : OS_FILE_OPEN); +} + +/** Open or Create the data files if they do not exist. +@param[in] is_temp whether this is a temporary tablespace +@return DB_SUCCESS or error code */ +dberr_t +Tablespace::open_or_create(bool is_temp) +{ + fil_space_t* space = NULL; + dberr_t err = DB_SUCCESS; + + ut_ad(!m_files.empty()); + + files_t::iterator begin = m_files.begin(); + files_t::iterator end = m_files.end(); + + for (files_t::iterator it = begin; it != end; ++it) { + + if (it->m_exists) { + err = it->open_or_create( + m_ignore_read_only + ? false : srv_read_only_mode); + } else { + err = it->open_or_create( + m_ignore_read_only + ? false : srv_read_only_mode); + + /* Set the correct open flags now that we have + successfully created the file. 
*/ + if (err == DB_SUCCESS) { + file_found(*it); + } + } + + if (err != DB_SUCCESS) { + break; + } + + bool atomic_write; + +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) + if (!srv_use_doublewrite_buf) { + atomic_write = fil_fusionio_enable_atomic_write( + it->m_handle); + } else { + atomic_write = false; + } +#else + atomic_write = false; +#endif /* !NO_FALLOCATE && UNIV_LINUX */ + + /* We can close the handle now and open the tablespace + the proper way. */ + it->close(); + + if (it == begin) { + /* First data file. */ + + ulint flags; + + flags = fsp_flags_set_page_size(0, univ_page_size); + + /* Create the tablespace entry for the multi-file + tablespace in the tablespace manager. */ + space = fil_space_create( + m_name, m_space_id, flags, is_temp + ? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, it->m_crypt_info); + } + + ut_a(fil_validate()); + + /* Create the tablespace node entry for this data file. */ + if (!fil_node_create( + it->m_filepath, it->m_size, space, false, + atomic_write)) { + + err = DB_ERROR; + break; + } + } + + return(err); +} + +/** Find a filename in the list of Datafiles for a tablespace +@return true if the filename exists in the data files */ +bool +Tablespace::find(const char* filename) +{ + files_t::const_iterator end = m_files.end(); + + for (files_t::const_iterator it = m_files.begin(); it != end; ++it) { + + if (innobase_strcasecmp(filename, it->m_filename) == 0) { + return(true); + } + } + + return(false); +} + + +/** Delete all the data files. */ +void +Tablespace::delete_files() +{ + files_t::iterator end = m_files.end(); + + for (files_t::iterator it = m_files.begin(); it != end; ++it) { + + it->close(); + + bool file_pre_exists; + bool success = os_file_delete_if_exists( + innodb_data_file_key, it->m_filepath, &file_pre_exists); + + if (success && file_pre_exists) { + ib::info() << "Removed temporary tablespace data" + " file: \"" << it->m_name << "\""; + } + } +} + +/** Check if undo tablespace. +@return true if undo tablespace */ +bool +Tablespace::is_undo_tablespace( + ulint id) +{ + return(id <= srv_undo_tablespaces_open + && id != srv_sys_space.space_id() + && id != srv_tmp_space.space_id()); +} + +/** Use the ADD DATAFILE path to create a Datafile object and add it to the +front of m_files. +Parse the datafile path into a path and a filename with extension 'ibd'. +This datafile_path provided may or may not be an absolute path, but it +must end with the extension .ibd and have a basename of at least 1 byte. + +Set tablespace m_path member and add a Datafile with the filename. +@param[in] datafile_path full path of the tablespace file. */ +dberr_t +Tablespace::add_datafile( + const char* datafile_added) +{ + /* The path provided ends in ".ibd". This was assured by + validate_create_tablespace_info() */ + ut_d(const char* dot = strrchr(datafile_added, '.')); + ut_ad(dot != NULL && 0 == strcmp(dot, DOT_IBD)); + + char* filepath = mem_strdup(datafile_added); + os_normalize_path(filepath); + + /* If the path is an absolute path, separate it onto m_path and a + basename. For relative paths, make the whole thing a basename so that + it can be appended to the datadir. */ + bool is_abs_path = is_absolute_path(filepath); + size_t dirlen = (is_abs_path ? dirname_length(filepath) : 0); + const char* basename = filepath + dirlen; + + /* If the pathname contains a directory separator, fill the + m_path member which is the default directory for files in this + tablespace. Leave it null otherwise. 
*/ + if (dirlen > 0) { + set_path(filepath, dirlen); + } + + /* Now add a new Datafile and set the filepath + using the m_path created above. */ + m_files.push_back(Datafile(m_name, m_flags, + FIL_IBD_FILE_INITIAL_SIZE, 0)); + Datafile* datafile = &m_files.back(); + datafile->make_filepath(m_path, basename, IBD); + + ut_free(filepath); + + return(DB_SUCCESS); +} diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc new file mode 100644 index 00000000000..b1d3ab92c7e --- /dev/null +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -0,0 +1,1050 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file fsp/fsp0space.cc +Multi file, shared, system tablespace implementation. + +Created 2012-11-16 by Sunny Bains as srv/srv0space.cc +Refactored 2013-7-26 by Kevin Lewis +*******************************************************/ + +#include "ha_prototypes.h" + +#include "fsp0sysspace.h" +#include "srv0start.h" +#include "trx0sys.h" +#ifndef UNIV_HOTBACKUP +#include "dict0load.h" +#include "mem0mem.h" +#include "os0file.h" +#include "row0mysql.h" +#include "ut0new.h" + +/** The server header file is included to access opt_initialize global variable. +If server passes the option for create/open DB to SE, we should remove such +direct reference to server header and global variable */ +#include "mysqld.h" +#else +my_bool opt_initialize = 0; +#endif /* !UNIV_HOTBACKUP */ + +/** The control info of the system tablespace. */ +SysTablespace srv_sys_space; + +/** The control info of a temporary table shared tablespace. */ +SysTablespace srv_tmp_space; + +/** If the last data file is auto-extended, we add this many pages to it +at a time. We have to make this public because it is a config variable. */ +ulong sys_tablespace_auto_extend_increment; + +#ifdef UNIV_DEBUG +/** Control if extra debug checks need to be done for temporary tablespace. +Default = true that is disable such checks. +This variable is not exposed to end-user but still kept as variable for +developer to enable it during debug. */ +bool srv_skip_temp_table_checks_debug = true; +#endif /* UNIV_DEBUG */ + +/** Convert a numeric string that optionally ends in G or M or K, + to a number containing megabytes. 
+@param[in] str String with a quantity in bytes +@param[out] megs The number in megabytes +@return next character in string */ +char* +SysTablespace::parse_units( + char* ptr, + ulint* megs) +{ + char* endp; + + *megs = strtoul(ptr, &endp, 10); + + ptr = endp; + + switch (*ptr) { + case 'G': case 'g': + *megs *= 1024; + /* fall through */ + case 'M': case 'm': + ++ptr; + break; + case 'K': case 'k': + *megs /= 1024; + ++ptr; + break; + default: + *megs /= 1024 * 1024; + break; + } + + return(ptr); +} + +/** Parse the input params and populate member variables. +@param[in] filepath path to data files +@param[in] supports_raw true if the tablespace supports raw devices +@return true on success parse */ +bool +SysTablespace::parse_params( + const char* filepath_spec, + bool supports_raw) +{ + char* filepath; + ulint size; + char* input_str; + ulint n_files = 0; + + ut_ad(m_last_file_size_max == 0); + ut_ad(!m_auto_extend_last_file); + + char* new_str = mem_strdup(filepath_spec); + char* str = new_str; + + input_str = str; + + /*---------------------- PASS 1 ---------------------------*/ + /* First calculate the number of data files and check syntax: + filepath:size[K |M | G];filepath:size[K |M | G]... . + Note that a Windows path may contain a drive name and a ':'. */ + while (*str != '\0') { + filepath = str; + + while ((*str != ':' && *str != '\0') + || (*str == ':' + && (*(str + 1) == '\\' || *(str + 1) == '/' + || *(str + 1) == ':'))) { + str++; + } + + if (*str == '\0') { + ut_free(new_str); + + ib::error() + << "syntax error in file path or size" + " specified is less than 1 megabyte"; + return(false); + } + + str++; + + str = parse_units(str, &size); + + if (0 == strncmp(str, ":autoextend", + (sizeof ":autoextend") - 1)) { + + str += (sizeof ":autoextend") - 1; + + if (0 == strncmp(str, ":max:", + (sizeof ":max:") - 1)) { + + str += (sizeof ":max:") - 1; + + str = parse_units(str, &size); + } + + if (*str != '\0') { + ut_free(new_str); + ib::error() + << "syntax error in file path or" + << " size specified is less than" + << " 1 megabyte"; + return(false); + } + } + + if (::strlen(str) >= 6 + && *str == 'n' + && *(str + 1) == 'e' + && *(str + 2) == 'w') { + + if (!supports_raw) { + ib::error() + << "Tablespace doesn't support raw" + " devices"; + ut_free(new_str); + return(false); + } + + str += 3; + } + + if (*str == 'r' && *(str + 1) == 'a' && *(str + 2) == 'w') { + str += 3; + + if (!supports_raw) { + ib::error() + << "Tablespace doesn't support raw" + " devices"; + ut_free(new_str); + return(false); + } + } + + if (size == 0) { + + ut_free(new_str); + + ib::error() + << "syntax error in file path or size" + " specified is less than 1 megabyte"; + + return(false); + } + + ++n_files; + + if (*str == ';') { + str++; + } else if (*str != '\0') { + ut_free(new_str); + + ib::error() + << "syntax error in file path or size" + " specified is less than 1 megabyte"; + return(false); + } + } + + if (n_files == 0) { + + /* filepath_spec must contain at least one data file + definition */ + + ut_free(new_str); + + ib::error() + << "syntax error in file path or size specified" + " is less than 1 megabyte"; + + return(false); + } + + /*---------------------- PASS 2 ---------------------------*/ + /* Then store the actual values to our arrays */ + str = input_str; + ulint order = 0; + + while (*str != '\0') { + filepath = str; + + /* Note that we must step over the ':' in a Windows filepath; + a Windows path normally looks like C:\ibdata\ibdata1:1G, but + a Windows raw partition may have a 
specification like + \\.\C::1Gnewraw or \\.\PHYSICALDRIVE2:1Gnewraw */ + + while ((*str != ':' && *str != '\0') + || (*str == ':' + && (*(str + 1) == '\\' || *(str + 1) == '/' + || *(str + 1) == ':'))) { + str++; + } + + if (*str == ':') { + /* Make filepath a null-terminated string */ + *str = '\0'; + str++; + } + + str = parse_units(str, &size); + + if (0 == strncmp(str, ":autoextend", + (sizeof ":autoextend") - 1)) { + + m_auto_extend_last_file = true; + + str += (sizeof ":autoextend") - 1; + + if (0 == strncmp(str, ":max:", + (sizeof ":max:") - 1)) { + + str += (sizeof ":max:") - 1; + + str = parse_units(str, &m_last_file_size_max); + } + + if (*str != '\0') { + ut_free(new_str); + ib::error() << "syntax error in file path or" + " size specified is less than 1" + " megabyte"; + return(false); + } + } + + m_files.push_back(Datafile(filepath, flags(), size, order)); + Datafile* datafile = &m_files.back(); + datafile->make_filepath(path(), filepath, NO_EXT); + + if (::strlen(str) >= 6 + && *str == 'n' + && *(str + 1) == 'e' + && *(str + 2) == 'w') { + + ut_a(supports_raw); + + str += 3; + + /* Initialize new raw device only during initialize */ + /* JAN: TODO: MySQL 5.7 used opt_initialize */ + m_files.back().m_type = + opt_bootstrap ? SRV_NEW_RAW : SRV_OLD_RAW; + } + + if (*str == 'r' && *(str + 1) == 'a' && *(str + 2) == 'w') { + + ut_a(supports_raw); + + str += 3; + + /* Initialize new raw device only during initialize */ + if (m_files.back().m_type == SRV_NOT_RAW) { + /* JAN: TODO: MySQL 5.7 used opt_initialize */ + m_files.back().m_type = + opt_bootstrap ? SRV_NEW_RAW : SRV_OLD_RAW; + } + } + + if (*str == ';') { + ++str; + } + order++; + } + + ut_ad(n_files == ulint(m_files.size())); + + ut_free(new_str); + + return(true); +} + +/** Frees the memory allocated by the parse method. */ +void +SysTablespace::shutdown() +{ + Tablespace::shutdown(); + + m_auto_extend_last_file = 0; + m_last_file_size_max = 0; + m_created_new_raw = 0; + m_is_tablespace_full = false; + m_sanity_checks_done = false; +} + +/** Verify the size of the physical file. +@param[in] file data file object +@return DB_SUCCESS if OK else error code. */ +dberr_t +SysTablespace::check_size( + Datafile& file) +{ + os_offset_t size = os_file_get_size(file.m_handle); + ut_a(size != (os_offset_t) -1); + + /* Round size downward to megabytes */ + ulint rounded_size_pages = (ulint) (size >> UNIV_PAGE_SIZE_SHIFT); + + /* If last file */ + if (&file == &m_files.back() && m_auto_extend_last_file) { + + if (file.m_size > rounded_size_pages + || (m_last_file_size_max > 0 + && m_last_file_size_max < rounded_size_pages)) { + ib::error() << "The Auto-extending " << name() + << " data file '" << file.filepath() << "' is" + " of a different size " << rounded_size_pages + << " pages (rounded down to MB) than specified" + " in the .cnf file: initial " << file.m_size + << " pages, max " << m_last_file_size_max + << " (relevant if non-zero) pages!"; + return(DB_ERROR); + } + + file.m_size = rounded_size_pages; + } + + if (rounded_size_pages != file.m_size) { + ib::error() << "The " << name() << " data file '" + << file.filepath() << "' is of a different size " + << rounded_size_pages << " pages (rounded down to MB)" + " than the " << file.m_size << " pages specified in" + " the .cnf file!"; + return(DB_ERROR); + } + + return(DB_SUCCESS); +} + +/** Set the size of the file. 
+@param[in] file data file object +@return DB_SUCCESS or error code */ +dberr_t +SysTablespace::set_size( + Datafile& file) +{ + ut_a(!srv_read_only_mode || m_ignore_read_only); + + /* We created the data file and now write it full of zeros */ + ib::info() << "Setting file '" << file.filepath() << "' size to " + << (file.m_size >> (20 - UNIV_PAGE_SIZE_SHIFT)) << " MB." + " Physically writing the file full; Please wait ..."; + + bool success = os_file_set_size( + file.m_filepath, file.m_handle, + static_cast(file.m_size << UNIV_PAGE_SIZE_SHIFT), + m_ignore_read_only ? false : srv_read_only_mode); + + if (success) { + ib::info() << "File '" << file.filepath() << "' size is now " + << (file.m_size >> (20 - UNIV_PAGE_SIZE_SHIFT)) + << " MB."; + } else { + ib::error() << "Could not set the file size of '" + << file.filepath() << "'. Probably out of disk space"; + + return(DB_ERROR); + } + + return(DB_SUCCESS); +} + +/** Create a data file. +@param[in] file data file object +@return DB_SUCCESS or error code */ +dberr_t +SysTablespace::create_file( + Datafile& file) +{ + dberr_t err = DB_SUCCESS; + + ut_a(!file.m_exists); + ut_a(!srv_read_only_mode || m_ignore_read_only); + + switch (file.m_type) { + case SRV_NEW_RAW: + + /* The partition is opened, not created; then it is + written over */ + m_created_new_raw = true; + + /* Fall through. */ + + case SRV_OLD_RAW: + + srv_start_raw_disk_in_use = TRUE; + + /* Fall through. */ + + case SRV_NOT_RAW: + err = file.open_or_create( + m_ignore_read_only ? false : srv_read_only_mode); + break; + } + + + if (err == DB_SUCCESS && file.m_type != SRV_OLD_RAW) { + err = set_size(file); + } + + return(err); +} + +/** Open a data file. +@param[in] file data file object +@return DB_SUCCESS or error code */ +dberr_t +SysTablespace::open_file( + Datafile& file) +{ + dberr_t err = DB_SUCCESS; + + ut_a(file.m_exists); + + switch (file.m_type) { + case SRV_NEW_RAW: + /* The partition is opened, not created; then it is + written over */ + m_created_new_raw = true; + + /* Fall through */ + + case SRV_OLD_RAW: + srv_start_raw_disk_in_use = TRUE; + + if (srv_read_only_mode && !m_ignore_read_only) { + ib::error() << "Can't open a raw device '" + << file.m_filepath << "' when" + " --innodb-read-only is set"; + + return(DB_ERROR); + } + + /* Fall through */ + + case SRV_NOT_RAW: + err = file.open_or_create( + m_ignore_read_only ? false : srv_read_only_mode); + + if (err != DB_SUCCESS) { + return(err); + } + break; + } + + switch (file.m_type) { + case SRV_NEW_RAW: + /* Set file size for new raw device. */ + err = set_size(file); + break; + + case SRV_NOT_RAW: + /* Check file size for existing file. */ + err = check_size(file); + break; + + case SRV_OLD_RAW: + err = DB_SUCCESS; + break; + + } + + if (err != DB_SUCCESS) { + file.close(); + } + + return(err); +} + +/** Check the tablespace header for this tablespace. +@param[out] flushed_lsn the value of FIL_PAGE_FILE_FLUSH_LSN +@return DB_SUCCESS or error code */ +dberr_t +SysTablespace::read_lsn_and_check_flags(lsn_t* flushed_lsn) +{ + dberr_t err; + + /* Only relevant for the system tablespace. */ + ut_ad(space_id() == TRX_SYS_SPACE); + + files_t::iterator it = m_files.begin(); + + ut_a(it->m_exists); + + if (it->m_handle == OS_FILE_CLOSED) { + + err = it->open_or_create( + m_ignore_read_only ? false : srv_read_only_mode); + + if (err != DB_SUCCESS) { + return(err); + } + } + + err = it->read_first_page( + m_ignore_read_only ? 
false : srv_read_only_mode);
+
+	m_crypt_info = it->m_crypt_info;
+
+	if (err != DB_SUCCESS) {
+		return(err);
+	}
+
+	ut_a(it->order() == 0);
+
+
+	buf_dblwr_init_or_load_pages(it->handle(), it->filepath());
+
+	/* Check the contents of the first page of the
+	first datafile. */
+	for (int retry = 0; retry < 2; ++retry) {
+
+		err = it->validate_first_page(flushed_lsn);
+
+		if (err != DB_SUCCESS
+		    && (retry == 1
+			|| it->restore_from_doublewrite(0) != DB_SUCCESS)) {
+
+			it->close();
+
+			return(err);
+		}
+	}
+
+	/* Make sure the tablespace space ID matches the
+	space ID on the first page of the first datafile. */
+	if (space_id() != it->m_space_id) {
+
+		ib::error()
+			<< "The " << name() << " data file '" << it->name()
+			<< "' has the wrong space ID. It should be "
+			<< space_id() << ", but " << it->m_space_id
+			<< " was found";
+
+		it->close();
+
+		return(err);
+	}
+
+	it->close();
+
+	return(DB_SUCCESS);
+}
+
+/** Check if a file can be opened in the correct mode.
+@param[in]	file	data file object
+@param[out]	reason	exact reason if file_status check failed.
+@return DB_SUCCESS or error code. */
+dberr_t
+SysTablespace::check_file_status(
+	const Datafile&	file,
+	file_status_t&	reason)
+{
+	os_file_stat_t	stat;
+
+	memset(&stat, 0x0, sizeof(stat));
+
+	dberr_t	err = os_file_get_status(
+		file.m_filepath, &stat, true,
+		m_ignore_read_only ? false : srv_read_only_mode);
+
+	reason = FILE_STATUS_VOID;
+	/* File exists but we can't read the rw-permission settings. */
+	switch (err) {
+	case DB_FAIL:
+		ib::error() << "os_file_get_status() failed on '"
+			<< file.filepath()
+			<< "'. Can't determine file permissions";
+		err = DB_ERROR;
+		reason = FILE_STATUS_RW_PERMISSION_ERROR;
+		break;
+
+	case DB_SUCCESS:
+
+		/* Note: stat.rw_perm is only valid for "regular" files */
+
+		if (stat.type == OS_FILE_TYPE_FILE) {
+
+			if (!stat.rw_perm) {
+				const char *p = (!srv_read_only_mode
+						 || m_ignore_read_only)
+						 ? "writable"
+						 : "readable";
+
+				ib::error() << "The " << name() << " data file"
+					<< " '" << file.name() << "' must be "
+					<< p;
+
+				err = DB_ERROR;
+				reason = FILE_STATUS_READ_WRITE_ERROR;
+			}
+
+		} else {
+			/* Not a regular file, bail out. */
+			ib::error() << "The " << name() << " data file '"
+				<< file.name() << "' is not a regular"
+				" InnoDB data file.";
+
+			err = DB_ERROR;
+			reason = FILE_STATUS_NOT_REGULAR_FILE_ERROR;
+		}
+		break;
+
+	case DB_NOT_FOUND:
+		break;
+
+	default:
+		ut_ad(0);
+	}
+
+	return(err);
+}
+
+/** Note that the data file was not found.
+@param[in]	file		data file object
+@param[out]	create_new_db	true if a new instance to be created
+@return DB_SUCCESS or error code */
+dberr_t
+SysTablespace::file_not_found(
+	Datafile&	file,
+	bool*		create_new_db)
+{
+	file.m_exists = false;
+
+	if (srv_read_only_mode && !m_ignore_read_only) {
+		ib::error() << "Can't create file '" << file.filepath()
+			<< "' when --innodb-read-only is set";
+
+		return(DB_ERROR);
+
+	} else if (&file == &m_files.front()) {
+
+		/* First data file. */
+		ut_a(!*create_new_db);
+		*create_new_db = TRUE;
+
+		if (space_id() == TRX_SYS_SPACE) {
+			ib::info() << "The first " << name() << " data file '"
+				<< file.name() << "' did not exist."
+				" A new tablespace will be created!";
+		}
+
+	} else {
+		ib::info() << "Need to create a new " << name()
+			<< " data file '" << file.name() << "'.";
+	}
+
+	/* Set the file create mode. */
+	switch (file.m_type) {
+	case SRV_NOT_RAW:
+		file.set_open_flags(OS_FILE_CREATE);
+		break;
+
+	case SRV_NEW_RAW:
+	case SRV_OLD_RAW:
+		file.set_open_flags(OS_FILE_OPEN_RAW);
+		break;
+	}
+
+	return(DB_SUCCESS);
+}
+
+/** Note that the data file was found.
+@param[in,out]	file	data file object
+@return true if a new instance to be created */
+bool
+SysTablespace::file_found(
+	Datafile&	file)
+{
+	/* Note that the file exists and can be opened
+	in the appropriate mode. */
+	file.m_exists = true;
+
+	/* Set the file open mode */
+	switch (file.m_type) {
+	case SRV_NOT_RAW:
+		file.set_open_flags(
+			&file == &m_files.front()
+			? OS_FILE_OPEN_RETRY : OS_FILE_OPEN);
+		break;
+
+	case SRV_NEW_RAW:
+	case SRV_OLD_RAW:
+		file.set_open_flags(OS_FILE_OPEN_RAW);
+		break;
+	}
+
+	/* Need to create the system tablespace for new raw device. */
+	return(file.m_type == SRV_NEW_RAW);
+}
+
+/** Check the data file specification.
+@param[out]	create_new_db		true if a new database is to be created
+@param[in]	min_expected_size	Minimum expected tablespace size in bytes
+@return DB_SUCCESS if all OK else error code */
+dberr_t
+SysTablespace::check_file_spec(
+	bool*	create_new_db,
+	ulint	min_expected_size)
+{
+	*create_new_db = FALSE;
+
+	if (m_files.size() >= 1000) {
+		ib::error() << "There must be < 1000 data files in "
+			<< name() << " but " << m_files.size() << " have been"
+			" defined.";
+
+		return(DB_ERROR);
+	}
+
+	ulint	tablespace_size = get_sum_of_sizes();
+	if (tablespace_size == ULINT_UNDEFINED) {
+		return(DB_ERROR);
+	} else if (tablespace_size
+		   < min_expected_size / UNIV_PAGE_SIZE) {
+
+		ib::error() << "Tablespace size must be at least "
+			<< min_expected_size / (1024 * 1024) << " MB";
+
+		return(DB_ERROR);
+	}
+
+	dberr_t	err = DB_SUCCESS;
+
+	ut_a(!m_files.empty());
+
+	/* If there is more than one data file and the last data file
+	doesn't exist, that is OK. We allow adding of new data files. */
+
+	files_t::iterator	begin = m_files.begin();
+	files_t::iterator	end = m_files.end();
+
+	for (files_t::iterator it = begin; it != end; ++it) {
+
+		file_status_t	reason_if_failed;
+		err = check_file_status(*it, reason_if_failed);
+
+		if (err == DB_NOT_FOUND) {
+
+			err = file_not_found(*it, create_new_db);
+
+			if (err != DB_SUCCESS) {
+				break;
+			}
+
+		} else if (err != DB_SUCCESS) {
+			if (reason_if_failed == FILE_STATUS_READ_WRITE_ERROR) {
+				const char* p = (!srv_read_only_mode
+						 || m_ignore_read_only)
+						 ? "writable" : "readable";
+				ib::error() << "The " << name() << " data file"
+					<< " '" << it->name() << "' must be "
+					<< p;
+			}
+
+			ut_a(err != DB_FAIL);
+			break;
+
+		} else if (*create_new_db) {
+			ib::error() << "The " << name() << " data file '"
+				<< begin->m_name << "' was not found but"
+				" one of the other data files '" << it->m_name
+				<< "' exists.";
+
+			err = DB_ERROR;
+			break;
+
+		} else {
+			*create_new_db = file_found(*it);
+		}
+	}
+
+	/* We assume doublewrite blocks in the first data file. */
+	if (err == DB_SUCCESS && *create_new_db
+	    && begin->m_size < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3) {
+		ib::error() << "The " << name() << " data file "
+			<< "'" << begin->name() << "' must be at least "
+			<< TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 3 * UNIV_PAGE_SIZE
+			/ (1024 * 1024) << " MB";
+
+		err = DB_ERROR;
+	}
+
+	return(err);
+}
+
+/** Open or create the data files
+@param[in]	is_temp		whether this is a temporary tablespace
+@param[in]	create_new_db	whether we are creating a new database
+@param[out]	sum_new_sizes	sum of sizes of the new files added
+@param[out]	flush_lsn	FIL_PAGE_FILE_FLUSH_LSN of first file
+@return DB_SUCCESS or error code */
+dberr_t
+SysTablespace::open_or_create(
+	bool	is_temp,
+	bool	create_new_db,
+	ulint*	sum_new_sizes,
+	lsn_t*	flush_lsn)
+{
+	dberr_t		err = DB_SUCCESS;
+	fil_space_t*	space = NULL;
+
+	ut_ad(!m_files.empty());
+
+	if (sum_new_sizes) {
+		*sum_new_sizes = 0;
+	}
+
+	files_t::iterator	begin = m_files.begin();
+	files_t::iterator	end = m_files.end();
+
+	ut_ad(begin->order() == 0);
+
+	for (files_t::iterator it = begin; it != end; ++it) {
+
+		if (it->m_exists) {
+			err = open_file(*it);
+
+			/* For new raw device increment new size. */
+			if (sum_new_sizes && it->m_type == SRV_NEW_RAW) {
+
+				*sum_new_sizes += it->m_size;
+			}
+
+		} else {
+			err = create_file(*it);
+
+			if (sum_new_sizes) {
+				*sum_new_sizes += it->m_size;
+			}
+
+			/* Set the correct open flags now that we have
+			successfully created the file. */
+			if (err == DB_SUCCESS) {
+				/* We ignore new_db OUT parameter here
+				as the information is known at this stage */
+				file_found(*it);
+			}
+		}
+
+		if (err != DB_SUCCESS) {
+			return(err);
+		}
+
+#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX)
+		/* Note: This should really be per node and not per
+		tablespace because a tablespace can contain multiple
+		files (nodes). The implication is that all files of
+		the tablespace should be on the same medium. */
+
+		if (fil_fusionio_enable_atomic_write(it->m_handle)) {
+
+			if (srv_use_doublewrite_buf) {
+				ib::info() << "FusionIO atomic IO enabled,"
+					" disabling the double write buffer";
+
+				srv_use_doublewrite_buf = false;
+			}
+
+			it->m_atomic_write = true;
+		} else {
+			it->m_atomic_write = false;
+		}
+#else
+		it->m_atomic_write = false;
+#endif /* !NO_FALLOCATE && UNIV_LINUX*/
+	}
+
+	if (!create_new_db && flush_lsn) {
+		/* Validate the header page in the first datafile
+		and read LSNs from the others. */
+		err = read_lsn_and_check_flags(flush_lsn);
+		if (err != DB_SUCCESS) {
+			return(err);
+		}
+	}
+
+	/* Close the current handles, add space and file info to the
+	fil_system cache and the Data Dictionary, and re-open them
+	in file_system cache so that they stay open until shutdown. */
+	ulint	node_counter = 0;
+	for (files_t::iterator it = begin; it != end; ++it) {
+		it->close();
+		it->m_exists = true;
+
+		if (it == begin) {
+			/* First data file. */
+
+			/* Create the tablespace entry for the multi-file
+			tablespace in the tablespace manager. */
+
+			if (!m_crypt_info) {
+				/* Create default crypt info for system
+				tablespace if it does not yet exist. */
+				m_crypt_info = fil_space_create_crypt_data(
+					FIL_SPACE_ENCRYPTION_DEFAULT,
+					FIL_DEFAULT_ENCRYPTION_KEY);
+			}
+
+			space = fil_space_create(
+				name(), space_id(), flags(), is_temp
+				? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE, m_crypt_info);
+		}
+
+		ut_a(fil_validate());
+
+		ulint	max_size = (++node_counter == m_files.size()
+				    ? (m_last_file_size_max == 0
+				       ? 
ULINT_MAX + : m_last_file_size_max) + : it->m_size); + + /* Add the datafile to the fil_system cache. */ + if (!fil_node_create( + it->m_filepath, it->m_size, + space, it->m_type != SRV_NOT_RAW, + it->m_atomic_write, max_size)) { + + err = DB_ERROR; + break; + } + } + + return(err); +} + +/** Normalize the file size, convert from megabytes to number of pages. */ +void +SysTablespace::normalize() +{ + files_t::iterator end = m_files.end(); + + for (files_t::iterator it = m_files.begin(); it != end; ++it) { + + it->m_size *= (1024 * 1024) / UNIV_PAGE_SIZE; + } + + m_last_file_size_max *= (1024 * 1024) / UNIV_PAGE_SIZE; +} + + +/** +@return next increment size */ +ulint +SysTablespace::get_increment() const +{ + ulint increment; + + if (m_last_file_size_max == 0) { + increment = get_autoextend_increment(); + } else { + + if (!is_valid_size()) { + ib::error() << "The last data file in " << name() + << " has a size of " << last_file_size() + << " but the max size allowed is " + << m_last_file_size_max; + } + + increment = m_last_file_size_max - last_file_size(); + } + + if (increment > get_autoextend_increment()) { + increment = get_autoextend_increment(); + } + + return(increment); +} + + +/** +@return true if configured to use raw devices */ +bool +SysTablespace::has_raw_device() +{ + files_t::iterator end = m_files.end(); + + for (files_t::iterator it = m_files.begin(); it != end; ++it) { + + if (it->is_raw_device()) { + return(true); + } + } + + return(false); +} diff --git a/storage/innobase/fts/fts0ast.cc b/storage/innobase/fts/fts0ast.cc index 030b972440f..14e52a99e18 100644 --- a/storage/innobase/fts/fts0ast.cc +++ b/storage/innobase/fts/fts0ast.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -23,7 +23,8 @@ Full Text Search parser helper file. Created 2007/3/16 Sunny Bains. ***********************************************************************/ -#include "mem0mem.h" +#include "ha_prototypes.h" + #include "fts0ast.h" #include "fts0pars.h" #include "fts0fts.h" @@ -49,8 +50,7 @@ fts_ast_node_create(void) { fts_ast_node_t* node; - node = (fts_ast_node_t*) ut_malloc(sizeof(*node)); - memset(node, 0x0, sizeof(*node)); + node = (fts_ast_node_t*) ut_zalloc_nokey(sizeof(*node)); return(node); } @@ -58,7 +58,6 @@ fts_ast_node_create(void) /******************************************************************//** Create a operator fts_ast_node_t. 
@return new node */ -UNIV_INTERN fts_ast_node_t* fts_ast_create_node_oper( /*=====================*/ @@ -79,7 +78,6 @@ fts_ast_create_node_oper( This function takes ownership of the ptr and is responsible for free'ing it @return new node or a node list with tokenized words */ -UNIV_INTERN fts_ast_node_t* fts_ast_create_node_term( /*=====================*/ @@ -96,14 +94,12 @@ fts_ast_create_node_term( /* Scan the incoming string and filter out any "non-word" characters */ while (cur_pos < len) { fts_string_t str; - ulint offset; ulint cur_len; cur_len = innobase_mysql_fts_get_token( state->charset, reinterpret_cast(ptr->str) + cur_pos, - reinterpret_cast(ptr->str) + len, - &str, &offset); + reinterpret_cast(ptr->str) + len, &str); if (cur_len == 0) { break; @@ -151,11 +147,41 @@ fts_ast_create_node_term( return((node_list != NULL) ? node_list : first_node); } +/******************************************************************//** +Create an AST term node, makes a copy of ptr for plugin parser +@return node */ +fts_ast_node_t* +fts_ast_create_node_term_for_parser( +/*================================*/ + void* arg, /*!< in: ast state */ + const char* ptr, /*!< in: term string */ + const ulint len) /*!< in: term string length */ +{ + fts_ast_node_t* node = NULL; + + /* '%' as first char is forbidden for LIKE in internal SQL parser; + '%' as last char is reserved for wildcard search;*/ + if (len == 0 || len > fts_max_token_size + || ptr[0] == '%' || ptr[len - 1] == '%') { + return(NULL); + } + + node = fts_ast_node_create(); + + node->type = FTS_AST_TERM; + + node->term.ptr = fts_ast_string_create( + reinterpret_cast(ptr), len); + + fts_ast_state_add_node(static_cast(arg), node); + + return(node); +} + /******************************************************************//** This function takes ownership of the ptr and is responsible for free'ing it. @return new node */ -UNIV_INTERN fts_ast_node_t* fts_ast_create_node_text( /*=====================*/ @@ -193,11 +219,30 @@ fts_ast_create_node_text( return(node); } +/******************************************************************//** +Create an AST phrase list node for plugin parser +@return node */ +fts_ast_node_t* +fts_ast_create_node_phrase_list( +/*============================*/ + void* arg) /*!< in: ast state */ +{ + fts_ast_node_t* node = fts_ast_node_create(); + + node->type = FTS_AST_PARSER_PHRASE_LIST; + + node->text.distance = ULINT_UNDEFINED; + node->list.head = node->list.tail = NULL; + + fts_ast_state_add_node(static_cast(arg), node); + + return(node); +} + /******************************************************************//** This function takes ownership of the expr and is responsible for free'ing it. @return new node */ -UNIV_INTERN fts_ast_node_t* fts_ast_create_node_list( /*=====================*/ @@ -218,7 +263,6 @@ fts_ast_create_node_list( Create a sub-expression list node. This function takes ownership of expr and is responsible for deleting it. @return new node */ -UNIV_INTERN fts_ast_node_t* fts_ast_create_node_subexp_list( /*============================*/ @@ -244,7 +288,8 @@ fts_ast_free_list( fts_ast_node_t* node) /*!< in: ast node to free */ { ut_a(node->type == FTS_AST_LIST - || node->type == FTS_AST_SUBEXP_LIST); + || node->type == FTS_AST_SUBEXP_LIST + || node->type == FTS_AST_PARSER_PHRASE_LIST); for (node = node->list.head; node != NULL; @@ -257,7 +302,6 @@ fts_ast_free_list( /********************************************************************//** Free a fts_ast_node_t instance. 
@return next node to free */ -UNIV_INTERN fts_ast_node_t* fts_ast_free_node( /*==============*/ @@ -282,6 +326,7 @@ fts_ast_free_node( case FTS_AST_LIST: case FTS_AST_SUBEXP_LIST: + case FTS_AST_PARSER_PHRASE_LIST: fts_ast_free_list(node); node->list.head = node->list.tail = NULL; break; @@ -305,7 +350,6 @@ fts_ast_free_node( This AST takes ownership of the expr and is responsible for free'ing it. @return in param "list" */ -UNIV_INTERN fts_ast_node_t* fts_ast_add_node( /*=============*/ @@ -318,7 +362,8 @@ fts_ast_add_node( ut_a(!elem->next); ut_a(node->type == FTS_AST_LIST - || node->type == FTS_AST_SUBEXP_LIST); + || node->type == FTS_AST_SUBEXP_LIST + || node->type == FTS_AST_PARSER_PHRASE_LIST); if (!node->list.head) { ut_a(!node->list.tail); @@ -337,7 +382,6 @@ fts_ast_add_node( /******************************************************************//** For tracking node allocations, in case there is an error during parsing. */ -UNIV_INTERN void fts_ast_state_add_node( /*===================*/ @@ -356,7 +400,6 @@ fts_ast_state_add_node( /******************************************************************//** Set the wildcard attribute of a term. */ -UNIV_INTERN void fts_ast_term_set_wildcard( /*======================*/ @@ -381,9 +424,8 @@ fts_ast_term_set_wildcard( /******************************************************************//** Set the proximity attribute of a text node. */ -UNIV_INTERN void -fts_ast_term_set_distance( +fts_ast_text_set_distance( /*======================*/ fts_ast_node_t* node, /*!< in/out: text node */ ulint distance) /*!< in: the text proximity @@ -401,7 +443,6 @@ fts_ast_term_set_distance( /******************************************************************//** Free node and expr allocations. */ -UNIV_INTERN void fts_ast_state_free( /*===============*/ @@ -429,13 +470,19 @@ fts_ast_state_free( } /******************************************************************//** -Print an ast node. */ -UNIV_INTERN +Print an ast node recursively. 
*/ +static void -fts_ast_node_print( -/*===============*/ - fts_ast_node_t* node) /*!< in: ast node to print */ +fts_ast_node_print_recursive( +/*=========================*/ + fts_ast_node_t* node, /*!< in: ast node to print */ + ulint level) /*!< in: recursive level */ { + /* Print alignment blank */ + for (ulint i = 0; i < level; i++) { + printf(" "); + } + switch (node->type) { case FTS_AST_TEXT: printf("TEXT: "); @@ -448,38 +495,53 @@ fts_ast_node_print( break; case FTS_AST_LIST: - printf("LIST: "); - node = node->list.head; + printf("LIST: \n"); - while (node) { - fts_ast_node_print(node); - node = node->next; + for (node = node->list.head; node; node = node->next) { + fts_ast_node_print_recursive(node, level + 1); } break; case FTS_AST_SUBEXP_LIST: - printf("SUBEXP_LIST: "); - node = node->list.head; + printf("SUBEXP_LIST: \n"); - while (node) { - fts_ast_node_print(node); - node = node->next; + for (node = node->list.head; node; node = node->next) { + fts_ast_node_print_recursive(node, level + 1); } + break; + case FTS_AST_OPER: printf("OPER: %d\n", node->oper); break; + case FTS_AST_PARSER_PHRASE_LIST: + printf("PARSER_PHRASE_LIST: \n"); + + for (node = node->list.head; node; node = node->next) { + fts_ast_node_print_recursive(node, level + 1); + } + break; + default: ut_error; } } +/******************************************************************//** +Print an ast node */ +void +fts_ast_node_print( +/*===============*/ + fts_ast_node_t* node) /*!< in: ast node to print */ +{ + fts_ast_node_print_recursive(node, 0); +} + /******************************************************************//** Traverse the AST - in-order traversal, except for the FTX_EXIST and FTS_IGNORE nodes, which will be ignored in the first pass of each level, and visited in a second and third pass after all other nodes in the same level are visited. 
@return DB_SUCCESS if all went well */ -UNIV_INTERN dberr_t fts_ast_visit( /*==========*/ @@ -531,7 +593,7 @@ fts_ast_visit( node && (error == DB_SUCCESS); node = node->next) { - switch(node->type) { + switch (node->type) { case FTS_AST_LIST: if (visit_pass != FTS_PASS_FIRST) { break; @@ -630,7 +692,6 @@ has one more byte than len @param[in] str pointer to string @param[in] len length of the string @return ast string with NUL-terminator */ -UNIV_INTERN fts_ast_string_t* fts_ast_string_create( const byte* str, @@ -640,9 +701,10 @@ fts_ast_string_create( ut_ad(len > 0); - ast_str = static_cast - (ut_malloc(sizeof(fts_ast_string_t))); - ast_str->str = static_cast(ut_malloc(len + 1)); + ast_str = static_cast( + ut_malloc_nokey(sizeof(fts_ast_string_t))); + + ast_str->str = static_cast(ut_malloc_nokey(len + 1)); ast_str->len = len; memcpy(ast_str->str, str, len); @@ -654,7 +716,6 @@ fts_ast_string_create( /** Free an ast string instance @param[in,out] ast_str string to free */ -UNIV_INTERN void fts_ast_string_free( fts_ast_string_t* ast_str) @@ -670,7 +731,6 @@ Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul @param[in] str string to translate @param[in] base the base @return translated number */ -UNIV_INTERN ulint fts_ast_string_to_ul( const fts_ast_string_t* ast_str, @@ -683,7 +743,6 @@ fts_ast_string_to_ul( /** Print the ast string @param[in] str string to print */ -UNIV_INTERN void fts_ast_string_print( const fts_ast_string_t* ast_str) @@ -720,6 +779,7 @@ fts_ast_oper_name_get(fts_ast_oper_t oper) return("FTS_EXIST_SKIP"); } ut_ad(0); + return("FTS_UNKNOWN"); } const char* @@ -738,7 +798,10 @@ fts_ast_node_type_get(fts_ast_type_t type) return("FTS_AST_LIST"); case FTS_AST_SUBEXP_LIST: return("FTS_AST_SUBEXP_LIST"); + case FTS_AST_PARSER_PHRASE_LIST: + return("FTS_AST_PARSER_PHRASE_LIST"); } ut_ad(0); + return("FTS_UNKNOWN"); } #endif /* UNIV_DEBUG */ diff --git a/storage/innobase/fts/fts0blex.cc b/storage/innobase/fts/fts0blex.cc index 2d71934fa0e..183e05edd04 100644 --- a/storage/innobase/fts/fts0blex.cc +++ b/storage/innobase/fts/fts0blex.cc @@ -479,9 +479,10 @@ this program; if not, write to the Free Software Foundation, Inc., /* Required for reentrant parser */ #define YY_DECL int fts_blexer(YYSTYPE* val, yyscan_t yyscanner) +#define exit(A) ut_error #define YY_NO_INPUT 1 -#line 484 "fts0blex.cc" +#line 485 "fts0blex.cc" #define INITIAL 0 @@ -706,7 +707,7 @@ YY_DECL register int yy_act; struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; -#line 43 "fts0blex.l" +#line 44 "fts0blex.l" #line 712 "fts0blex.cc" @@ -790,12 +791,12 @@ do_action: /* This label is used only to access EOF actions. 
*/ case 1: YY_RULE_SETUP -#line 45 "fts0blex.l" +#line 46 "fts0blex.l" /* Ignore whitespace */ ; YY_BREAK case 2: YY_RULE_SETUP -#line 47 "fts0blex.l" +#line 48 "fts0blex.l" { val->oper = fts0bget_text(yyscanner)[0]; @@ -804,7 +805,7 @@ YY_RULE_SETUP YY_BREAK case 3: YY_RULE_SETUP -#line 53 "fts0blex.l" +#line 54 "fts0blex.l" { val->token = fts_ast_string_create(reinterpret_cast(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); @@ -813,7 +814,7 @@ YY_RULE_SETUP YY_BREAK case 4: YY_RULE_SETUP -#line 59 "fts0blex.l" +#line 60 "fts0blex.l" { val->token = fts_ast_string_create(reinterpret_cast(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); @@ -822,7 +823,7 @@ YY_RULE_SETUP YY_BREAK case 5: YY_RULE_SETUP -#line 65 "fts0blex.l" +#line 66 "fts0blex.l" { val->token = fts_ast_string_create(reinterpret_cast(fts0bget_text(yyscanner)), fts0bget_leng(yyscanner)); @@ -832,12 +833,12 @@ YY_RULE_SETUP case 6: /* rule 6 can match eol */ YY_RULE_SETUP -#line 71 "fts0blex.l" +#line 72 "fts0blex.l" YY_BREAK case 7: YY_RULE_SETUP -#line 73 "fts0blex.l" +#line 74 "fts0blex.l" ECHO; YY_BREAK #line 843 "fts0blex.cc" @@ -1953,5 +1954,5 @@ void fts0bfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused) #define YYTABLES_NAME "yytables" -#line 73 "fts0blex.l" +#line 74 "fts0blex.l" diff --git a/storage/innobase/fts/fts0blex.l b/storage/innobase/fts/fts0blex.l index ae6e8ffaa48..ce61fc6b2d9 100644 --- a/storage/innobase/fts/fts0blex.l +++ b/storage/innobase/fts/fts0blex.l @@ -30,6 +30,7 @@ this program; if not, write to the Free Software Foundation, Inc., /* Required for reentrant parser */ #define YY_DECL int fts_blexer(YYSTYPE* val, yyscan_t yyscanner) +#define exit(A) ut_error %} diff --git a/storage/innobase/fts/fts0config.cc b/storage/innobase/fts/fts0config.cc index 5b4ae5c39f7..740ee87fe01 100644 --- a/storage/innobase/fts/fts0config.cc +++ b/storage/innobase/fts/fts0config.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -28,7 +28,7 @@ Created 2007/5/9 Sunny Bains #include "fts0priv.h" -#ifndef UNIV_NONINL +#ifdef UNIV_NONINL #include "fts0types.ic" #include "fts0vlc.ic" #endif @@ -69,7 +69,6 @@ fts_config_fetch_value( Get value from the config table. The caller must ensure that enough space is allocated for value to hold the column contents. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_value( /*=================*/ @@ -85,6 +84,7 @@ fts_config_get_value( que_t* graph; dberr_t error; ulint name_len = strlen(name); + char table_name[MAX_FULL_NAME_LEN]; info = pars_info_create(); @@ -100,12 +100,14 @@ fts_config_get_value( pars_info_bind_varchar_literal(info, "name", (byte*) name, name_len); fts_table->suffix = "CONFIG"; + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); graph = fts_parse_sql( fts_table, info, "DECLARE FUNCTION my_func;\n" - "DECLARE CURSOR c IS SELECT value FROM \"%s\"" + "DECLARE CURSOR c IS SELECT value FROM $table_name" " WHERE key = :name;\n" "BEGIN\n" "" @@ -132,7 +134,6 @@ fts_config_get_value( /*********************************************************************//** Create the config table name for retrieving index specific value. 
@return index config parameter name */ -UNIV_INTERN char* fts_config_create_index_param_name( /*===============================*/ @@ -146,9 +147,9 @@ fts_config_create_index_param_name( len = strlen(param); /* Caller is responsible for deleting name. */ - name = static_cast(ut_malloc( + name = static_cast(ut_malloc_nokey( len + FTS_AUX_MIN_TABLE_ID_LENGTH + 2)); - strcpy(name, param); + ::strcpy(name, param); name[len] = '_'; fts_write_object_id(index->id, name + len + 1, @@ -163,7 +164,6 @@ Get value specific to an FTS index from the config table. The caller must ensure that enough space is allocated for value to hold the column contents. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_index_value( /*=======================*/ @@ -194,7 +194,6 @@ fts_config_get_index_value( /******************************************************************//** Set the value in the config table for name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_set_value( /*=================*/ @@ -212,6 +211,7 @@ fts_config_set_value( undo_no_t undo_no; undo_no_t n_rows_updated; ulint name_len = strlen(name); + char table_name[MAX_FULL_NAME_LEN]; info = pars_info_create(); @@ -220,10 +220,13 @@ fts_config_set_value( value->f_str, value->f_len); fts_table->suffix = "CONFIG"; + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); graph = fts_parse_sql( fts_table, info, - "BEGIN UPDATE \"%s\" SET value = :value WHERE key = :name;"); + "BEGIN UPDATE $table_name SET value = :value" + " WHERE key = :name;"); trx->op_info = "setting FTS config value"; @@ -245,10 +248,13 @@ fts_config_set_value( pars_info_bind_varchar_literal( info, "value", value->f_str, value->f_len); + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); + graph = fts_parse_sql( fts_table, info, "BEGIN\n" - "INSERT INTO \"%s\" VALUES(:name, :value);"); + "INSERT INTO $table_name VALUES(:name, :value);"); trx->op_info = "inserting FTS config value"; @@ -263,7 +269,6 @@ fts_config_set_value( /******************************************************************//** Set the value specific to an FTS index in the config table. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_set_index_value( /*=======================*/ @@ -294,7 +299,6 @@ fts_config_set_index_value( /******************************************************************//** Get an ulint value from the config table. @return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_config_get_index_ulint( /*=======================*/ @@ -309,15 +313,14 @@ fts_config_get_index_ulint( /* We set the length of value to the max bytes it can hold. This information is used by the callback that reads the value.*/ value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); error = fts_config_get_index_value(trx, index, name, &value); if (UNIV_UNLIKELY(error != DB_SUCCESS)) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) reading `%s'\n", - ut_strerr(error), name); + ib::error() << "(" << ut_strerr(error) << ") reading `" + << name << "'"; } else { *int_value = strtoul((char*) value.f_str, NULL, 10); } @@ -330,7 +333,6 @@ fts_config_get_index_ulint( /******************************************************************//** Set an ulint value in the config table. 
@return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_config_set_index_ulint( /*=======================*/ @@ -345,7 +347,7 @@ fts_config_set_index_ulint( /* We set the length of value to the max bytes it can hold. This information is used by the callback that reads the value.*/ value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); // FIXME: Get rid of snprintf ut_a(FTS_MAX_INT_LEN < FTS_MAX_CONFIG_VALUE_LEN); @@ -356,10 +358,9 @@ fts_config_set_index_ulint( error = fts_config_set_index_value(trx, index, name, &value); if (UNIV_UNLIKELY(error != DB_SUCCESS)) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) writing `%s'\n", - ut_strerr(error), name); + ib::error() << "(" << ut_strerr(error) << ") writing `" + << name << "'"; } ut_free(value.f_str); @@ -370,7 +371,6 @@ fts_config_set_index_ulint( /******************************************************************//** Get an ulint value from the config table. @return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_config_get_ulint( /*=================*/ @@ -386,15 +386,13 @@ fts_config_get_ulint( /* We set the length of value to the max bytes it can hold. This information is used by the callback that reads the value.*/ value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); error = fts_config_get_value(trx, fts_table, name, &value); if (UNIV_UNLIKELY(error != DB_SUCCESS)) { - ut_print_timestamp(stderr); - - fprintf(stderr, " InnoDB: Error: (%s) reading `%s'\n", - ut_strerr(error), name); + ib::error() << "(" << ut_strerr(error) << ") reading `" + << name << "'"; } else { *int_value = strtoul((char*) value.f_str, NULL, 10); } @@ -407,7 +405,6 @@ fts_config_get_ulint( /******************************************************************//** Set an ulint value in the config table. @return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_config_set_ulint( /*=================*/ @@ -423,21 +420,18 @@ fts_config_set_ulint( /* We set the length of value to the max bytes it can hold. This information is used by the callback that reads the value.*/ value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); - // FIXME: Get rid of snprintf ut_a(FTS_MAX_INT_LEN < FTS_MAX_CONFIG_VALUE_LEN); - value.f_len = snprintf( + value.f_len = my_snprintf( (char*) value.f_str, FTS_MAX_INT_LEN, "%lu", int_value); error = fts_config_set_value(trx, fts_table, name, &value); if (UNIV_UNLIKELY(error != DB_SUCCESS)) { - ut_print_timestamp(stderr); - - fprintf(stderr, " InnoDB: Error: (%s) writing `%s'\n", - ut_strerr(error), name); + ib::error() << "(" << ut_strerr(error) << ") writing `" + << name << "'"; } ut_free(value.f_str); @@ -448,7 +442,6 @@ fts_config_set_ulint( /******************************************************************//** Increment the value in the config table for column name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_increment_value( /*=======================*/ @@ -465,11 +458,12 @@ fts_config_increment_value( que_t* graph = NULL; ulint name_len = strlen(name); pars_info_t* info = pars_info_create(); + char table_name[MAX_FULL_NAME_LEN]; /* We set the length of value to the max bytes it can hold. 
This information is used by the callback that reads the value.*/ value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); *value.f_str = '\0'; @@ -479,11 +473,13 @@ fts_config_increment_value( info, "my_func", fts_config_fetch_value, &value); fts_table->suffix = "CONFIG"; + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "config_table", table_name); graph = fts_parse_sql( fts_table, info, "DECLARE FUNCTION my_func;\n" - "DECLARE CURSOR c IS SELECT value FROM \"%s\"" + "DECLARE CURSOR c IS SELECT value FROM $config_table" " WHERE key = :name FOR UPDATE;\n" "BEGIN\n" "" @@ -511,8 +507,7 @@ fts_config_increment_value( ut_a(FTS_MAX_CONFIG_VALUE_LEN > FTS_MAX_INT_LEN); - // FIXME: Get rid of snprintf - value.f_len = snprintf( + value.f_len = my_snprintf( (char*) value.f_str, FTS_MAX_INT_LEN, "%lu", int_value); fts_config_set_value(trx, fts_table, name, &value); @@ -520,10 +515,8 @@ fts_config_increment_value( if (UNIV_UNLIKELY(error != DB_SUCCESS)) { - ut_print_timestamp(stderr); - - fprintf(stderr, " InnoDB: Error: (%s) " - "while incrementing %s.\n", ut_strerr(error), name); + ib::error() << "(" << ut_strerr(error) << ") while" + " incrementing " << name << "."; } ut_free(value.f_str); @@ -534,7 +527,6 @@ fts_config_increment_value( /******************************************************************//** Increment the per index value in the config table for column name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_increment_index_value( /*=============================*/ diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 0337cf6dfe7..5edee71a13a 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -21,26 +21,26 @@ this program; if not, write to the Free Software Foundation, Inc., Full Text Search interface ***********************************************************************/ +#include "ha_prototypes.h" + #include "trx0roll.h" #include "row0mysql.h" #include "row0upd.h" #include "dict0types.h" #include "row0sel.h" - #include "fts0fts.h" #include "fts0priv.h" #include "fts0types.h" - #include "fts0types.ic" #include "fts0vlc.ic" +#include "fts0plugin.h" #include "dict0priv.h" #include "dict0stats.h" #include "btr0pcur.h" -#include +#include "sync0sync.h" +#include "ut0new.h" -#include "ha_prototypes.h" - -#define FTS_MAX_ID_LEN 32 +static const ulint FTS_MAX_ID_LEN = 32; /** Column name from the FTS config table */ #define FTS_MAX_CACHE_SIZE_IN_MB "cache_size_in_mb" @@ -54,60 +54,44 @@ by looking up the key word in the obsolete table names */ /** This is maximum FTS cache for each table and would be a configurable variable */ -UNIV_INTERN ulong fts_max_cache_size; +ulong fts_max_cache_size; /** Whether the total memory used for FTS cache is exhausted, and we will need a sync to free some memory */ -UNIV_INTERN bool fts_need_sync = false; +bool fts_need_sync = false; /** Variable specifying the total memory allocated for FTS cache */ -UNIV_INTERN ulong fts_max_total_cache_size; +ulong fts_max_total_cache_size; /** This is FTS result cache limit for each query and would be a configurable variable */ -UNIV_INTERN ulong fts_result_cache_limit; +ulong fts_result_cache_limit; /** Variable specifying the maximum FTS max token size */ -UNIV_INTERN ulong fts_max_token_size; +ulong fts_max_token_size; /** Variable specifying the minimum FTS max token size */ -UNIV_INTERN ulong 
fts_min_token_size; +ulong fts_min_token_size; // FIXME: testing ib_time_t elapsed_time = 0; ulint n_nodes = 0; -/** Error condition reported by fts_utf8_decode() */ -const ulint UTF8_ERROR = 0xFFFFFFFF; - #ifdef FTS_CACHE_SIZE_DEBUG /** The cache size permissible lower limit (1K) */ static const ulint FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB = 1; /** The cache size permissible upper limit (1G) */ static const ulint FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB = 1024; -#endif /* FTS_CACHE_SIZE_DEBUG */ +#endif /** Time to sleep after DEADLOCK error before retrying operation. */ static const ulint FTS_DEADLOCK_RETRY_WAIT = 100000; -#ifdef UNIV_PFS_RWLOCK -UNIV_INTERN mysql_pfs_key_t fts_cache_rw_lock_key; -UNIV_INTERN mysql_pfs_key_t fts_cache_init_rw_lock_key; -#endif /* UNIV_PFS_RWLOCK */ - -#ifdef UNIV_PFS_MUTEX -UNIV_INTERN mysql_pfs_key_t fts_delete_mutex_key; -UNIV_INTERN mysql_pfs_key_t fts_optimize_mutex_key; -UNIV_INTERN mysql_pfs_key_t fts_bg_threads_mutex_key; -UNIV_INTERN mysql_pfs_key_t fts_doc_id_mutex_key; -UNIV_INTERN mysql_pfs_key_t fts_pll_tokenize_mutex_key; -#endif /* UNIV_PFS_MUTEX */ - /** variable to record innodb_fts_internal_tbl_name for information schema table INNODB_FTS_INSERTED etc. */ -UNIV_INTERN char* fts_internal_tbl_name = NULL; +char* fts_internal_tbl_name = NULL; /** InnoDB default stopword list: There are different versions of stopwords, the stop words listed @@ -164,64 +148,22 @@ struct fts_aux_table_t { char* name; /*!< Name of the table */ }; -/** SQL statements for creating the ancillary common FTS tables. */ -static const char* fts_create_common_tables_sql = { - "BEGIN\n" - "" - "CREATE TABLE \"%s_DELETED\" (\n" - " doc_id BIGINT UNSIGNED\n" - ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND ON \"%s_DELETED\"(doc_id);\n" - "" - "CREATE TABLE \"%s_DELETED_CACHE\" (\n" - " doc_id BIGINT UNSIGNED\n" - ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND " - "ON \"%s_DELETED_CACHE\"(doc_id);\n" - "" - "CREATE TABLE \"%s_BEING_DELETED\" (\n" - " doc_id BIGINT UNSIGNED\n" - ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND " - "ON \"%s_BEING_DELETED\"(doc_id);\n" - "" - "CREATE TABLE \"%s_BEING_DELETED_CACHE\" (\n" - " doc_id BIGINT UNSIGNED\n" - ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND " - "ON \"%s_BEING_DELETED_CACHE\"(doc_id);\n" - "" - "CREATE TABLE \"%s_CONFIG\" (\n" - " key CHAR(50),\n" - " value CHAR(200) NOT NULL\n" - ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND ON \"%s_CONFIG\"(key);\n" -}; - #ifdef FTS_DOC_STATS_DEBUG /** Template for creating the FTS auxiliary index specific tables. This is mainly designed for the statistics work in the future */ static const char* fts_create_index_tables_sql = { "BEGIN\n" "" - "CREATE TABLE \"%s_DOC_ID\" (\n" + "CREATE TABLE $doc_id_table (\n" " doc_id BIGINT UNSIGNED,\n" " word_count INTEGER UNSIGNED NOT NULL\n" ") COMPACT;\n" - "CREATE UNIQUE CLUSTERED INDEX IND ON \"%s_DOC_ID\"(doc_id);\n" + "CREATE UNIQUE CLUSTERED INDEX IND ON $doc_id_table(doc_id);\n" }; #endif -/** Template for creating the ancillary FTS tables word index tables. */ -static const char* fts_create_index_sql = { - "BEGIN\n" - "" - "CREATE UNIQUE CLUSTERED INDEX FTS_INDEX_TABLE_IND " - "ON \"%s\"(word, first_doc_id);\n" -}; - /** FTS auxiliary table suffixes that are common to all FT indexes. 
*/ -static const char* fts_common_tables[] = { +const char* fts_common_tables[] = { "BEING_DELETED", "BEING_DELETED_CACHE", "CONFIG", @@ -245,22 +187,29 @@ const fts_index_selector_t fts_index_selector[] = { static const char* fts_config_table_insert_values_sql = "BEGIN\n" "\n" - "INSERT INTO \"%s\" VALUES('" + "INSERT INTO $config_table VALUES('" FTS_MAX_CACHE_SIZE_IN_MB "', '256');\n" "" - "INSERT INTO \"%s\" VALUES('" + "INSERT INTO $config_table VALUES('" FTS_OPTIMIZE_LIMIT_IN_SECS "', '180');\n" "" - "INSERT INTO \"%s\" VALUES ('" + "INSERT INTO $config_table VALUES ('" FTS_SYNCED_DOC_ID "', '0');\n" "" - "INSERT INTO \"%s\" VALUES ('" + "INSERT INTO $config_table VALUES ('" FTS_TOTAL_DELETED_COUNT "', '0');\n" "" /* Note: 0 == FTS_TABLE_STATE_RUNNING */ - "INSERT INTO \"%s\" VALUES ('" + "INSERT INTO $config_table VALUES ('" FTS_TABLE_STATE "', '0');\n"; -/** Run SYNC on the table, i.e., write out data from the cache to the +/** FTS tokenize parmameter for plugin parser */ +struct fts_tokenize_param_t { + fts_doc_t* result_doc; /*!< Result doc for tokens */ + ulint add_pos; /*!< Added position for tokens */ +}; + +/****************************************************************//** +Run SYNC on the table, i.e., write out data from the cache to the FTS auxiliary INDEX table and clear the cache at the end. @param[in,out] sync sync state @param[in] unlock_cache whether unlock cache lock when write node @@ -334,6 +283,39 @@ fts_update_sync_doc_id( trx_t* trx) /*!< in: update trx, or NULL */ MY_ATTRIBUTE((nonnull(1))); +/** Get a character set based on precise type. +@param prtype precise type +@return the corresponding character set */ +UNIV_INLINE +CHARSET_INFO* +fts_get_charset(ulint prtype) +{ +#ifdef UNIV_DEBUG + switch (prtype & DATA_MYSQL_TYPE_MASK) { + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: + break; + default: + ut_error; + } +#endif /* UNIV_DEBUG */ + + uint cs_num = (uint) dtype_get_charset_coll(prtype); + + if (CHARSET_INFO* cs = get_charset(cs_num, MYF(MY_WME))) { + return(cs); + } + + ib::fatal() << "Unable to find charset-collation " << cs_num; + return(NULL); +} + /****************************************************************//** This function loads the default InnoDB stopword list */ static @@ -351,9 +333,9 @@ fts_load_default_stopword( heap = static_cast(allocator->arg); if (!stopword_info->cached_stopword) { - /* For default stopword, we always use fts_utf8_string_cmp() */ - stopword_info->cached_stopword = rbt_create( - sizeof(fts_tokenizer_word_t), fts_utf8_string_cmp); + stopword_info->cached_stopword = rbt_create_arg_cmp( + sizeof(fts_tokenizer_word_t), innobase_fts_text_cmp, + &my_charset_latin1); } stop_words = stopword_info->cached_stopword; @@ -373,7 +355,7 @@ fts_load_default_stopword( str.f_len = ut_strlen(word); str.f_str = reinterpret_cast(word); - fts_utf8_string_dup(&new_word.text, &str, heap); + fts_string_dup(&new_word.text, &str, heap); rbt_insert(stop_words, &new_word, &new_word); } @@ -494,7 +476,7 @@ fts_load_user_stopword( info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT value " + " SELECT value" " FROM $table_stopword;\n" "BEGIN\n" "\n" @@ -518,18 +500,15 @@ fts_load_user_stopword( fts_sql_rollback(trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading user 
stopword table. " - "Retrying!\n"); + ib::warn() << "Lock wait timeout reading user" + " stopword table. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error '%s' " - "while reading user stopword table.\n", - ut_strerr(error)); + ib::error() << "Error '" << ut_strerr(error) + << "' while reading user stopword" + " table."; ret = FALSE; break; } @@ -569,7 +548,7 @@ fts_index_cache_init( index_cache->doc_stats = ib_vector_create( allocator, sizeof(fts_doc_stats_t), 4); - for (i = 0; fts_index_selector[i].value; ++i) { + for (i = 0; i < FTS_NUM_AUX_INDEX; ++i) { ut_a(index_cache->ins_graph[i] == NULL); ut_a(index_cache->sel_graph[i] == NULL); } @@ -577,7 +556,6 @@ fts_index_cache_init( /*********************************************************************//** Initialize FTS cache. */ -UNIV_INTERN void fts_cache_init( /*===========*/ @@ -610,7 +588,6 @@ fts_cache_init( /****************************************************************//** Create a FTS cache. */ -UNIV_INTERN fts_cache_t* fts_cache_create( /*=============*/ @@ -632,15 +609,11 @@ fts_cache_create( fts_cache_init_rw_lock_key, &cache->init_lock, SYNC_FTS_CACHE_INIT); - mutex_create( - fts_delete_mutex_key, &cache->deleted_lock, SYNC_FTS_OPTIMIZE); + mutex_create(LATCH_ID_FTS_DELETE, &cache->deleted_lock); - mutex_create( - fts_optimize_mutex_key, &cache->optimize_lock, - SYNC_FTS_OPTIMIZE); + mutex_create(LATCH_ID_FTS_OPTIMIZE, &cache->optimize_lock); - mutex_create( - fts_doc_id_mutex_key, &cache->doc_id_lock, SYNC_FTS_OPTIMIZE); + mutex_create(LATCH_ID_FTS_DOC_ID, &cache->doc_id_lock); /* This is the heap used to create the cache itself. */ cache->self_heap = ib_heap_allocator_create(heap); @@ -649,13 +622,11 @@ fts_cache_create( cache->sync_heap = ib_heap_allocator_create(heap); cache->sync_heap->arg = NULL; - fts_need_sync = false; - cache->sync = static_cast( mem_heap_zalloc(heap, sizeof(fts_sync_t))); cache->sync->table = table; - cache->sync->event = os_event_create(); + cache->sync->event = os_event_create(0); /* Create the index cache vector that will hold the inverted indexes. */ cache->indexes = ib_vector_create( @@ -675,7 +646,6 @@ fts_cache_create( /*******************************************************************//** Add a newly create index into FTS cache */ -UNIV_INTERN void fts_add_index( /*==========*/ @@ -714,9 +684,8 @@ fts_reset_get_doc( fts_get_doc_t* get_doc; ulint i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_X)); + ib_vector_reset(cache->get_docs); for (i = 0; i < ib_vector_size(cache->indexes); i++) { @@ -791,7 +760,6 @@ fts_in_index_cache( Check indexes in the fts->indexes is also present in index cache and table->indexes list @return TRUE if all indexes match */ -UNIV_INTERN ibool fts_check_cached_index( /*===================*/ @@ -827,7 +795,6 @@ fts_check_cached_index( /*******************************************************************//** Drop auxiliary tables related to an FTS index @return DB_SUCCESS or error number */ -UNIV_INTERN dberr_t fts_drop_index( /*===========*/ @@ -910,7 +877,6 @@ fts_drop_index( /****************************************************************//** Free the query graph but check whether dict_sys->mutex is already held */ -UNIV_INTERN void fts_que_graph_free_check_lock( /*==========================*/ @@ -947,7 +913,6 @@ fts_que_graph_free_check_lock( /****************************************************************//** Create an FTS index cache. 
*/ -UNIV_INTERN CHARSET_INFO* fts_index_get_charset( /*==================*/ @@ -960,9 +925,7 @@ fts_index_get_charset( field = dict_index_get_nth_field(index, 0); prtype = field->col->prtype; - charset = innobase_get_fts_charset( - (int) (prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(prtype)); + charset = fts_get_charset(prtype); #ifdef FTS_DEBUG /* Set up charset info for this index. Please note all @@ -973,9 +936,7 @@ fts_index_get_charset( field = dict_index_get_nth_field(index, i); prtype = field->col->prtype; - fld_charset = innobase_get_fts_charset( - (int)(prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(prtype)); + fld_charset = fts_get_charset(prtype); /* All FTS columns should have the same charset */ if (charset) { @@ -992,7 +953,6 @@ fts_index_get_charset( /****************************************************************//** Create an FTS index cache. @return Index Cache */ -UNIV_INTERN fts_index_cache_t* fts_cache_index_cache_create( /*=========================*/ @@ -1005,9 +965,7 @@ fts_cache_index_cache_create( ut_a(cache != NULL); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_X)); /* Must not already exist in the cache vector. */ ut_a(fts_find_index_cache(cache, index) == NULL); @@ -1021,7 +979,7 @@ fts_cache_index_cache_create( index_cache->charset = fts_index_get_charset(index); - n_bytes = sizeof(que_t*) * sizeof(fts_index_selector); + n_bytes = sizeof(que_t*) * FTS_NUM_AUX_INDEX; index_cache->ins_graph = static_cast( mem_heap_zalloc(static_cast( @@ -1077,7 +1035,6 @@ fts_words_free( /** Clear cache. @param[in,out] cache fts cache */ -UNIV_INTERN void fts_cache_clear( fts_cache_t* cache) @@ -1097,7 +1054,7 @@ fts_cache_clear( index_cache->words = NULL; - for (j = 0; fts_index_selector[j].value; ++j) { + for (j = 0; j < FTS_NUM_AUX_INDEX; ++j) { if (index_cache->ins_graph[j] != NULL) { @@ -1124,6 +1081,8 @@ fts_cache_clear( mem_heap_free(static_cast(cache->sync_heap->arg)); cache->sync_heap->arg = NULL; + fts_need_sync = false; + cache->total_size = 0; mutex_enter((ib_mutex_t*) &cache->deleted_lock); @@ -1143,10 +1102,8 @@ fts_get_index_cache( { ulint i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own((rw_lock_t*) &cache->lock, RW_LOCK_EX) - || rw_lock_own((rw_lock_t*) &cache->init_lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own((rw_lock_t*) &cache->lock, RW_LOCK_X) + || rw_lock_own((rw_lock_t*) &cache->init_lock, RW_LOCK_X)); for (i = 0; i < ib_vector_size(cache->indexes); ++i) { fts_index_cache_t* index_cache; @@ -1176,9 +1133,7 @@ fts_get_index_get_doc( { ulint i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own((rw_lock_t*) &cache->init_lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own((rw_lock_t*) &cache->init_lock, RW_LOCK_X)); for (i = 0; i < ib_vector_size(cache->get_docs); ++i) { fts_get_doc_t* get_doc; @@ -1198,7 +1153,6 @@ fts_get_index_get_doc( /**********************************************************************//** Free the FTS cache. 
*/ -UNIV_INTERN void fts_cache_destroy( /*==============*/ @@ -1209,7 +1163,7 @@ fts_cache_destroy( mutex_free(&cache->optimize_lock); mutex_free(&cache->deleted_lock); mutex_free(&cache->doc_id_lock); - os_event_free(cache->sync->event); + os_event_destroy(cache->sync->event); if (cache->stopword_info.cached_stopword) { rbt_free(cache->stopword_info.cached_stopword); @@ -1237,14 +1191,13 @@ fts_tokenizer_word_get( fts_tokenizer_word_t* word; ib_rbt_bound_t parent; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&cache->lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->lock, RW_LOCK_X)); /* If it is a stopword, do not index it */ - if (cache->stopword_info.cached_stopword != NULL - && rbt_search(cache->stopword_info.cached_stopword, - &parent, text) == 0) { + if (!fts_check_token(text, + cache->stopword_info.cached_stopword, + index_cache->index->is_ngram, + index_cache->charset)) { return(NULL); } @@ -1259,7 +1212,7 @@ fts_tokenizer_word_get( new_word.nodes = ib_vector_create( cache->sync_heap, sizeof(fts_node_t), 4); - fts_utf8_string_dup(&new_word.text, text, heap); + fts_string_dup(&new_word.text, text, heap); parent.last = rbt_add_node( index_cache->words, &parent, &new_word); @@ -1281,7 +1234,6 @@ fts_tokenizer_word_get( /**********************************************************************//** Add the given doc_id/word positions to the given node's ilist. */ -UNIV_INTERN void fts_cache_node_add_positions( /*=========================*/ @@ -1298,11 +1250,12 @@ fts_cache_node_add_positions( byte* ptr_start; ulint doc_id_delta; -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG if (cache) { - ut_ad(rw_lock_own(&cache->lock, RW_LOCK_EX)); + ut_ad(rw_lock_own(&cache->lock, RW_LOCK_X)); } -#endif +#endif /* UNIV_DEBUG */ + ut_ad(doc_id >= node->last_doc_id); /* Calculate the space required to store the ilist. */ @@ -1343,7 +1296,7 @@ fts_cache_node_add_positions( new_size = (ulint)(1.2 * new_size); } - ilist = static_cast(ut_malloc(new_size)); + ilist = static_cast(ut_malloc_nokey(new_size)); ptr = ilist + node->ilist_size; node->ilist_size_alloc = new_size; @@ -1412,9 +1365,7 @@ fts_cache_add_doc( return; } -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&cache->lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->lock, RW_LOCK_X)); n_words = rbt_size(tokens); @@ -1504,9 +1455,8 @@ fts_drop_table( error = row_drop_table_for_mysql(table_name, trx, true, false); if (error != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Unable to drop FTS index aux table %s: %s", - table_name, ut_strerr(error)); + ib::error() << "Unable to drop FTS index aux table " + << table_name << ": " << ut_strerr(error); } } else { error = DB_FAIL; @@ -1553,7 +1503,6 @@ fts_rename_one_aux_table( Rename auxiliary tables for all fts index for a table. 
This(rename) is due to database name change @return DB_SUCCESS or error code */ - dberr_t fts_rename_aux_tables( /*==================*/ @@ -1568,17 +1517,15 @@ fts_rename_aux_tables( /* Rename common auxiliary tables */ for (i = 0; fts_common_tables[i] != NULL; ++i) { - char* old_table_name; + char old_table_name[MAX_FULL_NAME_LEN]; dberr_t err = DB_SUCCESS; fts_table.suffix = fts_common_tables[i]; - old_table_name = fts_get_table_name(&fts_table); + fts_get_table_name(&fts_table, old_table_name); err = fts_rename_one_aux_table(new_name, old_table_name, trx); - mem_free(old_table_name); - if (err != DB_SUCCESS) { return(err); } @@ -1596,13 +1543,13 @@ fts_rename_aux_tables( FTS_INIT_INDEX_TABLE(&fts_table, NULL, FTS_INDEX_TABLE, index); - for (ulint j = 0; fts_index_selector[j].value; ++j) { + for (ulint j = 0; j < FTS_NUM_AUX_INDEX; ++j) { dberr_t err; - char* old_table_name; + char old_table_name[MAX_FULL_NAME_LEN]; fts_table.suffix = fts_get_suffix(j); - old_table_name = fts_get_table_name(&fts_table); + fts_get_table_name(&fts_table, old_table_name); err = fts_rename_one_aux_table( new_name, old_table_name, trx); @@ -1611,8 +1558,6 @@ fts_rename_aux_tables( err = DB_DEADLOCK; fts_sql_rollback(trx);); - mem_free(old_table_name); - if (err != DB_SUCCESS) { return(err); } @@ -1640,11 +1585,11 @@ fts_drop_common_tables( for (i = 0; fts_common_tables[i] != NULL; ++i) { dberr_t err; - char* table_name; + char table_name[MAX_FULL_NAME_LEN]; fts_table->suffix = fts_common_tables[i]; - table_name = fts_get_table_name(fts_table); + fts_get_table_name(fts_table, table_name); err = fts_drop_table(trx, table_name); @@ -1652,8 +1597,6 @@ fts_drop_common_tables( if (err != DB_SUCCESS && err != DB_FAIL) { error = err; } - - mem_free(table_name); } return(error); @@ -1663,7 +1606,6 @@ fts_drop_common_tables( Since we do a horizontal split on the index table, we need to drop all the split tables. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_index_split_tables( /*========================*/ @@ -1677,13 +1619,13 @@ fts_drop_index_split_tables( FTS_INIT_INDEX_TABLE(&fts_table, NULL, FTS_INDEX_TABLE, index); - for (i = 0; fts_index_selector[i].value; ++i) { + for (i = 0; i < FTS_NUM_AUX_INDEX; ++i) { dberr_t err; - char* table_name; + char table_name[MAX_FULL_NAME_LEN]; fts_table.suffix = fts_get_suffix(i); - table_name = fts_get_table_name(&fts_table); + fts_get_table_name(&fts_table, table_name); err = fts_drop_table(trx, table_name); @@ -1691,8 +1633,6 @@ fts_drop_index_split_tables( if (err != DB_SUCCESS && err != DB_FAIL) { error = err; } - - mem_free(table_name); } return(error); @@ -1701,7 +1641,6 @@ fts_drop_index_split_tables( /****************************************************************//** Drops FTS auxiliary tables for an FTS index @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_index_tables( /*==================*/ @@ -1729,11 +1668,11 @@ fts_drop_index_tables( FTS_INIT_INDEX_TABLE(&fts_table, NULL, FTS_INDEX_TABLE, index); for (ulint i = 0; index_tables[i] != NULL; ++i) { - char* table_name; + char table_name[MAX_FULL_NAME_LEN]; fts_table.suffix = index_tables[i]; - table_name = fts_get_table_name(&fts_table); + fts_get_table_name(&fts_table, table_name); err = fts_drop_table(trx, table_name); @@ -1741,8 +1680,6 @@ fts_drop_index_tables( if (err != DB_SUCCESS && err != DB_FAIL) { error = err; } - - mem_free(table_name); } #endif /* FTS_DOC_STATS_DEBUG */ @@ -1788,7 +1725,6 @@ Drops the ancillary tables needed for supporting an FTS index on a given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_tables( /*============*/ @@ -1811,46 +1747,171 @@ fts_drop_tables( return(error); } -/*********************************************************************//** -Prepare the SQL, so that all '%s' are replaced by the common prefix. -@return sql string, use mem_free() to free the memory */ +/** Extract only the required flags from table->flags2 for FTS Aux +tables. +@param[in] in_flags2 Table flags2 +@return extracted flags2 for FTS aux tables */ +static inline +ulint +fts_get_table_flags2_for_aux_tables( + ulint flags2) +{ + /* Extract the file_per_table flag & temporary file flag + from the main FTS table flags2 */ + return((flags2 & DICT_TF2_USE_FILE_PER_TABLE) | + (flags2 & DICT_TF2_TEMPORARY)); +} + +/** Create dict_table_t object for FTS Aux tables. 
+@param[in] aux_table_name FTS Aux table name +@param[in] table table object of FTS Index +@param[in] n_cols number of columns for FTS Aux table +@return table object for FTS Aux table */ static -char* -fts_prepare_sql( -/*============*/ - fts_table_t* fts_table, /*!< in: table name info */ - const char* my_template) /*!< in: sql template */ +dict_table_t* +fts_create_in_mem_aux_table( + const char* aux_table_name, + const dict_table_t* table, + ulint n_cols) { - char* sql; - char* name_prefix; + dict_table_t* new_table = dict_mem_table_create( + aux_table_name, table->space, n_cols, 0, table->flags, + fts_get_table_flags2_for_aux_tables(table->flags2)); - name_prefix = fts_get_table_name_prefix(fts_table); - sql = ut_strreplace(my_template, "%s", name_prefix); - mem_free(name_prefix); + if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + ut_ad(table->space == fil_space_get_id_by_name( + table->tablespace())); + new_table->tablespace = mem_heap_strdup( + new_table->heap, table->tablespace); + } + + if (DICT_TF_HAS_DATA_DIR(table->flags)) { + ut_ad(table->data_dir_path != NULL); + new_table->data_dir_path = mem_heap_strdup( + new_table->heap, table->data_dir_path); + } - return(sql); + return(new_table); } -/*********************************************************************//** -Creates the common ancillary tables needed for supporting an FTS index +/** Function to create on FTS common table. +@param[in,out] trx InnoDB transaction +@param[in] table Table that has FTS Index +@param[in] fts_table_name FTS AUX table name +@param[in] fts_suffix FTS AUX table suffix +@param[in] heap heap +@return table object if created, else NULL */ +static +dict_table_t* +fts_create_one_common_table( + trx_t* trx, + const dict_table_t* table, + const char* fts_table_name, + const char* fts_suffix, + mem_heap_t* heap) +{ + dict_table_t* new_table = NULL; + dberr_t error; + bool is_config = strcmp(fts_suffix, "CONFIG") == 0; + + if (!is_config) { + + new_table = fts_create_in_mem_aux_table( + fts_table_name, table, FTS_DELETED_TABLE_NUM_COLS); + + dict_mem_table_add_col( + new_table, heap, "doc_id", DATA_INT, DATA_UNSIGNED, + FTS_DELETED_TABLE_COL_LEN); + } else { + /* Config table has different schema. */ + new_table = fts_create_in_mem_aux_table( + fts_table_name, table, FTS_CONFIG_TABLE_NUM_COLS); + + dict_mem_table_add_col( + new_table, heap, "key", DATA_VARCHAR, 0, + FTS_CONFIG_TABLE_KEY_COL_LEN); + + dict_mem_table_add_col( + new_table, heap, "value", DATA_VARCHAR, DATA_NOT_NULL, + FTS_CONFIG_TABLE_VALUE_COL_LEN); + } + + error = row_create_table_for_mysql(new_table, NULL, trx, false, + FIL_SPACE_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY); + + if (error == DB_SUCCESS) { + + dict_index_t* index = dict_mem_index_create( + fts_table_name, "FTS_COMMON_TABLE_IND", + new_table->space, DICT_UNIQUE|DICT_CLUSTERED, 1); + + if (!is_config) { + dict_mem_index_add_field(index, "doc_id", 0); + } else { + dict_mem_index_add_field(index, "key", 0); + } + + /* We save and restore trx->dict_operation because + row_create_index_for_mysql() changes the operation to + TRX_DICT_OP_TABLE. 
*/ + trx_dict_op_t op = trx_get_dict_operation(trx); + + error = row_create_index_for_mysql(index, trx, NULL, NULL); + + trx->dict_operation = op; + } + + if (error != DB_SUCCESS) { + trx->error_state = error; + dict_mem_table_free(new_table); + new_table = NULL; + ib::warn() << "Failed to create FTS common table " + << fts_table_name; + } + return(new_table); +} + +/** Creates the common auxiliary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. +The following tables are created. +CREATE TABLE $FTS_PREFIX_DELETED + (doc_id BIGINT UNSIGNED, UNIQUE CLUSTERED INDEX on doc_id) +CREATE TABLE $FTS_PREFIX_DELETED_CACHE + (doc_id BIGINT UNSIGNED, UNIQUE CLUSTERED INDEX on doc_id) +CREATE TABLE $FTS_PREFIX_BEING_DELETED + (doc_id BIGINT UNSIGNED, UNIQUE CLUSTERED INDEX on doc_id) +CREATE TABLE $FTS_PREFIX_BEING_DELETED_CACHE + (doc_id BIGINT UNSIGNED, UNIQUE CLUSTERED INDEX on doc_id) +CREATE TABLE $FTS_PREFIX_CONFIG + (key CHAR(50), value CHAR(200), UNIQUE CLUSTERED INDEX on key) +@param[in,out] trx transaction +@param[in] table table with FTS index +@param[in] name table name normalized +@param[in] skip_doc_id_index Skip index on doc id @return DB_SUCCESS if succeed */ -UNIV_INTERN dberr_t fts_create_common_tables( -/*=====================*/ - trx_t* trx, /*!< in: transaction */ - const dict_table_t* table, /*!< in: table with FTS index */ - const char* name, /*!< in: table name normalized.*/ - bool skip_doc_id_index)/*!< in: Skip index on doc id */ + trx_t* trx, + const dict_table_t* table, + const char* name, + bool skip_doc_id_index) { - char* sql; dberr_t error; que_t* graph; fts_table_t fts_table; mem_heap_t* heap = mem_heap_create(1024); pars_info_t* info; + char fts_name[MAX_FULL_NAME_LEN]; + char full_name[sizeof(fts_common_tables) / sizeof(char*)] + [MAX_FULL_NAME_LEN]; + + dict_index_t* index = NULL; + trx_dict_op_t op; + /* common_tables vector is used for dropping FTS common tables + on error condition. */ + std::vector common_tables; + std::vector::const_iterator it; FTS_INIT_FTS_TABLE(&fts_table, NULL, FTS_COMMON_TABLE, table); @@ -1862,23 +1923,39 @@ fts_create_common_tables( } /* Create the FTS tables that are common to an FTS index. */ - sql = fts_prepare_sql(&fts_table, fts_create_common_tables_sql); - graph = fts_parse_sql_no_dict_lock(NULL, NULL, sql); - mem_free(sql); + for (ulint i = 0; fts_common_tables[i] != NULL; ++i) { - error = fts_eval_sql(trx, graph); + fts_table.suffix = fts_common_tables[i]; + fts_get_table_name(&fts_table, full_name[i]); + dict_table_t* common_table = fts_create_one_common_table( + trx, table, full_name[i], fts_table.suffix, heap); - que_graph_free(graph); + if (common_table == NULL) { + error = DB_ERROR; + goto func_exit; + } else { + common_tables.push_back(common_table); + } - if (error != DB_SUCCESS) { + DBUG_EXECUTE_IF("ib_fts_aux_table_error", + /* Return error after creating FTS_AUX_CONFIG table. */ + if (i == 4) { + error = DB_ERROR; + goto func_exit; + } + ); - goto func_exit; } /* Write the default settings to the config table. 
*/ + info = pars_info_create(); + fts_table.suffix = "CONFIG"; + fts_get_table_name(&fts_table, fts_name); + pars_info_bind_id(info, true, "config_table", fts_name); + graph = fts_parse_sql_no_dict_lock( - &fts_table, NULL, fts_config_table_insert_values_sql); + &fts_table, info, fts_config_table_insert_values_sql); error = fts_eval_sql(trx, graph); @@ -1889,133 +1966,133 @@ fts_create_common_tables( goto func_exit; } - info = pars_info_create(); + index = dict_mem_index_create( + name, FTS_DOC_ID_INDEX_NAME, table->space, + DICT_UNIQUE, 1); + dict_mem_index_add_field(index, FTS_DOC_ID_COL_NAME, 0); - pars_info_bind_id(info, TRUE, "table_name", name); - pars_info_bind_id(info, TRUE, "index_name", FTS_DOC_ID_INDEX_NAME); - pars_info_bind_id(info, TRUE, "doc_id_col_name", FTS_DOC_ID_COL_NAME); + op = trx_get_dict_operation(trx); - /* Create the FTS DOC_ID index on the hidden column. Currently this - is common for any FT index created on the table. */ - graph = fts_parse_sql_no_dict_lock( - NULL, - info, - mem_heap_printf( - heap, - "BEGIN\n" - "" - "CREATE UNIQUE INDEX $index_name ON $table_name(" - "$doc_id_col_name);\n")); + error = row_create_index_for_mysql(index, trx, NULL, NULL); - error = fts_eval_sql(trx, graph); - que_graph_free(graph); + trx->dict_operation = op; func_exit: if (error != DB_SUCCESS) { - /* We have special error handling here */ - - trx->error_state = DB_SUCCESS; - trx_rollback_to_savepoint(trx, NULL); - - row_drop_table_for_mysql(table->name, trx, FALSE, TRUE); - - trx->error_state = DB_SUCCESS; + for (it = common_tables.begin(); it != common_tables.end(); + ++it) { + row_drop_table_for_mysql( + (*it)->name.m_name, trx, true, FALSE); + } } + common_tables.clear(); mem_heap_free(heap); return(error); } - -/*************************************************************//** -Wrapper function of fts_create_index_tables_low(), create auxiliary -tables for an FTS index -@return: DB_SUCCESS or error code */ +/** Creates one FTS auxiliary index table for an FTS index. 
+@param[in,out] trx transaction +@param[in] index the index instance +@param[in] fts_table fts_table structure +@param[in] heap memory heap +@return DB_SUCCESS or error code */ static dict_table_t* fts_create_one_index_table( -/*=======================*/ - trx_t* trx, /*!< in: transaction */ - const dict_index_t* - index, /*!< in: the index instance */ - fts_table_t* fts_table, /*!< in: fts_table structure */ - mem_heap_t* heap) /*!< in: heap */ + trx_t* trx, + const dict_index_t* index, + fts_table_t* fts_table, + mem_heap_t* heap) { dict_field_t* field; dict_table_t* new_table = NULL; - char* table_name = fts_get_table_name(fts_table); + char table_name[MAX_FULL_NAME_LEN]; dberr_t error; CHARSET_INFO* charset; - ulint flags2 = 0; ut_ad(index->type & DICT_FTS); - if (srv_file_per_table) { - flags2 = DICT_TF2_USE_TABLESPACE; - } + fts_get_table_name(fts_table, table_name); - new_table = dict_mem_table_create(table_name, 0, 5, 1, flags2); + new_table = fts_create_in_mem_aux_table( + table_name, fts_table->table, + FTS_AUX_INDEX_TABLE_NUM_COLS); field = dict_index_get_nth_field(index, 0); - charset = innobase_get_fts_charset( - (int)(field->col->prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(field->col->prtype)); + charset = fts_get_charset(field->col->prtype); - if (strcmp(charset->name, "latin1_swedish_ci") == 0) { - dict_mem_table_add_col(new_table, heap, "word", DATA_VARCHAR, - field->col->prtype, FTS_MAX_WORD_LEN); - } else { - dict_mem_table_add_col(new_table, heap, "word", DATA_VARMYSQL, - field->col->prtype, FTS_MAX_WORD_LEN); - } + dict_mem_table_add_col(new_table, heap, "word", + charset == &my_charset_latin1 + ? DATA_VARCHAR : DATA_VARMYSQL, + field->col->prtype, + FTS_INDEX_WORD_LEN); dict_mem_table_add_col(new_table, heap, "first_doc_id", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, - sizeof(doc_id_t)); + FTS_INDEX_FIRST_DOC_ID_LEN); dict_mem_table_add_col(new_table, heap, "last_doc_id", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, - sizeof(doc_id_t)); + FTS_INDEX_LAST_DOC_ID_LEN); dict_mem_table_add_col(new_table, heap, "doc_count", DATA_INT, - DATA_NOT_NULL | DATA_UNSIGNED, 4); + DATA_NOT_NULL | DATA_UNSIGNED, + FTS_INDEX_DOC_COUNT_LEN); - dict_mem_table_add_col(new_table, heap, "ilist", DATA_BLOB, - 4130048, 0); + /* The precise type calculation is as follows: + least signficiant byte: MySQL type code (not applicable for sys cols) + second least : DATA_NOT_NULL | DATA_BINARY_TYPE + third least : the MySQL charset-collation code (DATA_MTYPE_MAX) */ - error = row_create_table_for_mysql(new_table, trx, false, FIL_SPACE_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY); + dict_mem_table_add_col( + new_table, heap, "ilist", DATA_BLOB, + (DATA_MTYPE_MAX << 16) | DATA_UNSIGNED | DATA_NOT_NULL, + FTS_INDEX_ILIST_LEN); + + error = row_create_table_for_mysql(new_table, NULL, trx, false, + FIL_SPACE_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY); + + if (error == DB_SUCCESS) { + dict_index_t* index = dict_mem_index_create( + table_name, "FTS_INDEX_TABLE_IND", new_table->space, + DICT_UNIQUE|DICT_CLUSTERED, 2); + dict_mem_index_add_field(index, "word", 0); + dict_mem_index_add_field(index, "first_doc_id", 0); + + trx_dict_op_t op = trx_get_dict_operation(trx); + + error = row_create_index_for_mysql(index, trx, NULL, NULL); + + trx->dict_operation = op; + } if (error != DB_SUCCESS) { trx->error_state = error; dict_mem_table_free(new_table); new_table = NULL; - ib_logf(IB_LOG_LEVEL_WARN, - "Fail to create FTS index table %s", table_name); + ib::warn() << "Failed to create FTS index 
table " + << table_name; } - mem_free(table_name); - return(new_table); } -/*************************************************************//** -Wrapper function of fts_create_index_tables_low(), create auxiliary -tables for an FTS index -@return: DB_SUCCESS or error code */ -UNIV_INTERN +/** Create auxiliary index tables for an FTS index. +@param[in,out] trx transaction +@param[in] index the index instance +@param[in] table_name table name +@param[in] table_id the table id +@return DB_SUCCESS or error code */ dberr_t fts_create_index_tables_low( -/*========================*/ - trx_t* trx, /*!< in: transaction */ - const dict_index_t* - index, /*!< in: the index instance */ - const char* table_name, /*!< in: the table name */ - table_id_t table_id) /*!< in: the table id */ - + trx_t* trx, + const dict_index_t* index, + const char* table_name, + table_id_t table_id) { ulint i; - que_t* graph; fts_table_t fts_table; dberr_t error = DB_SUCCESS; mem_heap_t* heap = mem_heap_create(1024); @@ -2027,20 +2104,28 @@ fts_create_index_tables_low( fts_table.table = index->table; #ifdef FTS_DOC_STATS_DEBUG - char* sql; - /* Create the FTS auxiliary tables that are specific to an FTS index. */ - sql = fts_prepare_sql(&fts_table, fts_create_index_tables_sql); + info = pars_info_create(); + + fts_table.suffix = "DOC_ID"; + fts_get_table_name(&fts_table, fts_name); - graph = fts_parse_sql_no_dict_lock(NULL, NULL, sql); - mem_free(sql); + pars_info_bind_id(info, true, "doc_id_table", fts_name); + + graph = fts_parse_sql_no_dict_lock(NULL, info, + fts_create_index_tables_sql); error = fts_eval_sql(trx, graph); que_graph_free(graph); #endif /* FTS_DOC_STATS_DEBUG */ - for (i = 0; fts_index_selector[i].value && error == DB_SUCCESS; ++i) { + /* aux_idx_tables vector is used for dropping FTS AUX INDEX + tables on error condition. */ + std::vector aux_idx_tables; + std::vector::const_iterator it; + + for (i = 0; i < FTS_NUM_AUX_INDEX && error == DB_SUCCESS; ++i) { dict_table_t* new_table; /* Create the FTS auxiliary tables that are specific @@ -2051,46 +2136,57 @@ fts_create_index_tables_low( new_table = fts_create_one_index_table( trx, index, &fts_table, heap); - if (!new_table) { + if (new_table == NULL) { error = DB_FAIL; break; + } else { + aux_idx_tables.push_back(new_table); } - graph = fts_parse_sql_no_dict_lock( - &fts_table, NULL, fts_create_index_sql); - - error = fts_eval_sql(trx, graph); - que_graph_free(graph); + DBUG_EXECUTE_IF("ib_fts_index_table_error", + /* Return error after creating FTS_INDEX_5 + aux table. */ + if (i == 4) { + error = DB_FAIL; + break; + } + ); } if (error != DB_SUCCESS) { - /* We have special error handling here */ - - trx->error_state = DB_SUCCESS; - trx_rollback_to_savepoint(trx, NULL); - - row_drop_table_for_mysql(table_name, trx, FALSE, TRUE); - - trx->error_state = DB_SUCCESS; + for (it = aux_idx_tables.begin(); it != aux_idx_tables.end(); + ++it) { + row_drop_table_for_mysql( + (*it)->name.m_name, trx, true, FALSE); + } } + aux_idx_tables.clear(); mem_heap_free(heap); return(error); } -/******************************************************************//** -Creates the column specific ancillary tables needed for supporting an +/** Creates the column specific ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. + +All FTS AUX Index tables have the following schema. 
+CREAT TABLE $FTS_PREFIX_INDEX_[1-6]( + word VARCHAR(FTS_MAX_WORD_LEN), + first_doc_id INT NOT NULL, + last_doc_id UNSIGNED NOT NULL, + doc_count UNSIGNED INT NOT NULL, + ilist VARBINARY NOT NULL, + UNIQUE CLUSTERED INDEX ON (word, first_doc_id)) +@param[in,out] trx transaction +@param[in] index index instance @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_create_index_tables( -/*====================*/ - trx_t* trx, /*!< in: transaction */ - const dict_index_t* index) /*!< in: the index instance */ + trx_t* trx, + const dict_index_t* index) { dberr_t err; dict_table_t* table; @@ -2098,7 +2194,8 @@ fts_create_index_tables( table = dict_table_get_low(index->table_name); ut_a(table != NULL); - err = fts_create_index_tables_low(trx, index, table->name, table->id); + err = fts_create_index_tables_low( + trx, index, table->name.m_name, table->id); if (err == DB_SUCCESS) { trx_commit(trx); @@ -2244,7 +2341,7 @@ fts_savepoint_create( /******************************************************************//** Create an FTS trx. -@return FTS trx */ +@return FTS trx */ static fts_trx_t* fts_trx_create( @@ -2427,7 +2524,6 @@ fts_trx_table_add_op( /******************************************************************//** Notify the FTS system about an operation on an FTS-indexed table. */ -UNIV_INTERN void fts_trx_add_op( /*===========*/ @@ -2509,7 +2605,7 @@ fts_get_max_cache_size( information is used by the callback that reads the value. */ value.f_n_char = 0; value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = ut_malloc(value.f_len + 1); + value.f_str = ut_malloc_nokey(value.f_len + 1); error = fts_config_get_value( trx, fts_table, FTS_MAX_CACHE_SIZE_IN_MB, &value); @@ -2521,35 +2617,32 @@ fts_get_max_cache_size( if (cache_size_in_mb > FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Warning: FTS max cache size " - " (%lu) out of range. Minimum value is " - "%luMB and the maximum values is %luMB, " - "setting cache size to upper limit\n", - cache_size_in_mb, - FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB, - FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB); + ib::warn() << "FTS max cache size (" + << cache_size_in_mb << ") out of range." + " Minimum value is " + << FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB + << "MB and the maximum value is " + << FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB + << "MB, setting cache size to upper limit"; cache_size_in_mb = FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB; } else if (cache_size_in_mb < FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Warning: FTS max cache size " - " (%lu) out of range. Minimum value is " - "%luMB and the maximum values is %luMB, " - "setting cache size to lower limit\n", - cache_size_in_mb, - FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB, - FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB); + ib::warn() << "FTS max cache size (" + << cache_size_in_mb << ") out of range." 
+ " Minimum value is " + << FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB + << "MB and the maximum value is" + << FTS_CACHE_SIZE_UPPER_LIMIT_IN_MB + << "MB, setting cache size to lower limit"; cache_size_in_mb = FTS_CACHE_SIZE_LOWER_LIMIT_IN_MB; } } else { - ut_print_timestamp(stderr); - fprintf(stderr, "InnoDB: Error: (%lu) reading max cache " - "config value from config table\n", error); + ib::error() << "(" << ut_strerr(error) << ") reading max" + " cache config value from config table"; } ut_free(value.f_str); @@ -2562,7 +2655,6 @@ fts_get_max_cache_size( /*********************************************************************//** Get the total number of words in the FTS for a particular FTS index. @return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_get_total_word_count( /*=====================*/ @@ -2579,7 +2671,7 @@ fts_get_total_word_count( information is used by the callback that reads the value. */ value.f_n_char = 0; value.f_len = FTS_MAX_CONFIG_VALUE_LEN; - value.f_str = static_cast(ut_malloc(value.f_len + 1)); + value.f_str = static_cast(ut_malloc_nokey(value.f_len + 1)); error = fts_config_get_index_value( trx, index, FTS_TOTAL_WORD_COUNT, &value); @@ -2589,9 +2681,8 @@ fts_get_total_word_count( value.f_str[value.f_len] = 0; *total = strtoul((char*) value.f_str, NULL, 10); } else { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) reading total words " - "value from config table\n", ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ") reading total" + " words value from config table"; } ut_free(value.f_str); @@ -2604,7 +2695,6 @@ fts_get_total_word_count( Update the next and last Doc ID in the CONFIG table to be the input "doc_id" value (+ 1). We would do so after each FTS index build or table truncate */ -UNIV_INTERN void fts_update_next_doc_id( /*===================*/ @@ -2626,7 +2716,6 @@ fts_update_next_doc_id( /*********************************************************************//** Get the next available document id. 
@return DB_SUCCESS if OK */ -UNIV_INTERN dberr_t fts_get_next_doc_id( /*================*/ @@ -2638,23 +2727,19 @@ fts_get_next_doc_id( /* If the Doc ID system has not yet been initialized, we will consult the CONFIG table and user table to re-establish the initial value of the Doc ID */ + if (cache->first_doc_id == FTS_NULL_DOC_ID) { + fts_init_doc_id(table); + } - if (cache->first_doc_id != 0 || !fts_init_doc_id(table)) { - if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) { - *doc_id = FTS_NULL_DOC_ID; - return(DB_SUCCESS); - } - - /* Otherwise, simply increment the value in cache */ - mutex_enter(&cache->doc_id_lock); - *doc_id = ++cache->next_doc_id; - mutex_exit(&cache->doc_id_lock); - } else { - mutex_enter(&cache->doc_id_lock); - *doc_id = cache->next_doc_id; - mutex_exit(&cache->doc_id_lock); + if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) { + *doc_id = FTS_NULL_DOC_ID; + return(DB_SUCCESS); } + mutex_enter(&cache->doc_id_lock); + *doc_id = ++cache->next_doc_id; + mutex_exit(&cache->doc_id_lock); + return(DB_SUCCESS); } @@ -2681,6 +2766,7 @@ fts_cmp_set_sync_doc_id( fts_table_t fts_table; que_t* graph = NULL; fts_cache_t* cache = table->fts->cache; + char table_name[MAX_FULL_NAME_LEN]; retry: ut_a(table->fts->doc_col != ULINT_UNDEFINED); @@ -2689,7 +2775,7 @@ retry: fts_table.type = FTS_COMMON_TABLE; fts_table.table = table; - fts_table.parent = table->name; + fts_table.parent = table->name.m_name; trx = trx_allocate_for_background(); @@ -2700,10 +2786,13 @@ retry: pars_info_bind_function( info, "my_func", fts_fetch_store_doc_id, doc_id); + fts_get_table_name(&fts_table, table_name); + pars_info_bind_id(info, true, "config_table", table_name); + graph = fts_parse_sql( &fts_table, info, "DECLARE FUNCTION my_func;\n" - "DECLARE CURSOR c IS SELECT value FROM \"%s\"" + "DECLARE CURSOR c IS SELECT value FROM $config_table" " WHERE key = 'synced_doc_id' FOR UPDATE;\n" "BEGIN\n" "" @@ -2747,7 +2836,7 @@ retry: if (doc_id_cmp > *doc_id) { error = fts_update_sync_doc_id( - table, table->name, cache->synced_doc_id, trx); + table, table->name.m_name, cache->synced_doc_id, trx); } *doc_id = cache->next_doc_id; @@ -2759,10 +2848,8 @@ func_exit: } else { *doc_id = 0; - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) " - "while getting next doc id.\n", ut_strerr(error)); - + ib::error() << "(" << ut_strerr(error) << ") while getting" + " next doc id."; fts_sql_rollback(trx); if (error == DB_DEADLOCK) { @@ -2797,6 +2884,7 @@ fts_update_sync_doc_id( dberr_t error; ibool local_trx = FALSE; fts_cache_t* cache = table->fts->cache; + char fts_name[MAX_FULL_NAME_LEN]; fts_table.suffix = "CONFIG"; fts_table.table_id = table->id; @@ -2805,7 +2893,7 @@ fts_update_sync_doc_id( if (table_name) { fts_table.parent = table_name; } else { - fts_table.parent = table->name; + fts_table.parent = table->name.m_name; } if (!trx) { @@ -2822,10 +2910,13 @@ fts_update_sync_doc_id( pars_info_bind_varchar_literal(info, "doc_id", id, id_len); + fts_get_table_name(&fts_table, fts_name); + pars_info_bind_id(info, true, "table_name", fts_name); + graph = fts_parse_sql( &fts_table, info, - "BEGIN " - "UPDATE \"%s\" SET value = :doc_id" + "BEGIN" + " UPDATE $table_name SET value = :doc_id" " WHERE key = 'synced_doc_id';"); error = fts_eval_sql(trx, graph); @@ -2838,9 +2929,8 @@ fts_update_sync_doc_id( cache->synced_doc_id = doc_id; } else { - ib_logf(IB_LOG_LEVEL_ERROR, - "(%s) while updating last doc id.", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ") while" + " 
updating last doc id."; fts_sql_rollback(trx); } @@ -2853,7 +2943,6 @@ fts_update_sync_doc_id( /*********************************************************************//** Create a new fts_doc_ids_t. @return new fts_doc_ids_t */ -UNIV_INTERN fts_doc_ids_t* fts_doc_ids_create(void) /*====================*/ @@ -2874,7 +2963,6 @@ fts_doc_ids_create(void) /*********************************************************************//** Free a fts_doc_ids_t. */ - void fts_doc_ids_free( /*=============*/ @@ -2972,6 +3060,7 @@ fts_delete( /* Note the deleted document for OPTIMIZE to purge. */ if (error == DB_SUCCESS) { + char table_name[MAX_FULL_NAME_LEN]; trx->op_info = "adding doc id to FTS DELETED"; @@ -2979,10 +3068,13 @@ fts_delete( fts_table.suffix = "DELETED"; + fts_get_table_name(&fts_table, table_name); + pars_info_bind_id(info, true, "deleted", table_name); + graph = fts_parse_sql( &fts_table, info, - "BEGIN INSERT INTO \"%s\" VALUES (:doc_id);"); + "BEGIN INSERT INTO $deleted VALUES (:doc_id);"); error = fts_eval_sql(trx, graph); @@ -3030,7 +3122,6 @@ fts_modify( /*********************************************************************//** Create a new document id. @return DB_SUCCESS if all went well else error */ -UNIV_INTERN dberr_t fts_create_doc_id( /*==============*/ @@ -3137,7 +3228,6 @@ fts_commit_table( The given transaction is about to be committed; do whatever is necessary from the FTS system's POV. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_commit( /*=======*/ @@ -3168,7 +3258,6 @@ fts_commit( /*********************************************************************//** Initialize a document. */ -UNIV_INTERN void fts_doc_init( /*=========*/ @@ -3183,7 +3272,6 @@ fts_doc_init( /*********************************************************************//** Free document. */ -UNIV_INTERN void fts_doc_free( /*=========*/ @@ -3195,9 +3283,7 @@ fts_doc_free( rbt_free(doc->tokens); } -#ifdef UNIV_DEBUG - memset(doc, 0, sizeof(*doc)); -#endif /* UNIV_DEBUG */ + ut_d(memset(doc, 0, sizeof(*doc))); mem_heap_free(heap); } @@ -3206,7 +3292,6 @@ fts_doc_free( Callback function for fetch that stores a row id to the location pointed. The column's type must be DATA_FIXBINARY, DATA_BINARY_TYPE, length = 8. @return always returns NULL */ -UNIV_INTERN void* fts_fetch_row_id( /*=============*/ @@ -3232,7 +3317,6 @@ fts_fetch_row_id( Callback function for fetch that stores the text of an FTS document, converting each column to UTF-16. 
@return always FALSE */ -UNIV_INTERN ibool fts_query_expansion_fetch_doc( /*==========================*/ @@ -3271,13 +3355,11 @@ fts_query_expansion_fetch_doc( } if (!doc_charset) { - ulint prtype = dfield->type.prtype; - doc_charset = innobase_get_fts_charset( - (int)(prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(prtype)); + doc_charset = fts_get_charset(dfield->type.prtype); } doc.charset = doc_charset; + doc.is_ngram = result_doc->is_ngram; if (dfield_is_ext(dfield)) { /* We ignore columns that are stored externally, this @@ -3294,9 +3376,11 @@ fts_query_expansion_fetch_doc( } if (field_no == 0) { - fts_tokenize_document(&doc, result_doc); + fts_tokenize_document(&doc, result_doc, + result_doc->parser); } else { - fts_tokenize_document_next(&doc, doc_len, result_doc); + fts_tokenize_document_next(&doc, doc_len, result_doc, + result_doc->parser); } exp = que_node_get_next(exp); @@ -3341,6 +3425,7 @@ fts_fetch_doc_from_rec( ulint i; ulint doc_len = 0; ulint processed_doc = 0; + st_mysql_ftparser* parser; if (!get_doc) { return; @@ -3348,6 +3433,7 @@ fts_fetch_doc_from_rec( index = get_doc->index_cache->index; table = get_doc->index_cache->index->table; + parser = get_doc->index_cache->index->parser; clust_rec = btr_pcur_get_rec(pcur); @@ -3359,23 +3445,18 @@ fts_fetch_doc_from_rec( clust_pos = dict_col_get_clust_pos(col, clust_index); if (!get_doc->index_cache->charset) { - ulint prtype = ifield->col->prtype; - - get_doc->index_cache->charset = - innobase_get_fts_charset( - (int) (prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(prtype)); + get_doc->index_cache->charset = fts_get_charset( + ifield->col->prtype); } if (rec_offs_nth_extern(offsets, clust_pos)) { doc->text.f_str = btr_rec_copy_externally_stored_field( clust_rec, offsets, - dict_table_zip_size(table), + dict_table_page_size(table), clust_pos, &doc->text.f_len, static_cast( - doc->self_heap->arg), - NULL); + doc->self_heap->arg)); } else { doc->text.f_str = (byte*) rec_get_nth_field( clust_rec, offsets, clust_pos, @@ -3384,6 +3465,7 @@ fts_fetch_doc_from_rec( doc->found = TRUE; doc->charset = get_doc->index_cache->charset; + doc->is_ngram = index->is_ngram; /* Null Field */ if (doc->text.f_len == UNIV_SQL_NULL || doc->text.f_len == 0) { @@ -3391,9 +3473,9 @@ fts_fetch_doc_from_rec( } if (processed_doc == 0) { - fts_tokenize_document(doc, NULL); + fts_tokenize_document(doc, NULL, parser); } else { - fts_tokenize_document_next(doc, doc_len, NULL); + fts_tokenize_document_next(doc, doc_len, NULL, parser); } processed_doc++; @@ -3447,8 +3529,7 @@ fts_add_doc_by_id( heap = mem_heap_create(512); clust_index = dict_table_get_first_index(table); - fts_id_index = dict_table_get_index_on_name( - table, FTS_DOC_ID_INDEX_NAME); + fts_id_index = table->fts_doc_id_index; /* Check whether the index on FTS_DOC_ID is cluster index */ is_id_cluster = (clust_index == fts_id_index); @@ -3634,7 +3715,6 @@ fts_read_ulint( /*********************************************************************//** Get maximum Doc ID in a table if index "FTS_DOC_ID_INDEX" exists @return max Doc ID or 0 if index "FTS_DOC_ID_INDEX" does not exist */ -UNIV_INTERN doc_id_t fts_get_max_doc_id( /*===============*/ @@ -3646,7 +3726,7 @@ fts_get_max_doc_id( mtr_t mtr; btr_pcur_t pcur; - index = dict_table_get_index_on_name(table, FTS_DOC_ID_INDEX_NAME); + index = table->fts_doc_id_index; if (!index) { return(0); @@ -3704,7 +3784,6 @@ func_exit: /*********************************************************************//** Fetch document with the 
given document id. @return DB_SUCCESS if OK else error */ -UNIV_INTERN dberr_t fts_doc_fetch_by_doc_id( /*====================*/ @@ -3833,7 +3912,6 @@ fts_doc_fetch_by_doc_id( /*********************************************************************//** Write out a single word's data as new entry/entries in the INDEX table. @return DB_SUCCESS if all OK. */ -UNIV_INTERN dberr_t fts_write_node( /*===========*/ @@ -3849,11 +3927,15 @@ fts_write_node( ib_time_t start_time; doc_id_t last_doc_id; doc_id_t first_doc_id; + char table_name[MAX_FULL_NAME_LEN]; if (*graph) { info = (*graph)->info; } else { info = pars_info_create(); + + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "index_table_name", table_name); } pars_info_bind_varchar_literal(info, "token", word->f_str, word->f_len); @@ -3879,13 +3961,14 @@ fts_write_node( DATA_BLOB, DATA_BINARY_TYPE); if (!*graph) { + *graph = fts_parse_sql( fts_table, info, "BEGIN\n" - "INSERT INTO \"%s\" VALUES " - "(:token, :first_doc_id," - " :last_doc_id, :doc_count, :ilist);"); + "INSERT INTO $index_table_name VALUES" + " (:token, :first_doc_id," + " :last_doc_id, :doc_count, :ilist);"); } start_time = ut_time(); @@ -3910,6 +3993,7 @@ fts_sync_add_deleted_cache( pars_info_t* info; que_t* graph; fts_table_t fts_table; + char table_name[MAX_FULL_NAME_LEN]; doc_id_t dummy = 0; dberr_t error = DB_SUCCESS; ulint n_elems = ib_vector_size(doc_ids); @@ -3925,10 +4009,13 @@ fts_sync_add_deleted_cache( FTS_INIT_FTS_TABLE( &fts_table, "DELETED_CACHE", FTS_COMMON_TABLE, sync->table); + fts_get_table_name(&fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); + graph = fts_parse_sql( &fts_table, info, - "BEGIN INSERT INTO \"%s\" VALUES (:doc_id);"); + "BEGIN INSERT INTO $table_name VALUES (:doc_id);"); for (i = 0; i < n_elems && error == DB_SUCCESS; ++i) { fts_update_t* update; @@ -4055,11 +4142,8 @@ fts_sync_write_words( n_nodes += ib_vector_size(word->nodes); if (error != DB_SUCCESS && !print_error) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error (%s) writing " - "word node to FTS auxiliary index " - "table.\n", ut_strerr(error)); - + ib::error() << "(" << ut_strerr(error) << ") writing" + " word node to FTS auxiliary index table."; print_error = TRUE; } } @@ -4102,6 +4186,7 @@ fts_sync_write_doc_stat( doc_id_t doc_id; dberr_t error = DB_SUCCESS; ib_uint32_t word_count; + char table_name[MAX_FULL_NAME_LEN]; if (*graph) { info = (*graph)->info; @@ -4124,10 +4209,15 @@ fts_sync_write_doc_stat( FTS_INIT_INDEX_TABLE( &fts_table, "DOC_ID", FTS_INDEX_TABLE, index); + fts_get_table_name(&fts_table, table_name); + + pars_info_bind_id(info, true, "doc_id_table", table_name); + *graph = fts_parse_sql( &fts_table, info, - "BEGIN INSERT INTO \"%s\" VALUES (:doc_id, :count);"); + "BEGIN" + " INSERT INTO $doc_id_table VALUES (:doc_id, :count);"); } for (;;) { @@ -4137,18 +4227,15 @@ fts_sync_write_doc_stat( break; /* Exit the loop. */ } else { - ut_print_timestamp(stderr); if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout writing to FTS doc_id. " - "Retrying!\n"); + ib::warn() << "Lock wait timeout writing to" + " FTS doc_id. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: (%s) " - "while writing to FTS doc_id.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while writing to FTS doc_id."; break; /* Exit the loop. 
*/ } @@ -4249,6 +4336,7 @@ fts_is_word_in_index( { pars_info_t* info; dberr_t error; + char table_name[MAX_FULL_NAME_LEN]; trx->op_info = "looking up word in FTS index"; @@ -4258,6 +4346,8 @@ fts_is_word_in_index( info = pars_info_create(); } + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); pars_info_bind_function(info, "my_func", fts_lookup_word, found); pars_info_bind_varchar_literal(info, "word", word->f_str, word->f_len); @@ -4268,8 +4358,8 @@ fts_is_word_in_index( "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" " SELECT doc_count\n" - " FROM \"%s\"\n" - " WHERE word = :word " + " FROM $table_name\n" + " WHERE word = :word" " ORDER BY first_doc_id;\n" "BEGIN\n" "\n" @@ -4290,18 +4380,15 @@ fts_is_word_in_index( break; /* Exit the loop. */ } else { - ut_print_timestamp(stderr); if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS index. " - "Retrying!\n"); + ib::warn() << "Lock wait timeout reading" + " FTS index. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: (%s) " - "while reading FTS index.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while reading FTS index."; break; /* Exit the loop. */ } @@ -4330,12 +4417,10 @@ fts_sync_begin( sync->trx = trx_allocate_for_background(); if (fts_enable_diag_print) { - ib_logf(IB_LOG_LEVEL_INFO, - "FTS SYNC for table %s, deleted count: %ld size: " - "%lu bytes", - sync->table->name, - ib_vector_size(cache->deleted_doc_ids), - cache->total_size); + ib::info() << "FTS SYNC for table " << sync->table->name + << ", deleted count: " + << ib_vector_size(cache->deleted_doc_ids) + << " size: " << cache->total_size << " bytes"; } } @@ -4356,8 +4441,7 @@ fts_sync_index( trx->op_info = "doing SYNC index"; if (fts_enable_diag_print) { - ib_logf(IB_LOG_LEVEL_INFO, - "SYNC words: %ld", rbt_size(index_cache->words)); + ib::info() << "SYNC words: " << rbt_size(index_cache->words); } ut_ad(rbt_validate(index_cache->words)); @@ -4454,18 +4538,16 @@ fts_sync_commit( fts_sql_rollback(trx); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) during SYNC.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ") during SYNC."; } if (fts_enable_diag_print && elapsed_time) { - ib_logf(IB_LOG_LEVEL_INFO, - "SYNC for table %s: SYNC time : %lu secs: " - "elapsed %lf ins/sec", - sync->table->name, - (ulong) (ut_time() - sync->start_time), - (double) n_nodes/ (double) elapsed_time); + ib::info() << "SYNC for table " << sync->table->name + << ": SYNC time: " + << (ut_time() - sync->start_time) + << " secs: elapsed " + << (double) n_nodes / elapsed_time + << " ins/sec"; } trx_free_for_background(trx); @@ -4572,10 +4654,6 @@ begin_sync: index_cache = static_cast( ib_vector_get(cache->indexes, i)); - if (index_cache->index->to_be_dropped) { - continue; - } - error = fts_sync_index(sync, index_cache); if (error != DB_SUCCESS && !sync->interrupted) { @@ -4608,7 +4686,7 @@ begin_sync: end_sync: if (error == DB_SUCCESS && !sync->interrupted) { error = fts_sync_commit(sync); - } else { + } else { fts_sync_rollback(sync); } @@ -4637,7 +4715,6 @@ FTS auxiliary INDEX table and clear the cache at the end. @param[in] unlock_cache whether unlock cache when write node @param[in] wait whether wait for existing sync to finish @return DB_SUCCESS on success, error code on failure. 
 */
-UNIV_INTERN
 dberr_t
 fts_sync_table(
 	dict_table_t*	table,
@@ -4655,40 +4732,132 @@ fts_sync_table(
 	return(err);
 }
 
-/********************************************************************
-Process next token from document starting at the given position, i.e., add
-the token's start position to the token's list of positions.
-@return number of characters handled in this call */
-static
-ulint
-fts_process_token(
-/*==============*/
-	fts_doc_t*	doc,		/* in/out: document to
-					tokenize */
-	fts_doc_t*	result,		/* out: if provided, save
-					result here */
-	ulint		start_pos,	/*!< in: start position in text */
-	ulint		add_pos)	/*!< in: add this position to all
-					tokens from this tokenization */
+/** Check FTS token:
+1. for an ngram token, check whether the token contains any words in stopwords
+2. for a non-ngram token, check if it is a stopword or shorter than fts_min_token_size
+or longer than fts_max_token_size.
+@param[in]	token		token string
+@param[in]	stopwords	stopwords rb tree
+@param[in]	is_ngram	is ngram parser
+@param[in]	cs		token charset
+@retval	true	if it is not a stopword and its length is in range
+@retval	false	if it is a stopword or its length is out of range */
+bool
+fts_check_token(
+	const fts_string_t*	token,
+	const ib_rbt_t*		stopwords,
+	bool			is_ngram,
+	const CHARSET_INFO*	cs)
 {
-	ulint		ret;
-	fts_string_t	str;
-	ulint		offset = 0;
-	fts_doc_t*	result_doc;
+	ut_ad(cs != NULL || stopwords == NULL);
 
-	/* Determine where to save the result. */
-	result_doc = (result) ? result : doc;
+	if (!is_ngram) {
+		ib_rbt_bound_t	parent;
 
-	/* The length of a string in characters is set here only. */
-	ret = innobase_mysql_fts_get_token(
-		doc->charset, doc->text.f_str + start_pos,
-		doc->text.f_str + doc->text.f_len, &str, &offset);
+		if (token->f_n_char < fts_min_token_size
+		    || token->f_n_char > fts_max_token_size
+		    || (stopwords != NULL
+			&& rbt_search(stopwords, &parent, token) == 0)) {
+			return(false);
+		} else {
+			return(true);
+		}
+	}
+	/* Check token for ngram. */
+	DBUG_EXECUTE_IF(
+		"fts_instrument_ignore_ngram_check",
+		return(true);
+	);
+
+	/* We ignore fts_min_token_size when ngram */
+	ut_ad(token->f_n_char > 0
+	      && token->f_n_char <= fts_max_token_size);
+
+	if (stopwords == NULL) {
+		return(true);
+	}
+
+	/* Ngram checks whether the token contains any words in stopwords.
+	We can't simply use CONTAIN to search in stopwords, because it's
+	built on COMPARE. So we need to tokenize the token into words
+	from unigram to f_n_char, and check them separately.
+	*/
+	for (ulint ngram_token_size = 1; ngram_token_size <= token->f_n_char;
+	     ngram_token_size ++) {
+		const char*	start;
+		const char*	next;
+		const char*	end;
+		ulint		char_len;
+		ulint		n_chars;
+
+		start = reinterpret_cast<const char*>(token->f_str);
+		next = start;
+		end = start + token->f_len;
+		n_chars = 0;
+
+		while (next < end) {
+			/* TODO: JAN: MySQL 5.7 used my_mbcharlen_ptr here
			char_len = my_mbcharlen_ptr(cs, next, end);*/
+			char_len = cs->cset->charlen(cs, (const uchar*)next, (const uchar*)end);
+
+			if (next + char_len > end || char_len == 0) {
+				break;
+			} else {
+				/* Skip SPACE */
+				if (char_len == 1 && *next == ' ') {
+					start = next + 1;
+					next = start;
+					n_chars = 0;
+
+					continue;
+				}
+
+				next += char_len;
+				n_chars++;
+			}
+
+			if (n_chars == ngram_token_size) {
+				fts_string_t	ngram_token;
+				ngram_token.f_str =
+					reinterpret_cast<byte*>(
+					const_cast<char*>(start));
+				ngram_token.f_len = next - start;
+				ngram_token.f_n_char = ngram_token_size;
+
+				ib_rbt_bound_t	parent;
+				if (rbt_search(stopwords, &parent,
+					       &ngram_token) == 0) {
+					return(false);
+				}
+
+				/* Move a char forward */
+				/* JAN: TODO: MySQL 5.7
				start += my_mbcharlen_ptr(cs, start, end); */
+				start += cs->cset->charlen(cs, (const uchar*)next, (const uchar*)end);
+				n_chars = ngram_token_size - 1;
+			}
+		}
+	}
+
+	return(true);
+}
+
+/** Add the token and its start position to the token's list of positions.
+@param[in,out]	result_doc	result doc rb tree
+@param[in]	str		token string
+@param[in]	position	token position */
+static
+void
+fts_add_token(
+	fts_doc_t*	result_doc,
+	fts_string_t	str,
+	ulint		position)
+{
 	/* Ignore string whose character number is less than
 	"fts_min_token_size" or more than "fts_max_token_size" */
-	if (str.f_n_char >= fts_min_token_size
-	    && str.f_n_char <= fts_max_token_size) {
+	if (fts_check_token(&str, NULL, result_doc->is_ngram,
+			    result_doc->charset)) {
 
 		mem_heap_t*	heap;
 		fts_string_t	t_str;
@@ -4700,14 +4869,15 @@ fts_process_token(
 
 		t_str.f_n_char = str.f_n_char;
 
-		t_str.f_len = str.f_len * doc->charset->casedn_multiply + 1;
+		t_str.f_len = str.f_len * result_doc->charset->casedn_multiply + 1;
 
 		t_str.f_str = static_cast<byte*>(
 			mem_heap_alloc(heap, t_str.f_len));
 
 		newlen = innobase_fts_casedn_str(
-			doc->charset, (char*) str.f_str, str.f_len,
-			(char*) t_str.f_str, t_str.f_len);
+			result_doc->charset,
+			reinterpret_cast<char*>(str.f_str), str.f_len,
+			reinterpret_cast<char*>(t_str.f_str), t_str.f_len);
 
 		t_str.f_len = newlen;
 		t_str.f_str[newlen] = 0;
@@ -4724,56 +4894,247 @@ fts_process_token(
 			new_token.positions = ib_vector_create(
 				result_doc->self_heap, sizeof(ulint), 32);
 
-			ut_a(new_token.text.f_n_char >= fts_min_token_size);
-			ut_a(new_token.text.f_n_char <= fts_max_token_size);
-
 			parent.last = rbt_add_node(
 				result_doc->tokens, &parent, &new_token);
 
 			ut_ad(rbt_validate(result_doc->tokens));
 		}
 
-#ifdef FTS_CHARSET_DEBUG
-		offset += start_pos + add_pos;
-#endif /* FTS_CHARSET_DEBUG */
-
-		offset += start_pos + ret - str.f_len + add_pos;
-
 		token = rbt_value(fts_token_t, parent.last);
-		ib_vector_push(token->positions, &offset);
+		ib_vector_push(token->positions, &position);
 	}
+}
+
+/********************************************************************
+Process next token from document starting at the given position, i.e., add
+the token's start position to the token's list of positions.
+@return number of characters handled in this call */ +static +ulint +fts_process_token( +/*==============*/ + fts_doc_t* doc, /* in/out: document to + tokenize */ + fts_doc_t* result, /* out: if provided, save + result here */ + ulint start_pos, /*!< in: start position in text */ + ulint add_pos) /*!< in: add this position to all + tokens from this tokenization */ +{ + ulint ret; + fts_string_t str; + ulint position; + fts_doc_t* result_doc; + byte buf[FTS_MAX_WORD_LEN + 1]; + + str.f_str = buf; + + /* Determine where to save the result. */ + result_doc = (result != NULL) ? result : doc; + + /* The length of a string in characters is set here only. */ + + ret = innobase_mysql_fts_get_token( + doc->charset, doc->text.f_str + start_pos, + doc->text.f_str + doc->text.f_len, &str); + + position = start_pos + ret - str.f_len + add_pos; + + fts_add_token(result_doc, str, position); return(ret); } +/*************************************************************//** +Get token char size by charset +@return token size */ +ulint +fts_get_token_size( +/*===============*/ + const CHARSET_INFO* cs, /*!< in: Character set */ + const char* token, /*!< in: token */ + ulint len) /*!< in: token length */ +{ + char* start; + char* end; + ulint size = 0; + + /* const_cast is for reinterpret_cast below, or it will fail. */ + start = const_cast(token); + end = start + len; + while (start < end) { + int ctype; + int mbl; + + mbl = cs->cset->ctype( + cs, &ctype, + reinterpret_cast(start), + reinterpret_cast(end)); + + size++; + + start += mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1); + } + + return(size); +} + +/*************************************************************//** +FTS plugin parser 'myql_parser' callback function for document tokenize. +Refer to 'st_mysql_ftparser_param' for more detail. +@return always returns 0 */ +int +fts_tokenize_document_internal( +/*===========================*/ + MYSQL_FTPARSER_PARAM* param, /*!< in: parser parameter */ + /* JAN: TODO: MySQL 5.7 + char* doc,*/ + const char* doc,/*!< in/out: document */ + int len) /*!< in: document length */ +{ + fts_string_t str; + byte buf[FTS_MAX_WORD_LEN + 1]; + /* JAN: TODO: MySQL 5.7 + MYSQL_FTPARSER_BOOLEAN_INFO bool_info = + { FT_TOKEN_WORD, 0, 0, 0, 0, 0, ' ', 0 }; + */ + MYSQL_FTPARSER_BOOLEAN_INFO bool_info = + { FT_TOKEN_WORD, 0, 0, 0, 0, 0, (char *)" "}; + + ut_ad(len >= 0); + + str.f_str = buf; + + for (ulint i = 0, inc = 0; i < static_cast(len); i += inc) { + inc = innobase_mysql_fts_get_token( + const_cast(param->cs), + (uchar*)(doc) + i, + (uchar*)(doc) + len, + &str); + + if (str.f_len > 0) { + /* JAN: TODO: MySQL 5.7 + bool_info.position = + static_cast(i + inc - str.f_len); + ut_ad(bool_info.position >= 0); + */ + + /* Stop when add word fails */ + if (param->mysql_add_word( + param, + reinterpret_cast(str.f_str), + static_cast(str.f_len), + &bool_info)) { + break; + } + } + } + + return(0); +} + +/******************************************************************//** +FTS plugin parser 'myql_add_word' callback function for document tokenize. +Refer to 'st_mysql_ftparser_param' for more detail. 
+@return always returns 0 */ +static +int +fts_tokenize_add_word_for_parser( +/*=============================*/ + MYSQL_FTPARSER_PARAM* param, /* in: parser paramter */ + const char* word, /* in: token word */ + int word_len, /* in: word len */ + MYSQL_FTPARSER_BOOLEAN_INFO* boolean_info) /* in: word boolean info */ +{ + fts_string_t str; + fts_tokenize_param_t* fts_param; + fts_doc_t* result_doc; + ulint position; + + fts_param = static_cast(param->mysql_ftparam); + result_doc = fts_param->result_doc; + ut_ad(result_doc != NULL); + + str.f_str = (byte*)(word); + str.f_len = word_len; + str.f_n_char = fts_get_token_size( + const_cast(param->cs), word, word_len); + + /* JAN: TODO: MySQL 5.7 FTS + ut_ad(boolean_info->position >= 0); + position = boolean_info->position + fts_param->add_pos; + */ + position = fts_param->add_pos; + + fts_add_token(result_doc, str, position); + + return(0); +} + +/******************************************************************//** +Parse a document using an external / user supplied parser */ +static +void +fts_tokenize_by_parser( +/*===================*/ + fts_doc_t* doc, /* in/out: document to tokenize */ + st_mysql_ftparser* parser, /* in: plugin fts parser */ + fts_tokenize_param_t* fts_param) /* in: fts tokenize param */ +{ + MYSQL_FTPARSER_PARAM param; + + ut_a(parser); + + /* Set paramters for param */ + param.mysql_parse = fts_tokenize_document_internal; + param.mysql_add_word = fts_tokenize_add_word_for_parser; + param.mysql_ftparam = fts_param; + param.cs = doc->charset; + param.doc = reinterpret_cast(doc->text.f_str); + param.length = static_cast(doc->text.f_len); + param.mode= MYSQL_FTPARSER_SIMPLE_MODE; + + PARSER_INIT(parser, ¶m); + parser->parse(¶m); + PARSER_DEINIT(parser, ¶m); +} + /******************************************************************//** Tokenize a document. */ -UNIV_INTERN void fts_tokenize_document( /*==================*/ fts_doc_t* doc, /* in/out: document to tokenize */ - fts_doc_t* result) /* out: if provided, save + fts_doc_t* result, /* out: if provided, save the result token here */ + st_mysql_ftparser* parser) /* in: plugin fts parser */ { - ulint inc; - ut_a(!doc->tokens); ut_a(doc->charset); doc->tokens = rbt_create_arg_cmp( sizeof(fts_token_t), innobase_fts_text_cmp, (void*) doc->charset); - for (ulint i = 0; i < doc->text.f_len; i += inc) { - inc = fts_process_token(doc, result, i, 0); - ut_a(inc > 0); + if (parser != NULL) { + fts_tokenize_param_t fts_param; + + fts_param.result_doc = (result != NULL) ? result : doc; + fts_param.add_pos = 0; + + fts_tokenize_by_parser(doc, parser, &fts_param); + } else { + ulint inc; + + for (ulint i = 0; i < doc->text.f_len; i += inc) { + inc = fts_process_token(doc, result, i, 0); + ut_a(inc > 0); + } } } /******************************************************************//** Continue to tokenize a document. */ -UNIV_INTERN void fts_tokenize_document_next( /*=======================*/ @@ -4781,22 +5142,31 @@ fts_tokenize_document_next( tokenize */ ulint add_pos, /*!< in: add this position to all tokens from this tokenization */ - fts_doc_t* result) /*!< out: if provided, save + fts_doc_t* result, /*!< out: if provided, save the result token here */ + st_mysql_ftparser* parser) /* in: plugin fts parser */ { - ulint inc; - ut_a(doc->tokens); - for (ulint i = 0; i < doc->text.f_len; i += inc) { - inc = fts_process_token(doc, result, i, add_pos); - ut_a(inc > 0); + if (parser) { + fts_tokenize_param_t fts_param; + + fts_param.result_doc = (result != NULL) ? 
result : doc; + fts_param.add_pos = add_pos; + + fts_tokenize_by_parser(doc, parser, &fts_param); + } else { + ulint inc; + + for (ulint i = 0; i < doc->text.f_len; i += inc) { + inc = fts_process_token(doc, result, i, add_pos); + ut_a(inc > 0); + } } } /******************************************************************** Create the vector of fts_get_doc_t instances. */ -UNIV_INTERN ib_vector_t* fts_get_docs_create( /*================*/ @@ -4804,19 +5174,16 @@ fts_get_docs_create( fts_get_doc_t instances */ fts_cache_t* cache) /*!< in: fts cache */ { - ulint i; ib_vector_t* get_docs; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->init_lock, RW_LOCK_X)); + /* We need one instance of fts_get_doc_t per index. */ - get_docs = ib_vector_create( - cache->self_heap, sizeof(fts_get_doc_t), 4); + get_docs = ib_vector_create(cache->self_heap, sizeof(fts_get_doc_t), 4); /* Create the get_doc instance, we need one of these per FTS index. */ - for (i = 0; i < ib_vector_size(cache->indexes); ++i) { + for (ulint i = 0; i < ib_vector_size(cache->indexes); ++i) { dict_index_t** index; fts_get_doc_t* get_doc; @@ -4868,7 +5235,6 @@ fts_get_docs_clear( /*********************************************************************//** Get the initial Doc ID by consulting the CONFIG table @return initial Doc ID */ -UNIV_INTERN doc_id_t fts_init_doc_id( /*============*/ @@ -4943,7 +5309,6 @@ fts_is_index_updated( /*********************************************************************//** Fetch COUNT(*) from specified table. @return the number of rows in the table */ -UNIV_INTERN ulint fts_get_rows_count( /*===============*/ @@ -4954,6 +5319,7 @@ fts_get_rows_count( que_t* graph; dberr_t error; ulint count = 0; + char table_name[MAX_FULL_NAME_LEN]; trx = trx_allocate_for_background(); @@ -4963,13 +5329,16 @@ fts_get_rows_count( pars_info_bind_function(info, "my_func", fts_read_ulint, &count); + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); + graph = fts_parse_sql( fts_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT COUNT(*) " - " FROM \"%s\";\n" + " SELECT COUNT(*)" + " FROM $table_name;\n" "BEGIN\n" "\n" "OPEN c;\n" @@ -4991,18 +5360,14 @@ fts_get_rows_count( } else { fts_sql_rollback(trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS table. " - "Retrying!\n"); + ib::warn() << "lock wait timeout reading" + " FTS table. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: (%s) " - "while reading FTS table.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while reading FTS table."; break; /* Exit the loop. */ } @@ -5123,7 +5488,6 @@ fts_savepoint_free( /*********************************************************************//** Free an FTS trx. */ -UNIV_INTERN void fts_trx_free( /*=========*/ @@ -5167,7 +5531,6 @@ fts_trx_free( /*********************************************************************//** Extract the doc id from the FTS hidden column. @return doc id that was extracted from rec */ -UNIV_INTERN doc_id_t fts_get_doc_id_from_row( /*====================*/ @@ -5191,37 +5554,37 @@ fts_get_doc_id_from_row( return(doc_id); } -/*********************************************************************//** -Extract the doc id from the FTS hidden column. +/** Extract the doc id from the record that belongs to index. 
+@param[in] table table +@param[in] rec record contains FTS_DOC_ID +@param[in] index index of rec +@param[in] heap heap memory @return doc id that was extracted from rec */ -UNIV_INTERN doc_id_t fts_get_doc_id_from_rec( -/*====================*/ - dict_table_t* table, /*!< in: table */ - const rec_t* rec, /*!< in: rec */ - mem_heap_t* heap) /*!< in: heap */ + dict_table_t* table, + const rec_t* rec, + const dict_index_t* index, + mem_heap_t* heap) { ulint len; const byte* data; ulint col_no; doc_id_t doc_id = 0; - dict_index_t* clust_index; ulint offsets_[REC_OFFS_NORMAL_SIZE]; ulint* offsets = offsets_; mem_heap_t* my_heap = heap; ut_a(table->fts->doc_col != ULINT_UNDEFINED); - clust_index = dict_table_get_first_index(table); - rec_offs_init(offsets_); offsets = rec_get_offsets( - rec, clust_index, offsets, ULINT_UNDEFINED, &my_heap); + rec, index, offsets, ULINT_UNDEFINED, &my_heap); + + col_no = dict_col_get_index_pos( + &table->cols[table->fts->doc_col], index); - col_no = dict_col_get_clust_pos( - &table->cols[table->fts->doc_col], clust_index); ut_ad(col_no != ULINT_UNDEFINED); data = rec_get_nth_field(rec, offsets, col_no, &len); @@ -5240,7 +5603,6 @@ fts_get_doc_id_from_rec( /*********************************************************************//** Search the index specific cache for a particular FTS index. @return the index specific cache else NULL */ -UNIV_INTERN fts_index_cache_t* fts_find_index_cache( /*=================*/ @@ -5256,7 +5618,6 @@ fts_find_index_cache( /*********************************************************************//** Search cache for word. @return the word node vector if found else NULL */ -UNIV_INTERN const ib_vector_t* fts_cache_find_word( /*================*/ @@ -5265,12 +5626,12 @@ fts_cache_find_word( { ib_rbt_bound_t parent; const ib_vector_t* nodes = NULL; -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG dict_table_t* table = index_cache->index->table; fts_cache_t* cache = table->fts->cache; - ut_ad(rw_lock_own((rw_lock_t*) &cache->lock, RW_LOCK_EX)); -#endif + ut_ad(rw_lock_own(&cache->lock, RW_LOCK_X)); +#endif /* UNIV_DEBUG */ /* Lookup the word in the rb tree */ if (rbt_search(index_cache->words, &parent, text) == 0) { @@ -5287,20 +5648,15 @@ fts_cache_find_word( /*********************************************************************//** Check cache for deleted doc id. @return TRUE if deleted */ -UNIV_INTERN ibool fts_cache_is_deleted_doc_id( /*========================*/ const fts_cache_t* cache, /*!< in: cache ito search */ doc_id_t doc_id) /*!< in: doc id to search for */ { - ulint i; - -#ifdef UNIV_SYNC_DEBUG ut_ad(mutex_own(&cache->deleted_lock)); -#endif - for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { + for (ulint i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { const fts_update_t* update; update = static_cast( @@ -5317,16 +5673,13 @@ fts_cache_is_deleted_doc_id( /*********************************************************************//** Append deleted doc ids to vector. 
*/ -UNIV_INTERN void fts_cache_append_deleted_doc_ids( /*=============================*/ const fts_cache_t* cache, /*!< in: cache to use */ ib_vector_t* vector) /*!< in: append to this vector */ { - ulint i; - - mutex_enter((ib_mutex_t*) &cache->deleted_lock); + mutex_enter(const_cast(&cache->deleted_lock)); if (cache->deleted_doc_ids == NULL) { mutex_exit((ib_mutex_t*) &cache->deleted_lock); @@ -5334,7 +5687,7 @@ fts_cache_append_deleted_doc_ids( } - for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { + for (ulint i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) { fts_update_t* update; update = static_cast( @@ -5351,7 +5704,6 @@ Wait for the background thread to start. We poll to detect change of state, which is acceptable, since the wait should happen only once during startup. @return true if the thread started else FALSE (i.e timed out) */ -UNIV_INTERN ibool fts_wait_for_background_thread_to_start( /*====================================*/ @@ -5397,10 +5749,9 @@ fts_wait_for_background_thread_to_start( } if (count >= FTS_BACKGROUND_THREAD_WAIT_COUNT) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error the background thread " - "for the FTS table %s refuses to start\n", - table->name); + ib::error() << "The background thread for the FTS" + " table " << table->name + << " refuses to start"; count = 0; } @@ -5411,7 +5762,6 @@ fts_wait_for_background_thread_to_start( /*********************************************************************//** Add the FTS document id hidden column. */ -UNIV_INTERN void fts_add_doc_id_column( /*==================*/ @@ -5429,16 +5779,23 @@ fts_add_doc_id_column( DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_HAS_DOC_ID); } -/*********************************************************************//** -Update the query graph with a new document id. -@return Doc ID used */ -UNIV_INTERN +/** Add new fts doc id to the update vector. +@param[in] table the table that contains the FTS index. +@param[in,out] ufield the fts doc id field in the update vector. + No new memory is allocated for this in this + function. +@param[in,out] next_doc_id the fts doc id that has been added to the + update vector. If 0, a new fts doc id is + automatically generated. The memory provided + for this argument will be used by the update + vector. Ensure that the life time of this + memory matches that of the update vector. +@return the fts doc id used in the update vector */ doc_id_t fts_update_doc_id( -/*==============*/ - dict_table_t* table, /*!< in: table */ - upd_field_t* ufield, /*!< out: update node */ - doc_id_t* next_doc_id) /*!< in/out: buffer for writing */ + dict_table_t* table, + upd_field_t* ufield, + doc_id_t* next_doc_id) { doc_id_t doc_id; dberr_t error = DB_SUCCESS; @@ -5470,6 +5827,7 @@ fts_update_doc_id( fts_write_doc_id((byte*) next_doc_id, doc_id); ufield->new_val.data = next_doc_id; + ufield->new_val.ext = 0; } return(doc_id); @@ -5479,7 +5837,6 @@ fts_update_doc_id( Check if the table has an FTS index. This is the non-inline version of dict_table_has_fts_index(). @return TRUE if table has an FTS index */ -UNIV_INTERN ibool fts_dict_table_has_fts_index( /*=========================*/ @@ -5488,61 +5845,78 @@ fts_dict_table_has_fts_index( return(dict_table_has_fts_index(table)); } +/** fts_t constructor. 
+@param[in] table table with FTS indexes +@param[in,out] heap memory heap where 'this' is stored */ +fts_t::fts_t( + const dict_table_t* table, + mem_heap_t* heap) + : + bg_threads(0), + fts_status(0), + add_wq(NULL), + cache(NULL), + doc_col(ULINT_UNDEFINED), + fts_heap(heap) +{ + ut_a(table->fts == NULL); + + mutex_create(LATCH_ID_FTS_BG_THREADS, &bg_threads_mutex); + + ib_alloc_t* heap_alloc = ib_heap_allocator_create(fts_heap); + + indexes = ib_vector_create(heap_alloc, sizeof(dict_index_t*), 4); + + dict_table_get_all_fts_indexes(table, indexes); +} + +/** fts_t destructor. */ +fts_t::~fts_t() +{ + mutex_free(&bg_threads_mutex); + + ut_ad(add_wq == NULL); + + if (cache != NULL) { + fts_cache_clear(cache); + fts_cache_destroy(cache); + cache = NULL; + } + + /* There is no need to call ib_vector_free() on this->indexes + because it is stored in this->fts_heap. */ +} + /*********************************************************************//** Create an instance of fts_t. @return instance of fts_t */ -UNIV_INTERN fts_t* fts_create( /*=======*/ dict_table_t* table) /*!< in/out: table with FTS indexes */ { fts_t* fts; - ib_alloc_t* heap_alloc; mem_heap_t* heap; - ut_a(!table->fts); - heap = mem_heap_create(512); fts = static_cast(mem_heap_alloc(heap, sizeof(*fts))); - memset(fts, 0x0, sizeof(*fts)); - - fts->fts_heap = heap; - - fts->doc_col = ULINT_UNDEFINED; - - mutex_create( - fts_bg_threads_mutex_key, &fts->bg_threads_mutex, - SYNC_FTS_BG_THREADS); - - heap_alloc = ib_heap_allocator_create(heap); - fts->indexes = ib_vector_create(heap_alloc, sizeof(dict_index_t*), 4); - dict_table_get_all_fts_indexes(table, fts->indexes); + new(fts) fts_t(table, heap); return(fts); } /*********************************************************************//** Free the FTS resources. */ -UNIV_INTERN void fts_free( /*=====*/ dict_table_t* table) /*!< in/out: table with FTS indexes */ { - fts_t* fts = table->fts; - - mutex_free(&fts->bg_threads_mutex); - - ut_ad(!fts->add_wq); + fts_t* fts = table->fts; - if (fts->cache) { - fts_cache_clear(fts->cache); - fts_cache_destroy(fts->cache); - fts->cache = NULL; - } + fts->~fts_t(); mem_heap_free(fts->fts_heap); @@ -5551,7 +5925,6 @@ fts_free( /*********************************************************************//** Signal FTS threads to initiate shutdown. */ -UNIV_INTERN void fts_start_shutdown( /*===============*/ @@ -5569,7 +5942,6 @@ fts_start_shutdown( /*********************************************************************//** Wait for FTS threads to shutdown. */ -UNIV_INTERN void fts_shutdown( /*=========*/ @@ -5614,7 +5986,6 @@ fts_savepoint_copy( /*********************************************************************//** Take a FTS savepoint. */ -UNIV_INTERN void fts_savepoint_take( /*===============*/ @@ -5674,7 +6045,6 @@ fts_savepoint_lookup( Release the savepoint data identified by name. All savepoints created after the named savepoint are kept. @return DB_SUCCESS or error code */ -UNIV_INTERN void fts_savepoint_release( /*==================*/ @@ -5717,7 +6087,6 @@ fts_savepoint_release( /**********************************************************************//** Refresh last statement savepoint. */ -UNIV_INTERN void fts_savepoint_laststmt_refresh( /*===========================*/ @@ -5793,7 +6162,6 @@ fts_undo_last_stmt( /**********************************************************************//** Rollback to savepoint indentified by name. 
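The rewritten fts_create()/fts_free() pair above allocates raw storage for fts_t from a mem_heap_t, constructs the object in place with placement new, and later calls the destructor explicitly before the heap is released. A minimal sketch of that idiom, with plain malloc()/free() standing in for the heap and an illustrative type name:

	#include <cstdlib>
	#include <new>

	struct widget {
		widget() : refs(0) {}
		~widget() {}
		int refs;
	};

	int main()
	{
		void*	buf = std::malloc(sizeof(widget));	/* raw storage */
		widget*	w = new (buf) widget();	/* placement new: construct in place */

		w->~widget();		/* explicit destructor call ... */
		std::free(buf);		/* ... then release the raw storage */
		return 0;
	}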
@return DB_SUCCESS or error code */ -UNIV_INTERN void fts_savepoint_rollback_last_stmt( /*=============================*/ @@ -5843,7 +6211,6 @@ fts_savepoint_rollback_last_stmt( /**********************************************************************//** Rollback to savepoint indentified by name. @return DB_SUCCESS or error code */ -UNIV_INTERN void fts_savepoint_rollback( /*===================*/ @@ -5904,16 +6271,17 @@ fts_savepoint_rollback( } } -/**********************************************************************//** -Check if a table is an FTS auxiliary table name. -@return TRUE if the name matches an auxiliary table name pattern */ +/** Check if a table is an FTS auxiliary table name. +@param[out] table FTS table info +@param[in] name Table name +@param[in] len Length of table name +@return true if the name matches an auxiliary table name pattern */ static -ibool +bool fts_is_aux_table_name( -/*==================*/ - fts_aux_table_t*table, /*!< out: table info */ - const char* name, /*!< in: table name */ - ulint len) /*!< in: length of table name */ + fts_aux_table_t* table, + const char* name, + ulint len) { const char* ptr; char* end; @@ -5943,14 +6311,14 @@ fts_is_aux_table_name( /* Try and read the table id. */ if (!fts_read_object_id(&table->parent_id, ptr)) { - return(FALSE); + return(false); } /* Skip the table id. */ ptr = static_cast(memchr(ptr, '_', len)); if (ptr == NULL) { - return(FALSE); + return(false); } /* Skip the underscore. */ @@ -5962,7 +6330,7 @@ fts_is_aux_table_name( for (i = 0; fts_common_tables[i] != NULL; ++i) { if (strncmp(ptr, fts_common_tables[i], len) == 0) { - return(TRUE); + return(true); } } @@ -5974,14 +6342,14 @@ fts_is_aux_table_name( /* Try and read the index id. */ if (!fts_read_object_id(&table->index_id, ptr)) { - return(FALSE); + return(false); } /* Skip the table id. */ ptr = static_cast(memchr(ptr, '_', len)); if (ptr == NULL) { - return(FALSE); + return(false); } /* Skip the underscore. */ @@ -5990,20 +6358,20 @@ fts_is_aux_table_name( len = end - ptr; /* Search the FT index specific array. */ - for (i = 0; fts_index_selector[i].value; ++i) { + for (i = 0; i < FTS_NUM_AUX_INDEX; ++i) { if (strncmp(ptr, fts_get_suffix(i), len) == 0) { - return(TRUE); + return(true); } } /* Other FT index specific table(s). */ if (strncmp(ptr, "DOC_ID", len) == 0) { - return(TRUE); + return(true); } } - return(FALSE); + return(false); } /**********************************************************************//** @@ -6107,7 +6475,6 @@ fts_set_hex_format( /*****************************************************************//** Update the DICT_TF2_FTS_AUX_HEX_NAME flag in SYS_TABLES. @return DB_SUCCESS or error code. 
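For reference, fts_is_aux_table_name() above accepts names of the form FTS_<parent table id>_CONFIG (common tables) or FTS_<parent table id>_<index id>_INDEX_<n> (index tables), where the ids may be encoded in hex or, for pre-upgrade tables, in decimal. The following is a rough standalone sketch of the common-table case only; it is illustrative, ignores the database-name prefix and index tables, and hard-codes a suffix list standing in for fts_common_tables:

	#include <cstdio>
	#include <cstring>

	/* Illustrative only: accept "FTS_<up to 16 hex digits>_<SUFFIX>"
	where SUFFIX is one of the common auxiliary table suffixes. */
	static bool
	looks_like_fts_common_aux(const char* name)
	{
		static const char* suffixes[] = {
			"BEING_DELETED", "BEING_DELETED_CACHE",
			"CONFIG", "DELETED", "DELETED_CACHE", NULL };

		unsigned long long	id;
		int			consumed = 0;

		if (std::sscanf(name, "FTS_%16llx_%n", &id, &consumed) != 1
		    || consumed == 0) {
			return false;
		}

		for (int i = 0; suffixes[i] != NULL; ++i) {
			if (std::strcmp(name + consumed, suffixes[i]) == 0) {
				return true;
			}
		}

		return false;
	}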
*/ -UNIV_INTERN dberr_t fts_update_hex_format_flag( /*=======================*/ @@ -6126,8 +6493,8 @@ fts_update_hex_format_flag( "PROCEDURE UPDATE_HEX_FORMAT_FLAG() IS\n" "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS\n" - " SELECT MIX_LEN " - " FROM SYS_TABLES " + " SELECT MIX_LEN" + " FROM SYS_TABLES" " WHERE ID = :table_id FOR UPDATE;" "\n" "BEGIN\n" @@ -6162,7 +6529,7 @@ fts_update_hex_format_flag( ut_a(flags2 != ULINT32_UNDEFINED); - return (err); + return(err); } /*********************************************************************//** @@ -6178,7 +6545,7 @@ fts_rename_one_aux_table_to_hex_format( { const char* ptr; fts_table_t fts_table; - char* new_name; + char new_name[MAX_FULL_NAME_LEN]; dberr_t error; ptr = strchr(aux_table->name, '/'); @@ -6219,12 +6586,12 @@ fts_rename_one_aux_table_to_hex_format( ut_a(fts_table.suffix != NULL); - fts_table.parent = parent_table->name; + fts_table.parent = parent_table->name.m_name; fts_table.table_id = aux_table->parent_id; fts_table.index_id = aux_table->index_id; fts_table.table = parent_table; - new_name = fts_get_table_name(&fts_table); + fts_get_table_name(&fts_table, new_name); ut_ad(strcmp(new_name, aux_table->name) != 0); if (trx_get_dict_operation(trx) == TRX_DICT_OP_NONE) { @@ -6235,19 +6602,15 @@ fts_rename_one_aux_table_to_hex_format( FALSE); if (error != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to rename aux table \'%s\' to " - "new format \'%s\'. ", - aux_table->name, new_name); + ib::warn() << "Failed to rename aux table '" + << aux_table->name << "' to new format '" + << new_name << "'."; } else { - ib_logf(IB_LOG_LEVEL_INFO, - "Renamed aux table \'%s\' to \'%s\'.", - aux_table->name, new_name); + ib::info() << "Renamed aux table '" << aux_table->name + << "' to '" << new_name << "'."; } - mem_free(new_name); - - return (error); + return(error); } /**********************************************************************//** @@ -6276,12 +6639,10 @@ fts_rename_aux_tables_to_hex_format_low( error = fts_update_hex_format_flag(trx, parent_table->id, true); if (error != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Setting parent table %s to hex format failed.", - parent_table->name); - + ib::warn() << "Setting parent table " << parent_table->name + << " to hex format failed."; fts_sql_rollback(trx); - return (error); + return(error); } DICT_TF2_FLAG_SET(parent_table, DICT_TF2_FTS_AUX_HEX_NAME); @@ -6312,10 +6673,9 @@ fts_rename_aux_tables_to_hex_format_low( if (error != DB_SUCCESS) { dict_table_close(table, TRUE, FALSE); - ib_logf(IB_LOG_LEVEL_WARN, - "Failed to rename one aux table %s " - "Will revert all successful rename " - "operations.", aux_table->name); + ib::warn() << "Failed to rename one aux table " + << aux_table->name << ". Will revert" + " all successful rename operations."; fts_sql_rollback(trx); break; @@ -6325,9 +6685,8 @@ fts_rename_aux_tables_to_hex_format_low( dict_table_close(table, TRUE, FALSE); if (error != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Setting aux table %s to hex format failed.", - aux_table->name); + ib::warn() << "Setting aux table " << aux_table->name + << " to hex format failed."; fts_sql_rollback(trx); break; @@ -6336,10 +6695,13 @@ fts_rename_aux_tables_to_hex_format_low( if (error != DB_SUCCESS) { ut_ad(count != ib_vector_size(tables)); + /* If rename fails, thr trx would be rolled back, we can't use it any more, we'll start a new background trx to do the reverting. 
*/ - ut_a(trx->state == TRX_STATE_NOT_STARTED); + + ut_ad(!trx_is_started(trx)); + bool not_rename = false; /* Try to revert those succesful rename operations @@ -6374,7 +6736,7 @@ fts_rename_aux_tables_to_hex_format_low( trx_start_for_ddl(trx_bg, TRX_DICT_OP_TABLE); DICT_TF2_FLAG_UNSET(table, DICT_TF2_FTS_AUX_HEX_NAME); - err = row_rename_table_for_mysql(table->name, + err = row_rename_table_for_mysql(table->name.m_name, aux_table->name, trx_bg, FALSE); @@ -6382,9 +6744,9 @@ fts_rename_aux_tables_to_hex_format_low( dict_table_close(table, TRUE, FALSE); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, "Failed to revert " - "table %s. Please revert manually.", - table->name); + ib::warn() << "Failed to revert table " + << table->name << ". Please revert" + " manually."; fts_sql_rollback(trx_bg); trx_free_for_background(trx_bg); /* Continue to clear aux tables' flags2 */ @@ -6399,7 +6761,7 @@ fts_rename_aux_tables_to_hex_format_low( DICT_TF2_FLAG_UNSET(parent_table, DICT_TF2_FTS_AUX_HEX_NAME); } - return (error); + return(error); } /**********************************************************************//** @@ -6413,14 +6775,19 @@ fts_fake_hex_to_dec( { ib_id_t dec_id = 0; char tmp_id[FTS_AUX_MIN_TABLE_ID_LENGTH]; - int ret MY_ATTRIBUTE((unused)); - ret = sprintf(tmp_id, UINT64PFx, id); +#ifdef UNIV_DEBUG + int ret = +#endif /* UNIV_DEBUG */ + sprintf(tmp_id, UINT64PFx, id); ut_ad(ret == 16); +#ifdef UNIV_DEBUG + ret = +#endif /* UNIV_DEBUG */ #ifdef _WIN32 - ret = sscanf(tmp_id, "%016llu", &dec_id); + sscanf(tmp_id, "%016llu", &dec_id); #else - ret = sscanf(tmp_id, "%016" PRIu64, &dec_id); + sscanf(tmp_id, "%016llu", &dec_id); #endif /* _WIN32 */ ut_ad(ret == 1); @@ -6486,7 +6853,7 @@ fts_set_index_corrupt( } for (ulint j = 0; j < ib_vector_size(fts->indexes); j++) { - dict_index_t* index = static_cast( + dict_index_t* index = static_cast( ib_vector_getp_const(fts->indexes, j)); if (index->id == id) { dict_set_corrupted(index, trx, @@ -6591,12 +6958,10 @@ fts_rename_aux_tables_to_hex_format( if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Rollback operations on all aux tables of table %s. " - "All the fts index associated with the table are " - "marked as corrupted. Please rebuild the " - "index again.", parent_table->name); - fts_sql_rollback(trx_rename); + ib::warn() << "Rollback operations on all aux tables of " + "table "<< parent_table->name << ". All the fts index " + "associated with the table are marked as corrupted. " + "Please rebuild the index again."; /* Corrupting the fts index related to parent table. */ trx_t* trx_corrupt; @@ -6626,25 +6991,18 @@ fts_set_parent_hex_format_flag( { if (!DICT_TF2_FLAG_IS_SET(parent_table, DICT_TF2_FTS_AUX_HEX_NAME)) { - DBUG_EXECUTE_IF("parent_table_flag_fail", - ib_logf(IB_LOG_LEVEL_FATAL, - "Setting parent table %s to hex format " - "failed. Please try to restart the server " - "again, if it doesn't work, the system " - "tables might be corrupted.", - parent_table->name); - return;); + DBUG_EXECUTE_IF("parent_table_flag_fail", DBUG_SUICIDE();); dberr_t err = fts_update_hex_format_flag( trx, parent_table->id, true); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Setting parent table %s to hex format " - "failed. Please try to restart the server " - "again, if it doesn't work, the system " - "tables might be corrupted.", - parent_table->name); + ib::fatal() << "Setting parent table " + << parent_table->name + << "to hex format failed. 
Please try " + << "to restart the server again, if it " + << "doesn't work, the system tables " + << "might be corrupted."; } else { DICT_TF2_FLAG_SET( parent_table, DICT_TF2_FTS_AUX_HEX_NAME); @@ -6682,15 +7040,16 @@ fts_drop_obsolete_aux_table_from_vector( failure, since server would try to drop it on next restart, even if the table was broken. */ - ib_logf(IB_LOG_LEVEL_WARN, - "Fail to drop obsolete aux table '%s', which " - "is harmless. will try to drop it on next " - "restart.", aux_drop_table->name); + ib::warn() << "Failed to drop obsolete aux table " + << aux_drop_table->name << ", which is " + << "harmless. will try to drop it on next " + << "restart."; + fts_sql_rollback(trx_drop); } else { - ib_logf(IB_LOG_LEVEL_INFO, - "Dropped obsolete aux table '%s'.", - aux_drop_table->name); + ib::info() << "Dropped obsolete aux" + " table '" << aux_drop_table->name + << "'."; fts_sql_commit(trx_drop); } @@ -6716,16 +7075,22 @@ fts_drop_aux_table_from_vector( /* Check for the validity of the parent table */ if (!fts_valid_parent_table(aux_drop_table)) { - ib_logf(IB_LOG_LEVEL_WARN, - "Parent table of FTS auxiliary table %s not " - "found.", aux_drop_table->name); + + ib::warn() << "Parent table of FTS auxiliary table " + << aux_drop_table->name << " not found."; + dberr_t err = fts_drop_table(trx, aux_drop_table->name); if (err == DB_FAIL) { - char* path = fil_make_ibd_name( - aux_drop_table->name, false); - os_file_delete_if_exists(innodb_file_data_key, - path); - mem_free(path); + + char* path = fil_make_filepath( + NULL, aux_drop_table->name, IBD, false); + + if (path != NULL) { + os_file_delete_if_exists( + innodb_data_file_key, + path , NULL); + ut_free(path); + } } } } @@ -6796,7 +7161,8 @@ fts_check_and_drop_orphaned_tables( orig_parent_id = aux_table->parent_id; orig_index_id = aux_table->index_id; - if (table == NULL || strcmp(table->name, aux_table->name)) { + if (table == NULL + || strcmp(table->name.m_name, aux_table->name)) { bool fake_aux = false; @@ -6831,7 +7197,7 @@ fts_check_and_drop_orphaned_tables( || orig_parent_id != next_aux_table->parent_id) && (!ib_vector_is_empty(aux_tables_to_rename))) { - ulint parent_id = fts_fake_hex_to_dec( + ib_id_t parent_id = fts_fake_hex_to_dec( aux_table->parent_id); parent_table = dict_table_open_on_id( @@ -6893,7 +7259,7 @@ fts_check_and_drop_orphaned_tables( } if (table != NULL) { - dict_table_close(table, true, false); + dict_table_close(table, TRUE, FALSE); } if (!rename) { @@ -6904,7 +7270,7 @@ fts_check_and_drop_orphaned_tables( } /* Filter out the fake aux table by comparing with the - current valid auxiliary table name . */ + current valid auxiliary table name. 
*/ for (ulint count = 0; count < ib_vector_size(invalid_aux_tables); count++) { fts_aux_table_t* invalid_aux; @@ -6926,7 +7292,7 @@ fts_check_and_drop_orphaned_tables( if (i + 1 < ib_vector_size(tables)) { next_aux_table = static_cast( - ib_vector_get(tables, i + 1)); + ib_vector_get(tables, i + 1)); } if (next_aux_table == NULL @@ -6939,7 +7305,6 @@ fts_check_and_drop_orphaned_tables( if (!ib_vector_is_empty(aux_tables_to_rename)) { fts_rename_aux_tables_to_hex_format( aux_tables_to_rename, parent_table); - } else { fts_set_parent_hex_format_flag( parent_table, trx); @@ -6955,16 +7320,9 @@ fts_check_and_drop_orphaned_tables( aux_table->parent_id, TRUE, DICT_TABLE_OP_NORMAL); if (drop) { - ib_vector_push(drop_aux_tables, aux_table); + ib_vector_push(drop_aux_tables, aux_table); } else { if (FTS_IS_OBSOLETE_AUX_TABLE(aux_table->name)) { - - /* Current table could be one of the three - obsolete tables, in this case, we should - always try to drop it but not rename it. - This could happen when we try to upgrade - from older server to later one, which doesn't - contain these obsolete tables. */ ib_vector_push(obsolete_aux_tables, aux_table); continue; } @@ -6973,22 +7331,36 @@ fts_check_and_drop_orphaned_tables( /* If the aux table is in decimal format, we should rename it, so push it to aux_tables_to_rename */ if (!drop && rename) { - ib_vector_push(aux_tables_to_rename, aux_table); + bool rename_table = true; + for (ulint count = 0; + count < ib_vector_size(aux_tables_to_rename); + count++) { + fts_aux_table_t* rename_aux = + static_cast( + ib_vector_get(aux_tables_to_rename, + count)); + if (strcmp(rename_aux->name, + aux_table->name) == 0) { + rename_table = false; + break; + } + } + + if (rename_table) { + ib_vector_push(aux_tables_to_rename, + aux_table); + } } if (i + 1 < ib_vector_size(tables)) { next_aux_table = static_cast( - ib_vector_get(tables, i + 1)); + ib_vector_get(tables, i + 1)); } if ((next_aux_table == NULL || orig_parent_id != next_aux_table->parent_id) && !ib_vector_is_empty(aux_tables_to_rename)) { - /* All aux tables of parent table, whose id is - last_parent_id, have been checked, try to rename - them if necessary. We had better use a new background - trx to rename rather than the original trx, in case - any failure would cause a complete rollback. 
*/ + ut_ad(rename); ut_ad(!DICT_TF2_FLAG_IS_SET( parent_table, DICT_TF2_FTS_AUX_HEX_NAME)); @@ -7003,21 +7375,22 @@ fts_check_and_drop_orphaned_tables( table = dict_table_open_on_id( aux_table->id, TRUE, DICT_TABLE_OP_NORMAL); + if (table != NULL - && strcmp(table->name, aux_table->name)) { + && strcmp(table->name.m_name, aux_table->name)) { dict_table_close(table, TRUE, FALSE); table = NULL; } if (table != NULL && !DICT_TF2_FLAG_IS_SET( - table, - DICT_TF2_FTS_AUX_HEX_NAME)) { + table, + DICT_TF2_FTS_AUX_HEX_NAME)) { DBUG_EXECUTE_IF("aux_table_flag_fail", - ib_logf(IB_LOG_LEVEL_WARN, - "Setting aux table %s to hex " - "format failed.", table->name); + ib::warn() << "Setting aux table " + << table->name << " to hex " + "format failed."; fts_set_index_corrupt( trx, aux_table->index_id, parent_table); @@ -7027,9 +7400,9 @@ fts_check_and_drop_orphaned_tables( trx, table->id, true); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_WARN, - "Setting aux table %s to hex " - "format failed.", table->name); + ib::warn() << "Setting aux table " + << table->name << " to hex " + "format failed."; fts_set_index_corrupt( trx, aux_table->index_id, @@ -7050,7 +7423,7 @@ table_exit: ut_ad(parent_table != NULL); fts_set_parent_hex_format_flag( - parent_table, trx); + parent_table, trx); } if (parent_table != NULL) { @@ -7073,7 +7446,6 @@ table_exit: /**********************************************************************//** Drop all orphaned FTS auxiliary tables, those that don't have a parent table or FTS index defined on them. */ -UNIV_INTERN void fts_drop_orphaned_tables(void) /*==========================*/ @@ -7091,8 +7463,7 @@ fts_drop_orphaned_tables(void) error = fil_get_space_names(space_name_list); if (error == DB_OUT_OF_MEMORY) { - ib_logf(IB_LOG_LEVEL_ERROR, "Out of memory"); - ut_error; + ib::fatal() << "Out of memory"; } heap = mem_heap_create(1024); @@ -7122,7 +7493,7 @@ fts_drop_orphaned_tables(void) } else { ulint len = strlen(*it); - fts_aux_table->id = fil_get_space_id_for_table(*it); + fts_aux_table->id = fil_space_get_id_by_name(*it); /* We got this list from fil0fil.cc. The tablespace with this name must exist. */ @@ -7148,7 +7519,7 @@ fts_drop_orphaned_tables(void) info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT NAME, ID " + " SELECT NAME, ID" " FROM SYS_TABLES;\n" "BEGIN\n" "\n" @@ -7172,18 +7543,14 @@ fts_drop_orphaned_tables(void) fts_sql_rollback(trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - ib_logf(IB_LOG_LEVEL_WARN, - "lock wait timeout reading SYS_TABLES. " - "Retrying!"); + ib::warn() << "lock wait timeout reading" + " SYS_TABLES. Retrying!"; trx->error_state = DB_SUCCESS; } else { - ib_logf(IB_LOG_LEVEL_ERROR, - "(%s) while reading SYS_TABLES.", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while reading SYS_TABLES."; break; /* Exit the loop. */ } @@ -7205,7 +7572,7 @@ fts_drop_orphaned_tables(void) it != space_name_list.end(); ++it) { - delete[] *it; + UT_DELETE_ARRAY(*it); } } @@ -7213,11 +7580,10 @@ fts_drop_orphaned_tables(void) Check whether user supplied stopword table is of the right format. Caller is responsible to hold dictionary locks. 
@return the stopword column charset if qualifies */ -UNIV_INTERN CHARSET_INFO* fts_valid_stopword_table( /*=====================*/ - const char* stopword_table_name) /*!< in: Stopword table + const char* stopword_table_name) /*!< in: Stopword table name */ { dict_table_t* table; @@ -7230,9 +7596,8 @@ fts_valid_stopword_table( table = dict_table_get_low(stopword_table_name); if (!table) { - fprintf(stderr, - "InnoDB: user stopword table %s does not exist.\n", - stopword_table_name); + ib::error() << "User stopword table " << stopword_table_name + << " does not exist."; return(NULL); } else { @@ -7241,10 +7606,9 @@ fts_valid_stopword_table( col_name = dict_table_get_col_name(table, 0); if (ut_strcmp(col_name, "value")) { - fprintf(stderr, - "InnoDB: invalid column name for stopword " - "table %s. Its first column must be named as " - "'value'.\n", stopword_table_name); + ib::error() << "Invalid column name for stopword" + " table " << stopword_table_name << ". Its" + " first column must be named as 'value'."; return(NULL); } @@ -7253,10 +7617,9 @@ fts_valid_stopword_table( if (col->mtype != DATA_VARCHAR && col->mtype != DATA_VARMYSQL) { - fprintf(stderr, - "InnoDB: invalid column type for stopword " - "table %s. Its first column must be of " - "varchar type\n", stopword_table_name); + ib::error() << "Invalid column type for stopword" + " table " << stopword_table_name << ". Its" + " first column must be of varchar type"; return(NULL); } @@ -7264,9 +7627,7 @@ fts_valid_stopword_table( ut_ad(col); - return(innobase_get_fts_charset( - static_cast(col->prtype & DATA_MYSQL_TYPE_MASK), - static_cast(dtype_get_charset_coll(col->prtype)))); + return(fts_get_charset(col->prtype)); } /**********************************************************************//** @@ -7275,7 +7636,6 @@ records/fetches stopword configuration to/from FTS configure table, depending on whether we are creating or reloading the FTS. 
@return TRUE if load operation is successful */ -UNIV_INTERN ibool fts_load_stopword( /*==============*/ @@ -7389,8 +7749,9 @@ cleanup: } if (!cache->stopword_info.cached_stopword) { - cache->stopword_info.cached_stopword = rbt_create( - sizeof(fts_tokenizer_word_t), fts_utf8_string_cmp); + cache->stopword_info.cached_stopword = rbt_create_arg_cmp( + sizeof(fts_tokenizer_word_t), innobase_fts_text_cmp, + &my_charset_latin1); } return(error == DB_SUCCESS); @@ -7454,6 +7815,7 @@ fts_init_recover_doc( sel_node_t* node = static_cast(row); que_node_t* exp = node->select_list; fts_cache_t* cache = get_doc->cache; + st_mysql_ftparser* parser = get_doc->index_cache->index->parser; fts_doc_init(&doc); doc.found = TRUE; @@ -7487,26 +7849,22 @@ fts_init_recover_doc( ut_ad(get_doc); if (!get_doc->index_cache->charset) { - ulint prtype = dfield->type.prtype; - - get_doc->index_cache->charset = - innobase_get_fts_charset( - (int)(prtype & DATA_MYSQL_TYPE_MASK), - (uint) dtype_get_charset_coll(prtype)); + get_doc->index_cache->charset = fts_get_charset( + dfield->type.prtype); } doc.charset = get_doc->index_cache->charset; + doc.is_ngram = get_doc->index_cache->index->is_ngram; if (dfield_is_ext(dfield)) { dict_table_t* table = cache->sync->table; - ulint zip_size = dict_table_zip_size(table); doc.text.f_str = btr_copy_externally_stored_field( &doc.text.f_len, static_cast(dfield_get_data(dfield)), - zip_size, len, - static_cast(doc.self_heap->arg), - NULL); + dict_table_page_size(table), len, + static_cast(doc.self_heap->arg) + ); } else { doc.text.f_str = static_cast( dfield_get_data(dfield)); @@ -7515,9 +7873,9 @@ fts_init_recover_doc( } if (field_no == 1) { - fts_tokenize_document(&doc, NULL); + fts_tokenize_document(&doc, NULL, parser); } else { - fts_tokenize_document_next(&doc, doc_len, NULL); + fts_tokenize_document_next(&doc, doc_len, NULL, parser); } exp = que_node_get_next(exp); @@ -7546,7 +7904,6 @@ used. There are documents that have not yet sync-ed to auxiliary tables from last server abnormally shutdown, we will need to bring such document into FTS cache before any further operations @return TRUE if all OK */ -UNIV_INTERN ibool fts_init_index( /*===========*/ @@ -7590,7 +7947,7 @@ fts_init_index( dropped, and we re-initialize the Doc ID system for subsequent insertion */ if (ib_vector_is_empty(cache->get_docs)) { - index = dict_table_get_index_on_name(table, FTS_DOC_ID_INDEX_NAME); + index = table->fts_doc_id_index; ut_a(index); @@ -7633,3 +7990,61 @@ func_exit: return(TRUE); } + +/** Check if the all the auxillary tables associated with FTS index are in +consistent state. For now consistency is check only by ensuring +index->page_no != FIL_NULL +@param[out] base_table table has host fts index +@param[in,out] trx trx handler +@return true if check certifies auxillary tables are sane false otherwise. */ +bool +fts_is_corrupt( + dict_table_t* base_table, + trx_t* trx) +{ + bool sane = true; + fts_table_t fts_table; + + /* Iterate over the common table and check for their sanity. 
*/ + FTS_INIT_FTS_TABLE(&fts_table, NULL, FTS_COMMON_TABLE, base_table); + + for (ulint i = 0; fts_common_tables[i] != NULL && sane; ++i) { + + char table_name[MAX_FULL_NAME_LEN]; + + fts_table.suffix = fts_common_tables[i]; + fts_get_table_name(&fts_table, table_name); + + dict_table_t* aux_table = dict_table_open_on_name( + table_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); + + if (aux_table == NULL) { + dict_set_corrupted( + dict_table_get_first_index(base_table), + trx, "FTS_SANITY_CHECK"); + ut_ad(base_table->corrupted == TRUE); + sane = false; + continue; + } + + for (dict_index_t* aux_table_index = + UT_LIST_GET_FIRST(aux_table->indexes); + aux_table_index != NULL; + aux_table_index = + UT_LIST_GET_NEXT(indexes, aux_table_index)) { + + /* Check if auxillary table needed for FTS is sane. */ + if (aux_table_index->page == FIL_NULL) { + dict_set_corrupted( + dict_table_get_first_index(base_table), + trx, "FTS_SANITY_CHECK"); + ut_ad(base_table->corrupted == TRUE); + sane = false; + } + } + + dict_table_close(aux_table, FALSE, FALSE); + } + + return(sane); +} diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index d9f2532578e..89cdcf26591 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -25,6 +25,8 @@ Completed 2011/7/10 Sunny and Jimmy Yang ***********************************************************************/ +#include "ha_prototypes.h" + #include "fts0fts.h" #include "row0sel.h" #include "que0types.h" @@ -32,9 +34,10 @@ Completed 2011/7/10 Sunny and Jimmy Yang #include "fts0types.h" #include "ut0wqueue.h" #include "srv0start.h" +#include "ut0list.h" #include "zlib.h" -#ifndef UNIV_NONINL +#ifdef UNIV_NONINL #include "fts0types.ic" #include "fts0vlc.ic" #endif @@ -51,6 +54,9 @@ static const ulint FTS_OPTIMIZE_INTERVAL_IN_SECS = 300; /** Server is shutting down, so does we exiting the optimize thread */ static bool fts_opt_start_shutdown = false; +/** Event to wait for shutdown of the optimize thread */ +static os_event_t fts_opt_shutdown_event = NULL; + /** Initial size of nodes in fts_word_t. */ static const ulint FTS_WORD_NODES_INIT_SIZE = 64; @@ -215,11 +221,6 @@ struct fts_msg_del_t { this message by the consumer */ }; -/** Stop the optimize thread. */ -struct fts_msg_optimize_t { - dict_table_t* table; /*!< Table to optimize */ -}; - /** The FTS optimize message work queue message type. */ struct fts_msg_t { fts_msg_type_t type; /*!< Message type */ @@ -232,10 +233,10 @@ struct fts_msg_t { }; /** The number of words to read and optimize in a single pass. */ -UNIV_INTERN ulong fts_num_word_optimize; +ulong fts_num_word_optimize; // FIXME -UNIV_INTERN char fts_enable_diag_print; +char fts_enable_diag_print; /** ZLib compressed block size.*/ static ulint FTS_ZIP_BLOCK_SIZE = 1024; @@ -243,27 +244,30 @@ static ulint FTS_ZIP_BLOCK_SIZE = 1024; /** The amount of time optimizing in a single pass, in milliseconds. */ static ib_time_t fts_optimize_time_limit = 0; +/** It's defined in fts0fts.cc */ +extern const char* fts_common_tables[]; + /** SQL Statement for changing state of rows to be deleted from FTS Index. 
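A recurring change in this patch, visible again in the statements below, is that auxiliary table names are no longer spliced into the SQL text via "%s" and ut_strreplace(); instead the name is rendered into a stack buffer and bound to a $identifier placeholder. The fragment below condenses that pattern from the surrounding hunks (it is an excerpt using the patch's own calls, not a self-contained program):

	char		table_name[MAX_FULL_NAME_LEN];
	pars_info_t*	info = pars_info_create();
	que_t*		graph;

	/* Literal values are still bound the usual way ... */
	pars_info_bind_varchar_literal(info, "word", word->f_str, word->f_len);

	/* ... but the auxiliary table name is now rendered into a stack
	buffer and bound as an identifier, rather than formatted into the
	statement text. */
	fts_get_table_name(fts_table, table_name);
	pars_info_bind_id(info, true, "table_name", table_name);

	graph = fts_parse_sql(
		fts_table, info,
		"BEGIN DELETE FROM $table_name WHERE word = :word;");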
*/ static const char* fts_init_delete_sql = "BEGIN\n" "\n" - "INSERT INTO \"%s_BEING_DELETED\"\n" - "SELECT doc_id FROM \"%s_DELETED\";\n" + "INSERT INTO $BEING_DELETED\n" + "SELECT doc_id FROM $DELETED;\n" "\n" - "INSERT INTO \"%s_BEING_DELETED_CACHE\"\n" - "SELECT doc_id FROM \"%s_DELETED_CACHE\";\n"; + "INSERT INTO $BEING_DELETED_CACHE\n" + "SELECT doc_id FROM $DELETED_CACHE;\n"; static const char* fts_delete_doc_ids_sql = "BEGIN\n" "\n" - "DELETE FROM \"%s_DELETED\" WHERE doc_id = :doc_id1;\n" - "DELETE FROM \"%s_DELETED_CACHE\" WHERE doc_id = :doc_id2;\n"; + "DELETE FROM $DELETED WHERE doc_id = :doc_id1;\n" + "DELETE FROM $DELETED_CACHE WHERE doc_id = :doc_id2;\n"; static const char* fts_end_delete_sql = "BEGIN\n" "\n" - "DELETE FROM \"%s_BEING_DELETED\";\n" - "DELETE FROM \"%s_BEING_DELETED_CACHE\";\n"; + "DELETE FROM $BEING_DELETED;\n" + "DELETE FROM $BEING_DELETED_CACHE;\n"; /**********************************************************************//** Initialize fts_zip_t. */ @@ -338,7 +342,6 @@ fts_zip_init( /**********************************************************************//** Create a fts_optimizer_word_t instance. @return new instance */ -UNIV_INTERN fts_word_t* fts_word_init( /*==========*/ @@ -405,7 +408,7 @@ fts_optimize_read_node( case 4: /* ILIST */ node->ilist_size_alloc = node->ilist_size = len; - node->ilist = static_cast(ut_malloc(len)); + node->ilist = static_cast(ut_malloc_nokey(len)); memcpy(node->ilist, data, len); break; @@ -423,7 +426,6 @@ fts_optimize_read_node( /**********************************************************************//** Callback function to fetch the rows in an FTS INDEX record. @return always returns non-NULL */ -UNIV_INTERN ibool fts_optimize_index_fetch_node( /*==========================*/ @@ -481,7 +483,6 @@ fts_optimize_index_fetch_node( /**********************************************************************//** Read the rows from the FTS inde. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_index_fetch_nodes( /*==================*/ @@ -494,21 +495,17 @@ fts_index_fetch_nodes( { pars_info_t* info; dberr_t error; + char table_name[MAX_FULL_NAME_LEN]; trx->op_info = "fetching FTS index nodes"; if (*graph) { info = (*graph)->info; } else { - info = pars_info_create(); - } - - pars_info_bind_function(info, "my_func", fetch->read_record, fetch); - pars_info_bind_varchar_literal(info, "word", word->f_str, word->f_len); - - if (!*graph) { ulint selected; + info = pars_info_create(); + ut_a(fts_table->type == FTS_INDEX_TABLE); selected = fts_select_index(fts_table->charset, @@ -516,14 +513,24 @@ fts_index_fetch_nodes( fts_table->suffix = fts_get_suffix(selected); + fts_get_table_name(fts_table, table_name); + + pars_info_bind_id(info, true, "table_name", table_name); + } + + pars_info_bind_function(info, "my_func", fetch->read_record, fetch); + pars_info_bind_varchar_literal(info, "word", word->f_str, word->f_len); + + if (!*graph) { + *graph = fts_parse_sql( fts_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT word, doc_count, first_doc_id, last_doc_id, " - "ilist\n" - " FROM \"%s\"\n" + " SELECT word, doc_count, first_doc_id, last_doc_id," + " ilist\n" + " FROM $table_name\n" " WHERE word LIKE :word\n" " ORDER BY first_doc_id;\n" "BEGIN\n" @@ -538,7 +545,7 @@ fts_index_fetch_nodes( "CLOSE c;"); } - for(;;) { + for (;;) { error = fts_eval_sql(trx, *graph); if (error == DB_SUCCESS) { @@ -548,18 +555,14 @@ fts_index_fetch_nodes( } else { fts_sql_rollback(trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS index. " - "Retrying!\n"); + ib::warn() << "lock wait timeout reading" + " FTS index. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: (%s) " - "while reading FTS index.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while reading FTS index."; break; /* Exit the loop. */ } @@ -620,7 +623,8 @@ fts_zip_read_word( zip->zp->avail_in = FTS_MAX_WORD_LEN; } else { - zip->zp->avail_in = static_cast(zip->block_sz); + zip->zp->avail_in = + static_cast(zip->block_sz); } ++zip->pos; @@ -718,7 +722,9 @@ fts_fetch_index_words( if (zip->zp->avail_out == 0) { byte* block; - block = static_cast(ut_malloc(zip->block_sz)); + block = static_cast( + ut_malloc_nokey(zip->block_sz)); + ib_vector_push(zip->blocks, &block); zip->zp->next_out = block; @@ -775,7 +781,9 @@ fts_zip_deflate_end( ut_a(zip->zp->avail_out == 0); - block = static_cast(ut_malloc(FTS_MAX_WORD_LEN + 1)); + block = static_cast( + ut_malloc_nokey(FTS_MAX_WORD_LEN + 1)); + ib_vector_push(zip->blocks, &block); zip->zp->next_out = block; @@ -823,16 +831,13 @@ fts_index_fetch_words( } for (selected = fts_select_index( - optim->fts_index_table.charset, word->f_str, word->f_len); - fts_index_selector[selected].value; + optim->fts_index_table.charset, word->f_str, word->f_len); + selected < FTS_NUM_AUX_INDEX; selected++) { - optim->fts_index_table.suffix = fts_get_suffix(selected); + char table_name[MAX_FULL_NAME_LEN]; - /* We've search all indexes. 
*/ - if (optim->fts_index_table.suffix == NULL) { - return(DB_TABLE_NOT_FOUND); - } + optim->fts_index_table.suffix = fts_get_suffix(selected); info = pars_info_create(); @@ -842,13 +847,16 @@ fts_index_fetch_words( pars_info_bind_varchar_literal( info, "word", word->f_str, word->f_len); + fts_get_table_name(&optim->fts_index_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); + graph = fts_parse_sql( &optim->fts_index_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" " SELECT word\n" - " FROM \"%s\"\n" + " FROM $table_name\n" " WHERE word > :word\n" " ORDER BY word;\n" "BEGIN\n" @@ -864,15 +872,13 @@ fts_index_fetch_words( zip = optim->zip; - for(;;) { + for (;;) { int err; if (!inited && ((err = deflateInit(zip->zp, 9)) != Z_OK)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: ZLib deflateInit() " - "failed: %d\n", err); + ib::error() << "ZLib deflateInit() failed: " + << err; error = DB_ERROR; break; @@ -887,13 +893,9 @@ fts_index_fetch_words( } else { //FIXME fts_sql_rollback(optim->trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: " - "Warning: lock wait " - "timeout reading document. " - "Retrying!\n"); + ib::warn() << "Lock wait timeout" + " reading document. Retrying!"; /* We need to reset the ZLib state. */ inited = FALSE; @@ -902,9 +904,8 @@ fts_index_fetch_words( optim->trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: (%s) " - "while reading document.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") while reading document."; break; /* Exit the loop. */ } @@ -978,7 +979,6 @@ fts_fetch_doc_ids( /**********************************************************************//** Read the rows from a FTS common auxiliary table. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_table_fetch_doc_ids( /*====================*/ @@ -990,6 +990,7 @@ fts_table_fetch_doc_ids( que_t* graph; pars_info_t* info = pars_info_create(); ibool alloc_bk_trx = FALSE; + char table_name[MAX_FULL_NAME_LEN]; ut_a(fts_table->suffix != NULL); ut_a(fts_table->type == FTS_COMMON_TABLE); @@ -1003,12 +1004,15 @@ fts_table_fetch_doc_ids( pars_info_bind_function(info, "my_func", fts_fetch_doc_ids, doc_ids); + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); + graph = fts_parse_sql( fts_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT doc_id FROM \"%s\";\n" + " SELECT doc_id FROM $table_name;\n" "BEGIN\n" "\n" "OPEN c;\n" @@ -1045,7 +1049,6 @@ fts_table_fetch_doc_ids( Do a binary search for a doc id in the array @return +ve index if found -ve index where it should be inserted if not found */ -UNIV_INTERN int fts_bsearch( /*========*/ @@ -1082,7 +1085,7 @@ fts_bsearch( } /* Not found. */ - return( (lower == 0) ? -1 : -lower); + return( (lower == 0) ? -1 : -(lower)); } /**********************************************************************//** @@ -1181,12 +1184,12 @@ fts_optimize_encode_node( new_size = enc_len > FTS_ILIST_MAX_SIZE ? 
enc_len : FTS_ILIST_MAX_SIZE; - node->ilist = static_cast(ut_malloc(new_size)); + node->ilist = static_cast(ut_malloc_nokey(new_size)); node->ilist_size_alloc = new_size; } else if ((node->ilist_size + enc_len) > node->ilist_size_alloc) { ulint new_size = node->ilist_size + enc_len; - byte* ilist = static_cast(ut_malloc(new_size)); + byte* ilist = static_cast(ut_malloc_nokey(new_size)); memcpy(ilist, node->ilist, node->ilist_size); @@ -1386,8 +1389,8 @@ fts_optimize_word( if (fts_enable_diag_print) { word->text.f_str[word->text.f_len] = 0; - fprintf(stderr, "FTS_OPTIMIZE: optimize \"%s\"\n", - word->text.f_str); + ib::info() << "FTS_OPTIMIZE: optimize \"" << word->text.f_str + << "\""; } while (i < size) { @@ -1461,15 +1464,15 @@ fts_optimize_write_word( que_t* graph; ulint selected; dberr_t error = DB_SUCCESS; - char* table_name = fts_get_table_name(fts_table); + char table_name[MAX_FULL_NAME_LEN]; info = pars_info_create(); ut_ad(fts_table->charset); if (fts_enable_diag_print) { - fprintf(stderr, "FTS_OPTIMIZE: processed \"%s\"\n", - word->f_str); + ib::info() << "FTS_OPTIMIZE: processed \"" << word->f_str + << "\""; } pars_info_bind_varchar_literal( @@ -1479,26 +1482,24 @@ fts_optimize_write_word( word->f_str, word->f_len); fts_table->suffix = fts_get_suffix(selected); + fts_get_table_name(fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); graph = fts_parse_sql( fts_table, info, - "BEGIN DELETE FROM \"%s\" WHERE word = :word;"); + "BEGIN DELETE FROM $table_name WHERE word = :word;"); error = fts_eval_sql(trx, graph); if (error != DB_SUCCESS) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) during optimize, " - "when deleting a word from the FTS index.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ") during optimize," + " when deleting a word from the FTS index."; } fts_que_graph_free(graph); graph = NULL; - mem_free(table_name); - /* Even if the operation needs to be rolled back and redone, we iterate over the nodes in order to free the ilist. */ for (i = 0; i < ib_vector_size(nodes); ++i) { @@ -1510,11 +1511,9 @@ fts_optimize_write_word( trx, &graph, fts_table, word, node); if (error != DB_SUCCESS) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: (%s) " - "during optimize, while adding a " - "word to the FTS index.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ")" + " during optimize, while adding a" + " word to the FTS index."; } } @@ -1532,7 +1531,6 @@ fts_optimize_write_word( /**********************************************************************//** Free fts_optimizer_word_t instanace.*/ -UNIV_INTERN void fts_word_free( /*==========*/ @@ -1624,12 +1622,12 @@ fts_optimize_create( optim->trx = trx_allocate_for_background(); - optim->fts_common_table.parent = table->name; + optim->fts_common_table.parent = table->name.m_name; optim->fts_common_table.table_id = table->id; optim->fts_common_table.type = FTS_COMMON_TABLE; optim->fts_common_table.table = table; - optim->fts_index_table.parent = table->name; + optim->fts_index_table.parent = table->name.m_name; optim->fts_index_table.table_id = table->id; optim->fts_index_table.type = FTS_INDEX_TABLE; optim->fts_index_table.table = table; @@ -1750,7 +1748,7 @@ fts_optimize_free( fts_doc_ids_free(optim->to_delete); fts_optimize_graph_free(&optim->graph); - mem_free(optim->name_prefix); + ut_free(optim->name_prefix); /* This will free the heap from which optim itself was allocated. 
*/ mem_heap_free(heap); @@ -1804,9 +1802,9 @@ fts_optimize_words( fetch.read_arg = optim->words; fetch.read_record = fts_optimize_index_fetch_node; - fprintf(stderr, "%.*s\n", (int) word->f_len, word->f_str); + ib::info().write(word->f_str, word->f_len); - while(!optim->done) { + while (!optim->done) { dberr_t error; trx_t* trx = optim->trx; ulint selected; @@ -1853,13 +1851,12 @@ fts_optimize_words( } } } else if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, "InnoDB: Warning: lock wait timeout " - "during optimize. Retrying!\n"); + ib::warn() << "Lock wait timeout during optimize." + " Retrying!"; trx->error_state = DB_SUCCESS; } else if (error == DB_DEADLOCK) { - fprintf(stderr, "InnoDB: Warning: deadlock " - "during optimize. Retrying!\n"); + ib::warn() << "Deadlock during optimize. Retrying!"; trx->error_state = DB_SUCCESS; } else { @@ -1872,42 +1869,6 @@ fts_optimize_words( } } -/**********************************************************************//** -Select the FTS index to search. -@return TRUE if last index */ -static -ibool -fts_optimize_set_next_word( -/*=======================*/ - CHARSET_INFO* charset, /*!< in: charset */ - fts_string_t* word) /*!< in: current last word */ -{ - ulint selected; - ibool last = FALSE; - - selected = fts_select_next_index(charset, word->f_str, word->f_len); - - /* If this was the last index then reset to start. */ - if (fts_index_selector[selected].value == 0) { - /* Reset the last optimized word to '' if no - more words could be read from the FTS index. */ - word->f_len = 0; - *word->f_str = 0; - - last = TRUE; - } else { - ulint value = fts_index_selector[selected].value; - - ut_a(value <= 0xff); - - /* Set to the first character of the next slot. */ - word->f_len = 1; - *word->f_str = (byte) value; - } - - return(last); -} - /**********************************************************************//** Optimize is complete. Set the completion time, and reset the optimize start string for this FTS index to "". @@ -1940,8 +1901,8 @@ fts_optimize_index_completed( if (error != DB_SUCCESS) { - fprintf(stderr, "InnoDB: Error: (%s) while " - "updating last optimized word!\n", ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) << ") while updating" + " last optimized word!"; } return(error); @@ -1984,21 +1945,14 @@ fts_optimize_index_read_words( optim, word, fts_num_word_optimize); if (error == DB_SUCCESS) { - - /* If the search returned an empty set - try the next index in the horizontal split. */ - if (optim->zip->n_words > 0) { - break; - } else { - - fts_optimize_set_next_word( - optim->fts_index_table.charset, - word); - - if (word->f_len == 0) { - break; - } + /* Reset the last optimized word to '' if no + more words could be read from the FTS index. */ + if (optim->zip->n_words == 0) { + word->f_len = 0; + *word->f_str = 0; } + + break; } } @@ -2090,9 +2044,10 @@ fts_optimize_purge_deleted_doc_ids( pars_info_t* info; que_t* graph; fts_update_t* update; - char* sql_str; doc_id_t write_doc_id; dberr_t error = DB_SUCCESS; + char deleted[MAX_FULL_NAME_LEN]; + char deleted_cache[MAX_FULL_NAME_LEN]; info = pars_info_create(); @@ -2109,14 +2064,17 @@ fts_optimize_purge_deleted_doc_ids( fts_bind_doc_id(info, "doc_id1", &write_doc_id); fts_bind_doc_id(info, "doc_id2", &write_doc_id); - /* Since we only replace the table_id and don't construct the full - name, we do substitution ourselves. Remember to free sql_str. 
*/ - sql_str = ut_strreplace( - fts_delete_doc_ids_sql, "%s", optim->name_prefix); + /* Make sure the following two names are consistent with the name + used in the fts_delete_doc_ids_sql */ + optim->fts_common_table.suffix = fts_common_tables[3]; + fts_get_table_name(&optim->fts_common_table, deleted); + pars_info_bind_id(info, true, fts_common_tables[3], deleted); - graph = fts_parse_sql(NULL, info, sql_str); + optim->fts_common_table.suffix = fts_common_tables[4]; + fts_get_table_name(&optim->fts_common_table, deleted_cache); + pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache); - mem_free(sql_str); + graph = fts_parse_sql(NULL, info, fts_delete_doc_ids_sql); /* Delete the doc ids that were copied at the start. */ for (i = 0; i < ib_vector_size(optim->to_delete->doc_ids); ++i) { @@ -2157,17 +2115,26 @@ fts_optimize_purge_deleted_doc_id_snapshot( { dberr_t error; que_t* graph; - char* sql_str; + pars_info_t* info; + char being_deleted[MAX_FULL_NAME_LEN]; + char being_deleted_cache[MAX_FULL_NAME_LEN]; + + info = pars_info_create(); + + /* Make sure the following two names are consistent with the name + used in the fts_end_delete_sql */ + optim->fts_common_table.suffix = fts_common_tables[0]; + fts_get_table_name(&optim->fts_common_table, being_deleted); + pars_info_bind_id(info, true, fts_common_tables[0], being_deleted); - /* Since we only replace the table_id and don't construct - the full name, we do the '%s' substitution ourselves. */ - sql_str = ut_strreplace(fts_end_delete_sql, "%s", optim->name_prefix); + optim->fts_common_table.suffix = fts_common_tables[1]; + fts_get_table_name(&optim->fts_common_table, being_deleted_cache); + pars_info_bind_id(info, true, fts_common_tables[1], + being_deleted_cache); /* Delete the doc ids that were copied to delete pending state at the start of optimize. */ - graph = fts_parse_sql(NULL, NULL, sql_str); - - mem_free(sql_str); + graph = fts_parse_sql(NULL, info, fts_end_delete_sql); error = fts_eval_sql(optim->trx, graph); fts_que_graph_free(graph); @@ -2207,16 +2174,35 @@ fts_optimize_create_deleted_doc_id_snapshot( { dberr_t error; que_t* graph; - char* sql_str; + pars_info_t* info; + char being_deleted[MAX_FULL_NAME_LEN]; + char deleted[MAX_FULL_NAME_LEN]; + char being_deleted_cache[MAX_FULL_NAME_LEN]; + char deleted_cache[MAX_FULL_NAME_LEN]; - /* Since we only replace the table_id and don't construct the - full name, we do the substitution ourselves. */ - sql_str = ut_strreplace(fts_init_delete_sql, "%s", optim->name_prefix); + info = pars_info_create(); - /* Move doc_ids that are to be deleted to state being deleted. 
*/ - graph = fts_parse_sql(NULL, NULL, sql_str); + /* Make sure the following four names are consistent with the name + used in the fts_init_delete_sql */ + optim->fts_common_table.suffix = fts_common_tables[0]; + fts_get_table_name(&optim->fts_common_table, being_deleted); + pars_info_bind_id(info, true, fts_common_tables[0], being_deleted); + + optim->fts_common_table.suffix = fts_common_tables[3]; + fts_get_table_name(&optim->fts_common_table, deleted); + pars_info_bind_id(info, true, fts_common_tables[3], deleted); + + optim->fts_common_table.suffix = fts_common_tables[1]; + fts_get_table_name(&optim->fts_common_table, being_deleted_cache); + pars_info_bind_id(info, true, fts_common_tables[1], + being_deleted_cache); - mem_free(sql_str); + optim->fts_common_table.suffix = fts_common_tables[4]; + fts_get_table_name(&optim->fts_common_table, deleted_cache); + pars_info_bind_id(info, true, fts_common_tables[4], deleted_cache); + + /* Move doc_ids that are to be deleted to state being deleted. */ + graph = fts_parse_sql(NULL, info, fts_init_delete_sql); error = fts_eval_sql(optim->trx, graph); @@ -2450,7 +2436,6 @@ fts_optimize_table_bk( /*********************************************************************//** Run OPTIMIZE on the given table. @return DB_SUCCESS if all OK */ -UNIV_INTERN dberr_t fts_optimize_table( /*===============*/ @@ -2460,8 +2445,9 @@ fts_optimize_table( fts_optimize_t* optim = NULL; fts_t* fts = table->fts; - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: FTS start optimize %s\n", table->name); + if (fts_enable_diag_print) { + ib::info() << "FTS start optimize " << table->name; + } optim = fts_optimize_create(table); @@ -2512,9 +2498,8 @@ fts_optimize_table( && optim->n_completed == ib_vector_size(fts->indexes)) { if (fts_enable_diag_print) { - fprintf(stderr, "FTS_OPTIMIZE: Completed " - "Optimize, cleanup DELETED " - "table\n"); + ib::info() << "FTS_OPTIMIZE: Completed" + " Optimize, cleanup DELETED table"; } if (ib_vector_size(optim->to_delete->doc_ids) > 0) { @@ -2535,8 +2520,9 @@ fts_optimize_table( fts_optimize_free(optim); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: FTS end optimize %s\n", table->name); + if (fts_enable_diag_print) { + ib::info() << "FTS end optimize " << table->name; + } return(error); } @@ -2566,7 +2552,6 @@ fts_optimize_create_msg( /**********************************************************************//** Add the table to add to the OPTIMIZER's list. */ -UNIV_INTERN void fts_optimize_add_table( /*===================*/ @@ -2579,9 +2564,7 @@ fts_optimize_add_table( } /* Make sure table with FTS index cannot be evicted */ - if (table->can_be_evicted) { - dict_table_move_from_lru_to_non_lru(table); - } + dict_table_prevent_eviction(table); msg = fts_optimize_create_msg(FTS_MSG_ADD_TABLE, table); @@ -2590,7 +2573,6 @@ fts_optimize_add_table( /**********************************************************************//** Optimize a table. */ -UNIV_INTERN void fts_optimize_do_table( /*==================*/ @@ -2611,7 +2593,6 @@ fts_optimize_do_table( /**********************************************************************//** Remove the table from the OPTIMIZER's list. We do wait for acknowledgement from the consumer of the message. 
*/ -UNIV_INTERN void fts_optimize_remove_table( /*======================*/ @@ -2628,16 +2609,15 @@ fts_optimize_remove_table( /* FTS optimizer thread is already exited */ if (fts_opt_start_shutdown) { - ib_logf(IB_LOG_LEVEL_INFO, - "Try to remove table %s after FTS optimize" - " thread exiting.", table->name); + ib::info() << "Try to remove table " << table->name + << " after FTS optimize thread exiting."; return; } msg = fts_optimize_create_msg(FTS_MSG_DEL_TABLE, NULL); /* We will wait on this event until signalled by the consumer. */ - event = os_event_create(); + event = os_event_create(0); remove = static_cast( mem_heap_alloc(msg->heap, sizeof(*remove))); @@ -2650,7 +2630,7 @@ fts_optimize_remove_table( os_event_wait(event); - os_event_free(event); + os_event_destroy(event); } /** Send sync fts cache for the table. @@ -2670,9 +2650,8 @@ fts_optimize_request_sync_table( /* FTS optimizer thread is already exited */ if (fts_opt_start_shutdown) { - ib_logf(IB_LOG_LEVEL_INFO, - "Try to sync table %s after FTS optimize" - " thread exiting.", table->name); + ib::info() << "Try to remove table " << table->name + << " after FTS optimize thread exiting."; return; } @@ -2703,7 +2682,7 @@ fts_optimize_find_slot( slot = static_cast(ib_vector_get(tables, i)); - if (slot->table->id == table->id) { + if (slot->table == table) { return(slot); } } @@ -2725,9 +2704,8 @@ fts_optimize_start_table( slot = fts_optimize_find_slot(tables, table); if (slot == NULL) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Error: table %s not registered " - "with the optimize thread.\n", table->name); + ib::error() << "Table " << table->name << " not registered" + " with the optimize thread."; } else { slot->last_run = 0; slot->completed = 0; @@ -2755,7 +2733,7 @@ fts_optimize_new_table( if (slot->state == FTS_STATE_EMPTY) { empty_slot = i; - } else if (slot->table->id == table->id) { + } else if (slot->table == table) { /* Already exists in our optimize queue. */ ut_ad(slot->table_id = table->id); return(FALSE); @@ -2802,13 +2780,13 @@ fts_optimize_del_table( slot = static_cast(ib_vector_get(tables, i)); - /* FIXME: Should we assert on this ? */ if (slot->state != FTS_STATE_EMPTY - && slot->table->id == table->id) { + && slot->table == table) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: FTS Optimize Removing " - "table %s\n", table->name); + if (fts_enable_diag_print) { + ib::info() << "FTS Optimize Removing table " + << table->name; + } slot->table = NULL; slot->state = FTS_STATE_EMPTY; @@ -2888,8 +2866,8 @@ fts_is_sync_needed( const ib_vector_t* tables) /*!< in: registered tables vector*/ { - ulint total_memory = 0; - double time_diff = difftime(ut_time(), last_check_sync_time); + ulint total_memory = 0; + double time_diff = difftime(ut_time(), last_check_sync_time); if (fts_need_sync || time_diff < 5) { return(false); @@ -2904,7 +2882,7 @@ fts_is_sync_needed( ib_vector_get_const(tables, i)); if (slot->state != FTS_STATE_EMPTY && slot->table - && slot->table->fts) { + && slot->table->fts && slot->table->fts->cache) { total_memory += slot->table->fts->cache->total_size; } @@ -2977,7 +2955,7 @@ fts_optimize_sync_table( /* Prevent DROP INDEX etc. from running when we are syncing cache in background. */ - if (!rw_lock_s_lock_nowait(&dict_operation_lock, __FILE__, __LINE__)) { + if (!rw_lock_s_lock_nowait(dict_operation_lock, __FILE__, __LINE__)) { /* Exit when fail to get dict operation lock. 
*/ return; } @@ -2992,13 +2970,12 @@ fts_optimize_sync_table( dict_table_close(table, FALSE, FALSE); } - rw_lock_s_unlock(&dict_operation_lock); + rw_lock_s_unlock(dict_operation_lock); } /**********************************************************************//** Optimize all FTS tables. @return Dummy return */ -UNIV_INTERN os_thread_ret_t fts_optimize_thread( /*================*/ @@ -3010,7 +2987,6 @@ fts_optimize_thread( ulint current = 0; ibool done = FALSE; ulint n_tables = 0; - os_event_t exit_event = 0; ulint n_optimize = 0; ib_wqueue_t* wq = (ib_wqueue_t*) arg; @@ -3022,7 +2998,7 @@ fts_optimize_thread( tables = ib_vector_create(heap_alloc, sizeof(fts_slot_t), 4); - while(!done && srv_shutdown_state == SRV_SHUTDOWN_NONE) { + while (!done && srv_shutdown_state == SRV_SHUTDOWN_NONE) { /* If there is no message in the queue and we have tables to optimize then optimize the tables. */ @@ -3081,7 +3057,6 @@ fts_optimize_thread( case FTS_MSG_STOP: done = TRUE; - exit_event = (os_event_t) msg->ptr; break; case FTS_MSG_ADD_TABLE: @@ -3147,16 +3122,35 @@ fts_optimize_thread( ib_vector_get(tables, i)); if (slot->state != FTS_STATE_EMPTY) { - fts_optimize_sync_table(slot->table_id); + dict_table_t* table = NULL; + + /*slot->table may be freed, so we try to open + table by slot->table_id.*/ + table = dict_table_open_on_id( + slot->table_id, FALSE, + DICT_TABLE_OP_NORMAL); + + if (table) { + + if (dict_table_has_fts_index(table)) { + fts_sync_table(table, false, true); + } + + if (table->fts) { + fts_free(table); + } + + dict_table_close(table, FALSE, FALSE); + } } } } ib_vector_free(tables); - ib_logf(IB_LOG_LEVEL_INFO, "FTS optimize thread exiting."); + ib::info() << "FTS optimize thread exiting."; - os_event_set(exit_event); + os_event_set(fts_opt_shutdown_event); my_thread_end(); /* We count the number of threads in os_thread_exit(). A created @@ -3168,7 +3162,6 @@ fts_optimize_thread( /**********************************************************************//** Startup the optimize thread and create the work queue. */ -UNIV_INTERN void fts_optimize_init(void) /*===================*/ @@ -3179,6 +3172,7 @@ fts_optimize_init(void) ut_a(fts_optimize_wq == NULL); fts_optimize_wq = ib_wqueue_create(); + fts_opt_shutdown_event = os_event_create(0); ut_a(fts_optimize_wq != NULL); last_check_sync_time = ut_time(); @@ -3188,7 +3182,6 @@ fts_optimize_init(void) /**********************************************************************//** Check whether the work queue is initialized. @return TRUE if optimze queue is initialized. */ -UNIV_INTERN ibool fts_optimize_is_init(void) /*======================*/ @@ -3198,7 +3191,6 @@ fts_optimize_is_init(void) /**********************************************************************//** Signal the optimize thread to prepare for shutdown. */ -UNIV_INTERN void fts_optimize_start_shutdown(void) /*=============================*/ @@ -3206,7 +3198,6 @@ fts_optimize_start_shutdown(void) ut_ad(!srv_read_only_mode); fts_msg_t* msg; - os_event_t event; /* If there is an ongoing activity on dictionary, such as srv_master_evict_from_table_cache(), wait for it */ @@ -3221,23 +3212,20 @@ fts_optimize_start_shutdown(void) /* We tell the OPTIMIZE thread to switch to state done, we can't delete the work queue here because the add thread needs deregister the FTS tables. 
*/ - event = os_event_create(); msg = fts_optimize_create_msg(FTS_MSG_STOP, NULL); - msg->ptr = event; ib_wqueue_add(fts_optimize_wq, msg, msg->heap); - os_event_wait(event); - os_event_free(event); + os_event_wait(fts_opt_shutdown_event); - ib_wqueue_free(fts_optimize_wq); + os_event_destroy(fts_opt_shutdown_event); + ib_wqueue_free(fts_optimize_wq); } /**********************************************************************//** Reset the work queue. */ -UNIV_INTERN void fts_optimize_end(void) /*==================*/ diff --git a/storage/innobase/fts/fts0pars.cc b/storage/innobase/fts/fts0pars.cc index 7f0ba4e0c1b..e4d1bba2be6 100644 --- a/storage/innobase/fts/fts0pars.cc +++ b/storage/innobase/fts/fts0pars.cc @@ -76,12 +76,13 @@ /* Line 268 of yacc.c */ #line 26 "fts0pars.y" - +#include "ha_prototypes.h" #include "mem0mem.h" #include "fts0ast.h" #include "fts0blex.h" #include "fts0tlex.h" #include "fts0pars.h" +#include extern int fts_lexer(YYSTYPE*, fts_lexer_t*); extern int fts_blexer(YYSTYPE*, yyscan_t); @@ -271,8 +272,6 @@ YYID (yyi) # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include /* INFRINGES ON USER NAME SPACE */ -# elif defined _AIX -# define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca @@ -1541,7 +1540,7 @@ yyreduce: /* Line 1806 of yacc.c */ #line 141 "fts0pars.y" { - fts_ast_term_set_distance((yyvsp[(1) - (3)].node), fts_ast_string_to_ul((yyvsp[(3) - (3)].token), 10)); + fts_ast_text_set_distance((yyvsp[(1) - (3)].node), fts_ast_string_to_ul((yyvsp[(3) - (3)].token), 10)); fts_ast_string_free((yyvsp[(3) - (3)].token)); } break; @@ -1574,7 +1573,7 @@ yyreduce: { (yyval.node) = fts_ast_create_node_list(state, (yyvsp[(1) - (4)].node)); fts_ast_add_node((yyval.node), (yyvsp[(2) - (4)].node)); - fts_ast_term_set_distance((yyvsp[(2) - (4)].node), fts_ast_string_to_ul((yyvsp[(4) - (4)].token), 10)); + fts_ast_text_set_distance((yyvsp[(2) - (4)].node), fts_ast_string_to_ul((yyvsp[(4) - (4)].token), 10)); fts_ast_string_free((yyvsp[(4) - (4)].token)); } break; @@ -1933,7 +1932,6 @@ ftserror( /******************************************************************** Create a fts_lexer_t instance.*/ - fts_lexer_t* fts_lexer_create( /*=============*/ @@ -1942,7 +1940,7 @@ fts_lexer_create( ulint query_len) { fts_lexer_t* fts_lexer = static_cast( - ut_malloc(sizeof(fts_lexer_t))); + ut_malloc_nokey(sizeof(fts_lexer_t))); if (boolean_mode) { fts0blex_init(&fts_lexer->yyscanner); @@ -1984,7 +1982,6 @@ fts_lexer_free( /******************************************************************** Call the appropaiate scanner.*/ - int fts_lexer( /*======*/ diff --git a/storage/innobase/fts/fts0pars.y b/storage/innobase/fts/fts0pars.y index e48036e82fe..1f4ec9922e3 100644 --- a/storage/innobase/fts/fts0pars.y +++ b/storage/innobase/fts/fts0pars.y @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,12 +24,13 @@ this program; if not, write to the Free Software Foundation, Inc., */ %{ - +#include "ha_prototypes.h" #include "mem0mem.h" #include "fts0ast.h" #include "fts0blex.h" #include "fts0tlex.h" #include "fts0pars.h" +#include extern int fts_lexer(YYSTYPE*, fts_lexer_t*); extern int fts_blexer(YYSTYPE*, yyscan_t); @@ -139,7 +140,7 @@ expr : term { } | text '@' FTS_NUMB { - fts_ast_term_set_distance($1, fts_ast_string_to_ul($3, 10)); + fts_ast_text_set_distance($1, fts_ast_string_to_ul($3, 10)); fts_ast_string_free($3); } @@ -157,7 +158,7 @@ expr : term { | prefix text '@' FTS_NUMB { $$ = fts_ast_create_node_list(state, $1); fts_ast_add_node($$, $2); - fts_ast_term_set_distance($2, fts_ast_string_to_ul($4, 10)); + fts_ast_text_set_distance($2, fts_ast_string_to_ul($4, 10)); fts_ast_string_free($4); } @@ -224,7 +225,6 @@ ftserror( /******************************************************************** Create a fts_lexer_t instance.*/ - fts_lexer_t* fts_lexer_create( /*=============*/ @@ -233,17 +233,17 @@ fts_lexer_create( ulint query_len) { fts_lexer_t* fts_lexer = static_cast( - ut_malloc(sizeof(fts_lexer_t))); + ut_malloc_nokey(sizeof(fts_lexer_t))); if (boolean_mode) { fts0blex_init(&fts_lexer->yyscanner); - fts0b_scan_bytes((char*) query, query_len, fts_lexer->yyscanner); + fts0b_scan_bytes((char*) query, (int) query_len, fts_lexer->yyscanner); fts_lexer->scanner = (fts_scan) fts_blexer; /* FIXME: Debugging */ /* fts0bset_debug(1 , fts_lexer->yyscanner); */ } else { fts0tlex_init(&fts_lexer->yyscanner); - fts0t_scan_bytes((char*) query, query_len, fts_lexer->yyscanner); + fts0t_scan_bytes((char*) query, (int) query_len, fts_lexer->yyscanner); fts_lexer->scanner = (fts_scan) fts_tlexer; } @@ -269,7 +269,6 @@ fts_lexer_free( /******************************************************************** Call the appropaiate scanner.*/ - int fts_lexer( /*======*/ diff --git a/storage/innobase/fts/fts0plugin.cc b/storage/innobase/fts/fts0plugin.cc new file mode 100644 index 00000000000..eaa32379a7c --- /dev/null +++ b/storage/innobase/fts/fts0plugin.cc @@ -0,0 +1,295 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file fts/fts0plugin.cc +Full Text Search plugin support. 
+ +Created 2013/06/04 Shaohua Wang +***********************************************************************/ + +#include "fts0ast.h" +#include "fts0plugin.h" +#include "fts0tokenize.h" + +#include "ft_global.h" + +/******************************************************************//** +FTS default parser init +@return 0 */ +static +int +fts_default_parser_init( +/*====================*/ + MYSQL_FTPARSER_PARAM *param) /*!< in: plugin parser param */ +{ + return(0); +} + +/******************************************************************//** +FTS default parser deinit +@return 0 */ +static +int +fts_default_parser_deinit( +/*======================*/ + MYSQL_FTPARSER_PARAM *param) /*!< in: plugin parser param */ +{ + return(0); +} + +/******************************************************************//** +FTS default parser parse from ft_static.c in MYISAM. +@return 0 if parse successfully, or return non-zero */ +static +int +fts_default_parser_parse( +/*=====================*/ + MYSQL_FTPARSER_PARAM *param) /*!< in: plugin parser param */ +{ + return(param->mysql_parse(param, param->doc, param->length)); +} + +/* FTS default parser from ft_static.c in MYISAM. */ +struct st_mysql_ftparser fts_default_parser = +{ + MYSQL_FTPARSER_INTERFACE_VERSION, + fts_default_parser_parse, + fts_default_parser_init, + fts_default_parser_deinit +}; + +/******************************************************************//** +Get a operator node from token boolean info +@return node */ +static +fts_ast_node_t* +fts_query_get_oper_node( +/*====================*/ + MYSQL_FTPARSER_BOOLEAN_INFO* info, /*!< in: token info */ + fts_ast_state_t* state) /*!< in/out: query parse state*/ +{ + fts_ast_node_t* oper_node = NULL; + + if (info->yesno > 0) { + oper_node = fts_ast_create_node_oper(state, FTS_EXIST); + } else if (info->yesno < 0) { + oper_node = fts_ast_create_node_oper(state, FTS_IGNORE); + } else if (info->weight_adjust > 0) { + oper_node = fts_ast_create_node_oper(state, FTS_INCR_RATING); + } else if (info->weight_adjust < 0) { + oper_node = fts_ast_create_node_oper(state, FTS_DECR_RATING); + } else if (info->wasign > 0) { + oper_node = fts_ast_create_node_oper(state, FTS_NEGATE); + } + + return(oper_node); +} + +/******************************************************************//** +FTS plugin parser 'myql_add_word' callback function for query parse. +Refer to 'st_mysql_ftparser_param' for more detail. +Note: +a. Parse logic refers to 'ftb_query_add_word' from ft_boolean_search.c in MYISAM; +b. Parse node or tree refers to fts0pars.y. +@return 0 if add successfully, or return non-zero. 
*/ +int +fts_query_add_word_for_parser( +/*==========================*/ + MYSQL_FTPARSER_PARAM* param, /*!< in: parser param */ + const char* word, /*!< in: token */ + int word_len, /*!< in: token length */ + MYSQL_FTPARSER_BOOLEAN_INFO* info) /*!< in: token info */ +{ + fts_ast_state_t* state = + static_cast(param->mysql_ftparam); + fts_ast_node_t* cur_node = state->cur_node; + fts_ast_node_t* oper_node = NULL; + fts_ast_node_t* term_node = NULL; + fts_ast_node_t* node = NULL; + + switch (info->type) { + case FT_TOKEN_STOPWORD: + /* We only handler stopword in phrase */ + if (cur_node->type != FTS_AST_PARSER_PHRASE_LIST) { + break; + } + + case FT_TOKEN_WORD: + term_node = fts_ast_create_node_term_for_parser( + state, word, word_len); + + if (info->trunc) { + fts_ast_term_set_wildcard(term_node); + } + + if (cur_node->type == FTS_AST_PARSER_PHRASE_LIST) { + /* Ignore operator inside phrase */ + fts_ast_add_node(cur_node, term_node); + } else { + ut_ad(cur_node->type == FTS_AST_LIST + || cur_node->type == FTS_AST_SUBEXP_LIST); + oper_node = fts_query_get_oper_node(info, state); + + if (oper_node) { + node = fts_ast_create_node_list(state, oper_node); + fts_ast_add_node(node, term_node); + fts_ast_add_node(cur_node, node); + } else { + fts_ast_add_node(cur_node, term_node); + } + } + + break; + + case FT_TOKEN_LEFT_PAREN: + /* Check parse error */ + if (cur_node->type != FTS_AST_LIST + && cur_node->type != FTS_AST_SUBEXP_LIST) { + return(1); + } + + /* Set operator */ + oper_node = fts_query_get_oper_node(info, state); + if (oper_node != NULL) { + node = fts_ast_create_node_list(state, oper_node); + fts_ast_add_node(cur_node, node); + node->go_up = true; + node->up_node = cur_node; + cur_node = node; + } + + if (info->quot) { + /* Phrase node */ + node = fts_ast_create_node_phrase_list(state); + } else { + /* Subexp list node */ + node = fts_ast_create_node_subexp_list(state, NULL); + } + + fts_ast_add_node(cur_node, node); + + node->up_node = cur_node; + state->cur_node = node; + state->depth += 1; + + break; + + case FT_TOKEN_RIGHT_PAREN: + info->quot = 0; + + if (cur_node->up_node != NULL) { + cur_node = cur_node->up_node; + + if (cur_node->go_up) { + ut_a(cur_node->up_node + && !(cur_node->up_node->go_up)); + cur_node = cur_node->up_node; + } + } + + state->cur_node = cur_node; + + if (state->depth > 0) { + state->depth--; + } else { + /* Parentheses mismatch */ + return(1); + } + + break; + + case FT_TOKEN_EOF: + default: + break; + } + + return(0); +} + +/******************************************************************//** +FTS plugin parser 'myql_parser' callback function for query parse. +Refer to 'st_mysql_ftparser_param' for more detail. +@return 0 if parse successfully */ +static +int +fts_parse_query_internal( +/*=====================*/ + MYSQL_FTPARSER_PARAM* param, /*!< in: parser param */ + const char* query, /*!< in: query string */ + int len) /*!< in: query length */ +{ + MYSQL_FTPARSER_BOOLEAN_INFO info; + const CHARSET_INFO* cs = param->cs; + uchar** start = (uchar**)(&query); + uchar* end = (uchar*)(query + len); + FT_WORD w = {NULL, 0, 0}; + + info.prev = ' '; + info.quot = 0; + memset(&w, 0, sizeof(w)); + /* Note: We don't handle simple parser mode here, + but user supplied plugin parser should handler it. 
*/ + while (fts_get_word(cs, start, end, &w, &info)) { + int ret = param->mysql_add_word( + param, + reinterpret_cast(w.pos), + w.len, &info); + if (ret) { + return(ret); + } + } + + return(0); +} + +/******************************************************************//** +fts parse query by plugin parser. +@return 0 if parse successfully, or return non-zero. */ +int +fts_parse_by_parser( +/*================*/ + ibool mode, /*!< in: parse boolean mode */ + uchar* query_str, /*!< in: query string */ + ulint query_len, /*!< in: query string length */ + st_mysql_ftparser* parser, /*!< in: fts plugin parser */ + fts_ast_state_t* state) /*!< in/out: parser state */ +{ + MYSQL_FTPARSER_PARAM param; + int ret; + + ut_ad(parser); + + /* Initial parser param */ + param.mysql_parse = fts_parse_query_internal; + param.mysql_add_word = fts_query_add_word_for_parser; + param.mysql_ftparam = static_cast(state); + param.cs = state->charset; + param.doc = reinterpret_cast(query_str); + param.length = static_cast(query_len); + param.flags = 0; + param.mode = mode ? + MYSQL_FTPARSER_FULL_BOOLEAN_INFO : + MYSQL_FTPARSER_SIMPLE_MODE; + + PARSER_INIT(parser, ¶m); + ret = parser->parse(¶m); + PARSER_DEINIT(parser, ¶m); + + return(ret | state->depth); +} diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index 26bd0378aed..8abeb63f0a4 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -24,7 +24,9 @@ Created 2007/03/27 Sunny Bains Completed 2011/7/10 Sunny and Jimmy Yang *******************************************************/ -#include "dict0dict.h" /* dict_table_get_n_rows() */ +#include "ha_prototypes.h" + +#include "dict0dict.h" #include "ut0rbt.h" #include "row0sel.h" #include "fts0fts.h" @@ -32,14 +34,15 @@ Completed 2011/7/10 Sunny and Jimmy Yang #include "fts0ast.h" #include "fts0pars.h" #include "fts0types.h" -#include "ha_prototypes.h" -#include +#include "fts0plugin.h" +#include "ut0new.h" -#ifndef UNIV_NONINL +#ifdef UNIV_NONINL #include "fts0types.ic" #include "fts0vlc.ic" #endif +#include #include #define FTS_ELEM(t, n, i, j) (t[(i) * n + (j)]) @@ -59,7 +62,7 @@ Completed 2011/7/10 Sunny and Jimmy Yang // FIXME: Need to have a generic iterator that traverses the ilist. -typedef std::vector word_vector_t; +typedef std::vector > word_vector_t; struct fts_word_freq_t; @@ -71,6 +74,7 @@ struct fts_query_t { dict_index_t* index; /*!< The FTS index to search */ /*!< FTS auxiliary common table def */ + fts_table_t fts_common_table; fts_table_t fts_index_table;/*!< FTS auxiliary index table def */ @@ -144,7 +148,11 @@ struct fts_query_t { document, its elements are of type fts_word_freq_t */ + ib_rbt_t* wildcard_words; /*!< words with wildcard */ + bool multi_exist; /*!< multiple FTS_EXIST oper */ + + st_mysql_ftparser* parser; /*!< fts plugin parser */ }; /** For phrase matching, first we collect the documents and the positions @@ -178,7 +186,7 @@ struct fts_select_t { the FTS index */ }; -typedef std::vector pos_vector_t; +typedef std::vector > pos_vector_t; /** structure defines a set of ranges for original documents, each of which has a minimum position and maximum position. Text in such range should @@ -197,22 +205,54 @@ struct fts_proximity_t { /** The match positions and tokesn to match */ struct fts_phrase_t { - ibool found; /*!< Match result */ - - const fts_match_t* - match; /*!< Positions within text */ - - const ib_vector_t* - tokens; /*!< Tokens to match */ - - ulint distance; /*!< For matching on proximity - distance. 
Can be 0 for exact match */ - CHARSET_INFO* charset; /*!< Phrase match charset */ - mem_heap_t* heap; /*!< Heap for word processing */ - ulint zip_size; /*!< row zip size */ - fts_proximity_t*proximity_pos; /*!< position info for proximity - search verification. Records the min - and max position of words matched */ + fts_phrase_t(const dict_table_t* table) + : + found(false), + match(NULL), + tokens(NULL), + distance(0), + charset(NULL), + heap(NULL), + page_size(dict_table_page_size(table)), + proximity_pos(NULL), + parser(NULL) + { + } + + /** Match result */ + ibool found; + + /** Positions within text */ + const fts_match_t* match; + + /** Tokens to match */ + const ib_vector_t* tokens; + + /** For matching on proximity distance. Can be 0 for exact match */ + ulint distance; + + /** Phrase match charset */ + CHARSET_INFO* charset; + + /** Heap for word processing */ + mem_heap_t* heap; + + /** Row page size */ + const page_size_t page_size; + + /** Position info for proximity search verification. Records the + min and max position of words matched */ + fts_proximity_t* proximity_pos; + + /** FTS plugin parser */ + st_mysql_ftparser* parser; +}; + +/** Paramter passed to fts phrase match by parser */ +struct fts_phrase_param_t { + fts_phrase_t* phrase; /*!< Match phrase instance */ + ulint token_index; /*!< Index of token to match next */ + mem_heap_t* heap; /*!< Heap for word processing */ }; /** For storing the frequncy of a word/term in a document */ @@ -395,7 +435,7 @@ fts_query_lcs( ulint r = len_p1; ulint c = len_p2; ulint size = (r + 1) * (c + 1) * sizeof(ulint); - ulint* table = (ulint*) ut_malloc(size); + ulint* table = (ulint*) ut_malloc_nokey(size); /* Traverse the table backwards, from the last row to the first and also from the last column to the first. We compute the smaller @@ -442,7 +482,7 @@ fts_query_lcs( /*******************************************************************//** Compare two fts_ranking_t instance on their rank value and doc ids in descending order on the rank and ascending order on doc id. -@return 0 if p1 == p2, < 0 if p1 < p2, > 0 if p1 > p2 */ +@return 0 if p1 == p2, < 0 if p1 < p2, > 0 if p1 > p2 */ static int fts_query_compare_rank( @@ -469,67 +509,6 @@ fts_query_compare_rank( return(1); } -#ifdef FTS_UTF8_DEBUG -/*******************************************************************//** -Convert string to lowercase. -@return lower case string, callers responsibility to delete using -ut_free() */ -static -byte* -fts_tolower( -/*========*/ - const byte* src, /*!< in: src string */ - ulint len) /*!< in: src string length */ -{ - fts_string_t str; - byte* lc_str = ut_malloc(len + 1); - - str.f_len = len; - str.f_str = lc_str; - - memcpy(str.f_str, src, len); - - /* Make sure the last byte is NUL terminated */ - str.f_str[len] = '\0'; - - fts_utf8_tolower(&str); - - return(lc_str); -} - -/*******************************************************************//** -Do a case insensitive search. Doesn't check for NUL byte end marker -only relies on len. Convert str2 to lower case before comparing. -@return 0 if p1 == p2, < 0 if p1 < p2, > 0 if p1 > p2 */ -static -int -fts_utf8_strcmp( -/*============*/ - const fts_string_t* - str1, /*!< in: should be lower case*/ - - fts_string_t* str2) /*!< in: any case. 
We will use the length - of this string during compare as it - should be the min of the two strings */ -{ - byte b = str2->f_str[str2->f_len]; - - ut_a(str2->f_len <= str1->f_len); - - /* We need to write a NUL byte at the end of the string because the - string is converted to lowercase by a MySQL function which doesn't - care about the length. */ - str2->f_str[str2->f_len] = 0; - - fts_utf8_tolower(str2); - - /* Restore the value we replaced above. */ - str2->f_str[str2->f_len] = b; - - return(memcmp(str1->f_str, str2->f_str, str2->f_len)); -} -#endif - /*******************************************************************//** Create words in ranking */ static @@ -593,11 +572,7 @@ fts_ranking_words_add( pos = rbt_size(query->word_map); - new_word.f_str = static_cast(mem_heap_alloc(query->heap, - word->f_len + 1)); - memcpy(new_word.f_str, word->f_str, word->f_len); - new_word.f_str[word->f_len] = 0; - new_word.f_len = word->f_len; + fts_string_dup(&new_word, word, query->heap); new_word.f_n_char = pos; rbt_add_node(query->word_map, &parent, &new_word); @@ -684,11 +659,7 @@ fts_query_add_word_freq( memset(&word_freq, 0, sizeof(word_freq)); - word_freq.word.f_str = static_cast( - mem_heap_alloc(query->heap, word->f_len + 1)); - memcpy(word_freq.word.f_str, word->f_str, word->f_len); - word_freq.word.f_str[word->f_len] = 0; - word_freq.word.f_len = word->f_len; + fts_string_dup(&word_freq.word, word, query->heap); word_freq.doc_count = 0; @@ -1142,8 +1113,12 @@ fts_query_difference( ut_a(query->oper == FTS_IGNORE); #ifdef FTS_INTERNAL_DIAG_PRINT - fprintf(stderr, "DIFFERENCE: Searching: '%.*s'\n", - (int) token->f_len, token->f_str); + { + ib::info out; + out << "DIFFERENCE: Searching: '"; + out.write(token->f_str, token->f_len); + out << "'"; + } #endif if (query->doc_ids) { @@ -1233,8 +1208,12 @@ fts_query_intersect( ut_a(query->oper == FTS_EXIST); #ifdef FTS_INTERNAL_DIAG_PRINT - fprintf(stderr, "INTERSECT: Searching: '%.*s'\n", - (int) token->f_len, token->f_str); + { + ib::info out; + out << "INTERSECT: Searching: '"; + out.write(token->f_str, token->f_len); + out << "'"; + } #endif /* If the words set is not empty and multi exist is true, @@ -1415,8 +1394,12 @@ fts_query_union( query->oper == FTS_NEGATE || query->oper == FTS_INCR_RATING); #ifdef FTS_INTERNAL_DIAG_PRINT - fprintf(stderr, "UNION: Searching: '%.*s'\n", - (int) token->f_len, token->f_str); + { + ib::info out; + out << "UNION: Searching: '"; + out.write(token->f_str, token->f_len); + out << "'"; + } #endif if (query->doc_ids) { @@ -1427,10 +1410,6 @@ fts_query_union( return(query->error); } - /* Single '%' would confuse parser in pars_like_rebind(). In addition, - our wildcard search only supports prefix search */ - ut_ad(*token->f_str != '%'); - fts_query_cache(query, token); /* Setup the callback args for filtering and @@ -1626,18 +1605,17 @@ fts_query_match_phrase_terms( const fts_string_t* token; int result; ulint ret; - ulint offset; ret = innobase_mysql_fts_get_token( - phrase->charset, ptr, (byte*) end, - &match, &offset); + phrase->charset, ptr, + const_cast(end), &match); if (match.f_len > 0) { /* Get next token to match. 
*/ token = static_cast( ib_vector_get_const(tokens, i)); - fts_utf8_string_dup(&cmp_str, &match, heap); + fts_string_dup(&cmp_str, &match, heap); result = innobase_fts_text_case_cmp( phrase->charset, token, &cmp_str); @@ -1718,12 +1696,11 @@ fts_proximity_is_word_in_range( while (cur_pos <= proximity_pos->max_pos[i]) { ulint len; fts_string_t str; - ulint offset = 0; len = innobase_mysql_fts_get_token( phrase->charset, start + cur_pos, - start + total_len, &str, &offset); + start + total_len, &str); if (len == 0) { break; @@ -1752,6 +1729,103 @@ fts_proximity_is_word_in_range( return(false); } +/*****************************************************************//** +FTS plugin parser 'myql_add_word' callback function for phrase match +Refer to 'st_mysql_ftparser_param' for more detail. +@return 0 if match, or return non-zero */ +static +int +fts_query_match_phrase_add_word_for_parser( +/*=======================================*/ + MYSQL_FTPARSER_PARAM* param, /*!< in: parser param */ + const char* word, /*!< in: token */ + int word_len, /*!< in: token length */ + MYSQL_FTPARSER_BOOLEAN_INFO* info) /*!< in: token info */ +{ + fts_phrase_param_t* phrase_param; + fts_phrase_t* phrase; + const ib_vector_t* tokens; + fts_string_t match; + fts_string_t cmp_str; + const fts_string_t* token; + int result; + mem_heap_t* heap; + + phrase_param = static_cast(param->mysql_ftparam); + heap = phrase_param->heap; + phrase = phrase_param->phrase; + tokens = phrase->tokens; + + /* In case plugin parser doesn't check return value */ + if (phrase_param->token_index == ib_vector_size(tokens)) { + return(1); + } + + match.f_str = (uchar *)(word); + match.f_len = word_len; + match.f_n_char = fts_get_token_size(phrase->charset, word, word_len); + + if (match.f_len > 0) { + /* Get next token to match. */ + ut_a(phrase_param->token_index < ib_vector_size(tokens)); + token = static_cast( + ib_vector_get_const(tokens, phrase_param->token_index)); + + fts_string_dup(&cmp_str, &match, heap); + + result = innobase_fts_text_case_cmp( + phrase->charset, token, &cmp_str); + + if (result == 0) { + phrase_param->token_index++; + } else { + return(1); + } + } + + /* Can't be greater than the number of elements. */ + ut_a(phrase_param->token_index <= ib_vector_size(tokens)); + + /* This is the case for multiple words. */ + if (phrase_param->token_index == ib_vector_size(tokens)) { + phrase->found = TRUE; + } + + return(static_cast(phrase->found)); +} + +/*****************************************************************//** +Check whether the terms in the phrase match the text. 
+@return TRUE if matched else FALSE */ +static +ibool +fts_query_match_phrase_terms_by_parser( +/*===================================*/ + fts_phrase_param_t* phrase_param, /* in/out: phrase param */ + st_mysql_ftparser* parser, /* in: plugin fts parser */ + byte* text, /* in: text to check */ + ulint len) /* in: text length */ +{ + MYSQL_FTPARSER_PARAM param; + + ut_a(parser); + + /* Set paramters for param */ + param.mysql_parse = fts_tokenize_document_internal; + param.mysql_add_word = fts_query_match_phrase_add_word_for_parser; + param.mysql_ftparam = phrase_param; + param.cs = phrase_param->phrase->charset; + param.doc = reinterpret_cast(text); + param.length = static_cast(len); + param.mode= MYSQL_FTPARSER_WITH_STOPWORDS; + + PARSER_INIT(parser, &param); + parser->parse(&param); + PARSER_DEINIT(parser, &param); + + return(phrase_param->phrase->found); +} + /*****************************************************************//** Callback function to fetch and search the document. @return TRUE if matched else FALSE */ @@ -1786,11 +1860,7 @@ fts_query_match_phrase( for (i = phrase->match->start; i < ib_vector_size(positions); ++i) { ulint pos; - fts_string_t match; - fts_string_t cmp_str; byte* ptr = start; - ulint ret; - ulint offset; pos = *(ulint*) ib_vector_get_const(positions, i); @@ -1807,39 +1877,60 @@ fts_query_match_phrase( searched field to adjust the doc position when search phrases. */ pos -= prev_len; - ptr = match.f_str = start + pos; + ptr = start + pos; /* Within limits ? */ if (ptr >= end) { break; } - ret = innobase_mysql_fts_get_token( - phrase->charset, start + pos, (byte*) end, - &match, &offset); + if (phrase->parser) { + fts_phrase_param_t phrase_param; - if (match.f_len == 0) { - break; - } + phrase_param.phrase = phrase; + phrase_param.token_index = 0; + phrase_param.heap = heap; - fts_utf8_string_dup(&cmp_str, &match, heap); + if (fts_query_match_phrase_terms_by_parser( + &phrase_param, + phrase->parser, + ptr, + (end - ptr))) { + break; + } + } else { + fts_string_t match; + fts_string_t cmp_str; + ulint ret; - if (innobase_fts_text_case_cmp( - phrase->charset, first, &cmp_str) == 0) { + match.f_str = ptr; + ret = innobase_mysql_fts_get_token( + phrase->charset, start + pos, + const_cast(end), &match); - /* This is the case for the single word - in the phrase. */ - if (ib_vector_size(phrase->tokens) == 1) { - phrase->found = TRUE; + if (match.f_len == 0) { break; } - ptr += ret; + fts_string_dup(&cmp_str, &match, heap); - /* Match the remaining terms in the phrase. */ - if (fts_query_match_phrase_terms(phrase, &ptr, - end, heap)) { - break; + if (innobase_fts_text_case_cmp( + phrase->charset, first, &cmp_str) == 0) { + + /* This is the case for the single word + in the phrase. */ + if (ib_vector_size(phrase->tokens) == 1) { + phrase->found = TRUE; + break; + } + + ptr += ret; + + /* Match the remaining terms in the phrase.
*/ + if (fts_query_match_phrase_terms(phrase, &ptr, + end, heap)) { + break; + } } } } @@ -1915,9 +2006,9 @@ fts_query_fetch_document( if (dfield_is_ext(dfield)) { data = btr_copy_externally_stored_field( - &cur_len, data, phrase->zip_size, - dfield_get_len(dfield), phrase->heap, - NULL); + &cur_len, data, phrase->page_size, + dfield_get_len(dfield), phrase->heap + ); } else { cur_len = dfield_get_len(dfield); } @@ -2032,13 +2123,22 @@ fts_query_find_term( fts_select_t select; doc_id_t match_doc_id; trx_t* trx = query->trx; + char table_name[MAX_FULL_NAME_LEN]; trx->op_info = "fetching FTS index matching nodes"; if (*graph) { info = (*graph)->info; } else { + ulint selected; + info = pars_info_create(); + + selected = fts_select_index(*word->f_str); + query->fts_index_table.suffix = fts_get_suffix(selected); + + fts_get_table_name(&query->fts_index_table, table_name); + pars_info_bind_id(info, true, "index_table_name", table_name); } select.found = FALSE; @@ -2057,11 +2157,6 @@ fts_query_find_term( fts_bind_doc_id(info, "max_doc_id", &match_doc_id); if (!*graph) { - ulint selected; - - selected = fts_select_index(*word->f_str); - - query->fts_index_table.suffix = fts_get_suffix(selected); *graph = fts_parse_sql( &query->fts_index_table, @@ -2069,10 +2164,10 @@ fts_query_find_term( "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" " SELECT doc_count, ilist\n" - " FROM \"%s\"\n" - " WHERE word LIKE :word AND " - " first_doc_id <= :min_doc_id AND " - " last_doc_id >= :max_doc_id\n" + " FROM $index_table_name\n" + " WHERE word LIKE :word AND" + " first_doc_id <= :min_doc_id AND" + " last_doc_id >= :max_doc_id\n" " ORDER BY first_doc_id;\n" "BEGIN\n" "\n" @@ -2086,24 +2181,22 @@ fts_query_find_term( "CLOSE c;"); } - for(;;) { + for (;;) { error = fts_eval_sql(trx, *graph); if (error == DB_SUCCESS) { break; /* Exit the loop. */ } else { - ut_print_timestamp(stderr); if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS index. " - "Retrying!\n"); + ib::warn() << "lock wait timeout reading FTS" + " index. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: %lu " - "while reading FTS index.\n", error); + ib::error() << error + << " while reading FTS index."; break; /* Exit the loop. */ } @@ -2168,6 +2261,7 @@ fts_query_total_docs_containing_term( que_t* graph; ulint selected; trx_t* trx = query->trx; + char table_name[MAX_FULL_NAME_LEN] trx->op_info = "fetching FTS index document count"; @@ -2182,14 +2276,18 @@ fts_query_total_docs_containing_term( query->fts_index_table.suffix = fts_get_suffix(selected); + fts_get_table_name(&query->fts_index_table, table_name); + + pars_info_bind_id(info, true, "index_table_name", table_name); + graph = fts_parse_sql( &query->fts_index_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" " SELECT doc_count\n" - " FROM %s\n" - " WHERE word = :word " + " FROM $index_table_name\n" + " WHERE word = :word" " ORDER BY first_doc_id;\n" "BEGIN\n" "\n" @@ -2202,24 +2300,22 @@ fts_query_total_docs_containing_term( "END LOOP;\n" "CLOSE c;"); - for(;;) { + for (;;) { error = fts_eval_sql(trx, graph); if (error == DB_SUCCESS) { break; /* Exit the loop. */ } else { - ut_print_timestamp(stderr); if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS index. " - "Retrying!\n"); + ib::warn() << "lock wait timeout reading FTS" + " index. 
Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: %lu " - "while reading FTS index.\n", error); + ib::error() << error + << " while reading FTS index."; break; /* Exit the loop. */ } @@ -2247,6 +2343,7 @@ fts_query_terms_in_document( que_t* graph; doc_id_t read_doc_id; trx_t* trx = query->trx; + char table_name[MAX_FULL_NAME_LEN]; trx->op_info = "fetching FTS document term count"; @@ -2262,15 +2359,19 @@ fts_query_terms_in_document( query->fts_index_table.suffix = "DOC_ID"; + fts_get_table_name(&query->fts_index_table, table_name); + + pars_info_bind_id(info, true, "index_table_name", table_name); + graph = fts_parse_sql( &query->fts_index_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" " SELECT count\n" - " FROM \"%s\"\n" - " WHERE doc_id = :doc_id " - "BEGIN\n" + " FROM $index_table_name\n" + " WHERE doc_id = :doc_id" + " BEGIN\n" "\n" "OPEN c;\n" "WHILE 1 = 1 LOOP\n" @@ -2281,25 +2382,22 @@ fts_query_terms_in_document( "END LOOP;\n" "CLOSE c;"); - for(;;) { + for (;;) { error = fts_eval_sql(trx, graph); if (error == DB_SUCCESS) { break; /* Exit the loop. */ } else { - ut_print_timestamp(stderr); if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: lock wait " - "timeout reading FTS doc id table. " - "Retrying!\n"); + ib::warn() << "lock wait timeout reading FTS" + " doc id table. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: %lu " - "while reading FTS doc id table.\n", - error); + ib::error() << error << " while reading FTS" + " doc id table."; break; /* Exit the loop. */ } @@ -2323,20 +2421,18 @@ fts_query_match_document( fts_get_doc_t* get_doc, /*!< in: table and prepared statements */ fts_match_t* match, /*!< in: doc id and positions */ ulint distance, /*!< in: proximity distance */ + st_mysql_ftparser* parser, /*!< in: fts plugin parser */ ibool* found) /*!< out: TRUE if phrase found */ { dberr_t error; - fts_phrase_t phrase; - - memset(&phrase, 0x0, sizeof(phrase)); + fts_phrase_t phrase(get_doc->index_cache->index->table); phrase.match = match; /* Positions to match */ phrase.tokens = tokens; /* Tokens to match */ phrase.distance = distance; phrase.charset = get_doc->index_cache->charset; - phrase.zip_size = dict_table_zip_size( - get_doc->index_cache->index->table); phrase.heap = mem_heap_create(512); + phrase.parser = parser; *found = phrase.found = FALSE; @@ -2345,9 +2441,8 @@ fts_query_match_document( fts_query_fetch_document, &phrase); if (error != DB_SUCCESS) { - ut_print_timestamp(stderr); - fprintf(stderr, "InnoDB: Error: (%s) matching document.\n", - ut_strerr(error)); + ib::error() << "(" << ut_strerr(error) + << ") matching document."; } else { *found = phrase.found; } @@ -2370,23 +2465,21 @@ fts_query_is_in_proximity_range( fts_proximity_t* qualified_pos) /*!< in: position info for qualified ranges */ { - fts_get_doc_t get_doc; - fts_cache_t* cache = query->index->table->fts->cache; - dberr_t err; - fts_phrase_t phrase; + fts_get_doc_t get_doc; + fts_cache_t* cache = query->index->table->fts->cache; + dberr_t err; memset(&get_doc, 0x0, sizeof(get_doc)); - memset(&phrase, 0x0, sizeof(phrase)); rw_lock_x_lock(&cache->lock); get_doc.index_cache = fts_find_index_cache(cache, query->index); rw_lock_x_unlock(&cache->lock); ut_a(get_doc.index_cache != NULL); + fts_phrase_t phrase(get_doc.index_cache->index->table); + phrase.distance = query->distance; phrase.charset = get_doc.index_cache->charset; - phrase.zip_size = dict_table_zip_size( - 
get_doc.index_cache->index->table); phrase.heap = mem_heap_create(512); phrase.proximity_pos = qualified_pos; phrase.found = FALSE; @@ -2396,9 +2489,8 @@ fts_query_is_in_proximity_range( fts_query_fetch_document, &phrase); if (err != DB_SUCCESS) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Error: (%s) in verification phase of proximity " - "search", ut_strerr(err)); + ib::error() << "(" << ut_strerr(err) << ") in verification" + " phase of proximity search"; } /* Free the prepared statement. */ @@ -2449,8 +2541,7 @@ fts_query_search_phrase( rw_lock_x_unlock(&cache->lock); #ifdef FTS_INTERNAL_DIAG_PRINT - ut_print_timestamp(stderr); - fprintf(stderr, " Start phrase search\n"); + ib::info() << "Start phrase search"; #endif /* Read the document from disk and do the actual @@ -2468,8 +2559,8 @@ fts_query_search_phrase( if (match->doc_id != 0) { query->error = fts_query_match_document( - orig_tokens, &get_doc, - match, query->distance, &found); + orig_tokens, &get_doc, match, + query->distance, query->parser, &found); if (query->error == DB_SUCCESS && found) { ulint z; @@ -2501,57 +2592,77 @@ func_exit: return(query->error); } -/*****************************************************************//** -Text/Phrase search. -@return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) -dberr_t -fts_query_phrase_search( -/*====================*/ - fts_query_t* query, /*!< in: query instance */ - const fts_string_t* phrase) /*!< in: token to search */ +/** Split the phrase into tokens +@param[in,out] query query instance +@param[in] node query node to search +@param[in,out] tokens token vector +@param[in,out] orig_tokens original node tokens include stopword +@param[in,out] heap mem heap */ +static +void +fts_query_phrase_split( + fts_query_t* query, + const fts_ast_node_t* node, + ib_vector_t* tokens, + ib_vector_t* orig_tokens, + mem_heap_t* heap) { - ib_vector_t* tokens; - ib_vector_t* orig_tokens; - mem_heap_t* heap = mem_heap_create(sizeof(fts_string_t)); - ulint len = phrase->f_len; + fts_string_t phrase; + ulint len = 0; ulint cur_pos = 0; - ib_alloc_t* heap_alloc; - ulint num_token; - CHARSET_INFO* charset; - - charset = query->fts_index_table.charset; + fts_ast_node_t* term_node = NULL; - heap_alloc = ib_heap_allocator_create(heap); - - tokens = ib_vector_create(heap_alloc, sizeof(fts_string_t), 4); - orig_tokens = ib_vector_create(heap_alloc, sizeof(fts_string_t), 4); - - if (query->distance != ULINT_UNDEFINED && query->distance > 0) { - query->flags = FTS_PROXIMITY; + if (node->type == FTS_AST_TEXT) { + phrase.f_str = node->text.ptr->str; + phrase.f_len = node->text.ptr->len; + len = phrase.f_len; } else { - query->flags = FTS_PHRASE; + ut_ad(node->type == FTS_AST_PARSER_PHRASE_LIST); + phrase.f_str = NULL; + phrase.f_len = 0; + term_node = node->list.head; } - /* Split the phrase into tokens. 
*/ - while (cur_pos < len) { + while (true) { fts_cache_t* cache = query->index->table->fts->cache; - ib_rbt_bound_t parent; - ulint offset; ulint cur_len; fts_string_t result_str; - cur_len = innobase_mysql_fts_get_token( - charset, - reinterpret_cast(phrase->f_str) + cur_pos, - reinterpret_cast(phrase->f_str) + len, - &result_str, &offset); + if (node->type == FTS_AST_TEXT) { + if (cur_pos >= len) { + break; + } - if (cur_len == 0) { - break; - } + cur_len = innobase_mysql_fts_get_token( + query->fts_index_table.charset, + reinterpret_cast(phrase.f_str) + + cur_pos, + reinterpret_cast(phrase.f_str) + + len, + &result_str); - cur_pos += cur_len; + if (cur_len == 0) { + break; + } + + cur_pos += cur_len; + } else { + ut_ad(node->type == FTS_AST_PARSER_PHRASE_LIST); + /* Term node in parser phrase list */ + if (term_node == NULL) { + break; + } + + ut_a(term_node->type == FTS_AST_TERM); + result_str.f_str = term_node->term.ptr->str; + result_str.f_len = term_node->term.ptr->len; + result_str.f_n_char = fts_get_token_size( + query->fts_index_table.charset, + reinterpret_cast(result_str.f_str), + result_str.f_len); + + term_node = term_node->next; + } if (result_str.f_n_char == 0) { continue; @@ -2559,19 +2670,13 @@ fts_query_phrase_search( fts_string_t* token = static_cast( ib_vector_push(tokens, NULL)); + fts_string_dup(token, &result_str, heap); - token->f_str = static_cast( - mem_heap_alloc(heap, result_str.f_len + 1)); - ut_memcpy(token->f_str, result_str.f_str, result_str.f_len); - - token->f_len = result_str.f_len; - token->f_str[token->f_len] = 0; - - if (cache->stopword_info.cached_stopword - && rbt_search(cache->stopword_info.cached_stopword, - &parent, token) != 0 - && result_str.f_n_char >= fts_min_token_size - && result_str.f_n_char <= fts_max_token_size) { + if (fts_check_token( + &result_str, + cache->stopword_info.cached_stopword, + query->index->is_ngram, + query->fts_index_table.charset)) { /* Add the word to the RB tree so that we can calculate it's frequencey within a document. */ fts_query_add_word_freq(query, token); @@ -2590,6 +2695,37 @@ fts_query_phrase_search( orig_token->f_len = token->f_len; } } +} + +/*****************************************************************//** +Text/Phrase search. +@return DB_SUCCESS or error code */ +static __attribute__((warn_unused_result)) +dberr_t +fts_query_phrase_search( +/*====================*/ + fts_query_t* query, /*!< in: query instance */ + const fts_ast_node_t* node) /*!< in: node to search */ +{ + ib_vector_t* tokens; + ib_vector_t* orig_tokens; + mem_heap_t* heap = mem_heap_create(sizeof(fts_string_t)); + ib_alloc_t* heap_alloc; + ulint num_token; + + heap_alloc = ib_heap_allocator_create(heap); + + tokens = ib_vector_create(heap_alloc, sizeof(fts_string_t), 4); + orig_tokens = ib_vector_create(heap_alloc, sizeof(fts_string_t), 4); + + if (query->distance != ULINT_UNDEFINED && query->distance > 0) { + query->flags = FTS_PROXIMITY; + } else { + query->flags = FTS_PHRASE; + } + + /* Split the phrase into tokens. */ + fts_query_phrase_split(query, node, tokens, orig_tokens, heap); num_token = ib_vector_size(tokens); if (num_token > MAX_PROXIMITY_ITEM) { @@ -2787,7 +2923,7 @@ fts_query_execute( /*****************************************************************//** Create a wildcard string. It's the responsibility of the caller to -free the byte* pointer. It's allocated using ut_malloc(). +free the byte* pointer. It's allocated using ut_malloc_nokey(). 
@return ptr to allocated memory */ static byte* @@ -2808,7 +2944,7 @@ fts_query_get_token( if (node->term.wildcard) { - token->f_str = static_cast(ut_malloc(str_len + 2)); + token->f_str = static_cast(ut_malloc_nokey(str_len + 2)); token->f_len = str_len + 1; memcpy(token->f_str, node->term.ptr->str, str_len); @@ -2846,8 +2982,7 @@ fts_query_visitor( switch (node->type) { case FTS_AST_TEXT: - token.f_str = node->text.ptr->str; - token.f_len = node->text.ptr->len; + case FTS_AST_PARSER_PHRASE_LIST: if (query->oper == FTS_EXIST) { ut_ad(query->intersection == NULL); @@ -2863,7 +2998,7 @@ fts_query_visitor( /* Force collection of doc ids and the positions. */ query->collect_positions = TRUE; - query->error = fts_query_phrase_search(query, &token); + query->error = fts_query_phrase_search(query, node); query->collect_positions = FALSE; @@ -2879,6 +3014,20 @@ fts_query_visitor( token.f_str = node->term.ptr->str; token.f_len = node->term.ptr->len; + /* Collect wildcard words for QUERY EXPANSION. */ + if (node->term.wildcard && query->wildcard_words != NULL) { + ib_rbt_bound_t parent; + + if (rbt_search(query->wildcard_words, &parent, &token) + != 0) { + fts_string_t word; + + fts_string_dup(&word, &token, query->heap); + rbt_add_node(query->wildcard_words, &parent, + &word); + } + } + /* Add the word to our RB tree that will be used to calculate this terms per document frequency. */ fts_query_add_word_freq(query, &token); @@ -2889,6 +3038,7 @@ fts_query_visitor( if (ptr) { ut_free(ptr); } + break; case FTS_AST_SUBEXP_LIST: @@ -2910,8 +3060,7 @@ fts_query_visitor( Process (nested) sub-expression, create a new result set to store the sub-expression result by processing nodes under current sub-expression list. Merge the sub-expression result with that of parent expression list. -@return DB_SUCCESS if all well */ -UNIV_INTERN +@return DB_SUCCESS if all go well */ dberr_t fts_ast_visit_sub_exp( /*==================*/ @@ -3180,8 +3329,9 @@ fts_query_read_node( byte buf[FTS_MAX_WORD_LEN + 1]; dberr_t error = DB_SUCCESS; - ut_a(query->cur_node->type == FTS_AST_TERM || - query->cur_node->type == FTS_AST_TEXT); + ut_a(query->cur_node->type == FTS_AST_TERM + || query->cur_node->type == FTS_AST_TEXT + || query->cur_node->type == FTS_AST_PARSER_PHRASE_LIST); memset(&node, 0, sizeof(node)); term.f_str = buf; @@ -3191,6 +3341,7 @@ fts_query_read_node( to assign the frequency on search string behalf. 
*/ if (query->cur_node->type == FTS_AST_TERM && query->cur_node->term.wildcard) { + term.f_len = query->cur_node->term.ptr->len; ut_ad(FTS_MAX_WORD_LEN >= term.f_len); memcpy(term.f_str, query->cur_node->term.ptr->str, term.f_len); @@ -3344,11 +3495,11 @@ fts_query_calculate_idf( } if (fts_enable_diag_print) { - fprintf(stderr,"'%s' -> " UINT64PF "/" UINT64PF - " %6.5lf\n", - word_freq->word.f_str, - query->total_docs, word_freq->doc_count, - word_freq->idf); + ib::info() << "'" << word_freq->word.f_str << "' -> " + << query->total_docs << "/" + << word_freq->doc_count << " " + << std::setw(6) << std::setprecision(5) + << word_freq->idf; } } } @@ -3477,9 +3628,8 @@ fts_query_prepare_result( DBUG_ENTER("fts_query_prepare_result"); if (result == NULL) { - result = static_cast(ut_malloc(sizeof(*result))); - - memset(result, 0x0, sizeof(*result)); + result = static_cast( + ut_zalloc_nokey(sizeof(*result))); result->rankings_by_id = rbt_create( sizeof(fts_ranking_t), fts_ranking_doc_id_cmp); @@ -3605,8 +3755,8 @@ fts_query_get_result( result = fts_query_prepare_result(query, result); } else { /* Create an empty result instance. */ - result = static_cast(ut_malloc(sizeof(*result))); - memset(result, 0, sizeof(*result)); + result = static_cast( + ut_zalloc_nokey(sizeof(*result))); } DBUG_RETURN(result); @@ -3657,14 +3807,18 @@ fts_query_free( rbt_free(query->word_freqs); } + if (query->wildcard_words != NULL) { + rbt_free(query->wildcard_words); + } + ut_a(!query->intersection); if (query->word_map) { rbt_free(query->word_map); } - if (query->word_vector) { - delete query->word_vector; + if (query->word_vector != NULL) { + UT_DELETE(query->word_vector); } if (query->heap) { @@ -3675,7 +3829,8 @@ fts_query_free( } /*****************************************************************//** -Parse the query using flex/bison. */ +Parse the query using flex/bison or plugin parser. +@return parse tree node. */ static fts_ast_node_t* fts_query_parse( @@ -3691,12 +3846,24 @@ fts_query_parse( memset(&state, 0x0, sizeof(state)); - /* Setup the scanner to use, this depends on the mode flag. */ - state.lexer = fts_lexer_create(mode, query_str, query_len); state.charset = query->fts_index_table.charset; - error = fts_parse(&state); - fts_lexer_free(state.lexer); - state.lexer = NULL; + + DBUG_EXECUTE_IF("fts_instrument_query_disable_parser", + query->parser = NULL;); + + if (query->parser) { + state.root = state.cur_node = + fts_ast_create_node_list(&state, NULL); + error = fts_parse_by_parser(mode, query_str, query_len, + query->parser, &state); + } else { + /* Setup the scanner to use, this depends on the mode flag. */ + state.lexer = fts_lexer_create(mode, query_str, query_len); + state.charset = query->fts_index_table.charset; + error = fts_parse(&state); + fts_lexer_free(state.lexer); + state.lexer = NULL; + } /* Error during parsing ? 
*/ if (error) { @@ -3704,6 +3871,10 @@ fts_query_parse( fts_ast_state_free(&state); } else { query->root = state.root; + + if (fts_enable_diag_print && query->root != NULL) { + fts_ast_node_print(query->root); + } } DBUG_RETURN(state.root); @@ -3733,94 +3904,9 @@ fts_query_can_optimize( } } -/*******************************************************************//** -Pre-process the query string -1) make it lower case -2) in boolean mode, if there is '-' or '+' that is immediately proceeded -and followed by valid word, make it a space -@return the processed string */ -static -byte* -fts_query_str_preprocess( -/*=====================*/ - const byte* query_str, /*!< in: FTS query */ - ulint query_len, /*!< in: FTS query string len */ - ulint *result_len, /*!< out: result string length */ - CHARSET_INFO* charset, /*!< in: string charset */ - bool boolean_mode) /*!< in: is boolean mode */ -{ - ulint cur_pos = 0; - ulint str_len; - byte* str_ptr; - bool in_phrase = false; - - /* Convert the query string to lower case before parsing. We own - the ut_malloc'ed result and so remember to free it before return. */ - - str_len = query_len * charset->casedn_multiply + 1; - str_ptr = static_cast(ut_malloc(str_len)); - - *result_len = innobase_fts_casedn_str( - charset, const_cast(reinterpret_cast( - query_str)), query_len, - reinterpret_cast(str_ptr), str_len); - - ut_ad(*result_len < str_len); - - str_ptr[*result_len] = 0; - - /* If it is boolean mode, no need to check for '-/+' */ - if (!boolean_mode) { - return(str_ptr); - } - - /* Otherwise, we travese the string to find any '-/+' that are - immediately proceeded and followed by valid search word. - NOTE: we should not do so for CJK languages, this should - be taken care of in our CJK implementation */ - while (cur_pos < *result_len) { - fts_string_t str; - ulint offset; - ulint cur_len; - - cur_len = innobase_mysql_fts_get_token( - charset, str_ptr + cur_pos, str_ptr + *result_len, - &str, &offset); - - if (cur_len == 0 || str.f_str == NULL) { - /* No valid word found */ - break; - } - - /* Check if we are in a phrase, if so, no need to do - replacement of '-/+'. */ - for (byte* ptr = str_ptr + cur_pos; ptr < str.f_str; ptr++) { - if ((char) (*ptr) == '"' ) { - in_phrase = !in_phrase; - } - } - - /* Find those are not leading '-/+' and also not in a phrase */ - if (cur_pos > 0 && str.f_str - str_ptr - cur_pos == 1 - && !in_phrase) { - char* last_op = reinterpret_cast( - str_ptr + cur_pos); - - if (*last_op == '-' || *last_op == '+') { - *last_op = ' '; - } - } - - cur_pos += cur_len; - } - - return(str_ptr); -} - /*******************************************************************//** FTS Query entry point. 
@return DB_SUCCESS if successful otherwise error code */ -UNIV_INTERN dberr_t fts_query( /*======*/ @@ -3835,6 +3921,7 @@ fts_query( fts_query_t query; dberr_t error = DB_SUCCESS; byte* lc_query_str; + ulint lc_query_str_len; ulint result_len; bool boolean_mode; trx_t* query_trx; @@ -3859,7 +3946,7 @@ fts_query( query.fts_common_table.type = FTS_COMMON_TABLE; query.fts_common_table.table_id = index->table->id; - query.fts_common_table.parent = index->table->name; + query.fts_common_table.parent = index->table->name.m_name; query.fts_common_table.table = index->table; charset = fts_index_get_charset(index); @@ -3867,22 +3954,26 @@ fts_query( query.fts_index_table.type = FTS_INDEX_TABLE; query.fts_index_table.index_id = index->id; query.fts_index_table.table_id = index->table->id; - query.fts_index_table.parent = index->table->name; + query.fts_index_table.parent = index->table->name.m_name; query.fts_index_table.charset = charset; query.fts_index_table.table = index->table; query.word_map = rbt_create_arg_cmp( - sizeof(fts_string_t), innobase_fts_text_cmp, - (void *) charset); - query.word_vector = new word_vector_t; + sizeof(fts_string_t), innobase_fts_text_cmp, (void*)charset); + query.word_vector = UT_NEW_NOKEY(word_vector_t()); query.error = DB_SUCCESS; /* Setup the RB tree that will be used to collect per term statistics. */ query.word_freqs = rbt_create_arg_cmp( - sizeof(fts_word_freq_t), innobase_fts_text_cmp, + sizeof(fts_word_freq_t), innobase_fts_text_cmp, (void*) charset); + if (flags & FTS_EXPAND) { + query.wildcard_words = rbt_create_arg_cmp( + sizeof(fts_string_t), innobase_fts_text_cmp, (void*)charset); + } + query.total_size += SIZEOF_RBT_CREATE; query.total_docs = dict_table_get_n_rows(index->table); @@ -3896,8 +3987,8 @@ fts_query( goto func_exit; } - fprintf(stderr, "Total docs: " UINT64PF " Total words: %lu\n", - query.total_docs, query.total_words); + ib::info() << "Total docs: " << query.total_docs + << " Total words: " << query.total_words; } #endif /* FTS_DOC_STATS_DEBUG */ @@ -3928,12 +4019,11 @@ fts_query( /* Sort the vector so that we can do a binary search over the ids. */ ib_vector_sort(query.deleted->doc_ids, fts_update_doc_id_cmp); -#if 0 /* Convert the query string to lower case before parsing. We own the ut_malloc'ed result and so remember to free it before return. */ lc_query_str_len = query_len * charset->casedn_multiply + 1; - lc_query_str = static_cast(ut_malloc(lc_query_str_len)); + lc_query_str = static_cast(ut_malloc_nokey(lc_query_str_len)); result_len = innobase_fts_casedn_str( charset, (char*) query_str, query_len, @@ -3943,16 +4033,12 @@ fts_query( lc_query_str[result_len] = 0; -#endif - - lc_query_str = fts_query_str_preprocess( - query_str, query_len, &result_len, charset, boolean_mode); - query.heap = mem_heap_create(128); /* Create the rb tree for the doc id (current) set. */ query.doc_ids = rbt_create( sizeof(fts_ranking_t), fts_ranking_doc_id_cmp); + query.parser = index->parser; query.total_size += SIZEOF_RBT_CREATE; @@ -3995,29 +4081,28 @@ fts_query( } else { /* still return an empty result set */ *result = static_cast( - ut_malloc(sizeof(**result))); - memset(*result, 0, sizeof(**result)); + ut_zalloc_nokey(sizeof(**result))); } ut_free(lc_query_str); if (fts_enable_diag_print && (*result)) { ulint diff_time = ut_time_ms() - start_time_ms; - fprintf(stderr, "FTS Search Processing time: %ld secs:" - " %ld millisec: row(s) %d \n", - diff_time / 1000, diff_time % 1000, - (*result)->rankings_by_id - ? 
(int) rbt_size((*result)->rankings_by_id) - : -1); + + ib::info() << "FTS Search Processing time: " + << diff_time / 1000 << " secs: " << diff_time % 1000 + << " millisec: row(s) " + << ((*result)->rankings_by_id + ? rbt_size((*result)->rankings_by_id) + : -1); /* Log memory consumption & result size */ - ib_logf(IB_LOG_LEVEL_INFO, - "Full Search Memory: " - "%lu (bytes), Row: %lu .", - query.total_size, - (*result)->rankings_by_id - ? rbt_size((*result)->rankings_by_id) - : 0); + ib::info() << "Full Search Memory: " << query.total_size + << " (bytes), Row: " + << ((*result)->rankings_by_id + ? rbt_size((*result)->rankings_by_id) + : 0) + << "."; } func_exit: @@ -4030,7 +4115,6 @@ func_exit: /*****************************************************************//** FTS Query free result, returned by fts_query(). */ - void fts_query_free_result( /*==================*/ @@ -4053,7 +4137,6 @@ fts_query_free_result( /*****************************************************************//** FTS Query sort result, returned by fts_query() on fts_ranking_t::rank. */ - void fts_query_sort_result_on_rank( /*==========================*/ @@ -4089,7 +4172,6 @@ fts_query_sort_result_on_rank( result->rankings_by_rank = ranked; } -#ifdef UNIV_DEBUG /*******************************************************************//** A debug function to print result doc_id set. */ static @@ -4107,18 +4189,16 @@ fts_print_doc_id( fts_ranking_t* ranking; ranking = rbt_value(fts_ranking_t, node); - ib_logf(IB_LOG_LEVEL_INFO, "doc_ids info, doc_id: %ld \n", - (ulint) ranking->doc_id); + ib::info() << "doc_ids info, doc_id: " << ranking->doc_id; ulint pos = 0; fts_string_t word; while (fts_ranking_words_get_next(query, ranking, &pos, &word)) { - ib_logf(IB_LOG_LEVEL_INFO, "doc_ids info, value: %s \n", word.f_str); + ib::info() << "doc_ids info, value: " << word.f_str; } } } -#endif /*************************************************************//** This function implements a simple "blind" query expansion search: @@ -4158,19 +4238,20 @@ fts_expand_query( (void*) index_cache->charset); result_doc.charset = index_cache->charset; + result_doc.parser = index_cache->index->parser; + result_doc.is_ngram = index_cache->index->is_ngram; query->total_size += SIZEOF_RBT_CREATE; -#ifdef UNIV_DEBUG - fts_print_doc_id(query); -#endif + + if (fts_enable_diag_print) { + fts_print_doc_id(query); + } for (node = rbt_first(query->doc_ids); node; node = rbt_next(query->doc_ids, node)) { fts_ranking_t* ranking; - ulint pos; - fts_string_t word; ulint prev_token_size; ulint estimate_size; @@ -4189,24 +4270,6 @@ fts_expand_query( fts_query_expansion_fetch_doc, &result_doc); - /* Remove words that have already been searched in the - first pass */ - pos = 0; - while (fts_ranking_words_get_next(query, ranking, &pos, - &word)) { - ibool ret; - - ret = rbt_delete(result_doc.tokens, &word); - - /* The word must exist in the doc we found */ - if (!ret) { - ib_logf(IB_LOG_LEVEL_ERROR, "Did not " - "find word %s in doc %ld for query " - "expansion search.\n", word.f_str, - (ulint) ranking->doc_id); - } - } - /* Estimate memory used, see fts_process_token and fts_token_t. We ignore token size here. 
*/ estimate_size = (rbt_size(result_doc.tokens) - prev_token_size) @@ -4220,6 +4283,30 @@ fts_expand_query( } } + /* Remove words that have already been searched in the first pass */ + for (ulint i = 0; i < query->word_vector->size(); i++) { + fts_string_t word = query->word_vector->at(i); + ib_rbt_bound_t parent; + + if (query->wildcard_words + && rbt_search(query->wildcard_words, &parent, &word) == 0) { + /* If it's a wildcard word, remove words having + it as prefix. */ + while (rbt_search_cmp(result_doc.tokens, + &parent, &word, NULL, + innobase_fts_text_cmp_prefix) + == 0) { + ut_free(rbt_remove_node(result_doc.tokens, + parent.last)); + } + } else { + /* We don't check return value, because the word may + have been deleted by a previous wildcard word as its + prefix, e.g. ('g * good'). */ + rbt_delete(result_doc.tokens, &word); + } + } + /* Search the table the second time with expanded search list */ for (token_node = rbt_first(result_doc.tokens); token_node; @@ -4227,6 +4314,12 @@ fts_expand_query( fts_token_t* mytoken; mytoken = rbt_value(fts_token_t, token_node); + /* '%' in the end is treated as prefix search, + it can cause assert failure, so we skip it. */ + if (mytoken->text.f_str[mytoken->text.f_len - 1] == '%') { + continue; + } + ut_ad(mytoken->text.f_str[mytoken->text.f_len] == 0); fts_query_add_word_freq(query, &mytoken->text); error = fts_query_union(query, &mytoken->text); diff --git a/storage/innobase/fts/fts0sql.cc b/storage/innobase/fts/fts0sql.cc index cb8eff3cacc..4900ff3be1a 100644 --- a/storage/innobase/fts/fts0sql.cc +++ b/storage/innobase/fts/fts0sql.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -30,13 +30,12 @@ Created 2007-03-27 Sunny Bains #include "fts0types.h" #include "fts0priv.h" -#ifndef UNIV_NONINL +#ifdef UNIV_NONINL #include "fts0types.ic" #include "fts0vlc.ic" #endif -/** SQL statements for creating the ancillary FTS tables. %s must be replaced -with the indexed table's id. */ +/** SQL statements for creating the ancillary FTS tables. */ /** Preamble to all SQL statements. */ static const char* fts_sql_begin= @@ -50,7 +49,6 @@ static const char* fts_sql_end= /******************************************************************//** Get the table id. @return number of bytes written */ -UNIV_INTERN int fts_get_table_id( /*=============*/ @@ -97,8 +95,7 @@ fts_get_table_id( /******************************************************************//** Construct the prefix name of an FTS table. -@return own: table name, must be freed with mem_free() */ -UNIV_INTERN +@return own: table name, must be freed with ut_free() */ char* fts_get_table_name_prefix( /*======================*/ @@ -124,7 +121,7 @@ fts_get_table_name_prefix( prefix_name_len = dbname_len + 4 + len + 1; - prefix_name = static_cast(mem_alloc(prefix_name_len)); + prefix_name = static_cast(ut_malloc_nokey(prefix_name_len)); len = sprintf(prefix_name, "%.*sFTS_%s", dbname_len, fts_table->parent, table_id); @@ -136,41 +133,34 @@ fts_get_table_name_prefix( } /******************************************************************//** -Construct the name of an ancillary FTS table. 
-@return own: table name, must be freed with mem_free() */ -UNIV_INTERN -char* +Construct the name of an ancillary FTS table for the given table. +Caller must allocate enough memory(usually size of MAX_FULL_NAME_LEN) +for param 'table_name'. */ +void fts_get_table_name( /*===============*/ - const fts_table_t* fts_table) + const fts_table_t* fts_table, /*!< in: Auxiliary table type */ + char* table_name) + /*!< in/out: aux table name */ { int len; - char* name; - int name_len; char* prefix_name; prefix_name = fts_get_table_name_prefix(fts_table); - name_len = static_cast( - strlen(prefix_name) + 1 + strlen(fts_table->suffix) + 1); - - name = static_cast(mem_alloc(name_len)); - - len = sprintf(name, "%s_%s", prefix_name, fts_table->suffix); + len = sprintf(table_name, "%s_%s", prefix_name, fts_table->suffix); ut_a(len > 0); - ut_a(len == name_len - 1); + ut_a(strlen(prefix_name) + 1 + strlen(fts_table->suffix) + == static_cast(len)); - mem_free(prefix_name); - - return(name); + ut_free(prefix_name); } /******************************************************************//** -Parse an SQL string. %s is replaced with the table's id. +Parse an SQL string. @return query graph */ -UNIV_INTERN que_t* fts_parse_sql( /*==========*/ @@ -180,31 +170,16 @@ fts_parse_sql( { char* str; que_t* graph; - char* str_tmp; ibool dict_locked; - if (fts_table != NULL) { - char* table_name; - - table_name = fts_get_table_name(fts_table); - str_tmp = ut_strreplace(sql, "%s", table_name); - mem_free(table_name); - } else { - ulint sql_len = strlen(sql) + 1; - - str_tmp = static_cast(mem_alloc(sql_len)); - strcpy(str_tmp, sql); - } - - str = ut_str3cat(fts_sql_begin, str_tmp, fts_sql_end); - mem_free(str_tmp); + str = ut_str3cat(fts_sql_begin, sql, fts_sql_end); dict_locked = (fts_table && fts_table->table->fts && (fts_table->table->fts->fts_status & TABLE_DICT_LOCKED)); if (!dict_locked) { - ut_ad(!mutex_own(&(dict_sys->mutex))); + ut_ad(!mutex_own(&dict_sys->mutex)); /* The InnoDB SQL parser is not re-entrant. */ mutex_enter(&dict_sys->mutex); @@ -217,15 +192,14 @@ fts_parse_sql( mutex_exit(&dict_sys->mutex); } - mem_free(str); + ut_free(str); return(graph); } /******************************************************************//** -Parse an SQL string. %s is replaced with the table's id. +Parse an SQL string. @return query graph */ -UNIV_INTERN que_t* fts_parse_sql_no_dict_lock( /*=======================*/ @@ -235,33 +209,19 @@ fts_parse_sql_no_dict_lock( { char* str; que_t* graph; - char* str_tmp = NULL; #ifdef UNIV_DEBUG ut_ad(mutex_own(&dict_sys->mutex)); #endif - if (fts_table != NULL) { - char* table_name; - - table_name = fts_get_table_name(fts_table); - str_tmp = ut_strreplace(sql, "%s", table_name); - mem_free(table_name); - } - - if (str_tmp != NULL) { - str = ut_str3cat(fts_sql_begin, str_tmp, fts_sql_end); - mem_free(str_tmp); - } else { - str = ut_str3cat(fts_sql_begin, sql, fts_sql_end); - } + str = ut_str3cat(fts_sql_begin, sql, fts_sql_end); //fprintf(stderr, "%s\n", str); graph = pars_sql(info, str); ut_a(graph); - mem_free(str); + ut_free(str); return(graph); } @@ -269,7 +229,6 @@ fts_parse_sql_no_dict_lock( /******************************************************************//** Evaluate an SQL query graph. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_eval_sql( /*=========*/ @@ -303,7 +262,6 @@ Two indexed columns named "subject" and "content": "$sel0, $sel1", info/ids: sel0 -> "subject", sel1 -> "content", @return heap-allocated WHERE string */ -UNIV_INTERN const char* fts_get_select_columns_str( /*=======================*/ @@ -334,7 +292,6 @@ fts_get_select_columns_str( /******************************************************************//** Commit a transaction. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_sql_commit( /*===========*/ @@ -353,7 +310,6 @@ fts_sql_commit( /******************************************************************//** Rollback a transaction. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_sql_rollback( /*=============*/ diff --git a/storage/innobase/fts/fts0tlex.cc b/storage/innobase/fts/fts0tlex.cc index d4d9b4c48d1..4cb09784e03 100644 --- a/storage/innobase/fts/fts0tlex.cc +++ b/storage/innobase/fts/fts0tlex.cc @@ -184,15 +184,15 @@ typedef struct yy_buffer_state *YY_BUFFER_STATE; #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 - #define YY_LESS_LINENO(n) +#define YY_LESS_LINENO(n) /* Return all but the first "n" matched characters back to the input stream. */ #define yyless(n) \ do \ { \ /* Undo effects of setting up yytext. */ \ - int yyless_macro_arg = (n); \ - YY_LESS_LINENO(yyless_macro_arg);\ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg);\ *yy_cp = yyg->yy_hold_char; \ YY_RESTORE_YY_MORE_OFFSET \ yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ @@ -245,8 +245,8 @@ struct yy_buffer_state */ int yy_at_bol; - int yy_bs_lineno; /**< The line count. */ - int yy_bs_column; /**< The column count. */ + int yy_bs_lineno; /**< The line count. */ + int yy_bs_column; /**< The column count. */ /* Whether to try to fill the input buffer when we reach the * end of it. @@ -314,9 +314,9 @@ void fts0tfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ #define yy_set_interactive(is_interactive) \ { \ if ( ! YY_CURRENT_BUFFER ){ \ - fts0tensure_buffer_stack (yyscanner); \ + fts0tensure_buffer_stack (yyscanner); \ YY_CURRENT_BUFFER_LVALUE = \ - fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ + fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ } @@ -324,9 +324,9 @@ void fts0tfree (void * , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ #define yy_set_bol(at_bol) \ { \ if ( ! YY_CURRENT_BUFFER ){\ - fts0tensure_buffer_stack (yyscanner); \ + fts0tensure_buffer_stack (yyscanner); \ YY_CURRENT_BUFFER_LVALUE = \ - fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ + fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ } @@ -475,9 +475,10 @@ this program; if not, write to the Free Software Foundation, Inc., /* Required for reentrant parser */ #define YY_DECL int fts_tlexer(YYSTYPE* val, yyscan_t yyscanner) +#define exit(A) ut_error #define YY_NO_INPUT 1 -#line 480 "fts0tlex.cc" +#line 481 "fts0tlex.cc" #define INITIAL 0 @@ -495,37 +496,37 @@ this program; if not, write to the Free Software Foundation, Inc., /* Holds the entire state of the reentrant scanner. */ struct yyguts_t - { - - /* User-defined. Not touched by flex. */ - YY_EXTRA_TYPE yyextra_r; - - /* The rest are the same as the globals declared in the non-reentrant scanner. */ - FILE *yyin_r, *yyout_r; - size_t yy_buffer_stack_top; /**< index of top of stack. 
*/ - size_t yy_buffer_stack_max; /**< capacity of stack. */ - YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */ - char yy_hold_char; - int yy_n_chars; - int yyleng_r; - char *yy_c_buf_p; - int yy_init; - int yy_start; - int yy_did_buffer_switch_on_eof; - int yy_start_stack_ptr; - int yy_start_stack_depth; - int *yy_start_stack; - yy_state_type yy_last_accepting_state; - char* yy_last_accepting_cpos; - - int yylineno_r; - int yy_flex_debug_r; - - char *yytext_r; - int yy_more_flag; - int yy_more_len; - - }; /* end struct yyguts_t */ +{ + + /* User-defined. Not touched by flex. */ + YY_EXTRA_TYPE yyextra_r; + + /* The rest are the same as the globals declared in the non-reentrant scanner. */ + FILE *yyin_r, *yyout_r; + size_t yy_buffer_stack_top; /**< index of top of stack. */ + size_t yy_buffer_stack_max; /**< capacity of stack. */ + YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */ + char yy_hold_char; + int yy_n_chars; + int yyleng_r; + char *yy_c_buf_p; + int yy_init; + int yy_start; + int yy_did_buffer_switch_on_eof; + int yy_start_stack_ptr; + int yy_start_stack_depth; + int *yy_start_stack; + yy_state_type yy_last_accepting_state; + char* yy_last_accepting_cpos; + + int yylineno_r; + int yy_flex_debug_r; + + char *yytext_r; + int yy_more_flag; + int yy_more_len; + +}; /* end struct yyguts_t */ static int yy_init_globals (yyscan_t yyscanner ); @@ -700,12 +701,12 @@ YY_DECL register yy_state_type yy_current_state; register char *yy_cp, *yy_bp; register int yy_act; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; -#line 44 "fts0tlex.l" +#line 45 "fts0tlex.l" -#line 707 "fts0tlex.cc" +#line 708 "fts0tlex.cc" if ( !yyg->yy_init ) { @@ -786,12 +787,12 @@ do_action: /* This label is used only to access EOF actions. 
*/ case 1: YY_RULE_SETUP -#line 46 "fts0tlex.l" +#line 47 "fts0tlex.l" /* Ignore whitespace */ ; YY_BREAK case 2: YY_RULE_SETUP -#line 48 "fts0tlex.l" +#line 49 "fts0tlex.l" { val->oper = fts0tget_text(yyscanner)[0]; @@ -800,7 +801,7 @@ YY_RULE_SETUP YY_BREAK case 3: YY_RULE_SETUP -#line 54 "fts0tlex.l" +#line 55 "fts0tlex.l" { val->token = fts_ast_string_create(reinterpret_cast(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); @@ -809,7 +810,7 @@ YY_RULE_SETUP YY_BREAK case 4: YY_RULE_SETUP -#line 60 "fts0tlex.l" +#line 61 "fts0tlex.l" { val->token = fts_ast_string_create(reinterpret_cast(fts0tget_text(yyscanner)), fts0tget_leng(yyscanner)); @@ -818,21 +819,21 @@ YY_RULE_SETUP YY_BREAK case 5: YY_RULE_SETUP -#line 65 "fts0tlex.l" +#line 66 "fts0tlex.l" ; YY_BREAK case 6: /* rule 6 can match eol */ YY_RULE_SETUP -#line 66 "fts0tlex.l" +#line 67 "fts0tlex.l" YY_BREAK case 7: YY_RULE_SETUP -#line 68 "fts0tlex.l" +#line 69 "fts0tlex.l" ECHO; YY_BREAK -#line 834 "fts0tlex.cc" +#line 835 "fts0tlex.cc" case YY_STATE_EOF(INITIAL): yyterminate(); @@ -975,7 +976,7 @@ case YY_STATE_EOF(INITIAL): */ static int yy_get_next_buffer (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; register char *source = yyg->yytext_ptr; register int number_to_move, i; @@ -1108,11 +1109,11 @@ static int yy_get_next_buffer (yyscan_t yyscanner) /* yy_get_previous_state - get the state just before the EOB char was reached */ - static yy_state_type yy_get_previous_state (yyscan_t yyscanner) +static yy_state_type yy_get_previous_state (yyscan_t yyscanner) { register yy_state_type yy_current_state; register char *yy_cp; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; yy_current_state = yyg->yy_start; @@ -1141,10 +1142,10 @@ static int yy_get_next_buffer (yyscan_t yyscanner) * synopsis * next_state = yy_try_NUL_trans( current_state ); */ - static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner) +static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner) { register int yy_is_jam; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */ + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */ register char *yy_cp = yyg->yy_c_buf_p; register YY_CHAR yy_c = 1; @@ -1167,14 +1168,14 @@ static int yy_get_next_buffer (yyscan_t yyscanner) #ifndef YY_NO_INPUT #ifdef __cplusplus - static int yyinput (yyscan_t yyscanner) + static int yyinput (yyscan_t yyscanner) #else - static int input (yyscan_t yyscanner) + static int input (yyscan_t yyscanner) #endif { int c; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; *yyg->yy_c_buf_p = yyg->yy_hold_char; @@ -1245,14 +1246,14 @@ static int yy_get_next_buffer (yyscan_t yyscanner) * @param yyscanner The scanner object. * @note This function does not reset the start condition to @c INITIAL . */ - void fts0trestart (FILE * input_file , yyscan_t yyscanner) +void fts0trestart (FILE * input_file , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if ( ! 
YY_CURRENT_BUFFER ){ - fts0tensure_buffer_stack (yyscanner); + fts0tensure_buffer_stack (yyscanner); YY_CURRENT_BUFFER_LVALUE = - fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); + fts0t_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); } fts0t_init_buffer(YY_CURRENT_BUFFER,input_file ,yyscanner); @@ -1263,15 +1264,15 @@ static int yy_get_next_buffer (yyscan_t yyscanner) * @param new_buffer The new input buffer. * @param yyscanner The scanner object. */ - void fts0t_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) +void fts0t_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* TODO. We should be able to replace this entire function body * with * fts0tpop_buffer_state(); * fts0tpush_buffer_state(new_buffer); - */ + */ fts0tensure_buffer_stack (yyscanner); if ( YY_CURRENT_BUFFER == new_buffer ) return; @@ -1297,7 +1298,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner) static void fts0t_load_buffer_state (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; @@ -1310,7 +1311,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner) * @param yyscanner The scanner object. * @return the allocated buffer state. */ - YY_BUFFER_STATE fts0t_create_buffer (FILE * file, int size , yyscan_t yyscanner) +YY_BUFFER_STATE fts0t_create_buffer (FILE * file, int size , yyscan_t yyscanner) { YY_BUFFER_STATE b; @@ -1338,9 +1339,9 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner) * @param b a buffer created with fts0t_create_buffer() * @param yyscanner The scanner object. */ - void fts0t_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +void fts0t_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if ( ! b ) return; @@ -1358,27 +1359,27 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner) * This function is sometimes called more than once on the same buffer, * such as during a fts0trestart() or at EOF. */ - static void fts0t_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner) +static void fts0t_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner) { int oerrno = errno; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; fts0t_flush_buffer(b ,yyscanner); b->yy_input_file = file; b->yy_fill_buffer = 1; - /* If b is the current buffer, then fts0t_init_buffer was _probably_ - * called from fts0trestart() or through yy_get_next_buffer. - * In that case, we don't want to reset the lineno or column. - */ - if (b != YY_CURRENT_BUFFER){ - b->yy_bs_lineno = 1; - b->yy_bs_column = 0; - } + /* If b is the current buffer, then fts0t_init_buffer was _probably_ + * called from fts0trestart() or through yy_get_next_buffer. + * In that case, we don't want to reset the lineno or column. + */ + if (b != YY_CURRENT_BUFFER){ + b->yy_bs_lineno = 1; + b->yy_bs_column = 0; + } - b->yy_is_interactive = 0; + b->yy_is_interactive = 0; errno = oerrno; } @@ -1387,9 +1388,9 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner) * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. 
* @param yyscanner The scanner object. */ - void fts0t_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +void fts0t_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if ( ! b ) return; @@ -1419,7 +1420,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner) */ void fts0tpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if (new_buffer == NULL) return; @@ -1450,7 +1451,7 @@ void fts0tpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) */ void fts0tpop_buffer_state (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if (!YY_CURRENT_BUFFER) return; @@ -1471,14 +1472,14 @@ void fts0tpop_buffer_state (yyscan_t yyscanner) static void fts0tensure_buffer_stack (yyscan_t yyscanner) { int num_to_alloc; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; if (!yyg->yy_buffer_stack) { /* First allocation is just for 2 elements, since we don't know if this * scanner will even need a stack. We use 2 instead of 1 to avoid an * immediate realloc on the next call. - */ + */ num_to_alloc = 1; yyg->yy_buffer_stack = (struct yy_buffer_state**)fts0talloc (num_to_alloc * sizeof(struct yy_buffer_state*) @@ -1604,7 +1605,7 @@ YY_BUFFER_STATE fts0t_scan_bytes (yyconst char * yybytes, int _yybytes_len , y static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused)) MY_ATTRIBUTE((unused))) { - (void) fprintf( stderr, "%s\n", msg ); + (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); } @@ -1615,8 +1616,8 @@ static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY do \ { \ /* Undo effects of setting up yytext. */ \ - int yyless_macro_arg = (n); \ - YY_LESS_LINENO(yyless_macro_arg);\ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg);\ yytext[yyleng] = yyg->yy_hold_char; \ yyg->yy_c_buf_p = yytext + yyless_macro_arg; \ yyg->yy_hold_char = *yyg->yy_c_buf_p; \ @@ -1632,8 +1633,8 @@ static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner MY */ YY_EXTRA_TYPE fts0tget_extra (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyextra; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyextra; } /** Get the current line number. @@ -1641,12 +1642,12 @@ YY_EXTRA_TYPE fts0tget_extra (yyscan_t yyscanner) */ int fts0tget_lineno (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - if (! YY_CURRENT_BUFFER) - return 0; + if (! YY_CURRENT_BUFFER) + return 0; - return yylineno; + return yylineno; } /** Get the current column number. @@ -1654,12 +1655,12 @@ int fts0tget_lineno (yyscan_t yyscanner) */ int fts0tget_column (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - if (! YY_CURRENT_BUFFER) - return 0; + if (! YY_CURRENT_BUFFER) + return 0; - return yycolumn; + return yycolumn; } /** Get the input stream. 
@@ -1667,8 +1668,8 @@ int fts0tget_column (yyscan_t yyscanner) */ FILE *fts0tget_in (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyin; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyin; } /** Get the output stream. @@ -1676,8 +1677,8 @@ FILE *fts0tget_in (yyscan_t yyscanner) */ FILE *fts0tget_out (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyout; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyout; } /** Get the length of the current token. @@ -1685,8 +1686,8 @@ FILE *fts0tget_out (yyscan_t yyscanner) */ int fts0tget_leng (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyleng; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yyleng; } /** Get the current token. @@ -1695,8 +1696,8 @@ int fts0tget_leng (yyscan_t yyscanner) char *fts0tget_text (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yytext; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yytext; } /** Set the user-defined data. This data is never touched by the scanner. @@ -1705,8 +1706,8 @@ char *fts0tget_text (yyscan_t yyscanner) */ void fts0tset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyextra = user_defined ; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyextra = user_defined ; } /** Set the current line number. @@ -1715,13 +1716,13 @@ void fts0tset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner) */ void fts0tset_lineno (int line_number , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - /* lineno is only valid if an input buffer exists. */ - if (! YY_CURRENT_BUFFER ) - yy_fatal_error( "fts0tset_lineno called with no buffer" , yyscanner); + /* lineno is only valid if an input buffer exists. */ + if (! YY_CURRENT_BUFFER ) + yy_fatal_error( "fts0tset_lineno called with no buffer" , yyscanner); - yylineno = line_number; + yylineno = line_number; } /** Set the current column. @@ -1730,13 +1731,13 @@ void fts0tset_lineno (int line_number , yyscan_t yyscanner) */ void fts0tset_column (int column_no , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - /* column is only valid if an input buffer exists. */ - if (! YY_CURRENT_BUFFER ) - yy_fatal_error( "fts0tset_column called with no buffer" , yyscanner); + /* column is only valid if an input buffer exists. */ + if (! YY_CURRENT_BUFFER ) + yy_fatal_error( "fts0tset_column called with no buffer" , yyscanner); - yycolumn = column_no; + yycolumn = column_no; } /** Set the input stream. 
This does not discard the current @@ -1747,26 +1748,26 @@ void fts0tset_column (int column_no , yyscan_t yyscanner) */ void fts0tset_in (FILE * in_str , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyin = in_str ; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyin = in_str ; } void fts0tset_out (FILE * out_str , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyout = out_str ; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yyout = out_str ; } int fts0tget_debug (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yy_flex_debug; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + return yy_flex_debug; } void fts0tset_debug (int bdebug , yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yy_flex_debug = bdebug ; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yy_flex_debug = bdebug ; } /* Accessor methods for yylval and yylloc */ @@ -1781,22 +1782,22 @@ void fts0tset_debug (int bdebug , yyscan_t yyscanner) int fts0tlex_init(yyscan_t* ptr_yy_globals) { - if (ptr_yy_globals == NULL){ - errno = EINVAL; - return 1; - } + if (ptr_yy_globals == NULL){ + errno = EINVAL; + return 1; + } - *ptr_yy_globals = (yyscan_t) fts0talloc ( sizeof( struct yyguts_t ), NULL ); + *ptr_yy_globals = (yyscan_t) fts0talloc ( sizeof( struct yyguts_t ), NULL ); - if (*ptr_yy_globals == NULL){ - errno = ENOMEM; - return 1; - } + if (*ptr_yy_globals == NULL){ + errno = ENOMEM; + return 1; + } - /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */ - memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); + /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); - return yy_init_globals ( *ptr_yy_globals ); + return yy_init_globals ( *ptr_yy_globals ); } /* fts0tlex_init_extra has the same functionality as fts0tlex_init, but follows the @@ -1810,70 +1811,70 @@ int fts0tlex_init(yyscan_t* ptr_yy_globals) int fts0tlex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals ) { - struct yyguts_t dummy_yyguts; + struct yyguts_t dummy_yyguts; - fts0tset_extra (yy_user_defined, &dummy_yyguts); + fts0tset_extra (yy_user_defined, &dummy_yyguts); - if (ptr_yy_globals == NULL){ - errno = EINVAL; - return 1; - } + if (ptr_yy_globals == NULL){ + errno = EINVAL; + return 1; + } - *ptr_yy_globals = (yyscan_t) fts0talloc ( sizeof( struct yyguts_t ), &dummy_yyguts ); + *ptr_yy_globals = (yyscan_t) fts0talloc ( sizeof( struct yyguts_t ), &dummy_yyguts ); - if (*ptr_yy_globals == NULL){ - errno = ENOMEM; - return 1; - } + if (*ptr_yy_globals == NULL){ + errno = ENOMEM; + return 1; + } - /* By setting to 0xAA, we expose bugs in - yy_init_globals. Leave at 0x00 for releases. */ - memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); + /* By setting to 0xAA, we expose bugs in + yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); - fts0tset_extra (yy_user_defined, *ptr_yy_globals); + fts0tset_extra (yy_user_defined, *ptr_yy_globals); - return yy_init_globals ( *ptr_yy_globals ); + return yy_init_globals ( *ptr_yy_globals ); } static int yy_init_globals (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - /* Initialization is the same as for the non-reentrant scanner. - * This function is called from fts0tlex_destroy(), so don't allocate here. 
- */ - - yyg->yy_buffer_stack = 0; - yyg->yy_buffer_stack_top = 0; - yyg->yy_buffer_stack_max = 0; - yyg->yy_c_buf_p = (char *) 0; - yyg->yy_init = 0; - yyg->yy_start = 0; - - yyg->yy_start_stack_ptr = 0; - yyg->yy_start_stack_depth = 0; - yyg->yy_start_stack = NULL; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + /* Initialization is the same as for the non-reentrant scanner. + * This function is called from fts0tlex_destroy(), so don't allocate here. + */ + + yyg->yy_buffer_stack = 0; + yyg->yy_buffer_stack_top = 0; + yyg->yy_buffer_stack_max = 0; + yyg->yy_c_buf_p = (char *) 0; + yyg->yy_init = 0; + yyg->yy_start = 0; + + yyg->yy_start_stack_ptr = 0; + yyg->yy_start_stack_depth = 0; + yyg->yy_start_stack = NULL; /* Defined in main.c */ #ifdef YY_STDINIT - yyin = stdin; - yyout = stdout; + yyin = stdin; + yyout = stdout; #else - yyin = (FILE *) 0; - yyout = (FILE *) 0; + yyin = (FILE *) 0; + yyout = (FILE *) 0; #endif - /* For future reference: Set errno on error, since we are called by - * fts0tlex_init() - */ - return 0; + /* For future reference: Set errno on error, since we are called by + * fts0tlex_init() + */ + return 0; } /* fts0tlex_destroy is for both reentrant and non-reentrant scanners. */ int fts0tlex_destroy (yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - /* Pop the buffer stack, destroying each element. */ + /* Pop the buffer stack, destroying each element. */ while(YY_CURRENT_BUFFER){ fts0t_delete_buffer(YY_CURRENT_BUFFER ,yyscanner ); YY_CURRENT_BUFFER_LVALUE = NULL; @@ -1884,18 +1885,18 @@ int fts0tlex_destroy (yyscan_t yyscanner) fts0tfree(yyg->yy_buffer_stack ,yyscanner); yyg->yy_buffer_stack = NULL; - /* Destroy the start condition stack. */ - fts0tfree(yyg->yy_start_stack ,yyscanner ); - yyg->yy_start_stack = NULL; + /* Destroy the start condition stack. */ + fts0tfree(yyg->yy_start_stack ,yyscanner ); + yyg->yy_start_stack = NULL; - /* Reset the globals. This is important in a non-reentrant scanner so the next time - * fts0tlex() is called, initialization will occur. */ - yy_init_globals( yyscanner); + /* Reset the globals. This is important in a non-reentrant scanner so the next time + * fts0tlex() is called, initialization will occur. */ + yy_init_globals( yyscanner); - /* Destroy the main struct (reentrant only). */ - fts0tfree ( yyscanner , yyscanner ); - yyscanner = NULL; - return 0; + /* Destroy the main struct (reentrant only). */ + fts0tfree ( yyscanner , yyscanner ); + yyscanner = NULL; + return 0; } /* @@ -1946,7 +1947,5 @@ void fts0tfree (void * ptr , yyscan_t yyscanner MY_ATTRIBUTE((unused) #define YYTABLES_NAME "yytables" -#line 68 "fts0tlex.l" - - +#line 69 "fts0tlex.l" diff --git a/storage/innobase/fts/fts0tlex.l b/storage/innobase/fts/fts0tlex.l index 4f55a83afe5..242bfebda52 100644 --- a/storage/innobase/fts/fts0tlex.l +++ b/storage/innobase/fts/fts0tlex.l @@ -30,6 +30,7 @@ this program; if not, write to the Free Software Foundation, Inc., /* Required for reentrant parser */ #define YY_DECL int fts_tlexer(YYSTYPE* val, yyscan_t yyscanner) +#define exit(A) ut_error %} diff --git a/storage/innobase/fut/fut0lst.cc b/storage/innobase/fut/fut0lst.cc index 8f96a6426d2..8b39a4de1fb 100644 --- a/storage/innobase/fut/fut0lst.cc +++ b/storage/innobase/fut/fut0lst.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -23,6 +23,7 @@ File-based list utilities Created 11/28/1995 Heikki Tuuri ***********************************************************************/ +#include "univ.i" #include "fut0lst.h" #ifdef UNIV_NONINL @@ -49,9 +50,13 @@ flst_add_to_empty( ut_ad(mtr && base && node); ut_ad(base != node); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node, MTR_MEMO_PAGE_X_FIX)); - len = flst_get_len(base, mtr); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + len = flst_get_len(base); ut_a(len == 0); buf_ptr_get_fsp_addr(node, &space, &node_addr); @@ -70,7 +75,6 @@ flst_add_to_empty( /********************************************************************//** Adds a node as the last node in a list. */ -UNIV_INTERN void flst_add_last( /*==========*/ @@ -82,26 +86,35 @@ flst_add_last( fil_addr_t node_addr; ulint len; fil_addr_t last_addr; - flst_node_t* last_node; ut_ad(mtr && base && node); ut_ad(base != node); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node, MTR_MEMO_PAGE_X_FIX)); - len = flst_get_len(base, mtr); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + len = flst_get_len(base); last_addr = flst_get_last(base, mtr); buf_ptr_get_fsp_addr(node, &space, &node_addr); /* If the list is not empty, call flst_insert_after */ if (len != 0) { + flst_node_t* last_node; + if (last_addr.page == node_addr.page) { last_node = page_align(node) + last_addr.boffset; } else { - ulint zip_size = fil_space_get_zip_size(space); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space, &found); + + ut_ad(found); - last_node = fut_get_ptr(space, zip_size, last_addr, - RW_X_LATCH, mtr); + last_node = fut_get_ptr(space, page_size, last_addr, + RW_SX_LATCH, mtr); } flst_insert_after(base, last_node, node, mtr); @@ -113,7 +126,6 @@ flst_add_last( /********************************************************************//** Adds a node as the first node in a list. 
*/ -UNIV_INTERN void flst_add_first( /*===========*/ @@ -129,9 +141,13 @@ flst_add_first( ut_ad(mtr && base && node); ut_ad(base != node); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node, MTR_MEMO_PAGE_X_FIX)); - len = flst_get_len(base, mtr); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + len = flst_get_len(base); first_addr = flst_get_first(base, mtr); buf_ptr_get_fsp_addr(node, &space, &node_addr); @@ -141,10 +157,14 @@ flst_add_first( if (first_addr.page == node_addr.page) { first_node = page_align(node) + first_addr.boffset; } else { - ulint zip_size = fil_space_get_zip_size(space); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space, &found); + + ut_ad(found); - first_node = fut_get_ptr(space, zip_size, first_addr, - RW_X_LATCH, mtr); + first_node = fut_get_ptr(space, page_size, first_addr, + RW_SX_LATCH, mtr); } flst_insert_before(base, node, first_node, mtr); @@ -156,7 +176,6 @@ flst_add_first( /********************************************************************//** Inserts a node after another in a list. */ -UNIV_INTERN void flst_insert_after( /*==============*/ @@ -176,9 +195,15 @@ flst_insert_after( ut_ad(base != node1); ut_ad(base != node2); ut_ad(node2 != node1); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node1, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node1, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node2, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); buf_ptr_get_fsp_addr(node1, &space, &node1_addr); buf_ptr_get_fsp_addr(node2, &space, &node2_addr); @@ -191,10 +216,14 @@ flst_insert_after( if (!fil_addr_is_null(node3_addr)) { /* Update prev field of node3 */ - ulint zip_size = fil_space_get_zip_size(space); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space, &found); - node3 = fut_get_ptr(space, zip_size, - node3_addr, RW_X_LATCH, mtr); + ut_ad(found); + + node3 = fut_get_ptr(space, page_size, + node3_addr, RW_SX_LATCH, mtr); flst_write_addr(node3 + FLST_PREV, node2_addr, mtr); } else { /* node1 was last in list: update last field in base */ @@ -205,13 +234,12 @@ flst_insert_after( flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr); /* Update len of base node */ - len = flst_get_len(base, mtr); + len = flst_get_len(base); mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr); } /********************************************************************//** Inserts a node before another in a list. 
*/ -UNIV_INTERN void flst_insert_before( /*===============*/ @@ -231,9 +259,15 @@ flst_insert_before( ut_ad(base != node2); ut_ad(base != node3); ut_ad(node2 != node3); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node3, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node2, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node3, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); buf_ptr_get_fsp_addr(node2, &space, &node2_addr); buf_ptr_get_fsp_addr(node3, &space, &node3_addr); @@ -245,10 +279,15 @@ flst_insert_before( flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr); if (!fil_addr_is_null(node1_addr)) { - ulint zip_size = fil_space_get_zip_size(space); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space, &found); + + ut_ad(found); + /* Update next field of node1 */ - node1 = fut_get_ptr(space, zip_size, node1_addr, - RW_X_LATCH, mtr); + node1 = fut_get_ptr(space, page_size, node1_addr, + RW_SX_LATCH, mtr); flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr); } else { /* node3 was first in list: update first field in base */ @@ -259,13 +298,12 @@ flst_insert_before( flst_write_addr(node3 + FLST_PREV, node2_addr, mtr); /* Update len of base node */ - len = flst_get_len(base, mtr); + len = flst_get_len(base); mlog_write_ulint(base + FLST_LEN, len + 1, MLOG_4BYTES, mtr); } /********************************************************************//** Removes a node. */ -UNIV_INTERN void flst_remove( /*========*/ @@ -274,7 +312,6 @@ flst_remove( mtr_t* mtr) /*!< in: mini-transaction handle */ { ulint space; - ulint zip_size; flst_node_t* node1; fil_addr_t node1_addr; fil_addr_t node2_addr; @@ -283,11 +320,20 @@ flst_remove( ulint len; ut_ad(mtr && node2 && base); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node2, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); buf_ptr_get_fsp_addr(node2, &space, &node2_addr); - zip_size = fil_space_get_zip_size(space); + + bool found; + const page_size_t& page_size = fil_space_get_page_size(space, + &found); + + ut_ad(found); node1_addr = flst_get_prev_addr(node2, mtr); node3_addr = flst_get_next_addr(node2, mtr); @@ -300,8 +346,8 @@ flst_remove( node1 = page_align(node2) + node1_addr.boffset; } else { - node1 = fut_get_ptr(space, zip_size, - node1_addr, RW_X_LATCH, mtr); + node1 = fut_get_ptr(space, page_size, + node1_addr, RW_SX_LATCH, mtr); } ut_ad(node1 != node2); @@ -319,8 +365,8 @@ flst_remove( node3 = page_align(node2) + node3_addr.boffset; } else { - node3 = fut_get_ptr(space, zip_size, - node3_addr, RW_X_LATCH, mtr); + node3 = fut_get_ptr(space, page_size, + node3_addr, RW_SX_LATCH, mtr); } ut_ad(node2 != node3); @@ -332,7 +378,7 @@ flst_remove( } /* Update len of base node */ - len = flst_get_len(base, mtr); + len = flst_get_len(base); ut_ad(len > 0); mlog_write_ulint(base + FLST_LEN, len - 1, MLOG_4BYTES, mtr); @@ -342,7 +388,6 @@ flst_remove( Cuts off the tail of the list, including the node given. 
The number of nodes which will be removed must be provided by the caller, as this function does not measure the length of the tail. */ -UNIV_INTERN void flst_cut_end( /*=========*/ @@ -359,8 +404,12 @@ flst_cut_end( ulint len; ut_ad(mtr && node2 && base); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node2, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); ut_ad(n_nodes > 0); buf_ptr_get_fsp_addr(node2, &space, &node2_addr); @@ -375,9 +424,14 @@ flst_cut_end( node1 = page_align(node2) + node1_addr.boffset; } else { - node1 = fut_get_ptr(space, - fil_space_get_zip_size(space), - node1_addr, RW_X_LATCH, mtr); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space, &found); + + ut_ad(found); + + node1 = fut_get_ptr(space, page_size, + node1_addr, RW_SX_LATCH, mtr); } flst_write_addr(node1 + FLST_NEXT, fil_addr_null, mtr); @@ -389,7 +443,7 @@ flst_cut_end( flst_write_addr(base + FLST_LAST, node1_addr, mtr); /* Update len of base node */ - len = flst_get_len(base, mtr); + len = flst_get_len(base); ut_ad(len >= n_nodes); mlog_write_ulint(base + FLST_LEN, len - n_nodes, MLOG_4BYTES, mtr); @@ -399,7 +453,6 @@ flst_cut_end( Cuts off the tail of the list, not including the given node. The number of nodes which will be removed must be provided by the caller, as this function does not measure the length of the tail. */ -UNIV_INTERN void flst_truncate_end( /*==============*/ @@ -413,8 +466,12 @@ flst_truncate_end( ulint space; ut_ad(mtr && node2 && base); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); - ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, node2, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); if (n_nodes == 0) { ut_ad(fil_addr_is_null(flst_get_next_addr(node2, mtr))); @@ -430,7 +487,7 @@ flst_truncate_end( flst_write_addr(base + FLST_LAST, node2_addr, mtr); /* Update len of base node */ - len = flst_get_len(base, mtr); + len = flst_get_len(base); ut_ad(len >= n_nodes); mlog_write_ulint(base + FLST_LEN, len - n_nodes, MLOG_4BYTES, mtr); @@ -438,8 +495,7 @@ flst_truncate_end( /********************************************************************//** Validates a file-based list. 
-@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool flst_validate( /*==========*/ @@ -447,7 +503,6 @@ flst_validate( mtr_t* mtr1) /*!< in: mtr */ { ulint space; - ulint zip_size; const flst_node_t* node; fil_addr_t node_addr; fil_addr_t base_addr; @@ -456,7 +511,9 @@ flst_validate( mtr_t mtr2; ut_ad(base); - ut_ad(mtr_memo_contains_page(mtr1, base, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr1, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); /* We use two mini-transaction handles: the first is used to lock the base node, and prevent other threads from modifying the @@ -467,16 +524,21 @@ flst_validate( /* Find out the space id */ buf_ptr_get_fsp_addr(base, &space, &base_addr); - zip_size = fil_space_get_zip_size(space); - len = flst_get_len(base, mtr1); + bool found; + const page_size_t& page_size = fil_space_get_page_size(space, + &found); + + ut_ad(found); + + len = flst_get_len(base); node_addr = flst_get_first(base, mtr1); for (i = 0; i < len; i++) { mtr_start(&mtr2); - node = fut_get_ptr(space, zip_size, - node_addr, RW_X_LATCH, &mtr2); + node = fut_get_ptr(space, page_size, + node_addr, RW_SX_LATCH, &mtr2); node_addr = flst_get_next_addr(node, &mtr2); mtr_commit(&mtr2); /* Commit mtr2 each round to prevent buffer @@ -490,8 +552,8 @@ flst_validate( for (i = 0; i < len; i++) { mtr_start(&mtr2); - node = fut_get_ptr(space, zip_size, - node_addr, RW_X_LATCH, &mtr2); + node = fut_get_ptr(space, page_size, + node_addr, RW_SX_LATCH, &mtr2); node_addr = flst_get_prev_addr(node, &mtr2); mtr_commit(&mtr2); /* Commit mtr2 each round to prevent buffer @@ -505,7 +567,6 @@ flst_validate( /********************************************************************//** Prints info of a file-based list. */ -UNIV_INTERN void flst_print( /*=======*/ @@ -516,15 +577,16 @@ flst_print( ulint len; ut_ad(base && mtr); - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); frame = page_align((byte*) base); - len = flst_get_len(base, mtr); + len = flst_get_len(base); - fprintf(stderr, - "FILE-BASED LIST:\n" - "Base node in space %lu page %lu byte offset %lu; len %lu\n", - (ulong) page_get_space_id(frame), - (ulong) page_get_page_no(frame), - (ulong) page_offset(base), (ulong) len); + ib::info() << "FILE-BASED LIST: Base node in space " + << page_get_space_id(frame) + << "; page " << page_get_page_no(frame) + << "; byte offset " << page_offset(base) + << "; len " << len; } diff --git a/storage/innobase/gis/gis0geo.cc b/storage/innobase/gis/gis0geo.cc new file mode 100644 index 00000000000..d971c3d639c --- /dev/null +++ b/storage/innobase/gis/gis0geo.cc @@ -0,0 +1,835 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file gis/gis0geo.cc +InnoDB R-tree related functions. + +Created 2013/03/27 Allen Lai and Jimmy Yang +*******************************************************/ + +#include "page0types.h" +#include "gis0geo.h" +#include "page0cur.h" +#include "ut0rnd.h" +#include "mach0data.h" + +#include + +/* These definitions are for comparing 2 mbrs. */ + +/* Check if a intersects b. +Return false if a intersects b, otherwise true. */ +#define INTERSECT_CMP(amin, amax, bmin, bmax) \ +(((amin) > (bmax)) || ((bmin) > (amax))) + +/* Check if b contains a. +Return false if b contains a, otherwise true. */ +#define CONTAIN_CMP(amin, amax, bmin, bmax) \ +(((bmin) > (amin)) || ((bmax) < (amax))) + +/* Check if b is within a. +Return false if b is within a, otherwise true. */ +#define WITHIN_CMP(amin, amax, bmin, bmax) \ +(((amin) > (bmin)) || ((amax) < (bmax))) + +/* Check if a disjoints b. +Return false if a disjoints b, otherwise true. */ +#define DISJOINT_CMP(amin, amax, bmin, bmax) \ +(((amin) <= (bmax)) && ((bmin) <= (amax))) + +/* Check if a equals b. +Return false if equal, otherwise true. */ +#define EQUAL_CMP(amin, amax, bmin, bmax) \ +(((amin) != (bmin)) || ((amax) != (bmax))) + +/**************************************************************** +Functions for generating mbr +****************************************************************/ +/*************************************************************//** +Add one point stored in wkb to a given mbr. +@return 0 if the point in wkb is valid, otherwise -1. */ +static +int +rtree_add_point_to_mbr( +/*===================*/ + uchar** wkb, /*!< in: pointer to wkb, + where point is stored */ + uchar* end, /*!< in: end of wkb. */ + uint n_dims, /*!< in: dimensions. */ + uchar byte_order, /*!< in: byte order. */ + double* mbr) /*!< in/out: mbr, which + must be of length n_dims * 2. */ +{ + double ord; + double* mbr_end = mbr + n_dims * 2; + + while (mbr < mbr_end) { + if ((*wkb) + sizeof(double) > end) { + return(-1); + } + + ord = mach_double_read(*wkb); + (*wkb) += sizeof(double); + + if (ord < *mbr) { + *mbr = ord; + } + mbr++; + + if (ord > *mbr) { + *mbr = ord; + } + mbr++; + } + + return(0); +} + +/*************************************************************//** +Get mbr of point stored in wkb. +@return 0 if ok, otherwise -1. */ +static +int +rtree_get_point_mbr( +/*================*/ + uchar** wkb, /*!< in: pointer to wkb, + where point is stored. */ + uchar* end, /*!< in: end of wkb. */ + uint n_dims, /*!< in: dimensions. */ + uchar byte_order, /*!< in: byte order. */ + double* mbr) /*!< in/out: mbr, + must be of length n_dims * 2. */ +{ + return rtree_add_point_to_mbr(wkb, end, n_dims, byte_order, mbr); +} + + +/*************************************************************//** +Get mbr of linestring stored in wkb. +@return 0 if the linestring is valid, otherwise -1. */ +static +int +rtree_get_linestring_mbr( +/*=====================*/ + uchar** wkb, /*!< in: pointer to wkb, + where point is stored. */ + uchar* end, /*!< in: end of wkb. */ + uint n_dims, /*!< in: dimensions. */ + uchar byte_order, /*!< in: byte order. */ + double* mbr) /*!< in/out: mbr, + must be of length n_dims * 2. 
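The *_CMP macros above encode the per-dimension MBR predicates that rtree_key_cmp() applies further below; note that each one returns false (0) when the named relation holds. A small self-contained sketch of the same convention, with invented helper names and hard-coded intervals, not taken from the patch:

        #include <cstdio>

        /* One dimension of two MBRs, [amin, amax] and [bmin, bmax]; as with
           the macros above, each helper returns false (0) when the relation
           holds. */
        static bool intersect_cmp(double amin, double amax, double bmin, double bmax)
        {
                return amin > bmax || bmin > amax;      /* false => a intersects b */
        }

        static bool contain_cmp(double amin, double amax, double bmin, double bmax)
        {
                return bmin > amin || bmax < amax;      /* false => b contains a */
        }

        static bool within_cmp(double amin, double amax, double bmin, double bmax)
        {
                return amin > bmin || amax < bmax;      /* false => b is within a */
        }

        int main()
        {
                /* a = [1, 4], b = [2, 3]: the intervals intersect, b does not
                   contain a, and b lies within a. */
                std::printf("a intersects b: %d\n", !intersect_cmp(1, 4, 2, 3)); /* 1 */
                std::printf("b contains a:   %d\n", !contain_cmp(1, 4, 2, 3));   /* 0 */
                std::printf("b within a:     %d\n", !within_cmp(1, 4, 2, 3));    /* 1 */
                return 0;
        }
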
*/ +{ + uint n_points; + + n_points = uint4korr(*wkb); + (*wkb) += 4; + + for (; n_points > 0; --n_points) { + /* Add next point to mbr */ + if (rtree_add_point_to_mbr(wkb, end, n_dims, + byte_order, mbr)) { + return(-1); + } + } + + return(0); +} + +/*************************************************************//** +Get mbr of polygon stored in wkb. +@return 0 if the polygon is valid, otherwise -1. */ +static +int +rtree_get_polygon_mbr( +/*==================*/ + uchar** wkb, /*!< in: pointer to wkb, + where point is stored. */ + uchar* end, /*!< in: end of wkb. */ + uint n_dims, /*!< in: dimensions. */ + uchar byte_order, /*!< in: byte order. */ + double* mbr) /*!< in/out: mbr, + must be of length n_dims * 2. */ +{ + uint n_linear_rings; + uint n_points; + + n_linear_rings = uint4korr((*wkb)); + (*wkb) += 4; + + for (; n_linear_rings > 0; --n_linear_rings) { + n_points = uint4korr((*wkb)); + (*wkb) += 4; + + for (; n_points > 0; --n_points) { + /* Add next point to mbr */ + if (rtree_add_point_to_mbr(wkb, end, n_dims, + byte_order, mbr)) { + return(-1); + } + } + } + + return(0); +} + +/*************************************************************//** +Get mbr of geometry stored in wkb. +@return 0 if the geometry is valid, otherwise -1. */ +static +int +rtree_get_geometry_mbr( +/*===================*/ + uchar** wkb, /*!< in: pointer to wkb, + where point is stored. */ + uchar* end, /*!< in: end of wkb. */ + uint n_dims, /*!< in: dimensions. */ + double* mbr, /*!< in/out: mbr. */ + int top) /*!< in: if it is the top, + which means it's not called + by itself. */ +{ + int res; + uchar byte_order = 2; + uint wkb_type = 0; + uint n_items; + + byte_order = *(*wkb); + ++(*wkb); + + wkb_type = uint4korr((*wkb)); + (*wkb) += 4; + + switch ((enum wkbType) wkb_type) { + case wkbPoint: + res = rtree_get_point_mbr(wkb, end, n_dims, byte_order, mbr); + break; + case wkbLineString: + res = rtree_get_linestring_mbr(wkb, end, n_dims, + byte_order, mbr); + break; + case wkbPolygon: + res = rtree_get_polygon_mbr(wkb, end, n_dims, byte_order, mbr); + break; + case wkbMultiPoint: + n_items = uint4korr((*wkb)); + (*wkb) += 4; + for (; n_items > 0; --n_items) { + byte_order = *(*wkb); + ++(*wkb); + (*wkb) += 4; + if (rtree_get_point_mbr(wkb, end, n_dims, + byte_order, mbr)) { + return(-1); + } + } + res = 0; + break; + case wkbMultiLineString: + n_items = uint4korr((*wkb)); + (*wkb) += 4; + for (; n_items > 0; --n_items) { + byte_order = *(*wkb); + ++(*wkb); + (*wkb) += 4; + if (rtree_get_linestring_mbr(wkb, end, n_dims, + byte_order, mbr)) { + return(-1); + } + } + res = 0; + break; + case wkbMultiPolygon: + n_items = uint4korr((*wkb)); + (*wkb) += 4; + for (; n_items > 0; --n_items) { + byte_order = *(*wkb); + ++(*wkb); + (*wkb) += 4; + if (rtree_get_polygon_mbr(wkb, end, n_dims, + byte_order, mbr)) { + return(-1); + } + } + res = 0; + break; + case wkbGeometryCollection: + if (!top) { + return(-1); + } + + n_items = uint4korr((*wkb)); + (*wkb) += 4; + for (; n_items > 0; --n_items) { + if (rtree_get_geometry_mbr(wkb, end, n_dims, + mbr, 0)) { + return(-1); + } + } + res = 0; + break; + default: + res = -1; + } + + return(res); +} + +/*************************************************************//** +Calculate Minimal Bounding Rectangle (MBR) of the spatial object +stored in "well-known binary representation" (wkb) format. +@return 0 if ok. */ +int +rtree_mbr_from_wkb( +/*===============*/ + uchar* wkb, /*!< in: wkb */ + uint size, /*!< in: size of wkb. */ + uint n_dims, /*!< in: dimensions. 
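The helpers above all reduce to the same folding step: rtree_add_point_to_mbr() widens the running minimum/maximum of every dimension, and the entry point seeds each dimension with (DBL_MAX, -DBL_MAX). A standalone sketch of that folding over plain 2-D points, with invented names and no WKB parsing:

        #include <array>
        #include <cfloat>
        #include <cstdio>
        #include <vector>

        /* mbr layout as in the code above: mbr[2*d] = minimum of dimension d,
           mbr[2*d + 1] = maximum of dimension d. */
        void mbr_from_points(const std::vector<std::array<double, 2> >& pts, double* mbr)
        {
                for (unsigned d = 0; d < 2; d++) {
                        mbr[d * 2] = DBL_MAX;           /* same seeding as the entry point */
                        mbr[d * 2 + 1] = -DBL_MAX;
                }

                for (const std::array<double, 2>& p : pts) {
                        for (unsigned d = 0; d < 2; d++) {
                                if (p[d] < mbr[d * 2]) {
                                        mbr[d * 2] = p[d];
                                }
                                if (p[d] > mbr[d * 2 + 1]) {
                                        mbr[d * 2 + 1] = p[d];
                                }
                        }
                }
        }

        int main()
        {
                double mbr[4];
                mbr_from_points({{1.0, 5.0}, {3.0, 2.0}, {-1.0, 4.0}}, mbr);
                std::printf("x: [%g, %g]  y: [%g, %g]\n", mbr[0], mbr[1], mbr[2], mbr[3]);
                /* prints: x: [-1, 3]  y: [2, 5] */
                return 0;
        }
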
*/ + double* mbr) /*!< in/out: mbr, which must + be of length n_dim2 * 2. */ +{ + for (uint i = 0; i < n_dims; ++i) { + mbr[i * 2] = DBL_MAX; + mbr[i * 2 + 1] = -DBL_MAX; + } + + return rtree_get_geometry_mbr(&wkb, wkb + size, n_dims, mbr, 1); +} + + +/**************************************************************** +Functions for Rtree split +****************************************************************/ +/*************************************************************//** +Join 2 mbrs of dimensions n_dim. */ +static +void +mbr_join( +/*=====*/ + double* a, /*!< in/out: the first mbr, + where the joined result will be. */ + const double* b, /*!< in: the second mbr. */ + int n_dim) /*!< in: dimensions. */ +{ + double* end = a + n_dim * 2; + + do { + if (a[0] > b[0]) { + a[0] = b[0]; + } + + if (a[1] < b[1]) { + a[1] = b[1]; + } + + a += 2; + b += 2; + + } while (a != end); +} + +/*************************************************************//** +Counts the square of mbr which is the join of a and b. Both a and b +are of dimensions n_dim. */ +static +double +mbr_join_square( +/*============*/ + const double* a, /*!< in: the first mbr. */ + const double* b, /*!< in: the second mbr. */ + int n_dim) /*!< in: dimensions. */ +{ + const double* end = a + n_dim * 2; + double square = 1.0; + + do { + square *= std::max(a[1], b[1]) - std::min(a[0], b[0]); + + a += 2; + b += 2; + } while (a != end); + + /* Check for infinity or NaN, so we don't get NaN in calculations */ + /* JAN: TODO: MYSQL 5.7 GIS + if (my_isinf(square) || my_isnan(square)) { + return DBL_MAX; + } + */ + + return square; +} + +/*************************************************************//** +Counts the square of mbr of dimension n_dim. */ +static +double +count_square( +/*=========*/ + const double* a, /*!< in: the mbr. */ + int n_dim) /*!< in: dimensions. */ +{ + const double* end = a + n_dim * 2; + double square = 1.0; + + do { + square *= a[1] - a[0]; + a += 2; + } while (a != end); + + return square; +} + +/*************************************************************//** +Copy mbr of dimension n_dim from src to dst. */ +inline +static +void +copy_coords( +/*========*/ + double* dst, /*!< in/out: destination. */ + const double* src, /*!< in: source. */ + int n_dim) /*!< in: dimensions. */ +{ + memcpy(dst, src, DATA_MBR_LEN); +} + +/*************************************************************//** +Select two nodes to collect group upon */ +static +void +pick_seeds( +/*=======*/ + rtr_split_node_t* node, /*!< in: split nodes. */ + int n_entries, /*!< in: entries number. */ + rtr_split_node_t** seed_a, /*!< out: seed 1. */ + rtr_split_node_t** seed_b, /*!< out: seed 2. */ + int n_dim) /*!< in: dimensions. */ +{ + rtr_split_node_t* cur1; + rtr_split_node_t* lim1 = node + (n_entries - 1); + rtr_split_node_t* cur2; + rtr_split_node_t* lim2 = node + n_entries; + + double max_d = -DBL_MAX; + double d; + + *seed_a = node; + *seed_b = node + 1; + + for (cur1 = node; cur1 < lim1; ++cur1) { + for (cur2 = cur1 + 1; cur2 < lim2; ++cur2) { + d = mbr_join_square(cur1->coords, cur2->coords, n_dim) - + cur1->square - cur2->square; + if (d > max_d) { + max_d = d; + *seed_a = cur1; + *seed_b = cur2; + } + } + } +} + +/*********************************************************//** +Generates a random iboolean value. 
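pick_seeds() above is the seed-selection step of the classic quadratic split heuristic: every pair of entries is examined and the pair whose joined MBR wastes the most area (joined area minus the two individual areas) becomes the seeds of the two groups. A compact standalone version over 2-D rectangles; mbr_t, join_area and pick_two_seeds are invented names, and none of the rtr_split_node_t bookkeeping is reproduced.

        #include <algorithm>
        #include <array>
        #include <cstddef>
        #include <cstdio>
        #include <utility>
        #include <vector>

        typedef std::array<double, 4> mbr_t;    /* {xmin, xmax, ymin, ymax} */

        static double area(const mbr_t& m)
        {
                return (m[1] - m[0]) * (m[3] - m[2]);
        }

        static double join_area(const mbr_t& a, const mbr_t& b)
        {
                return (std::max(a[1], b[1]) - std::min(a[0], b[0]))
                     * (std::max(a[3], b[3]) - std::min(a[2], b[2]));
        }

        /* Seed pick as in pick_seeds(): the pair whose joined rectangle
           wastes the most area becomes the seeds of the two split groups. */
        static std::pair<std::size_t, std::size_t>
        pick_two_seeds(const std::vector<mbr_t>& e)
        {
                std::pair<std::size_t, std::size_t> seeds(0, 1);
                double max_d = -1e308;

                for (std::size_t i = 0; i + 1 < e.size(); i++) {
                        for (std::size_t j = i + 1; j < e.size(); j++) {
                                double d = join_area(e[i], e[j])
                                         - area(e[i]) - area(e[j]);
                                if (d > max_d) {
                                        max_d = d;
                                        seeds = std::make_pair(i, j);
                                }
                        }
                }
                return seeds;
        }

        int main()
        {
                std::vector<mbr_t> entries;
                entries.push_back(mbr_t{{0, 1, 0, 1}});
                entries.push_back(mbr_t{{0.5, 1.5, 0.5, 1.5}});
                entries.push_back(mbr_t{{9, 10, 9, 10}});

                std::pair<std::size_t, std::size_t> s = pick_two_seeds(entries);
                std::printf("seeds: %zu and %zu\n", s.first, s.second); /* 0 and 2 */
                return 0;
        }
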
+@return the random value */ +static +ibool +ut_rnd_gen_ibool(void) +/*=================*/ +{ + ulint x; + + x = ut_rnd_gen_ulint(); + + if (((x >> 20) + (x >> 15)) & 1) { + + return(TRUE); + } + + return(FALSE); +} + +/*************************************************************//** +Select next node and group where to add. */ +static +void +pick_next( +/*======*/ + rtr_split_node_t* node, /*!< in: split nodes. */ + int n_entries, /*!< in: entries number. */ + double* g1, /*!< in: mbr of group 1. */ + double* g2, /*!< in: mbr of group 2. */ + rtr_split_node_t** choice, /*!< out: the next node.*/ + int* n_group, /*!< out: group number.*/ + int n_dim) /*!< in: dimensions. */ +{ + rtr_split_node_t* cur = node; + rtr_split_node_t* end = node + n_entries; + double max_diff = -DBL_MAX; + + for (; cur < end; ++cur) { + double diff; + double abs_diff; + + if (cur->n_node != 0) { + continue; + } + + diff = mbr_join_square(g1, cur->coords, n_dim) - + mbr_join_square(g2, cur->coords, n_dim); + + abs_diff = fabs(diff); + if (abs_diff > max_diff) { + max_diff = abs_diff; + + /* Introduce some randomness if the record + is identical */ + if (diff == 0) { + diff = static_cast( + ut_rnd_gen_ibool()); + } + + *n_group = 1 + (diff > 0); + *choice = cur; + } + } +} + +/*************************************************************//** +Mark not-in-group entries as n_group. */ +static +void +mark_all_entries( +/*=============*/ + rtr_split_node_t* node, /*!< in/out: split nodes. */ + int n_entries, /*!< in: entries number. */ + int n_group) /*!< in: group number. */ +{ + rtr_split_node_t* cur = node; + rtr_split_node_t* end = node + n_entries; + for (; cur < end; ++cur) { + if (cur->n_node != 0) { + continue; + } + cur->n_node = n_group; + } +} + +/*************************************************************//** +Split rtree node. +Return which group the first rec is in. */ +int +split_rtree_node( +/*=============*/ + rtr_split_node_t* node, /*!< in: split nodes. */ + int n_entries, /*!< in: entries number. */ + int all_size, /*!< in: total key's size. */ + int key_size, /*!< in: key's size. */ + int min_size, /*!< in: minimal group size. */ + int size1, /*!< in: size of group. */ + int size2, /*!< in: initial group sizes */ + double** d_buffer, /*!< in/out: buffer. */ + int n_dim, /*!< in: dimensions. */ + uchar* first_rec) /*!< in: the first rec. 
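pick_next() above chooses, among the entries not yet assigned to a group, the one with the strongest preference (largest absolute difference between the two joined areas) and routes it to the group whose rectangle grows less; ut_rnd_gen_ibool() only breaks exact ties. A small sketch of that assignment rule under the same illustrative 2-D layout (invented names, hard-coded data):

        #include <algorithm>
        #include <array>
        #include <cmath>
        #include <cstddef>
        #include <cstdio>
        #include <vector>

        typedef std::array<double, 4> mbr_t;    /* {xmin, xmax, ymin, ymax} */

        static double join_area(const mbr_t& a, const mbr_t& b)
        {
                return (std::max(a[1], b[1]) - std::min(a[0], b[0]))
                     * (std::max(a[3], b[3]) - std::min(a[2], b[2]));
        }

        int main()
        {
                mbr_t g1 = {{0, 1, 0, 1}};      /* current MBR of group 1 */
                mbr_t g2 = {{9, 10, 9, 10}};    /* current MBR of group 2 */

                std::vector<mbr_t> pending;
                pending.push_back(mbr_t{{8, 9, 8, 9}});
                pending.push_back(mbr_t{{4, 6, 4, 6}});

                std::size_t best = 0;
                double max_diff = -1.0;
                int group = 1;

                for (std::size_t i = 0; i < pending.size(); i++) {
                        double diff = join_area(g1, pending[i])
                                    - join_area(g2, pending[i]);

                        if (std::fabs(diff) > max_diff) {
                                max_diff = std::fabs(diff);
                                best = i;
                                group = (diff > 0) ? 2 : 1;  /* "1 + (diff > 0)" above */
                        }
                }

                std::printf("entry %zu goes to group %d\n", best, group); /* 0 -> group 2 */
                return 0;
        }
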
*/ +{ + rtr_split_node_t* cur; + rtr_split_node_t* a = NULL; + rtr_split_node_t* b = NULL; + double* g1 = reserve_coords(d_buffer, n_dim); + double* g2 = reserve_coords(d_buffer, n_dim); + rtr_split_node_t* next = NULL; + int next_node = 0; + int i; + int first_rec_group = 1; + rtr_split_node_t* end = node + n_entries; + + if (all_size < min_size * 2) { + return 1; + } + + cur = node; + for (; cur < end; ++cur) { + cur->square = count_square(cur->coords, n_dim); + cur->n_node = 0; + } + + pick_seeds(node, n_entries, &a, &b, n_dim); + a->n_node = 1; + b->n_node = 2; + + copy_coords(g1, a->coords, n_dim); + size1 += key_size; + copy_coords(g2, b->coords, n_dim); + size2 += key_size; + + for (i = n_entries - 2; i > 0; --i) { + /* Can't write into group 2 */ + if (all_size - (size2 + key_size) < min_size) { + mark_all_entries(node, n_entries, 1); + break; + } + + /* Can't write into group 1 */ + if (all_size - (size1 + key_size) < min_size) { + mark_all_entries(node, n_entries, 2); + break; + } + + pick_next(node, n_entries, g1, g2, &next, &next_node, n_dim); + if (next_node == 1) { + size1 += key_size; + mbr_join(g1, next->coords, n_dim); + } else { + size2 += key_size; + mbr_join(g2, next->coords, n_dim); + } + + next->n_node = next_node; + + /* Find out where the first rec (of the page) will be at, + and inform the caller */ + if (first_rec && first_rec == next->key) { + first_rec_group = next_node; + } + } + + return(first_rec_group); +} + +/*************************************************************//** +Compares two keys a and b depending on nextflag +nextflag can contain these flags: + MBR_INTERSECT(a,b) a overlaps b + MBR_CONTAIN(a,b) a contains b + MBR_DISJOINT(a,b) a disjoint b + MBR_WITHIN(a,b) a within b + MBR_EQUAL(a,b) All coordinates of MBRs are equal +Return 0 on success, otherwise 1. */ +int +rtree_key_cmp( +/*==========*/ + page_cur_mode_t mode, /*!< in: compare method. */ + const uchar* b, /*!< in: first key. */ + int b_len, /*!< in: first key len. */ + const uchar* a, /*!< in: second key. */ + int a_len) /*!< in: second key len. */ +{ + double amin, amax, bmin, bmax; + int key_len; + int keyseg_len; + + keyseg_len = 2 * sizeof(double); + for (key_len = a_len; key_len > 0; key_len -= keyseg_len) { + amin = mach_double_read(a); + bmin = mach_double_read(b); + amax = mach_double_read(a + sizeof(double)); + bmax = mach_double_read(b + sizeof(double)); + + switch (mode) { + case PAGE_CUR_INTERSECT: + if (INTERSECT_CMP(amin, amax, bmin, bmax)) { + return(1); + } + break; + case PAGE_CUR_CONTAIN: + if (CONTAIN_CMP(amin, amax, bmin, bmax)) { + return(1); + } + break; + case PAGE_CUR_WITHIN: + if (WITHIN_CMP(amin, amax, bmin, bmax)) { + return(1); + } + break; + case PAGE_CUR_MBR_EQUAL: + if (EQUAL_CMP(amin, amax, bmin, bmax)) { + return(1); + } + break; + case PAGE_CUR_DISJOINT: + int result; + + result = DISJOINT_CMP(amin, amax, bmin, bmax); + if (result == 0) { + return(0); + } + + if (key_len - keyseg_len <= 0) { + return(1); + } + + break; + default: + /* if unknown comparison operator */ + ut_ad(0); + } + + a += keyseg_len; + b += keyseg_len; + } + + return(0); +} + +/*************************************************************//** +Calculates MBR_AREA(a+b) - MBR_AREA(a) +Note: when 'a' and 'b' objects are far from each other, +the area increase can be really big, so this function +can return 'inf' as a result. +Return the area increaed. */ +double +rtree_area_increase( + const uchar* a, /*!< in: original mbr. */ + const uchar* b, /*!< in: new mbr. 
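rtree_area_increase(), whose body follows, computes MBR_AREA(a+b) - MBR_AREA(a), i.e. how much rectangle a must grow to also cover b, a quantity R-trees commonly use when choosing an insertion subtree. The essential calculation as a standalone sketch with invented names; the real function additionally weights degenerate line-shaped MBRs and compensates for floating-point round-off.

        #include <algorithm>
        #include <array>
        #include <cstdio>

        typedef std::array<double, 4> mbr_t;    /* {xmin, xmax, ymin, ymax} */

        static double area(const mbr_t& m)
        {
                return (m[1] - m[0]) * (m[3] - m[2]);
        }

        /* area(a joined with b) - area(a): the growth needed for 'a' to
           also cover 'b'. */
        static double area_increase(const mbr_t& a, const mbr_t& b)
        {
                mbr_t j = {{ std::min(a[0], b[0]), std::max(a[1], b[1]),
                             std::min(a[2], b[2]), std::max(a[3], b[3]) }};
                return area(j) - area(a);
        }

        int main()
        {
                mbr_t page_mbr = {{0, 4, 0, 4}};
                mbr_t new_entry = {{3, 5, 1, 2}};

                std::printf("increase: %g\n", area_increase(page_mbr, new_entry));
                /* prints: increase: 4 */
                return 0;
        }
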
*/ + int mbr_len, /*!< in: mbr length of a and b. */ + double* ab_area) /*!< out: increased area. */ +{ + double a_area = 1.0; + double loc_ab_area = 1.0; + double amin, amax, bmin, bmax; + int key_len; + int keyseg_len; + double data_round = 1.0; + + keyseg_len = 2 * sizeof(double); + + for (key_len = mbr_len; key_len > 0; key_len -= keyseg_len) { + double area; + + amin = mach_double_read(a); + bmin = mach_double_read(b); + amax = mach_double_read(a + sizeof(double)); + bmax = mach_double_read(b + sizeof(double)); + + area = amax - amin; + if (area == 0) { + a_area *= LINE_MBR_WEIGHTS; + } else { + a_area *= area; + } + + area = (double)std::max(amax, bmax) - + (double)std::min(amin, bmin); + if (area == 0) { + loc_ab_area *= LINE_MBR_WEIGHTS; + } else { + loc_ab_area *= area; + } + + /* Value of amax or bmin can be so large that small difference + are ignored. For example: 3.2884281489988079e+284 - 100 = + 3.2884281489988079e+284. This results some area difference + are not detected */ + if (loc_ab_area == a_area) { + if (bmin < amin || bmax > amax) { + data_round *= ((double)std::max(amax, bmax) + - amax + + (amin - (double)std::min( + amin, bmin))); + } else { + data_round *= area; + } + } + + a += keyseg_len; + b += keyseg_len; + } + + *ab_area = loc_ab_area; + + if (loc_ab_area == a_area && data_round != 1.0) { + return(data_round); + } + + return(loc_ab_area - a_area); +} + +/** Calculates overlapping area +@param[in] a mbr a +@param[in] b mbr b +@param[in] mbr_len mbr length +@return overlapping area */ +double +rtree_area_overlapping( + const uchar* a, + const uchar* b, + int mbr_len) +{ + double area = 1.0; + double amin; + double amax; + double bmin; + double bmax; + int key_len; + int keyseg_len; + + keyseg_len = 2 * sizeof(double); + + for (key_len = mbr_len; key_len > 0; key_len -= keyseg_len) { + amin = mach_double_read(a); + bmin = mach_double_read(b); + amax = mach_double_read(a + sizeof(double)); + bmax = mach_double_read(b + sizeof(double)); + + amin = std::max(amin, bmin); + amax = std::min(amax, bmax); + + if (amin > amax) { + return(0); + } else { + area *= (amax - amin); + } + + a += keyseg_len; + b += keyseg_len; + } + + return(area); +} + +/** Get the wkb of default POINT value, which represents POINT(0 0) +if it's of dimension 2, etc. +@param[in] n_dims dimensions +@param[out] wkb wkb buffer for default POINT +@param[in] len length of wkb buffer +@return non-0 indicate the length of wkb of the default POINT, +0 if the buffer is too small */ +uint +get_wkb_of_default_point( + uint n_dims, + uchar* wkb, + uint len) +{ + // JAN: TODO: MYSQL 5.7 GIS + #define GEOM_HEADER_SIZE 16 + if (len < GEOM_HEADER_SIZE + sizeof(double) * n_dims) { + return(0); + } + + /** POINT wkb comprises SRID, wkb header(byte order and type) + and coordinates of the POINT */ + len = GEOM_HEADER_SIZE + sizeof(double) * n_dims; + /** We always use 0 as default coordinate */ + memset(wkb, 0, len); + /** We don't need to write SRID, write 0x01 for Byte Order */ + mach_write_to_n_little_endian(wkb + SRID_SIZE, 1, 0x01); + /** Write wkbType::wkbPoint for the POINT type */ + mach_write_to_n_little_endian(wkb + SRID_SIZE + 1, 4, wkbPoint); + + return(len); +} diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc new file mode 100644 index 00000000000..45f0bd97821 --- /dev/null +++ b/storage/innobase/gis/gis0rtree.cc @@ -0,0 +1,2016 @@ +/***************************************************************************** + +Copyright (c) 2015, Oracle and/or its affiliates. 
All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file gis/gis0rtree.cc +InnoDB R-tree interfaces + +Created 2013/03/27 Allen Lai and Jimmy Yang +***********************************************************************/ + +#include "fsp0fsp.h" +#include "page0page.h" +#include "page0cur.h" +#include "page0zip.h" +#include "gis0rtree.h" + +#ifndef UNIV_HOTBACKUP +#include "btr0cur.h" +#include "btr0sea.h" +#include "btr0pcur.h" +#include "rem0cmp.h" +#include "lock0lock.h" +#include "ibuf0ibuf.h" +#include "trx0trx.h" +#include "srv0mon.h" +#include "gis0geo.h" + +#endif /* UNIV_HOTBACKUP */ + +/*************************************************************//** +Initial split nodes info for R-tree split. +@return initialized split nodes array */ +static +rtr_split_node_t* +rtr_page_split_initialize_nodes( +/*============================*/ + mem_heap_t* heap, /*!< in: pointer to memory heap, or NULL */ + btr_cur_t* cursor, /*!< in: cursor at which to insert; when the + function returns, the cursor is positioned + on the predecessor of the inserted record */ + ulint** offsets,/*!< in: offsets on inserted record */ + const dtuple_t* tuple, /*!< in: tuple to insert */ + double** buf_pos)/*!< in/out: current buffer position */ +{ + rtr_split_node_t* split_node_array; + double* buf; + ulint n_recs; + rtr_split_node_t* task; + rtr_split_node_t* stop; + rtr_split_node_t* cur; + rec_t* rec; + buf_block_t* block; + page_t* page; + ulint n_uniq; + ulint len; + byte* source_cur; + + block = btr_cur_get_block(cursor); + page = buf_block_get_frame(block); + n_uniq = dict_index_get_n_unique_in_tree(cursor->index); + + n_recs = page_get_n_recs(page) + 1; + + /*We reserve 2 MBRs memory space for temp result of split + algrithm. 
And plus the new mbr that need to insert, we + need (n_recs + 3)*MBR size for storing all MBRs.*/ + buf = static_cast(mem_heap_alloc( + heap, DATA_MBR_LEN * (n_recs + 3) + + sizeof(rtr_split_node_t) * (n_recs + 1))); + + split_node_array = (rtr_split_node_t*)(buf + SPDIMS * 2 * (n_recs + 3)); + task = split_node_array; + *buf_pos = buf; + stop = task + n_recs; + + rec = page_rec_get_next(page_get_infimum_rec(page)); + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + n_uniq, &heap); + + source_cur = rec_get_nth_field(rec, *offsets, 0, &len); + + for (cur = task; cur < stop - 1; ++cur) { + cur->coords = reserve_coords(buf_pos, SPDIMS); + cur->key = rec; + + memcpy(cur->coords, source_cur, DATA_MBR_LEN); + + rec = page_rec_get_next(rec); + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + n_uniq, &heap); + source_cur = rec_get_nth_field(rec, *offsets, 0, &len); + } + + /* Put the insert key to node list */ + source_cur = static_cast(dfield_get_data( + dtuple_get_nth_field(tuple, 0))); + cur->coords = reserve_coords(buf_pos, SPDIMS); + rec = (byte*) mem_heap_alloc( + heap, rec_get_converted_size(cursor->index, tuple, 0)); + + rec = rec_convert_dtuple_to_rec(rec, cursor->index, tuple, 0); + cur->key = rec; + + memcpy(cur->coords, source_cur, DATA_MBR_LEN); + + return split_node_array; +} + +/**********************************************************************//** +Builds a Rtree node pointer out of a physical record and a page number. +Note: For Rtree, we just keep the mbr and page no field in non-leaf level +page. It's different with Btree, Btree still keeps PK fields so far. +@return own: node pointer */ +dtuple_t* +rtr_index_build_node_ptr( +/*=====================*/ + const dict_index_t* index, /*!< in: index */ + const rtr_mbr_t* mbr, /*!< in: mbr of lower page */ + const rec_t* rec, /*!< in: record for which to build node + pointer */ + ulint page_no,/*!< in: page number to put in node + pointer */ + mem_heap_t* heap, /*!< in: memory heap where pointer + created */ + ulint level) /*!< in: level of rec in tree: + 0 means leaf level */ +{ + dtuple_t* tuple; + dfield_t* field; + byte* buf; + ulint n_unique; + ulint info_bits; + + ut_ad(dict_index_is_spatial(index)); + + n_unique = DICT_INDEX_SPATIAL_NODEPTR_SIZE; + + tuple = dtuple_create(heap, n_unique + 1); + + dtuple_set_n_fields_cmp(tuple, n_unique); + + dict_index_copy_types(tuple, index, n_unique); + + /* Write page no field */ + buf = static_cast(mem_heap_alloc(heap, 4)); + + mach_write_to_4(buf, page_no); + + field = dtuple_get_nth_field(tuple, n_unique); + dfield_set_data(field, buf, 4); + + dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4); + + /* Set info bits. */ + info_bits = rec_get_info_bits(rec, dict_table_is_comp(index->table)); + dtuple_set_info_bits(tuple, info_bits | REC_STATUS_NODE_PTR); + + /* Set mbr as index entry data */ + field = dtuple_get_nth_field(tuple, 0); + + buf = static_cast(mem_heap_alloc(heap, DATA_MBR_LEN)); + + rtr_write_mbr(buf, mbr); + + dfield_set_data(field, buf, DATA_MBR_LEN); + + ut_ad(dtuple_check_typed(tuple)); + + return(tuple); +} + +/**************************************************************//** +In-place update the mbr field of a spatial index row. +@return true if update is successful */ +static +bool +rtr_update_mbr_field_in_place( +/*==========================*/ + dict_index_t* index, /*!< in: spatial index. */ + rec_t* rec, /*!< in/out: rec to be modified.*/ + ulint* offsets, /*!< in/out: offsets on rec. */ + rtr_mbr_t* mbr, /*!< in: the new mbr. 
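As the comment on rtr_index_build_node_ptr() above notes, an R-tree non-leaf record carries only the child page's MBR and the child page number, unlike a B-tree node pointer, which still carries key fields. A purely hypothetical flattened view of that pair, for illustration only (the real record is built as a dtuple):

        #include <array>
        #include <cstddef>
        #include <cstdint>
        #include <cstdio>
        #include <vector>

        /* Hypothetical in-memory view of an R-tree node pointer: MBR plus
           child page number, no key columns. */
        struct rtr_node_ptr_view {
                std::array<double, 4>   mbr;            /* {xmin, xmax, ymin, ymax} */
                std::uint32_t           child_page_no;
        };

        int main()
        {
                std::vector<rtr_node_ptr_view> non_leaf_page;
                non_leaf_page.push_back(rtr_node_ptr_view{ {{0, 4, 0, 4}}, 7 });
                non_leaf_page.push_back(rtr_node_ptr_view{ {{3, 9, 2, 8}}, 11 });

                for (std::size_t i = 0; i < non_leaf_page.size(); i++) {
                        const rtr_node_ptr_view& np = non_leaf_page[i];
                        std::printf("child %u covers [%g,%g]x[%g,%g]\n",
                                    (unsigned) np.child_page_no,
                                    np.mbr[0], np.mbr[1], np.mbr[2], np.mbr[3]);
                }
                return 0;
        }
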
*/ + mtr_t* mtr) /*!< in: mtr */ +{ + void* new_mbr_ptr; + double new_mbr[SPDIMS * 2]; + byte* log_ptr; + page_t* page = page_align(rec); + ulint len = DATA_MBR_LEN; + ulint flags = BTR_NO_UNDO_LOG_FLAG + | BTR_NO_LOCKING_FLAG + | BTR_KEEP_SYS_FLAG; + ulint rec_info; + + rtr_write_mbr(reinterpret_cast(&new_mbr), mbr); + new_mbr_ptr = static_cast(new_mbr); + /* Otherwise, set the mbr to the new_mbr. */ + rec_set_nth_field(rec, offsets, 0, new_mbr_ptr, len); + + rec_info = rec_get_info_bits(rec, rec_offs_comp(offsets)); + + /* Write redo log. */ + /* For now, we use LOG_REC_UPDATE_IN_PLACE to log this enlarge. + In the future, we may need to add a new log type for this. */ + log_ptr = mlog_open_and_write_index(mtr, rec, index, page_is_comp(page) + ? MLOG_COMP_REC_UPDATE_IN_PLACE + : MLOG_REC_UPDATE_IN_PLACE, + 1 + DATA_ROLL_PTR_LEN + 14 + 2 + + MLOG_BUF_MARGIN); + + if (!log_ptr) { + /* Logging in mtr is switched off during + crash recovery */ + return(false); + } + + /* Flags */ + mach_write_to_1(log_ptr, flags); + log_ptr++; + /* TRX_ID Position */ + log_ptr += mach_write_compressed(log_ptr, 0); + /* ROLL_PTR */ + trx_write_roll_ptr(log_ptr, 0); + log_ptr += DATA_ROLL_PTR_LEN; + /* TRX_ID */ + log_ptr += mach_u64_write_compressed(log_ptr, 0); + + /* Offset */ + mach_write_to_2(log_ptr, page_offset(rec)); + log_ptr += 2; + /* Info bits */ + mach_write_to_1(log_ptr, rec_info); + log_ptr++; + /* N fields */ + log_ptr += mach_write_compressed(log_ptr, 1); + /* Field no, len */ + log_ptr += mach_write_compressed(log_ptr, 0); + log_ptr += mach_write_compressed(log_ptr, len); + /* Data */ + memcpy(log_ptr, new_mbr_ptr, len); + log_ptr += len; + + mlog_close(mtr, log_ptr); + + return(true); +} + +/**************************************************************//** +Update the mbr field of a spatial index row. +@return true if update is successful */ +bool +rtr_update_mbr_field( +/*=================*/ + btr_cur_t* cursor, /*!< in/out: cursor pointed to rec.*/ + ulint* offsets, /*!< in/out: offsets on rec. */ + btr_cur_t* cursor2, /*!< in/out: cursor pointed to rec + that should be deleted. + this cursor is for btr_compress to + delete the merged page's father rec.*/ + page_t* child_page, /*!< in: child page. */ + rtr_mbr_t* mbr, /*!< in: the new mbr. 
*/ + rec_t* new_rec, /*!< in: rec to use */ + mtr_t* mtr) /*!< in: mtr */ +{ + dict_index_t* index = cursor->index; + mem_heap_t* heap; + page_t* page; + rec_t* rec; + ulint flags = BTR_NO_UNDO_LOG_FLAG + | BTR_NO_LOCKING_FLAG + | BTR_KEEP_SYS_FLAG; + dberr_t err; + big_rec_t* dummy_big_rec; + buf_block_t* block; + rec_t* child_rec; + ulint up_match = 0; + ulint low_match = 0; + ulint child; + ulint level; + ulint rec_info; + page_zip_des_t* page_zip; + bool ins_suc = true; + ulint cur2_pos = 0; + ulint del_page_no = 0; + ulint* offsets2; + + rec = btr_cur_get_rec(cursor); + page = page_align(rec); + + rec_info = rec_get_info_bits(rec, rec_offs_comp(offsets)); + + heap = mem_heap_create(100); + block = btr_cur_get_block(cursor); + ut_ad(page == buf_block_get_frame(block)); + page_zip = buf_block_get_page_zip(block); + + child = btr_node_ptr_get_child_page_no(rec, offsets); + level = btr_page_get_level(buf_block_get_frame(block), mtr); + + if (new_rec) { + child_rec = new_rec; + } else { + child_rec = page_rec_get_next(page_get_infimum_rec(child_page)); + } + + dtuple_t* node_ptr = rtr_index_build_node_ptr( + index, mbr, child_rec, child, heap, level); + + /* We need to remember the child page no of cursor2, since page could be + reorganized or insert a new rec before it. */ + if (cursor2) { + rec_t* del_rec = btr_cur_get_rec(cursor2); + offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2), + index, NULL, + ULINT_UNDEFINED, &heap); + del_page_no = btr_node_ptr_get_child_page_no(del_rec, offsets2); + cur2_pos = page_rec_get_n_recs_before(btr_cur_get_rec(cursor2)); + } + + if (rec_info & REC_INFO_MIN_REC_FLAG) { + /* When the rec is minimal rec in this level, we do + in-place update for avoiding it move to other place. */ + + if (page_zip) { + /* Check if there's enough space for in-place + update the zip page. */ + if (!btr_cur_update_alloc_zip( + page_zip, + btr_cur_get_page_cur(cursor), + index, offsets, + rec_offs_size(offsets), + false, mtr)) { + + /* If there's not enought space for + inplace update zip page, we do delete + insert. */ + ins_suc = false; + + /* Since btr_cur_update_alloc_zip could + reorganize the page, we need to repositon + cursor2. */ + if (cursor2) { + cursor2->page_cur.rec = + page_rec_get_nth(page, + cur2_pos); + } + + goto update_mbr; + } + + /* Record could be repositioned */ + rec = btr_cur_get_rec(cursor); + +#ifdef UNIV_DEBUG + /* Make sure it is still the first record */ + rec_info = rec_get_info_bits( + rec, rec_offs_comp(offsets)); + ut_ad(rec_info & REC_INFO_MIN_REC_FLAG); +#endif /* UNIV_DEBUG */ + } + + if (!rtr_update_mbr_field_in_place(index, rec, + offsets, mbr, mtr)) { + return(false); + } + + if (page_zip) { + page_zip_write_rec(page_zip, rec, index, offsets, 0); + } + + if (cursor2) { + ulint* offsets2; + + if (page_zip) { + cursor2->page_cur.rec + = page_rec_get_nth(page, cur2_pos); + } + offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2), + index, NULL, + ULINT_UNDEFINED, &heap); + ut_ad(del_page_no == btr_node_ptr_get_child_page_no( + cursor2->page_cur.rec, + offsets2)); + + page_cur_delete_rec(btr_cur_get_page_cur(cursor2), + index, offsets2, mtr); + } + } else if (page_get_n_recs(page) == 1) { + /* When there's only one rec in the page, we do insert/delete to + avoid page merge. */ + + page_cur_t page_cur; + rec_t* insert_rec; + ulint* insert_offsets = NULL; + ulint old_pos; + rec_t* old_rec; + + ut_ad(cursor2 == NULL); + + /* Insert the new mbr rec. 
*/ + old_pos = page_rec_get_n_recs_before(rec); + + err = btr_cur_optimistic_insert( + flags, + cursor, &insert_offsets, &heap, + node_ptr, &insert_rec, &dummy_big_rec, 0, NULL, mtr); + + ut_ad(err == DB_SUCCESS); + + btr_cur_position(index, insert_rec, block, cursor); + + /* Delete the old mbr rec. */ + old_rec = page_rec_get_nth(page, old_pos); + ut_ad(old_rec != insert_rec); + + page_cur_position(old_rec, block, &page_cur); + offsets2 = rec_get_offsets(old_rec, + index, NULL, + ULINT_UNDEFINED, &heap); + page_cur_delete_rec(&page_cur, index, offsets2, mtr); + + } else { +update_mbr: + /* When there're not only 1 rec in the page, we do delete/insert + to avoid page split. */ + rec_t* insert_rec; + ulint* insert_offsets = NULL; + rec_t* next_rec; + + /* Delete the rec which cursor point to. */ + next_rec = page_rec_get_next(rec); + page_cur_delete_rec(btr_cur_get_page_cur(cursor), + index, offsets, mtr); + if (!ins_suc) { + ut_ad(rec_info & REC_INFO_MIN_REC_FLAG); + + btr_set_min_rec_mark(next_rec, mtr); + } + + /* If there's more than 1 rec left in the page, delete + the rec which cursor2 point to. Otherwise, delete it later.*/ + if (cursor2 && page_get_n_recs(page) > 1) { + ulint cur2_rec_info; + rec_t* cur2_rec; + + cur2_rec = cursor2->page_cur.rec; + offsets2 = rec_get_offsets(cur2_rec, index, NULL, + ULINT_UNDEFINED, &heap); + + cur2_rec_info = rec_get_info_bits(cur2_rec, + rec_offs_comp(offsets2)); + if (cur2_rec_info & REC_INFO_MIN_REC_FLAG) { + /* If we delete the leftmost node + pointer on a non-leaf level, we must + mark the new leftmost node pointer as + the predefined minimum record */ + rec_t* next_rec = page_rec_get_next(cur2_rec); + btr_set_min_rec_mark(next_rec, mtr); + } + + ut_ad(del_page_no + == btr_node_ptr_get_child_page_no(cur2_rec, + offsets2)); + page_cur_delete_rec(btr_cur_get_page_cur(cursor2), + index, offsets2, mtr); + cursor2 = NULL; + } + + /* Insert the new rec. */ + page_cur_search_with_match(block, index, node_ptr, + PAGE_CUR_LE , &up_match, &low_match, + btr_cur_get_page_cur(cursor), NULL); + + err = btr_cur_optimistic_insert(flags, cursor, &insert_offsets, + &heap, node_ptr, &insert_rec, + &dummy_big_rec, 0, NULL, mtr); + + if (!ins_suc && err == DB_SUCCESS) { + ins_suc = true; + } + + /* If optimistic insert fail, try reorganize the page + and insert again. */ + if (err != DB_SUCCESS && ins_suc) { + btr_page_reorganize(btr_cur_get_page_cur(cursor), + index, mtr); + + err = btr_cur_optimistic_insert(flags, + cursor, + &insert_offsets, + &heap, + node_ptr, + &insert_rec, + &dummy_big_rec, + 0, NULL, mtr); + + /* Will do pessimistic insert */ + if (err != DB_SUCCESS) { + ins_suc = false; + } + } + + /* Insert succeed, position cursor the inserted rec.*/ + if (ins_suc) { + btr_cur_position(index, insert_rec, block, cursor); + offsets = rec_get_offsets(insert_rec, + index, offsets, + ULINT_UNDEFINED, &heap); + } + + /* Delete the rec which cursor2 point to. */ + if (cursor2) { + ulint cur2_pno; + rec_t* cur2_rec; + + cursor2->page_cur.rec = page_rec_get_nth(page, + cur2_pos); + + cur2_rec = btr_cur_get_rec(cursor2); + + offsets2 = rec_get_offsets(cur2_rec, index, NULL, + ULINT_UNDEFINED, &heap); + + /* If the cursor2 position is on a wrong rec, we + need to reposition it. 
*/ + cur2_pno = btr_node_ptr_get_child_page_no(cur2_rec, offsets2); + if ((del_page_no != cur2_pno) + || (cur2_rec == insert_rec)) { + cur2_rec = page_rec_get_next( + page_get_infimum_rec(page)); + + while (!page_rec_is_supremum(cur2_rec)) { + offsets2 = rec_get_offsets(cur2_rec, index, + NULL, + ULINT_UNDEFINED, + &heap); + cur2_pno = btr_node_ptr_get_child_page_no( + cur2_rec, offsets2); + if (cur2_pno == del_page_no) { + if (insert_rec != cur2_rec) { + cursor2->page_cur.rec = + cur2_rec; + break; + } + } + cur2_rec = page_rec_get_next(cur2_rec); + } + + ut_ad(!page_rec_is_supremum(cur2_rec)); + } + + rec_info = rec_get_info_bits(cur2_rec, + rec_offs_comp(offsets2)); + if (rec_info & REC_INFO_MIN_REC_FLAG) { + /* If we delete the leftmost node + pointer on a non-leaf level, we must + mark the new leftmost node pointer as + the predefined minimum record */ + rec_t* next_rec = page_rec_get_next(cur2_rec); + btr_set_min_rec_mark(next_rec, mtr); + } + + ut_ad(cur2_pno == del_page_no && cur2_rec != insert_rec); + + page_cur_delete_rec(btr_cur_get_page_cur(cursor2), + index, offsets2, mtr); + } + + if (!ins_suc) { + mem_heap_t* new_heap = NULL; + + err = btr_cur_pessimistic_insert( + flags, + cursor, &insert_offsets, &new_heap, + node_ptr, &insert_rec, &dummy_big_rec, + 0, NULL, mtr); + + ut_ad(err == DB_SUCCESS); + + if (new_heap) { + mem_heap_free(new_heap); + } + + } + + if (cursor2) { + btr_cur_compress_if_useful(cursor, FALSE, mtr); + } + } + +#ifdef UNIV_DEBUG + ulint left_page_no = btr_page_get_prev(page, mtr); + + if (left_page_no == FIL_NULL) { + + ut_a(REC_INFO_MIN_REC_FLAG & rec_get_info_bits( + page_rec_get_next(page_get_infimum_rec(page)), + page_is_comp(page))); + } +#endif /* UNIV_DEBUG */ + + mem_heap_free(heap); + + return(true); +} + +/**************************************************************//** +Update parent page's MBR and Predicate lock information during a split */ +static __attribute__((nonnull)) +void +rtr_adjust_upper_level( +/*===================*/ + btr_cur_t* sea_cur, /*!< in: search cursor */ + ulint flags, /*!< in: undo logging and + locking flags */ + buf_block_t* block, /*!< in/out: page to be split */ + buf_block_t* new_block, /*!< in/out: the new half page */ + rtr_mbr_t* mbr, /*!< in: MBR on the old page */ + rtr_mbr_t* new_mbr, /*!< in: MBR on the new page */ + ulint direction, /*!< in: FSP_UP or FSP_DOWN */ + mtr_t* mtr) /*!< in: mtr */ +{ + page_t* page; + page_t* new_page; + ulint page_no; + ulint new_page_no; + page_zip_des_t* page_zip; + page_zip_des_t* new_page_zip; + dict_index_t* index = sea_cur->index; + btr_cur_t cursor; + ulint* offsets; + mem_heap_t* heap; + ulint level; + dtuple_t* node_ptr_upper; + ulint prev_page_no; + ulint next_page_no; + ulint space; + page_cur_t* page_cursor; + rtr_mbr_t parent_mbr; + lock_prdt_t prdt; + lock_prdt_t new_prdt; + lock_prdt_t parent_prdt; + dberr_t err; + big_rec_t* dummy_big_rec; + rec_t* rec; + + /* Create a memory heap where the data tuple is stored */ + heap = mem_heap_create(1024); + memset(&cursor, 0, sizeof(cursor)); + + cursor.thr = sea_cur->thr; + + /* Get the level of the split pages */ + level = btr_page_get_level(buf_block_get_frame(block), mtr); + ut_ad(level + == btr_page_get_level(buf_block_get_frame(new_block), mtr)); + + page = buf_block_get_frame(block); + page_no = block->page.id.page_no(); + page_zip = buf_block_get_page_zip(block); + + new_page = buf_block_get_frame(new_block); + new_page_no = new_block->page.id.page_no(); + new_page_zip = buf_block_get_page_zip(new_block); + + /* 
Set new mbr for the old page on the upper level. */ + /* Look up the index for the node pointer to page */ + offsets = rtr_page_get_father_block( + NULL, heap, index, block, mtr, sea_cur, &cursor); + + page_cursor = btr_cur_get_page_cur(&cursor); + + rtr_get_mbr_from_rec(page_cursor->rec, offsets, &parent_mbr); + + rtr_update_mbr_field(&cursor, offsets, NULL, page, mbr, NULL, mtr); + + /* Already updated parent MBR, reset in our path */ + if (sea_cur->rtr_info) { + node_visit_t* node_visit = rtr_get_parent_node( + sea_cur, level + 1, true); + if (node_visit) { + node_visit->mbr_inc = 0; + } + } + + /* Insert the node for the new page. */ + node_ptr_upper = rtr_index_build_node_ptr( + index, new_mbr, + page_rec_get_next(page_get_infimum_rec(new_page)), + new_page_no, heap, level); + + ulint up_match = 0; + ulint low_match = 0; + + buf_block_t* father_block = btr_cur_get_block(&cursor); + + page_cur_search_with_match( + father_block, index, node_ptr_upper, + PAGE_CUR_LE , &up_match, &low_match, + btr_cur_get_page_cur(&cursor), NULL); + + err = btr_cur_optimistic_insert( + flags + | BTR_NO_LOCKING_FLAG + | BTR_KEEP_SYS_FLAG + | BTR_NO_UNDO_LOG_FLAG, + &cursor, &offsets, &heap, + node_ptr_upper, &rec, &dummy_big_rec, 0, NULL, mtr); + + if (err == DB_FAIL) { + ut_ad(!cursor.rtr_info); + + cursor.rtr_info = sea_cur->rtr_info; + cursor.tree_height = sea_cur->tree_height; + + err = btr_cur_pessimistic_insert(flags + | BTR_NO_LOCKING_FLAG + | BTR_KEEP_SYS_FLAG + | BTR_NO_UNDO_LOG_FLAG, + &cursor, &offsets, &heap, + node_ptr_upper, &rec, + &dummy_big_rec, 0, NULL, mtr); + cursor.rtr_info = NULL; + ut_a(err == DB_SUCCESS); + } + + prdt.data = static_cast(mbr); + prdt.op = 0; + new_prdt.data = static_cast(new_mbr); + new_prdt.op = 0; + parent_prdt.data = static_cast(&parent_mbr); + parent_prdt.op = 0; + + lock_prdt_update_parent(block, new_block, &prdt, &new_prdt, + &parent_prdt, dict_index_get_space(index), + page_cursor->block->page.id.page_no()); + + mem_heap_free(heap); + + /* Get the previous and next pages of page */ + prev_page_no = btr_page_get_prev(page, mtr); + next_page_no = btr_page_get_next(page, mtr); + space = block->page.id.space(); + const page_size_t& page_size = dict_table_page_size(index->table); + + /* Update page links of the level */ + if (prev_page_no != FIL_NULL) { + page_id_t prev_page_id(space, prev_page_no); + + buf_block_t* prev_block = btr_block_get( + prev_page_id, page_size, RW_X_LATCH, index, mtr); +#ifdef UNIV_BTR_DEBUG + ut_a(page_is_comp(prev_block->frame) == page_is_comp(page)); + ut_a(btr_page_get_next(prev_block->frame, mtr) + == block->page.id.page_no()); +#endif /* UNIV_BTR_DEBUG */ + + btr_page_set_next(buf_block_get_frame(prev_block), + buf_block_get_page_zip(prev_block), + page_no, mtr); + } + + if (next_page_no != FIL_NULL) { + page_id_t next_page_id(space, next_page_no); + + buf_block_t* next_block = btr_block_get( + next_page_id, page_size, RW_X_LATCH, index, mtr); +#ifdef UNIV_BTR_DEBUG + ut_a(page_is_comp(next_block->frame) == page_is_comp(page)); + ut_a(btr_page_get_prev(next_block->frame, mtr) + == page_get_page_no(page)); +#endif /* UNIV_BTR_DEBUG */ + + btr_page_set_prev(buf_block_get_frame(next_block), + buf_block_get_page_zip(next_block), + new_page_no, mtr); + } + + btr_page_set_prev(page, page_zip, prev_page_no, mtr); + btr_page_set_next(page, page_zip, new_page_no, mtr); + + btr_page_set_prev(new_page, new_page_zip, page_no, mtr); + btr_page_set_next(new_page, new_page_zip, next_page_no, mtr); +} + 
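rtr_adjust_upper_level() ends by splicing the new page into the doubly linked list of sibling pages on its level. Below is an in-memory sketch of just that rewiring; page_links and link_new_sibling are invented stand-ins for the prev/next page fields and the btr_page_set_prev()/btr_page_set_next() calls, with no mini-transaction or buffer-pool handling.

        #include <cstdio>

        /* In-memory stand-in for the sibling links rewired above: the new
           page is spliced in right after the page that was split. */
        struct page_links {
                page_links*     prev;
                page_links*     next;
                unsigned        page_no;
        };

        void link_new_sibling(page_links* page, page_links* new_page)
        {
                page_links* next = page->next;

                new_page->prev = page;          /* set new page's prev link */
                new_page->next = next;          /* set new page's next link */
                page->next = new_page;          /* old page now points at new page */

                if (next != nullptr) {          /* analogous to next_page_no != FIL_NULL */
                        next->prev = new_page;  /* old right sibling points back */
                }
        }

        int main()
        {
                page_links a = {nullptr, nullptr, 5};
                page_links b = {nullptr, nullptr, 6};
                page_links n = {nullptr, nullptr, 9};

                a.next = &b;
                b.prev = &a;

                link_new_sibling(&a, &n);

                std::printf("%u -> %u -> %u\n", a.page_no, a.next->page_no,
                            a.next->next->page_no);     /* prints: 5 -> 9 -> 6 */
                return 0;
        }
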
+/*************************************************************//** +Moves record list to another page for rtree splitting. + +IMPORTANT: The caller will have to update IBUF_BITMAP_FREE +if new_block is a compressed leaf page in a secondary index. +This has to be done either within the same mini-transaction, +or by invoking ibuf_reset_free_bits() before mtr_commit(). + +@return TRUE on success; FALSE on compression failure */ +ibool +rtr_split_page_move_rec_list( +/*=========================*/ + rtr_split_node_t* node_array, /*!< in: split node array. */ + int first_rec_group,/*!< in: group number of the + first rec. */ + buf_block_t* new_block, /*!< in/out: index page + where to move */ + buf_block_t* block, /*!< in/out: page containing + split_rec */ + rec_t* first_rec, /*!< in: first record not to + move */ + dict_index_t* index, /*!< in: record descriptor */ + mem_heap_t* heap, /*!< in: pointer to memory + heap, or NULL */ + mtr_t* mtr) /*!< in: mtr */ +{ + rtr_split_node_t* cur_split_node; + rtr_split_node_t* end_split_node; + page_cur_t page_cursor; + page_cur_t new_page_cursor; + page_t* page; + page_t* new_page; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + page_zip_des_t* new_page_zip + = buf_block_get_page_zip(new_block); + rec_t* rec; + rec_t* ret; + ulint moved = 0; + ulint max_to_move = 0; + rtr_rec_move_t* rec_move = NULL; + + rec_offs_init(offsets_); + + page_cur_set_before_first(block, &page_cursor); + page_cur_set_before_first(new_block, &new_page_cursor); + + page = buf_block_get_frame(block); + new_page = buf_block_get_frame(new_block); + ret = page_rec_get_prev(page_get_supremum_rec(new_page)); + + end_split_node = node_array + page_get_n_recs(page); + + mtr_log_t log_mode = MTR_LOG_NONE; + + if (new_page_zip) { + log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE); + } + + max_to_move = page_get_n_recs( + buf_block_get_frame(block)); + rec_move = static_cast(mem_heap_alloc( + heap, + sizeof (*rec_move) * max_to_move)); + + /* Insert the recs in group 2 to new page. */ + for (cur_split_node = node_array; + cur_split_node < end_split_node; ++cur_split_node) { + if (cur_split_node->n_node != first_rec_group) { + lock_rec_store_on_page_infimum( + block, cur_split_node->key); + + offsets = rec_get_offsets(cur_split_node->key, + index, offsets, + ULINT_UNDEFINED, &heap); + + ut_ad (cur_split_node->key != first_rec + || !page_is_leaf(page)); + + rec = page_cur_insert_rec_low( + page_cur_get_rec(&new_page_cursor), + index, + cur_split_node->key, + offsets, + mtr); + + ut_a(rec); + + lock_rec_restore_from_page_infimum( + new_block, rec, block); + + page_cur_move_to_next(&new_page_cursor); + + rec_move[moved].new_rec = rec; + rec_move[moved].old_rec = cur_split_node->key; + rec_move[moved].moved = false; + moved++; + + if (moved > max_to_move) { + ut_ad(0); + break; + } + } + } + + /* Update PAGE_MAX_TRX_ID on the uncompressed page. + Modifications will be redo logged and copied to the compressed + page in page_zip_compress() or page_zip_reorganize() below. + Multiple transactions cannot simultaneously operate on the + same temp-table in parallel. + max_trx_id is ignored for temp tables because it not required + for MVCC. 
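rtr_split_page_move_rec_list() above works in two passes: records whose group tag differs from first_rec_group are inserted into the new page, and the same records are then deleted from the old page. The same two passes over a toy record vector, with invented types and none of the locking, redo logging or compression handling:

        #include <algorithm>
        #include <cstddef>
        #include <cstdio>
        #include <vector>

        /* Toy stand-in for the group tags assigned by split_rtree_node():
           each record carries its group number in n_node. */
        struct toy_rec {
                int     id;
                int     n_node;
        };

        int main()
        {
                std::vector<toy_rec> old_page = { {1, 1}, {2, 2}, {3, 1}, {4, 2} };
                std::vector<toy_rec> new_page;
                const int first_rec_group = 1;

                /* Pass 1: records outside first_rec_group go to the new page. */
                for (std::size_t i = 0; i < old_page.size(); i++) {
                        if (old_page[i].n_node != first_rec_group) {
                                new_page.push_back(old_page[i]);
                        }
                }

                /* Pass 2: the moved records are deleted from the old page. */
                old_page.erase(std::remove_if(old_page.begin(), old_page.end(),
                                              [&](const toy_rec& r) {
                                                      return r.n_node != first_rec_group;
                                              }),
                               old_page.end());

                std::printf("old page keeps %zu recs, new page gets %zu recs\n",
                            old_page.size(), new_page.size());  /* 2 and 2 */
                return 0;
        }
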
*/ + if (dict_index_is_sec_or_ibuf(index) + && page_is_leaf(page) + && !dict_table_is_temporary(index->table)) { + page_update_max_trx_id(new_block, NULL, + page_get_max_trx_id(page), + mtr); + } + + if (new_page_zip) { + mtr_set_log_mode(mtr, log_mode); + + if (!page_zip_compress(new_page_zip, new_page, index, + page_zip_level, NULL, mtr)) { + ulint ret_pos; + + /* Before trying to reorganize the page, + store the number of preceding records on the page. */ + ret_pos = page_rec_get_n_recs_before(ret); + /* Before copying, "ret" was the predecessor + of the predefined supremum record. If it was + the predefined infimum record, then it would + still be the infimum, and we would have + ret_pos == 0. */ + + if (UNIV_UNLIKELY + (!page_zip_reorganize(new_block, index, mtr))) { + + if (UNIV_UNLIKELY + (!page_zip_decompress(new_page_zip, + new_page, FALSE))) { + ut_error; + } +#ifdef UNIV_GIS_DEBUG + ut_ad(page_validate(new_page, index)); +#endif + + return(false); + } + + /* The page was reorganized: Seek to ret_pos. */ + ret = page_rec_get_nth(new_page, ret_pos); + } + } + + /* Update the lock table */ + lock_rtr_move_rec_list(new_block, block, rec_move, moved); + + /* Delete recs in second group from the old page. */ + for (cur_split_node = node_array; + cur_split_node < end_split_node; ++cur_split_node) { + if (cur_split_node->n_node != first_rec_group) { + page_cur_position(cur_split_node->key, + block, &page_cursor); + offsets = rec_get_offsets( + page_cur_get_rec(&page_cursor), index, + offsets, ULINT_UNDEFINED, + &heap); + page_cur_delete_rec(&page_cursor, + index, offsets, mtr); + } + } + + return(true); +} + +/*************************************************************//** +Splits an R-tree index page to halves and inserts the tuple. It is assumed +that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is +released within this function! NOTE that the operation of this +function must always succeed, we cannot reverse it: therefore enough +free disk space (2 pages) must be guaranteed to be available before +this function is called. 
+@return inserted record */ +rec_t* +rtr_page_split_and_insert( +/*======================*/ + ulint flags, /*!< in: undo logging and locking flags */ + btr_cur_t* cursor, /*!< in/out: cursor at which to insert; when the + function returns, the cursor is positioned + on the predecessor of the inserted record */ + ulint** offsets,/*!< out: offsets on inserted record */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + const dtuple_t* tuple, /*!< in: tuple to insert */ + ulint n_ext, /*!< in: number of externally stored columns */ + mtr_t* mtr) /*!< in: mtr */ +{ + buf_block_t* block; + page_t* page; + page_t* new_page; + ulint page_no; + byte direction; + ulint hint_page_no; + buf_block_t* new_block; + page_zip_des_t* page_zip; + page_zip_des_t* new_page_zip; + buf_block_t* insert_block; + page_cur_t* page_cursor; + rec_t* rec = 0; + ulint n_recs; + ulint total_data; + ulint insert_size; + rtr_split_node_t* rtr_split_node_array; + rtr_split_node_t* cur_split_node; + rtr_split_node_t* end_split_node; + double* buf_pos; + ulint page_level; + node_seq_t current_ssn; + node_seq_t next_ssn; + buf_block_t* root_block; + rtr_mbr_t mbr; + rtr_mbr_t new_mbr; + lock_prdt_t prdt; + lock_prdt_t new_prdt; + rec_t* first_rec = NULL; + int first_rec_group = 1; + + if (!*heap) { + *heap = mem_heap_create(1024); + } + +func_start: + mem_heap_empty(*heap); + *offsets = NULL; + + ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(cursor->index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); + ut_ad(!dict_index_is_online_ddl(cursor->index) + || (flags & BTR_CREATE_FLAG) + || dict_index_is_clust(cursor->index)); + ut_ad(rw_lock_own_flagged(dict_index_get_lock(cursor->index), + RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX)); + + block = btr_cur_get_block(cursor); + page = buf_block_get_frame(block); + page_zip = buf_block_get_page_zip(block); + page_level = btr_page_get_level(page, mtr); + current_ssn = page_get_ssn_id(page); + + ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); + ut_ad(page_get_n_recs(page) >= 1); + + page_no = block->page.id.page_no(); + + if (btr_page_get_prev(page, mtr) == FIL_NULL && !page_is_leaf(page)) { + first_rec = page_rec_get_next( + page_get_infimum_rec(buf_block_get_frame(block))); + } + + /* Initial split nodes array. */ + rtr_split_node_array = rtr_page_split_initialize_nodes( + *heap, cursor, offsets, tuple, &buf_pos); + + /* Divide all mbrs to two groups. 
*/ + n_recs = page_get_n_recs(page) + 1; + + end_split_node = rtr_split_node_array + n_recs; + +#ifdef UNIV_GIS_DEBUG + fprintf(stderr, "Before split a page:\n"); + for (cur_split_node = rtr_split_node_array; + cur_split_node < end_split_node; ++cur_split_node) { + for (int i = 0; i < SPDIMS * 2; i++) { + fprintf(stderr, "%.2lf ", + *(cur_split_node->coords + i)); + } + fprintf(stderr, "\n"); + } +#endif + + insert_size = rec_get_converted_size(cursor->index, tuple, n_ext); + total_data = page_get_data_size(page) + insert_size; + first_rec_group = split_rtree_node(rtr_split_node_array, + static_cast(n_recs), + static_cast(total_data), + static_cast(insert_size), + 0, 2, 2, &buf_pos, SPDIMS, + static_cast(first_rec)); + + /* Allocate a new page to the index */ + direction = FSP_UP; + hint_page_no = page_no + 1; + new_block = btr_page_alloc(cursor->index, hint_page_no, direction, + page_level, mtr, mtr); + new_page_zip = buf_block_get_page_zip(new_block); + btr_page_create(new_block, new_page_zip, cursor->index, + page_level, mtr); + + new_page = buf_block_get_frame(new_block); + ut_ad(page_get_ssn_id(new_page) == 0); + + /* Set new ssn to the new page and page. */ + page_set_ssn_id(new_block, new_page_zip, current_ssn, mtr); + next_ssn = rtr_get_new_ssn_id(cursor->index); + + page_set_ssn_id(block, page_zip, next_ssn, mtr); + + /* Keep recs in first group to the old page, move recs in second + groups to the new page. */ + if (0 +#ifdef UNIV_ZIP_COPY + || page_zip +#endif + || !rtr_split_page_move_rec_list(rtr_split_node_array, + first_rec_group, + new_block, block, first_rec, + cursor->index, *heap, mtr)) { + ulint n = 0; + rec_t* rec; + ulint moved = 0; + ulint max_to_move = 0; + rtr_rec_move_t* rec_move = NULL; + ulint pos; + + /* For some reason, compressing new_page failed, + even though it should contain fewer records than + the original page. Copy the page byte for byte + and then delete the records from both pages + as appropriate. Deleting will always succeed. */ + ut_a(new_page_zip); + + page_zip_copy_recs(new_page_zip, new_page, + page_zip, page, cursor->index, mtr); + + page_cursor = btr_cur_get_page_cur(cursor); + + /* Move locks on recs. */ + max_to_move = page_get_n_recs(page); + rec_move = static_cast(mem_heap_alloc( + *heap, + sizeof (*rec_move) * max_to_move)); + + /* Init the rec_move array for moving lock on recs. */ + for (cur_split_node = rtr_split_node_array; + cur_split_node < end_split_node - 1; ++cur_split_node) { + if (cur_split_node->n_node != first_rec_group) { + pos = page_rec_get_n_recs_before( + cur_split_node->key); + rec = page_rec_get_nth(new_page, pos); + ut_a(rec); + + rec_move[moved].new_rec = rec; + rec_move[moved].old_rec = cur_split_node->key; + rec_move[moved].moved = false; + moved++; + + if (moved > max_to_move) { + ut_ad(0); + break; + } + } + } + + /* Update the lock table */ + lock_rtr_move_rec_list(new_block, block, rec_move, moved); + + /* Delete recs in first group from the new page. 
*/ + for (cur_split_node = rtr_split_node_array; + cur_split_node < end_split_node - 1; ++cur_split_node) { + if (cur_split_node->n_node == first_rec_group) { + ulint pos; + + pos = page_rec_get_n_recs_before( + cur_split_node->key); + ut_a(pos > 0); + rec_t* new_rec = page_rec_get_nth(new_page, + pos - n); + + ut_a(new_rec && page_rec_is_user_rec(new_rec)); + page_cur_position(new_rec, new_block, + page_cursor); + + *offsets = rec_get_offsets( + page_cur_get_rec(page_cursor), + cursor->index, + *offsets, ULINT_UNDEFINED, + heap); + + page_cur_delete_rec(page_cursor, + cursor->index, *offsets, mtr); + n++; + } + } + + /* Delete recs in second group from the old page. */ + for (cur_split_node = rtr_split_node_array; + cur_split_node < end_split_node - 1; ++cur_split_node) { + if (cur_split_node->n_node != first_rec_group) { + page_cur_position(cur_split_node->key, + block, page_cursor); + *offsets = rec_get_offsets( + page_cur_get_rec(page_cursor), + cursor->index, + *offsets, ULINT_UNDEFINED, + heap); + page_cur_delete_rec(page_cursor, + cursor->index, *offsets, mtr); + } + } + +#ifdef UNIV_GIS_DEBUG + ut_ad(page_validate(new_page, cursor->index)); + ut_ad(page_validate(page, cursor->index)); +#endif + } + + /* Insert the new rec to the proper page. */ + cur_split_node = end_split_node - 1; + if (cur_split_node->n_node != first_rec_group) { + insert_block = new_block; + } else { + insert_block = block; + } + + /* Reposition the cursor for insert and try insertion */ + page_cursor = btr_cur_get_page_cur(cursor); + + page_cur_search(insert_block, cursor->index, tuple, + PAGE_CUR_LE, page_cursor); + + rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index, + offsets, heap, n_ext, mtr); + + /* If insert did not fit, try page reorganization. + For compressed pages, page_cur_tuple_insert() will have + attempted this already. */ + if (rec == NULL) { + if (!page_cur_get_page_zip(page_cursor) + && btr_page_reorganize(page_cursor, cursor->index, mtr)) { + rec = page_cur_tuple_insert(page_cursor, tuple, + cursor->index, offsets, + heap, n_ext, mtr); + + } + /* If insert fail, we will try to split the insert_block + again. */ + } + + /* Calculate the mbr on the upper half-page, and the mbr on + original page. */ + rtr_page_cal_mbr(cursor->index, block, &mbr, *heap); + rtr_page_cal_mbr(cursor->index, new_block, &new_mbr, *heap); + prdt.data = &mbr; + new_prdt.data = &new_mbr; + + /* Check any predicate locks need to be moved/copied to the + new page */ + lock_prdt_update_split(block, new_block, &prdt, &new_prdt, + dict_index_get_space(cursor->index), page_no); + + /* Adjust the upper level. */ + rtr_adjust_upper_level(cursor, flags, block, new_block, + &mbr, &new_mbr, direction, mtr); + + /* Save the new ssn to the root page, since we need to reinit + the first ssn value from it after restart server. */ + + root_block = btr_root_block_get(cursor->index, RW_SX_LATCH, mtr); + + page_zip = buf_block_get_page_zip(root_block); + page_set_ssn_id(root_block, page_zip, next_ssn, mtr); + + /* Insert fit on the page: update the free bits for the + left and right pages in the same mtr */ + + if (page_is_leaf(page)) { + ibuf_update_free_bits_for_two_pages_low( + block, new_block, mtr); + } + + /* If the new res insert fail, we need to do another split + again. 
*/ + if (!rec) { + /* We play safe and reset the free bits for new_page */ + if (!dict_index_is_clust(cursor->index) + && !dict_table_is_temporary(cursor->index->table)) { + ibuf_reset_free_bits(new_block); + ibuf_reset_free_bits(block); + } + + *offsets = rtr_page_get_father_block( + NULL, *heap, cursor->index, block, mtr, + NULL, cursor); + + rec_t* i_rec = page_rec_get_next(page_get_infimum_rec( + buf_block_get_frame(block))); + btr_cur_position(cursor->index, i_rec, block, cursor); + + goto func_start; + } + +#ifdef UNIV_GIS_DEBUG + ut_ad(page_validate(buf_block_get_frame(block), cursor->index)); + ut_ad(page_validate(buf_block_get_frame(new_block), cursor->index)); + + ut_ad(!rec || rec_offs_validate(rec, cursor->index, *offsets)); +#endif + MONITOR_INC(MONITOR_INDEX_SPLIT); + + return(rec); +} + +/****************************************************************//** +Following the right link to find the proper block for insert. +@return the proper block.*/ +dberr_t +rtr_ins_enlarge_mbr( +/*================*/ + btr_cur_t* btr_cur, /*!< in: btr cursor */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr) /*!< in: mtr */ +{ + dberr_t err = DB_SUCCESS; + rtr_mbr_t new_mbr; + buf_block_t* block; + mem_heap_t* heap; + dict_index_t* index = btr_cur->index; + page_cur_t* page_cursor; + ulint* offsets; + node_visit_t* node_visit; + btr_cur_t cursor; + page_t* page; + + ut_ad(dict_index_is_spatial(index)); + + /* If no rtr_info or rtree is one level tree, return. */ + if (!btr_cur->rtr_info || btr_cur->tree_height == 1) { + return(err); + } + + /* Check path info is not empty. */ + ut_ad(!btr_cur->rtr_info->parent_path->empty()); + + /* Create a memory heap. */ + heap = mem_heap_create(1024); + + /* Leaf level page is stored in cursor */ + page_cursor = btr_cur_get_page_cur(btr_cur); + block = page_cur_get_block(page_cursor); + + for (ulint i = 1; i < btr_cur->tree_height; i++) { + node_visit = rtr_get_parent_node(btr_cur, i, true); + ut_ad(node_visit != NULL); + + /* If there's no mbr enlarge, return.*/ + if (node_visit->mbr_inc == 0) { + block = btr_pcur_get_block(node_visit->cursor); + continue; + } + + /* Calculate the mbr of the child page. */ + rtr_page_cal_mbr(index, block, &new_mbr, heap); + + /* Get father block. */ + memset(&cursor, 0, sizeof(cursor)); + offsets = rtr_page_get_father_block( + NULL, heap, index, block, mtr, btr_cur, &cursor); + + page = buf_block_get_frame(block); + + /* Update the mbr field of the rec. */ + if (!rtr_update_mbr_field(&cursor, offsets, NULL, page, + &new_mbr, NULL, mtr)) { + err = DB_ERROR; + break; + } + + page_cursor = btr_cur_get_page_cur(&cursor); + block = page_cur_get_block(page_cursor); + } + + mem_heap_free(heap); + + return(err); +} + +/*************************************************************//** +Copy recs from a page to new_block of rtree. +Differs from page_copy_rec_list_end, because this function does not +touch the lock table and max trx id on page or compress the page. + +IMPORTANT: The caller will have to update IBUF_BITMAP_FREE +if new_block is a compressed leaf page in a secondary index. +This has to be done either within the same mini-transaction, +or by invoking ibuf_reset_free_bits() before mtr_commit(). 
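+Unlike the B-tree variant, the position of each copied record on new_block
+is found with an R-tree comparison against the records already there,
+rather than by appending at the end of the page.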
*/ +void +rtr_page_copy_rec_list_end_no_locks( +/*================================*/ + buf_block_t* new_block, /*!< in: index page to copy to */ + buf_block_t* block, /*!< in: index page of rec */ + rec_t* rec, /*!< in: record on page */ + dict_index_t* index, /*!< in: record descriptor */ + mem_heap_t* heap, /*!< in/out: heap memory */ + rtr_rec_move_t* rec_move, /*!< in: recording records moved */ + ulint max_move, /*!< in: num of rec to move */ + ulint* num_moved, /*!< out: num of rec to move */ + mtr_t* mtr) /*!< in: mtr */ +{ + page_t* new_page = buf_block_get_frame(new_block); + page_cur_t page_cur; + page_cur_t cur1; + rec_t* cur_rec; + dtuple_t* tuple; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + ulint n_fields = 0; + ulint moved = 0; + bool is_leaf = page_is_leaf(new_page); + + rec_offs_init(offsets_); + + page_cur_position(rec, block, &cur1); + + if (page_cur_is_before_first(&cur1)) { + + page_cur_move_to_next(&cur1); + } + + btr_assert_not_corrupted(new_block, index); + ut_a(page_is_comp(new_page) == page_rec_is_comp(rec)); + ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint) + (page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM)); + + cur_rec = page_rec_get_next( + page_get_infimum_rec(buf_block_get_frame(new_block))); + page_cur_position(cur_rec, new_block, &page_cur); + + n_fields = dict_index_get_n_fields(index); + + /* Copy records from the original page to the new page */ + while (!page_cur_is_after_last(&cur1)) { + rec_t* cur1_rec = page_cur_get_rec(&cur1); + rec_t* ins_rec; + + /* Find the place to insert. */ + tuple = dict_index_build_data_tuple(index, cur1_rec, + n_fields, heap); + + if (page_rec_is_infimum(cur_rec)) { + cur_rec = page_rec_get_next(cur_rec); + } + + while (!page_rec_is_supremum(cur_rec)) { + ulint cur_matched_fields = 0; + int cmp; + + offsets = rec_get_offsets( + cur_rec, index, offsets, + dtuple_get_n_fields_cmp(tuple), &heap); + cmp = cmp_dtuple_rec_with_match(tuple, cur_rec, offsets, + &cur_matched_fields); + if (cmp < 0) { + page_cur_move_to_prev(&page_cur); + break; + } else if (cmp > 0) { + /* Skip small recs. */ + page_cur_move_to_next(&page_cur); + cur_rec = page_cur_get_rec(&page_cur); + } else if (is_leaf) { + if (rec_get_deleted_flag(cur1_rec, + dict_table_is_comp(index->table))) { + goto next; + } else { + /* We have two identical leaf records, + skip copying the undeleted one, and + unmark deleted on the current page */ + btr_rec_set_deleted_flag( + cur_rec, NULL, FALSE); + goto next; + } + } + } + + /* If position is on suprenum rec, need to move to + previous rec. 
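+	page_cur_insert_rec_low() inserts after the passed record, so
+	stepping back from the supremum keeps the copied record inside the
+	user record area of the new page.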
*/ + if (page_rec_is_supremum(cur_rec)) { + page_cur_move_to_prev(&page_cur); + } + + cur_rec = page_cur_get_rec(&page_cur); + + offsets = rec_get_offsets(cur1_rec, index, offsets, + ULINT_UNDEFINED, &heap); + + ins_rec = page_cur_insert_rec_low(cur_rec, index, + cur1_rec, offsets, mtr); + if (UNIV_UNLIKELY(!ins_rec)) { + fprintf(stderr, "page number %ld and %ld\n", + (long)new_block->page.id.page_no(), + (long)block->page.id.page_no()); + + ib::fatal() << "rec offset " << page_offset(rec) + << ", cur1 offset " + << page_offset(page_cur_get_rec(&cur1)) + << ", cur_rec offset " + << page_offset(cur_rec); + } + + rec_move[moved].new_rec = ins_rec; + rec_move[moved].old_rec = cur1_rec; + rec_move[moved].moved = false; + moved++; +next: + if (moved > max_move) { + ut_ad(0); + break; + } + + page_cur_move_to_next(&cur1); + } + + *num_moved = moved; +} + +/*************************************************************//** +Copy recs till a specified rec from a page to new_block of rtree. */ +void +rtr_page_copy_rec_list_start_no_locks( +/*==================================*/ + buf_block_t* new_block, /*!< in: index page to copy to */ + buf_block_t* block, /*!< in: index page of rec */ + rec_t* rec, /*!< in: record on page */ + dict_index_t* index, /*!< in: record descriptor */ + mem_heap_t* heap, /*!< in/out: heap memory */ + rtr_rec_move_t* rec_move, /*!< in: recording records moved */ + ulint max_move, /*!< in: num of rec to move */ + ulint* num_moved, /*!< out: num of rec to move */ + mtr_t* mtr) /*!< in: mtr */ +{ + page_cur_t cur1; + rec_t* cur_rec; + dtuple_t* tuple; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + ulint n_fields = 0; + page_cur_t page_cur; + ulint moved = 0; + bool is_leaf = page_is_leaf(buf_block_get_frame(block)); + + rec_offs_init(offsets_); + + n_fields = dict_index_get_n_fields(index); + + page_cur_set_before_first(block, &cur1); + page_cur_move_to_next(&cur1); + + cur_rec = page_rec_get_next( + page_get_infimum_rec(buf_block_get_frame(new_block))); + page_cur_position(cur_rec, new_block, &page_cur); + + while (page_cur_get_rec(&cur1) != rec) { + rec_t* cur1_rec = page_cur_get_rec(&cur1); + rec_t* ins_rec; + + /* Find the place to insert. */ + tuple = dict_index_build_data_tuple(index, cur1_rec, + n_fields, heap); + + if (page_rec_is_infimum(cur_rec)) { + cur_rec = page_rec_get_next(cur_rec); + } + + while (!page_rec_is_supremum(cur_rec)) { + ulint cur_matched_fields = 0; + int cmp; + + offsets = rec_get_offsets(cur_rec, index, offsets, + dtuple_get_n_fields_cmp(tuple), + &heap); + cmp = cmp_dtuple_rec_with_match(tuple, cur_rec, offsets, + &cur_matched_fields); + if (cmp < 0) { + page_cur_move_to_prev(&page_cur); + cur_rec = page_cur_get_rec(&page_cur); + break; + } else if (cmp > 0) { + /* Skip small recs. */ + page_cur_move_to_next(&page_cur); + cur_rec = page_cur_get_rec(&page_cur); + } else if (is_leaf) { + if (rec_get_deleted_flag( + cur1_rec, + dict_table_is_comp(index->table))) { + goto next; + } else { + /* We have two identical leaf records, + skip copying the undeleted one, and + unmark deleted on the current page */ + btr_rec_set_deleted_flag( + cur_rec, NULL, FALSE); + goto next; + } + } + } + + /* If position is on suprenum rec, need to move to + previous rec. 
*/ + if (page_rec_is_supremum(cur_rec)) { + page_cur_move_to_prev(&page_cur); + } + + cur_rec = page_cur_get_rec(&page_cur); + + offsets = rec_get_offsets(cur1_rec, index, offsets, + ULINT_UNDEFINED, &heap); + + ins_rec = page_cur_insert_rec_low(cur_rec, index, + cur1_rec, offsets, mtr); + if (UNIV_UNLIKELY(!ins_rec)) { + fprintf(stderr, "page number %ld and %ld\n", + (long)new_block->page.id.page_no(), + (long)block->page.id.page_no()); + + ib::fatal() << "rec offset " << page_offset(rec) + << ", cur1 offset " + << page_offset(page_cur_get_rec(&cur1)) + << ", cur_rec offset " + << page_offset(cur_rec); + } + + rec_move[moved].new_rec = ins_rec; + rec_move[moved].old_rec = cur1_rec; + rec_move[moved].moved = false; + moved++; +next: + if (moved > max_move) { + ut_ad(0); + break; + } + + page_cur_move_to_next(&cur1); + } + + *num_moved = moved; +} + +/****************************************************************//** +Check whether two MBRs are identical or need to be merged */ +bool +rtr_merge_mbr_changed( +/*==================*/ + btr_cur_t* cursor, /*!< in/out: cursor */ + btr_cur_t* cursor2, /*!< in: the other cursor */ + ulint* offsets, /*!< in: rec offsets */ + ulint* offsets2, /*!< in: rec offsets */ + rtr_mbr_t* new_mbr, /*!< out: MBR to update */ + buf_block_t* merge_block, /*!< in: page to merge */ + buf_block_t* block, /*!< in: page to be merged */ + dict_index_t* index) /*!< in: index */ +{ + double* mbr; + double mbr1[SPDIMS * 2]; + double mbr2[SPDIMS * 2]; + rec_t* rec; + ulint len; + bool changed = false; + + ut_ad(dict_index_is_spatial(cursor->index)); + + rec = btr_cur_get_rec(cursor); + + rtr_read_mbr(rec_get_nth_field(rec, offsets, 0, &len), + reinterpret_cast<rtr_mbr_t*>(mbr1)); + + rec = btr_cur_get_rec(cursor2); + + rtr_read_mbr(rec_get_nth_field(rec, offsets2, 0, &len), + reinterpret_cast<rtr_mbr_t*>(mbr2)); + + mbr = reinterpret_cast<double*>(new_mbr); + + for (int i = 0; i < SPDIMS * 2; i += 2) { + changed = (changed || mbr1[i] != mbr2[i]); + *mbr = mbr1[i] < mbr2[i] ? mbr1[i] : mbr2[i]; + mbr++; + changed = (changed || mbr1[i + 1] != mbr2[i + 1]); + *mbr = mbr1[i + 1] > mbr2[i + 1] ? mbr1[i + 1] : mbr2[i + 1]; + mbr++; + } + + if (!changed) { + rec_t* rec1; + rec_t* rec2; + ulint* offsets1; + ulint* offsets2; + mem_heap_t* heap; + + heap = mem_heap_create(100); + + rec1 = page_rec_get_next( + page_get_infimum_rec( + buf_block_get_frame(merge_block))); + + offsets1 = rec_get_offsets( + rec1, index, NULL, ULINT_UNDEFINED, &heap); + + rec2 = page_rec_get_next( + page_get_infimum_rec( + buf_block_get_frame(block))); + offsets2 = rec_get_offsets( + rec2, index, NULL, ULINT_UNDEFINED, &heap); + + /* Check whether any primary key fields have been changed */ + if (cmp_rec_rec(rec1, rec2, offsets1, offsets2, index) != 0) { + changed = true; + } + + mem_heap_free(heap); + } + + return(changed); +} + +/****************************************************************//** +Merge two MBRs and update the MBR that the cursor is on. */ +dberr_t +rtr_merge_and_update_mbr( +/*=====================*/ + btr_cur_t* cursor, /*!< in/out: cursor */ + btr_cur_t* cursor2, /*!< in: the other cursor */ + ulint* offsets, /*!< in: rec offsets */ + ulint* offsets2, /*!< in: rec offsets */ + page_t* child_page, /*!< in: the page.
*/ + buf_block_t* merge_block, /*!< in: page to merge */ + buf_block_t* block, /*!< in: page be merged */ + dict_index_t* index, /*!< in: index */ + mtr_t* mtr) /*!< in: mtr */ +{ + dberr_t err = DB_SUCCESS; + rtr_mbr_t new_mbr; + bool changed = false; + + ut_ad(dict_index_is_spatial(cursor->index)); + + changed = rtr_merge_mbr_changed(cursor, cursor2, offsets, offsets2, + &new_mbr, merge_block, + block, index); + + /* Update the mbr field of the rec. And will delete the record + pointed by cursor2 */ + if (changed) { + if (!rtr_update_mbr_field(cursor, offsets, cursor2, child_page, + &new_mbr, NULL, mtr)) { + err = DB_ERROR; + } + } else { + rtr_node_ptr_delete(cursor2->index, cursor2, block, mtr); + } + + return(err); +} + +/*************************************************************//** +Deletes on the upper level the node pointer to a page. */ +void +rtr_node_ptr_delete( +/*================*/ + dict_index_t* index, /*!< in: index tree */ + btr_cur_t* cursor, /*!< in: search cursor, contains information + about parent nodes in search */ + buf_block_t* block, /*!< in: page whose node pointer is deleted */ + mtr_t* mtr) /*!< in: mtr */ +{ + ibool compressed; + dberr_t err; + + compressed = btr_cur_pessimistic_delete(&err, TRUE, cursor, + BTR_CREATE_FLAG, false, mtr); + ut_a(err == DB_SUCCESS); + + if (!compressed) { + btr_cur_compress_if_useful(cursor, FALSE, mtr); + } +} + +/**************************************************************//** +Check whether a Rtree page is child of a parent page +@return true if there is child/parent relationship */ +bool +rtr_check_same_block( +/*================*/ + dict_index_t* index, /*!< in: index tree */ + btr_cur_t* cursor, /*!< in/out: position at the parent entry + pointing to the child if successful */ + buf_block_t* parentb,/*!< in: parent page to check */ + buf_block_t* childb, /*!< in: child Page */ + mem_heap_t* heap) /*!< in: memory heap */ + +{ + ulint page_no = childb->page.id.page_no(); + ulint* offsets; + rec_t* rec = page_rec_get_next(page_get_infimum_rec( + buf_block_get_frame(parentb))); + + while (!page_rec_is_supremum(rec)) { + offsets = rec_get_offsets( + rec, index, NULL, ULINT_UNDEFINED, &heap); + + if (btr_node_ptr_get_child_page_no(rec, offsets) == page_no) { + btr_cur_position(index, rec, parentb, cursor); + return(true); + } + + rec = page_rec_get_next(rec); + } + + return(false); +} + +/****************************************************************//** +Calculate the area increased for a new record +@return area increased */ +double +rtr_rec_cal_increase( +/*=================*/ + const dtuple_t* dtuple, /*!< in: data tuple to insert, which + cause area increase */ + const rec_t* rec, /*!< in: physical record which differs from + dtuple in some of the common fields, or which + has an equal number or more fields than + dtuple */ + const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ + double* area) /*!< out: increased area */ +{ + const dfield_t* dtuple_field; + ulint dtuple_f_len; + ulint rec_f_len; + const byte* rec_b_ptr; + double ret = 0; + + ut_ad(!page_rec_is_supremum(rec)); + ut_ad(!page_rec_is_infimum(rec)); + + dtuple_field = dtuple_get_nth_field(dtuple, 0); + dtuple_f_len = dfield_get_len(dtuple_field); + + rec_b_ptr = rec_get_nth_field(rec, offsets, 0, &rec_f_len); + ret = rtree_area_increase( + rec_b_ptr, + static_cast(dfield_get_data(dtuple_field)), + static_cast(dtuple_f_len), area); + + return(ret); +} + +/** Estimates the number of rows in a given area. 
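+The estimate is computed from the root page only: the MBR stored in each
+node pointer record is compared with the search MBR, and the accumulated
+overlap ratio is scaled by the total number of rows in the table.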
+@param[in] index index +@param[in] tuple range tuple containing mbr, may also be empty tuple +@param[in] mode search mode +@return estimated number of rows */ +int64_t +rtr_estimate_n_rows_in_range( + dict_index_t* index, + const dtuple_t* tuple, + page_cur_mode_t mode) +{ + /* Check tuple & mode */ + if (tuple->n_fields == 0) { + return(HA_POS_ERROR); + } + + switch (mode) { + case PAGE_CUR_DISJOINT: + case PAGE_CUR_CONTAIN: + case PAGE_CUR_INTERSECT: + case PAGE_CUR_WITHIN: + case PAGE_CUR_MBR_EQUAL: + break; + default: + return(HA_POS_ERROR); + } + + DBUG_EXECUTE_IF("rtr_pcur_move_to_next_return", + return(2); + ); + + /* Read mbr from tuple. */ + const dfield_t* dtuple_field; + ulint dtuple_f_len __attribute__((unused)); + rtr_mbr_t range_mbr; + double range_area; + byte* range_mbr_ptr; + + dtuple_field = dtuple_get_nth_field(tuple, 0); + dtuple_f_len = dfield_get_len(dtuple_field); + range_mbr_ptr = reinterpret_cast(dfield_get_data(dtuple_field)); + + ut_ad(dtuple_f_len >= DATA_MBR_LEN); + rtr_read_mbr(range_mbr_ptr, &range_mbr); + range_area = (range_mbr.xmax - range_mbr.xmin) + * (range_mbr.ymax - range_mbr.ymin); + + /* Get index root page. */ + page_size_t page_size(dict_table_page_size(index->table)); + page_id_t page_id(dict_index_get_space(index), + dict_index_get_page(index)); + mtr_t mtr; + buf_block_t* block; + page_t* page; + ulint n_recs; + + mtr_start(&mtr); + mtr.set_named_space(dict_index_get_space(index)); + mtr_s_lock(dict_index_get_lock(index), &mtr); + + block = btr_block_get(page_id, page_size, RW_S_LATCH, index, &mtr); + page = buf_block_get_frame(block); + n_recs = page_header_get_field(page, PAGE_N_RECS); + + if (n_recs == 0) { + mtr_commit(&mtr); + return(HA_POS_ERROR); + } + + rec_t* rec; + byte* field; + ulint len; + ulint* offsets = NULL; + mem_heap_t* heap; + + heap = mem_heap_create(512); + rec = page_rec_get_next(page_get_infimum_rec(page)); + offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); + + /* Scan records in root page and calculate area. 
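+	For entries with a non-zero area, PAGE_CUR_CONTAIN and
+	PAGE_CUR_INTERSECT each add overlap_area / entry_area;
+	PAGE_CUR_DISJOINT adds the complement of that ratio; PAGE_CUR_WITHIN
+	and PAGE_CUR_MBR_EQUAL add range_area / entry_area when the search
+	MBR lies within the entry. The final estimate below is
+	table_rows * area / n_recs; with illustrative numbers, n_recs = 4,
+	area = 1.0 and 1000 rows in the table give an estimate of 250.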
*/ + double area = 0; + while (!page_rec_is_supremum(rec)) { + rtr_mbr_t mbr; + double rec_area; + + field = rec_get_nth_field(rec, offsets, 0, &len); + ut_ad(len == DATA_MBR_LEN); + + rtr_read_mbr(field, &mbr); + + rec_area = (mbr.xmax - mbr.xmin) * (mbr.ymax - mbr.ymin); + + if (rec_area == 0) { + switch (mode) { + case PAGE_CUR_CONTAIN: + case PAGE_CUR_INTERSECT: + area += 1; + break; + + case PAGE_CUR_DISJOINT: + break; + + case PAGE_CUR_WITHIN: + case PAGE_CUR_MBR_EQUAL: + if (rtree_key_cmp( + PAGE_CUR_WITHIN, range_mbr_ptr, + DATA_MBR_LEN, field, DATA_MBR_LEN) + == 0) { + area += 1; + } + + break; + + default: + ut_error; + } + } else { + switch (mode) { + case PAGE_CUR_CONTAIN: + case PAGE_CUR_INTERSECT: + area += rtree_area_overlapping(range_mbr_ptr, + field, DATA_MBR_LEN) / rec_area; + break; + + case PAGE_CUR_DISJOINT: + area += 1; + area -= rtree_area_overlapping(range_mbr_ptr, + field, DATA_MBR_LEN) / rec_area; + break; + + case PAGE_CUR_WITHIN: + case PAGE_CUR_MBR_EQUAL: + if (rtree_key_cmp( + PAGE_CUR_WITHIN, range_mbr_ptr, + DATA_MBR_LEN, field, DATA_MBR_LEN) + == 0) { + area += range_area / rec_area; + } + + break; + default: + ut_error; + } + } + + rec = page_rec_get_next(rec); + } + + mtr_commit(&mtr); + mem_heap_free(heap); + + /* JAN: TODO MYSQL 5.7 GIS + if (my_isinf(area) || my_isnan(area)) { + return(HA_POS_ERROR); + } + */ + + return(static_cast(dict_table_get_n_rows(index->table) + * area / n_recs)); +} diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc new file mode 100644 index 00000000000..26440b5fa96 --- /dev/null +++ b/storage/innobase/gis/gis0sea.cc @@ -0,0 +1,2012 @@ +/***************************************************************************** + +Copyright (c) 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file gis/gis0sea.cc +InnoDB R-tree search interfaces + +Created 2014/01/16 Jimmy Yang +***********************************************************************/ + +#include "fsp0fsp.h" +#include "page0page.h" +#include "page0cur.h" +#include "page0zip.h" +#include "gis0rtree.h" + +#ifndef UNIV_HOTBACKUP +#include "btr0cur.h" +#include "btr0sea.h" +#include "btr0pcur.h" +#include "rem0cmp.h" +#include "lock0lock.h" +#include "ibuf0ibuf.h" +#include "trx0trx.h" +#include "srv0mon.h" +#include "gis0geo.h" + +#endif /* UNIV_HOTBACKUP */ + +/*************************************************************//** +Pop out used parent path entry, until we find the parent with matching +page number */ +static +void +rtr_adjust_parent_path( +/*===================*/ + rtr_info_t* rtr_info, /* R-Tree info struct */ + ulint page_no) /* page number to look for */ +{ + while (!rtr_info->parent_path->empty()) { + if (rtr_info->parent_path->back().child_no == page_no) { + break; + } else { + if (rtr_info->parent_path->back().cursor) { + btr_pcur_close( + rtr_info->parent_path->back().cursor); + ut_free(rtr_info->parent_path->back().cursor); + } + + rtr_info->parent_path->pop_back(); + } + } +} + +/*************************************************************//** +Find the next matching record. This function is used by search +or record locating during index delete/update. +@return true if there is suitable record found, otherwise false */ +static +bool +rtr_pcur_getnext_from_path( +/*=======================*/ + const dtuple_t* tuple, /*!< in: data tuple */ + page_cur_mode_t mode, /*!< in: cursor search mode */ + btr_cur_t* btr_cur,/*!< in: persistent cursor; NOTE that the + function may release the page latch */ + ulint target_level, + /*!< in: target level */ + ulint latch_mode, + /*!< in: latch_mode */ + bool index_locked, + /*!< in: index tree locked */ + mtr_t* mtr) /*!< in: mtr */ +{ + dict_index_t* index = btr_cur->index; + bool found = false; + ulint space = dict_index_get_space(index); + page_cur_t* page_cursor; + ulint level = 0; + node_visit_t next_rec; + rtr_info_t* rtr_info = btr_cur->rtr_info; + node_seq_t page_ssn; + ulint my_latch_mode; + ulint skip_parent = false; + bool new_split = false; + bool need_parent; + bool for_delete = false; + bool for_undo_ins = false; + + /* exhausted all the pages to be searched */ + if (rtr_info->path->empty()) { + return(false); + } + + ut_ad(dtuple_get_n_fields_cmp(tuple)); + + my_latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode); + + for_delete = latch_mode & BTR_RTREE_DELETE_MARK; + for_undo_ins = latch_mode & BTR_RTREE_UNDO_INS; + + /* There should be no insert coming to this function. Only + mode with BTR_MODIFY_* should be delete */ + ut_ad(mode != PAGE_CUR_RTREE_INSERT); + ut_ad(my_latch_mode == BTR_SEARCH_LEAF + || my_latch_mode == BTR_MODIFY_LEAF + || my_latch_mode == BTR_MODIFY_TREE + || my_latch_mode == BTR_CONT_MODIFY_TREE); + + /* Whether need to track parent information. 
Only need so + when we do tree altering operations (such as index page merge) */ + need_parent = ((my_latch_mode == BTR_MODIFY_TREE + || my_latch_mode == BTR_CONT_MODIFY_TREE) + && mode == PAGE_CUR_RTREE_LOCATE); + + if (!index_locked) { + ut_ad(latch_mode & BTR_SEARCH_LEAF + || latch_mode & BTR_MODIFY_LEAF); + mtr_s_lock(dict_index_get_lock(index), mtr); + } else { + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_SX_LOCK) + || mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_S_LOCK) + || mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK)); + } + + const page_size_t& page_size = dict_table_page_size(index->table); + + /* Pop each node/page to be searched from "path" structure + and do a search on it. Please note, any pages that are in + the "path" structure are protected by "page" lock, so tey + cannot be shrunk away */ + do { + buf_block_t* block; + node_seq_t path_ssn; + const page_t* page; + ulint rw_latch = RW_X_LATCH; + ulint tree_idx; + + mutex_enter(&rtr_info->rtr_path_mutex); + next_rec = rtr_info->path->back(); + rtr_info->path->pop_back(); + level = next_rec.level; + path_ssn = next_rec.seq_no; + tree_idx = btr_cur->tree_height - level - 1; + + /* Maintain the parent path info as well, if needed */ + if (need_parent && !skip_parent && !new_split) { + ulint old_level; + ulint new_level; + + ut_ad(!rtr_info->parent_path->empty()); + + /* Cleanup unused parent info */ + if (rtr_info->parent_path->back().cursor) { + btr_pcur_close( + rtr_info->parent_path->back().cursor); + ut_free(rtr_info->parent_path->back().cursor); + } + + old_level = rtr_info->parent_path->back().level; + + rtr_info->parent_path->pop_back(); + + ut_ad(!rtr_info->parent_path->empty()); + + /* check whether there is a level change. If so, + the current parent path needs to pop enough + nodes to adjust to the new search page */ + new_level = rtr_info->parent_path->back().level; + + if (old_level < new_level) { + rtr_adjust_parent_path( + rtr_info, next_rec.page_no); + } + + ut_ad(!rtr_info->parent_path->empty()); + + ut_ad(next_rec.page_no + == rtr_info->parent_path->back().child_no); + } + + mutex_exit(&rtr_info->rtr_path_mutex); + + skip_parent = false; + new_split = false; + + /* Once we have pages in "path", these pages are + predicate page locked, so they can't be shrunk away. + They also have SSN (split sequence number) to detect + splits, so we can directly latch single page while + getting them. They can be unlatched if not qualified. 
+ One reason for pre-latch is that we might need to position + some parent position (requires latch) during search */ + if (level == 0) { + /* S latched for SEARCH_LEAF, and X latched + for MODIFY_LEAF */ + if (my_latch_mode <= BTR_MODIFY_LEAF) { + rw_latch = my_latch_mode; + } + + if (my_latch_mode == BTR_CONT_MODIFY_TREE + || my_latch_mode == BTR_MODIFY_TREE) { + rw_latch = RW_NO_LATCH; + } + + } else if (level == target_level) { + rw_latch = RW_X_LATCH; + } + + /* Release previous locked blocks */ + if (my_latch_mode != BTR_SEARCH_LEAF) { + for (ulint idx = 0; idx < btr_cur->tree_height; + idx++) { + if (rtr_info->tree_blocks[idx]) { + mtr_release_block_at_savepoint( + mtr, + rtr_info->tree_savepoints[idx], + rtr_info->tree_blocks[idx]); + rtr_info->tree_blocks[idx] = NULL; + } + } + for (ulint idx = RTR_MAX_LEVELS; idx < RTR_MAX_LEVELS + 3; + idx++) { + if (rtr_info->tree_blocks[idx]) { + mtr_release_block_at_savepoint( + mtr, + rtr_info->tree_savepoints[idx], + rtr_info->tree_blocks[idx]); + rtr_info->tree_blocks[idx] = NULL; + } + } + } + + /* set up savepoint to record any locks to be taken */ + rtr_info->tree_savepoints[tree_idx] = mtr_set_savepoint(mtr); + +#ifdef UNIV_RTR_DEBUG + ut_ad(!(rw_lock_own(&btr_cur->page_cur.block->lock, RW_LOCK_X) + || + rw_lock_own(&btr_cur->page_cur.block->lock, RW_LOCK_S)) + || my_latch_mode == BTR_MODIFY_TREE + || my_latch_mode == BTR_CONT_MODIFY_TREE + || !page_is_leaf(buf_block_get_frame( + btr_cur->page_cur.block))); +#endif /* UNIV_RTR_DEBUG */ + + page_id_t page_id(space, next_rec.page_no); + dberr_t err = DB_SUCCESS; + + block = buf_page_get_gen( + page_id, page_size, + rw_latch, NULL, BUF_GET, __FILE__, __LINE__, mtr, &err); + + if (block == NULL) { + continue; + } else if (rw_latch != RW_NO_LATCH) { + ut_ad(!dict_index_is_ibuf(index)); + buf_block_dbg_add_level(block, SYNC_TREE_NODE); + } + + rtr_info->tree_blocks[tree_idx] = block; + + page = buf_block_get_frame(block); + page_ssn = page_get_ssn_id(page); + + /* If there are splits, push the splitted page. 
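+	A page whose SSN is newer than the SSN recorded in the path has
+	split since it was pushed, so its right sibling is pushed here and
+	will be visited as well.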
+ Note that we have SX lock on index->lock, there + should not be any split/shrink happening here */ + if (page_ssn > path_ssn) { + ulint next_page_no = btr_page_get_next(page, mtr); + rtr_non_leaf_stack_push( + rtr_info->path, next_page_no, path_ssn, + level, 0, NULL, 0); + + if (!srv_read_only_mode + && mode != PAGE_CUR_RTREE_INSERT + && mode != PAGE_CUR_RTREE_LOCATE) { + ut_ad(rtr_info->thr); + lock_place_prdt_page_lock( + space, next_page_no, index, + rtr_info->thr); + } + new_split = true; +#if UNIV_GIS_DEBUG + fprintf(stderr, + "GIS_DIAG: Splitted page found: %d, %ld\n", + static_cast(need_parent), next_page_no); +#endif + } + + page_cursor = btr_cur_get_page_cur(btr_cur); + page_cursor->rec = NULL; + + if (mode == PAGE_CUR_RTREE_LOCATE) { + if (level == target_level) { + ulint low_match; + + found = false; + + low_match = page_cur_search( + block, index, tuple, + PAGE_CUR_LE, + btr_cur_get_page_cur(btr_cur)); + + if (low_match == dtuple_get_n_fields_cmp( + tuple)) { + rec_t* rec = btr_cur_get_rec(btr_cur); + + if (!rec_get_deleted_flag(rec, + dict_table_is_comp(index->table)) + || (!for_delete && !for_undo_ins)) { + found = true; + btr_cur->low_match = low_match; + } else { + /* mark we found deleted row */ + btr_cur->rtr_info->fd_del + = true; + } + } + } else { + found = rtr_cur_search_with_match( + block, index, tuple, + PAGE_CUR_RTREE_LOCATE, page_cursor, + btr_cur->rtr_info); + + /* Save the position of parent if needed */ + if (found && need_parent) { + btr_pcur_t* r_cursor = + rtr_get_parent_cursor( + btr_cur, level, false); + + rec_t* rec = page_cur_get_rec( + page_cursor); + page_cur_position( + rec, block, + btr_pcur_get_page_cur(r_cursor)); + r_cursor->pos_state = + BTR_PCUR_IS_POSITIONED; + r_cursor->latch_mode = my_latch_mode; + btr_pcur_store_position(r_cursor, mtr); +#ifdef UNIV_DEBUG + ulint num_stored = + rtr_store_parent_path( + block, btr_cur, + rw_latch, level, mtr); + ut_ad(num_stored > 0); +#else + rtr_store_parent_path( + block, btr_cur, rw_latch, + level, mtr); +#endif /* UNIV_DEBUG */ + } + } + } else { + found = rtr_cur_search_with_match( + block, index, tuple, mode, page_cursor, + btr_cur->rtr_info); + } + + /* Attach predicate lock if needed, no matter whether + there are matched records */ + if (mode != PAGE_CUR_RTREE_INSERT + && mode != PAGE_CUR_RTREE_LOCATE + && mode >= PAGE_CUR_CONTAIN + && btr_cur->rtr_info->need_prdt_lock + && found) { + lock_prdt_t prdt; + + trx_t* trx = thr_get_trx( + btr_cur->rtr_info->thr); + lock_mutex_enter(); + lock_init_prdt_from_mbr( + &prdt, &btr_cur->rtr_info->mbr, + mode, trx->lock.lock_heap); + lock_mutex_exit(); + + if (rw_latch == RW_NO_LATCH) { + rw_lock_s_lock(&(block->lock)); + } + + lock_prdt_lock(block, &prdt, index, LOCK_S, + LOCK_PREDICATE, btr_cur->rtr_info->thr, + mtr); + + if (rw_latch == RW_NO_LATCH) { + rw_lock_s_unlock(&(block->lock)); + } + } + + if (found) { + if (level == target_level) { + page_cur_t* r_cur;; + + if (my_latch_mode == BTR_MODIFY_TREE + && level == 0) { + ut_ad(rw_latch == RW_NO_LATCH); + page_id_t my_page_id( + space, block->page.id.page_no()); + + btr_cur_latch_leaves( + block, my_page_id, + page_size, BTR_MODIFY_TREE, + btr_cur, mtr); + } + + r_cur = btr_cur_get_page_cur(btr_cur); + + page_cur_position( + page_cur_get_rec(page_cursor), + page_cur_get_block(page_cursor), + r_cur); + + break; + } + + /* Keep the parent path node, which points to + last node just located */ + skip_parent = true; + } else { + /* Release latch on the current page */ + 
ut_ad(rtr_info->tree_blocks[tree_idx]); + + mtr_release_block_at_savepoint( + mtr, rtr_info->tree_savepoints[tree_idx], + rtr_info->tree_blocks[tree_idx]); + rtr_info->tree_blocks[tree_idx] = NULL; + } + + } while (!rtr_info->path->empty()); + + const rec_t* rec = btr_cur_get_rec(btr_cur); + + if (page_rec_is_infimum(rec) || page_rec_is_supremum(rec)) { + mtr_commit(mtr); + mtr_start(mtr); + } else if (!index_locked) { + mtr_memo_release(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK); + } + + return(found); +} + +/*************************************************************//** +Find the next matching record. This function will first exhaust +the copied record listed in the rtr_info->matches vector before +moving to the next page +@return true if there is suitable record found, otherwise false */ +bool +rtr_pcur_move_to_next( +/*==================*/ + const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in + tuple must be set so that it cannot get + compared to the node ptr page number field! */ + page_cur_mode_t mode, /*!< in: cursor search mode */ + btr_pcur_t* cursor, /*!< in: persistent cursor; NOTE that the + function may release the page latch */ + ulint level, /*!< in: target level */ + mtr_t* mtr) /*!< in: mtr */ +{ + rtr_info_t* rtr_info = cursor->btr_cur.rtr_info; + + ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED); + + mutex_enter(&rtr_info->matches->rtr_match_mutex); + /* First retrieve the next record on the current page */ + if (!rtr_info->matches->matched_recs->empty()) { + rtr_rec_t rec; + rec = rtr_info->matches->matched_recs->back(); + rtr_info->matches->matched_recs->pop_back(); + mutex_exit(&rtr_info->matches->rtr_match_mutex); + + cursor->btr_cur.page_cur.rec = rec.r_rec; + cursor->btr_cur.page_cur.block = &rtr_info->matches->block; + + DEBUG_SYNC_C("rtr_pcur_move_to_next_return"); + return(true); + } + + mutex_exit(&rtr_info->matches->rtr_match_mutex); + + /* Fetch the next page */ + return(rtr_pcur_getnext_from_path(tuple, mode, &cursor->btr_cur, + level, cursor->latch_mode, + false, mtr)); +} + +/*************************************************************//** +Check if the cursor holds record pointing to the specified child page +@return true if it is (pointing to the child page) false otherwise */ +static +bool +rtr_compare_cursor_rec( +/*===================*/ + dict_index_t* index, /*!< in: index */ + btr_cur_t* cursor, /*!< in: Cursor to check */ + ulint page_no, /*!< in: desired child page number */ + mem_heap_t** heap) /*!< in: memory heap */ +{ + const rec_t* rec; + ulint* offsets; + + rec = btr_cur_get_rec(cursor); + + offsets = rec_get_offsets( + rec, index, NULL, ULINT_UNDEFINED, heap); + + return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no); +} + +/**************************************************************//** +Initializes and opens a persistent cursor to an index tree. It should be +closed with btr_pcur_close. Mainly called by row_search_index_entry() */ +void +rtr_pcur_open_low( +/*==============*/ + dict_index_t* index, /*!< in: index */ + ulint level, /*!< in: level in the rtree */ + const dtuple_t* tuple, /*!< in: tuple on which search done */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_RTREE_LOCATE, ... */ + ulint latch_mode,/*!< in: BTR_SEARCH_LEAF, ... 
*/ + btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr) /*!< in: mtr */ +{ + btr_cur_t* btr_cursor; + ulint n_fields; + ulint low_match; + rec_t* rec; + bool tree_latched = false; + bool for_delete = false; + bool for_undo_ins = false; + + ut_ad(level == 0); + + ut_ad(latch_mode & BTR_MODIFY_LEAF || latch_mode & BTR_MODIFY_TREE); + ut_ad(mode == PAGE_CUR_RTREE_LOCATE); + + /* Initialize the cursor */ + + btr_pcur_init(cursor); + + for_delete = latch_mode & BTR_RTREE_DELETE_MARK; + for_undo_ins = latch_mode & BTR_RTREE_UNDO_INS; + + cursor->latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode); + cursor->search_mode = mode; + + /* Search with the tree cursor */ + + btr_cursor = btr_pcur_get_btr_cur(cursor); + + btr_cursor->rtr_info = rtr_create_rtr_info(false, false, + btr_cursor, index); + + /* Purge will SX lock the tree instead of take Page Locks */ + if (btr_cursor->thr) { + btr_cursor->rtr_info->need_page_lock = true; + btr_cursor->rtr_info->thr = btr_cursor->thr; + } + + btr_cur_search_to_nth_level(index, level, tuple, mode, latch_mode, + btr_cursor, 0, file, line, mtr); + cursor->pos_state = BTR_PCUR_IS_POSITIONED; + + cursor->trx_if_known = NULL; + + low_match = btr_pcur_get_low_match(cursor); + + rec = btr_pcur_get_rec(cursor); + + n_fields = dtuple_get_n_fields(tuple); + + if (latch_mode & BTR_ALREADY_S_LATCHED) { + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_S_LOCK)); + tree_latched = true; + } + + if (latch_mode & BTR_MODIFY_TREE) { + ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK) + || mtr_memo_contains(mtr, dict_index_get_lock(index), + MTR_MEMO_SX_LOCK)); + tree_latched = true; + } + + if (page_rec_is_infimum(rec) || low_match != n_fields + || (rec_get_deleted_flag(rec, dict_table_is_comp(index->table)) + && (for_delete || for_undo_ins))) { + + if (rec_get_deleted_flag(rec, dict_table_is_comp(index->table)) + && for_delete) { + btr_cursor->rtr_info->fd_del = true; + btr_cursor->low_match = 0; + } + /* Did not find matched row in first dive. Release + latched block if any before search more pages */ + if (latch_mode & BTR_MODIFY_LEAF) { + ulint tree_idx = btr_cursor->tree_height - 1; + rtr_info_t* rtr_info = btr_cursor->rtr_info; + + ut_ad(level == 0); + + if (rtr_info->tree_blocks[tree_idx]) { + mtr_release_block_at_savepoint( + mtr, + rtr_info->tree_savepoints[tree_idx], + rtr_info->tree_blocks[tree_idx]); + rtr_info->tree_blocks[tree_idx] = NULL; + } + } + + bool ret = rtr_pcur_getnext_from_path( + tuple, mode, btr_cursor, level, latch_mode, + tree_latched, mtr); + + if (ret) { + low_match = btr_pcur_get_low_match(cursor); + ut_ad(low_match == n_fields); + } + } +} + +/************************************************************//** +Returns the father block to a page. It is assumed that mtr holds +an X or SX latch on the tree. 
+@return rec_get_offsets() of the node pointer record */ +ulint* +rtr_page_get_father_block( +/*======================*/ + ulint* offsets,/*!< in: work area for the return value */ + mem_heap_t* heap, /*!< in: memory heap to use */ + dict_index_t* index, /*!< in: b-tree index */ + buf_block_t* block, /*!< in: child page in the index */ + mtr_t* mtr, /*!< in: mtr */ + btr_cur_t* sea_cur,/*!< in: search cursor, contains information + about parent nodes in search */ + btr_cur_t* cursor) /*!< out: cursor on node pointer record, + its page x-latched */ +{ + rec_t* rec = page_rec_get_next( + page_get_infimum_rec(buf_block_get_frame(block))); + btr_cur_position(index, rec, block, cursor); + + return(rtr_page_get_father_node_ptr(offsets, heap, sea_cur, + cursor, mtr)); +} + +/************************************************************//** +Returns the upper level node pointer to a R-Tree page. It is assumed +that mtr holds an x-latch on the tree. +@return rec_get_offsets() of the node pointer record */ +ulint* +rtr_page_get_father_node_ptr_func( +/*==============================*/ + ulint* offsets,/*!< in: work area for the return value */ + mem_heap_t* heap, /*!< in: memory heap to use */ + btr_cur_t* sea_cur,/*!< in: search cursor */ + btr_cur_t* cursor, /*!< in: cursor pointing to user record, + out: cursor on node pointer record, + its page x-latched */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr) /*!< in: mtr */ +{ + dtuple_t* tuple; + rec_t* user_rec; + rec_t* node_ptr; + ulint level; + ulint page_no; + dict_index_t* index; + rtr_mbr_t mbr; + + page_no = btr_cur_get_block(cursor)->page.id.page_no(); + index = btr_cur_get_index(cursor); + + ut_ad(srv_read_only_mode + || mtr_memo_contains_flagged(mtr, dict_index_get_lock(index), + MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)); + + ut_ad(dict_index_get_page(index) != page_no); + + level = btr_page_get_level(btr_cur_get_page(cursor), mtr); + + user_rec = btr_cur_get_rec(cursor); + ut_a(page_rec_is_user_rec(user_rec)); + + offsets = rec_get_offsets(user_rec, index, offsets, + ULINT_UNDEFINED, &heap); + rtr_get_mbr_from_rec(user_rec, offsets, &mbr); + + tuple = rtr_index_build_node_ptr( + index, &mbr, user_rec, page_no, heap, level); + + if (sea_cur && !sea_cur->rtr_info) { + sea_cur = NULL; + } + + rtr_get_father_node(index, level + 1, tuple, sea_cur, cursor, + page_no, mtr); + + node_ptr = btr_cur_get_rec(cursor); + ut_ad(!page_rec_is_comp(node_ptr) + || rec_get_status(node_ptr) == REC_STATUS_NODE_PTR); + offsets = rec_get_offsets(node_ptr, index, offsets, + ULINT_UNDEFINED, &heap); + + ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets); + + if (child_page != page_no) { + const rec_t* print_rec; + + ib::fatal error; + + error << "Corruption of index " << index->name + << " of table " << index->table->name + << " parent page " << page_no + << " child page " << child_page; + + print_rec = page_rec_get_next( + page_get_infimum_rec(page_align(user_rec))); + offsets = rec_get_offsets(print_rec, index, + offsets, ULINT_UNDEFINED, &heap); + error << "; child "; + rec_print(error.m_oss, print_rec, + rec_get_info_bits(print_rec, rec_offs_comp(offsets)), + offsets); + offsets = rec_get_offsets(node_ptr, index, offsets, + ULINT_UNDEFINED, &heap); + error << "; parent "; + rec_print(error.m_oss, print_rec, + rec_get_info_bits(print_rec, rec_offs_comp(offsets)), + offsets); + + error << ". You should dump + drop + reimport the table to" + " fix the corruption. 
If the crash happens at" + " database startup, see " REFMAN + "forcing-innodb-recovery.html about forcing" + " recovery. Then dump + drop + reimport."; + } + + return(offsets); +} + +/********************************************************************//** +Returns the upper level node pointer to a R-Tree page. It is assumed +that mtr holds an x-latch on the tree. */ +void +rtr_get_father_node( +/*================*/ + dict_index_t* index, /*!< in: index */ + ulint level, /*!< in: the tree level of search */ + const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in + tuple must be set so that it cannot get + compared to the node ptr page number field! */ + btr_cur_t* sea_cur,/*!< in: search cursor */ + btr_cur_t* btr_cur,/*!< in/out: tree cursor; the cursor page is + s- or x-latched, but see also above! */ + ulint page_no,/*!< Current page no */ + mtr_t* mtr) /*!< in: mtr */ +{ + mem_heap_t* heap = NULL; + bool ret = false; + const rec_t* rec; + ulint n_fields; + bool new_rtr = false; + +get_parent: + /* Try to optimally locate the parent node. Level should always + less than sea_cur->tree_height unless the root is splitting */ + if (sea_cur && sea_cur->tree_height > level) { + + ut_ad(mtr_memo_contains_flagged(mtr, + dict_index_get_lock(index), + MTR_MEMO_X_LOCK + | MTR_MEMO_SX_LOCK)); + ret = rtr_cur_restore_position( + BTR_CONT_MODIFY_TREE, sea_cur, level, mtr); + + /* Once we block shrink tree nodes while there are + active search on it, this optimal locating should always + succeeds */ + ut_ad(ret); + + if (ret) { + btr_pcur_t* r_cursor = rtr_get_parent_cursor( + sea_cur, level, false); + + rec = btr_pcur_get_rec(r_cursor); + + ut_ad(r_cursor->rel_pos == BTR_PCUR_ON); + page_cur_position(rec, + btr_pcur_get_block(r_cursor), + btr_cur_get_page_cur(btr_cur)); + ut_ad(rtr_compare_cursor_rec( + index, btr_cur, page_no, &heap)); + goto func_exit; + } + } + + /* We arrive here in one of two scenario + 1) check table and btr_valide + 2) index root page being raised */ + ut_ad(!sea_cur || sea_cur->tree_height == level); + + if (btr_cur->rtr_info) { + rtr_clean_rtr_info(btr_cur->rtr_info, true); + } else { + new_rtr = true; + } + + btr_cur->rtr_info = rtr_create_rtr_info(false, false, btr_cur, index); + + if (sea_cur && sea_cur->tree_height == level) { + /* root split, and search the new root */ + btr_cur_search_to_nth_level( + index, level, tuple, PAGE_CUR_RTREE_LOCATE, + BTR_CONT_MODIFY_TREE, btr_cur, 0, + __FILE__, __LINE__, mtr); + + + } else { + /* btr_validate */ + ut_ad(level >= 1); + ut_ad(!sea_cur); + + btr_cur_search_to_nth_level( + index, level - 1, tuple, PAGE_CUR_RTREE_LOCATE, + BTR_CONT_MODIFY_TREE, btr_cur, 0, + __FILE__, __LINE__, mtr); + + rec = btr_cur_get_rec(btr_cur); + n_fields = dtuple_get_n_fields_cmp(tuple); + + if (page_rec_is_infimum(rec) + || (btr_cur->low_match != n_fields)) { + ret = rtr_pcur_getnext_from_path( + tuple, PAGE_CUR_RTREE_LOCATE, btr_cur, + level - 1, BTR_CONT_MODIFY_TREE, + true, mtr); + + ut_ad(ret && btr_cur->low_match == n_fields); + } + + /* Since there could be some identical recs in different + pages, we still need to compare the page_no field to + verify we have the right parent. 
*/ + btr_pcur_t* r_cursor = rtr_get_parent_cursor(btr_cur, + level, + false); + rec = btr_pcur_get_rec(r_cursor); + + ulint* offsets = rec_get_offsets(rec, index, NULL, + ULINT_UNDEFINED, &heap); + while (page_no != btr_node_ptr_get_child_page_no(rec, offsets)) { + ret = rtr_pcur_getnext_from_path( + tuple, PAGE_CUR_RTREE_LOCATE, btr_cur, + level - 1, BTR_CONT_MODIFY_TREE, + true, mtr); + + ut_ad(ret && btr_cur->low_match == n_fields); + + /* There must be a rec in the path, if the path + is run out, the spatial index is corrupted. */ + if (!ret) { + mutex_enter(&dict_sys->mutex); + dict_set_corrupted_index_cache_only(index); + mutex_exit(&dict_sys->mutex); + + ib::info() << "InnoDB: Corruption of a" + " spatial index " << index->name + << " of table " << index->table->name; + break; + } + r_cursor = rtr_get_parent_cursor(btr_cur, level, false); + rec = btr_pcur_get_rec(r_cursor); + offsets = rec_get_offsets(rec, index, NULL, + ULINT_UNDEFINED, &heap); + } + + sea_cur = btr_cur; + goto get_parent; + } + + ret = rtr_compare_cursor_rec( + index, btr_cur, page_no, &heap); + + ut_ad(ret); + +func_exit: + if (heap) { + mem_heap_free(heap); + } + + if (new_rtr && btr_cur->rtr_info) { + rtr_clean_rtr_info(btr_cur->rtr_info, true); + btr_cur->rtr_info = NULL; + } +} + +/*******************************************************************//** +Create a RTree search info structure */ +rtr_info_t* +rtr_create_rtr_info( +/******************/ + bool need_prdt, /*!< in: Whether predicate lock + is needed */ + bool init_matches, /*!< in: Whether to initiate the + "matches" structure for collecting + matched leaf records */ + btr_cur_t* cursor, /*!< in: tree search cursor */ + dict_index_t* index) /*!< in: index struct */ +{ + rtr_info_t* rtr_info; + + index = index ? 
index : cursor->index; + ut_ad(index); + + rtr_info = static_cast(ut_zalloc_nokey(sizeof(*rtr_info))); + + rtr_info->allocated = true; + rtr_info->cursor = cursor; + rtr_info->index = index; + + if (init_matches) { + rtr_info->heap = mem_heap_create(sizeof(*(rtr_info->matches))); + rtr_info->matches = static_cast( + mem_heap_zalloc( + rtr_info->heap, + sizeof(*rtr_info->matches))); + + rtr_info->matches->matched_recs + = UT_NEW_NOKEY(rtr_rec_vector()); + + rtr_info->matches->bufp = page_align(rtr_info->matches->rec_buf + + UNIV_PAGE_SIZE_MAX + 1); + mutex_create(LATCH_ID_RTR_MATCH_MUTEX, + &rtr_info->matches->rtr_match_mutex); + rw_lock_create(PFS_NOT_INSTRUMENTED, + &(rtr_info->matches->block.lock), + SYNC_LEVEL_VARYING); + } + + rtr_info->path = UT_NEW_NOKEY(rtr_node_path_t()); + rtr_info->parent_path = UT_NEW_NOKEY(rtr_node_path_t()); + rtr_info->need_prdt_lock = need_prdt; + mutex_create(LATCH_ID_RTR_PATH_MUTEX, + &rtr_info->rtr_path_mutex); + + mutex_enter(&index->rtr_track->rtr_active_mutex); + index->rtr_track->rtr_active->push_back(rtr_info); + mutex_exit(&index->rtr_track->rtr_active_mutex); + return(rtr_info); +} + +/*******************************************************************//** +Update a btr_cur_t with rtr_info */ +void +rtr_info_update_btr( +/******************/ + btr_cur_t* cursor, /*!< in/out: tree cursor */ + rtr_info_t* rtr_info) /*!< in: rtr_info to set to the + cursor */ +{ + ut_ad(rtr_info); + + cursor->rtr_info = rtr_info; +} + +/*******************************************************************//** +Initialize a R-Tree Search structure */ +void +rtr_init_rtr_info( +/****************/ + rtr_info_t* rtr_info, /*!< in: rtr_info to set to the + cursor */ + bool need_prdt, /*!< in: Whether predicate lock is + needed */ + btr_cur_t* cursor, /*!< in: tree search cursor */ + dict_index_t* index, /*!< in: index structure */ + bool reinit) /*!< in: Whether this is a reinit */ +{ + ut_ad(rtr_info); + + if (!reinit) { + /* Reset all members. 
*/ + rtr_info->path = NULL; + rtr_info->parent_path = NULL; + rtr_info->matches = NULL; + + mutex_create(LATCH_ID_RTR_PATH_MUTEX, + &rtr_info->rtr_path_mutex); + + memset(rtr_info->tree_blocks, 0x0, + sizeof(rtr_info->tree_blocks)); + memset(rtr_info->tree_savepoints, 0x0, + sizeof(rtr_info->tree_savepoints)); + rtr_info->mbr.xmin = 0.0; + rtr_info->mbr.xmax = 0.0; + rtr_info->mbr.ymin = 0.0; + rtr_info->mbr.ymax = 0.0; + rtr_info->thr = NULL; + rtr_info->heap = NULL; + rtr_info->cursor = NULL; + rtr_info->index = NULL; + rtr_info->need_prdt_lock = false; + rtr_info->need_page_lock = false; + rtr_info->allocated = false; + rtr_info->mbr_adj = false; + rtr_info->fd_del = false; + rtr_info->search_tuple = NULL; + rtr_info->search_mode = PAGE_CUR_UNSUPP; + } + + ut_ad(!rtr_info->matches || rtr_info->matches->matched_recs->empty()); + + rtr_info->path = UT_NEW_NOKEY(rtr_node_path_t()); + rtr_info->parent_path = UT_NEW_NOKEY(rtr_node_path_t()); + rtr_info->need_prdt_lock = need_prdt; + rtr_info->cursor = cursor; + rtr_info->index = index; + + mutex_enter(&index->rtr_track->rtr_active_mutex); + index->rtr_track->rtr_active->push_back(rtr_info); + mutex_exit(&index->rtr_track->rtr_active_mutex); +} + +/**************************************************************//** +Clean up R-Tree search structure */ +void +rtr_clean_rtr_info( +/*===============*/ + rtr_info_t* rtr_info, /*!< in: RTree search info */ + bool free_all) /*!< in: need to free rtr_info itself */ +{ + dict_index_t* index; + bool initialized = false; + + if (!rtr_info) { + return; + } + + index = rtr_info->index; + + if (index) { + mutex_enter(&index->rtr_track->rtr_active_mutex); + } + + while (rtr_info->parent_path && !rtr_info->parent_path->empty()) { + btr_pcur_t* cur = rtr_info->parent_path->back().cursor; + rtr_info->parent_path->pop_back(); + + if (cur) { + btr_pcur_close(cur); + ut_free(cur); + } + } + + UT_DELETE(rtr_info->parent_path); + rtr_info->parent_path = NULL; + + if (rtr_info->path != NULL) { + UT_DELETE(rtr_info->path); + rtr_info->path = NULL; + initialized = true; + } + + if (rtr_info->matches) { + rtr_info->matches->used = false; + rtr_info->matches->locked = false; + rtr_info->matches->valid = false; + rtr_info->matches->matched_recs->clear(); + } + + if (index) { + index->rtr_track->rtr_active->remove(rtr_info); + mutex_exit(&index->rtr_track->rtr_active_mutex); + } + + if (free_all) { + if (rtr_info->matches) { + if (rtr_info->matches->matched_recs != NULL) { + UT_DELETE(rtr_info->matches->matched_recs); + } + + rw_lock_free(&(rtr_info->matches->block.lock)); + + mutex_destroy(&rtr_info->matches->rtr_match_mutex); + } + + if (rtr_info->heap) { + mem_heap_free(rtr_info->heap); + } + + if (initialized) { + mutex_destroy(&rtr_info->rtr_path_mutex); + } + + if (rtr_info->allocated) { + ut_free(rtr_info); + } + } +} + +/**************************************************************//** +Rebuilt the "path" to exclude the removing page no */ +static +void +rtr_rebuild_path( +/*=============*/ + rtr_info_t* rtr_info, /*!< in: RTree search info */ + ulint page_no) /*!< in: need to free rtr_info itself */ +{ + rtr_node_path_t* new_path + = UT_NEW_NOKEY(rtr_node_path_t()); + + rtr_node_path_t::iterator rit; +#ifdef UNIV_DEBUG + ulint before_size = rtr_info->path->size(); +#endif /* UNIV_DEBUG */ + + for (rit = rtr_info->path->begin(); + rit != rtr_info->path->end(); ++rit) { + node_visit_t next_rec = *rit; + + if (next_rec.page_no == page_no) { + continue; + } + + new_path->push_back(next_rec); +#ifdef UNIV_DEBUG + 
node_visit_t rec = new_path->back(); + ut_ad(rec.level < rtr_info->cursor->tree_height + && rec.page_no > 0); +#endif /* UNIV_DEBUG */ + } + + UT_DELETE(rtr_info->path); + + ut_ad(new_path->size() == before_size - 1); + + rtr_info->path = new_path; + + if (!rtr_info->parent_path->empty()) { + rtr_node_path_t* new_parent_path = UT_NEW_NOKEY( + rtr_node_path_t()); + + for (rit = rtr_info->parent_path->begin(); + rit != rtr_info->parent_path->end(); ++rit) { + node_visit_t next_rec = *rit; + + if (next_rec.child_no == page_no) { + btr_pcur_t* cur = next_rec.cursor; + + if (cur) { + btr_pcur_close(cur); + ut_free(cur); + } + + continue; + } + + new_parent_path->push_back(next_rec); + } + UT_DELETE(rtr_info->parent_path); + rtr_info->parent_path = new_parent_path; + } + +} + +/**************************************************************//** +Check whether a discarding page is in anyone's search path */ +void +rtr_check_discard_page( +/*===================*/ + dict_index_t* index, /*!< in: index */ + btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on + the root page */ + buf_block_t* block) /*!< in: block of page to be discarded */ +{ + ulint pageno = block->page.id.page_no(); + rtr_info_t* rtr_info; + rtr_info_active::iterator it; + + mutex_enter(&index->rtr_track->rtr_active_mutex); + + for (it = index->rtr_track->rtr_active->begin(); + it != index->rtr_track->rtr_active->end(); ++it) { + rtr_info = *it; + rtr_node_path_t::iterator rit; + bool found = false; + + if (cursor && rtr_info == cursor->rtr_info) { + continue; + } + + mutex_enter(&rtr_info->rtr_path_mutex); + for (rit = rtr_info->path->begin(); + rit != rtr_info->path->end(); ++rit) { + node_visit_t node = *rit; + + if (node.page_no == pageno) { + found = true; + break; + } + } + + if (found) { + rtr_rebuild_path(rtr_info, pageno); + } + mutex_exit(&rtr_info->rtr_path_mutex); + + if (rtr_info->matches) { + mutex_enter(&rtr_info->matches->rtr_match_mutex); + + if ((&rtr_info->matches->block)->page.id.page_no() + == pageno) { + if (!rtr_info->matches->matched_recs->empty()) { + rtr_info->matches->matched_recs->clear(); + } + ut_ad(rtr_info->matches->matched_recs->empty()); + rtr_info->matches->valid = false; + } + + mutex_exit(&rtr_info->matches->rtr_match_mutex); + } + } + + mutex_exit(&index->rtr_track->rtr_active_mutex); + + lock_mutex_enter(); + lock_prdt_free_from_discard_page(block, lock_sys->prdt_hash); + lock_prdt_free_from_discard_page(block, lock_sys->prdt_page_hash); + lock_mutex_exit(); +} + +/**************************************************************//** +Restores the stored position of a persistent cursor bufferfixing the page */ +bool +rtr_cur_restore_position_func( +/*==========================*/ + ulint latch_mode, /*!< in: BTR_CONT_MODIFY_TREE, ... 
*/ + btr_cur_t* btr_cur, /*!< in: detached persistent cursor */ + ulint level, /*!< in: index level */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr) /*!< in: mtr */ +{ + dict_index_t* index; + mem_heap_t* heap; + btr_pcur_t* r_cursor = rtr_get_parent_cursor(btr_cur, level, false); + dtuple_t* tuple; + bool ret = false; + + ut_ad(mtr); + ut_ad(r_cursor); + ut_ad(mtr->is_active()); + + index = btr_cur_get_index(btr_cur); + + if (r_cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE + || r_cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE) { + return(false); + } + + DBUG_EXECUTE_IF( + "rtr_pessimistic_position", + r_cursor->modify_clock = 100; + ); + + ut_ad(latch_mode == BTR_CONT_MODIFY_TREE); + + if (!buf_pool_is_obsolete(r_cursor->withdraw_clock) + && buf_page_optimistic_get(RW_X_LATCH, + r_cursor->block_when_stored, + r_cursor->modify_clock, file, line, mtr)) { + ut_ad(r_cursor->pos_state == BTR_PCUR_IS_POSITIONED); + + ut_ad(r_cursor->rel_pos == BTR_PCUR_ON); +#ifdef UNIV_DEBUG + do { + const rec_t* rec; + const ulint* offsets1; + const ulint* offsets2; + ulint comp; + + rec = btr_pcur_get_rec(r_cursor); + + heap = mem_heap_create(256); + offsets1 = rec_get_offsets( + r_cursor->old_rec, index, NULL, + r_cursor->old_n_fields, &heap); + offsets2 = rec_get_offsets( + rec, index, NULL, + r_cursor->old_n_fields, &heap); + + comp = rec_offs_comp(offsets1); + + if (rec_get_info_bits(r_cursor->old_rec, comp) + & REC_INFO_MIN_REC_FLAG) { + ut_ad(rec_get_info_bits(rec, comp) + & REC_INFO_MIN_REC_FLAG); + } else { + + ut_ad(!cmp_rec_rec(r_cursor->old_rec, + rec, offsets1, offsets2, + index)); + } + + mem_heap_free(heap); + } while (0); +#endif /* UNIV_DEBUG */ + + return(true); + } + + /* Page has changed, for R-Tree, the page cannot be shrunk away, + so we search the page and its right siblings */ + buf_block_t* block; + node_seq_t page_ssn; + const page_t* page; + page_cur_t* page_cursor; + node_visit_t* node = rtr_get_parent_node(btr_cur, level, false); + ulint space = dict_index_get_space(index); + node_seq_t path_ssn = node->seq_no; + page_size_t page_size = dict_table_page_size(index->table); + + ulint page_no = node->page_no; + + heap = mem_heap_create(256); + + tuple = dict_index_build_data_tuple(index, r_cursor->old_rec, + r_cursor->old_n_fields, heap); + + page_cursor = btr_pcur_get_page_cur(r_cursor); + ut_ad(r_cursor == node->cursor); + +search_again: + page_id_t page_id(space, page_no); + dberr_t err = DB_SUCCESS; + + block = buf_page_get_gen( + page_id, page_size, RW_X_LATCH, NULL, + BUF_GET, __FILE__, __LINE__, mtr, &err); + + ut_ad(block); + + /* Get the page SSN */ + page = buf_block_get_frame(block); + page_ssn = page_get_ssn_id(page); + + ulint low_match = page_cur_search( + block, index, tuple, PAGE_CUR_LE, page_cursor); + + if (low_match == r_cursor->old_n_fields) { + const rec_t* rec; + const ulint* offsets1; + const ulint* offsets2; + ulint comp; + + rec = btr_pcur_get_rec(r_cursor); + + offsets1 = rec_get_offsets( + r_cursor->old_rec, index, NULL, + r_cursor->old_n_fields, &heap); + offsets2 = rec_get_offsets( + rec, index, NULL, + r_cursor->old_n_fields, &heap); + + comp = rec_offs_comp(offsets1); + + if ((rec_get_info_bits(r_cursor->old_rec, comp) + & REC_INFO_MIN_REC_FLAG) + && (rec_get_info_bits(rec, comp) & REC_INFO_MIN_REC_FLAG)) { + r_cursor->pos_state = BTR_PCUR_IS_POSITIONED; + ret = true; + } else if (!cmp_rec_rec(r_cursor->old_rec, rec, offsets1, offsets2, + index)) { + r_cursor->pos_state = BTR_PCUR_IS_POSITIONED; + 
ret = true; + } + } + + /* Check the page SSN to see if it has been splitted, if so, search + the right page */ + if (!ret && page_ssn > path_ssn) { + page_no = btr_page_get_next(page, mtr); + goto search_again; + } + + mem_heap_free(heap); + + return(ret); +} + +/****************************************************************//** +Copy the leaf level R-tree record, and push it to matched_rec in rtr_info */ +static +void +rtr_leaf_push_match_rec( +/*====================*/ + const rec_t* rec, /*!< in: record to copy */ + rtr_info_t* rtr_info, /*!< in/out: search stack */ + ulint* offsets, /*!< in: offsets */ + bool is_comp) /*!< in: is compact format */ +{ + byte* buf; + matched_rec_t* match_rec = rtr_info->matches; + rec_t* copy; + ulint data_len; + rtr_rec_t rtr_rec; + + buf = match_rec->block.frame + match_rec->used; + + copy = rec_copy(buf, rec, offsets); + + if (is_comp) { + rec_set_next_offs_new(copy, PAGE_NEW_SUPREMUM); + } else { + rec_set_next_offs_old(copy, PAGE_OLD_SUPREMUM); + } + + rtr_rec.r_rec = copy; + rtr_rec.locked = false; + + match_rec->matched_recs->push_back(rtr_rec); + match_rec->valid = true; + + data_len = rec_offs_data_size(offsets) + rec_offs_extra_size(offsets); + match_rec->used += data_len; + + ut_ad(match_rec->used < UNIV_PAGE_SIZE); +} + +/**************************************************************//** +Store the parent path cursor +@return number of cursor stored */ +ulint +rtr_store_parent_path( +/*==================*/ + const buf_block_t* block, /*!< in: block of the page */ + btr_cur_t* btr_cur,/*!< in/out: persistent cursor */ + ulint latch_mode, + /*!< in: latch_mode */ + ulint level, /*!< in: index level */ + mtr_t* mtr) /*!< in: mtr */ +{ + ulint num = btr_cur->rtr_info->parent_path->size(); + ulint num_stored = 0; + + while (num >= 1) { + node_visit_t* node = &(*btr_cur->rtr_info->parent_path)[ + num - 1]; + btr_pcur_t* r_cursor = node->cursor; + buf_block_t* cur_block; + + if (node->level > level) { + break; + } + + r_cursor->pos_state = BTR_PCUR_IS_POSITIONED; + r_cursor->latch_mode = latch_mode; + + cur_block = btr_pcur_get_block(r_cursor); + + if (cur_block == block) { + btr_pcur_store_position(r_cursor, mtr); + num_stored++; + } else { + break; + } + + num--; + } + + return(num_stored); +} +/**************************************************************//** +push a nonleaf index node to the search path for insertion */ +static +void +rtr_non_leaf_insert_stack_push( +/*===========================*/ + dict_index_t* index, /*!< in: index descriptor */ + rtr_node_path_t* path, /*!< in/out: search path */ + ulint level, /*!< in: index page level */ + ulint child_no,/*!< in: child page no */ + const buf_block_t* block, /*!< in: block of the page */ + const rec_t* rec, /*!< in: positioned record */ + double mbr_inc)/*!< in: MBR needs to be enlarged */ +{ + node_seq_t new_seq; + btr_pcur_t* my_cursor; + ulint page_no = block->page.id.page_no(); + + my_cursor = static_cast( + ut_malloc_nokey(sizeof(*my_cursor))); + + btr_pcur_init(my_cursor); + + page_cur_position(rec, block, btr_pcur_get_page_cur(my_cursor)); + + (btr_pcur_get_btr_cur(my_cursor))->index = index; + + new_seq = rtr_get_current_ssn_id(index); + rtr_non_leaf_stack_push(path, page_no, new_seq, level, child_no, + my_cursor, mbr_inc); +} + +/** Copy a buf_block_t strcuture, except "block->lock" and "block->mutex". 
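rtr_store_parent_path() and rtr_non_leaf_insert_stack_push() above maintain a per-search stack of the non-leaf nodes visited on the way down, each entry carrying the child page number, the split sequence number (SSN) seen when the node was pushed, and the tree level. A minimal sketch of that stack shape, with simplified types (the real node_visit_t also carries a persistent cursor and the recorded MBR increase, which are omitted here):

#include <cstdint>
#include <vector>

/* Simplified stand-ins for InnoDB's node_visit_t / rtr_node_path_t. */
struct node_visit {
    uint32_t page_no;  /* child page pushed during the descent */
    uint32_t seq_no;   /* split sequence number (SSN) seen at push time */
    uint32_t level;    /* tree level of that page */
};

typedef std::vector<node_visit> rtr_node_path;  /* used as a stack */

/* Push one non-leaf node onto the search path (newest entry last),
mirroring what rtr_non_leaf_stack_push() records. */
inline void path_push(rtr_node_path& path, uint32_t page_no,
                      uint32_t ssn, uint32_t level)
{
    path.push_back(node_visit{page_no, ssn, level});
}

/* Walk the stack from the deepest entry upward and stop at the first
entry above the wanted level, the way rtr_store_parent_path() stops
once node->level > level. */
inline size_t entries_up_to_level(const rtr_node_path& path, uint32_t level)
{
    size_t n = 0;
    for (auto it = path.rbegin(); it != path.rend(); ++it) {
        if (it->level > level) {
            break;
        }
        ++n;
    }
    return n;
}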
+@param[in,out] matches copy to match->block +@param[in] block block to copy */ +static +void +rtr_copy_buf( + matched_rec_t* matches, + const buf_block_t* block) +{ + /* Copy all members of "block" to "matches->block" except "mutex" + and "lock". We skip "mutex" and "lock" because they are not used + from the dummy buf_block_t we create here and because memcpy()ing + them generates (valid) compiler warnings that the vtable pointer + will be copied. It is also undefined what will happen with the + newly memcpy()ed mutex if the source mutex was acquired by + (another) thread while it was copied. */ + memcpy(&matches->block.page, &block->page, sizeof(buf_page_t)); + matches->block.frame = block->frame; +#ifndef UNIV_HOTBACKUP + matches->block.unzip_LRU = block->unzip_LRU; + + ut_d(matches->block.in_unzip_LRU_list = block->in_unzip_LRU_list); + ut_d(matches->block.in_withdraw_list = block->in_withdraw_list); + + /* Skip buf_block_t::mutex */ + /* Skip buf_block_t::lock */ + matches->block.lock_hash_val = block->lock_hash_val; + matches->block.modify_clock = block->modify_clock; + matches->block.n_hash_helps = block->n_hash_helps; + matches->block.n_fields = block->n_fields; + matches->block.left_side = block->left_side; +#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG + matches->block.n_pointers = block->n_pointers; +#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + matches->block.curr_n_fields = block->curr_n_fields; + matches->block.curr_left_side = block->curr_left_side; + matches->block.index = block->index; + matches->block.made_dirty_with_no_latch + = block->made_dirty_with_no_latch; + + ut_d(matches->block.debug_latch = block->debug_latch); + +#endif /* !UNIV_HOTBACKUP */ +} + +/****************************************************************//** +Generate a shadow copy of the page block header to save the +matched records */ +static +void +rtr_init_match( +/*===========*/ + matched_rec_t* matches,/*!< in/out: match to initialize */ + const buf_block_t* block, /*!< in: buffer block */ + const page_t* page) /*!< in: buffer page */ +{ + ut_ad(matches->matched_recs->empty()); + matches->locked = false; + rtr_copy_buf(matches, block); + matches->block.frame = matches->bufp; + matches->valid = false; + /* We have to copy PAGE_W*_SUPREMUM_END bytes so that we can + use infimum/supremum of this page as normal btr page for search. */ + memcpy(matches->block.frame, page, page_is_comp(page) + ? PAGE_NEW_SUPREMUM_END + : PAGE_OLD_SUPREMUM_END); + matches->used = page_is_comp(page) + ? 
PAGE_NEW_SUPREMUM_END + : PAGE_OLD_SUPREMUM_END; +#ifdef RTR_SEARCH_DIAGNOSTIC + ulint pageno = page_get_page_no(page); + fprintf(stderr, "INNODB_RTR: Searching leaf page %d\n", + static_cast(pageno)); +#endif /* RTR_SEARCH_DIAGNOSTIC */ +} + +/****************************************************************//** +Get the bounding box content from an index record */ +void +rtr_get_mbr_from_rec( +/*=================*/ + const rec_t* rec, /*!< in: data tuple */ + const ulint* offsets,/*!< in: offsets array */ + rtr_mbr_t* mbr) /*!< out MBR */ +{ + ulint rec_f_len; + const byte* data; + + data = rec_get_nth_field(rec, offsets, 0, &rec_f_len); + + rtr_read_mbr(data, mbr); +} + +/****************************************************************//** +Get the bounding box content from a MBR data record */ +void +rtr_get_mbr_from_tuple( +/*===================*/ + const dtuple_t* dtuple, /*!< in: data tuple */ + rtr_mbr* mbr) /*!< out: mbr to fill */ +{ + const dfield_t* dtuple_field; + ulint dtuple_f_len; + byte* data; + + dtuple_field = dtuple_get_nth_field(dtuple, 0); + dtuple_f_len = dfield_get_len(dtuple_field); + ut_a(dtuple_f_len >= 4 * sizeof(double)); + + data = static_cast(dfield_get_data(dtuple_field)); + + rtr_read_mbr(data, mbr); +} + +/****************************************************************//** +Searches the right position in rtree for a page cursor. */ +bool +rtr_cur_search_with_match( +/*======================*/ + const buf_block_t* block, /*!< in: buffer block */ + dict_index_t* index, /*!< in: index descriptor */ + const dtuple_t* tuple, /*!< in: data tuple */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_RTREE_INSERT, + PAGE_CUR_RTREE_LOCATE etc. */ + page_cur_t* cursor, /*!< in/out: page cursor */ + rtr_info_t* rtr_info)/*!< in/out: search stack */ +{ + bool found = false; + const page_t* page; + const rec_t* rec; + const rec_t* last_rec; + ulint offsets_[REC_OFFS_NORMAL_SIZE]; + ulint* offsets = offsets_; + mem_heap_t* heap = NULL; + int cmp = 1; + bool is_leaf; + double least_inc = DBL_MAX; + const rec_t* best_rec; + const rec_t* last_match_rec = NULL; + ulint level; + bool match_init = false; + ulint space = block->page.id.space(); + page_cur_mode_t orig_mode = mode; + const rec_t* first_rec = NULL; + + rec_offs_init(offsets_); + + ut_ad(RTREE_SEARCH_MODE(mode)); + + ut_ad(dict_index_is_spatial(index)); + + page = buf_block_get_frame(block); + + is_leaf = page_is_leaf(page); + level = btr_page_get_level(page, mtr); + + if (mode == PAGE_CUR_RTREE_LOCATE) { + ut_ad(level != 0); + mode = PAGE_CUR_WITHIN; + } + + rec = page_dir_slot_get_rec(page_dir_get_nth_slot(page, 0)); + + last_rec = rec; + best_rec = rec; + + if (page_rec_is_infimum(rec)) { + rec = page_rec_get_next_const(rec); + } + + /* Check insert tuple size is larger than first rec, and try to + avoid it if possible */ + if (mode == PAGE_CUR_RTREE_INSERT && !page_rec_is_supremum(rec)) { + + ulint new_rec_size = rec_get_converted_size(index, tuple, 0); + + offsets = rec_get_offsets(rec, index, offsets, + dtuple_get_n_fields_cmp(tuple), + &heap); + + if (rec_offs_size(offsets) < new_rec_size) { + first_rec = rec; + } + + /* If this is the left-most page of this index level + and the table is a compressed table, try to avoid + first page as much as possible, as there will be problem + when update MIN_REC rec in compress table */ + if (buf_block_get_page_zip(block) + && mach_read_from_4(page + FIL_PAGE_PREV) == FIL_NULL + && page_get_n_recs(page) >= 2) { + + rec = page_rec_get_next_const(rec); + } + } + + while 
(!page_rec_is_supremum(rec)) { + offsets = rec_get_offsets(rec, index, offsets, + dtuple_get_n_fields_cmp(tuple), + &heap); + if (!is_leaf) { + switch (mode) { + case PAGE_CUR_CONTAIN: + case PAGE_CUR_INTERSECT: + case PAGE_CUR_MBR_EQUAL: + /* At non-leaf level, we will need to check + both CONTAIN and INTERSECT for either of + the search mode */ + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, PAGE_CUR_CONTAIN); + + if (cmp != 0) { + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, + PAGE_CUR_INTERSECT); + } + break; + case PAGE_CUR_DISJOINT: + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, mode); + + if (cmp != 0) { + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, + PAGE_CUR_INTERSECT); + } + break; + case PAGE_CUR_RTREE_INSERT: + double increase; + double area; + + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, PAGE_CUR_WITHIN); + + if (cmp != 0) { + increase = rtr_rec_cal_increase( + tuple, rec, offsets, &area); + /* Once it goes beyond DBL_MAX, + it would not make sense to record + such value, just make it + DBL_MAX / 2 */ + if (increase >= DBL_MAX) { + increase = DBL_MAX / 2; + } + + if (increase < least_inc) { + least_inc = increase; + best_rec = rec; + } else if (best_rec + && best_rec == first_rec) { + /* if first_rec is set, + we will try to avoid it */ + least_inc = increase; + best_rec = rec; + } + } + break; + default: + /* WITHIN etc. */ + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, mode); + } + } else { + /* At leaf level, INSERT should translate to LE */ + ut_ad(mode != PAGE_CUR_RTREE_INSERT); + + cmp = cmp_dtuple_rec_with_gis( + tuple, rec, offsets, mode); + } + + if (cmp == 0) { + found = true; + + /* If located, the matching node/rec will be pushed + to rtr_info->path for non-leaf nodes, or + rtr_info->matches for leaf nodes */ + if (rtr_info && mode != PAGE_CUR_RTREE_INSERT) { + if (!is_leaf) { + ulint page_no; + node_seq_t new_seq; + + offsets = rec_get_offsets( + rec, index, offsets, + ULINT_UNDEFINED, &heap); + + page_no = btr_node_ptr_get_child_page_no( + rec, offsets); + + ut_ad(level >= 1); + + /* Get current SSN, before we insert + it into the path stack */ + new_seq = rtr_get_current_ssn_id(index); + + rtr_non_leaf_stack_push( + rtr_info->path, + page_no, + new_seq, level - 1, 0, + NULL, 0); + + if (orig_mode + == PAGE_CUR_RTREE_LOCATE) { + rtr_non_leaf_insert_stack_push( + index, + rtr_info->parent_path, + level, page_no, block, + rec, 0); + } + + if (!srv_read_only_mode + && (rtr_info->need_page_lock + || orig_mode + != PAGE_CUR_RTREE_LOCATE)) { + + /* Lock the page, preventing it + from being shrunk */ + lock_place_prdt_page_lock( + space, page_no, index, + rtr_info->thr); + } + } else { + ut_ad(orig_mode + != PAGE_CUR_RTREE_LOCATE); + + if (!match_init) { + rtr_init_match( + rtr_info->matches, + block, page); + match_init = true; + } + + /* Collect matched records on page */ + offsets = rec_get_offsets( + rec, index, offsets, + ULINT_UNDEFINED, &heap); + rtr_leaf_push_match_rec( + rec, rtr_info, offsets, + page_is_comp(page)); + } + + last_match_rec = rec; + } else { + /* This is the insertion case, it will break + once it finds the first MBR that can accomodate + the inserting rec */ + break; + } + } + + last_rec = rec; + + rec = page_rec_get_next_const(rec); + } + + /* All records on page are searched */ + if (page_rec_is_supremum(rec)) { + if (!is_leaf) { + if (!found) { + /* No match case, if it is for insertion, + then we select the record that result in + least increased area */ + if (mode == 
PAGE_CUR_RTREE_INSERT) { + ulint child_no; + ut_ad(least_inc < DBL_MAX); + offsets = rec_get_offsets( + best_rec, index, + offsets, ULINT_UNDEFINED, + &heap); + child_no = + btr_node_ptr_get_child_page_no( + best_rec, offsets); + + rtr_non_leaf_insert_stack_push( + index, rtr_info->parent_path, + level, child_no, block, + best_rec, least_inc); + + page_cur_position(best_rec, block, + cursor); + rtr_info->mbr_adj = true; + } else { + /* Position at the last rec of the + page, if it is not the leaf page */ + page_cur_position(last_rec, block, + cursor); + } + } else { + /* There are matching records, position + in the last matching records */ + if (rtr_info) { + rec = last_match_rec; + page_cur_position( + rec, block, cursor); + } + } + } else if (rtr_info) { + /* Leaf level, no match, position at the + last (supremum) rec */ + if (!last_match_rec) { + page_cur_position(rec, block, cursor); + goto func_exit; + } + + /* There are matched records */ + matched_rec_t* match_rec = rtr_info->matches; + + rtr_rec_t test_rec; + + test_rec = match_rec->matched_recs->back(); +#ifdef UNIV_DEBUG + ulint offsets_2[REC_OFFS_NORMAL_SIZE]; + ulint* offsets2 = offsets_2; + rec_offs_init(offsets_2); + + ut_ad(found); + + /* Verify the record to be positioned is the same + as the last record in matched_rec vector */ + offsets2 = rec_get_offsets(test_rec.r_rec, index, + offsets2, ULINT_UNDEFINED, + &heap); + + offsets = rec_get_offsets(last_match_rec, index, + offsets, ULINT_UNDEFINED, + &heap); + + ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec, + offsets2, offsets, index) == 0); +#endif /* UNIV_DEBUG */ + /* Pop the last match record and position on it */ + match_rec->matched_recs->pop_back(); + page_cur_position(test_rec.r_rec, &match_rec->block, + cursor); + } + } else { + + if (mode == PAGE_CUR_RTREE_INSERT) { + ulint child_no; + ut_ad(!last_match_rec && rec); + + offsets = rec_get_offsets( + rec, index, offsets, ULINT_UNDEFINED, &heap); + + child_no = btr_node_ptr_get_child_page_no(rec, offsets); + + rtr_non_leaf_insert_stack_push( + index, rtr_info->parent_path, level, child_no, + block, rec, 0); + + } else if (rtr_info && found && !is_leaf) { + rec = last_match_rec; + } + + page_cur_position(rec, block, cursor); + } + +#ifdef UNIV_DEBUG + /* Verify that we are positioned at the same child page as pushed in + the path stack */ + if (!is_leaf && (!page_rec_is_supremum(rec) || found) + && mode != PAGE_CUR_RTREE_INSERT) { + ulint page_no; + + offsets = rec_get_offsets(rec, index, offsets, + ULINT_UNDEFINED, &heap); + page_no = btr_node_ptr_get_child_page_no(rec, offsets); + + if (rtr_info && found) { + rtr_node_path_t* path = rtr_info->path; + node_visit_t last_visit = path->back(); + + ut_ad(last_visit.page_no == page_no); + } + } +#endif /* UNIV_DEBUG */ + +func_exit: + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } + + return(found); +} diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc index ae1eb55982a..f57a6d383d9 100644 --- a/storage/innobase/ha/ha0ha.cc +++ b/storage/innobase/ha/ha0ha.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. 
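When rtr_cur_search_with_match() above runs in PAGE_CUR_RTREE_INSERT mode and no entry on the page contains the tuple, it falls back to the record whose MBR needs the least area enlargement, clamping any increase at or above DBL_MAX down to DBL_MAX / 2. A minimal stand-alone sketch of that selection rule over plain rectangles (rtr_rec_cal_increase() works on InnoDB records; the mbr struct and helpers here are simplified stand-ins):

#include <cfloat>
#include <vector>

struct mbr { double xmin, ymin, xmax, ymax; };

static double area(const mbr& r)
{
    return (r.xmax - r.xmin) * (r.ymax - r.ymin);
}

/* Area increase needed for r to also cover t. */
static double enlargement(const mbr& r, const mbr& t)
{
    mbr u = { r.xmin < t.xmin ? r.xmin : t.xmin,
              r.ymin < t.ymin ? r.ymin : t.ymin,
              r.xmax > t.xmax ? r.xmax : t.xmax,
              r.ymax > t.ymax ? r.ymax : t.ymax };
    return area(u) - area(r);
}

/* Pick the entry needing the least enlargement, clamping oversized
values the same way the search loop clamps to DBL_MAX / 2. */
static size_t choose_subtree(const std::vector<mbr>& entries, const mbr& t)
{
    double least_inc = DBL_MAX;
    size_t best = 0;
    for (size_t i = 0; i < entries.size(); i++) {
        double inc = enlargement(entries[i], t);
        if (inc >= DBL_MAX) {
            inc = DBL_MAX / 2;
        }
        if (inc < least_inc) {
            least_inc = inc;
            best = i;
        }
    }
    return best;
}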
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,27 +38,19 @@ Created 8/22/1994 Heikki Tuuri /*************************************************************//** Creates a hash table with at least n array cells. The actual number of cells is chosen to be a prime number slightly bigger than n. -@return own: created table */ -UNIV_INTERN +@return own: created table */ hash_table_t* -ha_create_func( -/*===========*/ - ulint n, /*!< in: number of array cells */ -#ifdef UNIV_SYNC_DEBUG - ulint sync_level, /*!< in: level of the mutexes or rw_locks - in the latching order: this is used in the - debug version */ -#endif /* UNIV_SYNC_DEBUG */ - ulint n_sync_obj, /*!< in: number of mutexes or rw_locks - to protect the hash table: must be a - power of 2, or 0 */ - ulint type) /*!< in: type of datastructure for which - the memory heap is going to be used e.g.: - MEM_HEAP_FOR_BTR_SEARCH or +ib_create( +/*======*/ + ulint n, /*!< in: number of array cells */ + latch_id_t id, /*!< in: latch ID */ + ulint n_sync_obj, + /*!< in: number of mutexes to protect the + hash table: must be a power of 2, or 0 */ + ulint type) /*!< in: type of datastructure for which MEM_HEAP_FOR_PAGE_HASH */ { hash_table_t* table; - ulint i; ut_a(type == MEM_HEAP_FOR_BTR_SEARCH || type == MEM_HEAP_FOR_PAGE_HASH); @@ -71,7 +63,10 @@ ha_create_func( if (n_sync_obj == 0) { table->heap = mem_heap_create_typed( - ut_min(4096, MEM_MAX_ALLOC_IN_BUF), type); + ut_min(static_cast(4096), + MEM_MAX_ALLOC_IN_BUF / 2 + - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), + type); ut_a(table->heap); return(table); @@ -80,61 +75,101 @@ ha_create_func( if (type == MEM_HEAP_FOR_PAGE_HASH) { /* We create a hash table protected by rw_locks for buf_pool->page_hash. */ - hash_create_sync_obj(table, HASH_TABLE_SYNC_RW_LOCK, - n_sync_obj, sync_level); + hash_create_sync_obj( + table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj); } else { - hash_create_sync_obj(table, HASH_TABLE_SYNC_MUTEX, - n_sync_obj, sync_level); + hash_create_sync_obj( + table, HASH_TABLE_SYNC_MUTEX, id, n_sync_obj); } table->heaps = static_cast( - mem_alloc(n_sync_obj * sizeof(void*))); - - for (i = 0; i < n_sync_obj; i++) { - table->heaps[i] = mem_heap_create_typed(4096, type); + ut_malloc_nokey(n_sync_obj * sizeof(void*))); + + for (ulint i = 0; i < n_sync_obj; i++) { + table->heaps[i] = mem_heap_create_typed( + ut_min(static_cast(4096), + MEM_MAX_ALLOC_IN_BUF / 2 + - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), + type); ut_a(table->heaps[i]); } return(table); } +/** Recreate a hash table with at least n array cells. The actual number +of cells is chosen to be a prime number slightly bigger than n. +The new cells are all cleared. The heaps are recreated. +The sync objects are reused. 
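ib_create() above and ib_recreate() below size each per-partition heap with the same expression: never more than 4096 bytes, and never more than half of MEM_MAX_ALLOC_IN_BUF minus the block bookkeeping overhead. A worked sketch of that cap with assumed placeholder values (the real constants depend on UNIV_PAGE_SIZE and the mem_block_t layout, so the numbers below are illustrative only):

#include <algorithm>
#include <cstddef>

/* Assumed placeholder values, not the real InnoDB constants. */
static const size_t ASSUMED_MAX_ALLOC_IN_BUF  = 16384 - 200;
static const size_t ASSUMED_BLOCK_HEADER_SIZE = 80;
static const size_t ASSUMED_SPACE_NEEDED_0    = 8;

/* Initial heap block size used per hash partition: capped at 4096
bytes and at half an in-buffer allocation minus the block overhead,
echoing the ut_min() expression in ib_create()/ib_recreate(). */
static size_t initial_heap_size()
{
    return std::min<size_t>(
        4096,
        ASSUMED_MAX_ALLOC_IN_BUF / 2
            - ASSUMED_BLOCK_HEADER_SIZE
            - ASSUMED_SPACE_NEEDED_0);
}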
+@param[in,out] table hash table to be resuzed (to be freed later) +@param[in] n number of array cells +@return resized new table */ +hash_table_t* +ib_recreate( + hash_table_t* table, + ulint n) +{ + /* This function is for only page_hash for now */ + ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); + ut_ad(table->n_sync_obj > 0); + + hash_table_t* new_table = hash_create(n); + + new_table->type = table->type; + new_table->n_sync_obj = table->n_sync_obj; + new_table->sync_obj = table->sync_obj; + + for (ulint i = 0; i < table->n_sync_obj; i++) { + mem_heap_free(table->heaps[i]); + } + ut_free(table->heaps); + + new_table->heaps = static_cast( + ut_malloc_nokey(new_table->n_sync_obj * sizeof(void*))); + + for (ulint i = 0; i < new_table->n_sync_obj; i++) { + new_table->heaps[i] = mem_heap_create_typed( + ut_min(static_cast(4096), + MEM_MAX_ALLOC_IN_BUF / 2 + - MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)), + MEM_HEAP_FOR_PAGE_HASH); + ut_a(new_table->heaps[i]); + } + + return(new_table); +} + /*************************************************************//** Empties a hash table and frees the memory heaps. */ -UNIV_INTERN void ha_clear( /*=====*/ hash_table_t* table) /*!< in, own: hash table */ { - ulint i; - ulint n; - - ut_ad(table); ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!table->adaptive - || rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE)); -#endif /* UNIV_SYNC_DEBUG */ - - /* Free the memory heaps. */ - n = table->n_sync_obj; + ut_ad(!table->adaptive || btr_search_own_all(RW_LOCK_X)); - for (i = 0; i < n; i++) { + for (ulint i = 0; i < table->n_sync_obj; i++) { mem_heap_free(table->heaps[i]); } - if (table->heaps) { - mem_free(table->heaps); - } + ut_free(table->heaps); switch (table->type) { case HASH_TABLE_SYNC_MUTEX: - mem_free(table->sync_obj.mutexes); + for (ulint i = 0; i < table->n_sync_obj; ++i) { + mutex_destroy(&table->sync_obj.mutexes[i]); + } + ut_free(table->sync_obj.mutexes); table->sync_obj.mutexes = NULL; break; case HASH_TABLE_SYNC_RW_LOCK: - mem_free(table->sync_obj.rw_locks); + for (ulint i = 0; i < table->n_sync_obj; ++i) { + rw_lock_free(&table->sync_obj.rw_locks[i]); + } + + ut_free(table->sync_obj.rw_locks); table->sync_obj.rw_locks = NULL; break; @@ -148,9 +183,9 @@ ha_clear( /* Clear the hash table. */ - n = hash_get_n_cells(table); + ulint n = hash_get_n_cells(table); - for (i = 0; i < n; i++) { + for (ulint i = 0; i < n; i++) { hash_get_nth_cell(table, i)->node = NULL; } } @@ -160,8 +195,7 @@ Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. If btr_search_enabled is set to FALSE, we will only allow updating existing nodes, but no new node is allowed to be added. 
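ib_create() requires n_sync_obj to be a power of two; the usual point of that requirement is that a record's fold value can then be mapped to its protecting latch (and matching heap) with a bit mask instead of a modulo. A minimal sketch of that mapping under the power-of-two assumption (the helper names here are illustrative, not InnoDB's):

#include <cassert>
#include <cstddef>

/* True if n is a power of two (n > 0), like ut_is_2pow(). */
static inline bool is_pow2(size_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/* Map a hash fold value to one of n_sync_obj latches/heaps.
Because n_sync_obj is a power of two, fold & (n_sync_obj - 1)
gives the same result as fold % n_sync_obj, but cheaper. */
static inline size_t latch_index(size_t fold, size_t n_sync_obj)
{
    assert(is_pow2(n_sync_obj));
    return fold & (n_sync_obj - 1);
}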
-@return TRUE if succeed, FALSE if no more memory could be allocated */ -UNIV_INTERN +@return TRUE if succeed, FALSE if no more memory could be allocated */ ibool ha_insert_for_fold_func( /*====================*/ @@ -262,9 +296,27 @@ ha_insert_for_fold_func( return(TRUE); } +#ifdef UNIV_DEBUG +/** Verify if latch corresponding to the hash table is x-latched +@param[in] table hash table */ +static +void +ha_btr_search_latch_x_locked(const hash_table_t* table) +{ + ulint i; + for (i = 0; i < btr_ahi_parts; ++i) { + if (btr_search_sys->hash_tables[i] == table) { + break; + } + } + + ut_ad(i < btr_ahi_parts); + ut_ad(rw_lock_own(btr_search_latches[i], RW_LOCK_X)); +} +#endif /* UNIV_DEBUG */ + /***********************************************************//** Deletes a hash node. */ -UNIV_INTERN void ha_delete_hash_node( /*================*/ @@ -273,9 +325,7 @@ ha_delete_hash_node( { ut_ad(table); ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_d(ha_btr_search_latch_x_locked(table)); ut_ad(btr_search_enabled); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (table->adaptive) { @@ -292,7 +342,6 @@ ha_delete_hash_node( Looks for an element when we know the pointer to the data, and updates the pointer to data, if found. @return TRUE if found */ -UNIV_INTERN ibool ha_search_and_update_if_found_func( /*===============================*/ @@ -312,9 +361,8 @@ ha_search_and_update_if_found_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG ut_a(new_block->frame == page_align(new_data)); #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_d(ha_btr_search_latch_x_locked(table)); if (!btr_search_enabled) { return(FALSE); @@ -343,7 +391,6 @@ ha_search_and_update_if_found_func( /*****************************************************************//** Removes from the chain determined by fold all nodes whose data pointer points to the page given. */ -UNIV_INTERN void ha_remove_all_nodes_to_page( /*========================*/ @@ -386,14 +433,13 @@ ha_remove_all_nodes_to_page( node = ha_chain_get_next(node); } -#endif +#endif /* UNIV_DEBUG */ } #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /*************************************************************//** Validates a given range of the cells in hash table. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool ha_validate( /*========*/ @@ -421,12 +467,9 @@ ha_validate( node = node->next) { if (hash_calc_hash(node->fold, table) != i) { - ut_print_timestamp(stderr); - fprintf(stderr, - "InnoDB: Error: hash table node" - " fold value %lu does not\n" - "InnoDB: match the cell number %lu.\n", - (ulong) node->fold, (ulong) i); + ib::error() << "Hash table node fold value " + << node->fold << " does not match the" + " cell number " << i << "."; ok = FALSE; } @@ -439,7 +482,6 @@ ha_validate( /*************************************************************//** Prints info of a hash table. */ -UNIV_INTERN void ha_print_info( /*==========*/ diff --git a/storage/innobase/ha/ha0storage.cc b/storage/innobase/ha/ha0storage.cc index 6820591f316..a36fd573a4f 100644 --- a/storage/innobase/ha/ha0storage.cc +++ b/storage/innobase/ha/ha0storage.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -25,7 +25,7 @@ its own storage, avoiding duplicates. Created September 22, 2007 Vasil Dimov *******************************************************/ -#include "univ.i" +#include "ha_prototypes.h" #include "ha0storage.h" #include "hash0hash.h" #include "mem0mem.h" @@ -83,7 +83,6 @@ data_len bytes need to be allocated) and the size of storage is going to become more than "memlim" then "data" is not added and NULL is returned. To disable this behavior "memlim" can be set to 0, which stands for "no limit". */ -UNIV_INTERN const void* ha_storage_put_memlim( /*==================*/ @@ -169,14 +168,13 @@ test_ha_storage() p = ha_storage_put(storage, buf, sizeof(buf)); if (p != stored[i]) { - - fprintf(stderr, "ha_storage_put() returned %p " - "instead of %p, i=%d\n", p, stored[i], i); + ib::warn() << "ha_storage_put() returned " << p + << " instead of " << stored[i] << ", i=" << i; return; } } - fprintf(stderr, "all ok\n"); + ib::info() << "all ok"; ha_storage_free(storage); } diff --git a/storage/innobase/ha/hash0hash.cc b/storage/innobase/ha/hash0hash.cc index 174b6bcb57e..234fd7ac032 100644 --- a/storage/innobase/ha/hash0hash.cc +++ b/storage/innobase/ha/hash0hash.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,24 +24,18 @@ Created 5/20/1997 Heikki Tuuri *******************************************************/ #include "hash0hash.h" + #ifdef UNIV_NONINL #include "hash0hash.ic" -#endif +#endif /* UNIV_NOINL */ #include "mem0mem.h" +#include "sync0sync.h" #ifndef UNIV_HOTBACKUP -# ifdef UNIV_PFS_MUTEX -UNIV_INTERN mysql_pfs_key_t hash_table_mutex_key; -# endif /* UNIV_PFS_MUTEX */ - -# ifdef UNIV_PFS_RWLOCK -UNIV_INTERN mysql_pfs_key_t hash_table_rw_lock_key; -# endif /* UNIV_PFS_RWLOCK */ /************************************************************//** Reserves the mutex for a fold value in a hash table. */ -UNIV_INTERN void hash_mutex_enter( /*=============*/ @@ -54,7 +48,6 @@ hash_mutex_enter( /************************************************************//** Releases the mutex for a fold value in a hash table. */ -UNIV_INTERN void hash_mutex_exit( /*============*/ @@ -67,16 +60,14 @@ hash_mutex_exit( /************************************************************//** Reserves all the mutexes of a hash table, in an ascending order. */ -UNIV_INTERN void hash_mutex_enter_all( /*=================*/ hash_table_t* table) /*!< in: hash table */ { - ulint i; - ut_ad(table->type == HASH_TABLE_SYNC_MUTEX); - for (i = 0; i < table->n_sync_obj; i++) { + + for (ulint i = 0; i < table->n_sync_obj; i++) { mutex_enter(table->sync_obj.mutexes + i); } @@ -84,16 +75,14 @@ hash_mutex_enter_all( /************************************************************//** Releases all the mutexes of a hash table. 
*/ -UNIV_INTERN void hash_mutex_exit_all( /*================*/ hash_table_t* table) /*!< in: hash table */ { - ulint i; - ut_ad(table->type == HASH_TABLE_SYNC_MUTEX); - for (i = 0; i < table->n_sync_obj; i++) { + + for (ulint i = 0; i < table->n_sync_obj; i++) { mutex_exit(table->sync_obj.mutexes + i); } @@ -101,7 +90,6 @@ hash_mutex_exit_all( /************************************************************//** Releases all but the passed in mutex of a hash table. */ -UNIV_INTERN void hash_mutex_exit_all_but( /*====================*/ @@ -114,7 +102,7 @@ hash_mutex_exit_all_but( for (i = 0; i < table->n_sync_obj; i++) { ib_mutex_t* mutex = table->sync_obj.mutexes + i; - if (UNIV_LIKELY(keep_mutex != mutex)) { + if (keep_mutex != mutex) { mutex_exit(mutex); } } @@ -124,7 +112,6 @@ hash_mutex_exit_all_but( /************************************************************//** s-lock a lock for a fold value in a hash table. */ -UNIV_INTERN void hash_lock_s( /*========*/ @@ -137,17 +124,14 @@ hash_lock_s( ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); ut_ad(lock); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(lock, RW_LOCK_S)); + ut_ad(!rw_lock_own(lock, RW_LOCK_X)); rw_lock_s_lock(lock); } /************************************************************//** x-lock a lock for a fold value in a hash table. */ -UNIV_INTERN void hash_lock_x( /*========*/ @@ -160,17 +144,14 @@ hash_lock_x( ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); ut_ad(lock); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(lock, RW_LOCK_S)); + ut_ad(!rw_lock_own(lock, RW_LOCK_X)); rw_lock_x_lock(lock); } /************************************************************//** unlock an s-lock for a fold value in a hash table. */ -UNIV_INTERN void hash_unlock_s( /*==========*/ @@ -184,16 +165,13 @@ hash_unlock_s( ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); ut_ad(lock); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(lock, RW_LOCK_S)); rw_lock_s_unlock(lock); } /************************************************************//** unlock x-lock for a fold value in a hash table. */ -UNIV_INTERN void hash_unlock_x( /*==========*/ @@ -205,31 +183,26 @@ hash_unlock_x( ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); ut_ad(lock); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(lock, RW_LOCK_X)); rw_lock_x_unlock(lock); } /************************************************************//** Reserves all the locks of a hash table, in an ascending order. */ -UNIV_INTERN void hash_lock_x_all( /*============*/ hash_table_t* table) /*!< in: hash table */ { - ulint i; - ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); - for (i = 0; i < table->n_sync_obj; i++) { + + for (ulint i = 0; i < table->n_sync_obj; i++) { rw_lock_t* lock = table->sync_obj.rw_locks + i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_ad(!rw_lock_own(lock, RW_LOCK_S)); + ut_ad(!rw_lock_own(lock, RW_LOCK_X)); rw_lock_x_lock(lock); } @@ -237,21 +210,18 @@ hash_lock_x_all( /************************************************************//** Releases all the locks of a hash table, in an ascending order. 
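hash_mutex_enter_all(), hash_lock_x_all() and their *_all_but() counterparts in hash0hash.cc follow a common pattern: take every latch of the table in one fixed ascending order, so that concurrent "lock all" callers cannot deadlock against each other, then later release everything except the one latch still needed. A minimal sketch of the same pattern with std::mutex standing in for ib_mutex_t/rw_lock_t:

#include <mutex>
#include <vector>

/* Acquire every mutex in ascending index order; the fixed global
order is what keeps two "lock all" callers from deadlocking. */
static void lock_all(std::vector<std::mutex>& latches)
{
    for (std::mutex& m : latches) {
        m.lock();
    }
}

/* Release everything except the latch the caller wants to keep,
mirroring hash_unlock_x_all_but() / hash_mutex_exit_all_but(). */
static void unlock_all_but(std::vector<std::mutex>& latches,
                           std::mutex* keep)
{
    for (std::mutex& m : latches) {
        if (&m != keep) {
            m.unlock();
        }
    }
}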
*/ -UNIV_INTERN void hash_unlock_x_all( /*==============*/ hash_table_t* table) /*!< in: hash table */ { - ulint i; - ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); - for (i = 0; i < table->n_sync_obj; i++) { + + for (ulint i = 0; i < table->n_sync_obj; i++) { rw_lock_t* lock = table->sync_obj.rw_locks + i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + + ut_ad(rw_lock_own(lock, RW_LOCK_X)); rw_lock_x_unlock(lock); } @@ -259,24 +229,21 @@ hash_unlock_x_all( /************************************************************//** Releases all but passed in lock of a hash table, */ -UNIV_INTERN void hash_unlock_x_all_but( /*==================*/ hash_table_t* table, /*!< in: hash table */ rw_lock_t* keep_lock) /*!< in: lock to keep */ { - ulint i; - ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK); - for (i = 0; i < table->n_sync_obj; i++) { + + for (ulint i = 0; i < table->n_sync_obj; i++) { rw_lock_t* lock = table->sync_obj.rw_locks + i; -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ - if (UNIV_LIKELY(keep_lock != lock)) { + ut_ad(rw_lock_own(lock, RW_LOCK_X)); + + if (keep_lock != lock) { rw_lock_x_unlock(lock); } } @@ -287,8 +254,7 @@ hash_unlock_x_all_but( /*************************************************************//** Creates a hash table with >= n array cells. The actual number of cells is chosen to be a prime number slightly bigger than n. -@return own: created table */ -UNIV_INTERN +@return own: created table */ hash_table_t* hash_create( /*========*/ @@ -300,10 +266,11 @@ hash_create( prime = ut_find_prime(n); - table = static_cast(mem_alloc(sizeof(hash_table_t))); + table = static_cast( + ut_malloc_nokey(sizeof(hash_table_t))); array = static_cast( - ut_malloc(sizeof(hash_cell_t) * prime)); + ut_malloc_nokey(sizeof(hash_cell_t) * prime)); /* The default type of hash_table is HASH_TABLE_SYNC_NONE i.e.: the caller is responsible for access control to the table. */ @@ -329,17 +296,15 @@ hash_create( /*************************************************************//** Frees a hash table. */ -UNIV_INTERN void hash_table_free( /*============*/ hash_table_t* table) /*!< in, own: hash table */ { - ut_ad(table); ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); ut_free(table->array); - mem_free(table); + ut_free(table); } #ifndef UNIV_HOTBACKUP @@ -347,52 +312,49 @@ hash_table_free( Creates a sync object array to protect a hash table. ::sync_obj can be mutexes or rw_locks depening on the type of hash table. 
*/ -UNIV_INTERN void -hash_create_sync_obj_func( -/*======================*/ +hash_create_sync_obj( +/*=================*/ hash_table_t* table, /*!< in: hash table */ enum hash_table_sync_t type, /*!< in: HASH_TABLE_SYNC_MUTEX or HASH_TABLE_SYNC_RW_LOCK */ -#ifdef UNIV_SYNC_DEBUG - ulint sync_level,/*!< in: latching order level - of the mutexes: used in the - debug version */ -#endif /* UNIV_SYNC_DEBUG */ + latch_id_t id, /*!< in: latch ID */ ulint n_sync_obj)/*!< in: number of sync objects, must be a power of 2 */ { - ulint i; - - ut_ad(table); - ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); ut_a(n_sync_obj > 0); ut_a(ut_is_2pow(n_sync_obj)); + ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); table->type = type; - switch (type) { + switch (table->type) { case HASH_TABLE_SYNC_MUTEX: table->sync_obj.mutexes = static_cast( - mem_alloc(n_sync_obj * sizeof(ib_mutex_t))); + ut_malloc_nokey(n_sync_obj * sizeof(ib_mutex_t))); - for (i = 0; i < n_sync_obj; i++) { - mutex_create(hash_table_mutex_key, - table->sync_obj.mutexes + i, sync_level); + for (ulint i = 0; i < n_sync_obj; i++) { + mutex_create(id, table->sync_obj.mutexes + i); } break; - case HASH_TABLE_SYNC_RW_LOCK: + case HASH_TABLE_SYNC_RW_LOCK: { + + latch_level_t level = sync_latch_get_level(id); + + ut_a(level != SYNC_UNKNOWN); + table->sync_obj.rw_locks = static_cast( - mem_alloc(n_sync_obj * sizeof(rw_lock_t))); + ut_malloc_nokey(n_sync_obj * sizeof(rw_lock_t))); - for (i = 0; i < n_sync_obj; i++) { - rw_lock_create(hash_table_rw_lock_key, - table->sync_obj.rw_locks + i, sync_level); + for (ulint i = 0; i < n_sync_obj; i++) { + rw_lock_create(hash_table_locks_key, + table->sync_obj.rw_locks + i, level); } break; + } case HASH_TABLE_SYNC_NONE: ut_error; diff --git a/storage/innobase/ha_innodb.def b/storage/innobase/ha_innodb.def deleted file mode 100644 index e0faa62deb1..00000000000 --- a/storage/innobase/ha_innodb.def +++ /dev/null @@ -1,4 +0,0 @@ -EXPORTS - _mysql_plugin_interface_version_ - _mysql_sizeof_struct_st_plugin_ - _mysql_plugin_declarations_ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 91284c2c2fc..e12cc313019 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -33,86 +33,93 @@ this program; if not, write to the Free Software Foundation, Inc., *****************************************************************************/ -#define lower_case_file_system lower_case_file_system_server -#define mysql_unpacked_real_data_home mysql_unpacked_real_data_home_server -#include // explain_filename, nz2, EXPLAIN_PARTITIONS_AS_COMMENT, - // EXPLAIN_FILENAME_MAX_EXTRA_LENGTH - -#include // PROCESS_ACL -#include // DEBUG_SYNC -#include // HA_OPTION_* +/** @file ha_innodb.cc */ + +#include "univ.i" + +/* Include necessary SQL headers */ +#include "ha_prototypes.h" +#include +#include +#include #include #include +#include +#include +#include +#include +#include +#include #include #include +#include +#include +#include -#undef lower_case_file_system -#undef mysql_unpacked_real_data_home -MYSQL_PLUGIN_IMPORT extern my_bool lower_case_file_system; -MYSQL_PLUGIN_IMPORT extern char mysql_unpacked_real_data_home[]; - -#ifdef _WIN32 -#include -#endif +// MYSQL_PLUGIN_IMPORT extern my_bool lower_case_file_system; +// MYSQL_PLUGIN_IMPORT extern char mysql_unpacked_real_data_home[]; #include - -/** @file ha_innodb.cc */ +#include /* Include necessary InnoDB headers */ -#include "univ.i" +#include "api0api.h" +#include "api0misc.h" +#include "btr0btr.h" 
+#include "btr0cur.h" +#include "btr0bulk.h" +#include "btr0sea.h" +#include "buf0dblwr.h" #include "buf0dump.h" -#include "buf0lru.h" #include "buf0flu.h" -#include "buf0dblwr.h" -#include "btr0sea.h" +#include "buf0lru.h" +#include "dict0boot.h" #include "btr0defragment.h" +#include "dict0crea.h" +#include "dict0dict.h" +#include "dict0stats.h" +#include "dict0stats_bg.h" +#include "fil0fil.h" +#include "fsp0fsp.h" +#include "fsp0space.h" +#include "fsp0sysspace.h" +#include "fts0fts.h" +#include "fts0plugin.h" +#include "fts0priv.h" +#include "fts0types.h" +#include "ibuf0ibuf.h" +#include "lock0lock.h" +#include "log0log.h" +#include "mem0mem.h" +#include "mtr0mtr.h" #include "os0file.h" #include "os0thread.h" -#include "srv0start.h" -#include "srv0srv.h" -#include "trx0roll.h" -#include "trx0trx.h" - -#include "trx0sys.h" +#include "page0zip.h" +#include "pars0pars.h" #include "rem0types.h" +#include "row0import.h" #include "row0ins.h" +#include "row0merge.h" #include "row0mysql.h" +#include "row0quiesce.h" #include "row0sel.h" +#include "row0trunc.h" #include "row0upd.h" -#include "log0log.h" -#include "lock0lock.h" -#include "dict0crea.h" -#include "btr0cur.h" -#include "btr0btr.h" -#include "fsp0fsp.h" -#include "sync0sync.h" -#include "fil0fil.h" #include "fil0crypt.h" -#include "trx0xa.h" -#include "row0merge.h" -#include "dict0boot.h" -#include "dict0stats.h" -#include "dict0stats_bg.h" -#include "ha_prototypes.h" -#include "ut0mem.h" #include "ut0timer.h" -#include "ibuf0ibuf.h" -#include "dict0dict.h" #include "srv0mon.h" -#include "api0api.h" -#include "api0misc.h" -#include "pars0pars.h" -#include "fts0fts.h" -#include "fts0types.h" -#include "row0import.h" -#include "row0quiesce.h" +#include "srv0srv.h" +#include "srv0start.h" #ifdef UNIV_DEBUG #include "trx0purge.h" #endif /* UNIV_DEBUG */ -#include "fts0priv.h" -#include "page0zip.h" +#include "trx0roll.h" +#include "trx0sys.h" +#include "trx0trx.h" #include "fil0pagecompress.h" +#include "trx0xa.h" +#include "ut0mem.h" +#include "row0ext.h" #define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) @@ -123,10 +130,14 @@ MYSQL_PLUGIN_IMPORT extern char mysql_unpacked_real_data_home[]; #include "ha_innodb.h" #include "i_s.h" +#include "sync0sync.h" #include #include +/* for ha_innopart, Native InnoDB Partitioning. 
*/ +/* JAN: TODO: MySQL 5.7 Native InnoDB Partitioning */ +//#include "ha_innopart.h" #include #include @@ -161,6 +172,27 @@ static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid); static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid); #endif /* WITH_WSREP */ +static +void +innobase_set_cursor_view( +/*=====================*/ + handlerton* hton, /*!< in: innobase hton */ + THD* thd, /*!< in: user thread handle */ + void* curview);/*!< in: Consistent cursor view to be set */ +static +void* +innobase_create_cursor_view( +/*========================*/ + handlerton* hton, /*!< in: innobase hton */ + THD* thd); /*!< in: user thread handle */ +static +void +innobase_close_cursor_view( +/*=======================*/ + handlerton* hton, /*!< in: innobase hton */ + THD* thd, /*!< in: user thread handle */ + void* curview);/*!< in: Consistent read view to be closed */ + /** to protect innobase_open_files */ static mysql_mutex_t innobase_share_mutex; /** to force correct commit order in binlog */ @@ -180,16 +212,12 @@ static const long AUTOINC_OLD_STYLE_LOCKING = 0; static const long AUTOINC_NEW_STYLE_LOCKING = 1; static const long AUTOINC_NO_LOCKING = 2; -static long innobase_mirrored_log_groups; static long innobase_log_buffer_size; -static long innobase_additional_mem_pool_size; -static long innobase_file_io_threads; static long innobase_open_files; static long innobase_autoinc_lock_mode; static ulong innobase_commit_concurrency = 0; static ulong innobase_read_io_threads; static ulong innobase_write_io_threads; -static long innobase_buffer_pool_instances = 1; static long long innobase_buffer_pool_size, innobase_log_file_size; @@ -197,15 +225,12 @@ static long long innobase_buffer_pool_size, innobase_log_file_size; Connected to buf_LRU_old_ratio. */ static uint innobase_old_blocks_pct; -/** Maximum on-disk size of change buffer in terms of percentage -of the buffer pool. */ -static uint innobase_change_buffer_max_size = CHANGE_BUFFER_DEFAULT_SIZE; - /* The default values for the following char* start-up parameters are determined in innobase_init below: */ static char* innobase_data_home_dir = NULL; static char* innobase_data_file_path = NULL; +static char* innobase_temp_data_file_path = NULL; static char* innobase_file_format_name = NULL; static char* innobase_change_buffering = NULL; static char* innobase_enable_monitor_counter = NULL; @@ -216,7 +241,12 @@ static char* innobase_reset_all_monitor_counter = NULL; /* The highest file format being used in the database. The value can be set by user, however, it will be adjusted to the newer file format if a table of such format is created/opened. 
*/ -static char* innobase_file_format_max = NULL; +char* innobase_file_format_max = NULL; + +/** Default value of innodb_file_format */ +static const char* innodb_file_format_default = "Barracuda"; +/** Default value of innodb_file_format_max */ +static const char* innodb_file_format_max_default = "Antelope"; static char* innobase_file_flush_method = NULL; @@ -229,10 +259,6 @@ values */ static ulong innobase_fast_shutdown = 1; static my_bool innobase_file_format_check = TRUE; -#ifdef UNIV_LOG_ARCHIVE -static my_bool innobase_log_archive = FALSE; -static char* innobase_log_arch_dir = NULL; -#endif /* UNIV_LOG_ARCHIVE */ static my_bool innobase_use_atomic_writes = FALSE; static my_bool innobase_use_fallocate = TRUE; static my_bool innobase_use_doublewrite = TRUE; @@ -240,12 +266,10 @@ static my_bool innobase_use_checksums = TRUE; static my_bool innobase_locks_unsafe_for_binlog = FALSE; static my_bool innobase_rollback_on_timeout = FALSE; static my_bool innobase_create_status_file = FALSE; -static my_bool innobase_stats_on_metadata = TRUE; +my_bool innobase_stats_on_metadata = TRUE; static my_bool innobase_large_prefix = FALSE; static my_bool innodb_optimize_fulltext_only = FALSE; -static char* internal_innobase_data_file_path = NULL; - static char* innodb_version_str = (char*) INNODB_VERSION_STR; extern uint srv_n_fil_crypt_threads; @@ -261,6 +285,61 @@ extern uint srv_background_scrub_data_check_interval; extern my_bool srv_scrub_force_testing; #endif +/** Note we cannot use rec_format_enum because we do not allow +COMPRESSED row format for innodb_default_row_format option. */ +enum default_row_format_enum { + DEFAULT_ROW_FORMAT_REDUNDANT = 0, + DEFAULT_ROW_FORMAT_COMPACT = 1, + DEFAULT_ROW_FORMAT_DYNAMIC = 2, +}; +/* JAN: TODO: MySQL 5.7 */ +void set_my_errno(int err) +{ + errno = err; +} + +/** Return the InnoDB ROW_FORMAT enum value +@param[in] row_format row_format from "innodb_default_row_format" +@return InnoDB ROW_FORMAT value from rec_format_t enum. */ +static +rec_format_t +get_row_format( + ulong row_format) +{ + switch(row_format) { + case DEFAULT_ROW_FORMAT_REDUNDANT: + return(REC_FORMAT_REDUNDANT); + case DEFAULT_ROW_FORMAT_COMPACT: + return(REC_FORMAT_COMPACT); + case DEFAULT_ROW_FORMAT_DYNAMIC: + return(REC_FORMAT_DYNAMIC); + default: + ut_ad(0); + return(REC_FORMAT_DYNAMIC); + } +} + +static ulong innodb_default_row_format = DEFAULT_ROW_FORMAT_DYNAMIC; + +#ifdef UNIV_DEBUG +/** Values for --innodb-debug-compress names. */ +static const char* innodb_debug_compress_names[] = { + "none", + "zlib", + "lz4", + "lz4hc", + NullS +}; + +/** Enumeration of --innodb-debug-compress */ +static TYPELIB innodb_debug_compress_typelib = { + array_elements(innodb_debug_compress_names) - 1, + "innodb_debug_compress_typelib", + innodb_debug_compress_names, + NULL +}; +#endif /* UNIV_DEBUG */ + /** Possible values for system variable "innodb_stats_method". The values are defined the same as its corresponding MyISAM system variable "myisam_stats_method"(see "myisam_stats_method_names"), for better usability */ @@ -280,14 +359,14 @@ static TYPELIB innodb_stats_method_typelib = { NULL }; -/** Possible values for system variable "innodb_checksum_algorithm". 
*/ +/** Possible values of the parameter innodb_checksum_algorithm */ static const char* innodb_checksum_algorithm_names[] = { - "CRC32", - "STRICT_CRC32", - "INNODB", - "STRICT_INNODB", - "NONE", - "STRICT_NONE", + "crc32", + "strict_crc32", + "innodb", + "strict_innodb", + "none", + "strict_none", NullS }; @@ -300,6 +379,23 @@ static TYPELIB innodb_checksum_algorithm_typelib = { NULL }; +/** Possible values for system variable "innodb_default_row_format". */ +static const char* innodb_default_row_format_names[] = { + "redundant", + "compact", + "dynamic", + NullS +}; + +/** Used to define an enumerate type of the system variable +innodb_default_row_format. */ +static TYPELIB innodb_default_row_format_typelib = { + array_elements(innodb_default_row_format_names) - 1, + "innodb_default_row_format_typelib", + innodb_default_row_format_names, + NULL +}; + /* The following counter is used to convey information to InnoDB about server activity: in case of normal DML ops it is not sensible to call srv_active_wake_master_thread after each @@ -320,6 +416,20 @@ static const char* innobase_change_buffering_values[IBUF_USE_COUNT] = { "all" /* IBUF_USE_ALL */ }; + +/* This tablespace name is reserved by InnoDB in order to explicitly +create a file_per_table tablespace for the table. */ +const char reserved_file_per_table_space_name[] = "innodb_file_per_table"; + +/* This tablespace name is reserved by InnoDB for the system tablespace +which uses space_id 0 and stores extra types of system pages like UNDO +and doublewrite. */ +const char reserved_system_space_name[] = "innodb_system"; + +/* This tablespace name is reserved by InnoDB for the predefined temporary +tablespace. */ +const char reserved_temporary_space_name[] = "innodb_temporary"; + /* Call back function array defined by MySQL and used to retrieve FTS results. 
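The NullS-terminated name arrays above (innodb_checksum_algorithm_names, innodb_default_row_format_names) are wired into TYPELIBs so that an ENUM system variable stores a numeric index while users see and set the matching string. A minimal sketch of the index/name mapping such a table provides (the lookup helper is illustrative; the server's TYPELIB machinery does more than a plain string compare):

#include <cstring>

/* Name table in the same order as the enum values, terminated the
same way as the NullS-terminated arrays in this file. */
static const char* checksum_algorithm_names_sketch[] = {
    "crc32", "strict_crc32", "innodb", "strict_innodb",
    "none", "strict_none", nullptr
};

/* Map a name back to its enum index; -1 if unknown. */
static int checksum_algorithm_by_name(const char* name)
{
    for (int i = 0; checksum_algorithm_names_sketch[i] != nullptr; i++) {
        if (std::strcmp(checksum_algorithm_names_sketch[i], name) == 0) {
            return i;
        }
    }
    return -1;
}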
*/ const struct _ft_vft ft_vft_result = {NULL, @@ -334,6 +444,10 @@ const struct _ft_vft_ext ft_vft_ext_result = {innobase_fts_get_version, innobase_fts_count_matches}; #ifdef HAVE_PSI_INTERFACE +# define PSI_KEY(n) {&n##_key, #n, 0} +/* All RWLOCK used in Innodb are SX-locks */ +# define PSI_RWLOCK_KEY(n) {&n##_key, #n, PSI_RWLOCK_FLAG_SX} + /* Keys to register pthread mutexes/cond in the current file with performance schema */ static mysql_pfs_key_t innobase_share_mutex_key; @@ -342,12 +456,12 @@ static mysql_pfs_key_t commit_cond_key; static mysql_pfs_key_t pending_checkpoint_mutex_key; static PSI_mutex_info all_pthread_mutexes[] = { - {&commit_cond_mutex_key, "commit_cond_mutex", 0}, - {&innobase_share_mutex_key, "innobase_share_mutex", 0} + PSI_KEY(commit_cond_mutex), + PSI_KEY(innobase_share_mutex) }; static PSI_cond_info all_innodb_conds[] = { - {&commit_cond_key, "commit_cond", 0} + PSI_KEY(commit_cond) }; # ifdef UNIV_PFS_MUTEX @@ -355,76 +469,66 @@ static PSI_cond_info all_innodb_conds[] = { performance schema instrumented if "UNIV_PFS_MUTEX" is defined */ static PSI_mutex_info all_innodb_mutexes[] = { - {&autoinc_mutex_key, "autoinc_mutex", 0}, + PSI_KEY(autoinc_mutex), # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK - {&buffer_block_mutex_key, "buffer_block_mutex", 0}, + PSI_KEY(buffer_block_mutex), # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */ - {&buf_pool_mutex_key, "buf_pool_mutex", 0}, - {&buf_pool_zip_mutex_key, "buf_pool_zip_mutex", 0}, - {&cache_last_read_mutex_key, "cache_last_read_mutex", 0}, - {&dict_foreign_err_mutex_key, "dict_foreign_err_mutex", 0}, - {&dict_sys_mutex_key, "dict_sys_mutex", 0}, - {&file_format_max_mutex_key, "file_format_max_mutex", 0}, - {&fil_system_mutex_key, "fil_system_mutex", 0}, - {&flush_list_mutex_key, "flush_list_mutex", 0}, - {&fts_bg_threads_mutex_key, "fts_bg_threads_mutex", 0}, - {&fts_delete_mutex_key, "fts_delete_mutex", 0}, - {&fts_optimize_mutex_key, "fts_optimize_mutex", 0}, - {&fts_doc_id_mutex_key, "fts_doc_id_mutex", 0}, - {&fts_pll_tokenize_mutex_key, "fts_pll_tokenize_mutex", 0}, - {&log_flush_order_mutex_key, "log_flush_order_mutex", 0}, - {&hash_table_mutex_key, "hash_table_mutex", 0}, - {&ibuf_bitmap_mutex_key, "ibuf_bitmap_mutex", 0}, - {&ibuf_mutex_key, "ibuf_mutex", 0}, - {&ibuf_pessimistic_insert_mutex_key, - "ibuf_pessimistic_insert_mutex", 0}, -# ifndef HAVE_ATOMIC_BUILTINS - {&server_mutex_key, "server_mutex", 0}, -# endif /* !HAVE_ATOMIC_BUILTINS */ - {&log_sys_mutex_key, "log_sys_mutex", 0}, -# ifdef UNIV_MEM_DEBUG - {&mem_hash_mutex_key, "mem_hash_mutex", 0}, -# endif /* UNIV_MEM_DEBUG */ - {&mem_pool_mutex_key, "mem_pool_mutex", 0}, - {&mutex_list_mutex_key, "mutex_list_mutex", 0}, - {&page_zip_stat_per_index_mutex_key, "page_zip_stat_per_index_mutex", 0}, - {&purge_sys_bh_mutex_key, "purge_sys_bh_mutex", 0}, - {&recv_sys_mutex_key, "recv_sys_mutex", 0}, - {&recv_writer_mutex_key, "recv_writer_mutex", 0}, - {&rseg_mutex_key, "rseg_mutex", 0}, -# ifdef UNIV_SYNC_DEBUG - {&rw_lock_debug_mutex_key, "rw_lock_debug_mutex", 0}, -# endif /* UNIV_SYNC_DEBUG */ - {&rw_lock_list_mutex_key, "rw_lock_list_mutex", 0}, - {&rw_lock_mutex_key, "rw_lock_mutex", 0}, - {&srv_dict_tmpfile_mutex_key, "srv_dict_tmpfile_mutex", 0}, - {&srv_innodb_monitor_mutex_key, "srv_innodb_monitor_mutex", 0}, - {&srv_misc_tmpfile_mutex_key, "srv_misc_tmpfile_mutex", 0}, - {&srv_monitor_file_mutex_key, "srv_monitor_file_mutex", 0}, -# ifdef UNIV_SYNC_DEBUG - {&sync_thread_mutex_key, "sync_thread_mutex", 0}, -# endif /* UNIV_SYNC_DEBUG */ - 
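The PSI_KEY()/PSI_RWLOCK_KEY() helpers defined above collapse the old {&name_key, "name", flags} initializers into a single argument by combining token pasting (n##_key) with stringizing (#n). A small self-contained illustration of the same macro technique, using a simplified stand-in struct rather than the real PSI_mutex_info:

#include <cstdio>

/* Simplified stand-in for a performance-schema info record. */
struct instrument_info {
    int*        key;    /* where the registered key is stored */
    const char* name;   /* instrument name shown to the user */
    int         flags;
};

static int autoinc_mutex_key;
static int fil_system_mutex_key;

/* Same shape as the patch's: #define PSI_KEY(n) {&n##_key, #n, 0} */
#define MY_PSI_KEY(n) { &n##_key, #n, 0 }

static instrument_info all_mutexes_sketch[] = {
    MY_PSI_KEY(autoinc_mutex),     /* {&autoinc_mutex_key, "autoinc_mutex", 0} */
    MY_PSI_KEY(fil_system_mutex),  /* {&fil_system_mutex_key, "fil_system_mutex", 0} */
};

int main()
{
    for (const instrument_info& i : all_mutexes_sketch) {
        std::printf("%s flags=%d\n", i.name, i.flags);
    }
    return 0;
}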
{&buf_dblwr_mutex_key, "buf_dblwr_mutex", 0}, - {&trx_undo_mutex_key, "trx_undo_mutex", 0}, - {&srv_sys_mutex_key, "srv_sys_mutex", 0}, - {&lock_sys_mutex_key, "lock_mutex", 0}, - {&lock_sys_wait_mutex_key, "lock_wait_mutex", 0}, - {&trx_mutex_key, "trx_mutex", 0}, - {&srv_sys_tasks_mutex_key, "srv_threads_mutex", 0}, - /* mutex with os_fast_mutex_ interfaces */ + PSI_KEY(buf_pool_mutex), + PSI_KEY(buf_pool_zip_mutex), + PSI_KEY(cache_last_read_mutex), + PSI_KEY(dict_foreign_err_mutex), + PSI_KEY(dict_sys_mutex), + PSI_KEY(recalc_pool_mutex), + PSI_KEY(file_format_max_mutex), + PSI_KEY(fil_system_mutex), + PSI_KEY(flush_list_mutex), + PSI_KEY(fts_bg_threads_mutex), + PSI_KEY(fts_delete_mutex), + PSI_KEY(fts_optimize_mutex), + PSI_KEY(fts_doc_id_mutex), + PSI_KEY(log_flush_order_mutex), + PSI_KEY(hash_table_mutex), + PSI_KEY(ibuf_bitmap_mutex), + PSI_KEY(ibuf_mutex), + PSI_KEY(ibuf_pessimistic_insert_mutex), + PSI_KEY(log_sys_mutex), + PSI_KEY(mutex_list_mutex), + PSI_KEY(page_zip_stat_per_index_mutex), + PSI_KEY(purge_sys_pq_mutex), + PSI_KEY(recv_sys_mutex), + PSI_KEY(recv_writer_mutex), + PSI_KEY(redo_rseg_mutex), + PSI_KEY(noredo_rseg_mutex), +# ifdef UNIV_DEBUG + PSI_KEY(rw_lock_debug_mutex), +# endif /* UNIV_DEBUG */ + PSI_KEY(rw_lock_list_mutex), + PSI_KEY(rw_lock_mutex), + PSI_KEY(srv_dict_tmpfile_mutex), + PSI_KEY(srv_innodb_monitor_mutex), + PSI_KEY(srv_misc_tmpfile_mutex), + PSI_KEY(srv_monitor_file_mutex), +# ifdef UNIV_DEBUG + PSI_KEY(sync_thread_mutex), +# endif /* UNIV_DEBUG */ + PSI_KEY(buf_dblwr_mutex), + PSI_KEY(trx_undo_mutex), + PSI_KEY(trx_pool_mutex), + PSI_KEY(trx_pool_manager_mutex), + PSI_KEY(srv_sys_mutex), + PSI_KEY(lock_mutex), + PSI_KEY(lock_wait_mutex), + PSI_KEY(trx_mutex), + PSI_KEY(srv_threads_mutex), # ifndef PFS_SKIP_EVENT_MUTEX - {&event_os_mutex_key, "event_os_mutex", 0}, + PSI_KEY(event_mutex), # endif /* PFS_SKIP_EVENT_MUTEX */ - {&os_mutex_key, "os_mutex", 0}, -#ifndef HAVE_ATOMIC_BUILTINS - {&srv_conc_mutex_key, "srv_conc_mutex", 0}, -#endif /* !HAVE_ATOMIC_BUILTINS */ -#ifndef HAVE_ATOMIC_BUILTINS_64 - {&monitor_mutex_key, "monitor_mutex", 0}, -#endif /* !HAVE_ATOMIC_BUILTINS_64 */ - {&ut_list_mutex_key, "ut_list_mutex", 0}, - {&trx_sys_mutex_key, "trx_sys_mutex", 0}, - {&zip_pad_mutex_key, "zip_pad_mutex", 0}, + PSI_KEY(rtr_active_mutex), + PSI_KEY(rtr_match_mutex), + PSI_KEY(rtr_path_mutex), + PSI_KEY(rtr_ssn_mutex), + PSI_KEY(trx_sys_mutex), + PSI_KEY(zip_pad_mutex), }; # endif /* UNIV_PFS_MUTEX */ @@ -433,27 +537,27 @@ static PSI_mutex_info all_innodb_mutexes[] = { performance schema instrumented if "UNIV_PFS_RWLOCK" is defined */ static PSI_rwlock_info all_innodb_rwlocks[] = { -# ifdef UNIV_LOG_ARCHIVE - {&archive_lock_key, "archive_lock", 0}, -# endif /* UNIV_LOG_ARCHIVE */ - {&btr_search_latch_key, "btr_search_latch", 0}, + PSI_RWLOCK_KEY(btr_search_latch), # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK - {&buf_block_lock_key, "buf_block_lock", 0}, + PSI_RWLOCK_KEY(buf_block_lock), # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */ -# ifdef UNIV_SYNC_DEBUG - {&buf_block_debug_latch_key, "buf_block_debug_latch", 0}, -# endif /* UNIV_SYNC_DEBUG */ - {&dict_operation_lock_key, "dict_operation_lock", 0}, - {&fil_space_latch_key, "fil_space_latch", 0}, - {&checkpoint_lock_key, "checkpoint_lock", 0}, - {&fts_cache_rw_lock_key, "fts_cache_rw_lock", 0}, - {&fts_cache_init_rw_lock_key, "fts_cache_init_rw_lock", 0}, - {&trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0}, - {&trx_purge_latch_key, "trx_purge_latch", 0}, - {&index_tree_rw_lock_key, "index_tree_rw_lock", 
0}, - {&index_online_log_key, "index_online_log", 0}, - {&dict_table_stats_key, "dict_table_stats", 0}, - {&hash_table_rw_lock_key, "hash_table_locks", 0} +# ifdef UNIV_DEBUG + PSI_RWLOCK_KEY(buf_block_debug_latch), +# endif /* UNIV_DEBUG */ + PSI_RWLOCK_KEY(dict_operation_lock), + PSI_RWLOCK_KEY(fil_space_latch), + PSI_RWLOCK_KEY(checkpoint_lock), + PSI_RWLOCK_KEY(fts_cache_rw_lock), + PSI_RWLOCK_KEY(fts_cache_init_rw_lock), + PSI_RWLOCK_KEY(trx_i_s_cache_lock), + PSI_RWLOCK_KEY(trx_purge_latch), + PSI_RWLOCK_KEY(index_tree_rw_lock), + PSI_RWLOCK_KEY(index_online_log), + PSI_RWLOCK_KEY(dict_table_stats), + PSI_RWLOCK_KEY(hash_table_locks), +# ifdef UNIV_DEBUG + PSI_RWLOCK_KEY(buf_chunk_map_latch) +# endif /* UNIV_DEBUG */ }; # endif /* UNIV_PFS_RWLOCK */ @@ -462,15 +566,21 @@ static PSI_rwlock_info all_innodb_rwlocks[] = { performance schema instrumented if "UNIV_PFS_THREAD" is defined */ static PSI_thread_info all_innodb_threads[] = { - {&trx_rollback_clean_thread_key, "trx_rollback_clean_thread", 0}, - {&io_handler_thread_key, "io_handler_thread", 0}, - {&srv_lock_timeout_thread_key, "srv_lock_timeout_thread", 0}, - {&srv_error_monitor_thread_key, "srv_error_monitor_thread", 0}, - {&srv_monitor_thread_key, "srv_monitor_thread", 0}, - {&srv_master_thread_key, "srv_master_thread", 0}, - {&srv_purge_thread_key, "srv_purge_thread", 0}, - {&buf_page_cleaner_thread_key, "page_cleaner_thread", 0}, - {&recv_writer_thread_key, "recv_writer_thread", 0} + PSI_KEY(buf_dump_thread), + PSI_KEY(dict_stats_thread), + PSI_KEY(io_handler_thread), + PSI_KEY(io_ibuf_thread), + PSI_KEY(io_log_thread), + PSI_KEY(io_read_thread), + PSI_KEY(io_write_thread), + PSI_KEY(page_cleaner_thread), + PSI_KEY(recv_writer_thread), + PSI_KEY(srv_error_monitor_thread), + PSI_KEY(srv_lock_timeout_thread), + PSI_KEY(srv_master_thread), + PSI_KEY(srv_monitor_thread), + PSI_KEY(srv_purge_thread), + PSI_KEY(trx_rollback_clean_thread), }; # endif /* UNIV_PFS_THREAD */ @@ -478,22 +588,13 @@ static PSI_thread_info all_innodb_threads[] = { /* all_innodb_files array contains the type of files that are performance schema instrumented if "UNIV_PFS_IO" is defined */ static PSI_file_info all_innodb_files[] = { - {&innodb_file_data_key, "innodb_data_file", 0}, - {&innodb_file_log_key, "innodb_log_file", 0}, - {&innodb_file_temp_key, "innodb_temp_file", 0} + PSI_KEY(innodb_data_file), + PSI_KEY(innodb_log_file), + PSI_KEY(innodb_temp_file) }; # endif /* UNIV_PFS_IO */ #endif /* HAVE_PSI_INTERFACE */ -/** Always normalize table name to lower case on Windows */ -#ifdef __WIN__ -#define normalize_table_name(norm_name, name) \ - normalize_table_name_low(norm_name, name, TRUE) -#else -#define normalize_table_name(norm_name, name) \ - normalize_table_name_low(norm_name, name, FALSE) -#endif /* __WIN__ */ - /** Set up InnoDB API callback function array */ ib_cb_t innodb_api_cb[] = { (ib_cb_t) ib_cursor_open_table, @@ -504,28 +605,18 @@ ib_cb_t innodb_api_cb[] = { (ib_cb_t) ib_cursor_moveto, (ib_cb_t) ib_cursor_first, (ib_cb_t) ib_cursor_next, - (ib_cb_t) ib_cursor_last, (ib_cb_t) ib_cursor_set_match_mode, (ib_cb_t) ib_sec_search_tuple_create, (ib_cb_t) ib_clust_read_tuple_create, (ib_cb_t) ib_tuple_delete, - (ib_cb_t) ib_tuple_copy, (ib_cb_t) ib_tuple_read_u8, - (ib_cb_t) ib_tuple_write_u8, (ib_cb_t) ib_tuple_read_u16, - (ib_cb_t) ib_tuple_write_u16, (ib_cb_t) ib_tuple_read_u32, - (ib_cb_t) ib_tuple_write_u32, (ib_cb_t) ib_tuple_read_u64, - (ib_cb_t) ib_tuple_write_u64, (ib_cb_t) ib_tuple_read_i8, - (ib_cb_t) ib_tuple_write_i8, (ib_cb_t) 
ib_tuple_read_i16, - (ib_cb_t) ib_tuple_write_i16, (ib_cb_t) ib_tuple_read_i32, - (ib_cb_t) ib_tuple_write_i32, (ib_cb_t) ib_tuple_read_i64, - (ib_cb_t) ib_tuple_write_i64, (ib_cb_t) ib_tuple_get_n_cols, (ib_cb_t) ib_col_set_value, (ib_cb_t) ib_col_get_value, @@ -535,16 +626,13 @@ ib_cb_t innodb_api_cb[] = { (ib_cb_t) ib_trx_rollback, (ib_cb_t) ib_trx_start, (ib_cb_t) ib_trx_release, - (ib_cb_t) ib_trx_state, (ib_cb_t) ib_cursor_lock, (ib_cb_t) ib_cursor_close, (ib_cb_t) ib_cursor_new_trx, (ib_cb_t) ib_cursor_reset, - (ib_cb_t) ib_open_table_by_name, (ib_cb_t) ib_col_get_name, (ib_cb_t) ib_table_truncate, (ib_cb_t) ib_cursor_open_index_using_name, - (ib_cb_t) ib_close_thd, (ib_cb_t) ib_cfg_get_cfg, (ib_cb_t) ib_cursor_set_memcached_sync, (ib_cb_t) ib_cursor_set_cluster_access, @@ -552,10 +640,10 @@ ib_cb_t innodb_api_cb[] = { (ib_cb_t) ib_cfg_trx_level, (ib_cb_t) ib_tuple_get_n_user_cols, (ib_cb_t) ib_cursor_set_lock_mode, - (ib_cb_t) ib_cursor_clear_trx, (ib_cb_t) ib_get_idx_field_name, (ib_cb_t) ib_trx_get_start_time, (ib_cb_t) ib_cfg_bk_commit_interval, + (ib_cb_t) ib_ut_strerr, (ib_cb_t) ib_cursor_stmt_begin, (ib_cb_t) ib_trx_read_only }; @@ -656,6 +744,37 @@ ha_create_table_option innodb_table_option_list[]= HA_TOPTION_END }; +/******************************************************************//** +Function used to loop a thread (for debugging/instrumentation +purpose). */ +void +srv_debug_loop(void) +/*================*/ +{ + ibool set = TRUE; + + while (set) { + os_thread_yield(); + } +} + +/******************************************************************//** +Debug function used to read a MBR data */ +#ifdef UNIV_DEBUG +void +srv_mbr_debug(const byte* data) +{ + double a, b, c , d; + a = mach_double_read(data); + data += sizeof(double); + b = mach_double_read(data); + data += sizeof(double); + c = mach_double_read(data); + data += sizeof(double); + d = mach_double_read(data); + ut_ad(a && b && c &&d); +} +#endif /*************************************************************//** Check whether valid argument given to innodb_ft_*_stopword_table. This function is registered as a callback with MySQL. @@ -773,10 +892,6 @@ innodb_tmpdir_validate( return(0); } -/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default -system clustered index when there is no primary key. */ -const char innobase_index_reserve_name[] = "GEN_CLUST_INDEX"; - /******************************************************************//** Maps a MySQL trx isolation level code to the InnoDB isolation level code @return InnoDB isolation level */ @@ -786,6 +901,15 @@ innobase_map_isolation_level( /*=========================*/ enum_tx_isolation iso); /*!< in: MySQL isolation level code */ +/** Gets field offset for a field in a table. +@param[in] table MySQL table object +@param[in] field MySQL field object +@return offset */ +static inline +uint +get_field_offset( + const TABLE* table, + const Field* field); /*************************************************************//** Check for a valid value of innobase_compression_algorithm. @return 0 for valid innodb_compression_algorithm. 
*/ @@ -821,8 +945,7 @@ static const char innobase_hton_name[]= "InnoDB"; static MYSQL_THDVAR_BOOL(support_xa, PLUGIN_VAR_OPCMDARG, "Enable InnoDB support for the XA two-phase commit", - /* check_func */ NULL, /* update_func */ NULL, - /* default */ TRUE); + NULL, NULL, TRUE); static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG, "Enable InnoDB locking in LOCK TABLES", @@ -831,7 +954,7 @@ static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG, static MYSQL_THDVAR_BOOL(strict_mode, PLUGIN_VAR_OPCMDARG, "Use strict mode when evaluating create options.", - NULL, NULL, FALSE); + NULL, NULL, TRUE); static MYSQL_THDVAR_BOOL(ft_enable_stopword, PLUGIN_VAR_OPCMDARG, "Create FTS index with stopword.", @@ -847,6 +970,9 @@ static MYSQL_THDVAR_STR(ft_user_stopword_table, "User supplied stopword table name, effective in the session level.", innodb_stopword_table_validate, NULL, NULL); +/* JAN: TODO: MySQL 5.7 */ +#define SHOW_SCOPE_GLOBAL 0 + static MYSQL_THDVAR_STR(tmpdir, PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_MEMALLOC, "Directory for temporary non-tablespace files.", @@ -857,6 +983,8 @@ static SHOW_VAR innodb_status_variables[]= { (char*) &export_vars.innodb_buffer_pool_dump_status, SHOW_CHAR}, {"buffer_pool_load_status", (char*) &export_vars.innodb_buffer_pool_load_status, SHOW_CHAR}, + {"buffer_pool_resize_status", + (char*) &export_vars.innodb_buffer_pool_resize_status, SHOW_CHAR}, {"buffer_pool_pages_data", (char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG}, {"buffer_pool_bytes_data", @@ -911,8 +1039,6 @@ static SHOW_VAR innodb_status_variables[]= { (char*) &export_vars.innodb_dblwr_pages_written, SHOW_LONG}, {"dblwr_writes", (char*) &export_vars.innodb_dblwr_writes, SHOW_LONG}, - {"have_atomic_builtins", - (char*) &export_vars.innodb_have_atomic_builtins, SHOW_BOOL}, {"log_waits", (char*) &export_vars.innodb_log_waits, SHOW_LONG}, {"log_write_requests", @@ -972,6 +1098,8 @@ static SHOW_VAR innodb_status_variables[]= { (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG}, {"purge_view_trx_id_age", (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG}, + {"ahi_drop_lookups", + (char*) &export_vars.innodb_ahi_drop_lookups, SHOW_LONG}, #endif /* UNIV_DEBUG */ /* Status variables for page compression */ {"page_compression_saved", @@ -1078,7 +1206,6 @@ static SHOW_VAR innodb_status_variables[]= { {"scrub_background_page_split_failures_unknown", (char*) &export_vars.innodb_scrub_page_split_failures_unknown, SHOW_LONG}, - {NullS, NullS, SHOW_LONG} }; @@ -1101,12 +1228,22 @@ free_share( /*****************************************************************//** Frees a possible InnoDB trx object associated with the current THD. -@return 0 or error number */ +@return 0 or error number */ static int innobase_close_connection( /*======================*/ - handlerton* hton, /*!< in/out: Innodb handlerton */ + handlerton* hton, /*!< in/out: InnoDB handlerton */ + THD* thd); /*!< in: MySQL thread handle for + which to close the connection */ + +/*****************************************************************//** +Cancel any pending lock request associated with the current THD. 
*/ +static +void +innobase_kill_connection( +/*=====================*/ + handlerton* hton, /*!< in/out: InnoDB handlerton */ THD* thd); /*!< in: MySQL thread handle for which to close the connection */ @@ -1116,12 +1253,12 @@ static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all); /*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. -@return 0 */ +@return 0 */ static int innobase_commit( /*============*/ - handlerton* hton, /*!< in/out: Innodb handlerton */ + handlerton* hton, /*!< in/out: InnoDB handlerton */ THD* thd, /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ @@ -1137,7 +1274,7 @@ static int innobase_rollback( /*==============*/ - handlerton* hton, /*!< in/out: Innodb handlerton */ + handlerton* hton, /*!< in/out: InnoDB handlerton */ THD* thd, /*!< in: handle to the MySQL thread of the user whose transaction should be rolled back */ @@ -1174,7 +1311,7 @@ innobase_rollback_to_savepoint_can_release_mdl( /*****************************************************************//** Sets a transaction savepoint. -@return always 0, that is, always succeeds */ +@return always 0, that is, always succeeds */ static int innobase_savepoint( @@ -1193,7 +1330,7 @@ static int innobase_release_savepoint( /*=======================*/ - handlerton* hton, /*!< in/out: handlerton for Innodb */ + handlerton* hton, /*!< in/out: handlerton for InnoDB */ THD* thd, /*!< in: handle to the MySQL thread of the user whose transaction's savepoint should be released */ @@ -1207,7 +1344,7 @@ static handler* innobase_create_handler( /*====================*/ - handlerton* hton, /*!< in/out: handlerton for Innodb */ + handlerton* hton, /*!< in/out: handlerton for InnoDB */ TABLE_SHARE* table, MEM_ROOT* mem_root); @@ -1237,7 +1374,7 @@ innobase_undo_logs_init_default_max(); /************************************************************//** Validate the file format name and return its corresponding id. -@return valid file format id */ +@return valid file format id */ static uint innobase_file_format_name_lookup( @@ -1247,7 +1384,7 @@ innobase_file_format_name_lookup( /************************************************************//** Validate the file format check config parameters, as a side effect it sets the srv_max_file_format_at_startup variable. -@return the format_id if valid config value, otherwise, return -1 */ +@return the format_id if valid config value, otherwise, return -1 */ static int innobase_file_format_validate_and_set( @@ -1256,7 +1393,7 @@ innobase_file_format_validate_and_set( /*******************************************************************//** This function is used to prepare an X/Open XA distributed transaction. -@return 0 or error number */ +@return 0 or error number */ static int innobase_xa_prepare( @@ -1270,7 +1407,7 @@ innobase_xa_prepare( ended */ /*******************************************************************//** This function is used to recover X/Open XA distributed transactions. 
-@return number of prepared transactions stored in xid_list */ +@return number of prepared transactions stored in xid_list */ static int innobase_xa_recover( @@ -1281,7 +1418,7 @@ innobase_xa_recover( /*******************************************************************//** This function is used to commit one X/Open XA distributed transaction which is in the prepared state -@return 0 or error number */ +@return 0 or error number */ static int innobase_commit_by_xid( @@ -1292,7 +1429,7 @@ innobase_commit_by_xid( /*******************************************************************//** This function is used to rollback one X/Open XA distributed transaction which is in the prepared state -@return 0 or error number */ +@return 0 or error number */ static int innobase_rollback_by_xid( @@ -1300,62 +1437,37 @@ innobase_rollback_by_xid( handlerton* hton, /*!< in: InnoDB handlerton */ XID* xid); /*!< in: X/Open XA transaction identification */ -/*******************************************************************//** -Create a consistent view for a cursor based on current transaction -which is created if the corresponding MySQL thread still lacks one. -This consistent view is then used inside of MySQL when accessing records -using a cursor. -@return pointer to cursor view or NULL */ -static -void* -innobase_create_cursor_view( -/*========================*/ - handlerton* hton, /*!< in: innobase hton */ - THD* thd); /*!< in: user thread handle */ -/*******************************************************************//** -Set the given consistent cursor view to a transaction which is created -if the corresponding MySQL thread still lacks one. If the given -consistent cursor view is NULL global read view of a transaction is -restored to a transaction read view. */ -static -void -innobase_set_cursor_view( -/*=====================*/ - handlerton* hton, /*!< in: handlerton of Innodb */ - THD* thd, /*!< in: user thread handle */ - void* curview); /*!< in: Consistent cursor view to - be set */ -/*******************************************************************//** -Close the given consistent cursor view of a transaction and restore -global read view to a transaction read view. Transaction is created if the -corresponding MySQL thread still lacks one. */ + +/** This API handles CREATE, ALTER & DROP commands for InnoDB tablespaces. +@param[in] hton Handlerton of InnoDB +@param[in] thd Connection +@param[in] alter_info Describies the command and how to do it. +@return MySQL error code*/ static -void -innobase_close_cursor_view( -/*=======================*/ - handlerton* hton, /*!< in: handlerton of Innodb */ - THD* thd, /*!< in: user thread handle */ - void* curview); /*!< in: Consistent read view to be - closed */ -/*****************************************************************//** -Removes all tables in the named database inside InnoDB. */ +int +innobase_alter_tablespace( + handlerton* hton, + THD* thd, + st_alter_tablespace* alter_info); + +/** Remove all tables in the named database inside InnoDB. +@param[in] hton handlerton from InnoDB +@param[in] path Database path; Inside InnoDB the name of the last +directory in the path is used as the database name. +For example, in 'mysql/data/test' the database name is 'test'. 
*/ static void innobase_drop_database( -/*===================*/ - handlerton* hton, /*!< in: handlerton of Innodb */ - char* path); /*!< in: database path; inside InnoDB - the name of the last directory in - the path is used as the database name: - for example, in 'mysql/data/test' the - database name is 'test' */ + handlerton* hton, + char* path); + /*******************************************************************//** Closes an InnoDB database. */ static int innobase_end( /*=========*/ - handlerton* hton, /* in: Innodb handlerton */ + handlerton* hton, /* in: InnoDB handlerton */ ha_panic_function type); /*****************************************************************//** @@ -1363,25 +1475,48 @@ Creates an InnoDB transaction struct for the thd if it does not yet have one. Starts a new InnoDB transaction if a transaction is not yet started. And assigns a new snapshot for a consistent read if the transaction does not yet have one. -@return 0 */ +@return 0 */ static int innobase_start_trx_and_assign_read_view( /*====================================*/ - handlerton* hton, /* in: Innodb handlerton */ + handlerton* hton, /* in: InnoDB handlerton */ THD* thd); /* in: MySQL thread handle of the user for whom the transaction should be committed */ -/****************************************************************//** -Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes -the logs, and the name of this function should be innobase_checkpoint. -@return TRUE if error */ +/** Flush InnoDB redo logs to the file system. +@param[in] hton InnoDB handlerton +@param[in] binlog_group_flush true if we got invoked by binlog +group commit during flush stage, false in other cases. +@return false */ +static +bool +innobase_flush_logs( + handlerton* hton, /*!< in: InnoDB handlerton */ + bool binlog_group_flush); +/** Flush InnoDB redo logs to the file system. +@param[in] hton InnoDB handlerton +@param[in] binlog_group_flush true if we got invoked by binlog +group commit during flush stage, false in other cases. +@return false */ static bool innobase_flush_logs( -/*================*/ handlerton* hton); /*!< in: InnoDB handlerton */ +/** Flush InnoDB redo logs to the file system. +@param[in] hton InnoDB handlerton +@param[in] binlog_group_flush true if we got invoked by binlog +group commit during flush stage, false in other cases. +@return false */ +static +bool +innobase_flush_logs( + handlerton* hton) +{ + return innobase_flush_logs(hton, true); +} + /************************************************************************//** Implements the SHOW ENGINE INNODB STATUS command. Sends the output of the InnoDB Monitor to the client. @@ -1407,14 +1542,6 @@ innobase_show_status( stat_print_fn* stat_print, enum ha_stat_type stat_type); -/*****************************************************************//** -Commits a transaction in an InnoDB database. */ -static -void -innobase_commit_low( -/*================*/ - trx_t* trx); /*!< in: transaction handle */ - /****************************************************************//** Parse and enable InnoDB monitor counters during server startup. User can enable monitor counters/groups by specifying @@ -1426,23 +1553,52 @@ innodb_enable_monitor_at_startup( /*=============================*/ char* str); /*!< in: monitor counter enable list */ -/********************************************************************* -Normalizes a table name string. A normalized name consists of the -database name catenated to '/' and table name. An example: -test/mytable. 
On Windows normalization puts both the database name and the -table name always to lower case if "set_lower_case" is set to TRUE. */ +/** Fill handlerton based INFORMATION_SCHEMA tables. +@param[in] (unused) Handle to the handlerton structure +@param[in] thd Thread/connection descriptor +@param[in,out] tables Information Schema tables to fill +@param[in] (unused) Intended for conditional pushdown +@param[in] idx Table id that indicates which I_S table to fill +@return Operation status */ +static +int +innobase_fill_i_s_table( + handlerton*, + THD* thd, + TABLE_LIST* tables, + Item*, + enum_schema_tables idx) +{ + int ret = 0; + + if (idx == SCH_FILES) { + ret = i_s_files_table_fill(thd, tables); + } + + return(ret); +} + +/** Store doc_id value into FTS_DOC_ID field +@param[in,out] tbl table containing FULLTEXT index +@param[in] doc_id FTS_DOC_ID value */ +static void -normalize_table_name_low( -/*=====================*/ - char* norm_name, /* out: normalized name as a - null-terminated string */ - const char* name, /* in: table name string */ - ibool set_lower_case); /* in: TRUE if we want to set - name to lower case */ +innobase_fts_store_docid( + TABLE* tbl, + ulonglong doc_id) +{ + my_bitmap_map* old_map + = dbug_tmp_use_all_columns(tbl, tbl->write_set); + + /* JAN: TODO: MySQL 5.7 */ + //tbl->fts_doc_id_field->store(static_cast(doc_id), true); + + dbug_tmp_restore_column_map(tbl->write_set, old_map); +} /*************************************************************//** Check for a valid value of innobase_commit_concurrency. -@return 0 for valid innodb_commit_concurrency */ +@return 0 for valid innodb_commit_concurrency */ static int innobase_commit_concurrency_validate( @@ -1482,20 +1638,40 @@ innobase_create_handler( TABLE_SHARE* table, MEM_ROOT* mem_root) { - return(new (mem_root) ha_innobase(hton, table)); -} - + /* If the table: + 1) have type InnoDB (not the generic partition handlerton) + 2) have partitioning defined + Then return the native partitioning handler ha_innopart + else return normal ha_innobase handler. */ + if (table + && table->db_type() == innodb_hton_ptr // 1) + && table->partition_info_str // 2) + && table->partition_info_str_len) { // 2) + /* JAN: TODO: MySQL 5.7 InnoDB Partitioning disabled + ha_innopart* file = new (mem_root) ha_innopart(hton, table); + if (file && file->init_partitioning(mem_root)) + { + delete file; + return(NULL); + } + return(file);*/ + return (NULL); + } + + return(new (mem_root) ha_innobase(hton, table)); +} + /* General functions */ -/*************************************************************//** -Check that a page_size is correct for InnoDB. If correct, set the -associated page_size_shift which is the power of 2 for this page size. -@return an associated page_size_shift if valid, 0 if invalid. */ +/** Check that a page_size is correct for InnoDB. +If correct, set the associated page_size_shift which is the power of 2 +for this page size. +@param[in] page_size Page Size to evaluate +@return an associated page_size_shift if valid, 0 if invalid. */ inline -int +ulong innodb_page_size_validate( -/*======================*/ - ulong page_size) /*!< in: Page Size to evaluate */ + ulong page_size) { ulong n; @@ -1504,7 +1680,7 @@ innodb_page_size_validate( for (n = UNIV_PAGE_SIZE_SHIFT_MIN; n <= UNIV_PAGE_SIZE_SHIFT_MAX; n++) { - if (page_size == (ulong) (1 << n)) { + if (page_size == static_cast(1 << n)) { DBUG_RETURN(n); } } @@ -1518,8 +1694,7 @@ server. 
Used in srv_conc_enter_innodb() to determine if the thread should be allowed to enter InnoDB - the replication thread is treated differently than other threads. Also used in srv_conc_force_exit_innodb(). -@return true if thd is the replication thread */ -UNIV_INTERN +@return true if thd is the replication thread */ ibool thd_is_replication_slave_thread( /*============================*/ @@ -1532,7 +1707,6 @@ thd_is_replication_slave_thread( Gets information on the durability property requested by thread. Used when writing either a prepare or commit record to the log buffer. @return the durability property. */ -UNIV_INTERN enum durability_properties thd_requested_durability( /*=====================*/ @@ -1543,9 +1717,8 @@ thd_requested_durability( /******************************************************************//** Returns true if transaction should be flagged as read-only. -@return true if the thd is marked as read-only */ -UNIV_INTERN -ibool +@return true if the thd is marked as read-only */ +bool thd_trx_is_read_only( /*=================*/ THD* thd) /*!< in: thread handle */ @@ -1553,11 +1726,65 @@ thd_trx_is_read_only( return(thd != 0 && thd_tx_is_read_only(thd)); } +/** +Check if the transaction can be rolled back +@param[in] requestor Session requesting the lock +@param[in] holder Session that holds the lock +@return the session that will be rolled back, null don't care */ + +THD* +thd_trx_arbitrate(THD* requestor, THD* holder) +{ + /* Non-user (thd==0) transactions by default can't rollback, in + practice DDL transactions should never rollback and that's because + they should never wait on table/record locks either */ + + ut_a(holder != NULL); + ut_a(holder != requestor); + + /* JAN: TODO: MySQL 5.7 + THD* victim = thd_tx_arbitrate(requestor, holder); + */ + THD* victim = NULL; + + ut_a(victim == NULL || victim == requestor || victim == holder); + + return(victim); +} + +/** +@param[in] thd Session to check +@return the priority */ + +int +thd_trx_priority(THD* thd) +{ + /* JAN: TODO: MySQL 5.7 + return(thd == NULL ? 0 : thd_tx_priority(thd)); + */ + return (0); +} + +#ifdef UNIV_DEBUG +/** +Returns true if transaction should be flagged as DD attachable transaction +@param[in] thd Thread handle +@return true if the thd is marked as read-only */ +bool +thd_trx_is_dd_trx(THD* thd) +{ + /* JAN: TODO: MySQL 5.7 + ha_table_flags() & HA_ATTACHABLE_TRX_COMPATIBLE + return(thd != NULL && thd_tx_is_dd_trx(thd)); + */ + return false; +} +#endif /* UNIV_DEBUG */ + /******************************************************************//** Check if the transaction is an auto-commit transaction. TRUE also implies that it is a SELECT (read-only) transaction. -@return true if the transaction is an auto commit read-only transaction. */ -UNIV_INTERN +@return true if the transaction is an auto commit read-only transaction. */ ibool thd_trx_is_auto_commit( /*===================*/ @@ -1570,19 +1797,44 @@ thd_trx_is_auto_commit( && thd_is_select(thd)); } +extern "C" time_t thd_start_time(const THD* thd); + /******************************************************************//** -Save some CPU by testing the value of srv_thread_concurrency in inline -functions. */ +Get the thread start time. +@return the thread start time in seconds since the epoch. */ +ulint +thd_start_time_in_secs( +/*===================*/ + THD* thd) /*!< in: thread handle, or NULL */ +{ + // FIXME: This function should be added to the server code. 
+ //return(thd_start_time(thd)); + return(ulint(ut_time())); +} + +/** Enter InnoDB engine after checking the max number of user threads +allowed, else the thread is put into sleep. +@param[in,out] prebuilt row prebuilt handler */ static inline void innobase_srv_conc_enter_innodb( -/*===========================*/ - trx_t* trx) /*!< in: transaction handle */ + row_prebuilt_t* prebuilt) { #ifdef WITH_WSREP - if (wsrep_on(trx->mysql_thd) && - wsrep_thd_is_BF(trx->mysql_thd, FALSE)) return; + if (wsrep_on(prebuilt->trx->mysql_thd) && + wsrep_thd_is_BF(prebuilt->trx->mysql_thd, FALSE)) { + return; + } #endif /* WITH_WSREP */ + + /* We rely on server to do external_lock(F_UNLCK) to reset the + srv_conc.n_active counter. Since there are no locks on instrinsic + tables, we should skip this for intrinsic temporary tables. */ + if (dict_table_is_intrinsic(prebuilt->table)) { + return; + } + + trx_t* trx = prebuilt->trx; if (srv_thread_concurrency) { if (trx->n_tickets_to_enter_innodb > 0) { @@ -1599,29 +1851,41 @@ innobase_srv_conc_enter_innodb( < srv_thread_concurrency, srv_replication_delay * 1000); - } else { - srv_conc_enter_innodb(trx); + } else { + srv_conc_enter_innodb(prebuilt); } } } -/******************************************************************//** -Note that the thread wants to leave InnoDB only if it doesn't have -any spare tickets. */ +/** Note that the thread wants to leave InnoDB only if it doesn't have +any spare tickets. +@param[in,out] m_prebuilt row prebuilt handler */ static inline void innobase_srv_conc_exit_innodb( -/*==========================*/ - trx_t* trx) /*!< in: transaction handle */ + row_prebuilt_t* prebuilt) { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch)); -#endif /* UNIV_SYNC_DEBUG */ #ifdef WITH_WSREP - if (wsrep_on(trx->mysql_thd) && - wsrep_thd_is_BF(trx->mysql_thd, FALSE)) return; + if (wsrep_on(prebuilt->trx->mysql_thd) && + wsrep_thd_is_BF(prebuilt->trx->mysql_thd, FALSE)) { + return; + } #endif /* WITH_WSREP */ + /* We rely on server to do external_lock(F_UNLCK) to reset the + srv_conc.n_active counter. Since there are no locks on instrinsic + tables, we should skip this for intrinsic temporary tab */ + if (dict_table_is_intrinsic(prebuilt->table)) { + return; + } + + trx_t* trx = prebuilt->trx; +#ifdef UNIV_DEBUG + btrsea_sync_check check(trx->has_search_latch); + + ut_ad(!sync_check_iterate(check)); +#endif /* UNIV_DEBUG */ + /* This is to avoid making an unnecessary function call. */ if (trx->declared_to_be_inside_innodb && trx->n_tickets_to_enter_innodb == 0) { @@ -1638,9 +1902,11 @@ innobase_srv_conc_force_exit_innodb( /*================================*/ trx_t* trx) /*!< in: transaction handle */ { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch)); -#endif /* UNIV_SYNC_DEBUG */ +#ifdef UNIV_DEBUG + btrsea_sync_check check(trx->has_search_latch); + + ut_ad(!sync_check_iterate(check)); +#endif /* UNIV_DEBUG */ /* This is to avoid making an unnecessary function call. */ if (trx->declared_to_be_inside_innodb) { @@ -1650,8 +1916,7 @@ innobase_srv_conc_force_exit_innodb( /******************************************************************//** Returns the NUL terminated value of glob_hostname. -@return pointer to glob_hostname. */ -UNIV_INTERN +@return pointer to glob_hostname. */ const char* server_get_hostname() /*=================*/ @@ -1664,8 +1929,7 @@ Returns true if the transaction this thread is processing has edited non-transactional tables. 
Used by the deadlock detector when deciding which transaction to rollback in case of a deadlock - we try to avoid rolling back transactions that have edited non-transactional tables. -@return true if non-transactional tables have been edited */ -UNIV_INTERN +@return true if non-transactional tables have been edited */ ibool thd_has_edited_nontrans_tables( /*===========================*/ @@ -1676,8 +1940,7 @@ thd_has_edited_nontrans_tables( /******************************************************************//** Returns true if the thread is executing a SELECT statement. -@return true if thd is executing SELECT */ -UNIV_INTERN +@return true if thd is executing SELECT */ ibool thd_is_select( /*==========*/ @@ -1689,8 +1952,7 @@ thd_is_select( /******************************************************************//** Returns true if the thread supports XA, global value of innodb_supports_xa if thd is NULL. -@return true if thd has XA support */ -UNIV_INTERN +@return true if thd has XA support */ ibool thd_supports_xa( /*============*/ @@ -1700,30 +1962,9 @@ thd_supports_xa( return(THDVAR(thd, support_xa)); } -/** Get the value of innodb_tmpdir. -@param[in] thd thread handle, or NULL to query - the global innodb_tmpdir. -@retval NULL if innodb_tmpdir="" */ -UNIV_INTERN -const char* -thd_innodb_tmpdir( - THD* thd) -{ -#ifdef UNIV_SYNC_DEBUG - ut_ad(!sync_thread_levels_nonempty_trx(false)); -#endif /* UNIV_SYNC_DEBUG */ - - const char* tmp_dir = THDVAR(thd, tmpdir); - if (tmp_dir != NULL && *tmp_dir == '\0') { - tmp_dir = NULL; - } - - return(tmp_dir); -} /******************************************************************//** Returns the lock wait timeout for the current connection. -@return the lock wait timeout, in seconds */ -UNIV_INTERN +@return the lock wait timeout, in seconds */ ulong thd_lock_wait_timeout( /*==================*/ @@ -1737,7 +1978,6 @@ thd_lock_wait_timeout( /******************************************************************//** Set the time waited for the lock for the current query. */ -UNIV_INTERN void thd_set_lock_wait_time( /*===================*/ @@ -1749,19 +1989,89 @@ thd_set_lock_wait_time( } } -/********************************************************************//** -Obtain the InnoDB transaction of a MySQL thread. -@return reference to transaction pointer */ +/** Obtain the private handler of InnoDB session specific data. +@param[in,out] thd MySQL thread handler. +@return reference to private handler */ +__attribute__((warn_unused_result)) +static +innodb_session_t*& +thd_to_innodb_session( + THD* thd) +{ + innodb_session_t*& innodb_session = + *(innodb_session_t**) thd_ha_data(thd, innodb_hton_ptr); + + if (innodb_session != NULL) { + return(innodb_session); + } + + innodb_session = UT_NEW_NOKEY(innodb_session_t()); + + thd_set_ha_data(thd, innodb_hton_ptr, innodb_session); + + return(innodb_session); +} + +/** Obtain the InnoDB transaction of a MySQL thread. +@param[in,out] thd MySQL thread handler. +@return reference to transaction pointer */ MY_ATTRIBUTE((warn_unused_result, nonnull)) -static inline trx_t*& thd_to_trx( -/*=======*/ - THD* thd) /*!< in: MySQL thread */ + THD* thd) +{ + innodb_session_t*& innodb_session = thd_to_innodb_session(thd); + ut_ad(innodb_session != NULL); + + return(innodb_session->m_trx); +} + +/** Check if statement is of type INSERT .... SELECT that involves +use of intrinsic tables. +@param[in] thd thread handler +@return true if INSERT .... SELECT statement. 
*/ +static inline +bool +thd_is_ins_sel_stmt(THD* user_thd) +{ + /* If the session involves use of intrinsic table + and it is trying to fetch the result from non-temporary tables + it indicates "insert .... select" statement. For non-temporary + table this is verifed using the locked tables count but for + intrinsic table as external_lock is not invoked this count is + not updated. + + Why is this needed ? + Use of AHI is blocked if statement is insert .... select statement. */ + innodb_session_t* innodb_priv = thd_to_innodb_session(user_thd); + return(innodb_priv->count_register_table_handler() > 0 ? true : false); +} + +/** Add the table handler to thread cache. +Obtain the InnoDB transaction of a MySQL thread. +@param[in,out] table table handler +@param[in,out] heap heap for allocating system columns. +@param[in,out] thd MySQL thread handler */ +static inline +void +add_table_to_thread_cache( + dict_table_t* table, + mem_heap_t* heap, + THD* thd) { - return(*(trx_t**) thd_ha_data(thd, innodb_hton_ptr)); + dict_table_add_system_columns(table, heap); + + dict_table_set_big_rows(table); + + innodb_session_t*& priv = thd_to_innodb_session(thd); + priv->register_table_handler(table->name.m_name, table); } + #ifdef WITH_WSREP +/********************************************************************//** +Obtain the InnoDB transaction id of a MySQL thread. +@return transaction id */ +__attribute__((warn_unused_result, nonnull)) ulonglong thd_to_trx_id( /*=======*/ @@ -1775,8 +2085,8 @@ thd_to_trx_id( Call this function when mysqld passes control to the client. That is to avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more documentation, see handler.cc. -@return 0 */ -static +@return 0 */ +inline int innobase_release_temporary_latches( /*===============================*/ @@ -1804,7 +2114,7 @@ Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth time calls srv_active_wake_master_thread. This function should be used when a single database operation may introduce a small need for server utility activity, like checkpointing. */ -static inline +inline void innobase_active_small(void) /*=======================*/ @@ -1820,8 +2130,7 @@ innobase_active_small(void) Converts an InnoDB error code to a MySQL error code and also tells to MySQL about a possible transaction rollback inside InnoDB caused by a lock wait timeout or a deadlock. -@return MySQL error code */ -static +@return MySQL error code */ int convert_error_code_to_mysql( /*========================*/ @@ -1834,7 +2143,10 @@ convert_error_code_to_mysql( return(0); case DB_INTERRUPTED: - return(HA_ERR_ABORTED_BY_USER); + /* JAN: TODO: MySQL 5.7 + thd_set_kill_status(thd != NULL ? thd : current_thd); + return(HA_ERR_GENERIC);*/ + return(HA_ERR_ABORTED_BY_USER); case DB_FOREIGN_EXCEED_MAX_CASCADE: ut_ad(thd); @@ -1846,12 +2158,19 @@ convert_error_code_to_mysql( "depth of %d. 
Please " "drop extra constraints and try " "again", DICT_FK_MAX_RECURSIVE_LOAD); + return(-1);; /* unspecified error */ + /* JAN: TODO: MySQL 5.7 + my_error(ER_FK_DEPTH_EXCEEDED, MYF(0), FK_MAX_CASCADE_DEL); + return(HA_ERR_FK_DEPTH_EXCEEDED); + */ - /* fall through */ + case DB_CANT_CREATE_GEOMETRY_OBJECT: + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); + return(HA_ERR_NULL_IN_SPATIAL); case DB_ERROR: default: - return(-1); /* unspecified error */ + return(HA_ERR_GENERIC); /* unspecified error */ case DB_DUPLICATE_KEY: /* Be cautious with returning this error, since @@ -1874,13 +2193,14 @@ convert_error_code_to_mysql( case DB_RECORD_NOT_FOUND: return(HA_ERR_NO_ACTIVE_RECORD); + case DB_FORCED_ABORT: case DB_DEADLOCK: /* Since we rolled back the whole transaction, we must tell it also to MySQL so that MySQL knows to empty the cached binlog for this transaction */ - if (thd) { - thd_mark_transaction_to_rollback(thd, TRUE); + if (thd != NULL) { + thd_mark_transaction_to_rollback(thd, 1); } return(HA_ERR_LOCK_DEADLOCK); @@ -1892,7 +2212,7 @@ convert_error_code_to_mysql( if (thd) { thd_mark_transaction_to_rollback( - thd, (bool) row_rollback_on_timeout); + thd, (int) row_rollback_on_timeout); } return(HA_ERR_LOCK_WAIT_TIMEOUT); @@ -1920,10 +2240,10 @@ convert_error_code_to_mysql( case DB_OUT_OF_FILE_SPACE: return(HA_ERR_RECORD_FILE_FULL); - case DB_TEMP_FILE_WRITE_FAILURE: + case DB_TEMP_FILE_WRITE_FAIL: my_error(ER_GET_ERRMSG, MYF(0), - DB_TEMP_FILE_WRITE_FAILURE, - ut_strerr(DB_TEMP_FILE_WRITE_FAILURE), + DB_TEMP_FILE_WRITE_FAIL, + ut_strerr(DB_TEMP_FILE_WRITE_FAIL), "InnoDB"); return(HA_ERR_INTERNAL_ERROR); @@ -1933,7 +2253,6 @@ convert_error_code_to_mysql( case DB_TABLE_IS_BEING_USED: return(HA_ERR_WRONG_COMMAND); - case DB_TABLESPACE_DELETED: case DB_TABLE_NOT_FOUND: return(HA_ERR_NO_SUCH_TABLE); @@ -1941,34 +2260,35 @@ convert_error_code_to_mysql( return(HA_ERR_DECRYPTION_FAILED); case DB_TABLESPACE_NOT_FOUND: + /* JAN: TODO: MySQL 5.7 + return(HA_ERR_TABLESPACE_MISSING); + */ return(HA_ERR_NO_SUCH_TABLE); case DB_TOO_BIG_RECORD: { /* If prefix is true then a 768-byte prefix is stored - locally for BLOB fields. Refer to dict_table_get_format() */ + locally for BLOB fields. Refer to dict_table_get_format(). + We limit max record size to 16k for 64k page size. */ bool prefix = (dict_tf_get_format(flags) == UNIV_FORMAT_A); my_printf_error(ER_TOO_BIG_ROWSIZE, - "Row size too large (> %lu). Changing some columns " - "to TEXT or BLOB %smay help. In current row " - "format, BLOB prefix of %d bytes is stored inline.", + "Row size too large (> %lu). Changing some columns" + " to TEXT or BLOB %smay help. In current row" + " format, BLOB prefix of %d bytes is stored inline.", MYF(0), - page_get_free_space_of_empty(flags & + srv_page_size == UNIV_PAGE_SIZE_MAX + ? REC_MAX_DATA_SIZE - 1 + : page_get_free_space_of_empty(flags & DICT_TF_COMPACT) / 2, - prefix ? "or using ROW_FORMAT=DYNAMIC " - "or ROW_FORMAT=COMPRESSED ": "", - prefix ? DICT_MAX_FIXED_COL_LEN : 0); + prefix + ? "or using ROW_FORMAT=DYNAMIC or" + " ROW_FORMAT=COMPRESSED " + : "", + prefix + ? DICT_MAX_FIXED_COL_LEN + : 0); return(HA_ERR_TO_BIG_ROW); } - - case DB_TOO_BIG_FOR_REDO: - my_printf_error(ER_TOO_BIG_ROWSIZE, "%s" , MYF(0), - "The size of BLOB/TEXT data inserted" - " in one transaction is greater than" - " 10% of redo log size. 
Increase the" - " redo log size using innodb_log_file_size."); - return(HA_ERR_TO_BIG_ROW); - case DB_TOO_BIG_INDEX_COL: my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0), DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags)); @@ -1983,7 +2303,7 @@ convert_error_code_to_mysql( cached binlog for this transaction */ if (thd) { - thd_mark_transaction_to_rollback(thd, TRUE); + thd_mark_transaction_to_rollback(thd, 1); } return(HA_ERR_LOCK_TABLE_FULL); @@ -2004,16 +2324,35 @@ convert_error_code_to_mysql( return(HA_ERR_OUT_OF_MEM); case DB_TABLESPACE_EXISTS: return(HA_ERR_TABLESPACE_EXISTS); + case DB_TABLESPACE_DELETED: + /* JAN: TODO: MySQL 5.7 + return(HA_ERR_TABLESPACE_MISSING); + */ + return(HA_ERR_NO_SUCH_TABLE); case DB_IDENTIFIER_TOO_LONG: return(HA_ERR_INTERNAL_ERROR); + case DB_TABLE_CORRUPT: + /* JAN: TODO: MySQL 5.7 + return(HA_ERR_TABLE_CORRUPT); + */ + return(HA_ERR_INTERNAL_ERROR); case DB_FTS_TOO_MANY_WORDS_IN_PHRASE: return(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE); + case DB_WRONG_FILE_NAME: + /* JAN: TODO: MySQL 5.7 + return(HA_ERR_WRONG_FILE_NAME); + */ + return(HA_ERR_INTERNAL_ERROR); + case DB_COMPUTE_VALUE_FAILED: + /* JAN: TODO: MySQL 5.7 + return(HA_ERR_COMPUTE_FAILED); + */ + return(HA_ERR_INTERNAL_ERROR); } } /*************************************************************//** Prints info of a THD object (== user session thread) to the given file. */ -UNIV_INTERN void innobase_mysql_print_thd( /*=====================*/ @@ -2032,7 +2371,6 @@ innobase_mysql_print_thd( /******************************************************************//** Get the error message format string. @return the format string or 0 if not found. */ -UNIV_INTERN const char* innobase_get_err_msg( /*=================*/ @@ -2043,7 +2381,6 @@ innobase_get_err_msg( /******************************************************************//** Get the variable length bounds of the given character set. */ -UNIV_INTERN void innobase_get_cset_width( /*====================*/ @@ -2069,8 +2406,7 @@ innobase_get_cset_width( /* Fix bug#46256: allow tables to be dropped if the collation is not found, but issue a warning. */ - if ((global_system_variables.log_warnings) - && (cset != 0)){ + if (cset != 0) { sql_print_warning( "Unknown collation #%lu.", cset); @@ -2086,14 +2422,13 @@ innobase_get_cset_width( /******************************************************************//** Converts an identifier to a table name. */ -UNIV_INTERN void innobase_convert_from_table_id( /*===========================*/ - struct charset_info_st* cs, /*!< in: the 'from' character set */ - char* to, /*!< out: converted identifier */ - const char* from, /*!< in: identifier to convert */ - ulint len) /*!< in: length of 'to', in bytes */ + CHARSET_INFO* cs, /*!< in: the 'from' character set */ + char* to, /*!< out: converted identifier */ + const char* from, /*!< in: identifier to convert */ + ulint len) /*!< in: length of 'to', in bytes */ { uint errors; @@ -2103,7 +2438,6 @@ innobase_convert_from_table_id( /********************************************************************** Check if the length of the identifier exceeds the maximum allowed. return true when length of identifier is too long. */ -UNIV_INTERN my_bool innobase_check_identifier_length( /*=============================*/ @@ -2127,14 +2461,13 @@ innobase_check_identifier_length( /******************************************************************//** Converts an identifier to UTF-8. 
*/ -UNIV_INTERN void innobase_convert_from_id( /*=====================*/ - struct charset_info_st* cs, /*!< in: the 'from' character set */ - char* to, /*!< out: converted identifier */ - const char* from, /*!< in: identifier to convert */ - ulint len) /*!< in: length of 'to', in bytes */ + CHARSET_INFO* cs, /*!< in: the 'from' character set */ + char* to, /*!< out: converted identifier */ + const char* from, /*!< in: identifier to convert */ + ulint len) /*!< in: length of 'to', in bytes */ { uint errors; @@ -2143,8 +2476,7 @@ innobase_convert_from_id( /******************************************************************//** Compares NUL-terminated UTF-8 strings case insensitively. -@return 0 if a=b, <0 if a1 if a>b */ -UNIV_INTERN +@return 0 if a=b, <0 if a1 if a>b */ int innobase_strcasecmp( /*================*/ @@ -2168,7 +2500,6 @@ innobase_strcasecmp( Compares NUL-terminated UTF-8 strings case insensitively. The second string contains wildcards. @return 0 if a match is found, 1 if not */ -UNIV_INTERN int innobase_wildcasecmp( /*=================*/ @@ -2178,14 +2509,12 @@ innobase_wildcasecmp( return(wild_case_compare(system_charset_info, a, b)); } -/******************************************************************//** -Strip dir name from a full path name and return only the file name +/** Strip dir name from a full path name and return only the file name +@param[in] path_name full path name @return file name or "null" if no file name */ -UNIV_INTERN const char* innobase_basename( -/*==============*/ - const char* path_name) /*!< in: full path name */ + const char* path_name) { const char* name = base_name(path_name); @@ -2194,7 +2523,6 @@ innobase_basename( /******************************************************************//** Makes all characters in a NUL-terminated UTF-8 string lower case. */ -UNIV_INTERN void innobase_casedn_str( /*================*/ @@ -2205,9 +2533,8 @@ innobase_casedn_str( /**********************************************************************//** Determines the connection character set. -@return connection character set */ -UNIV_INTERN -struct charset_info_st* +@return connection character set */ +CHARSET_INFO* innobase_get_charset( /*=================*/ THD* mysql_thd) /*!< in: MySQL thread handle */ @@ -2215,21 +2542,62 @@ innobase_get_charset( return(thd_charset(mysql_thd)); } -/**********************************************************************//** -Determines the current SQL statement. -@return SQL statement string */ -UNIV_INTERN +/** Determines the current SQL statement. +Thread unsafe, can only be called from the thread owning the THD. +@param[in] thd MySQL thread handle +@param[out] length Length of the SQL statement +@return SQL statement string */ const char* -innobase_get_stmt( +innobase_get_stmt_unsafe( +/*==============*/ + THD* thd, + size_t* length) +{ + LEX_STRING* stmt; + const char* query=NULL; + + stmt = thd_query_string(thd); + + if (stmt && stmt->str) { + *length = stmt->length; + query = stmt->str; + } else { + *length = 0; + } + + return(query); +} + +/** Determines the current SQL statement. +Thread safe, can be called from any thread as the string is copied +into the provided buffer. 
+@param[in] thd MySQL thread handle +@param[out] buf Buffer containing SQL statement +@param[in] buflen Length of provided buffer +@return Length of the SQL statement */ +size_t +innobase_get_stmt_safe( /*==============*/ - THD* thd, /*!< in: MySQL thread handle */ - size_t* length) /*!< out: length of the SQL statement */ + THD* thd, + char* buf, + size_t buflen) { LEX_STRING* stmt; + size_t length=0; + + ut_ad(buflen > 1); stmt = thd_query_string(thd); - *length = stmt->length; - return(stmt->str); + + if (stmt && stmt->str) { + length = stmt->length > buflen ? buflen : stmt->length; + memcpy(buf, stmt->str, length-1); + buf[length]='\0'; + } else { + buf[0]='\0'; + } + + return (length); } /**********************************************************************//** @@ -2237,7 +2605,6 @@ Get the current setting of the tdc_size global parameter. We do a dirty read because for one there is no synchronization object and secondly there is little harm in doing so even if we get a torn read. @return value of tdc_size */ -UNIV_INTERN ulint innobase_get_table_cache_size(void) /*===============================*/ @@ -2250,8 +2617,7 @@ Get the current setting of the lower_case_table_names global parameter from mysqld.cc. We do a dirty read because for one there is no synchronization object and secondly there is little harm in doing so even if we get a torn read. -@return value of lower_case_table_names */ -UNIV_INTERN +@return value of lower_case_table_names */ ulint innobase_get_lower_case_table_names(void) /*=====================================*/ @@ -2259,14 +2625,12 @@ innobase_get_lower_case_table_names(void) return(lower_case_table_names); } -/** Create a temporary file in the location specified by the parameter -path. If the path is null, then it will be created in tmpdir. -@param[in] path location for creating temporary file -@return temporary file descriptor, or < 0 on error */ -UNIV_INTERN +/*********************************************************************//** +Creates a temporary file. +@return temporary file descriptor, or < 0 on error */ int -innobase_mysql_tmpfile( - const char* path) +innobase_mysql_tmpfile(void) +/*========================*/ { #ifdef WITH_INNODB_DISALLOW_WRITES os_event_wait(srv_allow_writes_event); @@ -2279,11 +2643,7 @@ innobase_mysql_tmpfile( return(-1); ); - if (path == NULL) { - fd = mysql_tmpfile("ib"); - } else { - fd = mysql_tmpfile_path(path, "ib"); - } + fd = mysql_tmpfile("ib"); if (fd >= 0) { /* Copy the file descriptor, so that the additional resources @@ -2318,11 +2678,14 @@ innobase_mysql_tmpfile( fd2 = dup(fd); #endif if (fd2 < 0) { + char errbuf[MYSYS_STRERROR_SIZE]; DBUG_PRINT("error",("Got error %d on dup",fd2)); - my_errno=errno; + set_my_errno(errno); + my_strerror(errbuf, sizeof(errbuf), my_errno); my_error(EE_OUT_OF_FILERESOURCES, MYF(ME_BELL+ME_WAITTANG), - "ib*", my_errno); + "ib*", my_errno, + errbuf); } my_close(fd, MYF(MY_WME)); } @@ -2331,8 +2694,7 @@ innobase_mysql_tmpfile( /*********************************************************************//** Wrapper around MySQL's copy_and_convert function. -@return number of bytes copied to 'to' */ -UNIV_INTERN +@return number of bytes copied to 'to' */ ulint innobase_convert_string( /*====================*/ @@ -2361,8 +2723,7 @@ Not more than "buf_size" bytes are written to "buf". The result is always NUL-terminated (provided buf_size > 0) and the number of bytes that were written to "buf" is returned (including the terminating NUL). 
-@return number of bytes that were written */ -UNIV_INTERN +@return number of bytes that were written */ ulint innobase_raw_format( /*================*/ @@ -2391,6 +2752,60 @@ innobase_raw_format( return(ut_str_sql_format(buf_tmp, buf_tmp_used, buf, buf_size)); } +/** Check if the string is "empty" or "none". +@param[in] algorithm Compression algorithm to check +@return true if no algorithm requested */ +bool +Compression::is_none(const char* algorithm) +{ + /* NULL is the same as NONE */ + if (algorithm == NULL || innobase_strcasecmp(algorithm, "none") == 0) { + return(true); + } + + return(false); +} + +/** Check for supported COMPRESS := (ZLIB | LZ4 | NONE) supported values +@param[in] name Name of the compression algorithm +@param[out] compression The compression algorithm +@return DB_SUCCESS or DB_UNSUPPORTED */ +dberr_t +Compression::check( + const char* algorithm, + Compression* compression) +{ + if (is_none(algorithm)) { + + compression->m_type = NONE; + + } else if (innobase_strcasecmp(algorithm, "zlib") == 0) { + + compression->m_type = ZLIB; + + } else if (innobase_strcasecmp(algorithm, "lz4") == 0) { + + compression->m_type = LZ4; + + } else { + return(DB_UNSUPPORTED); + } + + return(DB_SUCCESS); +} + +/** Check for supported COMPRESS := (ZLIB | LZ4 | NONE) supported values +@param[in] name Name of the compression algorithm +@param[out] compression The compression algorithm +@return DB_SUCCESS or DB_UNSUPPORTED */ +dberr_t +Compression::validate(const char* algorithm) +{ + Compression compression; + + return(check(algorithm, &compression)); +} + /*********************************************************************//** Compute the next autoinc value. @@ -2408,8 +2823,7 @@ values we want to reserve for multi-value inserts e.g., innobase_next_autoinc() will be called with increment set to 3 where autoinc_lock_mode != TRADITIONAL because we want to reserve 3 values for the multi-value INSERT above. -@return the next value */ -UNIV_INTERN +@return the next value */ ulonglong innobase_next_autoinc( /*==================*/ @@ -2502,43 +2916,100 @@ innobase_next_autoinc( return(next_value); } -/*********************************************************************//** -Initializes some fields in an InnoDB transaction object. */ -static -void -innobase_trx_init( -/*==============*/ - THD* thd, /*!< in: user thread handle */ - trx_t* trx) /*!< in/out: InnoDB transaction handle */ +/********************************************************************//** +Reset the autoinc value in the table. +@return DB_SUCCESS if all went well else error code */ +UNIV_INTERN +dberr_t +ha_innobase::innobase_reset_autoinc( +/*================================*/ + ulonglong autoinc) /*!< in: value to store */ { - DBUG_ENTER("innobase_trx_init"); - DBUG_ASSERT(thd == trx->mysql_thd); - - trx->check_foreigns = !thd_test_options( - thd, OPTION_NO_FOREIGN_KEY_CHECKS); + dberr_t error; - trx->check_unique_secondary = !thd_test_options( - thd, OPTION_RELAXED_UNIQUE_CHECKS); + error = innobase_lock_autoinc(); - DBUG_VOID_RETURN; -} + if (error == DB_SUCCESS) { -/*********************************************************************//** -Allocates an InnoDB transaction for a MySQL handler object for DML. 
-@return InnoDB transaction handle */ -UNIV_INTERN -trx_t* -innobase_trx_allocate( -/*==================*/ - THD* thd) /*!< in: user thread handle */ -{ - trx_t* trx; + dict_table_autoinc_initialize(m_prebuilt->table, autoinc); - DBUG_ENTER("innobase_trx_allocate"); - DBUG_ASSERT(thd != NULL); - DBUG_ASSERT(EQ_CURRENT_THD(thd)); + dict_table_autoinc_unlock(m_prebuilt->table); + } - trx = trx_allocate_for_mysql(); + return(error); +} + +/*******************************************************************//** +Reset the auto-increment counter to the given value, i.e. the next row +inserted will get the given value. This is called e.g. after TRUNCATE +is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is +returned by storage engines that don't support this operation. +@return 0 or error code */ +UNIV_INTERN +int +ha_innobase::reset_auto_increment( +/*==============================*/ + ulonglong value) /*!< in: new value for table autoinc */ +{ + DBUG_ENTER("ha_innobase::reset_auto_increment"); + + dberr_t error; + + update_thd(ha_thd()); + + error = row_lock_table_autoinc_for_mysql(m_prebuilt); + + if (error != DB_SUCCESS) { + DBUG_RETURN(convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd)); + } + + /* The next value can never be 0. */ + if (value == 0) { + value = 1; + } + + innobase_reset_autoinc(value); + + DBUG_RETURN(0); +} + +/*********************************************************************//** +Initializes some fields in an InnoDB transaction object. */ +static +void +innobase_trx_init( +/*==============*/ + THD* thd, /*!< in: user thread handle */ + trx_t* trx) /*!< in/out: InnoDB transaction handle */ +{ + DBUG_ENTER("innobase_trx_init"); + DBUG_ASSERT(thd == trx->mysql_thd); + + trx->check_foreigns = !thd_test_options( + thd, OPTION_NO_FOREIGN_KEY_CHECKS); + + trx->check_unique_secondary = !thd_test_options( + thd, OPTION_RELAXED_UNIQUE_CHECKS); + + DBUG_VOID_RETURN; +} + +/*********************************************************************//** +Allocates an InnoDB transaction for a MySQL handler object for DML. +@return InnoDB transaction handle */ +trx_t* +innobase_trx_allocate( +/*==================*/ + THD* thd) /*!< in: user thread handle */ +{ + trx_t* trx; + + DBUG_ENTER("innobase_trx_allocate"); + DBUG_ASSERT(thd != NULL); + DBUG_ASSERT(EQ_CURRENT_THD(thd)); + + trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; @@ -2551,7 +3022,7 @@ innobase_trx_allocate( Gets the InnoDB transaction handle for a MySQL handler object, creates an InnoDB transaction struct if the corresponding MySQL thread struct still lacks one. -@return InnoDB transaction handle */ +@return InnoDB transaction handle */ static inline trx_t* check_trx_exists( @@ -2562,17 +3033,62 @@ check_trx_exists( if (trx == NULL) { trx = innobase_trx_allocate(thd); - thd_set_ha_data(thd, innodb_hton_ptr, trx); - } else if (UNIV_UNLIKELY(trx->magic_n != TRX_MAGIC_N)) { - mem_analyze_corruption(trx); - ut_error; - } + innodb_session_t* innodb_session = thd_to_innodb_session(thd); + innodb_session->m_trx = trx; - innobase_trx_init(thd, trx); + /* User trx can be forced to rollback, + so we unset the disable flag. */ + ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); + trx->in_innodb &= TRX_FORCE_ROLLBACK_MASK; + } else { + ut_a(trx->magic_n == TRX_MAGIC_N); + + innobase_trx_init(thd, trx); + } return(trx); } +/** InnoDB transaction object that is currently associated with THD is +replaced with that of the 2nd argument. 
The previous value is +returned through the 3rd argument's buffer, unless it's NULL. When +the buffer is not provided (value NULL) that should mean the caller +restores previously saved association so the current trx has to be +additionally freed from all association with MYSQL. + +@param[in,out] thd MySQL thread handle +@param[in] new_trx_arg replacement trx_t +@param[in,out] ptr_trx_arg pointer to a buffer to store old trx_t */ +static +void +innodb_replace_trx_in_thd( + THD* thd, + void* new_trx_arg, + void** ptr_trx_arg) +{ + trx_t*& trx = thd_to_trx(thd); + + ut_ad(new_trx_arg == NULL + || (((trx_t*) new_trx_arg)->mysql_thd == thd + && !((trx_t*) new_trx_arg)->is_recovered)); + + if (ptr_trx_arg) { + *ptr_trx_arg = trx; + + ut_ad(trx == NULL + || (trx->mysql_thd == thd && !trx->is_recovered)); + + } else if (trx->state == TRX_STATE_NOT_STARTED) { + ut_ad(thd == trx->mysql_thd); + trx_free_for_mysql(trx); + } else { + ut_ad(thd == trx->mysql_thd); + ut_ad(trx_state_eq(trx, TRX_STATE_PREPARED)); + trx_disconnect_prepared(trx); + } + trx = static_cast(new_trx_arg); +} + /*********************************************************************//** Note that a transaction has been registered with MySQL. @return true if transaction is registered with MySQL 2PC coordinator */ @@ -2618,7 +3134,7 @@ trx_deregister_from_2pc( trx_t* trx) /* in: transaction */ { trx->is_registered = 0; - trx->active_commit_ordered = 0; + trx->active_commit_ordered = 0; } /*********************************************************************//** @@ -2632,24 +3148,11 @@ trx_is_active_commit_ordered( return(trx->active_commit_ordered == 1); } -/*********************************************************************//** -Check if transaction is started. -@reutrn true if transaction is in state started */ -static -bool -trx_is_started( -/*===========*/ - trx_t* trx) /* in: transaction */ -{ - return(trx->state != TRX_STATE_NOT_STARTED); -} - /*********************************************************************//** Copy table flags from MySQL's HA_CREATE_INFO into an InnoDB table object. Those flags are stored in .frm file and end up in the MySQL table object, but are frequently used inside InnoDB so we keep their copies into the InnoDB table object. */ -UNIV_INTERN void innobase_copy_frm_flags_from_create_info( /*=====================================*/ @@ -2685,7 +3188,6 @@ Copy table flags from MySQL's TABLE_SHARE into an InnoDB table object. Those flags are stored in .frm file and end up in the MySQL table object, but are frequently used inside InnoDB so we keep their copies into the InnoDB table object. */ -UNIV_INTERN void innobase_copy_frm_flags_from_table_share( /*=====================================*/ @@ -2718,31 +3220,50 @@ innobase_copy_frm_flags_from_table_share( /*********************************************************************//** Construct ha_innobase handler. */ -UNIV_INTERN + ha_innobase::ha_innobase( /*=====================*/ handlerton* hton, TABLE_SHARE* table_arg) :handler(hton, table_arg), - int_table_flags(HA_REC_NOT_IN_SEQ | - HA_NULL_IN_KEY | HA_CAN_VIRTUAL_COLUMNS | - HA_CAN_INDEX_BLOBS | HA_CONCURRENT_OPTIMIZE | - HA_CAN_SQL_HANDLER | - HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | - HA_PRIMARY_KEY_IN_READ_INDEX | - HA_BINLOG_ROW_CAPABLE | - HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ | - HA_TABLE_SCAN_ON_INDEX | HA_CAN_FULLTEXT | - (srv_force_primary_key ? 
HA_REQUIRE_PRIMARY_KEY : 0 ) | - HA_CAN_FULLTEXT_EXT | HA_CAN_EXPORT), - start_of_scan(0), - num_write_row(0), - ha_partition_stats(NULL) + m_prebuilt(), + m_prebuilt_ptr(&m_prebuilt), + m_user_thd(), + m_int_table_flags(HA_REC_NOT_IN_SEQ + | HA_NULL_IN_KEY + | HA_CAN_VIRTUAL_COLUMNS + | HA_CAN_INDEX_BLOBS + | HA_CAN_SQL_HANDLER + | HA_PRIMARY_KEY_REQUIRED_FOR_POSITION + | HA_PRIMARY_KEY_IN_READ_INDEX + | HA_BINLOG_ROW_CAPABLE + | HA_CAN_GEOMETRY + | HA_PARTIAL_COLUMN_READ + | HA_TABLE_SCAN_ON_INDEX + | HA_CAN_FULLTEXT + | HA_CAN_FULLTEXT_EXT + /* JAN: TODO: MySQL 5.7 + | HA_CAN_FULLTEXT_HINTS + */ + | HA_CAN_EXPORT + | HA_CAN_RTREEKEYS + /* JAN: TODO: MySQL 5.7 + | HA_HAS_RECORDS + | HA_NO_READ_LOCAL_LOCK + | HA_GENERATED_COLUMNS + | HA_ATTACHABLE_TRX_COMPATIBLE + | HA_CAN_INDEX_VIRTUAL_GENERATED_COLUMN + */ + | (srv_force_primary_key ? HA_REQUIRE_PRIMARY_KEY : 0) + ), + m_start_of_scan(), + m_num_write_row(), + m_mysql_has_locked() {} /*********************************************************************//** Destruct ha_innobase handler. */ -UNIV_INTERN + ha_innobase::~ha_innobase() /*======================*/ { @@ -2751,38 +3272,46 @@ ha_innobase::~ha_innobase() /*********************************************************************//** Updates the user_thd field in a handle and also allocates a new InnoDB transaction handle if needed, and updates the transaction fields in the -prebuilt struct. */ -UNIV_INTERN inline +m_prebuilt struct. */ void ha_innobase::update_thd( /*====================*/ THD* thd) /*!< in: thd to use the handle */ { - trx_t* trx; - DBUG_ENTER("ha_innobase::update_thd"); DBUG_PRINT("ha_innobase::update_thd", ("user_thd: %p -> %p", - user_thd, thd)); + m_user_thd, thd)); /* The table should have been opened in ha_innobase::open(). */ - DBUG_ASSERT(prebuilt->table->n_ref_count > 0); + DBUG_ASSERT(m_prebuilt->table->n_ref_count > 0); - trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(thd); + + TrxInInnoDB trx_in_innodb(trx); - if (prebuilt->trx != trx) { + ut_ad(dict_table_is_intrinsic(m_prebuilt->table) + || trx_in_innodb.is_aborted() + || (trx->dict_operation_lock_mode == 0 + && trx->dict_operation == TRX_DICT_OP_NONE)); - row_update_prebuilt_trx(prebuilt, trx); + if (m_prebuilt->trx != trx) { + + row_update_prebuilt_trx(m_prebuilt, trx); } - user_thd = thd; + m_user_thd = thd; + + DBUG_ASSERT(m_prebuilt->trx->magic_n == TRX_MAGIC_N); + DBUG_ASSERT(m_prebuilt->trx == thd_to_trx(m_user_thd)); + DBUG_VOID_RETURN; } /*********************************************************************//** Updates the user_thd field in a handle and also allocates a new InnoDB transaction handle if needed, and updates the transaction fields in the -prebuilt struct. */ -UNIV_INTERN +m_prebuilt struct. */ + void ha_innobase::update_thd() /*=====================*/ @@ -2808,11 +3337,15 @@ innobase_register_trx( THD* thd, /* in: MySQL thd (connection) object */ trx_t* trx) /* in: transaction to register */ { + /* JAN: TODO: MySQL 5.7 + trans_register_ha(thd, FALSE, hton, &trx_id); + */ trans_register_ha(thd, FALSE, hton); if (!trx_is_registered_for_2pc(trx) && thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + //trans_register_ha(thd, TRUE, hton, &trx_id); trans_register_ha(thd, TRUE, hton); } @@ -2880,7 +3413,7 @@ Why a deadlock of threads is not possible: the query cache calls this function at the start of a SELECT processing. Then the calling thread cannot be holding any InnoDB semaphores. 
The calling thread is holding the query cache mutex, and this function will reserve the InnoDB trx_sys->mutex. -Thus, the 'rank' in sync0sync.h of the MySQL query cache mutex is above +Thus, the 'rank' in sync0mutex.h of the MySQL query cache mutex is above the InnoDB trx_sys->mutex. @return TRUE if permitted, FALSE if not; note that the value FALSE does not mean we should invalidate the query cache: invalidation is @@ -2894,28 +3427,26 @@ innobase_query_caching_of_table_permitted( retrieve it */ char* full_name, /*!< in: normalized path to the table */ uint full_name_len, /*!< in: length of the normalized path - to the table */ + to the table */ ulonglong *unused) /*!< unused for this engine */ { - ibool is_autocommit; - trx_t* trx; + bool is_autocommit; char norm_name[1000]; + trx_t* trx = check_trx_exists(thd); ut_a(full_name_len < 999); - trx = check_trx_exists(thd); - if (trx->isolation_level == TRX_ISO_SERIALIZABLE) { /* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every plain SELECT if AUTOCOMMIT is not on. */ - return((my_bool)FALSE); + return(static_cast<my_bool>(false)); } - if (UNIV_UNLIKELY(trx->has_search_latch)) { - sql_print_error("The calling thread is holding the adaptive " - "search, latch though calling " - "innobase_query_caching_of_table_permitted."); + if (trx->has_search_latch) { + sql_print_error("The calling thread is holding the adaptive" + " search, latch though calling" + " innobase_query_caching_of_table_permitted."); trx_print(stderr, trx, 1024); } @@ -2925,9 +3456,9 @@ innobase_query_caching_of_table_permitted( if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { - is_autocommit = TRUE; + is_autocommit = true; } else { - is_autocommit = FALSE; + is_autocommit = false; } @@ -2960,217 +3491,155 @@ innobase_query_caching_of_table_permitted( if (row_search_check_if_query_cache_permitted(trx, norm_name)) { - /* printf("Query cache for %s permitted\n", norm_name); */ - - return((my_bool)TRUE); + return(static_cast<my_bool>(true)); } - /* printf("Query cache for %s NOT permitted\n", norm_name); */ - - return((my_bool)FALSE); + return(static_cast<my_bool>(false)); } /*****************************************************************//** Invalidates the MySQL query cache for the table. */ -UNIV_INTERN void innobase_invalidate_query_cache( /*============================*/ trx_t* trx, /*!< in: transaction which modifies the table */ const char* full_name, /*!< in: concatenation of - database name, null char NUL, + database name, path separator, table name, null char NUL; NOTE that in Windows this is always in LOWER CASE! */ ulint full_name_len) /*!< in: full name length where also the null chars count */ { - /* Note that the sync0sync.h rank of the query cache mutex is just + /* Note that the sync0mutex.h rank of the query cache mutex is just above the InnoDB trx_sys_t->lock. The caller of this function must not have latches of a lower rank. */ -#ifdef HAVE_QUERY_CACHE - char qcache_key_name[2 * (NAME_LEN + 1)]; - size_t tabname_len; - size_t dbname_len; - - /* Construct the key("db-name\0table$name\0") for the query cache using - the path name("db@002dname\0table@0024name\0") of the table in its - canonical form.
*/ - dbname_len = filename_to_tablename(full_name, qcache_key_name, - sizeof(qcache_key_name)); - tabname_len = filename_to_tablename(full_name + strlen(full_name) + 1, - qcache_key_name + dbname_len + 1, - sizeof(qcache_key_name) - - dbname_len - 1); - /* Argument TRUE below means we are using transactions */ mysql_query_cache_invalidate4(trx->mysql_thd, - qcache_key_name, - (dbname_len + tabname_len + 2), + full_name, + (uint32) full_name_len, TRUE); -#endif } -/*****************************************************************//** -Convert an SQL identifier to the MySQL system_charset_info (UTF-8) -and quote it if needed. -@return pointer to the end of buf */ -static -char* -innobase_convert_identifier( -/*========================*/ - char* buf, /*!< out: buffer for converted identifier */ - ulint buflen, /*!< in: length of buf, in bytes */ - const char* id, /*!< in: identifier to convert */ - ulint idlen, /*!< in: length of id, in bytes */ - THD* thd, /*!< in: MySQL connection thread, or NULL */ - ibool file_id)/*!< in: TRUE=id is a table or database name; - FALSE=id is an UTF-8 string */ +/** Quote a standard SQL identifier like tablespace, index or column name. +@param[in] file output stream +@param[in] trx InnoDB transaction, or NULL +@param[in] id identifier to quote */ +void +innobase_quote_identifier( + FILE* file, + trx_t* trx, + const char* id) { - const char* s = id; - int q; - - if (file_id) { - - char nz[MAX_TABLE_NAME_LEN + 1]; - char nz2[MAX_TABLE_NAME_LEN + 1]; - - /* Decode the table name. The MySQL function expects - a NUL-terminated string. The input and output strings - buffers must not be shared. */ - ut_a(idlen <= MAX_TABLE_NAME_LEN); - memcpy(nz, id, idlen); - nz[idlen] = 0; - - s = nz2; - idlen = explain_filename(thd, nz, nz2, sizeof nz2, - EXPLAIN_PARTITIONS_AS_COMMENT); - goto no_quote; - } + const int q = trx != NULL && trx->mysql_thd != NULL + ? get_quote_char_for_identifier(trx->mysql_thd, id, strlen(id)) + : '`'; - /* See if the identifier needs to be quoted. */ - if (UNIV_UNLIKELY(!thd)) { - q = '"'; + if (q == EOF) { + fputs(id, file); } else { - q = get_quote_char_for_identifier(thd, s, (int) idlen); - } + putc(q, file); - if (q == EOF) { -no_quote: - if (UNIV_UNLIKELY(idlen > buflen)) { - idlen = buflen; + while (int c = *id++) { + if (c == q) { + putc(c, file); + } + putc(c, file); } - memcpy(buf, s, idlen); - return(buf + idlen); - } - /* Quote the identifier. */ - if (buflen < 2) { - return(buf); + putc(q, file); } +} - *buf++ = q; - buflen--; +/** Convert a table name to the MySQL system_charset_info (UTF-8) +and quote it. +@param[out] buf buffer for converted identifier +@param[in] buflen length of buf, in bytes +@param[in] id identifier to convert +@param[in] idlen length of id, in bytes +@param[in] thd MySQL connection thread, or NULL +@return pointer to the end of buf */ +char* +innobase_convert_identifier( + char* buf, + ulint buflen, + const char* id, + ulint idlen, + THD* thd) +{ + const char* s = id; - for (; idlen; idlen--) { - int c = *s++; - if (UNIV_UNLIKELY(c == q)) { - if (UNIV_UNLIKELY(buflen < 3)) { - break; - } + char nz[MAX_TABLE_NAME_LEN + 1]; + char nz2[MAX_TABLE_NAME_LEN + 1]; - *buf++ = c; - *buf++ = c; - buflen -= 2; - } else { - if (UNIV_UNLIKELY(buflen < 2)) { - break; - } + /* Decode the table name. The MySQL function expects + a NUL-terminated string. The input and output strings + buffers must not be shared. 
*/ + ut_a(idlen <= MAX_TABLE_NAME_LEN); + memcpy(nz, id, idlen); + nz[idlen] = 0; - *buf++ = c; - buflen--; - } + s = nz2; + idlen = explain_filename(thd, nz, nz2, sizeof nz2, + EXPLAIN_PARTITIONS_AS_COMMENT); + if (idlen > buflen) { + idlen = buflen; } - - *buf++ = q; - return(buf); + memcpy(buf, s, idlen); + return(buf + idlen); } /*****************************************************************//** -Convert a table or index name to the MySQL system_charset_info (UTF-8) -and quote it if needed. -@return pointer to the end of buf */ -UNIV_INTERN +Convert a table name to the MySQL system_charset_info (UTF-8). +@return pointer to the end of buf */ char* innobase_convert_name( /*==================*/ char* buf, /*!< out: buffer for converted identifier */ ulint buflen, /*!< in: length of buf, in bytes */ - const char* id, /*!< in: identifier to convert */ + const char* id, /*!< in: table name to convert */ ulint idlen, /*!< in: length of id, in bytes */ - THD* thd, /*!< in: MySQL connection thread, or NULL */ - ibool table_id)/*!< in: TRUE=id is a table or database name; - FALSE=id is an index name */ + THD* thd) /*!< in: MySQL connection thread, or NULL */ { char* s = buf; const char* bufend = buf + buflen; - if (table_id) { - const char* slash = (const char*) memchr(id, '/', idlen); - if (!slash) { - - goto no_db_name; - } + const char* slash = (const char*) memchr(id, '/', idlen); - /* Print the database name and table name separately. */ - s = innobase_convert_identifier(s, bufend - s, id, slash - id, - thd, TRUE); - if (UNIV_LIKELY(s < bufend)) { - *s++ = '.'; - s = innobase_convert_identifier(s, bufend - s, - slash + 1, idlen - - (slash - id) - 1, - thd, TRUE); - } - } else if (UNIV_UNLIKELY(*id == TEMP_INDEX_PREFIX)) { - /* Temporary index name (smart ALTER TABLE) */ - const char temp_index_suffix[]= "--temporary--"; + if (slash == NULL) { + return(innobase_convert_identifier( + buf, buflen, id, idlen, thd)); + } - s = innobase_convert_identifier(buf, buflen, id + 1, idlen - 1, - thd, FALSE); - if (s - buf + (sizeof temp_index_suffix - 1) < buflen) { - memcpy(s, temp_index_suffix, - sizeof temp_index_suffix - 1); - s += sizeof temp_index_suffix - 1; - } - } else { -no_db_name: - s = innobase_convert_identifier(buf, buflen, id, idlen, - thd, table_id); + /* Print the database name and table name separately. */ + s = innobase_convert_identifier(s, bufend - s, id, slash - id, thd); + if (s < bufend) { + *s++ = '.'; + s = innobase_convert_identifier(s, bufend - s, + slash + 1, idlen + - (slash - id) - 1, + thd); } return(s); } /*****************************************************************//** -A wrapper function of innobase_convert_name(), convert a table or -index name to the MySQL system_charset_info (UTF-8) and quote it if needed. -@return pointer to the end of buf */ -UNIV_INTERN +A wrapper function of innobase_convert_name(), convert a table name +to the MySQL system_charset_info (UTF-8) and quote it if needed. 
+@return pointer to the end of buf */ void innobase_format_name( /*==================*/ char* buf, /*!< out: buffer for converted identifier */ ulint buflen, /*!< in: length of buf, in bytes */ - const char* name, /*!< in: index or table name to format */ - ibool is_index_name) /*!< in: index name */ + const char* name) /*!< in: table name to format */ { const char* bufend; - bufend = innobase_convert_name(buf, buflen, name, strlen(name), - NULL, !is_index_name); + bufend = innobase_convert_name(buf, buflen, name, strlen(name), NULL); ut_ad((ulint) (bufend - buf) < buflen); @@ -3179,8 +3648,7 @@ innobase_format_name( /**********************************************************************//** Determines if the currently running transaction has been interrupted. -@return TRUE if interrupted */ -UNIV_INTERN +@return TRUE if interrupted */ ibool trx_is_interrupted( /*===============*/ @@ -3191,8 +3659,7 @@ trx_is_interrupted( /**********************************************************************//** Determines if the currently running transaction is in strict mode. -@return TRUE if strict */ -UNIV_INTERN +@return TRUE if strict */ ibool trx_is_strict( /*==========*/ @@ -3202,104 +3669,217 @@ trx_is_strict( } /**************************************************************//** -Resets some fields of a prebuilt struct. The template is used in fast +Resets some fields of a m_prebuilt struct. The template is used in fast retrieval of just those column values MySQL needs in its processing. */ -inline void ha_innobase::reset_template(void) /*=============================*/ { - ut_ad(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED); - ut_ad(prebuilt->magic_n2 == prebuilt->magic_n); - - /* Force table to be freed in close_thread_table(). */ - DBUG_EXECUTE_IF("free_table_in_fts_query", - if (prebuilt->in_fts_query) { - table->m_needs_reopen = true; - } - ); + ut_ad(m_prebuilt->magic_n == ROW_PREBUILT_ALLOCATED); + ut_ad(m_prebuilt->magic_n2 == m_prebuilt->magic_n); - prebuilt->keep_other_fields_on_keyread = 0; - prebuilt->read_just_key = 0; - prebuilt->in_fts_query = 0; + m_prebuilt->keep_other_fields_on_keyread = 0; + m_prebuilt->read_just_key = 0; + m_prebuilt->in_fts_query = 0; /* Reset index condition pushdown state. */ - if (prebuilt->idx_cond) { - prebuilt->idx_cond = NULL; - prebuilt->idx_cond_n_cols = 0; - /* Invalidate prebuilt->mysql_template + if (m_prebuilt->idx_cond) { + m_prebuilt->idx_cond = NULL; + m_prebuilt->idx_cond_n_cols = 0; + /* Invalidate m_prebuilt->mysql_template in ha_innobase::write_row(). */ - prebuilt->template_type = ROW_MYSQL_NO_TEMPLATE; + m_prebuilt->template_type = ROW_MYSQL_NO_TEMPLATE; } } /*****************************************************************//** Call this when you have opened a new table handle in HANDLER, before you -call index_read_idx() etc. Actually, we can let the cursor stay open even +call index_read_map() etc. Actually, we can let the cursor stay open even over a transaction commit! Then you should call this before every operation, fetch next etc. This function inits the necessary things even after a transaction commit. */ -UNIV_INTERN + void ha_innobase::init_table_handle_for_HANDLER(void) /*============================================*/ { /* If current thd does not yet have a trx struct, create one. - If the current handle does not yet have a prebuilt struct, create - one. Update the trx pointers in the prebuilt struct. Normally + If the current handle does not yet have a m_prebuilt struct, create + one. 
Update the trx pointers in the m_prebuilt struct. Normally this operation is done in external_lock. */ update_thd(ha_thd()); - /* Initialize the prebuilt struct much like it would be inited in + /* Initialize the m_prebuilt struct much like it would be inited in external_lock */ - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(m_prebuilt->trx); - innobase_srv_conc_force_exit_innodb(prebuilt->trx); + innobase_srv_conc_force_exit_innodb(m_prebuilt->trx); /* If the transaction is not started yet, start it */ - trx_start_if_not_started_xa(prebuilt->trx); + trx_start_if_not_started_xa(m_prebuilt->trx, false); + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); /* Assign a read view if the transaction does not have it yet */ - trx_assign_read_view(prebuilt->trx); + trx_assign_read_view(m_prebuilt->trx); - innobase_register_trx(ht, user_thd, prebuilt->trx); + innobase_register_trx(ht, m_user_thd, m_prebuilt->trx); /* We did the necessary inits in this function, no need to repeat them in row_search_for_mysql */ - prebuilt->sql_stat_start = FALSE; + m_prebuilt->sql_stat_start = FALSE; /* We let HANDLER always to do the reads as consistent reads, even if the trx isolation level would have been specified as SERIALIZABLE */ - prebuilt->select_lock_type = LOCK_NONE; - prebuilt->stored_select_lock_type = LOCK_NONE; + m_prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->stored_select_lock_type = LOCK_NONE; /* Always fetch all columns in the index record */ - prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; + m_prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; /* We want always to fetch all columns in the whole row? Or do we???? */ - prebuilt->used_in_HANDLER = TRUE; + m_prebuilt->used_in_HANDLER = TRUE; + reset_template(); } +/*********************************************************************//** +Free tablespace resources allocated. */ +static +void +innobase_space_shutdown() +/*=====================*/ +{ + DBUG_ENTER("innobase_space_shutdown"); + + srv_sys_space.shutdown(); + if (srv_tmp_space.get_sanity_check_status()) { + fil_space_close(srv_tmp_space.name()); + srv_tmp_space.delete_files(); + } + srv_tmp_space.shutdown(); + + DBUG_VOID_RETURN; +} + +/*********************************************************************//** +Free any resources that were allocated and return failure. +@return always return 1 */ +static +int +innobase_init_abort() +/*=================*/ +{ + DBUG_ENTER("innobase_init_abort"); + innobase_space_shutdown(); + DBUG_RETURN(1); +} + + +/*****************************************************************//** +This function checks if the given db.tablename is a system table +supported by Innodb and is used as an initializer for the data member +is_supported_system_table of InnoDB storage engine handlerton. +Currently we support only plugin, servers, help- and time_zone- related +system tables in InnoDB. Please don't add any SE-specific system tables here. + +@param db database name to check. +@param table_name table name to check. +@param is_sql_layer_system_table if the supplied db.table_name is a SQL + layer system table. 
+*/ + +static bool innobase_is_supported_system_table(const char *db, + const char *table_name, + bool is_sql_layer_system_table) +{ + static const char* supported_system_tables[]= { "help_topic", + "help_category", + "help_relation", + "help_keyword", + "plugin", + "servers", + "time_zone", + "time_zone_leap_second", + "time_zone_name", + "time_zone_transition", + "time_zone_transition_type", + (const char *)NULL }; + + if (!is_sql_layer_system_table) + return false; + + for (unsigned i= 0; supported_system_tables[i] != NULL; ++i) + { + if (!strcmp(table_name, supported_system_tables[i])) + return true; + } + + return false; +} + +/** Return partitioning flags. */ +static uint innobase_partition_flags() +{ + /* JAN: TODO: MYSQL 5.7 + return(HA_CAN_EXCHANGE_PARTITION | HA_CANNOT_PARTITION_FK); + */ + return (0); +} + +/** Deprecation message about InnoDB file format related parameters */ +#define DEPRECATED_FORMAT_PARAMETER(x) \ + "Using " x " is deprecated and the parameter" \ + " may be removed in future releases." \ + " See " REFMAN "innodb-file-format.html" + +/** Deprecation message about innodb_file_format */ +static const char* deprecated_file_format + = DEPRECATED_FORMAT_PARAMETER("innodb_file_format"); + +/** Deprecation message about innodb_large_prefix */ +static const char* deprecated_large_prefix + = DEPRECATED_FORMAT_PARAMETER("innodb_large_prefix"); + +/** Deprecation message about innodb_file_format_check */ +static const char* deprecated_file_format_check + = DEPRECATED_FORMAT_PARAMETER("innodb_file_format_check"); + +/** Deprecation message about innodb_file_format_max */ +static const char* deprecated_file_format_max + = DEPRECATED_FORMAT_PARAMETER("innodb_file_format_max"); + +/** Update log_checksum_algorithm_ptr with a pointer to the function +corresponding to whether checksums are enabled. +@param[in] check whether redo log block checksums are enabled */ +static +void +innodb_log_checksums_func_update(bool check) +{ + log_checksum_algorithm_ptr = check + ? log_block_calc_checksum_crc32 + : log_block_calc_checksum_none; +} + /****************************************************************//** Gives the file extension of an InnoDB single-table tablespace. */ static const char* ha_innobase_exts[] = { - ".ibd", - ".isl", - NullS + dot_ext[IBD], + dot_ext[ISL], + NullS }; /*********************************************************************//** Opens an InnoDB database. 
-@return 0 on success, error code on failure */ +@return 0 on success, 1 on failure */ static int innobase_init( @@ -3308,23 +3888,30 @@ innobase_init( { static char current_dir[3]; /*!< Set if using current lib */ int err; - bool ret; char *default_path; uint format_id; ulong num_pll_degree; + ulint srv_buf_pool_size_org = 0; + ulint fsp_flags =0; DBUG_ENTER("innobase_init"); - handlerton *innobase_hton= (handlerton*) p; + handlerton* innobase_hton= (handlerton*) p; innodb_hton_ptr = innobase_hton; innobase_hton->state = SHOW_OPTION_YES; - innobase_hton->db_type= DB_TYPE_INNODB; + innobase_hton->db_type = DB_TYPE_INNODB; innobase_hton->savepoint_offset = sizeof(trx_named_savept_t); innobase_hton->close_connection = innobase_close_connection; + // JAN: TODO: MySQL 5.7: + // innobase_hton->kill_connection = innobase_kill_connection; + // + innobase_hton->kill_query = innobase_kill_query; innobase_hton->savepoint_set = innobase_savepoint; innobase_hton->savepoint_rollback = innobase_rollback_to_savepoint; + innobase_hton->savepoint_rollback_can_release_mdl = innobase_rollback_to_savepoint_can_release_mdl; + innobase_hton->savepoint_release = innobase_release_savepoint; innobase_hton->prepare_ordered= NULL; innobase_hton->commit_ordered= innobase_commit_ordered; @@ -3339,34 +3926,49 @@ innobase_init( innobase_hton->set_cursor_read_view = innobase_set_cursor_view; innobase_hton->close_cursor_read_view = innobase_close_cursor_view; innobase_hton->create = innobase_create_handler; + innobase_hton->alter_tablespace = innobase_alter_tablespace; innobase_hton->drop_database = innobase_drop_database; innobase_hton->panic = innobase_end; + innobase_hton->partition_flags= innobase_partition_flags; innobase_hton->start_consistent_snapshot = innobase_start_trx_and_assign_read_view; innobase_hton->flush_logs = innobase_flush_logs; innobase_hton->show_status = innobase_show_status; + innobase_hton->fill_is_table = innobase_fill_i_s_table; innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; innobase_hton->release_temporary_latches = innobase_release_temporary_latches; + /* JAN: TODO: MySQL 5.7 + innobase_hton->replace_native_transaction_in_thd = + innodb_replace_trx_in_thd; + */ #ifdef WITH_WSREP innobase_hton->abort_transaction=wsrep_abort_transaction; innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint; innobase_hton->get_checkpoint=innobase_wsrep_get_checkpoint; innobase_hton->fake_trx_id=wsrep_fake_trx_id; #endif /* WITH_WSREP */ - innobase_hton->kill_query = innobase_kill_query; if (srv_file_per_table) innobase_hton->tablefile_extensions = ha_innobase_exts; + /* JAN: TODO: MySQL 5.7 + innobase_hton->data = &innodb_api_cb; + */ + innobase_hton->table_options = innodb_table_option_list; innodb_remember_check_sysvar_funcs(); + /* JAN: TODO: MySQL 5.7 + innobase_hton->is_supported_system_table= + innobase_is_supported_system_table; + */ + ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); #ifndef DBUG_OFF @@ -3385,8 +3987,7 @@ innobase_init( test_filename)) { sql_print_error("tablename encoding has been changed"); - - goto error; + DBUG_RETURN(innobase_init_abort()); } #endif /* DBUG_OFF */ @@ -3394,13 +3995,18 @@ innobase_init( if (sizeof(ulint) == 4) { if (innobase_buffer_pool_size > UINT_MAX32) { sql_print_error( - "innobase_buffer_pool_size can't be over 4GB" + "innodb_buffer_pool_size can't be over 4GB" " on 32-bit systems"); - goto error; + DBUG_RETURN(innobase_init_abort()); } } + os_file_set_umask(my_umask); + + /* Setup the memory alloc/free tracing 
mechanisms before calling + any functions that could possibly allocate memory. */ + ut_new_boot(); if (UNIV_PAGE_SIZE != UNIV_PAGE_SIZE_DEF) { fprintf(stderr, "InnoDB: Warning: innodb_page_size has been " @@ -3469,7 +4075,6 @@ innobase_init( goto error; } - os_innodb_umask = (ulint) my_umask; /* First calculate the default path for innodb_data_home_dir etc., in case the user has not given any value. @@ -3479,7 +4084,6 @@ innobase_init( if (mysqld_embedded) { default_path = mysql_real_data_home; - fil_path_to_mysql_datadir = mysql_real_data_home; } else { /* It's better to use current lib, to keep paths short */ current_dir[0] = FN_CURLIB; @@ -3490,100 +4094,130 @@ innobase_init( ut_a(default_path); + fil_path_to_mysql_datadir = default_path; + folder_mysql_datadir = fil_path_to_mysql_datadir; + /* Set InnoDB initialization parameters according to the values read from MySQL .cnf file */ - /*--------------- Data files -------------------------*/ - /* The default dir for data files is the datadir of MySQL */ - srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir : - default_path); + srv_data_home = innobase_data_home_dir + ? innobase_data_home_dir : default_path; + + /*--------------- Shared tablespaces -------------------------*/ - /* Set default InnoDB data file size to 12 MB and let it be - auto-extending. Thus users can use InnoDB in >= 4.0 without having - to specify any startup options. */ + /* Check that the value of system variable innodb_page_size was + set correctly. Its value was put into srv_page_size. If valid, + return the associated srv_page_size_shift. */ + srv_page_size_shift = innodb_page_size_validate(srv_page_size); + if (!srv_page_size_shift) { + sql_print_error("InnoDB: Invalid page size=%lu.\n", + srv_page_size); + DBUG_RETURN(innobase_init_abort()); + } + /* Set default InnoDB temp data file size to 12 MB and let it be + auto-extending. */ if (!innobase_data_file_path) { innobase_data_file_path = (char*) "ibdata1:12M:autoextend"; } - /* Since InnoDB edits the argument in the next call, we make another - copy of it: */ - - internal_innobase_data_file_path = my_strdup(innobase_data_file_path, - MYF(MY_FAE)); + /* This is the first time univ_page_size is used. + It was initialized to 16k pages before srv_page_size was set */ + univ_page_size.copy_from( + page_size_t(srv_page_size, srv_page_size, false)); - ret = (bool) srv_parse_data_file_paths_and_sizes( - internal_innobase_data_file_path); - if (ret == FALSE) { - sql_print_error( - "InnoDB: syntax error in innodb_data_file_path" - " or size specified is less than 1 megabyte"); -mem_free_and_error: - srv_free_paths_and_sizes(); - my_free(internal_innobase_data_file_path); - goto error; - } + srv_sys_space.set_space_id(TRX_SYS_SPACE); - /* -------------- All log files ---------------------------*/ + /* Create the filespace flags. 
*/ + fsp_flags = fsp_flags_init( + univ_page_size, false, false, false, false, false, 0, ATOMIC_WRITES_DEFAULT); + srv_sys_space.set_flags(fsp_flags); - /* The default dir for log files is the datadir of MySQL */ + srv_sys_space.set_name(reserved_system_space_name); + srv_sys_space.set_path(srv_data_home); - if (!srv_log_group_home_dir) { - srv_log_group_home_dir = default_path; + /* Supports raw devices */ + if (!srv_sys_space.parse_params(innobase_data_file_path, true)) { + DBUG_RETURN(innobase_init_abort()); } -#ifdef UNIV_LOG_ARCHIVE - /* Since innodb_log_arch_dir has no relevance under MySQL, - starting from 4.0.6 we always set it the same as - innodb_log_group_home_dir: */ + /* Set default InnoDB temp data file size to 12 MB and let it be + auto-extending. */ - innobase_log_arch_dir = innobase_log_group_home_dir; + if (!innobase_temp_data_file_path) { + innobase_temp_data_file_path = (char*) "ibtmp1:12M:autoextend"; + } - srv_arch_dir = innobase_log_arch_dir; -#endif /* UNIG_LOG_ARCHIVE */ + /* We set the temporary tablspace id later, after recovery. + The temp tablespace doesn't support raw devices. + Set the name and path. */ + srv_tmp_space.set_name(reserved_temporary_space_name); + srv_tmp_space.set_path(srv_data_home); - srv_normalize_path_for_win(srv_log_group_home_dir); + /* Create the filespace flags with the temp flag set. */ + fsp_flags = fsp_flags_init( + univ_page_size, false, false, false, true, false, 0, ATOMIC_WRITES_DEFAULT); + srv_tmp_space.set_flags(fsp_flags); - if (strchr(srv_log_group_home_dir, ';')) { - sql_print_error("syntax error in innodb_log_group_home_dir"); - goto mem_free_and_error; + if (!srv_tmp_space.parse_params(innobase_temp_data_file_path, false)) { + DBUG_RETURN(innobase_init_abort()); } - if (innobase_mirrored_log_groups == 1) { - sql_print_warning( - "innodb_mirrored_log_groups is an unimplemented " - "feature and the variable will be completely " - "removed in a future version."); + /* Perform all sanity check before we take action of deleting files*/ + if (srv_sys_space.intersection(&srv_tmp_space)) { + sql_print_error("%s and %s file names seem to be the same.", + srv_tmp_space.name(), srv_sys_space.name()); + DBUG_RETURN(innobase_init_abort()); } - if (innobase_mirrored_log_groups > 1) { - sql_print_error( - "innodb_mirrored_log_groups is an unimplemented feature and " - "the variable will be completely removed in a future version. " - "Using values other than 1 is not supported."); - goto mem_free_and_error; + /* ------------ UNDO tablespaces files ---------------------*/ + if (!srv_undo_dir) { + srv_undo_dir = default_path; } - if (innobase_mirrored_log_groups == 0) { - /* To throw a deprecation warning message when the option is - passed, the default was changed to '0' (as a workaround). 
Since - the only value accepted for this option is '1', reset it to 1 */ - innobase_mirrored_log_groups = 1; + os_normalize_path_for_win(srv_undo_dir); + + if (strchr(srv_undo_dir, ';')) { + sql_print_error("syntax error in innodb_undo_directory"); + DBUG_RETURN(innobase_init_abort()); } - /* Validate the file format by animal name */ - if (innobase_file_format_name != NULL) { + /* -------------- All log files ---------------------------*/ - format_id = innobase_file_format_name_lookup( - innobase_file_format_name); + /* The default dir for log files is the datadir of MySQL */ + + if (!srv_log_group_home_dir) { + srv_log_group_home_dir = default_path; + } + + os_normalize_path(srv_log_group_home_dir); + + if (strchr(srv_log_group_home_dir, ';')) { + sql_print_error("syntax error in innodb_log_group_home_dir"); + DBUG_RETURN(innobase_init_abort()); + } + + if (!innobase_large_prefix) { + ib::warn() << deprecated_large_prefix; + } + + if (innobase_file_format_name != innodb_file_format_default) { + ib::warn() << deprecated_file_format; + } + + /* Validate the file format by animal name */ + if (innobase_file_format_name != NULL) { + + format_id = innobase_file_format_name_lookup( + innobase_file_format_name); if (format_id > UNIV_FORMAT_MAX) { sql_print_error("InnoDB: wrong innodb_file_format."); - goto mem_free_and_error; + DBUG_RETURN(innobase_init_abort()); } } else { /* Set it to the default file format id. Though this @@ -3603,6 +4237,7 @@ mem_free_and_error: /* Check innobase_file_format_check variable */ if (!innobase_file_format_check) { + ib::warn() << deprecated_file_format_check; /* Set the value to disable checking. */ srv_max_file_format_at_startup = UNIV_FORMAT_MAX + 1; @@ -3613,20 +4248,24 @@ mem_free_and_error: srv_max_file_format_at_startup = UNIV_FORMAT_MIN; } + if (innobase_file_format_max != innodb_file_format_max_default) { + ib::warn() << deprecated_file_format_max; + } + /* Did the user specify a format name that we support? As a side effect it will update the variable srv_max_file_format_at_startup */ if (innobase_file_format_validate_and_set( innobase_file_format_max) < 0) { - sql_print_error("InnoDB: invalid " - "innodb_file_format_max value: " - "should be any value up to %s or its " - "equivalent numeric id", + sql_print_error("InnoDB: invalid" + " innodb_file_format_max value:" + " should be any value up to %s or its" + " equivalent numeric id", trx_sys_file_format_id_to_name( UNIV_FORMAT_MAX)); - goto mem_free_and_error; + DBUG_RETURN(innobase_init_abort()); } if (innobase_change_buffering) { @@ -3643,10 +4282,10 @@ mem_free_and_error: } } - sql_print_error("InnoDB: invalid value " - "innodb_change_buffering=%s", + sql_print_error("InnoDB: invalid value" + " innodb_change_buffering=%s", innobase_change_buffering); - goto mem_free_and_error; + DBUG_RETURN(innobase_init_abort()); } innobase_change_buffering_inited_ok: @@ -3674,8 +4313,8 @@ innobase_change_buffering_inited_ok: } else { /* The user has not set the value. We should set it based on innodb_io_capacity. 
*/ - srv_max_io_capacity = static_cast( - ut_max(2 * srv_io_capacity, 2000)); + srv_max_io_capacity = + ut_max(2 * srv_io_capacity, 2000UL); } } else if (srv_max_io_capacity < srv_io_capacity) { @@ -3693,7 +4332,7 @@ innobase_change_buffering_inited_ok: strlen(srv_buf_dump_filename), FALSE)) { sql_print_error("InnoDB: innodb_buffer_pool_filename" " cannot have colon (:) in the file name."); - goto mem_free_and_error; + DBUG_RETURN(innobase_init_abort()); } /* --------------------------------------------------*/ @@ -3702,89 +4341,50 @@ innobase_change_buffering_inited_ok: srv_log_file_size = (ib_uint64_t) innobase_log_file_size; -#ifdef UNIV_LOG_ARCHIVE - srv_log_archive_on = (ulint) innobase_log_archive; -#endif /* UNIV_LOG_ARCHIVE */ - - /* Check that the value of system variable innodb_page_size was - set correctly. Its value was put into srv_page_size. If valid, - return the associated srv_page_size_shift.*/ - srv_page_size_shift = innodb_page_size_validate(srv_page_size); - if (!srv_page_size_shift) { - sql_print_error("InnoDB: Invalid page size=%lu.\n", - srv_page_size); - goto mem_free_and_error; - } if (UNIV_PAGE_SIZE_DEF != srv_page_size) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: innodb-page-size has been changed" - " from the default value %d to %lu.\n", - UNIV_PAGE_SIZE_DEF, srv_page_size); + ib::warn() << "innodb-page-size has been changed from the" + " default value " << UNIV_PAGE_SIZE_DEF << " to " + << srv_page_size << "."; } - srv_log_buffer_size = (ulint) innobase_log_buffer_size; - - if (innobase_buffer_pool_instances == 0) { - innobase_buffer_pool_instances = 8; + if (srv_log_write_ahead_size > srv_page_size) { + srv_log_write_ahead_size = srv_page_size; + } else { + ulong srv_log_write_ahead_size_tmp = OS_FILE_LOG_BLOCK_SIZE; -#if defined(__WIN__) && !defined(_WIN64) - if (innobase_buffer_pool_size > 1331 * 1024 * 1024) { - innobase_buffer_pool_instances - = ut_min(MAX_BUFFER_POOLS, - (long) (innobase_buffer_pool_size - / (128 * 1024 * 1024))); + while (srv_log_write_ahead_size_tmp + < srv_log_write_ahead_size) { + srv_log_write_ahead_size_tmp + = srv_log_write_ahead_size_tmp * 2; + } + if (srv_log_write_ahead_size_tmp + != srv_log_write_ahead_size) { + srv_log_write_ahead_size + = srv_log_write_ahead_size_tmp / 2; } -#endif /* defined(__WIN__) && !defined(_WIN64) */ } - srv_buf_pool_size = (ulint) innobase_buffer_pool_size; - srv_buf_pool_instances = (ulint) innobase_buffer_pool_instances; - - srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size; - if (innobase_additional_mem_pool_size - != 8*1024*1024L /* the default */ ) { - - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: Using " - "innodb_additional_mem_pool_size is DEPRECATED. " - "This option may be removed in future releases, " - "together with the option innodb_use_sys_malloc " - "and with the InnoDB's internal memory " - "allocator.\n"); - } + srv_log_buffer_size = (ulint) innobase_log_buffer_size; - if (!srv_use_sys_malloc ) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: Setting " - "innodb_use_sys_malloc to FALSE is DEPRECATED. 
" - "This option may be removed in future releases, " - "together with the InnoDB's internal memory " - "allocator.\n"); - } + srv_buf_pool_size = (ulint) innobase_buffer_pool_size; - srv_n_file_io_threads = (ulint) innobase_file_io_threads; srv_n_read_io_threads = (ulint) innobase_read_io_threads; srv_n_write_io_threads = (ulint) innobase_write_io_threads; srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; if (!innobase_use_checksums) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: Setting " - "innodb_checksums to OFF is DEPRECATED. " - "This option may be removed in future releases. " - "You should set innodb_checksum_algorithm=NONE " - "instead.\n"); + ib::warn() << "Setting innodb_checksums to OFF is DEPRECATED." + " This option may be removed in future releases. You" + " should set innodb_checksum_algorithm=NONE instead."; srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_NONE; } -#ifdef HAVE_LARGE_PAGES - if ((os_use_large_pages = (ibool) my_use_large_pages)) { - os_large_page_size = (ulint) opt_large_page_size; + innodb_log_checksums_func_update(innodb_log_checksums); + +#ifdef HAVE_LINUX_LARGE_PAGES + if ((os_use_large_pages = my_use_large_pages)) { + os_large_page_size = opt_large_page_size; } #endif @@ -3792,13 +4392,10 @@ innobase_change_buffering_inited_ok: srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; if (innobase_locks_unsafe_for_binlog) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: Using " - "innodb_locks_unsafe_for_binlog is DEPRECATED. " - "This option may be removed in future releases. " - "Please use READ COMMITTED transaction isolation " - "level instead, see " REFMAN "set-transaction.html.\n"); + ib::warn() << "Using innodb_locks_unsafe_for_binlog is" + " DEPRECATED. This option may be removed in future" + " releases. Please use READ COMMITTED transaction" + " isolation level instead; " << SET_TRANSACTION_MSG; } if (innobase_open_files < 10) { @@ -3835,18 +4432,6 @@ innobase_change_buffering_inited_ok: data_mysql_default_charset_coll = (ulint) default_charset_info->number; - ut_a(DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL == - my_charset_latin1.number); - ut_a(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); - - /* Store the latin1_swedish_ci character ordering table to InnoDB. For - non-latin1_swedish_ci charsets we use the MySQL comparison functions, - and consequently we do not need to know the ordering internally in - InnoDB. */ - - ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci")); - srv_latin1_ordering = my_charset_latin1.sort_order; - innobase_commit_concurrency_init_default(); #ifdef HAVE_POSIX_FALLOCATE @@ -3887,7 +4472,7 @@ innobase_change_buffering_inited_ok: int count; count = array_elements(all_pthread_mutexes); - mysql_mutex_register("innodb", all_pthread_mutexes, count); + mysql_mutex_register("innodb", all_pthread_mutexes, count); # ifdef UNIV_PFS_MUTEX count = array_elements(all_innodb_mutexes); @@ -3913,6 +4498,20 @@ innobase_change_buffering_inited_ok: mysql_cond_register("innodb", all_innodb_conds, count); #endif /* HAVE_PSI_INTERFACE */ + /* Set buffer pool size to default for fast startup when mysqld is + run with --help --verbose options. 
*/ + /* JAN: TODO: MySQL 5.7 has opt_verbose + */ + if (opt_help + && srv_buf_pool_size > srv_buf_pool_def_size) { + ib::warn() << "Setting innodb_buf_pool_size to " + << srv_buf_pool_def_size << " for fast startup, " + << "when running with --help --verbose options."; + srv_buf_pool_size_org = srv_buf_pool_size; + srv_buf_pool_size = srv_buf_pool_def_size; + } + /* Since we in this module access directly the fields of a trx struct, and due to different headers and flags it might happen that ib_mutex_t has a different size in this module and in InnoDB @@ -3921,8 +4520,19 @@ innobase_change_buffering_inited_ok: err = innobase_start_or_create_for_mysql(); + if (srv_buf_pool_size_org != 0) { + /* Set the original value back to show in help. */ + srv_buf_pool_size_org = + buf_pool_size_align(srv_buf_pool_size_org); + innobase_buffer_pool_size = + static_cast<long long>(srv_buf_pool_size_org); + } else { + innobase_buffer_pool_size = + static_cast<long long>(srv_buf_pool_size); + } + if (err != DB_SUCCESS) { - goto mem_free_and_error; + DBUG_RETURN(innobase_init_abort()); } /* Adjust the innodb_undo_logs config object */ @@ -3931,7 +4541,7 @@ innobase_change_buffering_inited_ok: innobase_old_blocks_pct = static_cast<uint>( buf_LRU_old_ratio_update(innobase_old_blocks_pct, TRUE)); - ibuf_max_size_update(innobase_change_buffer_max_size); + ibuf_max_size_update(srv_change_buffer_max_size); innobase_open_tables = hash_create(200); mysql_mutex_init(innobase_share_mutex_key, @@ -3939,7 +4549,7 @@ innobase_change_buffering_inited_ok: MY_MUTEX_INIT_FAST); mysql_mutex_init(commit_cond_mutex_key, &commit_cond_m, MY_MUTEX_INIT_FAST); - mysql_cond_init(commit_cond_key, &commit_cond, NULL); + mysql_cond_init(commit_cond_key, &commit_cond, 0); mysql_mutex_init(pending_checkpoint_mutex_key, &pending_checkpoint_mutex, MY_MUTEX_INIT_FAST); @@ -3970,14 +4580,35 @@ innobase_change_buffering_inited_ok: /* Turn on monitor counters that are default on */ srv_mon_default_on(); - DBUG_RETURN(FALSE); + + /* Unit Tests */ +#ifdef UNIV_ENABLE_UNIT_TEST_GET_PARENT_DIR + unit_test_os_file_get_parent_dir(); +#endif /* UNIV_ENABLE_UNIT_TEST_GET_PARENT_DIR */ + +#ifdef UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH + test_make_filepath(); +#endif /*UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH */ + +#ifdef UNIV_ENABLE_DICT_STATS_TEST + test_dict_stats_all(); +#endif /*UNIV_ENABLE_DICT_STATS_TEST */ + +#ifdef UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT +# ifdef HAVE_UT_CHRONO_T + test_row_raw_format_int(); + # endif /* HAVE_UT_CHRONO_T */ +#endif /* UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT */ + + DBUG_RETURN(0); + error: - DBUG_RETURN(TRUE); + DBUG_RETURN(1); } /*******************************************************************//** Closes an InnoDB database. -@return TRUE if error */ +@return TRUE if error */ static int innobase_end( @@ -4006,11 +4637,13 @@ innobase_end( innodb_inited = 0; hash_table_free(innobase_open_tables); innobase_open_tables = NULL; + if (innobase_shutdown_for_mysql() != DB_SUCCESS) { err = 1; } - srv_free_paths_and_sizes(); - my_free(internal_innobase_data_file_path); + + innobase_space_shutdown(); + mysql_mutex_destroy(&innobase_share_mutex); mysql_mutex_destroy(&commit_cond_m); mysql_cond_destroy(&commit_cond); @@ -4020,31 +4653,46 @@ innobase_end( DBUG_RETURN(err); } -/****************************************************************//** -Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit flushes -the logs, and the name of this function should be innobase_checkpoint.
-@return TRUE if error */ +/** Flush InnoDB redo logs to the file system. +@param[in] hton InnoDB handlerton +@param[in] binlog_group_flush true if we got invoked by binlog +group commit during flush stage, false in other cases. +@return false */ static bool innobase_flush_logs( -/*================*/ - handlerton* hton) /*!< in/out: InnoDB handlerton */ + handlerton* hton, + bool binlog_group_flush) { - bool result = 0; - DBUG_ENTER("innobase_flush_logs"); DBUG_ASSERT(hton == innodb_hton_ptr); - if (!srv_read_only_mode) { - log_buffer_flush_to_disk(); + if (srv_read_only_mode) { + DBUG_RETURN(false); } - DBUG_RETURN(result); + /* If !binlog_group_flush, we got invoked by FLUSH LOGS or similar. + Else, we got invoked by binlog group commit during flush stage. */ + + if (binlog_group_flush && srv_flush_log_at_trx_commit == 0) { + /* innodb_flush_log_at_trx_commit=0 + (write and sync once per second). + Do not flush the redo log during binlog group commit. */ + DBUG_RETURN(false); + } + + /* Flush the redo log buffer to the redo log file. + Sync it to disc if we are in FLUSH LOGS, or if + innodb_flush_log_at_trx_commit=1 + (write and sync at each commit). */ + log_buffer_flush_to_disk(!binlog_group_flush + || srv_flush_log_at_trx_commit == 1); + + DBUG_RETURN(false); } /*****************************************************************//** Commits a transaction in an InnoDB database. */ -static void innobase_commit_low( /*================*/ @@ -4071,6 +4719,7 @@ innobase_commit_low( trx_commit_for_mysql(trx); } + trx->will_lock = 0; #ifdef WITH_WSREP if (wsrep_on(thd)) { thd_proc_info(thd, tmp); } #endif /* WITH_WSREP */ @@ -4081,36 +4730,31 @@ Creates an InnoDB transaction struct for the thd if it does not yet have one. Starts a new InnoDB transaction if a transaction is not yet started. And assigns a new snapshot for a consistent read if the transaction does not yet have one. -@return 0 */ +@return 0 */ static int innobase_start_trx_and_assign_read_view( /*====================================*/ - handlerton* hton, /*!< in: Innodb handlerton */ + handlerton* hton, /*!< in: InnoDB handlerton */ THD* thd) /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ { - trx_t* trx; - DBUG_ENTER("innobase_start_trx_and_assign_read_view"); DBUG_ASSERT(hton == innodb_hton_ptr); /* Create a new trx struct for thd, if it does not yet have one */ - trx = check_trx_exists(thd); - - /* This is just to play safe: release a possible FIFO ticket and - search latch. Since we can potentially reserve the trx_sys->mutex, - we have to release the search system latch first to obey the latching - order. */ + trx_t* trx = check_trx_exists(thd); - trx_search_latch_release_if_reserved(trx); + TrxInInnoDB trx_in_innodb(trx); innobase_srv_conc_force_exit_innodb(trx); - /* If the transaction is not started yet, start it */ + /* The transaction should not be active yet, start it */ + + ut_ad(!trx_is_started(trx)); - trx_start_if_not_started_xa(trx); + trx_start_if_not_started_xa(trx, false); /* Assign a read view if the transaction does not have it yet. 
Do this only if transaction is using REPEATABLE READ isolation @@ -4123,10 +4767,10 @@ innobase_start_trx_and_assign_read_view( } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_UNSUPPORTED, - "InnoDB: WITH CONSISTENT SNAPSHOT " - "was ignored because this phrase " - "can only be used with " - "REPEATABLE READ isolation level."); + "InnoDB: WITH CONSISTENT SNAPSHOT" + " was ignored because this phrase" + " can only be used with" + " REPEATABLE READ isolation level."); } /* Set the MySQL flag to mark that there is an active transaction */ @@ -4145,42 +4789,69 @@ innobase_commit_ordered_2( { DBUG_ENTER("innobase_commit_ordered_2"); - /* We need current binlog position for mysqlbackup to work. - Note, the position is current because commit_ordered is guaranteed - to be called in same sequenece as writing to binlog. */ + bool read_only = trx->read_only || trx->id == 0; -retry: - if (innobase_commit_concurrency > 0) { - mysql_mutex_lock(&commit_cond_m); - commit_threads++; + if (!read_only) { + + while (innobase_commit_concurrency > 0) { + + mysql_mutex_lock(&commit_cond_m); + + ++commit_threads; + + if (commit_threads + <= innobase_commit_concurrency) { + + mysql_mutex_unlock(&commit_cond_m); + break; + } + + --commit_threads; + + mysql_cond_wait(&commit_cond, &commit_cond_m); - if (commit_threads > innobase_commit_concurrency) { - commit_threads--; - mysql_cond_wait(&commit_cond, - &commit_cond_m); - mysql_mutex_unlock(&commit_cond_m); - goto retry; - } - else { mysql_mutex_unlock(&commit_cond_m); } + + /* The following call reads the binary log position of + the transaction being committed. + + Binary logging of other engines is not relevant to + InnoDB as all InnoDB requires is that committing + InnoDB transactions appear in the same order in the + MySQL binary log as they appear in InnoDB logs, which + is guaranteed by the server. + + If the binary log is not enabled, or the transaction + is not written to the binary log, the file name will + be a NULL pointer. */ + ulonglong pos; + + thd_binlog_pos(thd, &trx->mysql_log_file_name, &pos); + + trx->mysql_log_offset = static_cast(pos); + + /* Don't do write + flush right now. For group commit + to work we want to do the flush later. */ + trx->flush_log_later = true; } - unsigned long long pos; - thd_binlog_pos(thd, &trx->mysql_log_file_name, &pos); - trx->mysql_log_offset= static_cast(pos); - /* Don't do write + flush right now. For group commit - to work we want to do the flush in the innobase_commit() - method, which runs without holding any locks. */ - trx->flush_log_later = TRUE; innobase_commit_low(trx); - trx->flush_log_later = FALSE; - if (innobase_commit_concurrency > 0) { - mysql_mutex_lock(&commit_cond_m); - commit_threads--; - mysql_cond_signal(&commit_cond); - mysql_mutex_unlock(&commit_cond_m); + if (!read_only) { + trx->flush_log_later = false; + + if (innobase_commit_concurrency > 0) { + + mysql_mutex_lock(&commit_cond_m); + + ut_ad(commit_threads > 0); + --commit_threads; + + mysql_cond_signal(&commit_cond); + + mysql_mutex_unlock(&commit_cond_m); + } } DBUG_VOID_RETURN; @@ -4213,6 +4884,7 @@ innobase_commit_ordered( DBUG_ASSERT(hton == innodb_hton_ptr); trx = check_trx_exists(thd); + TrxInInnoDB trx_in_innodb(trx); /* Since we will reserve the kernel mutex, we must not be holding the search system latch, or we will disobey the latching order. 
But we @@ -4220,7 +4892,7 @@ innobase_commit_ordered( have an assert here.*/ ut_ad(!trx->has_search_latch); - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { + if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { /* We cannot throw error here; instead we will catch this error again in innobase_commit() and report it from there. */ DBUG_VOID_RETURN; @@ -4233,7 +4905,7 @@ innobase_commit_ordered( innobase_commit_ordered_2(trx, thd); - trx_set_active_commit_ordered(trx); + trx_set_active_commit_ordered(trx); DBUG_VOID_RETURN; } @@ -4241,12 +4913,13 @@ innobase_commit_ordered( /*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. -@return 0 */ +@return 0 or deadlock error if the transaction was aborted by another + higher priority transaction. */ static int innobase_commit( /*============*/ - handlerton* hton, /*!< in: Innodb handlerton */ + handlerton* hton, /*!< in: InnoDB handlerton */ THD* thd, /*!< in: MySQL thread handle of the user for whom the transaction should be committed */ @@ -4254,21 +4927,22 @@ innobase_commit( false - the current SQL statement ended */ { - trx_t* trx; - DBUG_ENTER("innobase_commit"); DBUG_ASSERT(hton == innodb_hton_ptr); DBUG_PRINT("trans", ("ending transaction")); - trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(thd); - /* Since we will reserve the trx_sys->mutex, we have to release - the search system latch first to obey the latching order. */ + TrxInInnoDB trx_in_innodb(trx); - if (trx->has_search_latch && !trx_is_active_commit_ordered(trx)) { - trx_search_latch_release_if_reserved(trx); + if (trx_in_innodb.is_aborted()) { + + DBUG_RETURN(innobase_rollback(hton, thd, commit_trx)); } + ut_ad(trx->dict_operation_lock_mode == 0); + ut_ad(trx->dict_operation == TRX_DICT_OP_NONE); + /* Transaction is deregistered only in a commit or a rollback. If it is deregistered we know there cannot be resources to be freed and we could return immediately. For the time being, we play safe @@ -4276,13 +4950,18 @@ innobase_commit( if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { - sql_print_error("Transaction not registered for MySQL 2PC, " - "but transaction is active"); + sql_print_error("Transaction not registered for MySQL 2PC," + " but transaction is active"); } + bool read_only = trx->read_only || trx->id == 0; + if (commit_trx || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + DBUG_EXECUTE_IF("crash_innodb_before_commit", + DBUG_SUICIDE();); + /* Run the fast part of commit if we did not already. */ if (!trx_is_active_commit_ordered(trx)) { innobase_commit_ordered_2(trx, thd); @@ -4296,10 +4975,11 @@ innobase_commit( this one, to allow then to group commit with us. */ thd_wakeup_subsequent_commits(thd, 0); - /* We did the first part already in innobase_commit_ordered(), - Now finish by doing a write + flush of logs. */ + /* Now do a write + flush of logs. 
*/ trx_commit_complete_for_mysql(trx); - trx_deregister_from_2pc(trx); + + trx_deregister_from_2pc(trx); + } else { /* We just mark the SQL statement ended and do not do a transaction commit */ @@ -4307,7 +4987,9 @@ innobase_commit( /* If we had reserved the auto-inc lock for some table in this SQL statement we release it now */ - lock_unlock_table_autoinc(trx); + if (!read_only) { + lock_unlock_table_autoinc(trx); + } /* Store the current undo_no of the transaction so that we know where to roll back if we have to roll back the next @@ -4316,7 +4998,8 @@ innobase_commit( trx_mark_sql_stat_end(trx); } - trx->n_autoinc_rows = 0; /* Reset the number AUTO-INC rows required */ + /* Reset the number AUTO-INC rows required */ + trx->n_autoinc_rows = 0; /* This is a statement level variable. */ trx->fts_next_doc_id = 0; @@ -4328,12 +5011,12 @@ innobase_commit( /*****************************************************************//** Rolls back a transaction or the latest SQL statement. -@return 0 or error number */ +@return 0 or error number */ static int innobase_rollback( /*==============*/ - handlerton* hton, /*!< in: Innodb handlerton */ + handlerton* hton, /*!< in: InnoDB handlerton */ THD* thd, /*!< in: handle to the MySQL thread of the user whose transaction should be rolled back */ @@ -4341,49 +5024,78 @@ innobase_rollback( transaction FALSE - rollback the current statement only */ { - dberr_t error; - trx_t* trx; - DBUG_ENTER("innobase_rollback"); DBUG_ASSERT(hton == innodb_hton_ptr); DBUG_PRINT("trans", ("aborting transaction")); - trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(thd); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. */ + TrxInInnoDB trx_in_innodb(trx); - trx_search_latch_release_if_reserved(trx); + ut_ad(trx_in_innodb.is_aborted() + || (trx->dict_operation_lock_mode == 0 + && trx->dict_operation == TRX_DICT_OP_NONE)); innobase_srv_conc_force_exit_innodb(trx); - trx->n_autoinc_rows = 0; /* Reset the number AUTO-INC rows required */ + /* Reset the number AUTO-INC rows required */ + + trx->n_autoinc_rows = 0; /* If we had reserved the auto-inc lock for some table (if we come here to roll back the latest SQL statement) we release it now before a possibly lengthy rollback */ - lock_unlock_table_autoinc(trx); + if (!trx_in_innodb.is_aborted()) { + lock_unlock_table_autoinc(trx); + } /* This is a statement level variable. 
*/ + trx->fts_next_doc_id = 0; + dberr_t error; + if (rollback_trx || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { error = trx_rollback_for_mysql(trx); + + if (trx->state == TRX_STATE_FORCED_ROLLBACK) { + + char buffer[1024]; + + /* JAN: TODO: MySQL 5.7 + ib::info() << "Forced rollback : " + << thd_security_context(thd, buffer, + sizeof(buffer), + 512); + */ + thd_get_error_context_description(thd, buffer, + sizeof(buffer), 512); + + error = DB_FORCED_ABORT; + + trx->state = TRX_STATE_NOT_STARTED; + } + trx_deregister_from_2pc(trx); + + } else if (trx_in_innodb.is_aborted()) { + + error = DB_FORCED_ABORT; + } else { + error = trx_rollback_last_sql_stat_for_mysql(trx); } - DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); + DBUG_RETURN(convert_error_code_to_mysql(error, 0, trx->mysql_thd)); } /*****************************************************************//** Rolls back a transaction -@return 0 or error number */ +@return 0 or error number */ static int innobase_rollback_trx( @@ -4406,14 +5118,17 @@ innobase_rollback_trx( /* If we had reserved the auto-inc lock for some table (if we come here to roll back the latest SQL statement) we release it now before a possibly lengthy rollback */ + if (!TrxInInnoDB::is_aborted(trx)) { + lock_unlock_table_autoinc(trx); + } - lock_unlock_table_autoinc(trx); - - if (!trx->read_only) { + if (trx_is_rseg_updated(trx)) { error = trx_rollback_for_mysql(trx); + } else { + trx->will_lock = 0; } - DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); + DBUG_RETURN(convert_error_code_to_mysql(error, 0, trx->mysql_thd)); } @@ -4565,35 +5280,31 @@ static int innobase_rollback_to_savepoint( /*===========================*/ - handlerton* hton, /*!< in: Innodb handlerton */ + handlerton* hton, /*!< in: InnoDB handlerton */ THD* thd, /*!< in: handle to the MySQL thread of the user whose transaction should be rolled back to savepoint */ void* savepoint) /*!< in: savepoint data */ { - ib_int64_t mysql_binlog_cache_pos; - dberr_t error; - trx_t* trx; - char name[64]; DBUG_ENTER("innobase_rollback_to_savepoint"); DBUG_ASSERT(hton == innodb_hton_ptr); - trx = check_trx_exists(thd); - - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. 
*/ + trx_t* trx = check_trx_exists(thd); - trx_search_latch_release_if_reserved(trx); + TrxInInnoDB trx_in_innodb(trx); innobase_srv_conc_force_exit_innodb(trx); /* TODO: use provided savepoint data area to store savepoint data */ + char name[64]; + longlong2str((ulint) savepoint, name, 36); - error = trx_rollback_to_savepoint_for_mysql( + int64_t mysql_binlog_cache_pos; + + dberr_t error = trx_rollback_to_savepoint_for_mysql( trx, name, &mysql_binlog_cache_pos); if (error == DB_SUCCESS && trx->fts_trx != NULL) { @@ -4618,17 +5329,17 @@ innobase_rollback_to_savepoint_can_release_mdl( of the user whose transaction should be rolled back to savepoint */ { - trx_t* trx; - DBUG_ENTER("innobase_rollback_to_savepoint_can_release_mdl"); DBUG_ASSERT(hton == innodb_hton_ptr); - trx = check_trx_exists(thd); - ut_ad(trx); + trx_t* trx = check_trx_exists(thd); + + TrxInInnoDB trx_in_innodb(trx); /* If transaction has not acquired any locks then it is safe - to release MDL after rollback to savepoint */ - if (!(UT_LIST_GET_LEN(trx->lock.trx_locks))) { + to release MDL after rollback to savepoint */ + if (UT_LIST_GET_LEN(trx->lock.trx_locks) == 0) { + DBUG_RETURN(true); } @@ -4643,7 +5354,7 @@ static int innobase_release_savepoint( /*=======================*/ - handlerton* hton, /*!< in: handlerton for Innodb */ + handlerton* hton, /*!< in: handlerton for InnoDB */ THD* thd, /*!< in: handle to the MySQL thread of the user whose transaction's savepoint should be released */ @@ -4658,9 +5369,7 @@ innobase_release_savepoint( trx = check_trx_exists(thd); - if (trx->state == TRX_STATE_NOT_STARTED) { - trx_start_if_not_started(trx); - } + TrxInInnoDB trx_in_innodb(trx); /* TODO: use provided savepoint data area to store savepoint data */ @@ -4677,18 +5386,15 @@ innobase_release_savepoint( /*****************************************************************//** Sets a transaction savepoint. -@return always 0, that is, always succeeds */ +@return always 0, that is, always succeeds */ static int innobase_savepoint( /*===============*/ - handlerton* hton, /*!< in: handle to the Innodb handlerton */ - THD* thd, /*!< in: handle to the MySQL thread */ - void* savepoint) /*!< in: savepoint data */ + handlerton* hton, /*!< in: handle to the InnoDB handlerton */ + THD* thd, /*!< in: handle to the MySQL thread */ + void* savepoint)/*!< in: savepoint data */ { - dberr_t error; - trx_t* trx; - DBUG_ENTER("innobase_savepoint"); DBUG_ASSERT(hton == innodb_hton_ptr); @@ -4696,13 +5402,9 @@ innobase_savepoint( (unless we are in sub-statement), so SQL layer ensures that this method is never called in such situation. */ - trx = check_trx_exists(thd); - - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. 
*/ + trx_t* trx = check_trx_exists(thd); - trx_search_latch_release_if_reserved(trx); + TrxInInnoDB trx_in_innodb(trx); innobase_srv_conc_force_exit_innodb(trx); @@ -4710,10 +5412,11 @@ innobase_savepoint( DBUG_ASSERT(trx_is_registered_for_2pc(trx)); /* TODO: use provided savepoint data area to store savepoint data */ - char name[64]; + char name[64]; + longlong2str((ulint) savepoint,name,36); - error = trx_savepoint_for_mysql(trx, name, (ib_int64_t)0); + dberr_t error = trx_savepoint_for_mysql(trx, name, 0); if (error == DB_SUCCESS && trx->fts_trx != NULL) { fts_savepoint_take(trx, trx->fts_trx, name); @@ -4724,7 +5427,7 @@ innobase_savepoint( /*****************************************************************//** Frees a possible InnoDB trx object associated with the current THD. -@return 0 or error number */ +@return 0 or error number */ static int innobase_close_connection( @@ -4733,53 +5436,65 @@ innobase_close_connection( THD* thd) /*!< in: handle to the MySQL thread of the user whose resources should be free'd */ { - trx_t* trx; DBUG_ENTER("innobase_close_connection"); DBUG_ASSERT(hton == innodb_hton_ptr); - trx = thd_to_trx(thd); - ut_a(trx); + trx_t* trx = thd_to_trx(thd); - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { + if (trx) { + + TrxInInnoDB trx_in_innodb(trx); - sql_print_error("Transaction not registered for MySQL 2PC, " + if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { + + sql_print_error("Transaction not registered for MySQL 2PC, " "but transaction is active"); - } + } - if (trx_is_started(trx) && global_system_variables.log_warnings) { + if (trx_is_started(trx) && global_system_variables.log_warnings) { - sql_print_warning( - "MySQL is closing a connection that has an active " - "InnoDB transaction. " TRX_ID_FMT " row modifications " - "will roll back.", - trx->undo_no); - } + sql_print_warning( + "MySQL is closing a connection that has an active " + "InnoDB transaction. " TRX_ID_FMT " row modifications " + "will roll back.", + trx->undo_no); + } - innobase_rollback_trx(trx); + innobase_rollback_trx(trx); - trx_free_for_mysql(trx); + trx_free_for_mysql(trx); + + UT_DELETE(thd_to_innodb_session(thd)); + + thd_to_innodb_session(thd) = NULL; + } DBUG_RETURN(0); } /*****************************************************************//** -Frees a possible InnoDB trx object associated with the current THD. -@return 0 or error number */ -UNIV_INTERN -int -innobase_close_thd( -/*===============*/ - THD* thd) /*!< in: handle to the MySQL thread of the user - whose resources should be free'd */ +Cancel any pending lock request associated with the current THD. */ +static +void +innobase_kill_connection( +/*======================*/ + handlerton* hton, /*!< in: innobase handlerton */ + THD* thd) /*!< in: handle to the MySQL thread being + killed */ { + DBUG_ENTER("innobase_kill_connection"); + DBUG_ASSERT(hton == innodb_hton_ptr); + trx_t* trx = thd_to_trx(thd); - if (!trx) { - return(0); + if (trx != NULL) { + + /* Cancel a pending lock request if there are any */ + lock_trx_handle_wait(trx, false, false); } - return(innobase_close_connection(innodb_hton_ptr, thd)); + DBUG_VOID_RETURN; } UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock); @@ -4813,51 +5528,58 @@ innobase_kill_query( } wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ + trx = thd_to_trx(thd); - if (trx && trx->lock.wait_lock) { - /* In wsrep BF we have already took lock_sys and trx - mutex either on wsrep_abort_transaction() or - before wsrep_kill_victim(). 
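Both savepoint hunks above derive the InnoDB-side savepoint name by rendering the address of the server-supplied savepoint data area in base 36 (longlong2str((ulint) savepoint, name, 36)). The sketch below restates that idea as a standalone program; to_base36() is a hypothetical stand-in for the server's longlong2str(), not its actual implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

/* Hypothetical stand-in for the server utility longlong2str(value, buf, 36):
   digits are produced least-significant first, then reversed into the
   caller's buffer. */
static void to_base36(uintptr_t value, char* buf, size_t buf_size)
{
    static const char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
    char   tmp[64];     /* far more than the 13 digits a 64-bit value needs */
    size_t n = 0;

    do {
        tmp[n++] = digits[value % 36];
        value /= 36;
    } while (value != 0);

    size_t out = 0;
    while (n > 0 && out + 1 < buf_size) {
        buf[out++] = tmp[--n];          /* reverse into the output buffer */
    }
    if (buf_size > 0) {
        buf[out] = '\0';
    }
}

int main()
{
    int  savepoint_data = 0;            /* stands in for the void* savepoint */
    char name[64];

    to_base36(reinterpret_cast<uintptr_t>(&savepoint_data), name, sizeof(name));
    std::printf("derived savepoint name: %s\n", name);
    return 0;
}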
In replication we - could own lock_sys mutex taken in - lock_deadlock_check_and_resolve(). */ - - WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id " TRX_ID_FMT " ABORT %d thd %p" - " current_thd %p BF %d wait_lock_modes: %s\n", - trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), - wsrep_thd_is_BF(thd, FALSE), - trx->id, trx->abort_type, - trx->mysql_thd, - current_thd, - wsrep_thd_is_BF(current_thd, FALSE), - lock_get_info(trx->lock.wait_lock).c_str()); + if (trx != NULL) { + /* Cancel a pending lock request if there are any */ + bool lock_mutex_taken = false; + bool trx_mutex_taken = false; + bool already_have_lock_mutex = false; + bool already_have_trx_mutex = false; + dberr_t err; + + if (trx->lock.wait_lock) { + WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id %lu ABORT %d thd %p" + " current_thd %p BF %d", + trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), + wsrep_thd_is_BF(thd, FALSE), + trx->id, trx->abort_type, + trx->mysql_thd, + current_thd, + wsrep_thd_is_BF(current_thd, FALSE)); + } if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - trx->abort_type == TRX_SERVER_ABORT) { + trx->abort_type == TRX_SERVER_ABORT) { ut_ad(!lock_mutex_own()); lock_mutex_enter(); + lock_mutex_taken = true; + } else { + already_have_lock_mutex = true; } if (trx->abort_type != TRX_WSREP_ABORT) { + ut_ad(!trx_mutex_own(trx)); trx_mutex_enter(trx); + trx_mutex_taken = true; + } else { + already_have_trx_mutex = true; } - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(trx)); + err = lock_trx_handle_wait(trx, + (lock_mutex_taken || already_have_lock_mutex), + (trx_mutex_taken || already_have_trx_mutex)); - /* Cancel a pending lock request. */ - if (trx->lock.wait_lock) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); + if (lock_mutex_taken) { + ut_ad(lock_mutex_own()); + lock_mutex_exit(); } - if (trx->abort_type != TRX_WSREP_ABORT) { + if (trx_mutex_taken) { + ut_ad(trx_mutex_own(trx)); trx_mutex_exit(trx); } - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - trx->abort_type == TRX_SERVER_ABORT) { - lock_mutex_exit(); - } } DBUG_VOID_RETURN; @@ -4868,17 +5590,15 @@ innobase_kill_query( ** InnoDB database tables *****************************************************************************/ -/****************************************************************//** -Get the record format from the data dictionary. +/** Get the record format from the data dictionary. @return one of ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, ROW_TYPE_COMPRESSED, ROW_TYPE_DYNAMIC */ -UNIV_INTERN + enum row_type ha_innobase::get_row_type() const -/*=============================*/ { - if (prebuilt && prebuilt->table) { - const ulint flags = prebuilt->table->flags; + if (m_prebuilt && m_prebuilt->table) { + const ulint flags = m_prebuilt->table->flags; switch (dict_tf_get_rec_format(flags)) { case REC_FORMAT_REDUNDANT: @@ -4895,31 +5615,46 @@ ha_innobase::get_row_type() const return(ROW_TYPE_NOT_USED); } - - /****************************************************************//** Get the table flags to use for the statement. -@return table flags */ -UNIV_INTERN +@return table flags */ + handler::Table_flags ha_innobase::table_flags() const /*============================*/ { + THD* thd = ha_thd(); + handler::Table_flags flags = m_int_table_flags; + + /* If querying the table flags when no table_share is given, + then we must check if the table to be created/checked is partitioned. 
+ */ + if (table_share == NULL) { + /* JAN: TODO: MySQL 5.7 Partitioning && thd_get_work_part_info(thd) != NULL) { */ + /* Currently ha_innopart does not support + all InnoDB features such as GEOMETRY, FULLTEXT etc. */ + /* JAN: TODO: MySQL 5.7 + flags &= ~(HA_INNOPART_DISABLED_TABLE_FLAGS); + + */ + } + /* Need to use tx_isolation here since table flags is (also) called before prebuilt is inited. */ - ulong const tx_isolation = thd_tx_isolation(ha_thd()); + + ulong const tx_isolation = thd_tx_isolation(thd); if (tx_isolation <= ISO_READ_COMMITTED) { - return(int_table_flags); + return(flags); } - return(int_table_flags | HA_BINLOG_STMT_CAPABLE); + return(flags | HA_BINLOG_STMT_CAPABLE); } /****************************************************************//** Returns the table type (storage engine name). -@return table type */ -UNIV_INTERN +@return table type */ + const char* ha_innobase::table_type() const /*===========================*/ @@ -4930,7 +5665,7 @@ ha_innobase::table_type() const /****************************************************************//** Returns the index type. @return index type */ -UNIV_INTERN + const char* ha_innobase::index_type( /*====================*/ @@ -4940,6 +5675,8 @@ ha_innobase::index_type( if (index && index->type & DICT_FTS) { return("FULLTEXT"); + } else if (dict_index_is_spatial(index)) { + return("SPATIAL"); } else { return("BTREE"); } @@ -4947,8 +5684,8 @@ ha_innobase::index_type( /****************************************************************//** Returns the table file name extension. -@return file extension string */ -UNIV_INTERN +@return file extension string */ + const char** ha_innobase::bas_ext() const /*========================*/ @@ -4958,8 +5695,8 @@ ha_innobase::bas_ext() const /****************************************************************//** Returns the operations supported for indexes. -@return flags of supported operations */ -UNIV_INTERN +@return flags of supported operations */ + ulong ha_innobase::index_flags( /*=====================*/ @@ -4967,20 +5704,35 @@ ha_innobase::index_flags( uint, bool) const { - ulong extra_flag= 0; - if (table && key == table->s->primary_key) - extra_flag= HA_CLUSTERED_INDEX; - return((table_share->key_info[key].algorithm == HA_KEY_ALG_FULLTEXT) - ? 0 - : (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER - | HA_READ_RANGE | HA_KEYREAD_ONLY | extra_flag - | HA_DO_INDEX_COND_PUSHDOWN)); + if (table_share->key_info[key].algorithm == HA_KEY_ALG_FULLTEXT) { + return(0); + } + + ulong extra_flag= 0; + + if (table && key == table->s->primary_key) { + extra_flag= HA_CLUSTERED_INDEX; + } + + ulong flags = HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER + | HA_READ_RANGE | HA_KEYREAD_ONLY + | extra_flag + | HA_DO_INDEX_COND_PUSHDOWN; + + /* For spatial index, we don't support descending scan + and ICP so far. */ + if (table_share->key_info[key].flags & HA_SPATIAL) { + flags = HA_READ_NEXT | HA_READ_ORDER| HA_READ_RANGE + | HA_KEYREAD_ONLY | HA_KEY_SCAN_NOT_ROR; + } + + return(flags); } /****************************************************************//** Returns the maximum number of keys. -@return MAX_KEY */ -UNIV_INTERN +@return MAX_KEY */ + uint ha_innobase::max_supported_keys() const /*===================================*/ @@ -4990,8 +5742,8 @@ ha_innobase::max_supported_keys() const /****************************************************************//** Returns the maximum key length. 
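The rewritten ha_innobase::index_flags() above boils down to a small decision tree: FULLTEXT keys report no flags, spatial keys drop descending scans and index condition pushdown, and the primary key additionally advertises the clustered-index flag. A minimal sketch of that decision tree follows; the F_* bit values are hypothetical stand-ins for the real HA_* flags from sql/handler.h.

#include <cstdint>
#include <cstdio>

/* Hypothetical flag bits; only the decision logic mirrors the hunk above. */
enum : uint32_t {
    F_READ_NEXT    = 1u << 0,
    F_READ_PREV    = 1u << 1,
    F_READ_ORDER   = 1u << 2,
    F_READ_RANGE   = 1u << 3,
    F_KEYREAD_ONLY = 1u << 4,
    F_CLUSTERED    = 1u << 5,
    F_ICP          = 1u << 6,   /* index condition pushdown */
    F_NOT_ROR      = 1u << 7    /* scan is not rowid-ordered */
};

static uint32_t sketch_index_flags(bool is_fulltext, bool is_spatial,
                                   bool is_primary_key)
{
    if (is_fulltext) {
        return 0;                        /* FULLTEXT keys expose no flags */
    }

    if (is_spatial) {
        /* No descending scan and no ICP on spatial indexes. */
        return F_READ_NEXT | F_READ_ORDER | F_READ_RANGE
             | F_KEYREAD_ONLY | F_NOT_ROR;
    }

    uint32_t flags = F_READ_NEXT | F_READ_PREV | F_READ_ORDER
                   | F_READ_RANGE | F_KEYREAD_ONLY | F_ICP;

    if (is_primary_key) {
        flags |= F_CLUSTERED;            /* PK is the clustered index */
    }

    return flags;
}

int main()
{
    std::printf("PK flags:      0x%x\n",
                static_cast<unsigned>(sketch_index_flags(false, false, true)));
    std::printf("SPATIAL flags: 0x%x\n",
                static_cast<unsigned>(sketch_index_flags(false, true, false)));
    return 0;
}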
-@return maximum supported key length, in bytes */ -UNIV_INTERN +@return maximum supported key length, in bytes */ + uint ha_innobase::max_supported_key_length() const /*=========================================*/ @@ -5027,8 +5779,8 @@ ha_innobase::max_supported_key_length() const /****************************************************************//** Returns the key map of keys that are usable for scanning. -@return key_map_full */ -UNIV_INTERN +@return key_map_full */ + const key_map* ha_innobase::keys_to_use_for_scanning() /*===================================*/ @@ -5038,8 +5790,8 @@ ha_innobase::keys_to_use_for_scanning() /****************************************************************//** Determines if table caching is supported. -@return HA_CACHE_TBL_ASKTRANSACT */ -UNIV_INTERN +@return HA_CACHE_TBL_ASKTRANSACT */ + uint8 ha_innobase::table_cache_type() /*===========================*/ @@ -5049,28 +5801,28 @@ ha_innobase::table_cache_type() /****************************************************************//** Determines if the primary key is clustered index. -@return true */ -UNIV_INTERN +@return true */ + bool ha_innobase::primary_key_is_clustered() -/*===================================*/ +/*=========================================*/ { return(true); } -/*****************************************************************//** +/********************************************************************* Normalizes a table name string. A normalized name consists of the -database name catenated to '/' and table name. Example: test/mytable. -On Windows normalization puts both the database name and the +database name catenated to '/' and table name. An example: +test/mytable. On Windows normalization puts both the database name and the table name always to lower case if "set_lower_case" is set to TRUE. */ void -normalize_table_name_low( -/*=====================*/ - char* norm_name, /*!< out: normalized name as a +normalize_table_name_c_low( +/*=======================*/ + char* norm_name, /* out: normalized name as a null-terminated string */ - const char* name, /*!< in: table name string */ - ibool set_lower_case) /*!< in: TRUE if we want to set name - to lower case */ + const char* name, /* in: table name string */ + ibool set_lower_case) /* in: TRUE if we want to set + name to lower case */ { char* name_ptr; ulint name_len; @@ -5123,6 +5875,23 @@ normalize_table_name_low( } } +/** Normalizes a table name string. +A normalized name consists of the database name catenated to '/' +and table name. For example: test/mytable. +On Windows, normalization puts both the database name and the +table name always to lower case if "set_lower_case" is set to TRUE. +@param[out] norm_name Normalized name, null-terminated. +@param[in] name Name to normalize. +@param[in] set_lower_case True if we also should fold to lower case. */ +void +create_table_info_t::normalize_table_name_low( + char* norm_name, + const char* name, + ibool set_lower_case) +{ + normalize_table_name_c_low(norm_name, name, set_lower_case); +} + #if !defined(DBUG_OFF) /********************************************************************* Test normalize_table_name_low(). */ @@ -5173,11 +5942,12 @@ test_normalize_table_name_low() }; for (size_t i = 0; i < UT_ARR_SIZE(test_data); i++) { - printf("test_normalize_table_name_low(): " - "testing \"%s\", expected \"%s\"... ", + printf("test_normalize_table_name_low():" + " testing \"%s\", expected \"%s\"... 
", test_data[i][0], test_data[i][1]); - normalize_table_name_low(norm_name, test_data[i][0], FALSE); + create_table_info_t::normalize_table_name_low( + norm_name, test_data[i][0], FALSE); if (strcmp(norm_name, test_data[i][1]) == 0) { printf("ok\n"); @@ -5199,30 +5969,27 @@ test_ut_format_name() struct { const char* name; - ibool is_table; ulint buf_size; const char* expected; } test_data[] = { - {"test/t1", TRUE, sizeof(buf), "\"test\".\"t1\""}, - {"test/t1", TRUE, 12, "\"test\".\"t1\""}, - {"test/t1", TRUE, 11, "\"test\".\"t1"}, - {"test/t1", TRUE, 10, "\"test\".\"t"}, - {"test/t1", TRUE, 9, "\"test\".\""}, - {"test/t1", TRUE, 8, "\"test\"."}, - {"test/t1", TRUE, 7, "\"test\""}, - {"test/t1", TRUE, 6, "\"test"}, - {"test/t1", TRUE, 5, "\"tes"}, - {"test/t1", TRUE, 4, "\"te"}, - {"test/t1", TRUE, 3, "\"t"}, - {"test/t1", TRUE, 2, "\""}, - {"test/t1", TRUE, 1, ""}, - {"test/t1", TRUE, 0, "BUF_NOT_CHANGED"}, - {"table", TRUE, sizeof(buf), "\"table\""}, - {"ta'le", TRUE, sizeof(buf), "\"ta'le\""}, - {"ta\"le", TRUE, sizeof(buf), "\"ta\"\"le\""}, - {"ta`le", TRUE, sizeof(buf), "\"ta`le\""}, - {"index", FALSE, sizeof(buf), "\"index\""}, - {"ind/ex", FALSE, sizeof(buf), "\"ind/ex\""}, + {"test/t1", sizeof(buf), "`test`.`t1`"}, + {"test/t1", 12, "`test`.`t1`"}, + {"test/t1", 11, "`test`.`t1"}, + {"test/t1", 10, "`test`.`t"}, + {"test/t1", 9, "`test`.`"}, + {"test/t1", 8, "`test`."}, + {"test/t1", 7, "`test`"}, + {"test/t1", 6, "`test"}, + {"test/t1", 5, "`tes"}, + {"test/t1", 4, "`te"}, + {"test/t1", 3, "`t"}, + {"test/t1", 2, "`"}, + {"test/t1", 1, ""}, + {"test/t1", 0, "BUF_NOT_CHANGED"}, + {"table", sizeof(buf), "`table`"}, + {"ta'le", sizeof(buf), "`ta'le`"}, + {"ta\"le", sizeof(buf), "`ta\"le`"}, + {"ta`le", sizeof(buf), "`ta``le`"}, }; for (size_t i = 0; i < UT_ARR_SIZE(test_data); i++) { @@ -5232,109 +5999,37 @@ test_ut_format_name() char* ret; ret = ut_format_name(test_data[i].name, - test_data[i].is_table, buf, test_data[i].buf_size); ut_a(ret == buf); if (strcmp(buf, test_data[i].expected) == 0) { - fprintf(stderr, - "ut_format_name(%s, %s, buf, %lu), " - "expected %s, OK\n", - test_data[i].name, - test_data[i].is_table ? "TRUE" : "FALSE", - test_data[i].buf_size, - test_data[i].expected); + ib::info() << "ut_format_name(" << test_data[i].name + << ", buf, " << test_data[i].buf_size << ")," + " expected " << test_data[i].expected + << ", OK"; } else { - fprintf(stderr, - "ut_format_name(%s, %s, buf, %lu), " - "expected %s, ERROR: got %s\n", - test_data[i].name, - test_data[i].is_table ? "TRUE" : "FALSE", - test_data[i].buf_size, - test_data[i].expected, - buf); + ib::error() << "ut_format_name(" << test_data[i].name + << ", buf, " << test_data[i].buf_size << ")," + " expected " << test_data[i].expected + << ", ERROR: got " << buf; ut_error; } } } #endif /* !DBUG_OFF */ -/********************************************************************//** -Get the upper limit of the MySQL integral and floating-point type. 
-@return maximum allowed value for the field */ -UNIV_INTERN -ulonglong -innobase_get_int_col_max_value( -/*===========================*/ - const Field* field) /*!< in: MySQL field */ -{ - ulonglong max_value = 0; - - switch (field->key_type()) { - /* TINY */ - case HA_KEYTYPE_BINARY: - max_value = 0xFFULL; - break; - case HA_KEYTYPE_INT8: - max_value = 0x7FULL; - break; - /* SHORT */ - case HA_KEYTYPE_USHORT_INT: - max_value = 0xFFFFULL; - break; - case HA_KEYTYPE_SHORT_INT: - max_value = 0x7FFFULL; - break; - /* MEDIUM */ - case HA_KEYTYPE_UINT24: - max_value = 0xFFFFFFULL; - break; - case HA_KEYTYPE_INT24: - max_value = 0x7FFFFFULL; - break; - /* LONG */ - case HA_KEYTYPE_ULONG_INT: - max_value = 0xFFFFFFFFULL; - break; - case HA_KEYTYPE_LONG_INT: - max_value = 0x7FFFFFFFULL; - break; - /* BIG */ - case HA_KEYTYPE_ULONGLONG: - max_value = 0xFFFFFFFFFFFFFFFFULL; - break; - case HA_KEYTYPE_LONGLONG: - max_value = 0x7FFFFFFFFFFFFFFFULL; - break; - case HA_KEYTYPE_FLOAT: - /* We use the maximum as per IEEE754-2008 standard, 2^24 */ - max_value = 0x1000000ULL; - break; - case HA_KEYTYPE_DOUBLE: - /* We use the maximum as per IEEE754-2008 standard, 2^53 */ - max_value = 0x20000000000000ULL; - break; - default: - ut_error; - } - - return(max_value); -} - -/*******************************************************************//** +/** Match index columns between MySQL and InnoDB. This function checks whether the index column information is consistent between KEY info from mysql and that from innodb index. -@return TRUE if all column types match. */ -static -ibool +@param[in] key_info Index info from mysql +@param[in] index_info Index info from InnoDB +@return true if all column types match. */ +bool innobase_match_index_columns( -/*=========================*/ - const KEY* key_info, /*!< in: Index info - from mysql */ - const dict_index_t* index_info) /*!< in: Index info - from Innodb */ + const KEY* key_info, + const dict_index_t* index_info) { const KEY_PART_INFO* key_part; const KEY_PART_INFO* key_end; @@ -5359,7 +6054,7 @@ innobase_match_index_columns( column name got modified in mysql but such change does not propagate to InnoDB. One hidden assumption here is that the index column sequences - are matched up between those in mysql and Innodb. */ + are matched up between those in mysql and InnoDB. */ for (; key_part != key_end; ++key_part) { ulint col_type; ibool is_unsigned; @@ -5367,10 +6062,10 @@ innobase_match_index_columns( /* Need to translate to InnoDB column type before comparison. */ - col_type = get_innobase_type_from_mysql_type(&is_unsigned, - key_part->field); + col_type = get_innobase_type_from_mysql_type( + &is_unsigned, key_part->field); - /* Ignore Innodb specific system columns. */ + /* Ignore InnoDB specific system columns. */ while (mtype == DATA_SYS) { innodb_idx_fld++; @@ -5388,8 +6083,31 @@ innobase_match_index_columns( col_type= DATA_FIXBINARY; if (col_type != mtype) { - /* Column Type mismatches */ - DBUG_RETURN(FALSE); + /* If the col_type we get from mysql type is a geometry + data type, we should check if mtype is a legacy type + from 5.6, either upgraded to DATA_GEOMETRY or not. + This is indeed not an accurate check, but should be + safe, since DATA_BLOB would be upgraded once we create + spatial index on it and we intend to use DATA_GEOMETRY + for legacy GIS data types which are of var-length. 
*/ + switch (col_type) { + case DATA_POINT: + case DATA_VAR_POINT: + if (DATA_POINT_MTYPE(mtype) + || mtype == DATA_GEOMETRY + || mtype == DATA_BLOB) { + break; + } + /* Fall through */ + case DATA_GEOMETRY: + if (mtype == DATA_BLOB) { + break; + } + /* Fall through */ + default: + /* Column type mismatches */ + DBUG_RETURN(false); + } } innodb_idx_fld++; @@ -5398,49 +6116,282 @@ innobase_match_index_columns( DBUG_RETURN(TRUE); } +/** Build a template for a base column for a virtual column +@param[in] table MySQL TABLE +@param[in] clust_index InnoDB clustered index +@param[in] field field in MySQL table +@param[in] col InnoDB column +@param[in,out] templ template to fill +@param[in] col_no field index for virtual col +*/ +static +void +innobase_vcol_build_templ( + const TABLE* table, + dict_index_t* clust_index, + Field* field, + const dict_col_t* col, + mysql_row_templ_t* templ, + ulint col_no) +{ + if (dict_col_is_virtual(col)) { + templ->is_virtual = true; + templ->col_no = col_no; + templ->clust_rec_field_no = ULINT_UNDEFINED; + templ->rec_field_no = col->ind; + } else { + templ->is_virtual = false; + templ->col_no = col_no; + templ->clust_rec_field_no = dict_col_get_clust_pos( + col, clust_index); + ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); + + templ->rec_field_no = templ->clust_rec_field_no; + } + + if (field->real_maybe_null()) { + templ->mysql_null_byte_offset = + field->null_offset(); + + templ->mysql_null_bit_mask = (ulint) field->null_bit; + } else { + templ->mysql_null_bit_mask = 0; + } + + templ->mysql_col_offset = static_cast( + get_field_offset(table, field)); + templ->mysql_col_len = static_cast(field->pack_length()); + templ->type = col->mtype; + templ->mysql_type = static_cast(field->type()); + + if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + templ->mysql_length_bytes = static_cast( + ((Field_varstring*) field)->length_bytes); + } + + templ->charset = dtype_get_charset_coll(col->prtype); + templ->mbminlen = dict_col_get_mbminlen(col); + templ->mbmaxlen = dict_col_get_mbmaxlen(col); + templ->is_unsigned = col->prtype & DATA_UNSIGNED; +} + +/** callback used by MySQL server layer to initialize +the table virtual columns' template +@param[in] table MySQL TABLE +@param[in,out] ib_table InnoDB table */ +void +innobase_build_v_templ_callback( + const TABLE* table, + void* ib_table) +{ + const dict_table_t* t_table = static_cast(ib_table); + + innobase_build_v_templ(table, t_table, t_table->vc_templ, NULL, + true, NULL); +} + +/** Build template for the virtual columns and their base columns. This +is done when the table first opened. 
+@param[in] table MySQL TABLE +@param[in] ib_table InnoDB dict_table_t +@param[in,out] s_templ InnoDB template structure +@param[in] add_v new virtual columns added along with + add index call +@param[in] locked true if innobase_share_mutex is held +@param[in] share_tbl_name original MySQL table name */ +void +innobase_build_v_templ( + const TABLE* table, + const dict_table_t* ib_table, + innodb_col_templ_t* s_templ, + const dict_add_v_col_t* add_v, + bool locked, + const char* share_tbl_name) +{ + ulint ncol = ib_table->n_cols - DATA_N_SYS_COLS; + ulint n_v_col = ib_table->n_v_cols; + bool marker[REC_MAX_N_FIELDS]; + + ut_ad(ncol < REC_MAX_N_FIELDS); + + if (add_v != NULL) { + n_v_col += add_v->n_v_col; + } + + ut_ad(n_v_col > 0); + + if (!locked) { + mysql_mutex_lock(&innobase_share_mutex); + } + + if (s_templ->vtempl) { + if (!locked) { + mysql_mutex_unlock(&innobase_share_mutex); + } + return; + } + + memset(marker, 0, sizeof(bool) * ncol); + + s_templ->vtempl = static_cast( + ut_zalloc_nokey((ncol + n_v_col) + * sizeof *s_templ->vtempl)); + s_templ->n_col = ncol; + s_templ->n_v_col = n_v_col; + s_templ->rec_len = table->s->stored_rec_length; + s_templ->default_rec = table->s->default_values; + + /* Mark those columns could be base columns */ + for (ulint i = 0; i < ib_table->n_v_cols; i++) { + const dict_v_col_t* vcol = dict_table_get_nth_v_col( + ib_table, i); + + for (ulint j = 0; j < vcol->num_base; j++) { + ulint col_no = vcol->base_col[j]->ind; + marker[col_no] = true; + } + } + + if (add_v) { + for (ulint i = 0; i < add_v->n_v_col; i++) { + const dict_v_col_t* vcol = &add_v->v_col[i]; + + for (ulint j = 0; j < vcol->num_base; j++) { + ulint col_no = vcol->base_col[j]->ind; + marker[col_no] = true; + } + } + } + + ulint j = 0; + ulint z = 0; + + dict_index_t* clust_index = dict_table_get_first_index(ib_table); + + for (ulint i = 0; i < table->s->fields; i++) { + Field* field = table->field[i]; + + /* Build template for virtual columns */ + if (innobase_is_v_fld(field)) { +#ifdef UNIV_DEBUG + const char* name; + + if (z >= ib_table->n_v_def) { + name = add_v->v_col_name[z - ib_table->n_v_def]; + } else { + name = dict_table_get_v_col_name(ib_table, z); + } + + ut_ad(!ut_strcmp(name, field->field_name)); +#endif + const dict_v_col_t* vcol; + + if (z >= ib_table->n_v_def) { + vcol = &add_v->v_col[z - ib_table->n_v_def]; + } else { + vcol = dict_table_get_nth_v_col(ib_table, z); + } + + s_templ->vtempl[z + s_templ->n_col] + = static_cast( + ut_malloc_nokey( + sizeof *s_templ->vtempl[j])); + + innobase_vcol_build_templ( + table, clust_index, field, + &vcol->m_col, + s_templ->vtempl[z + s_templ->n_col], + z); + z++; + continue; + } + + ut_ad(j < ncol); + + /* Build template for base columns */ + if (marker[j]) { + dict_col_t* col = dict_table_get_nth_col( + ib_table, j); + +#ifdef UNIV_DEBUG + const char* name = dict_table_get_col_name( + ib_table, j); + + ut_ad(!ut_strcmp(name, field->field_name)); +#endif + + s_templ->vtempl[j] = static_cast< + mysql_row_templ_t*>( + ut_malloc_nokey( + sizeof *s_templ->vtempl[j])); + + innobase_vcol_build_templ( + table, clust_index, field, col, + s_templ->vtempl[j], j); + } + + j++; + } + + if (!locked) { + mysql_mutex_unlock(&innobase_share_mutex); + } + + ut_strlcpy(s_templ->db_name, table->s->db.str, + table->s->db.length + 1); + s_templ->db_name[table->s->db.length] = 0; + + ut_strlcpy(s_templ->tb_name, table->s->table_name.str, + table->s->table_name.length + 1); + s_templ->tb_name[table->s->table_name.length] = 0; + + if (share_tbl_name) { + 
ulint s_len = strlen(share_tbl_name); + ut_strlcpy(s_templ->share_name, share_tbl_name, + s_len + 1); + s_templ->tb_name[s_len] = 0; + } +} + /*******************************************************************//** This function builds a translation table in INNOBASE_SHARE structure for fast index location with mysql array number from its table->key_info structure. This also provides the necessary translation -between the key order in mysql key_info and Innodb ib_table->indexes if +between the key order in mysql key_info and InnoDB ib_table->indexes if they are not fully matched with each other. Note we do not have any mutex protecting the translation table building based on the assumption that there is no concurrent index creation/drop and DMLs that requires index lookup. All table handle will be closed before the index creation/drop. -@return TRUE if index translation table built successfully */ +@return true if index translation table built successfully */ static -ibool +bool innobase_build_index_translation( /*=============================*/ const TABLE* table, /*!< in: table in MySQL data dictionary */ - dict_table_t* ib_table,/*!< in: table in Innodb data + dict_table_t* ib_table,/*!< in: table in InnoDB data dictionary */ INNOBASE_SHARE* share) /*!< in/out: share structure where index translation table will be constructed in. */ { - ulint mysql_num_index; - ulint ib_num_index; - dict_index_t** index_mapping; - ibool ret = TRUE; - DBUG_ENTER("innobase_build_index_translation"); - mutex_enter(&dict_sys->mutex); + bool ret = true; - mysql_num_index = table->s->keys; - ib_num_index = UT_LIST_GET_LEN(ib_table->indexes); + mutex_enter(&dict_sys->mutex); - index_mapping = share->idx_trans_tbl.index_mapping; + ulint mysql_num_index = table->s->keys; + ulint ib_num_index = UT_LIST_GET_LEN(ib_table->indexes); + dict_index_t** index_mapping = share->idx_trans_tbl.index_mapping; /* If there exists inconsistency between MySQL and InnoDB dictionary (metadata) information, the number of index defined in MySQL could exceed that in InnoDB, do not build index translation table in such case */ - if (UNIV_UNLIKELY(ib_num_index < mysql_num_index)) { - ret = FALSE; + if (ib_num_index < mysql_num_index) { + ret = false; goto func_exit; } @@ -5454,20 +6405,20 @@ innobase_build_index_translation( /* The number of index increased, rebuild the mapping table */ if (mysql_num_index > share->idx_trans_tbl.array_size) { - index_mapping = (dict_index_t**) my_realloc(index_mapping, - mysql_num_index * - sizeof(*index_mapping), - MYF(MY_ALLOW_ZERO_PTR)); - if (!index_mapping) { + index_mapping = reinterpret_cast( + ut_realloc(index_mapping, + mysql_num_index * sizeof(*index_mapping))); + + if (index_mapping == NULL) { /* Report an error if index_mapping continues to be NULL and mysql_num_index is a non-zero value */ - sql_print_error("InnoDB: fail to allocate memory for " - "index translation table. Number of " - "Index:%lu, array size:%lu", + sql_print_error("InnoDB: fail to allocate memory for" + " index translation table. 
Number of" + " Index:%lu, array size:%lu", mysql_num_index, share->idx_trans_tbl.array_size); - ret = FALSE; + ret = false; goto func_exit; } @@ -5484,11 +6435,11 @@ innobase_build_index_translation( index_mapping[count] = dict_table_get_index_on_name( ib_table, table->key_info[count].name); - if (!index_mapping[count]) { - sql_print_error("Cannot find index %s in InnoDB " - "index dictionary.", + if (index_mapping[count] == 0) { + sql_print_error("Cannot find index %s in InnoDB" + " index dictionary.", table->key_info[count].name); - ret = FALSE; + ret = false; goto func_exit; } @@ -5496,10 +6447,10 @@ innobase_build_index_translation( column info as those in mysql key_info. */ if (!innobase_match_index_columns(&table->key_info[count], index_mapping[count])) { - sql_print_error("Found index %s whose column info " - "does not match that of MySQL.", + sql_print_error("Found index %s whose column info" + " does not match that of MySQL.", table->key_info[count].name); - ret = FALSE; + ret = false; goto func_exit; } } @@ -5510,7 +6461,7 @@ innobase_build_index_translation( func_exit: if (!ret) { /* Build translation table failed. */ - my_free(index_mapping); + ut_free(index_mapping); share->idx_trans_tbl.array_size = 0; share->idx_trans_tbl.index_count = 0; @@ -5542,65 +6493,140 @@ innobase_index_lookup( uint keynr) /*!< in: index number for the requested index */ { - if (!share->idx_trans_tbl.index_mapping + if (share->idx_trans_tbl.index_mapping == NULL || keynr >= share->idx_trans_tbl.index_count) { return(NULL); } return(share->idx_trans_tbl.index_mapping[keynr]); } - -/************************************************************************ -Set the autoinc column max value. This should only be called once from -ha_innobase::open(). Therefore there's no need for a covering lock. */ +/********************************************************************//** +Get the upper limit of the MySQL integral and floating-point type. +@return maximum allowed value for the field */ UNIV_INTERN -void -ha_innobase::innobase_initialize_autoinc() -/*======================================*/ +ulonglong +innobase_get_int_col_max_value( +/*===========================*/ + const Field* field) /*!< in: MySQL field */ { - ulonglong auto_inc; - const Field* field = table->found_next_number_field; - - if (field != NULL) { - auto_inc = innobase_get_int_col_max_value(field); - } else { - /* We have no idea what's been passed in to us as the - autoinc column. We set it to the 0, effectively disabling - updates to the table. */ - auto_inc = 0; - - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: Unable to determine the AUTOINC " - "column name\n"); - } - - if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { - /* If the recovery level is set so high that writes - are disabled we force the AUTOINC counter to 0 - value effectively disabling writes to the table. - Secondly, we avoid reading the table in case the read - results in failure due to a corrupted table/index. + ulonglong max_value = 0; - We will not return an error to the client, so that the - tables can be dumped with minimal hassle. If an error - were returned in this case, the first attempt to read - the table would fail and subsequent SELECTs would succeed. */ - auto_inc = 0; - } else if (field == NULL) { - /* This is a far more serious error, best to avoid - opening the table and return failure. 
*/ - my_error(ER_AUTOINC_READ_FAILED, MYF(0)); - } else { - dict_index_t* index; - const char* col_name; - ib_uint64_t read_auto_inc; - ulint err; + switch (field->key_type()) { + /* TINY */ + case HA_KEYTYPE_BINARY: + max_value = 0xFFULL; + break; + case HA_KEYTYPE_INT8: + max_value = 0x7FULL; + break; + /* SHORT */ + case HA_KEYTYPE_USHORT_INT: + max_value = 0xFFFFULL; + break; + case HA_KEYTYPE_SHORT_INT: + max_value = 0x7FFFULL; + break; + /* MEDIUM */ + case HA_KEYTYPE_UINT24: + max_value = 0xFFFFFFULL; + break; + case HA_KEYTYPE_INT24: + max_value = 0x7FFFFFULL; + break; + /* LONG */ + case HA_KEYTYPE_ULONG_INT: + max_value = 0xFFFFFFFFULL; + break; + case HA_KEYTYPE_LONG_INT: + max_value = 0x7FFFFFFFULL; + break; + /* BIG */ + case HA_KEYTYPE_ULONGLONG: + max_value = 0xFFFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_LONGLONG: + max_value = 0x7FFFFFFFFFFFFFFFULL; + break; + case HA_KEYTYPE_FLOAT: + /* We use the maximum as per IEEE754-2008 standard, 2^24 */ + max_value = 0x1000000ULL; + break; + case HA_KEYTYPE_DOUBLE: + /* We use the maximum as per IEEE754-2008 standard, 2^53 */ + max_value = 0x20000000000000ULL; + break; + default: + ut_error; + } - update_thd(ha_thd()); + return(max_value); +} +/************************************************************************ +Set the autoinc column max value. This should only be called once from +ha_innobase::open(). Therefore there's no need for a covering lock. */ + +void +ha_innobase::innobase_initialize_autoinc() +/*======================================*/ +{ + ulonglong auto_inc; + const Field* field = table->found_next_number_field; + + if (field != NULL) { + /* JAN: TODO: MySQL 5.7 + auto_inc = field->get_max_int_value(); + */ + auto_inc = innobase_get_int_col_max_value(field); - ut_a(prebuilt->trx == thd_to_trx(user_thd)); + /* autoinc column cannot be virtual column */ + ut_ad(!innobase_is_v_fld(field)); + } else { + /* We have no idea what's been passed in to us as the + autoinc column. We set it to the 0, effectively disabling + updates to the table. */ + auto_inc = 0; + + ib::info() << "Unable to determine the AUTOINC column name"; + } + + if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { + /* If the recovery level is set so high that writes + are disabled we force the AUTOINC counter to 0 + value effectively disabling writes to the table. + Secondly, we avoid reading the table in case the read + results in failure due to a corrupted table/index. + + We will not return an error to the client, so that the + tables can be dumped with minimal hassle. If an error + were returned in this case, the first attempt to read + the table would fail and subsequent SELECTs would succeed. */ + auto_inc = 0; + } else if (field == NULL) { + /* This is a far more serious error, best to avoid + opening the table and return failure. */ + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + } else { + dict_index_t* index; + const char* col_name; + ib_uint64_t read_auto_inc; + ulint err; + + update_thd(ha_thd()); col_name = field->field_name; + + /* For intrinsic table, name of field has to be prefixed with + table name to maintain column-name uniqueness. 
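The relocated innobase_get_int_col_max_value() above maps each integral or floating-point key type to the largest value the AUTOINC code may generate for it. The same limits are restated below as a standalone table keyed by a hypothetical IntKeyType enum; the real code switches on field->key_type() (HA_KEYTYPE_*).

#include <cstdint>
#include <cstdio>

enum class IntKeyType {
    UTINY, TINY, USHORT, SHORT, UMEDIUM, MEDIUM,
    ULONG, LONG, ULONGLONG, LONGLONG, FLOAT, DOUBLE
};

static uint64_t sketch_int_col_max_value(IntKeyType t)
{
    switch (t) {
    case IntKeyType::UTINY:     return 0xFFULL;
    case IntKeyType::TINY:      return 0x7FULL;
    case IntKeyType::USHORT:    return 0xFFFFULL;
    case IntKeyType::SHORT:     return 0x7FFFULL;
    case IntKeyType::UMEDIUM:   return 0xFFFFFFULL;
    case IntKeyType::MEDIUM:    return 0x7FFFFFULL;
    case IntKeyType::ULONG:     return 0xFFFFFFFFULL;
    case IntKeyType::LONG:      return 0x7FFFFFFFULL;
    case IntKeyType::ULONGLONG: return 0xFFFFFFFFFFFFFFFFULL;
    case IntKeyType::LONGLONG:  return 0x7FFFFFFFFFFFFFFFULL;
    case IntKeyType::FLOAT:     return 0x1000000ULL;        /* 2^24, IEEE754-2008 */
    case IntKeyType::DOUBLE:    return 0x20000000000000ULL; /* 2^53, IEEE754-2008 */
    }
    return 0;   /* unreachable */
}

int main()
{
    std::printf("MEDIUMINT max: %llu\n",
                static_cast<unsigned long long>(
                    sketch_int_col_max_value(IntKeyType::MEDIUM)));
    return 0;
}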
*/ + if (m_prebuilt->table != NULL + && dict_table_is_intrinsic(m_prebuilt->table)) { + + ulint col_no = dict_col_get_no(dict_table_get_nth_col( + m_prebuilt->table, field->field_index)); + + col_name = dict_table_get_col_name( + m_prebuilt->table, col_no); + } + index = innobase_get_index(table->s->next_number_index); /* Execute SELECT MAX(col_name) FROM TABLE; */ @@ -5610,7 +6636,10 @@ ha_innobase::innobase_initialize_autoinc() case DB_SUCCESS: { ulonglong col_max_value; - col_max_value = innobase_get_int_col_max_value(field); + /* JAN: TODO: MySQL 5.7 + col_max_value = field->get_max_int_value(); + */ + col_max_value = innobase_get_int_col_max_value(field); /* At the this stage we do not know the increment nor the offset, so use a default increment of 1. */ @@ -5621,20 +6650,18 @@ ha_innobase::innobase_initialize_autoinc() break; } case DB_RECORD_NOT_FOUND: - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: MySQL and InnoDB data " - "dictionaries are out of sync.\n" - "InnoDB: Unable to find the AUTOINC column " - "%s in the InnoDB table %s.\n" - "InnoDB: We set the next AUTOINC column " - "value to 0,\n" - "InnoDB: in effect disabling the AUTOINC " - "next value generation.\n" - "InnoDB: You can either set the next " - "AUTOINC value explicitly using ALTER TABLE\n" - "InnoDB: or fix the data dictionary by " - "recreating the table.\n", - col_name, index->table->name); + ib::error() << "MySQL and InnoDB data dictionaries are" + " out of sync. Unable to find the AUTOINC" + " column " << col_name << " in the InnoDB" + " table " << index->table->name << ". We set" + " the next AUTOINC column value to 0, in" + " effect disabling the AUTOINC next value" + " generation."; + + ib::info() << "You can either set the next AUTOINC" + " value explicitly using ALTER TABLE or fix" + " the data dictionary by recreating the" + " table."; /* This will disable the AUTOINC generation. */ auto_inc = 0; @@ -5651,14 +6678,33 @@ ha_innobase::innobase_initialize_autoinc() } } - dict_table_autoinc_initialize(prebuilt->table, auto_inc); + dict_table_autoinc_initialize(m_prebuilt->table, auto_inc); +} + +/** Free the virtual column template +@param[in,out] vc_templ virtual column template */ +void +free_vc_templ( + innodb_col_templ_t* vc_templ) +{ + if (vc_templ->vtempl) { + ut_ad(vc_templ->n_v_col); + for (ulint i = 0; i < vc_templ->n_col + + vc_templ->n_v_col ; i++) { + if (vc_templ->vtempl[i]) { + ut_free(vc_templ->vtempl[i]); + } + } + ut_free(vc_templ->vtempl); + vc_templ->vtempl = NULL; + } } /*****************************************************************//** Creates and opens a handle to a table which already exists in an InnoDB database. -@return 1 if error, 0 if success */ -UNIV_INTERN +@return 1 if error, 0 if success */ + int ha_innobase::open( /*==============*/ @@ -5670,8 +6716,6 @@ ha_innobase::open( char norm_name[FN_REFLEN]; THD* thd; char* is_part = NULL; - ibool par_case_name_set = FALSE; - char par_case_name[FN_REFLEN]; dict_err_ignore_t ignore_err = DICT_ERR_IGNORE_NONE; DBUG_ENTER("ha_innobase::open"); @@ -5682,7 +6726,7 @@ ha_innobase::open( thd = ha_thd(); /* Under some cases MySQL seems to call this function while - holding btr_search_latch. This breaks the latching order as + holding search latch(es). This breaks the latching order as we acquire dict_sys->mutex below and leads to a deadlock. 
*/ if (thd != NULL) { innobase_release_temporary_latches(ht, thd); @@ -5690,24 +6734,24 @@ ha_innobase::open( normalize_table_name(norm_name, name); - user_thd = NULL; + m_user_thd = NULL; - if (!(share=get_share(name))) { + if (!(m_share = get_share(name))) { DBUG_RETURN(1); } /* Will be allocated if it is needed in ::update_row() */ - upd_buf = NULL; - upd_buf_size = 0; + m_upd_buf = NULL; + m_upd_buf_size = 0; /* We look for pattern #P# to see if the table is partitioned MySQL table. */ -#ifdef __WIN__ +#ifdef _WIN32 is_part = strstr(norm_name, "#p#"); #else is_part = strstr(norm_name, "#P#"); -#endif /* __WIN__ */ +#endif /* _WIN32 */ /* Check whether FOREIGN_KEY_CHECKS is set to 0. If so, the table can be opened even if some FK indexes are missing. If not, the table @@ -5716,23 +6760,33 @@ ha_innobase::open( ignore_err = DICT_ERR_IGNORE_FK_NOKEY; } - /* Get pointer to a table object in InnoDB dictionary cache */ - ib_table = dict_table_open_on_name(norm_name, FALSE, TRUE, ignore_err); + /* Get pointer to a table object in InnoDB dictionary cache. + For intrinsic table, get it from session private data */ + ib_table = thd_to_innodb_session(thd)->lookup_table_handler(norm_name); - if (ib_table + if (ib_table == NULL) { + + ib_table = open_dict_table(name, norm_name, is_part, + ignore_err); + } else { + ib_table->acquire(); + ut_ad(dict_table_is_intrinsic(ib_table)); + } + + if (ib_table != NULL && ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) - && table->s->stored_fields != dict_table_get_n_user_cols(ib_table)) + && table->s->stored_fields != dict_table_get_n_tot_u_cols(ib_table)) || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) - && (table->s->fields - != dict_table_get_n_user_cols(ib_table) - 1)))) { - ib_logf(IB_LOG_LEVEL_WARN, - "table %s contains %lu user defined columns " - "in InnoDB, but %lu columns in MySQL. Please " - "check INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and " - REFMAN "innodb-troubleshooting.html " - "for how to resolve it", - norm_name, (ulong) dict_table_get_n_user_cols(ib_table), - (ulong) table->s->fields); + && (table->s->stored_fields + != dict_table_get_n_tot_u_cols(ib_table) - 1)))) { + + ib::warn() << "Table " << norm_name << " contains " + << dict_table_get_n_user_cols(ib_table) << " user" + " defined columns in InnoDB, but " << table->s->fields + << " columns in MySQL. Please check" + " INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and " REFMAN + "innodb-troubleshooting.html for how to resolve the" + " issue."; /* Mark this table as corrupted, so the drop table or force recovery can still use it, but not others. */ @@ -5743,94 +6797,22 @@ ha_innobase::open( } if (NULL == ib_table) { - if (is_part) { - /* MySQL partition engine hard codes the file name - separator as "#P#". The text case is fixed even if - lower_case_table_names is set to 1 or 2. This is true - for sub-partition names as well. InnoDB always - normalises file names to lower case on Windows, this - can potentially cause problems when copying/moving - tables between platforms. - - 1) If boot against an installation from Windows - platform, then its partition table name could - be in lower case in system tables. So we will - need to check lower case name when load table. - - 2) If we boot an installation from other case - sensitive platform in Windows, we might need to - check the existence of table name without lower - case in the system table. 
*/ - if (innobase_get_lower_case_table_names() == 1) { - - if (!par_case_name_set) { -#ifndef __WIN__ - /* Check for the table using lower - case name, including the partition - separator "P" */ - strcpy(par_case_name, norm_name); - innobase_casedn_str(par_case_name); -#else - /* On Windows platfrom, check - whether there exists table name in - system table whose name is - not being normalized to lower case */ - normalize_table_name_low( - par_case_name, name, FALSE); -#endif - par_case_name_set = TRUE; - } - - ib_table = dict_table_open_on_name( - par_case_name, FALSE, TRUE, - ignore_err); - } - - if (ib_table) { -#ifndef __WIN__ - sql_print_warning("Partition table %s opened " - "after converting to lower " - "case. The table may have " - "been moved from a case " - "in-sensitive file system. " - "Please recreate table in " - "the current file system\n", - norm_name); -#else - sql_print_warning("Partition table %s opened " - "after skipping the step to " - "lower case the table name. " - "The table may have been " - "moved from a case sensitive " - "file system. Please " - "recreate table in the " - "current file system\n", - norm_name); -#endif - goto table_opened; - } - } if (is_part) { sql_print_error("Failed to open table %s.\n", norm_name); } - ib_logf(IB_LOG_LEVEL_WARN, - "Cannot open table %s from the internal data " - "dictionary of InnoDB though the .frm file " - "for the table exists. See " - REFMAN "innodb-troubleshooting.html for how " - "you can resolve the problem.", norm_name); + ib::warn() << "Cannot open table " << norm_name << " from the" + " internal data dictionary of InnoDB though the .frm" + " file for the table exists. " << TROUBLESHOOTING_MSG; - free_share(share); - my_errno = ENOENT; + free_share(m_share); + set_my_errno(ENOENT); DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } -table_opened: - innobase_copy_frm_flags_from_table_share(ib_table, table->s); ib_table->thd = (void*)thd; @@ -5878,16 +6860,38 @@ table_opened: no_tablespace = false; } + if (dict_table_has_fts_index(ib_table)) { + + /* Check if table is in a consistent state. + Crash during truncate can put table in an inconsistent state. */ + trx_t* trx = innobase_trx_allocate(ha_thd()); + bool sane = fts_is_corrupt(ib_table, trx); + innobase_commit_low(trx); + trx_free_for_mysql(trx); + trx = NULL; + + if (!sane) { + /* In-consistent fts index found. */ + free_share(m_share); + set_my_errno(ENOENT); + + dict_table_close(ib_table, FALSE, FALSE); + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + } + } + if (!thd_tablespace_op(thd) && no_tablespace) { - free_share(share); - my_errno = ENOENT; + free_share(m_share); + set_my_errno(ENOENT); int ret_err = HA_ERR_NO_SUCH_TABLE; /* If table has no talespace but it has crypt data, check is tablespace made unaccessible because encryption service or used key_id is not available. */ if (ib_table) { + bool warning_pushed = false; fil_space_crypt_t* crypt_data = ib_table->crypt_data; + if ((crypt_data && crypt_data->encryption == FIL_SPACE_ENCRYPTION_ON) || (srv_encrypt_tables && crypt_data && crypt_data->encryption == FIL_SPACE_ENCRYPTION_DEFAULT)) { @@ -5900,8 +6904,14 @@ table_opened: " Can't continue reading table.", ib_table->name, crypt_data->key_id); ret_err = HA_ERR_DECRYPTION_FAILED; + warning_pushed = true; } - } else if (ib_table->is_encrypted) { + } + + /* If table is marked as encrypted then we push + warning if it has not been already done as used + key_id might be found but it is incorrect. 
*/ + if (ib_table->is_encrypted && !warning_pushed) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_DECRYPTION_FAILED, "Table %s is encrypted but encryption service or" @@ -5913,20 +6923,39 @@ table_opened: } dict_table_close(ib_table, FALSE, FALSE); - DBUG_RETURN(ret_err); } - prebuilt = row_create_prebuilt(ib_table, table->s->stored_rec_length); + m_prebuilt = row_create_prebuilt(ib_table, table->s->stored_rec_length); - prebuilt->default_rec = table->s->default_values; - ut_ad(prebuilt->default_rec); + m_prebuilt->default_rec = table->s->default_values; + ut_ad(m_prebuilt->default_rec); /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ - primary_key = table->s->primary_key; - key_used_on_scan = primary_key; + m_primary_key = table->s->primary_key; + + key_used_on_scan = m_primary_key; + + if (ib_table->n_v_cols) { + if (!m_share->s_templ.vtempl) { + innobase_build_v_templ( + table, ib_table, &(m_share->s_templ), NULL, + false, m_share->table_name); - if (!innobase_build_index_translation(table, ib_table, share)) { + mysql_mutex_lock(&innobase_share_mutex); + if (ib_table->vc_templ + && ib_table->vc_templ_purge) { + free_vc_templ(ib_table->vc_templ); + ut_free(ib_table->vc_templ); + } + mysql_mutex_unlock(&innobase_share_mutex); + } + ib_table->vc_templ = &m_share->s_templ; + } else { + ib_table->vc_templ = NULL; + } + + if (!innobase_build_index_translation(table, ib_table, m_share)) { sql_print_error("Build InnoDB index translation table for" " Table %s failed", name); } @@ -5939,9 +6968,9 @@ table_opened: if (!row_table_got_default_clust_index(ib_table)) { - prebuilt->clust_index_was_generated = FALSE; + m_prebuilt->clust_index_was_generated = FALSE; - if (UNIV_UNLIKELY(primary_key >= MAX_KEY)) { + if (m_primary_key >= MAX_KEY) { ib_table->dict_frm_mismatch = DICT_FRM_NO_PK; /* This mismatch could cause further problems @@ -5951,7 +6980,7 @@ table_opened: ib_push_frm_error(thd, ib_table, table, 0, true); - /* If primary_key >= MAX_KEY, its (primary_key) + /* If m_primary_key >= MAX_KEY, its (m_primary_key) value could be out of bound if continue to index into key_info[] array. Find InnoDB primary index, and assign its key_length to ref_length. @@ -5992,10 +7021,10 @@ table_opened: save space, because all row reference buffers are allocated based on ref_length. */ - ref_length = table->key_info[primary_key].key_length; + ref_length = table->key_info[m_primary_key].key_length; } } else { - if (primary_key != MAX_KEY) { + if (m_primary_key != MAX_KEY) { ib_table->dict_frm_mismatch = DICT_NO_PK_FRM_HAS; @@ -6006,7 +7035,7 @@ table_opened: ib_push_frm_error(thd, ib_table, table, 0, true); } - prebuilt->clust_index_was_generated = TRUE; + m_prebuilt->clust_index_was_generated = TRUE; ref_length = DATA_ROW_ID_LEN; @@ -6020,74 +7049,217 @@ table_opened: if (key_used_on_scan != MAX_KEY) { sql_print_warning( - "Table %s key_used_on_scan is %lu even " - "though there is no primary key inside " - "InnoDB.", name, (ulong) key_used_on_scan); + "Table %s key_used_on_scan is %lu even" + " though there is no primary key inside" + " InnoDB.", name, (ulong) key_used_on_scan); } } /* Index block size in InnoDB: used by MySQL in query optimization */ stats.block_size = UNIV_PAGE_SIZE; - /* Init table lock structure */ - thr_lock_data_init(&share->lock,&lock,(void*) 0); - - if (prebuilt->table) { + if (m_prebuilt->table != NULL) { /* We update the highest file format in the system table space, if this table has higher file format setting. 
*/ trx_sys_file_format_max_upgrade( (const char**) &innobase_file_format_max, - dict_table_get_format(prebuilt->table)); + dict_table_get_format(m_prebuilt->table)); } /* Only if the table has an AUTOINC column. */ - if (prebuilt->table != NULL - && !prebuilt->table->ibd_file_missing + if (m_prebuilt->table != NULL + && !m_prebuilt->table->ibd_file_missing && table->found_next_number_field != NULL) { - dict_table_autoinc_lock(prebuilt->table); + dict_table_autoinc_lock(m_prebuilt->table); /* Since a table can already be "open" in InnoDB's internal data dictionary, we only init the autoinc counter once, the first time the table is loaded. We can safely reuse the autoinc value from a previous MySQL open. */ - if (dict_table_autoinc_read(prebuilt->table) == 0) { + if (dict_table_autoinc_read(m_prebuilt->table) == 0) { innobase_initialize_autoinc(); } - dict_table_autoinc_unlock(prebuilt->table); + dict_table_autoinc_unlock(m_prebuilt->table); + } + + /* Set plugin parser for fulltext index */ + for (uint i = 0; i < table->s->keys; i++) { + /* JAN: TODO: MySQL 5.7 FT Parser + if (table->key_info[i].flags & HA_USES_PARSER) { + dict_index_t* index = innobase_get_index(i); + plugin_ref parser = table->key_info[i].parser; + + ut_ad(index->type & DICT_FTS); + index->parser = + static_cast( + plugin_decl(parser)->info); + + index->is_ngram = strncmp( + plugin_name(parser)->str, + FTS_NGRAM_PARSER_NAME, + plugin_name(parser)->length) == 0; + + DBUG_EXECUTE_IF("fts_instrument_use_default_parser", + index->parser = &fts_default_parser;); + } + */ } info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); + /* We don't support compression for the system tablespace and + the temporary tablespace. Only because they are shared tablespaces. + There is no other technical reason. */ + + if (m_prebuilt->table != NULL + && !m_prebuilt->table->ibd_file_missing + && !is_shared_tablespace(m_prebuilt->table->space)) { + + /* JAN: TODO: MySQL 5.7: Table options should be used here + dberr_t err = fil_set_compression( + m_prebuilt->table->space, table->s->compress.str); + */ + dberr_t err = DB_SUCCESS; + + switch (err) { + case DB_NOT_FOUND: + case DB_UNSUPPORTED: + /* We will do another check before the create + table and push the error to the client there. */ + break; + + case DB_IO_NO_PUNCH_HOLE_TABLESPACE: + /* We did the check in the 'if' above. */ + + case DB_IO_NO_PUNCH_HOLE_FS: + /* During open we can't check whether the FS supports + punch hole or not, at least on Linux. */ + break; + + default: + ut_error; + + case DB_SUCCESS: + break; + } + } + DBUG_RETURN(0); } -UNIV_INTERN +/** Opens dictionary table object using table name. For partition, we need to +try alternative lower/upper case names to support moving data files across +platforms. +@param[in] table_name name of the table/partition +@param[in] norm_name normalized name of the table/partition +@param[in] is_partition if this is a partition of a table +@param[in] ignore_err error to ignore for loading dictionary object +@return dictionary table object or NULL if not found */ +dict_table_t* +ha_innobase::open_dict_table( + const char* table_name, + const char* norm_name, + bool is_partition, + dict_err_ignore_t ignore_err) +{ + DBUG_ENTER("ha_innobase::open_dict_table"); + dict_table_t* ib_table = dict_table_open_on_name(norm_name, FALSE, + TRUE, ignore_err); + + if (NULL == ib_table && is_partition) { + /* MySQL partition engine hard codes the file name + separator as "#P#". 
The text case is fixed even if + lower_case_table_names is set to 1 or 2. This is true + for sub-partition names as well. InnoDB always + normalises file names to lower case on Windows, this + can potentially cause problems when copying/moving + tables between platforms. + + 1) If boot against an installation from Windows + platform, then its partition table name could + be in lower case in system tables. So we will + need to check lower case name when load table. + + 2) If we boot an installation from other case + sensitive platform in Windows, we might need to + check the existence of table name without lower + case in the system table. */ + if (innobase_get_lower_case_table_names() == 1) { + char par_case_name[FN_REFLEN]; + +#ifndef _WIN32 + /* Check for the table using lower + case name, including the partition + separator "P" */ + strcpy(par_case_name, norm_name); + innobase_casedn_str(par_case_name); +#else + /* On Windows platfrom, check + whether there exists table name in + system table whose name is + not being normalized to lower case */ + create_table_info_t:: + normalize_table_name_low( + par_case_name, + table_name, FALSE); +#endif + ib_table = dict_table_open_on_name( + par_case_name, FALSE, TRUE, + ignore_err); + } + + if (ib_table != NULL) { +#ifndef _WIN32 + sql_print_warning("Partition table %s opened" + " after converting to lower" + " case. The table may have" + " been moved from a case" + " in-sensitive file system." + " Please recreate table in" + " the current file system\n", + norm_name); +#else + sql_print_warning("Partition table %s opened" + " after skipping the step to" + " lower case the table name." + " The table may have been" + " moved from a case sensitive" + " file system. Please" + " recreate table in the" + " current file system\n", + norm_name); +#endif + } + } + + DBUG_RETURN(ib_table); +} + handler* ha_innobase::clone( /*===============*/ const char* name, /*!< in: table name */ MEM_ROOT* mem_root) /*!< in: memory context */ { - ha_innobase* new_handler; - DBUG_ENTER("ha_innobase::clone"); - new_handler = static_cast(handler::clone(name, - mem_root)); - if (new_handler) { - DBUG_ASSERT(new_handler->prebuilt != NULL); + ha_innobase* new_handler = static_cast( + handler::clone(name, mem_root)); - new_handler->prebuilt->select_lock_type - = prebuilt->select_lock_type; + if (new_handler != NULL) { + DBUG_ASSERT(new_handler->m_prebuilt != NULL); + + new_handler->m_prebuilt->select_lock_type + = m_prebuilt->select_lock_type; } DBUG_RETURN(new_handler); } -UNIV_INTERN + uint ha_innobase::max_supported_key_part_length() const /*==============================================*/ @@ -6101,31 +7273,30 @@ ha_innobase::max_supported_key_part_length() const /******************************************************************//** Closes a handle to an InnoDB table. 
-@return 0 */ -UNIV_INTERN +@return 0 */ + int ha_innobase::close() /*================*/ { - THD* thd; - DBUG_ENTER("ha_innobase::close"); - thd = ha_thd(); + THD* thd = ha_thd(); + if (thd != NULL) { innobase_release_temporary_latches(ht, thd); } - row_prebuilt_free(prebuilt, FALSE); + row_prebuilt_free(m_prebuilt, FALSE); - if (upd_buf != NULL) { - ut_ad(upd_buf_size != 0); - my_free(upd_buf); - upd_buf = NULL; - upd_buf_size = 0; + if (m_upd_buf != NULL) { + ut_ad(m_upd_buf_size != 0); + my_free(m_upd_buf); + m_upd_buf = NULL; + m_upd_buf_size = 0; } - free_share(share); + free_share(m_share); MONITOR_INC(MONITOR_TABLE_CLOSE); @@ -6139,17 +7310,17 @@ ha_innobase::close() /* The following accessor functions should really be inside MySQL code! */ -/**************************************************************//** -Gets field offset for a field in a table. -@return offset */ +/** Gets field offset for a field in a table. +@param[in] table MySQL table object +@param[in] field MySQL field object +@return offset */ static inline uint get_field_offset( -/*=============*/ - const TABLE* table, /*!< in: MySQL table object */ - const Field* field) /*!< in: MySQL field object */ + const TABLE* table, + const Field* field) { - return((uint) (field->ptr - table->record[0])); + return(static_cast((field->ptr - table->record[0]))); } #ifdef WITH_WSREP @@ -6232,7 +7403,7 @@ wsrep_innobase_mysql_sort( DBUG_ASSERT(tmp_length <= buf_length); ret_length = tmp_length; } - + break; } case MYSQL_TYPE_DECIMAL : @@ -6263,203 +7434,39 @@ wsrep_innobase_mysql_sort( } #endif /* WITH_WSREP */ -/*************************************************************//** -InnoDB uses this function to compare two data fields for which the data type -is such that we must use MySQL code to compare them. NOTE that the prototype -of this function is in rem0cmp.cc in InnoDB source code! If you change this -function, remember to update the prototype there! -@return 1, 0, -1, if a is greater, equal, less than b, respectively */ -UNIV_INTERN +/******************************************************************//** +compare two character string according to their charset. */ int -innobase_mysql_cmp( -/*===============*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number, /*!< in: number of the charset */ - const unsigned char* a, /*!< in: data field */ - unsigned int a_length, /*!< in: data field length, - not UNIV_SQL_NULL */ - const unsigned char* b, /*!< in: data field */ - unsigned int b_length) /*!< in: data field length, - not UNIV_SQL_NULL */ +innobase_fts_text_cmp( +/*==================*/ + const void* cs, /*!< in: Character set */ + const void* p1, /*!< in: key */ + const void* p2) /*!< in: node */ { - CHARSET_INFO* charset; - enum_field_types mysql_tp; - int ret; + const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; + const fts_string_t* s1 = (const fts_string_t*) p1; + const fts_string_t* s2 = (const fts_string_t*) p2; - DBUG_ASSERT(a_length != UNIV_SQL_NULL); - DBUG_ASSERT(b_length != UNIV_SQL_NULL); + return(ha_compare_text( + charset, s1->f_str, static_cast(s1->f_len), + s2->f_str, static_cast(s2->f_len), 0)); +} - mysql_tp = (enum_field_types) mysql_type; +/******************************************************************//** +compare two character string case insensitively according to their charset. 
*/ +int +innobase_fts_text_case_cmp( +/*=======================*/ + const void* cs, /*!< in: Character set */ + const void* p1, /*!< in: key */ + const void* p2) /*!< in: node */ +{ + const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; + const fts_string_t* s1 = (const fts_string_t*) p1; + const fts_string_t* s2 = (const fts_string_t*) p2; + ulint newlen; - switch (mysql_tp) { - - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_VARCHAR: - /* Use the charset number to pick the right charset struct for - the comparison. Since the MySQL function get_charset may be - slow before Bar removes the mutex operation there, we first - look at 2 common charsets directly. */ - - if (charset_number == default_charset_info->number) { - charset = default_charset_info; - } else if (charset_number == my_charset_latin1.number) { - charset = &my_charset_latin1; - } else { - charset = get_charset(charset_number, MYF(MY_WME)); - - if (charset == NULL) { - sql_print_error("InnoDB needs charset %lu for doing " - "a comparison, but MySQL cannot " - "find that charset.", - (ulong) charset_number); - ut_a(0); - } - } - - /* Starting from 4.1.3, we use strnncollsp() in comparisons of - non-latin1_swedish_ci strings. NOTE that the collation order - changes then: 'b\0\0...' is ordered BEFORE 'b ...'. Users - having indexes on such data need to rebuild their tables! */ - - ret = charset->coll->strnncollsp( - charset, a, a_length, b, b_length); - - if (ret < 0) { - return(-1); - } else if (ret > 0) { - return(1); - } else { - return(0); - } - default: - ut_error; - } - - return(0); -} - - -/*************************************************************//** -Get the next token from the given string and store it in *token. */ -UNIV_INTERN -CHARSET_INFO* -innobase_get_fts_charset( -/*=====================*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number) /*!< in: number of the charset */ -{ - enum_field_types mysql_tp; - CHARSET_INFO* charset; - - mysql_tp = (enum_field_types) mysql_type; - - switch (mysql_tp) { - - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_VARCHAR: - /* Use the charset number to pick the right charset struct for - the comparison. Since the MySQL function get_charset may be - slow before Bar removes the mutex operation there, we first - look at 2 common charsets directly. */ - - if (charset_number == default_charset_info->number) { - charset = default_charset_info; - } else if (charset_number == my_charset_latin1.number) { - charset = &my_charset_latin1; - } else { - charset = get_charset(charset_number, MYF(MY_WME)); - - if (charset == NULL) { - sql_print_error("InnoDB needs charset %lu for doing " - "a comparison, but MySQL cannot " - "find that charset.", - (ulong) charset_number); - ut_a(0); - } - } - break; - default: - ut_error; - } - - return(charset); -} - -/*************************************************************//** -InnoDB uses this function to compare two data fields for which the data type -is such that we must use MySQL code to compare them. NOTE that the prototype -of this function is in rem0cmp.c in InnoDB source code! If you change this -function, remember to update the prototype there! 
-@return 1, 0, -1, if a is greater, equal, less than b, respectively */ -UNIV_INTERN -int -innobase_mysql_cmp_prefix( -/*======================*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number, /*!< in: number of the charset */ - const unsigned char* a, /*!< in: data field */ - unsigned int a_length, /*!< in: data field length, - not UNIV_SQL_NULL */ - const unsigned char* b, /*!< in: data field */ - unsigned int b_length) /*!< in: data field length, - not UNIV_SQL_NULL */ -{ - CHARSET_INFO* charset; - int result; - - charset = innobase_get_fts_charset(mysql_type, charset_number); - - result = ha_compare_text(charset, (uchar*) a, a_length, - (uchar*) b, b_length, 1); - - return(result); -} -/******************************************************************//** -compare two character string according to their charset. */ -UNIV_INTERN -int -innobase_fts_text_cmp( -/*==================*/ - const void* cs, /*!< in: Character set */ - const void* p1, /*!< in: key */ - const void* p2) /*!< in: node */ -{ - const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; - const fts_string_t* s1 = (const fts_string_t*) p1; - const fts_string_t* s2 = (const fts_string_t*) p2; - - return(ha_compare_text( - charset, s1->f_str, static_cast(s1->f_len), - s2->f_str, static_cast(s2->f_len), 0)); -} -/******************************************************************//** -compare two character string case insensitively according to their charset. */ -UNIV_INTERN -int -innobase_fts_text_case_cmp( -/*=======================*/ - const void* cs, /*!< in: Character set */ - const void* p1, /*!< in: key */ - const void* p2) /*!< in: node */ -{ - const CHARSET_INFO* charset = (const CHARSET_INFO*) cs; - const fts_string_t* s1 = (const fts_string_t*) p1; - const fts_string_t* s2 = (const fts_string_t*) p2; - ulint newlen; - - my_casedn_str(charset, (char*) s2->f_str); + my_casedn_str(charset, (char*) s2->f_str); newlen = strlen((const char*) s2->f_str); @@ -6467,9 +7474,9 @@ innobase_fts_text_case_cmp( charset, s1->f_str, static_cast(s1->f_len), s2->f_str, static_cast(newlen), 0)); } + /******************************************************************//** Get the first character's code position for FTS index partition. */ -UNIV_INTERN ulint innobase_strnxfrm( /*==============*/ @@ -6498,7 +7505,6 @@ innobase_strnxfrm( /******************************************************************//** compare two character string according to their charset. */ -UNIV_INTERN int innobase_fts_text_cmp_prefix( /*=========================*/ @@ -6522,7 +7528,6 @@ innobase_fts_text_cmp_prefix( /******************************************************************//** Makes all characters in a string lower case. */ -UNIV_INTERN size_t innobase_fts_casedn_str( /*====================*/ @@ -6551,7 +7556,6 @@ innobase_fts_casedn_str( Get the next token from the given string and store it in *token. 
It is mostly copied from MyISAM's doc parsing function ft_simple_get_word() @return length of string processed */ -UNIV_INTERN ulint innobase_mysql_fts_get_token( /*=========================*/ @@ -6559,10 +7563,7 @@ innobase_mysql_fts_get_token( const byte* start, /*!< in: start of text */ const byte* end, /*!< in: one character past end of text */ - fts_string_t* token, /*!< out: token's text */ - ulint* offset) /*!< out: offset to token, - measured as characters from - 'start' */ + fts_string_t* token) /*!< out: token's text */ { int mbl; const uchar* doc = start; @@ -6620,21 +7621,17 @@ innobase_mysql_fts_get_token( return(doc - start); } -/**************************************************************//** -Converts a MySQL type to an InnoDB type. Note that this function returns +/** Converts a MySQL type to an InnoDB type. Note that this function returns the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. -@return DATA_BINARY, DATA_VARCHAR, ... */ -UNIV_INTERN +@param[out] unsigned_flag DATA_UNSIGNED if an 'unsigned type'; at least +ENUM and SET, and unsigned integer types are 'unsigned types' +@param[in] f MySQL Field +@return DATA_BINARY, DATA_VARCHAR, ... */ ulint get_innobase_type_from_mysql_type( -/*==============================*/ - ulint* unsigned_flag, /*!< out: DATA_UNSIGNED if an - 'unsigned type'; - at least ENUM and SET, - and unsigned integer - types are 'unsigned types' */ - const void* f) /*!< in: MySQL Field */ + ulint* unsigned_flag, + const void* f) { const class Field* field = reinterpret_cast(f); @@ -6717,11 +7714,19 @@ get_innobase_type_from_mysql_type( case MYSQL_TYPE_DECIMAL: return(DATA_DECIMAL); case MYSQL_TYPE_GEOMETRY: + return (DATA_BLOB); + /* TODO: MySQL 5.7: Geometry + return(DATA_GEOMETRY); + */ case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_LONG_BLOB: return(DATA_BLOB); + /* JAN: TODO: MySQL 5.7 JSON + case MYSQL_TYPE_JSON: // JSON fields are stored as BLOBs + return(DATA_BLOB); + */ case MYSQL_TYPE_NULL: /* MySQL currently accepts "NULL" datatype, but will reject such datatype in the next release. We will cope @@ -6734,26 +7739,10 @@ get_innobase_type_from_mysql_type( return(0); } -/*******************************************************************//** -Writes an unsigned integer value < 64k to 2 bytes, in the little-endian -storage format. */ -static inline -void -innobase_write_to_2_little_endian( -/*==============================*/ - byte* buf, /*!< in: where to store */ - ulint val) /*!< in: value to write, must be < 64k */ -{ - ut_a(val < 256 * 256); - - buf[0] = (byte)(val & 0xFF); - buf[1] = (byte)(val / 256); -} - /*******************************************************************//** Reads an unsigned integer value < 64k from 2 bytes, in the little-endian storage format. -@return value */ +@return value */ static inline uint innobase_read_from_2_little_endian( @@ -6770,7 +7759,7 @@ Stores a key value for a row to a buffer. 
UNIV_INTERN uint wsrep_store_key_val_for_row( -/*===============================*/ +/*=========================*/ THD* thd, TABLE* table, uint keynr, /*!< in: key number */ @@ -6800,7 +7789,7 @@ wsrep_store_key_val_for_row( if (key_part->null_bit) { if (buff_space > 0) { - if (record[key_part->null_offset] + if (record[key_part->null_offset] & key_part->null_bit) { *buff = 1; part_is_null = TRUE; @@ -6876,7 +7865,7 @@ wsrep_store_key_val_for_row( memcpy(sorted, data, true_len); true_len = wsrep_innobase_mysql_sort( - mysql_type, cs->number, sorted, true_len, + mysql_type, cs->number, sorted, true_len, REC_VERSION_56_MAX_INDEX_COL_LEN); if (wsrep_protocol_version > 1) { @@ -7069,367 +8058,93 @@ wsrep_store_key_val_for_row( } #endif /* WITH_WSREP */ -/*******************************************************************//** -Stores a key value for a row to a buffer. -@return key value length as stored in buff */ -UNIV_INTERN -uint -ha_innobase::store_key_val_for_row( -/*===============================*/ - uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL - format) */ - uint buff_len,/*!< in: buffer length */ - const uchar* record)/*!< in: row in MySQL format */ +/**************************************************************//** +Determines if a field is needed in a m_prebuilt struct 'template'. +@return field to use, or NULL if the field is not needed */ +static +const Field* +build_template_needs_field( +/*=======================*/ + ibool index_contains, /*!< in: + dict_index_contains_col_or_prefix( + index, i) */ + ibool read_just_key, /*!< in: TRUE when MySQL calls + ha_innobase::extra with the + argument HA_EXTRA_KEYREAD; it is enough + to read just columns defined in + the index (i.e., no read of the + clustered index record necessary) */ + ibool fetch_all_in_key, + /*!< in: true=fetch all fields in + the index */ + ibool fetch_primary_key_cols, + /*!< in: true=fetch the + primary key columns */ + dict_index_t* index, /*!< in: InnoDB index to use */ + const TABLE* table, /*!< in: MySQL table object */ + ulint i, /*!< in: field index in InnoDB table */ + ulint sql_idx, /*!< in: field index in SQL table */ + ulint num_v) /*!< in: num virtual column so far */ { - KEY* key_info = table->key_info + keynr; - KEY_PART_INFO* key_part = key_info->key_part; - KEY_PART_INFO* end = - key_part + key_info->user_defined_key_parts; - char* buff_start = buff; - enum_field_types mysql_type; - Field* field; - ibool is_null; - - DBUG_ENTER("store_key_val_for_row"); - - /* The format for storing a key field in MySQL is the following: - - 1. If the column can be NULL, then in the first byte we put 1 if the - field value is NULL, 0 otherwise. + const Field* field = table->field[sql_idx]; - 2. If the column is of a BLOB type (it must be a column prefix field - in this case), then we put the length of the data in the field to the - next 2 bytes, in the little-endian format. If the field is SQL NULL, - then these 2 bytes are set to 0. Note that the length of data in the - field is <= column prefix length. + if (!index_contains) { + if (read_just_key) { + /* If this is a 'key read', we do not need + columns that are not in the key */ - 3. In a column prefix field, prefix_len next bytes are reserved for - data. In a normal field the max field length next bytes are reserved - for data. For a VARCHAR(n) the max field length is n. If the stored - value is the SQL NULL then these data bytes are set to 0. 
+ return(NULL); + } + } else if (fetch_all_in_key) { + /* This field is needed in the query */ - 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that - in the MySQL row format, the length is stored in 1 or 2 bytes, - depending on the maximum allowed length. But in the MySQL key value - format, the length always takes 2 bytes. + return(field); + } - We have to zero-fill the buffer so that MySQL is able to use a - simple memcmp to compare two key values to determine if they are - equal. MySQL does this to compare contents of two 'ref' values. */ + if (bitmap_is_set(table->read_set, static_cast(sql_idx)) + || bitmap_is_set(table->write_set, static_cast(sql_idx))) { + /* This field is needed in the query */ - memset(buff, 0, buff_len); + return(field); + } - for (; key_part != end; key_part++) { - is_null = FALSE; + ut_ad(i >= num_v); + if (fetch_primary_key_cols + && dict_table_col_in_clustered_key(index->table, i - num_v)) { + /* This field is needed in the query */ - if (key_part->null_bit) { - if (record[key_part->null_offset] - & key_part->null_bit) { - *buff = 1; - is_null = TRUE; - } else { - *buff = 0; - } - buff++; - } + return(field); + } - field = key_part->field; - mysql_type = field->type(); + /* This field is not needed in the query, skip it */ - if (mysql_type == MYSQL_TYPE_VARCHAR) { - /* >= 5.0.3 true VARCHAR */ - ulint lenlen; - ulint len; - const byte* data; - ulint key_len; - ulint true_len; - const CHARSET_INFO* cs; - int error=0; + return(NULL); +} - key_len = key_part->length; +/**************************************************************//** +Determines if a field is needed in a m_prebuilt struct 'template'. +@return whether the field is needed for index condition pushdown */ +inline +bool +build_template_needs_field_in_icp( +/*==============================*/ + const dict_index_t* index, /*!< in: InnoDB index */ + const row_prebuilt_t* prebuilt,/*!< in: row fetch template */ + bool contains,/*!< in: whether the index contains + column i */ + ulint i, /*!< in: column number */ + bool is_virtual) + /*!< in: a virtual column or not */ +{ + ut_ad(contains == dict_index_contains_col_or_prefix(index, i, is_virtual)); - if (is_null) { - buff += key_len + 2; - - continue; - } - cs = field->charset(); - - lenlen = (ulint) - (((Field_varstring*) field)->length_bytes); - - data = row_mysql_read_true_varchar(&len, - (byte*) (record - + (ulint) get_field_offset(table, field)), - lenlen); - - true_len = len; - - /* For multi byte character sets we need to calculate - the true length of the key */ - - if (len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char*) data, - (const char*) data + len, - (uint) (key_len / cs->mbmaxlen), - &error); - } - - /* In a column prefix index, we may need to truncate - the stored value: */ - - if (true_len > key_len) { - true_len = key_len; - } - - /* The length in a key value is always stored in 2 - bytes */ - - row_mysql_store_true_var_len((byte*) buff, true_len, 2); - buff += 2; - - memcpy(buff, data, true_len); - - /* Note that we always reserve the maximum possible - length of the true VARCHAR in the key value, though - only len first bytes after the 2 length bytes contain - actual data. The rest of the space was reset to zero - in the memset() call above. 
*/ - - buff += key_len; - - } else if (mysql_type == MYSQL_TYPE_TINY_BLOB - || mysql_type == MYSQL_TYPE_MEDIUM_BLOB - || mysql_type == MYSQL_TYPE_BLOB - || mysql_type == MYSQL_TYPE_LONG_BLOB - /* MYSQL_TYPE_GEOMETRY data is treated - as BLOB data in innodb. */ - || mysql_type == MYSQL_TYPE_GEOMETRY) { - - const CHARSET_INFO* cs; - ulint key_len; - ulint true_len; - int error=0; - ulint blob_len; - const byte* blob_data; - - ut_a(key_part->key_part_flag & HA_PART_KEY_SEG); - - key_len = key_part->length; - - if (is_null) { - buff += key_len + 2; - - continue; - } - - cs = field->charset(); - - blob_data = row_mysql_read_blob_ref(&blob_len, - (byte*) (record - + (ulint) get_field_offset(table, field)), - (ulint) field->pack_length()); - - true_len = blob_len; - - ut_a(get_field_offset(table, field) - == key_part->offset); - - /* For multi byte character sets we need to calculate - the true length of the key */ - - if (blob_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) cs->cset->well_formed_len(cs, - (const char*) blob_data, - (const char*) blob_data - + blob_len, - (uint) (key_len / cs->mbmaxlen), - &error); - } - - /* All indexes on BLOB and TEXT are column prefix - indexes, and we may need to truncate the data to be - stored in the key value: */ - - if (true_len > key_len) { - true_len = key_len; - } - - /* MySQL reserves 2 bytes for the length and the - storage of the number is little-endian */ - - innobase_write_to_2_little_endian( - (byte*) buff, true_len); - buff += 2; - - memcpy(buff, blob_data, true_len); - - /* Note that we always reserve the maximum possible - length of the BLOB prefix in the key value. */ - - buff += key_len; - } else { - /* Here we handle all other data types except the - true VARCHAR, BLOB and TEXT. Note that the column - value we store may be also in a column prefix - index. */ - - const CHARSET_INFO* cs = NULL; - ulint true_len; - ulint key_len; - const uchar* src_start; - int error=0; - enum_field_types real_type; - - key_len = key_part->length; - - if (is_null) { - buff += key_len; - - continue; - } - - src_start = record + key_part->offset; - real_type = field->real_type(); - true_len = key_len; - - /* Character set for the field is defined only - to fields whose type is string and real field - type is not enum or set. For these fields check - if character set is multi byte. */ - - if (real_type != MYSQL_TYPE_ENUM - && real_type != MYSQL_TYPE_SET - && ( mysql_type == MYSQL_TYPE_VAR_STRING - || mysql_type == MYSQL_TYPE_STRING)) { - - cs = field->charset(); - - /* For multi byte character sets we need to - calculate the true length of the key */ - - if (key_len > 0 && cs->mbmaxlen > 1) { - - true_len = (ulint) - cs->cset->well_formed_len(cs, - (const char*) src_start, - (const char*) src_start - + key_len, - (uint) (key_len - / cs->mbmaxlen), - &error); - } - } - - memcpy(buff, src_start, true_len); - buff += true_len; - - /* Pad the unused space with spaces. */ - - if (true_len < key_len) { - ulint pad_len = key_len - true_len; - ut_a(cs != NULL); - ut_a(!(pad_len % cs->mbminlen)); - - cs->cset->fill(cs, buff, pad_len, - 0x20 /* space */); - buff += pad_len; - } - } - } - - ut_a(buff <= buff_start + buff_len); - - DBUG_RETURN((uint)(buff - buff_start)); -} - -/**************************************************************//** -Determines if a field is needed in a prebuilt struct 'template'. 
-@return field to use, or NULL if the field is not needed */ -static -const Field* -build_template_needs_field( -/*=======================*/ - ibool index_contains, /*!< in: - dict_index_contains_col_or_prefix( - index, i) */ - ibool read_just_key, /*!< in: TRUE when MySQL calls - ha_innobase::extra with the - argument HA_EXTRA_KEYREAD; it is enough - to read just columns defined in - the index (i.e., no read of the - clustered index record necessary) */ - ibool fetch_all_in_key, - /*!< in: true=fetch all fields in - the index */ - ibool fetch_primary_key_cols, - /*!< in: true=fetch the - primary key columns */ - dict_index_t* index, /*!< in: InnoDB index to use */ - const TABLE* table, /*!< in: MySQL table object */ - ulint i, /*!< in: field index in InnoDB table */ - ulint sql_idx) /*!< in: field index in SQL table */ -{ - const Field* field = table->field[sql_idx]; - - ut_ad(index_contains == dict_index_contains_col_or_prefix(index, i)); - - if (!index_contains) { - if (read_just_key) { - /* If this is a 'key read', we do not need - columns that are not in the key */ - - return(NULL); - } - } else if (fetch_all_in_key) { - /* This field is needed in the query */ - - return(field); - } - - if (bitmap_is_set(table->read_set, static_cast(sql_idx)) - || bitmap_is_set(table->write_set, static_cast(sql_idx))) { - /* This field is needed in the query */ - - return(field); - } - - if (fetch_primary_key_cols - && dict_table_col_in_clustered_key(index->table, i)) { - /* This field is needed in the query */ - - return(field); - } - - /* This field is not needed in the query, skip it */ - - return(NULL); -} - -/**************************************************************//** -Determines if a field is needed in a prebuilt struct 'template'. -@return whether the field is needed for index condition pushdown */ -inline -bool -build_template_needs_field_in_icp( -/*==============================*/ - const dict_index_t* index, /*!< in: InnoDB index */ - const row_prebuilt_t* prebuilt,/*!< in: row fetch template */ - bool contains,/*!< in: whether the index contains - column i */ - ulint i) /*!< in: column number */ -{ - ut_ad(contains == dict_index_contains_col_or_prefix(index, i)); - - return(index == prebuilt->index - ? contains - : dict_index_contains_col_or_prefix(prebuilt->index, i)); -} + return(index == prebuilt->index + ? contains + : dict_index_contains_col_or_prefix(prebuilt->index, i, is_virtual)); +} /**************************************************************//** -Adds a field to a prebuilt struct 'template'. +Adds a field to a m_prebuilt struct 'template'. 
@return the field template */ static mysql_row_templ_t* @@ -7440,35 +8155,58 @@ build_template_field( dict_index_t* index, /*!< in: InnoDB index to use */ TABLE* table, /*!< in: MySQL table object */ const Field* field, /*!< in: field in MySQL table */ - ulint i) /*!< in: field index in InnoDB table */ + ulint i, /*!< in: field index in InnoDB table */ + ulint v_no) /*!< in: field index for virtual col */ { mysql_row_templ_t* templ; const dict_col_t* col; - //ut_ad(field == table->field[i]); ut_ad(clust_index->table == index->table); - col = dict_table_get_nth_col(index->table, i); - templ = prebuilt->mysql_template + prebuilt->n_template++; UNIV_MEM_INVALID(templ, sizeof *templ); - templ->col_no = i; - templ->clust_rec_field_no = dict_col_get_clust_pos(col, clust_index); - ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); - templ->rec_field_is_prefix = FALSE; - if (dict_index_is_clust(index)) { - templ->rec_field_no = templ->clust_rec_field_no; - templ->rec_prefix_field_no = ULINT_UNDEFINED; + if (innobase_is_v_fld(field)) { + templ->is_virtual = true; + col = &dict_table_get_nth_v_col(index->table, v_no)->m_col; } else { - /* If we're in a secondary index, keep track - * of the original index position even if this - * is just a prefix index; we will use this - * later to avoid a cluster index lookup in - * some cases.*/ + templ->is_virtual = false; + col = dict_table_get_nth_col(index->table, i); + } + + if (!templ->is_virtual) { + templ->col_no = i; + templ->clust_rec_field_no = dict_col_get_clust_pos( + col, clust_index); + ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); + templ->rec_field_is_prefix = FALSE; + templ->rec_prefix_field_no = ULINT_UNDEFINED; - templ->rec_field_no = dict_index_get_nth_col_pos(index, i, + if (dict_index_is_clust(index)) { + templ->rec_field_no = templ->clust_rec_field_no; + } else { + /* If we're in a secondary index, keep track + * of the original index position even if this + * is just a prefix index; we will use this + * later to avoid a cluster index lookup in + * some cases.*/ + + templ->rec_field_no = dict_index_get_nth_col_pos(index, i, &templ->rec_prefix_field_no); + } + } else { + templ->clust_rec_field_no = v_no; + templ->rec_prefix_field_no = ULINT_UNDEFINED; + + if (dict_index_is_clust(index)) { + templ->rec_field_no = templ->clust_rec_field_no; + } else { + templ->rec_field_no + = dict_index_get_nth_col_or_prefix_pos( + index, v_no, FALSE, true, + &templ->rec_prefix_field_no); + } + templ->icp_rec_field_no = ULINT_UNDEFINED; } if (field->real_maybe_null()) { @@ -7480,8 +8218,8 @@ build_template_field( templ->mysql_null_bit_mask = 0; } - templ->mysql_col_offset = (ulint) get_field_offset(table, field); + templ->mysql_col_offset = (ulint) get_field_offset(table, field); templ->mysql_col_len = (ulint) field->pack_length(); templ->type = col->mtype; templ->mysql_type = (ulint) field->type(); @@ -7489,6 +8227,8 @@ build_template_field( if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { templ->mysql_length_bytes = (ulint) (((Field_varstring*) field)->length_bytes); + } else { + templ->mysql_length_bytes = 0; } templ->charset = dtype_get_charset_coll(col->prtype); @@ -7508,23 +8248,34 @@ build_template_field( } } + /* For spatial index, we need to access cluster index. 
*/ + if (dict_index_is_spatial(index)) { + prebuilt->need_to_access_clustered = TRUE; + } + if (prebuilt->mysql_prefix_len < templ->mysql_col_offset + templ->mysql_col_len) { prebuilt->mysql_prefix_len = templ->mysql_col_offset + templ->mysql_col_len; } - if (templ->type == DATA_BLOB) { + if (DATA_LARGE_MTYPE(templ->type)) { prebuilt->templ_contains_blob = TRUE; } + if (templ->type == DATA_POINT) { + /* We set this only when it's DATA_POINT, but not + DATA_VAR_POINT */ + prebuilt->templ_contains_fixed_point = TRUE; + } + return(templ); } /**************************************************************//** -Builds a 'template' to the prebuilt struct. The template is used in fast +Builds a 'template' to the m_prebuilt struct. The template is used in fast retrieval of just those column values MySQL needs in its processing. */ -UNIV_INTERN + void ha_innobase::build_template( /*========================*/ @@ -7538,20 +8289,20 @@ ha_innobase::build_template( ibool fetch_primary_key_cols = FALSE; ulint i, sql_idx; - if (prebuilt->select_lock_type == LOCK_X) { + if (m_prebuilt->select_lock_type == LOCK_X) { /* We always retrieve the whole clustered index record if we use exclusive row level locks, for example, if the read is done in an UPDATE statement. */ whole_row = true; } else if (!whole_row) { - if (prebuilt->hint_need_to_fetch_extra_cols + if (m_prebuilt->hint_need_to_fetch_extra_cols == ROW_RETRIEVE_ALL_COLS) { /* We know we must at least fetch all columns in the key, or all columns in the table */ - if (prebuilt->read_just_key) { + if (m_prebuilt->read_just_key) { /* MySQL has instructed us that it is enough to fetch the columns in the key; looks like MySQL can set this flag also when there is @@ -7563,26 +8314,26 @@ ha_innobase::build_template( } else { whole_row = true; } - } else if (prebuilt->hint_need_to_fetch_extra_cols + } else if (m_prebuilt->hint_need_to_fetch_extra_cols == ROW_RETRIEVE_PRIMARY_KEY) { /* We must at least fetch all primary key cols. Note that if the clustered index was internally generated by InnoDB on the row id (no primary key was defined), then row_search_for_mysql() will always retrieve the row id to a special buffer in the - prebuilt struct. */ + m_prebuilt struct. */ fetch_primary_key_cols = TRUE; } } - clust_index = dict_table_get_first_index(prebuilt->table); + clust_index = dict_table_get_first_index(m_prebuilt->table); - index = whole_row ? clust_index : prebuilt->index; + index = whole_row ? clust_index : m_prebuilt->index; - prebuilt->need_to_access_clustered = (index == clust_index); + m_prebuilt->need_to_access_clustered = (index == clust_index); - /* Either prebuilt->index should be a secondary index, or it + /* Either m_prebuilt->index should be a secondary index, or it should be the clustered index. */ ut_ad(dict_index_is_clust(index) == (index == clust_index)); @@ -7591,40 +8342,51 @@ ha_innobase::build_template( n_stored_fields= (ulint)table->s->stored_fields; /* number of stored columns */ - if (!prebuilt->mysql_template) { - prebuilt->mysql_template = (mysql_row_templ_t*) - mem_alloc(n_stored_fields * sizeof(mysql_row_templ_t)); + if (!m_prebuilt->mysql_template) { + m_prebuilt->mysql_template = (mysql_row_templ_t*) + ut_malloc_nokey(n_stored_fields * sizeof(mysql_row_templ_t)); } - prebuilt->template_type = whole_row + m_prebuilt->template_type = whole_row ? 
ROW_MYSQL_WHOLE_ROW : ROW_MYSQL_REC_FIELDS; - prebuilt->null_bitmap_len = table->s->null_bytes; + m_prebuilt->null_bitmap_len = table->s->null_bytes; - /* Prepare to build prebuilt->mysql_template[]. */ - prebuilt->templ_contains_blob = FALSE; - prebuilt->mysql_prefix_len = 0; - prebuilt->n_template = 0; - prebuilt->idx_cond_n_cols = 0; + /* Prepare to build m_prebuilt->mysql_template[]. */ + m_prebuilt->templ_contains_blob = FALSE; + m_prebuilt->templ_contains_fixed_point = FALSE; + m_prebuilt->mysql_prefix_len = 0; + m_prebuilt->n_template = 0; + m_prebuilt->idx_cond_n_cols = 0; /* Note that in InnoDB, i is the column number in the table. MySQL calls columns 'fields'. */ - if (active_index != MAX_KEY && active_index == pushed_idx_cond_keyno) { + if (active_index != MAX_KEY + && active_index == pushed_idx_cond_keyno) { + ulint num_v = 0; + /* Push down an index condition or an end_range check. */ for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + ibool index_contains; while (!table->field[sql_idx]->stored_in_db()) { - sql_idx++; - } + sql_idx++; + } - const ibool index_contains - = dict_index_contains_col_or_prefix(index, i); + if (innobase_is_v_fld(table->field[sql_idx])) { + index_contains = dict_index_contains_col_or_prefix( + index, num_v, true); + } else { + index_contains = dict_index_contains_col_or_prefix( + index, i - num_v, false); + } /* Test if an end_range or an index condition refers to the field. Note that "index" and "index_contains" may refer to the clustered index. - Index condition pushdown is relative to prebuilt->index - (the index that is being looked up first). */ + Index condition pushdown is relative to + m_prebuilt->index (the index that is being + looked up first). */ /* When join_read_always_key() invokes this code via handler::ha_index_init() and @@ -7634,8 +8396,11 @@ ha_innobase::build_template( the subset field->part_of_key.is_set(active_index) which would be acceptable if end_range==NULL. */ + bool is_v = innobase_is_v_fld(table->field[sql_idx]); + if (build_template_needs_field_in_icp( - index, prebuilt, index_contains, i)) { + index, m_prebuilt, index_contains, + is_v ? num_v : i - num_v, is_v)) { /* Needed in ICP */ const Field* field; mysql_row_templ_t* templ; @@ -7645,33 +8410,41 @@ ha_innobase::build_template( } else { field = build_template_needs_field( index_contains, - prebuilt->read_just_key, + m_prebuilt->read_just_key, fetch_all_in_key, fetch_primary_key_cols, - index, table, i, sql_idx); + index, table, i, sql_idx, num_v); if (!field) { + if (innobase_is_v_fld( + table->field[sql_idx])) { + num_v++; + } continue; } } templ = build_template_field( - prebuilt, clust_index, index, - table, field, i); - prebuilt->idx_cond_n_cols++; - ut_ad(prebuilt->idx_cond_n_cols - == prebuilt->n_template); + m_prebuilt, clust_index, index, + table, field, i - num_v, 0); - if (index == prebuilt->index) { + ut_ad(!templ->is_virtual); + + m_prebuilt->idx_cond_n_cols++; + ut_ad(m_prebuilt->idx_cond_n_cols + == m_prebuilt->n_template); + + if (index == m_prebuilt->index) { templ->icp_rec_field_no = templ->rec_field_no; } else { templ->icp_rec_field_no = dict_index_get_nth_col_pos( - prebuilt->index, i, - NULL); + m_prebuilt->index, + i - num_v, + &templ->rec_prefix_field_no); } - if (dict_index_is_clust(prebuilt->index)) { + if (dict_index_is_clust(m_prebuilt->index)) { ut_ad(templ->icp_rec_field_no != ULINT_UNDEFINED); /* If the primary key includes @@ -7682,7 +8455,7 @@ ha_innobase::build_template( off-page (externally stored) columns. 
*/ if (templ->icp_rec_field_no - < prebuilt->index->n_uniq) { + < m_prebuilt->index->n_uniq) { /* This is a key column; all set. */ continue; @@ -7698,7 +8471,9 @@ ha_innobase::build_template( templ->icp_rec_field_no = dict_index_get_nth_col_or_prefix_pos( - prebuilt->index, i, TRUE, NULL); + m_prebuilt->index, i - num_v, + true, false, + &templ->rec_prefix_field_no); ut_ad(templ->icp_rec_field_no != ULINT_UNDEFINED); @@ -7718,29 +8493,44 @@ ha_innobase::build_template( we were unable to use an accurate condition for end_range in the "if" condition above, and the following assertion would fail. - ut_ad(!dict_index_is_clust(prebuilt->index) + ut_ad(!dict_index_is_clust(m_prebuilt->index) || templ->rec_field_no - < prebuilt->index->n_uniq); + < m_prebuilt->index->n_uniq); */ } + if (innobase_is_v_fld(table->field[sql_idx])) { + num_v++; + } } - ut_ad(prebuilt->idx_cond_n_cols > 0); - ut_ad(prebuilt->idx_cond_n_cols == prebuilt->n_template); + ut_ad(m_prebuilt->idx_cond_n_cols > 0); + ut_ad(m_prebuilt->idx_cond_n_cols == m_prebuilt->n_template); + + num_v = 0; /* Include the fields that are not needed in index condition pushdown. */ for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { + mysql_row_templ_t* templ; + ibool index_contains; while (!table->field[sql_idx]->stored_in_db()) { sql_idx++; } - const ibool index_contains - = dict_index_contains_col_or_prefix(index, i); + if (innobase_is_v_fld(table->field[sql_idx])) { + index_contains = dict_index_contains_col_or_prefix( + index, num_v, true); + } else { + index_contains = dict_index_contains_col_or_prefix( + index, i - num_v, false); + } + + bool is_v = innobase_is_v_fld(table->field[sql_idx]); if (!build_template_needs_field_in_icp( - index, prebuilt, index_contains, i)) { + index, m_prebuilt, index_contains, + is_v ? num_v : i - num_v, is_v)) { /* Not needed in ICP */ const Field* field; @@ -7749,25 +8539,34 @@ ha_innobase::build_template( } else { field = build_template_needs_field( index_contains, - prebuilt->read_just_key, + m_prebuilt->read_just_key, fetch_all_in_key, fetch_primary_key_cols, - index, table, i, sql_idx); + index, table, i, sql_idx, num_v); if (!field) { + if (innobase_is_v_fld(table->field[sql_idx])) { + num_v++; + } continue; } } - build_template_field(prebuilt, - clust_index, index, - table, field, i); + templ = build_template_field( + m_prebuilt, clust_index, index, + table, field, i - num_v, num_v); + + if (templ->is_virtual) { + num_v++; + } } } - prebuilt->idx_cond = this; + m_prebuilt->idx_cond = this; } else { + mysql_row_templ_t* templ; + ulint num_v = 0; /* No index condition pushdown */ - prebuilt->idx_cond = NULL; + m_prebuilt->idx_cond = NULL; for (i = 0, sql_idx = 0; i < n_stored_fields; i++, sql_idx++) { const Field* field; @@ -7777,32 +8576,68 @@ ha_innobase::build_template( } if (whole_row) { + /* Even this is whole_row, if the seach is + on a virtual column, and read_just_key is + set, and field is not in this index, we + will not try to fill the value since they + are not stored in such index nor in the + cluster index. 
*/ + if (innobase_is_v_fld(table->field[sql_idx]) + && m_prebuilt->read_just_key + && !dict_index_contains_col_or_prefix( + m_prebuilt->index, num_v, true)) + { + ut_ad(!bitmap_is_set( + table->read_set, i)); + ut_ad(!bitmap_is_set( + table->write_set, i)); + + continue; + } field = table->field[sql_idx]; } else { + ibool contain; + + if (innobase_is_v_fld(table->field[sql_idx])) { + contain = dict_index_contains_col_or_prefix( + index, num_v, true); + } else { + contain = dict_index_contains_col_or_prefix( + index, i - num_v, + false); + } + field = build_template_needs_field( - dict_index_contains_col_or_prefix( - index, i), - prebuilt->read_just_key, + contain, + m_prebuilt->read_just_key, fetch_all_in_key, fetch_primary_key_cols, - index, table, i, sql_idx); + index, table, i, sql_idx, num_v); + if (!field) { + if (innobase_is_v_fld(table->field[sql_idx])) { + num_v++; + } continue; } } - build_template_field(prebuilt, clust_index, index, - table, field, i); + templ = build_template_field( + m_prebuilt, clust_index, index, + table, field, i - num_v, num_v); + if (templ->is_virtual) { + num_v++; + } } } - if (index != clust_index && prebuilt->need_to_access_clustered) { + if (index != clust_index && m_prebuilt->need_to_access_clustered) { /* Change rec_field_no's to correspond to the clustered index record */ - for (i = 0; i < prebuilt->n_template; i++) { + for (i = 0; i < m_prebuilt->n_template; i++) { mysql_row_templ_t* templ - = &prebuilt->mysql_template[i]; + = &m_prebuilt->mysql_template[i]; templ->rec_field_no = templ->clust_rec_field_no; } @@ -7815,20 +8650,28 @@ binlogging. We need to eliminate the non-determinism that will arise in INSERT ... SELECT type of statements, since MySQL binlog only stores the min value of the autoinc interval. Once that is fixed we can get rid of the special lock handling. -@return DB_SUCCESS if all OK else error code */ -UNIV_INTERN +@return DB_SUCCESS if all OK else error code */ + dberr_t ha_innobase::innobase_lock_autoinc(void) /*====================================*/ { dberr_t error = DB_SUCCESS; + long lock_mode = innobase_autoinc_lock_mode; - ut_ad(!srv_read_only_mode); + ut_ad(!srv_read_only_mode + || dict_table_is_intrinsic(m_prebuilt->table)); + + if (dict_table_is_intrinsic(m_prebuilt->table)) { + /* Intrinsic table are not shared accorss connection + so there is no need to AUTOINC lock the table. */ + lock_mode = AUTOINC_NO_LOCKING; + } - switch (innobase_autoinc_lock_mode) { + switch (lock_mode) { case AUTOINC_NO_LOCKING: /* Acquire only the AUTOINC mutex. */ - dict_table_autoinc_lock(prebuilt->table); + dict_table_autoinc_lock(m_prebuilt->table); break; case AUTOINC_NEW_STYLE_LOCKING: @@ -7837,21 +8680,20 @@ ha_innobase::innobase_lock_autoinc(void) transaction has already acquired the AUTOINC lock on behalf of a LOAD FILE or INSERT ... SELECT etc. type of statement. */ - if (thd_sql_command(user_thd) == SQLCOM_INSERT - || thd_sql_command(user_thd) == SQLCOM_REPLACE - || thd_sql_command(user_thd) == SQLCOM_END // RBR event + if (thd_sql_command(m_user_thd) == SQLCOM_INSERT + || thd_sql_command(m_user_thd) == SQLCOM_REPLACE + || thd_sql_command(m_user_thd) == SQLCOM_END // RBR event ) { - dict_table_t* ib_table = prebuilt->table; /* Acquire the AUTOINC mutex. */ - dict_table_autoinc_lock(ib_table); + dict_table_autoinc_lock(m_prebuilt->table); /* We need to check that another transaction isn't already holding the AUTOINC lock on the table. 
*/ - if (ib_table->n_waiting_or_granted_auto_inc_locks) { + if (m_prebuilt->table->n_waiting_or_granted_auto_inc_locks) { /* Release the mutex to avoid deadlocks and fall back to old style locking. */ - dict_table_autoinc_unlock(ib_table); + dict_table_autoinc_unlock(m_prebuilt->table); } else { /* Do not fall back to old style locking. */ break; @@ -7860,12 +8702,12 @@ ha_innobase::innobase_lock_autoinc(void) /* Fall through to old style locking. */ case AUTOINC_OLD_STYLE_LOCKING: - error = row_lock_table_autoinc_for_mysql(prebuilt); + error = row_lock_table_autoinc_for_mysql(m_prebuilt); if (error == DB_SUCCESS) { /* Acquire the AUTOINC mutex. */ - dict_table_autoinc_lock(prebuilt->table); + dict_table_autoinc_lock(m_prebuilt->table); } break; @@ -7877,13 +8719,14 @@ ha_innobase::innobase_lock_autoinc(void) } /********************************************************************//** -Reset the autoinc value in the table. -@return DB_SUCCESS if all went well else error code */ -UNIV_INTERN +Store the autoinc value in the table. The autoinc value is only set if +it's greater than the existing autoinc value in the table. +@return DB_SUCCESS if all went well else error code */ + dberr_t -ha_innobase::innobase_reset_autoinc( -/*================================*/ - ulonglong autoinc) /*!< in: value to store */ +ha_innobase::innobase_set_max_autoinc( +/*==================================*/ + ulonglong auto_inc) /*!< in: value to store */ { dberr_t error; @@ -7891,73 +8734,85 @@ ha_innobase::innobase_reset_autoinc( if (error == DB_SUCCESS) { - dict_table_autoinc_initialize(prebuilt->table, autoinc); + dict_table_autoinc_update_if_greater(m_prebuilt->table, auto_inc); - dict_table_autoinc_unlock(prebuilt->table); + dict_table_autoinc_unlock(m_prebuilt->table); } return(error); } -/********************************************************************//** -Store the autoinc value in the table. The autoinc value is only set if -it's greater than the existing autoinc value in the table. -@return DB_SUCCESS if all went well else error code */ -UNIV_INTERN -dberr_t -ha_innobase::innobase_set_max_autoinc( -/*==================================*/ - ulonglong auto_inc) /*!< in: value to store */ +/** Write Row interface optimized for intrinisc table. +@param[in] record a row in MySQL format. +@return 0 on success or error code */ +int +ha_innobase::intrinsic_table_write_row(uchar* record) { - dberr_t error; - - error = innobase_lock_autoinc(); - - if (error == DB_SUCCESS) { + dberr_t err; - dict_table_autoinc_update_if_greater(prebuilt->table, auto_inc); + /* No auto-increment support for intrinsic table. */ + ut_ad(!(table->next_number_field && record == table->record[0])); - dict_table_autoinc_unlock(prebuilt->table); + if (m_prebuilt->mysql_template == NULL + || m_prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + /* Build the template used in converting quickly between + the two database formats */ + build_template(true); } - return(error); + err = row_insert_for_mysql((byte*) record, m_prebuilt); + + return(convert_error_code_to_mysql( + err, m_prebuilt->table->flags, m_user_thd)); } /********************************************************************//** Stores a row in an InnoDB database, to the table specified in this handle. 
-@return error code */ -UNIV_INTERN +@return error code */ + int ha_innobase::write_row( /*===================*/ uchar* record) /*!< in: a row in MySQL format */ { dberr_t error; - int error_result= 0; - ibool auto_inc_used= FALSE; #ifdef WITH_WSREP ibool auto_inc_inserted= FALSE; /* if NULL was inserted */ #endif ulint sql_command; - trx_t* trx = thd_to_trx(user_thd); + int error_result = 0; + bool auto_inc_used = false; DBUG_ENTER("ha_innobase::write_row"); + if (dict_table_is_intrinsic(m_prebuilt->table)) { + DBUG_RETURN(intrinsic_table_write_row(record)); + } + + trx_t* trx = thd_to_trx(m_user_thd); + TrxInInnoDB trx_in_innodb(trx); + + if (!dict_table_is_intrinsic(m_prebuilt->table) + && trx_in_innodb.is_aborted()) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } + + /* Step-1: Validation checks before we commence write_row operation. */ if (high_level_read_only) { ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); DBUG_RETURN(HA_ERR_TABLE_READONLY); - } else if (prebuilt->trx != trx) { - sql_print_error("The transaction object for the table handle " - "is at %p, but for the current thread it is at " - "%p", - (const void*) prebuilt->trx, (const void*) trx); - - fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); - ut_print_buf(stderr, ((const byte*) prebuilt) - 100, 200); - fputs("\n" - "InnoDB: Dump of 200 bytes around ha_data: ", - stderr); + } else if (m_prebuilt->trx != trx) { + + ib::error() << "The transaction object for the table handle is" + " at " << static_cast(m_prebuilt->trx) + << ", but for the current thread it is at " + << static_cast(trx); + + fputs("InnoDB: Dump of 200 bytes around m_prebuilt: ", stderr); + ut_print_buf(stderr, ((const byte*) m_prebuilt) - 100, 200); + fputs("\nInnoDB: Dump of 200 bytes around ha_data: ", stderr); ut_print_buf(stderr, ((const byte*) trx) - 100, 200); putc('\n', stderr); ut_error; @@ -7967,23 +8822,26 @@ ha_innobase::write_row( ha_statistic_increment(&SSV::ha_write_count); - sql_command = thd_sql_command(user_thd); + /* Step-2: Intermediate commit if original operation involves ALTER + table with algorithm = copy. Intermediate commit ease pressure on + recovery if server crashes while ALTER is active. */ + sql_command = thd_sql_command(m_user_thd); if ((sql_command == SQLCOM_ALTER_TABLE || sql_command == SQLCOM_OPTIMIZE || sql_command == SQLCOM_CREATE_INDEX #ifdef WITH_WSREP - || (wsrep_on(user_thd) && wsrep_load_data_splitting && + || (wsrep_on(m_user_thd) && wsrep_load_data_splitting && sql_command == SQLCOM_LOAD && !thd_test_options( - user_thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + m_user_thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) #endif /* WITH_WSREP */ || sql_command == SQLCOM_DROP_INDEX) - && num_write_row >= 10000) { + && m_num_write_row >= 10000) { #ifdef WITH_WSREP - if (wsrep_on(user_thd) && sql_command == SQLCOM_LOAD) { - WSREP_DEBUG("forced trx split for LOAD: %s", - wsrep_thd_query(user_thd)); + if (wsrep_on(m_user_thd) && sql_command == SQLCOM_LOAD) { + WSREP_DEBUG("forced trx split for LOAD: %s", + wsrep_thd_query(m_user_thd)); } #endif /* WITH_WSREP */ /* ALTER TABLE is COMMITted at every 10000 copied rows. @@ -7998,7 +8856,7 @@ ha_innobase::write_row( dict_table_t* src_table; enum lock_mode mode; - num_write_row = 0; + m_num_write_row = 0; /* Commit the transaction. This will release the table locks, so they have to be acquired again. */ @@ -8006,28 +8864,20 @@ ha_innobase::write_row( /* Altering an InnoDB table */ /* Get the source table. 
*/ src_table = lock_get_src_table( - prebuilt->trx, prebuilt->table, &mode); + m_prebuilt->trx, m_prebuilt->table, &mode); if (!src_table) { no_commit: /* Unknown situation: do not commit */ - /* - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: ALTER TABLE is holding lock" - " on %lu tables!\n", - prebuilt->trx->mysql_n_tables_locked); - */ ; - } else if (src_table == prebuilt->table) { + } else if (src_table == m_prebuilt->table) { #ifdef WITH_WSREP - if (wsrep_on(user_thd) && + if (wsrep_on(m_user_thd) && wsrep_load_data_splitting && sql_command == SQLCOM_LOAD && - !thd_test_options(user_thd, + !thd_test_options(m_user_thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { - switch (wsrep_run_wsrep_commit(user_thd, 1)) - { + switch (wsrep_run_wsrep_commit(m_user_thd, 1)) { case WSREP_TRX_OK: break; case WSREP_TRX_SIZE_EXCEEDED: @@ -8036,71 +8886,72 @@ no_commit: DBUG_RETURN(1); } - if (binlog_hton->commit(binlog_hton, user_thd, 1)) + if (binlog_hton->commit(binlog_hton, m_user_thd, 1)) { DBUG_RETURN(1); - wsrep_post_commit(user_thd, TRUE); + } + wsrep_post_commit(m_user_thd, TRUE); } #endif /* WITH_WSREP */ /* Source table is not in InnoDB format: no need to re-acquire locks on it. */ /* Altering to InnoDB format */ - innobase_commit(ht, user_thd, 1); + innobase_commit(ht, m_user_thd, 1); /* Note that this transaction is still active. */ - trx_register_for_2pc(prebuilt->trx); + trx_register_for_2pc(m_prebuilt->trx); /* We will need an IX lock on the destination table. */ - prebuilt->sql_stat_start = TRUE; + m_prebuilt->sql_stat_start = TRUE; } else { #ifdef WITH_WSREP - if (wsrep_on(user_thd) && + if (wsrep_on(m_user_thd) && wsrep_load_data_splitting && sql_command == SQLCOM_LOAD && - !thd_test_options(user_thd, - OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) - { - switch (wsrep_run_wsrep_commit(user_thd, 1)) - { + !thd_test_options(m_user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + switch (wsrep_run_wsrep_commit(m_user_thd, 1)) { case WSREP_TRX_OK: - break; + break; case WSREP_TRX_SIZE_EXCEEDED: case WSREP_TRX_CERT_FAIL: case WSREP_TRX_ERROR: - DBUG_RETURN(1); + DBUG_RETURN(1); } - if (binlog_hton->commit(binlog_hton, user_thd, 1)) + if (binlog_hton->commit(binlog_hton, m_user_thd, 1)) { DBUG_RETURN(1); - wsrep_post_commit(user_thd, TRUE); + } + + wsrep_post_commit(m_user_thd, TRUE); } #endif /* WITH_WSREP */ /* Ensure that there are no other table locks than LOCK_IX and LOCK_AUTO_INC on the destination table. */ - if (!lock_is_table_exclusive(prebuilt->table, - prebuilt->trx)) { + if (!lock_is_table_exclusive(m_prebuilt->table, + m_prebuilt->trx)) { goto no_commit; } /* Commit the transaction. This will release the table locks, so they have to be acquired again. */ - innobase_commit(ht, user_thd, 1); + innobase_commit(ht, m_user_thd, 1); /* Note that this transaction is still active. */ - trx_register_for_2pc(prebuilt->trx); + trx_register_for_2pc(m_prebuilt->trx); /* Re-acquire the table lock on the source table. */ - row_lock_table_for_mysql(prebuilt, src_table, mode); + row_lock_table_for_mysql(m_prebuilt, src_table, mode); /* We will need an IX lock on the destination table. */ - prebuilt->sql_stat_start = TRUE; + m_prebuilt->sql_stat_start = TRUE; } } - num_write_row++; + m_num_write_row++; - /* This is the case where the table has an auto-increment column */ + /* Step-3: Handling of Auto-Increment Columns. */ if (table->next_number_field && record == table->record[0]) { /* Reset the error code before calling innobase_get_auto_increment(). 
*/ - prebuilt->autoinc_error = DB_SUCCESS; + m_prebuilt->autoinc_error = DB_SUCCESS; #ifdef WITH_WSREP auto_inc_inserted= (table->next_number_field->val_int() == 0); @@ -8111,13 +8962,13 @@ no_commit: /* Handle the case where the AUTOINC sub-system failed during initialization. */ - if (prebuilt->autoinc_error == DB_UNSUPPORTED) { + if (m_prebuilt->autoinc_error == DB_UNSUPPORTED) { error_result = ER_AUTOINC_READ_FAILED; /* Set the error message to report too. */ my_error(ER_AUTOINC_READ_FAILED, MYF(0)); goto func_exit; - } else if (prebuilt->autoinc_error != DB_SUCCESS) { - error = prebuilt->autoinc_error; + } else if (m_prebuilt->autoinc_error != DB_SUCCESS) { + error = m_prebuilt->autoinc_error; goto report_error; } @@ -8125,11 +8976,13 @@ no_commit: goto func_exit; } - auto_inc_used = TRUE; + auto_inc_used = true; } - if (prebuilt->mysql_template == NULL - || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + /* Step-4: Prepare INSERT graph that will be executed for actual INSERT + (This is a one time operation) */ + if (m_prebuilt->mysql_template == NULL + || m_prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { /* Build the template used in converting quickly between the two database formats */ @@ -8137,12 +8990,14 @@ no_commit: build_template(true); } - innobase_srv_conc_enter_innodb(prebuilt->trx); + innobase_srv_conc_enter_innodb(m_prebuilt); - error = row_insert_for_mysql((byte*) record, prebuilt); - DEBUG_SYNC(user_thd, "ib_after_row_insert"); + /* Step-5: Execute insert graph that will result in actual insert. */ + error = row_insert_for_mysql((byte*) record, m_prebuilt); - /* Handle duplicate key errors */ + DEBUG_SYNC(m_user_thd, "ib_after_row_insert"); + + /* Step-6: Handling of errors related to auto-increment. */ if (auto_inc_used) { ulonglong auto_inc; ulonglong col_max_value; @@ -8150,17 +9005,19 @@ no_commit: /* Note the number of rows processed for this statement, used by get_auto_increment() to determine the number of AUTO-INC values to reserve. This is only useful for a mult-value INSERT - and is a statement level counter.*/ + and is a statement level counter. */ if (trx->n_autoinc_rows > 0) { --trx->n_autoinc_rows; } /* We need the upper limit of the col type to check for whether we update the table autoinc counter or not. */ - col_max_value = innobase_get_int_col_max_value( - table->next_number_field); + col_max_value = innobase_get_int_col_max_value(table->next_number_field); + /* JAN: TODO: MySQL 5.7 + table->next_number_field->get_max_int_value(); + */ - /* Get the value that MySQL attempted to store in the table.*/ + /* Get the value that MySQL attempted to store in the table. */ auto_inc = table->next_number_field->val_int(); switch (error) { @@ -8190,27 +9047,27 @@ no_commit: WSREP_DEBUG("DUPKEY error for autoinc\n" "THD %ld, value %llu, off %llu inc %llu", - thd_get_thread_id(current_thd), + thd_get_thread_id(m_user_thd), auto_inc, - prebuilt->autoinc_offset, - prebuilt->autoinc_increment); + m_prebuilt->autoinc_offset, + m_prebuilt->autoinc_increment); - if (wsrep_on(current_thd) && + if (wsrep_on(m_user_thd) && auto_inc_inserted && wsrep_drupal_282555_workaround && - wsrep_thd_retry_counter(current_thd) == 0 && - !thd_test_options(current_thd, - OPTION_NOT_AUTOCOMMIT | + wsrep_thd_retry_counter(m_user_thd) == 0 && + !thd_test_options(m_user_thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { WSREP_DEBUG( "retrying insert: %s", - (*wsrep_thd_query(current_thd)) ? - wsrep_thd_query(current_thd) : + (*wsrep_thd_query(m_user_thd)) ? 
+ wsrep_thd_query(m_user_thd) : (char *)"void"); error= DB_SUCCESS; wsrep_thd_set_conflict_state( - current_thd, MUST_ABORT); - innobase_srv_conc_exit_innodb(prebuilt->trx); + m_user_thd, MUST_ABORT); + innobase_srv_conc_exit_innodb(m_prebuilt); /* jump straight to func exit over * later wsrep hooks */ goto func_exit; @@ -8228,21 +9085,21 @@ no_commit: /* If the actual value inserted is greater than the upper limit of the interval, then we try and update the table upper limit. Note: last_value - will be 0 if get_auto_increment() was not called.*/ + will be 0 if get_auto_increment() was not called. */ - if (auto_inc >= prebuilt->autoinc_last_value) { + if (auto_inc >= m_prebuilt->autoinc_last_value) { set_max_autoinc: /* This should filter out the negative values set explicitly by the user. */ if (auto_inc <= col_max_value) { - ut_a(prebuilt->autoinc_increment > 0); + ut_a(m_prebuilt->autoinc_increment > 0); ulonglong offset; ulonglong increment; dberr_t err; - offset = prebuilt->autoinc_offset; - increment = prebuilt->autoinc_increment; + offset = m_prebuilt->autoinc_offset; + increment = m_prebuilt->autoinc_increment; auto_inc = innobase_next_autoinc( auto_inc, @@ -8263,9 +9120,10 @@ set_max_autoinc: } } - innobase_srv_conc_exit_innodb(prebuilt->trx); + innobase_srv_conc_exit_innodb(m_prebuilt); report_error: + /* Step-7: Cleanup and exit. */ if (error == DB_TABLESPACE_DELETED) { ib_senderrf( trx->mysql_thd, IB_LOG_LEVEL_ERROR, @@ -8273,18 +9131,17 @@ report_error: table->s->table_name.str); } - error_result = convert_error_code_to_mysql(error, - prebuilt->table->flags, - user_thd); + error_result = convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd); #ifdef WITH_WSREP - if (!error_result && - wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd) && - !wsrep_consistency_check(user_thd) && - !wsrep_thd_ignore_table(user_thd)) + if (!error_result && + wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE && + wsrep_on(m_user_thd) && + !wsrep_consistency_check(m_user_thd) && + !wsrep_thd_ignore_table(m_user_thd)) { - if (wsrep_append_keys(user_thd, false, record, NULL)) + if (wsrep_append_keys(m_user_thd, false, record, NULL)) { DBUG_PRINT("wsrep", ("row key failed")); error_result = HA_ERR_INTERNAL_ERROR; @@ -8304,16 +9161,57 @@ func_exit: DBUG_RETURN(error_result); } +/** Fill the update vector's "old_vrow" field for those non-updated, +but indexed columns. Such columns could stil present in the virtual +index rec fields even if they are not updated (some other fields updated), +so needs to be logged. 
+@param[in] prebuilt InnoDB prebuilt struct +@param[in,out] vfield field to filled +@param[in] o_len actual column length +@param[in,out] col column to be filled +@param[in] old_mysql_row_col MySQL old field ptr +@param[in] col_pack_len MySQL field col length +@param[in,out] buf buffer for a converted integer value +@return used buffer ptr from row_mysql_store_col_in_innobase_format() */ +static +byte* +innodb_fill_old_vcol_val( + row_prebuilt_t* prebuilt, + dfield_t* vfield, + ulint o_len, + dict_col_t* col, + const byte* old_mysql_row_col, + ulint col_pack_len, + byte* buf) +{ + dict_col_copy_type( + col, dfield_get_type(vfield)); + if (o_len != UNIV_SQL_NULL) { + + buf = row_mysql_store_col_in_innobase_format( + vfield, + buf, + TRUE, + old_mysql_row_col, + col_pack_len, + dict_table_is_comp(prebuilt->table)); + } else { + dfield_set_null(vfield); + } + + return(buf); +} + /**********************************************************************//** Checks which fields have changed in a row and stores information of them to an update vector. -@return DB_SUCCESS or error code */ +@return DB_SUCCESS or error code */ static dberr_t calc_row_difference( /*================*/ upd_t* uvect, /*!< in/out: update vector */ - uchar* old_row, /*!< in: old row in MySQL format */ + const uchar* old_row, /*!< in: old row in MySQL format */ uchar* new_row, /*!< in: new row in MySQL format */ TABLE* table, /*!< in: table in MySQL data dictionary */ @@ -8330,6 +9228,7 @@ calc_row_difference( ulint n_len; ulint col_pack_len; const byte* new_mysql_row_col; + const byte* old_mysql_row_col; const byte* o_ptr; const byte* n_ptr; byte* buf; @@ -8338,13 +9237,14 @@ calc_row_difference( ulint n_changed = 0; dfield_t dfield; dict_index_t* clust_index; - uint sql_idx, innodb_idx= 0; + uint sql_idx,i, innodb_idx= 0; ibool changes_fts_column = FALSE; ibool changes_fts_doc_col = FALSE; trx_t* trx = thd_to_trx(thd); doc_id_t doc_id = FTS_NULL_DOC_ID; + ulint num_v = 0; - ut_ad(!srv_read_only_mode); + ut_ad(!srv_read_only_mode || dict_table_is_intrinsic(prebuilt->table)); n_fields = table->s->fields; clust_index = dict_table_get_first_index(prebuilt->table); @@ -8352,10 +9252,20 @@ calc_row_difference( /* We use upd_buff to convert changed fields */ buf = (byte*) upd_buff; - for (sql_idx = 0; sql_idx < n_fields; sql_idx++) { + for (sql_idx = 0,i=0; i < n_fields; i++, sql_idx++) { field = table->field[sql_idx]; - if (!field->stored_in_db()) - continue; + bool is_virtual = innobase_is_v_fld(field); + dict_col_t* col; + + if (!field->stored_in_db()) { + continue; + } + + if (is_virtual) { + col = &prebuilt->table->v_cols[num_v].m_col; + } else { + col = &prebuilt->table->cols[innodb_idx - num_v]; + } o_ptr = (const byte*) old_row + get_field_offset(table, field); n_ptr = (const byte*) new_row + get_field_offset(table, field); @@ -8363,6 +9273,7 @@ calc_row_difference( /* Use new_mysql_row_col and col_pack_len save the values */ new_mysql_row_col = n_ptr; + old_mysql_row_col = o_ptr; col_pack_len = field->pack_length(); o_len = col_pack_len; @@ -8373,11 +9284,14 @@ calc_row_difference( field_mysql_type = field->type(); - col_type = prebuilt->table->cols[innodb_idx].mtype; + col_type = col->mtype; switch (col_type) { case DATA_BLOB: + case DATA_POINT: + case DATA_VAR_POINT: + case DATA_GEOMETRY: o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); @@ -8418,7 +9332,6 @@ calc_row_difference( } } - if (field->real_maybe_null()) { if (field->is_null_in_record(old_row)) { 
o_len = UNIV_SQL_NULL; @@ -8429,6 +9342,42 @@ calc_row_difference( } } + if (is_virtual) { + /* If the virtual column is not indexed, + we shall ignore it for update */ + if (!col->ord_part) { + num_v++; + continue; + } + + if (!uvect->old_vrow) { + uvect->old_vrow = dtuple_create_with_vcol( + uvect->heap, 0, prebuilt->table->n_v_cols); + } + + ulint max_field_len = DICT_MAX_FIELD_LEN_BY_FORMAT( + prebuilt->table); + + /* for virtual columns, we only materialize + its index, and index field length would not + exceed max_field_len. So continue if the + first max_field_len bytes are matched up */ + if (o_len != UNIV_SQL_NULL + && n_len != UNIV_SQL_NULL + && o_len >= max_field_len + && n_len >= max_field_len + && memcmp(o_ptr, n_ptr, max_field_len) == 0) { + dfield_t* vfield = dtuple_get_nth_v_field( + uvect->old_vrow, num_v); + buf = innodb_fill_old_vcol_val( + prebuilt, vfield, o_len, + col, old_mysql_row_col, + col_pack_len, buf); + num_v++; + continue; + } + } + if (o_len != n_len || (o_len != UNIV_SQL_NULL && 0 != memcmp(o_ptr, n_ptr, o_len))) { /* The field has changed */ @@ -8439,10 +9388,18 @@ calc_row_difference( /* Let us use a dummy dfield to make the conversion from the MySQL column format to the InnoDB format */ + + /* If the length of new geometry object is 0, means + this object is invalid geometry object, we need + to block it. */ + if (DATA_GEOMETRY_MTYPE(col_type) + && o_len != 0 && n_len == 0) { + return(DB_CANT_CREATE_GEOMETRY_OBJECT); + } + if (n_len != UNIV_SQL_NULL) { - dict_col_copy_type(prebuilt->table->cols + - innodb_idx, - dfield_get_type(&dfield)); + dict_col_copy_type( + col, dfield_get_type(&dfield)); buf = row_mysql_store_col_in_innobase_format( &dfield, @@ -8453,13 +9410,57 @@ calc_row_difference( dict_table_is_comp(prebuilt->table)); dfield_copy(&ufield->new_val, &dfield); } else { + dict_col_copy_type( + col, dfield_get_type(&ufield->new_val)); dfield_set_null(&ufield->new_val); } ufield->exp = NULL; ufield->orig_len = 0; - ufield->field_no = dict_col_get_clust_pos( - &prebuilt->table->cols[innodb_idx], clust_index); + if (is_virtual) { + dfield_t* vfield = dtuple_get_nth_v_field( + uvect->old_vrow, num_v); + upd_fld_set_virtual_col(ufield); + ufield->field_no = num_v; + + ut_ad(col->ord_part); + ufield->old_v_val = static_cast( + mem_heap_alloc( + uvect->heap, + sizeof *ufield->old_v_val)); + + if (!field->is_null_in_record(old_row)) { + if (n_len == UNIV_SQL_NULL) { + dict_col_copy_type( + col, dfield_get_type( + &dfield)); + } + + buf = row_mysql_store_col_in_innobase_format( + &dfield, + (byte*) buf, + TRUE, + old_mysql_row_col, + col_pack_len, + dict_table_is_comp( + prebuilt->table)); + dfield_copy(ufield->old_v_val, + &dfield); + dfield_copy(vfield, &dfield); + } else { + dict_col_copy_type( + col, dfield_get_type( + ufield->old_v_val)); + dfield_set_null(ufield->old_v_val); + dfield_set_null(vfield); + } + num_v++; + } else { + ufield->field_no = dict_col_get_clust_pos( + &prebuilt->table->cols[innodb_idx - num_v], + clust_index); + ufield->old_v_val = NULL; + } n_changed++; /* If an FTS indexed column was changed by this @@ -8471,8 +9472,8 @@ calc_row_difference( checking only once here. Later we will need to note which columns have been updated and do selective processing. 
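The virtual-column branch above only logs columns that belong to an index (col->ord_part) and, when the first max_field_len bytes of the old and new values match, records just the old value in old_vrow and skips adding the column to the update vector, because, as the hunk's own comment notes, only that prefix is ever materialized in the secondary index. A self-contained sketch of that comparison rule follows; the function name and the sql_null parameter (a stand-in for UNIV_SQL_NULL) are illustrative and not part of the patch.

#include <cstring>

/* Illustrative sketch only: the test that lets calc_row_difference()
   skip an indexed virtual column whose indexed prefix is unchanged. */
static bool
vcol_index_prefix_unchanged(
        const unsigned char*    o_ptr,          /* old value */
        unsigned long           o_len,          /* old length */
        const unsigned char*    n_ptr,          /* new value */
        unsigned long           n_len,          /* new length */
        unsigned long           max_field_len,  /* index prefix limit */
        unsigned long           sql_null)       /* stand-in for UNIV_SQL_NULL */
{
        return(o_len != sql_null
               && n_len != sql_null
               && o_len >= max_field_len
               && n_len >= max_field_len
               && std::memcmp(o_ptr, n_ptr, max_field_len) == 0);
}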
*/ - if (prebuilt->table->fts != NULL) { - ulint offset; + if (prebuilt->table->fts != NULL && !is_virtual) { + ulint offset; dict_table_t* innodb_table; innodb_table = prebuilt->table; @@ -8492,9 +9493,20 @@ calc_row_difference( innodb_table, ufield); } } - } - if (field->stored_in_db()) + } else if (is_virtual) { + dfield_t* vfield = dtuple_get_nth_v_field( + uvect->old_vrow, num_v); + buf = innodb_fill_old_vcol_val( + prebuilt, vfield, o_len, + col, old_mysql_row_col, + col_pack_len, buf); + ut_ad(col->ord_part); + num_v++; + } + + if (field->stored_in_db()) { innodb_idx++; + } } /* If the update changes a column with an FTS index on it, we @@ -8516,37 +9528,31 @@ calc_row_difference( Doc ID must also be updated. Otherwise, return error */ if (changes_fts_column && !changes_fts_doc_col) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: A new Doc ID" - " must be supplied while updating" - " FTS indexed columns.\n"); + ib::warn() << "A new Doc ID must be supplied" + " while updating FTS indexed columns."; return(DB_FTS_INVALID_DOCID); } /* Doc ID must monotonically increase */ ut_ad(innodb_table->fts->cache); if (doc_id < prebuilt->table->fts->cache->next_doc_id) { - fprintf(stderr, - "InnoDB: FTS Doc ID must be larger than" - " " IB_ID_FMT " for table", - innodb_table->fts->cache->next_doc_id - - 1); - ut_print_name(stderr, trx, - TRUE, innodb_table->name); - putc('\n', stderr); + + ib::warn() << "FTS Doc ID must be larger than " + << innodb_table->fts->cache->next_doc_id + - 1 << " for table " + << innodb_table->name; return(DB_FTS_INVALID_DOCID); } else if ((doc_id - prebuilt->table->fts->cache->next_doc_id) >= FTS_DOC_ID_MAX_STEP) { - fprintf(stderr, - "InnoDB: Doc ID " UINT64PF " is too" + + ib::warn() << "Doc ID " << doc_id << " is too" " big. Its difference with largest" - " Doc ID used " UINT64PF " cannot" - " exceed or equal to %d\n", - doc_id, - prebuilt->table->fts->cache->next_doc_id - 1, - FTS_DOC_ID_MAX_STEP); + " Doc ID used " << prebuilt->table->fts + ->cache->next_doc_id - 1 + << " cannot exceed or equal to " + << FTS_DOC_ID_MAX_STEP; } @@ -8578,6 +9584,7 @@ calc_row_difference( ut_a(buf <= (byte*) original_upd_buff + buff_len); + ut_ad(uvect->validate()); return(DB_SUCCESS); } @@ -8663,38 +9670,40 @@ wsrep_calc_row_hash( return(0); } #endif /* WITH_WSREP */ -/**********************************************************************//** +/* Updates a row given as a parameter to a new value. Note that we are given whole rows, not just the fields which are updated: this incurs some overhead for CPU when we check which fields are actually updated. TODO: currently InnoDB does not prevent the 'Halloween problem': in a searched update a single row can get updated several times if its index columns are updated! 
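When an FTS-indexed column changes, the hunk above also validates the supplied FTS_DOC_ID: it must not be smaller than the next expected doc id in the FTS cache, and a jump of FTS_DOC_ID_MAX_STEP or more past that value is flagged as well. A small predicate capturing that window; every name is illustrative and the constant comes from the InnoDB sources, not from this patch.

/* Illustrative only: the validity window enforced for a new Doc ID. */
static bool
fts_doc_id_in_valid_window(
        unsigned long long      doc_id,         /* id supplied with the update */
        unsigned long long      next_doc_id,    /* fts cache next_doc_id */
        unsigned long long      max_step)       /* FTS_DOC_ID_MAX_STEP */
{
        return(doc_id >= next_doc_id
               && (doc_id - next_doc_id) < max_step);
}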
-@return error number or 0 */ -UNIV_INTERN +@param[in] old_row Old row contents in MySQL format +@param[out] new_row Updated row contents in MySQL format +@return error number or 0 */ + int ha_innobase::update_row( -/*====================*/ - const uchar* old_row, /*!< in: old row in MySQL format */ - uchar* new_row) /*!< in: new row in MySQL format */ + const uchar* old_row, + uchar* new_row) { - upd_t* uvect; + int err; + dberr_t error; - trx_t* trx = thd_to_trx(user_thd); + trx_t* trx = thd_to_trx(m_user_thd); DBUG_ENTER("ha_innobase::update_row"); - ut_a(prebuilt->trx == trx); + ut_a(m_prebuilt->trx == trx); - if (high_level_read_only) { + if (high_level_read_only && !dict_table_is_intrinsic(m_prebuilt->table)) { ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); DBUG_RETURN(HA_ERR_TABLE_READONLY); } else if (!trx_is_started(trx)) { ++trx->will_lock; } - if (upd_buf == NULL) { - ut_ad(upd_buf_size == 0); + if (m_upd_buf == NULL) { + ut_ad(m_upd_buf_size == 0); /* Create a buffer for packing the fields of a record. Why table->stored_rec_length did not work here? Obviously, @@ -8702,41 +9711,54 @@ ha_innobase::update_row( longer, when we also stored the string length as the first byte. */ - upd_buf_size = table->s->stored_rec_length + - table->s->max_key_length + MAX_REF_PARTS * 3; - upd_buf = (uchar*) my_malloc(upd_buf_size, MYF(MY_WME)); - if (upd_buf == NULL) { - upd_buf_size = 0; + m_upd_buf_size = table->s->stored_rec_length + table->s->max_key_length + + MAX_REF_PARTS * 3; + + m_upd_buf = reinterpret_cast( + my_malloc( + m_upd_buf_size, + MYF(MY_WME))); + /* JAN: TODO: MySQL 5.7: PSI_INSTRUMENT_ME,...*/ + + if (m_upd_buf == NULL) { + m_upd_buf_size = 0; DBUG_RETURN(HA_ERR_OUT_OF_MEM); } } ha_statistic_increment(&SSV::ha_update_count); - if (prebuilt->upd_node) { - uvect = prebuilt->upd_node->update; + upd_t* uvect; + + if (m_prebuilt->upd_node) { + uvect = m_prebuilt->upd_node->update; } else { - uvect = row_get_prebuilt_update_vector(prebuilt); + uvect = row_get_prebuilt_update_vector(m_prebuilt); } /* Build an update vector from the modified fields in the rows - (uses upd_buf of the handle) */ + (uses m_upd_buf of the handle) */ - error = calc_row_difference(uvect, (uchar*) old_row, new_row, table, - upd_buf, upd_buf_size, prebuilt, user_thd); + error = calc_row_difference( + uvect, old_row, new_row, table, m_upd_buf, m_upd_buf_size, + m_prebuilt, m_user_thd); if (error != DB_SUCCESS) { goto func_exit; } - /* This is not a delete */ - prebuilt->upd_node->is_delete = FALSE; + if (!dict_table_is_intrinsic(m_prebuilt->table) + && TrxInInnoDB::is_aborted(trx)) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } - ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW); + /* This is not a delete */ + m_prebuilt->upd_node->is_delete = FALSE; - innobase_srv_conc_enter_innodb(trx); + innobase_srv_conc_enter_innodb(m_prebuilt); - error = row_update_for_mysql((byte*) old_row, prebuilt); + error = row_update_for_mysql((byte*) old_row, m_prebuilt); /* We need to do some special AUTOINC handling for the following case: @@ -8744,12 +9766,12 @@ ha_innobase::update_row( We need to use the AUTOINC counter that was actually used by MySQL in the UPDATE statement, which can be different from the - value used in the INSERT statement.*/ + value used in the INSERT statement. 
*/ if (error == DB_SUCCESS && table->next_number_field && new_row == table->record[0] - && thd_sql_command(user_thd) == SQLCOM_INSERT + && thd_sql_command(m_user_thd) == SQLCOM_INSERT && trx->duplicates) { ulonglong auto_inc; @@ -8759,16 +9781,19 @@ ha_innobase::update_row( /* We need the upper limit of the col type to check for whether we update the table autoinc counter or not. */ - col_max_value = innobase_get_int_col_max_value( - table->next_number_field); + /* JAN: TODO: MySQL 5.7 + table->next_number_field->get_max_int_value(); + */ + col_max_value = + innobase_get_int_col_max_value(table->next_number_field); if (auto_inc <= col_max_value && auto_inc != 0) { ulonglong offset; ulonglong increment; - offset = prebuilt->autoinc_offset; - increment = prebuilt->autoinc_increment; + offset = m_prebuilt->autoinc_offset; + increment = m_prebuilt->autoinc_increment; auto_inc = innobase_next_autoinc( auto_inc, 1, increment, offset, col_max_value); @@ -8777,11 +9802,12 @@ ha_innobase::update_row( } } - innobase_srv_conc_exit_innodb(trx); + innobase_srv_conc_exit_innodb(m_prebuilt); func_exit: - int err = convert_error_code_to_mysql(error, - prebuilt->table->flags, user_thd); + + err = convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd); /* If success and no columns were updated. */ if (err == 0 && uvect->n_fields == 0) { @@ -8801,14 +9827,14 @@ func_exit: innobase_active_small(); #ifdef WITH_WSREP - if (error == DB_SUCCESS && - wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd) && - !wsrep_thd_ignore_table(user_thd)) + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE && + wsrep_on(m_user_thd) && + !wsrep_thd_ignore_table(m_user_thd)) { DBUG_PRINT("wsrep", ("update row key")); - if (wsrep_append_keys(user_thd, false, old_row, new_row)) { + if (wsrep_append_keys(m_user_thd, false, old_row, new_row)) { WSREP_DEBUG("WSREP: UPDATE_ROW_KEY FAILED"); DBUG_PRINT("wsrep", ("row key failed")); err = HA_ERR_INTERNAL_ERROR; @@ -8824,21 +9850,28 @@ wsrep_error: /**********************************************************************//** Deletes a row given as the parameter. 
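In the update_row() hunk above, an auto-increment value consumed by INSERT ... ON DUPLICATE KEY UPDATE is folded back into the table counter through innobase_next_autoinc(auto_inc, 1, increment, offset, col_max_value). The function below is only a rough, self-contained model of that arithmetic; the real routine additionally handles multi-value requests and unsigned overflow.

/* Illustrative only: smallest value of the form offset + k * increment
   that is strictly greater than the value just used, clamped to the
   column type maximum. */
static unsigned long long
next_autoinc_simplified(
        unsigned long long      current,        /* value just used */
        unsigned long long      increment,      /* auto_increment_increment */
        unsigned long long      offset,         /* auto_increment_offset */
        unsigned long long      col_max)        /* column type maximum */
{
        if (increment == 0 || current >= col_max) {
                return(col_max);
        }

        unsigned long long      steps = (current >= offset)
                ? (current - offset) / increment + 1
                : 0;
        unsigned long long      next = offset + steps * increment;

        return(next > col_max || next < offset ? col_max : next);
}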
-@return error number or 0 */ -UNIV_INTERN +@return error number or 0 */ + int ha_innobase::delete_row( /*====================*/ const uchar* record) /*!< in: a row in MySQL format */ { dberr_t error; - trx_t* trx = thd_to_trx(user_thd); + trx_t* trx = thd_to_trx(m_user_thd); + TrxInInnoDB trx_in_innodb(trx); DBUG_ENTER("ha_innobase::delete_row"); - ut_a(prebuilt->trx == trx); + if (!dict_table_is_intrinsic(m_prebuilt->table) + && trx_in_innodb.is_aborted()) { - if (high_level_read_only) { + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } + + ut_a(m_prebuilt->trx == trx); + + if (high_level_read_only && !dict_table_is_intrinsic(m_prebuilt->table)) { ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); DBUG_RETURN(HA_ERR_TABLE_READONLY); } else if (!trx_is_started(trx)) { @@ -8847,19 +9880,19 @@ ha_innobase::delete_row( ha_statistic_increment(&SSV::ha_delete_count); - if (!prebuilt->upd_node) { - row_get_prebuilt_update_vector(prebuilt); + if (!m_prebuilt->upd_node) { + row_get_prebuilt_update_vector(m_prebuilt); } /* This is a delete */ - prebuilt->upd_node->is_delete = TRUE; + m_prebuilt->upd_node->is_delete = TRUE; - innobase_srv_conc_enter_innodb(trx); + innobase_srv_conc_enter_innodb(m_prebuilt); - error = row_update_for_mysql((byte*) record, prebuilt); + error = row_update_for_mysql((byte*) record, m_prebuilt); - innobase_srv_conc_exit_innodb(trx); + innobase_srv_conc_exit_innodb(m_prebuilt); /* Tell the InnoDB server that there might be work for utility threads: */ @@ -8867,12 +9900,12 @@ ha_innobase::delete_row( innobase_active_small(); #ifdef WITH_WSREP - if (error == DB_SUCCESS && - wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd) && - !wsrep_thd_ignore_table(user_thd)) + if (error == DB_SUCCESS && + wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE && + wsrep_on(m_user_thd) && + !wsrep_thd_ignore_table(m_user_thd)) { - if (wsrep_append_keys(user_thd, false, record, NULL)) { + if (wsrep_append_keys(m_user_thd, false, record, NULL)) { DBUG_PRINT("wsrep", ("delete fail")); error = (dberr_t) HA_ERR_INTERNAL_ERROR; goto wsrep_error; @@ -8881,14 +9914,45 @@ ha_innobase::delete_row( wsrep_error: #endif /* WITH_WSREP */ DBUG_RETURN(convert_error_code_to_mysql( - error, prebuilt->table->flags, user_thd)); + error, m_prebuilt->table->flags, m_user_thd)); +} + +/** Delete all rows from the table. +@return error number or 0 */ + +int +ha_innobase::delete_all_rows() +{ + DBUG_ENTER("ha_innobase::delete_all_rows"); + + /* Currently enabled only for intrinsic tables. */ + if (!dict_table_is_intrinsic(m_prebuilt->table)) { + DBUG_RETURN(HA_ERR_WRONG_COMMAND); + } + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (!dict_table_is_intrinsic(m_prebuilt->table) + && trx_in_innodb.is_aborted()) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } + + dberr_t error = row_delete_all_rows(m_prebuilt->table); + + if (error == DB_SUCCESS) { + dict_stats_update(m_prebuilt->table, DICT_STATS_EMPTY_TABLE); + } + + DBUG_RETURN(convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd)); } /**********************************************************************//** Removes a new lock set on a row, if it was not read optimistically. This can be called after a row has been read in the processing of an UPDATE or a DELETE query, if the option innodb_locks_unsafe_for_binlog is set. 
*/ -UNIV_INTERN + void ha_innobase::unlock_row(void) /*=========================*/ @@ -8896,31 +9960,42 @@ ha_innobase::unlock_row(void) DBUG_ENTER("ha_innobase::unlock_row"); /* Consistent read does not take any locks, thus there is - nothing to unlock. */ + nothing to unlock. There is no locking for intrinsic table. */ - if (prebuilt->select_lock_type == LOCK_NONE) { + if (m_prebuilt->select_lock_type == LOCK_NONE + || dict_table_is_intrinsic(m_prebuilt->table)) { DBUG_VOID_RETURN; } + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (trx_in_innodb.is_aborted()) { + DBUG_VOID_RETURN; + } + + ut_ad(!dict_table_is_intrinsic(m_prebuilt->table)); + /* Ideally, this assert must be in the beginning of the function. But there are some calls to this function from the SQL layer when the transaction is in state TRX_STATE_NOT_STARTED. The check on - prebuilt->select_lock_type above gets around this issue. */ - ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); + m_prebuilt->select_lock_type above gets around this issue. */ + + ut_ad(trx_state_eq(m_prebuilt->trx, TRX_STATE_ACTIVE) + || trx_state_eq(m_prebuilt->trx, TRX_STATE_FORCED_ROLLBACK)); - switch (prebuilt->row_read_type) { + switch (m_prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: if (!srv_locks_unsafe_for_binlog - && prebuilt->trx->isolation_level + && m_prebuilt->trx->isolation_level > TRX_ISO_READ_COMMITTED) { break; } /* fall through */ case ROW_READ_TRY_SEMI_CONSISTENT: - row_unlock_for_mysql(prebuilt, FALSE); + row_unlock_for_mysql(m_prebuilt, FALSE); break; case ROW_READ_DID_SEMI_CONSISTENT: - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + m_prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; break; } @@ -8928,21 +10003,21 @@ ha_innobase::unlock_row(void) } /* See handler.h and row0mysql.h for docs on this function. */ -UNIV_INTERN + bool ha_innobase::was_semi_consistent_read(void) /*=======================================*/ { - return(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); + return(m_prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT); } /* See handler.h and row0mysql.h for docs on this function. */ -UNIV_INTERN + void ha_innobase::try_semi_consistent_read(bool yes) /*===========================================*/ { - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); /* Row read type is set to semi consistent read if this was requested by the MySQL and either innodb_locks_unsafe_for_binlog @@ -8951,22 +10026,26 @@ ha_innobase::try_semi_consistent_read(bool yes) if (yes && (srv_locks_unsafe_for_binlog - || prebuilt->trx->isolation_level <= TRX_ISO_READ_COMMITTED)) { - prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + || m_prebuilt->trx->isolation_level + <= TRX_ISO_READ_COMMITTED)) { + + m_prebuilt->row_read_type = ROW_READ_TRY_SEMI_CONSISTENT; + } else { - prebuilt->row_read_type = ROW_READ_WITH_LOCKS; + m_prebuilt->row_read_type = ROW_READ_WITH_LOCKS; } } /******************************************************************//** Initializes a handle to use an index. 
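try_semi_consistent_read() above arms semi-consistent reads only when the SQL layer requests them and either innodb_locks_unsafe_for_binlog is set or the isolation level is READ COMMITTED or lower; unlock_row() then releases the lock on a non-matching row and was_semi_consistent_read() reports whether the last fetch was such a read. A small predicate for the arming condition; the parameter names are stand-ins for the server variables involved, not code from the patch.

/* Illustrative only: when m_prebuilt->row_read_type is switched to
   ROW_READ_TRY_SEMI_CONSISTENT. */
static bool
use_semi_consistent_read(
        bool    requested,              /* the 'yes' argument */
        bool    locks_unsafe_for_binlog,/* srv_locks_unsafe_for_binlog */
        int     isolation_level,        /* trx->isolation_level */
        int     read_committed_level)   /* TRX_ISO_READ_COMMITTED */
{
        return(requested
               && (locks_unsafe_for_binlog
                   || isolation_level <= read_committed_level));
}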
-@return 0 or error number */ -UNIV_INTERN +@return 0 or error number */ + int ha_innobase::index_init( /*====================*/ - uint keynr, /*!< in: key (index) number */ - bool sorted) /*!< in: 1 if result MUST be sorted according to index */ + uint keynr, /*!< in: key (index) number */ + bool sorted) /*!< in: 1 if result MUST be sorted + according to index */ { DBUG_ENTER("index_init"); @@ -8975,66 +10054,62 @@ ha_innobase::index_init( /******************************************************************//** Currently does nothing. -@return 0 */ -UNIV_INTERN +@return 0 */ + int ha_innobase::index_end(void) /*========================*/ { - int error = 0; DBUG_ENTER("index_end"); + + m_prebuilt->index->last_sel_cur->release(); + active_index = MAX_KEY; + in_range_check_pushed_down = FALSE; - ds_mrr.dsmrr_close(); - DBUG_RETURN(error); + + m_ds_mrr.dsmrr_close(); + + DBUG_RETURN(0); } /*********************************************************************//** Converts a search mode flag understood by MySQL to a flag understood by InnoDB. */ -static inline -ulint +page_cur_mode_t convert_search_mode_to_innobase( /*============================*/ - enum ha_rkey_function find_flag) + ha_rkey_function find_flag) { switch (find_flag) { case HA_READ_KEY_EXACT: /* this does not require the index to be UNIQUE */ - return(PAGE_CUR_GE); case HA_READ_KEY_OR_NEXT: return(PAGE_CUR_GE); - case HA_READ_KEY_OR_PREV: - return(PAGE_CUR_LE); case HA_READ_AFTER_KEY: return(PAGE_CUR_G); case HA_READ_BEFORE_KEY: return(PAGE_CUR_L); - case HA_READ_PREFIX: - return(PAGE_CUR_GE); + case HA_READ_KEY_OR_PREV: case HA_READ_PREFIX_LAST: - return(PAGE_CUR_LE); case HA_READ_PREFIX_LAST_OR_PREV: return(PAGE_CUR_LE); - /* In MySQL-4.0 HA_READ_PREFIX and HA_READ_PREFIX_LAST always - pass a complete-field prefix of a key value as the search - tuple. I.e., it is not allowed that the last field would - just contain n first bytes of the full field value. - MySQL uses a 'padding' trick to convert LIKE 'abc%' - type queries so that it can use as a search tuple - a complete-field-prefix of a key value. Thus, the InnoDB - search mode PAGE_CUR_LE_OR_EXTENDS is never used. - TODO: when/if MySQL starts to use also partial-field - prefixes, we have to deal with stripping of spaces - and comparison of non-latin1 char type fields in - innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to - work correctly. */ case HA_READ_MBR_CONTAIN: + return(PAGE_CUR_CONTAIN); case HA_READ_MBR_INTERSECT: + return(PAGE_CUR_INTERSECT); case HA_READ_MBR_WITHIN: + return(PAGE_CUR_WITHIN); case HA_READ_MBR_DISJOINT: + return(PAGE_CUR_DISJOINT); case HA_READ_MBR_EQUAL: + return(PAGE_CUR_MBR_EQUAL); + case HA_READ_PREFIX: return(PAGE_CUR_UNSUPP); + /* JAN: TODO: MySQL 5.7 + case HA_READ_INVALID: + return(PAGE_CUR_UNSUPP); + */ /* do not use "default:" in order to produce a gcc warning: enumeration value '...' not handled in switch (if -Wswitch or -Wall is used) */ @@ -9053,22 +10128,22 @@ the start of a new SQL statement, and what is associated with it. For each table in the database the MySQL interpreter may have several table handle instances in use, also in a single SQL query. For each table -handle instance there is an InnoDB 'prebuilt' struct which contains most +handle instance there is an InnoDB 'm_prebuilt' struct which contains most of the InnoDB data associated with this table handle instance. 
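The rewritten convert_search_mode_to_innobase() above only decides where the B-tree cursor is positioned; whether a fetched row counts as a hit is decided separately by the match mode chosen in the index_read() hunk further below (ROW_SEL_EXACT for HA_READ_KEY_EXACT, ROW_SEL_EXACT_PREFIX for HA_READ_PREFIX_LAST). A compact sketch of that pairing using stand-in enums rather than the real ha_rkey_function and page_cur_mode_t types.

/* Illustrative stand-ins only; not the InnoDB definitions. */
enum sketch_find_flag { FIND_KEY_EXACT, FIND_KEY_OR_NEXT, FIND_PREFIX_LAST };
enum sketch_cur_mode  { CUR_GE, CUR_LE };       /* cursor placement */
enum sketch_match     { MATCH_ANY, MATCH_EXACT, MATCH_EXACT_PREFIX };

struct sketch_search_plan {
        sketch_cur_mode mode;   /* where the cursor lands */
        sketch_match    match;  /* what still counts as a hit */
};

static sketch_search_plan
make_search_plan(sketch_find_flag f)
{
        switch (f) {
        case FIND_KEY_EXACT:
                return {CUR_GE, MATCH_EXACT};
        case FIND_KEY_OR_NEXT:
                return {CUR_GE, MATCH_ANY};
        case FIND_PREFIX_LAST:
                return {CUR_LE, MATCH_EXACT_PREFIX};
        }
        return {CUR_GE, MATCH_ANY};
}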
A) if the user has not explicitly set any MySQL table level locks: 1) MySQL calls ::external_lock to set an 'intention' table level lock on the table of the handle instance. There we set -prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set +m_prebuilt->sql_stat_start = TRUE. The flag sql_stat_start should be set true if we are taking this table handle instance to use in a new SQL statement issued by the user. We also increment trx->n_mysql_tables_in_use. - 2) If prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search -instructions to prebuilt->template of the table handle instance in + 2) If m_prebuilt->sql_stat_start == TRUE we 'pre-compile' the MySQL search +instructions to m_prebuilt->template of the table handle instance in ::index_read. The template is used to save CPU time in large joins. - 3) In row_search_for_mysql, if prebuilt->sql_stat_start is true, we + 3) In row_search_for_mysql, if m_prebuilt->sql_stat_start is true, we allocate a new consistent read view for the trx if it does not yet have one, or in the case of a locking read, set an InnoDB 'intention' table level lock on the table. @@ -9097,8 +10172,8 @@ start of a new SQL statement. */ /**********************************************************************//** Positions an index cursor to the index specified in the handle. Fetches the row if any. -@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ -UNIV_INTERN +@return 0, HA_ERR_KEY_NOT_FOUND, or error number */ + int ha_innobase::index_read( /*====================*/ @@ -9116,27 +10191,22 @@ ha_innobase::index_read( uint key_len,/*!< in: key value length */ enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ { - ulint mode; - dict_index_t* index; - ulint match_mode = 0; - int error; - dberr_t ret; - DBUG_ENTER("index_read"); DEBUG_SYNC_C("ha_innobase_index_read_begin"); - ut_a(prebuilt->trx == thd_to_trx(user_thd)); + ut_a(m_prebuilt->trx == thd_to_trx(m_user_thd)); ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT); ha_statistic_increment(&SSV::ha_read_key_count); - index = prebuilt->index; + dict_index_t* index = m_prebuilt->index; - if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) { - prebuilt->index_usable = FALSE; + if (index == NULL || dict_index_is_corrupted(index)) { + m_prebuilt->index_usable = FALSE; DBUG_RETURN(HA_ERR_CRASHED); } - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { + + if (!m_prebuilt->index_usable) { DBUG_RETURN(dict_index_is_corrupted(index) ? 
HA_ERR_INDEX_CORRUPT : HA_ERR_TABLE_DEF_CHANGED); @@ -9146,105 +10216,140 @@ ha_innobase::index_read( DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } + /* For R-Tree index, we will always place the page lock to + pages being searched */ + if (dict_index_is_spatial(index)) { + ++m_prebuilt->trx->will_lock; + } + /* Note that if the index for which the search template is built is not - necessarily prebuilt->index, but can also be the clustered index */ + necessarily m_prebuilt->index, but can also be the clustered index */ - if (prebuilt->sql_stat_start) { + if (m_prebuilt->sql_stat_start) { build_template(false); } - if (key_ptr) { + if (key_ptr != NULL) { /* Convert the search key value to InnoDB format into - prebuilt->search_tuple */ + m_prebuilt->search_tuple */ row_sel_convert_mysql_key_to_innobase( - prebuilt->search_tuple, - prebuilt->srch_key_val1, - prebuilt->srch_key_val_len, + m_prebuilt->search_tuple, + m_prebuilt->srch_key_val1, + m_prebuilt->srch_key_val_len, index, (byte*) key_ptr, (ulint) key_len, - prebuilt->trx); - DBUG_ASSERT(prebuilt->search_tuple->n_fields > 0); + m_prebuilt->trx); + + DBUG_ASSERT(m_prebuilt->search_tuple->n_fields > 0); } else { /* We position the cursor to the last or the first entry in the index */ - dtuple_set_n_fields(prebuilt->search_tuple, 0); + dtuple_set_n_fields(m_prebuilt->search_tuple, 0); } - mode = convert_search_mode_to_innobase(find_flag); + page_cur_mode_t mode = convert_search_mode_to_innobase(find_flag); - match_mode = 0; + ulint match_mode = 0; if (find_flag == HA_READ_KEY_EXACT) { match_mode = ROW_SEL_EXACT; - } else if (find_flag == HA_READ_PREFIX - || find_flag == HA_READ_PREFIX_LAST) { + } else if (find_flag == HA_READ_PREFIX_LAST) { match_mode = ROW_SEL_EXACT_PREFIX; } - last_match_mode = (uint) match_mode; + m_last_match_mode = (uint) match_mode; + + dberr_t ret; if (mode != PAGE_CUR_UNSUPP) { - innobase_srv_conc_enter_innodb(prebuilt->trx); + innobase_srv_conc_enter_innodb(m_prebuilt); + + if (!dict_table_is_intrinsic(m_prebuilt->table)) { + + if (TrxInInnoDB::is_aborted(m_prebuilt->trx)) { + + DBUG_RETURN(innobase_rollback( + ht, m_user_thd, false)); + } + + m_prebuilt->ins_sel_stmt = thd_is_ins_sel_stmt( + m_user_thd); + + ret = row_search_mvcc( + buf, mode, m_prebuilt, match_mode, 0); + + } else { + m_prebuilt->session = thd_to_innodb_session(m_user_thd); - ret = row_search_for_mysql((byte*) buf, mode, prebuilt, - match_mode, 0); + ret = row_search_no_mvcc( + buf, mode, m_prebuilt, match_mode, 0); + } - innobase_srv_conc_exit_innodb(prebuilt->trx); + innobase_srv_conc_exit_innodb(m_prebuilt); } else { ret = DB_UNSUPPORTED; } + DBUG_EXECUTE_IF("ib_select_query_failure", ret = DB_ERROR;); + + int error; + switch (ret) { case DB_SUCCESS: error = 0; table->status = 0; - if (prebuilt->table->is_system_db) { + if (m_prebuilt->table->is_system_db) { srv_stats.n_system_rows_read.add( - (size_t) prebuilt->trx->id, 1); + (size_t) m_prebuilt->trx->id, 1); } else { srv_stats.n_rows_read.add( - (size_t) prebuilt->trx->id, 1); + (size_t) m_prebuilt->trx->id, 1); } break; + case DB_RECORD_NOT_FOUND: error = HA_ERR_KEY_NOT_FOUND; table->status = STATUS_NOT_FOUND; break; + case DB_END_OF_INDEX: error = HA_ERR_KEY_NOT_FOUND; table->status = STATUS_NOT_FOUND; break; - case DB_TABLESPACE_DELETED: + case DB_TABLESPACE_DELETED: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_DISCARDED, table->s->table_name.str); table->status = STATUS_NOT_FOUND; error = HA_ERR_NO_SUCH_TABLE; 
break; + case DB_TABLESPACE_NOT_FOUND: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, - ER_TABLESPACE_MISSING, MYF(0), + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_MISSING, table->s->table_name.str); table->status = STATUS_NOT_FOUND; - error = HA_ERR_NO_SUCH_TABLE; + //error = HA_ERR_TABLESPACE_MISSING; + error = HA_ERR_NO_SUCH_TABLE; break; + default: error = convert_error_code_to_mysql( - ret, prebuilt->table->flags, user_thd); + ret, m_prebuilt->table->flags, m_user_thd); table->status = STATUS_NOT_FOUND; break; @@ -9256,8 +10361,8 @@ ha_innobase::index_read( /*******************************************************************//** The following functions works like index_read, but it find the last row with the current key value or prefix. -@return 0, HA_ERR_KEY_NOT_FOUND, or an error code */ -UNIV_INTERN +@return 0, HA_ERR_KEY_NOT_FOUND, or an error code */ + int ha_innobase::index_read_last( /*=========================*/ @@ -9272,8 +10377,8 @@ ha_innobase::index_read_last( /********************************************************************//** Get the index for a handle. Does not change active index. -@return NULL or index instance. */ -UNIV_INTERN +@return NULL or index instance. */ + dict_index_t* ha_innobase::innobase_get_index( /*============================*/ @@ -9281,35 +10386,33 @@ ha_innobase::innobase_get_index( clustered index, even if it was internally generated by InnoDB */ { - KEY* key = 0; - dict_index_t* index = 0; + KEY* key; + dict_index_t* index; DBUG_ENTER("innobase_get_index"); if (keynr != MAX_KEY && table->s->keys > 0) { + key = table->key_info + keynr; - index = innobase_index_lookup(share, keynr); + index = innobase_index_lookup(m_share, keynr); - if (index) { + if (index != NULL) { if (!key || ut_strcmp(index->name, key->name) != 0) { - fprintf(stderr, "InnoDB: [Error] Index for key no %u" - " mysql name %s , InnoDB name %s for table %s\n", - keynr, key ? key->name : "NULL", - index->name, - prebuilt->table->name); + ib::error() << " Index for key no " << keynr + << " mysql name " << (key ? key->name : "NULL") + << " InnoDB name " << index->name + << " for table " << m_prebuilt->table->name.m_name; for(ulint i=0; i < table->s->keys; i++) { - index = innobase_index_lookup(share, i); + index = innobase_index_lookup(m_share, i); key = table->key_info + keynr; if (index) { - - fprintf(stderr, "InnoDB: [Note] Index for key no %u" - " mysql name %s , InnoDB name %s for table %s\n", - keynr, key ? key->name : "NULL", - index->name, - prebuilt->table->name); + ib::info() << " Index for key no " << keynr + << " mysql name " << (key ? key->name : "NULL") + << " InnoDB name " << index->name + << " for table " << m_prebuilt->table->name.m_name; } } @@ -9320,29 +10423,30 @@ ha_innobase::innobase_get_index( /* Can't find index with keynr in the translation table. Only print message if the index translation table exists */ - if (share->idx_trans_tbl.index_mapping) { - sql_print_warning("InnoDB could not find " - "index %s key no %u for " - "table %s through its " - "index translation table", + if (m_share->idx_trans_tbl.index_mapping != NULL) { + sql_print_warning("InnoDB could not find" + " index %s key no %u for" + " table %s through its" + " index translation table", key ? 
key->name : "NULL", keynr, - prebuilt->table->name); + m_prebuilt->table->name.m_name); } - index = dict_table_get_index_on_name(prebuilt->table, - key->name); + index = dict_table_get_index_on_name( + m_prebuilt->table, key->name); } } else { - index = dict_table_get_first_index(prebuilt->table); + key = 0; + index = dict_table_get_first_index(m_prebuilt->table); } - if (!index) { + if (index == NULL) { sql_print_error( - "Innodb could not find key n:o %u with name %s " - "from dict cache for table %s", + "InnoDB could not find key no %u with name %s" + " from dict cache for table %s", keynr, key ? key->name : "NULL", - prebuilt->table->name); + m_prebuilt->table->name.m_name); } DBUG_RETURN(index); @@ -9350,8 +10454,8 @@ ha_innobase::innobase_get_index( /********************************************************************//** Changes the active index of a handle. -@return 0 or error code */ -UNIV_INTERN +@return 0 or error code */ + int ha_innobase::change_active_index( /*=============================*/ @@ -9361,46 +10465,60 @@ ha_innobase::change_active_index( { DBUG_ENTER("change_active_index"); - ut_ad(user_thd == ha_thd()); - ut_a(prebuilt->trx == thd_to_trx(user_thd)); + ut_ad(m_user_thd == ha_thd()); + ut_a(m_prebuilt->trx == thd_to_trx(m_user_thd)); + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (!dict_table_is_intrinsic(m_prebuilt->table) + && trx_in_innodb.is_aborted()) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } active_index = keynr; - prebuilt->index = innobase_get_index(keynr); + m_prebuilt->index = innobase_get_index(keynr); - if (UNIV_UNLIKELY(!prebuilt->index)) { + if (m_prebuilt->index == NULL) { sql_print_warning("InnoDB: change_active_index(%u) failed", keynr); - prebuilt->index_usable = FALSE; + m_prebuilt->index_usable = FALSE; DBUG_RETURN(1); } - prebuilt->index_usable = row_merge_is_index_usable(prebuilt->trx, - prebuilt->index); + m_prebuilt->index_usable = row_merge_is_index_usable( + m_prebuilt->trx, m_prebuilt->index); - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { - if (dict_index_is_corrupted(prebuilt->index)) { - char index_name[MAX_FULL_NAME_LEN + 1]; - char table_name[MAX_FULL_NAME_LEN + 1]; - - innobase_format_name( - index_name, sizeof index_name, - prebuilt->index->name, TRUE); + if (!m_prebuilt->index_usable) { + if (dict_index_is_corrupted(m_prebuilt->index)) { + char table_name[MAX_FULL_NAME_LEN + 1]; innobase_format_name( table_name, sizeof table_name, - prebuilt->index->table->name, FALSE); + m_prebuilt->index->table->name.m_name); + if (dict_index_is_clust(m_prebuilt->index)) { + ut_ad(m_prebuilt->index->table->corrupted); + push_warning_printf( + m_user_thd, Sql_condition::WARN_LEVEL_WARN, + ER_TABLE_CORRUPT, + "InnoDB: Table %s is corrupted.", + table_name); + DBUG_RETURN(ER_TABLE_CORRUPT); + } else { + push_warning_printf( + m_user_thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_INDEX_CORRUPT, + "InnoDB: Index %s for table %s is" + " marked as corrupted", + m_prebuilt->index->name(), + table_name); + DBUG_RETURN(HA_ERR_INDEX_CORRUPT); + } + } else { push_warning_printf( - user_thd, Sql_condition::WARN_LEVEL_WARN, - HA_ERR_INDEX_CORRUPT, - "InnoDB: Index %s for table %s is" - " marked as corrupted", - index_name, table_name); - DBUG_RETURN(HA_ERR_INDEX_CORRUPT); - } else { - push_warning_printf( - user_thd, Sql_condition::WARN_LEVEL_WARN, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TABLE_DEF_CHANGED, "InnoDB: insufficient history for index %u", keynr); @@ -9412,12 +10530,36 @@ ha_innobase::change_active_index( 
0, NULL)); } - ut_a(prebuilt->search_tuple != 0); + ut_a(m_prebuilt->search_tuple != 0); - dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields); - - dict_index_copy_types(prebuilt->search_tuple, prebuilt->index, - prebuilt->index->n_fields); + /* Initialization of search_tuple is not needed for FT index + since FT search returns rank only. In addition engine should + be able to retrieve FTS_DOC_ID column value if necessary. */ + if ((m_prebuilt->index->type & DICT_FTS)) { + /* JAN: TODO: MySQL 5.7 + if (table->fts_doc_id_field + && bitmap_is_set(table->read_set, + table->fts_doc_id_field->field_index + && m_prebuilt->read_just_key)) { + m_prebuilt->fts_doc_id_in_read_set = 1; + } + */ + } else { + dtuple_set_n_fields(m_prebuilt->search_tuple, + m_prebuilt->index->n_fields); + + dict_index_copy_types( + m_prebuilt->search_tuple, m_prebuilt->index, + m_prebuilt->index->n_fields); + + /* If it's FTS query and FTS_DOC_ID exists FTS_DOC_ID field is + always added to read_set. */ + /* JAN: TODO: MySQL 5.7 + m_prebuilt->fts_doc_id_in_read_set = + (m_prebuilt->read_just_key && table->fts_doc_id_field + && m_prebuilt->in_fts_query); + */ + } /* MySQL changes the active index for a handle also during some queries, for example SELECT MAX(a), SUM(a) first retrieves the MAX() @@ -9430,37 +10572,11 @@ ha_innobase::change_active_index( DBUG_RETURN(0); } -/**********************************************************************//** -Positions an index cursor to the index specified in keynr. Fetches the -row if any. -??? This is only used to read whole keys ??? -@return error number or 0 */ -UNIV_INTERN -int -ha_innobase::index_read_idx( -/*========================*/ - uchar* buf, /*!< in/out: buffer for the returned - row */ - uint keynr, /*!< in: use this index */ - const uchar* key, /*!< in: key value; if this is NULL - we position the cursor at the - start or end of index */ - uint key_len, /*!< in: key value length */ - enum ha_rkey_function find_flag)/*!< in: search flags from my_base.h */ -{ - if (change_active_index(keynr)) { - - return(1); - } - - return(index_read(buf, key, key_len, find_flag)); -} - /***********************************************************************//** Reads the next or previous row from a cursor, which must have previously been positioned using index_read. -@return 0, HA_ERR_END_OF_FILE, or error number */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error number */ + int ha_innobase::general_fetch( /*=======================*/ @@ -9470,35 +10586,47 @@ ha_innobase::general_fetch( uint match_mode) /*!< in: 0, ROW_SEL_EXACT, or ROW_SEL_EXACT_PREFIX */ { - dberr_t ret; - int error; - DBUG_ENTER("general_fetch"); - /* If transaction is not startted do not continue, instead return a error code. 
*/ - if(!(prebuilt->sql_stat_start || (prebuilt->trx && prebuilt->trx->state == 1))) { - DBUG_RETURN(HA_ERR_END_OF_FILE); + const trx_t* trx = m_prebuilt->trx; + + ut_ad(trx == thd_to_trx(m_user_thd)); + + bool intrinsic = dict_table_is_intrinsic(m_prebuilt->table); + + if (!intrinsic && TrxInInnoDB::is_aborted(trx)) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); } - ut_a(prebuilt->trx == thd_to_trx(user_thd)); + innobase_srv_conc_enter_innodb(m_prebuilt); + + dberr_t ret; - innobase_srv_conc_enter_innodb(prebuilt->trx); + if (!intrinsic) { + ret = row_search_mvcc( + buf, PAGE_CUR_UNSUPP, m_prebuilt, match_mode, + direction); + } else { + ret = row_search_no_mvcc( + buf, PAGE_CUR_UNSUPP, m_prebuilt, match_mode, + direction); + } - ret = row_search_for_mysql( - (byte*) buf, 0, prebuilt, match_mode, direction); + innobase_srv_conc_exit_innodb(m_prebuilt); - innobase_srv_conc_exit_innodb(prebuilt->trx); + int error; switch (ret) { case DB_SUCCESS: error = 0; table->status = 0; - if (prebuilt->table->is_system_db) { + if (m_prebuilt->table->is_system_db) { srv_stats.n_system_rows_read.add( - (size_t) prebuilt->trx->id, 1); + (size_t) m_prebuilt->trx->id, 1); } else { srv_stats.n_rows_read.add( - (size_t) prebuilt->trx->id, 1); + (size_t) m_prebuilt->trx->id, 1); } break; case DB_RECORD_NOT_FOUND: @@ -9510,9 +10638,8 @@ ha_innobase::general_fetch( table->status = STATUS_NOT_FOUND; break; case DB_TABLESPACE_DELETED: - ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_DISCARDED, table->s->table_name.str); @@ -9522,16 +10649,18 @@ ha_innobase::general_fetch( case DB_TABLESPACE_NOT_FOUND: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_MISSING, table->s->table_name.str); table->status = STATUS_NOT_FOUND; error = HA_ERR_NO_SUCH_TABLE; + // JAN: TODO: MySQL 5.7 + //error = HA_ERR_TABLESPACE_MISSING; break; default: error = convert_error_code_to_mysql( - ret, prebuilt->table->flags, user_thd); + ret, m_prebuilt->table->flags, m_user_thd); table->status = STATUS_NOT_FOUND; break; @@ -9543,8 +10672,8 @@ ha_innobase::general_fetch( /***********************************************************************//** Reads the next row from a cursor, which must have previously been positioned using index_read. -@return 0, HA_ERR_END_OF_FILE, or error number */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error number */ + int ha_innobase::index_next( /*====================*/ @@ -9558,8 +10687,8 @@ ha_innobase::index_next( /*******************************************************************//** Reads the next row matching to the key value given as the parameter. -@return 0, HA_ERR_END_OF_FILE, or error number */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error number */ + int ha_innobase::index_next_same( /*=========================*/ @@ -9569,14 +10698,14 @@ ha_innobase::index_next_same( { ha_statistic_increment(&SSV::ha_read_next_count); - return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode)); + return(general_fetch(buf, ROW_SEL_NEXT, m_last_match_mode)); } /***********************************************************************//** Reads the previous row from a cursor, which must have previously been positioned using index_read. 
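general_fetch() above, like update_row(), delete_row(), index_read(), rnd_init() and ft_read() in the surrounding hunks, now starts with the same guard: if the table is not intrinsic and the transaction has been force-aborted, the handler rolls back via innobase_rollback() and returns that result instead of calling into row_search. The helper below is purely hypothetical and only expresses that repeated check once; the patch itself keeps the check inline, and every name here is illustrative.

/* Illustrative only: the prologue repeated across the handler methods
   in these hunks.  Returns true when the caller must bail out with *err. */
typedef int (*rollback_fn)(void* hton, void* thd, bool rollback_trx);

static bool
bail_out_if_force_aborted(
        bool            is_intrinsic,   /* dict_table_is_intrinsic(table) */
        bool            is_aborted,     /* TrxInInnoDB::is_aborted(trx) */
        rollback_fn     rollback,       /* e.g. innobase_rollback */
        void*           hton,
        void*           thd,
        int*            err)            /* out: value to return */
{
        if (!is_intrinsic && is_aborted) {
                *err = rollback(hton, thd, false);
                return(true);
        }

        return(false);
}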
-@return 0, HA_ERR_END_OF_FILE, or error number */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error number */ + int ha_innobase::index_prev( /*====================*/ @@ -9590,19 +10719,18 @@ ha_innobase::index_prev( /********************************************************************//** Positions a cursor on the first record in an index and reads the corresponding row to buf. -@return 0, HA_ERR_END_OF_FILE, or error code */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error code */ + int ha_innobase::index_first( /*=====================*/ uchar* buf) /*!< in/out: buffer for the row */ { - int error; - DBUG_ENTER("index_first"); + ha_statistic_increment(&SSV::ha_read_first_count); - error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); + int error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */ @@ -9616,19 +10744,18 @@ ha_innobase::index_first( /********************************************************************//** Positions a cursor on the last record in an index and reads the corresponding row to buf. -@return 0, HA_ERR_END_OF_FILE, or error code */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error code */ + int ha_innobase::index_last( /*====================*/ uchar* buf) /*!< in/out: buffer for the row */ { - int error; - DBUG_ENTER("index_last"); + ha_statistic_increment(&SSV::ha_read_last_count); - error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); + int error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); /* MySQL does not seem to allow this to return HA_ERR_KEY_NOT_FOUND */ @@ -9641,22 +10768,30 @@ ha_innobase::index_last( /****************************************************************//** Initialize a table scan. -@return 0 or error number */ -UNIV_INTERN +@return 0 or error number */ + int ha_innobase::rnd_init( /*==================*/ - bool scan) /*!< in: TRUE if table/index scan FALSE otherwise */ + bool scan) /*!< in: true if table/index scan FALSE otherwise */ { + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (!dict_table_is_intrinsic(m_prebuilt->table) + && trx_in_innodb.is_aborted()) { + + return(innobase_rollback(ht, m_user_thd, false)); + } + int err; /* Store the active index value so that we can restore the original value after a scan */ - if (prebuilt->clust_index_was_generated) { + if (m_prebuilt->clust_index_was_generated) { err = change_active_index(MAX_KEY); } else { - err = change_active_index(primary_key); + err = change_active_index(m_primary_key); } /* Don't use semi-consistent read in random row reads (by position). @@ -9666,15 +10801,15 @@ ha_innobase::rnd_init( try_semi_consistent_read(0); } - start_of_scan = 1; + m_start_of_scan = true; return(err); } /*****************************************************************//** Ends a table scan. -@return 0 or error number */ -UNIV_INTERN +@return 0 or error number */ + int ha_innobase::rnd_end(void) /*======================*/ @@ -9685,8 +10820,8 @@ ha_innobase::rnd_end(void) /*****************************************************************//** Reads the next row in a table scan (also used to read the FIRST row in a table scan). 
-@return 0, HA_ERR_END_OF_FILE, or error number */ -UNIV_INTERN +@return 0, HA_ERR_END_OF_FILE, or error number */ + int ha_innobase::rnd_next( /*==================*/ @@ -9696,16 +10831,17 @@ ha_innobase::rnd_next( int error; DBUG_ENTER("rnd_next"); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); - if (start_of_scan) { + if (m_start_of_scan) { error = index_first(buf); if (error == HA_ERR_KEY_NOT_FOUND) { error = HA_ERR_END_OF_FILE; } - start_of_scan = 0; + m_start_of_scan = false; } else { error = general_fetch(buf, ROW_SEL_NEXT, 0); } @@ -9715,8 +10851,8 @@ ha_innobase::rnd_next( /**********************************************************************//** Fetches a row from the table based on a row reference. -@return 0, HA_ERR_KEY_NOT_FOUND, or error code */ -UNIV_INTERN +@return 0, HA_ERR_KEY_NOT_FOUND, or error code */ + int ha_innobase::rnd_pos( /*=================*/ @@ -9726,20 +10862,19 @@ ha_innobase::rnd_pos( index was internally generated by InnoDB; the length of data in pos has to be ref_length */ { - int error; DBUG_ENTER("rnd_pos"); DBUG_DUMP("key", pos, ref_length); ha_statistic_increment(&SSV::ha_read_rnd_count); - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); /* Note that we assume the length of the row reference is fixed for the table, and it is == ref_length */ - error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); + int error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); - if (error) { + if (error != 0) { DBUG_PRINT("error", ("Got error: %d", error)); } @@ -9749,7 +10884,7 @@ ha_innobase::rnd_pos( /**********************************************************************//** Initialize FT index scan @return 0 or error number */ -UNIV_INTERN + int ha_innobase::ft_init() /*==================*/ @@ -9773,7 +10908,7 @@ ha_innobase::ft_init() /**********************************************************************//** Initialize FT index scan @return FT_INFO structure if successful or NULL */ -UNIV_INTERN + FT_INFO* ha_innobase::ft_init_ext( /*=====================*/ @@ -9781,47 +10916,59 @@ ha_innobase::ft_init_ext( uint keynr, /* in: */ String* key) /* in: */ { - trx_t* trx; - dict_table_t* ft_table; - dberr_t error; - byte* query = (byte*) key->ptr(); - ulint query_len = key->length(); - const CHARSET_INFO* char_set = key->charset(); NEW_FT_INFO* fts_hdl = NULL; dict_index_t* index; fts_result_t* result; char buf_tmp[8192]; ulint buf_tmp_used; uint num_errors; + ulint query_len = key->length(); + const CHARSET_INFO* char_set = key->charset(); + const char* query = key->ptr(); if (fts_enable_diag_print) { - fprintf(stderr, "keynr=%u, '%.*s'\n", - keynr, (int) key->length(), (byte*) key->ptr()); + { + ib::info out; + out << "keynr=" << keynr << ", '"; + out.write(key->ptr(), key->length()); + } if (flags & FT_BOOL) { - fprintf(stderr, "BOOL search\n"); + ib::info() << "BOOL search"; } else { - fprintf(stderr, "NL search\n"); + ib::info() << "NL search"; } } /* FIXME: utf32 and utf16 are not compatible with some string function used. So to convert them to uft8 before - proceed. */ + we proceed. 
*/ if (strcmp(char_set->csname, "utf32") == 0 || strcmp(char_set->csname, "utf16") == 0) { + buf_tmp_used = innobase_convert_string( buf_tmp, sizeof(buf_tmp) - 1, &my_charset_utf8_general_ci, query, query_len, (CHARSET_INFO*) char_set, &num_errors); - query = (byte*) buf_tmp; + buf_tmp[buf_tmp_used] = 0; + query = buf_tmp; query_len = buf_tmp_used; - query[query_len] = 0; } - trx = prebuilt->trx; + trx_t* trx = m_prebuilt->trx; + + TrxInInnoDB trx_in_innodb(trx); + + if (trx_in_innodb.is_aborted()) { + + int ret = innobase_rollback(ht, m_user_thd, false); + + my_error(ret, MYF(0)); + + return(NULL); + } /* FTS queries are not treated as autocommit non-locking selects. This is because the FTS implementation can acquire locks behind @@ -9832,7 +10979,7 @@ ha_innobase::ft_init_ext( ++trx->will_lock; } - ft_table = prebuilt->table; + dict_table_t* ft_table = m_prebuilt->table; /* Table does not have an FTS index */ if (!ft_table->fts || ib_vector_is_empty(ft_table->fts->indexes)) { @@ -9849,12 +10996,13 @@ ha_innobase::ft_init_ext( if (keynr == NO_SUCH_KEY) { /* FIXME: Investigate the NO_SUCH_KEY usage */ - index = (dict_index_t*) ib_vector_getp(ft_table->fts->indexes, 0); + index = reinterpret_cast + (ib_vector_getp(ft_table->fts->indexes, 0)); } else { index = innobase_get_index(keynr); } - if (!index || index->type != DICT_FTS) { + if (index == NULL || index->type != DICT_FTS) { my_error(ER_TABLE_HAS_NO_FT, MYF(0)); return(NULL); } @@ -9865,28 +11013,50 @@ ha_innobase::ft_init_ext( ft_table->fts->fts_status |= ADDED_TABLE_SYNCED; } - error = fts_query(trx, index, flags, query, query_len, &result); + const byte* q = reinterpret_cast( + const_cast(query)); + + dberr_t error = fts_query(trx, index, flags, q, query_len, &result); if (error != DB_SUCCESS) { - my_error(convert_error_code_to_mysql(error, 0, NULL), - MYF(0)); + my_error(convert_error_code_to_mysql(error, 0, NULL), MYF(0)); return(NULL); } /* Allocate FTS handler, and instantiate it before return */ - fts_hdl = static_cast(my_malloc(sizeof(NEW_FT_INFO), - MYF(0))); + fts_hdl = reinterpret_cast( + my_malloc(sizeof(NEW_FT_INFO), MYF(0))); + /* JAN: TODO: MySQL 5.7 PSI + my_malloc(PSI_INSTRUMENT_ME, sizeof(NEW_FT_INFO), MYF(0))); + */ fts_hdl->please = const_cast<_ft_vft*>(&ft_vft_result); fts_hdl->could_you = const_cast<_ft_vft_ext*>(&ft_vft_ext_result); - fts_hdl->ft_prebuilt = prebuilt; + fts_hdl->ft_prebuilt = m_prebuilt; fts_hdl->ft_result = result; - /* FIXME: Re-evluate the condition when Bug 14469540 - is resolved */ - prebuilt->in_fts_query = true; + /* FIXME: Re-evaluate the condition when Bug 14469540 is resolved */ + m_prebuilt->in_fts_query = true; + + return(reinterpret_cast(fts_hdl)); +} + +/**********************************************************************//** +Initialize FT index scan +@return FT_INFO structure if successful or NULL */ - return((FT_INFO*) fts_hdl); +FT_INFO* +ha_innobase::ft_init_ext_with_hints( +/*================================*/ + uint keynr, /* in: key num */ + String* key, /* in: key */ + void* hints) /* in: hints */ +// JAN: TODO: MySQL 5.7 +// Ft_hints* hints) /* in: hints */ +{ + /* TODO Implement function properly working with FT hint. 
*/ + //return(ft_init_ext(hints->get_flags(), keynr, key)); + return NULL; } /*****************************************************************//** @@ -9897,7 +11067,7 @@ static void innobase_fts_create_doc_id_key( /*===========================*/ - dtuple_t* tuple, /* in/out: prebuilt->search_tuple */ + dtuple_t* tuple, /* in/out: m_prebuilt->search_tuple */ const dict_index_t* index, /* in: index (FTS_DOC_ID_INDEX) */ doc_id_t* doc_id) /* in/out: doc id to search, value @@ -9917,7 +11087,7 @@ innobase_fts_create_doc_id_key( dict_field_t* field = dict_index_get_nth_field(index, 0); ut_a(field->col->mtype == DATA_INT); ut_ad(sizeof(*doc_id) == field->fixed_len); - ut_ad(innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME) == 0); + ut_ad(!strcmp(index->name, FTS_DOC_ID_INDEX_NAME)); #endif /* UNIV_DEBUG */ /* Convert to storage byte order */ @@ -9936,21 +11106,28 @@ innobase_fts_create_doc_id_key( /**********************************************************************//** Fetch next result from the FT result set @return error code */ -UNIV_INTERN + int ha_innobase::ft_read( /*=================*/ uchar* buf) /*!< in/out: buf contain result row */ { - fts_result_t* result; - int error; + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (trx_in_innodb.is_aborted()) { + + return(innobase_rollback(ht, m_user_thd, false)); + } + row_prebuilt_t* ft_prebuilt; - ft_prebuilt = ((NEW_FT_INFO*) ft_handler)->ft_prebuilt; + ft_prebuilt = reinterpret_cast(ft_handler)->ft_prebuilt; + + ut_a(ft_prebuilt == m_prebuilt); - ut_a(ft_prebuilt == prebuilt); + fts_result_t* result; - result = ((NEW_FT_INFO*) ft_handler)->ft_result; + result = reinterpret_cast(ft_handler)->ft_result; if (result->current == NULL) { /* This is the case where the FTS query did not @@ -9975,25 +11152,32 @@ ha_innobase::ft_read( next_record: if (result->current != NULL) { - dict_index_t* index; - dtuple_t* tuple = prebuilt->search_tuple; doc_id_t search_doc_id; + dtuple_t* tuple = m_prebuilt->search_tuple; /* If we only need information from result we can return without fetching the table row */ if (ft_prebuilt->read_just_key) { + if (m_prebuilt->fts_doc_id_in_read_set) { + fts_ranking_t* ranking; + ranking = rbt_value(fts_ranking_t, + result->current); + innobase_fts_store_docid( + table, ranking->doc_id); + } table->status= 0; return(0); } - index = dict_table_get_index_on_name( - prebuilt->table, FTS_DOC_ID_INDEX_NAME); + dict_index_t* index; + + index = m_prebuilt->table->fts_doc_id_index; /* Must find the index */ - ut_a(index); + ut_a(index != NULL); /* Switch to the FTS doc id index */ - prebuilt->index = index; + m_prebuilt->index = index; fts_ranking_t* ranking = rbt_value( fts_ranking_t, result->current); @@ -10005,12 +11189,14 @@ next_record: tuple. 
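innobase_fts_create_doc_id_key() above asserts that FTS_DOC_ID_INDEX consists of a single fixed-length DATA_INT field and then writes the requested doc id into the search tuple in storage byte order before ft_read() positions on it with PAGE_CUR_GE / ROW_SEL_EXACT. Storage byte order for InnoDB integers is big-endian; the helper below is a hypothetical stand-in for that conversion, not the call the patch uses.

/* Illustrative only: lay out an 8-byte doc id most-significant byte
   first, the way it is compared inside the FTS_DOC_ID_INDEX B-tree. */
static void
write_doc_id_be(unsigned char* dst, unsigned long long doc_id)
{
        for (int i = 7; i >= 0; i--) {
                dst[i] = (unsigned char) (doc_id & 0xFF);
                doc_id >>= 8;
        }
}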
*/ innobase_fts_create_doc_id_key(tuple, index, &search_doc_id); - innobase_srv_conc_enter_innodb(prebuilt->trx); + innobase_srv_conc_enter_innodb(m_prebuilt); dberr_t ret = row_search_for_mysql( - (byte*) buf, PAGE_CUR_GE, prebuilt, ROW_SEL_EXACT, 0); + (byte*) buf, PAGE_CUR_GE, m_prebuilt, ROW_SEL_EXACT, 0); + + innobase_srv_conc_exit_innodb(m_prebuilt); - innobase_srv_conc_exit_innodb(prebuilt->trx); + int error; switch (ret) { case DB_SUCCESS: @@ -10040,7 +11226,7 @@ next_record: case DB_TABLESPACE_DELETED: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_DISCARDED, table->s->table_name.str); @@ -10050,16 +11236,18 @@ next_record: case DB_TABLESPACE_NOT_FOUND: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLESPACE_MISSING, table->s->table_name.str); table->status = STATUS_NOT_FOUND; error = HA_ERR_NO_SUCH_TABLE; + // JAN: TODO: MySQL 5.7 + // error = HA_ERR_TABLESPACE_MISSING; break; default: error = convert_error_code_to_mysql( - ret, 0, user_thd); + ret, 0, m_user_thd); table->status = STATUS_NOT_FOUND; break; @@ -10077,13 +11265,14 @@ next_record: void ha_innobase::ft_end() { - fprintf(stderr, "ft_end()\n"); + ib::info() << "ft_end()"; rnd_end(); } #ifdef WITH_WSREP extern dict_index_t* wsrep_dict_foreign_find_index( +/*==========================*/ dict_table_t* table, const char** col_names, const char** columns, @@ -10107,16 +11296,16 @@ wsrep_append_foreign_key( THD* thd = (THD*)trx->mysql_thd; ulint rcode = DB_SUCCESS; char cache_key[513] = {'\0'}; - int cache_key_len; - bool const copy = true; + int cache_key_len=0; + bool const copy = true; if (!wsrep_on(trx->mysql_thd) || - wsrep_thd_exec_mode(thd) != LOCAL_STATE) + wsrep_thd_exec_mode(thd) != LOCAL_STATE) { return DB_SUCCESS; + } if (!thd || !foreign || - (!foreign->referenced_table && !foreign->foreign_table)) - { + (!foreign->referenced_table && !foreign->foreign_table)) { WSREP_INFO("FK: %s missing in: %s", (!thd) ? "thread" : ((!foreign) ? "constraint" : @@ -10128,40 +11317,36 @@ wsrep_append_foreign_key( } if ( !((referenced) ? - foreign->referenced_table : foreign->foreign_table)) - { + foreign->referenced_table : foreign->foreign_table)) { WSREP_DEBUG("pulling %s table into cache", (referenced) ? "referenced" : "foreign"); mutex_enter(&(dict_sys->mutex)); - if (referenced) - { + + if (referenced) { foreign->referenced_table = dict_table_get_low( foreign->referenced_table_name_lookup); - if (foreign->referenced_table) - { + if (foreign->referenced_table) { foreign->referenced_index = wsrep_dict_foreign_find_index( foreign->referenced_table, NULL, foreign->referenced_col_names, - foreign->n_fields, + foreign->n_fields, foreign->foreign_index, TRUE, FALSE); } - } - else - { + } else { foreign->foreign_table = dict_table_get_low( foreign->foreign_table_name_lookup); - if (foreign->foreign_table) - { + + if (foreign->foreign_table) { foreign->foreign_index = wsrep_dict_foreign_find_index( foreign->foreign_table, NULL, foreign->foreign_col_names, foreign->n_fields, - foreign->referenced_index, + foreign->referenced_index, TRUE, FALSE); } } @@ -10169,8 +11354,7 @@ wsrep_append_foreign_key( } if ( !((referenced) ? - foreign->referenced_table : foreign->foreign_table)) - { + foreign->referenced_table : foreign->foreign_table)) { WSREP_WARN("FK: %s missing in query: %s", (!foreign->referenced_table) ? 
"referenced table" : "foreign table", @@ -10178,6 +11362,7 @@ wsrep_append_foreign_key( wsrep_thd_query(thd) : "void"); return DB_ERROR; } + byte key[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; ulint len = WSREP_MAX_SUPPORTED_KEY_LENGTH; @@ -10187,36 +11372,39 @@ wsrep_append_foreign_key( UT_LIST_GET_FIRST(foreign->referenced_table->indexes) : UT_LIST_GET_FIRST(foreign->foreign_table->indexes); int i = 0; + while (idx != NULL && idx != idx_target) { if (innobase_strcasecmp (idx->name, innobase_index_reserve_name) != 0) { i++; } idx = UT_LIST_GET_NEXT(indexes, idx); } + ut_a(idx); key[0] = (char)i; rcode = wsrep_rec_get_foreign_key( &key[1], &len, rec, index, idx, wsrep_protocol_version > 1); + if (rcode != DB_SUCCESS) { WSREP_ERROR( "FK key set failed: %lu (%lu %lu), index: %s %s, %s", rcode, referenced, shared, - (index && index->name) ? index->name : - "void index", - (index && index->table_name) ? index->table_name : - "void table", + (index) ? index->name() : "void index", + (index) ? index->table->name.m_name : "void table", wsrep_thd_query(thd)); return DB_ERROR; } + strncpy(cache_key, (wsrep_protocol_version > 1) ? ((referenced) ? - foreign->referenced_table->name : - foreign->foreign_table->name) : - foreign->foreign_table->name, sizeof(cache_key) - 1); + foreign->referenced_table->name.m_name : + foreign->foreign_table->name.m_name) : + foreign->foreign_table->name.m_name, sizeof(cache_key) - 1); cache_key_len = strlen(cache_key); + #ifdef WSREP_DEBUG_PRINT ulint j; fprintf(stderr, "FK parent key, table: %s %s len: %lu ", @@ -10227,16 +11415,18 @@ wsrep_append_foreign_key( fprintf(stderr, "\n"); #endif char *p = strchr(cache_key, '/'); + if (p) { *p = '\0'; } else { WSREP_WARN("unexpected foreign key table %s %s", - foreign->referenced_table->name, - foreign->foreign_table->name); + foreign->referenced_table->name.m_name, + foreign->foreign_table->name.m_name); } wsrep_buf_t wkey_part[3]; wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key( (const uchar*)cache_key, cache_key_len + 1, @@ -10248,7 +11438,9 @@ wsrep_append_foreign_key( wsrep_thd_query(thd) : "void"); return DB_ERROR; } + wsrep_t *wsrep= get_wsrep(); + rcode = (int)wsrep->append_key( wsrep, wsrep_ws_handle(thd, trx), @@ -10256,6 +11448,7 @@ wsrep_append_foreign_key( 1, shared ? 
WSREP_KEY_SHARED : WSREP_KEY_EXCLUSIVE, copy); + if (rcode) { DBUG_PRINT("wsrep", ("row key failed: %lu", rcode)); WSREP_ERROR("Appending cascaded fk row key failed: %s, %lu", @@ -10269,7 +11462,7 @@ wsrep_append_foreign_key( static int wsrep_append_key( -/*==================*/ +/*=============*/ THD *thd, trx_t *trx, TABLE_SHARE *table_share, @@ -10293,6 +11486,7 @@ wsrep_append_key( #endif wsrep_buf_t wkey_part[3]; wsrep_key_t wkey = {wkey_part, 3}; + if (!wsrep_prepare_key( (const uchar*)table_share->table_cache_key.str, table_share->table_cache_key.length, @@ -10306,6 +11500,7 @@ wsrep_append_key( } wsrep_t *wsrep= get_wsrep(); + int rcode = (int)wsrep->append_key( wsrep, wsrep_ws_handle(thd, trx), @@ -10320,21 +11515,26 @@ wsrep_append_key( wsrep_thd_query(thd) : "void", rcode); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } + DBUG_RETURN(0); } static bool -referenced_by_foreign_key2(dict_table_t* table, - dict_index_t* index) { - ut_ad(table != NULL); - ut_ad(index != NULL); +referenced_by_foreign_key2( +/*=======================*/ + dict_table_t* table, + dict_index_t* index) +{ + ut_ad(table != NULL); + ut_ad(index != NULL); + + const dict_foreign_set* fks = &table->referenced_set; - const dict_foreign_set* fks = &table->referenced_set; - for (dict_foreign_set::const_iterator it = fks->begin(); + for (dict_foreign_set::const_iterator it = fks->begin(); it != fks->end(); - ++it) - { + ++it) { dict_foreign_t* foreign = *it; + if (foreign->referenced_index != index) { continue; } @@ -10346,7 +11546,7 @@ referenced_by_foreign_key2(dict_table_t* table, int ha_innobase::wsrep_append_keys( -/*==================*/ +/*===========================*/ THD *thd, bool shared, const uchar* record0, /* in: row in MySQL format */ @@ -10359,10 +11559,10 @@ ha_innobase::wsrep_append_keys( trx_t *trx = thd_to_trx(thd); if (table_share && table_share->tmp_table != NO_TMP_TABLE) { - WSREP_DEBUG("skipping tmp table DML: THD: %lu tmp: %d SQL: %s", + WSREP_DEBUG("skipping tmp table DML: THD: %lu tmp: %d SQL: %s", thd_get_thread_id(thd), table_share->tmp_table, - (wsrep_thd_query(thd)) ? + (wsrep_thd_query(thd)) ? 
wsrep_thd_query(thd) : "void"); DBUG_RETURN(0); } @@ -10379,13 +11579,14 @@ ha_innobase::wsrep_append_keys( if (!is_null) { rcode = wsrep_append_key( - thd, trx, table_share, table, keyval, + thd, trx, table_share, table, keyval, len, shared); - if (rcode) DBUG_RETURN(rcode); - } - else - { - WSREP_DEBUG("NULL key skipped (proto 0): %s", + + if (rcode) { + DBUG_RETURN(rcode); + } + } else { + WSREP_DEBUG("NULL key skipped (proto 0): %s", wsrep_thd_query(thd)); } } else { @@ -10417,7 +11618,7 @@ ha_innobase::wsrep_append_keys( if (!tab) { WSREP_WARN("MySQL-InnoDB key mismatch %s %s", - table->s->table_name.str, + table->s->table_name.str, key_info->name); } /* !hasPK == table with no PK, must append all non-unique keys */ @@ -10427,32 +11628,35 @@ ha_innobase::wsrep_append_keys( (!tab && referenced_by_foreign_key()))) { len = wsrep_store_key_val_for_row( - thd, table, i, key0, - WSREP_MAX_SUPPORTED_KEY_LENGTH, + thd, table, i, key0, + WSREP_MAX_SUPPORTED_KEY_LENGTH, record0, &is_null); if (!is_null) { rcode = wsrep_append_key( - thd, trx, table_share, table, + thd, trx, table_share, table, keyval0, len+1, shared); - if (rcode) DBUG_RETURN(rcode); + + if (rcode) { + DBUG_RETURN(rcode); + } if (key_info->flags & HA_NOSAME || shared) key_appended = true; - } - else - { - WSREP_DEBUG("NULL key skipped: %s", + } else { + WSREP_DEBUG("NULL key skipped: %s", wsrep_thd_query(thd)); } + if (record1) { len = wsrep_store_key_val_for_row( - thd, table, i, key1, + thd, table, i, key1, WSREP_MAX_SUPPORTED_KEY_LENGTH, record1, &is_null); + if (!is_null && memcmp(key0, key1, len)) { rcode = wsrep_append_key( - thd, trx, table_share, - table, + thd, trx, table_share, + table, keyval1, len+1, shared); if (rcode) DBUG_RETURN(rcode); } @@ -10466,19 +11670,20 @@ ha_innobase::wsrep_append_keys( uchar digest[16]; int rcode; - wsrep_calc_row_hash(digest, record0, table, prebuilt, thd); - if ((rcode = wsrep_append_key(thd, trx, table_share, table, - (const char*) digest, 16, + wsrep_calc_row_hash(digest, record0, table, m_prebuilt, thd); + + if ((rcode = wsrep_append_key(thd, trx, table_share, table, + (const char*) digest, 16, shared))) { DBUG_RETURN(rcode); } if (record1) { wsrep_calc_row_hash( - digest, record1, table, prebuilt, thd); - if ((rcode = wsrep_append_key(thd, trx, table_share, + digest, record1, table, m_prebuilt, thd); + if ((rcode = wsrep_append_key(thd, trx, table_share, table, - (const char*) digest, + (const char*) digest, 16, shared))) { DBUG_RETURN(rcode); } @@ -10498,7 +11703,7 @@ is the current 'position' of the handle, because if row ref is actually the row id internally generated in InnoDB, then 'record' does not contain it. We just guess that the row id must be for the record where the handle was positioned the last time. 
*/ -UNIV_INTERN + void ha_innobase::position( /*==================*/ @@ -10506,9 +11711,9 @@ ha_innobase::position( { uint len; - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); - if (prebuilt->clust_index_was_generated) { + if (m_prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we generated the clustered index from row id: the row reference will be the row id, not any key value @@ -10516,18 +11721,21 @@ ha_innobase::position( len = DATA_ROW_ID_LEN; - memcpy(ref, prebuilt->row_id, len); + memcpy(ref, m_prebuilt->row_id, len); } else { - len = store_key_val_for_row(primary_key, (char*) ref, - ref_length, record); + + /* Copy primary key as the row reference */ + KEY* key_info = table->key_info + m_primary_key; + key_copy(ref, (uchar*)record, key_info, key_info->key_length); + len = key_info->key_length; } /* We assume that the 'ref' value len is always fixed for the same table. */ if (len != ref_length) { - sql_print_error("Stored ref len is %lu, but table ref len is " - "%lu", (ulong) len, (ulong) ref_length); + sql_print_error("Stored ref len is %lu, but table ref len is" + " %lu", (ulong) len, (ulong) ref_length); } } @@ -10554,9 +11762,12 @@ create_table_check_doc_id_col( ulint unsigned_type; field = form->field[i]; + if (!field->stored_in_db()) { + continue; + } - col_type = get_innobase_type_from_mysql_type(&unsigned_type, - field); + col_type = get_innobase_type_from_mysql_type( + &unsigned_type, field); col_len = field->pack_length(); @@ -10576,9 +11787,9 @@ create_table_check_doc_id_col( trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: FTS_DOC_ID column must be " - "of BIGINT NOT NULL type, and named " - "in all capitalized characters"); + "InnoDB: FTS_DOC_ID column must be" + " of BIGINT NOT NULL type, and named" + " in all capitalized characters"); my_error(ER_WRONG_COLUMN_NAME, MYF(0), field->field_name); *doc_id_col = ULINT_UNDEFINED; @@ -10591,31 +11802,47 @@ create_table_check_doc_id_col( return(false); } -/*****************************************************************//** -Creates a table definition to an InnoDB database. */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +/** Set up base columns for virtual column +@param[in] table InnoDB table +@param[in] field MySQL field +@param[in,out] v_col virtual column */ +void +innodb_base_col_setup( + dict_table_t* table, + const Field* field, + dict_v_col_t* v_col) +{ + for (uint i= 0; i < field->table->s->fields; ++i) { + // const Field* base_field = field->table->field[i]; + /* JAN: TODO: MySQL 5.7 Virtual columns + if (!base_field->is_virtual_gcol() + && bitmap_is_set(&field->gcol_info->base_columns_map, i)) { + ulint z; + + for (z = 0; z < table->n_cols; z++) { + const char* name = dict_table_get_col_name(table, z); + if (!innobase_strcasecmp(name, + base_field->field_name)) { + break; + } + } + + ut_ad(z != table->n_cols); + + v_col->base_col[n] = dict_table_get_nth_col(table, z); + ut_ad(v_col->base_col[n]->ind == z); + n++; + } + */ + } +} + +/** Create a table definition to an InnoDB database. 
+@return ER_* level error */ +inline __attribute__((warn_unused_result)) int -create_table_def( -/*=============*/ - trx_t* trx, /*!< in: InnoDB transaction handle */ - const TABLE* form, /*!< in: information on table - columns and indexes */ - const char* table_name, /*!< in: table name */ - const char* temp_path, /*!< in: if this is a table explicitly - created by the user with the - TEMPORARY keyword, then this - parameter is the dir path where the - table should be placed if we create - an .ibd file for it (no .ibd extension - in the path, though). Otherwise this - is a zero length-string */ - const char* remote_path, /*!< in: Remote path or zero length-string */ - ulint flags, /*!< in: table flags */ - ulint flags2, /*!< in: table flags2 */ - fil_encryption_t mode, /*!< in: encryption mode */ - ulint key_id) /*!< in: encryption key_id */ -{ - THD* thd = trx->mysql_thd; +create_table_info_t::create_table_def() +{ dict_table_t* table; ulint n_cols; dberr_t err; @@ -10627,44 +11854,58 @@ create_table_def( ulint long_true_varchar; ulint charset_no; ulint i; + ulint j = 0; ulint doc_id_col = 0; ibool has_doc_id_col = FALSE; mem_heap_t* heap; + ulint num_v = 0; + ulint space_id = 0; + ulint actual_n_cols; + ha_table_option_struct *options= m_form->s->option_struct; DBUG_ENTER("create_table_def"); - DBUG_PRINT("enter", ("table_name: %s", table_name)); + DBUG_PRINT("enter", ("table_name: %s", m_table_name)); - DBUG_ASSERT(thd != NULL); + DBUG_ASSERT(m_trx->mysql_thd == m_thd); /* MySQL does the name length check. But we do additional check on the name length here */ - const size_t table_name_len = strlen(table_name); + const size_t table_name_len = strlen(m_table_name); if (table_name_len > MAX_FULL_NAME_LEN) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_TABLE_NAME, "InnoDB: Table Name or Database Name is too long"); DBUG_RETURN(ER_TABLE_NAME); } - if (table_name[table_name_len - 1] == '/') { + if (m_table_name[table_name_len - 1] == '/') { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_TABLE_NAME, "InnoDB: Table name is empty"); DBUG_RETURN(ER_WRONG_TABLE_NAME); } - n_cols = form->s->fields; + n_cols = m_form->s->fields; + + /* Find out any virtual column */ + for (i = 0; i < n_cols; i++) { + Field* field = m_form->field[i]; + + if (innobase_is_v_fld(field)) { + num_v++; + } + } /* Check whether there already exists a FTS_DOC_ID column */ - if (create_table_check_doc_id_col(trx, form, &doc_id_col)){ + if (create_table_check_doc_id_col(m_trx, m_form, &doc_id_col)){ /* Raise error if the Doc ID column is of wrong type or name */ if (doc_id_col == ULINT_UNDEFINED) { - trx_commit_for_mysql(trx); + trx_commit_for_mysql(m_trx); err = DB_ERROR; goto error_ret; @@ -10673,59 +11914,94 @@ create_table_def( } } - /* We pass 0 as the space id, and determine at a lower level the space - id where to store the table */ + /* For single-table tablespaces, we pass 0 as the space id, and then + determine the actual space id when the tablespace is created. 
*/ + if (DICT_TF_HAS_SHARED_SPACE(m_flags)) { + ut_ad(m_tablespace != NULL && m_tablespace[0] != '\0'); + + space_id = fil_space_get_id_by_name(m_tablespace); + } - if (flags2 & DICT_TF2_FTS) { - /* Adjust for the FTS hidden field */ - if (!has_doc_id_col) { - table = dict_mem_table_create(table_name, 0, form->s->stored_fields + 1, - flags, flags2); + /* Adjust the number of columns for the FTS hidden field */ + actual_n_cols = m_form->s->stored_fields; - /* Set the hidden doc_id column. */ - table->fts->doc_col = form->s->stored_fields; - } else { - table = dict_mem_table_create(table_name, 0, form->s->stored_fields, - flags, flags2); - table->fts->doc_col = doc_id_col; - } - } else { - table = dict_mem_table_create(table_name, 0, form->s->stored_fields, - flags, flags2); + if (m_flags2 & DICT_TF2_FTS && !has_doc_id_col) { + actual_n_cols += 1; } - if (flags2 & DICT_TF2_TEMPORARY) { - ut_a(strlen(temp_path)); + table = dict_mem_table_create(m_table_name, space_id, + actual_n_cols, num_v, m_flags, m_flags2); + + /* Set the hidden doc_id column. */ + if (m_flags2 & DICT_TF2_FTS) { + table->fts->doc_col = has_doc_id_col + ? doc_id_col : n_cols - num_v; + } + + if (strlen(m_temp_path) != 0) { table->dir_path_of_temp_table = - mem_heap_strdup(table->heap, temp_path); + mem_heap_strdup(table->heap, m_temp_path); } - if (DICT_TF_HAS_DATA_DIR(flags)) { - ut_a(strlen(remote_path)); - table->data_dir_path = mem_heap_strdup(table->heap, remote_path); + if (DICT_TF_HAS_DATA_DIR(m_flags)) { + ut_a(strlen(m_remote_path)); + + table->data_dir_path = mem_heap_strdup( + table->heap, m_remote_path); + } else { table->data_dir_path = NULL; } + + if (DICT_TF_HAS_SHARED_SPACE(m_flags)) { + ut_ad(strlen(m_tablespace)); + table->tablespace = mem_heap_strdup(table->heap, m_tablespace); + } else { + table->tablespace = NULL; + } + heap = mem_heap_create(1000); for (i = 0; i < n_cols; i++) { - Field* field = form->field[i]; - if (!field->stored_in_db()) - continue; + ulint is_virtual; + Field* field = m_form->field[i]; + + if (!field->stored_in_db()) { + continue; + } + + /* Generate a unique column name by pre-pending table-name for + intrinsic tables. For other tables (including normal + temporary) column names are unique. If not, MySQL layer will + block such statement. + This is work-around fix till Optimizer can handle this issue + (probably 5.7.4+). */ + char field_name[MAX_FULL_NAME_LEN + 2 + 10]; + + if (dict_table_is_intrinsic(table) && field->orig_table) { + + ut_snprintf(field_name, sizeof(field_name), + "%s_%s_%lu", field->orig_table->alias.c_ptr(), + field->field_name, i); + + } else { + ut_snprintf(field_name, sizeof(field_name), + "%s", field->field_name); + } - col_type = get_innobase_type_from_mysql_type(&unsigned_type, - field); + col_type = get_innobase_type_from_mysql_type( + &unsigned_type, field); if (!col_type) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_CANT_CREATE_TABLE, - "Error creating table '%s' with " - "column '%s'. Please check its " - "column type and try to re-create " - "the table with an appropriate " - "column type.", - table->name, field->field_name); + "Error creating table '%s' with" + " column '%s'. 
Please check its" + " column type and try to re-create" + " the table with an appropriate" + " column type.", + table->name.m_name, field->field_name); goto err_col; } @@ -10738,11 +12014,11 @@ create_table_def( charset_no = (ulint) field->charset()->number; - if (UNIV_UNLIKELY(charset_no > MAX_CHAR_COLL_NUM)) { + if (charset_no > MAX_CHAR_COLL_NUM) { /* in data0type.h we assume that the number fits in one byte in prtype */ push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_CANT_CREATE_TABLE, "In InnoDB, charset-collation codes" " must be below 256." @@ -10753,9 +12029,6 @@ create_table_def( } } - /* we assume in dtype_form_prtype() that this fits in - two bytes */ - ut_a(static_cast(field->type()) <= MAX_CHAR_COLL_NUM); col_len = field->pack_length(); /* The MySQL pack length contains 1 or 2 bytes length field @@ -10773,49 +12046,197 @@ create_table_def( } } + if (col_type == DATA_POINT) { + col_len = DATA_POINT_LEN; + } + + is_virtual = (innobase_is_v_fld(field)) ? DATA_VIRTUAL : 0; + /* First check whether the column to be added has a system reserved name. */ - if (dict_col_name_is_reserved(field->field_name)){ + if (dict_col_name_is_reserved(field_name)){ my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); + field_name); err_col: dict_mem_table_free(table); mem_heap_free(heap); - trx_commit_for_mysql(trx); + trx_commit_for_mysql(m_trx); err = DB_ERROR; goto error_ret; } - dict_mem_table_add_col(table, heap, - field->field_name, - col_type, - dtype_form_prtype( - (ulint) field->type() - | nulls_allowed | unsigned_type - | binary_type | long_true_varchar, - charset_no), - col_len); + if (!is_virtual) { + dict_mem_table_add_col(table, heap, + field_name, col_type, + dtype_form_prtype( + (ulint) field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar, + charset_no), + col_len); + } else { + dict_mem_table_add_v_col(table, heap, + field_name, col_type, + dtype_form_prtype( + (ulint) field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar + | is_virtual, + charset_no), + col_len, i, + 0); + // JAN: TODO: MySQL 5.7 Virtual columns + //field->gcol_info->non_virtual_base_columns()); + } + } + + if (num_v) { + for (i = 0; i < n_cols; i++) { + dict_v_col_t* v_col; + + Field* field = m_form->field[i]; + + if (!innobase_is_v_fld(field)) { + continue; + } + + v_col = dict_table_get_nth_v_col(table, j); + + j++; + + innodb_base_col_setup(table, field, v_col); + } } /* Add the FTS doc_id hidden column. */ - if (flags2 & DICT_TF2_FTS && !has_doc_id_col) { + if (m_flags2 & DICT_TF2_FTS && !has_doc_id_col) { fts_add_doc_id_column(table, heap); } - err = row_create_table_for_mysql(table, trx, false, mode, key_id); + /* If temp table, then we avoid creation of entries in SYSTEM TABLES. + Given that temp table lifetime is limited to connection/server lifetime + on re-start we don't need to restore temp-table and so no entry is + needed in SYSTEM tables. 
*/ + if (dict_table_is_temporary(table)) { + ulint clen = 0; - mem_heap_free(heap); + // JAN: TODO: MySQL 5.7 compressed tables + // if (m_create_info->compress.length > 0) { - DBUG_EXECUTE_IF("ib_create_err_tablespace_exist", - err = DB_TABLESPACE_EXISTS;); + if (clen > 0) { + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: Compression not supported for " + "temporary tables"); + + err = DB_UNSUPPORTED; + + } else { + + /* Get a new table ID */ + dict_table_assign_new_id(table, m_trx); + + /* Create temp tablespace if configured. */ + err = dict_build_tablespace_for_table(table, NULL); + + if (err == DB_SUCCESS) { + /* Temp-table are maintained in memory and so + can_be_evicted is FALSE. */ + mem_heap_t* temp_table_heap; + + temp_table_heap = mem_heap_create(256); + + /* For intrinsic table (given that they are + not shared beyond session scope), add + it to session specific THD structure + instead of adding it to dictionary cache. */ + if (dict_table_is_intrinsic(table)) { + add_table_to_thread_cache( + table, temp_table_heap, m_thd); + + } else { + dict_table_add_to_cache( + table, FALSE, temp_table_heap); + } + + DBUG_EXECUTE_IF("ib_ddl_crash_during_create2", + DBUG_SUICIDE();); + + mem_heap_free(temp_table_heap); + } + } + + } else { + + const char* algorithm = ""; + // JAN: TODO: MySQL 5.7 + // const char* algorithm = m_create_info->compress.str; + + err = DB_SUCCESS; + ulint clen = 0; + + // if (!(m_flags2 & DICT_TF2_USE_FILE_PER_TABLE) + // && m_create_info->compress.length > 0 + // && !Compression::is_none(algorithm)) { + if (!(m_flags2 & DICT_TF2_USE_FILE_PER_TABLE) + && clen > 0 + && !Compression::is_none(algorithm)) { + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: Compression not supported for " + "shared tablespaces"); + + algorithm = NULL; + + err = DB_UNSUPPORTED; + + } else if (Compression::validate(algorithm) != DB_SUCCESS + || m_form->s->row_type == ROW_TYPE_COMPRESSED + || m_create_info->key_block_size > 0) { + + algorithm = NULL; + } + + if (err == DB_SUCCESS) { + err = row_create_table_for_mysql( + table, algorithm, m_trx, false, + (fil_encryption_t)options->encryption, + options->encryption_key_id); + } + + if (err == DB_IO_NO_PUNCH_HOLE_FS) { + + ut_ad(!is_shared_tablespace(table->space)); + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: Punch hole not supported by the " + "file system or the tablespace page size " + "is not large enough. Compression disabled"); + + err = DB_SUCCESS; + } + } + + mem_heap_free(heap); + + DBUG_EXECUTE_IF("ib_create_err_tablespace_exist", + err = DB_TABLESPACE_EXISTS;); if (err == DB_DUPLICATE_KEY || err == DB_TABLESPACE_EXISTS) { char display_name[FN_REFLEN]; char* buf_end = innobase_convert_identifier( display_name, sizeof(display_name) - 1, - table_name, strlen(table_name), - thd, TRUE); + m_table_name, strlen(m_table_name), + m_thd); *buf_end = '\0'; @@ -10824,17 +12245,17 @@ err_col: : ER_TABLESPACE_EXISTS, MYF(0), display_name); } - if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) { + if (err == DB_SUCCESS && (m_flags2 & DICT_TF2_FTS)) { fts_optimize_add_table(table); } error_ret: - DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd)); + DBUG_RETURN(convert_error_code_to_mysql(err, m_flags, m_thd)); } /*****************************************************************//** Creates an index in an InnoDB database. 
*/ -static +inline int create_index( /*=========*/ @@ -10858,20 +12279,36 @@ create_index( /* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */ ut_a(innobase_strcasecmp(key->name, innobase_index_reserve_name) != 0); - if (key->flags & HA_FULLTEXT) { + ind_type = 0; + if (key->flags & HA_SPATIAL) { + ind_type = DICT_SPATIAL; + } else if (key->flags & HA_FULLTEXT) { + ind_type = DICT_FTS; + } + + if (ind_type != 0) + { index = dict_mem_index_create(table_name, key->name, 0, - DICT_FTS, + ind_type, key->user_defined_key_parts); for (ulint i = 0; i < key->user_defined_key_parts; i++) { KEY_PART_INFO* key_part = key->key_part + i; + + /* We do not support special (Fulltext or Spatial) + index on virtual columns */ + if (innobase_is_v_fld(key_part->field)) { + ut_ad(0); + DBUG_RETURN(HA_ERR_UNSUPPORTED); + } + dict_mem_index_add_field( index, key_part->field->field_name, 0); } DBUG_RETURN(convert_error_code_to_mysql( row_create_index_for_mysql( - index, trx, NULL), + index, trx, NULL, NULL), flags, NULL)); } @@ -10886,6 +12323,11 @@ create_index( ind_type |= DICT_UNIQUE; } + /* JAN: TODO: MySQL 5.7 PSI + field_lengths = (ulint*) my_malloc(PSI_INSTRUMENT_ME, + key->user_defined_key_parts * sizeof * + field_lengths, MYF(MY_FAE)); + */ field_lengths = (ulint*) my_malloc( key->user_defined_key_parts * sizeof * field_lengths, MYF(MY_FAE)); @@ -10896,6 +12338,26 @@ create_index( index = dict_mem_index_create(table_name, key->name, 0, ind_type, key->user_defined_key_parts); + innodb_session_t*& priv = thd_to_innodb_session(trx->mysql_thd); + dict_table_t* handler = priv->lookup_table_handler(table_name); + + if (handler != NULL) { + /* This setting will enforce SQL NULL == SQL NULL. + For now this is turned-on for intrinsic tables + only but can be turned on for other tables if needed arises. */ + index->nulls_equal = + (key->flags & HA_NULL_ARE_EQUAL) ? true : false; + + /* Disable use of AHI for intrinsic table indexes as AHI + validates the predicated entry using index-id which has to be + system-wide unique that is not the case with indexes of + intrinsic table for performance reason. + Also given the lifetime of these tables and frequent delete + and update AHI would not help on performance front as it does + with normal tables. */ + index->disable_ahi = true; + } + for (ulint i = 0; i < key->user_defined_key_parts; i++) { KEY_PART_INFO* key_part = key->key_part + i; ulint prefix_len; @@ -10908,29 +12370,29 @@ create_index( specified number of first bytes of the column to the index field.) The flag does not seem to be properly set by MySQL. Let us fall back on testing - the length of the key part versus the column. */ - - Field* field = NULL; + the length of the key part versus the column. + We first reach to the table's column; if the index is on a + prefix, key_part->field is not the table's column (it's a + "fake" field forged in open_table_from_share() with length + equal to the length of the prefix); so we have to go to + form->fied. 
*/ + Field* field= form->field[key_part->field->field_index]; + if (field == NULL) + ut_error; - for (ulint j = 0; j < form->s->fields; j++) { + const char* field_name = key_part->field->field_name; + if (handler != NULL && dict_table_is_intrinsic(handler)) { - field = form->field[j]; - - if (0 == innobase_strcasecmp( - field->field_name, - key_part->field->field_name)) { - /* Found the corresponding column */ - - goto found; - } + ut_ad(!innobase_is_v_fld(key_part->field)); + ulint col_no = dict_col_get_no(dict_table_get_nth_col( + handler, key_part->field->field_index)); + field_name = dict_table_get_col_name(handler, col_no); } - ut_error; -found: col_type = get_innobase_type_from_mysql_type( &is_unsigned, key_part->field); - if (DATA_BLOB == col_type + if (DATA_LARGE_MTYPE(col_type) || (key_part->length < field->pack_length() && field->type() != MYSQL_TYPE_VARCHAR) || (field->type() == MYSQL_TYPE_VARCHAR @@ -10946,10 +12408,10 @@ found: case DATA_DOUBLE: case DATA_DECIMAL: sql_print_error( - "MySQL is trying to create a column " - "prefix index field, on an " - "inappropriate data type. Table " - "name %s, column name %s.", + "MySQL is trying to create a column" + " prefix index field, on an" + " inappropriate data type. Table" + " name %s, column name %s.", table_name, key_part->field->field_name); @@ -10961,8 +12423,11 @@ found: field_lengths[i] = key_part->length; - dict_mem_index_add_field( - index, key_part->field->field_name, prefix_len); + if (innobase_is_v_fld(key_part->field)) { + index->type |= DICT_VIRTUAL; + } + + dict_mem_index_add_field(index, field_name, prefix_len); } ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS)); @@ -10972,9 +12437,13 @@ found: sure we don't create too long indexes. */ error = convert_error_code_to_mysql( - row_create_index_for_mysql(index, trx, field_lengths), + row_create_index_for_mysql(index, trx, field_lengths, handler), flags, NULL); + if (error && handler != NULL) { + priv->unregister_table_handler(table_name); + } + my_free(field_lengths); DBUG_RETURN(error); @@ -10983,7 +12452,7 @@ found: /*****************************************************************//** Creates an index to an InnoDB table when the user has defined no primary index. */ -static +inline int create_clustered_index_when_no_primary( /*===================================*/ @@ -11000,19 +12469,36 @@ create_clustered_index_when_no_primary( innobase_index_reserve_name, 0, DICT_CLUSTERED, 0); - error = row_create_index_for_mysql(index, trx, NULL); + innodb_session_t*& priv = thd_to_innodb_session(trx->mysql_thd); + + dict_table_t* handler = priv->lookup_table_handler(table_name); + + if (handler != NULL) { + /* Disable use of AHI for intrinsic table indexes as AHI + validates the predicated entry using index-id which has to be + system-wide unique that is not the case with indexes of + intrinsic table for performance reason. + Also given the lifetime of these tables and frequent delete + and update AHI would not help on performance front as it does + with normal tables. 
*/ + index->disable_ahi = true; + } + + error = row_create_index_for_mysql(index, trx, NULL, handler); + + if (error != DB_SUCCESS && handler != NULL) { + priv->unregister_table_handler(table_name); + } return(convert_error_code_to_mysql(error, flags, NULL)); } -/*****************************************************************//** -Return a display name for the row format +/** Return a display name for the row format +@param[in] row_format Row Format @return row format name */ -UNIV_INTERN const char* get_row_format_name( -/*================*/ - enum row_type row_format) /*!< in: Row Format */ + enum row_type row_format) { switch (row_format) { case ROW_TYPE_COMPACT: @@ -11034,75 +12520,301 @@ get_row_format_name( return("NOT USED"); } -/** If file-per-table is missing, issue warning and set ret false */ -#define CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace)\ - if (!use_tablespace) { \ - push_warning_printf( \ - thd, Sql_condition::WARN_LEVEL_WARN, \ - ER_ILLEGAL_HA_CREATE_OPTION, \ - "InnoDB: ROW_FORMAT=%s requires" \ - " innodb_file_per_table.", \ - get_row_format_name(row_format)); \ - ret = "ROW_FORMAT"; \ +/** Validate DATA DIRECTORY option. +@return true if valid, false if not. */ +bool +create_table_info_t::create_option_data_directory_is_valid() +{ + bool is_valid = true; + + ut_ad(m_create_info->data_file_name + && m_create_info->data_file_name[0] != '\0'); + + /* Use DATA DIRECTORY only with file-per-table. */ + if (!m_use_shared_space && !m_allow_file_per_table) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: DATA DIRECTORY requires" + " innodb_file_per_table."); + is_valid = false; + } + + /* Do not use DATA DIRECTORY with TEMPORARY TABLE. */ + if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: DATA DIRECTORY cannot be used" + " for TEMPORARY tables."); + is_valid = false; + } + + /* We check for a DATA DIRECTORY mixed with TABLESPACE in + create_option_tablespace_is_valid(), no need to here. */ + + return(is_valid); +} + +/** Validate the tablespace name provided for a tablespace DDL +@param[in] name A proposed tablespace name +@param[in] for_table Caller is putting a table here +@return MySQL handler error code like HA_... */ +static +int +validate_tablespace_name( + const char* name, + bool for_table) +{ + int err = 0; + + /* This prefix is reserved by InnoDB for use in internal tablespace names. */ + const char reserved_space_name_prefix[] = "innodb_"; + + /* JAN: TODO: MySQL 5.7 + if (check_tablespace_name(name) != IDENT_NAME_OK) { + err = HA_WRONG_CREATE_OPTION; + } + */ + + /* The tablespace name cannot start with `innodb_`. 
*/ + if (strlen(name) >= sizeof(reserved_space_name_prefix) - 1 + && 0 == memcmp(name, reserved_space_name_prefix, + sizeof(reserved_space_name_prefix) - 1)) { + + /* Use a different message for reserved names */ + if (0 == strcmp(name, reserved_file_per_table_space_name) + || 0 == strcmp(name, reserved_system_space_name) + || 0 == strcmp(name, reserved_temporary_space_name)) { + /* Allow these names if the caller is putting a + table into one of these by CREATE/ALTER TABLE */ + if (!for_table) { + my_printf_error( + ER_WRONG_NAME_FOR_CATALOG, + // JAN: TODO: MySQL 5.7 + //ER_WRONG_TABLESPACE_NAME, + "InnoDB: `%s` is a reserved" + " tablespace name.", + MYF(0), name); + err = HA_WRONG_CREATE_OPTION; + } + } else { + my_printf_error( + ER_WRONG_NAME_FOR_CATALOG, + // JAN: TODO: MYSQL 5.7 + //ER_WRONG_TABLESPACE_NAME, + "InnoDB: A general tablespace" + " name cannot start with `%s`.", + MYF(0), reserved_space_name_prefix); + err = HA_WRONG_CREATE_OPTION; + } } -/** If file-format is Antelope, issue warning and set ret false */ -#define CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE \ - if (srv_file_format < UNIV_FORMAT_B) { \ - push_warning_printf( \ - thd, Sql_condition::WARN_LEVEL_WARN, \ - ER_ILLEGAL_HA_CREATE_OPTION, \ - "InnoDB: ROW_FORMAT=%s requires" \ - " innodb_file_format > Antelope.", \ - get_row_format_name(row_format)); \ - ret = "ROW_FORMAT"; \ + /* The tablespace name cannot contain a '/'. */ + if (memchr(name, '/', strlen(name)) != NULL) { + my_printf_error( + ER_WRONG_NAME_FOR_CATALOG, + // my_printf_error(ER_WRONG_TABLESPACE_NAME, + "InnoDB: A general tablespace name cannot" + " contain '/'.", MYF(0)); + err = HA_WRONG_CREATE_OPTION; } + return(err); +} -/*****************************************************************//** -Validates the create options. We may build on this function -in future. For now, it checks two specifiers: -KEY_BLOCK_SIZE and ROW_FORMAT -If innodb_strict_mode is not set then this function is a no-op -@return NULL if valid, string if not. */ -UNIV_INTERN +/** Validate TABLESPACE option. +@return true if valid, false if not. */ +bool +create_table_info_t::create_option_tablespace_is_valid() +{ + ut_ad(m_use_shared_space); + + if (0 != validate_tablespace_name(m_create_info->tablespace, true)) { + return(false); + } + + /* Look up the tablespace name in the fil_system. */ + ulint space_id = fil_space_get_id_by_name( + m_create_info->tablespace); + + if (space_id == ULINT_UNDEFINED) { + my_printf_error(ER_TABLESPACE_MISSING, + "InnoDB: A general tablespace named" + " `%s` cannot be found.", MYF(0), + m_create_info->tablespace); + return(false); + } + + /* Cannot add a second table to a file-per-table tablespace. */ + ulint fsp_flags = fil_space_get_flags(space_id); + if (fsp_is_file_per_table(space_id, fsp_flags)) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Tablespace `%s` is file-per-table so no" + " other table can be added to it.", MYF(0), + m_create_info->tablespace); + return(false); + } + + /* If TABLESPACE=innodb_file_per_table this function is not called + since tablespace_is_shared_space() will return false. Any other + tablespace is incompatible with the DATA DIRECTORY phrase. + On any ALTER TABLE that contains a DATA DIRECTORY, MySQL will issue + a warning like " option ignored." The check below is + needed for CREATE TABLE only. ALTER TABLE may be moving remote + file-per-table table to a general tablespace, in which case the + create_info->data_file_name is not null. 
*/ + bool is_create_table = (thd_sql_command(m_thd) == SQLCOM_CREATE_TABLE); + if (is_create_table + && m_create_info->data_file_name != NULL + && m_create_info->data_file_name[0] != '\0') { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: DATA DIRECTORY cannot be used" + " with a TABLESPACE assignment.", MYF(0)); + return(false); + } + + /* Temp tables only belong in temp tablespaces. */ + if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE) { + if (!FSP_FLAGS_GET_TEMPORARY(fsp_flags)) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Tablespace `%s` cannot contain" + " TEMPORARY tables.", MYF(0), + m_create_info->tablespace); + return(false); + } + + /* Restrict Compressed Temporary General tablespaces. */ + if (m_create_info->key_block_size + || m_create_info->row_type == ROW_TYPE_COMPRESSED) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Temporary tablespace `%s` cannot" + " contain COMPRESSED tables.", MYF(0), + m_create_info->tablespace); + return(false); + } + } else if (FSP_FLAGS_GET_TEMPORARY(fsp_flags)) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Tablespace `%s` can only contain" + " TEMPORARY tables.", MYF(0), + m_create_info->tablespace); + return(false); + } + + /* Make sure the physical page size of the table matches the + file block size of the tablespace. */ + ulint block_size_needed; + bool table_is_compressed; + if (m_create_info->key_block_size) { + block_size_needed = m_create_info->key_block_size * 1024; + table_is_compressed = true; + } else if (m_create_info->row_type == ROW_TYPE_COMPRESSED) { + block_size_needed = ut_min( + UNIV_PAGE_SIZE / 2, + static_cast(UNIV_ZIP_SIZE_MAX)); + table_is_compressed = true; + } else { + block_size_needed = UNIV_PAGE_SIZE; + table_is_compressed = false; + } + + const page_size_t page_size(fsp_flags); + + /* The compression code needs some work in order for a general + tablespace to contain both compressed and non-compressed tables + together in the same tablespace. The problem seems to be that + each page is either compressed or not based on the fsp flags, + which is shared by all tables in that general tablespace. */ + if (table_is_compressed && page_size.physical() == UNIV_PAGE_SIZE) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Tablespace `%s` cannot contain a" + " COMPRESSED table", MYF(0), + m_create_info->tablespace); + return(false); + } + + if (block_size_needed != page_size.physical()) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Tablespace `%s` uses block size " ULINTPF + " and cannot contain a table with physical" + " page size " ULINTPF, MYF(0), + m_create_info->tablespace, + page_size.physical(), + block_size_needed); + return(false); + } + + return(true); +} + +/** Validate the create options. Check that the options KEY_BLOCK_SIZE, +ROW_FORMAT, DATA DIRECTORY, TEMPORARY & TABLESPACE are compatible with +each other and other settings. These CREATE OPTIONS are not validated +here unless innodb_strict_mode is on. With strict mode, this function +will report each problem it finds using a custom message with error +code ER_ILLEGAL_HA_CREATE_OPTION, not its built-in message. +@return NULL if valid, string name of bad option if not. */ const char* -create_options_are_invalid( -/*=======================*/ - THD* thd, /*!< in: connection thread. */ - TABLE* form, /*!< in: information on table - columns and indexes */ - HA_CREATE_INFO* create_info, /*!< in: create info. 
*/ - bool use_tablespace) /*!< in: srv_file_per_table */ +create_table_info_t::create_options_are_invalid() { - ibool kbs_specified = FALSE; - const char* ret = NULL; - enum row_type row_format = form->s->row_type; + bool has_key_block_size = (m_create_info->key_block_size != 0); - ut_ad(thd != NULL); + const char* ret = NULL; + enum row_type row_format = m_create_info->row_type; - /* If innodb_strict_mode is not set don't do any validation. */ - if (!(THDVAR(thd, strict_mode))) { - return(NULL); + ut_ad(m_thd != NULL); + ut_ad(m_create_info != NULL); + + /* The TABLESPACE designation on a CREATE TABLE is not subject to + non-strict-mode. If it is incorrect or is incompatible with other + options, then we will return an error. Make sure the tablespace exists + and is compatible with this table */ + if (m_use_shared_space + && !create_option_tablespace_is_valid()) { + return("TABLESPACE"); } - ut_ad(form != NULL); - ut_ad(create_info != NULL); + /* If innodb_strict_mode is not set don't do any more validation. */ + if (!m_use_shared_space && !(THDVAR(m_thd, strict_mode))) { + return(NULL); + } /* First check if a non-zero KEY_BLOCK_SIZE was specified. */ - if (create_info->key_block_size) { - kbs_specified = TRUE; - switch (create_info->key_block_size) { + if (has_key_block_size) { + switch (m_create_info->key_block_size) { ulint kbs_max; case 1: case 2: case 4: case 8: case 16: + /* The maximum KEY_BLOCK_SIZE (KBS) is + UNIV_PAGE_SIZE_MAX. But if UNIV_PAGE_SIZE is + smaller than UNIV_PAGE_SIZE_MAX, the maximum + KBS is also smaller. */ + kbs_max = ut_min( + 1 << (UNIV_PAGE_SSIZE_MAX - 1), + 1 << (PAGE_ZIP_SSIZE_MAX - 1)); + if (m_create_info->key_block_size > kbs_max) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: KEY_BLOCK_SIZE=%ld" + " cannot be larger than %ld.", + m_create_info->key_block_size, + kbs_max); + ret = "KEY_BLOCK_SIZE"; + } + + /* The following checks do not appy to shared tablespaces */ + if (m_use_shared_space) { + break; + } + /* Valid KEY_BLOCK_SIZE, check its dependencies. */ - if (!use_tablespace) { + if (!m_allow_file_per_table) { push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: KEY_BLOCK_SIZE requires" " innodb_file_per_table."); @@ -11110,58 +12822,77 @@ create_options_are_invalid( } if (srv_file_format < UNIV_FORMAT_B) { push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: KEY_BLOCK_SIZE requires" " innodb_file_format > Antelope."); ret = "KEY_BLOCK_SIZE"; } - - /* The maximum KEY_BLOCK_SIZE (KBS) is 16. But if - UNIV_PAGE_SIZE is smaller than 16k, the maximum - KBS is also smaller. */ - kbs_max = ut_min( - 1 << (UNIV_PAGE_SSIZE_MAX - 1), - 1 << (PAGE_ZIP_SSIZE_MAX - 1)); - if (create_info->key_block_size > kbs_max) { - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: KEY_BLOCK_SIZE=%ld" - " cannot be larger than %ld.", - create_info->key_block_size, - kbs_max); - ret = "KEY_BLOCK_SIZE"; - } break; default: push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: invalid KEY_BLOCK_SIZE = %lu." 
" Valid values are [1, 2, 4, 8, 16]", - create_info->key_block_size); + m_create_info->key_block_size); ret = "KEY_BLOCK_SIZE"; break; } } - /* Check for a valid Innodb ROW_FORMAT specifier and + /* Check for a valid InnoDB ROW_FORMAT specifier and other incompatibilities. */ switch (row_format) { case ROW_TYPE_COMPRESSED: - CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); - CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; + if (!m_use_shared_space) { + if (!m_allow_file_per_table) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: ROW_FORMAT=%s requires" + " innodb_file_per_table.", + get_row_format_name(row_format)); + ret = "ROW_FORMAT"; + } + if (srv_file_format < UNIV_FORMAT_B) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: ROW_FORMAT=%s requires" + " innodb_file_format > Antelope.", + get_row_format_name(row_format)); + ret = "ROW_FORMAT"; + } + } break; case ROW_TYPE_DYNAMIC: - CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); - CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + if (!m_use_shared_space) { + if (!m_allow_file_per_table) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: ROW_FORMAT=%s requires" + " innodb_file_per_table.", + get_row_format_name(row_format)); + ret = "ROW_FORMAT"; + } + if (srv_file_format < UNIV_FORMAT_B) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: ROW_FORMAT=%s requires" + " innodb_file_format > Antelope.", + get_row_format_name(row_format)); + ret = "ROW_FORMAT"; + } + } + /* FALLTRHOUGH */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: - if (kbs_specified) { + if (has_key_block_size) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: cannot specify ROW_FORMAT = %s" " with KEY_BLOCK_SIZE.", @@ -11175,86 +12906,280 @@ create_options_are_invalid( case ROW_TYPE_PAGE: case ROW_TYPE_NOT_USED: push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, \ + m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: invalid ROW_FORMAT specifier."); ret = "ROW_TYPE"; break; } - /* Use DATA DIRECTORY only with file-per-table. */ - if (create_info->data_file_name && !use_tablespace) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: DATA DIRECTORY requires" - " innodb_file_per_table."); - ret = "DATA DIRECTORY"; - } - - /* Do not use DATA DIRECTORY with TEMPORARY TABLE. 
*/ - if (create_info->data_file_name - && create_info->options & HA_LEX_CREATE_TMP_TABLE) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: DATA DIRECTORY cannot be used" - " for TEMPORARY tables."); + if (m_create_info->data_file_name + && m_create_info->data_file_name[0] != '\0' + && !create_option_data_directory_is_valid()) { ret = "DATA DIRECTORY"; } /* Do not allow INDEX_DIRECTORY */ - if (create_info->index_file_name) { + if (m_create_info->index_file_name) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: INDEX DIRECTORY is not supported"); ret = "INDEX DIRECTORY"; } - if ((kbs_specified || row_format == ROW_TYPE_COMPRESSED) - && UNIV_PAGE_SIZE > (1<<14)) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: Cannot create a COMPRESSED table" - " when innodb_page_size > 16k."); - - if (kbs_specified) { + /* Don't support compressed table when page size > 16k. */ + if ((has_key_block_size || row_format == ROW_TYPE_COMPRESSED) + && UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF) { + push_warning(m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Cannot create a COMPRESSED table" + " when innodb_page_size > 16k."); + + if (has_key_block_size) { ret = "KEY_BLOCK_SIZE"; } else { ret = "ROW_TYPE"; } } - return(ret); +#ifdef MYSQL_COMPRESSION + /* Note: Currently the max length is 4: ZLIB, LZ4, NONE. */ + + if (ret == NULL && m_create_info->compress.length > 0) { + + dberr_t err; + Compression compression; + + err = Compression::check( + m_create_info->compress.str, &compression); + + if (err == DB_UNSUPPORTED) { + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_UNSUPPORTED_EXTENSION, + "InnoDB: Unsupported compression algorithm '" + "%s'", + m_create_info->compress.str); + + ret = "COMPRESSION"; + + } else if (m_create_info->key_block_size > 0 + && compression.m_type != Compression::NONE) { + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_UNSUPPORTED_EXTENSION, + "InnODB: Attribute not supported with row " + "format compressed or key block size > 0"); + + ret = "COMPRESSION"; + } + + } +#endif /* MYSQL_COMPRESSION */ + + return(ret); +} + +/*****************************************************************//** +Check engine specific table options not handled by SQL-parser. 
+@return NULL if valid, string if not */ +UNIV_INTERN +const char* +create_table_info_t::check_table_options() +{ + enum row_type row_format = m_form->s->row_type; + ha_table_option_struct *options= m_form->s->option_struct; + atomic_writes_t awrites = (atomic_writes_t)options->atomic_writes; + fil_encryption_t encrypt = (fil_encryption_t)options->encryption; + + if (encrypt != FIL_SPACE_ENCRYPTION_DEFAULT && !m_allow_file_per_table) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: ENCRYPTED requires innodb_file_per_table"); + return "ENCRYPTED"; + } + + if (encrypt == FIL_SPACE_ENCRYPTION_OFF && srv_encrypt_tables == 2) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: ENCRYPTED=OFF cannot be used when innodb_encrypt_tables=FORCE"); + return "ENCRYPTED"; + } + + /* Check page compression requirements */ + if (options->page_compressed) { + + if (row_format == ROW_TYPE_COMPRESSED) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSED table can't have" + " ROW_TYPE=COMPRESSED"); + return "PAGE_COMPRESSED"; + } + + if (row_format == ROW_TYPE_REDUNDANT) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSED table can't have" + " ROW_TYPE=REDUNDANT"); + return "PAGE_COMPRESSED"; + } + + if (!m_allow_file_per_table) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSED requires" + " innodb_file_per_table."); + return "PAGE_COMPRESSED"; + } + + if (srv_file_format < UNIV_FORMAT_B) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSED requires" + " innodb_file_format > Antelope."); + return "PAGE_COMPRESSED"; + } + + if (m_create_info->key_block_size) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSED table can't have" + " key_block_size"); + return "PAGE_COMPRESSED"; + } + } + + /* Check page compression level requirements, some of them are + already checked above */ + if (options->page_compression_level != 0) { + if (options->page_compressed == false) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: PAGE_COMPRESSION_LEVEL requires" + " PAGE_COMPRESSED"); + return "PAGE_COMPRESSION_LEVEL"; + } + + if (options->page_compression_level < 1 || options->page_compression_level > 9) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: invalid PAGE_COMPRESSION_LEVEL = %lu." 
+ " Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9]", + options->page_compression_level); + return "PAGE_COMPRESSION_LEVEL"; + } + } + + /* If encryption is set up make sure that used key_id is found */ + if (encrypt == FIL_SPACE_ENCRYPTION_ON || + (encrypt == FIL_SPACE_ENCRYPTION_DEFAULT && srv_encrypt_tables)) { + if (!encryption_key_id_exists((unsigned int)options->encryption_key_id)) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: ENCRYPTION_KEY_ID %u not available", + (uint)options->encryption_key_id + ); + return "ENCRYPTION_KEY_ID"; + } + } + + /* Ignore nondefault key_id if encryption is set off */ + if (encrypt == FIL_SPACE_ENCRYPTION_OFF && + options->encryption_key_id != THDVAR(m_thd, default_encryption_key_id)) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: Ignored ENCRYPTION_KEY_ID %u when encryption is disabled", + (uint)options->encryption_key_id + ); + options->encryption_key_id = FIL_DEFAULT_ENCRYPTION_KEY; + } + + /* If default encryption is used make sure that used kay is found + from key file. */ + if (encrypt == FIL_SPACE_ENCRYPTION_DEFAULT && + !srv_encrypt_tables && + options->encryption_key_id != FIL_DEFAULT_ENCRYPTION_KEY) { + if (!encryption_key_id_exists((unsigned int)options->encryption_key_id)) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: ENCRYPTION_KEY_ID %u not available", + (uint)options->encryption_key_id + ); + return "ENCRYPTION_KEY_ID"; + + } + } + + /* Check atomic writes requirements */ + if (awrites == ATOMIC_WRITES_ON || + (awrites == ATOMIC_WRITES_DEFAULT && srv_use_atomic_writes)) { + if (!m_allow_file_per_table) { + push_warning( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_WRONG_CREATE_OPTION, + "InnoDB: ATOMIC_WRITES requires" + " innodb_file_per_table."); + return "ATOMIC_WRITES"; + } + } + + return NULL; } /*****************************************************************//** Update create_info. Used in SHOW CREATE TABLE et al. */ -UNIV_INTERN + void ha_innobase::update_create_info( /*============================*/ HA_CREATE_INFO* create_info) /*!< in/out: create info */ { if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) { - ha_innobase::info(HA_STATUS_AUTO); + info(HA_STATUS_AUTO); create_info->auto_increment_value = stats.auto_increment_value; } /* Update the DATA DIRECTORY name from SYS_DATAFILES. */ - dict_get_and_save_data_dir_path(prebuilt->table, false); + dict_get_and_save_data_dir_path(m_prebuilt->table, false); + + if (m_prebuilt->table->data_dir_path) { + create_info->data_file_name = m_prebuilt->table->data_dir_path; + } + + /* Update the TABLESPACE name from the Data Dictionary. */ + dict_get_and_save_space_name(m_prebuilt->table, false); - if (prebuilt->table->data_dir_path) { - create_info->data_file_name = prebuilt->table->data_dir_path; + /* Put this tablespace name into the create_info structure so that + SHOW CREATE TABLE will display TABLESPACE=name. This also affects + an ALTER TABLE which must know the current TABLESPACE so that the + table will stay there. 
*/ + if (m_prebuilt->table->tablespace != NULL + && create_info->tablespace == NULL) { + create_info->tablespace = m_prebuilt->table->tablespace; } } /*****************************************************************//** Initialize the table FTS stopword list @return TRUE if success */ -UNIV_INTERN ibool innobase_fts_load_stopword( /*=======================*/ @@ -11268,29 +13193,17 @@ innobase_fts_load_stopword( THDVAR(thd, ft_enable_stopword), FALSE)); } -/*****************************************************************//** -Parses the table name into normal name and either temp path or remote path +/** Parse the table name into normal name and either temp path or remote path if needed. -@return 0 if successful, otherwise, error number */ -UNIV_INTERN +@param[in] name Table name (db/table or full path). +@return 0 if successful, otherwise, error number */ int -ha_innobase::parse_table_name( -/*==========================*/ - const char* name, /*!< in/out: table name provided*/ - HA_CREATE_INFO* create_info, /*!< in: more information of the - created table, contains also the - create statement string */ - ulint flags, /*!< in: flags*/ - ulint flags2, /*!< in: flags2*/ - char* norm_name, /*!< out: normalized table name */ - char* temp_path, /*!< out: absolute path of table */ - char* remote_path) /*!< out: remote path of table */ -{ - THD* thd = ha_thd(); - bool use_tablespace = flags2 & DICT_TF2_USE_TABLESPACE; - DBUG_ENTER("ha_innobase::parse_table_name"); +create_table_info_t::parse_table_name( + const char* name) +{ + DBUG_ENTER("parse_table_name"); -#ifdef __WIN__ +#ifdef _WIN32 /* Names passed in from server are in two formats: 1. /: for normal table creation 2. full path: for temp table creation, or DATA DIRECTORY. @@ -11302,9 +13215,9 @@ ha_innobase::parse_table_name( returns error if it is in full path format, but not creating a temp. table. Currently InnoDB does not support symbolic link on Windows. */ - if (use_tablespace + if (m_innodb_file_per_table && !mysqld_embedded - && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) { + && !(m_create_info->options & HA_LEX_CREATE_TMP_TABLE)) { if ((name[1] == ':') || (name[0] == '\\' && name[1] == '\\')) { @@ -11314,83 +13227,77 @@ ha_innobase::parse_table_name( } #endif - normalize_table_name(norm_name, name); - temp_path[0] = '\0'; - remote_path[0] = '\0'; + m_temp_path[0] = '\0'; + m_remote_path[0] = '\0'; + m_tablespace[0] = '\0'; - /* A full path is used for TEMPORARY TABLE and DATA DIRECTORY. - In the case of; - CREATE TEMPORARY TABLE ... DATA DIRECTORY={path} ... ; - We ignore the DATA DIRECTORY. */ - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - strncpy(temp_path, name, FN_REFLEN - 1); + /* A full path is provided by the server for TEMPORARY tables not + targeted for a tablespace or when DATA DIRECTORY is given. + So these two are not compatible. Likewise, DATA DIRECTORY is not + compatible with a TABLESPACE assignment. */ + if ((m_create_info->options & HA_LEX_CREATE_TMP_TABLE) + && !m_use_shared_space) { + strncpy(m_temp_path, name, FN_REFLEN - 1); } - if (create_info->data_file_name) { - bool ignore = false; - - /* Use DATA DIRECTORY only with file-per-table. */ - if (!use_tablespace) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: DATA DIRECTORY requires" - " innodb_file_per_table."); - ignore = true; - } - - /* Do not use DATA DIRECTORY with TEMPORARY TABLE. 
*/ - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: DATA DIRECTORY cannot be" - " used for TEMPORARY tables."); - ignore = true; - } - - if (ignore) { - my_error(WARN_OPTION_IGNORED, ME_JUST_WARNING, + /* Make sure DATA DIRECTORY is compatible with other options + and set the remote path. In the case of either; + CREATE TEMPORARY TABLE ... DATA DIRECTORY={path} ... ; + CREATE TABLE ... DATA DIRECTORY={path} TABLESPACE={name}... ; + we ignore the DATA DIRECTORY. */ + if (m_create_info->data_file_name + && m_create_info->data_file_name[0] != '\0') { + if (!create_option_data_directory_is_valid()) { + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + WARN_OPTION_IGNORED, + ER_DEFAULT(WARN_OPTION_IGNORED), "DATA DIRECTORY"); + + m_flags &= ~DICT_TF_MASK_DATA_DIR; } else { - strncpy(remote_path, create_info->data_file_name, + strncpy(m_remote_path, + m_create_info->data_file_name, FN_REFLEN - 1); } } - if (create_info->index_file_name) { + if (m_create_info->index_file_name) { my_error(WARN_OPTION_IGNORED, ME_JUST_WARNING, "INDEX DIRECTORY"); } + /* The TABLESPACE designation has already been validated by + create_option_tablespace_is_valid() irregardless of strict-mode. + So it only needs to be copied now. */ + if (m_use_shared_space) { + strncpy(m_tablespace, m_create_info->tablespace, + NAME_LEN - 1); + } + DBUG_RETURN(0); } -/*****************************************************************//** -Determines InnoDB table flags. +/** Determine InnoDB table flags. +If strict_mode=OFF, this will adjust the flags to what should be assumed. +However, if an existing general tablespace is being targeted, we will NOT +assume anything or adjust these flags. @retval true if successful, false if error */ -UNIV_INTERN bool -innobase_table_flags( -/*=================*/ - const TABLE* form, /*!< in: table */ - const HA_CREATE_INFO* create_info, /*!< in: information - on table columns and indexes */ - THD* thd, /*!< in: connection */ - bool use_tablespace, /*!< in: whether to create - outside system tablespace */ - ulint* flags, /*!< out: DICT_TF flags */ - ulint* flags2) /*!< out: DICT_TF2 flags */ +create_table_info_t::innobase_table_flags() { DBUG_ENTER("innobase_table_flags"); const char* fts_doc_id_index_bad = NULL; bool zip_allowed = true; ulint zip_ssize = 0; - enum row_type row_format; - rec_format_t innodb_row_format = REC_FORMAT_COMPACT; - bool use_data_dir; - ha_table_option_struct *options= form->s->option_struct; + enum row_type row_type; + rec_format_t innodb_row_format = + get_row_format(innodb_default_row_format); + + const ulint zip_ssize_max = + ut_min(static_cast(UNIV_PAGE_SSIZE_MAX), + static_cast(PAGE_ZIP_SSIZE_MAX)); /* Cache the value of innodb_file_format, in case it is modified by another thread while the table is being created. */ @@ -11400,19 +13307,21 @@ innobase_table_flags( modified by another thread while the table is being created. */ const ulint default_compression_level = page_zip_level; - *flags = 0; - *flags2 = 0; + ha_table_option_struct *options= m_form->s->option_struct; + + m_flags = 0; + m_flags2 = 0; /* Check if there are any FTS indexes defined on this table. 
*/ - for (uint i = 0; i < form->s->keys; i++) { - const KEY* key = &form->key_info[i]; + for (uint i = 0; i < m_form->s->keys; i++) { + const KEY* key = &m_form->key_info[i]; if (key->flags & HA_FULLTEXT) { - *flags2 |= DICT_TF2_FTS; + m_flags2 |= DICT_TF2_FTS; /* We don't support FTS indexes in temporary tables. */ - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { + if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE) { my_error(ER_INNODB_NO_FT_TEMP_TABLE, MYF(0)); DBUG_RETURN(false); @@ -11423,14 +13332,19 @@ innobase_table_flags( DBUG_RETURN(false); } - if (key->flags & HA_USES_PARSER) { - my_error(ER_INNODB_NO_FT_USES_PARSER, MYF(0)); - DBUG_RETURN(false); - } - if (fts_doc_id_index_bad) { goto index_bad; } + } else if (key->flags & HA_SPATIAL) { + if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE + /* JAN: TODO: MySQL 5.7 + && m_create_info->options + & HA_LEX_CREATE_INTERNAL_TMP_TABLE + */ + && !m_use_file_per_table) { + my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0)); + DBUG_RETURN(false); + } } if (innobase_strcasecmp(key->name, FTS_DOC_ID_INDEX_NAME)) { @@ -11445,7 +13359,7 @@ innobase_table_flags( fts_doc_id_index_bad = key->name; } - if (fts_doc_id_index_bad && (*flags2 & DICT_TF2_FTS)) { + if (fts_doc_id_index_bad && (m_flags2 & DICT_TF2_FTS)) { index_bad: my_error(ER_INNODB_FT_WRONG_DOCID_INDEX, MYF(0), fts_doc_id_index_bad); @@ -11453,61 +13367,92 @@ index_bad: } } - row_format = form->s->row_type; + //rec_format_t row_format = m_form->s->row_type; + ulint clen = 0; - if (create_info->key_block_size) { + if (m_create_info->key_block_size > 0) { /* The requested compressed page size (key_block_size) is given in kilobytes. If it is a valid number, store that value as the number of log2 shifts from 512 in zip_ssize. Zero means it is not compressed. */ - ulint zssize; /* Zip Shift Size */ - ulint kbsize; /* Key Block Size */ + ulint zssize; /* Zip Shift Size */ + ulint kbsize; /* Key Block Size */ for (zssize = kbsize = 1; - zssize <= ut_min(UNIV_PAGE_SSIZE_MAX, - PAGE_ZIP_SSIZE_MAX); + zssize <= zip_ssize_max; zssize++, kbsize <<= 1) { - if (kbsize == create_info->key_block_size) { + if (kbsize == m_create_info->key_block_size) { zip_ssize = zssize; break; } } /* Make sure compressed row format is allowed. 
*/ - if (!use_tablespace) { + if (!m_allow_file_per_table && !m_use_shared_space) { push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: KEY_BLOCK_SIZE requires" " innodb_file_per_table."); - zip_allowed = FALSE; + zip_allowed = false; } - if (file_format_allowed < UNIV_FORMAT_B) { + if (file_format_allowed < UNIV_FORMAT_B + && !m_use_shared_space) { push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: KEY_BLOCK_SIZE requires" " innodb_file_format > Antelope."); - zip_allowed = FALSE; + zip_allowed = false; } if (!zip_allowed - || zssize > ut_min(UNIV_PAGE_SSIZE_MAX, - PAGE_ZIP_SSIZE_MAX)) { + || zssize > zip_ssize_max) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: ignoring KEY_BLOCK_SIZE=%lu.", - create_info->key_block_size); + m_create_info->key_block_size); + } + + } else if (clen > 0) { + // JAN: TODO: MySQL 5.7 + //} else if (m_create_info->compress.length > 0) { + + if (m_use_shared_space + || (m_create_info->options & HA_LEX_CREATE_TMP_TABLE)) { + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: Cannot compress pages of shared " + "tablespaces"); + } + + //const char* compression = m_create_info->compress.str; + const char* compression = ""; + + if (Compression::validate(compression) != DB_SUCCESS) { + + push_warning_printf( + m_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "InnoDB: Unsupported compression " + "algorithm '%s'", + compression); } } + row_type = m_form->s->row_type; + if (zip_ssize && zip_allowed) { /* if ROW_FORMAT is set to default, - automatically change it to COMPRESSED.*/ - if (row_format == ROW_TYPE_DEFAULT) { - row_format = ROW_TYPE_COMPRESSED; - } else if (row_format != ROW_TYPE_COMPRESSED) { + automatically change it to COMPRESSED. */ + if (row_type == ROW_TYPE_DEFAULT) { + row_type = ROW_TYPE_COMPRESSED; + } else if (row_type != ROW_TYPE_COMPRESSED) { /* ROW_FORMAT other than COMPRESSED ignores KEY_BLOCK_SIZE. It does not make sense to reject conflicting @@ -11515,47 +13460,53 @@ index_bad: such combinations can be obtained with ALTER TABLE anyway. */ push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: ignoring KEY_BLOCK_SIZE=%lu" " unless ROW_FORMAT=COMPRESSED.", - create_info->key_block_size); - zip_allowed = FALSE; + m_create_info->key_block_size); + zip_allowed = false; } } else { - /* zip_ssize == 0 means no KEY_BLOCK_SIZE.*/ - if (row_format == ROW_TYPE_COMPRESSED && zip_allowed) { + /* zip_ssize == 0 means no KEY_BLOCK_SIZE. */ + if (row_type == ROW_TYPE_COMPRESSED && zip_allowed) { /* ROW_FORMAT=COMPRESSED without KEY_BLOCK_SIZE implies half the maximum KEY_BLOCK_SIZE(*1k) or UNIV_PAGE_SIZE, whichever is less. */ - zip_ssize = ut_min(UNIV_PAGE_SSIZE_MAX, - PAGE_ZIP_SSIZE_MAX) - 1; + zip_ssize = zip_ssize_max - 1; } } /* Validate the row format. 
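
/* Illustrative sketch, not part of the patch: how a KEY_BLOCK_SIZE given in
kilobytes maps to a "zip shift size" (number of log2 shifts from 512), which
is what the loop added above computes before deciding whether compression is
allowed. The constant and function names below are invented for the example;
the real bounds come from UNIV_PAGE_SSIZE_MAX and PAGE_ZIP_SSIZE_MAX. */

#include <cstdio>

/* Hypothetical stand-in for the real zip_ssize_max (allows up to 16K). */
static const unsigned zip_ssize_max_example = 5;

/* Return the zip shift size for a KEY_BLOCK_SIZE in KB, or 0 if invalid. */
static unsigned
key_block_size_to_zip_ssize(unsigned key_block_size_kb)
{
        unsigned zssize;        /* zip shift size */
        unsigned kbsize;        /* key block size in KB */

        for (zssize = 1, kbsize = 1;
             zssize <= zip_ssize_max_example;
             zssize++, kbsize <<= 1) {
                if (kbsize == key_block_size_kb) {
                        /* 512 << zssize == key block size in bytes */
                        return(zssize);
                }
        }

        return(0);      /* not a power of two in range: ignored */
}

int main()
{
        for (unsigned kb = 1; kb <= 32; kb <<= 1) {
                printf("KEY_BLOCK_SIZE=%u -> zip_ssize=%u\n",
                       kb, key_block_size_to_zip_ssize(kb));
        }
        return 0;
}
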
Correct it if necessary */ - switch (row_format) { + + switch (row_type) { case ROW_TYPE_REDUNDANT: innodb_row_format = REC_FORMAT_REDUNDANT; break; + case ROW_TYPE_COMPACT: + innodb_row_format = REC_FORMAT_COMPACT; + break; case ROW_TYPE_COMPRESSED: - case ROW_TYPE_DYNAMIC: - if (!use_tablespace) { + /* ROW_FORMAT=COMPRESSED requires file_per_table and + file_format=Barracuda unless there is a target tablespace. */ + if (!m_allow_file_per_table + && !m_use_shared_space) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: ROW_FORMAT=%s requires" - " innodb_file_per_table.", - get_row_format_name(row_format)); - } else if (file_format_allowed == UNIV_FORMAT_A) { + "InnoDB: ROW_FORMAT=COMPRESSED requires" + " innodb_file_per_table."); + + } else if (file_format_allowed == UNIV_FORMAT_A + && !m_use_shared_space) { push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: ROW_FORMAT=%s requires" - " innodb_file_format > Antelope.", - get_row_format_name(row_format)); + "InnoDB: ROW_FORMAT=COMPRESSED requires" + " innodb_file_format > Antelope."); + } else { - switch(row_format) { + switch(row_type) { case ROW_TYPE_COMPRESSED: innodb_row_format = REC_FORMAT_COMPRESSED; break; @@ -11568,31 +13519,30 @@ index_bad: } break; /* Correct row_format */ } - zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + zip_allowed = false; + /* fall through to set row_type = DYNAMIC */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: assuming ROW_FORMAT=COMPACT."); - case ROW_TYPE_DEFAULT: - /* If we fell through, set row format to Compact. */ - row_format = ROW_TYPE_COMPACT; - case ROW_TYPE_COMPACT: + "InnoDB: assuming ROW_FORMAT=DYNAMIC."); + case ROW_TYPE_DYNAMIC: + innodb_row_format = REC_FORMAT_DYNAMIC; break; + case ROW_TYPE_DEFAULT: + ; } /* Don't support compressed table when page size > 16k. */ if (zip_allowed && zip_ssize && UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - "InnoDB: Cannot create a COMPRESSED table" - " when innodb_page_size > 16k." - " Assuming ROW_FORMAT=COMPACT."); - zip_allowed = FALSE; + push_warning(m_thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Cannot create a COMPRESSED table" + " when innodb_page_size > 16k." + " Assuming ROW_FORMAT=DYNAMIC."); + zip_allowed = false; } /* Set the table flags */ @@ -11600,386 +13550,409 @@ index_bad: zip_ssize = 0; } - use_data_dir = use_tablespace - && ((create_info->data_file_name != NULL) - && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)); - - /* Set up table dictionary flags */ - dict_tf_set(flags, - innodb_row_format, - zip_ssize, - use_data_dir, - options->page_compressed, - options->page_compression_level == 0 ? - default_compression_level : options->page_compression_level, - options->atomic_writes); - - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - *flags2 |= DICT_TF2_TEMPORARY; + if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE) { + m_flags2 |= DICT_TF2_TEMPORARY; + /* Intrinsic tables reside only in the shared temporary + tablespace and we will always use ROW_FORMAT=DYNAMIC. 
*/ + /* JAN: TODO: MySQL 5.7 + if ((m_create_info->options & HA_LEX_CREATE_INTERNAL_TMP_TABLE) + && !m_use_file_per_table) { + if (!m_use_file_per_table) { + */ + /* We do not allow compressed instrinsic + temporary tables. */ + /* + ut_ad(zip_ssize == 0); + m_flags2 |= DICT_TF2_INTRINSIC; + innodb_row_format = REC_FORMAT_DYNAMIC; + } + */ } - if (use_tablespace) { - *flags2 |= DICT_TF2_USE_TABLESPACE; + /* Set the table flags */ + dict_tf_set(&m_flags, innodb_row_format, zip_ssize, + m_use_data_dir, m_use_shared_space, + options->page_compressed, + options->page_compression_level == 0 ? + default_compression_level : options->page_compression_level, + options->atomic_writes); + + if (m_use_file_per_table) { + ut_ad(!m_use_shared_space); + m_flags2 |= DICT_TF2_USE_FILE_PER_TABLE; } /* Set the flags2 when create table or alter tables */ - *flags2 |= DICT_TF2_FTS_AUX_HEX_NAME; + m_flags2 |= DICT_TF2_FTS_AUX_HEX_NAME; DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name", - *flags2 &= ~DICT_TF2_FTS_AUX_HEX_NAME;); + m_flags2 &= ~DICT_TF2_FTS_AUX_HEX_NAME;); DBUG_RETURN(true); } +/** Parse MERGE_THRESHOLD value from the string. +@param[in] thd connection +@param[in] str string which might include 'MERGE_THRESHOLD=' +@return value parsed. 0 means not found or invalid value. */ +static +ulint +innobase_parse_merge_threshold( + THD* thd, + const char* str) +{ + static const char* label = "MERGE_THRESHOLD="; + static const size_t label_len = strlen(label); + const char* pos = str; -/*****************************************************************//** -Check engine specific table options not handled by SQL-parser. -@return NULL if valid, string if not */ -UNIV_INTERN -const char* -ha_innobase::check_table_options( - THD *thd, /*!< in: thread handle */ - TABLE* table, /*!< in: information on table - columns and indexes */ - HA_CREATE_INFO* create_info, /*!< in: more information of the - created table, contains also the - create statement string */ - const bool use_tablespace, /*!< in: use file par table */ - const ulint file_format) -{ - enum row_type row_format = table->s->row_type; - ha_table_option_struct *options= table->s->option_struct; - atomic_writes_t awrites = (atomic_writes_t)options->atomic_writes; - fil_encryption_t encrypt = (fil_encryption_t)options->encryption; + pos = strstr(str, label); - if (encrypt != FIL_SPACE_ENCRYPTION_DEFAULT && !use_tablespace) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: ENCRYPTED requires innodb_file_per_table"); - return "ENCRYPTED"; - } + if (pos == NULL) { + return(0); + } - if (encrypt == FIL_SPACE_ENCRYPTION_OFF && srv_encrypt_tables == 2) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: ENCRYPTED=OFF cannot be used when innodb_encrypt_tables=FORCE"); - return "ENCRYPTED"; + pos += label_len; + + lint ret = atoi(pos); + + if (ret > 0 && ret <= 50) { + return(static_cast(ret)); } - /* Check page compression requirements */ - if (options->page_compressed) { + push_warning_printf( + thd, Sql_condition::WARN_LEVEL_WARN, + ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Invalid value for MERGE_THRESHOLD in the CREATE TABLE" + " statement. 
The value is ignored."); - if (row_format == ROW_TYPE_COMPRESSED) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSED table can't have" - " ROW_TYPE=COMPRESSED"); - return "PAGE_COMPRESSED"; - } + return(0); +} - if (row_format == ROW_TYPE_REDUNDANT) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSED table can't have" - " ROW_TYPE=REDUNDANT"); - return "PAGE_COMPRESSED"; - } +/** Parse hint for table and its indexes, and update the information +in dictionary. +@param[in] thd connection +@param[in,out] table target table +@param[in] table_share table definition */ +void +innobase_parse_hint_from_comment( + THD* thd, + dict_table_t* table, + const TABLE_SHARE* table_share) +{ + ulint merge_threshold_table; + ulint merge_threshold_index[MAX_KEY]; + bool is_found[MAX_KEY]; + + if (table_share->comment.str != NULL) { + merge_threshold_table + = innobase_parse_merge_threshold( + thd, table_share->comment.str); + } else { + merge_threshold_table = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; + } - if (!use_tablespace) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSED requires" - " innodb_file_per_table."); - return "PAGE_COMPRESSED"; - } + if (merge_threshold_table == 0) { + merge_threshold_table = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; + } - if (file_format < UNIV_FORMAT_B) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSED requires" - " innodb_file_format > Antelope."); - return "PAGE_COMPRESSED"; + for (uint i = 0; i < table_share->keys; i++) { + KEY* key_info = &table_share->key_info[i]; + + ut_ad(i < sizeof(merge_threshold_index) + / sizeof(merge_threshold_index[0])); + + if (key_info->flags & HA_USES_COMMENT + && key_info->comment.str != NULL) { + merge_threshold_index[i] + = innobase_parse_merge_threshold( + thd, key_info->comment.str); + } else { + merge_threshold_index[i] = merge_threshold_table; } - if (create_info->key_block_size) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSED table can't have" - " key_block_size"); - return "PAGE_COMPRESSED"; + if (merge_threshold_index[i] == 0) { + merge_threshold_index[i] = merge_threshold_table; } } - /* Check page compression level requirements, some of them are - already checked above */ - if (options->page_compression_level != 0) { - if (options->page_compressed == false) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: PAGE_COMPRESSION_LEVEL requires" - " PAGE_COMPRESSED"); - return "PAGE_COMPRESSION_LEVEL"; + /* update SYS_INDEX table */ + if (!dict_table_is_temporary(table)) { + for (uint i = 0; i < table_share->keys; i++) { + is_found[i] = false; } - if (options->page_compression_level < 1 || options->page_compression_level > 9) { - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: invalid PAGE_COMPRESSION_LEVEL = %lu." 
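
/* Illustrative sketch, not part of the patch: parsing a MERGE_THRESHOLD=<n>
hint out of a table or index COMMENT string, as the helper added above does.
Only values in 1..50 are accepted; 0 signals "not found or invalid" and the
caller falls back to the default. All identifiers here are invented for the
example. */

#include <cstdio>
#include <cstdlib>
#include <cstring>

static const unsigned long merge_threshold_default_example = 50;

static unsigned long
parse_merge_threshold_example(const char* comment)
{
        static const char label[] = "MERGE_THRESHOLD=";
        const char* pos = comment ? strstr(comment, label) : NULL;

        if (pos == NULL) {
                return(0);      /* hint not present */
        }

        long val = atol(pos + sizeof(label) - 1);

        /* Only 1..50 is meaningful. */
        return(val > 0 && val <= 50 ? (unsigned long) val : 0);
}

int main()
{
        const char* comments[] = {
                "MERGE_THRESHOLD=35",
                "some text MERGE_THRESHOLD=90",         /* out of range */
                "no hint here",
        };

        for (size_t i = 0; i < sizeof(comments) / sizeof(comments[0]); i++) {
                unsigned long v = parse_merge_threshold_example(comments[i]);
                printf("'%s' -> %lu\n", comments[i],
                       v ? v : merge_threshold_default_example);
        }
        return 0;
}
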
- " Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9]", - options->page_compression_level); - return "PAGE_COMPRESSION_LEVEL"; - } - } + for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { - /* If encryption is set up make sure that used key_id is found */ - if (encrypt == FIL_SPACE_ENCRYPTION_ON || - (encrypt == FIL_SPACE_ENCRYPTION_DEFAULT && srv_encrypt_tables)) { - if (!encryption_key_id_exists((unsigned int)options->encryption_key_id)) { - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: ENCRYPTION_KEY_ID %u not available", - (uint)options->encryption_key_id - ); - return "ENCRYPTION_KEY_ID"; + if (dict_index_is_auto_gen_clust(index)) { + + /* GEN_CLUST_INDEX should use + merge_threshold_table */ + dict_index_set_merge_threshold( + index, merge_threshold_table); + continue; + } + + for (uint i = 0; i < table_share->keys; i++) { + if (is_found[i]) { + continue; + } + + KEY* key_info = &table_share->key_info[i]; + + if (innobase_strcasecmp( + index->name, key_info->name) == 0) { + + dict_index_set_merge_threshold( + index, + merge_threshold_index[i]); + is_found[i] = true; + break; + } + } } } - /* Ignore nondefault key_id if encryption is set off */ - if (encrypt == FIL_SPACE_ENCRYPTION_OFF && - options->encryption_key_id != THDVAR(thd, default_encryption_key_id)) { - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: Ignored ENCRYPTION_KEY_ID %u when encryption is disabled", - (uint)options->encryption_key_id - ); - options->encryption_key_id = FIL_DEFAULT_ENCRYPTION_KEY; + for (uint i = 0; i < table_share->keys; i++) { + is_found[i] = false; } - /* If default encryption is used make sure that used kay is found - from key file. 
*/ - if (encrypt == FIL_SPACE_ENCRYPTION_DEFAULT && - !srv_encrypt_tables && - options->encryption_key_id != FIL_DEFAULT_ENCRYPTION_KEY) { - if (!encryption_key_id_exists((unsigned int)options->encryption_key_id)) { - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: ENCRYPTION_KEY_ID %u not available", - (uint)options->encryption_key_id - ); - return "ENCRYPTION_KEY_ID"; + /* update in memory */ + for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + + if (dict_index_is_auto_gen_clust(index)) { + + /* GEN_CLUST_INDEX should use merge_threshold_table */ + /* x-lock index is needed to exclude concurrent + pessimistic tree operations */ + rw_lock_x_lock(dict_index_get_lock(index)); + index->merge_threshold = merge_threshold_table; + rw_lock_x_unlock(dict_index_get_lock(index)); + + continue; } - } - /* Check atomic writes requirements */ - if (awrites == ATOMIC_WRITES_ON || - (awrites == ATOMIC_WRITES_DEFAULT && srv_use_atomic_writes)) { - if (!use_tablespace) { - push_warning( - thd, Sql_condition::WARN_LEVEL_WARN, - HA_WRONG_CREATE_OPTION, - "InnoDB: ATOMIC_WRITES requires" - " innodb_file_per_table."); - return "ATOMIC_WRITES"; + for (uint i = 0; i < table_share->keys; i++) { + if (is_found[i]) { + continue; + } + + KEY* key_info = &table_share->key_info[i]; + + if (innobase_strcasecmp( + index->name, key_info->name) == 0) { + + /* x-lock index is needed to exclude concurrent + pessimistic tree operations */ + rw_lock_x_lock(dict_index_get_lock(index)); + index->merge_threshold + = merge_threshold_index[i]; + rw_lock_x_unlock(dict_index_get_lock(index)); + is_found[i] = true; + + break; + } } } - - return 0; } -/*****************************************************************//** -Creates a new table to an InnoDB database. -@return error number */ -UNIV_INTERN +/** Set m_use_* flags. */ +void +create_table_info_t::set_tablespace_type( + bool table_being_altered_is_file_per_table) +{ + /* Note whether this table will be created using a shared, + general or system tablespace. */ + m_use_shared_space = tablespace_is_shared_space(m_create_info); + + /** Allow file_per_table for this table either because: + 1) the setting innodb_file_per_table=on, + 2) the table being altered is currently file_per_table + 3) explicitly requested by tablespace=innodb_file_per_table. */ + m_allow_file_per_table = + m_innodb_file_per_table + || table_being_altered_is_file_per_table + || tablespace_is_file_per_table(m_create_info); + + /* All noncompresed temporary tables will be put into the + system temporary tablespace. */ + bool is_noncompressed_temporary = + m_create_info->options & HA_LEX_CREATE_TMP_TABLE + && !(m_create_info->row_type == ROW_TYPE_COMPRESSED + || m_create_info->key_block_size > 0); + + /* Ignore the current innodb-file-per-table setting if we are + creating a temporary, non-compressed table or if the + TABLESPACE= phrase is using an existing shared tablespace. */ + m_use_file_per_table = + m_allow_file_per_table + && !is_noncompressed_temporary + && !m_use_shared_space; + + /* DATA DIRECTORY must have m_use_file_per_table but cannot be + used with TEMPORARY tables. */ + m_use_data_dir = + m_use_file_per_table + && !(m_create_info->options & HA_LEX_CREATE_TMP_TABLE) + && (m_create_info->data_file_name != NULL) + && (m_create_info->data_file_name[0] != '\0'); + ut_ad(!(m_use_shared_space && m_use_data_dir)); +} + +/** Initialize the create_table_info_t object. 
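
/* Illustrative sketch, not part of the patch: the decision logic behind
set_tablespace_type() above, reduced to plain booleans so the interaction of
the m_use_* flags is easier to follow. All names here are invented for the
example; the real inputs are the create options and the global
innodb_file_per_table setting. */

#include <cstdio>

struct tablespace_choice {
        bool use_shared_space;
        bool use_file_per_table;
        bool use_data_dir;
};

static tablespace_choice
choose_tablespace_example(
        bool innodb_file_per_table,     /* global setting */
        bool altered_is_file_per_table, /* table being altered */
        bool explicit_file_per_table,   /* TABLESPACE=innodb_file_per_table */
        bool targets_shared_space,      /* TABLESPACE=<general tablespace> */
        bool is_temporary,
        bool is_compressed,             /* ROW_FORMAT=COMPRESSED or KBS > 0 */
        bool has_data_directory)
{
        tablespace_choice c;

        c.use_shared_space = targets_shared_space;

        bool allow_file_per_table = innodb_file_per_table
                || altered_is_file_per_table
                || explicit_file_per_table;

        /* Non-compressed temporary tables go to the shared temporary
        tablespace, so they never get their own .ibd file. */
        bool noncompressed_temporary = is_temporary && !is_compressed;

        c.use_file_per_table = allow_file_per_table
                && !noncompressed_temporary
                && !c.use_shared_space;

        /* DATA DIRECTORY needs file-per-table and is not allowed for
        temporary tables. */
        c.use_data_dir = c.use_file_per_table
                && !is_temporary
                && has_data_directory;

        return c;
}

int main()
{
        tablespace_choice c = choose_tablespace_example(
                true,   /* innodb_file_per_table=ON */
                false, false, false,
                false,  /* not temporary */
                false,  /* not compressed */
                true    /* DATA DIRECTORY given */);

        printf("file_per_table=%d data_dir=%d shared=%d\n",
               c.use_file_per_table, c.use_data_dir, c.use_shared_space);
        return 0;
}
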
+@return error number */ int -ha_innobase::create( -/*================*/ - const char* name, /*!< in: table name */ - TABLE* form, /*!< in: information on table - columns and indexes */ - HA_CREATE_INFO* create_info) /*!< in: more information of the - created table, contains also the - create statement string */ +create_table_info_t::initialize() { - int error; trx_t* parent_trx; - trx_t* trx; - int primary_key_no; - uint i; - char norm_name[FN_REFLEN]; /* {database}/{tablename} */ - char temp_path[FN_REFLEN]; /* absolute path of temp frm */ - char remote_path[FN_REFLEN]; /* absolute path of table */ - THD* thd = ha_thd(); - ib_int64_t auto_inc_value; - - /* Cache the global variable "srv_file_per_table" to a local - variable before using it. Note that "srv_file_per_table" - is not under dict_sys mutex protection, and could be changed - while creating the table. So we read the current value here - and make all further decisions based on this. */ - bool use_tablespace = srv_file_per_table; - const ulint file_format = srv_file_format; - - /* Zip Shift Size - log2 - 9 of compressed page size, - zero for uncompressed */ - ulint flags; - ulint flags2; - dict_table_t* innobase_table = NULL; - - const char* stmt; - size_t stmt_len; - /* Cache table options */ - ha_table_option_struct *options= form->s->option_struct; - fil_encryption_t encrypt = (fil_encryption_t)options->encryption; - uint key_id = (uint)options->encryption_key_id; - DBUG_ENTER("ha_innobase::create"); + DBUG_ENTER("create_table_info_t::initialize"); - DBUG_ASSERT(thd != NULL); - DBUG_ASSERT(create_info != NULL); + ut_ad(m_thd != NULL); + ut_ad(m_create_info != NULL); - if (form->s->stored_fields > REC_MAX_N_USER_FIELDS) { + if (m_form->s->fields > REC_MAX_N_USER_FIELDS) { DBUG_RETURN(HA_ERR_TOO_MANY_FIELDS); - } else if (high_level_read_only) { - DBUG_RETURN(HA_ERR_TABLE_READONLY); } - /* Create the table definition in InnoDB */ + ut_ad(m_form->s->row_type == m_create_info->row_type); - /* Validate table options not handled by the SQL-parser */ - if(check_table_options(thd, form, create_info, use_tablespace, - file_format)) { - DBUG_RETURN(HA_WRONG_CREATE_OPTION); + /* Check for name conflicts (with reserved name) for + any user indices to be created. */ + if (innobase_index_name_is_reserved(m_thd, m_form->key_info, + m_form->s->keys)) { + DBUG_RETURN(HA_ERR_WRONG_INDEX); } - /* Validate create options if innodb_strict_mode is set. */ - if (create_options_are_invalid( - thd, form, create_info, use_tablespace)) { - DBUG_RETURN(HA_WRONG_CREATE_OPTION); - } + /* Get the transaction associated with the current thd, or create one + if not yet created */ - if (!innobase_table_flags(form, create_info, - thd, use_tablespace, - &flags, &flags2)) { - DBUG_RETURN(-1); - } + parent_trx = check_trx_exists(m_thd); - error = parse_table_name(name, create_info, flags, flags2, - norm_name, temp_path, remote_path); - if (error) { - DBUG_RETURN(error); - } + /* In case MySQL calls this in the middle of a SELECT query, release + possible adaptive hash latch to avoid deadlocks of threads */ - /* Look for a primary key */ - primary_key_no = (form->s->primary_key != MAX_KEY ? - (int) form->s->primary_key : - -1); + trx_search_latch_release_if_reserved(parent_trx); + DBUG_RETURN(0); +} - /* Our function innobase_get_mysql_key_number_for_index assumes - the primary key is always number 0, if it exists */ - ut_a(primary_key_no == -1 || primary_key_no == 0); +/** Prepare to create a new table to an InnoDB database. 
+@param[in] name Table name +@return error number */ +int +create_table_info_t::prepare_create_table( + const char* name) +{ + DBUG_ENTER("prepare_create_table"); - /* Check for name conflicts (with reserved name) for - any user indices to be created. */ - if (innobase_index_name_is_reserved(thd, form->key_info, - form->s->keys)) { - DBUG_RETURN(-1); + ut_ad(m_thd != NULL); + ut_ad(m_create_info != NULL); + + ut_ad(m_form->s->row_type == m_create_info->row_type); + + set_tablespace_type(false); + + normalize_table_name(m_table_name, name); + + /* Validate table options not handled by the SQL-parser */ + if(check_table_options()) { + DBUG_RETURN(HA_WRONG_CREATE_OPTION); } - if (row_is_magic_monitor_table(norm_name)) { - push_warning_printf(thd, - Sql_condition::WARN_LEVEL_WARN, - HA_ERR_WRONG_COMMAND, - "Using the table name %s to enable " - "diagnostic output is deprecated " - "and may be removed in future releases. " - "Use INFORMATION_SCHEMA or " - "PERFORMANCE_SCHEMA tables or " - "SET GLOBAL innodb_status_output=ON.", - dict_remove_db_name(norm_name)); - - /* Limit innodb monitor access to users with PROCESS privilege. - See http://bugs.mysql.com/32710 why we chose PROCESS. */ - if (check_global_access(thd, PROCESS_ACL)) { - DBUG_RETURN(HA_ERR_GENERIC); - } + /* Validate the create options if innodb_strict_mode is set. + Do not use the regular message for ER_ILLEGAL_HA_CREATE_OPTION + because InnoDB might actually support the option, but not under + the current conditions. The messages revealing the specific + problems are reported inside this function. */ + if (create_options_are_invalid()) { + DBUG_RETURN(HA_WRONG_CREATE_OPTION); } - /* Get the transaction associated with the current thd, or create one - if not yet created */ + /* Create the table flags and flags2 */ + if (!innobase_table_flags()) { + DBUG_RETURN(HA_WRONG_CREATE_OPTION); + } - parent_trx = check_trx_exists(thd); + if (high_level_read_only && !is_intrinsic_temp_table()) { + DBUG_RETURN(HA_ERR_TABLE_READONLY); + // JAN: TODO: MySQL 5.7 + //DBUG_RETURN(HA_ERR_INNODB_READ_ONLY); + } - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ + DBUG_RETURN(parse_table_name(name)); +} - trx_search_latch_release_if_reserved(parent_trx); +/** Create a new table to an InnoDB database. +@return error number */ +int +create_table_info_t::create_table() +{ + int error; + int primary_key_no; + uint i; + dict_table_t* innobase_table = NULL; + const char* stmt; + size_t stmt_len; - trx = innobase_trx_allocate(thd); + DBUG_ENTER("create_table"); - /* Latch the InnoDB data dictionary exclusively so that no deadlocks - or lock waits can happen in it during a table create operation. - Drop table etc. do this latching in row0mysql.cc. */ + /* Look for a primary key */ + primary_key_no = (m_form->s->primary_key != MAX_KEY ? 
+ (int) m_form->s->primary_key : + -1); - row_mysql_lock_data_dictionary(trx); + /* Our function innobase_get_mysql_key_number_for_index assumes + the primary key is always number 0, if it exists */ + ut_a(primary_key_no == -1 || primary_key_no == 0); - error = create_table_def(trx, form, norm_name, temp_path, - remote_path, flags, flags2, encrypt, key_id); + error = create_table_def(); if (error) { - goto cleanup; + DBUG_RETURN(error); } /* Create the keys */ - if (form->s->keys == 0 || primary_key_no == -1) { + if (m_form->s->keys == 0 || primary_key_no == -1) { /* Create an index which is used as the clustered index; order the rows by their row id which is internally generated by InnoDB */ error = create_clustered_index_when_no_primary( - trx, flags, norm_name); + m_trx, m_flags, m_table_name); if (error) { - goto cleanup; + DBUG_RETURN(error); } } if (primary_key_no != -1) { /* In InnoDB the clustered index must always be created first */ - if ((error = create_index(trx, form, flags, norm_name, + if ((error = create_index(m_trx, m_form, m_flags, m_table_name, (uint) primary_key_no))) { - goto cleanup; + DBUG_RETURN(error); } } /* Create the ancillary tables that are common to all FTS indexes on this table. */ - if (flags2 & DICT_TF2_FTS) { - enum fts_doc_id_index_enum ret; + if (m_flags2 & DICT_TF2_FTS) { + fts_doc_id_index_enum ret; innobase_table = dict_table_open_on_name( - norm_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE); + m_table_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE); ut_a(innobase_table); /* Check whether there already exists FTS_DOC_ID_INDEX */ ret = innobase_fts_check_doc_id_index_in_def( - form->s->keys, form->key_info); + m_form->s->keys, m_form->key_info); switch (ret) { case FTS_INCORRECT_DOC_ID_INDEX: - push_warning_printf(thd, + push_warning_printf(m_thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_NAME_FOR_INDEX, " InnoDB: Index name %s is reserved" @@ -11991,7 +13964,7 @@ ha_innobase::create( " make sure it is of correct" " type\n", FTS_DOC_ID_INDEX_NAME, - innobase_table->name); + innobase_table->name.m_name); if (innobase_table->fts) { fts_free(innobase_table); @@ -12001,14 +13974,14 @@ ha_innobase::create( my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), FTS_DOC_ID_INDEX_NAME); error = -1; - goto cleanup; + DBUG_RETURN(error); case FTS_EXIST_DOC_ID_INDEX: case FTS_NOT_EXIST_DOC_ID_INDEX: break; } dberr_t err = fts_create_common_tables( - trx, innobase_table, norm_name, + m_trx, innobase_table, m_table_name, (ret == FTS_EXIST_DOC_ID_INDEX)); error = convert_error_code_to_mysql(err, 0, NULL); @@ -12016,24 +13989,30 @@ ha_innobase::create( dict_table_close(innobase_table, TRUE, FALSE); if (error) { - goto cleanup; + trx_rollback_to_savepoint(m_trx, NULL); + m_trx->error_state = DB_SUCCESS; + + row_drop_table_for_mysql(m_table_name, m_trx, true, FALSE); + + m_trx->error_state = DB_SUCCESS; + DBUG_RETURN(error); } } - for (i = 0; i < form->s->keys; i++) { + for (i = 0; i < m_form->s->keys; i++) { if (i != static_cast(primary_key_no)) { - if ((error = create_index(trx, form, flags, - norm_name, i))) { - goto cleanup; + if ((error = create_index(m_trx, m_form, m_flags, + m_table_name, i))) { + DBUG_RETURN(error); } } } /* Cache all the FTS indexes on this table in the FTS specific structure. They are used for FTS indexed column update handling. 
*/ - if (flags2 & DICT_TF2_FTS) { + if (m_flags2 & DICT_TF2_FTS) { fts_t* fts = innobase_table->fts; ut_a(fts != NULL); @@ -12041,61 +14020,125 @@ ha_innobase::create( dict_table_get_all_fts_indexes(innobase_table, fts->indexes); } - stmt = innobase_get_stmt(thd, &stmt_len); + stmt = innobase_get_stmt_unsafe(m_thd, &stmt_len); + + innodb_session_t*& priv = + thd_to_innodb_session(m_trx->mysql_thd); + dict_table_t* handler = + priv->lookup_table_handler(m_table_name); + + ut_ad(handler == NULL + || (handler != NULL && dict_table_is_intrinsic(handler))); + + /* There is no concept of foreign key for intrinsic tables. */ + if (stmt && (handler == NULL)) { - if (stmt) { dberr_t err = row_table_add_foreign_constraints( - trx, stmt, stmt_len, norm_name, - create_info->options & HA_LEX_CREATE_TMP_TABLE); + m_trx, stmt, stmt_len, m_table_name, + m_create_info->options & HA_LEX_CREATE_TMP_TABLE); switch (err) { case DB_PARENT_NO_INDEX: push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_CANNOT_ADD_FOREIGN, "Create table '%s' with foreign key constraint" " failed. There is no index in the referenced" " table where the referenced columns appear" - " as the first columns.\n", norm_name); + " as the first columns.\n", m_table_name); break; case DB_CHILD_NO_INDEX: push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, + m_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_CANNOT_ADD_FOREIGN, "Create table '%s' with foreign key constraint" " failed. There is no index in the referencing" " table where referencing columns appear" - " as the first columns.\n", norm_name); + " as the first columns.\n", m_table_name); + break; + /* JAN: TODO: MySQL 5.7 Virtual columns + case DB_NO_FK_ON_V_BASE_COL: + push_warning_printf( + m_thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_CANNOT_ADD_FOREIGN, + "Create table '%s' with foreign key constraint" + " failed. Cannot add foreign key constraint" + " placed on the base column of indexed" + " virtual column, or constraint placed" + " on columns being part of virtual index.\n", + m_table_name); break; + */ default: break; } - error = convert_error_code_to_mysql(err, flags, NULL); + error = convert_error_code_to_mysql(err, m_flags, NULL); if (error) { - goto cleanup; + if (handler != NULL) { + priv->unregister_table_handler(m_table_name); + } + DBUG_RETURN(error); } } - innobase_commit_low(trx); + if (!is_intrinsic_temp_table()) { + innobase_table = dict_table_open_on_name( + m_table_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE); - row_mysql_unlock_data_dictionary(trx); + if (innobase_table != NULL) { + dict_table_close(innobase_table, TRUE, FALSE); + } - /* Flush the log to reduce probability that the .frm files and - the InnoDB data dictionary get out-of-sync if the user runs - with innodb_flush_log_at_trx_commit = 0 */ + } else { + innobase_table = NULL; + } - log_buffer_flush_to_disk(); + DBUG_RETURN(0); +} + +/** Update a new table in an InnoDB database. 
+@return error number */ +int +create_table_info_t::create_table_update_dict() +{ + dict_table_t* innobase_table; + + DBUG_ENTER("create_table_update_dict"); + + innobase_table = thd_to_innodb_session(m_thd)->lookup_table_handler( + m_table_name); - innobase_table = dict_table_open_on_name( - norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); + if (innobase_table == NULL) { + innobase_table = dict_table_open_on_name( + m_table_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); + } else { + innobase_table->acquire(); + ut_ad(dict_table_is_intrinsic(innobase_table)); + } DBUG_ASSERT(innobase_table != 0); + if (innobase_table->fts != NULL) { + if (innobase_table->fts_doc_id_index == NULL) { + innobase_table->fts_doc_id_index + = dict_table_get_index_on_name( + innobase_table, FTS_DOC_ID_INDEX_NAME); + DBUG_ASSERT(innobase_table->fts_doc_id_index != NULL); + } else { + DBUG_ASSERT(innobase_table->fts_doc_id_index + == dict_table_get_index_on_name( + innobase_table, + FTS_DOC_ID_INDEX_NAME)); + } + } - innobase_copy_frm_flags_from_create_info(innobase_table, create_info); + DBUG_ASSERT((innobase_table->fts == NULL) + == (innobase_table->fts_doc_id_index == NULL)); + + innobase_copy_frm_flags_from_create_info(innobase_table, m_create_info); dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE); @@ -12109,16 +14152,16 @@ ha_innobase::create( } /* Load server stopword into FTS cache */ - if (flags2 & DICT_TF2_FTS) { - if (!innobase_fts_load_stopword(innobase_table, NULL, thd)) { + if (m_flags2 & DICT_TF2_FTS) { + if (!innobase_fts_load_stopword(innobase_table, NULL, m_thd)) { dict_table_close(innobase_table, FALSE, FALSE); srv_active_wake_master_thread(); - trx_free_for_mysql(trx); + trx_free_for_mysql(m_trx); DBUG_RETURN(-1); } } - /* Note: We can't call update_thd() as prebuilt will not be + /* Note: We can't call update_thd() as m_prebuilt will not be setup at this stage and so we use thd. */ /* We need to copy the AUTOINC value from the old table if @@ -12135,13 +14178,14 @@ ha_innobase::create( value to the auto increment field if the value is greater than the maximum value in the column. */ - if (((create_info->used_fields & HA_CREATE_USED_AUTO) - || thd_sql_command(thd) == SQLCOM_ALTER_TABLE - || thd_sql_command(thd) == SQLCOM_OPTIMIZE - || thd_sql_command(thd) == SQLCOM_CREATE_INDEX) - && create_info->auto_increment_value > 0) { + if (((m_create_info->used_fields & HA_CREATE_USED_AUTO) + || thd_sql_command(m_thd) == SQLCOM_ALTER_TABLE + || thd_sql_command(m_thd) == SQLCOM_OPTIMIZE + || thd_sql_command(m_thd) == SQLCOM_CREATE_INDEX) + && m_create_info->auto_increment_value > 0) { + ib_uint64_t auto_inc_value; - auto_inc_value = create_info->auto_increment_value; + auto_inc_value = m_create_info->auto_increment_value; dict_table_autoinc_lock(innobase_table); dict_table_autoinc_initialize(innobase_table, auto_inc_value); @@ -12150,6 +14194,89 @@ ha_innobase::create( dict_table_close(innobase_table, FALSE, FALSE); + innobase_parse_hint_from_comment(m_thd, innobase_table, m_form->s); + + DBUG_RETURN(0); +} + +/** Allocate a new trx. */ +void +create_table_info_t::allocate_trx() +{ + m_trx = innobase_trx_allocate(m_thd); + + m_trx->will_lock++; + m_trx->ddl = true; +} + +/** Create a new table to an InnoDB database. +@param[in] name Table name, format: "db/table_name". +@param[in] form Table format; columns and index information. +@param[in] create_info Create info (including create statement string). +@return 0 if success else error number. 
*/ +int +ha_innobase::create( + const char* name, + TABLE* form, + HA_CREATE_INFO* create_info) +{ + int error; + char norm_name[FN_REFLEN]; /* {database}/{tablename} */ + char temp_path[FN_REFLEN]; /* Absolute path of temp frm */ + char remote_path[FN_REFLEN]; /* Absolute path of table */ + char tablespace[NAME_LEN]; /* Tablespace name identifier */ + trx_t* trx; + DBUG_ENTER("ha_innobase::create"); + + create_table_info_t info(ha_thd(), + form, + create_info, + norm_name, + temp_path, + remote_path, + tablespace); + + /* Initialize the object. */ + if ((error = info.initialize())) { + DBUG_RETURN(error); + } + + /* Prepare for create and validate options. */ + if ((error = info.prepare_create_table(name))) { + DBUG_RETURN(error); + } + + info.allocate_trx(); + + trx = info.trx(); + + /* Latch the InnoDB data dictionary exclusively so that no deadlocks + or lock waits can happen in it during a table create operation. + Drop table etc. do this latching in row0mysql.cc. + Avoid locking dictionary if table is intrinsic. + Table Object for such table is cached in THD instead of storing it + to dictionary. */ + if (!info.is_intrinsic_temp_table()) { + row_mysql_lock_data_dictionary(trx); + } + + if ((error = info.create_table())) { + goto cleanup; + } + + innobase_commit_low(trx); + + if (!info.is_intrinsic_temp_table()) { + ut_ad(!srv_read_only_mode); + row_mysql_unlock_data_dictionary(trx); + /* Flush the log to reduce probability that the .frm files and + the InnoDB data dictionary get out-of-sync if the user runs + with innodb_flush_log_at_trx_commit = 0 */ + log_buffer_flush_to_disk(); + } + + error = info.create_table_update_dict(); + /* Tell the InnoDB server that there might be work for utility threads: */ @@ -12157,12 +14284,41 @@ ha_innobase::create( trx_free_for_mysql(trx); - DBUG_RETURN(0); + DBUG_RETURN(error); cleanup: trx_rollback_for_mysql(trx); - row_mysql_unlock_data_dictionary(trx); + if (!info.is_intrinsic_temp_table()) { + row_mysql_unlock_data_dictionary(trx); + } else { + THD* thd = info.thd(); + + dict_table_t* intrinsic_table = + thd_to_innodb_session(thd)->lookup_table_handler( + info.table_name()); + + if (intrinsic_table != NULL) { + thd_to_innodb_session(thd)->unregister_table_handler( + info.table_name()); + + for (;;) { + dict_index_t* index; + index = UT_LIST_GET_FIRST( + intrinsic_table->indexes); + if (index == NULL) { + break; + } + rw_lock_free(&index->lock); + UT_LIST_REMOVE(intrinsic_table->indexes, index); + dict_mem_index_free(index); + index = NULL; + } + + dict_mem_table_free(intrinsic_table); + intrinsic_table = NULL; + } + } trx_free_for_mysql(trx); @@ -12171,47 +14327,70 @@ cleanup: /*****************************************************************//** Discards or imports an InnoDB tablespace. 
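
/* Schematic sketch, not part of the patch: the staged flow that the new
ha_innobase::create() follows via create_table_info_t, with the real calls
replaced by stubs so the ordering and the error/cleanup path stand out.
Every identifier below is invented for the example. */

#include <cstdio>

struct create_stages_example {
        bool intrinsic_temp;    /* would skip data dictionary locking */

        int  initialize()               { puts("initialize");            return 0; }
        int  prepare_create_table()     { puts("prepare + validate");    return 0; }
        void allocate_trx()             { puts("allocate trx");                    }
        int  create_table()             { puts("create table + indexes"); return 0; }
        int  create_table_update_dict() { puts("update dict, stats, autoinc"); return 0; }
};

static int
create_example(create_stages_example& info)
{
        int err;

        if ((err = info.initialize()))           return err;
        if ((err = info.prepare_create_table())) return err;

        info.allocate_trx();

        if (!info.intrinsic_temp) {
                puts("lock data dictionary");
        }

        if ((err = info.create_table())) {
                puts("rollback + unlock");      /* cleanup path */
                return err;
        }

        puts("commit");

        if (!info.intrinsic_temp) {
                puts("unlock data dictionary");
                puts("flush redo log to disk");
        }

        return info.create_table_update_dict();
}

int main()
{
        create_stages_example info = { false };
        return create_example(info);
}
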
-@return 0 == success, -1 == error */ -UNIV_INTERN +@return 0 == success, -1 == error */ + int ha_innobase::discard_or_import_tablespace( /*======================================*/ - my_bool discard) /*!< in: TRUE if discard, else import */ + my_bool discard) /*!< in: TRUE if discard, else import */ { - dberr_t err; - dict_table_t* dict_table; DBUG_ENTER("ha_innobase::discard_or_import_tablespace"); - ut_a(prebuilt->trx); - ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + ut_a(m_prebuilt->trx != NULL); + ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); if (high_level_read_only) { DBUG_RETURN(HA_ERR_TABLE_READONLY); } - dict_table = prebuilt->table; + dict_table_t* dict_table = m_prebuilt->table; + + if (dict_table_is_temporary(dict_table)) { - if (dict_table->space == TRX_SYS_SPACE) { + ib_senderrf( + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_READ_ONLY_MODE); + // JAN: TODO: MySQL 5.7 + //ER_CANNOT_DISCARD_TEMPORARY_TABLE); + + DBUG_RETURN(HA_ERR_TABLE_NEEDS_UPGRADE); + } + if (dict_table->space == srv_sys_space.space_id()) { ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, ER_TABLE_IN_SYSTEM_TABLESPACE, - table->s->table_name.str); + dict_table->name.m_name); DBUG_RETURN(HA_ERR_TABLE_NEEDS_UPGRADE); } - trx_start_if_not_started(prebuilt->trx); + if (DICT_TF_HAS_SHARED_SPACE(dict_table->flags)) { + my_printf_error(ER_NOT_ALLOWED_COMMAND, + "InnoDB: Cannot %s table `%s` because it is in" + " a general tablespace. It must be file-per-table.", + MYF(0), discard ? "discard" : "import", + dict_table->name.m_name); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. */ - trx_search_latch_release_if_reserved(prebuilt->trx); + // JAN: TODO: MySQL 5.7 + //DBUG_RETURN(HA_ERR_NOT_ALLOWED_COMMAND); + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (trx_in_innodb.is_aborted()) { + + DBUG_RETURN(innobase_rollback(ht, m_user_thd, false)); + } + + trx_start_if_not_started(m_prebuilt->trx, true); /* Obtain an exclusive lock on the table. */ - err = row_mysql_lock_table( - prebuilt->trx, dict_table, LOCK_X, + dberr_t err = row_mysql_lock_table( + m_prebuilt->trx, dict_table, LOCK_X, discard ? "setting table lock for DISCARD TABLESPACE" : "setting table lock for IMPORT TABLESPACE"); @@ -12226,35 +14405,33 @@ ha_innobase::discard_or_import_tablespace( if (dict_table->ibd_file_missing) { ib_senderrf( - prebuilt->trx->mysql_thd, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_WARN, ER_TABLESPACE_MISSING, - table->s->table_name.str); + dict_table->name.m_name); } err = row_discard_tablespace_for_mysql( - dict_table->name, prebuilt->trx); + dict_table->name.m_name, m_prebuilt->trx); } else if (!dict_table->ibd_file_missing) { /* Commit the transaction in order to release the table lock. */ - trx_commit_for_mysql(prebuilt->trx); + trx_commit_for_mysql(m_prebuilt->trx); + ib::error() << "Unable to import tablespace " + << dict_table->name << " because it already" + " exists. 
Please DISCARD the tablespace" + " before IMPORT."; ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, - ER_TABLESPACE_EXISTS, table->s->table_name.str); + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_EXISTS, dict_table->name.m_name); DBUG_RETURN(HA_ERR_TABLE_EXIST); } else { - err = row_import_for_mysql(dict_table, prebuilt); + err = row_import_for_mysql(dict_table, m_prebuilt); if (err == DB_SUCCESS) { - if (table->found_next_number_field) { - dict_table_autoinc_lock(dict_table); - innobase_initialize_autoinc(); - dict_table_autoinc_unlock(dict_table); - } - info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE @@ -12265,7 +14442,26 @@ ha_innobase::discard_or_import_tablespace( } /* Commit the transaction in order to release the table lock. */ - trx_commit_for_mysql(prebuilt->trx); + trx_commit_for_mysql(m_prebuilt->trx); + + if (err == DB_SUCCESS && !discard + && dict_stats_is_persistent_enabled(dict_table)) { + dberr_t ret; + + /* Adjust the persistent statistics. */ + ret = dict_stats_update(dict_table, + DICT_STATS_RECALC_PERSISTENT); + + if (ret != DB_SUCCESS) { + push_warning_printf( + ha_thd(), + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "Error updating stats for table '%s'" + " after table rebuild: %s", + dict_table->name.m_name, ut_strerr(ret)); + } + } if (err == DB_SUCCESS && !discard && dict_stats_is_persistent_enabled(dict_table)) { @@ -12291,53 +14487,64 @@ ha_innobase::discard_or_import_tablespace( /*****************************************************************//** Deletes all rows of an InnoDB table. -@return error number */ -UNIV_INTERN +@return error number */ + int ha_innobase::truncate() /*===================*/ { - dberr_t err; - int error; - DBUG_ENTER("ha_innobase::truncate"); + /* Truncate of intrinsic table is not allowed truncate for now. */ + if (dict_table_is_intrinsic(m_prebuilt->table)) { + DBUG_RETURN(HA_ERR_WRONG_COMMAND); + } + if (high_level_read_only) { DBUG_RETURN(HA_ERR_TABLE_READONLY); } /* Get the transaction associated with the current thd, or create one - if not yet created, and update prebuilt->trx */ + if not yet created, and update m_prebuilt->trx */ update_thd(ha_thd()); - if (!trx_is_started(prebuilt->trx)) { - ++prebuilt->trx->will_lock; + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + if (!trx_is_started(m_prebuilt->trx)) { + ++m_prebuilt->trx->will_lock; } + + dberr_t err; + /* Truncate the table in InnoDB */ + err = row_truncate_table_for_mysql(m_prebuilt->table, m_prebuilt->trx); - err = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx); + int error; switch (err) { - case DB_TABLESPACE_DELETED: case DB_TABLESPACE_NOT_FOUND: ib_senderrf( - prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, (err == DB_TABLESPACE_DELETED ? ER_TABLESPACE_DISCARDED : ER_TABLESPACE_MISSING), table->s->table_name.str); table->status = STATUS_NOT_FOUND; + // JAN: TODO: MySQL 5.7 + // error = HA_ERR_TABLESPACE_MISSING; error = HA_ERR_NO_SUCH_TABLE; break; default: error = convert_error_code_to_mysql( - err, prebuilt->table->flags, - prebuilt->trx->mysql_thd); + err, m_prebuilt->table->flags, + m_prebuilt->trx->mysql_thd); + table->status = STATUS_NOT_FOUND; break; } + DBUG_RETURN(error); } @@ -12347,17 +14554,14 @@ MySQL calls innobase_commit to commit the transaction of the current user. Then the current user cannot have locks set on the table. Drop table operation inside InnoDB will remove all locks any user has on the table inside InnoDB. 
-@return error number */ -UNIV_INTERN +@return error number */ + int ha_innobase::delete_table( /*======================*/ const char* name) /*!< in: table name */ { - ulint name_len; dberr_t err; - trx_t* parent_trx; - trx_t* trx; THD* thd = ha_thd(); char norm_name[FN_REFLEN]; @@ -12376,23 +14580,47 @@ ha_innobase::delete_table( extension, in contrast to ::create */ normalize_table_name(norm_name, name); - if (srv_read_only_mode) { + innodb_session_t*& priv = thd_to_innodb_session(thd); + dict_table_t* handler = priv->lookup_table_handler(norm_name); + + if (handler != NULL) { + for (dict_index_t* index = UT_LIST_GET_FIRST(handler->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + index->last_ins_cur->release(); + index->last_sel_cur->release(); + } + } else if (srv_read_only_mode) { DBUG_RETURN(HA_ERR_TABLE_READONLY); - } else if (row_is_magic_monitor_table(norm_name) - && check_global_access(thd, PROCESS_ACL)) { - DBUG_RETURN(HA_ERR_GENERIC); } - parent_trx = check_trx_exists(thd); + trx_t* parent_trx = check_trx_exists(thd); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ + TrxInInnoDB trx_in_innodb(parent_trx); - trx_search_latch_release_if_reserved(parent_trx); + /* Remove the to-be-dropped table from the list of modified tables + by parent_trx. Otherwise we may end up with an orphaned pointer to + the table object from parent_trx::mod_tables. This could happen in: + SET AUTOCOMMIT=0; + CREATE TABLE t (PRIMARY KEY (a)) ENGINE=INNODB SELECT 1 AS a UNION + ALL SELECT 1 AS a; */ + trx_mod_tables_t::const_iterator iter; - trx = innobase_trx_allocate(thd); + for (iter = parent_trx->mod_tables.begin(); + iter != parent_trx->mod_tables.end(); + ++iter) { + + dict_table_t* table_to_drop = *iter; + + if (strcmp(norm_name, table_to_drop->name.m_name) == 0) { + parent_trx->mod_tables.erase(table_to_drop); + break; + } + } + + trx_t* trx = innobase_trx_allocate(thd); - name_len = strlen(name); + ulint name_len = strlen(name); ut_a(name_len < 1000); @@ -12403,27 +14631,62 @@ ha_innobase::delete_table( /* We are doing a DDL operation. */ ++trx->will_lock; - trx->ddl = true; /* Drop the table in InnoDB */ + err = row_drop_table_for_mysql( norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB, - FALSE); - - + false, handler); + + if (err == DB_TABLE_NOT_FOUND) { + /* Test to drop all tables which matches db/tablename + '#'. + Only partitions can have '#' as non-first character in + the table name! + + Temporary table names always start with '#', partitions are + the only 'tables' that can have '#' after the first character + and table name must have length > 0. User tables cannot have + '#' since it would be translated to @0023. Therefor this should + only match partitions. */ + uint len = (uint) strlen(norm_name); + ulint num_partitions; + ut_a(len < FN_REFLEN); + norm_name[len] = '#'; + norm_name[len + 1] = 0; + err = row_drop_database_for_mysql(norm_name, trx, + &num_partitions); + norm_name[len] = 0; + if (num_partitions == 0 + && !row_is_mysql_tmp_table_name(norm_name)) { + table_name_t tbl_name; + tbl_name.m_name = norm_name; + ib::error() << "Table " << tbl_name << + " does not exist in the InnoDB" + " internal data dictionary though MySQL is" + " trying to drop it. Have you copied the .frm" + " file of the table to the MySQL database" + " directory from another database? 
" + << TROUBLESHOOTING_MSG; + } + if (num_partitions == 0) { + err = DB_TABLE_NOT_FOUND; + } + } + + /* TODO: remove this when the conversion tool from ha_partition to + native innodb partitioning is completed */ if (err == DB_TABLE_NOT_FOUND && innobase_get_lower_case_table_names() == 1) { - char* is_part = NULL; -#ifdef __WIN__ - is_part = strstr(norm_name, "#p#"); +#ifdef _WIN32 + char* is_part = strstr(norm_name, "#p#"); #else - is_part = strstr(norm_name, "#P#"); -#endif /* __WIN__ */ + char* is_part = strstr(norm_name, "#P#"); +#endif /* _WIN32 */ - if (is_part) { + if (is_part != NULL) { char par_case_name[FN_REFLEN]; -#ifndef __WIN__ +#ifndef _WIN32 /* Check for the table using lower case name, including the partition separator "P" */ @@ -12434,21 +14697,26 @@ ha_innobase::delete_table( whether there exists table name in system table whose name is not being normalized to lower case */ - normalize_table_name_low( + create_table_info_t::normalize_table_name_low( par_case_name, name, FALSE); -#endif +#endif /* _WIN32 */ err = row_drop_table_for_mysql( par_case_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB, - FALSE); + true, handler); } } - /* Flush the log to reduce probability that the .frm files and - the InnoDB data dictionary get out-of-sync if the user runs - with innodb_flush_log_at_trx_commit = 0 */ + if (handler == NULL) { + ut_ad(!srv_read_only_mode); + /* Flush the log to reduce probability that the .frm files and + the InnoDB data dictionary get out-of-sync if the user runs + with innodb_flush_log_at_trx_commit = 0 */ - log_buffer_flush_to_disk(); + log_buffer_flush_to_disk(); + } else if (err == DB_SUCCESS) { + priv->unregister_table_handler(norm_name); + } innobase_commit_low(trx); @@ -12456,6 +14724,7 @@ ha_innobase::delete_table( DBUG_RETURN(convert_error_code_to_mysql(err, 0, NULL)); } + /*****************************************************************//** Defragment table. @return error number */ @@ -12529,7 +14798,7 @@ ha_innobase::defragment_table( break; } } - os_event_free(event); + os_event_destroy(event); } if (ret) { @@ -12551,23 +14820,429 @@ ha_innobase::defragment_table( return ret; } -/*****************************************************************//** -Removes all tables in the named database inside InnoDB. */ +/** Validate the parameters in st_alter_tablespace +before using them in InnoDB tablespace functions. +@param[in] thd Connection +@param[in] alter_info How to do the command. +@return MySQL handler error code like HA_... */ +static +int +validate_create_tablespace_info( + THD* thd, + st_alter_tablespace* alter_info) +{ + ulint space_id; + + /* The parser ensures that these fields are provided. */ + ut_a(alter_info->tablespace_name); + ut_a(alter_info->data_file_name); + + if (high_level_read_only) { + return (HA_ERR_TABLE_READONLY); + // JAN: TODO: MySQL 5.7 + // return(HA_ERR_INNODB_READ_ONLY); + } + + /* From this point forward, push a warning for each problem found + instead of returning immediately*/ + int error = validate_tablespace_name( + alter_info->tablespace_name, false); + + /* Make sure the tablespace is not already open. */ + space_id = fil_space_get_id_by_name(alter_info->tablespace_name); + if (space_id != ULINT_UNDEFINED) { + my_printf_error(ER_TABLESPACE_EXISTS, + "InnoDB: A tablespace named `%s`" + " already exists.", MYF(0), + alter_info->tablespace_name); + error = HA_ERR_TABLESPACE_EXISTS; + } + + /* JAN: TODO: MySQL 5.7 FILE_BLOCK_SIZE + if (alter_info->file_block_size) { + */ + /* Check for a bad file block size. 
*/ + /* + if (!ut_is_2pow(alter_info->file_block_size) + || alter_info->file_block_size < UNIV_ZIP_SIZE_MIN + || alter_info->file_block_size > UNIV_PAGE_SIZE_MAX) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB does not support" + " FILE_BLOCK_SIZE=%llu", MYF(0), + alter_info->file_block_size); + error = HA_WRONG_CREATE_OPTION; + */ + /* Don't allow a file block size larger than UNIV_PAGE_SIZE. */ + /* } else if (alter_info->file_block_size > UNIV_PAGE_SIZE) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Cannot create a tablespace" + " with FILE_BLOCK_SIZE=%llu because" + " INNODB_PAGE_SIZE=%lu.", MYF(0), + alter_info->file_block_size, + UNIV_PAGE_SIZE); + error = HA_WRONG_CREATE_OPTION; + */ + /* Don't allow a compressed tablespace when page size > 16k. */ + /* } else if (UNIV_PAGE_SIZE > UNIV_PAGE_SIZE_DEF + && alter_info->file_block_size != UNIV_PAGE_SIZE) { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: Cannot create a COMPRESSED" + " tablespace when innodb_page_size >" + " 16k.", MYF(0)); + error = HA_WRONG_CREATE_OPTION; + } + } + */ + + /* Validate the ADD DATAFILE name. */ + char* filepath = mem_strdup(alter_info->data_file_name); + os_normalize_path(filepath); + + /* It must end with '.ibd' and contain a basename of at least + 1 character before the.ibd extension. */ + ulint dirname_len = dirname_length(filepath); + const char* basename = filepath + dirname_len; + ulint basename_len = strlen(basename); + + // JAN: TODO: MySQL 5.7 ER_WRONG_FILE_NAME + if (basename_len < 5) { + my_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + MYF(0), + alter_info->data_file_name); + ut_free(filepath); + return(HA_WRONG_CREATE_OPTION); + } + if (memcmp(&basename[basename_len - 4], DOT_IBD, 5)) { + my_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + MYF(0), + alter_info->data_file_name); + my_printf_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + "An IBD filepath must end with `.ibd`.", + MYF(0)); + ut_free(filepath); + return(HA_WRONG_CREATE_OPTION); + } + + /* Do not allow an invalid colon in the file name. */ + const char* colon = strchr(filepath, ':'); + if (colon != NULL) { +#ifdef _WIN32 + /* Do not allow names like "C:name.ibd" because it + specifies the "C:" drive but allows a relative location. + It should be like "c:\". If a single colon is used it must + be the second byte the the third byte must be a separator. */ + if (colon != &filepath[1] + || (colon[1] != OS_PATH_SEPARATOR) + || NULL != strchr(&colon[1], ':')) { +#endif /* _WIN32 */ + my_error(ER_WRONG_TABLE_NAME, // ER_WRONG_FILE_NAME, + MYF(0), + alter_info->data_file_name); + my_printf_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + "Invalid use of ':'.", MYF(0)); + ut_free(filepath); + return(HA_WRONG_CREATE_OPTION); +#ifdef _WIN32 + } +#endif /* _WIN32 */ + } + +#ifndef _WIN32 + /* On Non-Windows platforms, '\\' is a valid file name character. + But for InnoDB datafiles, we always assume it is a directory + separator and convert these to '/' */ + if (strchr(alter_info->data_file_name, '\\') != NULL) { + ib::warn() << "Converting backslash to forward slash in" + " ADD DATAFILE " << alter_info->data_file_name; + } +#endif /* _WIN32 */ + + /* The directory path must be pre-existing. 
*/ + Folder folder(filepath, dirname_len); + ut_free(filepath); + if (!folder.exists()) { + my_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + MYF(0), + alter_info->data_file_name); + my_printf_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + "The directory does not exist.", MYF(0)); + return(HA_WRONG_CREATE_OPTION); + } + + /* CREATE TABLESPACE...ADD DATAFILE can be inside but not under + the datadir.*/ + if (folder_mysql_datadir > folder) { + my_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + MYF(0), + alter_info->data_file_name); + my_printf_error(ER_WRONG_TABLE_NAME, //ER_WRONG_FILE_NAME, + "CREATE TABLESPACE data file" + " cannot be under the datadir.", MYF(0)); + error = HA_WRONG_CREATE_OPTION; + } + + return(error); +} + +/** CREATE a tablespace. +@param[in] hton Handlerton of InnoDB +@param[in] thd Connection +@param[in] alter_info How to do the command +@return MySQL error code*/ +static +int +innobase_create_tablespace( + handlerton* hton, + THD* thd, + st_alter_tablespace* alter_info) +{ + trx_t* trx; + int error; + Tablespace tablespace; + + DBUG_ENTER("innobase_create_tablespace"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + /* Be sure the input parameters are valid before continuing. */ + error = validate_create_tablespace_info(thd, alter_info); + if (error) { + DBUG_RETURN(error); + } + + /* Create the tablespace object. */ + tablespace.set_name(alter_info->tablespace_name); + + dberr_t err = tablespace.add_datafile(alter_info->data_file_name); + if (err != DB_SUCCESS) { + DBUG_RETURN(convert_error_code_to_mysql(err, 0, NULL)); + } + + /* Get the transaction associated with the current thd and make + sure it will not block this DDL. */ + trx_t* parent_trx = check_trx_exists(thd); + + /* In case MySQL calls this in the middle of a SELECT + query, release possible adaptive hash latch to avoid + deadlocks of threads */ + trx_search_latch_release_if_reserved(parent_trx); + + /* Allocate a new transaction for this DDL */ + trx = innobase_trx_allocate(thd); + ++trx->will_lock; + + trx_start_if_not_started(trx, true); + row_mysql_lock_data_dictionary(trx); + + /* In FSP_FLAGS, a zip_ssize of zero means that the tablespace + holds non-compresssed tables. A non-zero zip_ssize means that + the general tablespace can ONLY contain compressed tables. */ + ulint zip_size = static_cast(0); + // JAN: TODO: MySQL 5.7 + //ulint zip_size = static_cast(alter_info->file_block_size); + ut_ad(zip_size <= UNIV_PAGE_SIZE_MAX); + if (zip_size == 0) { + zip_size = UNIV_PAGE_SIZE; + } + bool zipped = (zip_size != UNIV_PAGE_SIZE); + page_size_t page_size(zip_size, UNIV_PAGE_SIZE, zipped); + bool atomic_blobs = page_size.is_compressed(); + + /* Create the filespace flags */ + ulint fsp_flags = fsp_flags_init( + page_size, /* page sizes and a flag if compressed */ + atomic_blobs, /* needed only for compressed tables */ + false, /* This is not a file-per-table tablespace */ + true, /* This is a general shared tablespace */ + false, /* Temporary General Tablespaces not + allowed */ + false, /* Page compression is not used. */ + 0, /* Page compression level 0 */ + ATOMIC_WRITES_DEFAULT); /* No atomic writes yet */ + + tablespace.set_flags(fsp_flags); + + err = dict_build_tablespace(&tablespace); + if (err != DB_SUCCESS) { + error = convert_error_code_to_mysql(err, 0, NULL); + trx_rollback_for_mysql(trx); + goto cleanup; + } + + innobase_commit_low(trx); + +cleanup: + row_mysql_unlock_data_dictionary(trx); + trx_free_for_mysql(trx); + + DBUG_RETURN(error); +} + +/** DROP a tablespace. 
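
/* Illustrative sketch, not part of the patch: the ADD DATAFILE name check
performed above, i.e. the file must end in ".ibd" and have at least one
character of basename before the extension. The helper name is invented for
the example; path separators are handled naively here. */

#include <cstdio>
#include <cstring>

static bool
datafile_name_is_valid_example(const char* path)
{
        /* Find the basename (portion after the last separator). */
        const char* base = path;

        for (const char* p = path; *p != '\0'; p++) {
                if (*p == '/' || *p == '\\') {
                        base = p + 1;
                }
        }

        size_t len = strlen(base);

        /* Need "<at least one char>.ibd", i.e. 5 or more characters,
        ending in ".ibd". */
        return(len >= 5 && strcmp(base + len - 4, ".ibd") == 0);
}

int main()
{
        const char* names[] = {
                "ts1.ibd",
                "/var/lib/mysql-extra/ts1.ibd",
                ".ibd",
                "ts1.ibd.bak",
                "ts1",
        };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                printf("%-35s -> %s\n", names[i],
                       datafile_name_is_valid_example(names[i])
                       ? "accepted" : "rejected");
        }
        return 0;
}
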
+@param[in] hton Handlerton of InnoDB +@param[in] thd Connection +@param[in] alter_info How to do the command +@return MySQL error code*/ +static +int +innobase_drop_tablespace( + handlerton* hton, + THD* thd, + st_alter_tablespace* alter_info) +{ + trx_t* trx; + dberr_t err; + int error = 0; + ulint space_id; + + DBUG_ENTER("innobase_drop_tablespace"); + DBUG_ASSERT(hton == innodb_hton_ptr); + + if (srv_read_only_mode) { + DBUG_RETURN(HA_ERR_TABLE_READONLY); + //DBUG_RETURN(HA_ERR_INNODB_READ_ONLY); + } + + error = validate_tablespace_name(alter_info->tablespace_name, false); + if (error != 0) { + DBUG_RETURN(error); + } + + /* Be sure that this tablespace is known and valid. */ + space_id = fil_space_get_id_by_name(alter_info->tablespace_name); + if (space_id == ULINT_UNDEFINED) { + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + // JAN: TODO: MySQL 5.7 + // DBUG_RETURN(HA_ERR_TABLESPACE_MISSING); + } + + /* The tablespace can only be dropped if it is empty. */ + if (!dict_tablespace_is_empty(space_id)) { + DBUG_RETURN(HA_ERR_TABLE_READONLY); + // JAN: TODO: MySQL 5.7 + // DBUG_RETURN(HA_ERR_TABLESPACE_IS_NOT_EMPTY); + } + + /* Get the transaction associated with the current thd and make sure + it will not block this DDL. */ + trx_t* parent_trx = check_trx_exists(thd); + + /* In case MySQL calls this in the middle of a SELECT + query, release possible adaptive hash latch to avoid + deadlocks of threads */ + trx_search_latch_release_if_reserved(parent_trx); + + /* Allocate a new transaction for this DDL */ + trx = innobase_trx_allocate(thd); + ++trx->will_lock; + + trx_start_if_not_started(trx, true); + row_mysql_lock_data_dictionary(trx); + + /* Update SYS_TABLESPACES and SYS_DATAFILES */ + err = dict_delete_tablespace_and_datafiles(space_id, trx); + if (err != DB_SUCCESS) { + ib::error() << "Unable to delete the dictionary entries" + " for tablespace `" << alter_info->tablespace_name + << "`, Space ID " << space_id; + error = convert_error_code_to_mysql(err, 0, NULL); + trx_rollback_for_mysql(trx); + goto cleanup; + } + + /* Delete the physical files, fil_space_t & fil_node_t entries. */ + err = fil_delete_tablespace(space_id, BUF_REMOVE_FLUSH_NO_WRITE); + if (err != DB_SUCCESS) { + ib::error() << "Unable to delete the tablespace `" + << alter_info->tablespace_name + << "`, Space ID " << space_id; + error = convert_error_code_to_mysql(err, 0, NULL); + trx_rollback_for_mysql(trx); + goto cleanup; + } + + innobase_commit_low(trx); + +cleanup: + row_mysql_unlock_data_dictionary(trx); + trx_free_for_mysql(trx); + + DBUG_RETURN(error); +} + +/** This API handles CREATE, ALTER & DROP commands for InnoDB tablespaces. +@param[in] hton Handlerton of InnoDB +@param[in] thd Connection +@param[in] alter_info How to do the command +@return MySQL error code*/ +static +int +innobase_alter_tablespace( + handlerton* hton, + THD* thd, + st_alter_tablespace* alter_info) +{ + int error; /* return zero for success */ + DBUG_ENTER("innobase_alter_tablespace"); + + switch (alter_info->ts_cmd_type) { + case CREATE_TABLESPACE: + error = innobase_create_tablespace(hton, thd, alter_info); + break; + + case DROP_TABLESPACE: + error = innobase_drop_tablespace(hton, thd, alter_info); + break; + + default: + error = HA_ADMIN_NOT_IMPLEMENTED; + } + + if (error) { + /* These are the most common message params */ + const char* object_type = "TABLESPACE"; + const char* object = alter_info->tablespace_name; + + /* Modify those params as needed. 
*/ + switch (alter_info->ts_cmd_type) { + case DROP_TABLESPACE: + ib_errf(thd, IB_LOG_LEVEL_ERROR, + ER_DROP_FILEGROUP_FAILED, + "%s %s", object_type, object); + break; + case CREATE_TABLESPACE: + ib_errf(thd, IB_LOG_LEVEL_ERROR, + ER_CREATE_FILEGROUP_FAILED, + "%s %s", object_type, object); + break; + case CREATE_LOGFILE_GROUP: + my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), + innobase_hton_name, "LOGFILE GROUP"); + break; + case ALTER_TABLESPACE: + case ALTER_ACCESS_MODE_TABLESPACE: + case DROP_LOGFILE_GROUP: + case ALTER_LOGFILE_GROUP: + case CHANGE_FILE_TABLESPACE: + case TS_CMD_NOT_DEFINED: + break; + } + } + + DBUG_RETURN(error); +} + +/** Remove all tables in the named database inside InnoDB. +@param[in] hton handlerton from InnoDB +@param[in] path Database path; Inside InnoDB the name of the last +directory in the path is used as the database name. +For example, in 'mysql/data/test' the database name is 'test'. */ static void innobase_drop_database( -/*===================*/ - handlerton* hton, /*!< in: handlerton of Innodb */ - char* path) /*!< in: database path; inside InnoDB the name - of the last directory in the path is used as - the database name: for example, in - 'mysql/data/test' the database name is 'test' */ + handlerton* hton, + char* path) { - ulint len = 0; - trx_t* trx; - char* ptr; char* namebuf; - THD* thd = current_thd; /* Get the transaction associated with the current thd, or create one if not yet created */ @@ -12578,8 +15253,10 @@ innobase_drop_database( return; } + THD* thd = current_thd; + /* In the Windows plugin, thd = current_thd is always NULL */ - if (thd) { + if (thd != NULL) { trx_t* parent_trx = check_trx_exists(thd); /* In case MySQL calls this in the middle of a SELECT @@ -12589,7 +15266,8 @@ innobase_drop_database( trx_search_latch_release_if_reserved(parent_trx); } - ptr = strend(path) - 2; + ulint len = 0; + char* ptr = strend(path) - 2; while (ptr >= path && *ptr != '\\' && *ptr != '/') { ptr--; @@ -12598,14 +15276,18 @@ innobase_drop_database( ptr++; namebuf = (char*) my_malloc((uint) len + 2, MYF(0)); + // JAN: TODO: MySQL 5.7 + //namebuf = (char*) my_malloc(PSI_INSTRUMENT_ME, (uint) len + 2, MYF(0)); memcpy(namebuf, ptr, len); namebuf[len] = '/'; namebuf[len + 1] = '\0'; -#ifdef __WIN__ + +#ifdef _WIN32 innobase_casedn_str(namebuf); -#endif - trx = innobase_trx_allocate(thd); +#endif /* _WIN32 */ + + trx_t* trx = innobase_trx_allocate(thd); /* Either the transaction is already flagged as a locking transaction or it hasn't been started yet. */ @@ -12615,7 +15297,9 @@ innobase_drop_database( /* We are doing a DDL operation. */ ++trx->will_lock; - row_drop_database_for_mysql(namebuf, trx); + ulint dummy; + + row_drop_database_for_mysql(namebuf, trx, &dummy); my_free(namebuf); @@ -12626,6 +15310,7 @@ innobase_drop_database( log_buffer_flush_to_disk(); innobase_commit_low(trx); + trx_free_for_mysql(trx); } @@ -12654,7 +15339,9 @@ innobase_rename_table( DEBUG_SYNC_C("innodb_rename_table_ready"); - trx_start_if_not_started(trx); + TrxInInnoDB trx_in_innodb(trx); + + trx_start_if_not_started(trx, true); /* Serialize data dictionary operations with dictionary mutex: no deadlocks can occur then in these operations. 
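// Editorial sketch, not part of the patch: innobase_drop_database() above
// walks the path backwards to turn e.g. "mysql/data/test" into the InnoDB
// database prefix "test/".  The same pointer walk can be restated with the
// standard library; the helper name below is hypothetical, for illustration.

#include <string>

// Hypothetical helper: last path component with a trailing '/' appended.
static std::string innodb_db_prefix_from_path(std::string path)
{
	// Tolerate a trailing separator, as the caller may pass one.
	while (!path.empty() && (path.back() == '/' || path.back() == '\\')) {
		path.pop_back();
	}

	const std::size_t sep = path.find_last_of("/\\");
	const std::string db =
		(sep == std::string::npos) ? path : path.substr(sep + 1);

	return db + "/";
}

// innodb_db_prefix_from_path("mysql/data/test")  -> "test/"
// innodb_db_prefix_from_path("mysql/data/test/") -> "test/"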
*/ @@ -12666,22 +15353,41 @@ innobase_rename_table( ut_a(trx->will_lock > 0); - error = row_rename_table_for_mysql( - norm_from, norm_to, trx, TRUE); + error = row_rename_table_for_mysql(norm_from, norm_to, trx, TRUE); + if (error == DB_TABLE_NOT_FOUND) { + /* May be partitioned table, which consists of partitions + named table_name#P#partition_name[#SP#subpartition_name]. + + We are doing a DDL operation. */ + ++trx->will_lock; + trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); + trx_start_if_not_started(trx, true); + error = row_rename_partitions_for_mysql(norm_from, norm_to, + trx); + if (error == DB_TABLE_NOT_FOUND) { + ib::error() << "Table " << ut_get_name(trx, norm_from) + << " does not exist in the InnoDB internal" + " data dictionary though MySQL is trying to" + " rename the table. Have you copied the .frm" + " file of the table to the MySQL database" + " directory from another database? " + << TROUBLESHOOTING_MSG; + } + } if (error != DB_SUCCESS) { if (error == DB_TABLE_NOT_FOUND && innobase_get_lower_case_table_names() == 1) { char* is_part = NULL; -#ifdef __WIN__ +#ifdef _WIN32 is_part = strstr(norm_from, "#p#"); #else is_part = strstr(norm_from, "#P#"); -#endif /* __WIN__ */ +#endif /* _WIN32 */ if (is_part) { char par_case_name[FN_REFLEN]; -#ifndef __WIN__ +#ifndef _WIN32 /* Check for the table using lower case name, including the partition separator "P" */ @@ -12692,32 +15398,32 @@ innobase_rename_table( whether there exists table name in system table whose name is not being normalized to lower case */ - normalize_table_name_low( + create_table_info_t::normalize_table_name_low( par_case_name, from, FALSE); -#endif - trx_start_if_not_started(trx); +#endif /* _WIN32 */ + trx_start_if_not_started(trx, true); error = row_rename_table_for_mysql( par_case_name, norm_to, trx, TRUE); } } if (error == DB_SUCCESS) { -#ifndef __WIN__ - sql_print_warning("Rename partition table %s " - "succeeds after converting to lower " - "case. The table may have " - "been moved from a case " - "in-sensitive file system.\n", +#ifndef _WIN32 + sql_print_warning("Rename partition table %s" + " succeeds after converting to lower" + " case. The table may have" + " been moved from a case" + " in-sensitive file system.\n", norm_from); #else - sql_print_warning("Rename partition table %s " - "succeeds after skipping the step to " - "lower case the table name. " - "The table may have been " - "moved from a case sensitive " - "file system.\n", + sql_print_warning("Rename partition table %s" + " succeeds after skipping the step to" + " lower case the table name." + " The table may have been" + " moved from a case sensitive" + " file system.\n", norm_from); -#endif /* __WIN__ */ +#endif /* _WIN32 */ } } @@ -12734,18 +15440,15 @@ innobase_rename_table( /*********************************************************************//** Renames an InnoDB table. 
-@return 0 or error code */ -UNIV_INTERN +@return 0 or error code */ + int ha_innobase::rename_table( /*======================*/ const char* from, /*!< in: old name of the table */ const char* to) /*!< in: new name of the table */ { - trx_t* trx; - dberr_t error; - trx_t* parent_trx; - THD* thd = ha_thd(); + THD* thd = ha_thd(); DBUG_ENTER("ha_innobase::rename_table"); @@ -12757,24 +15460,22 @@ ha_innobase::rename_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(thd); + trx_t* parent_trx = check_trx_exists(thd); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_search_latch_release_if_reserved(parent_trx); + TrxInInnoDB trx_in_innodb(parent_trx); - trx = innobase_trx_allocate(thd); + trx_t* trx = innobase_trx_allocate(thd); /* We are doing a DDL operation. */ ++trx->will_lock; trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); - error = innobase_rename_table(trx, from, to); + dberr_t error = innobase_rename_table(trx, from, to); DEBUG_SYNC(thd, "after_innobase_rename_table"); innobase_commit_low(trx); + trx_free_for_mysql(trx); if (error == DB_SUCCESS) { @@ -12790,8 +15491,7 @@ ha_innobase::rename_table( errstr, sizeof(errstr)); if (ret != DB_SUCCESS) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: %s\n", errstr); + ib::error() << errstr; push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_LOCK_WAIT_TIMEOUT, errstr); @@ -12813,16 +15513,132 @@ ha_innobase::rename_table( if (error == DB_DUPLICATE_KEY) { my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to); - error = DB_ERROR; + error = DB_ERROR; + } + + DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); +} + +/*********************************************************************//** +Returns the exact number of records that this client can see using this +handler object. +@return Error code in case something goes wrong. +These errors will abort the current query: + case HA_ERR_LOCK_DEADLOCK: + case HA_ERR_LOCK_TABLE_FULL: + case HA_ERR_LOCK_WAIT_TIMEOUT: + case HA_ERR_QUERY_INTERRUPTED: +For other error codes, the server will fall back to counting records. 
*/ + +#ifdef MYSQL_57_SELECT_COUNT_OPTIMIZATION +int +ha_innobase::records(ha_rows* num_rows) +/*===================================*/ +{ + DBUG_ENTER("ha_innobase::records()"); + + dberr_t ret; + ulint n_rows = 0; /* Record count in this view */ + + update_thd(); + + if (dict_table_is_discarded(m_prebuilt->table)) { + ib_senderrf( + m_user_thd, + IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + table->s->table_name.str); + + *num_rows = HA_POS_ERROR; + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + + } else if (m_prebuilt->table->ibd_file_missing) { + // JAN: TODO: MySQL 5.7 + ib_senderrf( + m_user_thd, IB_LOG_LEVEL_ERROR, + ER_TABLESPACE_DISCARDED, + //ER_TABLESPACE_MISSING, + table->s->table_name.str); + + *num_rows = HA_POS_ERROR; + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + //DBUG_RETURN(HA_ERR_TABLESPACE_MISSING); + + } else if (m_prebuilt->table->corrupted) { + ib_errf(m_user_thd, IB_LOG_LEVEL_WARN, + ER_INNODB_INDEX_CORRUPT, + "Table '%s' is corrupt.", + table->s->table_name.str); + + *num_rows = HA_POS_ERROR; + DBUG_RETURN(HA_ERR_INDEX_CORRUPT); + } + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + m_prebuilt->trx->op_info = "counting records"; + + dict_index_t* index = dict_table_get_first_index(m_prebuilt->table); + + ut_ad(dict_index_is_clust(index)); + + m_prebuilt->index_usable = row_merge_is_index_usable( + m_prebuilt->trx, index); + + if (!m_prebuilt->index_usable) { + *num_rows = HA_POS_ERROR; + DBUG_RETURN(HA_ERR_TABLE_DEF_CHANGED); + } + + /* (Re)Build the m_prebuilt->mysql_template if it is null to use + the clustered index and just the key, no off-record data. */ + m_prebuilt->index = index; + dtuple_set_n_fields(m_prebuilt->search_tuple, 0); + m_prebuilt->read_just_key = 1; + build_template(false); + + /* Count the records in the clustered index */ + ret = row_scan_index_for_mysql(m_prebuilt, index, false, &n_rows); + reset_template(); + switch (ret) { + case DB_SUCCESS: + break; + case DB_DEADLOCK: + case DB_LOCK_TABLE_FULL: + case DB_LOCK_WAIT_TIMEOUT: + *num_rows = HA_POS_ERROR; + DBUG_RETURN(convert_error_code_to_mysql(ret, 0, m_user_thd)); + break; + case DB_INTERRUPTED: + *num_rows = HA_POS_ERROR; + DBUG_RETURN(ER_QUERY_INTERRUPTED); + // JAN: TODO: MySQL 5.7 + //DBUG_RETURN(HA_ERR_QUERY_INTERRUPTED); + break; + default: + /* No other error besides the three below is returned from + row_scan_index_for_mysql(). Make a debug catch. */ + *num_rows = HA_POS_ERROR; + ut_ad(0); + } + + m_prebuilt->trx->op_info = ""; + + if (thd_killed(m_user_thd)) { + *num_rows = HA_POS_ERROR; + DBUG_RETURN(ER_QUERY_INTERRUPTED); + // JAN: TODO: MySQL 5.7 + // DBUG_RETURN(HA_ERR_QUERY_INTERRUPTED); } - DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL)); + DBUG_RETURN(0); } +#endif /* MYSQL_57_SELECT_COUNT_OPTIMIZATION */ /*********************************************************************//** Estimates the number of index records in a range. 
-@return estimated number of rows */ -UNIV_INTERN +@return estimated number of rows */ + ha_rows ha_innobase::records_in_range( /*==========================*/ @@ -12836,21 +15652,18 @@ ha_innobase::records_in_range( dict_index_t* index; dtuple_t* range_start; dtuple_t* range_end; - ib_int64_t n_rows; - ulint mode1; - ulint mode2; + int64_t n_rows; + page_cur_mode_t mode1; + page_cur_mode_t mode2; mem_heap_t* heap; DBUG_ENTER("records_in_range"); - ut_a(prebuilt->trx == thd_to_trx(ha_thd())); + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); - prebuilt->trx->op_info = (char*)"estimating records in index range"; - - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ + m_prebuilt->trx->op_info = "estimating records in index range"; - trx_search_latch_release_if_reserved(prebuilt->trx); + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); active_index = keynr; @@ -12861,11 +15674,11 @@ ha_innobase::records_in_range( /* There exists possibility of not being able to find requested index due to inconsistency between MySQL and InoDB dictionary info. Necessary message should have been printed in innobase_get_index() */ - if (dict_table_is_discarded(prebuilt->table)) { + if (dict_table_is_discarded(m_prebuilt->table)) { n_rows = HA_POS_ERROR; goto func_exit; } - if (UNIV_UNLIKELY(!index)) { + if (!index) { n_rows = HA_POS_ERROR; goto func_exit; } @@ -12873,7 +15686,7 @@ ha_innobase::records_in_range( n_rows = HA_ERR_INDEX_CORRUPT; goto func_exit; } - if (UNIV_UNLIKELY(!row_merge_is_index_usable(prebuilt->trx, index))) { + if (!row_merge_is_index_usable(m_prebuilt->trx, index)) { n_rows = HA_ERR_TABLE_DEF_CHANGED; goto func_exit; } @@ -12888,41 +15701,47 @@ ha_innobase::records_in_range( dict_index_copy_types(range_end, index, key->ext_key_parts); row_sel_convert_mysql_key_to_innobase( - range_start, - prebuilt->srch_key_val1, - prebuilt->srch_key_val_len, - index, - (byte*) (min_key ? min_key->key : - (const uchar*) 0), - (ulint) (min_key ? min_key->length : 0), - prebuilt->trx); + range_start, + m_prebuilt->srch_key_val1, + m_prebuilt->srch_key_val_len, + index, + (byte*) (min_key ? min_key->key : (const uchar*) 0), + (ulint) (min_key ? min_key->length : 0), + m_prebuilt->trx); + DBUG_ASSERT(min_key ? range_start->n_fields > 0 : range_start->n_fields == 0); row_sel_convert_mysql_key_to_innobase( - range_end, - prebuilt->srch_key_val2, - prebuilt->srch_key_val_len, - index, - (byte*) (max_key ? max_key->key : - (const uchar*) 0), - (ulint) (max_key ? max_key->length : 0), - prebuilt->trx); + range_end, + m_prebuilt->srch_key_val2, + m_prebuilt->srch_key_val_len, + index, + (byte*) (max_key ? max_key->key : (const uchar*) 0), + (ulint) (max_key ? max_key->length : 0), + m_prebuilt->trx); + DBUG_ASSERT(max_key ? range_end->n_fields > 0 : range_end->n_fields == 0); - mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag : - HA_READ_KEY_EXACT); - mode2 = convert_search_mode_to_innobase(max_key ? max_key->flag : - HA_READ_KEY_EXACT); + mode1 = convert_search_mode_to_innobase( + min_key ? min_key->flag : HA_READ_KEY_EXACT); + + mode2 = convert_search_mode_to_innobase( + max_key ? max_key->flag : HA_READ_KEY_EXACT); if (mode1 != PAGE_CUR_UNSUPP && mode2 != PAGE_CUR_UNSUPP) { - n_rows = btr_estimate_n_rows_in_range(index, range_start, - mode1, range_end, - mode2, prebuilt->trx); + if (dict_index_is_spatial(index)) { + /*Only min_key used in spatial index. 
*/ + n_rows = rtr_estimate_n_rows_in_range( + index, range_start, mode1); + } else { + n_rows = btr_estimate_n_rows_in_range( + index, range_start, mode1, range_end, mode2); + } } else { n_rows = HA_POS_ERROR; @@ -12930,9 +15749,17 @@ ha_innobase::records_in_range( mem_heap_free(heap); + DBUG_EXECUTE_IF( + "print_btr_estimate_n_rows_in_range_return_value", + push_warning_printf( + ha_thd(), Sql_condition::WARN_LEVEL_WARN, + ER_NO_DEFAULT, + "btr_estimate_n_rows_in_range(): %f", n_rows); + ); + func_exit: - prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = (char*)""; /* The MySQL optimizer seems to believe an estimate of 0 rows is always accurate and may return the result 'Empty set' based on that. @@ -12950,8 +15777,8 @@ func_exit: /*********************************************************************//** Gives an UPPER BOUND to the number of rows in a table. This is used in filesort.cc. -@return upper bound of rows */ -UNIV_INTERN +@return upper bound of rows */ + ha_rows ha_innobase::estimate_rows_upper_bound() /*====================================*/ @@ -12959,7 +15786,6 @@ ha_innobase::estimate_rows_upper_bound() const dict_index_t* index; ulonglong estimate; ulonglong local_data_file_length; - ulint stat_n_leaf_pages; DBUG_ENTER("estimate_rows_upper_bound"); @@ -12969,16 +15795,13 @@ ha_innobase::estimate_rows_upper_bound() update_thd(ha_thd()); - prebuilt->trx->op_info = "calculating upper bound for table rows"; - - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - trx_search_latch_release_if_reserved(prebuilt->trx); + m_prebuilt->trx->op_info = "calculating upper bound for table rows"; - index = dict_table_get_first_index(prebuilt->table); + index = dict_table_get_first_index(m_prebuilt->table); - stat_n_leaf_pages = index->stat_n_leaf_pages; + ulint stat_n_leaf_pages = index->stat_n_leaf_pages; ut_a(stat_n_leaf_pages > 0); @@ -12993,7 +15816,7 @@ ha_innobase::estimate_rows_upper_bound() estimate = 2 * local_data_file_length / dict_index_calc_min_rec_len(index); - prebuilt->trx->op_info = ""; + m_prebuilt->trx->op_info = ""; /* Set num_rows less than MERGEBUFF to simulate the case where we do not have enough space to merge the externally sorted file blocks. */ @@ -13009,8 +15832,8 @@ ha_innobase::estimate_rows_upper_bound() How many seeks it will take to read through the table. This is to be comparable to the number returned by records_in_range so that we can decide if we should scan the table or use keys. -@return estimated time measured in disk seeks */ -UNIV_INTERN +@return estimated time measured in disk seeks */ + double ha_innobase::scan_time() /*====================*/ @@ -13024,26 +15847,23 @@ ha_innobase::scan_time() it we could end up returning uninitialized value to the caller, which in the worst case could make some query plan go bogus or issue a Valgrind warning. */ -#if 0 - /* avoid potential lock order violation with dict_table_stats_lock() - below */ - update_thd(ha_thd()); - trx_search_latch_release_if_reserved(prebuilt->trx); -#endif - - ulint stat_clustered_index_size; -#if 0 - dict_table_stats_lock(prebuilt->table, RW_S_LATCH); -#endif + if (m_prebuilt == NULL) { + /* In case of derived table, Optimizer will try to fetch stat + for table even before table is create or open. In such + cases return default value of 1. 
+ TODO: This will be further improved to return some approximate + estimate but that would also needs pre-population of stats + structure. As of now approach is in sync with MyISAM. */ + return(ulonglong2double(stats.data_file_length) / IO_SIZE + 2); + } - ut_a(prebuilt->table->stat_initialized); + ulint stat_clustered_index_size; - stat_clustered_index_size = prebuilt->table->stat_clustered_index_size; + ut_a(m_prebuilt->table->stat_initialized); -#if 0 - dict_table_stats_unlock(prebuilt->table, RW_S_LATCH); -#endif + stat_clustered_index_size = + m_prebuilt->table->stat_clustered_index_size; return((double) stat_clustered_index_size); } @@ -13051,8 +15871,8 @@ ha_innobase::scan_time() /******************************************************************//** Calculate the time it takes to read a set of ranges through an index This enables us to optimise reads for clustered indexes. -@return estimated time measured in disk seeks */ -UNIV_INTERN +@return estimated time measured in disk seeks */ + double ha_innobase::read_time( /*===================*/ @@ -13061,7 +15881,6 @@ ha_innobase::read_time( ha_rows rows) /*!< in: estimated number of rows in the ranges */ { ha_rows total_rows; - double time_for_scan; if (index != table->s->primary_key) { /* Not clustered */ @@ -13071,7 +15890,7 @@ ha_innobase::read_time( /* Assume that the read time is proportional to the scan time for all rows + at most one seek per range. */ - time_for_scan = scan_time(); + double time_for_scan = scan_time(); if ((total_rows = estimate_rows_upper_bound()) < rows) { @@ -13083,7 +15902,7 @@ ha_innobase::read_time( /******************************************************************//** Return the size of the InnoDB memory buffer. */ -UNIV_INTERN + longlong ha_innobase::get_memory_buffer_size() const /*=======================================*/ @@ -13091,6 +15910,15 @@ ha_innobase::get_memory_buffer_size() const return(innobase_buffer_pool_size); } +/** Update the system variable with the given value of the InnoDB +buffer pool size. +@param[in] buf_pool_size given value of buffer pool size.*/ +void +innodb_set_buf_pool_size(ulonglong buf_pool_size) +{ + innobase_buffer_pool_size = buf_pool_size; +} + /*********************************************************************//** Calculates the key number used inside MySQL for an Innobase index. We will first check the "index translation table" for a match of the index to get @@ -13108,15 +15936,13 @@ innobase_get_mysql_key_number_for_index( translation table. */ const TABLE* table, /*!< in: table in MySQL data dictionary */ - dict_table_t* ib_table,/*!< in: table in Innodb data + dict_table_t* ib_table,/*!< in: table in InnoDB data dictionary */ const dict_index_t* index) /*!< in: index */ { const dict_index_t* ind; unsigned int i; - ut_a(index); - /* If index does not belong to the table object of share structure (ib_table comes from the share structure) search the index->table object instead */ @@ -13139,7 +15965,7 @@ innobase_get_mysql_key_number_for_index( /* If index translation table exists, we will first check the index through index translation table for a match. */ - if (share->idx_trans_tbl.index_mapping) { + if (share->idx_trans_tbl.index_mapping != NULL) { for (i = 0; i < share->idx_trans_tbl.index_count; i++) { if (share->idx_trans_tbl.index_mapping[i] == index) { return(i); @@ -13148,9 +15974,9 @@ innobase_get_mysql_key_number_for_index( /* Print an error message if we cannot find the index in the "index translation table". 
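// Editorial sketch, not part of the patch: the comment in read_time() above
// describes the clustered-index cost as "the scan time for all rows + at
// most one seek per range", where scan_time() is the clustered index size in
// pages.  A small worked example under one reading of that comment (the
// helper name and formula below are illustrative, not the server's code):

#include <cstdio>

// Hypothetical restatement of the cost rule described in the comment.
static double clustered_read_time_estimate(
	double	ranges,		// number of ranges to read
	double	rows,		// rows expected inside those ranges
	double	total_rows,	// upper bound of rows in the table
	double	scan_time)	// full-scan cost, i.e. clustered index pages
{
	if (total_rows < rows) {
		return(scan_time);	// cannot cost more than a full scan
	}

	return(ranges + rows / total_rows * scan_time);
}

int main()
{
	// 10 rows out of 100000, clustered index of 1000 pages, one range:
	// roughly 1 seek + 1% of a full scan = 1.1 "disk seeks".
	std::printf("%.1f\n",
		    clustered_read_time_estimate(1, 10, 100000, 1000));
	return(0);
}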
*/ - if (*index->name != TEMP_INDEX_PREFIX) { - sql_print_error("Cannot find index %s in InnoDB index " - "translation table.", index->name); + if (index->is_committed()) { + sql_print_error("Cannot find index %s in InnoDB index" + " translation table.", index->name()); } } @@ -13175,12 +16001,13 @@ innobase_get_mysql_key_number_for_index( /* Temp index is internal to InnoDB, that is not present in the MySQL index list, so no need to print such mismatch warning. */ - if (*(index->name) != TEMP_INDEX_PREFIX) { + if (index->is_committed()) { sql_print_warning( - "Find index %s in InnoDB index list " - "but not its MySQL index number " - "It could be an InnoDB internal index.", - index->name); + "Found index %s in InnoDB index list" + " but not its MySQL index number." + " It could be an InnoDB internal" + " index.", + index->name()); } return(-1); } @@ -13195,8 +16022,7 @@ innobase_get_mysql_key_number_for_index( Calculate Record Per Key value. Need to exclude the NULL value if innodb_stats_method is set to "nulls_ignored" @return estimated record per key value */ -static -ha_rows +rec_per_key_t innodb_rec_per_key( /*===============*/ dict_index_t* index, /*!< in: dict_index_t structure */ @@ -13204,18 +16030,25 @@ innodb_rec_per_key( calculating rec per key */ ha_rows records) /*!< in: estimated total records */ { - ha_rows rec_per_key; + rec_per_key_t rec_per_key; ib_uint64_t n_diff; ut_a(index->table->stat_initialized); ut_ad(i < dict_index_get_n_unique(index)); + ut_ad(!dict_index_is_spatial(index)); + + if (records == 0) { + /* "Records per key" is meaningless for empty tables. + Return 1.0 because that is most convenient to the Optimizer. */ + return(1.0); + } n_diff = index->stat_n_diff_key_vals[i]; if (n_diff == 0) { - rec_per_key = records; + rec_per_key = static_cast(records); } else if (srv_innodb_stats_method == SRV_STATS_NULLS_IGNORED) { ib_uint64_t n_null; ib_uint64_t n_non_null; @@ -13238,16 +16071,23 @@ innodb_rec_per_key( consider that the table consists mostly of NULL value. Set rec_per_key to 1. */ if (n_diff <= n_null) { - rec_per_key = 1; + rec_per_key = 1.0; } else { /* Need to exclude rows with NULL values from rec_per_key calculation */ - rec_per_key = (ha_rows) - ((records - n_null) / (n_diff - n_null)); + rec_per_key + = static_cast(records - n_null) + / (n_diff - n_null); } } else { DEBUG_SYNC_C("after_checking_for_0"); - rec_per_key = (ha_rows) (records / n_diff); + rec_per_key = static_cast(records) / n_diff; + } + + if (rec_per_key < 1.0) { + /* Values below 1.0 are meaningless and must be due to the + stats being imprecise. */ + rec_per_key = 1.0; } return(rec_per_key); @@ -13257,7 +16097,7 @@ innodb_rec_per_key( Returns statistics information of the table to the MySQL interpreter, in various fields of the handle object. @return HA_ERR_* error code or 0 */ -UNIV_INTERN + int ha_innobase::info_low( /*==================*/ @@ -13265,12 +16105,14 @@ ha_innobase::info_low( bool is_analyze) { dict_table_t* ib_table; - ha_rows rec_per_key; ib_uint64_t n_rows; + char path[FN_REFLEN]; os_file_stat_t stat_info; DBUG_ENTER("info"); + DEBUG_SYNC_C("ha_innobase_info_low"); + /* If we are forcing recovery at a high level, we will suppress statistics calculation on tables, because that may crash the server if an index is badly corrupted. 
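// Editorial sketch, not part of the patch: innodb_rec_per_key() above divides
// the estimated row count by the number of distinct key prefix values,
// discounts NULLs when innodb_stats_method=nulls_ignored, and never reports
// a value below 1.0.  A compact numeric restatement (the function name and
// the sample numbers are made up for illustration):

#include <cstdio>

// Hypothetical restatement of the statistic computed above.
static double rec_per_key_sketch(
	double	records,	// estimated rows in the table
	double	n_diff,		// distinct values for this key prefix
	double	n_null,		// rows where the prefix is NULL
	bool	nulls_ignored)	// innodb_stats_method=nulls_ignored?
{
	if (records == 0) {
		return(1.0);	// meaningless for an empty table
	}

	double	rpk;

	if (n_diff == 0) {
		rpk = records;	// no statistics: assume a single key value
	} else if (nulls_ignored) {
		rpk = (n_diff <= n_null)
			? 1.0	// mostly NULL: fall back to 1
			: (records - n_null) / (n_diff - n_null);
	} else {
		rpk = records / n_diff;
	}

	return(rpk < 1.0 ? 1.0 : rpk);
}

int main()
{
	// 1000 rows, 10 distinct prefix values, 500 of the rows are NULL:
	// counting NULLs:  1000 / 10 = 100 rows per distinct value
	// ignoring NULLs:  n_diff <= n_null, so the estimate collapses to 1.0
	std::printf("%.1f %.1f\n",
		    rec_per_key_sketch(1000, 10, 500, false),
		    rec_per_key_sketch(1000, 10, 500, true));
	return(0);
}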
*/ @@ -13284,11 +16126,11 @@ ha_innobase::info_low( /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - prebuilt->trx->op_info = (char*)"returning various info to MySQL"; + m_prebuilt->trx->op_info = (char*)"returning various info to MySQL"; - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(m_prebuilt->trx); - ib_table = prebuilt->table; + ib_table = m_prebuilt->table; DBUG_ASSERT(ib_table->n_ref_count > 0); if (flag & HA_STATUS_TIME) { @@ -13297,7 +16139,7 @@ ha_innobase::info_low( dict_stats_upd_option_t opt; dberr_t ret; - prebuilt->trx->op_info = "updating table statistics"; + m_prebuilt->trx->op_info = "updating table statistics"; if (dict_stats_is_persistent_enabled(ib_table)) { @@ -13316,19 +16158,20 @@ ha_innobase::info_low( ret = dict_stats_update(ib_table, opt); if (ret != DB_SUCCESS) { - prebuilt->trx->op_info = ""; + m_prebuilt->trx->op_info = ""; DBUG_RETURN(HA_ERR_GENERIC); } - prebuilt->trx->op_info = + m_prebuilt->trx->op_info = "returning various info to MySQL"; } + + stats.update_time = (ulong) ib_table->update_time; } if (flag & HA_STATUS_VARIABLE) { - ulint page_size; ulint stat_clustered_index_size; ulint stat_sum_of_other_index_sizes; @@ -13368,32 +16211,30 @@ ha_innobase::info_low( n_rows can not be 0 unless the table is empty, set to 1 instead. The original problem of bug#29507 is actually fixed in the server code. */ - if (thd_sql_command(user_thd) == SQLCOM_TRUNCATE) { + if (thd_sql_command(m_user_thd) == SQLCOM_TRUNCATE) { n_rows = 1; - /* We need to reset the prebuilt value too, otherwise + /* We need to reset the m_prebuilt value too, otherwise checks for values greater than the last value written to the table will fail and the autoinc counter will not be updated. This will force write_row() into attempting an update of the table's AUTOINC counter. */ - prebuilt->autoinc_last_value = 0; + m_prebuilt->autoinc_last_value = 0; } - page_size = dict_table_zip_size(ib_table); - if (page_size == 0) { - page_size = UNIV_PAGE_SIZE; - } + const page_size_t& page_size + = dict_table_page_size(ib_table); stats.records = (ha_rows) n_rows; stats.deleted = 0; stats.data_file_length = ((ulonglong) stat_clustered_index_size) - * page_size; + * page_size.physical(); stats.index_file_length = ((ulonglong) stat_sum_of_other_index_sizes) - * page_size; + * page_size.physical(); /* Since fsp_get_available_space_in_free_extents() is acquiring latches inside InnoDB, we do not call it if we @@ -13408,33 +16249,36 @@ ha_innobase::info_low( the ha_statistics' constructor. Also we only need delete_length to be set when HA_STATUS_VARIABLE_EXTRA is set */ - } else if (UNIV_UNLIKELY - (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE)) { + } else if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { /* Avoid accessing the tablespace if innodb_crash_recovery is set to a high value. */ stats.delete_length = 0; } else { - ullint avail_space; + uintmax_t avail_space; avail_space = fsp_get_available_space_in_free_extents( ib_table->space); - if (avail_space == ULLINT_UNDEFINED) { + if (avail_space == UINTMAX_MAX) { THD* thd; + char errbuf[MYSYS_STRERROR_SIZE]; thd = ha_thd(); - + my_strerror(errbuf, sizeof(errbuf), + errno); push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, ER_CANT_GET_STAT, - "InnoDB: Trying to get the free " - "space for table %s but its " - "tablespace has been discarded or " - "the .ibd file is missing. Setting " - "the free space to zero. 
" - "(errno: %M)", - ib_table->name, errno); + "InnoDB: Trying to get the free" + " space for table %s but its" + " tablespace has been discarded or" + " the .ibd file is missing. Setting" + " the free space to zero." + " (errno: %d - %s)", + ib_table->name.m_name, errno, + errbuf); + stats.delete_length = 0; } else { @@ -13455,12 +16299,11 @@ ha_innobase::info_low( if (flag & HA_STATUS_CONST) { ulong i; - char path[FN_REFLEN]; /* Verify the number of index in InnoDB and MySQL - matches up. If prebuilt->clust_index_was_generated + matches up. If m_prebuilt->clust_index_was_generated holds, InnoDB defines GEN_CLUST_INDEX internally */ ulint num_innodb_index = UT_LIST_GET_LEN(ib_table->indexes) - - prebuilt->clust_index_was_generated; + - m_prebuilt->clust_index_was_generated; if (table->s->keys < num_innodb_index) { /* If there are too many indexes defined inside InnoDB, ignore those that are being @@ -13480,7 +16323,7 @@ ha_innobase::info_low( time frame, dict_index_is_online_ddl() would not hold and the index would still not be included in TABLE_SHARE. */ - if (*index->name == TEMP_INDEX_PREFIX) { + if (!index->is_committed()) { num_innodb_index--; } } @@ -13495,7 +16338,7 @@ ha_innobase::info_low( if (table->s->keys != num_innodb_index) { ib_table->dict_frm_mismatch = DICT_FRM_INCONSISTENT_KEYS; - ib_push_frm_error(user_thd, ib_table, table, num_innodb_index, true); + ib_push_frm_error(m_user_thd, ib_table, table, num_innodb_index, true); } if (!(flag & HA_STATUS_NO_LOCK)) { @@ -13506,7 +16349,7 @@ ha_innobase::info_low( for (i = 0; i < table->s->keys; i++) { ulong j; - rec_per_key = 1; + /* We could get index quickly through internal index mapping with the index translation table. The identity of index (match up index name with @@ -13516,84 +16359,94 @@ ha_innobase::info_low( if (index == NULL) { ib_table->dict_frm_mismatch = DICT_FRM_INCONSISTENT_KEYS; - ib_push_frm_error(user_thd, ib_table, table, num_innodb_index, true); + ib_push_frm_error(m_user_thd, ib_table, table, num_innodb_index, true); break; } + KEY* key = &table->key_info[i]; + + /* Check if this index supports index statistics. */ + // JAN: TODO: MySQL 5.7 index statistics + // if (!key->supports_records_per_key()) { + // continue; + // } + // for (j = 0; j < key->actual_key_parts; j++) { + for (j = 0; j < table->key_info[i].ext_key_parts; j++) { - if (table->key_info[i].flags & HA_FULLTEXT) { - /* The whole concept has no validity - for FTS indexes. */ - table->key_info[i].rec_per_key[j] = 1; + if ((key->flags & HA_FULLTEXT) + || (key->flags & HA_SPATIAL)) { + + /* The record per key does not apply to + FTS or Spatial indexes. */ + /* + key->rec_per_key[j] = 1; + key->set_records_per_key(j, 1.0); + */ continue; } if (j + 1 > index->n_uniq) { sql_print_error( "Index %s of %s has %lu columns" - " unique inside InnoDB, but " - "MySQL is asking statistics for" - " %lu columns. Have you mixed " - "up .frm files from different " - "installations? " - "See " REFMAN - "innodb-troubleshooting.html\n", - index->name, - ib_table->name, + " unique inside InnoDB, but" + " MySQL is asking statistics for" + " %lu columns. Have you mixed" + " up .frm files from different" + " installations? 
%s", + index->name(), + ib_table->name.m_name, (unsigned long) - index->n_uniq, j + 1); + index->n_uniq, j + 1, + TROUBLESHOOTING_MSG); break; } - DBUG_EXECUTE_IF("ib_ha_innodb_stat_not_initialized", - index->table->stat_initialized = FALSE;); - - if (!ib_table->stat_initialized || - (index->table != ib_table || - !index->table->stat_initialized)) { - fprintf(stderr, - "InnoDB: Warning: Index %s points to table %s" - " and ib_table %s statistics is initialized %d " - " but index table %s initialized %d " - " mysql table is %s. Have you mixed " - "up .frm files from different " - "installations? " - "See " REFMAN - "innodb-troubleshooting.html\n", - index->name, - index->table->name, - ib_table->name, - ib_table->stat_initialized, - index->table->name, - index->table->stat_initialized, - table->s->table_name.str - ); - - /* This is better than - assert on below function */ - dict_stats_init(index->table); - } - - rec_per_key = innodb_rec_per_key( - index, j, stats.records); + /* innodb_rec_per_key() will use + index->stat_n_diff_key_vals[] and the value we + pass index->table->stat_n_rows. Both are + calculated by ANALYZE and by the background + stats gathering thread (which kicks in when too + much of the table has been changed). In + addition table->stat_n_rows is adjusted with + each DML (e.g. ++ on row insert). Those + adjustments are not MVCC'ed and not even + reversed on rollback. So, + index->stat_n_diff_key_vals[] and + index->table->stat_n_rows could have been + calculated at different time. This is + acceptable. */ + + const rec_per_key_t rec_per_key + = innodb_rec_per_key( + index, j, + index->table->stat_n_rows); + + // JAN: TODO: MySQL 5.7 New interface + // key->set_records_per_key(j, rec_per_key); + + /* The code below is legacy and should be + removed together with this comment once we + are sure the new floating point rec_per_key, + set via set_records_per_key(), works fine. */ + + ulong rec_per_key_int = static_cast( + innodb_rec_per_key(index, j, + stats.records)); /* Since MySQL seems to favor table scans too much over index searches, we pretend index selectivity is 2 times better than our estimate: */ - rec_per_key = rec_per_key / 2; + rec_per_key_int = rec_per_key_int / 2; - if (rec_per_key == 0) { - rec_per_key = 1; + if (rec_per_key_int == 0) { + rec_per_key_int = 1; } - table->key_info[i].rec_per_key[j] = - rec_per_key >= ~(ulong) 0 ? ~(ulong) 0 : - (ulong) rec_per_key; + key->rec_per_key[j] = rec_per_key_int; } - } if (!(flag & HA_STATUS_NO_LOCK)) { @@ -13601,8 +16454,7 @@ ha_innobase::info_low( } my_snprintf(path, sizeof(path), "%s/%s%s", - mysql_data_home, - table->s->normalized_path.str, + mysql_data_home, table->s->normalized_path.str, reg_ext); unpack_filename(path,path); @@ -13610,7 +16462,10 @@ ha_innobase::info_low( /* Note that we do not know the access time of the table, nor the CHECK TABLE time, nor the UPDATE or INSERT time. */ - if (os_file_get_status(path, &stat_info, false) == DB_SUCCESS) { + if (os_file_get_status( + path, &stat_info, false, + (dict_table_is_intrinsic(ib_table) + ? 
false : srv_read_only_mode)) == DB_SUCCESS) { stats.create_time = (ulong) stat_info.ctime; } } @@ -13618,34 +16473,44 @@ ha_innobase::info_low( if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { goto func_exit; - } - if (flag & HA_STATUS_ERRKEY) { + } else if (flag & HA_STATUS_ERRKEY) { const dict_index_t* err_index; - ut_a(prebuilt->trx); - ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(m_prebuilt->trx); + ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); - err_index = trx_get_error_info(prebuilt->trx); + err_index = trx_get_error_info(m_prebuilt->trx); if (err_index) { errkey = innobase_get_mysql_key_number_for_index( - share, table, ib_table, err_index); + m_share, table, ib_table, err_index); } else { errkey = (unsigned int) ( - (prebuilt->trx->error_key_num + (m_prebuilt->trx->error_key_num == ULINT_UNDEFINED) ? ~0 - : prebuilt->trx->error_key_num); + : m_prebuilt->trx->error_key_num); } } if ((flag & HA_STATUS_AUTO) && table->found_next_number_field) { - stats.auto_increment_value = innobase_peek_autoinc(); + + ulonglong auto_inc_val = innobase_peek_autoinc(); + /* Initialize autoinc value if not set. */ + if (auto_inc_val == 0) { + + dict_table_autoinc_lock(m_prebuilt->table); + innobase_initialize_autoinc(); + dict_table_autoinc_unlock(m_prebuilt->table); + + auto_inc_val = innobase_peek_autoinc(); + } + stats.auto_increment_value = auto_inc_val; } func_exit: - prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = (char*)""; DBUG_RETURN(0); } @@ -13654,31 +16519,95 @@ func_exit: Returns statistics information of the table to the MySQL interpreter, in various fields of the handle object. @return HA_ERR_* error code or 0 */ -UNIV_INTERN + int ha_innobase::info( /*==============*/ uint flag) /*!< in: what information is requested */ { - return(this->info_low(flag, false /* not ANALYZE */)); + return(info_low(flag, false /* not ANALYZE */)); } -/**********************************************************************//** +/** Enable indexes. +@param[in] mode enable index mode. +@return HA_ERR_* error code or 0 */ +int +ha_innobase::enable_indexes( + uint mode) +{ + int error = HA_ERR_WRONG_COMMAND; + + /* Enable index only for intrinsic table. Behavior for all other + table continue to remain same. */ + + if (dict_table_is_intrinsic(m_prebuilt->table)) { + ut_ad(mode == HA_KEY_SWITCH_ALL); + for (dict_index_t* index + = UT_LIST_GET_FIRST(m_prebuilt->table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + + /* InnoDB being clustered index we can't disable/enable + clustered index itself. */ + if (dict_index_is_clust(index)) { + continue; + } + + index->allow_duplicates = false; + } + error = 0; + } + + return(error); +} + +/** Disable indexes. +@param[in] mode disable index mode. +@return HA_ERR_* error code or 0 */ +int +ha_innobase::disable_indexes( + uint mode) +{ + int error = HA_ERR_WRONG_COMMAND; + + /* Disable index only for intrinsic table. Behavior for all other + table continue to remain same. */ + + if (dict_table_is_intrinsic(m_prebuilt->table)) { + ut_ad(mode == HA_KEY_SWITCH_ALL); + for (dict_index_t* index + = UT_LIST_GET_FIRST(m_prebuilt->table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + + /* InnoDB being clustered index we can't disable/enable + clustered index itself. */ + if (dict_index_is_clust(index)) { + continue; + } + + index->allow_duplicates = true; + } + error = 0; + } + + return(error); +} + +/* Updates index cardinalities of the table, based on random dives into each index tree. 
This does NOT calculate exact statistics on the table. -@return HA_ADMIN_* error code or HA_ADMIN_OK */ -UNIV_INTERN +@return HA_ADMIN_* error code or HA_ADMIN_OK */ + int ha_innobase::analyze( /*=================*/ THD* thd, /*!< in: connection thread handle */ HA_CHECK_OPT* check_opt) /*!< in: currently ignored */ { - int ret; - - /* Simply call this->info_low() with all the flags + /* Simply call info_low() with all the flags and request recalculation of the statistics */ - ret = this->info_low( + int ret = info_low( HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE, true /* this is ANALYZE */); @@ -13692,14 +16621,17 @@ ha_innobase::analyze( /**********************************************************************//** This is mapped to "ALTER TABLE tablename ENGINE=InnoDB", which rebuilds the table in MySQL. */ -UNIV_INTERN + int ha_innobase::optimize( /*==================*/ THD* thd, /*!< in: connection thread handle */ HA_CHECK_OPT* check_opt) /*!< in: currently ignored */ { - /*FTS-FIXME: Since MySQL doesn't support engine-specific commands, + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + /* FTS-FIXME: Since MySQL doesn't support engine-specific commands, we have to hijack some existing command in order to be able to test the new admin commands added in InnoDB's FTS support. For now, we use MySQL's OPTIMIZE command, normally mapped to ALTER TABLE in @@ -13711,7 +16643,7 @@ ha_innobase::optimize( if (srv_defragment) { int err; - err = defragment_table(prebuilt->table->name, NULL, false); + err = defragment_table(m_prebuilt->table->name.m_name, NULL, false); if (err == 0) { return (HA_ADMIN_OK); @@ -13719,7 +16651,7 @@ ha_innobase::optimize( push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, err, "InnoDB: Cannot defragment table %s: returned error code %d\n", - prebuilt->table->name, err); + m_prebuilt->table->name, err); if(err == ER_SP_ALREADY_EXISTS) { return (HA_ADMIN_OK); @@ -13730,10 +16662,10 @@ ha_innobase::optimize( } if (innodb_optimize_fulltext_only) { - if (prebuilt->table->fts && prebuilt->table->fts->cache - && !dict_table_is_discarded(prebuilt->table)) { - fts_sync_table(prebuilt->table, false, true); - fts_optimize_table(prebuilt->table); + if (m_prebuilt->table->fts && m_prebuilt->table->fts->cache + && !dict_table_is_discarded(m_prebuilt->table)) { + fts_sync_table(m_prebuilt->table, true, false); + fts_optimize_table(m_prebuilt->table); } return(HA_ADMIN_OK); } else { @@ -13746,8 +16678,8 @@ ha_innobase::optimize( Tries to check that an InnoDB table is not corrupted. If corruption is noticed, prints to stderr information about it. In case of corruption may also assert a failure and crash the server. 
-@return HA_ADMIN_CORRUPT or HA_ADMIN_OK */ -UNIV_INTERN +@return HA_ADMIN_CORRUPT or HA_ADMIN_OK */ + int ha_innobase::check( /*===============*/ @@ -13759,22 +16691,23 @@ ha_innobase::check( ulint n_rows_in_table = ULINT_UNDEFINED; bool is_ok = true; ulint old_isolation_level; - ibool table_corrupted; + dberr_t ret; DBUG_ENTER("ha_innobase::check"); DBUG_ASSERT(thd == ha_thd()); - ut_a(prebuilt->trx); - ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == thd_to_trx(thd)); + ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(m_prebuilt->trx == thd_to_trx(thd)); + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - if (prebuilt->mysql_template == NULL) { + if (m_prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template in index scans done in checking */ build_template(true); } - if (dict_table_is_discarded(prebuilt->table)) { + if (dict_table_is_discarded(m_prebuilt->table)) { ib_senderrf( thd, @@ -13784,7 +16717,7 @@ ha_innobase::check( DBUG_RETURN(HA_ADMIN_CORRUPT); - } else if (prebuilt->table->ibd_file_missing) { + } else if (m_prebuilt->table->ibd_file_missing) { ib_senderrf( thd, IB_LOG_LEVEL_ERROR, @@ -13794,87 +16727,74 @@ ha_innobase::check( DBUG_RETURN(HA_ADMIN_CORRUPT); } - if (prebuilt->table->corrupted) { - char index_name[MAX_FULL_NAME_LEN + 1]; - /* If some previous operation has marked the table as + if (m_prebuilt->table->corrupted) { + /* If some previous oeration has marked the table as corrupted in memory, and has not propagated such to clustered index, we will do so here */ - index = dict_table_get_first_index(prebuilt->table); + index = dict_table_get_first_index(m_prebuilt->table); if (!dict_index_is_corrupted(index)) { - row_mysql_lock_data_dictionary(prebuilt->trx); - dict_set_corrupted(index, prebuilt->trx, "CHECK TABLE"); - row_mysql_unlock_data_dictionary(prebuilt->trx); + dict_set_corrupted( + index, m_prebuilt->trx, "CHECK TABLE"); } - innobase_format_name(index_name, sizeof index_name, - index->name, TRUE); - - push_warning_printf(thd, + push_warning_printf(m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_INDEX_CORRUPT, "InnoDB: Index %s is marked as" - " corrupted", index_name); + " corrupted", + index->name()); /* Now that the table is already marked as corrupted, there is no need to check any index of this table */ - prebuilt->trx->op_info = ""; + m_prebuilt->trx->op_info = ""; + /* JAN: TODO: MySQL 5.7 + if (thd_killed(m_user_thd)) { + thd_set_kill_status(m_user_thd); + } + */ DBUG_RETURN(HA_ADMIN_CORRUPT); } - prebuilt->trx->op_info = "checking table"; - - old_isolation_level = prebuilt->trx->isolation_level; + old_isolation_level = m_prebuilt->trx->isolation_level; /* We must run the index record counts at an isolation level >= READ COMMITTED, because a dirty read can see a wrong number of records in some index; to play safe, we use always REPEATABLE READ here */ + m_prebuilt->trx->isolation_level = TRX_ISO_REPEATABLE_READ; - prebuilt->trx->isolation_level = TRX_ISO_REPEATABLE_READ; - - /* Check whether the table is already marked as corrupted - before running the check table */ - table_corrupted = prebuilt->table->corrupted; + ut_ad(!m_prebuilt->table->corrupted); - /* Reset table->corrupted bit so that check table can proceed to - do additional check */ - prebuilt->table->corrupted = FALSE; - - for (index = dict_table_get_first_index(prebuilt->table); + for (index = dict_table_get_first_index(m_prebuilt->table); index != NULL; index = dict_table_get_next_index(index)) { - char 
index_name[MAX_FULL_NAME_LEN + 1]; - /* If this is an index being created or dropped, skip */ - if (*index->name == TEMP_INDEX_PREFIX) { + if (!index->is_committed()) { continue; } - if (!(check_opt->flags & T_QUICK)) { + if (!(check_opt->flags & T_QUICK) + && !dict_index_is_corrupted(index)) { /* Enlarge the fatal lock wait timeout during CHECK TABLE. */ - os_increment_counter_by_amount( - server_mutex, - srv_fatal_semaphore_wait_threshold, + os_atomic_increment_ulint( + &srv_fatal_semaphore_wait_threshold, SRV_SEMAPHORE_WAIT_EXTENSION); - dberr_t err = btr_validate_index(index, prebuilt->trx); + + dberr_t err = btr_validate_index( + index, m_prebuilt->trx, false); /* Restore the fatal lock wait timeout after CHECK TABLE. */ - os_decrement_counter_by_amount( - server_mutex, - srv_fatal_semaphore_wait_threshold, + os_atomic_decrement_ulint( + &srv_fatal_semaphore_wait_threshold, SRV_SEMAPHORE_WAIT_EXTENSION); if (err != DB_SUCCESS) { is_ok = false; - innobase_format_name( - index_name, sizeof index_name, - index->name, TRUE); - if (err == DB_DECRYPTION_FAILED) { push_warning_printf( thd, @@ -13891,7 +16811,7 @@ ha_innobase::check( ER_NOT_KEYFILE, "InnoDB: The B-tree of" " index %s is corrupted.", - index_name); + index->name()); } continue; @@ -13901,72 +16821,84 @@ ha_innobase::check( /* Instead of invoking change_active_index(), set up a dummy template for non-locking reads, disabling access to the clustered index. */ - prebuilt->index = index; + m_prebuilt->index = index; - prebuilt->index_usable = row_merge_is_index_usable( - prebuilt->trx, prebuilt->index); + m_prebuilt->index_usable = row_merge_is_index_usable( + m_prebuilt->trx, m_prebuilt->index); DBUG_EXECUTE_IF( "dict_set_index_corrupted", if (!dict_index_is_clust(index)) { - prebuilt->index_usable = FALSE; - row_mysql_lock_data_dictionary(prebuilt->trx); - dict_set_corrupted(index, prebuilt->trx, "dict_set_index_corrupted");; - row_mysql_unlock_data_dictionary(prebuilt->trx); + m_prebuilt->index_usable = FALSE; + // row_mysql_lock_data_dictionary(m_prebuilt->trx); + dict_set_corrupted(index, m_prebuilt->trx, "dict_set_index_corrupted");; + // row_mysql_unlock_data_dictionary(m_prebuilt->trx); }); - if (UNIV_UNLIKELY(!prebuilt->index_usable)) { - innobase_format_name( - index_name, sizeof index_name, - prebuilt->index->name, TRUE); - - if (dict_index_is_corrupted(prebuilt->index)) { + if (UNIV_UNLIKELY(!m_prebuilt->index_usable)) { + if (dict_index_is_corrupted(m_prebuilt->index)) { push_warning_printf( - user_thd, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_INDEX_CORRUPT, "InnoDB: Index %s is marked as" " corrupted", - index_name); + index->name()); is_ok = false; } else { push_warning_printf( - thd, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TABLE_DEF_CHANGED, "InnoDB: Insufficient history for" " index %s", - index_name); + index->name()); } continue; } - prebuilt->sql_stat_start = TRUE; - prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE; - prebuilt->n_template = 0; - prebuilt->need_to_access_clustered = FALSE; + m_prebuilt->sql_stat_start = TRUE; + m_prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE; + m_prebuilt->n_template = 0; + m_prebuilt->need_to_access_clustered = FALSE; - dtuple_set_n_fields(prebuilt->search_tuple, 0); + dtuple_set_n_fields(m_prebuilt->search_tuple, 0); - prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->select_lock_type = LOCK_NONE; - if (!row_check_index_for_mysql(prebuilt, index, &n_rows)) { - innobase_format_name( - index_name, sizeof index_name, - index->name, TRUE); + /* 
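// Editorial sketch, not part of the patch: CHECK TABLE above widens
// srv_fatal_semaphore_wait_threshold around btr_validate_index() and then
// narrows it again by hand.  A scope guard expresses the same increment /
// decrement pair and cannot miss the restore on an early return.  The guard
// below is hypothetical and uses std::atomic for illustration instead of the
// os_atomic_*_ulint() wrappers used in the hunk.

#include <atomic>

// Hypothetical scope guard: add `extension` on entry, subtract it on exit.
class Semaphore_wait_extension_guard {
public:
	Semaphore_wait_extension_guard(
		std::atomic<unsigned long>&	threshold,
		unsigned long			extension)
		: m_threshold(threshold), m_extension(extension)
	{
		m_threshold.fetch_add(m_extension);
	}

	~Semaphore_wait_extension_guard()
	{
		m_threshold.fetch_sub(m_extension);
	}

private:
	std::atomic<unsigned long>&	m_threshold;
	const unsigned long		m_extension;
};

// Usage sketch:
//	{
//		Semaphore_wait_extension_guard	guard(
//			fatal_semaphore_wait_threshold,
//			SEMAPHORE_WAIT_EXTENSION);
//		// long-running index validation here
//	}	// threshold restored here, even on an early return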
Scan this index. */ + if (dict_index_is_spatial(index)) { + ret = row_count_rtree_recs(m_prebuilt, &n_rows); + } else { + ret = row_scan_index_for_mysql( + m_prebuilt, index, true, &n_rows); + } + DBUG_EXECUTE_IF( + "dict_set_index_corrupted", + if (!dict_index_is_clust(index)) { + ret = DB_CORRUPTION; + }); + + if (ret == DB_INTERRUPTED || thd_killed(m_user_thd)) { + /* Do not report error since this could happen + during shutdown */ + break; + } + if (ret != DB_SUCCESS) { + /* Assume some kind of corruption. */ push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE, "InnoDB: The B-tree of" " index %s is corrupted.", - index_name); + index->name()); is_ok = false; dict_set_corrupted( - index, prebuilt->trx, "CHECK TABLE-check index"); + index, m_prebuilt->trx, "CHECK TABLE-check index"); } - if (thd_kill_level(user_thd)) { + if (thd_kill_level(m_user_thd)) { break; } @@ -13975,7 +16907,7 @@ ha_innobase::check( index->name); #endif - if (index == dict_table_get_first_index(prebuilt->table)) { + if (index == dict_table_get_first_index(m_prebuilt->table)) { n_rows_in_table = n_rows; } else if (!(index->type & DICT_FTS) && (n_rows != n_rows_in_table)) { @@ -13984,36 +16916,22 @@ ha_innobase::check( ER_NOT_KEYFILE, "InnoDB: Index '%-.200s' contains %lu" " entries, should be %lu.", - index->name, + index->name(), (ulong) n_rows, (ulong) n_rows_in_table); is_ok = false; dict_set_corrupted( - index, prebuilt->trx, + index, m_prebuilt->trx, "CHECK TABLE; Wrong count"); } } - if (table_corrupted) { - /* If some previous operation has marked the table as - corrupted in memory, and has not propagated such to - clustered index, we will do so here */ - index = dict_table_get_first_index(prebuilt->table); - - if (!dict_index_is_corrupted(index)) { - dict_set_corrupted( - index, prebuilt->trx, "CHECK TABLE"); - } - prebuilt->table->corrupted = TRUE; - } - /* Restore the original isolation level */ - prebuilt->trx->isolation_level = old_isolation_level; - + m_prebuilt->trx->isolation_level = old_isolation_level; +#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /* We validate the whole adaptive hash index for all tables at every CHECK TABLE only when QUICK flag is not present. */ -#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG if (!(check_opt->flags & T_QUICK) && !btr_search_validate()) { push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE, @@ -14021,10 +16939,10 @@ ha_innobase::check( is_ok = false; } #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */ - - prebuilt->trx->op_info = ""; - if (thd_kill_level(user_thd)) { - my_error(ER_QUERY_INTERRUPTED, MYF(0)); + m_prebuilt->trx->op_info = ""; + //if (thd_killed(m_user_thd)) { + if (thd_kill_level(m_user_thd)) { + // thd_set_kill_status(m_user_thd); } DBUG_RETURN(is_ok ? 
HA_ADMIN_OK : HA_ADMIN_CORRUPT); @@ -14056,23 +16974,23 @@ ha_innobase::update_table_comment( update_thd(ha_thd()); - prebuilt->trx->op_info = (char*)"returning table comment"; + m_prebuilt->trx->op_info = (char*)"returning table comment"; /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(m_prebuilt->trx); #define SSTR( x ) reinterpret_cast< std::ostringstream & >( \ ( std::ostringstream() << std::dec << x ) ).str() fk_str.append("InnoDB free: "); fk_str.append(SSTR(fsp_get_available_space_in_free_extents( - prebuilt->table->space))); + m_prebuilt->table->space))); fk_str.append(dict_print_info_on_foreign_keys( - FALSE, prebuilt->trx, - prebuilt->table)); + FALSE, m_prebuilt->trx, + m_prebuilt->table)); flen = fk_str.length(); @@ -14098,7 +17016,7 @@ ha_innobase::update_table_comment( pos[flen] = 0; } - prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = (char*)""; return(str ? str : (char*) comment); } @@ -14108,14 +17026,12 @@ Gets the foreign key create info for a table stored in InnoDB. @return own: character string in the form which can be inserted to the CREATE TABLE statement, MUST be freed with ha_innobase::free_foreign_key_create_info */ -UNIV_INTERN + char* ha_innobase::get_foreign_key_create_info(void) /*==========================================*/ { - char* fk_str = 0; - - ut_a(prebuilt != NULL); + ut_a(m_prebuilt != NULL); /* We do not know if MySQL can call this function before calling external_lock(). To be safe, update the thd of the current table @@ -14123,23 +17039,29 @@ ha_innobase::get_foreign_key_create_info(void) update_thd(ha_thd()); - prebuilt->trx->op_info = (char*)"getting info on foreign keys"; + m_prebuilt->trx->op_info = (char*)"getting info on foreign keys"; /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(m_prebuilt->trx); + - /* Output the data to a temporary file */ + /* Output the data to a temporary string */ std::string str = dict_print_info_on_foreign_keys( - TRUE, prebuilt->trx, - prebuilt->table); + TRUE, m_prebuilt->trx, + m_prebuilt->table); - prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = (char*)""; /* Allocate buffer for the string */ - fk_str = (char*) my_malloc(str.length() + 1, MYF(0)); + char* fk_str = (char*) my_malloc(str.length() + 1, MYF(0)); + + /* JAN: TODO: MySQL 5.7 + fk_str = reinterpret_cast( + my_malloc(PSI_INSTRUMENT_ME, str.length() + 1, MYF(0))); + */ if (fk_str) { memcpy(fk_str, str.c_str(), str.length()); @@ -14157,13 +17079,13 @@ static FOREIGN_KEY_INFO* get_foreign_key_info( /*=================*/ - THD* thd, /*!< in: user thread handle */ - dict_foreign_t* foreign) /*!< in: foreign key constraint */ + THD* thd, /*!< in: user thread handle */ + dict_foreign_t* foreign)/*!< in: foreign key constraint */ { FOREIGN_KEY_INFO f_key_info; FOREIGN_KEY_INFO* pf_key_info; uint i = 0; - ulint len; + size_t len; char tmp_buff[NAME_LEN+1]; char name_buff[NAME_LEN+1]; const char* ptr; @@ -14171,8 +17093,8 @@ get_foreign_key_info( LEX_STRING* name = NULL; ptr = dict_remove_db_name(foreign->id); - f_key_info.foreign_id = thd_make_lex_string(thd, 0, ptr, - (uint) strlen(ptr), 1); + f_key_info.foreign_id = thd_make_lex_string( + thd, 0, ptr, (uint) strlen(ptr), 1); /* 
Name format: database name, '/', table name, '\0' */ @@ -14255,12 +17177,14 @@ get_foreign_key_info( thd, f_key_info.update_method, ptr, static_cast(len), 1); - if (foreign->referenced_index && foreign->referenced_index->name) { - referenced_key_name = thd_make_lex_string(thd, - f_key_info.referenced_key_name, - foreign->referenced_index->name, - (uint) strlen(foreign->referenced_index->name), - 1); + if (foreign->referenced_index + && foreign->referenced_index->name != NULL) { + referenced_key_name = thd_make_lex_string( + thd, + f_key_info.referenced_key_name, + foreign->referenced_index->name, + (uint) strlen(foreign->referenced_index->name), + 1); } else { referenced_key_name = NULL; } @@ -14276,83 +17200,247 @@ get_foreign_key_info( /*******************************************************************//** Gets the list of foreign keys in this table. @return always 0, that is, always succeeds */ -UNIV_INTERN + int ha_innobase::get_foreign_key_list( /*==============================*/ THD* thd, /*!< in: user thread handle */ List* f_key_list) /*!< out: foreign key list */ { - FOREIGN_KEY_INFO* pf_key_info; - dict_foreign_t* foreign; + update_thd(ha_thd()); + + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + m_prebuilt->trx->op_info = "getting list of foreign keys"; + + mutex_enter(&dict_sys->mutex); + + for (dict_foreign_set::iterator it + = m_prebuilt->table->foreign_set.begin(); + it != m_prebuilt->table->foreign_set.end(); + ++it) { + + FOREIGN_KEY_INFO* pf_key_info; + dict_foreign_t* foreign = *it; + + pf_key_info = get_foreign_key_info(thd, foreign); + + if (pf_key_info != NULL) { + f_key_list->push_back(pf_key_info); + } + } + + mutex_exit(&dict_sys->mutex); - ut_a(prebuilt != NULL); + m_prebuilt->trx->op_info = ""; + + return(0); +} + +/*******************************************************************//** +Gets the set of foreign keys where this table is the referenced table. +@return always 0, that is, always succeeds */ + +int +ha_innobase::get_parent_foreign_key_list( +/*=====================================*/ + THD* thd, /*!< in: user thread handle */ + List* f_key_list) /*!< out: foreign key list */ +{ update_thd(ha_thd()); - prebuilt->trx->op_info = "getting list of foreign keys"; + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - trx_search_latch_release_if_reserved(prebuilt->trx); + m_prebuilt->trx->op_info = "getting list of referencing foreign keys"; - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); for (dict_foreign_set::iterator it - = prebuilt->table->foreign_set.begin(); - it != prebuilt->table->foreign_set.end(); + = m_prebuilt->table->referenced_set.begin(); + it != m_prebuilt->table->referenced_set.end(); ++it) { - foreign = *it; + FOREIGN_KEY_INFO* pf_key_info; + dict_foreign_t* foreign = *it; pf_key_info = get_foreign_key_info(thd, foreign); - if (pf_key_info) { + + if (pf_key_info != NULL) { f_key_list->push_back(pf_key_info); } - } - - mutex_exit(&(dict_sys->mutex)); + } + + mutex_exit(&dict_sys->mutex); + + m_prebuilt->trx->op_info = ""; + + return(0); +} + +/** Table list item structure is used to store only the table +and name. It is used by get_cascade_foreign_key_table_list to store +the intermediate result for fetching the table set. */ +struct table_list_item { + /** InnoDB table object */ + const dict_table_t* table; + /** Table name */ + const char* name; +}; + +/** Structure to compare two st_tablename objects using their +db and tablename. It is used in the ordering of cascade_fk_set. 
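
The rewritten get_foreign_key_list()/get_parent_foreign_key_list() above share one shape: take dict_sys->mutex, walk a set of constraints, convert each entry with get_foreign_key_info() and append it to the caller's list of FOREIGN_KEY_INFO objects, then release the mutex. The following is only a standalone sketch of that convert-and-collect loop; the types, std::mutex and std::vector are mock stand-ins, not InnoDB's dictionary objects or MySQL's List.

    #include <mutex>
    #include <set>
    #include <string>
    #include <vector>

    /* Mock of dict_foreign_t: just the constraint id. */
    struct foreign_key {
        std::string id;

        bool operator<(const foreign_key& rhs) const
        {
            return(id < rhs.id);
        }
    };

    /* Mock of FOREIGN_KEY_INFO. */
    struct foreign_key_info {
        std::string foreign_id;
    };

    static std::mutex dict_mutex;   /* stand-in for dict_sys->mutex */

    /* Analogue of get_foreign_key_list(): convert every constraint in
    the set and push it onto the output list, under the mutex. */
    static void collect_foreign_keys(
        const std::set<foreign_key>&    constraints,
        std::vector<foreign_key_info>*  out)
    {
        std::lock_guard<std::mutex> guard(dict_mutex);

        for (std::set<foreign_key>::const_iterator it = constraints.begin();
             it != constraints.end(); ++it) {

            foreign_key_info info;
            info.foreign_id = it->id;

            out->push_back(info);
        }
    }

    int main()
    {
        std::set<foreign_key> fks;
        foreign_key fk;
        fk.id = "fk_child_parent";
        fks.insert(fk);

        std::vector<foreign_key_info> list;
        collect_foreign_keys(fks, &list);

        return(list.size() == 1 ? 0 : 1);
    }
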
+It returns true if the first argument precedes the second argument +and false otherwise. */ +struct tablename_compare { + + bool operator()(const st_handler_tablename lhs, + const st_handler_tablename rhs) const + { + int cmp = strcmp(lhs.db, rhs.db); + if (cmp == 0) { + cmp = strcmp(lhs.tablename, rhs.tablename); + } + + return(cmp < 0); + } +}; + +/** Get the table name and database name for the given table. +@param[in,out] thd user thread handle +@param[out] f_key_info pointer to table_name_info object +@param[in] foreign foreign key constraint. */ +static +void +get_table_name_info( + THD* thd, + st_handler_tablename* f_key_info, + const dict_foreign_t* foreign) +{ + // JAN: TODO: MySQL 5.7 include/mysql_com.h +#define FILENAME_CHARSET_MBMAXLEN 5 + char tmp_buff[NAME_CHAR_LEN * FILENAME_CHARSET_MBMAXLEN + 1]; + char name_buff[NAME_CHAR_LEN * FILENAME_CHARSET_MBMAXLEN + 1]; + const char* ptr; + + size_t len = dict_get_db_name_len( + foreign->referenced_table_name_lookup); + ut_memcpy(tmp_buff, foreign->referenced_table_name_lookup, len); + tmp_buff[len] = 0; + + ut_ad(len < sizeof(tmp_buff)); + + len = filename_to_tablename(tmp_buff, name_buff, sizeof(name_buff)); + f_key_info->db = thd_strmake(thd, name_buff, len); + + ptr = dict_remove_db_name(foreign->referenced_table_name_lookup); + len = filename_to_tablename(ptr, name_buff, sizeof(name_buff)); + f_key_info->tablename = thd_strmake(thd, name_buff, len); +} + +/** Get the list of tables ordered by the dependency on the other tables using +the 'CASCADE' foreign key constraint. +@param[in,out] thd user thread handle +@param[out] fk_table_list set of tables name info for the + dependent table +@retval 0 for success. */ +int +ha_innobase::get_cascade_foreign_key_table_list( + THD* thd, + List* fk_table_list) +{ + TrxInInnoDB trx_in_innodb(m_prebuilt->trx); + + m_prebuilt->trx->op_info = "getting cascading foreign keys"; + + std::list > table_list; + + typedef std::set > cascade_fk_set; + + cascade_fk_set fk_set; + + mutex_enter(&dict_sys->mutex); + + /* Initialize the table_list with prebuilt->table name. */ + struct table_list_item item = {m_prebuilt->table, + m_prebuilt->table->name.m_name}; + + table_list.push_back(item); + + /* Get the parent table, grand parent table info from the + table list by depth-first traversal. */ + do { + const dict_table_t* parent_table; + dict_table_t* parent = NULL; + std::pair ret; + + item = table_list.back(); + table_list.pop_back(); + parent_table = item.table; + + if (parent_table == NULL) { + + ut_ad(item.name != NULL); + + parent_table = parent = dict_table_open_on_name( + item.name, TRUE, FALSE, + DICT_ERR_IGNORE_NONE); + + if (parent_table == NULL) { + /* foreign_key_checks is or was probably + disabled; ignore the constraint */ + continue; + } + } - prebuilt->trx->op_info = ""; + for (dict_foreign_set::const_iterator it = + parent_table->foreign_set.begin(); + it != parent_table->foreign_set.end(); ++it) { - return(0); -} + const dict_foreign_t* foreign = *it; + st_handler_tablename f1; -/*******************************************************************//** -Gets the set of foreign keys where this table is the referenced table. -@return always 0, that is, always succeeds */ -UNIV_INTERN -int -ha_innobase::get_parent_foreign_key_list( -/*=====================================*/ - THD* thd, /*!< in: user thread handle */ - List* f_key_list) /*!< out: foreign key list */ -{ - FOREIGN_KEY_INFO* pf_key_info; - dict_foreign_t* foreign; + /* Skip the table if there is no + cascading operation. 
*/ + if (0 == (foreign->type + & ~(DICT_FOREIGN_ON_DELETE_NO_ACTION + | DICT_FOREIGN_ON_UPDATE_NO_ACTION))) { + continue; + } - ut_a(prebuilt != NULL); - update_thd(ha_thd()); + if (foreign->referenced_table_name_lookup != NULL) { + get_table_name_info(thd, &f1, foreign); + ret = fk_set.insert(f1); - prebuilt->trx->op_info = "getting list of referencing foreign keys"; + /* Ignore the table if it is already + in the set. */ + if (!ret.second) { + continue; + } - trx_search_latch_release_if_reserved(prebuilt->trx); + struct table_list_item item1 = { + foreign->referenced_table, + foreign->referenced_table_name_lookup}; - mutex_enter(&(dict_sys->mutex)); + table_list.push_back(item1); - for (dict_foreign_set::iterator it - = prebuilt->table->referenced_set.begin(); - it != prebuilt->table->referenced_set.end(); - ++it) { + st_handler_tablename* fk_table = + (st_handler_tablename*) thd_memdup( + thd, &f1, sizeof(*fk_table)); - foreign = *it; + fk_table_list->push_back(fk_table); + } + } - pf_key_info = get_foreign_key_info(thd, foreign); - if (pf_key_info) { - f_key_list->push_back(pf_key_info); + if (parent != NULL) { + dict_table_close(parent, true, false); } - } - mutex_exit(&(dict_sys->mutex)); + } while(!table_list.empty()); + + mutex_exit(&dict_sys->mutex); - prebuilt->trx->op_info = ""; + m_prebuilt->trx->op_info = ""; return(0); } @@ -14361,26 +17449,26 @@ ha_innobase::get_parent_foreign_key_list( Checks if ALTER TABLE may change the storage engine of the table. Changing storage engines is not allowed for tables for which there are foreign key constraints (parent or child tables). -@return TRUE if can switch engines */ -UNIV_INTERN +@return TRUE if can switch engines */ + bool ha_innobase::can_switch_engines(void) /*=================================*/ { - bool can_switch; - DBUG_ENTER("ha_innobase::can_switch_engines"); + update_thd(); - prebuilt->trx->op_info = + m_prebuilt->trx->op_info = "determining if there are foreign key constraints"; - row_mysql_freeze_data_dictionary(prebuilt->trx); - can_switch = prebuilt->table->referenced_set.empty() - && prebuilt->table->foreign_set.empty(); + row_mysql_freeze_data_dictionary(m_prebuilt->trx); + + bool can_switch = m_prebuilt->table->referenced_set.empty() + && m_prebuilt->table->foreign_set.empty(); - row_mysql_unfreeze_data_dictionary(prebuilt->trx); - prebuilt->trx->op_info = ""; + row_mysql_unfreeze_data_dictionary(m_prebuilt->trx); + m_prebuilt->trx->op_info = ""; DBUG_RETURN(can_switch); } @@ -14390,13 +17478,13 @@ Checks if a table is referenced by a foreign key. The MySQL manual states that a REPLACE is either equivalent to an INSERT, or DELETE(s) + INSERT. Only a delete is then allowed internally to resolve a duplicate key conflict in REPLACE, not an update. -@return > 0 if referenced by a FOREIGN KEY */ -UNIV_INTERN +@return > 0 if referenced by a FOREIGN KEY */ + uint ha_innobase::referenced_by_foreign_key(void) /*========================================*/ { - if (dict_table_is_referenced_by_foreign_key(prebuilt->table)) { + if (dict_table_is_referenced_by_foreign_key(m_prebuilt->table)) { return(1); } @@ -14407,21 +17495,21 @@ ha_innobase::referenced_by_foreign_key(void) /*******************************************************************//** Frees the foreign key create info for a table stored in InnoDB, if it is non-NULL. 
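
The new get_cascade_foreign_key_table_list() completed above performs an iterative depth-first walk over the referenced tables and uses the set (the !ret.second check) as a visited filter so each table is queued and reported only once. Below is a compile-able sketch of that traversal pattern over a mocked-up foreign-key graph; the graph type and names are illustrative and not InnoDB's data structures.

    #include <iostream>
    #include <list>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    /* Hypothetical FK graph: table name -> referenced (parent) tables
    reachable through cascading constraints. */
    typedef std::map<std::string, std::vector<std::string> > fk_graph;

    /* Collect every table the start table depends on through CASCADE
    constraints, depth first, each table at most once. */
    static std::vector<std::string>
    cascade_tables(const fk_graph& graph, const std::string& start)
    {
        std::vector<std::string>  result;
        std::set<std::string>     seen;
        std::list<std::string>    work;

        work.push_back(start);

        do {
            std::string name = work.back();
            work.pop_back();

            fk_graph::const_iterator it = graph.find(name);
            if (it == graph.end()) {
                continue;
            }

            for (size_t i = 0; i < it->second.size(); ++i) {
                const std::string& parent = it->second[i];

                /* Skip tables already in the set, like the
                !ret.second check in the patch. */
                if (!seen.insert(parent).second) {
                    continue;
                }

                work.push_back(parent);
                result.push_back(parent);
            }
        } while (!work.empty());

        return(result);
    }

    int main()
    {
        fk_graph g;
        g["child"].push_back("parent");
        g["parent"].push_back("grandparent");

        std::vector<std::string> t = cascade_tables(g, "child");

        for (size_t i = 0; i < t.size(); ++i) {
            std::cout << t[i] << '\n';   /* parent, grandparent */
        }
        return(0);
    }

Using a set keyed on (db, table) rather than on pointers is what lets the real code deduplicate even when the parent table object is not loaded and only its name is known.
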
*/ -UNIV_INTERN + void ha_innobase::free_foreign_key_create_info( /*======================================*/ char* str) /*!< in, own: create info string to free */ { - if (str) { + if (str != NULL) { my_free(str); } } /*******************************************************************//** Tells something additional to the handler about how to do things. -@return 0 or error number */ -UNIV_INTERN +@return 0 or error number */ + int ha_innobase::extra( /*===============*/ @@ -14431,13 +17519,13 @@ ha_innobase::extra( check_trx_exists(ha_thd()); /* Warning: since it is not sure that MySQL calls external_lock - before calling this function, the trx field in prebuilt can be + before calling this function, the trx field in m_prebuilt can be obsolete! */ switch (operation) { case HA_EXTRA_FLUSH: - if (prebuilt->blob_heap) { - row_mysql_prebuilt_free_blob_heap(prebuilt); + if (m_prebuilt->blob_heap) { + row_mysql_prebuilt_free_blob_heap(m_prebuilt); } break; case HA_EXTRA_RESET_STATE: @@ -14445,16 +17533,16 @@ ha_innobase::extra( thd_to_trx(ha_thd())->duplicates = 0; break; case HA_EXTRA_NO_KEYREAD: - prebuilt->read_just_key = 0; + m_prebuilt->read_just_key = 0; break; case HA_EXTRA_KEYREAD: - prebuilt->read_just_key = 1; + m_prebuilt->read_just_key = 1; break; case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: - prebuilt->keep_other_fields_on_keyread = 1; + m_prebuilt->keep_other_fields_on_keyread = 1; break; - /* IMPORTANT: prebuilt->trx can be obsolete in + /* IMPORTANT: m_prebuilt->trx can be obsolete in this method, because it is not sure that MySQL calls external_lock before this method with the parameters below. We must not invoke update_thd() @@ -14479,29 +17567,49 @@ ha_innobase::extra( return(0); } -/******************************************************************//** -*/ -UNIV_INTERN +/** +MySQL calls this method at the end of each statement. This method +exists for readability only. ha_innobase::reset() doesn't give any +clue about the method. */ + int -ha_innobase::reset() -/*================*/ +ha_innobase::end_stmt() { - if (prebuilt->blob_heap) { - row_mysql_prebuilt_free_blob_heap(prebuilt); + if (m_prebuilt->blob_heap) { + row_mysql_prebuilt_free_blob_heap(m_prebuilt); } reset_template(); - ds_mrr.dsmrr_close(); + + //m_ds_mrr.reset(); /* TODO: This should really be reset in reset_template() but for now it's safer to do it explicitly here. */ /* This is a statement level counter. */ - prebuilt->autoinc_last_value = 0; + m_prebuilt->autoinc_last_value = 0; + + /* This transaction had called ha_innobase::start_stmt() */ + trx_t* trx = m_prebuilt->trx; + + if (trx->lock.start_stmt) { + TrxInInnoDB::end_stmt(trx); + + trx->lock.start_stmt = false; + } return(0); } +/** +MySQL calls this method at the end of each statement */ + +int +ha_innobase::reset() +{ + return(end_stmt()); +} + /******************************************************************//** MySQL calls this function at the start of each SQL statement inside LOCK TABLES. Inside LOCK TABLES the ::external_lock method does not work to @@ -14513,42 +17621,48 @@ procedure. To make the execution more deterministic for binlogging, MySQL-5.0 locks all tables involved in a stored procedure with full explicit table locks (thd_in_lock_tables(thd) holds in store_lock()) before executing the procedure. 
-@return 0 or error code */ -UNIV_INTERN +@return 0 or error code */ + int ha_innobase::start_stmt( /*====================*/ THD* thd, /*!< in: handle to the user thread */ thr_lock_type lock_type) { - trx_t* trx; + trx_t* trx = m_prebuilt->trx; + DBUG_ENTER("ha_innobase::start_stmt"); update_thd(thd); - trx = prebuilt->trx; + ut_ad(m_prebuilt->table != NULL); - /* Here we release the search latch and the InnoDB thread FIFO ticket - if they were reserved. They should have been released already at the - end of the previous statement, but because inside LOCK TABLES the - lock count method does not work to mark the end of a SELECT statement, - that may not be the case. We MUST release the search latch before an - INSERT, for example. */ + TrxInInnoDB trx_in_innodb(trx); - trx_search_latch_release_if_reserved(trx); + if (dict_table_is_intrinsic(m_prebuilt->table)) { + + if (thd_sql_command(thd) == SQLCOM_ALTER_TABLE) { + + DBUG_RETURN(HA_ERR_WRONG_COMMAND); + } + + DBUG_RETURN(0); + } + + trx = m_prebuilt->trx; innobase_srv_conc_force_exit_innodb(trx); /* Reset the AUTOINC statement level counter for multi-row INSERTs. */ trx->n_autoinc_rows = 0; - prebuilt->sql_stat_start = TRUE; - prebuilt->hint_need_to_fetch_extra_cols = 0; + m_prebuilt->sql_stat_start = TRUE; + m_prebuilt->hint_need_to_fetch_extra_cols = 0; reset_template(); - if (dict_table_is_temporary(prebuilt->table) - && prebuilt->mysql_has_locked - && prebuilt->select_lock_type == LOCK_NONE) { + if (dict_table_is_temporary(m_prebuilt->table) + && m_mysql_has_locked + && m_prebuilt->select_lock_type == LOCK_NONE) { dberr_t error; switch (thd_sql_command(thd)) { @@ -14556,12 +17670,12 @@ ha_innobase::start_stmt( case SQLCOM_UPDATE: case SQLCOM_DELETE: init_table_handle_for_HANDLER(); - prebuilt->select_lock_type = LOCK_X; - prebuilt->stored_select_lock_type = LOCK_X; - error = row_lock_table_for_mysql(prebuilt, NULL, 1); + m_prebuilt->select_lock_type = LOCK_X; + m_prebuilt->stored_select_lock_type = LOCK_X; + error = row_lock_table_for_mysql(m_prebuilt, NULL, 1); if (error != DB_SUCCESS) { - int st = convert_error_code_to_mysql( + int st = convert_error_code_to_mysql( error, 0, thd); DBUG_RETURN(st); } @@ -14569,13 +17683,13 @@ ha_innobase::start_stmt( } } - if (!prebuilt->mysql_has_locked) { + if (!m_mysql_has_locked) { /* This handle is for a temporary table created inside this same LOCK TABLES; since MySQL does NOT call external_lock in this case, we must use x-row locks inside InnoDB to be prepared for an update of a row */ - prebuilt->select_lock_type = LOCK_X; + m_prebuilt->select_lock_type = LOCK_X; } else if (trx->isolation_level != TRX_ISO_SERIALIZABLE && thd_sql_command(thd) == SQLCOM_SELECT @@ -14584,18 +17698,19 @@ ha_innobase::start_stmt( /* For other than temporary tables, we obtain no lock for consistent read (plain SELECT). */ - prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->select_lock_type = LOCK_NONE; } else { /* Not a consistent read: restore the select_lock_type value. The value of stored_select_lock_type was decided in: 1) ::store_lock(), 2) ::external_lock(), - 3) ::init_table_handle_for_HANDLER(), and - 4) ::transactional_table_lock(). */ + 3) ::init_table_handle_for_HANDLER(). 
*/ - ut_a(prebuilt->stored_select_lock_type != LOCK_NONE_UNSET); - prebuilt->select_lock_type = prebuilt->stored_select_lock_type; + ut_a(m_prebuilt->stored_select_lock_type != LOCK_NONE_UNSET); + + m_prebuilt->select_lock_type = + m_prebuilt->stored_select_lock_type; } *trx->detailed_error = 0; @@ -14606,12 +17721,20 @@ ha_innobase::start_stmt( ++trx->will_lock; } + /* Only do it once per transaction. */ + if (!trx->lock.start_stmt && lock_type != TL_UNLOCK) { + + TrxInInnoDB::begin_stmt(trx); + + trx->lock.start_stmt = true; + } + DBUG_RETURN(0); } /******************************************************************//** Maps a MySQL trx isolation level code to the InnoDB isolation level code -@return InnoDB isolation level */ +@return InnoDB isolation level */ static inline ulint innobase_map_isolation_level( @@ -14638,21 +17761,35 @@ the THD in the handle. We will also use this function to communicate to InnoDB that a new SQL statement has started and that we must store a savepoint to our transaction handle, so that we are able to roll back the SQL statement in case of an error. -@return 0 */ -UNIV_INTERN +@return 0 */ + int ha_innobase::external_lock( /*=======================*/ THD* thd, /*!< in: handle to the user thread */ int lock_type) /*!< in: lock type */ { - trx_t* trx; - DBUG_ENTER("ha_innobase::external_lock"); DBUG_PRINT("enter",("lock_type: %d", lock_type)); update_thd(thd); + trx_t* trx = m_prebuilt->trx; + + ut_ad(m_prebuilt->table); + + if (dict_table_is_intrinsic(m_prebuilt->table)) { + + if (thd_sql_command(thd) == SQLCOM_ALTER_TABLE) { + + DBUG_RETURN(HA_ERR_WRONG_COMMAND); + } + + TrxInInnoDB::begin_stmt(trx); + + DBUG_RETURN(0); + } + /* Statement based binlogging does not work in isolation level READ UNCOMMITTED and READ COMMITTED since the necessary locks cannot be taken. 
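
The start_stmt()/end_stmt() changes above pair TrxInInnoDB::begin_stmt() with TrxInInnoDB::end_stmt() and use the trx->lock.start_stmt flag so the pair fires only once per statement, even when the handler is entered several times for the same query. Here is a small standalone sketch of that idempotent begin/end pattern; the trx struct and the counter are mock-ups, not InnoDB's.

    #include <cassert>

    /* Mock transaction object with the per-statement flag. */
    struct trx_t {
        bool start_stmt;   /* true while a statement is open */
        int  depth;        /* what begin/end actually maintain */

        trx_t() : start_stmt(false), depth(0) {}
    };

    /* Analogue of TrxInInnoDB::begin_stmt()/end_stmt(). */
    static void begin_stmt(trx_t* trx) { ++trx->depth; }
    static void end_stmt(trx_t* trx)   { --trx->depth; }

    /* Called every time the handler starts work on a table; the flag
    guarantees the counter moves only once per statement. */
    static void handler_start_stmt(trx_t* trx)
    {
        if (!trx->start_stmt) {
            begin_stmt(trx);
            trx->start_stmt = true;
        }
    }

    /* Called from reset()/end_stmt() at the end of the statement. */
    static void handler_end_stmt(trx_t* trx)
    {
        if (trx->start_stmt) {
            end_stmt(trx);
            trx->start_stmt = false;
        }
    }

    int main()
    {
        trx_t trx;

        handler_start_stmt(&trx);   /* first table of the query */
        handler_start_stmt(&trx);   /* second handler, same statement */
        assert(trx.depth == 1);     /* counted once */

        handler_end_stmt(&trx);
        assert(trx.depth == 0);
        return(0);
    }
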
In this case, we print an @@ -14665,18 +17802,22 @@ ha_innobase::external_lock( && thd_binlog_format(thd) == BINLOG_FORMAT_STMT && thd_binlog_filter_ok(thd) && thd_sqlcom_can_generate_row_events(thd)) { - bool skip = 0; + + bool skip = false; + /* used by test case */ DBUG_EXECUTE_IF("no_innodb_binlog_errors", skip = true;); + if (!skip) { #ifdef WITH_WSREP if (!wsrep_on(thd) || wsrep_thd_exec_mode(thd) == LOCAL_STATE) { #endif /* WITH_WSREP */ my_error(ER_BINLOG_STMT_MODE_AND_ROW_ENGINE, MYF(0), - " InnoDB is limited to row-logging when " - "transaction isolation level is " - "READ COMMITTED or READ UNCOMMITTED."); + " InnoDB is limited to row-logging when" + " transaction isolation level is" + " READ COMMITTED or READ UNCOMMITTED."); + DBUG_RETURN(HA_ERR_LOGGING_IMPOSSIBLE); #ifdef WITH_WSREP } @@ -14698,8 +17839,7 @@ ha_innobase::external_lock( || thd_sql_command(thd) == SQLCOM_DROP_INDEX || thd_sql_command(thd) == SQLCOM_DELETE)) { - if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE) - { + if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE) { ib_senderrf(thd, IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE); DBUG_RETURN(HA_ERR_TABLE_READONLY); @@ -14708,24 +17848,21 @@ ha_innobase::external_lock( ER_READ_ONLY_MODE); DBUG_RETURN(HA_ERR_TABLE_READONLY); } - } - trx = prebuilt->trx; - - prebuilt->sql_stat_start = TRUE; - prebuilt->hint_need_to_fetch_extra_cols = 0; + m_prebuilt->sql_stat_start = TRUE; + m_prebuilt->hint_need_to_fetch_extra_cols = 0; reset_template(); - switch (prebuilt->table->quiesce) { + switch (m_prebuilt->table->quiesce) { case QUIESCE_START: /* Check for FLUSH TABLE t WITH READ LOCK; */ if (!srv_read_only_mode && thd_sql_command(thd) == SQLCOM_FLUSH && lock_type == F_RDLCK) { - row_quiesce_table_start(prebuilt->table, trx); + row_quiesce_table_start(m_prebuilt->table, trx); /* Use the transaction instance to track UNLOCK TABLES. It can be done via START TRANSACTION; too @@ -14741,7 +17878,7 @@ ha_innobase::external_lock( if (trx->flush_tables > 0 && (lock_type == F_UNLCK || trx_is_interrupted(trx))) { - row_quiesce_table_complete(prebuilt->table, trx); + row_quiesce_table_complete(m_prebuilt->table, trx); ut_a(trx->flush_tables > 0); --trx->flush_tables; @@ -14757,8 +17894,8 @@ ha_innobase::external_lock( /* If this is a SELECT, then it is in UPDATE TABLE ... or SELECT ... FOR UPDATE */ - prebuilt->select_lock_type = LOCK_X; - prebuilt->stored_select_lock_type = LOCK_X; + m_prebuilt->select_lock_type = LOCK_X; + m_prebuilt->stored_select_lock_type = LOCK_X; } if (lock_type != F_UNLCK) { @@ -14769,7 +17906,7 @@ ha_innobase::external_lock( innobase_register_trx(ht, thd, trx); if (trx->isolation_level == TRX_ISO_SERIALIZABLE - && prebuilt->select_lock_type == LOCK_NONE + && m_prebuilt->select_lock_type == LOCK_NONE && thd_test_options( thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { @@ -14781,8 +17918,8 @@ ha_innobase::external_lock( can be serialized also if performed as consistent reads. */ - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; + m_prebuilt->select_lock_type = LOCK_S; + m_prebuilt->stored_select_lock_type = LOCK_S; } /* Starting from 4.1.9, no InnoDB table lock is taken in LOCK @@ -14796,7 +17933,7 @@ ha_innobase::external_lock( can hold in some cases, e.g., at the start of a stored procedure call (SQLCOM_CALL). 
*/ - if (prebuilt->select_lock_type != LOCK_NONE) { + if (m_prebuilt->select_lock_type != LOCK_NONE) { if (thd_sql_command(thd) == SQLCOM_LOCK_TABLES && THDVAR(thd, table_locks) @@ -14804,9 +17941,10 @@ ha_innobase::external_lock( && thd_in_lock_tables(thd)) { dberr_t error = row_lock_table_for_mysql( - prebuilt, NULL, 0); + m_prebuilt, NULL, 0); if (error != DB_SUCCESS) { + DBUG_RETURN( convert_error_code_to_mysql( error, 0, thd)); @@ -14817,28 +17955,34 @@ ha_innobase::external_lock( } trx->n_mysql_tables_in_use++; - prebuilt->mysql_has_locked = TRUE; + m_mysql_has_locked = true; if (!trx_is_started(trx) - && (prebuilt->select_lock_type != LOCK_NONE - || prebuilt->stored_select_lock_type != LOCK_NONE)) { + && (m_prebuilt->select_lock_type != LOCK_NONE + || m_prebuilt->stored_select_lock_type != LOCK_NONE)) { ++trx->will_lock; } + TrxInInnoDB::begin_stmt(trx); + +#ifdef UNIV_DEBUG + if (thd_trx_is_dd_trx(thd)) { + trx->is_dd_trx = true; + } +#endif /* UNIV_DEBUG */ DBUG_RETURN(0); + } else { + + TrxInInnoDB::end_stmt(trx); + + DEBUG_SYNC_C("ha_innobase_end_statement"); } /* MySQL is releasing a table lock */ trx->n_mysql_tables_in_use--; - prebuilt->mysql_has_locked = FALSE; - - /* Release a possible FIFO ticket and search latch. Since we - may reserve the trx_sys->mutex, we have to release the search - system latch first to obey the latching order. */ - - trx_search_latch_release_if_reserved(trx); + m_mysql_has_locked = false; innobase_srv_conc_force_exit_innodb(trx); @@ -14848,28 +17992,36 @@ ha_innobase::external_lock( if (trx->n_mysql_tables_in_use == 0) { trx->mysql_n_tables_locked = 0; - prebuilt->used_in_HANDLER = FALSE; + m_prebuilt->used_in_HANDLER = FALSE; if (!thd_test_options( thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { if (trx_is_started(trx)) { + innobase_commit(ht, thd, TRUE); + } else { + /* Since the trx state is TRX_NOT_STARTED, + trx_commit() will not be called. Reset + trx->is_dd_trx here */ + ut_d(trx->is_dd_trx = false); } } else if (trx->isolation_level <= TRX_ISO_READ_COMMITTED - && trx->global_read_view) { + && MVCC::is_view_active(trx->read_view)) { - /* At low transaction isolation levels we let - each consistent read set its own snapshot */ + mutex_enter(&trx_sys->mutex); - read_view_close_for_mysql(trx); + trx_sys->mvcc->view_close(trx->read_view, true); + + mutex_exit(&trx_sys->mutex); } } if (!trx_is_started(trx) - && (prebuilt->select_lock_type != LOCK_NONE - || prebuilt->stored_select_lock_type != LOCK_NONE)) { + && lock_type != F_UNLCK + && (m_prebuilt->select_lock_type != LOCK_NONE + || m_prebuilt->stored_select_lock_type != LOCK_NONE)) { ++trx->will_lock; } @@ -14877,100 +18029,6 @@ ha_innobase::external_lock( DBUG_RETURN(0); } -/******************************************************************//** -With this function MySQL request a transactional lock to a table when -user issued query LOCK TABLES..WHERE ENGINE = InnoDB. -@return error code */ -UNIV_INTERN -int -ha_innobase::transactional_table_lock( -/*==================================*/ - THD* thd, /*!< in: handle to the user thread */ - int lock_type) /*!< in: lock type */ -{ - trx_t* trx; - - DBUG_ENTER("ha_innobase::transactional_table_lock"); - DBUG_PRINT("enter",("lock_type: %d", lock_type)); - - /* We do not know if MySQL can call this function before calling - external_lock(). To be safe, update the thd of the current table - handle. 
*/ - - update_thd(thd); - - if (!thd_tablespace_op(thd)) { - - if (dict_table_is_discarded(prebuilt->table)) { - - ib_senderrf( - thd, IB_LOG_LEVEL_ERROR, - ER_TABLESPACE_DISCARDED, - table->s->table_name.str); - - } else if (prebuilt->table->ibd_file_missing) { - - ib_senderrf( - thd, IB_LOG_LEVEL_ERROR, - ER_TABLESPACE_MISSING, - table->s->table_name.str); - } - - DBUG_RETURN(HA_ERR_CRASHED); - } - - trx = prebuilt->trx; - - prebuilt->sql_stat_start = TRUE; - prebuilt->hint_need_to_fetch_extra_cols = 0; - - reset_template(); - - if (lock_type == F_WRLCK) { - prebuilt->select_lock_type = LOCK_X; - prebuilt->stored_select_lock_type = LOCK_X; - } else if (lock_type == F_RDLCK) { - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; - } else { - ib_logf(IB_LOG_LEVEL_ERROR, - "MySQL is trying to set transactional table lock " - "with corrupted lock type to table %s, lock type " - "%d does not exist.", - table->s->table_name.str, lock_type); - - DBUG_RETURN(HA_ERR_CRASHED); - } - - /* MySQL is setting a new transactional table lock */ - - innobase_register_trx(ht, thd, trx); - - if (THDVAR(thd, table_locks) && thd_in_lock_tables(thd)) { - dberr_t error; - - error = row_lock_table_for_mysql(prebuilt, NULL, 0); - - if (error != DB_SUCCESS) { - DBUG_RETURN( - convert_error_code_to_mysql( - error, prebuilt->table->flags, thd)); - } - - if (thd_test_options( - thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { - - /* Store the current undo_no of the transaction - so that we know where to roll back if we have - to roll back the next SQL statement */ - - trx_mark_sql_stat_end(trx); - } - } - - DBUG_RETURN(0); -} - /************************************************************************//** Here we export InnoDB status variables to MySQL. */ static @@ -14995,7 +18053,6 @@ innodb_show_status( THD* thd, /*!< in: the MySQL query thread of the caller */ stat_print_fn* stat_print) { - trx_t* trx; static const char truncated_msg[] = "... truncated...\n"; const long MAX_STATUS_SIZE = 1048576; ulint trx_list_start = ULINT_UNDEFINED; @@ -15012,17 +18069,19 @@ innodb_show_status( DBUG_RETURN(0); } - trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(thd); trx_search_latch_release_if_reserved(trx); innobase_srv_conc_force_exit_innodb(trx); + TrxInInnoDB trx_in_innodb(trx); + /* We let the InnoDB Monitor to output at most MAX_STATUS_SIZE bytes of text. */ char* str; - ssize_t flen, usable_len; + ssize_t flen; mutex_enter(&srv_monitor_file_mutex); rewind(srv_monitor_file); @@ -15036,6 +18095,8 @@ innodb_show_status( flen = 0; } + ssize_t usable_len; + if (flen > MAX_STATUS_SIZE) { usable_len = MAX_STATUS_SIZE; srv_truncated_status_writes++; @@ -15046,7 +18107,12 @@ innodb_show_status( /* allocate buffer for the string, and read the contents of the temporary file */ - if (!(str = (char*) my_malloc(usable_len + 1, MYF(0)))) { + /* JAN: TODO: MySQL 5.7 PSI */ + if (!(str = (char*) my_malloc( + usable_len + 1, MYF(0)))) { + /* if (!(str = (char*) my_malloc(PSI_INSTRUMENT_ME, + usable_len + 1, MYF(0)))) { + */ mutex_exit(&srv_monitor_file_mutex); DBUG_RETURN(1); } @@ -15061,188 +18127,364 @@ innodb_show_status( && trx_list_start + (flen - trx_list_end) < MAX_STATUS_SIZE - sizeof truncated_msg - 1) { - /* Omit the beginning of the list of active transactions. */ - ssize_t len = fread(str, 1, trx_list_start, srv_monitor_file); + /* Omit the beginning of the list of active transactions. 
*/ + ssize_t len = fread(str, 1, trx_list_start, srv_monitor_file); + + memcpy(str + len, truncated_msg, sizeof truncated_msg - 1); + len += sizeof truncated_msg - 1; + usable_len = (MAX_STATUS_SIZE - 1) - len; + fseek(srv_monitor_file, + static_cast(flen - usable_len), SEEK_SET); + len += fread(str + len, 1, usable_len, srv_monitor_file); + flen = len; + } else { + /* Omit the end of the output. */ + flen = fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file); + } + + mutex_exit(&srv_monitor_file_mutex); + + ret_val= stat_print( + thd, innobase_hton_name, + static_cast(strlen(innobase_hton_name)), + STRING_WITH_LEN(""), str, static_cast(flen)); + + my_free(str); + + DBUG_RETURN(ret_val); +} + +/** Callback for collecting mutex statistics */ +struct ShowStatus { + + /** For tracking the mutex metrics */ + struct Value { + + /** Constructor + @param[in] name Name of the mutex + @param[in] spins Number of spins + @param[in] os_waits OS waits so far + @param[in] calls Number of calls to enter() */ + Value(const char* name, + ulint spins, + uint64_t waits, + ulint calls) + : + m_name(name), + m_spins(spins), + m_waits(waits), + m_calls(calls) + { + /* No op */ + } + + /** Mutex name */ + std::string m_name; + + /** Spins so far */ + ulint m_spins; + + /** Waits so far */ + ulint m_waits; + + /** Number of calls so far */ + uint64_t m_calls; + }; + + /** Order by m_waits, in descending order. */ + struct OrderByWaits: public std::binary_function + { + /** @return true if rhs < lhs */ + bool operator()( + const Value& lhs, + const Value& rhs) const + UNIV_NOTHROW + { + return(rhs.m_waits < lhs.m_waits); + } + }; + + typedef std::vector > Values; + + /** Collect the individual latch counts */ + struct GetCount { + typedef latch_meta_t::CounterType::Count Count; + + /** Constructor + @param[in] name Latch name + @param[in,out] values Put the values here */ + GetCount( + const char* name, + Values* values) + UNIV_NOTHROW + : + m_name(name), + m_values(values) + { + /* No op */ + } + + /** Collect the latch metrics. Ignore entries where the + spins and waits are zero. + @param[in] count The latch metrics */ + void operator()(Count* count) + UNIV_NOTHROW + { + if (count->m_spins > 0 || count->m_waits > 0) { + + m_values->push_back(Value( + m_name, + count->m_spins, + count->m_waits, + count->m_calls)); + } + } + + /** The latch name */ + const char* m_name; + + /** For collecting the active mutex stats. */ + Values* m_values; + }; + + /** Constructor */ + ShowStatus() { } + + /** Callback for collecting the stats + @param[in] latch_meta Latch meta data + @return always returns true */ + bool operator()(latch_meta_t& latch_meta) + UNIV_NOTHROW + { + latch_meta_t::CounterType* counter; + + counter = latch_meta.get_counter(); + + GetCount get_count(latch_meta.get_name(), &m_values); + + counter->iterate(get_count); + + return(true); + } + + /** Implements the SHOW MUTEX STATUS command, for mutexes. + The table structure is like so: Engine | Mutex Name | Status + We store the metrics in the "Status" column as: + + spins=N,waits=N,calls=N" + + The user has to parse the dataunfortunately + @param[in,out] hton the innodb handlerton + @param[in,out] thd the MySQL query thread of the caller + @param[in,out] stat_print function for printing statistics + @return true on success. */ + bool to_string( + handlerton* hton, + THD* thd, + stat_print_fn* stat_print) + UNIV_NOTHROW; + + /** For collecting the active mutex stats. */ + Values m_values; +}; + +/** Implements the SHOW MUTEX STATUS command, for mutexes. 
+The table structure is like so: Engine | Mutex Name | Status +We store the metrics in the "Status" column as: + + spins=N,waits=N,calls=N" + +The user has to parse the dataunfortunately +@param[in,out] hton the innodb handlerton +@param[in,out] thd the MySQL query thread of the caller +@param[in,out] stat_print function for printing statistics +@return true on success. */ +bool +ShowStatus::to_string( + handlerton* hton, + THD* thd, + stat_print_fn* stat_print) + UNIV_NOTHROW +{ + uint hton_name_len = (uint) strlen(innobase_hton_name); + + std::sort(m_values.begin(), m_values.end(), OrderByWaits()); + + Values::iterator end = m_values.end(); + + for (Values::iterator it = m_values.begin(); it != end; ++it) { + + int name_len; + char name_buf[IO_SIZE]; + + name_len = ut_snprintf( + name_buf, sizeof(name_buf), "%s", it->m_name.c_str()); + + int status_len; + char status_buf[IO_SIZE]; + + status_len = ut_snprintf( + status_buf, sizeof(status_buf), + "spins=%lu,waits=%lu,calls=" TRX_ID_FMT, + static_cast(it->m_spins), + static_cast(it->m_waits), + it->m_calls); - memcpy(str + len, truncated_msg, sizeof truncated_msg - 1); - len += sizeof truncated_msg - 1; - usable_len = (MAX_STATUS_SIZE - 1) - len; - fseek(srv_monitor_file, - static_cast(flen - usable_len), SEEK_SET); - len += fread(str + len, 1, usable_len, srv_monitor_file); - flen = len; - } else { - /* Omit the end of the output. */ - flen = fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file); + if (stat_print(thd, innobase_hton_name, + hton_name_len, + name_buf, static_cast(name_len), + status_buf, static_cast(status_len))) { + + return(false); + } } - mutex_exit(&srv_monitor_file_mutex); + return(true); +} - ret_val= stat_print( - thd, innobase_hton_name, - static_cast(strlen(innobase_hton_name)), - STRING_WITH_LEN(""), str, static_cast(flen)); +/** Implements the SHOW MUTEX STATUS command, for mutexes. +@param[in,out] hton the innodb handlerton +@param[in,out] thd the MySQL query thread of the caller +@param[in,out] stat_print function for printing statistics +@return 0 on success. */ +static +int +innodb_show_mutex_status( + handlerton* hton, + THD* thd, + stat_print_fn* stat_print) +{ + DBUG_ENTER("innodb_show_mutex_status"); - my_free(str); + ShowStatus collector; - DBUG_RETURN(ret_val); + DBUG_ASSERT(hton == innodb_hton_ptr); + + mutex_monitor->iterate(collector); + + if (!collector.to_string(hton, thd, stat_print)) { + DBUG_RETURN(1); + } + + DBUG_RETURN(0); } -/************************************************************************//** -Implements the SHOW MUTEX STATUS command. +/** Implements the SHOW MUTEX STATUS command. +@param[in,out] hton the innodb handlerton +@param[in,out] thd the MySQL query thread of the caller +@param[in,out] stat_print function for printing statistics @return 0 on success. 
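
ShowStatus above gathers one (name, spins, waits, calls) record per latch counter, sorts the records by waits in descending order and prints each one through stat_print() in the "spins=N,waits=N,calls=N" format. The following self-contained sketch shows only that collect-sort-format step, using plain structs and printf instead of the handlerton callback; the sample values are invented.

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    /* Mock of ShowStatus::Value. */
    struct latch_value {
        std::string        name;
        unsigned long      spins;
        unsigned long      waits;
        unsigned long long calls;
    };

    /* Descending by waits, like OrderByWaits. */
    static bool by_waits_desc(const latch_value& lhs, const latch_value& rhs)
    {
        return(rhs.waits < lhs.waits);
    }

    int main()
    {
        std::vector<latch_value> values;

        latch_value a = { "log_sys_mutex",   10, 2, 100 };
        latch_value b = { "buf_pool_mutex", 500, 9, 800 };
        values.push_back(a);
        values.push_back(b);

        std::sort(values.begin(), values.end(), by_waits_desc);

        for (size_t i = 0; i < values.size(); ++i) {
            /* Same "Status" column format as the patch emits. */
            std::printf("%s\tspins=%lu,waits=%lu,calls=%llu\n",
                        values[i].name.c_str(),
                        values[i].spins,
                        values[i].waits,
                        values[i].calls);
        }
        return(0);
    }
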
*/ static int -innodb_mutex_show_status( -/*=====================*/ - handlerton* hton, /*!< in: the innodb handlerton */ - THD* thd, /*!< in: the MySQL query thread of the - caller */ - stat_print_fn* stat_print) /*!< in: function for printing - statistics */ -{ - char buf1[IO_SIZE]; - char buf2[IO_SIZE]; - ib_mutex_t* mutex; - rw_lock_t* lock; - ulint block_mutex_oswait_count = 0; - ulint block_lock_oswait_count = 0; - ib_mutex_t* block_mutex = NULL; - rw_lock_t* block_lock = NULL; -#ifdef UNIV_DEBUG - ulint rw_lock_count= 0; - ulint rw_lock_count_spin_loop= 0; - ulint rw_lock_count_spin_rounds= 0; - ulint rw_lock_count_os_wait= 0; - ulint rw_lock_count_os_yield= 0; - ulonglong rw_lock_wait_time= 0; -#endif /* UNIV_DEBUG */ - uint buf1len; - uint buf2len; - uint hton_name_len; +innodb_show_rwlock_status( + handlerton* hton, + THD* thd, + stat_print_fn* stat_print) +{ + DBUG_ENTER("innodb_show_rwlock_status"); - hton_name_len = (uint) strlen(innobase_hton_name); + rw_lock_t* block_rwlock = NULL; + ulint block_rwlock_oswait_count = 0; + uint hton_name_len = (uint) strlen(innobase_hton_name); - DBUG_ENTER("innodb_mutex_show_status"); DBUG_ASSERT(hton == innodb_hton_ptr); - mutex_enter(&mutex_list_mutex); + mutex_enter(&rw_lock_list_mutex); + + for (rw_lock_t* rw_lock = UT_LIST_GET_FIRST(rw_lock_list); + rw_lock != NULL; + rw_lock = UT_LIST_GET_NEXT(list, rw_lock)) { - for (mutex = UT_LIST_GET_FIRST(mutex_list); mutex != NULL; - mutex = UT_LIST_GET_NEXT(list, mutex)) { - if (mutex->count_os_wait == 0) { + if (rw_lock->count_os_wait == 0) { continue; } - if (buf_pool_is_block_mutex(mutex)) { - block_mutex = mutex; - block_mutex_oswait_count += mutex->count_os_wait; + int buf1len; + char buf1[IO_SIZE]; + + if (rw_lock->is_block_lock) { + + block_rwlock = rw_lock; + block_rwlock_oswait_count += rw_lock->count_os_wait; + continue; } - buf1len= (uint) my_snprintf(buf1, sizeof(buf1), "%s:%lu", - innobase_basename(mutex->cfile_name), - (ulong) mutex->cline); - buf2len= (uint) my_snprintf(buf2, sizeof(buf2), "os_waits=%lu", - (ulong) mutex->count_os_wait); + buf1len = ut_snprintf( + buf1, sizeof buf1, "rwlock: %s:%lu", + innobase_basename(rw_lock->cfile_name), + static_cast(rw_lock->cline)); - if (stat_print(thd, innobase_hton_name, - hton_name_len, buf1, buf1len, - buf2, buf2len)) { - mutex_exit(&mutex_list_mutex); - DBUG_RETURN(1); - } - } + int buf2len; + char buf2[IO_SIZE]; - if (block_mutex) { - buf1len = (uint) my_snprintf(buf1, sizeof buf1, - "combined %s:%lu", - innobase_basename( - block_mutex->cfile_name), - (ulong) block_mutex->cline); - buf2len = (uint) my_snprintf(buf2, sizeof buf2, - "os_waits=%lu", - (ulong) block_mutex_oswait_count); + buf2len = ut_snprintf( + buf2, sizeof buf2, "waits=%lu", + static_cast(rw_lock->count_os_wait)); if (stat_print(thd, innobase_hton_name, - hton_name_len, buf1, buf1len, - buf2, buf2len)) { - mutex_exit(&mutex_list_mutex); + hton_name_len, + buf1, static_cast(buf1len), + buf2, static_cast(buf2len))) { + + mutex_exit(&rw_lock_list_mutex); + DBUG_RETURN(1); } } - mutex_exit(&mutex_list_mutex); + if (block_rwlock != NULL) { - mutex_enter(&rw_lock_list_mutex); + int buf1len; + char buf1[IO_SIZE]; - for (lock = UT_LIST_GET_FIRST(rw_lock_list); lock != NULL; - lock = UT_LIST_GET_NEXT(list, lock)) { - if (lock->count_os_wait == 0) { - continue; - } + buf1len = ut_snprintf( + buf1, sizeof buf1, "sum rwlock: %s:%lu", + innobase_basename(block_rwlock->cfile_name), + static_cast(block_rwlock->cline)); - if (buf_pool_is_block_lock(lock)) { - block_lock = lock; - 
block_lock_oswait_count += lock->count_os_wait; - continue; - } + int buf2len; + char buf2[IO_SIZE]; - buf1len = (uint) my_snprintf( - buf1, sizeof buf1, "%s:%lu", - innobase_basename(lock->cfile_name), - static_cast(lock->cline)); - buf2len = (uint) my_snprintf( - buf2, sizeof buf2, "os_waits=%lu", - static_cast(lock->count_os_wait)); + buf2len = ut_snprintf( + buf2, sizeof buf2, "waits=%lu", + static_cast(block_rwlock_oswait_count)); if (stat_print(thd, innobase_hton_name, - hton_name_len, buf1, buf1len, - buf2, buf2len)) { - mutex_exit(&rw_lock_list_mutex); - DBUG_RETURN(1); - } - } + hton_name_len, + buf1, static_cast(buf1len), + buf2, static_cast(buf2len))) { - if (block_lock) { - buf1len = (uint) my_snprintf(buf1, sizeof buf1, - "combined %s:%lu", - innobase_basename( - block_lock->cfile_name), - (ulong) block_lock->cline); - buf2len = (uint) my_snprintf(buf2, sizeof buf2, - "os_waits=%lu", - (ulong) block_lock_oswait_count); - - if (stat_print(thd, innobase_hton_name, - hton_name_len, buf1, buf1len, - buf2, buf2len)) { mutex_exit(&rw_lock_list_mutex); + DBUG_RETURN(1); } } mutex_exit(&rw_lock_list_mutex); -#ifdef UNIV_DEBUG - buf2len = static_cast(my_snprintf(buf2, sizeof buf2, - "count=%lu, spin_waits=%lu, spin_rounds=%lu, " - "os_waits=%lu, os_yields=%lu, os_wait_times=%lu", - (ulong) rw_lock_count, - (ulong) rw_lock_count_spin_loop, - (ulong) rw_lock_count_spin_rounds, - (ulong) rw_lock_count_os_wait, - (ulong) rw_lock_count_os_yield, - (ulong) (rw_lock_wait_time / 1000))); - - if (stat_print(thd, innobase_hton_name, hton_name_len, - STRING_WITH_LEN("rw_lock_mutexes"), buf2, buf2len)) { - DBUG_RETURN(1); + DBUG_RETURN(0); +} + +/** Implements the SHOW MUTEX STATUS command. +@param[in,out] hton the innodb handlerton +@param[in,out] thd the MySQL query thread of the caller +@param[in,out] stat_print function for printing statistics +@return 0 on success. */ +static +int +innodb_show_latch_status( + handlerton* hton, + THD* thd, + stat_print_fn* stat_print) +{ + int ret = innodb_show_mutex_status(hton, thd, stat_print); + + if (ret != 0) { + return(ret); } -#endif /* UNIV_DEBUG */ - /* Success */ - DBUG_RETURN(0); + return(innodb_show_rwlock_status(hton, thd, stat_print)); } /************************************************************************//** @@ -15266,8 +18508,7 @@ innobase_show_status( return(innodb_show_status(hton, thd, stat_print) != 0); case HA_ENGINE_MUTEX: - /* Non-zero return value means there was an error. 
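
innodb_show_rwlock_status() above prints one row per rw-lock that has waited at least once, but folds every buffer-block rw-lock into a single aggregated "sum rwlock" row instead of listing thousands of identical block locks. A minimal sketch of that skip-and-aggregate loop over a mock lock list follows; the record type and names are illustrative only.

    #include <cstdio>
    #include <vector>

    /* Mock rw-lock instrumentation record. */
    struct rwlock_info {
        const char*   name;
        bool          is_block_lock;   /* buffer pool block lock? */
        unsigned long count_os_wait;
    };

    int main()
    {
        std::vector<rwlock_info> locks;
        rwlock_info l1 = { "dict_operation_lock", false, 3 };
        rwlock_info l2 = { "block->lock",         true,  7 };
        rwlock_info l3 = { "block->lock",         true,  5 };
        rwlock_info l4 = { "checkpoint_lock",     false, 0 };
        locks.push_back(l1); locks.push_back(l2);
        locks.push_back(l3); locks.push_back(l4);

        unsigned long block_waits = 0;

        for (size_t i = 0; i < locks.size(); ++i) {
            if (locks[i].count_os_wait == 0) {
                continue;               /* never contended: skip */
            }
            if (locks[i].is_block_lock) {
                /* aggregate instead of printing each instance */
                block_waits += locks[i].count_os_wait;
                continue;
            }
            std::printf("rwlock: %s\twaits=%lu\n",
                        locks[i].name, locks[i].count_os_wait);
        }

        if (block_waits > 0) {
            std::printf("sum rwlock: block locks\twaits=%lu\n",
                        block_waits);
        }
        return(0);
    }
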
*/ - return(innodb_mutex_show_status(hton, thd, stat_print) != 0); + return(innodb_show_latch_status(hton, thd, stat_print) != 0); case HA_ENGINE_LOGS: /* Not handled */ @@ -15278,6 +18519,56 @@ innobase_show_status( return(false); } +/** Refresh template for the virtual columns and their base columns if +the share structure exists +@param[in] table MySQL TABLE +@param[in] ib_table InnoDB dict_table_t +@param[in] table_name table_name used to find the share structure */ +void +refresh_share_vtempl( + const TABLE* mysql_table, + const dict_table_t* ib_table, + const char* table_name) +{ + INNOBASE_SHARE* share; + + ulint fold = ut_fold_string(table_name); + + mysql_mutex_lock(&innobase_share_mutex); + + HASH_SEARCH(table_name_hash, innobase_open_tables, fold, + INNOBASE_SHARE*, share, + ut_ad(share->use_count > 0), + !strcmp(share->table_name, table_name)); + + if (share == NULL) { + /* Partition table does not have "share" structure + instantiated, no need to refresh it */ +#ifdef UNIV_DEBUG + #ifdef _WIN32 + char* is_part = strstr(ib_table->name.m_name, "#p#"); + #else + char* is_part = strstr(ib_table->name.m_name, "#P#"); + #endif /* _WIN32 */ + + ut_ad(is_part != NULL); +#endif /* UNIV_DEBUG */ + + mysql_mutex_unlock(&innobase_share_mutex); + return; + } + + free_share_vtemp(share); + + innobase_build_v_templ( + mysql_table, ib_table, &(share->s_templ), NULL, true, + share->table_name); + + mysql_mutex_unlock(&innobase_share_mutex); + + return; +} + /************************************************************************//** Handling the shared INNOBASE_SHARE structure that is needed to provide table locking. Register the table name if it doesn't exist in the hash table. */ @@ -15298,36 +18589,54 @@ get_share( ut_ad(share->use_count > 0), !strcmp(share->table_name, table_name)); - if (!share) { + if (share == NULL) { uint length = (uint) strlen(table_name); /* TODO: invoke HASH_MIGRATE if innobase_open_tables grows too big */ - share = (INNOBASE_SHARE*) my_malloc(sizeof(*share)+length+1, - MYF(MY_FAE | MY_ZEROFILL)); + share = reinterpret_cast( + my_malloc( + sizeof(*share) + length + 1, + MYF(MY_FAE | MY_ZEROFILL))); + /* JAN: TODO: MySQL 5.7 PSI + share = reinterpret_cast( + my_malloc(PSI_INSTRUMENT_ME, + sizeof(*share) + length + 1, + MYF(MY_FAE | MY_ZEROFILL))); + */ - share->table_name = (char*) memcpy(share + 1, - table_name, length + 1); + share->table_name = reinterpret_cast( + memcpy(share + 1, table_name, length + 1)); HASH_INSERT(INNOBASE_SHARE, table_name_hash, innobase_open_tables, fold, share); - thr_lock_init(&share->lock); - /* Index translation table initialization */ share->idx_trans_tbl.index_mapping = NULL; share->idx_trans_tbl.index_count = 0; share->idx_trans_tbl.array_size = 0; + share->s_templ.vtempl = NULL; + share->s_templ.n_col = 0; } - share->use_count++; + ++share->use_count; + mysql_mutex_unlock(&innobase_share_mutex); return(share); } +/** Free a virtual template in INNOBASE_SHARE structure +@param[in,out] share table share holds the template to free */ +void +free_share_vtemp( + INNOBASE_SHARE* share) +{ + free_vc_templ(&share->s_templ); +} + /************************************************************************//** Free the shared object that was registered with get_share(). 
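
get_share() above keeps one INNOBASE_SHARE per table name in a hash protected by innobase_share_mutex, bumping use_count on every open; free_share() below drops the entry once the count falls back to zero. The next block is a simplified, single-threaded sketch of that reference-counted registry using std::map; the share contents are mocked, and the real code additionally resets the index translation table and the virtual-column template.

    #include <cassert>
    #include <map>
    #include <string>

    /* Mock of INNOBASE_SHARE. */
    struct table_share {
        unsigned use_count;
        table_share() : use_count(0) {}
    };

    typedef std::map<std::string, table_share> share_map;

    static share_map open_tables;   /* innobase_open_tables analogue */

    /* get_share(): create on first use, then just bump the count. */
    static table_share* get_share(const std::string& name)
    {
        table_share& share = open_tables[name];
        ++share.use_count;
        return(&share);
    }

    /* free_share(): drop the entry once the last user is gone. */
    static void free_share(const std::string& name)
    {
        share_map::iterator it = open_tables.find(name);
        assert(it != open_tables.end());

        if (--it->second.use_count == 0) {
            open_tables.erase(it);
        }
    }

    int main()
    {
        get_share("test/t1");
        get_share("test/t1");        /* second handler, same table */
        assert(open_tables.size() == 1);

        free_share("test/t1");
        free_share("test/t1");
        assert(open_tables.empty());
        return(0);
    }
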
*/ static @@ -15340,7 +18649,7 @@ free_share( #ifdef UNIV_DEBUG INNOBASE_SHARE* share2; - ulint fold = ut_fold_string(share->table_name); + ulint fold = ut_fold_string(share->table_name); HASH_SEARCH(table_name_hash, innobase_open_tables, fold, INNOBASE_SHARE*, share2, @@ -15350,15 +18659,18 @@ free_share( ut_a(share2 == share); #endif /* UNIV_DEBUG */ - if (!--share->use_count) { + --share->use_count; + + if (share->use_count == 0) { ulint fold = ut_fold_string(share->table_name); HASH_DELETE(INNOBASE_SHARE, table_name_hash, innobase_open_tables, fold, share); - thr_lock_delete(&share->lock); /* Free any memory from index translation table */ - my_free(share->idx_trans_tbl.index_mapping); + ut_free(share->idx_trans_tbl.index_mapping); + + free_share_vtemp(share); my_free(share); @@ -15369,36 +18681,56 @@ free_share( mysql_mutex_unlock(&innobase_share_mutex); } +/*********************************************************************//** +Returns number of THR_LOCK locks used for one instance of InnoDB table. +InnoDB no longer relies on THR_LOCK locks so 0 value is returned. +Instead of THR_LOCK locks InnoDB relies on combination of metadata locks +(e.g. for LOCK TABLES and DDL) and its own locking subsystem. +Note that even though this method returns 0, SQL-layer still calls +::store_lock(), ::start_stmt() and ::external_lock() methods for InnoDB +tables. */ + +uint +ha_innobase::lock_count(void) const +/*===============================*/ +{ + return 0; +} + /*****************************************************************//** -Converts a MySQL table lock stored in the 'lock' field of the handle to -a proper type before storing pointer to the lock into an array of pointers. +Supposed to convert a MySQL table lock stored in the 'lock' field of the +handle to a proper type before storing pointer to the lock into an array +of pointers. +In practice, since InnoDB no longer relies on THR_LOCK locks and its +lock_count() method returns 0 it just informs storage engine about type +of THR_LOCK which SQL-layer would have acquired for this specific statement +on this specific table. MySQL also calls this if it wants to reset some table locks to a not-locked state during the processing of an SQL query. An example is that during a SELECT the read lock is released early on the 'const' tables where we only fetch one row. MySQL does not call this when it releases all locks at the end of an SQL statement. -@return pointer to the next element in the 'to' array */ -UNIV_INTERN +@return pointer to the current element in the 'to' array. */ + THR_LOCK_DATA** ha_innobase::store_lock( /*====================*/ THD* thd, /*!< in: user thread handle */ - THR_LOCK_DATA** to, /*!< in: pointer to an array - of pointers to lock structs; - pointer to the 'lock' field - of current handle is stored - next to this array */ - enum thr_lock_type lock_type) /*!< in: lock type to store in + THR_LOCK_DATA** to, /*!< in: pointer to the current + element in an array of pointers + to lock structs; + only used as return value */ + thr_lock_type lock_type) /*!< in: lock type to store in 'lock'; this may also be TL_IGNORE */ { - trx_t* trx; - - /* Note that trx in this function is NOT necessarily prebuilt->trx + /* Note that trx in this function is NOT necessarily m_prebuilt->trx because we call update_thd() later, in ::external_lock()! Failure to understand this caused a serious memory corruption bug in 5.1.11. 
*/ - trx = check_trx_exists(thd); + trx_t* trx = check_trx_exists(thd); + + TrxInInnoDB trx_in_innodb(trx); /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! Be careful to ignore TL_IGNORE if we are going to do something with @@ -15413,12 +18745,16 @@ ha_innobase::store_lock( (enum_tx_isolation) thd_tx_isolation(thd)); if (trx->isolation_level <= TRX_ISO_READ_COMMITTED - && trx->global_read_view) { + && MVCC::is_view_active(trx->read_view)) { /* At low transaction isolation levels we let each consistent read set its own snapshot */ - read_view_close_for_mysql(trx); + mutex_enter(&trx_sys->mutex); + + trx_sys->mvcc->view_close(trx->read_view, true); + + mutex_exit(&trx_sys->mutex); } } @@ -15427,6 +18763,7 @@ ha_innobase::store_lock( const uint sql_command = thd_sql_command(thd); if (srv_read_only_mode + && !dict_table_is_intrinsic(m_prebuilt->table) && (sql_command == SQLCOM_UPDATE || sql_command == SQLCOM_INSERT || sql_command == SQLCOM_REPLACE @@ -15454,16 +18791,16 @@ ha_innobase::store_lock( detected in the function. */ dberr_t err = row_quiesce_set_state( - prebuilt->table, QUIESCE_START, trx); + m_prebuilt->table, QUIESCE_START, trx); ut_a(err == DB_SUCCESS || err == DB_UNSUPPORTED); if (trx->isolation_level == TRX_ISO_SERIALIZABLE) { - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; + m_prebuilt->select_lock_type = LOCK_S; + m_prebuilt->stored_select_lock_type = LOCK_S; } else { - prebuilt->select_lock_type = LOCK_NONE; - prebuilt->stored_select_lock_type = LOCK_NONE; + m_prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->stored_select_lock_type = LOCK_NONE; } /* Check for DROP TABLE */ @@ -15471,7 +18808,7 @@ ha_innobase::store_lock( /* MySQL calls this function in DROP TABLE though this table handle may belong to another thd that is running a query. Let - us in that case skip any changes to the prebuilt struct. */ + us in that case skip any changes to the m_prebuilt struct. */ /* Check for LOCK TABLE t1,...,tn WITH SHARED LOCKS */ } else if ((lock_type == TL_READ && in_lock_tables) @@ -15522,11 +18859,11 @@ ha_innobase::store_lock( MODE in select, then we use consistent read for select. */ - prebuilt->select_lock_type = LOCK_NONE; - prebuilt->stored_select_lock_type = LOCK_NONE; + m_prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->stored_select_lock_type = LOCK_NONE; } else { - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; + m_prebuilt->select_lock_type = LOCK_S; + m_prebuilt->stored_select_lock_type = LOCK_S; } } else if (lock_type != TL_IGNORE) { @@ -15534,86 +18871,24 @@ ha_innobase::store_lock( /* We set possible LOCK_X value in external_lock, not yet here even if this would be SELECT ... FOR UPDATE */ - prebuilt->select_lock_type = LOCK_NONE; - prebuilt->stored_select_lock_type = LOCK_NONE; - } - - if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { - - /* Starting from 5.0.7, we weaken also the table locks - set at the start of a MySQL stored procedure call, just like - we weaken the locks set at the start of an SQL statement. - MySQL does set in_lock_tables TRUE there, but in reality - we do not need table locks to make the execution of a - single transaction stored procedure call deterministic - (if it does not use a consistent read). */ - - if (lock_type == TL_READ - && sql_command == SQLCOM_LOCK_TABLES) { - /* We come here if MySQL is processing LOCK TABLES - ... READ LOCAL. 
MyISAM under that table lock type - reads the table as it was at the time the lock was - granted (new inserts are allowed, but not seen by the - reader). To get a similar effect on an InnoDB table, - we must use LOCK TABLES ... READ. We convert the lock - type here, so that for InnoDB, READ LOCAL is - equivalent to READ. This will change the InnoDB - behavior in mysqldump, so that dumps of InnoDB tables - are consistent with dumps of MyISAM tables. */ - - lock_type = TL_READ_NO_INSERT; - } - - /* If we are not doing a LOCK TABLE, DISCARD/IMPORT - TABLESPACE or TRUNCATE TABLE then allow multiple - writers. Note that ALTER TABLE uses a TL_WRITE_ALLOW_READ - < TL_WRITE_CONCURRENT_INSERT. - - We especially allow multiple writers if MySQL is at the - start of a stored procedure call (SQLCOM_CALL) or a - stored function call (MySQL does have in_lock_tables - TRUE there). */ - - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT - && lock_type <= TL_WRITE) - && !(in_lock_tables - && sql_command == SQLCOM_LOCK_TABLES) - && !thd_tablespace_op(thd) - && sql_command != SQLCOM_TRUNCATE - && sql_command != SQLCOM_OPTIMIZE - && sql_command != SQLCOM_CREATE_TABLE) { - - lock_type = TL_WRITE_ALLOW_WRITE; - } - - /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ... - MySQL would use the lock TL_READ_NO_INSERT on t2, and that - would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts - to t2. Convert the lock to a normal read lock to allow - concurrent inserts to t2. - - We especially allow concurrent inserts if MySQL is at the - start of a stored procedure call (SQLCOM_CALL) - (MySQL does have thd_in_lock_tables() TRUE there). */ - - if (lock_type == TL_READ_NO_INSERT - && sql_command != SQLCOM_LOCK_TABLES) { - - lock_type = TL_READ; - } - - lock.type = lock_type; + m_prebuilt->select_lock_type = LOCK_NONE; + m_prebuilt->stored_select_lock_type = LOCK_NONE; } - *to++= &lock; - if (!trx_is_started(trx) - && (prebuilt->select_lock_type != LOCK_NONE - || prebuilt->stored_select_lock_type != LOCK_NONE)) { + && (m_prebuilt->select_lock_type != LOCK_NONE + || m_prebuilt->stored_select_lock_type != LOCK_NONE)) { ++trx->will_lock; } +#ifdef UNIV_DEBUG + if(trx->is_dd_trx) { + ut_ad(trx->will_lock == 0 + && m_prebuilt->select_lock_type == LOCK_NONE); + } +#endif /* UNIV_DEBUG */ + return(to); } @@ -15621,8 +18896,8 @@ ha_innobase::store_lock( Read the next autoinc value. Acquire the relevant locks before reading the AUTOINC value. If SUCCESS then the table AUTOINC mutex will be locked on return and all relevant locks acquired. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ + dberr_t ha_innobase::innobase_get_autoinc( /*==============================*/ @@ -15630,28 +18905,28 @@ ha_innobase::innobase_get_autoinc( { *value = 0; - prebuilt->autoinc_error = innobase_lock_autoinc(); + m_prebuilt->autoinc_error = innobase_lock_autoinc(); - if (prebuilt->autoinc_error == DB_SUCCESS) { + if (m_prebuilt->autoinc_error == DB_SUCCESS) { /* Determine the first value of the interval */ - *value = dict_table_autoinc_read(prebuilt->table); + *value = dict_table_autoinc_read(m_prebuilt->table); /* It should have been initialized during open. 
*/ if (*value == 0) { - prebuilt->autoinc_error = DB_UNSUPPORTED; - dict_table_autoinc_unlock(prebuilt->table); + m_prebuilt->autoinc_error = DB_UNSUPPORTED; + dict_table_autoinc_unlock(m_prebuilt->table); } } - return(prebuilt->autoinc_error); + return(m_prebuilt->autoinc_error); } /*******************************************************************//** This function reads the global auto-inc counter. It doesn't use the AUTOINC lock even if the lock mode is set to TRADITIONAL. -@return the autoinc value */ -UNIV_INTERN +@return the autoinc value */ + ulonglong ha_innobase::innobase_peek_autoinc(void) /*====================================*/ @@ -15659,19 +18934,18 @@ ha_innobase::innobase_peek_autoinc(void) ulonglong auto_inc; dict_table_t* innodb_table; - ut_a(prebuilt != NULL); - ut_a(prebuilt->table != NULL); + ut_a(m_prebuilt != NULL); + ut_a(m_prebuilt->table != NULL); - innodb_table = prebuilt->table; + innodb_table = m_prebuilt->table; dict_table_autoinc_lock(innodb_table); auto_inc = dict_table_autoinc_read(innodb_table); if (auto_inc == 0) { - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: AUTOINC next value generation " - "is disabled for '%s'\n", innodb_table->name); + ib::info() << "AUTOINC next value generation is disabled for" + " '" << innodb_table->name << "'"; } dict_table_autoinc_unlock(innodb_table); @@ -15681,7 +18955,7 @@ ha_innobase::innobase_peek_autoinc(void) /*********************************************************************//** Returns the value of the auto-inc counter in *first_value and ~0 on failure. */ -UNIV_INTERN + void ha_innobase::get_auto_increment( /*============================*/ @@ -15698,7 +18972,7 @@ ha_innobase::get_auto_increment( dberr_t error; ulonglong autoinc = 0; - /* Prepare prebuilt->trx in the table handle */ + /* Prepare m_prebuilt->trx in the table handle */ update_thd(ha_thd()); error = innobase_get_autoinc(&autoinc); @@ -15716,7 +18990,9 @@ ha_innobase::get_auto_increment( called and count down from that as rows are written (see write_row()). */ - trx = prebuilt->trx; + trx = m_prebuilt->trx; + + TrxInInnoDB trx_in_innodb(trx); /* Note: We can't rely on *first_value since some MySQL engines, in particular the partition engine, don't initialize it to 0 when @@ -15725,8 +19001,10 @@ ha_innobase::get_auto_increment( /* We need the upper limit of the col type to check for whether we update the table autoinc counter or not. */ - ulonglong col_max_value = innobase_get_int_col_max_value( - table->next_number_field); + ulonglong col_max_value = innobase_get_int_col_max_value(table->next_number_field); + /* JAN: TODO: MySQL 5.7 + table->next_number_field->get_max_int_value(); + */ /* Called for the first time ? */ if (trx->n_autoinc_rows == 0) { @@ -15742,19 +19020,19 @@ ha_innobase::get_auto_increment( set_if_bigger(*first_value, autoinc); /* Not in the middle of a mult-row INSERT. */ - } else if (prebuilt->autoinc_last_value == 0) { + } else if (m_prebuilt->autoinc_last_value == 0) { set_if_bigger(*first_value, autoinc); } - if (*first_value > col_max_value) - { + if (*first_value > col_max_value) { /* Out of range number. 
Let handler::update_auto_increment() take care of this */ - prebuilt->autoinc_last_value = 0; - dict_table_autoinc_unlock(prebuilt->table); + m_prebuilt->autoinc_last_value = 0; + dict_table_autoinc_unlock(m_prebuilt->table); *nb_reserved_values= 0; return; } + *nb_reserved_values = trx->n_autoinc_rows; /* With old style AUTOINC locking we only update the table's @@ -15765,23 +19043,24 @@ ha_innobase::get_auto_increment( current = *first_value; - if (prebuilt->autoinc_increment != increment) { + if (m_prebuilt->autoinc_increment != increment) { WSREP_DEBUG("autoinc decrease: %llu -> %llu\n" "THD: %ld, current: %llu, autoinc: %llu", - prebuilt->autoinc_increment, + m_prebuilt->autoinc_increment, increment, thd_get_thread_id(ha_thd()), current, autoinc); - if (!wsrep_on(ha_thd())) - { - current = autoinc - prebuilt->autoinc_increment; + + if (!wsrep_on(ha_thd())) { + current = autoinc - m_prebuilt->autoinc_increment; } current = innobase_next_autoinc( current, 1, increment, offset, col_max_value); - dict_table_autoinc_initialize(prebuilt->table, current); + dict_table_autoinc_initialize( + m_prebuilt->table, current); *first_value = current; } @@ -15791,69 +19070,35 @@ ha_innobase::get_auto_increment( current, *nb_reserved_values, increment, offset, col_max_value); - prebuilt->autoinc_last_value = next_value; + m_prebuilt->autoinc_last_value = next_value; - if (prebuilt->autoinc_last_value < *first_value) { + if (m_prebuilt->autoinc_last_value < *first_value) { *first_value = (~(ulonglong) 0); } else { /* Update the table autoinc variable */ dict_table_autoinc_update_if_greater( - prebuilt->table, prebuilt->autoinc_last_value); + m_prebuilt->table, + m_prebuilt->autoinc_last_value); } } else { /* This will force write_row() into attempting an update of the table's AUTOINC counter. */ - prebuilt->autoinc_last_value = 0; + m_prebuilt->autoinc_last_value = 0; } /* The increment to be used to increase the AUTOINC value, we use this in write_row() and update_row() to increase the autoinc counter for columns that are filled by the user. We need the offset and the increment. */ - prebuilt->autoinc_offset = offset; - prebuilt->autoinc_increment = increment; - - dict_table_autoinc_unlock(prebuilt->table); -} - -/*******************************************************************//** -Reset the auto-increment counter to the given value, i.e. the next row -inserted will get the given value. This is called e.g. after TRUNCATE -is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is -returned by storage engines that don't support this operation. -@return 0 or error code */ -UNIV_INTERN -int -ha_innobase::reset_auto_increment( -/*==============================*/ - ulonglong value) /*!< in: new value for table autoinc */ -{ - DBUG_ENTER("ha_innobase::reset_auto_increment"); - - dberr_t error; - - update_thd(ha_thd()); - - error = row_lock_table_autoinc_for_mysql(prebuilt); - - if (error != DB_SUCCESS) { - DBUG_RETURN(convert_error_code_to_mysql( - error, prebuilt->table->flags, user_thd)); - } - - /* The next value can never be 0. 
*/ - if (value == 0) { - value = 1; - } - - innobase_reset_autoinc(value); + m_prebuilt->autoinc_offset = offset; + m_prebuilt->autoinc_increment = increment; - DBUG_RETURN(0); + dict_table_autoinc_unlock(m_prebuilt->table); } /*******************************************************************//** See comment in handler.cc */ -UNIV_INTERN + bool ha_innobase::get_error_message( /*===========================*/ @@ -15873,24 +19118,21 @@ ha_innobase::get_error_message( return(FALSE); } -/*******************************************************************//** - Retrieves the names of the table and the key for which there was a - duplicate entry in the case of HA_ERR_FOREIGN_DUPLICATE_KEY. - - If any of the names is not available, then this method will return - false and will not change any of child_table_name or child_key_name. - - @param child_table_name[out] Table name - @param child_table_name_len[in] Table name buffer size - @param child_key_name[out] Key name - @param child_key_name_len[in] Key name buffer size - - @retval true table and key names were available - and were written into the corresponding - out parameters. - @retval false table and key names were not available, - the out parameters were not touched. -*/ +/** Retrieves the names of the table and the key for which there was a +duplicate entry in the case of HA_ERR_FOREIGN_DUPLICATE_KEY. + +If any of the names is not available, then this method will return +false and will not change any of child_table_name or child_key_name. + +@param[out] child_table_name Table name +@param[in] child_table_name_len Table name buffer size +@param[out] child_key_name Key name +@param[in] child_key_name_len Key name buffer size + +@retval true table and key names were available and were written into the +corresponding out parameters. +@retval false table and key names were not available, the out parameters +were not touched. */ bool ha_innobase::get_foreign_dup_key( /*=============================*/ @@ -15901,10 +19143,10 @@ ha_innobase::get_foreign_dup_key( { const dict_index_t* err_index; - ut_a(prebuilt->trx != NULL); - ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); + ut_a(m_prebuilt->trx != NULL); + ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); - err_index = trx_get_error_info(prebuilt->trx); + err_index = trx_get_error_info(m_prebuilt->trx); if (err_index == NULL) { return(false); @@ -15913,20 +19155,24 @@ ha_innobase::get_foreign_dup_key( /* copy table name (and convert from filename-safe encoding to system_charset_info) */ - char* p; - p = strchr(err_index->table->name, '/'); + char* p = strchr(err_index->table->name.m_name, '/'); + /* strip ".../" prefix if any */ if (p != NULL) { p++; } else { - p = err_index->table->name; + p = err_index->table->name.m_name; } - uint len; + + size_t len; + len = filename_to_tablename(p, child_table_name, child_table_name_len); + child_table_name[len] = '\0'; /* copy index name */ - ut_snprintf(child_key_name, child_key_name_len, "%s", err_index->name); + ut_snprintf(child_key_name, child_key_name_len, "%s", + err_index->name()); return(true); } @@ -15935,8 +19181,8 @@ ha_innobase::get_foreign_dup_key( Compares two 'refs'. A 'ref' is the (internal) primary key value of the row. If there is no explicitly declared non-null unique key or a primary key, then InnoDB internally uses the row id as the primary key. 
-@return < 0 if ref1 < ref2, 0 if equal, else > 0 */ -UNIV_INTERN +@return < 0 if ref1 < ref2, 0 if equal, else > 0 */ + int ha_innobase::cmp_ref( /*=================*/ @@ -15953,7 +19199,7 @@ ha_innobase::cmp_ref( uint len2; int result; - if (prebuilt->clust_index_was_generated) { + if (m_prebuilt->clust_index_was_generated) { /* The 'ref' is an InnoDB row id */ return(memcmp(ref1, ref2, DATA_ROW_ID_LEN)); @@ -15965,7 +19211,7 @@ ha_innobase::cmp_ref( key_part = table->key_info[table->s->primary_key].key_part; key_part_end = key_part - + table->key_info[table->s->primary_key].user_defined_key_parts; + + table->key_info[table->s->primary_key].user_defined_key_parts; for (; key_part != key_part_end; ++key_part) { field = key_part->field; @@ -16002,8 +19248,8 @@ ha_innobase::cmp_ref( /*******************************************************************//** Ask InnoDB if a query to a table can be cached. -@return TRUE if query caching of the table is permitted */ -UNIV_INTERN +@return TRUE if query caching of the table is permitted */ + my_bool ha_innobase::register_query_cache_table( /*====================================*/ @@ -16018,34 +19264,14 @@ ha_innobase::register_query_cache_table( is permitted */ ulonglong *engine_data) /*!< in/out: data to call_back */ { - *call_back = innobase_query_caching_of_table_permitted; *engine_data = 0; - return(innobase_query_caching_of_table_permitted(thd, table_key, - key_length, - engine_data)); -} - -/*******************************************************************//** -Get the bin log name. */ -UNIV_INTERN -const char* -ha_innobase::get_mysql_bin_log_name() -/*=================================*/ -{ - return(trx_sys_mysql_bin_log_name); -} -/*******************************************************************//** -Get the bin log offset (or file position). */ -UNIV_INTERN -ulonglong -ha_innobase::get_mysql_bin_log_pos() -/*================================*/ -{ - /* trx... is ib_int64_t, which is a typedef for a 64-bit integer - (__int64 or longlong) so it's ok to cast it to ulonglong. */ + *call_back = innobase_query_caching_of_table_permitted; - return(trx_sys_mysql_bin_log_pos); + return(innobase_query_caching_of_table_permitted( + thd, table_key, + static_cast(key_length), + engine_data)); } /******************************************************************//** @@ -16053,8 +19279,7 @@ This function is used to find the storage length in bytes of the first n characters for prefix indexes using a multibyte character set. The function finds charset information and returns length of prefix_len characters in the index field in bytes. -@return number of bytes occupied by the first n characters */ -UNIV_INTERN +@return number of bytes occupied by the first n characters */ ulint innobase_get_at_most_n_mbchars( /*===========================*/ @@ -16107,12 +19332,13 @@ innobase_get_at_most_n_mbchars( if (char_length > data_len) { char_length = data_len; } + } else if (data_len < prefix_len) { + + char_length = data_len; + } else { - if (data_len < prefix_len) { - char_length = data_len; - } else { - char_length = prefix_len; - } + + char_length = prefix_len; } return(char_length); @@ -16120,7 +19346,7 @@ innobase_get_at_most_n_mbchars( /*******************************************************************//** This function is used to prepare an X/Open XA distributed transaction. 
-@return 0 or error number */ +@return 0 or error number */ static int innobase_xa_prepare( @@ -16133,7 +19359,6 @@ innobase_xa_prepare( false - the current SQL statement ended */ { - int error = 0; trx_t* trx = check_trx_exists(thd); DBUG_ASSERT(hton == innodb_hton_ptr); @@ -16142,14 +19367,10 @@ innobase_xa_prepare( time, not the current session variable value. Any possible changes to the session variable take effect only in the next transaction */ if (!trx->support_xa) { - -#ifdef WITH_WSREP - thd_get_xid(thd, (MYSQL_XID*) &trx->xid); -#endif // WITH_WSREP return(0); } - thd_get_xid(thd, (MYSQL_XID*) &trx->xid); + thd_get_xid(thd, (MYSQL_XID*) trx->xid); /* Release a possible FIFO ticket and search latch. Since we will reserve the trx_sys->mutex, we have to release the search system @@ -16159,10 +19380,17 @@ innobase_xa_prepare( innobase_srv_conc_force_exit_innodb(trx); + TrxInInnoDB trx_in_innodb(trx); + + if (trx_in_innodb.is_aborted()) { + + return(innobase_rollback(hton, thd, prepare_trx)); + } + if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { - sql_print_error("Transaction not registered for MySQL 2PC, " - "but transaction is active"); + sql_print_error("Transaction not registered for MySQL 2PC," + " but transaction is active"); } if (prepare_trx @@ -16173,9 +19401,14 @@ innobase_xa_prepare( ut_ad(trx_is_registered_for_2pc(trx)); - trx_prepare_for_mysql(trx); + dberr_t err = trx_prepare_for_mysql(trx); + + ut_ad(err == DB_SUCCESS || err == DB_FORCED_ABORT); + + if (err == DB_FORCED_ABORT) { + return(innobase_rollback(hton, thd, prepare_trx)); + } - error = 0; } else { /* We just mark the SQL statement ended and do not do a transaction prepare */ @@ -16192,12 +19425,12 @@ innobase_xa_prepare( trx_mark_sql_stat_end(trx); } - return(error); + return(0); } /*******************************************************************//** This function is used to recover X/Open XA distributed transactions. 
-@return number of prepared transactions stored in xid_list */ +@return number of prepared transactions stored in xid_list */ static int innobase_xa_recover( @@ -16219,7 +19452,7 @@ innobase_xa_recover( /*******************************************************************//** This function is used to commit one X/Open XA distributed transaction which is in the prepared state -@return 0 or error number */ +@return 0 or error number */ static int innobase_commit_by_xid( @@ -16227,15 +19460,20 @@ innobase_commit_by_xid( handlerton* hton, XID* xid) /*!< in: X/Open XA transaction identification */ { - trx_t* trx; - DBUG_ASSERT(hton == innodb_hton_ptr); - trx = trx_get_trx_by_xid(xid); + trx_t* trx = trx_get_trx_by_xid(xid); + + if (trx != NULL) { + TrxInInnoDB trx_in_innodb(trx); - if (trx) { innobase_commit_low(trx); + ut_ad(trx->mysql_thd == NULL); + /* use cases are: disconnected xa, slave xa, recovery */ + trx_deregister_from_2pc(trx); + ut_ad(!trx->will_lock); /* trx cache requirement */ trx_free_for_background(trx); + return(XA_OK); } else { return(XAER_NOTA); @@ -16245,7 +19483,7 @@ innobase_commit_by_xid( /*******************************************************************//** This function is used to rollback one X/Open XA distributed transaction which is in the prepared state -@return 0 or error number */ +@return 0 or error number */ static int innobase_rollback_by_xid( @@ -16254,15 +19492,19 @@ innobase_rollback_by_xid( XID* xid) /*!< in: X/Open XA transaction identification */ { - trx_t* trx; - DBUG_ASSERT(hton == innodb_hton_ptr); - trx = trx_get_trx_by_xid(xid); + trx_t* trx = trx_get_trx_by_xid(xid); + + if (trx != NULL) { + TrxInInnoDB trx_in_innodb(trx); - if (trx) { int ret = innobase_rollback_trx(trx); + + trx_deregister_from_2pc(trx); + ut_ad(!trx->will_lock); trx_free_for_background(trx); + return(ret); } else { return(XAER_NOTA); @@ -16284,7 +19526,9 @@ innobase_create_cursor_view( { DBUG_ASSERT(hton == innodb_hton_ptr); - return(read_cursor_view_create_for_mysql(check_trx_exists(thd))); + return NULL; + // JAN: TODO: MySQL 5.7 Needed ? + // return(read_cursor_view_create_for_mysql(check_trx_exists(thd))); } /*******************************************************************//** @@ -16301,8 +19545,10 @@ innobase_close_cursor_view( { DBUG_ASSERT(hton == innodb_hton_ptr); + /* JAN: TODO: MySQL 5.7 Needed read_cursor_view_close_for_mysql(check_trx_exists(thd), (cursor_view_t*) curview); + */ } /*******************************************************************//** @@ -16320,13 +19566,15 @@ innobase_set_cursor_view( { DBUG_ASSERT(hton == innodb_hton_ptr); + /* JAN: TODO: MySQL 5.7 Needed ? 
read_cursor_set_for_mysql(check_trx_exists(thd), (cursor_view_t*) curview); + */ } /*******************************************************************//** */ -UNIV_INTERN + bool ha_innobase::check_if_incompatible_data( /*====================================*/ @@ -16339,7 +19587,7 @@ ha_innobase::check_if_incompatible_data( param_new = info->option_struct; param_old = table->s->option_struct; - innobase_copy_frm_flags_from_create_info(prebuilt->table, info); + innobase_copy_frm_flags_from_create_info(m_prebuilt->table, info); if (table_changes != IS_EQUAL_YES) { @@ -16347,8 +19595,8 @@ ha_innobase::check_if_incompatible_data( } /* Check that auto_increment value was not changed */ - if ((info->used_fields & HA_CREATE_USED_AUTO) && - info->auto_increment_value != 0) { + if ((info->used_fields & HA_CREATE_USED_AUTO) + && info->auto_increment_value != 0) { return(COMPATIBLE_DATA_NO); } @@ -16391,6 +19639,7 @@ innodb_io_capacity_max_update( from check function */ { ulong in_val = *static_cast(save); + if (in_val < srv_io_capacity) { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, @@ -16520,7 +19769,7 @@ ha_innobase::set_partition_owner_stats(ha_statistics *stats) /************************************************************//** Validate the file format name and return its corresponding id. -@return valid file format id */ +@return valid file format id */ static uint innobase_file_format_name_lookup( @@ -16587,7 +19836,7 @@ innobase_file_format_validate_and_set( /*************************************************************//** Check if it is a valid file format. This function is registered as a callback with MySQL. -@return 0 for valid file format */ +@return 0 for valid file format */ static int innodb_file_format_name_validate( @@ -16649,6 +19898,10 @@ innodb_file_format_name_update( ut_a(var_ptr != NULL); ut_a(save != NULL); + + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_WRONG_COMMAND, deprecated_file_format); + format_name = *static_cast(save); if (format_name) { @@ -16668,7 +19921,7 @@ innodb_file_format_name_update( /*************************************************************//** Check if valid argument to innodb_file_format_max. This function is registered as a callback with MySQL. -@return 0 for valid file format */ +@return 0 for valid file format */ static int innodb_file_format_max_validate( @@ -16708,9 +19961,9 @@ innodb_file_format_max_validate( push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, - "InnoDB: invalid innodb_file_format_max " - "value; can be any format up to %s " - "or equivalent id of %d", + "InnoDB: invalid innodb_file_format_max" + " value; can be any format up to %s" + " or equivalent id of %d", trx_sys_file_format_id_to_name(UNIV_FORMAT_MAX), UNIV_FORMAT_MAX); } @@ -16742,6 +19995,10 @@ innodb_file_format_max_update( ut_a(save != NULL); ut_a(var_ptr != NULL); + + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_WRONG_COMMAND, deprecated_file_format_max); + format_name_in = *static_cast(save); if (!format_name_in) { @@ -16764,13 +20021,30 @@ innodb_file_format_max_update( /* Update the max format id in the system tablespace. */ if (trx_sys_file_format_max_set(format_id, format_name_out)) { - ut_print_timestamp(stderr); - fprintf(stderr, - " [Info] InnoDB: the file format in the system " - "tablespace is now set to %s.\n", *format_name_out); + ib::info() << "The file format in the system tablespace is now" + " set to " << *format_name_out << "."; } } +/** Update innodb_large_prefix. 
+@param[in,out] thd MySQL client connection +@param[out] var_ptr current value +@param[in] save to-be-assigned value */ +static +void +innodb_large_prefix_update( + THD* thd, + st_mysql_sys_var*, + void* var_ptr, + const void* save) +{ + + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_WRONG_COMMAND, deprecated_large_prefix); + + *static_cast(var_ptr) = *static_cast(save); +} + /*************************************************************//** Check whether valid argument given to innobase_*_stopword_table. This function is registered as a callback with MySQL. @@ -16814,6 +20088,125 @@ innodb_stopword_table_validate( return(ret); } +/** Update the system variable innodb_buffer_pool_size using the "saved" +value. This function is registered as a callback with MySQL. +@param[in] thd thread handle +@param[in] var pointer to system variable +@param[out] var_ptr where the formal string goes +@param[in] save immediate result from check function */ +static +void +innodb_buffer_pool_size_update( + THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ + longlong in_val = *static_cast(save); + + ut_snprintf(export_vars.innodb_buffer_pool_resize_status, + sizeof(export_vars.innodb_buffer_pool_resize_status), + "Requested to resize buffer pool."); + + os_event_set(srv_buf_resize_event); + + ib::info() << export_vars.innodb_buffer_pool_resize_status + << " (new size: " << in_val << " bytes)"; +} + +/** Validate the requested buffer pool size. Also, reserve the necessary +memory needed for buffer pool resize. +@param[in] thd thread handle +@param[in] var pointer to system variable +@param[out] save immediate result for update function +@param[in] value incoming string +@return 0 on success, 1 on failure. +*/ +static +int +innodb_buffer_pool_size_validate( + THD* thd, + struct st_mysql_sys_var* var, + void* save, + struct st_mysql_value* value) +{ + longlong intbuf; + + + value->val_int(value, &intbuf); + + if (!srv_was_started) { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "Cannot update innodb_buffer_pool_size," + " because InnoDB is not started."); + return(1); + } + +#ifdef UNIV_DEBUG + if (buf_disable_resize_buffer_pool_debug == TRUE) { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "Cannot update innodb_buffer_pool_size," + " because innodb_disable_resize_buffer_pool_debug" + " is set."); + ib::warn() << "Cannot update innodb_buffer_pool_size," + " because innodb_disable_resize_buffer_pool_debug" + " is set."; + return(1); + } +#endif /* UNIV_DEBUG */ + + + buf_pool_mutex_enter_all(); + + if (srv_buf_pool_old_size != srv_buf_pool_size) { + buf_pool_mutex_exit_all(); + // JAN: TODO: MySQL 5.7 New error + // my_error(ER_BUFPOOL_RESIZE_INPROGRESS, MYF(0)); + my_error(ER_WRONG_ARGUMENTS, MYF(0)); + return(1); + } + + if (srv_buf_pool_instances > 1 && intbuf < BUF_POOL_SIZE_THRESHOLD) { + buf_pool_mutex_exit_all(); + + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "Cannot update innodb_buffer_pool_size" + " to less than 1GB if" + " innodb_buffer_pool_instances > 1."); + return(1); + } + + ulint requested_buf_pool_size + = buf_pool_size_align(static_cast(intbuf)); + + *static_cast(save) = requested_buf_pool_size; + + if (srv_buf_pool_size == requested_buf_pool_size) { + buf_pool_mutex_exit_all(); + /* nothing to do */ + return(0); + } + + srv_buf_pool_size = requested_buf_pool_size; + buf_pool_mutex_exit_all(); + + if (intbuf != 
static_cast(requested_buf_pool_size)) { + char buf[64]; + int len = 64; + value->val_str(value, buf, &len); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + "innodb_buffer_pool_size", + // mysql_sysvar_buffer_pool_size.name, + value->val_str(value, buf, &len)); + } + + return(0); +} + /*************************************************************//** Check whether valid argument given to "innodb_fts_internal_tbl_name" This function is registered as a callback with MySQL. @@ -16866,44 +20259,6 @@ innodb_internal_table_validate( return(ret); } -/****************************************************************//** -Update global variable "fts_internal_tbl_name" with the "saved" -stopword table name value. This function is registered as a callback -with MySQL. */ -static -void -innodb_internal_table_update( -/*=========================*/ - THD* thd, /*!< in: thread handle */ - struct st_mysql_sys_var* var, /*!< in: pointer to - system variable */ - void* var_ptr,/*!< out: where the - formal string goes */ - const void* save) /*!< in: immediate result - from check function */ -{ - const char* table_name; - char* old; - - ut_a(save != NULL); - ut_a(var_ptr != NULL); - - table_name = *static_cast(save); - old = *(char**) var_ptr; - - if (table_name) { - *(char**) var_ptr = my_strdup(table_name, MYF(0)); - } else { - *(char**) var_ptr = NULL; - } - - if (old) { - my_free(old); - } - - fts_internal_tbl_name = *(char**) var_ptr; -} - /****************************************************************//** Update the system variable innodb_adaptive_hash_index using the "saved" value. This function is registered as a callback with MySQL. */ @@ -16922,7 +20277,7 @@ innodb_adaptive_hash_index_update( if (*(my_bool*) save) { btr_search_enable(); } else { - btr_search_disable(); + btr_search_disable(true); } } @@ -16985,9 +20340,9 @@ innodb_change_buffer_max_size_update( const void* save) /*!< in: immediate result from check function */ { - innobase_change_buffer_max_size = + srv_change_buffer_max_size = (*static_cast(save)); - ibuf_max_size_update(innobase_change_buffer_max_size); + ibuf_max_size_update(srv_change_buffer_max_size); } #ifdef UNIV_DEBUG @@ -17010,9 +20365,8 @@ innodb_save_page_no( { srv_saved_page_number_debug = *static_cast(save); - ib_logf(IB_LOG_LEVEL_INFO, - "Saving InnoDB page number: %lu", - srv_saved_page_number_debug); + ib::info() << "Saving InnoDB page number: " + << srv_saved_page_number_debug; } /****************************************************************//** @@ -17029,33 +20383,40 @@ innodb_make_page_dirty( const void* save) /*!< in: immediate result from check function */ { - mtr_t mtr; - ulong space_id = *static_cast(save); + mtr_t mtr; + ulong space_id = *static_cast(save); + fil_space_t* space = fil_space_acquire_silent(space_id); + + if (space == NULL) { + return; + } - mtr_start(&mtr); + mtr.start(); + mtr.set_named_space(space); - buf_block_t* block = buf_page_get( - space_id, 0, srv_saved_page_number_debug, RW_X_LATCH, &mtr); + buf_block_t* block = buf_page_get( + page_id_t(space_id, srv_saved_page_number_debug), + page_size_t(space->flags), RW_X_LATCH, &mtr); + + if (block != NULL) { + byte* page = block->frame; + + ib::info() << "Dirtying page: " << page_id_t( + page_get_space_id(page), page_get_page_no(page)); - if (block) { - byte* page = block->frame; - ib_logf(IB_LOG_LEVEL_INFO, - "Dirtying page:%lu of space:%lu", - page_get_page_no(page), - page_get_space_id(page)); mlog_write_ulint(page + FIL_PAGE_TYPE, 
fil_page_get_type(page), MLOG_2BYTES, &mtr); } - mtr_commit(&mtr); + mtr.commit(); + fil_space_release(space); } #endif // UNIV_DEBUG - /*************************************************************//** Find the corresponding ibuf_use_t value that indexes into innobase_change_buffering_values[] array for the input change buffering option name. -@return corresponding IBUF_USE_* value for the input variable +@return corresponding IBUF_USE_* value for the input variable name, or IBUF_USE_COUNT if not able to find a match */ static ibuf_use_t @@ -17064,14 +20425,14 @@ innodb_find_change_buffering_value( const char* input_name) /*!< in: input change buffering option name */ { - ulint use; + for (ulint i = 0; + i < UT_ARR_SIZE(innobase_change_buffering_values); + ++i) { - for (use = 0; use < UT_ARR_SIZE(innobase_change_buffering_values); - use++) { /* found a match */ if (!innobase_strcasecmp( - input_name, innobase_change_buffering_values[use])) { - return((ibuf_use_t) use); + input_name, innobase_change_buffering_values[i])) { + return(static_cast(i)); } } @@ -17082,7 +20443,7 @@ innodb_find_change_buffering_value( /*************************************************************//** Check if it is a valid value of innodb_change_buffering. This function is registered as a callback with MySQL. -@return 0 for valid innodb_change_buffering */ +@return 0 for valid innodb_change_buffering */ static int innodb_change_buffering_validate( @@ -17155,7 +20516,7 @@ innodb_change_buffering_update( /*************************************************************//** Just emit a warning that the usage of the variable is deprecated. -@return 0 */ +@return 0 */ static void innodb_stats_sample_pages_update( @@ -17168,19 +20529,16 @@ innodb_stats_sample_pages_update( const void* save) /*!< in: immediate result from check function */ { -#define STATS_SAMPLE_PAGES_DEPRECATED_MSG \ - "Using innodb_stats_sample_pages is deprecated and " \ - "the variable may be removed in future releases. " \ - "Please use innodb_stats_transient_sample_pages " \ - "instead." + + const char* STATS_SAMPLE_PAGES_DEPRECATED_MSG = + "Using innodb_stats_sample_pages is deprecated and" + " the variable may be removed in future releases." + " Please use innodb_stats_transient_sample_pages instead."; push_warning(thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_WRONG_COMMAND, STATS_SAMPLE_PAGES_DEPRECATED_MSG); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: %s\n", - STATS_SAMPLE_PAGES_DEPRECATED_MSG); + ib::warn() << STATS_SAMPLE_PAGES_DEPRECATED_MSG; srv_stats_transient_sample_pages = *static_cast(save); @@ -17215,11 +20573,15 @@ innodb_monitor_set_option( exisitng monitor counter (status variable), make special processing to remember existing counter value. 
*/ - if (monitor_info->monitor_type - & MONITOR_EXISTING) { + if (monitor_info->monitor_type & MONITOR_EXISTING) { srv_mon_process_existing_counter( monitor_id, MONITOR_TURN_ON); } + + if (MONITOR_IS_ON(MONITOR_LATCHES)) { + + mutex_monitor->enable(); + } break; case MONITOR_TURN_OFF: @@ -17230,14 +20592,25 @@ innodb_monitor_set_option( MONITOR_OFF(monitor_id); MONITOR_SET_OFF(monitor_id); + + if (!MONITOR_IS_ON(MONITOR_LATCHES)) { + + mutex_monitor->disable(); + } break; case MONITOR_RESET_VALUE: srv_mon_reset(monitor_id); + + if (monitor_id == (MONITOR_LATCHES)) { + + mutex_monitor->reset(); + } break; case MONITOR_RESET_ALL_VALUE: srv_mon_reset_all(monitor_id); + mutex_monitor->reset(); break; default: @@ -17310,7 +20683,7 @@ innodb_monitor_update_wildcard( /*************************************************************//** Given a configuration variable name, find corresponding monitor counter and return its monitor ID if found. -@return monitor ID if found, MONITOR_NO_MATCH if there is no match */ +@return monitor ID if found, MONITOR_NO_MATCH if there is no match */ static ulint innodb_monitor_id_by_name_get( @@ -17341,7 +20714,7 @@ innodb_monitor_id_by_name_get( /*************************************************************//** Validate that the passed in monitor name matches at least one monitor counter name with wildcard compare. -@return TRUE if at least one monitor name matches */ +@return TRUE if at least one monitor name matches */ static ibool innodb_monitor_validate_wildcard_name( @@ -17360,7 +20733,7 @@ innodb_monitor_validate_wildcard_name( /*************************************************************//** Validate the passed in monitor name, find and save the corresponding monitor name in the function parameter "save". -@return 0 if monitor name is valid */ +@return 0 if monitor name is valid */ static int innodb_monitor_valid_byname( @@ -17423,7 +20796,7 @@ innodb_monitor_valid_byname( /*************************************************************//** Validate passed-in "value" is a valid monitor counter name. This function is registered as a callback with MySQL. -@return 0 for valid name */ +@return 0 for valid name */ static int innodb_monitor_validate( @@ -17451,7 +20824,12 @@ innodb_monitor_validate( by InnoDB, so we can access it in another callback function innodb_monitor_update() and free it appropriately */ if (name) { - monitor_name = my_strdup(name, MYF(0)); + /* JAN: TODO: MySQL 5.7 PSI + monitor_name = my_strdup(PSI_INSTRUMENT_ME, + name, MYF(0)); + */ + monitor_name = my_strdup( + name, MYF(0)); } else { return(1); } @@ -17519,14 +20897,14 @@ innodb_monitor_update( push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT, - "Default value is not defined for " - "this set option. Please specify " - "correct counter or module name."); + "Default value is not defined for" + " this set option. Please specify" + " correct counter or module name."); } else { sql_print_error( - "Default value is not defined for " - "this set option. Please specify " - "correct counter or module name.\n"); + "Default value is not defined for" + " this set option. Please specify" + " correct counter or module name.\n"); } if (var_ptr) { @@ -17569,7 +20947,7 @@ exit: been turned on, we will set err_monitor. 
Print related information */ if (err_monitor) { - sql_print_warning("Monitor %s is already enabled.", + sql_print_warning("InnoDB: Monitor %s is already enabled.", srv_mon_get_name((monitor_id_t) err_monitor)); } @@ -17580,13 +20958,13 @@ exit: return; } -#ifdef __WIN__ +#ifdef _WIN32 /*************************************************************//** Validate if passed-in "value" is a valid value for innodb_buffer_pool_filename. On Windows, file names with colon (:) are not allowed. -@return 0 for valid name */ +@return 0 for valid name */ static int innodb_srv_buf_dump_filename_validate( @@ -17598,16 +20976,15 @@ innodb_srv_buf_dump_filename_validate( for update function */ struct st_mysql_value* value) /*!< in: incoming string */ { - const char* buf_name; char buff[OS_FILE_MAX_PATH]; - int len= sizeof(buff); + int len = sizeof(buff); ut_a(save != NULL); ut_a(value != NULL); - buf_name = value->val_str(value, buff, &len); + const char* buf_name = value->val_str(value, buff, &len); - if (buf_name) { + if (buf_name != NULL) { if (is_filename_allowed(buf_name, len, FALSE)){ *static_cast(save) = buf_name; return(0); @@ -17615,17 +20992,17 @@ innodb_srv_buf_dump_filename_validate( push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, - "InnoDB: innodb_buffer_pool_filename " - "cannot have colon (:) in the file name."); + "InnoDB: innodb_buffer_pool_filename" + " cannot have colon (:) in the file name."); } } return(1); } -#else /* __WIN__ */ +#else /* _WIN32 */ # define innodb_srv_buf_dump_filename_validate NULL -#endif /* __WIN__ */ +#endif /* _WIN32 */ #ifdef UNIV_DEBUG static char* srv_buffer_pool_evict; @@ -17794,6 +21171,15 @@ innodb_defragment_frequency_update( (ulonglong) (1000000.0 / srv_defragment_frequency)); } +static inline char *my_strtok_r(char *str, const char *delim, char **saveptr) +{ +#if defined _WIN32 + return strtok_s(str, delim, saveptr); +#else + return strtok_r(str, delim, saveptr); +#endif +} + /****************************************************************//** Parse and enable InnoDB monitor counters during server startup. User can list the monitor counters/groups to be enable by specifying @@ -17815,9 +21201,9 @@ innodb_enable_monitor_at_startup( and/or counter group name, and calling innodb_monitor_update() if successfully updated. Please note that the "str" would be changed by strtok_r() as it walks through it. */ - for (char* option = strtok_r(str, sep, &last); + for (char* option = my_strtok_r(str, sep, &last); option; - option = strtok_r(NULL, sep, &last)) { + option = my_strtok_r(NULL, sep, &last)) { ulint ret; char* option_name; @@ -17848,6 +21234,7 @@ show_innodb_vars( innodb_export_status(); var->type = SHOW_ARRAY; var->value = (char*) &innodb_status_variables; + //var->scope = SHOW_SCOPE_GLOBAL; return(0); } @@ -17858,7 +21245,6 @@ system default primary index name 'GEN_CLUST_INDEX'. If a name matches, this function pushes an warning message to the client, and returns true. @return true if the index name matches the reserved name */ -UNIV_INTERN bool innobase_index_name_is_reserved( /*============================*/ @@ -17879,10 +21265,10 @@ innobase_index_name_is_reserved( push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_NAME_FOR_INDEX, - "Cannot Create Index with name " - "'%s'. The name is reserved " - "for the system default primary " - "index.", + "Cannot Create Index with name" + " '%s'. 
The name is reserved" + " for the system default primary" + " index.", innobase_index_reserve_name); my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), @@ -17897,43 +21283,38 @@ innobase_index_name_is_reserved( /*********************************************************************** Retrieve the FTS Relevance Ranking result for doc with doc_id -of prebuilt->fts_doc_id +of m_prebuilt->fts_doc_id @return the relevance ranking value */ -UNIV_INTERN float innobase_fts_retrieve_ranking( /*============================*/ FT_INFO * fts_hdl) /*!< in: FTS handler */ { - row_prebuilt_t* ft_prebuilt; fts_result_t* result; + row_prebuilt_t* ft_prebuilt; - result = ((NEW_FT_INFO*) fts_hdl)->ft_result; + result = reinterpret_cast(fts_hdl)->ft_result; - ft_prebuilt = ((NEW_FT_INFO*) fts_hdl)->ft_prebuilt; + ft_prebuilt = reinterpret_cast(fts_hdl)->ft_prebuilt; - if (ft_prebuilt->read_just_key) { - fts_ranking_t* ranking = - rbt_value(fts_ranking_t, result->current); - return(ranking->rank); - } + fts_ranking_t* ranking = rbt_value(fts_ranking_t, result->current); + ft_prebuilt->fts_doc_id= ranking->doc_id; - /* Retrieve the ranking value for doc_id with value of - prebuilt->fts_doc_id */ - return(fts_retrieve_ranking(result, ft_prebuilt->fts_doc_id)); + return(ranking->rank); } /*********************************************************************** Free the memory for the FTS handler */ -UNIV_INTERN void innobase_fts_close_ranking( /*=======================*/ FT_INFO * fts_hdl) { - fts_result_t* result; + reinterpret_cast(fts_hdl)->ft_prebuilt->in_fts_query = + false; - result = ((NEW_FT_INFO*) fts_hdl)->ft_result; + fts_result_t* result; + result = reinterpret_cast(fts_hdl)->ft_result; fts_query_free_result(result); @@ -17944,9 +21325,8 @@ innobase_fts_close_ranking( /*********************************************************************** Find and Retrieve the FTS Relevance Ranking result for doc with doc_id -of prebuilt->fts_doc_id +of m_prebuilt->fts_doc_id @return the relevance ranking value */ -UNIV_INTERN float innobase_fts_find_ranking( /*======================*/ @@ -17954,14 +21334,14 @@ innobase_fts_find_ranking( uchar* record, /*!< in: Unused */ uint len) /*!< in: Unused */ { - row_prebuilt_t* ft_prebuilt; fts_result_t* result; + row_prebuilt_t* ft_prebuilt; - ft_prebuilt = ((NEW_FT_INFO*) fts_hdl)->ft_prebuilt; - result = ((NEW_FT_INFO*) fts_hdl)->ft_result; + ft_prebuilt = reinterpret_cast(fts_hdl)->ft_prebuilt; + result = reinterpret_cast(fts_hdl)->ft_result; /* Retrieve the ranking value for doc_id with value of - prebuilt->fts_doc_id */ + m_prebuilt->fts_doc_id */ return(fts_retrieve_ranking(result, ft_prebuilt->fts_doc_id)); } @@ -17970,6 +21350,8 @@ static my_bool innodb_purge_run_now = TRUE; static my_bool innodb_purge_stop_now = TRUE; static my_bool innodb_log_checkpoint_now = TRUE; static my_bool innodb_buf_flush_list_now = TRUE; +static uint innodb_merge_threshold_set_all_debug + = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; /****************************************************************//** Set the purge state to RUN. If purge is disabled then it @@ -18035,12 +21417,16 @@ checkpoint_now_set( check function */ { if (*(my_bool*) save) { - while (log_sys->last_checkpoint_lsn < log_sys->lsn) { + while (log_sys->last_checkpoint_lsn + + SIZE_OF_MLOG_CHECKPOINT + + (log_sys->append_on_checkpoint != NULL + ? 
log_sys->append_on_checkpoint->size() : 0) + < log_sys->lsn) { log_make_checkpoint_at(LSN_MAX, TRUE); - fil_flush_file_spaces(FIL_LOG); + fil_flush_file_spaces(FIL_TYPE_LOG); } - fil_write_flushed_lsn_to_data_files(log_sys->lsn, 0); - fil_flush_file_spaces(FIL_TABLESPACE); + + fil_write_flushed_lsn(log_sys->lsn); } } @@ -18062,10 +21448,29 @@ buf_flush_list_now_set( check function */ { if (*(my_bool*) save) { - buf_flush_list(ULINT_MAX, LSN_MAX, NULL); - buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); + buf_flush_sync_all_buf_pools(); } } + +/** Override current MERGE_THRESHOLD setting for all indexes at dictionary +now. +@param[in] thd thread handle +@param[in] var pointer to system variable +@param[out] var_ptr where the formal string goes +@param[in] save immediate result from check function */ +static +void +innodb_merge_threshold_set_all_debug_update( + THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ + innodb_merge_threshold_set_all_debug + = (*static_cast(save)); + dict_set_merge_threshold_all_debug( + innodb_merge_threshold_set_all_debug); +} #endif /* UNIV_DEBUG */ /*********************************************************************** @@ -18096,17 +21501,19 @@ Find and Retrieve the FTS doc_id for the current result row ulonglong innobase_fts_retrieve_docid( /*========================*/ - FT_INFO_EXT * fts_hdl) /*!< in: FTS handler */ + FT_INFO_EXT* fts_hdl) /*!< in: FTS handler */ { - row_prebuilt_t* ft_prebuilt; fts_result_t* result; + row_prebuilt_t* ft_prebuilt; - ft_prebuilt = ((NEW_FT_INFO *)fts_hdl)->ft_prebuilt; - result = ((NEW_FT_INFO *)fts_hdl)->ft_result; + ft_prebuilt = reinterpret_cast(fts_hdl)->ft_prebuilt; + result = reinterpret_cast(fts_hdl)->ft_result; if (ft_prebuilt->read_just_key) { + fts_ranking_t* ranking = rbt_value(fts_ranking_t, result->current); + return(ranking->doc_id); } @@ -18119,9 +21526,9 @@ Find and retrieve the size of the current result ulonglong innobase_fts_count_matches( /*=======================*/ - FT_INFO_EXT* fts_hdl) /*!< in: FTS handler */ + FT_INFO_EXT* fts_hdl) /*!< in: FTS handler */ { - NEW_FT_INFO* handle = (NEW_FT_INFO *) fts_hdl; + NEW_FT_INFO* handle = reinterpret_cast(fts_hdl); if (handle->ft_result->rankings_by_id != 0) { return rbt_size(handle->ft_result->rankings_by_id); @@ -18215,10 +21622,56 @@ buffer_pool_load_abort( } } +/****************************************************************//** +Update the system variable innodb_log_write_ahead_size using the "saved" +value. This function is registered as a callback with MySQL. 
*/ +static +void +innodb_log_write_ahead_size_update( +/*===============================*/ + THD* thd, /*!< in: thread handle */ + struct st_mysql_sys_var* var, /*!< in: pointer to + system variable */ + void* var_ptr,/*!< out: where the + formal string goes */ + const void* save) /*!< in: immediate result + from check function */ +{ + ulong val = OS_FILE_LOG_BLOCK_SIZE; + ulong in_val = *static_cast(save); + + while (val < in_val) { + val = val * 2; + } + + if (val > UNIV_PAGE_SIZE) { + val = UNIV_PAGE_SIZE; + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "innodb_log_write_ahead_size cannot" + " be set higher than innodb_page_size."); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "Setting innodb_log_write_ahead_size" + " to %lu", + UNIV_PAGE_SIZE); + } else if (val != in_val) { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "innodb_log_write_ahead_size should be" + " set 2^n value and larger than 512."); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_ARGUMENTS, + "Setting innodb_log_write_ahead_size" + " to %lu", + val); + } + + srv_log_write_ahead_size = val; +} + /** Update innodb_status_output or innodb_status_output_locks, which control InnoDB "status monitor" output to the error log. -@param[in] thd thread handle -@param[in] var system variable @param[out] var_ptr current value @param[in] save to-be-assigned value */ static @@ -18303,6 +21756,28 @@ innodb_encrypt_tables_update( fil_crypt_set_encrypt_tables(*static_cast(save)); } +/** Update the innodb_log_checksums parameter. +@param[in] thd thread handle +@param[in] var system variable +@param[out] var_ptr current value +@param[in] save immediate result from check function */ +static +void +innodb_log_checksums_update( + THD* thd, + struct st_mysql_sys_var* var, + void* var_ptr, + const void* save) +{ + my_bool check = *static_cast(var_ptr) + = *static_cast(save); + + /* Make sure we are the only log user */ + mutex_enter(&log_sys->mutex); + innodb_log_checksums_func_update(check); + mutex_exit(&log_sys->mutex); +} + static SHOW_VAR innodb_status_variables_export[]= { {"Innodb", (char*) &show_innodb_vars, SHOW_FUNC}, {NullS, NullS, SHOW_LONG} @@ -18313,7 +21788,10 @@ static struct st_mysql_storage_engine innobase_storage_engine= #ifdef WITH_WSREP void -wsrep_abort_slave_trx(wsrep_seqno_t bf_seqno, wsrep_seqno_t victim_seqno) +wsrep_abort_slave_trx( +/*==================*/ + wsrep_seqno_t bf_seqno, + wsrep_seqno_t victim_seqno) { WSREP_ERROR("Trx %lld tries to abort slave trx %lld. This could be " "caused by:\n\t" @@ -18329,6 +21807,7 @@ This function is used to kill one transaction in BF. 
*/ UNIV_INTERN int wsrep_innobase_kill_one_trx( +/*========================*/ void * const bf_thd_ptr, const trx_t * const bf_trx, trx_t *victim_trx, @@ -18384,7 +21863,7 @@ wsrep_innobase_kill_one_trx( DBUG_RETURN(0); } - if(wsrep_thd_exec_mode(thd) != LOCAL_STATE) { + if (wsrep_thd_exec_mode(thd) != LOCAL_STATE) { WSREP_DEBUG("withdraw for BF trx: %llu, state: %d", (longlong) victim_trx->id, wsrep_thd_get_conflict_state(thd)); @@ -18465,10 +21944,12 @@ wsrep_innobase_kill_one_trx( (ulonglong) victim_trx->id); victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; + if (victim_trx->lock.wait_lock) { WSREP_DEBUG("victim has wait flag: %ld", thd_get_thread_id(thd)); lock_t* wait_lock = victim_trx->lock.wait_lock; + if (wait_lock) { WSREP_DEBUG("canceling wait lock"); victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; @@ -18543,6 +22024,7 @@ wsrep_innobase_kill_one_trx( static int wsrep_abort_transaction( +/*====================*/ handlerton* hton, THD *bf_thd, THD *victim_thd, @@ -18578,32 +22060,44 @@ wsrep_abort_transaction( DBUG_RETURN(-1); } -static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid) +static +int +innobase_wsrep_set_checkpoint( +/*==========================*/ + handlerton* hton, + const XID* xid) { DBUG_ASSERT(hton == innodb_hton_ptr); + if (wsrep_is_wsrep_xid(xid)) { mtr_t mtr; mtr_start(&mtr); trx_sysf_t* sys_header = trx_sysf_get(&mtr); trx_sys_update_wsrep_checkpoint(xid, sys_header, &mtr); mtr_commit(&mtr); - innobase_flush_logs(hton); + innobase_flush_logs(hton, false); return 0; } else { return 1; } } -static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid) +static +int +innobase_wsrep_get_checkpoint( +/*==========================*/ + handlerton* hton, + XID* xid) { DBUG_ASSERT(hton == innodb_hton_ptr); trx_sys_read_wsrep_checkpoint(xid); return 0; } -static void +static +void wsrep_fake_trx_id( -/*==================*/ +/*==============*/ handlerton *hton, THD *thd) /*!< in: user thread handle */ { @@ -18620,34 +22114,39 @@ wsrep_fake_trx_id( static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm, PLUGIN_VAR_RQCMDARG, - "The algorithm InnoDB uses for page checksumming. Possible values are " - "CRC32 (hardware accelerated if the CPU supports it) " - "write crc32, allow any of the other checksums to match when reading; " - "STRICT_CRC32 " - "write crc32, do not allow other algorithms to match when reading; " - "INNODB " - "write a software calculated checksum, allow any other checksums " - "to match when reading; " - "STRICT_INNODB " - "write a software calculated checksum, do not allow other algorithms " - "to match when reading; " - "NONE " - "write a constant magic number, do not do any checksum verification " - "when reading (same as innodb_checksums=OFF); " - "STRICT_NONE " - "write a constant magic number, do not allow values other than that " - "magic number when reading; " - "Files updated when this option is set to crc32 or strict_crc32 will " - "not be readable by MySQL versions older than 5.6.3", - NULL, NULL, SRV_CHECKSUM_ALGORITHM_INNODB, + "The algorithm InnoDB uses for page checksumming. 
Possible values are" + " CRC32 (hardware accelerated if the CPU supports it)" + " write crc32, allow any of the other checksums to match when reading;" + " STRICT_CRC32" + " write crc32, do not allow other algorithms to match when reading;" + " INNODB" + " write a software calculated checksum, allow any other checksums" + " to match when reading;" + " STRICT_INNODB" + " write a software calculated checksum, do not allow other algorithms" + " to match when reading;" + " NONE" + " write a constant magic number, do not do any checksum verification" + " when reading (same as innodb_checksums=OFF);" + " STRICT_NONE" + " write a constant magic number, do not allow values other than that" + " magic number when reading;" + " Files updated when this option is set to crc32 or strict_crc32 will" + " not be readable by MySQL versions older than 5.6.3", + NULL, NULL, SRV_CHECKSUM_ALGORITHM_CRC32, &innodb_checksum_algorithm_typelib); +static MYSQL_SYSVAR_BOOL(log_checksums, innodb_log_checksums, + PLUGIN_VAR_RQCMDARG, + "Whether to compute and require checksums for InnoDB redo log blocks", + NULL, innodb_log_checksums_update, TRUE); + static MYSQL_SYSVAR_BOOL(checksums, innobase_use_checksums, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, - "DEPRECATED. Use innodb_checksum_algorithm=NONE instead of setting " - "this to OFF. " - "Enable InnoDB checksums validation (enabled by default). " - "Disable with --skip-innodb-checksums.", + "DEPRECATED. Use innodb_checksum_algorithm=NONE instead of setting" + " this to OFF." + " Enable InnoDB checksums validation (enabled by default)." + " Disable with --skip-innodb-checksums.", NULL, NULL, TRUE); static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir, @@ -18657,8 +22156,8 @@ static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir, static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, - "Enable InnoDB doublewrite buffer (enabled by default). " - "Disable with --skip-innodb-doublewrite.", + "Enable InnoDB doublewrite buffer (enabled by default)." + " Disable with --skip-innodb-doublewrite.", NULL, NULL, TRUE); static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes, @@ -18714,6 +22213,14 @@ static MYSQL_SYSVAR_BOOL(buf_flush_list_now, innodb_buf_flush_list_now, PLUGIN_VAR_OPCMDARG, "Force dirty page flush now", NULL, buf_flush_list_now_set, FALSE); + +static MYSQL_SYSVAR_UINT(merge_threshold_set_all_debug, + innodb_merge_threshold_set_all_debug, + PLUGIN_VAR_RQCMDARG, + "Override current MERGE_THRESHOLD setting for all indexes at dictionary" + " cache by the specified value dynamically, at the time.", + NULL, innodb_merge_threshold_set_all_debug_update, + DICT_INDEX_MERGE_THRESHOLD_DEFAULT, 1, 50, 0); #endif /* UNIV_DEBUG */ static MYSQL_SYSVAR_ULONG(purge_batch_size, srv_purge_batch_size, @@ -18726,9 +22233,9 @@ static MYSQL_SYSVAR_ULONG(purge_batch_size, srv_purge_batch_size, static MYSQL_SYSVAR_ULONG(purge_threads, srv_n_purge_threads, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, - "Purge threads can be from 1 to 32. Default is 1.", + "Purge threads can be from 1 to 32. Default is 4.", NULL, NULL, - 1, /* Default setting */ + 4, /* Default setting */ 1, /* Minimum value */ 32, 0); /* Maximum value */ @@ -18742,8 +22249,8 @@ static MYSQL_SYSVAR_ULONG(sync_array_size, srv_sync_array_size, static MYSQL_SYSVAR_ULONG(fast_shutdown, innobase_fast_shutdown, PLUGIN_VAR_OPCMDARG, - "Speeds up the shutdown process of the InnoDB storage engine. 
Possible " - "values are 0, 1 (faster) or 2 (fastest - crash-like).", + "Speeds up the shutdown process of the InnoDB storage engine. Possible" + " values are 0, 1 (faster) or 2 (fastest - crash-like).", NULL, NULL, 1, 0, 2, 0); static MYSQL_SYSVAR_BOOL(file_per_table, srv_file_per_table, @@ -18755,7 +22262,7 @@ static MYSQL_SYSVAR_STR(file_format, innobase_file_format_name, PLUGIN_VAR_RQCMDARG, "File format to use for new tables in .ibd files.", innodb_file_format_name_validate, - innodb_file_format_name_update, "Antelope"); + innodb_file_format_name_update, innodb_file_format_default); /* "innobase_file_format_check" decides whether we would continue booting the server if the file format stamped on the system @@ -18776,7 +22283,7 @@ static MYSQL_SYSVAR_STR(file_format_max, innobase_file_format_max, PLUGIN_VAR_OPCMDARG, "The highest file format in the tablespace.", innodb_file_format_max_validate, - innodb_file_format_max_update, "Antelope"); + innodb_file_format_max_update, innodb_file_format_max_default); static MYSQL_SYSVAR_STR(ft_server_stopword_table, innobase_server_stopword_table, PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_MEMALLOC, @@ -18811,7 +22318,7 @@ static MYSQL_SYSVAR_STR(flush_method, innobase_file_flush_method, static MYSQL_SYSVAR_BOOL(large_prefix, innobase_large_prefix, PLUGIN_VAR_NOCMDARG, "Support large index prefix length of REC_VERSION_56_MAX_INDEX_COL_LEN (3072) bytes.", - NULL, NULL, FALSE); + NULL, innodb_large_prefix_update, TRUE); static MYSQL_SYSVAR_BOOL(force_load_corrupted, srv_load_corrupted, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, @@ -18820,35 +22327,30 @@ static MYSQL_SYSVAR_BOOL(force_load_corrupted, srv_load_corrupted, static MYSQL_SYSVAR_BOOL(locks_unsafe_for_binlog, innobase_locks_unsafe_for_binlog, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, - "DEPRECATED. This option may be removed in future releases. " - "Please use READ COMMITTED transaction isolation level instead. " - "Force InnoDB to not use next-key locking, to use only row-level locking.", + "DEPRECATED. This option may be removed in future releases." + " Please use READ COMMITTED transaction isolation level instead." + " Force InnoDB to not use next-key locking, to use only row-level locking.", NULL, NULL, FALSE); -#ifdef UNIV_LOG_ARCHIVE -static MYSQL_SYSVAR_STR(log_arch_dir, innobase_log_arch_dir, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Where full logs should be archived.", NULL, NULL, NULL); - -static MYSQL_SYSVAR_BOOL(log_archive, innobase_log_archive, - PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, - "Set to 1 if you want to have logs archived.", NULL, NULL, FALSE); -#endif /* UNIV_LOG_ARCHIVE */ - static MYSQL_SYSVAR_STR(log_group_home_dir, srv_log_group_home_dir, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Path to InnoDB log files.", NULL, NULL, NULL); +static MYSQL_SYSVAR_ULONG(page_cleaners, srv_n_page_cleaners, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Page cleaner threads can be from 1 to 64. 
Default is 4.", + NULL, NULL, 4, 1, 64, 0); + static MYSQL_SYSVAR_DOUBLE(max_dirty_pages_pct, srv_max_buf_pool_modified_pct, PLUGIN_VAR_RQCMDARG, "Percentage of dirty pages allowed in bufferpool.", - NULL, innodb_max_dirty_pages_pct_update, 75.0, 0.001, 99.999, 0); + NULL, innodb_max_dirty_pages_pct_update, 75.0, 0, 99.999, 0); static MYSQL_SYSVAR_DOUBLE(max_dirty_pages_pct_lwm, srv_max_dirty_pages_pct_lwm, PLUGIN_VAR_RQCMDARG, "Percentage of dirty pages at which flushing kicks in.", - NULL, innodb_max_dirty_pages_pct_lwm_update, 0.001, 0.000, 99.999, 0); + NULL, innodb_max_dirty_pages_pct_lwm_update, 0, 0, 99.999, 0); static MYSQL_SYSVAR_DOUBLE(adaptive_flushing_lwm, srv_adaptive_flushing_lwm, @@ -18861,6 +22363,11 @@ static MYSQL_SYSVAR_BOOL(adaptive_flushing, srv_adaptive_flushing, "Attempt flushing dirty pages to avoid IO bursts at checkpoints.", NULL, NULL, TRUE); +static MYSQL_SYSVAR_BOOL(flush_sync, srv_flush_sync, + PLUGIN_VAR_NOCMDARG, + "Allow IO bursts at the checkpoints ignoring io_capacity setting.", + NULL, NULL, TRUE); + static MYSQL_SYSVAR_ULONG(flushing_avg_loops, srv_flushing_avg_loops, PLUGIN_VAR_RQCMDARG, @@ -18892,8 +22399,8 @@ static MYSQL_SYSVAR_BOOL(status_file, innobase_create_status_file, static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata, PLUGIN_VAR_OPCMDARG, - "Enable statistics gathering for metadata commands such as " - "SHOW TABLE STATUS for tables that use transient statistics (off by default)", + "Enable statistics gathering for metadata commands such as" + " SHOW TABLE STATUS for tables that use transient statistics (off by default)", NULL, NULL, FALSE); static MYSQL_SYSVAR_ULONGLONG(stats_sample_pages, srv_stats_transient_sample_pages, @@ -18904,29 +22411,29 @@ static MYSQL_SYSVAR_ULONGLONG(stats_sample_pages, srv_stats_transient_sample_pag static MYSQL_SYSVAR_ULONGLONG(stats_transient_sample_pages, srv_stats_transient_sample_pages, PLUGIN_VAR_RQCMDARG, - "The number of leaf index pages to sample when calculating transient " - "statistics (if persistent statistics are not used, default 8)", + "The number of leaf index pages to sample when calculating transient" + " statistics (if persistent statistics are not used, default 8)", NULL, NULL, 8, 1, ~0ULL, 0); static MYSQL_SYSVAR_BOOL(stats_persistent, srv_stats_persistent, PLUGIN_VAR_OPCMDARG, - "InnoDB persistent statistics enabled for all tables unless overridden " - "at table level", + "InnoDB persistent statistics enabled for all tables unless overridden" + " at table level", NULL, NULL, TRUE); static MYSQL_SYSVAR_BOOL(stats_auto_recalc, srv_stats_auto_recalc, PLUGIN_VAR_OPCMDARG, - "InnoDB automatic recalculation of persistent statistics enabled for all " - "tables unless overridden at table level (automatic recalculation is only " - "done when InnoDB decides that the table has changed too much and needs a " - "new statistics)", + "InnoDB automatic recalculation of persistent statistics enabled for all" + " tables unless overridden at table level (automatic recalculation is only" + " done when InnoDB decides that the table has changed too much and needs a" + " new statistics)", NULL, NULL, TRUE); static MYSQL_SYSVAR_ULONGLONG(stats_persistent_sample_pages, srv_stats_persistent_sample_pages, PLUGIN_VAR_RQCMDARG, - "The number of leaf index pages to sample when calculating persistent " - "statistics (by ANALYZE, default 20)", + "The number of leaf index pages to sample when calculating persistent" + " statistics (by ANALYZE, default 20)", NULL, NULL, 20, 1, ~0ULL, 0); static 
MYSQL_SYSVAR_ULONGLONG(stats_modified_counter, srv_stats_modified_counter, @@ -18941,14 +22448,22 @@ static MYSQL_SYSVAR_BOOL(stats_traditional, srv_stats_sample_traditional, static MYSQL_SYSVAR_BOOL(adaptive_hash_index, btr_search_enabled, PLUGIN_VAR_OPCMDARG, - "Enable InnoDB adaptive hash index (enabled by default). " - "Disable with --skip-innodb-adaptive-hash-index.", - NULL, innodb_adaptive_hash_index_update, TRUE); + "Enable InnoDB adaptive hash index (enabled by default). " + " Disable with --skip-innodb-adaptive-hash-index.", + NULL, innodb_adaptive_hash_index_update, true); + +/** Number of distinct partitions of AHI. +Each partition is protected by its own latch and so we have parts number +of latches protecting complete search system. */ +static MYSQL_SYSVAR_ULONG(adaptive_hash_index_parts, btr_ahi_parts, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Number of InnoDB Adapative Hash Index Partitions. (default = 8). ", + NULL, NULL, 8, 1, 512, 0); static MYSQL_SYSVAR_ULONG(replication_delay, srv_replication_delay, PLUGIN_VAR_RQCMDARG, - "Replication thread delay (ms) on the slave server if " - "innodb_thread_concurrency is reached (0 by default)", + "Replication thread delay (ms) on the slave server if" + " innodb_thread_concurrency is reached (0 by default)", NULL, NULL, 0, 0, ~0UL, 0); static MYSQL_SYSVAR_UINT(compression_level, page_zip_level, @@ -18966,23 +22481,49 @@ static MYSQL_SYSVAR_BOOL(log_compressed_pages, page_zip_log_pages, " compression algorithm doesn't change.", NULL, NULL, FALSE); -static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "DEPRECATED. This option may be removed in future releases, " - "together with the option innodb_use_sys_malloc and with the InnoDB's " - "internal memory allocator. " - "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", - NULL, NULL, 8*1024*1024L, 512*1024L, LONG_MAX, 1024); - -static MYSQL_SYSVAR_ULONG(autoextend_increment, srv_auto_extend_increment, +static MYSQL_SYSVAR_ULONG(autoextend_increment, + sys_tablespace_auto_extend_increment, PLUGIN_VAR_RQCMDARG, "Data file autoextend increment in megabytes", NULL, NULL, 64L, 1L, 1000L, 0); +/** Validate the requested buffer pool size. Also, reserve the necessary +memory needed for buffer pool resize. +@param[in] thd thread handle +@param[in] var pointer to system variable +@param[out] save immediate result for update function +@param[in] value incoming string +@return 0 on success, 1 on failure. +*/ +static +int +innodb_buffer_pool_size_validate( + THD* thd, + struct st_mysql_sys_var* var, + void* save, + struct st_mysql_value* value); + +/* If the default value of innodb_buffer_pool_size is increased to be more than +BUF_POOL_SIZE_THRESHOLD (srv/srv0start.cc), then srv_buf_pool_instances_default +can be removed and 8 used instead. The problem with the current setup is that +with 128MiB default buffer pool size and 8 instances by default we would emit +a warning when no options are specified. 
*/ static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + PLUGIN_VAR_RQCMDARG, "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", - NULL, NULL, 128*1024*1024L, 5*1024*1024L, LONGLONG_MAX, 1024*1024L); + innodb_buffer_pool_size_validate, + innodb_buffer_pool_size_update, + static_cast(srv_buf_pool_def_size), + static_cast(srv_buf_pool_min_size), + LLONG_MAX, 1024*1024L); + +static MYSQL_SYSVAR_ULONG(buffer_pool_chunk_size, srv_buf_pool_chunk_unit, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of a single memory chunk within each buffer pool instance" + " for resizing buffer pool. Online buffer pool resizing happens" + " at this granularity. 0 means disable resizing buffer pool.", + NULL, NULL, + 128 * 1024 * 1024, 1024 * 1024, LONG_MAX, 1024 * 1024); #if defined UNIV_DEBUG || defined UNIV_PERF_DEBUG static MYSQL_SYSVAR_ULONG(page_hash_locks, srv_n_page_hash_locks, @@ -18996,10 +22537,10 @@ static MYSQL_SYSVAR_ULONG(doublewrite_batch_size, srv_doublewrite_batch_size, NULL, NULL, 120, 1, 127, 0); #endif /* defined UNIV_DEBUG || defined UNIV_PERF_DEBUG */ -static MYSQL_SYSVAR_LONG(buffer_pool_instances, innobase_buffer_pool_instances, +static MYSQL_SYSVAR_ULONG(buffer_pool_instances, srv_buf_pool_instances, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Number of buffer pool instances, set to higher value on high-end machines to increase scalability", - NULL, NULL, 0L, 0L, MAX_BUFFER_POOLS, 1L); + NULL, NULL, srv_buf_pool_instances_default, 0, MAX_BUFFER_POOLS, 0); static MYSQL_SYSVAR_STR(buffer_pool_filename, srv_buf_dump_filename, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, @@ -19014,12 +22555,12 @@ static MYSQL_SYSVAR_BOOL(buffer_pool_dump_now, innodb_buffer_pool_dump_now, static MYSQL_SYSVAR_BOOL(buffer_pool_dump_at_shutdown, srv_buffer_pool_dump_at_shutdown, PLUGIN_VAR_RQCMDARG, "Dump the buffer pool into a file named @@innodb_buffer_pool_filename", - NULL, NULL, FALSE); + NULL, NULL, TRUE); static MYSQL_SYSVAR_ULONG(buffer_pool_dump_pct, srv_buf_pool_dump_pct, PLUGIN_VAR_RQCMDARG, - "Dump only the hottest N% of each buffer pool, defaults to 100", - NULL, NULL, 100, 1, 100, 0); + "Dump only the hottest N% of each buffer pool, defaults to 25", + NULL, NULL, 25, 1, 100, 0); #ifdef UNIV_DEBUG static MYSQL_SYSVAR_STR(buffer_pool_evict, srv_buffer_pool_evict, @@ -19042,7 +22583,7 @@ static MYSQL_SYSVAR_BOOL(buffer_pool_load_abort, innodb_buffer_pool_load_abort, static MYSQL_SYSVAR_BOOL(buffer_pool_load_at_startup, srv_buffer_pool_load_at_startup, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Load the buffer pool from a file named @@innodb_buffer_pool_filename", - NULL, NULL, FALSE); + NULL, NULL, TRUE); static MYSQL_SYSVAR_BOOL(defragment, srv_defragment, PLUGIN_VAR_RQCMDARG, @@ -19121,10 +22662,10 @@ static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter, "Number of times a thread is allowed to enter InnoDB within the same SQL query after it has once got the ticket", NULL, NULL, 5000L, 1L, ~0UL, 0); -static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR, - "Number of file I/O threads in InnoDB.", - NULL, NULL, 4, 4, 64, 0); +static MYSQL_SYSVAR_LONG(fill_factor, innobase_fill_factor, + PLUGIN_VAR_RQCMDARG, + "Percentage of B-tree page filled during bulk insert", + NULL, NULL, 100, 10, 100, 0); static MYSQL_SYSVAR_BOOL(ft_enable_diag_print, fts_enable_diag_print, PLUGIN_VAR_OPCMDARG, @@ 
-19137,10 +22678,10 @@ static MYSQL_SYSVAR_BOOL(disable_sort_file_cache, srv_disable_sort_file_cache, NULL, NULL, FALSE); static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name, - PLUGIN_VAR_NOCMDARG, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC, "FTS internal auxiliary table to be checked", innodb_internal_table_validate, - innodb_internal_table_update, NULL); + NULL, NULL); static MYSQL_SYSVAR_ULONG(ft_cache_size, fts_max_cache_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -19167,7 +22708,6 @@ static MYSQL_SYSVAR_ULONG(ft_max_token_size, fts_max_token_size, "InnoDB Fulltext search maximum token size in characters", NULL, NULL, FTS_MAX_WORD_LEN_IN_CHAR, 10, FTS_MAX_WORD_LEN_IN_CHAR, 0); - static MYSQL_SYSVAR_ULONG(ft_num_word_optimize, fts_num_word_optimize, PLUGIN_VAR_OPCMDARG, "InnoDB Fulltext search number of words to optimize for each optimize table call ", @@ -19212,7 +22752,7 @@ static MYSQL_SYSVAR_ULONG(force_recovery, srv_force_recovery, static MYSQL_SYSVAR_ULONG(force_recovery_crash, srv_force_recovery_crash, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Kills the server during crash recovery.", - NULL, NULL, 0, 0, 10, 0); + NULL, NULL, 0, 0, 100, 0); #endif /* !DBUG_OFF */ static MYSQL_SYSVAR_ULONG(page_size, srv_page_size, @@ -19229,19 +22769,19 @@ static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size, static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Size of each log file in a log group.", - NULL, NULL, 48*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L); + NULL, NULL, 48*1024*1024L, 1*1024*1024L, LLONG_MAX, 1024*1024L); static MYSQL_SYSVAR_ULONG(log_files_in_group, srv_n_log_files, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Number of log files in the log group. InnoDB writes to the files in a circular fashion.", NULL, NULL, 2, 2, SRV_N_LOG_FILES_MAX, 0); -/* Note that the default and minimum values are set to 0 to -detect if the option is passed and print deprecation message */ -static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups, - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.", - NULL, NULL, 0, 0, 10, 0); +static MYSQL_SYSVAR_ULONG(log_write_ahead_size, srv_log_write_ahead_size, + PLUGIN_VAR_RQCMDARG, + "Redo log write ahead unit size to avoid read-on-write," + " it should match the OS cache block IO size", + NULL, innodb_log_write_ahead_size_update, + 8*1024L, OS_FILE_LOG_BLOCK_SIZE, UNIV_PAGE_SIZE_DEF, OS_FILE_LOG_BLOCK_SIZE); static MYSQL_SYSVAR_UINT(old_blocks_pct, innobase_old_blocks_pct, PLUGIN_VAR_RQCMDARG, @@ -19275,7 +22815,6 @@ static MYSQL_SYSVAR_ULONG(thread_concurrency, srv_thread_concurrency, "Helps in performance tuning in heavily concurrent environments. Sets the maximum number of threads allowed inside InnoDB. 
Value 0 will disable the thread throttling.", NULL, NULL, 0, 0, 1000, 0); -#ifdef HAVE_ATOMIC_BUILTINS static MYSQL_SYSVAR_ULONG( adaptive_max_sleep_delay, srv_adaptive_max_sleep_delay, PLUGIN_VAR_RQCMDARG, @@ -19284,7 +22823,6 @@ static MYSQL_SYSVAR_ULONG( 150000, /* Default setting */ 0, /* Minimum value */ 1000000, 0); /* Maximum value */ -#endif /* HAVE_ATOMIC_BUILTINS */ static MYSQL_SYSVAR_BOOL(prefix_index_cluster_optimization, srv_prefix_index_cluster_optimization, @@ -19294,8 +22832,8 @@ static MYSQL_SYSVAR_BOOL(prefix_index_cluster_optimization, static MYSQL_SYSVAR_ULONG(thread_sleep_delay, srv_thread_sleep_delay, PLUGIN_VAR_RQCMDARG, - "Time of innodb thread sleeping before joining InnoDB queue (usec). " - "Value 0 disable a sleep", + "Time of innodb thread sleeping before joining InnoDB queue (usec)." + " Value 0 disable a sleep", NULL, NULL, 10000L, 0L, @@ -19306,10 +22844,15 @@ static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path, "Path to individual files and their sizes.", NULL, NULL, NULL); +static MYSQL_SYSVAR_STR(temp_data_file_path, innobase_temp_data_file_path, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path to files and their sizes making temp-tablespace.", + NULL, NULL, NULL); + static MYSQL_SYSVAR_STR(undo_directory, srv_undo_dir, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, "Directory where undo tablespace files live, this path can be absolute.", - NULL, NULL, "."); + NULL, NULL, NULL); static MYSQL_SYSVAR_ULONG(undo_tablespaces, srv_undo_tablespaces, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, @@ -19317,7 +22860,7 @@ static MYSQL_SYSVAR_ULONG(undo_tablespaces, srv_undo_tablespaces, NULL, NULL, 0L, /* Default seting */ 0L, /* Minimum value */ - 126L, 0); /* Maximum value */ + 95L, 0); /* Maximum value */ static MYSQL_SYSVAR_ULONG(undo_logs, srv_undo_logs, PLUGIN_VAR_OPCMDARG, @@ -19327,6 +22870,27 @@ static MYSQL_SYSVAR_ULONG(undo_logs, srv_undo_logs, 1, /* Minimum value */ TRX_SYS_N_RSEGS, 0); /* Maximum value */ +static MYSQL_SYSVAR_ULONGLONG(max_undo_log_size, srv_max_undo_log_size, + PLUGIN_VAR_OPCMDARG, + "Maximum size of UNDO tablespace in MB (If UNDO tablespace grows" + " beyond this size it will be truncated in due course). ", + NULL, NULL, + 1024 * 1024 * 1024L, + 10 * 1024 * 1024L, + ~0ULL, 0); + +static MYSQL_SYSVAR_ULONG(purge_rseg_truncate_frequency, + srv_purge_rseg_truncate_frequency, + PLUGIN_VAR_OPCMDARG, + "Dictates rate at which UNDO records are purged. Value N means" + " purge rollback segment(s) on every Nth iteration of purge invocation", + NULL, NULL, 128, 1, 128, 0); + +static MYSQL_SYSVAR_BOOL(undo_log_truncate, srv_undo_log_truncate, + PLUGIN_VAR_OPCMDARG, + "Enable or Disable Truncate of UNDO tablespace.", + NULL, NULL, FALSE); + /* Alias for innodb_undo_logs, this config variable is deprecated. 
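// Illustrative aside (not part of the patch): innodb_purge_rseg_truncate_frequency
// above is described as "purge rollback segment(s) on every Nth iteration of
// purge invocation".  A standalone sketch of that cadence check, with
// hypothetical names:
static bool
should_attempt_rseg_truncate(unsigned long purge_iteration, unsigned long frequency)
{
	// frequency is constrained to 1..128 by the sysvar definition above,
	// so the zero check is only defensive in this sketch
	return frequency != 0 && (purge_iteration % frequency) == 0;
}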
*/ static MYSQL_SYSVAR_ULONG(rollback_segments, srv_undo_logs, PLUGIN_VAR_OPCMDARG, @@ -19338,11 +22902,10 @@ static MYSQL_SYSVAR_ULONG(rollback_segments, srv_undo_logs, static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "The AUTOINC lock modes supported by InnoDB: " - "0 => Old style AUTOINC locking (for backward" - " compatibility) " - "1 => New style AUTOINC locking " - "2 => No AUTOINC locking (unsafe for SBR)", + "The AUTOINC lock modes supported by InnoDB:" + " 0 => Old style AUTOINC locking (for backward compatibility);" + " 1 => New style AUTOINC locking;" + " 2 => No AUTOINC locking (unsafe for SBR)", NULL, NULL, AUTOINC_NEW_STYLE_LOCKING, /* Default setting */ AUTOINC_OLD_STYLE_LOCKING, /* Minimum value */ @@ -19352,13 +22915,6 @@ static MYSQL_SYSVAR_STR(version, innodb_version_str, PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY, "InnoDB version", NULL, NULL, INNODB_VERSION_STR); -static MYSQL_SYSVAR_BOOL(use_sys_malloc, srv_use_sys_malloc, - PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, - "DEPRECATED. This option may be removed in future releases, " - "together with the InnoDB's internal memory allocator. " - "Use OS memory allocator instead of InnoDB's internal memory allocator", - NULL, NULL, TRUE); - static MYSQL_SYSVAR_BOOL(use_native_aio, srv_use_native_aio, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, "Use native AIO if supported on this platform.", @@ -19404,13 +22960,13 @@ static MYSQL_SYSVAR_ULONG(api_bk_commit_interval, ib_bk_commit_interval, static MYSQL_SYSVAR_STR(change_buffering, innobase_change_buffering, PLUGIN_VAR_RQCMDARG, - "Buffer changes to reduce random access: " - "OFF, ON, inserting, deleting, changing, or purging.", + "Buffer changes to reduce random access:" + " OFF, ON, inserting, deleting, changing, or purging.", innodb_change_buffering_validate, innodb_change_buffering_update, "all"); static MYSQL_SYSVAR_UINT(change_buffer_max_size, - innobase_change_buffer_max_size, + srv_change_buffer_max_size, PLUGIN_VAR_RQCMDARG, "Maximum on-disk size of change buffer in terms of percentage" " of the buffer pool.", @@ -19419,9 +22975,9 @@ static MYSQL_SYSVAR_UINT(change_buffer_max_size, static MYSQL_SYSVAR_ENUM(stats_method, srv_innodb_stats_method, PLUGIN_VAR_RQCMDARG, - "Specifies how InnoDB index statistics collection code should " - "treat NULLs. Possible values are NULLS_EQUAL (default), " - "NULLS_UNEQUAL and NULLS_IGNORED", + "Specifies how InnoDB index statistics collection code should" + " treat NULLs. 
Possible values are NULLS_EQUAL (default)," + " NULLS_UNEQUAL and NULLS_IGNORED", NULL, NULL, SRV_STATS_NULLS_EQUAL, &innodb_stats_method_typelib); #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG @@ -19435,6 +22991,11 @@ static MYSQL_SYSVAR_BOOL(disable_background_merge, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_RQCMDARG, "Disable change buffering merges by the master thread", NULL, NULL, FALSE); + +static MYSQL_SYSVAR_ENUM(compress_debug, srv_debug_compress, + PLUGIN_VAR_RQCMDARG, + "Compress all tables, without specifying the COMRPESS table attribute", + NULL, NULL, Compression::NONE, &innodb_debug_compress_typelib); #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ static MYSQL_SYSVAR_ULONG(buf_dump_status_frequency, srv_buf_dump_status_frequency, @@ -19486,8 +23047,8 @@ static MYSQL_SYSVAR_BOOL(random_read_ahead, srv_random_read_ahead, static MYSQL_SYSVAR_ULONG(read_ahead_threshold, srv_read_ahead_threshold, PLUGIN_VAR_RQCMDARG, - "Number of pages that must be accessed sequentially for InnoDB to " - "trigger a readahead.", + "Number of pages that must be accessed sequentially for InnoDB to" + " trigger a readahead.", NULL, NULL, 56, 0, 64, 0); static MYSQL_SYSVAR_STR(monitor_enable, innobase_enable_monitor_counter, @@ -19548,10 +23109,18 @@ static MYSQL_SYSVAR_BOOL(read_only, srv_read_only_mode, static MYSQL_SYSVAR_BOOL(cmp_per_index_enabled, srv_cmp_per_index_enabled, PLUGIN_VAR_OPCMDARG, - "Enable INFORMATION_SCHEMA.innodb_cmp_per_index, " - "may have negative impact on performance (off by default)", + "Enable INFORMATION_SCHEMA.innodb_cmp_per_index," + " may have negative impact on performance (off by default)", NULL, innodb_cmp_per_index_update, FALSE); +static MYSQL_SYSVAR_ENUM(default_row_format, innodb_default_row_format, + PLUGIN_VAR_RQCMDARG, + "The default ROW FORMAT for all innodb tables created without explicit" + " ROW_FORMAT. Possible values are REDUNDANT, COMPACT, and DYNAMIC." + " The ROW_FORMAT value COMPRESSED is not allowed", + NULL, NULL, DEFAULT_ROW_FORMAT_DYNAMIC, + &innodb_default_row_format_typelib); + #ifdef UNIV_DEBUG static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_NOCMDOPT, @@ -19565,9 +23134,9 @@ static MYSQL_SYSVAR_UINT(limit_optimistic_insert_debug, static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug, srv_purge_view_update_only_debug, PLUGIN_VAR_NOCMDOPT, - "Pause actual purging any delete-marked records, but merely update the purge view. " - "It is to create artificially the situation the purge view have been updated " - "but the each purges were not done yet.", + "Pause actual purging any delete-marked records, but merely update the purge view." 
+ " It is to create artificially the situation the purge view have been updated" + " but the each purges were not done yet.", NULL, NULL, FALSE); static MYSQL_SYSVAR_ULONG(fil_make_page_dirty_debug, @@ -19579,6 +23148,16 @@ static MYSQL_SYSVAR_ULONG(saved_page_number_debug, srv_saved_page_number_debug, PLUGIN_VAR_OPCMDARG, "An InnoDB page number.", NULL, innodb_save_page_no, 0, 0, UINT_MAX32, 0); + +static MYSQL_SYSVAR_BOOL(disable_resize_buffer_pool_debug, + buf_disable_resize_buffer_pool_debug, PLUGIN_VAR_NOCMDARG, + "Disable resizing buffer pool to make assertion code not expensive.", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_BOOL(sync_debug, srv_sync_debug, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Enable the sync debug checks", + NULL, NULL, FALSE); #endif /* UNIV_DEBUG */ static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, @@ -19749,11 +23328,11 @@ static MYSQL_SYSVAR_BOOL(instrument_semaphores, srv_instrument_semaphores, 0, 0, FALSE); static struct st_mysql_sys_var* innobase_system_variables[]= { - MYSQL_SYSVAR(additional_mem_pool_size), MYSQL_SYSVAR(api_trx_level), MYSQL_SYSVAR(api_bk_commit_interval), MYSQL_SYSVAR(autoextend_increment), MYSQL_SYSVAR(buffer_pool_size), + MYSQL_SYSVAR(buffer_pool_chunk_size), MYSQL_SYSVAR(buffer_pool_instances), MYSQL_SYSVAR(buffer_pool_filename), MYSQL_SYSVAR(buffer_pool_dump_now), @@ -19774,11 +23353,13 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(lru_scan_depth), MYSQL_SYSVAR(flush_neighbors), MYSQL_SYSVAR(checksum_algorithm), + MYSQL_SYSVAR(log_checksums), MYSQL_SYSVAR(checksums), MYSQL_SYSVAR(commit_concurrency), MYSQL_SYSVAR(concurrency_tickets), MYSQL_SYSVAR(compression_level), MYSQL_SYSVAR(data_file_path), + MYSQL_SYSVAR(temp_data_file_path), MYSQL_SYSVAR(data_home_dir), MYSQL_SYSVAR(doublewrite), MYSQL_SYSVAR(use_atomic_writes), @@ -19787,7 +23368,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(api_enable_mdl), MYSQL_SYSVAR(api_disable_rowlock), MYSQL_SYSVAR(fast_shutdown), - MYSQL_SYSVAR(file_io_threads), MYSQL_SYSVAR(read_io_threads), MYSQL_SYSVAR(write_io_threads), MYSQL_SYSVAR(file_per_table), @@ -19801,6 +23381,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { #ifndef DBUG_OFF MYSQL_SYSVAR(force_recovery_crash), #endif /* !DBUG_OFF */ + MYSQL_SYSVAR(fill_factor), MYSQL_SYSVAR(ft_cache_size), MYSQL_SYSVAR(ft_total_cache_size), MYSQL_SYSVAR(ft_result_cache_limit), @@ -19813,24 +23394,21 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(force_load_corrupted), MYSQL_SYSVAR(locks_unsafe_for_binlog), MYSQL_SYSVAR(lock_wait_timeout), -#ifdef UNIV_LOG_ARCHIVE - MYSQL_SYSVAR(log_arch_dir), - MYSQL_SYSVAR(log_archive), -#endif /* UNIV_LOG_ARCHIVE */ MYSQL_SYSVAR(page_size), MYSQL_SYSVAR(log_buffer_size), MYSQL_SYSVAR(log_file_size), MYSQL_SYSVAR(log_files_in_group), + MYSQL_SYSVAR(log_write_ahead_size), MYSQL_SYSVAR(log_group_home_dir), MYSQL_SYSVAR(log_compressed_pages), MYSQL_SYSVAR(max_dirty_pages_pct), MYSQL_SYSVAR(max_dirty_pages_pct_lwm), MYSQL_SYSVAR(adaptive_flushing_lwm), MYSQL_SYSVAR(adaptive_flushing), + MYSQL_SYSVAR(flush_sync), MYSQL_SYSVAR(flushing_avg_loops), MYSQL_SYSVAR(max_purge_lag), MYSQL_SYSVAR(max_purge_lag_delay), - MYSQL_SYSVAR(mirrored_log_groups), MYSQL_SYSVAR(old_blocks_pct), MYSQL_SYSVAR(old_blocks_time), MYSQL_SYSVAR(open_files), @@ -19850,6 +23428,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(stats_modified_counter), 
MYSQL_SYSVAR(stats_traditional), MYSQL_SYSVAR(adaptive_hash_index), + MYSQL_SYSVAR(adaptive_hash_index_parts), MYSQL_SYSVAR(stats_method), MYSQL_SYSVAR(replication_delay), MYSQL_SYSVAR(status_file), @@ -19861,14 +23440,11 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(spin_wait_delay), MYSQL_SYSVAR(table_locks), MYSQL_SYSVAR(thread_concurrency), -#ifdef HAVE_ATOMIC_BUILTINS MYSQL_SYSVAR(adaptive_max_sleep_delay), -#endif /* HAVE_ATOMIC_BUILTINS */ MYSQL_SYSVAR(prefix_index_cluster_optimization), MYSQL_SYSVAR(thread_sleep_delay), MYSQL_SYSVAR(autoinc_lock_mode), MYSQL_SYSVAR(version), - MYSQL_SYSVAR(use_sys_malloc), MYSQL_SYSVAR(use_native_aio), #ifdef HAVE_LIBNUMA MYSQL_SYSVAR(numa_interleave), @@ -19887,6 +23463,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(read_only), MYSQL_SYSVAR(io_capacity), MYSQL_SYSVAR(io_capacity_max), + MYSQL_SYSVAR(page_cleaners), MYSQL_SYSVAR(idle_flush_pct), MYSQL_SYSVAR(monitor_enable), MYSQL_SYSVAR(monitor_disable), @@ -19899,6 +23476,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(purge_stop_now), MYSQL_SYSVAR(log_checkpoint_now), MYSQL_SYSVAR(buf_flush_list_now), + MYSQL_SYSVAR(merge_threshold_set_all_debug), #endif /* UNIV_DEBUG */ #if defined UNIV_DEBUG || defined UNIV_PERF_DEBUG MYSQL_SYSVAR(page_hash_locks), @@ -19909,12 +23487,16 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(print_all_deadlocks), MYSQL_SYSVAR(cmp_per_index_enabled), MYSQL_SYSVAR(undo_logs), + MYSQL_SYSVAR(max_undo_log_size), + MYSQL_SYSVAR(purge_rseg_truncate_frequency), + MYSQL_SYSVAR(undo_log_truncate), MYSQL_SYSVAR(rollback_segments), MYSQL_SYSVAR(undo_directory), MYSQL_SYSVAR(undo_tablespaces), MYSQL_SYSVAR(sync_array_size), MYSQL_SYSVAR(compression_failure_threshold_pct), MYSQL_SYSVAR(compression_pad_pct_max), + MYSQL_SYSVAR(default_row_format), MYSQL_SYSVAR(simulate_comp_failures), #ifdef UNIV_DEBUG MYSQL_SYSVAR(trx_rseg_n_slots_debug), @@ -19922,6 +23504,9 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(trx_purge_view_update_only_debug), MYSQL_SYSVAR(fil_make_page_dirty_debug), MYSQL_SYSVAR(saved_page_number_debug), + MYSQL_SYSVAR(compress_debug), + MYSQL_SYSVAR(disable_resize_buffer_pool_debug), + MYSQL_SYSVAR(sync_debug), #endif /* UNIV_DEBUG */ MYSQL_SYSVAR(tmpdir), MYSQL_SYSVAR(force_primary_key), @@ -19982,6 +23567,7 @@ i_s_innodb_cmp_per_index_reset, i_s_innodb_buffer_page, i_s_innodb_buffer_page_lru, i_s_innodb_buffer_stats, +i_s_innodb_temp_table_info, i_s_innodb_metrics, i_s_innodb_ft_default_stopword, i_s_innodb_ft_deleted, @@ -19998,6 +23584,7 @@ i_s_innodb_sys_foreign, i_s_innodb_sys_foreign_cols, i_s_innodb_sys_tablespaces, i_s_innodb_sys_datafiles, +i_s_innodb_sys_virtual, i_s_innodb_mutexes, i_s_innodb_sys_semaphore_waits, i_s_innodb_tablespaces_encryption, @@ -20037,191 +23624,84 @@ innobase_undo_logs_init_default_max() = static_cast(srv_available_undo_logs); } -#ifdef UNIV_COMPILE_TEST_FUNCS - -struct innobase_convert_name_test_t { - char* buf; - ulint buflen; - const char* id; - ulint idlen; - void* thd; - ibool file_id; - - const char* expected; -}; - -void -test_innobase_convert_name() -{ - char buf[1024]; - ulint i; - - innobase_convert_name_test_t test_input[] = { - {buf, sizeof(buf), "abcd", 4, NULL, TRUE, "\"abcd\""}, - {buf, 7, "abcd", 4, NULL, TRUE, "\"abcd\""}, - {buf, 6, "abcd", 4, NULL, TRUE, "\"abcd\""}, - {buf, 5, "abcd", 4, NULL, TRUE, "\"abc\""}, - {buf, 4, "abcd", 4, NULL, TRUE, 
"\"ab\""}, - - {buf, sizeof(buf), "ab@0060cd", 9, NULL, TRUE, "\"ab`cd\""}, - {buf, 9, "ab@0060cd", 9, NULL, TRUE, "\"ab`cd\""}, - {buf, 8, "ab@0060cd", 9, NULL, TRUE, "\"ab`cd\""}, - {buf, 7, "ab@0060cd", 9, NULL, TRUE, "\"ab`cd\""}, - {buf, 6, "ab@0060cd", 9, NULL, TRUE, "\"ab`c\""}, - {buf, 5, "ab@0060cd", 9, NULL, TRUE, "\"ab`\""}, - {buf, 4, "ab@0060cd", 9, NULL, TRUE, "\"ab\""}, - - {buf, sizeof(buf), "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\"\"cd\""}, - {buf, 17, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\"\"cd\""}, - {buf, 16, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\"\"c\""}, - {buf, 15, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\"\"\""}, - {buf, 14, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\""}, - {buf, 13, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#ab\""}, - {buf, 12, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#a\""}, - {buf, 11, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50#\""}, - {buf, 10, "ab\"cd", 5, NULL, TRUE, - "\"#mysql50\""}, - - {buf, sizeof(buf), "ab/cd", 5, NULL, TRUE, "\"ab\".\"cd\""}, - {buf, 9, "ab/cd", 5, NULL, TRUE, "\"ab\".\"cd\""}, - {buf, 8, "ab/cd", 5, NULL, TRUE, "\"ab\".\"c\""}, - {buf, 7, "ab/cd", 5, NULL, TRUE, "\"ab\".\"\""}, - {buf, 6, "ab/cd", 5, NULL, TRUE, "\"ab\"."}, - {buf, 5, "ab/cd", 5, NULL, TRUE, "\"ab\"."}, - {buf, 4, "ab/cd", 5, NULL, TRUE, "\"ab\""}, - {buf, 3, "ab/cd", 5, NULL, TRUE, "\"a\""}, - {buf, 2, "ab/cd", 5, NULL, TRUE, "\"\""}, - /* XXX probably "" is a better result in this case - {buf, 1, "ab/cd", 5, NULL, TRUE, "."}, - */ - {buf, 0, "ab/cd", 5, NULL, TRUE, ""}, - }; - - for (i = 0; i < sizeof(test_input) / sizeof(test_input[0]); i++) { - - char* end; - ibool ok = TRUE; - size_t res_len; - - fprintf(stderr, "TESTING %lu, %s, %lu, %s\n", - test_input[i].buflen, - test_input[i].id, - test_input[i].idlen, - test_input[i].expected); - - end = innobase_convert_name( - test_input[i].buf, - test_input[i].buflen, - test_input[i].id, - test_input[i].idlen, - test_input[i].thd, - test_input[i].file_id); - - res_len = (size_t) (end - test_input[i].buf); - - if (res_len != strlen(test_input[i].expected)) { - - fprintf(stderr, "unexpected len of the result: %u, " - "expected: %u\n", (unsigned) res_len, - (unsigned) strlen(test_input[i].expected)); - ok = FALSE; - } - - if (memcmp(test_input[i].buf, - test_input[i].expected, - strlen(test_input[i].expected)) != 0 - || !ok) { - - fprintf(stderr, "unexpected result: %.*s, " - "expected: %s\n", (int) res_len, - test_input[i].buf, - test_input[i].expected); - ok = FALSE; - } - - if (ok) { - fprintf(stderr, "OK: res: %.*s\n\n", (int) res_len, - buf); - } else { - fprintf(stderr, "FAILED\n\n"); - return; - } - } -} - -#endif /* UNIV_COMPILE_TEST_FUNCS */ - /**************************************************************************** * DS-MRR implementation ***************************************************************************/ /** - * Multi Range Read interface, DS-MRR calls - */ - -int ha_innobase::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param, - uint n_ranges, uint mode, - HANDLER_BUFFER *buf) +Multi Range Read interface, DS-MRR calls */ +int +ha_innobase::multi_range_read_init( + RANGE_SEQ_IF* seq, + void* seq_init_param, + uint n_ranges, + uint mode, + HANDLER_BUFFER* buf) { - return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf); + return(m_ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf)); } -int ha_innobase::multi_range_read_next(range_id_t *range_info) +int +ha_innobase::multi_range_read_next( + range_id_t *range_info) { - return 
ds_mrr.dsmrr_next(range_info); + return(m_ds_mrr.dsmrr_next(range_info)); } -ha_rows ha_innobase::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq, - void *seq_init_param, - uint n_ranges, uint *bufsz, - uint *flags, - Cost_estimate *cost) +ha_rows +ha_innobase::multi_range_read_info_const( + uint keyno, + RANGE_SEQ_IF* seq, + void* seq_init_param, + uint n_ranges, + uint* bufsz, + uint* flags, + Cost_estimate* cost) { - /* See comments in ha_myisam::multi_range_read_info_const */ - ds_mrr.init(this, table); + /* See comments in ha_myisam::multi_range_read_info_const */ + m_ds_mrr.init(this, table); - if (prebuilt->select_lock_type != LOCK_NONE) - *flags |= HA_MRR_USE_DEFAULT_IMPL; + if (m_prebuilt->select_lock_type != LOCK_NONE) { + *flags |= HA_MRR_USE_DEFAULT_IMPL; + } - ha_rows res= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, - bufsz, flags, cost); - return res; + ha_rows res= m_ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges, + bufsz, flags, cost); + return res; } -ha_rows ha_innobase::multi_range_read_info(uint keyno, uint n_ranges, - uint keys, uint key_parts, - uint *bufsz, uint *flags, - Cost_estimate *cost) +ha_rows +ha_innobase::multi_range_read_info( + uint keyno, + uint n_ranges, + uint keys, + uint key_parts, + uint* bufsz, + uint* flags, + Cost_estimate* cost) { - ds_mrr.init(this, table); - ha_rows res= ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz, - flags, cost); - return res; + m_ds_mrr.init(this, table); + ha_rows res= m_ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz, + flags, cost); + return res; } -int ha_innobase::multi_range_read_explain_info(uint mrr_mode, char *str, - size_t size) +int +ha_innobase::multi_range_read_explain_info( + uint mrr_mode, + char *str, + size_t size) { - return ds_mrr.dsmrr_explain_info(mrr_mode, str, size); + return m_ds_mrr.dsmrr_explain_info(mrr_mode, str, size); } /** - * Index Condition Pushdown interface implementation - */ +Index Condition Pushdown interface implementation */ /*************************************************************//** InnoDB index push-down condition check @return ICP_NO_MATCH, ICP_MATCH, or ICP_OUT_OF_RANGE */ -UNIV_INTERN -enum icp_result +ICP_RESULT innobase_index_cond( /*================*/ void* file) /*!< in/out: pointer to ha_innobase */ @@ -20229,12 +23709,277 @@ innobase_index_cond( return handler_index_cond_check(file); } + +/** Get the computed value by supplying the base column values. 
+@param[in,out] table the table whose virtual column template to be built */ +void +innobase_init_vc_templ( + dict_table_t* table) +{ + THD* thd = current_thd; + char dbname[MAX_DATABASE_NAME_LEN + 1]; + char tbname[MAX_TABLE_NAME_LEN + 1]; + char* name = table->name.m_name; + ulint dbnamelen = dict_get_db_name_len(name); + ulint tbnamelen = strlen(name) - dbnamelen - 1; + char t_dbname[MAX_DATABASE_NAME_LEN + 1]; + char t_tbname[MAX_TABLE_NAME_LEN + 1]; + + /* Acquire innobase_share_mutex to see if table->vc_templ + is assigned with its counter part in the share structure */ + mysql_mutex_lock(&innobase_share_mutex); + + if (table->vc_templ) { + mysql_mutex_unlock(&innobase_share_mutex); + + return; + } + + strncpy(dbname, name, dbnamelen); + dbname[dbnamelen] = 0; + strncpy(tbname, name + dbnamelen + 1, tbnamelen); + tbname[tbnamelen] =0; + + /* For partition table, remove the partition name and use the + "main" table name to build the template */ +#ifdef _WIN32 + char* is_part = strstr(tbname, "#p#"); +#else + char* is_part = strstr(tbname, "#P#"); +#endif /* _WIN32 */ + + if (is_part != NULL) { + *is_part = '\0'; + tbnamelen = is_part - tbname; + } + + table->vc_templ = static_cast( + ut_zalloc_nokey(sizeof *(table->vc_templ))); + + dbnamelen = filename_to_tablename(dbname, t_dbname, + MAX_DATABASE_NAME_LEN + 1); + tbnamelen = filename_to_tablename(tbname, t_tbname, + MAX_TABLE_NAME_LEN + 1); + +#ifdef UNIV_DEBUG + // bool ret = +#endif /* UNIV_DEBUG */ + + /* JAN: TODO: MySQL: 5.7 virtual columsn + handler::my_prepare_gcolumn_template( + thd, t_dbname, t_tbname, + &innobase_build_v_templ_callback, + static_cast(table)); + ut_ad(!ret); + */ + table->vc_templ_purge = true; + mysql_mutex_unlock(&innobase_share_mutex); +} + +/** Get the computed value by supplying the base column values. +@param[in,out] row the data row +@param[in] col virtual column +@param[in] index index +@param[in,out] my_rec mysql record to store the data +@param[in,out] local_heap heap memory for processing large data etc. 
+@param[in,out] heap memory heap that copies the actual index row +@param[in] ifield index field +@param[in] in_purge whether this is called by purge +@return the field filled with computed value, or NULL if just want +to store the value in passed in "my_rec" */ +dfield_t* +innobase_get_computed_value( + const dtuple_t* row, + const dict_v_col_t* col, + const dict_index_t* index, + byte* my_rec, + mem_heap_t** local_heap, + mem_heap_t* heap, + const dict_field_t* ifield, + bool in_purge) +{ + byte rec_buf1[REC_VERSION_56_MAX_INDEX_COL_LEN]; + byte rec_buf2[REC_VERSION_56_MAX_INDEX_COL_LEN]; + byte* mysql_rec; + byte* buf; + dfield_t* field; + ulint len; + const page_size_t page_size = dict_table_page_size(index->table); + ulint ret = 0; + + ut_ad(index->table->vc_templ); + + const mysql_row_templ_t* + vctempl = index->table->vc_templ->vtempl[ + index->table->vc_templ->n_col + col->v_pos]; + if (!heap || index->table->vc_templ->rec_len + >= REC_VERSION_56_MAX_INDEX_COL_LEN) { + if (*local_heap == NULL) { + *local_heap = mem_heap_create(UNIV_PAGE_SIZE); + } + + if (!my_rec) { + mysql_rec = static_cast(mem_heap_alloc( + *local_heap, index->table->vc_templ->rec_len)); + } else { + mysql_rec = my_rec; + } + + buf = static_cast(mem_heap_alloc( + *local_heap, index->table->vc_templ->rec_len)); + } else { + if (!my_rec) { + mysql_rec = rec_buf1; + } else { + mysql_rec = my_rec; + } + + buf = rec_buf2; + } + + for (ulint i = 0; i < col->num_base; i++) { + dict_col_t* base_col = col->base_col[i]; + const dfield_t* row_field; + ulint col_no = base_col->ind; + const mysql_row_templ_t* templ + = index->table->vc_templ->vtempl[col_no]; + const byte* data; + + row_field = dtuple_get_nth_field(row, col_no); + + data = static_cast(row_field->data); + len = row_field->len; + + if (row_field->ext) { + if (*local_heap == NULL) { + *local_heap = mem_heap_create(UNIV_PAGE_SIZE); + } + + data = btr_copy_externally_stored_field( + &len, data, page_size, + dfield_get_len(row_field), *local_heap); + } + + if (len == UNIV_SQL_NULL) { + mysql_rec[templ->mysql_null_byte_offset] + |= (byte) templ->mysql_null_bit_mask; + memcpy(mysql_rec + templ->mysql_col_offset, + static_cast( + index->table->vc_templ->default_rec + + templ->mysql_col_offset), + templ->mysql_col_len); + } else { + + row_sel_field_store_in_mysql_format( + mysql_rec + templ->mysql_col_offset, + templ, index, templ->clust_rec_field_no, + (const byte*)data, len); + + if (templ->mysql_null_bit_mask) { + /* It is a nullable column with a + non-NULL value */ + mysql_rec[templ->mysql_null_byte_offset] + &= ~(byte) templ->mysql_null_bit_mask; + } + } + } + + field = dtuple_get_nth_v_field(row, col->v_pos); + + /* Bitmap for specifying which virtual columns the server + should evaluate */ + MY_BITMAP column_map; + my_bitmap_map col_map_storage[bitmap_buffer_size(REC_MAX_N_FIELDS)]; + bitmap_init(&column_map, col_map_storage, REC_MAX_N_FIELDS, false); + + /* Specify the column the server should evaluate */ + bitmap_set_bit(&column_map, col->m_col.ind); + + if (in_purge) { + if (vctempl->type == DATA_BLOB) { + ulint max_len = DICT_MAX_FIELD_LEN_BY_FORMAT( + index->table) + 1; + byte* blob_mem = static_cast( + mem_heap_alloc(heap, max_len)); + + row_mysql_store_blob_ref( + mysql_rec + vctempl->mysql_col_offset, + vctempl->mysql_col_len, blob_mem, max_len); + } + + /* JAN: TODO: MySQL 5.7 + ret = handler::my_eval_gcolumn_expr( + current_thd, false, index->table->vc_templ->db_name, + index->table->vc_templ->tb_name, &column_map, + (uchar *)mysql_rec); + */ + } else 
{ + /* JAN: TODO: MySQL 5.7 + ret = handler::my_eval_gcolumn_expr( + current_thd, index->table->vc_templ->db_name, + index->table->vc_templ->tb_name, &column_map, + (uchar *)mysql_rec); + */ + } + + if (ret != 0) { + ib::warn() << "Compute virtual column values failed "; + fputs("InnoDB: Cannot compute value for following record ", + stderr); + dtuple_print(stderr, row); + return(NULL); + } + + /* we just want to store the data in passed in MySQL record */ + if (my_rec || ret != 0) { + return(NULL); + } + + if (vctempl->mysql_null_bit_mask + && (mysql_rec[vctempl->mysql_null_byte_offset] + & vctempl->mysql_null_bit_mask)) { + dfield_set_null(field); + field->type.prtype |= DATA_VIRTUAL; + return(field); + } + + row_mysql_store_col_in_innobase_format( + field, buf, + TRUE, mysql_rec + vctempl->mysql_col_offset, + vctempl->mysql_col_len, dict_table_is_comp(index->table)); + field->type.prtype |= DATA_VIRTUAL; + + ulint max_prefix = col->m_col.max_prefix; + + if (max_prefix && ifield + && (ifield->prefix_len == 0 + || ifield->prefix_len > col->m_col.max_prefix)) { + max_prefix = ifield->prefix_len; + } + + /* If this is a prefix index, we only need a portion of the field */ + if (max_prefix) { + len = dtype_get_at_most_n_mbchars( + col->m_col.prtype, + col->m_col.mbminmaxlen, + max_prefix, + field->len, + static_cast(dfield_get_data(field))); + dfield_set_len(field, len); + } + + if (heap) { + dfield_dup(field, heap); + } + + return(field); +} + /** Attempt to push down an index condition. -* @param[in] keyno MySQL key number -* @param[in] idx_cond Index condition to be checked -* @return Part of idx_cond which the handler will not evaluate -*/ -UNIV_INTERN +@param[in] keyno MySQL key number +@param[in] idx_cond Index condition to be checked +@return Part of idx_cond which the handler will not evaluate */ + class Item* ha_innobase::idx_cond_push( uint keyno, @@ -20258,10 +24003,9 @@ errmsg-utf8.txt directly as is. Push a warning message to the client, it is a wrapper around: void push_warning_printf( - THD *thd, Sql_condition::enum_warning_level level, + THD *thd, Sql_condition::enum_condition_level level, uint code, const char *format, ...); */ -UNIV_INTERN void ib_senderrf( /*========*/ @@ -20270,7 +24014,8 @@ ib_senderrf( ib_uint32_t code, /*!< MySQL error code */ ...) /*!< Args */ { - va_list args; + va_list args; + char* str = NULL; const char* format = innobase_get_err_msg(code); /* If the caller wants to push a message to the client then @@ -20283,9 +24028,9 @@ ib_senderrf( va_start(args, code); - myf l=0; + myf l = Sql_condition::WARN_LEVEL_NOTE; - switch(level) { + switch (level) { case IB_LOG_LEVEL_INFO: l = ME_JUST_INFO; break; @@ -20293,8 +24038,12 @@ ib_senderrf( l = ME_JUST_WARNING; break; case IB_LOG_LEVEL_ERROR: + sd_notifyf(0, "STATUS=InnoDB: Error: %s", str); + l = 0; + break; case IB_LOG_LEVEL_FATAL: l = 0; + sd_notifyf(0, "STATUS=InnoDB: Fatal: %s", str); break; default: l = 0; @@ -20304,6 +24053,7 @@ ib_senderrf( my_printv_error(code, format, MYF(l), args); va_end(args); + free(str); if (level == IB_LOG_LEVEL_FATAL) { ut_error; @@ -20318,10 +24068,9 @@ must be: "Some string ... %s". Push a warning message to the client, it is a wrapper around: void push_warning_printf( - THD *thd, Sql_condition::enum_warning_level level, + THD *thd, Sql_condition::enum_condition_level level, uint code, const char *format, ...); */ -UNIV_INTERN void ib_errf( /*====*/ @@ -20331,7 +24080,7 @@ ib_errf( const char* format, /*!< printf format */ ...) 
/*!< Args */ { - char* str; + char* str = NULL; va_list args; /* If the caller wants to push a message to the client then @@ -20342,9 +24091,15 @@ ib_errf( va_start(args, format); -#ifdef __WIN__ +#ifdef _WIN32 int size = _vscprintf(format, args) + 1; - str = static_cast(malloc(size)); + if (size > 0) { + str = static_cast(malloc(size)); + } + if (str == NULL) { + va_end(args); + return; /* Watch for Out-Of-Memory */ + } str[size - 1] = 0x0; vsnprintf(str, size, format, args); #elif HAVE_VASPRINTF @@ -20356,8 +24111,12 @@ ib_errf( #else /* Use a fixed length string. */ str = static_cast(malloc(BUFSIZ)); + if (str == NULL) { + va_end(args); + return; /* Watch for Out-Of-Memory */ + } my_vsnprintf(str, BUFSIZ, format, args); -#endif /* __WIN__ */ +#endif /* _WIN32 */ ib_senderrf(thd, level, code, str); @@ -20365,62 +24124,39 @@ ib_errf( free(str); } -/******************************************************************//** -Write a message to the MySQL log, prefixed with "InnoDB: " */ -UNIV_INTERN -void -ib_logf( -/*====*/ - ib_log_level_t level, /*!< in: warning level */ - const char* format, /*!< printf format */ - ...) /*!< Args */ -{ - char* str; - va_list args; +/* Keep the first 16 characters as-is, since the url is sometimes used +as an offset from this.*/ +const char* TROUBLESHOOTING_MSG = + "Please refer to " REFMAN "innodb-troubleshooting.html" + " for how to resolve the issue."; - va_start(args, format); +const char* TROUBLESHOOT_DATADICT_MSG = + "Please refer to " REFMAN "innodb-troubleshooting-datadict.html" + " for how to resolve the issue."; -#ifdef __WIN__ - int size = _vscprintf(format, args) + 1; - str = static_cast(malloc(size)); - str[size - 1] = 0x0; - vsnprintf(str, size, format, args); -#elif HAVE_VASPRINTF - if (vasprintf(&str, format, args) == -1) { - /* In case of failure use a fixed length string */ - str = static_cast(malloc(BUFSIZ)); - my_vsnprintf(str, BUFSIZ, format, args); - } -#else - /* Use a fixed length string. */ - str = static_cast(malloc(BUFSIZ)); - my_vsnprintf(str, BUFSIZ, format, args); -#endif /* __WIN__ */ +const char* BUG_REPORT_MSG = + "Submit a detailed bug report to http://bugs.mysql.com"; - switch(level) { - case IB_LOG_LEVEL_INFO: - sql_print_information("InnoDB: %s", str); - break; - case IB_LOG_LEVEL_WARN: - sql_print_warning("InnoDB: %s", str); - break; - case IB_LOG_LEVEL_ERROR: - sql_print_error("InnoDB: %s", str); - sd_notifyf(0, "STATUS=InnoDB: Error: %s", str); - break; - case IB_LOG_LEVEL_FATAL: - sql_print_error("InnoDB: %s", str); - sd_notifyf(0, "STATUS=InnoDB: Fatal: %s", str); - break; - } +const char* FORCE_RECOVERY_MSG = + "Please refer to " REFMAN "forcing-innodb-recovery.html" + " for information about forcing recovery."; - va_end(args); - free(str); +const char* ERROR_CREATING_MSG = + "Please refer to " REFMAN "error-creating-innodb.html"; - if (level == IB_LOG_LEVEL_FATAL) { - ut_error; - } -} +const char* OPERATING_SYSTEM_ERROR_MSG = + "Some operating system error numbers are described at" + " " REFMAN "operating-system-error-codes.html"; + +const char* FOREIGN_KEY_CONSTRAINTS_MSG = + "Please refer to " REFMAN "innodb-foreign-key-constraints.html" + " for correct foreign key definition."; + +const char* SET_TRANSACTION_MSG = + "Please refer to " REFMAN "set-transaction.html"; + +const char* INNODB_PARAMETERS_MSG = + "Please refer to " REFMAN "innodb-parameters.html"; /********************************************************************** Converts an identifier from my_charset_filename to UTF-8 charset. 
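// Illustrative aside (not part of the patch): ib_errf() above formats its
// arguments into a heap-allocated string before forwarding it to ib_senderrf(),
// preferring vasprintf() where available, falling back to a fixed BUFSIZ
// buffer, and (with this change) bailing out on allocation failure.  A
// standalone sketch of that pattern, without the MySQL types:
#include <cstdarg>
#include <cstdio>
#include <cstdlib>

static char*
format_message(const char* fmt, ...)
{
	char*	str = NULL;
	va_list	args;

	va_start(args, fmt);
#ifdef HAVE_VASPRINTF
	if (vasprintf(&str, fmt, args) == -1) {
		str = NULL;	// allocation or formatting failed
	}
#else
	str = static_cast<char*>(malloc(BUFSIZ));
	if (str != NULL) {
		vsnprintf(str, BUFSIZ, fmt, args);
	}
#endif
	va_end(args);
	return str;	// caller must free(); may be NULL on out-of-memory
}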
@@ -20436,8 +24172,9 @@ innobase_convert_to_filename_charset( CHARSET_INFO* cs_to = &my_charset_filename; CHARSET_INFO* cs_from = system_charset_info; - return(strconvert( cs_from, from, strlen(from), cs_to, to, - static_cast(len), &errors)); + return(static_cast(strconvert( + cs_from, from, strlen(from), + cs_to, to, static_cast(len), &errors))); } /********************************************************************** @@ -20454,13 +24191,13 @@ innobase_convert_to_system_charset( CHARSET_INFO* cs1 = &my_charset_filename; CHARSET_INFO* cs2 = system_charset_info; - return(strconvert(cs1, from, strlen(from), cs2, to, - static_cast(len), errors)); + return(static_cast(strconvert( + cs1, from, strlen(from), + cs2, to, static_cast(len), errors))); } /********************************************************************** Issue a warning that the row is too big. */ -UNIV_INTERN void ib_warn_row_too_big(const dict_table_t* table) { @@ -20474,10 +24211,6 @@ ib_warn_row_too_big(const dict_table_t* table) THD* thd = current_thd; - if (thd == NULL) { - return; - } - push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW, "Row size too large (> %lu). Changing some columns to TEXT" @@ -20648,15 +24381,17 @@ ib_push_warning( thd = current_thd; } - va_start(args, format); - buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); - vsprintf(buf,format, args); + if (thd) { + va_start(args, format); + buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); + vsprintf(buf,format, args); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - convert_error_code_to_mysql((dberr_t)error, 0, thd), - buf); - my_free(buf); - va_end(args); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + convert_error_code_to_mysql((dberr_t)error, 0, thd), + buf); + my_free(buf); + va_end(args); + } } /********************************************************************//** diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 478187e0b23..b2696751be5 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -17,43 +17,82 @@ this program; if not, write to the Free Software Foundation, Inc., *****************************************************************************/ -/* - This file is based on ha_berkeley.h of MySQL distribution +/* The InnoDB handler: the interface between MySQL and InnoDB. */ - This file defines the Innodb handler: the interface between MySQL and - Innodb -*/ +/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default +system clustered index when there is no primary key. */ +extern const char innobase_index_reserve_name[]; + +/* "innodb_file_per_table" tablespace name is reserved by InnoDB in order +to explicitly create a file_per_table tablespace for the table. */ +extern const char reserved_file_per_table_space_name[]; -#include "dict0stats.h" +/* "innodb_system" tablespace name is reserved by InnoDB for the system tablespace +which uses space_id 0 and stores extra types of system pages like UNDO +and doublewrite. 
*/ +extern const char reserved_system_space_name[]; -/* Structure defines translation table between mysql index and innodb +/* Structure defines translation table between mysql index and InnoDB index structures */ struct innodb_idx_translate_t { + ulint index_count; /*!< number of valid index entries in the index_mapping array */ + ulint array_size; /*!< array size of index_mapping */ + dict_index_t** index_mapping; /*!< index pointer array directly - maps to index in Innodb from MySQL + maps to index in InnoDB from MySQL array index */ }; +/** Structure defines template related to virtual columns and +their base columns */ +struct innodb_col_templ_t { + /** number of regular columns */ + ulint n_col; + + /** number of virtual columns */ + ulint n_v_col; + + /** array of templates for virtual col and their base columns */ + mysql_row_templ_t** vtempl; + + /** table's database name */ + char db_name[MAX_DATABASE_NAME_LEN]; + + /** table name */ + char tb_name[MAX_TABLE_NAME_LEN]; + + /** share->table_name */ + char share_name[MAX_DATABASE_NAME_LEN + + MAX_TABLE_NAME_LEN]; + + /** MySQL record length */ + ulint rec_len; + + /** default column value if any */ + const byte* default_rec; +}; + /** InnoDB table share */ typedef struct st_innobase_share { - THR_LOCK lock; /*!< MySQL lock protecting - this structure */ - const char* table_name; /*!< InnoDB table name */ - uint use_count; /*!< reference count, - incremented in get_share() - and decremented in - free_share() */ - void* table_name_hash;/*!< hash table chain node */ - innodb_idx_translate_t idx_trans_tbl; /*!< index translation - table between MySQL and - Innodb */ + const char* table_name; /*!< InnoDB table name */ + uint use_count; /*!< reference count, + incremented in get_share() + and decremented in + free_share() */ + void* table_name_hash; + /*!< hash table chain node */ + innodb_idx_translate_t + idx_trans_tbl; /*!< index translation table between + MySQL and InnoDB */ + innodb_col_templ_t + s_templ; /*!< table virtual column template + info */ } INNOBASE_SHARE; - /** Prebuilt structures in an InnoDB table handle used within MySQL */ struct row_prebuilt_t; @@ -74,32 +113,16 @@ struct ha_table_option_struct ulonglong encryption_key_id; /*!< encryption key id */ }; +/* JAN: TODO: MySQL 5.7 handler.h */ +struct st_handler_tablename +{ + const char *db; + const char *tablename; +}; /** The class defining a handle to an Innodb table */ class ha_innobase: public handler { - row_prebuilt_t* prebuilt; /*!< prebuilt struct in InnoDB, used - to save CPU time with prebuilt data - structures*/ - THD* user_thd; /*!< the thread handle of the user - currently using the handle; this is - set in external_lock function */ - THR_LOCK_DATA lock; - INNOBASE_SHARE* share; /*!< information for MySQL - table locking */ - - uchar* upd_buf; /*!< buffer used in updates */ - ulint upd_buf_size; /*!< the size of upd_buf in bytes */ - Table_flags int_table_flags; - uint primary_key; - ulong start_of_scan; /*!< this is set to 1 when we are - starting a table scan but have not - yet fetched any row, else 0 */ - uint last_match_mode;/* match mode of the latest search: - ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX, - or undefined */ - uint num_write_row; /*!< number of write_row() calls */ - ha_statistics* ha_partition_stats; /*!< stats of the partition owner handler (if there is one) */ uint store_key_val_for_row(uint keynr, char* buff, uint buff_len, @@ -120,14 +143,14 @@ class ha_innobase: public handler int wsrep_append_keys(THD *thd, bool shared, const uchar* 
record0, const uchar* record1); #endif + /* Init values for the class: */ - public: - ha_innobase(handlerton *hton, TABLE_SHARE *table_arg); +public: + ha_innobase(handlerton* hton, TABLE_SHARE* table_arg); ~ha_innobase(); - /* - Get the row type from the storage engine. If this method returns - ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used. - */ + + /** Get the row type from the storage engine. If this method returns + ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used. */ enum row_type get_row_type() const; const char* table_type() const; @@ -140,6 +163,20 @@ class ha_innobase: public handler uint max_supported_key_part_length() const; const key_map* keys_to_use_for_scanning(); + /** Opens dictionary table object using table name. For partition, we need to + try alternative lower/upper case names to support moving data files across + platforms. + @param[in] table_name name of the table/partition + @param[in] norm_name normalized name of the table/partition + @param[in] is_partition if this is a partition of a table + @param[in] ignore_err error to ignore for loading dictionary object + @return dictionary table object or NULL if not found */ + static dict_table_t* open_dict_table( + const char* table_name, + const char* norm_name, + bool is_partition, + dict_err_ignore_t ignore_err); + int open(const char *name, int mode, uint test_if_locked); handler* clone(const char *name, MEM_ROOT *mem_root); int close(void); @@ -147,6 +184,8 @@ class ha_innobase: public handler double read_time(uint index, uint ranges, ha_rows rows); longlong get_memory_buffer_size() const; + int delete_all_rows(); + int write_row(uchar * buf); int update_row(const uchar * old_data, uchar * new_data); int delete_row(const uchar * buf); @@ -177,6 +216,17 @@ class ha_innobase: public handler FT_INFO *ft_init_ext(uint flags, uint inx, String* key); int ft_read(uchar* buf); + FT_INFO *ft_init_ext_with_hints( + uint inx, + String* key, + void* hints); + /* JAN: TODO: MySQL 5.6 + Ft_hints* hints); + */ + + int enable_indexes(uint mode); + int disable_indexes(uint mode); + void position(const uchar *record); int info(uint); int analyze(THD* thd,HA_CHECK_OPT* check_opt); @@ -192,6 +242,9 @@ class ha_innobase: public handler *max_key); ha_rows estimate_rows_upper_bound(); + // JAN: TODO: MySQL 5.7 + // int records(ha_rows* num_rows); + void update_create_info(HA_CREATE_INFO* create_info); int parse_table_name(const char*name, HA_CREATE_INFO* create_info, @@ -215,6 +268,10 @@ class ha_innobase: public handler int get_foreign_key_list(THD *thd, List *f_key_list); int get_parent_foreign_key_list(THD *thd, List *f_key_list); + int get_cascade_foreign_key_table_list( + THD* thd, + List* fk_table_list); + bool can_switch_engines(); uint referenced_by_foreign_key(); void free_foreign_key_create_info(char* str); @@ -227,11 +284,14 @@ class ha_innobase: public handler ulonglong *nb_reserved_values); int reset_auto_increment(ulonglong value); + uint lock_count(void) const; + virtual bool get_error_message(int error, String *buf); virtual bool get_foreign_dup_key(char*, uint, char*, uint); uint8 table_cache_type(); - /* - ask handler about permission to cache table during query registration + + /** + Ask handler about permission to cache table during query registration */ my_bool register_query_cache_table(THD *thd, char *table_key, uint key_length, @@ -241,37 +301,36 @@ class ha_innobase: public handler static ulonglong get_mysql_bin_log_pos(); bool primary_key_is_clustered(); int cmp_ref(const 
uchar *ref1, const uchar *ref2); + /** On-line ALTER TABLE interface @see handler0alter.cc @{ */ /** Check if InnoDB supports a particular alter table in-place - @param altered_table TABLE object for new version of table. - @param ha_alter_info Structure describing changes to be done + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. - @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported - @retval HA_ALTER_INPLACE_NO_LOCK Supported + @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported + @retval HA_ALTER_INPLACE_NO_LOCK Supported @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE - Supported, but requires lock - during main phase and exclusive - lock during prepare phase. + Supported, but requires lock during main phase and + exclusive lock during prepare phase. @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE - Supported, prepare phase - requires exclusive lock. - */ + Supported, prepare phase requires exclusive lock. */ enum_alter_inplace_result check_if_supported_inplace_alter( TABLE* altered_table, Alter_inplace_info* ha_alter_info); + /** Allows InnoDB to update internal structures with concurrent writes blocked (provided that check_if_supported_inplace_alter() did not return HA_ALTER_INPLACE_NO_LOCK). This will be invoked before inplace_alter_table(). - @param altered_table TABLE object for new version of table. - @param ha_alter_info Structure describing changes to be done + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. - @retval true Failure - @retval false Success + @retval true Failure + @retval false Success */ bool prepare_inplace_alter_table( TABLE* altered_table, @@ -282,12 +341,12 @@ class ha_innobase: public handler The level of concurrency allowed during this operation depends on the return value from check_if_supported_inplace_alter(). - @param altered_table TABLE object for new version of table. - @param ha_alter_info Structure describing changes to be done + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. - @retval true Failure - @retval false Success + @retval true Failure + @retval false Success */ bool inplace_alter_table( TABLE* altered_table, @@ -300,12 +359,12 @@ class ha_innobase: public handler inplace_alter_table() and thus might be higher than during prepare_inplace_alter_table(). (E.g concurrent writes were blocked during prepare, but might not be during commit). - @param altered_table TABLE object for new version of table. - @param ha_alter_info Structure describing changes to be done + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. - @param commit true => Commit, false => Rollback. - @retval true Failure - @retval false Success + @param commit true => Commit, false => Rollback. 
+ @retval true Failure + @retval false Success */ bool commit_inplace_alter_table( TABLE* altered_table, @@ -313,8 +372,11 @@ class ha_innobase: public handler bool commit); /** @} */ void set_partition_owner_stats(ha_statistics *stats); + bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); + bool check_if_supported_virtual_columns(void) { return TRUE; } + private: /** Builds a 'template' to the prebuilt struct. @@ -329,6 +391,9 @@ private: int info_low(uint, bool); + /** Write Row Interface optimized for Intrinsic table. */ + int intrinsic_table_write_row(uchar* record); + public: /** @name Multi Range Read interface @{ */ /** Initialize multi range read @see DsMrr_impl::dsmrr_init @@ -345,7 +410,7 @@ public: /** Process next multi range read @see DsMrr_impl::dsmrr_next * @param range_info */ - int multi_range_read_next(range_id_t *range_info); + int multi_range_read_next(range_id_t *range_info); /** Initialize multi range read and get information. * @see ha_myisam::multi_range_read_info_const * @see DsMrr_impl::dsmrr_info_const @@ -375,8 +440,9 @@ public: uint key_parts, uint* bufsz, uint* flags, Cost_estimate* cost); - int multi_range_read_explain_info(uint mrr_mode, char *str, - size_t size); + int multi_range_read_explain_info(uint mrr_mode, + char *str, size_t size); + /** Attempt to push down an index condition. * @param[in] keyno MySQL key number * @param[in] idx_cond Index condition to be checked @@ -384,12 +450,62 @@ public: */ class Item* idx_cond_push(uint keyno, class Item* idx_cond); -private: + /* An helper function for index_cond_func_innodb: */ + bool is_thd_killed(); + +protected: + + /** + MySQL calls this method at the end of each statement. This method + exists for readability only, called from reset(). The name reset() + doesn't give any clue that it is called at the end of a statement. */ + int end_stmt(); + /** The multi range read session object */ - DsMrr_impl ds_mrr; - /* @} */ + DsMrr_impl m_ds_mrr; + + /** Save CPU time with prebuilt/cached data structures */ + row_prebuilt_t* m_prebuilt; + + /** prebuilt pointer for the right prebuilt. For native + partitioning, points to the current partition prebuilt. */ + row_prebuilt_t** m_prebuilt_ptr; + + /** Thread handle of the user currently using the handler; + this is set in external_lock function */ + THD* m_user_thd; + + /** information for MySQL table locking */ + INNOBASE_SHARE* m_share; + + /** buffer used in updates */ + uchar* m_upd_buf; + + /** the size of upd_buf in bytes */ + ulint m_upd_buf_size; + + /** Flags that specificy the handler instance (table) capability. 
*/ + Table_flags m_int_table_flags; + + /** Index into the server's primkary keye meta-data table->key_info{} */ + uint m_primary_key; + + /** this is set to 1 when we are starting a table scan but have + not yet fetched any row, else false */ + bool m_start_of_scan; + + /*!< match mode of the latest search: ROW_SEL_EXACT, + ROW_SEL_EXACT_PREFIX, or undefined */ + uint m_last_match_mode; + + /** number of write_row() calls */ + uint m_num_write_row; + + /** If mysql has locked with external_lock() */ + bool m_mysql_has_locked; }; + /* Some accessor functions which the InnoDB plugin needs, but which can not be added to mysql/plugin.h as part of the public interface; the definitions are bracketed with #ifdef INNODB_COMPATIBILITY_HOOKS */ @@ -398,11 +514,10 @@ the definitions are bracketed with #ifdef INNODB_COMPATIBILITY_HOOKS */ #error InnoDB needs MySQL to be built with #define INNODB_COMPATIBILITY_HOOKS #endif -LEX_STRING* thd_query_string(MYSQL_THD thd); - extern "C" { struct charset_info_st *thd_charset(MYSQL_THD thd); +LEX_STRING* thd_query_string(MYSQL_THD thd); /** Check if a user thread is a replication slave thread @@ -462,7 +577,7 @@ enum durability_properties thd_get_durability_property(const MYSQL_THD thd); @return True if sql_mode has strict mode (all or trans), false otherwise. */ bool thd_is_strict_mode(const MYSQL_THD thd) -MY_ATTRIBUTE((nonnull)); +__attribute__((nonnull)); } /* extern "C" */ /** Get the file name and position of the MySQL binlog corresponding to the @@ -470,13 +585,39 @@ MY_ATTRIBUTE((nonnull)); */ extern void mysql_bin_log_commit_pos(THD *thd, ulonglong *out_pos, const char **out_file); +/** Get the partition_info working copy. +@param thd Thread object. +@return NULL or pointer to partition_info working copy. */ +/* JAN: TODO: MySQL 5.7 Partitioning +partition_info* +thd_get_work_part_info( + THD* thd); +*/ + +struct trx_t; #ifdef WITH_WSREP #include +//extern "C" int wsrep_trx_order_before(void *thd1, void *thd2); + +extern "C" bool wsrep_thd_is_wsrep_on(THD *thd); + + +extern "C" void wsrep_thd_set_exec_mode(THD *thd, enum wsrep_exec_mode mode); +extern "C" void wsrep_thd_set_query_state( + THD *thd, enum wsrep_query_state state); + +extern "C" void wsrep_thd_set_trx_to_replay(THD *thd, uint64 trx_id); + +extern "C" uint32 wsrep_thd_wsrep_rand(THD *thd); +extern "C" time_t wsrep_thd_query_start(THD *thd); +extern "C" query_id_t wsrep_thd_query_id(THD *thd); +extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd); +extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id); #endif extern const struct _ft_vft ft_vft_result; -/* Structure Returned by ha_innobase::ft_init_ext() */ +/** Structure Returned by ha_innobase::ft_init_ext() */ typedef struct new_ft_info { struct _ft_vft *please; @@ -485,34 +626,46 @@ typedef struct new_ft_info fts_result_t* ft_result; } NEW_FT_INFO; -/*********************************************************************//** +/** Allocates an InnoDB transaction for a MySQL handler object. -@return InnoDB transaction handle */ +@return InnoDB transaction handle */ trx_t* innobase_trx_allocate( -/*==================*/ MYSQL_THD thd); /*!< in: user thread handle */ +/** Match index columns between MySQL and InnoDB. +This function checks whether the index column information +is consistent between KEY info from mysql and that from innodb index. +@param[in] key_info Index info from mysql +@param[in] index_info Index info from InnoDB +@return true if all column types match. 
*/ +bool +innobase_match_index_columns( + const KEY* key_info, + const dict_index_t* index_info); + /*********************************************************************//** This function checks each index name for a table against reserved system default primary index name 'GEN_CLUST_INDEX'. If a name matches, this function pushes an warning message to the client, and returns true. @return true if the index name matches the reserved name */ -UNIV_INTERN bool innobase_index_name_is_reserved( -/*============================*/ - THD* thd, /*!< in/out: MySQL connection */ - const KEY* key_info, /*!< in: Indexes to be created */ - ulint num_of_keys) /*!< in: Number of indexes to - be created. */ + THD* thd, /*!< in/out: MySQL connection */ + const KEY* key_info, /*!< in: Indexes to be + created */ + ulint num_of_keys) /*!< in: Number of indexes to + be created. */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*****************************************************************//** +extern const char reserved_file_per_table_space_name[]; #ifdef WITH_WSREP -extern "C" int wsrep_trx_is_aborting(void *thd_ptr); +//extern "C" int wsrep_trx_is_aborting(void *thd_ptr); #endif + +/** Check if the explicit tablespace targeted is file_per_table. +@param[in] create_info Metadata for the table to create. Determines InnoDB table flags. @retval true if successful, false if error */ UNIV_INTERN @@ -546,39 +699,247 @@ create_options_are_invalid( bool use_tablespace) /*!< in: srv_file_per_table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Check if the explicit tablespace targeted is file_per_table. +@param[in] create_info Metadata for the table to create. +@return true if the table is intended to use a file_per_table tablespace. */ +UNIV_INLINE +bool +tablespace_is_file_per_table( + const HA_CREATE_INFO* create_info) +{ + return(create_info->tablespace != NULL + && (0 == strcmp(create_info->tablespace, + reserved_file_per_table_space_name))); +} + +/** Check if table will be put in an existing shared general tablespace. +@param[in] create_info Metadata for the table to create. +@return true if the table will use an existing shared general tablespace. */ +UNIV_INLINE +bool +tablespace_is_shared_space( + const HA_CREATE_INFO* create_info) +{ + return(create_info->tablespace != NULL + && create_info->tablespace[0] != '\0' + && (0 != strcmp(create_info->tablespace, + reserved_file_per_table_space_name))); +} + +/** Parse hint for table and its indexes, and update the information +in dictionary. +@param[in] thd Connection thread +@param[in,out] table Target table +@param[in] table_share Table definition */ +void +innobase_parse_hint_from_comment( + THD* thd, + dict_table_t* table, + const TABLE_SHARE* table_share); + +/** Class for handling create table information. */ +class create_table_info_t +{ +public: + /** Constructor. + Used in two ways: + - all but file_per_table is used, when creating the table. + - all but name/path is used, when validating options and using flags. */ + create_table_info_t( + THD* thd, + TABLE* form, + HA_CREATE_INFO* create_info, + char* table_name, + char* temp_path, + char* remote_path, + char* tablespace) + :m_thd(thd), + m_form(form), + m_create_info(create_info), + m_table_name(table_name), + m_temp_path(temp_path), + m_remote_path(remote_path), + m_tablespace(tablespace), + m_innodb_file_per_table(srv_file_per_table) + {} + + /** Initialize the object. */ + int initialize(); + + /** Set m_tablespace_type. 
*/ + void set_tablespace_type(bool table_being_altered_is_file_per_table); + + /** Create the internal innodb table. */ + int create_table(); + + /** Update the internal data dictionary. */ + int create_table_update_dict(); + + /** Validates the create options. Checks that the options + KEY_BLOCK_SIZE, ROW_FORMAT, DATA DIRECTORY, TEMPORARY & TABLESPACE + are compatible with each other and other settings. + These CREATE OPTIONS are not validated here unless innodb_strict_mode + is on. With strict mode, this function will report each problem it + finds using a custom message with error code + ER_ILLEGAL_HA_CREATE_OPTION, not its built-in message. + @return NULL if valid, string name of bad option if not. */ + const char* create_options_are_invalid(); + + /** Validates engine specific table options not handled by + SQL-parser. + @return NULL if valid, string name of bad option if not. */ + const char* check_table_options(); + + /** Validate DATA DIRECTORY option. */ + bool create_option_data_directory_is_valid(); + + /** Validate TABLESPACE option. */ + bool create_option_tablespace_is_valid(); + + /** Prepare to create a table. */ + int prepare_create_table(const char* name); + + void allocate_trx(); + + /** Determines InnoDB table flags. + If strict_mode=OFF, this will adjust the flags to what should be assumed. + @retval true if successful, false if error */ + bool innobase_table_flags(); + + /** Set flags and append '/' to remote path if necessary. */ + void set_remote_path_flags(); + + /** Get table flags. */ + ulint flags() const + { return(m_flags); } + + /** Get table flags2. */ + ulint flags2() const + { return(m_flags2); } + + /** Get trx. */ + trx_t* trx() const + { return(m_trx); } + + /** Return table name. */ + const char* table_name() const + { return(m_table_name); } + + THD* thd() const + { return(m_thd); } + + inline bool is_intrinsic_temp_table() const + { + /* DICT_TF2_INTRINSIC implies DICT_TF2_TEMPORARY */ + ut_ad(!(m_flags2 & DICT_TF2_INTRINSIC) + || (m_flags2 & DICT_TF2_TEMPORARY)); + return((m_flags2 & DICT_TF2_INTRINSIC) != 0); + } + + /** Normalizes a table name string. + A normalized name consists of the database name catenated to '/' and + table name. An example: test/mytable. On Windows normalization puts + both the database name and the table name always to lower case if + "set_lower_case" is set to true. + @param[in,out] norm_name Buffer to return the normalized name in. + @param[in] name Table name string. + @param[in] set_lower_case True if we want to set name to lower + case. */ + static void normalize_table_name_low( + char* norm_name, + const char* name, + ibool set_lower_case); + +private: + /** Parses the table name into normal name and either temp path or + remote path if needed.*/ + int + parse_table_name( + const char* name); + + /** Create the internal innodb table definition. */ + int create_table_def(); + + /** Connection thread handle. */ + THD* m_thd; + + /** InnoDB transaction handle. */ + trx_t* m_trx; + + /** Information on table columns and indexes. */ + const TABLE* m_form; + + /** Create options. */ + HA_CREATE_INFO* m_create_info; + + /** Table name */ + char* m_table_name; + /** If this is a table explicitly created by the user with the + TEMPORARY keyword, then this parameter is the dir path where the + table should be placed if we create an .ibd file for it + (no .ibd extension in the path, though). 
+ Otherwise this is a zero length-string */ + char* m_temp_path; + + /** Remote path (DATA DIRECTORY) or zero length-string */ + char* m_remote_path; + + /** Tablespace name or zero length-string. */ + char* m_tablespace; + + /** Local copy of srv_file_per_table. */ + bool m_innodb_file_per_table; + + /** Allow file_per_table for this table either because: + 1) the setting innodb_file_per_table=on, + 2) it was explicitly requested by tablespace=innodb_file_per_table. + 3) the table being altered is currently file_per_table */ + bool m_allow_file_per_table; + + /** After all considerations, this shows whether we will actually + create a table and tablespace using file-per-table. */ + bool m_use_file_per_table; + + /** Using DATA DIRECTORY */ + bool m_use_data_dir; + + /** Using a Shared General Tablespace */ + bool m_use_shared_space; + + /** Table flags */ + ulint m_flags; + + /** Table flags2 */ + ulint m_flags2; +}; /*********************************************************************//** Retrieve the FTS Relevance Ranking result for doc with doc_id of prebuilt->fts_doc_id @return the relevance ranking value */ -UNIV_INTERN float innobase_fts_retrieve_ranking( -/*==========================*/ FT_INFO* fts_hdl); /*!< in: FTS handler */ -/*********************************************************************//** +/** Find and Retrieve the FTS Relevance Ranking result for doc with doc_id of prebuilt->fts_doc_id @return the relevance ranking value */ -UNIV_INTERN float innobase_fts_find_ranking( -/*======================*/ FT_INFO* fts_hdl, /*!< in: FTS handler */ uchar* record, /*!< in: Unused */ uint len); /*!< in: Unused */ -/*********************************************************************//** + +/** Free the memory for the FTS handler */ -UNIV_INTERN void innobase_fts_close_ranking( /*=======================*/ FT_INFO* fts_hdl) /*!< in: FTS handler */ MY_ATTRIBUTE((nonnull)); -/*****************************************************************//** +/** Initialize the table FTS stopword list @return TRUE if success */ -UNIV_INTERN ibool innobase_fts_load_stopword( /*=======================*/ @@ -594,12 +955,11 @@ enum fts_doc_id_index_enum { FTS_NOT_EXIST_DOC_ID_INDEX }; -/*******************************************************************//** +/** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME on the Doc ID column. @return the status of the FTS_DOC_ID index */ -UNIV_INTERN -enum fts_doc_id_index_enum +fts_doc_id_index_enum innobase_fts_check_doc_id_index( /*============================*/ const dict_table_t* table, /*!< in: table definition */ @@ -609,13 +969,12 @@ innobase_fts_check_doc_id_index( Doc ID */ MY_ATTRIBUTE((warn_unused_result)); -/*******************************************************************//** +/** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME on the Doc ID column in MySQL create index definition. 
@return FTS_EXIST_DOC_ID_INDEX if there exists the FTS_DOC_ID index, FTS_INCORRECT_DOC_ID_INDEX if the FTS_DOC_ID index is of wrong format */ -UNIV_INTERN -enum fts_doc_id_index_enum +fts_doc_id_index_enum innobase_fts_check_doc_id_index_in_def( /*===================================*/ ulint n_key, /*!< in: Number of keys */ @@ -637,7 +996,7 @@ Find and Retrieve the FTS doc_id for the current result row @return the document ID */ ulonglong innobase_fts_retrieve_docid( -/*============================*/ +/*========================*/ FT_INFO_EXT* fts_hdl); /*!< in: FTS handler */ /*********************************************************************** @@ -645,7 +1004,7 @@ Find and retrieve the size of the current result @return number of matching rows */ ulonglong innobase_fts_count_matches( -/*============================*/ +/*=======================*/ FT_INFO_EXT* fts_hdl); /*!< in: FTS handler */ /** "GEN_CLUST_INDEX" is the name reserved for InnoDB default @@ -688,3 +1047,146 @@ ib_push_frm_error( TABLE* table, /*!< in: MySQL table */ ulint n_keys, /*!< in: InnoDB #keys */ bool push_warning); /*!< in: print warning ? */ + +// JAN: TODO: MySQL 5.7 virtual fields +// #define innobase_is_v_fld(field) ((field)->gcol_info && !(field)->stored_in_db) +#define innobase_is_v_fld(field) (false) + +/** Release temporary latches. +Call this function when mysqld passes control to the client. That is to +avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more +documentation, see handler.cc. +@param[in] hton Handlerton. +@param[in] thd MySQL thread. +@return 0 */ +int +innobase_release_temporary_latches( + handlerton* hton, + THD* thd); + +/** Always normalize table name to lower case on Windows */ +#ifdef _WIN32 +#define normalize_table_name(norm_name, name) \ + create_table_info_t::normalize_table_name_low(norm_name, name, TRUE) +#else +#define normalize_table_name(norm_name, name) \ + create_table_info_t::normalize_table_name_low(norm_name, name, FALSE) +#endif /* _WIN32 */ + +/** Obtain the InnoDB transaction of a MySQL thread. +@param[in,out] thd MySQL thread handler. +@return reference to transaction pointer */ +trx_t*& thd_to_trx(THD* thd); + +/** Converts an InnoDB error code to a MySQL error code. +Also tells to MySQL about a possible transaction rollback inside InnoDB caused +by a lock wait timeout or a deadlock. +@param[in] error InnoDB error code. +@param[in] flags InnoDB table flags or 0. +@param[in] thd MySQL thread or NULL. +@return MySQL error code */ +int +convert_error_code_to_mysql( + dberr_t error, + ulint flags, + THD* thd); + +/** Converts a search mode flag understood by MySQL to a flag understood +by InnoDB. +@param[in] find_flag MySQL search mode flag. +@return InnoDB search mode flag. */ +page_cur_mode_t +convert_search_mode_to_innobase( + enum ha_rkey_function find_flag); + +/** Commits a transaction in an InnoDB database. +@param[in] trx Transaction handle. */ +void +innobase_commit_low( + trx_t* trx); + +extern my_bool innobase_stats_on_metadata; + +/** Calculate Record Per Key value. +Need to exclude the NULL value if innodb_stats_method is set to "nulls_ignored" +@param[in] index InnoDB index. +@param[in] i The column we are calculating rec per key. +@param[in] records Estimated total records. 
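+@note The result is only an estimate: roughly records divided by the number of distinct values of the index prefix that ends at column i.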
+@return estimated record per key value */ +/* JAN: TODO: MySQL 5.7 */ +typedef float rec_per_key_t; +rec_per_key_t +innodb_rec_per_key( + dict_index_t* index, + ulint i, + ha_rows records); + +/** Build template for the virtual columns and their base columns +@param[in] table MySQL TABLE +@param[in] ib_table InnoDB dict_table_t +@param[in,out] s_templ InnoDB template structure +@param[in] add_v new virtual columns added along with + add index call +@param[in] locked true if innobase_share_mutex is held +@param[in] share_tbl_name original MySQL table name */ +void +innobase_build_v_templ( + const TABLE* table, + const dict_table_t* ib_table, + innodb_col_templ_t* s_templ, + const dict_add_v_col_t* add_v, + bool locked, + const char* share_tbl_name); + +/** Free a virtual template in INNOBASE_SHARE structure +@param[in,out] share table share holds the template to free */ +void +free_share_vtemp( + INNOBASE_SHARE* share); + +/** Refresh template for the virtual columns and their base columns if +the share structure exists +@param[in] table MySQL TABLE +@param[in] ib_table InnoDB dict_table_t +@param[in] table_name table_name used to find the share structure */ +void +refresh_share_vtempl( + const TABLE* mysql_table, + const dict_table_t* ib_table, + const char* table_name); + +/** callback used by MySQL server layer to initialized +the table virtual columns' template +@param[in] table MySQL TABLE +@param[in,out] ib_table InnoDB dict_table_t */ +void +innobase_build_v_templ_callback( + const TABLE* table, + void* ib_table); + +/** Callback function definition, used by MySQL server layer to initialized +the table virtual columns' template */ +typedef void (*my_gcolumn_templatecallback_t)(const TABLE*, void*); + +/** Get the computed value by supplying the base column values. +@param[in,out] table the table whose virtual column template to be built */ +void +innobase_init_vc_templ( + dict_table_t* table); + +/** Free the virtual column template +@param[in,out] vc_templ virtual column template */ +void +free_vc_templ( + innodb_col_templ_t* vc_templ); + +/** Set up base columns for virtual column +@param[in] table InnoDB table +@param[in] field MySQL field +@param[in,out] v_col virtual column */ +void +innodb_base_col_setup( + dict_table_t* table, + const Field* field, + dict_v_col_t* v_col); + diff --git a/storage/innobase/handler/ha_innopart.cc b/storage/innobase/handler/ha_innopart.cc new file mode 100644 index 00000000000..855090af95d --- /dev/null +++ b/storage/innobase/handler/ha_innopart.cc @@ -0,0 +1,4438 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/** @file ha_innopart.cc +Code for native partitioning in InnoDB. 
+ +Created Nov 22, 2013 Mattias Jonsson */ + +#include "univ.i" + +/* Include necessary SQL headers */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Include necessary InnoDB headers */ +#include "btr0sea.h" +#include "dict0dict.h" +#include "dict0stats.h" +#include "lock0lock.h" +#include "row0import.h" +#include "row0merge.h" +#include "row0mysql.h" +#include "row0quiesce.h" +#include "row0sel.h" +#include "row0ins.h" +#include "row0upd.h" +#include "fsp0sysspace.h" +#include "ut0ut.h" + +#include "ha_innodb.h" +#include "ha_innopart.h" +#include "partition_info.h" +#include "key.h" + +#define INSIDE_HA_INNOPART_CC + +/* To be backwards compatible we also fold partition separator on windows. */ +#ifdef _WIN32 +const char* part_sep = "#p#"; +const char* sub_sep = "#sp#"; +#else +const char* part_sep = "#P#"; +const char* sub_sep = "#SP#"; +#endif /* _WIN32 */ + +/* Partition separator for *nix platforms */ +const char* part_sep_nix = "#P#"; +const char* sub_sep_nix = "#SP#"; + +extern char* innobase_file_format_max; + +Ha_innopart_share::Ha_innopart_share( + TABLE_SHARE* table_share) + : + Partition_share(), + m_table_parts(), + m_index_mapping(), + m_tot_parts(), + m_index_count(), + m_ref_count(), + m_table_share(table_share), + m_s_templ() +{} + +Ha_innopart_share::~Ha_innopart_share() +{ + ut_ad(m_ref_count == 0); + if (m_table_parts != NULL) { + ut_free(m_table_parts); + m_table_parts = NULL; + } + if (m_index_mapping != NULL) { + ut_free(m_index_mapping); + m_index_mapping = NULL; + } + if (m_s_templ != NULL) { + free_vc_templ(m_s_templ); + ut_free(m_s_templ); + m_s_templ = NULL; + } +} + +/** Fold to lower case if windows or lower_case_table_names == 1. +@param[in,out] s String to fold.*/ +void +Ha_innopart_share::partition_name_casedn_str( + char* s) +{ +#ifdef _WIN32 + innobase_casedn_str(s); +#endif +} + +/** Translate and append partition name. +@param[out] to String to write in filesystem charset +@param[in] from Name in system charset +@param[in] sep Separator +@param[in] len Max length of to buffer +@return length of written string. */ +size_t +Ha_innopart_share::append_sep_and_name( + char* to, + const char* from, + const char* sep, + size_t len) +{ + size_t ret; + size_t sep_len = strlen(sep); + + ut_ad(len > sep_len + strlen(from)); + ut_ad(to != NULL); + ut_ad(from != NULL); + ut_ad(from[0] != '\0'); + memcpy(to, sep, sep_len); + + ret = tablename_to_filename(from, to + sep_len, + len - sep_len); + + /* Don't convert to lower case for nix style name. */ + if (strcmp(sep, part_sep_nix) != 0 + && strcmp(sep, sub_sep_nix) != 0) { + + partition_name_casedn_str(to); + } + + return(ret + sep_len); +} + +/** Copy a cached MySQL row. +If requested, also avoids overwriting non-read columns. +@param[out] buf Row in MySQL format. +@param[in] cached_row Which row to copy. */ +inline +void +ha_innopart::copy_cached_row( + uchar* buf, + const uchar* cached_row) +{ + if (m_prebuilt->keep_other_fields_on_keyread) { + row_sel_copy_cached_fields_for_mysql(buf, cached_row, + m_prebuilt); + } else { + memcpy(buf, cached_row, m_rec_length); + } +} + +/** Open one partition. +@param[in] part_id Partition id to open. +@param[in] partition_name Name of internal innodb table to open. +@return false on success else true. 
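+The partition is looked up in the InnoDB data dictionary and a sanity check compares the MySQL column count with the InnoDB user and virtual column counts (minus the hidden FTS_DOC_ID column when present); on mismatch the partition is marked corrupted.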
*/ +bool +Ha_innopart_share::open_one_table_part( + uint part_id, + const char* partition_name) +{ + char norm_name[FN_REFLEN]; + + normalize_table_name(norm_name, partition_name); + m_table_parts[part_id] = + ha_innobase::open_dict_table(partition_name, norm_name, + TRUE, DICT_ERR_IGNORE_NONE); + + if (m_table_parts[part_id] == NULL) { + return(true); + } + + dict_table_t *ib_table = m_table_parts[part_id]; + if ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) + && m_table_share->fields + != (dict_table_get_n_user_cols(ib_table) + + dict_table_get_n_v_cols(ib_table))) + || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID) + && (m_table_share->fields + != dict_table_get_n_user_cols(ib_table) + + dict_table_get_n_v_cols(ib_table) - 1))) { + ib::warn() << "Partition `" << get_partition_name(part_id) + << "` contains " << dict_table_get_n_user_cols(ib_table) + << " user defined columns in InnoDB, but " + << m_table_share->fields + << " columns in MySQL. Please check" + " INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and " REFMAN + "innodb-troubleshooting.html for how to resolve the" + " issue."; + + /* Mark this partition as corrupted, so the drop table + or force recovery can still use it, but not others. + TODO: persist table->corrupted so it will be retained on + restart and out-of-bounds operations will see it. */ + + ib_table->corrupted = true; + dict_table_close(ib_table, FALSE, FALSE); + } + + /* TODO: To save memory, compare with first partition and reuse + the column names etc. in the internal InnoDB meta-data cache. */ + + return(false); +} + +/** Set up the virtual column template for partition table, and points +all m_table_parts[]->vc_templ to it. +@param[in] table MySQL TABLE object +@param[in] ib_table InnoDB dict_table_t +@param[in] table_name Table name (db/table_name) */ +void +Ha_innopart_share::set_v_templ( + TABLE* table, + dict_table_t* ib_table, + const char* name) +{ +#ifndef DBUG_OFF + if (m_table_share->tmp_table == NO_TMP_TABLE) { + mysql_mutex_assert_owner(&m_table_share->LOCK_ha_data); + } +#endif /* DBUG_OFF */ + + if (ib_table->n_v_cols > 0) { + if (!m_s_templ) { + m_s_templ = static_cast( + ut_zalloc_nokey( sizeof *m_s_templ)); + innobase_build_v_templ(table, ib_table, + m_s_templ, NULL, false, name); + + for (ulint i = 0; i < m_tot_parts; i++) { + m_table_parts[i]->vc_templ = m_s_templ; + } + } + } else { + ut_ad(!m_s_templ); + } +} + +/** Initialize the share with table and indexes per partition. +@param[in] part_info Partition info (partition names to use). +@param[in] table_name Table name (db/table_name). +@return false on success else true. 
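+If the share was already populated by an earlier open, the cached dict_table_t objects are reused and only their reference counts are incremented.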
*/ +bool +Ha_innopart_share::open_table_parts( + partition_info* part_info, + const char* table_name) +{ + size_t table_name_len; + size_t len; + uint ib_num_index; + uint mysql_num_index; + char partition_name[FN_REFLEN]; + bool index_loaded = true; + +#ifndef DBUG_OFF + if (m_table_share->tmp_table == NO_TMP_TABLE) { + mysql_mutex_assert_owner(&m_table_share->LOCK_ha_data); + } +#endif /* DBUG_OFF */ + m_ref_count++; + if (m_table_parts != NULL) { + ut_ad(m_ref_count > 1); + ut_ad(m_tot_parts > 0); + + /* Increment dict_table_t reference count for all partitions */ + mutex_enter(&dict_sys->mutex); + for (uint i = 0; i < m_tot_parts; i++) { + dict_table_t* table = m_table_parts[i]; + table->acquire(); + ut_ad(table->get_ref_count() >= m_ref_count); + } + mutex_exit(&dict_sys->mutex); + + return(false); + } + ut_ad(m_ref_count == 1); + m_tot_parts = part_info->get_tot_partitions(); + size_t table_parts_size = sizeof(dict_table_t*) * m_tot_parts; + m_table_parts = static_cast<dict_table_t**>( + ut_zalloc(table_parts_size, mem_key_partitioning)); + if (m_table_parts == NULL) { + m_ref_count--; + return(true); + } + + /* Set up the array over all table partitions. */ + table_name_len = strlen(table_name); + memcpy(partition_name, table_name, table_name_len); + List_iterator<partition_element> + part_it(part_info->partitions); + partition_element* part_elem; + uint i = 0; + + while ((part_elem = part_it++)) { + len = append_sep_and_name( + partition_name + table_name_len, + part_elem->partition_name, + part_sep_nix, + FN_REFLEN - table_name_len); + if (part_info->is_sub_partitioned()) { + List_iterator<partition_element> + sub_it(part_elem->subpartitions); + partition_element* sub_elem; + while ((sub_elem = sub_it++)) { + append_sep_and_name( + partition_name + + table_name_len + len, + sub_elem->partition_name, + sub_sep_nix, + FN_REFLEN - table_name_len - len); + if (open_one_table_part(i, partition_name)) { + goto err; + } + i++; + } + } else { + if (open_one_table_part(i, partition_name)) { + goto err; + } + i++; + } + } + ut_ad(i == m_tot_parts); + + /* Create the mapping of mysql index number to innodb indexes. */ + + ib_num_index = (uint) UT_LIST_GET_LEN(m_table_parts[0]->indexes); + mysql_num_index = part_info->table->s->keys; + + /* If there exists inconsistency between MySQL and InnoDB dictionary + (metadata) information, the number of index defined in MySQL + could exceed that in InnoDB, do not build index translation + table in such case. */ + + if (ib_num_index < mysql_num_index) { + ut_ad(0); + goto err; + } + + if (mysql_num_index != 0) { + size_t alloc_size = mysql_num_index * m_tot_parts + * sizeof(*m_index_mapping); + m_index_mapping = static_cast<dict_index_t**>( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_index_mapping == NULL) { + + /* Report an error if index_mapping continues to be + NULL and mysql_num_index is a non-zero value. */ + + ib::error() << "Failed to allocate memory for" + " index translation table. Number of" + " Index:" << mysql_num_index; + goto err; + } + } + + /* For each index in the mysql key_info array, fetch its + corresponding InnoDB index pointer into index_mapping + array. */ + + for (ulint idx = 0; idx < mysql_num_index; idx++) { + for (ulint part = 0; part < m_tot_parts; part++) { + ulint count = part * mysql_num_index + idx; + + /* Fetch index pointers into index_mapping according + to mysql index sequence.
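+ The slot used for MySQL key idx of partition part is part * mysql_num_index + idx, which is the same layout that get_index() and get_mysql_key() assume.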
*/ + + m_index_mapping[count] = dict_table_get_index_on_name( + m_table_parts[part], + part_info->table->key_info[idx].name); + + if (m_index_mapping[count] == NULL) { + ib::error() << "Cannot find index `" + << part_info->table->key_info[idx].name + << "` in InnoDB index dictionary" + " partition `" + << get_partition_name(part) << "`."; + index_loaded = false; + break; + } + + /* Double check fetched index has the same + column info as those in mysql key_info. */ + + if (!innobase_match_index_columns( + &part_info->table->key_info[idx], + m_index_mapping[count])) { + ib::error() << "Found index `" + << part_info->table->key_info[idx].name + << "` whose column info does not match" + " that of MySQL."; + index_loaded = false; + break; + } + } + } + if (!index_loaded && m_index_mapping != NULL) { + ut_free(m_index_mapping); + m_index_mapping = NULL; + } + + /* Successfully built the translation table. */ + m_index_count = mysql_num_index; + + return(false); +err: + close_table_parts(); + + return(true); +} + +/** Close all partitions. */ +void +Ha_innopart_share::close_table_parts() +{ +#ifndef DBUG_OFF + if (m_table_share->tmp_table == NO_TMP_TABLE) { + mysql_mutex_assert_owner(&m_table_share->LOCK_ha_data); + } +#endif /* DBUG_OFF */ + m_ref_count--; + if (m_ref_count != 0) { + + /* Decrement dict_table_t reference count for all partitions */ + mutex_enter(&dict_sys->mutex); + for (uint i = 0; i < m_tot_parts; i++) { + dict_table_t* table = m_table_parts[i]; + table->release(); + ut_ad(table->get_ref_count() >= m_ref_count); + } + mutex_exit(&dict_sys->mutex); + + return; + } + + /* Last instance closed, close all table partitions and + free the memory. */ + + mutex_enter(&dict_sys->mutex); + if (m_table_parts != NULL) { + for (uint i = 0; i < m_tot_parts; i++) { + if (m_table_parts[i] != NULL) { + dict_table_close(m_table_parts[i], TRUE, TRUE); + } + } + ut_free(m_table_parts); + m_table_parts = NULL; + } + mutex_exit(&dict_sys->mutex); + if (m_index_mapping != NULL) { + ut_free(m_index_mapping); + m_index_mapping = NULL; + } + + if (m_s_templ != NULL) { + free_vc_templ(m_s_templ); + ut_free(m_s_templ); + m_s_templ = NULL; + } + + m_tot_parts = 0; + m_index_count = 0; +} + +/** Get index. +Find the index of the specified partition and key number. +@param[in] part_id Partition number. +@param[in] keynr Key number. +@return Index pointer or NULL. */ +inline +dict_index_t* +Ha_innopart_share::get_index( + uint part_id, + uint keynr) +{ + ut_a(part_id < m_tot_parts); + ut_ad(keynr < m_index_count || keynr == MAX_KEY); + if (m_index_mapping == NULL + || keynr >= m_index_count) { + + if (keynr == MAX_KEY) { + return(dict_table_get_first_index( + get_table_part(part_id))); + } + return(NULL); + } + return(m_index_mapping[m_index_count * part_id + keynr]); +} + +/** Get MySQL key number corresponding to InnoDB index. +Calculates the key number used inside MySQL for an Innobase index. We will +first check the "index translation table" for a match of the index to get +the index number. If there does not exist an "index translation table", +or not able to find the index in the translation table, then we will fall back +to the traditional way of looping through dict_index_t list to find a +match. In this case, we have to take into account if we generated a +default clustered index for the table +@param[in] part_id Partition the index belongs to. +@param[in] index Index to return MySQL key number for. +@return the key number used inside MySQL or UINT_MAX if key is not found. 
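+The translation table stores m_index_count entries per partition, so the MySQL key number of a matching entry is its position in the table modulo m_index_count.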
*/ +inline +uint +Ha_innopart_share::get_mysql_key( + uint part_id, + const dict_index_t* index) +{ + ut_ad(index != NULL); + ut_ad(m_index_mapping != NULL); + ut_ad(m_tot_parts); + + if (index != NULL && m_index_mapping != NULL) { + uint start; + uint end; + + if (part_id < m_tot_parts) { + start = part_id * m_index_count; + end = start + m_index_count; + } else { + start = 0; + end = m_tot_parts * m_index_count; + } + for (uint i = start; i < end; i++) { + if (m_index_mapping[i] == index) { + return(i % m_index_count); + } + } + + /* Print an error message if we cannot find the index + in the "index translation table". */ + + if (index->is_committed()) { + ib::error() << "Cannot find index " + << index->name + << " in InnoDB index translation table."; + } + } + + return(UINT_MAX); +} + +/** Helper function for set bit in bitmap. +@param[in,out] buf Bitmap buffer to update bit in. +@param[in] bit_pos Bit number (index starts at 0). */ +static +inline +void +set_bit( + byte* buf, + size_t pos) +{ + buf[pos/8] |= (0x1 << (pos & 0x7)); +} + +/** Helper function for clear bit in bitmap. +@param[in,out] buf Bitmap buffer to update bit in. +@param[in] bit_pos Bit number (index starts at 0). */ +static +inline +void +clear_bit( + byte* buf, + size_t pos) +{ + buf[pos/8] &= ~(0x1 << (pos & 0x7)); +} + +/** Helper function for get bit in bitmap. +@param[in,out] buf Bitmap buffer. +@param[in] bit_pos Bit number (index starts at 0). +@return byte set to 0x0 or 0x1. +@retval 0x0 bit not set. +@retval 0x1 bet set. */ +static +inline +byte +get_bit( + byte* buf, + size_t pos) +{ + return((buf[pos/8] >> (pos & 0x7)) & 0x1); +} + +/** Helper class for encapsulating new/altered partitions during +ADD/REORG/... PARTITION. */ +class Altered_partitions +{ +private: + /** New partitions during ADD/REORG/... PARTITION. */ + dict_table_t** m_new_table_parts; + + /** Insert nodes per partition. */ + ins_node_t** m_ins_nodes; + + /** sql_stat_start per partition. */ + byte* m_sql_stat_start; + + /** Trx id per partition. */ + trx_id_t* m_trx_ids; + + /** Number of new partitions. */ + size_t m_num_new_parts; + + /** Only need to create the partitions (no open/lock). */ + bool m_only_create; + +public: + Altered_partitions( + uint n_partitions, + bool only_create); + + ~Altered_partitions(); + + bool + initialize(); + + bool + only_create() const + { + return(m_only_create); + } + + /** Set currently used partition. + @param[in] new_part_id Partition id to set. + @param[in] part InnoDB table to use. */ + inline + void + set_part( + ulint new_part_id, + dict_table_t* part) + { + ut_ad(m_new_table_parts[new_part_id] == NULL); + m_new_table_parts[new_part_id] = part; + set_bit(m_sql_stat_start, new_part_id); + } + + /** Get lower level InnoDB table for partition. + @param[in] part_id Partition id. + @return Lower level InnoDB table for the partition id. */ + inline + dict_table_t* + part( + uint part_id) const + { + ut_ad(part_id < m_num_new_parts); + return(m_new_table_parts[part_id]); + } + + /** Set up prebuilt for using a specified partition. + @param[in] prebuilt Prebuilt to update. + @param[in] new_part_id Partition to use. 
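+ Besides the table pointer, the cached ins_node, trx id and sql_stat_start flag of that partition are copied into the prebuilt.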
*/ + inline + void + get_prebuilt( + row_prebuilt_t* prebuilt, + uint new_part_id) const + { + ut_ad(m_new_table_parts[new_part_id]); + prebuilt->table = m_new_table_parts[new_part_id]; + prebuilt->ins_node = m_ins_nodes[new_part_id]; + prebuilt->trx_id = m_trx_ids[new_part_id]; + prebuilt->sql_stat_start = get_bit(m_sql_stat_start, + new_part_id); + } + + /** Update cached values for a partition from prebuilt. + @param[in] prebuilt Prebuilt to copy from. + @param[in] new_part_id Partition id to copy. */ + inline + void + set_from_prebuilt( + row_prebuilt_t* prebuilt, + uint new_part_id) + { + ut_ad(m_new_table_parts[new_part_id] == prebuilt->table); + m_ins_nodes[new_part_id] = prebuilt->ins_node; + m_trx_ids[new_part_id] = prebuilt->trx_id; + if (prebuilt->sql_stat_start == 0) { + clear_bit(m_sql_stat_start, new_part_id); + } + } +}; + +Altered_partitions::Altered_partitions( + uint n_partitions, + bool only_create) + : + m_new_table_parts(), + m_ins_nodes(), + m_sql_stat_start(), + m_trx_ids(), + m_num_new_parts(n_partitions), + m_only_create(only_create) + {} + +Altered_partitions::~Altered_partitions() +{ + if (m_new_table_parts != NULL) { + for (ulint i = 0; i < m_num_new_parts; i++) { + if (m_new_table_parts[i] != NULL) { + dict_table_close(m_new_table_parts[i], + false, true); + } + } + ut_free(m_new_table_parts); + m_new_table_parts = NULL; + } + if (m_ins_nodes != NULL) { + for (ulint i = 0; i < m_num_new_parts; i++) { + if (m_ins_nodes[i] != NULL) { + ins_node_t* ins = m_ins_nodes[i]; + ut_ad(ins->select == NULL); + que_graph_free_recursive(ins->select); + ins->select = NULL; + if (ins->entry_sys_heap != NULL) { + mem_heap_free(ins->entry_sys_heap); + ins->entry_sys_heap = NULL; + } + } + } + ut_free(m_ins_nodes); + m_ins_nodes = NULL; + } + if (m_sql_stat_start != NULL) { + ut_free(m_sql_stat_start); + m_sql_stat_start = NULL; + } + if (m_trx_ids != NULL) { + ut_free(m_trx_ids); + m_trx_ids = NULL; + } +} + +/** Initialize the object. +@return false on success else true. */ +bool +Altered_partitions::initialize() +{ + size_t alloc_size = sizeof(*m_new_table_parts) * m_num_new_parts; + m_new_table_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_new_table_parts == NULL) { + return(true); + } + + alloc_size = sizeof(*m_ins_nodes) * m_num_new_parts; + m_ins_nodes = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_ins_nodes == NULL) { + ut_free(m_new_table_parts); + m_new_table_parts = NULL; + return(true); + } + + alloc_size = sizeof(*m_sql_stat_start) + * UT_BITS_IN_BYTES(m_num_new_parts); + m_sql_stat_start = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_sql_stat_start == NULL) { + ut_free(m_new_table_parts); + m_new_table_parts = NULL; + ut_free(m_ins_nodes); + m_ins_nodes = NULL; + return(true); + } + + alloc_size = sizeof(*m_trx_ids) * m_num_new_parts; + m_trx_ids = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_trx_ids == NULL) { + ut_free(m_new_table_parts); + m_new_table_parts = NULL; + ut_free(m_ins_nodes); + m_ins_nodes = NULL; + ut_free(m_sql_stat_start); + m_sql_stat_start = NULL; + return(true); + } + + return(false); +} + +/** Construct ha_innopart handler. +@param[in] hton Handlerton. +@param[in] table_arg MySQL Table. +@return a new ha_innopart handler. 
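+The constructor also clears the table flags that are not supported for partitioned tables and leaves m_share unset, since INNOBASE_SHARE is not used by ha_innopart.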
*/ +ha_innopart::ha_innopart( + handlerton* hton, + TABLE_SHARE* table_arg) + : + ha_innobase(hton, table_arg), + Partition_helper(this), + m_ins_node_parts(), + m_upd_node_parts(), + m_blob_heap_parts(), + m_trx_id_parts(), + m_row_read_type_parts(), + m_sql_stat_start_parts(), + m_pcur(), + m_clust_pcur(), + m_new_partitions() +{ + m_int_table_flags &= ~(HA_INNOPART_DISABLED_TABLE_FLAGS); + + /* INNOBASE_SHARE is not used in ha_innopart. + This also flags for ha_innobase that it is a partitioned table. + And make it impossible to use legacy share functionality. */ + + m_share = NULL; +} + +/** Destruct ha_innopart handler. */ +ha_innopart::~ha_innopart() +{} + +/** Returned supported alter table flags. +@param[in] flags Flags to support. +@return Supported flags. */ +uint +ha_innopart::alter_table_flags( + uint flags) +{ + return(HA_PARTITION_FUNCTION_SUPPORTED | HA_FAST_CHANGE_PARTITION); +} + +/** Internally called for initializing auto increment value. +Only called from ha_innobase::discard_or_import_table_space() +and should not do anything, since it is ha_innopart will initialize +it on first usage. */ +int +ha_innopart::innobase_initialize_autoinc() +{ + ut_ad(0); + return(0); +} + +/** Set the autoinc column max value. +This should only be called once from ha_innobase::open(). +Therefore there's no need for a covering lock. +@param[in] no_lock Ignored! +@return 0 for success or error code. */ +inline +int +ha_innopart::initialize_auto_increment( + bool /* no_lock */) +{ + int error = 0; + ulonglong auto_inc = 0; + const Field* field = table->found_next_number_field; + +#ifndef DBUG_OFF + if (table_share->tmp_table == NO_TMP_TABLE) + { + mysql_mutex_assert_owner(m_part_share->auto_inc_mutex); + } +#endif + + /* Since a table can already be "open" in InnoDB's internal + data dictionary, we only init the autoinc counter once, the + first time the table is loaded. We can safely reuse the + autoinc value from a previous MySQL open. */ + + if (m_part_share->auto_inc_initialized) { + /* Already initialized, nothing to do. */ + return(0); + } + + if (field == NULL) { + ib::info() << "Unable to determine the AUTOINC column name"; + } + + if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { + /* If the recovery level is set so high that writes + are disabled we force the AUTOINC counter to 0 + value effectively disabling writes to the table. + Secondly, we avoid reading the table in case the read + results in failure due to a corrupted table/index. + + We will not return an error to the client, so that the + tables can be dumped with minimal hassle. If an error + were returned in this case, the first attempt to read + the table would fail and subsequent SELECTs would succeed. */ + + } else if (field == NULL) { + /* This is a far more serious error, best to avoid + opening the table and return failure. 
*/ + + my_error(ER_AUTOINC_READ_FAILED, MYF(0)); + error = HA_ERR_AUTOINC_READ_FAILED; + } else { + dict_index_t* index; + const char* col_name; + ib_uint64_t read_auto_inc; + ib_uint64_t max_auto_inc = 0; + ulint err; + dict_table_t* ib_table; + ulonglong col_max_value; + + col_max_value = field->get_max_int_value(); + + update_thd(ha_thd()); + + col_name = field->field_name; + for (uint part = 0; part < m_tot_parts; part++) { + ib_table = m_part_share->get_table_part(part); + dict_table_autoinc_lock(ib_table); + read_auto_inc = dict_table_autoinc_read(ib_table); + if (read_auto_inc != 0) { + set_if_bigger(max_auto_inc, read_auto_inc); + dict_table_autoinc_unlock(ib_table); + continue; + } + /* Execute SELECT MAX(col_name) FROM TABLE; */ + index = m_part_share->get_index( + part, table->s->next_number_index); + err = row_search_max_autoinc( + index, col_name, &read_auto_inc); + + switch (err) { + case DB_SUCCESS: { + /* At the this stage we do not know the + increment nor the offset, + so use a default increment of 1. */ + + auto_inc = innobase_next_autoinc( + read_auto_inc, 1, 1, 0, col_max_value); + set_if_bigger(max_auto_inc, auto_inc); + dict_table_autoinc_initialize(ib_table, + auto_inc); + break; + } + case DB_RECORD_NOT_FOUND: + ib::error() << "MySQL and InnoDB data" + " dictionaries are out of sync. Unable" + " to find the AUTOINC column " + << col_name << " in the InnoDB table " + << index->table->name << ". We set the" + " next AUTOINC column value to 0, in" + " effect disabling the AUTOINC next" + " value generation."; + + ib::info() << "You can either set the next" + " AUTOINC value explicitly using ALTER" + " TABLE or fix the data dictionary by" + " recreating the table."; + + /* We want the open to succeed, so that the + user can take corrective action. ie. reads + should succeed but updates should fail. */ + + /* This will disable the AUTOINC generation. */ + auto_inc = 0; + goto done; + default: + /* row_search_max_autoinc() should only return + one of DB_SUCCESS or DB_RECORD_NOT_FOUND. */ + + ut_error; + } + dict_table_autoinc_unlock(ib_table); + } + auto_inc = max_auto_inc; + } + +done: + m_part_share->next_auto_inc_val = auto_inc; + m_part_share->auto_inc_initialized = true; + return(error); +} + +/** Opens a partitioned InnoDB table. +Initializes needed data and opens the table which already exists +in an InnoDB database. +@param[in] name Table name (db/tablename) +@param[in] mode Not used +@param[in] test_if_locked Not used +@return 0 or error number. */ +int +ha_innopart::open( + const char* name, + int /*mode*/, + uint /*test_if_locked*/) +{ + dict_table_t* ib_table; + char norm_name[FN_REFLEN]; + THD* thd; + + DBUG_ENTER("ha_innopart::open"); + + ut_ad(table); + if (m_part_info == NULL) { + /* Must be during ::clone()! */ + ut_ad(table->part_info != NULL); + m_part_info = table->part_info; + } + thd = ha_thd(); + + /* Under some cases MySQL seems to call this function while + holding search latch(es). This breaks the latching order as + we acquire dict_sys->mutex below and leads to a deadlock. */ + + if (thd != NULL) { + innobase_release_temporary_latches(ht, thd); + } + + normalize_table_name(norm_name, name); + + m_user_thd = NULL; + + /* Get the Ha_innopart_share from the TABLE_SHARE. 
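+ It is created on the first open, stored in the ha_share pointer of the TABLE_SHARE and reused by all later handler instances of this table.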
*/ + lock_shared_ha_data(); + m_part_share = static_cast(get_ha_share_ptr()); + if (m_part_share == NULL) { + m_part_share = new (std::nothrow) + Ha_innopart_share(table_share); + if (m_part_share == NULL) { +share_error: + unlock_shared_ha_data(); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + set_ha_share_ptr(static_cast(m_part_share)); + } + if (m_part_share->open_table_parts(m_part_info, name) + || m_part_share->populate_partition_name_hash(m_part_info)) { + goto share_error; + } + if (m_part_share->auto_inc_mutex == NULL + && table->found_next_number_field != NULL) { + if (m_part_share->init_auto_inc_mutex(table_share)) { + goto share_error; + } + } + unlock_shared_ha_data(); + + /* Will be allocated if it is needed in ::update_row(). */ + m_upd_buf = NULL; + m_upd_buf_size = 0; + + /* Get pointer to a table object in InnoDB dictionary cache. */ + ib_table = m_part_share->get_table_part(0); + + m_pcur_parts = NULL; + m_clust_pcur_parts = NULL; + m_pcur_map = NULL; + + /* TODO: Handle mismatching #P# vs #p# in upgrading to new DD instead! + See bug#58406, The problem exists when moving partitioned tables + between Windows and Unix-like platforms. InnoDB always folds the name + on windows, partitioning never folds partition (and #P# separator). + I.e. non of it follows lower_case_table_names correctly :( */ + + if (open_partitioning(m_part_share)) + { + close(); + DBUG_RETURN(HA_ERR_INITIALIZATION); + } + + /* Currently we track statistics for all partitions, but for + the secondary indexes we only use the biggest partition. */ + + for (uint part_id = 0; part_id < m_tot_parts; part_id++) { + innobase_copy_frm_flags_from_table_share( + m_part_share->get_table_part(part_id), + table->s); + dict_stats_init(m_part_share->get_table_part(part_id)); + } + + MONITOR_INC(MONITOR_TABLE_OPEN); + + bool no_tablespace; + + /* TODO: Should we do this check for every partition during ::open()? */ + /* TODO: refactor this in ha_innobase so it can increase code reuse. */ + if (dict_table_is_discarded(ib_table)) { + + ib_senderrf(thd, + IB_LOG_LEVEL_WARN, ER_TABLESPACE_DISCARDED, + table->s->table_name.str); + + /* Allow an open because a proper DISCARD should have set + all the flags and index root page numbers to FIL_NULL that + should prevent any DML from running but it should allow DDL + operations. */ + + no_tablespace = false; + + } else if (ib_table->ibd_file_missing) { + + ib_senderrf( + thd, IB_LOG_LEVEL_WARN, + ER_TABLESPACE_MISSING, norm_name); + + /* This means we have no idea what happened to the tablespace + file, best to play it safe. */ + + no_tablespace = true; + } else { + no_tablespace = false; + } + + if (!thd_tablespace_op(thd) && no_tablespace) { + set_my_errno(ENOENT); + + lock_shared_ha_data(); + m_part_share->close_table_parts(); + unlock_shared_ha_data(); + m_part_share = NULL; + + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); + } + + m_prebuilt = row_create_prebuilt(ib_table, table->s->reclength); + + m_prebuilt->default_rec = table->s->default_values; + ut_ad(m_prebuilt->default_rec); + + if (ib_table->n_v_cols > 0) { + lock_shared_ha_data(); + m_part_share->set_v_templ(table, ib_table, name); + unlock_shared_ha_data(); + } + + /* Looks like MySQL-3.23 sometimes has primary key number != 0. */ + m_primary_key = table->s->primary_key; + key_used_on_scan = m_primary_key; + + /* Allocate a buffer for a 'row reference'. A row reference is + a string of bytes of length ref_length which uniquely specifies + a row in our table. 
Note that MySQL may also compare two row + references for equality by doing a simple memcmp on the strings + of length ref_length! */ + + if (!row_table_got_default_clust_index(ib_table)) { + + m_prebuilt->clust_index_was_generated = FALSE; + + if (UNIV_UNLIKELY(m_primary_key >= MAX_KEY)) { + table_name_t table_name; + table_name.m_name = const_cast(name); + ib::error() << "Table " << table_name + << " has a primary key in InnoDB data" + " dictionary, but not in MySQL!"; + + /* This mismatch could cause further problems + if not attended, bring this to the user's attention + by printing a warning in addition to log a message + in the errorlog. */ + + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_NO_SUCH_INDEX, + "Table %s has a" + " primary key in InnoDB data" + " dictionary, but not in" + " MySQL!", name); + + /* If m_primary_key >= MAX_KEY, its (m_primary_key) + value could be out of bound if continue to index + into key_info[] array. Find InnoDB primary index, + and assign its key_length to ref_length. + In addition, since MySQL indexes are sorted starting + with primary index, unique index etc., initialize + ref_length to the first index key length in + case we fail to find InnoDB cluster index. + + Please note, this will not resolve the primary + index mismatch problem, other side effects are + possible if users continue to use the table. + However, we allow this table to be opened so + that user can adopt necessary measures for the + mismatch while still being accessible to the table + date. */ + + if (table->key_info == NULL) { + ut_ad(table->s->keys == 0); + ref_length = 0; + } else { + ref_length = table->key_info[0].key_length; + } + + /* Find corresponding cluster index + key length in MySQL's key_info[] array. */ + + for (uint i = 0; i < table->s->keys; i++) { + dict_index_t* index; + index = innopart_get_index(0, i); + if (dict_index_is_clust(index)) { + ref_length = + table->key_info[i].key_length; + } + } + ut_a(ref_length); + ref_length += PARTITION_BYTES_IN_POS; + } else { + /* MySQL allocates the buffer for ref. + key_info->key_length includes space for all key + columns + one byte for each column that may be + NULL. ref_length must be as exact as possible to + save space, because all row reference buffers are + allocated based on ref_length. */ + + ref_length = table->key_info[m_primary_key].key_length; + ref_length += PARTITION_BYTES_IN_POS; + } + } else { + if (m_primary_key != MAX_KEY) { + table_name_t table_name; + table_name.m_name = const_cast(name); + ib::error() << "Table " << table_name + << " has no primary key in InnoDB data" + " dictionary, but has one in MySQL! If you" + " created the table with a MySQL version <" + " 3.23.54 and did not define a primary key," + " but defined a unique key with all non-NULL" + " columns, then MySQL internally treats that" + " key as the primary key. You can fix this" + " error by dump + DROP + CREATE + reimport" + " of the table."; + + /* This mismatch could cause further problems + if not attended, bring this to the user attention + by printing a warning in addition to log a message + in the errorlog. 
*/ + + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_NO_SUCH_INDEX, + "InnoDB: Table %s has no" + " primary key in InnoDB data" + " dictionary, but has one in" + " MySQL!", name); + } + + m_prebuilt->clust_index_was_generated = TRUE; + + ref_length = DATA_ROW_ID_LEN; + ref_length += PARTITION_BYTES_IN_POS; + + /* If we automatically created the clustered index, then + MySQL does not know about it, and MySQL must NOT be aware + of the index used on scan, to make it avoid checking if we + update the column of the index. That is why we assert below + that key_used_on_scan is the undefined value MAX_KEY. + The column is the row id in the automatical generation case, + and it will never be updated anyway. */ + + if (key_used_on_scan != MAX_KEY) { + table_name_t table_name; + table_name.m_name = const_cast(name); + ib::warn() << "Table " << table_name + << " key_used_on_scan is " + << key_used_on_scan << " even though there is" + " no primary key inside InnoDB."; + } + } + + /* Index block size in InnoDB: used by MySQL in query optimization. */ + stats.block_size = UNIV_PAGE_SIZE; + + if (m_prebuilt->table != NULL) { + /* We update the highest file format in the system table + space, if this table has higher file format setting. */ + + trx_sys_file_format_max_upgrade( + (const char**) &innobase_file_format_max, + dict_table_get_format(m_prebuilt->table)); + } + + /* Only if the table has an AUTOINC column. */ + if (m_prebuilt->table != NULL + && !m_prebuilt->table->ibd_file_missing + && table->found_next_number_field != NULL) { + int error; + + /* Since a table can already be "open" in InnoDB's internal + data dictionary, we only init the autoinc counter once, the + first time the table is loaded, + see ha_innopart::initialize_auto_increment. + We can safely reuse the autoinc value from a previous MySQL + open. */ + + lock_auto_increment(); + error = initialize_auto_increment(false); + unlock_auto_increment(); + if (error != 0) { + close(); + DBUG_RETURN(error); + } + } + +#ifdef HA_INNOPART_SUPPORTS_FULLTEXT + /* Set plugin parser for fulltext index. */ + for (uint i = 0; i < table->s->keys; i++) { + if (table->key_info[i].flags & HA_USES_PARSER) { + dict_index_t* index = innobase_get_index(i); + plugin_ref parser = table->key_info[i].parser; + + ut_ad(index->type & DICT_FTS); + index->parser = + static_cast( + plugin_decl(parser)->info); + + DBUG_EXECUTE_IF("fts_instrument_use_default_parser", + index->parser = &fts_default_parser;); + } + } +#endif /* HA_INNOPART_SUPPORTS_FULLTEXT */ + + size_t alloc_size = sizeof(*m_ins_node_parts) * m_tot_parts; + m_ins_node_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + + alloc_size = sizeof(*m_upd_node_parts) * m_tot_parts; + m_upd_node_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + + alloc_blob_heap_array(); + + alloc_size = sizeof(*m_trx_id_parts) * m_tot_parts; + m_trx_id_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + + alloc_size = sizeof(*m_row_read_type_parts) * m_tot_parts; + m_row_read_type_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + + alloc_size = UT_BITS_IN_BYTES(m_tot_parts); + m_sql_stat_start_parts = static_cast( + ut_zalloc(alloc_size, mem_key_partitioning)); + if (m_ins_node_parts == NULL + || m_upd_node_parts == NULL + || m_blob_heap_parts == NULL + || m_trx_id_parts == NULL + || m_row_read_type_parts == NULL + || m_sql_stat_start_parts == NULL) { + close(); // Frees all the above. 
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); + + DBUG_RETURN(0); +} + +/** Get a cloned ha_innopart handler. +@param[in] name Table name. +@param[in] mem_root MySQL mem_root to use. +@return new ha_innopart handler. */ +handler* +ha_innopart::clone( + const char* name, + MEM_ROOT* mem_root) +{ + ha_innopart* new_handler; + + DBUG_ENTER("ha_innopart::clone"); + + new_handler = dynamic_cast(handler::clone(name, + mem_root)); + if (new_handler != NULL) { + ut_ad(new_handler->m_prebuilt != NULL); + + new_handler->m_prebuilt->select_lock_type = + m_prebuilt->select_lock_type; + } + + DBUG_RETURN(new_handler); +} + +/** Clear used ins_nodes and upd_nodes. */ +void ha_innopart::clear_ins_upd_nodes() +{ + /* Free memory from insert nodes. */ + if (m_ins_node_parts != NULL) { + for (uint i = 0; i < m_tot_parts; i++) { + if (m_ins_node_parts[i] != NULL) { + ins_node_t* ins = m_ins_node_parts[i]; + if (ins->select != NULL) { + que_graph_free_recursive(ins->select); + ins->select = NULL; + } + + if (ins->entry_sys_heap != NULL) { + mem_heap_free(ins->entry_sys_heap); + ins->entry_sys_heap = NULL; + } + m_ins_node_parts[i] = NULL; + } + } + } + + /* Free memory from update nodes. */ + if (m_upd_node_parts != NULL) { + for (uint i = 0; i < m_tot_parts; i++) { + if (m_upd_node_parts[i] != NULL) { + upd_node_t* upd = m_upd_node_parts[i]; + if (upd->cascade_top) { + mem_heap_free(upd->cascade_heap); + upd->cascade_top = false; + upd->cascade_heap = NULL; + } + if (upd->in_mysql_interface) { + btr_pcur_free_for_mysql(upd->pcur); + upd->in_mysql_interface = FALSE; + } + + if (upd->select != NULL) { + que_graph_free_recursive(upd->select); + upd->select = NULL; + } + if (upd->heap != NULL) { + mem_heap_free(upd->heap); + upd->heap = NULL; + } + m_upd_node_parts[i] = NULL; + } + } + } +} + +/** Closes a handle to an InnoDB table. +@return 0 */ +int +ha_innopart::close() +{ + THD* thd; + + DBUG_ENTER("ha_innopart::close"); + + thd = ha_thd(); + if (thd != NULL) { + innobase_release_temporary_latches(ht, thd); + } + + ut_ad(m_pcur_parts == NULL); + ut_ad(m_clust_pcur_parts == NULL); + close_partitioning(); + + ut_ad(m_part_share != NULL); + if (m_part_share != NULL) { + lock_shared_ha_data(); + m_part_share->close_table_parts(); + unlock_shared_ha_data(); + m_part_share = NULL; + } + clear_ins_upd_nodes(); + free_blob_heap_array(); + + /* Prevent double close of m_prebuilt->table. The real one was done + done in m_part_share->close_table_parts(). */ + m_prebuilt->table = NULL; + row_prebuilt_free(m_prebuilt, FALSE); + + if (m_upd_buf != NULL) { + ut_ad(m_upd_buf_size != 0); + /* Allocated with my_malloc! */ + my_free(m_upd_buf); + m_upd_buf = NULL; + m_upd_buf_size = 0; + } + + if (m_ins_node_parts != NULL) { + ut_free(m_ins_node_parts); + m_ins_node_parts = NULL; + } + if (m_upd_node_parts != NULL) { + ut_free(m_upd_node_parts); + m_upd_node_parts = NULL; + } + if (m_trx_id_parts != NULL) { + ut_free(m_trx_id_parts); + m_trx_id_parts = NULL; + } + if (m_row_read_type_parts != NULL) { + ut_free(m_row_read_type_parts); + m_row_read_type_parts = NULL; + } + if (m_sql_stat_start_parts != NULL) { + ut_free(m_sql_stat_start_parts); + m_sql_stat_start_parts = NULL; + } + + MONITOR_INC(MONITOR_TABLE_CLOSE); + + /* Tell InnoDB server that there might be work for + utility threads: */ + + srv_active_wake_master_thread(); + + DBUG_RETURN(0); +} + +/** Change active partition. +Copies needed info into m_prebuilt from the partition specific memory. 
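+The pcur and clust_pcur pointers, the ins/upd nodes, the blob heap, the transaction id and the row read type are all switched to the copies kept for the given partition.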
+@param[in] part_id Partition to set as active. */ +void +ha_innopart::set_partition( + uint part_id) +{ + DBUG_ENTER("ha_innopart::set_partition"); + + DBUG_PRINT("ha_innopart", ("partition id: %u", part_id)); + + if (part_id >= m_tot_parts) { + ut_ad(0); + DBUG_VOID_RETURN; + } + if (m_pcur_parts != NULL) { + m_prebuilt->pcur = &m_pcur_parts[m_pcur_map[part_id]]; + } + if (m_clust_pcur_parts != NULL) { + m_prebuilt->clust_pcur = + &m_clust_pcur_parts[m_pcur_map[part_id]]; + } + m_prebuilt->ins_node = m_ins_node_parts[part_id]; + m_prebuilt->upd_node = m_upd_node_parts[part_id]; + + /* For unordered scan and table scan, use blob_heap from first + partition as we need exactly one blob. */ + m_prebuilt->blob_heap = m_blob_heap_parts[m_ordered ? part_id : 0]; + +#ifdef UNIV_DEBUG + if (m_prebuilt->blob_heap != NULL) { + DBUG_PRINT("ha_innopart", ("validating blob_heap: %p", + m_prebuilt->blob_heap)); + mem_heap_validate(m_prebuilt->blob_heap); + } +#endif + + m_prebuilt->trx_id = m_trx_id_parts[part_id]; + m_prebuilt->row_read_type = m_row_read_type_parts[part_id]; + m_prebuilt->sql_stat_start = get_bit(m_sql_stat_start_parts, part_id); + m_prebuilt->table = m_part_share->get_table_part(part_id); + m_prebuilt->index = innopart_get_index(part_id, active_index); + + DBUG_VOID_RETURN; +} + +/** Update active partition. +Copies needed info from m_prebuilt into the partition specific memory. +@param[in] part_id Partition to set as active. */ +void +ha_innopart::update_partition( + uint part_id) +{ + DBUG_ENTER("ha_innopart::update_partition"); + DBUG_PRINT("ha_innopart", ("partition id: %u", part_id)); + + if (part_id >= m_tot_parts) { + ut_ad(0); + DBUG_VOID_RETURN; + } + m_ins_node_parts[part_id] = m_prebuilt->ins_node; + m_upd_node_parts[part_id] = m_prebuilt->upd_node; + +#ifdef UNIV_DEBUG + if (m_prebuilt->blob_heap != NULL) { + DBUG_PRINT("ha_innopart", ("validating blob_heap: %p", + m_prebuilt->blob_heap)); + mem_heap_validate(m_prebuilt->blob_heap); + } +#endif + + /* For unordered scan and table scan, use blob_heap from first + partition as we need exactly one blob anytime. */ + m_blob_heap_parts[m_ordered ? part_id : 0] = m_prebuilt->blob_heap; + + m_trx_id_parts[part_id] = m_prebuilt->trx_id; + m_row_read_type_parts[part_id] = m_prebuilt->row_read_type; + if (m_prebuilt->sql_stat_start == 0) { + clear_bit(m_sql_stat_start_parts, part_id); + } + m_last_part = part_id; + DBUG_VOID_RETURN; +} + +/** Save currently highest auto increment value. +@param[in] nr Auto increment value to save. */ +void +ha_innopart::save_auto_increment( + ulonglong nr) +{ + + /* Store it in the shared dictionary of the partition. + TODO: When the new DD is done, store it in the table and make it + persistent! */ + + dict_table_autoinc_lock(m_prebuilt->table); + dict_table_autoinc_update_if_greater(m_prebuilt->table, nr + 1); + dict_table_autoinc_unlock(m_prebuilt->table); +} + +/** Was the last returned row semi consistent read. +In an UPDATE or DELETE, if the row under the cursor was locked by +another transaction, and the engine used an optimistic read of the last +committed row value under the cursor, then the engine returns 1 from +this function. MySQL must NOT try to update this optimistic value. If +the optimistic value does not match the WHERE condition, MySQL can +decide to skip over this row. This can be used to avoid unnecessary +lock waits. + +If this method returns true, it will also signal the storage +engine that the next read will be a locking re-read of the row. 
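+In ha_innopart the row read type is tracked per partition, so the answer is taken from the partition that produced the last returned row (m_last_part).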
+@see handler.h and row0mysql.h +@return true if last read was semi consistent else false. */ +bool +ha_innopart::was_semi_consistent_read() +{ + return(m_row_read_type_parts[m_last_part] + == ROW_READ_DID_SEMI_CONSISTENT); +} + +/** Try semi consistent read. +Tell the engine whether it should avoid unnecessary lock waits. +If yes, in an UPDATE or DELETE, if the row under the cursor was locked +by another transaction, the engine may try an optimistic read of +the last committed row value under the cursor. +@see handler.h and row0mysql.h +@param[in] yes Should semi-consistent read be used. */ +void +ha_innopart::try_semi_consistent_read( + bool yes) +{ + ha_innobase::try_semi_consistent_read(yes); + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + m_row_read_type_parts[i] = m_prebuilt->row_read_type; + } +} + +/** Removes a lock on a row. +Removes a new lock set on a row, if it was not read optimistically. +This can be called after a row has been read in the processing of +an UPDATE or a DELETE query. @see ha_innobase::unlock_row(). */ +void +ha_innopart::unlock_row() +{ + ut_ad(m_last_part < m_tot_parts); + set_partition(m_last_part); + ha_innobase::unlock_row(); + update_partition(m_last_part); +} + +/** Write a row in partition. +Stores a row in an InnoDB database, to the table specified in this +handle. +@param[in] part_id Partition to write to. +@param[in] record A row in MySQL format. +@return 0 or error code. */ +int +ha_innopart::write_row_in_part( + uint part_id, + uchar* record) +{ + int error; + Field* saved_next_number_field = table->next_number_field; + DBUG_ENTER("ha_innopart::write_row_in_part"); + set_partition(part_id); + + /* Prevent update_auto_increment to be called + again in ha_innobase::write_row(). */ + + table->next_number_field = NULL; + + /* TODO: try to avoid creating a new dtuple + (in row_get_prebuilt_insert_row()) for each partition). + Might be needed due to ins_node implementation. */ + + error = ha_innobase::write_row(record); + update_partition(part_id); + table->next_number_field = saved_next_number_field; + DBUG_RETURN(error); +} + +/** Update a row in partition. +Updates a row given as a parameter to a new value. +@param[in] part_id Partition to update row in. +@param[in] old_row Old row in MySQL format. +@param[in] new_row New row in MySQL format. +@return 0 or error number. */ +int +ha_innopart::update_row_in_part( + uint part_id, + const uchar* old_row, + uchar* new_row) +{ + int error; + DBUG_ENTER("ha_innopart::update_row_in_part"); + + set_partition(part_id); + error = ha_innobase::update_row(old_row, new_row); + update_partition(part_id); + DBUG_RETURN(error); +} + +/** Deletes a row in partition. +@param[in] part_id Partition to delete from. +@param[in] record Row to delete in MySQL format. +@return 0 or error number. */ +int +ha_innopart::delete_row_in_part( + uint part_id, + const uchar* record) +{ + int error; + DBUG_ENTER("ha_innopart::delete_row_in_part"); + m_err_rec = NULL; + + m_last_part = part_id; + set_partition(part_id); + error = ha_innobase::delete_row(record); + update_partition(part_id); + DBUG_RETURN(error); +} + +/** Initializes a handle to use an index. +@param[in] keynr Key (index) number. +@param[in] sorted True if result MUST be sorted according to index. +@return 0 or error number. 
*/
+int
+ha_innopart::index_init(
+ uint keynr,
+ bool sorted)
+{
+ int error;
+ uint part_id = m_part_info->get_first_used_partition();
+ DBUG_ENTER("ha_innopart::index_init");
+
+ active_index = keynr;
+ if (part_id == MY_BIT_NONE) {
+ DBUG_RETURN(0);
+ }
+
+ error = ph_index_init_setup(keynr, sorted);
+ if (error != 0) {
+ DBUG_RETURN(error);
+ }
+
+ if (sorted) {
+ error = init_record_priority_queue();
+ if (error != 0) {
+ /* Needs cleanup in case it returns error. */
+ destroy_record_priority_queue();
+ DBUG_RETURN(error);
+ }
+ /* Disable prefetch.
+ The prefetch buffer is not partitioning aware, so it may return
+ rows from a different partition if either the prefetch buffer is
+ full, or it is non-empty and the partition is exhausted. */
+ m_prebuilt->m_no_prefetch = true;
+ }
+
+ error = change_active_index(part_id, keynr);
+ if (error != 0) {
+ destroy_record_priority_queue();
+ DBUG_RETURN(error);
+ }
+
+ DBUG_EXECUTE_IF("partition_fail_index_init", {
+ destroy_record_priority_queue();
+ DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ });
+
+ DBUG_RETURN(0);
+}
+
+/** End index cursor.
+@return 0 or error code. */
+int
+ha_innopart::index_end()
+{
+ uint part_id = m_part_info->get_first_used_partition();
+ DBUG_ENTER("ha_innopart::index_end");
+
+ if (part_id == MY_BIT_NONE) {
+ /* Never initialized any index. */
+ DBUG_RETURN(0);
+ }
+ if (m_ordered) {
+ destroy_record_priority_queue();
+ m_prebuilt->m_no_prefetch = false;
+ }
+
+ DBUG_RETURN(ha_innobase::index_end());
+}
+
+/* Partitioning support functions. */
+
+/** Setup the ordered record buffer and the priority queue.
+@param[in] used_parts Number of used partitions in query.
+@return false for success else true. */
+int
+ha_innopart::init_record_priority_queue_for_parts(
+ uint used_parts)
+{
+ size_t alloc_size;
+ void* buf;
+
+ DBUG_ENTER("ha_innopart::init_record_priority_queue_for_parts");
+ ut_ad(used_parts >= 1);
+ /* TODO: Don't use this if only one partition is used! */
+ //ut_ad(used_parts > 1);
+
+ /* We could reuse current m_prebuilt->pcur/clust_pcur for the first
+ used partition, but it would complicate and affect performance,
+ so we trade some extra memory instead. */
+
+ m_pcur = m_prebuilt->pcur;
+ m_clust_pcur = m_prebuilt->clust_pcur;
+
+ /* If we are searching for a secondary key or doing a write/update
+ we will need two pcur, one for the active (secondary) index and
+ one for the clustered index. */
+
+ bool need_clust_index =
+ m_curr_key_info[1] != NULL
+ || get_lock_type() != F_RDLCK;
+
+ /* pcur and clust_pcur per partition.
+ By using zalloc, we do not need to initialize the pcur's! */
+
+ alloc_size = used_parts * sizeof(btr_pcur_t);
+ if (need_clust_index) {
+ alloc_size *= 2;
+ }
+ buf = ut_zalloc(alloc_size, mem_key_partitioning);
+ if (buf == NULL) {
+ DBUG_RETURN(true);
+ }
+ m_pcur_parts = static_cast<btr_pcur_t*>(buf);
+ if (need_clust_index) {
+ m_clust_pcur_parts = &m_pcur_parts[used_parts];
+ }
+ /* mapping from part_id to pcur. */
+ alloc_size = m_tot_parts * sizeof(*m_pcur_map);
+ buf = ut_zalloc(alloc_size, mem_key_partitioning);
+ if (buf == NULL) {
+ DBUG_RETURN(true);
+ }
+ m_pcur_map = static_cast<uint16_t*>(buf);
+ {
+ uint16_t pcur_count = 0;
+ for (uint i = m_part_info->get_first_used_partition();
+ i < m_tot_parts;
+ i = m_part_info->get_next_used_partition(i)) {
+ m_pcur_map[i] = pcur_count++;
+ }
+ }
+
+ DBUG_RETURN(false);
+}
+
+/** Destroy the ordered record buffer and the priority queue.
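init_record_priority_queue_for_parts() above allocates one persistent cursor per used partition and fills m_pcur_map so that a partition id can be translated to its cursor slot. A self-contained sketch of that id-to-slot mapping, with std::vector standing in for the ut_zalloc'd arrays and a plain bool vector standing in for the read_partitions bitmap:

#include <cstddef>
#include <cstdint>
#include <vector>

// Build a partition-id -> cursor-slot map for the used partitions only.
// Unused partitions keep slot 0, mirroring the zero-initialized m_pcur_map.
std::vector<std::uint16_t>
build_pcur_map(const std::vector<bool>& used_partitions)
{
    std::vector<std::uint16_t> pcur_map(used_partitions.size(), 0);
    std::uint16_t slot = 0;
    for (std::size_t part = 0; part < used_partitions.size(); ++part) {
        if (used_partitions[part]) {
            pcur_map[part] = slot++;  // slot-th allocated cursor
        }
    }
    return pcur_map;
}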
*/ +inline +void +ha_innopart::destroy_record_priority_queue_for_parts() +{ + DBUG_ENTER("ha_innopart::destroy_record_priority_queue"); + if (m_pcur_parts != NULL) { + uint used_parts; + used_parts = bitmap_bits_set(&m_part_info->read_partitions); + for (uint i = 0; i < used_parts; i++) { + btr_pcur_free(&m_pcur_parts[i]); + if (m_clust_pcur_parts != NULL) { + btr_pcur_free(&m_clust_pcur_parts[i]); + } + } + ut_free(m_pcur_parts); + m_clust_pcur_parts = NULL; + m_pcur_parts = NULL; + /* Reset the original m_prebuilt->pcur. */ + m_prebuilt->pcur = m_pcur; + m_prebuilt->clust_pcur = m_clust_pcur; + } + if (m_pcur_map != NULL) { + ut_free(m_pcur_map); + m_pcur_map = NULL; + } + DBUG_VOID_RETURN; +} + +/** Print error information. +@param[in] error Error code (MySQL). +@param[in] errflag Flags. */ +void +ha_innopart::print_error( + int error, + myf errflag) +{ + DBUG_ENTER("ha_innopart::print_error"); + if (print_partition_error(error, errflag)) { + ha_innobase::print_error(error, errflag); + } + + DBUG_VOID_RETURN; +} + +/** Can error be ignored. +@param[in] error Error code to check. +@return true if ignorable else false. */ +bool +ha_innopart::is_ignorable_error( + int error) +{ + if (ha_innobase::is_ignorable_error(error) + || error == HA_ERR_NO_PARTITION_FOUND + || error == HA_ERR_NOT_IN_LOCK_PARTITIONS) { + + return(true); + } + return(false); +} + +/** Get the index for the current partition +@param[in] keynr MySQL index number. +@return InnoDB index or NULL. */ +inline +dict_index_t* +ha_innopart::innobase_get_index( + uint keynr) +{ + uint part_id = m_last_part; + if (part_id >= m_tot_parts) { + ut_ad(0); + part_id = 0; + } + return(innopart_get_index(part_id, keynr)); +} + +/** Get the index for a handle. +Does not change active index. +@param[in] keynr Use this index; MAX_KEY means always clustered index, +even if it was internally generated by InnoDB. +@param[in] part_id From this partition. +@return NULL or index instance. */ +inline +dict_index_t* +ha_innopart::innopart_get_index( + uint part_id, + uint keynr) +{ + KEY* key = NULL; + dict_index_t* index = NULL; + + DBUG_ENTER("innopart_get_index"); + + if (keynr != MAX_KEY && table->s->keys > 0) { + key = table->key_info + keynr; + + index = m_part_share->get_index(part_id, keynr); + + if (index != NULL) { + ut_a(ut_strcmp(index->name, key->name) == 0); + } else { + /* Can't find index with keynr in the translation + table. Only print message if the index translation + table exists. */ + + ib::warn() << "InnoDB could not find index " + << (key ? key->name : "NULL") + << " key no " << keynr << " for table " + << m_prebuilt->table->name + << " through its index translation table"; + + index = dict_table_get_index_on_name(m_prebuilt->table, + key->name); + } + } else { + /* Get the generated index. */ + ut_ad(keynr == MAX_KEY); + index = dict_table_get_first_index( + m_part_share->get_table_part(part_id)); + } + + if (index == NULL) { + ib::error() << "InnoDB could not find key n:o " + << keynr << " with name " << (key ? key->name : "NULL") + << " from dict cache for table " + << m_prebuilt->table->name << " partition n:o " + << part_id; + } + + DBUG_RETURN(index); +} + +/** Changes the active index of a handle. +@param[in] part_id Use this partition. +@param[in] keynr Use this index; MAX_KEY means always clustered index, +even if it was internally generated by InnoDB. +@return 0 or error number. 
*/ +int +ha_innopart::change_active_index( + uint part_id, + uint keynr) +{ + DBUG_ENTER("ha_innopart::change_active_index"); + + ut_ad(m_user_thd == ha_thd()); + ut_a(m_prebuilt->trx == thd_to_trx(m_user_thd)); + + active_index = keynr; + set_partition(part_id); + + if (UNIV_UNLIKELY(m_prebuilt->index == NULL)) { + ib::warn() << "change_active_index(" << part_id + << "," << keynr << ") failed"; + m_prebuilt->index_usable = FALSE; + DBUG_RETURN(1); + } + + m_prebuilt->index_usable = row_merge_is_index_usable(m_prebuilt->trx, + m_prebuilt->index); + + if (UNIV_UNLIKELY(!m_prebuilt->index_usable)) { + if (dict_index_is_corrupted(m_prebuilt->index)) { + char table_name[MAX_FULL_NAME_LEN + 1]; + + innobase_format_name( + table_name, sizeof table_name, + m_prebuilt->index->table->name.m_name); + + push_warning_printf( + m_user_thd, Sql_condition::SL_WARNING, + HA_ERR_INDEX_CORRUPT, + "InnoDB: Index %s for table %s is" + " marked as corrupted" + " (partition %u)", + m_prebuilt->index->name(), table_name, part_id); + DBUG_RETURN(HA_ERR_INDEX_CORRUPT); + } else { + push_warning_printf( + m_user_thd, Sql_condition::SL_WARNING, + HA_ERR_TABLE_DEF_CHANGED, + "InnoDB: insufficient history for index %u", + keynr); + } + + /* The caller seems to ignore this. Thus, we must check + this again in row_search_for_mysql(). */ + + DBUG_RETURN(HA_ERR_TABLE_DEF_CHANGED); + } + + ut_a(m_prebuilt->search_tuple != NULL); + + /* If too expensive, cache the keynr and only update search_tuple when + keynr changes. Remember that the clustered index is also used for + MAX_KEY. */ + dtuple_set_n_fields(m_prebuilt->search_tuple, + m_prebuilt->index->n_fields); + + dict_index_copy_types(m_prebuilt->search_tuple, m_prebuilt->index, + m_prebuilt->index->n_fields); + + /* MySQL changes the active index for a handle also during some + queries, for example SELECT MAX(a), SUM(a) first retrieves the + MAX() and then calculates the sum. Previously we played safe + and used the flag ROW_MYSQL_WHOLE_ROW below, but that caused + unnecessary copying. Starting from MySQL-4.1 we use a more + efficient flag here. */ + + /* TODO: Is this really needed? + Will it not be built in index_read? */ + + build_template(false); + + DBUG_RETURN(0); +} + +/** Return first record in index from a partition. +@param[in] part Partition to read from. +@param[out] record First record in index in the partition. +@return error number or 0. */ +int +ha_innopart::index_first_in_part( + uint part, + uchar* record) +{ + int error; + DBUG_ENTER("ha_innopart::index_first_in_part"); + + set_partition(part); + error = ha_innobase::index_first(record); + update_partition(part); + + DBUG_RETURN(error); +} + +/** Return next record in index from a partition. +@param[in] part Partition to read from. +@param[out] record Last record in index in the partition. +@return error number or 0. */ +int +ha_innopart::index_next_in_part( + uint part, + uchar* record) +{ + DBUG_ENTER("ha_innopart::index_next_in_part"); + + int error; + + set_partition(part); + error = ha_innobase::index_next(record); + update_partition(part); + + ut_ad(m_ordered_scan_ongoing + || m_ordered_rec_buffer == NULL + || m_prebuilt->used_in_HANDLER + || m_part_info->num_partitions_used() <= 1); + + DBUG_RETURN(error); +} + +/** Return next same record in index from a partition. +This routine is used to read the next record, but only if the key is +the same as supplied in the call. +@param[in] part Partition to read from. +@param[out] record Last record in index in the partition. 
+@param[in] key Key to match. +@param[in] length Length of key. +@return error number or 0. */ +int +ha_innopart::index_next_same_in_part( + uint part, + uchar* record, + const uchar* key, + uint length) +{ + int error; + + set_partition(part); + error = ha_innobase::index_next_same(record, key, length); + update_partition(part); + return(error); +} + +/** Return last record in index from a partition. +@param[in] part Partition to read from. +@param[out] record Last record in index in the partition. +@return error number or 0. */ +int +ha_innopart::index_last_in_part( + uint part, + uchar* record) +{ + int error; + + set_partition(part); + error = ha_innobase::index_last(record); + update_partition(part); + return(error); +} + +/** Return previous record in index from a partition. +@param[in] part Partition to read from. +@param[out] record Last record in index in the partition. +@return error number or 0. */ +int +ha_innopart::index_prev_in_part( + uint part, + uchar* record) +{ + int error; + + set_partition(part); + error = ha_innobase::index_prev(record); + update_partition(part); + + ut_ad(m_ordered_scan_ongoing + || m_ordered_rec_buffer == NULL + || m_prebuilt->used_in_HANDLER + || m_part_info->num_partitions_used() <= 1); + + return(error); +} + +/** Start index scan and return first record from a partition. +This routine starts an index scan using a start key. The calling +function will check the end key on its own. +@param[in] part Partition to read from. +@param[out] record First matching record in index in the partition. +@param[in] key Key to match. +@param[in] keypart_map Which part of the key to use. +@param[in] find_flag Key condition/direction to use. +@return error number or 0. */ +int +ha_innopart::index_read_map_in_part( + uint part, + uchar* record, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) +{ + int error; + + set_partition(part); + error = ha_innobase::index_read_map( + record, + key, + keypart_map, + find_flag); + update_partition(part); + return(error); +} + +/** Start index scan and return first record from a partition. +This routine starts an index scan using a start key. The calling +function will check the end key on its own. +@param[in] part Partition to read from. +@param[out] record First matching record in index in the partition. +@param[in] index Index to read from. +@param[in] key Key to match. +@param[in] keypart_map Which part of the key to use. +@param[in] find_flag Key condition/direction to use. +@return error number or 0. */ +int +ha_innopart::index_read_idx_map_in_part( + uint part, + uchar* record, + uint index, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) +{ + int error; + + set_partition(part); + error = ha_innobase::index_read_idx_map( + record, + index, + key, + keypart_map, + find_flag); + update_partition(part); + return(error); +} + +/** Return last matching record in index from a partition. +@param[in] part Partition to read from. +@param[out] record Last matching record in index in the partition. +@param[in] key Key to match. +@param[in] keypart_map Which part of the key to use. +@return error number or 0. */ +int +ha_innopart::index_read_last_map_in_part( + uint part, + uchar* record, + const uchar* key, + key_part_map keypart_map) +{ + int error; + set_partition(part); + error = ha_innobase::index_read_last_map(record, key, keypart_map); + update_partition(part); + return(error); +} + +/** Start index scan and return first record from a partition. 
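The per-partition index reads above feed the ordered-scan machinery set up in index_init(): every used partition returns rows in index order, and the priority queue always hands back the smallest current head. A self-contained sketch of that k-way merge idea, with plain ints standing in for index keys and nothing taken from the InnoDB sources:

#include <cstddef>
#include <queue>
#include <vector>

// Each inner vector plays the role of one partition returning rows in key
// order; the priority queue merges them into one globally ordered stream.
struct Head {
    int key;                // stand-in for the index key
    std::size_t part;       // partition the row came from
    std::size_t next;       // next row offset within that partition
};
struct HeadGreater {
    bool operator()(const Head& a, const Head& b) const {
        return a.key > b.key;
    }
};

std::vector<int>
ordered_scan(const std::vector<std::vector<int> >& parts)
{
    std::priority_queue<Head, std::vector<Head>, HeadGreater> pq;
    for (std::size_t p = 0; p < parts.size(); ++p) {
        if (!parts[p].empty()) {
            pq.push(Head{parts[p][0], p, 1});
        }
    }
    std::vector<int> out;
    while (!pq.empty()) {
        Head h = pq.top();
        pq.pop();
        out.push_back(h.key);
        if (h.next < parts[h.part].size()) {
            pq.push(Head{parts[h.part][h.next], h.part, h.next + 1});
        }
    }
    return out;
}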
+This routine starts an index scan using a start and end key. +@param[in] part Partition to read from. +@param[in,out] record First matching record in index in the partition, +if NULL use table->record[0] as return buffer. +@param[in] start_key Start key to match. +@param[in] end_key End key to match. +@param[in] eq_range Is equal range, start_key == end_key. +@param[in] sorted Return rows in sorted order. +@return error number or 0. */ +int +ha_innopart::read_range_first_in_part( + uint part, + uchar* record, + const key_range* start_key, + const key_range* end_key, + bool eq_range, + bool sorted) +{ + int error; + uchar* read_record = record; + set_partition(part); + if (read_record == NULL) { + read_record = table->record[0]; + } + if (m_start_key.key != NULL) { + error = ha_innobase::index_read( + read_record, + m_start_key.key, + m_start_key.length, + m_start_key.flag); + } else { + error = ha_innobase::index_first(read_record); + } + if (error == HA_ERR_KEY_NOT_FOUND) { + error = HA_ERR_END_OF_FILE; + } else if (error == 0 && !in_range_check_pushed_down) { + /* compare_key uses table->record[0], so we + need to copy the data if not already there. */ + + if (record != NULL) { + copy_cached_row(table->record[0], read_record); + } + if (compare_key(end_range) > 0) { + /* must use ha_innobase:: due to set/update_partition + could overwrite states if ha_innopart::unlock_row() + was used. */ + ha_innobase::unlock_row(); + error = HA_ERR_END_OF_FILE; + } + } + update_partition(part); + return(error); +} + +/** Return next record in index range scan from a partition. +@param[in] part Partition to read from. +@param[in,out] record First matching record in index in the partition, +if NULL use table->record[0] as return buffer. +@return error number or 0. */ +int +ha_innopart::read_range_next_in_part( + uint part, + uchar* record) +{ + int error; + uchar* read_record = record; + + set_partition(part); + if (read_record == NULL) { + read_record = table->record[0]; + } + + /* TODO: Implement ha_innobase::read_range*? + So it will return HA_ERR_END_OF_FILE or + HA_ERR_KEY_NOT_FOUND when passing end_range. */ + + error = ha_innobase::index_next(read_record); + if (error == 0 && !in_range_check_pushed_down) { + /* compare_key uses table->record[0], so we + need to copy the data if not already there. */ + + if (record != NULL) { + copy_cached_row(table->record[0], read_record); + } + if (compare_key(end_range) > 0) { + /* must use ha_innobase:: due to set/update_partition + could overwrite states if ha_innopart::unlock_row() + was used. */ + ha_innobase::unlock_row(); + error = HA_ERR_END_OF_FILE; + } + } + update_partition(part); + + return(error); +} + +/** Initialize a table scan in a specific partition. +@param[in] part_id Partition to initialize. +@param[in] scan True if table/index scan false otherwise (for rnd_pos) +@return 0 or error number. */ +int +ha_innopart::rnd_init_in_part( + uint part_id, + bool scan) +{ + int err; + + if (m_prebuilt->clust_index_was_generated) { + err = change_active_index(part_id, MAX_KEY); + } else { + err = change_active_index(part_id, m_primary_key); + } + + m_start_of_scan = 1; + + /* Don't use semi-consistent read in random row reads (by position). + This means we must disable semi_consistent_read if scan is false. */ + + if (!scan) { + try_semi_consistent_read(false); + } + + return(err); +} + +/** Ends a table scan. +@param[in] part_id Partition to end table scan in. +@param[in] scan True for scan else random access. +@return 0 or error number. 
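read_range_first_in_part() and read_range_next_in_part() above both end a scan by comparing the fetched row against end_range and translating "past the end key" into an end-of-file condition rather than an error. A toy sketch of that check, with integer keys and an invented return code:

#include <cstddef>
#include <vector>

// -1 plays the role of HA_ERR_END_OF_FILE in this sketch.
const int SKETCH_EOF = -1;

// Fetch the next row at or before end_key; stop once the key passes it.
int read_range_next(const std::vector<int>& rows_in_key_order,
                    std::size_t& pos, int end_key, int* out)
{
    if (pos >= rows_in_key_order.size()) {
        return SKETCH_EOF;          // no more rows in this partition
    }
    int key = rows_in_key_order[pos++];
    if (key > end_key) {
        return SKETCH_EOF;          // corresponds to compare_key() > 0
    }
    *out = key;
    return 0;
}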
*/ +int +ha_innopart::rnd_end_in_part( + uint part_id, + bool scan) +{ + return(index_end()); +} + +/** Read next row in partition. +Reads the next row in a table scan (also used to read the FIRST row +in a table scan). +@param[in] part_id Partition to end table scan in. +@param[out] buf Returns the row in this buffer, in MySQL format. +@return 0, HA_ERR_END_OF_FILE or error number. */ +int +ha_innopart::rnd_next_in_part( + uint part_id, + uchar* buf) +{ + int error; + + DBUG_ENTER("ha_innopart::rnd_next_in_part"); + + set_partition(part_id); + if (m_start_of_scan) { + error = ha_innobase::index_first(buf); + + if (error == HA_ERR_KEY_NOT_FOUND) { + error = HA_ERR_END_OF_FILE; + } + m_start_of_scan = 0; + } else { + ha_statistic_increment(&SSV::ha_read_rnd_next_count); + error = ha_innobase::general_fetch(buf, ROW_SEL_NEXT, 0); + } + + update_partition(part_id); + DBUG_RETURN(error); +} + +/** Get a row from a position. +Fetches a row from the table based on a row reference. +@param[out] buf Returns the row in this buffer, in MySQL format. +@param[in] pos Position, given as primary key value or DB_ROW_ID +(if no primary key) of the row in MySQL format. The length of data in pos has +to be ref_length. +@return 0, HA_ERR_KEY_NOT_FOUND or error code. */ +int +ha_innopart::rnd_pos( + uchar* buf, + uchar* pos) +{ + int error; + uint part_id; + DBUG_ENTER("ha_innopart::rnd_pos"); + ut_ad(PARTITION_BYTES_IN_POS == 2); + DBUG_DUMP("pos", pos, ref_length); + + ha_statistic_increment(&SSV::ha_read_rnd_count); + + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); + + /* Restore used partition. */ + part_id = uint2korr(pos); + + set_partition(part_id); + + /* Note that we assume the length of the row reference is fixed + for the table, and it is == ref_length. */ + + error = ha_innobase::index_read(buf, pos + PARTITION_BYTES_IN_POS, + ref_length - PARTITION_BYTES_IN_POS, + HA_READ_KEY_EXACT); + DBUG_PRINT("info", ("part %u index_read returned %d", part_id, error)); + DBUG_DUMP("buf", buf, table_share->reclength); + + update_partition(part_id); + + DBUG_RETURN(error); +} + +/** Return position for cursor in last used partition. +Stores a reference to the current row to 'ref' field of the handle. Note +that in the case where we have generated the clustered index for the +table, the function parameter is illogical: we MUST ASSUME that 'record' +is the current 'position' of the handle, because if row ref is actually +the row id internally generated in InnoDB, then 'record' does not contain +it. We just guess that the row id must be for the record where the handle +was positioned the last time. +@param[out] ref_arg Pointer to buffer where to write the position. +@param[in] record Record to position for. */ +void +ha_innopart::position_in_last_part( + uchar* ref_arg, + const uchar* record) +{ + if (m_prebuilt->clust_index_was_generated) { + /* No primary key was defined for the table and we + generated the clustered index from row id: the + row reference will be the row id, not any key value + that MySQL knows of. */ + + memcpy(ref_arg, m_prebuilt->row_id, DATA_ROW_ID_LEN); + } else { + + /* Copy primary key as the row reference */ + KEY* key_info = table->key_info + m_primary_key; + key_copy(ref_arg, (uchar*)record, key_info, + key_info->key_length); + } +} + +/** Fill in data_dir_path and tablespace name from internal data +dictionary. +@param part_elem Partition element to fill. +@param ib_table InnoDB table to copy from. 
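rnd_pos() and position_in_last_part() above rely on a row reference that starts with a two-byte partition id (PARTITION_BYTES_IN_POS, read back with uint2korr) followed by the primary key or internal row id. A standalone sketch of packing and unpacking such a reference, assuming the same little-endian two-byte prefix:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Pack: 2-byte little-endian partition id, then the key/row-id bytes.
void pack_ref(std::uint8_t* ref, std::uint16_t part_id,
              const std::uint8_t* key, std::size_t key_len)
{
    ref[0] = static_cast<std::uint8_t>(part_id & 0xFF);   // low byte first
    ref[1] = static_cast<std::uint8_t>(part_id >> 8);
    std::memcpy(ref + 2, key, key_len);
}

// Unpack just the partition id, as rnd_pos() does before positioning.
std::uint16_t unpack_part_id(const std::uint8_t* ref)
{
    return static_cast<std::uint16_t>(ref[0] | (ref[1] << 8));
}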
*/
+void
+ha_innopart::update_part_elem(
+ partition_element* part_elem,
+ dict_table_t* ib_table)
+{
+ dict_get_and_save_data_dir_path(ib_table, false);
+ if (ib_table->data_dir_path != NULL) {
+ if (part_elem->data_file_name == NULL
+ || strcmp(ib_table->data_dir_path,
+ part_elem->data_file_name) != 0) {
+
+ /* Play safe and allocate memory from TABLE and copy
+ instead of exposing the internal data dictionary. */
+ part_elem->data_file_name =
+ strdup_root(&table->mem_root,
+ ib_table->data_dir_path);
+ }
+ } else {
+ part_elem->data_file_name = NULL;
+ }
+
+ part_elem->index_file_name = NULL;
+ dict_get_and_save_space_name(ib_table, false);
+ if (ib_table->tablespace != NULL) {
+ ut_ad(part_elem->tablespace_name == NULL
+ || 0 == strcmp(part_elem->tablespace_name,
+ ib_table->tablespace));
+ if (part_elem->tablespace_name == NULL
+ || strcmp(ib_table->tablespace,
+ part_elem->tablespace_name) != 0) {
+
+ /* Play safe and allocate memory from TABLE and copy
+ instead of exposing the internal data dictionary. */
+ part_elem->tablespace_name =
+ strdup_root(&table->mem_root,
+ ib_table->tablespace);
+ }
+ }
+}
+
+/** Update create_info.
+Used in SHOW CREATE TABLE et al.
+@param[in,out] create_info Create info to update. */
+void
+ha_innopart::update_create_info(
+ HA_CREATE_INFO* create_info)
+{
+ uint num_subparts = m_part_info->num_subparts;
+ uint num_parts;
+ uint part;
+ dict_table_t* table;
+ List_iterator<partition_element>
+ part_it(m_part_info->partitions);
+ partition_element* part_elem;
+ partition_element* sub_elem;
+ DBUG_ENTER("ha_innopart::update_create_info");
+ if ((create_info->used_fields & HA_CREATE_USED_AUTO) == 0) {
+ info(HA_STATUS_AUTO);
+ create_info->auto_increment_value = stats.auto_increment_value;
+ }
+
+ num_parts = (num_subparts != 0) ? m_tot_parts / num_subparts : m_tot_parts;
+
+ /* DATA/INDEX DIRECTORY are never applied to the whole partitioned
+ table, only to its parts. */
+
+ create_info->data_file_name = NULL;
+ create_info->index_file_name = NULL;
+
+ /* Since update_create_info() can be called from
+ mysql_prepare_alter_table() when not all partitions are set up,
+ we look for that condition first.
+ If all partitions are not available then simply return,
+ since it does not need any updated partitioning info. */
+
+ if (!m_part_info->temp_partitions.is_empty()) {
+ DBUG_VOID_RETURN;
+ }
+ part = 0;
+ while ((part_elem = part_it++)) {
+ if (part >= num_parts) {
+ DBUG_VOID_RETURN;
+ }
+ if (m_part_info->is_sub_partitioned()) {
+ List_iterator<partition_element>
+ subpart_it(part_elem->subpartitions);
+ uint subpart = 0;
+ while ((sub_elem = subpart_it++)) {
+ if (subpart >= num_subparts) {
+ DBUG_VOID_RETURN;
+ }
+ subpart++;
+ }
+ if (subpart != num_subparts) {
+ DBUG_VOID_RETURN;
+ }
+ }
+ part++;
+ }
+ if (part != num_parts) {
+ DBUG_VOID_RETURN;
+ }
+
+ /* part_elem->data_file_name and tablespace_name should be correct from
+ the .frm, but may have been changed, so update from SYS_DATAFILES.
+ index_file_name is ignored, so remove it. */
+
+ part = 0;
+ part_it.rewind();
+ while ((part_elem = part_it++)) {
+ if (m_part_info->is_sub_partitioned()) {
+ List_iterator<partition_element>
+ subpart_it(part_elem->subpartitions);
+ while ((sub_elem = subpart_it++)) {
+ table = m_part_share->get_table_part(part++);
+ update_part_elem(sub_elem, table);
+ }
+ } else {
+ table = m_part_share->get_table_part(part++);
+ update_part_elem(part_elem, table);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+/** Set create_info->data_file_name.
+@param[in] part_elem Partition to copy from.
+@param[in,out] info Create info to set.
*/ +static +void +set_create_info_dir( + partition_element* part_elem, + HA_CREATE_INFO* info) +{ + if (part_elem->data_file_name != NULL + && part_elem->data_file_name[0] != '\0') { + info->data_file_name = part_elem->data_file_name; + /* Also implies non-default tablespace. */ + info->tablespace = NULL; + } + if (part_elem->index_file_name != NULL + && part_elem->index_file_name[0] != '\0') { + info->index_file_name = part_elem->index_file_name; + } + if (part_elem->tablespace_name != NULL + && part_elem->tablespace_name[0] != '\0') { + info->tablespace = part_elem->tablespace_name; + } +} + +/** Set flags and append '/' to remote path if necessary. */ +void +create_table_info_t::set_remote_path_flags() +{ + if (m_remote_path[0] != '\0') { + ut_ad(DICT_TF_HAS_DATA_DIR(m_flags) != 0); + + /* os_file_make_remote_pathname will truncate + everything after the last '/', so append '/' + if it is not the last character. */ + + size_t len = strlen(m_remote_path); + if (m_remote_path[len - 1] != OS_PATH_SEPARATOR) { + m_remote_path[len] = OS_PATH_SEPARATOR; + m_remote_path[len + 1] = '\0'; + } + } else { + ut_ad(DICT_TF_HAS_DATA_DIR(m_flags) == 0); + } +} + +/** Creates a new table to an InnoDB database. +@param[in] name Table name (in filesystem charset). +@param[in] form MySQL Table containing information of +partitions, columns and indexes etc. +@param[in] create_info Additional create information, like +create statement string. +@return 0 or error number. */ +int +ha_innopart::create( + const char* name, + TABLE* form, + HA_CREATE_INFO* create_info) +{ + int error; + /** {database}/{tablename} */ + char table_name[FN_REFLEN]; + /** absolute path of temp frm */ + char temp_path[FN_REFLEN]; + /** absolute path of table */ + char remote_path[FN_REFLEN]; + char partition_name[FN_REFLEN]; + char tablespace_name[NAME_LEN + 1]; + char* table_name_end; + size_t table_name_len; + char* partition_name_start; + char table_data_file_name[FN_REFLEN]; + char table_level_tablespace_name[NAME_LEN + 1]; + const char* index_file_name; + size_t len; + + create_table_info_t info(ha_thd(), + form, + create_info, + table_name, + temp_path, + remote_path, + tablespace_name); + + DBUG_ENTER("ha_innopart::create"); + ut_ad(create_info != NULL); + ut_ad(m_part_info == form->part_info); + ut_ad(table_share != NULL); + + /* Not allowed to create temporary partitioned tables. */ + if (create_info != NULL + && (create_info->options & HA_LEX_CREATE_TMP_TABLE) != 0) { + my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); + ut_ad(0); // Can we support partitioned temporary tables? + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + + error = info.initialize(); + if (error != 0) { + DBUG_RETURN(error); + } + + /* Setup and check table level options. */ + error = info.prepare_create_table(name); + if (error != 0) { + DBUG_RETURN(error); + } + ut_ad(temp_path[0] == '\0'); + strcpy(partition_name, table_name); + partition_name_start = partition_name + strlen(partition_name); + table_name_len = strlen(table_name); + table_name_end = table_name + table_name_len; + if (create_info->data_file_name != NULL) { + /* Strip the tablename from the path. 
*/
+ strncpy(table_data_file_name, create_info->data_file_name,
+ FN_REFLEN-1);
+ table_data_file_name[FN_REFLEN - 1] = '\0';
+ char* ptr = strrchr(table_data_file_name, OS_PATH_SEPARATOR);
+ ut_ad(ptr != NULL);
+ if (ptr != NULL) {
+ ptr++;
+ *ptr = '\0';
+ create_info->data_file_name = table_data_file_name;
+ }
+ } else {
+ table_data_file_name[0] = '\0';
+ }
+ index_file_name = create_info->index_file_name;
+ if (create_info->tablespace != NULL) {
+ strcpy(table_level_tablespace_name, create_info->tablespace);
+ } else {
+ table_level_tablespace_name[0] = '\0';
+ }
+
+ info.allocate_trx();
+
+ /* Latch the InnoDB data dictionary exclusively so that no deadlocks
+ or lock waits can happen in it during a table create operation.
+ Drop table etc. do this latching in row0mysql.cc. */
+
+ row_mysql_lock_data_dictionary(info.trx());
+
+ /* TODO: use the new DD tables instead to decrease duplicate info. */
+ List_iterator_fast<partition_element>
+ part_it(form->part_info->partitions);
+ partition_element* part_elem;
+ while ((part_elem = part_it++)) {
+ /* Append the partition name to the table name. */
+ len = Ha_innopart_share::append_sep_and_name(
+ partition_name_start,
+ part_elem->partition_name,
+ part_sep,
+ FN_REFLEN - table_name_len);
+ if ((table_name_len + len) >= FN_REFLEN) {
+ ut_ad(0);
+ goto cleanup;
+ }
+
+ /* Override table level DATA/INDEX DIRECTORY. */
+ set_create_info_dir(part_elem, create_info);
+
+ if (!form->part_info->is_sub_partitioned()) {
+ error = info.prepare_create_table(partition_name);
+ if (error != 0) {
+ goto cleanup;
+ }
+ info.set_remote_path_flags();
+ error = info.create_table();
+ if (error != 0) {
+ goto cleanup;
+ }
+ } else {
+ size_t part_name_len = strlen(partition_name_start)
+ + table_name_len;
+ char* part_name_end = partition_name + part_name_len;
+ List_iterator_fast<partition_element>
+ sub_it(part_elem->subpartitions);
+ partition_element* sub_elem;
+
+ while ((sub_elem = sub_it++)) {
+ ut_ad(sub_elem->partition_name != NULL);
+
+ /* 'table' will be
+ <name>#P#<part_name>#SP#<subpart_name>.
+ Append the sub-partition name to
+ the partition name. */
+
+ len = Ha_innopart_share::append_sep_and_name(
+ part_name_end,
+ sub_elem->partition_name,
+ sub_sep,
+ FN_REFLEN - part_name_len);
+ if ((len + part_name_len) >= FN_REFLEN) {
+ ut_ad(0);
+ goto cleanup;
+ }
+ /* Override part level DATA/INDEX DIRECTORY. */
+ set_create_info_dir(sub_elem, create_info);
+
+ Ha_innopart_share::partition_name_casedn_str(
+ part_name_end + 4);
+ error = info.prepare_create_table(partition_name);
+ if (error != 0) {
+ goto cleanup;
+ }
+ info.set_remote_path_flags();
+ error = info.create_table();
+ if (error != 0) {
+ goto cleanup;
+ }
+
+ /* Reset partition level
+ DATA/INDEX DIRECTORY. */
+
+ create_info->data_file_name =
+ table_data_file_name;
+ create_info->index_file_name =
+ index_file_name;
+ create_info->tablespace =
+ table_level_tablespace_name;
+ set_create_info_dir(part_elem, create_info);
+ }
+ }
+ /* Reset table level DATA/INDEX DIRECTORY. */
+ create_info->data_file_name = table_data_file_name;
+ create_info->index_file_name = index_file_name;
+ create_info->tablespace = table_level_tablespace_name;
+ }
+
+ innobase_commit_low(info.trx());
+
+ row_mysql_unlock_data_dictionary(info.trx());
+
+ /* Flush the log to reduce probability that the .frm files and
+ the InnoDB data dictionary get out-of-sync if the user runs
+ with innodb_flush_log_at_trx_commit = 0. */
+
+ log_buffer_flush_to_disk();
+
+ part_it.rewind();
+ /* No need to use these now, only table_name will be used.
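create() above builds one InnoDB table name per (sub)partition by appending separator and name to the base table name through Ha_innopart_share::append_sep_and_name(), rejecting names that would not fit in FN_REFLEN. A rough std::string sketch of that composition and bounds check; the "#P#" and "#SP#" literals are taken from the comment above, and the exact on-disk casing is handled by partition_name_casedn_str() in the patch, not by this sketch:

#include <string>

// Compose "<table>#P#<part>" or "<table>#P#<part>#SP#<subpart>" and report
// whether it fits within max_len (standing in for FN_REFLEN).
bool compose_partition_name(std::string* out,
                            const std::string& table_name,
                            const std::string& part_name,
                            const std::string& subpart_name,
                            std::string::size_type max_len)
{
    *out = table_name + "#P#" + part_name;
    if (!subpart_name.empty()) {
        *out += "#SP#" + subpart_name;
    }
    return out->size() < max_len;   // caller must refuse over-long names
}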
*/ + create_info->data_file_name = NULL; + create_info->index_file_name = NULL; + while ((part_elem = part_it++)) { + Ha_innopart_share::append_sep_and_name( + table_name_end, + part_elem->partition_name, + part_sep, + FN_REFLEN - table_name_len); + if (!form->part_info->is_sub_partitioned()) { + error = info.create_table_update_dict(); + if (error != 0) { + ut_ad(0); + goto end; + } + } else { + size_t part_name_len = strlen(table_name_end); + char* part_name_end = table_name_end + part_name_len; + List_iterator_fast + sub_it(part_elem->subpartitions); + partition_element* sub_elem; + while ((sub_elem = sub_it++)) { + Ha_innopart_share::append_sep_and_name( + part_name_end, + sub_elem->partition_name, + sub_sep, + FN_REFLEN - table_name_len + - part_name_len); + error = info.create_table_update_dict(); + if (error != 0) { + ut_ad(0); + goto end; + } + } + } + } + +end: + /* Tell the InnoDB server that there might be work for + utility threads: */ + + srv_active_wake_master_thread(); + + trx_free_for_mysql(info.trx()); + + DBUG_RETURN(error); + +cleanup: + trx_rollback_for_mysql(info.trx()); + + row_mysql_unlock_data_dictionary(info.trx()); + + trx_free_for_mysql(info.trx()); + + DBUG_RETURN(error); +} + +/** Discards or imports an InnoDB tablespace. +@param[in] discard True if discard, else import. +@return 0 or error number. */ +int +ha_innopart::discard_or_import_tablespace( + my_bool discard) +{ + int error = 0; + uint i; + DBUG_ENTER("ha_innopart::discard_or_import_tablespace"); + + for (i= m_part_info->get_first_used_partition(); + i < m_tot_parts; + i= m_part_info->get_next_used_partition(i)) { + + m_prebuilt->table = m_part_share->get_table_part(i); + error= ha_innobase::discard_or_import_tablespace(discard); + if (error != 0) { + break; + } + } + m_prebuilt->table = m_part_share->get_table_part(0); + + /* IMPORT/DISCARD also means resetting auto_increment. Make sure + that auto_increment initialization is done after all partitions + are imported. */ + if (table->found_next_number_field != NULL) { + lock_auto_increment(); + m_part_share->next_auto_inc_val = 0; + m_part_share->auto_inc_initialized = false; + unlock_auto_increment(); + } + + DBUG_RETURN(error); +} + +/** Compare key and rowid. +Helper function for sorting records in the priority queue. +a/b points to table->record[0] rows which must have the +key fields set. The bytes before a and b store the rowid. +This is used for comparing/sorting rows first according to +KEY and if same KEY, by rowid (ref). +@param[in] key_info Null terminated array of index information. +@param[in] a Pointer to record+ref in first record. +@param[in] b Pointer to record+ref in second record. +@return Return value is SIGN(first_rec - second_rec) +@retval 0 Keys are equal. +@retval -1 second_rec is greater than first_rec. +@retval +1 first_rec is greater than second_rec. */ +int +ha_innopart::key_and_rowid_cmp( + KEY** key_info, + uchar *a, + uchar *b) +{ + int cmp = key_rec_cmp(key_info, a, b); + if (cmp != 0) { + return(cmp); + } + + /* We must compare by rowid, which is added before the record, + in the priority queue. */ + + return(memcmp(a - DATA_ROW_ID_LEN, b - DATA_ROW_ID_LEN, + DATA_ROW_ID_LEN)); +} + +/** Extra hints from MySQL. +@param[in] operation Operation hint. +@return 0 or error number. */ +int +ha_innopart::extra( + enum ha_extra_function operation) +{ + if (operation == HA_EXTRA_SECONDARY_SORT_ROWID) { + /* index_init(sorted=true) must have been called! 
*/ + ut_ad(m_ordered); + ut_ad(m_ordered_rec_buffer != NULL); + /* No index_read call must have been done! */ + ut_ad(m_queue->empty()); + + /* If not PK is set as secondary sort, do secondary sort by + rowid/ref. */ + + ut_ad(m_curr_key_info[1] != NULL + || m_prebuilt->clust_index_was_generated != 0 + || m_curr_key_info[0] + == table->key_info + table->s->primary_key); + + if (m_curr_key_info[1] == NULL + && m_prebuilt->clust_index_was_generated) { + m_ref_usage = Partition_helper::REF_USED_FOR_SORT; + m_queue->m_fun = key_and_rowid_cmp; + } + return(0); + } + return(ha_innobase::extra(operation)); +} + +/** Delete all rows in a partition. +@return 0 or error number. */ +int +ha_innopart::truncate_partition_low() +{ + return(truncate()); +} + +/** Deletes all rows of a partitioned InnoDB table. +@return 0 or error number. */ +int +ha_innopart::truncate() +{ + dberr_t err = DB_SUCCESS; + int error; + + DBUG_ENTER("ha_innopart::truncate"); + + if (high_level_read_only) { + DBUG_RETURN(HA_ERR_TABLE_READONLY); + } + + /* TRUNCATE also means resetting auto_increment. Hence, reset + it so that it will be initialized again at the next use. */ + + if (table->found_next_number_field != NULL) { + lock_auto_increment(); + m_part_share->next_auto_inc_val= 0; + m_part_share->auto_inc_initialized= false; + unlock_auto_increment(); + } + + /* Get the transaction associated with the current thd, or create one + if not yet created, and update m_prebuilt->trx. */ + + update_thd(ha_thd()); + + if (!trx_is_started(m_prebuilt->trx)) { + ++m_prebuilt->trx->will_lock; + } + /* Truncate the table in InnoDB. */ + + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + set_partition(i); + err = row_truncate_table_for_mysql(m_prebuilt->table, + m_prebuilt->trx); + update_partition(i); + if (err != DB_SUCCESS) { + break; + } + } + + switch (err) { + + case DB_TABLESPACE_DELETED: + case DB_TABLESPACE_NOT_FOUND: + ib_senderrf( + m_prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR, + (err == DB_TABLESPACE_DELETED ? + ER_TABLESPACE_DISCARDED : ER_TABLESPACE_MISSING), + table->s->table_name.str); + table->status = STATUS_NOT_FOUND; + error = HA_ERR_NO_SUCH_TABLE; + break; + + default: + error = convert_error_code_to_mysql( + err, m_prebuilt->table->flags, + m_prebuilt->trx->mysql_thd); + table->status = STATUS_NOT_FOUND; + break; + } + DBUG_RETURN(error); +} + +/** Total number of rows in all used partitions. +Returns the exact number of records that this client can see using this +handler object. +@param[out] num_rows Number of rows. +@return 0 or error number. */ +int +ha_innopart::records( + ha_rows* num_rows) +{ + ha_rows n_rows; + int err; + DBUG_ENTER("ha_innopart::records()"); + + *num_rows = 0; + + /* The index scan is probably so expensive, so the overhead + of the rest of the function is neglectable for each partition. + So no current reason for optimizing this further. */ + + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + set_partition(i); + err = ha_innobase::records(&n_rows); + update_partition(i); + if (err != 0) { + *num_rows = HA_POS_ERROR; + DBUG_RETURN(err); + } + *num_rows += n_rows; + } + DBUG_RETURN(0); +} + +/** Estimates the number of index records in a range. +@param[in] keynr Index number. +@param[in] min_key Start key value (or NULL). +@param[in] max_key End key value (or NULL). +@return estimated number of rows. 
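records() above asks every used partition for its exact row count and sums the results, aborting on the first error so the caller can report HA_POS_ERROR. A minimal sketch of that aggregation, with a callback standing in for the per-partition ha_innobase::records() call:

#include <cstddef>
#include <vector>

// Sum exact per-partition counts; any per-partition error aborts the total.
int total_records(const std::vector<bool>& used_partitions,
                  int (*count_rows)(std::size_t part,
                                    unsigned long long* n_rows),
                  unsigned long long* total)
{
    *total = 0;
    for (std::size_t part = 0; part < used_partitions.size(); ++part) {
        if (!used_partitions[part]) {
            continue;               // pruned partition, skip it
        }
        unsigned long long n_rows = 0;
        int err = count_rows(part, &n_rows);
        if (err != 0) {
            return err;             // caller reports HA_POS_ERROR
        }
        *total += n_rows;
    }
    return 0;
}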
*/ +ha_rows +ha_innopart::records_in_range( + uint keynr, + key_range* min_key, + key_range* max_key) +{ + KEY* key; + dict_index_t* index; + dtuple_t* range_start; + dtuple_t* range_end; + int64_t n_rows = 0; + page_cur_mode_t mode1; + page_cur_mode_t mode2; + mem_heap_t* heap; + uint part_id; + + DBUG_ENTER("ha_innopart::records_in_range"); + DBUG_PRINT("info", ("keynr %u min %p max %p", keynr, min_key, max_key)); + + ut_a(m_prebuilt->trx == thd_to_trx(ha_thd())); + + m_prebuilt->trx->op_info = (char*)"estimating records in index range"; + + /* In case MySQL calls this in the middle of a SELECT query, release + possible adaptive hash latch to avoid deadlocks of threads. */ + + trx_search_latch_release_if_reserved(m_prebuilt->trx); + + active_index = keynr; + + key = table->key_info + active_index; + + part_id = m_part_info->get_first_used_partition(); + if (part_id == MY_BIT_NONE) { + DBUG_RETURN(0); + } + /* This also sets m_prebuilt->index! */ + set_partition(part_id); + index = m_prebuilt->index; + + /* Only validate the first partition, to avoid too much overhead. */ + + /* There exists possibility of not being able to find requested + index due to inconsistency between MySQL and InoDB dictionary info. + Necessary message should have been printed in innopart_get_index(). */ + if (index == NULL + || dict_table_is_discarded(m_prebuilt->table) + || dict_index_is_corrupted(index) + || !row_merge_is_index_usable(m_prebuilt->trx, index)) { + + n_rows = HA_POS_ERROR; + goto func_exit; + } + + heap = mem_heap_create(2 * (key->actual_key_parts * sizeof(dfield_t) + + sizeof(dtuple_t))); + + range_start = dtuple_create(heap, key->actual_key_parts); + dict_index_copy_types(range_start, index, key->actual_key_parts); + + range_end = dtuple_create(heap, key->actual_key_parts); + dict_index_copy_types(range_end, index, key->actual_key_parts); + + row_sel_convert_mysql_key_to_innobase( + range_start, + m_prebuilt->srch_key_val1, + m_prebuilt->srch_key_val_len, + index, + (byte*) (min_key ? min_key->key : (const uchar*) 0), + (ulint) (min_key ? min_key->length : 0), + m_prebuilt->trx); + + ut_ad(min_key != NULL + ? range_start->n_fields > 0 + : range_start->n_fields == 0); + + row_sel_convert_mysql_key_to_innobase( + range_end, + m_prebuilt->srch_key_val2, + m_prebuilt->srch_key_val_len, + index, + (byte*) (max_key != NULL ? max_key->key : (const uchar*) 0), + (ulint) (max_key != NULL ? max_key->length : 0), + m_prebuilt->trx); + + ut_ad(max_key != NULL + ? range_end->n_fields > 0 + : range_end->n_fields == 0); + + mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag : + HA_READ_KEY_EXACT); + mode2 = convert_search_mode_to_innobase(max_key ? 
max_key->flag : + HA_READ_KEY_EXACT); + + if (mode1 != PAGE_CUR_UNSUPP && mode2 != PAGE_CUR_UNSUPP) { + + n_rows = btr_estimate_n_rows_in_range(index, range_start, + mode1, range_end, + mode2); + DBUG_PRINT("info", ("part_id %u rows %ld", part_id, + (long int) n_rows)); + for (part_id = m_part_info->get_next_used_partition(part_id); + part_id < m_tot_parts; + part_id = m_part_info->get_next_used_partition(part_id)) { + + index = m_part_share->get_index(part_id, keynr); + int64_t n = btr_estimate_n_rows_in_range(index, + range_start, + mode1, + range_end, + mode2); + n_rows += n; + DBUG_PRINT("info", ("part_id %u rows %ld (%ld)", + part_id, + (long int) n, + (long int) n_rows)); + } + } else { + + n_rows = HA_POS_ERROR; + } + + mem_heap_free(heap); + +func_exit: + + m_prebuilt->trx->op_info = (char*)""; + + /* The MySQL optimizer seems to believe an estimate of 0 rows is + always accurate and may return the result 'Empty set' based on that. + The accuracy is not guaranteed, and even if it were, for a locking + read we should anyway perform the search to set the next-key lock. + Add 1 to the value to make sure MySQL does not make the assumption! */ + + if (n_rows == 0) { + n_rows = 1; + } + + DBUG_RETURN((ha_rows) n_rows); +} + +/** Gives an UPPER BOUND to the number of rows in a table. +This is used in filesort.cc. +@return upper bound of rows. */ +ha_rows +ha_innopart::estimate_rows_upper_bound() +{ + const dict_index_t* index; + ulonglong estimate = 0; + ulonglong local_data_file_length; + ulint stat_n_leaf_pages; + + DBUG_ENTER("ha_innopart::estimate_rows_upper_bound"); + + /* We do not know if MySQL can call this function before calling + external_lock(). To be safe, update the thd of the current table + handle. */ + + update_thd(ha_thd()); + + m_prebuilt->trx->op_info = "calculating upper bound for table rows"; + + /* In case MySQL calls this in the middle of a SELECT query, release + possible adaptive hash latch to avoid deadlocks of threads. */ + + trx_search_latch_release_if_reserved(m_prebuilt->trx); + + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + m_prebuilt->table = m_part_share->get_table_part(i); + index = dict_table_get_first_index(m_prebuilt->table); + + stat_n_leaf_pages = index->stat_n_leaf_pages; + + ut_a(stat_n_leaf_pages > 0); + + local_data_file_length = + ((ulonglong) stat_n_leaf_pages) * UNIV_PAGE_SIZE; + + /* Calculate a minimum length for a clustered index record + and from that an upper bound for the number of rows. + Since we only calculate new statistics in row0mysql.cc when a + table has grown by a threshold factor, + we must add a safety factor 2 in front of the formula below. */ + + estimate += 2 * local_data_file_length + / dict_index_calc_min_rec_len(index); + } + + m_prebuilt->trx->op_info = ""; + + DBUG_RETURN((ha_rows) estimate); +} + +/** Time estimate for full table scan. +How many seeks it will take to read through the table. This is to be +comparable to the number returned by records_in_range so that we can +decide if we should scan the table or use keys. +@return estimated time measured in disk seeks. 
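records_in_range() above estimates the range on the first used partition, adds the estimates from the remaining used partitions, and finally bumps a zero total to 1 so the optimizer never mistakes the estimate for a guaranteed empty result. A stripped-down sketch of that aggregation, with a callback standing in for btr_estimate_n_rows_in_range():

#include <cstddef>
#include <vector>

// Sum the per-partition estimates and never return 0.
long long estimate_rows_in_range(
    const std::vector<bool>& used_partitions,
    long long (*estimate_part)(std::size_t part))
{
    long long n_rows = 0;
    for (std::size_t part = 0; part < used_partitions.size(); ++part) {
        if (used_partitions[part]) {
            n_rows += estimate_part(part);
        }
    }
    return n_rows == 0 ? 1 : n_rows;    // 0 would be taken as exact by MySQL
}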
*/ +double +ha_innopart::scan_time() +{ + double scan_time = 0.0; + DBUG_ENTER("ha_innopart::scan_time"); + + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + m_prebuilt->table = m_part_share->get_table_part(i); + scan_time += ha_innobase::scan_time(); + } + DBUG_RETURN(scan_time); +} + +/** Updates the statistics for one partition (table). +@param[in] table Table to update the statistics for. +@param[in] is_analyze True if called from ::analyze(). +@return error code. */ +static +int +update_table_stats( + dict_table_t* table, + bool is_analyze) +{ + dict_stats_upd_option_t opt; + dberr_t ret; + + if (dict_stats_is_persistent_enabled(table)) { + if (is_analyze) { + opt = DICT_STATS_RECALC_PERSISTENT; + } else { + /* This is e.g. 'SHOW INDEXES', + fetch the persistent stats from disk. */ + opt = DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY; + } + } else { + opt = DICT_STATS_RECALC_TRANSIENT; + } + + ut_ad(!mutex_own(&dict_sys->mutex)); + ret = dict_stats_update(table, opt); + + if (ret != DB_SUCCESS) { + return(HA_ERR_GENERIC); + } + return(0); +} + +/** Updates and return statistics. +Returns statistics information of the table to the MySQL interpreter, +in various fields of the handle object. +@param[in] flag Flags for what to update and return. +@param[in] is_analyze True if called from ::analyze(). +@return HA_ERR_* error code or 0. */ +int +ha_innopart::info_low( + uint flag, + bool is_analyze) +{ + dict_table_t* ib_table; + ib_uint64_t max_rows = 0; + uint biggest_partition = 0; + int error = 0; + + DBUG_ENTER("ha_innopart::info_low"); + + /* If we are forcing recovery at a high level, we will suppress + statistics calculation on tables, because that may crash the + server if an index is badly corrupted. */ + + /* We do not know if MySQL can call this function before calling + external_lock(). To be safe, update the thd of the current table + handle. */ + + update_thd(ha_thd()); + + /* In case MySQL calls this in the middle of a SELECT query, release + possible adaptive hash latch to avoid deadlocks of threads. */ + + m_prebuilt->trx->op_info = (char*)"returning various info to MySQL"; + + trx_search_latch_release_if_reserved(m_prebuilt->trx); + + ut_ad(m_part_share->get_table_part(0)->n_ref_count > 0); + + if ((flag & HA_STATUS_TIME) != 0) { + stats.update_time = 0; + + if (is_analyze) { + /* Only analyze the given partitions. */ + int error = set_altered_partitions(); + if (error != 0) { + /* Already checked in mysql_admin_table! */ + ut_ad(0); + DBUG_RETURN(error); + } + } + if (is_analyze || innobase_stats_on_metadata) { + m_prebuilt->trx->op_info = "updating table statistics"; + } + + /* TODO: Only analyze the PK for all partitions, + then the secondary indexes only for the largest partition! 
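For the HA_STATUS_VARIABLE path that follows, info_low() sums the per-partition page counts, converts the totals to bytes with the page size taken from the first partition, and derives mean_rec_length from data_file_length and the row count. A small arithmetic sketch of that aggregation, with an invented PartStats record:

// Per-partition statistics as used in the sketch; field names are invented.
struct PartStats {
    unsigned long long clustered_index_pages;
    unsigned long long other_index_pages;
    unsigned long long n_rows;
};

void aggregate_size_stats(const PartStats* parts, unsigned n_parts,
                          unsigned long long page_size,
                          unsigned long long* data_file_length,
                          unsigned long long* index_file_length,
                          unsigned long long* mean_rec_length)
{
    unsigned long long clust = 0, other = 0, rows = 0;
    for (unsigned i = 0; i < n_parts; ++i) {
        clust += parts[i].clustered_index_pages;
        other += parts[i].other_index_pages;
        rows += parts[i].n_rows;
    }
    *data_file_length = clust * page_size;
    *index_file_length = other * page_size;
    *mean_rec_length = (rows == 0) ? 0 : *data_file_length / rows;
}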
*/ + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + ib_table = m_part_share->get_table_part(i); + if (is_analyze || innobase_stats_on_metadata) { + error = update_table_stats(ib_table, is_analyze); + if (error != 0) { + m_prebuilt->trx->op_info = ""; + DBUG_RETURN(error); + } + } + set_if_bigger(stats.update_time, + (ulong) ib_table->update_time); + } + + if (is_analyze || innobase_stats_on_metadata) { + m_prebuilt->trx->op_info = + "returning various info to MySQL"; + } + } + + if ((flag & HA_STATUS_VARIABLE) != 0) { + + /* TODO: If this is called after pruning, then we could + also update the statistics according to the non-pruned + partitions, by allocating new rec_per_key on the TABLE, + instead of using the info from the TABLE_SHARE. */ + ulint stat_clustered_index_size = 0; + ulint stat_sum_of_other_index_sizes = 0; + ib_uint64_t n_rows = 0; + ulint avail_space = 0; + bool checked_sys_tablespace = false; + + if ((flag & HA_STATUS_VARIABLE_EXTRA) != 0) { + stats.delete_length = 0; + } + + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + ib_table = m_part_share->get_table_part(i); + if ((flag & HA_STATUS_NO_LOCK) == 0) { + dict_table_stats_lock(ib_table, RW_S_LATCH); + } + + ut_a(ib_table->stat_initialized); + + n_rows += ib_table->stat_n_rows; + if (ib_table->stat_n_rows > max_rows) { + max_rows = ib_table->stat_n_rows; + biggest_partition = i; + } + + stat_clustered_index_size += + ib_table->stat_clustered_index_size; + + stat_sum_of_other_index_sizes += + ib_table->stat_sum_of_other_index_sizes; + + if ((flag & HA_STATUS_NO_LOCK) == 0) { + dict_table_stats_unlock(ib_table, RW_S_LATCH); + } + + if ((flag & HA_STATUS_VARIABLE_EXTRA) != 0 + && (flag & HA_STATUS_NO_LOCK) == 0 + && srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE + && avail_space != ULINT_UNDEFINED) { + + /* Only count system tablespace once! */ + if (is_system_tablespace(ib_table->space)) { + if (checked_sys_tablespace) { + continue; + } + checked_sys_tablespace = true; + } + + ulint space = static_cast( + fsp_get_available_space_in_free_extents( + ib_table->space)); + if (space == ULINT_UNDEFINED) { + ut_ad(0); + avail_space = space; + } else { + avail_space += space; + } + } + } + + /* + The MySQL optimizer seems to assume in a left join that n_rows + is an accurate estimate if it is zero. Of course, it is not, + since we do not have any locks on the rows yet at this phase. + Since SHOW TABLE STATUS seems to call this function with the + HA_STATUS_TIME flag set, while the left join optimizer does not + set that flag, we add one to a zero value if the flag is not + set. That way SHOW TABLE STATUS will show the best estimate, + while the optimizer never sees the table empty. */ + + if (n_rows == 0 && (flag & HA_STATUS_TIME) == 0) { + n_rows++; + } + + /* Fix bug#40386: Not flushing query cache after truncate. + n_rows can not be 0 unless the table is empty, set to 1 + instead. The original problem of bug#29507 is actually + fixed in the server code. */ + if (thd_sql_command(m_user_thd) == SQLCOM_TRUNCATE) { + + n_rows = 1; + + /* We need to reset the m_prebuilt value too, otherwise + checks for values greater than the last value written + to the table will fail and the autoinc counter will + not be updated. This will force write_row() into + attempting an update of the table's AUTOINC counter. 
*/ + + m_prebuilt->autoinc_last_value = 0; + } + + /* Take page_size from first partition. */ + ib_table = m_part_share->get_table_part(0); + const page_size_t& page_size = + dict_table_page_size(ib_table); + + stats.records = (ha_rows) n_rows; + stats.deleted = 0; + stats.data_file_length = + ((ulonglong) stat_clustered_index_size) + * page_size.physical(); + stats.index_file_length = + ((ulonglong) stat_sum_of_other_index_sizes) + * page_size.physical(); + + /* See ha_innobase::info_low() for comments! */ + if ((flag & HA_STATUS_NO_LOCK) == 0 + && (flag & HA_STATUS_VARIABLE_EXTRA) != 0 + && srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) { + + if (avail_space == ULINT_UNDEFINED) { + THD* thd; + char errbuf[MYSYS_STRERROR_SIZE]; + + thd = ha_thd(); + + std::string err_str; + err_str = "InnoDB: Trying to get" + " the free space for table "; + err_str += ut_get_name(m_prebuilt->trx, + ib_table->name.m_name); + err_str += " but its tablespace has been" + " discarded or the .ibd file is" + " missing. Setting the free space to" + " zero."; + push_warning_printf( + thd, + Sql_condition::SL_WARNING, + ER_CANT_GET_STAT, + err_str.c_str(), + errno, + my_strerror(errbuf, sizeof(errbuf), + errno)); + + stats.delete_length = 0; + } else { + stats.delete_length = avail_space * 1024; + } + } + + stats.check_time = 0; + stats.mrr_length_per_rec = ref_length + sizeof(void*) + - PARTITION_BYTES_IN_POS; + + if (stats.records == 0) { + stats.mean_rec_length = 0; + } else { + stats.mean_rec_length = (ulong) + (stats.data_file_length / stats.records); + } + } + + if ((flag & HA_STATUS_CONST) != 0) { + /* Find max rows and biggest partition. */ + for (uint i = 0; i < m_tot_parts; i++) { + /* Skip partitions from above. */ + if ((flag & HA_STATUS_VARIABLE) == 0 + || !bitmap_is_set(&(m_part_info->read_partitions), + i)) { + + ib_table = m_part_share->get_table_part(i); + if (ib_table->stat_n_rows > max_rows) { + max_rows = ib_table->stat_n_rows; + biggest_partition = i; + } + } + } + ib_table = m_part_share->get_table_part(biggest_partition); + /* Verify the number of index in InnoDB and MySQL + matches up. If m_prebuilt->clust_index_was_generated + holds, InnoDB defines GEN_CLUST_INDEX internally. */ + ulint num_innodb_index = UT_LIST_GET_LEN(ib_table->indexes) + - m_prebuilt->clust_index_was_generated; + if (table->s->keys < num_innodb_index) { + /* If there are too many indexes defined + inside InnoDB, ignore those that are being + created, because MySQL will only consider + the fully built indexes here. */ + + for (const dict_index_t* index = + UT_LIST_GET_FIRST(ib_table->indexes); + index != NULL; + index = UT_LIST_GET_NEXT(indexes, index)) { + + /* First, online index creation is + completed inside InnoDB, and then + MySQL attempts to upgrade the + meta-data lock so that it can rebuild + the .frm file. If we get here in that + time frame, dict_index_is_online_ddl() + would not hold and the index would + still not be included in TABLE_SHARE. 
*/ + if (!index->is_committed()) { + num_innodb_index--; + } + } + + if (table->s->keys < num_innodb_index + && (innobase_fts_check_doc_id_index(ib_table, + NULL, NULL) + == FTS_EXIST_DOC_ID_INDEX)) { + num_innodb_index--; + } + } + + if (table->s->keys != num_innodb_index) { + ib::error() << "Table " + << ib_table->name << " contains " + << num_innodb_index + << " indexes inside InnoDB, which" + " is different from the number of" + " indexes " << table->s->keys + << " defined in the MySQL"; + } + + if ((flag & HA_STATUS_NO_LOCK) == 0) { + dict_table_stats_lock(ib_table, RW_S_LATCH); + } + + ut_a(ib_table->stat_initialized); + + for (ulong i = 0; i < table->s->keys; i++) { + ulong j; + /* We could get index quickly through internal + index mapping with the index translation table. + The identity of index (match up index name with + that of table->key_info[i]) is already verified in + innopart_get_index(). */ + dict_index_t* index = innopart_get_index( + biggest_partition, i); + + if (index == NULL) { + ib::error() << "Table " + << ib_table->name << " contains fewer" + " indexes inside InnoDB than" + " are defined in the MySQL" + " .frm file. Have you mixed up" + " .frm files from different" + " installations? " + << TROUBLESHOOTING_MSG; + break; + } + + KEY* key = &table->key_info[i]; + for (j = 0; + j < key->actual_key_parts; + j++) { + + if ((key->flags & HA_FULLTEXT) != 0) { + /* The whole concept has no validity + for FTS indexes. */ + key->rec_per_key[j] = 1; + continue; + } + + if ((j + 1) > index->n_uniq) { + ib::error() << "Index " << index->name + << " of " << ib_table->name + << " has " << index->n_uniq + << " columns unique inside" + " InnoDB, but MySQL is" + " asking statistics for " + << j + 1 << " columns. Have" + " you mixed up .frm files" + " from different" + " installations? " + << TROUBLESHOOTING_MSG; + break; + } + + /* innodb_rec_per_key() will use + index->stat_n_diff_key_vals[] and the value we + pass index->table->stat_n_rows. Both are + calculated by ANALYZE and by the background + stats gathering thread (which kicks in when too + much of the table has been changed). In + addition table->stat_n_rows is adjusted with + each DML (e.g. ++ on row insert). Those + adjustments are not MVCC'ed and not even + reversed on rollback. So, + index->stat_n_diff_key_vals[] and + index->table->stat_n_rows could have been + calculated at different time. This is + acceptable. */ + const rec_per_key_t rec_per_key = + innodb_rec_per_key( + index, j, + max_rows); + + key->set_records_per_key(j, rec_per_key); + + /* The code below is legacy and should be + removed together with this comment once we + are sure the new floating point rec_per_key, + set via set_records_per_key(), works fine. */ + + ulong rec_per_key_int = static_cast( + innodb_rec_per_key(index, j, + max_rows)); + + /* Since MySQL seems to favor table scans + too much over index searches, we pretend + index selectivity is 2 times better than + our estimate: */ + + rec_per_key_int = rec_per_key_int / 2; + + if (rec_per_key_int == 0) { + rec_per_key_int = 1; + } + + key->rec_per_key[j] = rec_per_key_int; + } + } + + if ((flag & HA_STATUS_NO_LOCK) == 0) { + dict_table_stats_unlock(ib_table, RW_S_LATCH); + } + + char path[FN_REFLEN]; + os_file_stat_t stat_info; + /* Use the first partition for create time until new DD. 
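The legacy integer records-per-key code above halves the estimate to make index access look more selective to the optimizer and clamps the result to at least 1. As a one-function sketch of that adjustment:

// Halve the estimate (pretend indexes are twice as selective), clamp to 1.
unsigned long legacy_rec_per_key(double estimated_rec_per_key)
{
    unsigned long value =
        static_cast<unsigned long>(estimated_rec_per_key) / 2;
    return value == 0 ? 1 : value;
}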
*/ + ib_table = m_part_share->get_table_part(0); + my_snprintf(path, sizeof(path), "%s/%s%s", + mysql_data_home, + table->s->normalized_path.str, + reg_ext); + + unpack_filename(path,path); + + if (os_file_get_status(path, &stat_info, false, true) == DB_SUCCESS) { + stats.create_time = (ulong) stat_info.ctime; + } + } + + if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) { + + goto func_exit; + } + + if ((flag & HA_STATUS_ERRKEY) != 0) { + const dict_index_t* err_index; + + ut_a(m_prebuilt->trx); + ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); + + err_index = trx_get_error_info(m_prebuilt->trx); + + if (err_index != NULL) { + errkey = m_part_share->get_mysql_key(m_last_part, + err_index); + } else { + errkey = (unsigned int) ( + (m_prebuilt->trx->error_key_num + == ULINT_UNDEFINED) + ? UINT_MAX + : m_prebuilt->trx->error_key_num); + } + } + + if ((flag & HA_STATUS_AUTO) != 0) { + /* auto_inc is only supported in first key for InnoDB! */ + ut_ad(table_share->next_number_keypart == 0); + DBUG_PRINT("info", ("HA_STATUS_AUTO")); + if (table->found_next_number_field == NULL) { + stats.auto_increment_value = 0; + } else { + /* Lock to avoid two concurrent initializations. */ + lock_auto_increment(); + if (m_part_share->auto_inc_initialized) { + stats.auto_increment_value = + m_part_share->next_auto_inc_val; + } else { + /* The auto-inc mutex in the table_share is + locked, so we do not need to have the handlers + locked. */ + + error = initialize_auto_increment( + (flag & HA_STATUS_NO_LOCK) != 0); + stats.auto_increment_value = + m_part_share->next_auto_inc_val; + } + unlock_auto_increment(); + } + } + +func_exit: + m_prebuilt->trx->op_info = (char*)""; + + DBUG_RETURN(error); +} + +/** Optimize table. +This is mapped to "ALTER TABLE tablename ENGINE=InnoDB", which rebuilds +the table in MySQL. +@param[in] thd Connection thread handle. +@param[in] check_opt Currently ignored. +@return 0 for success else error code. */ +int +ha_innopart::optimize( + THD* thd, + HA_CHECK_OPT* check_opt) +{ + return(HA_ADMIN_TRY_ALTER); +} + +/** Checks a partitioned table. +Tries to check that an InnoDB table is not corrupted. If corruption is +noticed, prints to stderr information about it. In case of corruption +may also assert a failure and crash the server. Also checks for records +in wrong partition. +@param[in] thd MySQL THD object/thread handle. +@param[in] check_opt Check options. +@return HA_ADMIN_CORRUPT or HA_ADMIN_OK. */ +int +ha_innopart::check( + THD* thd, + HA_CHECK_OPT* check_opt) +{ + uint error = HA_ADMIN_OK; + uint i; + + DBUG_ENTER("ha_innopart::check"); + /* TODO: Enhance this to: + - Every partition has the same structure. + - The names are correct (partition names checked in ::open()?) + Currently it only does normal InnoDB check of each partition. */ + + if (set_altered_partitions()) { + ut_ad(0); // Already checked by set_part_state()! + DBUG_RETURN(HA_ADMIN_INVALID); + } + for (i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + m_prebuilt->table = m_part_share->get_table_part(i); + error = ha_innobase::check(thd, check_opt); + if (error != 0) { + break; + } + if ((check_opt->flags & (T_MEDIUM | T_EXTEND)) != 0) { + error = Partition_helper::check_misplaced_rows(i, false); + if (error != 0) { + break; + } + } + } + if (error != 0) { + print_admin_msg( + thd, + 256, + "error", + table_share->db.str, + table->alias, + "check", + m_is_sub_partitioned ? 
+ "Subpartition %s returned error" + : "Partition %s returned error", + m_part_share->get_partition_name(i)); + } + + DBUG_RETURN(error); +} + +/** Repair a partitioned table. +Only repairs records in wrong partitions (moves them to the correct +partition or deletes them if not in any partition). +@param[in] thd MySQL THD object/thread handle. +@param[in] repair_opt Repair options. +@return 0 or error code. */ +int +ha_innopart::repair( + THD* thd, + HA_CHECK_OPT* repair_opt) +{ + uint error = HA_ADMIN_OK; + + DBUG_ENTER("ha_innopart::repair"); + + /* TODO: enable this warning to be clear about what is repaired. + Currently disabled to generate smaller test diffs. */ +#ifdef ADD_WARNING_FOR_REPAIR_ONLY_PARTITION + push_warning_printf(thd, Sql_condition::SL_WARNING, + ER_ILLEGAL_HA, + "Only moving rows from wrong partition to correct" + " partition is supported," + " repairing InnoDB indexes is not yet supported!"); +#endif + + /* Only repair partitions for MEDIUM or EXTENDED options. */ + if ((repair_opt->flags & (T_MEDIUM | T_EXTEND)) == 0) { + DBUG_RETURN(HA_ADMIN_OK); + } + if (set_altered_partitions()) { + ut_ad(0); // Already checked by set_part_state()! + DBUG_RETURN(HA_ADMIN_INVALID); + } + for (uint i = m_part_info->get_first_used_partition(); + i < m_tot_parts; + i = m_part_info->get_next_used_partition(i)) { + + /* TODO: Implement and use ha_innobase::repair()! */ + error = Partition_helper::check_misplaced_rows(i, true); + if (error != 0) { + print_admin_msg( + thd, + 256, + "error", + table_share->db.str, + table->alias, + "repair", + m_is_sub_partitioned ? + "Subpartition %s returned error" + : "Partition %s returned error", + m_part_share->get_partition_name(i)); + break; + } + } + + DBUG_RETURN(error); +} + +/** Check if possible to switch engine (no foreign keys). +Checks if ALTER TABLE may change the storage engine of the table. +Changing storage engines is not allowed for tables for which there +are foreign key constraints (parent or child tables). +@return true if can switch engines. */ +bool +ha_innopart::can_switch_engines() +{ + bool can_switch; + + DBUG_ENTER("ha_innopart::can_switch_engines"); + can_switch = ha_innobase::can_switch_engines(); + ut_ad(can_switch); + + DBUG_RETURN(can_switch); +} + +/** Checks if a table is referenced by a foreign key. +The MySQL manual states that a REPLACE is either equivalent to an INSERT, +or DELETE(s) + INSERT. Only a delete is then allowed internally to resolve +a duplicate key conflict in REPLACE, not an update. +@return > 0 if referenced by a FOREIGN KEY. */ +uint +ha_innopart::referenced_by_foreign_key() +{ + if (dict_table_is_referenced_by_foreign_key(m_prebuilt->table)) { + +#ifndef HA_INNOPART_SUPPORTS_FOREIGN_KEYS + ut_ad(0); +#endif /* HA_INNOPART_SUPPORTS_FOREIGN_KEYS */ + return(1); + } + + return(0); +} + +/** Start statement. +MySQL calls this function at the start of each SQL statement inside LOCK +TABLES. Inside LOCK TABLES the ::external_lock method does not work to +mark SQL statement borders. Note also a special case: if a temporary table +is created inside LOCK TABLES, MySQL has not called external_lock() at all +on that table. +MySQL-5.0 also calls this before each statement in an execution of a stored +procedure. To make the execution more deterministic for binlogging, MySQL-5.0 +locks all tables involved in a stored procedure with full explicit table +locks (thd_in_lock_tables(thd) holds in store_lock()) before executing the +procedure. +@param[in] thd Handle to the user thread. 
+@param[in] lock_type Lock type. +@return 0 or error code. */ +int +ha_innopart::start_stmt( + THD* thd, + thr_lock_type lock_type) +{ + int error = 0; + + if (m_part_info->get_first_used_partition() == MY_BIT_NONE) { + /* All partitions pruned away, do nothing! */ + return(error); + } + + error = ha_innobase::start_stmt(thd, lock_type); + if (m_prebuilt->sql_stat_start) { + memset(m_sql_stat_start_parts, 0xff, + UT_BITS_IN_BYTES(m_tot_parts)); + } else { + memset(m_sql_stat_start_parts, 0, + UT_BITS_IN_BYTES(m_tot_parts)); + } + return(error); +} + +/** Lock/prepare to lock table. +As MySQL will execute an external lock for every new table it uses when it +starts to process an SQL statement (an exception is when MySQL calls +start_stmt for the handle) we can use this function to store the pointer to +the THD in the handle. We will also use this function to communicate +to InnoDB that a new SQL statement has started and that we must store a +savepoint to our transaction handle, so that we are able to roll back +the SQL statement in case of an error. +@param[in] thd Handle to the user thread. +@param[in] lock_type Lock type. +@return 0 or error number. */ +int +ha_innopart::external_lock( + THD* thd, + int lock_type) +{ + int error = 0; + bool is_quiesce_set = false; + bool is_quiesce_start = false; + + if (m_part_info->get_first_used_partition() == MY_BIT_NONE + && !(m_mysql_has_locked + && lock_type == F_UNLCK)) { + + /* All partitions pruned away, do nothing! */ + ut_ad(!m_mysql_has_locked); + return(error); + } + ut_ad(m_mysql_has_locked || lock_type != F_UNLCK); + + m_prebuilt->table = m_part_share->get_table_part(0); + switch (m_prebuilt->table->quiesce) { + case QUIESCE_START: + /* Check for FLUSH TABLE t WITH READ LOCK; */ + if (!srv_read_only_mode + && thd_sql_command(thd) == SQLCOM_FLUSH + && lock_type == F_RDLCK) { + + is_quiesce_set = true; + is_quiesce_start = true; + } + break; + + case QUIESCE_COMPLETE: + /* Check for UNLOCK TABLES; implicit or explicit + or trx interruption. */ + if (m_prebuilt->trx->flush_tables > 0 + && (lock_type == F_UNLCK + || trx_is_interrupted(m_prebuilt->trx))) { + + is_quiesce_set = true; + } + + break; + + case QUIESCE_NONE: + break; + default: + ut_ad(0); + } + + error = ha_innobase::external_lock(thd, lock_type); + + /* FLUSH FOR EXPORT is done above only for the first partition, + so complete it for all the other partitions. */ + if (is_quiesce_set) { + for (uint i = 1; i < m_tot_parts; i++) { + dict_table_t* table = m_part_share->get_table_part(i); + if (is_quiesce_start) { + table->quiesce = QUIESCE_START; + row_quiesce_table_start(table, m_prebuilt->trx); + + /* Use the transaction instance to track UNLOCK + TABLES. It can be done via START TRANSACTION; + too implicitly. */ + + ++m_prebuilt->trx->flush_tables; + } else { + ut_ad(table->quiesce == QUIESCE_COMPLETE); + row_quiesce_table_complete(table, + m_prebuilt->trx); + + ut_a(m_prebuilt->trx->flush_tables > 0); + --m_prebuilt->trx->flush_tables; + } + } + m_prebuilt->table = m_part_share->get_table_part(0); + } + ut_ad(!m_auto_increment_lock); + ut_ad(!m_auto_increment_safe_stmt_log_lock); + + if (m_prebuilt->sql_stat_start) { + memset(m_sql_stat_start_parts, 0xff, + UT_BITS_IN_BYTES(m_tot_parts)); + } else { + memset(m_sql_stat_start_parts, 0, + UT_BITS_IN_BYTES(m_tot_parts)); + } + return(error); +} + +/** Get the current auto_increment value. +@param[in] offset Table auto-inc offset. +@param[in] increment Table auto-inc increment. +@param[in] nb_desired_values Number of required values. 
+@param[out] first_value The auto increment value. +@param[out] nb_reserved_values Number of reserved values. +@return Auto increment value, or ~0 on failure. */ +void +ha_innopart::get_auto_increment( + ulonglong offset, + ulonglong increment, + ulonglong nb_desired_values, + ulonglong* first_value, + ulonglong* nb_reserved_values) +{ + DBUG_ENTER("ha_innopart::get_auto_increment"); + if (table_share->next_number_keypart != 0) { + /* Only first key part allowed as autoinc for InnoDB tables! */ + ut_ad(0); + *first_value = ULLONG_MAX; + DBUG_VOID_RETURN; + } + get_auto_increment_first_field( + increment, + nb_desired_values, + first_value, + nb_reserved_values); + DBUG_VOID_RETURN; +} + +/** Compares two 'refs'. +A 'ref' is the (internal) primary key value of the row. +If there is no explicitly declared non-null unique key or a primary key, then +InnoDB internally uses the row id as the primary key. +It will use the partition id as secondary compare. +@param[in] ref1 An (internal) primary key value in the MySQL key value +format. +@param[in] ref2 Reference to compare with (same type as ref1). +@return < 0 if ref1 < ref2, 0 if equal, else > 0. */ +int +ha_innopart::cmp_ref( + const uchar* ref1, + const uchar* ref2) +{ + int cmp; + + cmp = ha_innobase::cmp_ref(ref1 + PARTITION_BYTES_IN_POS, + ref2 + PARTITION_BYTES_IN_POS); + + if (cmp != 0) { + return(cmp); + } + + cmp = static_cast(uint2korr(ref1)) + - static_cast(uint2korr(ref2)); + + return(cmp); +} + +/** Prepare for creating new partitions during ALTER TABLE ... PARTITION. +@param[in] num_partitions Number of new partitions to be created. +@param[in] only_create True if only creating the partition +(no open/lock is needed). +@return 0 for success else error code. */ +int +ha_innopart::prepare_for_new_partitions( + uint num_partitions, + bool only_create) +{ + m_new_partitions = UT_NEW(Altered_partitions(num_partitions, + only_create), + mem_key_partitioning); + if (m_new_partitions == NULL) { + return(HA_ERR_OUT_OF_MEM); + } + if (m_new_partitions->initialize()) { + UT_DELETE(m_new_partitions); + m_new_partitions = NULL; + return(HA_ERR_OUT_OF_MEM); + } + return(0); +} + +/** Create a new partition to be filled during ALTER TABLE ... PARTITION. +@param[in] table Table to create the partition in. +@param[in] create_info Table/partition specific create info. +@param[in] part_name Partition name. +@param[in] new_part_id Partition id in new table. +@param[in] part_elem Partition element. +@return 0 for success else error code. */ +int +ha_innopart::create_new_partition( + TABLE* table, + HA_CREATE_INFO* create_info, + const char* part_name, + uint new_part_id, + partition_element* part_elem) +{ + int error; + char norm_name[FN_REFLEN]; + const char* tablespace_name_backup = create_info->tablespace; + const char* data_file_name_backup = create_info->data_file_name; + DBUG_ENTER("ha_innopart::create_new_partition"); + /* Delete by ddl_log on failure. */ + normalize_table_name(norm_name, part_name); + set_create_info_dir(part_elem, create_info); + + /* The below check is the same as for CREATE TABLE, but since we are + doing an alter here it will not trigger the check in + create_option_tablespace_is_valid(). 
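+	That is, a DATA DIRECTORY clause combined with a shared
+	TABLESPACE assignment has to be rejected here explicitly.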
*/ + if (tablespace_is_shared_space(create_info) + && create_info->data_file_name != NULL + && create_info->data_file_name[0] != '\0') { + my_printf_error(ER_ILLEGAL_HA_CREATE_OPTION, + "InnoDB: DATA DIRECTORY cannot be used" + " with a TABLESPACE assignment.", MYF(0)); + DBUG_RETURN(HA_WRONG_CREATE_OPTION); + } + + error = ha_innobase::create(norm_name, table, create_info); + create_info->tablespace = tablespace_name_backup; + create_info->data_file_name = data_file_name_backup; + if (error == HA_ERR_FOUND_DUPP_KEY) { + DBUG_RETURN(HA_ERR_TABLE_EXIST); + } + if (error != 0) { + DBUG_RETURN(error); + } + if (!m_new_partitions->only_create()) + { + dict_table_t* part; + part = dict_table_open_on_name(norm_name, + false, + true, + DICT_ERR_IGNORE_NONE); + if (part == NULL) { + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } + m_new_partitions->set_part(new_part_id, part); + } + DBUG_RETURN(0); +} + +/** Close and finalize new partitions. */ +void +ha_innopart::close_new_partitions() +{ + if (m_new_partitions != NULL) { + UT_DELETE(m_new_partitions); + m_new_partitions = NULL; + } +} + +/** write row to new partition. +@param[in] new_part New partition to write to. +@return 0 for success else error code. */ +int +ha_innopart::write_row_in_new_part( + uint new_part) +{ + int result; + DBUG_ENTER("ha_innopart::write_row_in_new_part"); + + m_last_part = new_part; + if (m_new_partitions->part(new_part) == NULL) { + /* Altered partition contains misplaced row. */ + m_err_rec = table->record[0]; + DBUG_RETURN(HA_ERR_ROW_IN_WRONG_PARTITION); + } + m_new_partitions->get_prebuilt(m_prebuilt, new_part); + result = ha_innobase::write_row(table->record[0]); + m_new_partitions->set_from_prebuilt(m_prebuilt, new_part); + DBUG_RETURN(result); +} + +/** Allocate the array to hold blob heaps for all partitions */ +mem_heap_t** +ha_innopart::alloc_blob_heap_array() +{ + DBUG_ENTER("ha_innopart::alloc_blob_heap_array"); + + const ulint len = sizeof(mem_heap_t*) * m_tot_parts; + m_blob_heap_parts = static_cast( + ut_zalloc(len, mem_key_partitioning)); + if (m_blob_heap_parts == NULL) { + DBUG_RETURN(NULL); + } + + DBUG_RETURN(m_blob_heap_parts); +} + +/** Free the array that holds blob heaps for all partitions */ +void +ha_innopart::free_blob_heap_array() +{ + DBUG_ENTER("ha_innopart::free_blob_heap_array"); + + if (m_blob_heap_parts != NULL) { + clear_blob_heaps(); + ut_free(m_blob_heap_parts); + m_blob_heap_parts = NULL; + } + + DBUG_VOID_RETURN; +} + +void +ha_innopart::clear_blob_heaps() +{ + DBUG_ENTER("ha_innopart::clear_blob_heaps"); + + if (m_blob_heap_parts == NULL) { + DBUG_VOID_RETURN; + } + + for (uint i = 0; i < m_tot_parts; i++) { + if (m_blob_heap_parts[i] != NULL) { + DBUG_PRINT("ha_innopart", ("freeing blob_heap: %p", + m_blob_heap_parts[i])); + mem_heap_free(m_blob_heap_parts[i]); + m_blob_heap_parts[i] = NULL; + } + } + + /* Reset blob_heap in m_prebuilt after freeing all heaps. It is set in + ha_innopart::set_partition to the blob heap of current partition. */ + m_prebuilt->blob_heap = NULL; + + DBUG_VOID_RETURN; +} + +/** Reset state of file to after 'open'. This function is called +after every statement for all tables used by that statement. 
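+For the partitioned handler this clears all per-partition blob heaps
+(see clear_blob_heaps() above) before delegating to ha_innobase::reset().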
*/ +int +ha_innopart::reset() +{ + DBUG_ENTER("ha_innopart::reset"); + + clear_blob_heaps(); + + DBUG_RETURN(ha_innobase::reset()); +} + +/**************************************************************************** + * DS-MRR implementation + ***************************************************************************/ + +/* TODO: move the default implementations into the base handler class! */ +/* TODO: See if it could be optimized for partitioned tables? */ +/* Use default ha_innobase implementation for now... */ diff --git a/storage/innobase/handler/ha_innopart.h b/storage/innobase/handler/ha_innopart.h new file mode 100644 index 00000000000..0a6bfe0b2f1 --- /dev/null +++ b/storage/innobase/handler/ha_innopart.h @@ -0,0 +1,1330 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/* The InnoDB Partition handler: the interface between MySQL and InnoDB. */ + +#ifndef ha_innopart_h +#define ha_innopart_h + +/* JAN: TODO: MySQL 5.7 */ +//#include "partitioning/partition_handler.h" +#include "ha_partition.h" + +/* Forward declarations */ +class Altered_partitions; +class partition_info; + +/** HA_DUPLICATE_POS and HA_READ_BEFORE_WRITE_REMOVAL is not +set from ha_innobase, but cannot yet be supported in ha_innopart. +Full text and geometry is not yet supported. */ +const handler::Table_flags HA_INNOPART_DISABLED_TABLE_FLAGS = + ( HA_CAN_FULLTEXT + | HA_CAN_FULLTEXT_EXT + | HA_CAN_GEOMETRY + | HA_DUPLICATE_POS + | HA_READ_BEFORE_WRITE_REMOVAL); + +/** InnoDB partition specific Handler_share. */ +class Ha_innopart_share : public Partition_share +{ +private: + /** Array of all included table definitions (one per partition). */ + dict_table_t** m_table_parts; + + /** Instead of INNOBASE_SHARE::idx_trans_tbl. Maps MySQL index number + to InnoDB index per partition. */ + dict_index_t** m_index_mapping; + + /** Total number of partitions. */ + uint m_tot_parts; + + /** Number of indexes. */ + uint m_index_count; + + /** Reference count. */ + uint m_ref_count; + + /** Pointer back to owning TABLE_SHARE. */ + TABLE_SHARE* m_table_share; + + /** Virtual column template */ + innodb_col_templ_t* m_s_templ; +public: + Ha_innopart_share( + TABLE_SHARE* table_share); + + ~Ha_innopart_share(); + + /** Set innodb table for given partition. + @param[in] part_id Partition number. + @param[in] table Table. */ + inline + void + set_table_part( + uint part_id, + dict_table_t* table) + { + ut_ad(m_table_parts != NULL); + ut_ad(part_id < m_tot_parts); + m_table_parts[part_id] = table; + } + + /** Return innodb table for given partition. + @param[in] part_id Partition number. + @return InnoDB table. 
*/ + inline + dict_table_t* + get_table_part( + uint part_id) const + { + ut_ad(m_table_parts != NULL); + ut_ad(part_id < m_tot_parts); + return(m_table_parts[part_id]); + } + + /** Return innodb index for given partition and key number. + @param[in] part_id Partition number. + @param[in] keynr Key number. + @return InnoDB index. */ + dict_index_t* + get_index( + uint part_id, + uint keynr); + + /** Get MySQL key number corresponding to InnoDB index. + @param[in] part_id Partition number. + @param[in] index InnoDB index. + @return MySQL key number or MAX_KEY if non-existent. */ + uint + get_mysql_key( + uint part_id, + const dict_index_t* index); + + /** Initialize the share with table and indexes per partition. + @param[in] part_info Partition info (partition names to use) + @param[in] table_name Table name (db/table_name) + @return false on success else true. */ + bool + open_table_parts( + partition_info* part_info, + const char* table_name); + + /** Close the table partitions. + If all instances are closed, also release the resources. */ + void + close_table_parts(); + + /* Static helper functions. */ + /** Fold to lower case if windows or lower_case_table_names == 1. + @param[in,out] s String to fold.*/ + static + void + partition_name_casedn_str( + char* s); + + /** Translate and append partition name. + @param[out] to String to write in filesystem charset + @param[in] from Name in system charset + @param[in] sep Separator + @param[in] len Max length of to buffer + @return length of written string. */ + static + size_t + append_sep_and_name( + char* to, + const char* from, + const char* sep, + size_t len); + + /** Set up the virtual column template for partition table, and points + all m_table_parts[]->vc_templ to it. + @param[in] table MySQL TABLE object + @param[in] ib_table InnoDB dict_table_t + @param[in] table_name Table name (db/table_name) */ + void + set_v_templ( + TABLE* table, + dict_table_t* ib_table, + const char* name); + +private: + /** Disable default constructor. */ + Ha_innopart_share() {}; + + /** Open one partition (lower lever innodb table). + @param[in] part_id Partition to open. + @param[in] partition_name Name of partition. + @return false on success else true. */ + bool + open_one_table_part( + uint part_id, + const char* partition_name); +}; + +/** The class defining a partitioning aware handle to an InnoDB table. +Based on ha_innobase and extended with +- Partition_helper for re-using common partitioning functionality +- Partition_handler for providing partitioning specific api calls. +Generic partitioning functions are implemented in Partition_helper. +Lower level storage functions are implemented in ha_innobase. +Partition_handler is inherited for implementing the handler level interface +for partitioning specific functions, like change_partitions and +truncate_partition. +InnoDB specific functions related to partitioning is implemented here. */ +class ha_innopart: + public ha_innobase, +// public Partition_helper, + public Partition_handler +{ +public: + ha_innopart( + handlerton* hton, + TABLE_SHARE* table_arg); + + ~ha_innopart(); + + /** Clone this handler, used when needing more than one cursor + to the same table. + @param[in] name Table name. + @param[in] mem_root mem_root to allocate from. + @retval Pointer to clone or NULL if error. */ + handler* + clone( + const char* name, + MEM_ROOT* mem_root); + + /** Check and register a table in the query cache. + Ask InnoDB if a query to a table can be cached. + @param[in] thd User thread handle. 
+ @param[in] table_key Normalized path to the table. + @param[in] key_length Lenght of table_key. + @param[out] call_back Function pointer for checking if data + has changed. + @param[in,out] engine_data Data for call_back (not used). + @return TRUE if query caching of the table is permitted. */ + my_bool + register_query_cache_table( + THD* thd, + char* table_key, + size_t key_length, + qc_engine_callback* call_back, + ulonglong* engine_data) + { + /* Currently this would need to go through every + [sub] partition in the table to see if any of them has changed. + See row_search_check_if_query_cache_permitted(). + So disabled until we can avoid check all partitions. */ + return(FALSE); + } + + /** On-line ALTER TABLE interface @see handler0alter.cc @{ */ + + /** Check if InnoDB supports a particular alter table in-place. + @param[in] altered_table TABLE object for new version of table. + @param[in,out] ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used during in-place alter. + @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported + @retval HA_ALTER_INPLACE_NO_LOCK Supported + @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE Supported, but + requires lock during main phase and exclusive lock during prepare + phase. + @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE Supported, prepare + phase requires exclusive lock. */ + enum_alter_inplace_result + check_if_supported_inplace_alter( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info); + + /** Prepare in-place ALTER for table. + Allows InnoDB to update internal structures with concurrent + writes blocked (provided that check_if_supported_inplace_alter() + did not return HA_ALTER_INPLACE_NO_LOCK). + This will be invoked before inplace_alter_table(). + @param[in] altered_table TABLE object for new version of table. + @param[in,out] ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used during in-place alter. + @retval true Failure. + @retval false Success. */ + bool + prepare_inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info); + + /** Alter the table structure in-place. + Alter the table structure in-place with operations + specified using HA_ALTER_FLAGS and Alter_inplace_information. + The level of concurrency allowed during this operation depends + on the return value from check_if_supported_inplace_alter(). + @param[in] altered_table TABLE object for new version of table. + @param[in,out] ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used during in-place alter. + @retval true Failure. + @retval false Success. */ + bool + inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info); + + /** Commit or rollback. + Commit or rollback the changes made during + prepare_inplace_alter_table() and inplace_alter_table() inside + the storage engine. Note that the allowed level of concurrency + during this operation will be the same as for + inplace_alter_table() and thus might be higher than during + prepare_inplace_alter_table(). (E.g concurrent writes were + blocked during prepare, but might not be during commit). + @param[in] altered_table TABLE object for new version of table. + @param[in] ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used during in-place alter. + @param[in,out] commit true => Commit, false => Rollback. + @retval true Failure. + @retval false Success. 
*/ + bool + commit_inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info, + bool commit); + + /** Notify the storage engine that the table structure (.frm) has + been updated. + + ha_partition allows inplace operations that also upgrades the engine + if it supports partitioning natively. So if this is the case then + we will remove the .par file since it is not used with ha_innopart + (we use the internal data dictionary instead). */ + void + notify_table_changed(); + /** @} */ + + // TODO: should we implement init_table_handle_for_HANDLER() ? + // (or is sql_stat_start handled correctly anyway?) + int + optimize( + THD* thd, + HA_CHECK_OPT* check_opt); + + int + discard_or_import_tablespace( + my_bool discard); + + /** Compare key and rowid. + Helper function for sorting records in the priority queue. + a/b points to table->record[0] rows which must have the + key fields set. The bytes before a and b store the rowid. + This is used for comparing/sorting rows first according to + KEY and if same KEY, by rowid (ref). + + @param[in] key_info Null terminated array of index + information. + @param[in] a Pointer to record+ref in first record. + @param[in] b Pointer to record+ref in second record. + @return Return value is SIGN(first_rec - second_rec) + @retval 0 Keys are equal. + @retval -1 second_rec is greater than first_rec. + @retval +1 first_rec is greater than second_rec. */ + static + int + key_and_rowid_cmp( + KEY** key_info, + uchar *a, + uchar *b); + + int + extra( + enum ha_extra_function operation); + + void + print_error( + int error, + myf errflag); + + bool + is_ignorable_error( + int error); + + int + start_stmt( + THD* thd, + thr_lock_type lock_type); + + ha_rows + records_in_range( + uint inx, + key_range* min_key, + key_range* max_key); + + ha_rows + estimate_rows_upper_bound(); + + uint + alter_table_flags( + uint flags); + + void + update_create_info( + HA_CREATE_INFO* create_info); + + int + create( + const char* name, + TABLE* form, + HA_CREATE_INFO* create_info); + + int + truncate(); + + int + check( + THD* thd, + HA_CHECK_OPT* check_opt); + + /** Repair table. + Will only handle records in wrong partition, not repairing + corrupt innodb indexes. + @param[in] thd Thread context. + @param[in] repair_opt Repair options. + @return 0 or error code. */ + int + repair( + THD* thd, + HA_CHECK_OPT* repair_opt); + + bool + can_switch_engines(); + + uint + referenced_by_foreign_key(); + + void + get_auto_increment( + ulonglong offset, + ulonglong increment, + ulonglong nb_desired_values, + ulonglong* first_value, + ulonglong* nb_reserved_values); + + int + cmp_ref( + const uchar* ref1, + const uchar* ref2); + + int + read_range_first( + const key_range* start_key, + const key_range* end_key, + bool eq_range_arg, + bool sorted) + { + return(Partition_helper::ph_read_range_first( + start_key, + end_key, + eq_range_arg, + sorted)); + } + + void + position( + const uchar* record) + { + Partition_helper::ph_position(record); + } + + int + rnd_pos_by_record( + uchar* record) + { + return(Partition_helper::ph_rnd_pos_by_record(record)); + } + + /* TODO: Implement these! 
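+	Most of the methods below currently assert in debug builds
+	(ut_ad(0)) and/or return a "not supported" default such as
+	HA_ERR_WRONG_COMMAND, COMPATIBLE_DATA_NO or NULL.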
*/ + bool + check_if_incompatible_data( + HA_CREATE_INFO* info, + uint table_changes) + { + ut_ad(0); + return(COMPATIBLE_DATA_NO); + } + + int + delete_all_rows() + { + return(handler::delete_all_rows()); + } + + int + disable_indexes( + uint mode) + { + return(HA_ERR_WRONG_COMMAND); + } + + int + enable_indexes( + uint mode) + { + return(HA_ERR_WRONG_COMMAND); + } + + void + free_foreign_key_create_info( + char* str) + { + ut_ad(0); + } + + int + ft_init() + { + ut_ad(0); + return(HA_ERR_WRONG_COMMAND); + } + + FT_INFO* + ft_init_ext( + uint flags, + uint inx, + String* key) + { + ut_ad(0); + return(NULL); + } + + FT_INFO* + ft_init_ext_with_hints( + uint inx, + String* key, + /* JAN: TODO: MySQL 5. / + Ft_hints* hints)*/ + void* hints) + { + ut_ad(0); + return(NULL); + } + + int + ft_read( + uchar* buf) + { + ut_ad(0); + return(HA_ERR_WRONG_COMMAND); + } + + bool + get_foreign_dup_key( + char* child_table_name, + uint child_table_name_len, + char* child_key_name, + uint child_key_name_len) + { + ut_ad(0); + return(false); + } + + // TODO: not yet supporting FK. + char* + get_foreign_key_create_info() + { + return(NULL); + } + + // TODO: not yet supporting FK. + int + get_foreign_key_list( + THD* thd, + List* f_key_list) + { + return(0); + } + + // TODO: not yet supporting FK. + int + get_parent_foreign_key_list( + THD* thd, + List* f_key_list) + { + return(0); + } + + // TODO: not yet supporting FK. + int + get_cascade_foreign_key_table_list( + THD* thd, + List* fk_table_list) + { + return(0); + } + + int + read_range_next() + { + return(Partition_helper::ph_read_range_next()); + } + + uint32 + calculate_key_hash_value( + Field** field_array) + { + return(Partition_helper::ph_calculate_key_hash_value(field_array)); + } + + Table_flags + table_flags() const + { + return(ha_innobase::table_flags() | HA_CAN_REPAIR); + } + + void + release_auto_increment() + { + Partition_helper::ph_release_auto_increment(); + } + + /** Implementing Partition_handler interface @see partition_handler.h + @{ */ + + /** See Partition_handler. */ + void + get_dynamic_partition_info( + ha_statistics* stat_info, + ha_checksum* check_sum, + uint part_id) + { + Partition_helper::get_dynamic_partition_info_low( + stat_info, + check_sum, + part_id); + } + + uint + alter_flags( + uint flags __attribute__((unused))) const + { + return(HA_PARTITION_FUNCTION_SUPPORTED + | HA_FAST_CHANGE_PARTITION); + } + + Partition_handler* + get_partition_handler() + { + return(static_cast(this)); + } + + void + set_part_info( + partition_info* part_info, + bool early) + { + Partition_helper::set_part_info_low(part_info, early); + } + + void + initialize_partitioning( + partition_info* part_info, + bool early) + { + Partition_helper::set_part_info_low(part_info, early); + } + + handler* + get_handler() + { + return(static_cast(this)); + } + /** @} */ + +private: + /** Pointer to Ha_innopart_share on the TABLE_SHARE. */ + Ha_innopart_share* m_part_share; + + /** ins_node per partition. Synchronized with prebuilt->ins_node + when changing partitions. */ + ins_node_t** m_ins_node_parts; + + /** upd_node per partition. Synchronized with prebuilt->upd_node + when changing partitions. */ + upd_node_t** m_upd_node_parts; + + /** blob_heap per partition. Synchronized with prebuilt->blob_heap + when changing partitions. */ + mem_heap_t** m_blob_heap_parts; + + /** trx_id from the partitions table->def_trx_id. Keep in sync + with prebuilt->trx_id when changing partitions. + prebuilt only reflects the current partition! 
*/ + trx_id_t* m_trx_id_parts; + + /** row_read_type per partition. */ + ulint* m_row_read_type_parts; + + /** sql_stat_start per partition. */ + uchar* m_sql_stat_start_parts; + + /** persistent cursors per partition. */ + btr_pcur_t* m_pcur_parts; + + /** persistent cluster cursors per partition. */ + btr_pcur_t* m_clust_pcur_parts; + + /** map from part_id to offset in above two arrays. */ + uint16_t* m_pcur_map; + + /** Original m_prebuilt->pcur. */ + btr_pcur_t* m_pcur; + + /** Original m_prebuilt->clust_pcur. */ + btr_pcur_t* m_clust_pcur; + + /** New partitions during ADD/REORG/... PARTITION. */ + Altered_partitions* m_new_partitions; + + /** Clear used ins_nodes and upd_nodes. */ + void + clear_ins_upd_nodes(); + + /** Clear the blob heaps for all partitions */ + void + clear_blob_heaps(); + + /** Reset state of file to after 'open'. This function is called + after every statement for all tables used by that statement. */ + int + reset(); + + /** Allocate the array to hold blob heaps for all partitions */ + mem_heap_t** + alloc_blob_heap_array(); + + /** Free the array that holds blob heaps for all partitions */ + void + free_blob_heap_array(); + + /** Changes the active index of a handle. + @param[in] part_id Use this partition. + @param[in] keynr Use this index; MAX_KEY means always + clustered index, even if it was internally generated by InnoDB. + @return 0 or error code. */ + int + change_active_index( + uint part_id, + uint keynr); + + /** Move to next partition and set its index. + @return 0 for success else error number. */ + int + next_partition_index(); + + /** Internally called for initializing auto increment value. + Should never be called, but defined to catch such errors. + @return 0 on success else error code. */ + int + innobase_initialize_autoinc(); + + /** Get the index for the current partition + @param[in] keynr MySQL index number. + @return InnoDB index or NULL. */ + dict_index_t* + innobase_get_index( + uint keynr); + + /** Get the index for a handle. + Does not change active index. + @param[in] keynr use this index; MAX_KEY means always clustered + index, even if it was internally generated by InnoDB. + @param[in] part_id From this partition. + @return NULL or index instance. */ + dict_index_t* + innopart_get_index( + uint part_id, + uint keynr); + + /** Change active partition. + Copies needed info into m_prebuilt from the partition specific memory. + @param[in] part_id Partition to set as active. */ + void + set_partition( + uint part_id); + + /** Update active partition. + Copies needed info from m_prebuilt into the partition specific memory. + @param[in] part_id Partition to set as active. */ + void + update_partition( + uint part_id); + + /** Helpers needed by Partition_helper, @see partition_handler.h @{ */ + + /** Set the autoinc column max value. + This should only be called once from ha_innobase::open(). + Therefore there's no need for a covering lock. + @param[in] no_lock If locking should be skipped. Not used! + @return 0 on success else error code. */ + int + initialize_auto_increment( + bool /* no_lock */); + + /** Save currently highest auto increment value. + @param[in] nr Auto increment value to save. */ + void + save_auto_increment( + ulonglong nr); + + /** Setup the ordered record buffer and the priority queue. + @param[in] used_parts Number of used partitions in query. + @return false for success, else true. */ + int + init_record_priority_queue_for_parts( + uint used_parts); + + /** Destroy the ordered record buffer and the priority queue. 
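+	Counterpart of init_record_priority_queue_for_parts() above.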
*/ + void + destroy_record_priority_queue_for_parts(); + + /** Prepare for creating new partitions during ALTER TABLE ... + PARTITION. + @param[in] num_partitions Number of new partitions to be created. + @param[in] only_create True if only creating the partition + (no open/lock is needed). + @return 0 for success else error code. */ + int + prepare_for_new_partitions( + uint num_partitions, + bool only_create); + + /** Create a new partition to be filled during ALTER TABLE ... + PARTITION. + @param[in] table Table to create the partition in. + @param[in] create_info Table/partition specific create info. + @param[in] part_name Partition name. + @param[in] new_part_id Partition id in new table. + @param[in] part_elem Partition element. + @return 0 for success else error code. */ + int + create_new_partition( + TABLE* table, + HA_CREATE_INFO* create_info, + const char* part_name, + uint new_part_id, + partition_element* part_elem); + + /** Close and finalize new partitions. */ + void + close_new_partitions(); + + /** write row to new partition. + @param[in] new_part New partition to write to. + @return 0 for success else error code. */ + int + write_row_in_new_part( + uint new_part); + + /** Write a row in specific partition. + Stores a row in an InnoDB database, to the table specified in this + handle. + @param[in] part_id Partition to write to. + @param[in] row A row in MySQL format. + @return error code. */ + int + write_row_in_part( + uint part_id, + uchar* row); + + /** Update a row in partition. + Updates a row given as a parameter to a new value. + @param[in] part_id Partition to update row in. + @param[in] old_row Old row in MySQL format. + @param[in] new_row New row in MySQL format. + @return error number or 0. */ + int + update_row_in_part( + uint part_id, + const uchar* old_row, + uchar* new_row); + + /** Deletes a row in partition. + @param[in] part_id Partition to delete from. + @param[in] row Row to delete in MySQL format. + @return error number or 0. */ + int + delete_row_in_part( + uint part_id, + const uchar* row); + + /** Return first record in index from a partition. + @param[in] part Partition to read from. + @param[out] record First record in index in the partition. + @return error number or 0. */ + int + index_first_in_part( + uint part, + uchar* record); + + /** Return last record in index from a partition. + @param[in] part Partition to read from. + @param[out] record Last record in index in the partition. + @return error number or 0. */ + int + index_last_in_part( + uint part, + uchar* record); + + /** Return previous record in index from a partition. + @param[in] part Partition to read from. + @param[out] record Last record in index in the partition. + @return error number or 0. */ + int + index_prev_in_part( + uint part, + uchar* record); + + /** Return next record in index from a partition. + @param[in] part Partition to read from. + @param[out] record Last record in index in the partition. + @return error number or 0. */ + int + index_next_in_part( + uint part, + uchar* record); + + /** Return next same record in index from a partition. + This routine is used to read the next record, but only if the key is + the same as supplied in the call. + @param[in] part Partition to read from. + @param[out] record Last record in index in the partition. + @param[in] key Key to match. + @param[in] length Length of key. + @return error number or 0. 
*/ + int + index_next_same_in_part( + uint part, + uchar* record, + const uchar* key, + uint length); + + /** Start index scan and return first record from a partition. + This routine starts an index scan using a start key. The calling + function will check the end key on its own. + @param[in] part Partition to read from. + @param[out] record First matching record in index in the partition. + @param[in] key Key to match. + @param[in] keypart_map Which part of the key to use. + @param[in] find_flag Key condition/direction to use. + @return error number or 0. */ + int + index_read_map_in_part( + uint part, + uchar* record, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + + /** Return last matching record in index from a partition. + @param[in] part Partition to read from. + @param[out] record Last matching record in index in the partition. + @param[in] key Key to match. + @param[in] keypart_map Which part of the key to use. + @return error number or 0. */ + int + index_read_last_map_in_part( + uint part, + uchar* record, + const uchar* key, + key_part_map keypart_map); + + /** Start index scan and return first record from a partition. + This routine starts an index scan using a start and end key. + @param[in] part Partition to read from. + @param[out] record First matching record in index in the partition. + if NULL use table->record[0] as return buffer. + @param[in] start_key Start key to match. + @param[in] end_key End key to match. + @param[in] eq_range Is equal range, start_key == end_key. + @param[in] sorted Return rows in sorted order. + @return error number or 0. */ + int + read_range_first_in_part( + uint part, + uchar* record, + const key_range* start_key, + const key_range* end_key, + bool eq_range, + bool sorted); + + /** Return next record in index range scan from a partition. + @param[in] part Partition to read from. + @param[out] record First matching record in index in the partition. + if NULL use table->record[0] as return buffer. + @return error number or 0. */ + int + read_range_next_in_part( + uint part, + uchar* record); + + /** Start index scan and return first record from a partition. + This routine starts an index scan using a start key. The calling + function will check the end key on its own. + @param[in] part Partition to read from. + @param[out] record First matching record in index in the partition. + @param[in] index Index to read from. + @param[in] key Key to match. + @param[in] keypart_map Which part of the key to use. + @param[in] find_flag Key condition/direction to use. + @return error number or 0. */ + int + index_read_idx_map_in_part( + uint part, + uchar* record, + uint index, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag); + + /** Initialize random read/scan of a specific partition. + @param[in] part_id Partition to initialize. + @param[in] table_scan True for scan else random access. + @return error number or 0. */ + int + rnd_init_in_part( + uint part_id, + bool table_scan); + + /** Get next row during scan of a specific partition. + @param[in] part_id Partition to read from. + @param[out] record Next row. + @return error number or 0. */ + int + rnd_next_in_part( + uint part_id, + uchar* record); + + /** End random read/scan of a specific partition. + @param[in] part_id Partition to end random read/scan. + @param[in] table_scan True for scan else random access. + @return error number or 0. 
*/ + int + rnd_end_in_part( + uint part_id, + bool table_scan); + + /** Get a reference to the current cursor position in the last used + partition. + @param[out] ref Reference (PK if exists else row_id). + @param[in] record Record to position. */ + void + position_in_last_part( + uchar* ref, + const uchar* record); + + /** Read record by given record (by its PK) from the last used partition. + see handler::rnd_pos_by_record(). + @param[in,out] record Record to position. + @return 0 or error number. */ + int + rnd_pos_by_record_in_last_part( + uchar* record) + { + /* Not much overhead to use default function. + This avoids out-of-sync code. */ + return(handler::rnd_pos_by_record(record)); + } + + /** Copy a cached MySQL record. + @param[out] to_record Where to copy the MySQL record. + @param[in] from_record Which record to copy. */ + void + copy_cached_row( + uchar* to_record, + const uchar* from_record); + /** @} */ + + /* Private handler:: functions specific for native InnoDB partitioning. + @see handler.h @{ */ + + int + open( + const char* name, + int mode, + uint test_if_locked); + + int + close(); + + double + scan_time(); + + /** Was the last returned row semi consistent read. + In an UPDATE or DELETE, if the row under the cursor was locked by + another transaction, and the engine used an optimistic read of the last + committed row value under the cursor, then the engine returns 1 from + this function. MySQL must NOT try to update this optimistic value. If + the optimistic value does not match the WHERE condition, MySQL can + decide to skip over this row. This can be used to avoid unnecessary + lock waits. + + If this method returns true, it will also signal the storage + engine that the next read will be a locking re-read of the row. + @see handler.h and row0mysql.h + @return true if last read was semi consistent else false. */ + bool was_semi_consistent_read(); + + /** Try semi consistent read. + Tell the engine whether it should avoid unnecessary lock waits. + If yes, in an UPDATE or DELETE, if the row under the cursor was locked + by another transaction, the engine may try an optimistic read of + the last committed row value under the cursor. + @see handler.h and row0mysql.h + @param[in] yes Should semi-consistent read be used. */ + void try_semi_consistent_read( + bool yes); + + /** Removes a lock on a row. + Removes a new lock set on a row, if it was not read optimistically. + This can be called after a row has been read in the processing of + an UPDATE or a DELETE query. @see ha_innobase::unlock_row(). */ + void unlock_row(); + + int + index_init( + uint index, + bool sorted); + + int + index_end(); + + int + rnd_init( + bool scan) + { + return(Partition_helper::ph_rnd_init(scan)); + } + + int + rnd_end() + { + return(Partition_helper::ph_rnd_end()); + } + + int + external_lock( + THD* thd, + int lock_type); + + int + write_row( + uchar* record) + { + return(Partition_helper::ph_write_row(record)); + } + + int + update_row( + const uchar* old_record, + uchar* new_record) + { + return(Partition_helper::ph_update_row(old_record, new_record)); + } + + int + delete_row( + const uchar* record) + { + return(Partition_helper::ph_delete_row(record)); + } + /** @} */ + + /** Truncate partition. + Called from Partition_handler::trunctate_partition(). */ + int + truncate_partition_low(); + + /** Change partitions according to ALTER TABLE ... PARTITION ... + Called from Partition_handler::change_partitions(). + @param[in] create_info Table create info. 
+ @param[in] path Path including db/table_name. + @param[out] copied Number of copied rows. + @param[out] deleted Number of deleted rows. + @return 0 for success or error code. */ + int + change_partitions_low( + HA_CREATE_INFO* create_info, + const char* path, + ulonglong* const copied, + ulonglong* const deleted) + { + return(Partition_helper::change_partitions( + create_info, + path, + copied, + deleted)); + } + + /** Access methods to protected areas in handler to avoid adding + friend class Partition_helper in class handler. + @see partition_handler.h @{ */ + + THD* + get_thd() const + { + return ha_thd(); + } + + TABLE* + get_table() const + { + return table; + } + + bool + get_eq_range() const + { + return eq_range; + } + + void + set_eq_range(bool eq_range_arg) + { + eq_range= eq_range_arg; + } + + void + set_range_key_part(KEY_PART_INFO *key_part) + { + range_key_part= key_part; + } + /** @} */ + + /** Fill in data_dir_path and tablespace name from internal data + dictionary. + @param part_elem Partition element to fill. + @param ib_table InnoDB table to copy from. */ + void + update_part_elem( + partition_element* part_elem, + dict_table_t* ib_table); +protected: + /* Protected handler:: functions specific for native InnoDB partitioning. + @see handler.h @{ */ + + int + rnd_next( + uchar* record) + { + return(Partition_helper::ph_rnd_next(record)); + } + + int + rnd_pos( + uchar* record, + uchar* pos); + + int + records( + ha_rows* num_rows); + + int + index_next( + uchar* record) + { + return(Partition_helper::ph_index_next(record)); + } + + int + index_next_same( + uchar* record, + const uchar* key, + uint keylen) + { + return(Partition_helper::ph_index_next_same(record, key, keylen)); + } + + int + index_prev( + uchar* record) + { + return(Partition_helper::ph_index_prev(record)); + } + + int + index_first( + uchar* record) + { + return(Partition_helper::ph_index_first(record)); + } + + int + index_last( + uchar* record) + { + return(Partition_helper::ph_index_last(record)); + } + + int + index_read_last_map( + uchar* record, + const uchar* key, + key_part_map keypart_map) + { + return(Partition_helper::ph_index_read_last_map( + record, + key, + keypart_map)); + } + + int + index_read_map( + uchar* buf, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) + { + return(Partition_helper::ph_index_read_map( + buf, + key, + keypart_map, + find_flag)); + } + + int + index_read_idx_map( + uchar* buf, + uint index, + const uchar* key, + key_part_map keypart_map, + enum ha_rkey_function find_flag) + { + return(Partition_helper::ph_index_read_idx_map( + buf, + index, + key, + keypart_map, + find_flag)); + } + /** @} */ + + /** Updates and return statistics. + Returns statistics information of the table to the MySQL interpreter, + in various fields of the handle object. + @param[in] flag Flags for what to update and return. + @param[in] is_analyze True if called from ::analyze(). + @return HA_ERR_* error code or 0. 
*/ + int + info_low( + uint flag, + bool is_analyze); +}; +#endif /* ha_innopart_h */ diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index bb7efa104fa..5f421d485cf 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -22,40 +22,54 @@ this program; if not, write to the Free Software Foundation, Inc., Smart ALTER TABLE *******************************************************/ -#include -#include -#include -#include +/* Include necessary SQL headers */ +#include "ha_prototypes.h" #include -#include -#include +#include +#include #include #include +#include +/* Include necessary InnoDB headers */ +#include "btr0sea.h" #include "dict0crea.h" #include "dict0dict.h" #include "dict0priv.h" #include "dict0stats.h" #include "dict0stats_bg.h" +#include "fsp0sysspace.h" #include "log0log.h" #include "rem0types.h" #include "row0log.h" #include "row0merge.h" -#include "srv0srv.h" #include "trx0trx.h" #include "trx0roll.h" -#include "ha_prototypes.h" #include "handler0alter.h" #include "srv0mon.h" #include "fts0priv.h" +#include "fts0plugin.h" #include "pars0pars.h" #include "row0sel.h" #include "ha_innodb.h" +#include "ut0new.h" +#include "ut0stage.h" +#ifdef WITH_WSREP +//#include "wsrep_api.h" +#include // PROCESS_ACL +#endif +/* For supporting Native InnoDB Partitioning. */ +/* JAN: TODO: MySQL 5.7 +#include "partition_info.h" +#include "ha_innopart.h" +*/ /** Operations for creating secondary indexes (no rebuild needed) */ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE = Alter_inplace_info::ADD_INDEX | Alter_inplace_info::ADD_UNIQUE_INDEX; + // JAN: TODO: MySQL 5.7 + // | Alter_inplace_info::ADD_SPATIAL_INDEX; /** Operations for rebuilding a table in place */ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_REBUILD @@ -68,10 +82,14 @@ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_REBUILD | Alter_inplace_info::ALTER_COLUMN_ORDER | Alter_inplace_info::DROP_COLUMN | Alter_inplace_info::ADD_COLUMN + /* JAN: TODO: MySQL 5.7 + | Alter_inplace_info::ALTER_STORED_COLUMN_ORDER + | Alter_inplace_info::DROP_STORED_COLUMN + | Alter_inplace_info::ADD_STORED_COLUMN + */ | Alter_inplace_info::RECREATE_TABLE /* - | Alter_inplace_info::ALTER_COLUMN_TYPE - | Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH + | Alter_inplace_info::ALTER_STORED_COLUMN_TYPE */ ; @@ -98,7 +116,165 @@ static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ALTER_NOREBUILD | INNOBASE_FOREIGN_OPERATIONS | Alter_inplace_info::DROP_INDEX | Alter_inplace_info::DROP_UNIQUE_INDEX - | Alter_inplace_info::ALTER_COLUMN_NAME; + /* JAN: TODO: MySQL 5.7 + | Alter_inplace_info::RENAME_INDEX + */ + | Alter_inplace_info::ALTER_COLUMN_NAME + | Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH; + /* JAN: TODO: MySQL 5.7 + | Alter_inplace_info::ALTER_INDEX_COMMENT + | Alter_inplace_info::ADD_VIRTUAL_COLUMN + | Alter_inplace_info::DROP_VIRTUAL_COLUMN + | Alter_inplace_info::ALTER_VIRTUAL_COLUMN_ORDER; + */ + /* | Alter_inplace_info::ALTER_VIRTUAL_COLUMN_TYPE; */ + +struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx +{ + /** Dummy query graph */ + que_thr_t* thr; + /** The prebuilt struct of the creating instance */ + row_prebuilt_t*& prebuilt; + /** InnoDB indexes being created */ + dict_index_t** add_index; + /** MySQL key numbers for the InnoDB indexes that are being created */ + const ulint* add_key_numbers; + /** number of InnoDB indexes being created */ + ulint num_to_add_index; 
+ /** InnoDB indexes being dropped */ + dict_index_t** drop_index; + /** number of InnoDB indexes being dropped */ + const ulint num_to_drop_index; + /** InnoDB indexes being renamed */ + dict_index_t** rename; + /** number of InnoDB indexes being renamed */ + const ulint num_to_rename; + /** InnoDB foreign key constraints being dropped */ + dict_foreign_t** drop_fk; + /** number of InnoDB foreign key constraints being dropped */ + const ulint num_to_drop_fk; + /** InnoDB foreign key constraints being added */ + dict_foreign_t** add_fk; + /** number of InnoDB foreign key constraints being dropped */ + const ulint num_to_add_fk; + /** whether to create the indexes online */ + bool online; + /** memory heap */ + mem_heap_t* heap; + /** dictionary transaction */ + trx_t* trx; + /** original table (if rebuilt, differs from indexed_table) */ + dict_table_t* old_table; + /** table where the indexes are being created or dropped */ + dict_table_t* new_table; + /** mapping of old column numbers to new ones, or NULL */ + const ulint* col_map; + /** new column names, or NULL if nothing was renamed */ + const char** col_names; + /** added AUTO_INCREMENT column position, or ULINT_UNDEFINED */ + const ulint add_autoinc; + /** default values of ADD COLUMN, or NULL */ + const dtuple_t* add_cols; + /** autoinc sequence to use */ + ib_sequence_t sequence; + /** maximum auto-increment value */ + ulonglong max_autoinc; + /** temporary table name to use for old table when renaming tables */ + const char* tmp_name; + /** whether the order of the clustered index is unchanged */ + bool skip_pk_sort; + /** number of virtual columns to be added */ + ulint num_to_add_vcol; + /** virtual columns to be added */ + dict_v_col_t* add_vcol; + const char** add_vcol_name; + /** number of virtual columns to be dropped */ + ulint num_to_drop_vcol; + /** virtual columns to be dropped */ + dict_v_col_t* drop_vcol; + const char** drop_vcol_name; + /** ALTER TABLE stage progress recorder */ + ut_stage_alter_t* m_stage; + + ha_innobase_inplace_ctx(row_prebuilt_t*& prebuilt_arg, + dict_index_t** drop_arg, + ulint num_to_drop_arg, + dict_index_t** rename_arg, + ulint num_to_rename_arg, + dict_foreign_t** drop_fk_arg, + ulint num_to_drop_fk_arg, + dict_foreign_t** add_fk_arg, + ulint num_to_add_fk_arg, + bool online_arg, + mem_heap_t* heap_arg, + dict_table_t* new_table_arg, + const char** col_names_arg, + ulint add_autoinc_arg, + ulonglong autoinc_col_min_value_arg, + ulonglong autoinc_col_max_value_arg, + ulint num_to_drop_vcol_arg) : + inplace_alter_handler_ctx(), + prebuilt (prebuilt_arg), + add_index (0), add_key_numbers (0), num_to_add_index (0), + drop_index (drop_arg), num_to_drop_index (num_to_drop_arg), + rename (rename_arg), num_to_rename (num_to_rename_arg), + drop_fk (drop_fk_arg), num_to_drop_fk (num_to_drop_fk_arg), + add_fk (add_fk_arg), num_to_add_fk (num_to_add_fk_arg), + online (online_arg), heap (heap_arg), trx (0), + old_table (prebuilt_arg->table), + new_table (new_table_arg), + col_map (0), col_names (col_names_arg), + add_autoinc (add_autoinc_arg), + add_cols (0), + sequence(prebuilt->trx->mysql_thd, + autoinc_col_min_value_arg, autoinc_col_max_value_arg), + max_autoinc (0), + tmp_name (0), + skip_pk_sort(false), + num_to_add_vcol(0), + add_vcol(0), + add_vcol_name(0), + num_to_drop_vcol(0), + drop_vcol(0), + drop_vcol_name(0), + m_stage(NULL) + { +#ifdef UNIV_DEBUG + for (ulint i = 0; i < num_to_add_index; i++) { + ut_ad(!add_index[i]->to_be_dropped); + } + for (ulint i = 0; i < num_to_drop_index; i++) { + 
ut_ad(drop_index[i]->to_be_dropped); + } +#endif /* UNIV_DEBUG */ + + thr = pars_complete_graph_for_exec(NULL, prebuilt->trx, heap); + } + + ~ha_innobase_inplace_ctx() + { + UT_DELETE(m_stage); + mem_heap_free(heap); + } + + /** Determine if the table will be rebuilt. + @return whether the table will be rebuilt */ + bool need_rebuild () const { return(old_table != new_table); } + +private: + // Disable copying + ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&); + ha_innobase_inplace_ctx& operator=(const ha_innobase_inplace_ctx&); +}; + +/********************************************************************//** +Get the upper limit of the MySQL integral and floating-point type. +@return maximum allowed value for the field */ +UNIV_INTERN +ulonglong +innobase_get_int_col_max_value( +/*===========================*/ + const Field* field); /*!< in: MySQL field */ /* Report an InnoDB error to the client by invoking my_error(). */ static UNIV_COLD MY_ATTRIBUTE((nonnull)) @@ -132,11 +308,15 @@ my_error_innodb( my_error(ER_RECORD_FILE_FULL, MYF(0), table); ut_error; break; - case DB_TEMP_FILE_WRITE_FAILURE: + case DB_TEMP_FILE_WRITE_FAIL: + /* JAN: TODO: MySQL 5.7 my_error(ER_GET_ERRMSG, MYF(0), DB_TEMP_FILE_WRITE_FAILURE, ut_strerr(DB_TEMP_FILE_WRITE_FAILURE), "InnoDB"); + */ + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + break; case DB_TOO_BIG_INDEX_COL: my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0), @@ -155,8 +335,11 @@ my_error_innodb( my_error(ER_NOT_KEYFILE, MYF(0), table); break; case DB_TOO_BIG_RECORD: + /* We limit max record size to 16k for 64k page size. */ my_error(ER_TOO_BIG_ROWSIZE, MYF(0), - page_get_free_space_of_empty( + srv_page_size == UNIV_PAGE_SIZE_MAX + ? REC_MAX_DATA_SIZE - 1 + : page_get_free_space_of_empty( flags & DICT_TF_COMPACT) / 2); break; case DB_INVALID_NULL: @@ -166,6 +349,9 @@ my_error_innodb( case DB_TABLESPACE_EXISTS: my_error(ER_TABLESPACE_EXISTS, MYF(0), table); break; + case DB_CANT_CREATE_GEOMETRY_OBJECT: + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); + break; #ifdef UNIV_DEBUG case DB_SUCCESS: @@ -181,8 +367,8 @@ my_error_innodb( } /** Determine if fulltext indexes exist in a given table. -@param table MySQL table -@return whether fulltext indexes exist on the table */ +@param table MySQL table +@return whether fulltext indexes exist on the table */ static bool innobase_fulltext_exist( @@ -198,9 +384,27 @@ innobase_fulltext_exist( return(false); } +/** Determine if spatial indexes exist in a given table. +@param table MySQL table +@return whether spatial indexes exist on the table */ +static +bool +innobase_spatial_exist( +/*===================*/ + const TABLE* table) +{ + for (uint i = 0; i < table->s->keys; i++) { + if (table->key_info[i].flags & HA_SPATIAL) { + return(true); + } + } + + return(false); +} + /*******************************************************************//** Determine if ALTER TABLE needs to rebuild the table. -@param ha_alter_info the DDL operation +@param ha_alter_info the DDL operation @param altered_table MySQL original table @return whether it is necessary to rebuild the table */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) @@ -214,9 +418,12 @@ innobase_need_rebuild( == Alter_inplace_info::CHANGE_CREATE_OPTION && !(ha_alter_info->create_info->used_fields & (HA_CREATE_USED_ROW_FORMAT - | HA_CREATE_USED_KEY_BLOCK_SIZE))) { + | HA_CREATE_USED_KEY_BLOCK_SIZE))) { + // JAN: TODO: MySQL 5.7 + // | HA_CREATE_USED_TABLESPACE))) { /* Any other CHANGE_CREATE_OPTION than changing - ROW_FORMAT or KEY_BLOCK_SIZE is ignored. 
*/ + ROW_FORMAT, KEY_BLOCK_SIZE or TABLESPACE can be done + without rebuilding the table. */ return(false); } @@ -247,25 +454,109 @@ innobase_need_rebuild( } } } - + return(!!(ha_alter_info->handler_flags & INNOBASE_ALTER_REBUILD)); } +/** Check if virtual column in old and new table are in order, excluding +those dropped column. This is needed because when we drop a virtual column, +ALTER_VIRTUAL_COLUMN_ORDER is also turned on, so we can't decide if this +is a real ORDER change or just DROP COLUMN +@param[in] table old TABLE +@param[in] altered_table new TABLE +@param[in] ha_alter_info Structure describing changes to be done +by ALTER TABLE and holding data used during in-place alter. +@return true is all columns in order, false otherwise. */ +static +bool +check_v_col_in_order( + const TABLE* table, + const TABLE* altered_table, + Alter_inplace_info* ha_alter_info) +{ + ulint j = 0; + + /* directly return true if ALTER_VIRTUAL_COLUMN_ORDER is not on */ + /* JAN: TODO: MySQL 5.7 + if (!(ha_alter_info->handler_flags + & Alter_inplace_info::ALTER_VIRTUAL_COLUMN_ORDER)) { + return(true); + } + */ + + for (ulint i = 0; i < table->s->fields; i++) { + Field* field = table->s->field[i]; + bool dropped = false; + Alter_drop* drop; + + if (field->stored_in_db()) { + continue; + } + + ut_ad(innobase_is_v_fld(field)); + + /* Check if this column is in drop list */ + List_iterator_fast cf_it( + ha_alter_info->alter_info->drop_list); + + while ((drop = (cf_it++)) != NULL) { + if (my_strcasecmp(system_charset_info, + field->field_name, drop->name) == 0) { + dropped = true; + break; + } + } + + if (dropped) { + continue; + } + + /* Now check if the next virtual column in altered table + matches this column */ + while (j < altered_table->s->fields) { + Field* new_field = altered_table->s->field[j]; + + if (new_field->stored_in_db()) { + j++; + continue; + } + + if (my_strcasecmp(system_charset_info, + field->field_name, + new_field->field_name) != 0) { + /* different column */ + return(false); + } else { + j++; + break; + } + } + + if (j > altered_table->s->fields) { + /* there should not be less column in new table + without them being in drop list */ + ut_ad(0); + return(false); + } + } + + return(true); +} /** Check if InnoDB supports a particular alter table in-place -@param altered_table TABLE object for new version of table. -@param ha_alter_info Structure describing changes to be done +@param altered_table TABLE object for new version of table. +@param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. -@retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported -@retval HA_ALTER_INPLACE_NO_LOCK Supported +@retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported +@retval HA_ALTER_INPLACE_NO_LOCK Supported @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE Supported, but requires lock during main phase and exclusive lock during prepare phase. 
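As a rough, standalone illustration of how flag sets such as INNOBASE_ALTER_REBUILD and INNOBASE_ALTER_NOREBUILD above are consumed, the sketch below uses made-up flag names and result values (not the real Alter_inplace_info API) to show the kind of bitmask classification that decides between rejecting an operation, running it in place without a rebuild, and running it with a rebuild:

#include <cstdint>

typedef std::uint64_t alter_flags_t;

/* Illustrative stand-ins for Alter_inplace_info::HA_ALTER_FLAGS bits. */
static const alter_flags_t F_ADD_INDEX         = 1ULL << 0;
static const alter_flags_t F_ADD_PK_INDEX      = 1ULL << 1;
static const alter_flags_t F_DROP_COLUMN       = 1ULL << 2;
static const alter_flags_t F_ALTER_COLUMN_NAME = 1ULL << 3;

/* Operations that force a table rebuild vs. those that do not. */
static const alter_flags_t F_REBUILD_OPS   = F_ADD_PK_INDEX | F_DROP_COLUMN;
static const alter_flags_t F_NOREBUILD_OPS = F_ADD_INDEX | F_ALTER_COLUMN_NAME;

enum sketch_result {
	SKETCH_NOT_SUPPORTED,		/* fall back to copy ALTER */
	SKETCH_INPLACE_NOREBUILD,
	SKETCH_INPLACE_REBUILD
};

sketch_result
classify_alter(alter_flags_t requested)
{
	if (requested & ~(F_REBUILD_OPS | F_NOREBUILD_OPS)) {
		/* Something outside both supported sets. */
		return(SKETCH_NOT_SUPPORTED);
	}

	return((requested & F_REBUILD_OPS)
	       ? SKETCH_INPLACE_REBUILD
	       : SKETCH_INPLACE_NOREBUILD);
}

/* classify_alter(F_ADD_INDEX) == SKETCH_INPLACE_NOREBUILD,
   classify_alter(F_ADD_PK_INDEX | F_ADD_INDEX) == SKETCH_INPLACE_REBUILD. */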
-@retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE Supported, prepare phase +@retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE Supported, prepare phase requires exclusive lock (any transactions that have accessed the table must commit or roll back first, and no transactions can access the table while prepare_inplace_alter_table() is executing) */ -UNIV_INTERN + enum_alter_inplace_result ha_innobase::check_if_supported_inplace_alter( /*==========================================*/ @@ -274,14 +565,15 @@ ha_innobase::check_if_supported_inplace_alter( { DBUG_ENTER("check_if_supported_inplace_alter"); - if (high_level_read_only) { - ha_alter_info->unsupported_reason = + if (high_level_read_only + || srv_sys_space.created_new_raw() + || srv_force_recovery) { + ha_alter_info->unsupported_reason = (srv_force_recovery)? + innobase_get_err_msg(ER_READ_ONLY_MODE): + // JAN: TODO: MySQL 5.7 + // innobase_get_err_msg(ER_INNODB_FORCED_RECOVERY): innobase_get_err_msg(ER_READ_ONLY_MODE); - DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); - } else if (srv_created_new_raw || srv_force_recovery) { - ha_alter_info->unsupported_reason = - innobase_get_err_msg(ER_READ_ONLY_MODE); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } @@ -296,7 +588,7 @@ ha_innobase::check_if_supported_inplace_alter( } update_thd(); - trx_search_latch_release_if_reserved(prebuilt->trx); + trx_search_latch_release_if_reserved(m_prebuilt->trx); /* Change on engine specific table options require rebuild of the table */ @@ -326,24 +618,31 @@ ha_innobase::check_if_supported_inplace_alter( | INNOBASE_ALTER_NOREBUILD | INNOBASE_ALTER_REBUILD)) { + /* JAN: TODO: MySQL 5.7 if (ha_alter_info->handler_flags - & (Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH - | Alter_inplace_info::ALTER_COLUMN_TYPE)) + & Alter_inplace_info::ALTER_STORED_COLUMN_TYPE) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE); + } + */ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } /* Only support online add foreign key constraint when check_foreigns is turned off */ - if ((ha_alter_info->handler_flags - & Alter_inplace_info::ADD_FOREIGN_KEY) - && prebuilt->trx->check_foreigns) { + if ((ha_alter_info->handler_flags & Alter_inplace_info::ADD_FOREIGN_KEY) + && m_prebuilt->trx->check_foreigns) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } + if (altered_table->file->ht != ht) { + /* Non-native partitioning table engine. No longer supported, + due to implementation of native InnoDB partitioning. */ + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) { DBUG_RETURN(HA_ALTER_INPLACE_NO_LOCK); } @@ -354,7 +653,7 @@ ha_innobase::check_if_supported_inplace_alter( NULL to a NOT NULL value. */ if ((ha_alter_info->handler_flags & Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE) - && !thd_is_strict_mode(user_thd)) { + && !thd_is_strict_mode(m_user_thd)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); @@ -386,14 +685,14 @@ ha_innobase::check_if_supported_inplace_alter( /* If a column change from NOT NULL to NULL, and there's a implict pk on this column. the table should be rebuild. The change should - only go through the "Copy" method.*/ + only go through the "Copy" method. 
*/ if ((ha_alter_info->handler_flags & Alter_inplace_info::ALTER_COLUMN_NULLABLE)) { - uint primary_key = altered_table->s->primary_key; + const uint my_primary_key = altered_table->s->primary_key; - /* See if MYSQL table has no pk but we do.*/ - if (UNIV_UNLIKELY(primary_key >= MAX_KEY) - && !row_table_got_default_clust_index(prebuilt->table)) { + /* See if MYSQL table has no pk but we do. */ + if (UNIV_UNLIKELY(my_primary_key >= MAX_KEY) + && !row_table_got_default_clust_index(m_prebuilt->table)) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_PRIMARY_CANT_HAVE_NULL); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); @@ -413,10 +712,13 @@ ha_innobase::check_if_supported_inplace_alter( */ for (ulint i = 0, icol= 0; i < table->s->fields; i++) { const Field* field = table->field[i]; - const dict_col_t* col = dict_table_get_nth_col(prebuilt->table, icol); + const dict_col_t* col = dict_table_get_nth_col(m_prebuilt->table, icol); ulint unsigned_flag; - if (!field->stored_in_db()) + + if (!field->stored_in_db()) { continue; + } + icol++; if (col->mtype != get_innobase_type_from_mysql_type(&unsigned_flag, field)) { @@ -459,20 +761,74 @@ ha_innobase::check_if_supported_inplace_alter( } } - ulint n_indexes = UT_LIST_GET_LEN((prebuilt->table)->indexes); + ulint n_indexes = UT_LIST_GET_LEN((m_prebuilt->table)->indexes); /* If InnoDB dictionary and MySQL frm file are not consistent use "Copy" method. */ - if (prebuilt->table->dict_frm_mismatch) { + if (m_prebuilt->table->dict_frm_mismatch) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_NO_SUCH_INDEX); - ib_push_frm_error(user_thd, prebuilt->table, altered_table, + ib_push_frm_error(m_user_thd, m_prebuilt->table, altered_table, n_indexes, true); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } + /* If there is add or drop virtual columns, we will support operations + with these 2 options alone with inplace interface for now */ + /* JAN: TODO: MySQL 5.7 + if (ha_alter_info->handler_flags + & (Alter_inplace_info::ADD_VIRTUAL_COLUMN + | Alter_inplace_info::DROP_VIRTUAL_COLUMN + | Alter_inplace_info::ALTER_VIRTUAL_COLUMN_ORDER)) { + ulint flags = ha_alter_info->handler_flags; + */ + /* TODO: uncomment the flags below, once we start to + support them */ + /* + flags &= ~(Alter_inplace_info::ADD_VIRTUAL_COLUMN + | Alter_inplace_info::DROP_VIRTUAL_COLUMN + | Alter_inplace_info::ALTER_VIRTUAL_COLUMN_ORDER + */ + /* + | Alter_inplace_info::ALTER_STORED_COLUMN_ORDER + | Alter_inplace_info::ADD_STORED_COLUMN + | Alter_inplace_info::DROP_STORED_COLUMN + | Alter_inplace_info::ALTER_STORED_COLUMN_ORDER + | Alter_inplace_info::ADD_UNIQUE_INDEX + */ + /* + | Alter_inplace_info::ADD_INDEX + | Alter_inplace_info::DROP_INDEX); + + if (flags != 0 + || (altered_table->s->partition_info_str + && altered_table->s->partition_info_str_len) + || (!check_v_col_in_order( + this->table, altered_table, ha_alter_info))) { + ha_alter_info->unsupported_reason = + innobase_get_err_msg( + ER_UNSUPPORTED_ALTER_INPLACE_ON_VIRTUAL_COLUMN); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + */ + /* Do not support inplace alter table drop virtual + columns and add index together yet */ + /* + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ADD_INDEX) + && (ha_alter_info->handler_flags + & Alter_inplace_info::DROP_VIRTUAL_COLUMN)) { + + ha_alter_info->unsupported_reason = + innobase_get_err_msg( + ER_UNSUPPORTED_ALTER_INPLACE_ON_VIRTUAL_COLUMN); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + } + */ + /* We should be able to do the operation 
in-place. See if we can do it online (LOCK=NONE). */ bool online = true; @@ -485,6 +841,7 @@ ha_innobase::check_if_supported_inplace_alter( new_key < ha_alter_info->key_info_buffer + ha_alter_info->key_count; new_key++) { + for (KEY_PART_INFO* key_part = new_key->key_part; key_part < new_key->key_part + new_key->user_defined_key_parts; key_part++) { @@ -507,7 +864,7 @@ ha_innobase::check_if_supported_inplace_alter( /* In some special cases InnoDB emits "false" duplicate key errors with NULL key values. Let us play safe and ensure that we can correctly - print key values even in such cases .*/ + print key values even in such cases. */ key_part->null_offset = key_part->field->null_offset(); key_part->null_bit = key_part->field->null_bit; @@ -522,7 +879,7 @@ ha_innobase::check_if_supported_inplace_alter( /* We cannot replace a hidden FTS_DOC_ID with a user-visible FTS_DOC_ID. */ - if (prebuilt->table->fts + if (m_prebuilt->table->fts && innobase_fulltext_exist(altered_table) && !my_strcasecmp( system_charset_info, @@ -547,15 +904,32 @@ ha_innobase::check_if_supported_inplace_alter( ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC); online = false; } + + if (innobase_is_v_fld(key_part->field)) { + online = false; + } } } - DBUG_ASSERT(!prebuilt->table->fts || prebuilt->table->fts->doc_col + DBUG_ASSERT(!m_prebuilt->table->fts || m_prebuilt->table->fts->doc_col <= table->s->stored_fields); - DBUG_ASSERT(!prebuilt->table->fts || prebuilt->table->fts->doc_col - < dict_table_get_n_user_cols(prebuilt->table)); + DBUG_ASSERT(!m_prebuilt->table->fts || m_prebuilt->table->fts->doc_col + < dict_table_get_n_user_cols(m_prebuilt->table)); + + /* + if (ha_alter_info->handler_flags + & Alter_inplace_info::ADD_SPATIAL_INDEX) { + online = false; + } - if (prebuilt->table->fts + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ADD_VIRTUAL_COLUMN) + && (ha_alter_info->handler_flags + & Alter_inplace_info::ADD_INDEX)) { + online = false; + } + */ + if (m_prebuilt->table->fts && innobase_fulltext_exist(altered_table)) { /* FULLTEXT indexes are supposed to remain. */ /* Disallow DROP INDEX FTS_DOC_ID_INDEX */ @@ -592,7 +966,7 @@ ha_innobase::check_if_supported_inplace_alter( } } - prebuilt->trx->will_lock++; + m_prebuilt->trx->will_lock++; if (!online) { /* We already determined that only a non-locking @@ -600,19 +974,30 @@ ha_innobase::check_if_supported_inplace_alter( } else if (((ha_alter_info->handler_flags & Alter_inplace_info::ADD_PK_INDEX) || innobase_need_rebuild(ha_alter_info, table)) - && (innobase_fulltext_exist(altered_table))) { + && (innobase_fulltext_exist(altered_table) + || innobase_spatial_exist(altered_table))) { /* Refuse to rebuild the table online, if - fulltext indexes are to survive the rebuild. */ + FULLTEXT OR SPATIAL indexes are to survive the rebuild. */ online = false; /* If the table already contains fulltext indexes, refuse to rebuild the table natively altogether. 
*/ - if (prebuilt->table->fts) { + if (m_prebuilt->table->fts) { ha_alter_info->unsupported_reason = innobase_get_err_msg( ER_INNODB_FT_LIMIT); DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); } - ha_alter_info->unsupported_reason = innobase_get_err_msg( - ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + + /* + if (innobase_spatial_exist(altered_table)) { + ha_alter_info->unsupported_reason = + innobase_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_GIS); + } else { + */ + ha_alter_info->unsupported_reason = + innobase_get_err_msg( + ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS); + //} } else if ((ha_alter_info->handler_flags & Alter_inplace_info::ADD_INDEX)) { /* Building a full-text index requires a lock. @@ -678,12 +1063,12 @@ innobase_init_foreign( same MySQL 'database' as the table itself. We store the name to foreign->id. */ - db_len = dict_get_db_name_len(table->name); + db_len = dict_get_db_name_len(table->name.m_name); foreign->id = static_cast(mem_heap_alloc( foreign->heap, db_len + strlen(constraint_name) + 2)); - ut_memcpy(foreign->id, table->name, db_len); + ut_memcpy(foreign->id, table->name.m_name, db_len); foreign->id[db_len] = '/'; strcpy(foreign->id + db_len + 1, constraint_name); @@ -698,7 +1083,7 @@ innobase_init_foreign( foreign->foreign_table = table; foreign->foreign_table_name = mem_heap_strdup( - foreign->heap, table->name); + foreign->heap, table->name.m_name); dict_mem_foreign_table_name_lookup_set(foreign, TRUE); foreign->foreign_index = index; @@ -778,6 +1163,7 @@ innobase_set_foreign_key_option( ut_ad(!foreign->type); switch (fk_key->delete_opt) { + // JAN: TODO: ? MySQL 5.7 used enum fk_option directly from sql_lex.h case Foreign_key::FK_OPTION_NO_ACTION: case Foreign_key::FK_OPTION_RESTRICT: case Foreign_key::FK_OPTION_DEFAULT: @@ -811,7 +1197,7 @@ innobase_set_foreign_key_option( /*******************************************************************//** Check if a foreign key constraint can make use of an index that is being created. -@return useable index, or NULL if none found */ +@return useable index, or NULL if none found */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) const KEY* innobase_find_equiv_index( @@ -826,7 +1212,8 @@ innobase_find_equiv_index( for (uint i = 0; i < n_add; i++) { const KEY* key = &keys[add[i]]; - if (key->user_defined_key_parts < n_cols) { + if (key->user_defined_key_parts < n_cols + || key->flags & HA_SPATIAL) { no_match: continue; } @@ -836,6 +1223,12 @@ no_match: uint32 col_len = key_part.field->pack_length(); + /* Any index on virtual columns cannot be used + for reference constaint */ + if (innobase_is_v_fld(key_part.field)) { + goto no_match; + } + /* The MySQL pack length contains 1 or 2 bytes length field for a true VARCHAR. */ @@ -891,6 +1284,7 @@ innobase_find_fk_index( while (index != NULL) { if (!(index->type & DICT_FTS) + && !dict_index_has_virtual(index) && dict_foreign_qualify_index( table, col_names, columns, n_cols, index, NULL, true, 0, @@ -941,12 +1335,16 @@ innobase_get_foreign_key_info( ulint num_fk = 0; Alter_info* alter_info = ha_alter_info->alter_info; + DBUG_ENTER("innobase_get_foreign_key_info"); + *n_add_fk = 0; List_iterator key_iterator(alter_info->key_list); while ((key=key_iterator++)) { if (key->type != Key::FOREIGN_KEY) { + // JAN: TODO: ? 
+ // if (key->type != KEYTYPE_FOREIGN) { continue; } @@ -1014,7 +1412,7 @@ innobase_get_foreign_key_info( add_fk[num_fk] = dict_mem_foreign_create(); -#ifndef __WIN__ +#ifndef _WIN32 if(fk_key->ref_db.str) { tablename_to_filename(fk_key->ref_db.str, db_name, MAX_DATABASE_NAME_LEN); @@ -1046,7 +1444,7 @@ innobase_get_foreign_key_info( mutex_enter(&dict_sys->mutex); referenced_table_name = dict_get_referenced_table( - table->name, + table->name.m_name, db_namep, db_name_len, tbl_namep, @@ -1152,7 +1550,7 @@ innobase_get_foreign_key_info( *n_add_fk = num_fk; - return(true); + DBUG_RETURN(true); err_exit: for (ulint i = 0; i <= num_fk; i++) { if (add_fk[i]) { @@ -1160,7 +1558,7 @@ err_exit: } } - return(false); + DBUG_RETURN(false); } /*************************************************************//** @@ -1214,6 +1612,8 @@ innobase_col_to_mysql( memcpy(dest, data, len); break; + case DATA_VAR_POINT: + case DATA_GEOMETRY: case DATA_BLOB: /* Skip MySQL BLOBs when reporting an erroneous row during index creation or table rebuild. */ @@ -1237,6 +1637,7 @@ innobase_col_to_mysql( case DATA_FLOAT: case DATA_DOUBLE: case DATA_DECIMAL: + case DATA_POINT: /* Above are the valid column types for MySQL data. */ ut_ad(flen == len); /* fall through */ @@ -1254,7 +1655,6 @@ innobase_col_to_mysql( /*************************************************************//** Copies an InnoDB record to table->record[0]. */ -UNIV_INTERN void innobase_rec_to_mysql( /*==================*/ @@ -1276,14 +1676,15 @@ innobase_rec_to_mysql( ulint ipos; ulint ilen; const uchar* ifield; + ulint prefix_col; while (!((field= table->field[sql_idx])->stored_in_db())) sql_idx++; field->reset(); - ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE, - NULL); + ipos = dict_index_get_nth_col_or_prefix_pos( + index, i, true, false, &prefix_col); if (ipos == ULINT_UNDEFINED || rec_offs_nth_extern(offsets, ipos)) { @@ -1311,7 +1712,6 @@ null_field: /*************************************************************//** Copies an InnoDB index entry to table->record[0]. */ -UNIV_INTERN void innobase_fields_to_mysql( /*=====================*/ @@ -1321,22 +1721,34 @@ innobase_fields_to_mysql( { uint n_fields = table->s->stored_fields; uint sql_idx = 0; + ulint num_v = 0; ut_ad(n_fields == dict_table_get_n_user_cols(index->table) + + dict_table_get_n_v_cols(index->table) - !!(DICT_TF2_FLAG_IS_SET(index->table, DICT_TF2_FTS_HAS_DOC_ID))); for (uint i = 0; i < n_fields; i++, sql_idx++) { Field* field; ulint ipos; + ulint col_n; + ulint prefix_col; while (!((field= table->field[sql_idx])->stored_in_db())) sql_idx++; field->reset(); - ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE, - NULL); + if (innobase_is_v_fld(field)) { + col_n = num_v; + num_v++; + } else { + col_n = i - num_v; + } + + ipos = dict_index_get_nth_col_or_prefix_pos( + index, col_n, true, innobase_is_v_fld(field), + &prefix_col); if (ipos == ULINT_UNDEFINED || dfield_is_ext(&fields[ipos]) @@ -1359,7 +1771,6 @@ innobase_fields_to_mysql( /*************************************************************//** Copies an InnoDB row to table->record[0]. */ -UNIV_INTERN void innobase_row_to_mysql( /*==================*/ @@ -1373,6 +1784,7 @@ innobase_row_to_mysql( /* The InnoDB row may contain an extra FTS_DOC_ID column at the end. 
*/ ut_ad(row->n_fields == dict_table_get_n_cols(itab)); ut_ad(n_fields == row->n_fields - DATA_N_SYS_COLS + + dict_table_get_n_v_cols(itab) - !!(DICT_TF2_FLAG_IS_SET(itab, DICT_TF2_FTS_HAS_DOC_ID))); for (uint i = 0; i < n_fields; i++, sql_idx++) { @@ -1399,7 +1811,6 @@ innobase_row_to_mysql( /*************************************************************//** Resets table->record[0]. */ -UNIV_INTERN void innobase_rec_reset( /*===============*/ @@ -1415,7 +1826,7 @@ innobase_rec_reset( /*******************************************************************//** This function checks that index keys are sensible. -@return 0 or error number */ +@return 0 or error number */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) int innobase_check_index_keys( @@ -1452,11 +1863,24 @@ innobase_check_index_keys( for (index = dict_table_get_first_index(innodb_table); index; index = dict_table_get_next_index(index)) { - if (!strcmp(key.name, index->name)) { + if (index->is_committed() + && !strcmp(key.name, index->name)) { break; } } + /* Now we are in a situation where we have "ADD INDEX x" + and an index by the same name already exists. We have 4 + possible cases: + 1. No further clauses for an index x are given. Should reject + the operation. + 2. "DROP INDEX x" is given. Should allow the operation. + 3. "RENAME INDEX x TO y" is given. Should allow the operation. + 4. "DROP INDEX x, RENAME INDEX x TO y" is given. Should allow + the operation, since no name clash occurs. In this particular + case MySQL cancels the operation without calling InnoDB + methods. */ + if (index) { /* If a key by the same name is being created and dropped, the name clash is OK. */ @@ -1470,6 +1894,23 @@ innobase_check_index_keys( } } + /* If a key by the same name is being created and + renamed, the name clash is OK. E.g. + ALTER TABLE t ADD INDEX i (col), RENAME INDEX i TO x + where the index "i" exists prior to the ALTER command. + In this case we: + 1. rename the existing index from "i" to "x" + 2. 
add the new index "i" */ + /* JAN: TODO: MySQL 5.7 + for (uint i = 0; i < info->index_rename_count; i++) { + const KEY_PAIR* pair + = &info->index_rename_buffer[i]; + + if (0 == strcmp(key.name, pair->old_key->name)) { + goto name_ok; + } + } + */ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key.name); return(ER_WRONG_NAME_FOR_INDEX); @@ -1535,41 +1976,69 @@ name_ok: return(0); } -/*******************************************************************//** -Create index field definition for key part */ -static MY_ATTRIBUTE((nonnull(2,3))) +/** Create index field definition for key part +@param[in] altered_table MySQL table that is being altered, + or NULL if a new clustered index + is not being created +@param[in] key_part MySQL key definition +@param[in,out] index_field index field +@param[in] new_clustered new cluster +@param[in] fields MySQL table fields*/ +static void innobase_create_index_field_def( /*============================*/ - const TABLE* altered_table, /*!< in: MySQL table that is - being altered, or NULL - if a new clustered index is - not being created */ - const KEY_PART_INFO* key_part, /*!< in: MySQL key definition */ - index_field_t* index_field, /*!< out: index field - definition for key_part */ - const Field** fields) /*!< in: MySQL table fields */ + const TABLE* altered_table, + const KEY_PART_INFO* key_part, + index_field_t* index_field, + bool new_clustered, + const Field** fields) { const Field* field; ibool is_unsigned; ulint col_type; + ulint num_v = 0; + ulint num_m_v = 0; DBUG_ENTER("innobase_create_index_field_def"); ut_ad(key_part); ut_ad(index_field); - field = altered_table + field = new_clustered ? altered_table->field[key_part->fieldnr] : key_part->field; ut_a(field); - index_field->col_no = key_part->fieldnr; - index_field->col_name = altered_table ? field->field_name : fields[key_part->fieldnr]->field_name; + for (ulint i = 0; i < key_part->fieldnr; i++) { + const Field* ifield = altered_table->field[i]; + if (innobase_is_v_fld(ifield)) { + num_v++; + } + + if (!ifield->stored_in_db()) { + num_m_v++; + } + } - col_type = get_innobase_type_from_mysql_type(&is_unsigned, field); + col_type = get_innobase_type_from_mysql_type( + &is_unsigned, field); - if (DATA_BLOB == col_type + // JAN: TODO: MySQL 5.7 Virtual columns + //if (!field->stored_in_db && field->gcol_info) { + // if (!field->stored_in_db() && false) { + // index_field->is_v_col = true; + // index_field->col_no = num_v; + // } else { + // index_field->is_v_col = false; + // index_field->col_no = key_part->fieldnr - num_v; + //} + + index_field->is_v_col = false; + index_field->col_no = key_part->fieldnr - num_m_v; + index_field->col_name = altered_table ? 
field->field_name : fields[key_part->fieldnr]->field_name; + + if (DATA_LARGE_MTYPE(col_type) || (key_part->length < field->pack_length() && field->type() != MYSQL_TYPE_VARCHAR) || (field->type() == MYSQL_TYPE_VARCHAR @@ -1584,9 +2053,15 @@ innobase_create_index_field_def( DBUG_VOID_RETURN; } -/*******************************************************************//** -Create index definition for key */ -static MY_ATTRIBUTE((nonnull)) +/** Create index definition for key +@param[in] altered_table MySQL table that is being altered +@param[in] keys key definitions +@param[in] key_number MySQL key number +@param[in] new_clustered true if generating a new clustered +index on the table +@param[in] key_clustered true if this is the new clustered index +@param[out] index index definition +@param[in] heap heap where memory is allocated */ void innobase_create_index_def( /*======================*/ @@ -1607,9 +2082,7 @@ innobase_create_index_def( { const KEY* key = &keys[key_number]; ulint i; - ulint len; ulint n_fields = key->user_defined_key_parts; - char* index_name; DBUG_ENTER("innobase_create_index_def"); DBUG_ASSERT(!key_clustered || new_clustered); @@ -1618,43 +2091,102 @@ innobase_create_index_def( mem_heap_alloc(heap, n_fields * sizeof *index->fields)); memset(index->fields, 0, n_fields * sizeof *index->fields); - index->ind_type = 0; + index->parser = NULL; + index->is_ngram = false; index->key_number = key_number; index->n_fields = n_fields; - len = strlen(key->name) + 1; - index->name = index_name = static_cast( - mem_heap_alloc(heap, len + !new_clustered)); - - if (!new_clustered) { - *index_name++ = TEMP_INDEX_PREFIX; - } - - memcpy(index_name, key->name, len); - - if (key->flags & HA_NOSAME) { - index->ind_type |= DICT_UNIQUE; - } + index->name = mem_heap_strdup(heap, key->name); + index->rebuild = new_clustered; if (key_clustered) { - DBUG_ASSERT(!(key->flags & HA_FULLTEXT)); - index->ind_type |= DICT_CLUSTERED; + DBUG_ASSERT(!(key->flags & (HA_FULLTEXT | HA_SPATIAL))); + DBUG_ASSERT(key->flags & HA_NOSAME); + index->ind_type = DICT_CLUSTERED | DICT_UNIQUE; } else if (key->flags & HA_FULLTEXT) { + DBUG_ASSERT(!(key->flags & (HA_SPATIAL | HA_NOSAME))); DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK & ~(HA_FULLTEXT | HA_PACK_KEY | HA_BINARY_PACK_KEY))); + index->ind_type = DICT_FTS; + + /* Set plugin parser */ + /* Note: key->parser is only parser name, + we need to get parser from altered_table instead */ + /* JAN: TODO: MySQL FTS Parser + if (key->flags & HA_USES_PARSER) { + for (ulint j = 0; j < altered_table->s->keys; j++) { + if (ut_strcmp(altered_table->key_info[j].name, + key->name) == 0) { + ut_ad(altered_table->key_info[j].flags + & HA_USES_PARSER); + + plugin_ref parser = + altered_table->key_info[j].parser; + index->parser = + static_cast( + plugin_decl(parser)->info); + + index->is_ngram = strncmp( + plugin_name(parser)->str, + FTS_NGRAM_PARSER_NAME, + plugin_name(parser)->length) + == 0; + + break; + } + } + + DBUG_EXECUTE_IF("fts_instrument_use_default_parser", + index->parser = &fts_default_parser;); + ut_ad(index->parser); + } + */ + index->parser = &fts_default_parser; + } else if (key->flags & HA_SPATIAL) { DBUG_ASSERT(!(key->flags & HA_NOSAME)); - DBUG_ASSERT(!index->ind_type); - index->ind_type |= DICT_FTS; + index->ind_type = DICT_SPATIAL; + ut_ad(n_fields == 1); + ulint num_v = 0; + + /* Need to count the virtual fields before this spatial + indexed field */ + for (ulint i = 0; i < key->key_part->fieldnr; i++) { + if (innobase_is_v_fld(altered_table->field[i])) { + 
num_v++; + } + } + index->fields[0].col_no = key->key_part[0].fieldnr - num_v; + index->fields[0].prefix_len = 0; + index->fields[0].is_v_col = false; + /* JAN: TODO: MySQL 5.7 Virtual columns & spatial indexes + if (!key->key_part[0].field->stored_in_db() + && key->key_part[0].field->gcol_info) { + */ + /* Currently, the spatial index cannot be created + on virtual columns. It is blocked in server + layer */ + /* + ut_ad(0); + index->fields[0].is_v_col = true; + } else { + */ + index->fields[0].is_v_col = false; + //} + } else { + index->ind_type = (key->flags & HA_NOSAME) ? DICT_UNIQUE : 0; } - if (!new_clustered) { - altered_table = NULL; - } + if (!(key->flags & HA_SPATIAL)) { + for (i = 0; i < n_fields; i++) { + innobase_create_index_field_def( + altered_table, &key->key_part[i], + &index->fields[i], new_clustered, fields); - for (i = 0; i < n_fields; i++) { - innobase_create_index_field_def( - altered_table, &key->key_part[i], &index->fields[i], fields); + if (index->fields[i].is_v_col) { + index->ind_type |= DICT_VIRTUAL; + } + } } DBUG_VOID_RETURN; @@ -1672,22 +2204,31 @@ innobase_fts_check_doc_id_col( const TABLE* altered_table, /*!< in: MySQL table with fulltext index */ - ulint* fts_doc_col_no) + ulint* fts_doc_col_no, /*!< out: The column number for Doc ID, or ULINT_UNDEFINED if it is of wrong type */ + ulint* num_v) /*!< out: number of virtual column */ { *fts_doc_col_no = ULINT_UNDEFINED; const uint n_cols = altered_table->s->stored_fields; uint sql_idx = 0; - uint i; + ulint i; + + *num_v = 0; for (i = 0; i < n_cols; i++, sql_idx++) { const Field* field; - while (!((field= altered_table->field[sql_idx])-> - stored_in_db())) - sql_idx++; + + while (!((field= altered_table->field[sql_idx])->stored_in_db())) { + sql_idx++; + } + + if (innobase_is_v_fld(field)) { + (*num_v)++; + } + if (my_strcasecmp(system_charset_info, field->field_name, FTS_DOC_ID_COL_NAME)) { continue; @@ -1699,11 +2240,12 @@ innobase_fts_check_doc_id_col( } else if (field->type() != MYSQL_TYPE_LONGLONG || field->pack_length() != 8 || field->real_maybe_null() - || !(field->flags & UNSIGNED_FLAG)) { + || !(field->flags & UNSIGNED_FLAG) + || innobase_is_v_fld(field)) { my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0), field->field_name); } else { - *fts_doc_col_no = i; + *fts_doc_col_no = i - *num_v; } return(true); @@ -1713,6 +2255,9 @@ innobase_fts_check_doc_id_col( return(false); } + /* Not to count the virtual columns */ + i -= *num_v; + for (; i + DATA_N_SYS_COLS < (uint) table->n_cols; i++) { const char* name = dict_table_get_col_name(table, i); @@ -1741,8 +2286,7 @@ innobase_fts_check_doc_id_col( /*******************************************************************//** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME on the Doc ID column. 
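The Doc ID requirements checked by innobase_fts_check_doc_id_col() above reduce to a short predicate; the standalone sketch below uses an invented mock_field struct (not the server's Field class) and plain strcasecmp() in place of my_strcasecmp():

#include <strings.h>
#include <string>

/* Invented stand-in for the relevant bits of the server's Field class. */
struct mock_field {
	std::string	name;
	bool		is_bigint_8;	/* MYSQL_TYPE_LONGLONG, pack_length() == 8 */
	bool		is_unsigned;
	bool		nullable;
	bool		is_virtual;
};

/* True if the column can act as the hidden FTS_DOC_ID column. */
bool
usable_as_fts_doc_id(const mock_field& f)
{
	if (strcasecmp(f.name.c_str(), "FTS_DOC_ID") != 0) {
		return(false);	/* not a Doc ID column at all */
	}

	return(f.is_bigint_8 && f.is_unsigned && !f.nullable && !f.is_virtual);
}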
-@return the status of the FTS_DOC_ID index */ -UNIV_INTERN +@return the status of the FTS_DOC_ID index */ enum fts_doc_id_index_enum innobase_fts_check_doc_id_index( /*============================*/ @@ -1811,7 +2355,8 @@ innobase_fts_check_doc_id_index( if (strcmp(field->name, FTS_DOC_ID_COL_NAME) == 0 && field->col->mtype == DATA_INT && field->col->len == 8 - && field->col->prtype & DATA_NOT_NULL) { + && field->col->prtype & DATA_NOT_NULL + && !dict_col_is_virtual(field->col)) { if (fts_doc_col_no) { *fts_doc_col_no = dict_col_get_no(field->col); } @@ -1828,9 +2373,8 @@ innobase_fts_check_doc_id_index( /*******************************************************************//** Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME on the Doc ID column in MySQL create index definition. -@return FTS_EXIST_DOC_ID_INDEX if there exists the FTS_DOC_ID index, +@return FTS_EXIST_DOC_ID_INDEX if there exists the FTS_DOC_ID index, FTS_INCORRECT_DOC_ID_INDEX if the FTS_DOC_ID index is of wrong format */ -UNIV_INTERN enum fts_doc_id_index_enum innobase_fts_check_doc_id_index_in_def( /*===================================*/ @@ -1875,7 +2419,7 @@ ELSE ENDIF -@return key definitions */ +@return key definitions */ static MY_ATTRIBUTE((nonnull, warn_unused_result, malloc)) index_def_t* innobase_create_key_defs( @@ -1940,11 +2484,14 @@ innobase_create_key_defs( const uint maybe_null = key_info[*add].key_part[key_part].key_type & FIELDFLAG_MAYBE_NULL; + bool is_v + = innobase_is_v_fld( + key_info[*add].key_part[key_part].field); DBUG_ASSERT(!maybe_null == !key_info[*add].key_part[key_part]. field->real_maybe_null()); - if (maybe_null) { + if (maybe_null || is_v) { new_primary = false; break; } @@ -1954,6 +2501,7 @@ innobase_create_key_defs( const bool rebuild = new_primary || add_fts_doc_id || innobase_need_rebuild(ha_alter_info, table); + /* Reserve one more space if new_primary is true, and we might need to add the FTS_DOC_ID_INDEX */ indexdef = indexdefs = static_cast( @@ -1976,8 +2524,8 @@ innobase_create_key_defs( index->fields = NULL; index->n_fields = 0; index->ind_type = DICT_CLUSTERED; - index->name = mem_heap_strdup( - heap, innobase_index_reserve_name); + index->name = innobase_index_reserve_name; + index->rebuild = true; index->key_number = ~0; primary_key_number = ULINT_UNDEFINED; goto created_clustered; @@ -1999,8 +2547,8 @@ created_clustered: } /* Copy the index definitions. 
*/ innobase_create_index_def( - altered_table, key_info, i, TRUE, FALSE, - indexdef, heap, (const Field **)altered_table->field); + altered_table, key_info, i, true, + false, indexdef, heap, (const Field **)altered_table->field); if (indexdef->ind_type & DICT_FTS) { n_fts_add++; @@ -2011,10 +2559,12 @@ created_clustered: } if (n_fts_add > 0) { + ulint num_v = 0; + if (!add_fts_doc_id && !innobase_fts_check_doc_id_col( NULL, altered_table, - &fts_doc_id_col)) { + &fts_doc_id_col, &num_v)) { fts_doc_id_col = altered_table->s->stored_fields; add_fts_doc_id = true; @@ -2044,8 +2594,8 @@ created_clustered: for (ulint i = 0; i < n_add; i++) { innobase_create_index_def( - altered_table, key_info, add[i], FALSE, FALSE, - indexdef, heap, (const Field **)altered_table->field); + altered_table, key_info, add[i], + false, false, indexdef, heap, (const Field **)altered_table->field); if (indexdef->ind_type & DICT_FTS) { n_fts_add++; @@ -2066,23 +2616,14 @@ created_clustered: index->n_fields = 1; index->fields->col_no = fts_doc_id_col; index->fields->prefix_len = 0; + index->fields->is_v_col = false; index->ind_type = DICT_UNIQUE; + ut_ad(!rebuild + || !add_fts_doc_id + || fts_doc_id_col <= altered_table->s->fields); - if (rebuild) { - index->name = mem_heap_strdup( - heap, FTS_DOC_ID_INDEX_NAME); - ut_ad(!add_fts_doc_id - || fts_doc_id_col == altered_table->s->stored_fields); - } else { - char* index_name; - index->name = index_name = static_cast( - mem_heap_alloc( - heap, - 1 + sizeof FTS_DOC_ID_INDEX_NAME)); - *index_name++ = TEMP_INDEX_PREFIX; - memcpy(index_name, FTS_DOC_ID_INDEX_NAME, - sizeof FTS_DOC_ID_INDEX_NAME); - } + index->name = FTS_DOC_ID_INDEX_NAME; + index->rebuild = rebuild; /* TODO: assign a real MySQL key number for this */ index->key_number = ULINT_UNDEFINED; @@ -2099,7 +2640,7 @@ created_clustered: /*******************************************************************//** Check each index column size, make sure they do not exceed the max limit -@return true if index column size exceeds limit */ +@return true if index column size exceeds limit */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_check_column_length( @@ -2115,113 +2656,6 @@ innobase_check_column_length( return(false); } -struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx -{ - /** Dummy query graph */ - que_thr_t* thr; - /** reference to the prebuilt struct of the creating instance */ - row_prebuilt_t*&prebuilt; - /** InnoDB indexes being created */ - dict_index_t** add_index; - /** MySQL key numbers for the InnoDB indexes that are being created */ - const ulint* add_key_numbers; - /** number of InnoDB indexes being created */ - ulint num_to_add_index; - /** InnoDB indexes being dropped */ - dict_index_t** drop_index; - /** number of InnoDB indexes being dropped */ - const ulint num_to_drop_index; - /** InnoDB foreign key constraints being dropped */ - dict_foreign_t** drop_fk; - /** number of InnoDB foreign key constraints being dropped */ - const ulint num_to_drop_fk; - /** InnoDB foreign key constraints being added */ - dict_foreign_t** add_fk; - /** number of InnoDB foreign key constraints being dropped */ - const ulint num_to_add_fk; - /** whether to create the indexes online */ - bool online; - /** memory heap */ - mem_heap_t* heap; - /** dictionary transaction */ - trx_t* trx; - /** original table (if rebuilt, differs from indexed_table) */ - dict_table_t* old_table; - /** table where the indexes are being created or dropped */ - dict_table_t* new_table; - /** mapping of old column 
numbers to new ones, or NULL */ - const ulint* col_map; - /** new column names, or NULL if nothing was renamed */ - const char** col_names; - /** added AUTO_INCREMENT column position, or ULINT_UNDEFINED */ - const ulint add_autoinc; - /** default values of ADD COLUMN, or NULL */ - const dtuple_t* add_cols; - /** autoinc sequence to use */ - ib_sequence_t sequence; - /** maximum auto-increment value */ - ulonglong max_autoinc; - /** temporary table name to use for old table when renaming tables */ - const char* tmp_name; - - ha_innobase_inplace_ctx(row_prebuilt_t*& prebuilt_arg, - dict_index_t** drop_arg, - ulint num_to_drop_arg, - dict_foreign_t** drop_fk_arg, - ulint num_to_drop_fk_arg, - dict_foreign_t** add_fk_arg, - ulint num_to_add_fk_arg, - bool online_arg, - mem_heap_t* heap_arg, - dict_table_t* new_table_arg, - const char** col_names_arg, - ulint add_autoinc_arg, - ulonglong autoinc_col_min_value_arg, - ulonglong autoinc_col_max_value_arg) : - inplace_alter_handler_ctx(), - prebuilt (prebuilt_arg), - add_index (0), add_key_numbers (0), num_to_add_index (0), - drop_index (drop_arg), num_to_drop_index (num_to_drop_arg), - drop_fk (drop_fk_arg), num_to_drop_fk (num_to_drop_fk_arg), - add_fk (add_fk_arg), num_to_add_fk (num_to_add_fk_arg), - online (online_arg), heap (heap_arg), trx (0), - old_table (prebuilt_arg->table), - new_table (new_table_arg), - col_map (0), col_names (col_names_arg), - add_autoinc (add_autoinc_arg), - add_cols (0), - sequence(prebuilt->trx->mysql_thd, - autoinc_col_min_value_arg, autoinc_col_max_value_arg), - max_autoinc (0), - tmp_name (0) - { -#ifdef UNIV_DEBUG - for (ulint i = 0; i < num_to_add_index; i++) { - ut_ad(!add_index[i]->to_be_dropped); - } - for (ulint i = 0; i < num_to_drop_index; i++) { - ut_ad(drop_index[i]->to_be_dropped); - } -#endif /* UNIV_DEBUG */ - - thr = pars_complete_graph_for_exec(NULL, prebuilt->trx, heap); - } - - ~ha_innobase_inplace_ctx() - { - mem_heap_free(heap); - } - - /** Determine if the table will be rebuilt. - @return whether the table will be rebuilt */ - bool need_rebuild () const { return(old_table != new_table); } - -private: - // Disable copying - ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&); - ha_innobase_inplace_ctx& operator=(const ha_innobase_inplace_ctx&); -}; - /********************************************************************//** Drop any indexes that we were not able to free previously due to open table handles. */ @@ -2240,7 +2674,7 @@ online_retry_drop_indexes_low( may have prebuilt->table pointing to the table. However, these other threads should be between statements, waiting for the next statement to execute, or for a meta-data lock. 
*/ - ut_ad(table->n_ref_count >= 1); + ut_ad(table->get_ref_count() >= 1); if (table->drop_aborted) { row_merge_drop_indexes(trx, table, TRUE); @@ -2269,12 +2703,10 @@ online_retry_drop_indexes( trx_free_for_mysql(trx); } -#ifdef UNIV_DEBUG - mutex_enter(&dict_sys->mutex); - dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE); - mutex_exit(&dict_sys->mutex); - ut_a(!table->drop_aborted); -#endif /* UNIV_DEBUG */ + ut_d(mutex_enter(&dict_sys->mutex)); + ut_d(dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE)); + ut_d(mutex_exit(&dict_sys->mutex)); + ut_ad(!table->drop_aborted); } /********************************************************************//** @@ -2287,7 +2719,9 @@ online_retry_drop_indexes_with_trx( dict_table_t* table, /*!< in/out: table */ trx_t* trx) /*!< in/out: transaction */ { - ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED)); + ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED) + || trx_state_eq(trx, TRX_STATE_FORCED_ROLLBACK)); + ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); /* Now that the dictionary is being locked, check if we can @@ -2305,9 +2739,9 @@ online_retry_drop_indexes_with_trx( } /** Determines if InnoDB is dropping a foreign key constraint. -@param foreign the constraint -@param drop_fk constraints being dropped -@param n_drop_fk number of constraints that are being dropped +@param foreign the constraint +@param drop_fk constraints being dropped +@param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool @@ -2328,13 +2762,13 @@ innobase_dropping_foreign( /** Determines if an InnoDB FOREIGN KEY constraint depends on a column that is being dropped or modified to NOT NULL. -@param user_table InnoDB table as it is before the ALTER operation -@param col_name Name of the column being altered -@param drop_fk constraints being dropped -@param n_drop_fk number of constraints that are being dropped -@param drop true=drop column, false=set NOT NULL -@retval true Not allowed (will call my_error()) -@retval false Allowed +@param user_table InnoDB table as it is before the ALTER operation +@param col_name Name of the column being altered +@param drop_fk constraints being dropped +@param n_drop_fk number of constraints that are being dropped +@param drop true=drop column, false=set NOT NULL +@retval true Not allowed (will call my_error()) +@retval false Allowed */ static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool @@ -2412,7 +2846,7 @@ innobase_check_foreigns_low( display_name, (sizeof display_name) - 1, foreign->foreign_table_name, strlen(foreign->foreign_table_name), - NULL, TRUE); + NULL); *buf_end = '\0'; my_error(ER_FK_COLUMN_CANNOT_DROP_CHILD, MYF(0), col_name, foreign->id, @@ -2427,14 +2861,14 @@ innobase_check_foreigns_low( /** Determines if an InnoDB FOREIGN KEY constraint depends on a column that is being dropped or modified to NOT NULL. 
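innobase_dropping_foreign() in the hunk above is essentially a reverse linear scan over the constraints collected for dropping; a minimal standalone restatement with simplified types might look like this:

#include <cstddef>

struct mock_foreign {};		/* stand-in for dict_foreign_t */

/* True if the given constraint is in the set being dropped. */
bool
dropping_foreign(
	const mock_foreign*	foreign,
	mock_foreign* const*	drop_fk,
	std::size_t		n_drop_fk)
{
	while (n_drop_fk--) {
		if (drop_fk[n_drop_fk] == foreign) {
			return(true);	/* pointer identity is sufficient */
		}
	}

	return(false);
}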
-@param ha_alter_info Data used during in-place alter -@param altered_table MySQL table that is being altered -@param old_table MySQL table as it is before the ALTER operation -@param user_table InnoDB table as it is before the ALTER operation -@param drop_fk constraints being dropped -@param n_drop_fk number of constraints that are being dropped -@retval true Not allowed (will call my_error()) -@retval false Allowed +@param ha_alter_info Data used during in-place alter +@param altered_table MySQL table that is being altered +@param old_table MySQL table as it is before the ALTER operation +@param user_table InnoDB table as it is before the ALTER operation +@param drop_fk constraints being dropped +@param n_drop_fk number of constraints that are being dropped +@retval true Not allowed (will call my_error()) +@retval false Allowed */ static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) bool @@ -2475,12 +2909,35 @@ innobase_check_foreigns( return(false); } +/** Get the default POINT value in MySQL format +@param[in] heap memory heap where allocated +@param[in] length length of MySQL format +@return mysql format data */ +static +const byte* +innobase_build_default_mysql_point( + mem_heap_t* heap, + ulint length) +{ + byte* buf = static_cast(mem_heap_alloc( + heap, DATA_POINT_LEN + length)); + + byte* wkb = buf + length; + + ulint len = get_wkb_of_default_point(SPDIMS, wkb, DATA_POINT_LEN); + ut_ad(len == DATA_POINT_LEN); + + row_mysql_store_blob_ref(buf, length, wkb, len); + + return(buf); +} + /** Convert a default value for ADD COLUMN. -@param heap Memory heap where allocated -@param dfield InnoDB data field to copy to -@param field MySQL value for the column -@param comp nonzero if in compact format */ +@param heap Memory heap where allocated +@param dfield InnoDB data field to copy to +@param field MySQL value for the column +@param comp nonzero if in compact format */ static MY_ATTRIBUTE((nonnull)) void innobase_build_col_map_add( @@ -2499,21 +2956,33 @@ innobase_build_col_map_add( byte* buf = static_cast(mem_heap_alloc(heap, size)); + const byte* mysql_data = field->ptr; + + if (dfield_get_type(dfield)->mtype == DATA_POINT) { + /** If the DATA_POINT field is NOT NULL, we need to + give it a default value, since DATA_POINT is a fixed length + type, we couldn't store a value of length 0, like other + geom types. Server doesn't provide the default value, and + we would use POINT(0 0) here instead. */ + + mysql_data = innobase_build_default_mysql_point(heap, size); + } + row_mysql_store_col_in_innobase_format( - dfield, buf, TRUE, field->ptr, size, comp); + dfield, buf, true, mysql_data, size, comp); } /** Construct the translation table for reordering, dropping or adding columns. 
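The translation table documented here can be pictured with a simplified standalone sketch: column identity is reduced to names in std::vector (not the real dictionary objects), and a sentinel plays the role of ULINT_UNDEFINED for dropped columns:

#include <cstddef>
#include <limits>
#include <string>
#include <vector>

/* Sentinel for dropped columns, in the spirit of ULINT_UNDEFINED. */
static const std::size_t COL_DROPPED = std::numeric_limits<std::size_t>::max();

/* Map each old column to its position in the new definition. */
std::vector<std::size_t>
build_col_map(
	const std::vector<std::string>&	old_cols,
	const std::vector<std::string>&	new_cols)
{
	std::vector<std::size_t> col_map(old_cols.size(), COL_DROPPED);

	for (std::size_t o = 0; o < old_cols.size(); o++) {
		for (std::size_t n = 0; n < new_cols.size(); n++) {
			if (old_cols[o] == new_cols[n]) {
				/* Column survives, possibly reordered. */
				col_map[o] = n;
				break;
			}
		}
	}

	/* e.g. {a, b, c} -> {c, a} yields {1, COL_DROPPED, 0}. */
	return(col_map);
}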
-@param ha_alter_info Data used during in-place alter -@param altered_table MySQL table that is being altered -@param table MySQL table as it is before the ALTER operation -@param new_table InnoDB table corresponding to MySQL altered_table -@param old_table InnoDB table corresponding to MYSQL table -@param add_cols Default values for ADD COLUMN, or NULL if no ADD COLUMN -@param heap Memory heap where allocated -@return array of integers, mapping column numbers in the table +@param ha_alter_info Data used during in-place alter +@param altered_table MySQL table that is being altered +@param table MySQL table as it is before the ALTER operation +@param new_table InnoDB table corresponding to MySQL altered_table +@param old_table InnoDB table corresponding to MYSQL table +@param add_cols Default values for ADD COLUMN, or NULL if no ADD COLUMN +@param heap Memory heap where allocated +@return array of integers, mapping column numbers in the table to column numbers in altered_table */ static MY_ATTRIBUTE((nonnull(1,2,3,4,5,7), warn_unused_result)) const ulint* @@ -2532,8 +3001,10 @@ innobase_build_col_map( DBUG_ASSERT(altered_table != table); DBUG_ASSERT(new_table != old_table); DBUG_ASSERT(dict_table_get_n_cols(new_table) + + dict_table_get_n_v_cols(new_table) >= altered_table->s->stored_fields + DATA_N_SYS_COLS); DBUG_ASSERT(dict_table_get_n_cols(old_table) + + dict_table_get_n_v_cols(old_table) >= table->s->stored_fields + DATA_N_SYS_COLS); DBUG_ASSERT(!!add_cols == !!(ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN)); @@ -2541,11 +3012,14 @@ innobase_build_col_map( == dict_table_get_n_cols(new_table)); ulint* col_map = static_cast( - mem_heap_alloc(heap, old_table->n_cols * sizeof *col_map)); + mem_heap_alloc( + heap, (old_table->n_cols + old_table->n_v_cols) + * sizeof *col_map)); List_iterator_fast cf_it( ha_alter_info->alter_info->create_list); uint i = 0, sql_idx = 0; + uint num_v = 0; /* Any dropped columns will map to ULINT_UNDEFINED. 
*/ for (old_innobase_i = 0; @@ -2554,18 +3028,45 @@ innobase_build_col_map( col_map[old_innobase_i] = ULINT_UNDEFINED; } + for (uint old_i = 0; old_i < old_table->n_v_cols; old_i++) { + col_map[old_i + old_table->n_cols] = ULINT_UNDEFINED; + } + while (const Create_field* new_field = cf_it++) { - if (!new_field->stored_in_db()) - { - sql_idx++; - continue; - } + bool is_v = false; + + if (innobase_is_v_fld(new_field)) { + is_v = true; + } + + ulint num_old_v = 0; + + if (!new_field->stored_in_db()) + { + sql_idx++; + continue; + } + for (old_i = 0, old_innobase_i= 0; - table->field[old_i]; - old_i++) { + table->field[old_i]; + old_i++) { const Field* field = table->field[old_i]; - if (!table->field[old_i]->stored_in_db()) - continue; + + if (!table->field[old_i]->stored_in_db()) { + continue; + } + + if (innobase_is_v_fld(field)) { + if (is_v && new_field->field == field) { + col_map[old_table->n_cols + num_v] + = num_old_v; + num_old_v++; + goto found_col; + } + num_old_v++; + continue; + } + if (new_field->field == field) { col_map[old_innobase_i] = i; goto found_col; @@ -2573,18 +3074,24 @@ innobase_build_col_map( old_innobase_i++; } + ut_ad(!is_v); innobase_build_col_map_add( heap, dtuple_get_nth_field(add_cols, i), altered_table->field[sql_idx], dict_table_is_comp(new_table)); found_col: - i++; - sql_idx++; + if (is_v) { + num_v++; + } else { + i++; + sql_idx++; + } } - DBUG_ASSERT(i == altered_table->s->stored_fields); + DBUG_ASSERT(i == altered_table->s->stored_fields - num_v); i = table->s->stored_fields; + //i = table->s->fields - old_table->n_v_cols; /* Add the InnoDB hidden FTS_DOC_ID column, if any. */ if (i + DATA_N_SYS_COLS < old_table->n_cols) { @@ -2597,6 +3104,7 @@ found_col: old_table, table->s->stored_fields), FTS_DOC_ID_COL_NAME)); if (altered_table->s->stored_fields + DATA_N_SYS_COLS + - new_table->n_v_cols < new_table->n_cols) { DBUG_ASSERT(DICT_TF2_FLAG_IS_SET( new_table, @@ -2605,6 +3113,8 @@ found_col: + DATA_N_SYS_COLS + 1 == new_table->n_cols); col_map[i] = altered_table->s->stored_fields; + col_map[i] = altered_table->s->fields + - new_table->n_v_cols; } else { DBUG_ASSERT(!DICT_TF2_FLAG_IS_SET( new_table, @@ -2628,9 +3138,9 @@ found_col: /** Drop newly create FTS index related auxiliary table during FIC create index process, before fts_add_index is called -@param table table that was being rebuilt online -@param trx transaction -@return DB_SUCCESS if successful, otherwise last error code +@param table table that was being rebuilt online +@param trx transaction +@return DB_SUCCESS if successful, otherwise last error code */ static dberr_t @@ -2658,7 +3168,7 @@ innobase_drop_fts_index_table( return(ret_err); } -/** Get the new column names if any columns were renamed +/** Get the new non-virtual column names if any columns were renamed @param ha_alter_info Data used during in-place alter @param altered_table MySQL table that is being altered @param table MySQL table as it is before the ALTER operation @@ -2678,7 +3188,7 @@ innobase_get_col_names( uint i; DBUG_ENTER("innobase_get_col_names"); - DBUG_ASSERT(user_table->n_def > table->s->fields); + DBUG_ASSERT(user_table->n_t_def > table->s->fields); DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::ALTER_COLUMN_NAME); @@ -2689,11 +3199,20 @@ innobase_get_col_names( List_iterator_fast cf_it( ha_alter_info->alter_info->create_list); while (const Create_field* new_field = cf_it++) { + ulint num_v = 0; DBUG_ASSERT(i < altered_table->s->fields); + if (innobase_is_v_fld(new_field)) { + continue; + } + for 
(uint old_i = 0; table->field[old_i]; old_i++) { + if (innobase_is_v_fld(table->field[old_i])) { + num_v++; + } + if (new_field->field == table->field[old_i]) { - cols[old_i] = new_field->field_name; + cols[old_i - num_v] = new_field->field_name; break; } } @@ -2702,7 +3221,7 @@ innobase_get_col_names( } /* Copy the internal column names. */ - i = table->s->fields; + i = table->s->fields - user_table->n_v_def; cols[i] = dict_table_get_col_name(user_table, i); while (++i < user_table->n_def) { @@ -2712,69 +3231,1102 @@ innobase_get_col_names( DBUG_RETURN(cols); } -/** Update internal structures with concurrent writes blocked, -while preparing ALTER TABLE. +/** Check whether the column prefix is increased, decreased, or unchanged. +@param[in] new_prefix_len new prefix length +@param[in] old_prefix_len new prefix length +@retval 1 prefix is increased +@retval 0 prefix is unchanged +@retval -1 prefix is decreased */ +static inline +lint +innobase_pk_col_prefix_compare( + ulint new_prefix_len, + ulint old_prefix_len) +{ + ut_ad(new_prefix_len < REC_MAX_DATA_SIZE); + ut_ad(old_prefix_len < REC_MAX_DATA_SIZE); -@param ha_alter_info Data used during in-place alter -@param altered_table MySQL table that is being altered -@param old_table MySQL table as it is before the ALTER operation -@param table_name Table name in MySQL -@param flags Table and tablespace flags -@param flags2 Additional table flags -@param fts_doc_id_col The column number of FTS_DOC_ID -@param add_fts_doc_id Flag: add column FTS_DOC_ID? -@param add_fts_doc_id_idx Flag: add index FTS_DOC_ID_INDEX (FTS_DOC_ID)? + if (new_prefix_len == old_prefix_len) { + return(0); + } -@retval true Failure -@retval false Success -*/ -static MY_ATTRIBUTE((warn_unused_result, nonnull(1,2,3,4))) + if (new_prefix_len == 0) { + new_prefix_len = ULINT_MAX; + } + + if (old_prefix_len == 0) { + old_prefix_len = ULINT_MAX; + } + + if (new_prefix_len > old_prefix_len) { + return(1); + } else { + return(-1); + } +} + +/** Check whether the column is existing in old table. +@param[in] new_col_no new column no +@param[in] col_map mapping of old column numbers to new ones +@param[in] col_map_size the column map size +@return true if the column is existing, otherwise false. 
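The prefix-length rule implemented by innobase_pk_col_prefix_compare() above can be restated standalone; the sketch uses unsigned long and ULONG_MAX in place of ulint and ULINT_MAX, assuming no further behavioural differences:

#include <climits>

/* Returns 1 if the prefix grew, 0 if unchanged, -1 if it shrank. */
int
pk_prefix_compare(unsigned long new_prefix_len, unsigned long old_prefix_len)
{
	if (new_prefix_len == old_prefix_len) {
		return(0);
	}

	/* A prefix length of 0 means the whole column is indexed. */
	if (new_prefix_len == 0) {
		new_prefix_len = ULONG_MAX;
	}

	if (old_prefix_len == 0) {
		old_prefix_len = ULONG_MAX;
	}

	return(new_prefix_len > old_prefix_len ? 1 : -1);
}

/* pk_prefix_compare(10, 0) == -1: going from the full column to a
   10-byte prefix counts as a decrease. */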
*/ +static inline bool -prepare_inplace_alter_table_dict( -/*=============================*/ - Alter_inplace_info* ha_alter_info, - const TABLE* altered_table, - const TABLE* old_table, - const char* table_name, - ulint flags, - ulint flags2, - ulint fts_doc_id_col, - bool add_fts_doc_id, - bool add_fts_doc_id_idx) +innobase_pk_col_is_existing( + const ulint new_col_no, + const ulint* col_map, + const ulint col_map_size) { - bool dict_locked = false; - ulint* add_key_nums; /* MySQL key numbers */ - index_def_t* index_defs; /* index definitions */ - dict_table_t* user_table; - dict_index_t* fts_index = NULL; - ulint new_clustered = 0; - dberr_t error; - ulint num_fts_index; - uint sql_idx; - ha_innobase_inplace_ctx*ctx; - - DBUG_ENTER("prepare_inplace_alter_table_dict"); + for (ulint i = 0; i < col_map_size; i++) { + if (col_map[i] == new_col_no) { + return(true); + } + } - ctx = static_cast - (ha_alter_info->handler_ctx); + return(false); +} - DBUG_ASSERT((ctx->add_autoinc != ULINT_UNDEFINED) - == (ctx->sequence.m_max_value > 0)); - DBUG_ASSERT(!ctx->num_to_drop_index == !ctx->drop_index); - DBUG_ASSERT(!ctx->num_to_drop_fk == !ctx->drop_fk); - DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_id_idx); - DBUG_ASSERT(!add_fts_doc_id_idx - || innobase_fulltext_exist(altered_table)); - DBUG_ASSERT(!ctx->add_cols); - DBUG_ASSERT(!ctx->add_index); - DBUG_ASSERT(!ctx->add_key_numbers); - DBUG_ASSERT(!ctx->num_to_add_index); +/** Determine whether both the indexes have same set of primary key +fields arranged in the same order. + +Rules when we cannot skip sorting: +(1) Removing existing PK columns somewhere else than at the end of the PK; +(2) Adding existing columns to the PK, except at the end of the PK when no +columns are removed from the PK; +(3) Changing the order of existing PK columns; +(4) Decreasing the prefix length just like removing existing PK columns +follows rule(1), Increasing the prefix length just like adding existing +PK columns follows rule(2). +@param[in] col_map mapping of old column numbers to new ones +@param[in] ha_alter_info Data used during in-place alter +@param[in] old_clust_index index to be compared +@param[in] new_clust_index index to be compared +@retval true if both indexes have same order. +@retval false. */ +static __attribute__((warn_unused_result)) +bool +innobase_pk_order_preserved( + const ulint* col_map, + const dict_index_t* old_clust_index, + const dict_index_t* new_clust_index) +{ + ulint old_n_uniq + = dict_index_get_n_ordering_defined_by_user( + old_clust_index); + ulint new_n_uniq + = dict_index_get_n_ordering_defined_by_user( + new_clust_index); + + ut_ad(dict_index_is_clust(old_clust_index)); + ut_ad(dict_index_is_clust(new_clust_index)); + ut_ad(old_clust_index->table != new_clust_index->table); + ut_ad(col_map != NULL); + + if (old_n_uniq == 0) { + /* There was no PRIMARY KEY in the table. + If there is no PRIMARY KEY after the ALTER either, + no sorting is needed. */ + return(new_n_uniq == old_n_uniq); + } + + /* DROP PRIMARY KEY is only allowed in combination with + ADD PRIMARY KEY. */ + ut_ad(new_n_uniq > 0); + + /* The order of the last processed new_clust_index key field, + not counting ADD COLUMN, which are constant. */ + lint last_field_order = -1; + ulint existing_field_count = 0; + ulint old_n_cols = dict_table_get_n_cols(old_clust_index->table); + for (ulint new_field = 0; new_field < new_n_uniq; new_field++) { + ulint new_col_no = + new_clust_index->fields[new_field].col->ind; + + /* Check if there is a match in old primary key. 
*/ + ulint old_field = 0; + while (old_field < old_n_uniq) { + ulint old_col_no = + old_clust_index->fields[old_field].col->ind; + + if (col_map[old_col_no] == new_col_no) { + break; + } - user_table = ctx->new_table; + old_field++; + } - trx_start_if_not_started_xa(ctx->prebuilt->trx); + /* The order of key field in the new primary key. + 1. old PK column: idx in old primary key + 2. existing column: old_n_uniq + sequence no + 3. newly added column: no order */ + lint new_field_order; + const bool old_pk_column = old_field < old_n_uniq; - /* Create a background transaction for the operations on + if (old_pk_column) { + new_field_order = old_field; + } else if (innobase_pk_col_is_existing(new_col_no, col_map, + old_n_cols)) { + new_field_order = old_n_uniq + existing_field_count++; + } else { + /* Skip newly added column. */ + continue; + } + + if (last_field_order + 1 != new_field_order) { + /* Old PK order is not kept, or existing column + is not added at the end of old PK. */ + return(false); + } + + last_field_order = new_field_order; + + if (!old_pk_column) { + continue; + } + + /* Check prefix length change. */ + const lint prefix_change = innobase_pk_col_prefix_compare( + new_clust_index->fields[new_field].prefix_len, + old_clust_index->fields[old_field].prefix_len); + + if (prefix_change < 0) { + /* If a column's prefix length is decreased, it should + be the last old PK column in new PK. + Note: we set last_field_order to -2, so that if there + are any old PK colmns or existing columns after it in + new PK, the comparison to new_field_order will fail in + the next round.*/ + last_field_order = -2; + } else if (prefix_change > 0) { + /* If a column's prefix length is increased, it should + be the last PK column in old PK. */ + if (old_field != old_n_uniq - 1) { + return(false); + } + } + } + + return(true); +} + +/** Update the mtype from DATA_BLOB to DATA_GEOMETRY for a specified +GIS column of a table. This is used when we want to create spatial index +on legacy GIS columns coming from 5.6, where we store GIS data as DATA_BLOB +in innodb layer. +@param[in] table_id table id +@param[in] col_name column name +@param[in] trx data dictionary transaction +@retval true Failure +@retval false Success */ +static +bool +innobase_update_gis_column_type( + table_id_t table_id, + const char* col_name, + trx_t* trx) +{ + pars_info_t* info; + dberr_t error; + + DBUG_ENTER("innobase_update_gis_column_type"); + + DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX); + ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + + info = pars_info_create(); + + pars_info_add_ull_literal(info, "tableid", table_id); + pars_info_add_str_literal(info, "name", col_name); + pars_info_add_int4_literal(info, "mtype", DATA_GEOMETRY); + + trx->op_info = "update column type to DATA_GEOMETRY"; + + error = que_eval_sql( + info, + "PROCEDURE UPDATE_SYS_COLUMNS_PROC () IS\n" + "BEGIN\n" + "UPDATE SYS_COLUMNS SET MTYPE=:mtype\n" + "WHERE TABLE_ID=:tableid AND NAME=:name;\n" + "END;\n", + false, trx); + + trx->error_state = DB_SUCCESS; + trx->op_info = ""; + + DBUG_RETURN(error != DB_SUCCESS); +} + +/** Check if we are creating spatial indexes on GIS columns, which are +legacy columns from earlier MySQL, such as 5.6. If so, we have to update +the mtypes of the old GIS columns to DATA_GEOMETRY. +In 5.6, we store GIS columns as DATA_BLOB in InnoDB layer, it will introduce +confusion when we run latest server on older data. 
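To make rules (1)-(4) above concrete, here is a deliberately simplified sketch of the "may we skip sorting?" decision (column names only, no prefixes, no newly added columns); it illustrates the rules and is not the InnoDB implementation.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

static bool pk_order_preserved(const std::vector<std::string>& old_pk,
			       const std::vector<std::string>& new_pk)
{
	std::size_t old_pos = 0;
	for (const std::string& col : new_pk) {
		auto it = std::find(old_pk.begin(), old_pk.end(), col);
		if (it != old_pk.end()) {
			/* Rules (1) and (3): surviving old PK columns must
			come first and keep their original order. */
			if (std::size_t(it - old_pk.begin()) != old_pos) {
				return false;
			}
			++old_pos;
		} else if (old_pos != old_pk.size()) {
			/* Rule (2): other existing columns may only be
			appended after the complete old PK. */
			return false;
		}
	}
	return true;
}

int main()
{
	assert( pk_order_preserved({"a", "b"}, {"a", "b", "c"})); /* append at the end */
	assert( pk_order_preserved({"a", "b"}, {"a"}));           /* drop the PK tail */
	assert(!pk_order_preserved({"a", "b"}, {"b", "a"}));      /* reorder */
	assert(!pk_order_preserved({"a", "b"}, {"a", "c", "b"})); /* insert mid-PK */
}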
That's why we need to +do the upgrade. +@param[in] ha_alter_info Data used during in-place alter +@param[in] table Table on which we want to add indexes +@param[in] trx Transaction +@return DB_SUCCESS if update successfully or no columns need to be updated, +otherwise DB_ERROR, which means we can't update the mtype for some +column, and creating spatial index on it should be dangerous */ +static +dberr_t +innobase_check_gis_columns( + Alter_inplace_info* ha_alter_info, + dict_table_t* table, + trx_t* trx) +{ + DBUG_ENTER("innobase_check_gis_columns"); + + for (uint key_num = 0; + key_num < ha_alter_info->index_add_count; + key_num++) { + + const KEY& key = ha_alter_info->key_info_buffer[ + ha_alter_info->index_add_buffer[key_num]]; + + if (!(key.flags & HA_SPATIAL)) { + continue; + } + + ut_ad(key.user_defined_key_parts == 1); + const KEY_PART_INFO& key_part = key.key_part[0]; + + /* Does not support spatial index on virtual columns */ + if (innobase_is_v_fld(key_part.field)) { + DBUG_RETURN(DB_UNSUPPORTED); + } + + ulint col_nr = dict_table_has_column( + table, + key_part.field->field_name, + key_part.fieldnr); + ut_ad(col_nr != table->n_def); + dict_col_t* col = &table->cols[col_nr]; + + if (col->mtype != DATA_BLOB) { + ut_ad(DATA_GEOMETRY_MTYPE(col->mtype)); + continue; + } + + const char* col_name = dict_table_get_col_name( + table, col_nr); + + if (innobase_update_gis_column_type( + table->id, col_name, trx)) { + + DBUG_RETURN(DB_ERROR); + } else { + col->mtype = DATA_GEOMETRY; + + ib::info() << "Updated mtype of column" << col_name + << " in table " << table->name + << ", whose id is " << table->id + << " to DATA_GEOMETRY"; + } + } + + DBUG_RETURN(DB_SUCCESS); +} + +/** Collect virtual column info for its addition +@param[in] ha_alter_info Data used during in-place alter +@param[in] altered_table MySQL table that is being altered to +@param[in] table MySQL table as it is before the ALTER operation +@retval true Failure +@retval false Success */ +static +bool +prepare_inplace_add_virtual( + Alter_inplace_info* ha_alter_info, + const TABLE* altered_table, + const TABLE* table) +{ + ha_innobase_inplace_ctx* ctx; + ulint i = 0; + ulint j = 0; + const Create_field* new_field; + + ctx = static_cast + (ha_alter_info->handler_ctx); + + ctx->num_to_add_vcol = altered_table->s->fields + + ctx->num_to_drop_vcol - table->s->fields; + + ctx->add_vcol = static_cast( + mem_heap_zalloc(ctx->heap, ctx->num_to_add_vcol + * sizeof *ctx->add_vcol)); + ctx->add_vcol_name = static_cast( + mem_heap_alloc(ctx->heap, ctx->num_to_add_vcol + * sizeof *ctx->add_vcol_name)); + + List_iterator_fast cf_it( + ha_alter_info->alter_info->create_list); + + while ((new_field = (cf_it++)) != NULL) { + const Field* field = new_field->field; + ulint old_i; + + for (old_i = 0; table->field[old_i]; old_i++) { + const Field* n_field = table->field[old_i]; + if (field == n_field) { + break; + } + } + + i++; + + if (table->field[old_i]) { + continue; + } + + ut_ad(!field); + + ulint col_len; + ulint is_unsigned; + ulint field_type; + ulint charset_no; + + field = altered_table->field[i - 1]; + + ulint col_type + = get_innobase_type_from_mysql_type( + &is_unsigned, field); + + /* JAN: TODO: MySQL 5.7 + if (!field->gcol_info || field->stored_in_db) { + */ + if (field->stored_in_db()) { + my_error(ER_WRONG_KEY_COLUMN, MYF(0), + field->field_name); + return(true); + } + + col_len = field->pack_length(); + field_type = (ulint) field->type(); + + if (!field->real_maybe_null()) { + field_type |= DATA_NOT_NULL; + } + + if 
(field->binary()) { + field_type |= DATA_BINARY_TYPE; + } + + if (is_unsigned) { + field_type |= DATA_UNSIGNED; + } + + if (dtype_is_string_type(col_type)) { + charset_no = (ulint) field->charset()->number; + + DBUG_EXECUTE_IF( + "ib_alter_add_virtual_fail", + charset_no += MAX_CHAR_COLL_NUM;); + + if (charset_no > MAX_CHAR_COLL_NUM) { + my_error(ER_WRONG_KEY_COLUMN, MYF(0), + field->field_name); + return(true); + } + } else { + charset_no = 0; + } + + if (field->type() == MYSQL_TYPE_VARCHAR) { + uint32 length_bytes + = static_cast( + field)->length_bytes; + + col_len -= length_bytes; + + if (length_bytes == 2) { + field_type |= DATA_LONG_TRUE_VARCHAR; + } + } + + + ctx->add_vcol[j].m_col.prtype = dtype_form_prtype( + field_type, charset_no); + + ctx->add_vcol[j].m_col.prtype |= DATA_VIRTUAL; + + ctx->add_vcol[j].m_col.mtype = col_type; + + ctx->add_vcol[j].m_col.len = col_len; + + ctx->add_vcol[j].m_col.ind = i - 1; + /* ctx->add_vcol[j].num_base = + field->gcol_info->non_virtual_base_columns(); + */ + ctx->add_vcol_name[j] = field->field_name; + ctx->add_vcol[j].base_col = static_cast( + mem_heap_alloc(ctx->heap, ctx->add_vcol[j].num_base + * sizeof *(ctx->add_vcol[j].base_col))); + ctx->add_vcol[j].v_pos = ctx->old_table->n_v_cols + - ctx->num_to_drop_vcol + j; + + /* No need to track the list */ + ctx->add_vcol[j].v_indexes = NULL; + innodb_base_col_setup(ctx->old_table, field, &ctx->add_vcol[j]); + j++; + } + + return(false); +} + +/** Collect virtual column info for its addition +@param[in] ha_alter_info Data used during in-place alter +@param[in] altered_table MySQL table that is being altered to +@param[in] table MySQL table as it is before the ALTER operation +@retval true Failure +@retval false Success */ +static +bool +prepare_inplace_drop_virtual( + Alter_inplace_info* ha_alter_info, + const TABLE* altered_table, + const TABLE* table) +{ + ha_innobase_inplace_ctx* ctx; + ulint i = 0; + ulint j = 0; + Alter_drop *drop; + + ctx = static_cast + (ha_alter_info->handler_ctx); + + ctx->num_to_drop_vcol = ha_alter_info->alter_info->drop_list.elements; + + ctx->drop_vcol = static_cast( + mem_heap_alloc(ctx->heap, ctx->num_to_drop_vcol + * sizeof *ctx->drop_vcol)); + ctx->drop_vcol_name = static_cast( + mem_heap_alloc(ctx->heap, ctx->num_to_drop_vcol + * sizeof *ctx->drop_vcol_name)); + + List_iterator_fast cf_it( + ha_alter_info->alter_info->drop_list); + + while ((drop = (cf_it++)) != NULL) { + const Field* field; + ulint old_i; + + ut_ad(drop->type == Alter_drop::COLUMN); + + for (old_i = 0; table->field[old_i]; old_i++) { + const Field* n_field = table->field[old_i]; + if (!my_strcasecmp(system_charset_info, + n_field->field_name, drop->name)) { + break; + } + } + + i++; + + if (!table->field[old_i]) { + continue; + } + + ulint col_len; + ulint is_unsigned; + ulint field_type; + ulint charset_no; + + field = table->field[old_i]; + + ulint col_type + = get_innobase_type_from_mysql_type( + &is_unsigned, field); + + /* JAN: TODO: MySQL 5.7 + if (!field->gcol_info || field->stored_in_db) { + */ + if (field->stored_in_db()) { + my_error(ER_WRONG_KEY_COLUMN, MYF(0), + field->field_name); + return(true); + } + + col_len = field->pack_length(); + field_type = (ulint) field->type(); + + if (!field->real_maybe_null()) { + field_type |= DATA_NOT_NULL; + } + + if (field->binary()) { + field_type |= DATA_BINARY_TYPE; + } + + if (is_unsigned) { + field_type |= DATA_UNSIGNED; + } + + if (dtype_is_string_type(col_type)) { + charset_no = (ulint) field->charset()->number; + + DBUG_EXECUTE_IF( + 
"ib_alter_add_virtual_fail", + charset_no += MAX_CHAR_COLL_NUM;); + + if (charset_no > MAX_CHAR_COLL_NUM) { + my_error(ER_WRONG_KEY_COLUMN, MYF(0), + field->field_name); + return(true); + } + } else { + charset_no = 0; + } + + if (field->type() == MYSQL_TYPE_VARCHAR) { + uint32 length_bytes + = static_cast( + field)->length_bytes; + + col_len -= length_bytes; + + if (length_bytes == 2) { + field_type |= DATA_LONG_TRUE_VARCHAR; + } + } + + + ctx->drop_vcol[j].m_col.prtype = dtype_form_prtype( + field_type, charset_no); + + ctx->drop_vcol[j].m_col.prtype |= DATA_VIRTUAL; + + ctx->drop_vcol[j].m_col.mtype = col_type; + + ctx->drop_vcol[j].m_col.len = col_len; + + ctx->drop_vcol[j].m_col.ind = old_i; + + ctx->drop_vcol_name[j] = field->field_name; + + dict_v_col_t* v_col = dict_table_get_nth_v_col_mysql( + ctx->old_table, old_i); + ctx->drop_vcol[j].v_pos = v_col->v_pos; + j++; + } + + return(false); +} + +/** Insert a new record to INNODB SYS_VIRTUAL +@param[in] table InnoDB table +@param[in] pos virtual column column no +@param[in] base_pos base column pos +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_insert_sys_virtual( + const dict_table_t* table, + ulint pos, + ulint base_pos, + trx_t* trx) +{ + pars_info_t* info = pars_info_create(); + + pars_info_add_ull_literal(info, "id", table->id); + + pars_info_add_int4_literal(info, "pos", pos); + + pars_info_add_int4_literal(info, "base_pos", base_pos); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "INSERT INTO SYS_VIRTUAL VALUES" + "(:id, :pos, :base_pos);\n" + "END;\n", + FALSE, trx); + + return(error); +} + +/** Update INNODB SYS_COLUMNS on new virtual columns +@param[in] table InnoDB table +@param[in] col_name column name +@param[in] vcol virtual column +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_add_one_virtual( + const dict_table_t* table, + const char* col_name, + dict_v_col_t* vcol, + trx_t* trx) +{ + ulint pos = dict_create_v_col_pos(vcol->v_pos, + vcol->m_col.ind); + ulint mtype = vcol->m_col.mtype; + ulint prtype = vcol->m_col.prtype; + ulint len = vcol->m_col.len; + pars_info_t* info = pars_info_create(); + + pars_info_add_ull_literal(info, "id", table->id); + + pars_info_add_int4_literal(info, "pos", pos); + + pars_info_add_str_literal(info, "name", col_name); + pars_info_add_int4_literal(info, "mtype", mtype); + pars_info_add_int4_literal(info, "prtype", prtype); + pars_info_add_int4_literal(info, "len", len); + pars_info_add_int4_literal(info, "prec", vcol->num_base); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "INSERT INTO SYS_COLUMNS VALUES" + "(:id, :pos, :name, :mtype, :prtype, :len, :prec);\n" + "END;\n", + FALSE, trx); + + if (error != DB_SUCCESS) { + return(error); + } + + for (ulint i = 0; i < vcol->num_base; i++) { + error = innobase_insert_sys_virtual( + table, pos, vcol->base_col[i]->ind, trx); + if (error != DB_SUCCESS) { + return(error); + } + } + + return(error); +} + +/** Update INNODB SYS_TABLES on number of virtual columns +@param[in] user_table InnoDB table +@param[in] n_col number of columns +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_update_n_virtual( + const dict_table_t* table, + ulint n_col, + trx_t* trx) +{ + dberr_t err = DB_SUCCESS; + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "num_col", n_col); + 
pars_info_add_ull_literal(info, "id", table->id); + + err = que_eval_sql( + info, + "PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n" + "BEGIN\n" + "UPDATE SYS_TABLES" + " SET N_COLS = :num_col\n" + " WHERE ID = :id;\n" + "END;\n", FALSE, trx); + + return(err); +} + +/** Update system table for adding virtual column(s) +@param[in] ha_alter_info Data used during in-place alter +@param[in] altered_table MySQL table that is being altered +@param[in] table MySQL table as it is before the ALTER operation +@param[in] user_table InnoDB table +@param[in] trx transaction +@retval true Failure +@retval false Success */ +static +bool +innobase_add_virtual_try( + Alter_inplace_info* ha_alter_info, + const TABLE* altered_table, + const TABLE* table, + const dict_table_t* user_table, + trx_t* trx) +{ + ha_innobase_inplace_ctx* ctx; + dberr_t err = DB_SUCCESS; + + ctx = static_cast( + ha_alter_info->handler_ctx); + + for (ulint i = 0; i < ctx->num_to_add_vcol; i++) { + + err = innobase_add_one_virtual( + user_table, ctx->add_vcol_name[i], + &ctx->add_vcol[i], trx); + + if (err != DB_SUCCESS) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "InnoDB: ADD COLUMN...VIRTUAL"); + return(true); + } + } + + + ulint n_col = user_table->n_cols; + ulint n_v_col = user_table->n_v_cols; + + n_v_col += ctx->num_to_add_vcol; + + n_col -= dict_table_get_n_sys_cols(user_table); + + n_v_col -= ctx->num_to_drop_vcol; + + ulint new_n = dict_table_encode_n_col(n_col, n_v_col) + + ((user_table->flags & DICT_TF_COMPACT) << 31); + + err = innobase_update_n_virtual(user_table, new_n, trx); + + if (err != DB_SUCCESS) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "InnoDB: ADD COLUMN...VIRTUAL"); + return(true); + } + + return(false); +} + +/** Update INNODB SYS_COLUMNS on new virtual column's position +@param[in] table InnoDB table +@param[in] old_pos old position +@param[in] new_pos new position +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_update_v_pos_sys_columns( + const dict_table_t* table, + ulint old_pos, + ulint new_pos, + trx_t* trx) +{ + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "pos", old_pos); + pars_info_add_int4_literal(info, "val", new_pos); + pars_info_add_ull_literal(info, "id", table->id); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "UPDATE SYS_COLUMNS\n" + "SET POS = :val\n" + "WHERE POS = :pos\n" + "AND TABLE_ID = :id;\n" + "END;\n", + FALSE, trx); + + return(error); +} + +/** Update INNODB SYS_VIRTUAL table with new virtual column position +@param[in] table InnoDB table +@param[in] old_pos old position +@param[in] new_pos new position +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_update_v_pos_sys_virtual( + const dict_table_t* table, + ulint old_pos, + ulint new_pos, + trx_t* trx) +{ + pars_info_t* info = pars_info_create(); + + pars_info_add_int4_literal(info, "pos", old_pos); + pars_info_add_int4_literal(info, "val", new_pos); + pars_info_add_ull_literal(info, "id", table->id); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "UPDATE SYS_VIRTUAL\n" + "SET POS = :val\n" + "WHERE POS = :pos\n" + "AND TABLE_ID = :id;\n" + "END;\n", + FALSE, trx); + + return(error); +} + +/** Update InnoDB system tables on dropping a virtual column +@param[in] table InnoDB table +@param[in] col_name column name of the dropping column +@param[in] drop_col col information for the dropping column +@param[in] 
n_prev_dropped number of previously dropped columns in the + same alter clause +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_drop_one_virtual_sys_columns( + const dict_table_t* table, + const char* col_name, + dict_col_t* drop_col, + ulint n_prev_dropped, + trx_t* trx) +{ + pars_info_t* info = pars_info_create(); + pars_info_add_ull_literal(info, "id", table->id); + + pars_info_add_str_literal(info, "name", col_name); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "DELETE FROM SYS_COLUMNS\n" + "WHERE TABLE_ID = :id\n" + "AND NAME = :name;\n" + "END;\n", + FALSE, trx); + + if (error != DB_SUCCESS) { + return(error); + } + + dict_v_col_t* v_col = dict_table_get_nth_v_col_mysql( + table, drop_col->ind); + + /* Adjust column positions for all subsequent columns */ + for (ulint i = v_col->v_pos + 1; i < table->n_v_cols; i++) { + dict_v_col_t* t_col = dict_table_get_nth_v_col(table, i); + ulint old_p = dict_create_v_col_pos( + t_col->v_pos - n_prev_dropped, + t_col->m_col.ind - n_prev_dropped); + ulint new_p = dict_create_v_col_pos( + t_col->v_pos - 1 - n_prev_dropped, + t_col->m_col.ind - 1 - n_prev_dropped); + + error = innobase_update_v_pos_sys_columns( + table, old_p, new_p, trx); + if (error != DB_SUCCESS) { + return(error); + } + error = innobase_update_v_pos_sys_virtual( + table, old_p, new_p, trx); + if (error != DB_SUCCESS) { + return(error); + } + } + + return(error); +} + +/** Delete virtual column's info from INNODB SYS_VIRTUAL +@param[in] table InnoDB table +@param[in] pos position of the virtual column to be deleted +@param[in] trx transaction +@return DB_SUCCESS if successful, otherwise error code */ +static +dberr_t +innobase_drop_one_virtual_sys_virtual( + const dict_table_t* table, + ulint pos, + trx_t* trx) +{ + pars_info_t* info = pars_info_create(); + pars_info_add_ull_literal(info, "id", table->id); + + pars_info_add_int4_literal(info, "pos", pos); + + dberr_t error = que_eval_sql( + info, + "PROCEDURE P () IS\n" + "BEGIN\n" + "DELETE FROM SYS_VIRTUAL\n" + "WHERE TABLE_ID = :id\n" + "AND POS = :pos;\n" + "END;\n", + FALSE, trx); + + return(error); +} + +/** Update system table for dropping virtual column(s) +@param[in] ha_alter_info Data used during in-place alter +@param[in] altered_table MySQL table that is being altered +@param[in] table MySQL table as it is before the ALTER operation +@param[in] user_table InnoDB table +@param[in] trx transaction +@retval true Failure +@retval false Success */ +static +bool +innobase_drop_virtual_try( + Alter_inplace_info* ha_alter_info, + const TABLE* altered_table, + const TABLE* table, + const dict_table_t* user_table, + trx_t* trx) +{ + ha_innobase_inplace_ctx* ctx; + dberr_t err = DB_SUCCESS; + + ctx = static_cast + (ha_alter_info->handler_ctx); + + for (ulint i = 0; i < ctx->num_to_drop_vcol; i++) { + + ulint pos = dict_create_v_col_pos( + ctx->drop_vcol[i].v_pos - i, + ctx->drop_vcol[i].m_col.ind - i); + err = innobase_drop_one_virtual_sys_virtual( + user_table, pos, trx); + + if (err != DB_SUCCESS) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "InnoDB: DROP COLUMN...VIRTUAL"); + return(true); + } + + err = innobase_drop_one_virtual_sys_columns( + user_table, ctx->drop_vcol_name[i], + &(ctx->drop_vcol[i].m_col), i, trx); + + if (err != DB_SUCCESS) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "InnoDB: DROP COLUMN...VIRTUAL"); + return(true); + } + } + + + ulint n_col = user_table->n_cols; + ulint n_v_col = user_table->n_v_cols; + + 
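+	/* Note: SYS_TABLES.N_COLS is an encoded value rather than a plain
+	count: dict_table_encode_n_col() packs the stored and virtual
+	column counts together and the DICT_TF_COMPACT flag is carried in
+	the top bit, which is why the value is rebuilt from scratch below
+	instead of being decremented. */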
n_v_col -= ctx->num_to_drop_vcol; + + n_col -= dict_table_get_n_sys_cols(user_table); + + ulint new_n = dict_table_encode_n_col(n_col, n_v_col) + + ((user_table->flags & DICT_TF_COMPACT) << 31); + + err = innobase_update_n_virtual(user_table, new_n, trx); + + if (err != DB_SUCCESS) { + my_error(ER_INTERNAL_ERROR, MYF(0), + "InnoDB: DROP COLUMN...VIRTUAL"); + } + + return(false); +} + +/** Update internal structures with concurrent writes blocked, +while preparing ALTER TABLE. + +@param ha_alter_info Data used during in-place alter +@param altered_table MySQL table that is being altered +@param old_table MySQL table as it is before the ALTER operation +@param table_name Table name in MySQL +@param flags Table and tablespace flags +@param flags2 Additional table flags +@param fts_doc_id_col The column number of FTS_DOC_ID +@param add_fts_doc_id Flag: add column FTS_DOC_ID? +@param add_fts_doc_id_idx Flag: add index FTS_DOC_ID_INDEX (FTS_DOC_ID)? + +@retval true Failure +@retval false Success +*/ +static MY_ATTRIBUTE((warn_unused_result, nonnull(1,2,3,4))) +bool +prepare_inplace_alter_table_dict( +/*=============================*/ + Alter_inplace_info* ha_alter_info, + const TABLE* altered_table, + const TABLE* old_table, + const char* table_name, + ulint flags, + ulint flags2, + ulint fts_doc_id_col, + bool add_fts_doc_id, + bool add_fts_doc_id_idx) +{ + bool dict_locked = false; + ulint* add_key_nums; /* MySQL key numbers */ + index_def_t* index_defs; /* index definitions */ + dict_table_t* user_table; + dict_index_t* fts_index = NULL; + ulint new_clustered = 0; + dberr_t error; + ulint num_fts_index; + uint sql_idx; + dict_add_v_col_t* add_v = NULL; + ha_innobase_inplace_ctx*ctx; + + DBUG_ENTER("prepare_inplace_alter_table_dict"); + + ctx = static_cast + (ha_alter_info->handler_ctx); + + DBUG_ASSERT((ctx->add_autoinc != ULINT_UNDEFINED) + == (ctx->sequence.m_max_value > 0)); + DBUG_ASSERT(!ctx->num_to_drop_index == !ctx->drop_index); + DBUG_ASSERT(!ctx->num_to_drop_fk == !ctx->drop_fk); + DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_id_idx); + DBUG_ASSERT(!add_fts_doc_id_idx + || innobase_fulltext_exist(altered_table)); + DBUG_ASSERT(!ctx->add_cols); + DBUG_ASSERT(!ctx->add_index); + DBUG_ASSERT(!ctx->add_key_numbers); + DBUG_ASSERT(!ctx->num_to_add_index); + + user_table = ctx->new_table; + + trx_start_if_not_started_xa(ctx->prebuilt->trx, true); + + /* JAN: TODO: MySQL 5.7 Virtual columns + if (ha_alter_info->handler_flags + & Alter_inplace_info::DROP_VIRTUAL_COLUMN) { + if (prepare_inplace_drop_virtual( + ha_alter_info, altered_table, old_table)) { + DBUG_RETURN(true); + } + } + + if (ha_alter_info->handler_flags + & Alter_inplace_info::ADD_VIRTUAL_COLUMN) { + if (prepare_inplace_add_virtual( + ha_alter_info, altered_table, old_table)) { + DBUG_RETURN(true); + } + */ + /* Need information for newly added virtual columns + for create index */ + /* + if (ha_alter_info->handler_flags + & Alter_inplace_info::ADD_INDEX) { + add_v = static_cast( + mem_heap_alloc(ctx->heap, sizeof *add_v)); + add_v->n_v_col = ctx->num_to_add_vcol; + add_v->v_col = ctx->add_vcol; + add_v->v_col_name = ctx->add_vcol_name; + } + } + */ + /* + + JAN: TODO: MySQL 5.7 Virtual columns + + There should be no order change for virtual columns coming in + here + ut_ad(check_v_col_in_order(old_table, altered_table, ha_alter_info)); + */ + + /* Create a background transaction for the operations on the data dictionary tables. 
*/ ctx->trx = innobase_trx_allocate(ctx->prebuilt->trx->mysql_thd); @@ -2787,8 +4339,9 @@ prepare_inplace_alter_table_dict( ctx->num_to_add_index = ha_alter_info->index_add_count; ut_ad(ctx->prebuilt->trx->mysql_thd != NULL); - const char* path = thd_innodb_tmpdir( + /* const char* path = thd_innodb_tmpdir( ctx->prebuilt->trx->mysql_thd); + */ index_defs = innobase_create_key_defs( ctx->heap, ha_alter_info, altered_table, ctx->num_to_add_index, @@ -2816,7 +4369,7 @@ prepare_inplace_alter_table_dict( check_if_supported_inplace_alter(). */ ut_ad(0); my_error(ER_NOT_SUPPORTED_YET, MYF(0), - thd_query_string(ctx->prebuilt->trx->mysql_thd)->str); + thd_query(ctx->prebuilt->trx->mysql_thd)); goto error_handled; } @@ -2883,12 +4436,17 @@ prepare_inplace_alter_table_dict( const char* new_table_name = dict_mem_create_temporary_tablename( ctx->heap, - ctx->new_table->name, + ctx->new_table->name.m_name, ctx->new_table->id); - ulint n_cols; + ulint n_cols = 0; + ulint n_v_cols = 0; + ulint n_mv_cols = 0; dtuple_t* add_cols; + ulint space_id = 0; + ulint z = 0; ulint key_id = FIL_DEFAULT_ENCRYPTION_KEY; fil_encryption_t mode = FIL_SPACE_ENCRYPTION_DEFAULT; + const char* compression=NULL; crypt_data = fil_space_get_crypt_data(ctx->prebuilt->table->space); @@ -2903,7 +4461,21 @@ prepare_inplace_alter_table_dict( goto new_clustered_failed; } - n_cols = altered_table->s->stored_fields; + for (uint i = 0; i < altered_table->s->fields; i++) { + const Field* field = altered_table->field[i]; + + if (innobase_is_v_fld(field)) { + n_v_cols++; + } else { + if (field->stored_in_db()) { + n_cols++; + } else { + n_mv_cols++; + } + } + } + + ut_ad(n_cols + n_v_cols + n_mv_cols == altered_table->s->fields); if (add_fts_doc_id) { n_cols++; @@ -2925,9 +4497,25 @@ prepare_inplace_alter_table_dict( goto new_clustered_failed; } - /* The initial space id 0 may be overridden later. */ + /* Use the old tablespace unless the tablespace + is changing. */ + if (DICT_TF_HAS_SHARED_SPACE(user_table->flags) + && (0 == strcmp(ha_alter_info->create_info->tablespace, + user_table->tablespace))) { + space_id = user_table->space; + } else if (tablespace_is_shared_space( + ha_alter_info->create_info)) { + space_id = fil_space_get_id_by_name( + ha_alter_info->create_info->tablespace); + ut_a(space_id != ULINT_UNDEFINED); + } + + /* The initial space id 0 may be overridden later if this + table is going to be a file_per_table tablespace. */ ctx->new_table = dict_mem_table_create( - new_table_name, 0, n_cols, flags, flags2); + new_table_name, space_id, n_cols + n_v_cols, n_v_cols, + flags, flags2); + /* The rebuilt indexed_table will use the renamed column names. 
*/ ctx->col_names = NULL; @@ -2938,20 +4526,21 @@ prepare_inplace_alter_table_dict( user_table->data_dir_path); } - sql_idx= 0; - for (uint i = 0; i < altered_table->s->stored_fields; i++, sql_idx++) { - const Field* field; + for (uint i = 0, sql_idx=0; i < altered_table->s->stored_fields; i++, sql_idx++) { + Field* field; + ulint is_unsigned; + ulint charset_no; + ulint col_len; + while (!((field= altered_table->field[sql_idx])-> - stored_in_db())) + stored_in_db())) sql_idx++; - ulint is_unsigned; - ulint field_type - = (ulint) field->type(); + + ulint field_type = (ulint) field->type(); + bool is_virtual = innobase_is_v_fld(field); ulint col_type = get_innobase_type_from_mysql_type( &is_unsigned, field); - ulint charset_no; - ulint col_len; /* we assume in dtype_form_prtype() that this fits in two bytes */ @@ -3001,6 +4590,13 @@ prepare_inplace_alter_table_dict( if (length_bytes == 2) { field_type |= DATA_LONG_TRUE_VARCHAR; } + + } + + if (col_type == DATA_POINT) { + /* DATA_POINT should be of fixed length, + instead of the pack_length(blob length). */ + col_len = DATA_POINT_LEN; } if (dict_col_name_is_reserved(field->field_name)) { @@ -3010,27 +4606,83 @@ prepare_inplace_alter_table_dict( goto new_clustered_failed; } - dict_mem_table_add_col( - ctx->new_table, ctx->heap, - field->field_name, - col_type, - dtype_form_prtype(field_type, charset_no), - col_len); + if (is_virtual) { + /* JAN: TODO: MySQL 5.7 virtual columns + dict_mem_table_add_v_col( + ctx->new_table, ctx->heap, + field->field_name, + col_type, + dtype_form_prtype( + field_type, charset_no) + | DATA_VIRTUAL, + col_len, i, + field->gcol_info->non_virtual_base_columns()); + */ + } else { + dict_mem_table_add_col( + ctx->new_table, ctx->heap, + field->field_name, + col_type, + dtype_form_prtype( + field_type, charset_no), + col_len); + } + } + + if (n_v_cols) { + for (uint i = 0; i < altered_table->s->fields; i++) { + dict_v_col_t* v_col; + const Field* field = altered_table->field[i]; + + if (!innobase_is_v_fld(field)) { + continue; + } + v_col = dict_table_get_nth_v_col( + ctx->new_table, z); + z++; + innodb_base_col_setup( + ctx->new_table, field, v_col); + } } if (add_fts_doc_id) { fts_add_doc_id_column(ctx->new_table, ctx->heap); ctx->new_table->fts->doc_col = fts_doc_id_col; - ut_ad(fts_doc_id_col == altered_table->s->stored_fields); + // JAN: TODO: MySQL 5.7 Virtual columns + // ut_ad(fts_doc_id_col + // == altered_table->s->fields - n_v_cols); + // ut_ad(fts_doc_id_col == altered_table->s->stored_fields); + } else if (ctx->new_table->fts) { ctx->new_table->fts->doc_col = fts_doc_id_col; } + /* JAN: TODO: MySQL 5.7 Compression + compression = ha_alter_info->create_info->compress.str; + + if (Compression::validate(compression) != DB_SUCCESS) { + + compression = NULL; + } + */ + error = row_create_table_for_mysql( - ctx->new_table, ctx->trx, false, mode, key_id); + ctx->new_table, compression, ctx->trx, false, mode, key_id); switch (error) { dict_table_t* temp_table; + case DB_IO_NO_PUNCH_HOLE_FS: + + push_warning_printf( + ctx->prebuilt->trx->mysql_thd, + Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + "XPunch hole not supported by the file system. " + "Compression disabled for '%s'", + ctx->new_table->name.m_name); + + error = DB_SUCCESS; + case DB_SUCCESS: /* We need to bump up the table ref count and before we can use it we need to open the @@ -3039,13 +4691,13 @@ prepare_inplace_alter_table_dict( the dict_sys->mutex. 
*/ ut_ad(mutex_own(&dict_sys->mutex)); temp_table = dict_table_open_on_name( - ctx->new_table->name, TRUE, FALSE, + ctx->new_table->name.m_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE); ut_a(ctx->new_table == temp_table); /* n_ref_count must be 1, because purge cannot be executing on this very table as we are holding dict_operation_lock X-latch. */ - DBUG_ASSERT(ctx->new_table->n_ref_count == 1); + DBUG_ASSERT(ctx->new_table->get_ref_count() == 1); break; case DB_TABLESPACE_EXISTS: my_error(ER_TABLESPACE_EXISTS, MYF(0), @@ -3055,13 +4707,17 @@ prepare_inplace_alter_table_dict( my_error(HA_ERR_TABLE_EXIST, MYF(0), altered_table->s->table_name.str); goto new_clustered_failed; + case DB_UNSUPPORTED: + my_error(ER_UNSUPPORTED_EXTENSION, MYF(0), + ctx->new_table->name.m_name); + goto new_clustered_failed; default: my_error_innodb(error, table_name, flags); - new_clustered_failed: +new_clustered_failed: DBUG_ASSERT(ctx->trx != ctx->prebuilt->trx); trx_rollback_to_savepoint(ctx->trx, NULL); - ut_ad(user_table->n_ref_count == 1); + ut_ad(user_table->get_ref_count() == 1); online_retry_drop_indexes_with_trx( user_table, ctx->trx); @@ -3070,9 +4726,10 @@ prepare_inplace_alter_table_dict( if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN) { - add_cols = dtuple_create( + add_cols = dtuple_create_with_vcol( ctx->heap, - dict_table_get_n_cols(ctx->new_table)); + dict_table_get_n_cols(ctx->new_table), + dict_table_get_n_v_cols(ctx->new_table)); dict_table_copy_types(add_cols, ctx->new_table); } else { @@ -3087,12 +4744,35 @@ prepare_inplace_alter_table_dict( } else { DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info, old_table)); + for (dict_index_t* index + = dict_table_get_first_index(user_table); + index != NULL; + index = dict_table_get_next_index(index)) { + if (!index->to_be_dropped + && dict_index_is_corrupted(index)) { + my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); + goto error_handled; + } + } + if (!ctx->new_table->fts && innobase_fulltext_exist(altered_table)) { ctx->new_table->fts = fts_create( ctx->new_table); ctx->new_table->fts->doc_col = fts_doc_id_col; } + + /* Check if we need to update mtypes of legacy GIS columns. + This check is only needed when we don't have to rebuild + the table, since rebuild would update all mtypes for GIS + columns */ + error = innobase_check_gis_columns( + ha_alter_info, ctx->new_table, ctx->trx); + if (error != DB_SUCCESS) { + ut_ad(error == DB_ERROR); + error = DB_UNSUPPORTED; + goto error_handling; + } } /* Assign table_id, so that no table id of @@ -3106,7 +4786,7 @@ prepare_inplace_alter_table_dict( ctx->add_index[a] = row_merge_create_index( ctx->trx, ctx->new_table, - &index_defs[a], ctx->col_names); + &index_defs[a], add_v, ctx->col_names); add_key_nums[a] = index_defs[a].key_number; @@ -3116,6 +4796,9 @@ prepare_inplace_alter_table_dict( goto error_handling; } + DBUG_ASSERT(ctx->add_index[a]->is_committed() + == !!new_clustered); + if (ctx->add_index[a]->type & DICT_FTS) { DBUG_ASSERT(num_fts_index); DBUG_ASSERT(!fts_index); @@ -3146,7 +4829,7 @@ prepare_inplace_alter_table_dict( bool ok = row_log_allocate(ctx->add_index[a], NULL, true, NULL, - NULL, path); + NULL); rw_lock_x_unlock(&ctx->add_index[a]->lock); if (!ok) { @@ -3162,22 +4845,31 @@ prepare_inplace_alter_table_dict( error = DB_OUT_OF_MEMORY; goto error_handling;); - if (new_clustered && ctx->online) { - /* Allocate a log for online table rebuild. 
*/ - dict_index_t* clust_index = dict_table_get_first_index( + if (new_clustered) { + dict_index_t* clust_index = dict_table_get_first_index( user_table); + dict_index_t* new_clust_index = dict_table_get_first_index( + ctx->new_table); + ctx->skip_pk_sort = innobase_pk_order_preserved( + ctx->col_map, clust_index, new_clust_index); - rw_lock_x_lock(&clust_index->lock); - bool ok = row_log_allocate( - clust_index, ctx->new_table, - !(ha_alter_info->handler_flags - & Alter_inplace_info::ADD_PK_INDEX), - ctx->add_cols, ctx->col_map, path); - rw_lock_x_unlock(&clust_index->lock); + DBUG_EXECUTE_IF("innodb_alter_table_pk_assert_no_sort", + DBUG_ASSERT(ctx->skip_pk_sort);); - if (!ok) { - error = DB_OUT_OF_MEMORY; - goto error_handling; + if (ctx->online) { + /* Allocate a log for online table rebuild. */ + rw_lock_x_lock(&clust_index->lock); + bool ok = row_log_allocate( + clust_index, ctx->new_table, + !(ha_alter_info->handler_flags + & Alter_inplace_info::ADD_PK_INDEX), + ctx->add_cols, ctx->col_map); + rw_lock_x_unlock(&clust_index->lock); + + if (!ok) { + error = DB_OUT_OF_MEMORY; + goto error_handling; + } } } @@ -3205,11 +4897,17 @@ op_ok: #endif /* UNIV_DEBUG */ ut_ad(ctx->trx->dict_operation_lock_mode == RW_X_LATCH); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); DICT_TF2_FLAG_SET(ctx->new_table, DICT_TF2_FTS); + if (new_clustered) { + /* For !new_clustered, this will be set at + commit_cache_norebuild(). */ + ctx->new_table->fts_doc_id_index + = dict_table_get_index_on_name( + ctx->new_table, FTS_DOC_ID_INDEX_NAME); + DBUG_ASSERT(ctx->new_table->fts_doc_id_index != NULL); + } /* This function will commit the transaction and reset the trx_t::dict_operation flag on success. */ @@ -3230,7 +4928,7 @@ op_ok: || ib_vector_size(ctx->new_table->fts->indexes) == 0) { error = fts_create_common_tables( ctx->trx, ctx->new_table, - user_table->name, TRUE); + user_table->name.m_name, TRUE); DBUG_EXECUTE_IF( "innodb_test_fail_after_fts_common_table", @@ -3292,6 +4990,9 @@ error_handling: case DB_DUPLICATE_KEY: my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES"); break; + case DB_UNSUPPORTED: + my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), "SYS_COLUMNS"); + break; default: my_error_innodb(error, table_name, user_table->flags); } @@ -3314,17 +5015,7 @@ error_handled: ctx->new_table, ctx->trx); } - dict_table_close(ctx->new_table, TRUE, FALSE); - -#if defined UNIV_DEBUG || defined UNIV_DDL_DEBUG - /* Nobody should have initialized the stats of the - newly created table yet. When this is the case, we - know that it has not been added for background stats - gathering. */ - ut_a(!ctx->new_table->stat_initialized); -#endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */ - - row_merge_drop_table(ctx->trx, ctx->new_table); + dict_table_close_and_drop(ctx->trx, ctx->new_table); /* Free the log for online table rebuild, if one was allocated. */ @@ -3348,7 +5039,7 @@ error_handled: /* n_ref_count must be 1, because purge cannot be executing on this very table as we are holding dict_operation_lock X-latch. */ - DBUG_ASSERT(user_table->n_ref_count == 1 || ctx->online); + DBUG_ASSERT(user_table->get_ref_count() == 1 || ctx->online); online_retry_drop_indexes_with_trx(user_table, ctx->trx); } else { @@ -3361,12 +5052,14 @@ error_handled: ut_ad(!user_table->drop_aborted); err_exit: +#ifdef UNIV_DEBUG /* Clear the to_be_dropped flag in the data dictionary cache. 
*/ for (ulint i = 0; i < ctx->num_to_drop_index; i++) { - DBUG_ASSERT(*ctx->drop_index[i]->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(ctx->drop_index[i]->is_committed()); DBUG_ASSERT(ctx->drop_index[i]->to_be_dropped); ctx->drop_index[i]->to_be_dropped = 0; } +#endif /* UNIV_DEBUG */ row_mysql_unlock_data_dictionary(ctx->trx); @@ -3415,62 +5108,252 @@ innobase_check_foreign_key_index( } ut_ad(indexed_table == foreign->referenced_table); - if (NULL == dict_foreign_find_index( - indexed_table, col_names, - foreign->referenced_col_names, - foreign->n_fields, index, - /*check_charsets=*/TRUE, - /*check_null=*/FALSE, - NULL, NULL, NULL) - && NULL == innobase_find_equiv_index( - foreign->referenced_col_names, - foreign->n_fields, - ha_alter_info->key_info_buffer, - ha_alter_info->index_add_buffer, - ha_alter_info->index_add_count)) { + if (NULL == dict_foreign_find_index( + indexed_table, col_names, + foreign->referenced_col_names, + foreign->n_fields, index, + /*check_charsets=*/TRUE, + /*check_null=*/FALSE, + NULL, NULL, NULL) + && NULL == innobase_find_equiv_index( + foreign->referenced_col_names, + foreign->n_fields, + ha_alter_info->key_info_buffer, + ha_alter_info->index_add_buffer, + ha_alter_info->index_add_count)) { + + /* Index cannot be dropped. */ + trx->error_info = index; + return(true); + } + } + + fks = &indexed_table->foreign_set; + + /* Check for all FK references in current table using the index. */ + for (dict_foreign_set::const_iterator it = fks->begin(); + it != fks->end(); ++it) { + + dict_foreign_t* foreign = *it; + if (foreign->foreign_index != index) { + continue; + } + + ut_ad(indexed_table == foreign->foreign_table); + + if (!innobase_dropping_foreign( + foreign, drop_fk, n_drop_fk) + && NULL == dict_foreign_find_index( + indexed_table, col_names, + foreign->foreign_col_names, + foreign->n_fields, index, + /*check_charsets=*/TRUE, + /*check_null=*/FALSE, + NULL, NULL, NULL) + && NULL == innobase_find_equiv_index( + foreign->foreign_col_names, + foreign->n_fields, + ha_alter_info->key_info_buffer, + ha_alter_info->index_add_buffer, + ha_alter_info->index_add_count)) { + + /* Index cannot be dropped. */ + trx->error_info = index; + return(true); + } + } + + return(false); +} + +/** +Rename a given index in the InnoDB data dictionary. 
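The foreign-key part of the check above reduces to a simple rule: an index that backs a constraint may only be dropped if the constraint itself is being dropped, or some other index (already present, or among the ones being added) covers the same leading columns. A condensed sketch with made-up types, purely for illustration and not the handler API:

#include <cstddef>
#include <string>
#include <vector>

struct Fk  { std::vector<std::string> cols; bool being_dropped; };
struct Idx { std::vector<std::string> cols; };

/* true if "idx" starts with every column of "cols", in order */
static bool covers(const Idx& idx, const std::vector<std::string>& cols)
{
	if (idx.cols.size() < cols.size()) return false;
	for (std::size_t i = 0; i < cols.size(); i++) {
		if (idx.cols[i] != cols[i]) return false;
	}
	return true;
}

static bool ok_to_drop_index(const Idx& dropped,
			     const std::vector<Fk>& fks,
			     const std::vector<Idx>& remaining,
			     const std::vector<Idx>& being_added)
{
	for (const Fk& fk : fks) {
		if (fk.being_dropped || !covers(dropped, fk.cols)) {
			continue;	/* constraint does not rely on this index */
		}
		bool replaced = false;
		for (const Idx& i : remaining)   replaced |= covers(i, fk.cols);
		for (const Idx& i : being_added) replaced |= covers(i, fk.cols);
		if (!replaced) return false;	/* would leave the constraint without an index */
	}
	return true;
}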
+ +@param index index to rename +@param new_name new name of the index +@param[in,out] trx dict transaction to use, not going to be committed here + +@retval true Failure +@retval false Success */ +static __attribute__((warn_unused_result)) +bool +rename_index_in_data_dictionary( +/*============================*/ + const dict_index_t* index, + const char* new_name, + trx_t* trx) +{ + DBUG_ENTER("rename_index_in_data_dictionary"); + + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); + + pars_info_t* pinfo; + dberr_t err; + + pinfo = pars_info_create(); + + pars_info_add_ull_literal(pinfo, "table_id", index->table->id); + pars_info_add_ull_literal(pinfo, "index_id", index->id); + pars_info_add_str_literal(pinfo, "new_name", new_name); + + trx->op_info = "Renaming an index in SYS_INDEXES"; + + DBUG_EXECUTE_IF( + "ib_rename_index_fail1", + DBUG_SET("+d,innodb_report_deadlock"); + ); + + err = que_eval_sql( + pinfo, + "PROCEDURE RENAME_INDEX_IN_SYS_INDEXES () IS\n" + "BEGIN\n" + "UPDATE SYS_INDEXES SET\n" + "NAME = :new_name\n" + "WHERE\n" + "ID = :index_id AND\n" + "TABLE_ID = :table_id;\n" + "END;\n", + FALSE, trx); /* pinfo is freed by que_eval_sql() */ + + DBUG_EXECUTE_IF( + "ib_rename_index_fail1", + DBUG_SET("-d,innodb_report_deadlock"); + ); + + trx->op_info = ""; + + if (err != DB_SUCCESS) { + my_error_innodb(err, index->table->name.m_name, 0); + DBUG_RETURN(true); + } + + DBUG_RETURN(false); +} + +/** +Rename all indexes in data dictionary of a given table that are +specified in ha_alter_info. + +@param ctx alter context, used to fetch the list of indexes to +rename +@param ha_alter_info fetch the new names from here +@param[in,out] trx dict transaction to use, not going to be committed here + +@retval true Failure +@retval false Success */ +static __attribute__((warn_unused_result)) +bool +rename_indexes_in_data_dictionary( +/*==============================*/ + const ha_innobase_inplace_ctx* ctx, + const Alter_inplace_info* ha_alter_info, + trx_t* trx) +{ + DBUG_ENTER("rename_indexes_in_data_dictionary"); + + /* JAN: TODO: MySQL 5.7 + ut_ad(ctx->num_to_rename == ha_alter_info->index_rename_count); + + for (ulint i = 0; i < ctx->num_to_rename; i++) { + + KEY_PAIR* pair = &ha_alter_info->index_rename_buffer[i]; + dict_index_t* index; + + index = ctx->rename[i]; + + ut_ad(strcmp(index->name, pair->old_key->name) == 0); + + if (rename_index_in_data_dictionary(index, + pair->new_key->name, + trx)) { + */ + /* failed */ + DBUG_RETURN(true); + /* } + } + */ + + DBUG_RETURN(false); +} + +/** +Rename a given index in the InnoDB data dictionary cache. + +@param[in,out] index index to rename +@param new_name new index name +*/ +static +void +rename_index_in_cache( +/*==================*/ + dict_index_t* index, + const char* new_name) +{ + DBUG_ENTER("rename_index_in_cache"); + + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); - /* Index cannot be dropped. */ - trx->error_info = index; - return(true); - } + size_t old_name_len = strlen(index->name); + size_t new_name_len = strlen(new_name); + + if (old_name_len >= new_name_len) { + /* reuse the old buffer for the name if it is large enough */ + memcpy(const_cast(index->name()), new_name, + new_name_len + 1); + } else { + /* Free the old chunk of memory if it is at the topmost + place in the heap, otherwise the old chunk will be freed + when the index is evicted from the cache. 
This code will + kick-in in a repeated ALTER sequences where the old name is + alternately longer/shorter than the new name: + 1. ALTER TABLE t RENAME INDEX a TO aa; + 2. ALTER TABLE t RENAME INDEX aa TO a; + 3. go to 1. */ + index->name = mem_heap_strdup_replace( + index->heap, + /* Presumed topmost element of the heap: */ + index->name, old_name_len + 1, + new_name); } - fks = &indexed_table->foreign_set; + DBUG_VOID_RETURN; +} - /* Check for all FK references in current table using the index. */ - for (dict_foreign_set::const_iterator it = fks->begin(); - it != fks->end(); ++it) { +/** +Rename all indexes in data dictionary cache of a given table that are +specified in ha_alter_info. - dict_foreign_t* foreign = *it; - if (foreign->foreign_index != index) { - continue; - } +@param ctx alter context, used to fetch the list of indexes to rename +@param ha_alter_info fetch the new names from here +*/ +static +void +rename_indexes_in_cache( +/*====================*/ + const ha_innobase_inplace_ctx* ctx, + const Alter_inplace_info* ha_alter_info) +{ + DBUG_ENTER("rename_indexes_in_cache"); - ut_ad(indexed_table == foreign->foreign_table); + /* JAN: TODO: MySQL 5.7 + ut_ad(ctx->num_to_rename == ha_alter_info->index_rename_count); - if (!innobase_dropping_foreign( - foreign, drop_fk, n_drop_fk) - && NULL == dict_foreign_find_index( - indexed_table, col_names, - foreign->foreign_col_names, - foreign->n_fields, index, - /*check_charsets=*/TRUE, - /*check_null=*/FALSE, - NULL, NULL, NULL) - && NULL == innobase_find_equiv_index( - foreign->foreign_col_names, - foreign->n_fields, - ha_alter_info->key_info_buffer, - ha_alter_info->index_add_buffer, - ha_alter_info->index_add_count)) { + for (ulint i = 0; i < ctx->num_to_rename; i++) { + KEY_PAIR* pair = &ha_alter_info->index_rename_buffer[i]; + dict_index_t* index; - /* Index cannot be dropped. */ - trx->error_info = index; - return(true); - } + index = ctx->rename[i]; + + ut_ad(strcmp(index->name, pair->old_key->name) == 0); + + rename_index_in_cache(index, pair->new_key->name); } + */ - return(false); + DBUG_VOID_RETURN; } /** Allows InnoDB to update internal structures with concurrent @@ -3478,14 +5361,14 @@ writes blocked (provided that check_if_supported_inplace_alter() did not return HA_ALTER_INPLACE_NO_LOCK). This will be invoked before inplace_alter_table(). -@param altered_table TABLE object for new version of table. -@param ha_alter_info Structure describing changes to be done +@param altered_table TABLE object for new version of table. +@param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. 
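A compact standalone rendering (illustrative names, not the dictionary-cache API) of the buffer-reuse strategy in rename_index_in_cache() above: overwrite in place when the new name fits, otherwise take a fresh copy and leave the old buffer to the owning heap.

#include <cstring>
#include <string>

/* idx_name points into a longer-lived heap; the old buffer is reclaimed
only when that heap is freed, mirroring the comment above. */
static void rename_in_cache_sketch(char*& idx_name, const std::string& new_name)
{
	if (std::strlen(idx_name) >= new_name.size()) {
		/* new name fits: reuse the existing buffer */
		std::memcpy(idx_name, new_name.c_str(), new_name.size() + 1);
	} else {
		/* otherwise copy; the sketch deliberately leaves the old
		buffer alone, as the real code defers it to the index heap */
		char* copy = new char[new_name.size() + 1];
		std::memcpy(copy, new_name.c_str(), new_name.size() + 1);
		idx_name = copy;
	}
}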
-@retval true Failure -@retval false Success +@retval true Failure +@retval false Success */ -UNIV_INTERN + bool ha_innobase::prepare_inplace_alter_table( /*=====================================*/ @@ -3494,16 +5377,16 @@ ha_innobase::prepare_inplace_alter_table( { dict_index_t** drop_index; /*!< Index to be dropped */ ulint n_drop_index; /*!< Number of indexes to drop */ + dict_index_t** rename_index; /*!< Indexes to be dropped */ + ulint n_rename_index; /*!< Number of indexes to rename */ dict_foreign_t**drop_fk; /*!< Foreign key constraints to drop */ ulint n_drop_fk; /*!< Number of foreign keys to drop */ dict_foreign_t**add_fk = NULL; /*!< Foreign key constraints to drop */ ulint n_add_fk; /*!< Number of foreign keys to drop */ dict_table_t* indexed_table; /*!< Table where indexes are created */ - mem_heap_t* heap; + mem_heap_t* heap; const char** col_names; int error; - ulint flags; - ulint flags2; ulint max_col_len; ulint add_autoinc_col_no = ULINT_UNDEFINED; ulonglong autoinc_col_max_value = 0; @@ -3525,7 +5408,7 @@ ha_innobase::prepare_inplace_alter_table( MONITOR_ATOMIC_INC(MONITOR_PENDING_ALTER_TABLE); #ifdef UNIV_DEBUG - for (dict_index_t* index = dict_table_get_first_index(prebuilt->table); + for (dict_index_t* index = dict_table_get_first_index(m_prebuilt->table); index; index = dict_table_get_next_index(index)) { ut_ad(!index->to_be_dropped); @@ -3534,31 +5417,94 @@ ha_innobase::prepare_inplace_alter_table( ut_d(mutex_enter(&dict_sys->mutex)); ut_d(dict_table_check_for_dup_indexes( - prebuilt->table, CHECK_ABORTED_OK)); + m_prebuilt->table, CHECK_ABORTED_OK)); ut_d(mutex_exit(&dict_sys->mutex)); if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) { /* Nothing to do */ - goto func_exit; + DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0); + if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) { + + online_retry_drop_indexes( + m_prebuilt->table, m_user_thd); + + } + DBUG_RETURN(false); + } + + indexed_table = m_prebuilt->table; + + if (indexed_table->is_encrypted) { + String str; + const char* engine= table_type(); + push_warning_printf(m_user_thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_DECRYPTION_FAILED, + "Table %s is encrypted but encryption service or" + " used key_id is not available. " + " Can't continue reading table.", + indexed_table->name); + get_error_message(HA_ERR_DECRYPTION_FAILED, &str); + my_error(ER_GET_ERRMSG, MYF(0), HA_ERR_DECRYPTION_FAILED, str.c_ptr(), engine); + + DBUG_RETURN(true); + } + + if (indexed_table->corrupted + || dict_table_get_first_index(indexed_table) == NULL + || dict_index_is_corrupted( + dict_table_get_first_index(indexed_table))) { + /* The clustered index is corrupted. */ + my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); + DBUG_RETURN(true); } + /* ALTER TABLE will not implicitly move a table from a single-table + tablespace to the system tablespace when innodb_file_per_table=OFF. + But it will implicitly move a table from the system tablespace to a + single-table tablespace if innodb_file_per_table = ON. + Tables found in a general tablespace will stay there unless ALTER + TABLE contains another TABLESPACE=name. If that is found it will + explicitly move a table to the named tablespace. + So if you specify TABLESPACE=`innodb_system` a table can be moved + into the system tablespace from either a general or file-per-table + tablespace. 
But from then on, it is labeled as using a shared space + (the create options have tablespace=='innodb_system' and the + SHARED_SPACE flag is set in the table flags) so it can no longer be + implicitly moved to a file-per-table tablespace. */ + bool in_system_space = is_system_tablespace(indexed_table->space); + bool is_file_per_table = !in_system_space + && !DICT_TF_HAS_SHARED_SPACE(indexed_table->flags); +#ifdef UNIV_DEBUG + bool in_general_space = !in_system_space + && DICT_TF_HAS_SHARED_SPACE(indexed_table->flags); + + /* The table being altered can only be in a system tablespace, + or its own file-per-table tablespace, or a general tablespace. */ + ut_ad(1 == in_system_space + is_file_per_table + in_general_space); +#endif /* UNIV_DEBUG */ + + create_table_info_t info(m_user_thd, + altered_table, + ha_alter_info->create_info, + NULL, + NULL, + NULL, + NULL); + + info.set_tablespace_type(is_file_per_table); + if (ha_alter_info->handler_flags & Alter_inplace_info::CHANGE_CREATE_OPTION) { + const char* invalid_opt = info.create_options_are_invalid(); + /* Check engine specific table options */ - if (const char* invalid_tbopt = check_table_options( - user_thd, altered_table, - ha_alter_info->create_info, - prebuilt->table->space != 0, - srv_file_format)) { + if (const char* invalid_tbopt = info.check_table_options()) { my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), table_type(), invalid_tbopt); goto err_exit_no_heap; } - if (const char* invalid_opt = create_options_are_invalid( - user_thd, altered_table, - ha_alter_info->create_info, - prebuilt->table->space != 0)) { + if (invalid_opt) { my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), table_type(), invalid_opt); goto err_exit_no_heap; @@ -3567,18 +5513,20 @@ ha_innobase::prepare_inplace_alter_table( /* Check if any index name is reserved. */ if (innobase_index_name_is_reserved( - user_thd, + m_user_thd, ha_alter_info->key_info_buffer, ha_alter_info->key_count)) { err_exit_no_heap: - DBUG_ASSERT(prebuilt->trx->dict_operation_lock_mode == 0); + DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0); if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) { - online_retry_drop_indexes(prebuilt->table, user_thd); + + online_retry_drop_indexes( + m_prebuilt->table, m_user_thd); } DBUG_RETURN(true); } - indexed_table = prebuilt->table; + indexed_table = m_prebuilt->table; /* Check that index keys are sensible */ error = innobase_check_index_keys(ha_alter_info, indexed_table); @@ -3623,16 +5571,18 @@ check_if_ok_to_rename: } /* Prohibit renaming a column to an internal column. */ - const char* s = prebuilt->table->col_names; + const char* s = m_prebuilt->table->col_names; unsigned j; /* Skip user columns. MySQL should have checked these already. We want to allow renaming of c1 to c2, c2 to c1. 
*/ for (j = 0; j < table->s->fields; j++) { - s += strlen(s) + 1; + if (!innobase_is_v_fld(table->field[j])) { + s += strlen(s) + 1; + } } - for (; j < prebuilt->table->n_def; j++) { + for (; j < m_prebuilt->table->n_def; j++) { if (!my_strcasecmp( system_charset_info, name, s)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), @@ -3645,16 +5595,11 @@ check_if_ok_to_rename: } } - if (!innobase_table_flags(altered_table, - ha_alter_info->create_info, - user_thd, - srv_file_per_table - || indexed_table->space != 0, - &flags, &flags2)) { + if (!info.innobase_table_flags()) { goto err_exit_no_heap; } - max_col_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags); + max_col_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(info.flags()); /* Check each index's column length to make sure they do not exceed limit */ @@ -3706,11 +5651,11 @@ check_if_ok_to_rename: /* We need to drop any corrupted fts indexes before we add a new fts index. */ if (add_fts_idx && index->type & DICT_CORRUPT) { - ib_errf(user_thd, IB_LOG_LEVEL_ERROR, + ib_errf(m_user_thd, IB_LOG_LEVEL_ERROR, ER_INNODB_INDEX_CORRUPT, "Fulltext index '%s' is corrupt. " "you should drop this index first.", - index->name); + index->name()); goto err_exit_no_heap; } @@ -3768,8 +5713,8 @@ check_if_ok_to_rename: } for (dict_foreign_set::iterator it - = prebuilt->table->foreign_set.begin(); - it != prebuilt->table->foreign_set.end(); + = m_prebuilt->table->foreign_set.begin(); + it != m_prebuilt->table->foreign_set.end(); ++it) { dict_foreign_t* foreign = *it; @@ -3796,6 +5741,7 @@ found_fk: } DBUG_ASSERT(n_drop_fk > 0); + DBUG_ASSERT(n_drop_fk == ha_alter_info->alter_info->drop_list.elements); } else { @@ -3819,16 +5765,16 @@ found_fk: const KEY* key = ha_alter_info->index_drop_buffer[i]; dict_index_t* index - = dict_table_get_index_on_name_and_min_id( + = dict_table_get_index_on_name( indexed_table, key->name); if (!index) { push_warning_printf( - user_thd, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_WRONG_INDEX, - "InnoDB could not find key " - "with name %s", key->name); + "InnoDB could not find key" + " with name %s", key->name); } else { ut_ad(!index->to_be_dropped); if (!dict_index_is_clust(index)) { @@ -3848,8 +5794,8 @@ found_fk: && !DICT_TF2_FLAG_IS_SET( indexed_table, DICT_TF2_FTS_HAS_DOC_ID)) { dict_index_t* fts_doc_index - = dict_table_get_index_on_name( - indexed_table, FTS_DOC_ID_INDEX_NAME); + = indexed_table->fts_doc_id_index; + ut_ad(fts_doc_index); // Add some fault tolerance for non-debug builds. if (fts_doc_index == NULL) { @@ -3879,7 +5825,7 @@ check_if_can_drop_indexes: /* Prevent a race condition between DROP INDEX and CREATE TABLE adding FOREIGN KEY constraints. 
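	The data dictionary stays X-latched for the whole foreign-key and
	index validation that follows and is released again before this
	block is left.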
*/ - row_mysql_lock_data_dictionary(prebuilt->trx); + row_mysql_lock_data_dictionary(m_prebuilt->trx); if (!n_drop_index) { drop_index = NULL; @@ -3891,19 +5837,19 @@ check_if_can_drop_indexes: } } - if (prebuilt->trx->check_foreigns) { + if (m_prebuilt->trx->check_foreigns) { for (uint i = 0; i < n_drop_index; i++) { - dict_index_t* index = drop_index[i]; + dict_index_t* index = drop_index[i]; if (innobase_check_foreign_key_index( - ha_alter_info, index, - indexed_table, col_names, - prebuilt->trx, drop_fk, n_drop_fk)) { + ha_alter_info, index, + indexed_table, col_names, + m_prebuilt->trx, drop_fk, n_drop_fk)) { row_mysql_unlock_data_dictionary( - prebuilt->trx); - prebuilt->trx->error_info = index; + m_prebuilt->trx); + m_prebuilt->trx->error_info = index; print_error(HA_ERR_DROP_INDEX_FK, - MYF(0)); + MYF(0)); goto err_exit; } } @@ -3911,17 +5857,17 @@ check_if_can_drop_indexes: /* If a primary index is dropped, need to check any depending foreign constraints get affected */ if (drop_primary - && innobase_check_foreign_key_index( - ha_alter_info, drop_primary, - indexed_table, col_names, - prebuilt->trx, drop_fk, n_drop_fk)) { - row_mysql_unlock_data_dictionary(prebuilt->trx); + && innobase_check_foreign_key_index( + ha_alter_info, drop_primary, + indexed_table, col_names, + m_prebuilt->trx, drop_fk, n_drop_fk)) { + row_mysql_unlock_data_dictionary(m_prebuilt->trx); print_error(HA_ERR_DROP_INDEX_FK, MYF(0)); goto err_exit; } } - row_mysql_unlock_data_dictionary(prebuilt->trx); + row_mysql_unlock_data_dictionary(m_prebuilt->trx); } else { drop_index = NULL; } @@ -3933,14 +5879,47 @@ check_if_can_drop_indexes: index != NULL; index = dict_table_get_next_index(index)) { if (!index->to_be_dropped && dict_index_is_corrupted(index)) { - char index_name[MAX_FULL_NAME_LEN + 1]; + my_error(ER_INDEX_CORRUPT, MYF(0), index->name()); + DBUG_RETURN(true); + } + } + } - innobase_format_name(index_name, sizeof index_name, - index->name, TRUE); + n_rename_index = 0; + // JAN: TODO: MySQL 5.7 + //n_rename_index = ha_alter_info->index_rename_count; + rename_index = NULL; - my_error(ER_INDEX_CORRUPT, MYF(0), index_name); - DBUG_RETURN(true); + /* Create a list of dict_index_t objects that are to be renamed, + also checking for requests to rename nonexistent indexes. If + the table is going to be rebuilt (new_clustered == true in + prepare_inplace_alter_table_dict()), then this can be skipped, + but we don't for simplicity (we have not determined the value of + new_clustered yet). 
*/ + if (n_rename_index > 0) { + rename_index = static_cast<dict_index_t**>( + mem_heap_alloc( + heap, + n_rename_index * sizeof(*rename_index))); + for (ulint i = 0; i < n_rename_index; i++) { + dict_index_t* index = NULL; + const char* old_name = NULL; + + /* JAN: TODO: MySQL 5.7 + const char* old_name = ha_alter_info + ->index_rename_buffer[i].old_key->name; + + index = dict_table_get_index_on_name(indexed_table, + old_name); + */ + if (index == NULL) { + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), + old_name, + m_prebuilt->table->name.m_name); + goto err_exit; } + + rename_index[i] = index; } } @@ -3948,7 +5927,7 @@ check_if_can_drop_indexes: if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_FOREIGN_KEY) { - ut_ad(!prebuilt->trx->check_foreigns); + ut_ad(!m_prebuilt->trx->check_foreigns); add_fk = static_cast<dict_foreign_t**>( mem_heap_zalloc( @@ -3958,22 +5937,22 @@ check_if_can_drop_indexes: if (!innobase_get_foreign_key_info( ha_alter_info, table_share, - prebuilt->table, col_names, + m_prebuilt->table, col_names, drop_index, n_drop_index, - add_fk, &n_add_fk, prebuilt->trx)) { + add_fk, &n_add_fk, m_prebuilt->trx)) { err_exit: if (n_drop_index) { - row_mysql_lock_data_dictionary(prebuilt->trx); + row_mysql_lock_data_dictionary(m_prebuilt->trx); /* Clear the to_be_dropped flags, which might have been set at this point. */ for (ulint i = 0; i < n_drop_index; i++) { - DBUG_ASSERT(*drop_index[i]->name - != TEMP_INDEX_PREFIX); + ut_ad(drop_index[i]->is_committed()); drop_index[i]->to_be_dropped = 0; } - row_mysql_unlock_data_dictionary(prebuilt->trx); + row_mysql_unlock_data_dictionary( + m_prebuilt->trx); } if (heap) { @@ -3992,20 +5971,40 @@ err_exit: if (heap) { ha_alter_info->handler_ctx = new ha_innobase_inplace_ctx( - prebuilt, + (*m_prebuilt_ptr), drop_index, n_drop_index, + rename_index, n_rename_index, drop_fk, n_drop_fk, add_fk, n_add_fk, ha_alter_info->online, heap, indexed_table, - col_names, ULINT_UNDEFINED, 0, 0); + col_names, ULINT_UNDEFINED, 0, 0, 0); } -func_exit: - DBUG_ASSERT(prebuilt->trx->dict_operation_lock_mode == 0); + DBUG_ASSERT(m_prebuilt->trx->dict_operation_lock_mode == 0); if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) { - online_retry_drop_indexes(prebuilt->table, user_thd); + + online_retry_drop_indexes( + m_prebuilt->table, m_user_thd); + } + + /* JAN: TODO: MySQL 5.7 + if ((ha_alter_info->handler_flags + & Alter_inplace_info::DROP_VIRTUAL_COLUMN) + && prepare_inplace_drop_virtual( + ha_alter_info, altered_table, table)) { + DBUG_RETURN(true); + } + + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ADD_VIRTUAL_COLUMN) + && prepare_inplace_add_virtual( + ha_alter_info, altered_table, table)) { + DBUG_RETURN(true); + } + */ + DBUG_RETURN(false); } @@ -4014,25 +6013,31 @@ func_exit: add a Doc ID hidden column and rebuild the primary index */ if (innobase_fulltext_exist(altered_table)) { ulint doc_col_no; + ulint num_v = 0; if (!innobase_fts_check_doc_id_col( - prebuilt->table, altered_table, &fts_doc_col_no)) { + m_prebuilt->table, + altered_table, &fts_doc_col_no, &num_v)) { + fts_doc_col_no = altered_table->s->stored_fields; + + fts_doc_col_no = altered_table->s->fields - num_v; add_fts_doc_id = true; add_fts_doc_id_idx = true; push_warning_printf( - user_thd, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_WRONG_INDEX, - "InnoDB rebuilding table to add column " - FTS_DOC_ID_COL_NAME); + "InnoDB rebuilding table to add" + " column " FTS_DOC_ID_COL_NAME); } else if (fts_doc_col_no == ULINT_UNDEFINED) { goto err_exit; } switch
(innobase_fts_check_doc_id_index( - prebuilt->table, altered_table, &doc_col_no)) { + m_prebuilt->table, altered_table, + &doc_col_no)) { case FTS_NOT_EXIST_DOC_ID_INDEX: add_fts_doc_id_idx = true; break; @@ -4041,12 +6046,16 @@ func_exit: FTS_DOC_ID_INDEX_NAME); goto err_exit; case FTS_EXIST_DOC_ID_INDEX: - DBUG_ASSERT(doc_col_no == fts_doc_col_no - || doc_col_no == ULINT_UNDEFINED - || (ha_alter_info->handler_flags - & (Alter_inplace_info::ALTER_COLUMN_ORDER - | Alter_inplace_info::DROP_COLUMN - | Alter_inplace_info::ADD_COLUMN))); + DBUG_ASSERT( + doc_col_no == fts_doc_col_no + || doc_col_no == ULINT_UNDEFINED + || (ha_alter_info->handler_flags)); + /* JAN: TODO: MySQL 5.7 Virtual columns + & (Alter_inplace_info::ALTER_STORED_COLUMN_ORDER + | Alter_inplace_info::DROP_STORED_COLUMN + | + Alter_inplace_info::ADD_STORED_COLUMN))); + */ } } @@ -4092,8 +6101,11 @@ func_exit: } add_autoinc_col_no = innodb_idx; - autoinc_col_max_value = innobase_get_int_col_max_value( - field); + /* JAN: TODO: MySQL 5.7 + autoinc_col_max_value = + field->get_max_int_value(); + */ + autoinc_col_max_value = innobase_get_int_col_max_value(field); } found_col: i++; @@ -4101,23 +6113,24 @@ found_col: } DBUG_ASSERT(heap); - DBUG_ASSERT(user_thd == prebuilt->trx->mysql_thd); + DBUG_ASSERT(m_user_thd == m_prebuilt->trx->mysql_thd); DBUG_ASSERT(!ha_alter_info->handler_ctx); ha_alter_info->handler_ctx = new ha_innobase_inplace_ctx( - prebuilt, + (*m_prebuilt_ptr), drop_index, n_drop_index, + rename_index, n_rename_index, drop_fk, n_drop_fk, add_fk, n_add_fk, ha_alter_info->online, - heap, prebuilt->table, col_names, + heap, m_prebuilt->table, col_names, add_autoinc_col_no, ha_alter_info->create_info->auto_increment_value, - autoinc_col_max_value); + autoinc_col_max_value, 0); DBUG_RETURN(prepare_inplace_alter_table_dict( ha_alter_info, altered_table, table, table_share->table_name.str, - flags, flags2, + info.flags(), info.flags2(), fts_doc_col_no, add_fts_doc_id, add_fts_doc_id_idx)); } @@ -4127,35 +6140,38 @@ specified using Alter_inplace_info. The level of concurrency allowed during this operation depends on the return value from check_if_supported_inplace_alter(). -@param altered_table TABLE object for new version of table. -@param ha_alter_info Structure describing changes to be done +@param altered_table TABLE object for new version of table. +@param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. 
-@retval true Failure -@retval false Success +@retval true Failure +@retval false Success */ -UNIV_INTERN + bool ha_innobase::inplace_alter_table( /*=============================*/ TABLE* altered_table, Alter_inplace_info* ha_alter_info) { - dberr_t error; + dberr_t error; + dict_add_v_col_t* add_v = NULL; + innodb_col_templ_t* s_templ = NULL; + innodb_col_templ_t* old_templ = NULL; + DBUG_ENTER("inplace_alter_table"); DBUG_ASSERT(!srv_read_only_mode); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); - ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(!rw_lock_own(dict_operation_lock, RW_LOCK_S)); + + DEBUG_SYNC(m_user_thd, "innodb_inplace_alter_table_enter"); - DEBUG_SYNC(user_thd, "innodb_inplace_alter_table_enter"); if (!(ha_alter_info->handler_flags & INNOBASE_ALTER_DATA)) { ok_exit: - DEBUG_SYNC(user_thd, "innodb_after_inplace_alter_table"); + DEBUG_SYNC(m_user_thd, "innodb_after_inplace_alter_table"); DBUG_RETURN(false); } @@ -4171,13 +6187,58 @@ ok_exit: DBUG_ASSERT(ctx); DBUG_ASSERT(ctx->trx); - DBUG_ASSERT(ctx->prebuilt == prebuilt); + DBUG_ASSERT(ctx->prebuilt == m_prebuilt); - if (prebuilt->table->ibd_file_missing - || dict_table_is_discarded(prebuilt->table)) { + dict_index_t* pk = dict_table_get_first_index(m_prebuilt->table); + ut_ad(pk != NULL); + + /* For partitioned tables this could be already allocated from a + previous partition invocation. For normal tables this is NULL. */ + UT_DELETE(ctx->m_stage); + + ctx->m_stage = UT_NEW_NOKEY(ut_stage_alter_t(pk)); + + if (m_prebuilt->table->ibd_file_missing + || dict_table_is_discarded(m_prebuilt->table)) { goto all_done; } + /* If we are doing a table rebuilding or having added virtual + columns in the same clause, we will need to build a table template + that carries translation information between MySQL TABLE and InnoDB + table, which indicates the virtual columns and their base columns + info. This is used to do the computation callback, so that the + data in base columns can be extracted send to server */ + if (ctx->need_rebuild() && ctx->new_table->n_v_cols) { + s_templ = static_cast( + mem_heap_alloc(ctx->heap, sizeof *s_templ)); + s_templ->vtempl = NULL; + + innobase_build_v_templ( + altered_table, ctx->new_table, s_templ, + NULL, false, NULL); + + ctx->new_table->vc_templ = s_templ; + } else if (ctx->num_to_add_vcol) { + ut_ad(!ctx->online); + s_templ = static_cast( + mem_heap_alloc(ctx->heap, sizeof *s_templ)); + + add_v = static_cast( + mem_heap_alloc(ctx->heap, sizeof *add_v)); + add_v->n_v_col = ctx->num_to_add_vcol; + add_v->v_col = ctx->add_vcol; + add_v->v_col_name = ctx->add_vcol_name; + + s_templ->vtempl = NULL; + + innobase_build_v_templ( + altered_table, ctx->new_table, s_templ, + add_v, false, NULL); + old_templ = ctx->new_table->vc_templ; + ctx->new_table->vc_templ = s_templ; + } + /* Read the clustered index of the table and build indexes based on this information using temporary files and merge sort. 
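This is the classic external merge sort: the clustered index is scanned once, sorted runs bounded by the sort buffer are spilled to temporary files, and the runs are then merged so that each new index can be built in key order. A compact standalone sketch of the idea, with in-memory vectors standing in for the temporary files (this is not row_merge_build_indexes() itself):

#include <algorithm>
#include <queue>
#include <utility>
#include <vector>

// Build sorted runs no larger than buf_rows, then k-way merge them.
std::vector<int> external_merge_sort(const std::vector<int>& input,
                                     size_t buf_rows) {
    std::vector<std::vector<int>> runs;
    for (size_t i = 0; i < input.size(); i += buf_rows) {
        std::vector<int> run(input.begin() + i,
                             input.begin() + std::min(i + buf_rows, input.size()));
        std::sort(run.begin(), run.end());      // one "temporary file" per run
        runs.push_back(std::move(run));
    }

    using Item = std::pair<int, size_t>;        // value, run number
    std::priority_queue<Item, std::vector<Item>, std::greater<Item>> heap;
    std::vector<size_t> pos(runs.size(), 0);
    for (size_t r = 0; r < runs.size(); r++) {
        if (!runs[r].empty()) heap.push({runs[r][0], r});
    }

    std::vector<int> out;
    while (!heap.empty()) {                     // emit rows in key order
        auto [val, r] = heap.top();
        heap.pop();
        out.push_back(val);
        if (++pos[r] < runs[r].size()) heap.push({runs[r][pos[r]], r});
    }
    return out;
}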
*/ @@ -4185,19 +6246,31 @@ ok_exit: error = DB_OUT_OF_MEMORY; goto oom;); error = row_merge_build_indexes( - prebuilt->trx, - prebuilt->table, ctx->new_table, + m_prebuilt->trx, + m_prebuilt->table, ctx->new_table, ctx->online, ctx->add_index, ctx->add_key_numbers, ctx->num_to_add_index, altered_table, ctx->add_cols, ctx->col_map, - ctx->add_autoinc, ctx->sequence); + ctx->add_autoinc, ctx->sequence, ctx->skip_pk_sort, + ctx->m_stage, add_v); + + if (s_templ) { + ut_ad(ctx->need_rebuild() || ctx->num_to_add_vcol); + free_vc_templ(s_templ); + + if (old_templ) { + ctx->new_table->vc_templ = old_templ; + } + } + #ifndef DBUG_OFF oom: #endif /* !DBUG_OFF */ if (error == DB_SUCCESS && ctx->online && ctx->need_rebuild()) { DEBUG_SYNC_C("row_log_table_apply1_before"); error = row_log_table_apply( - ctx->thr, prebuilt->table, altered_table); + ctx->thr, m_prebuilt->table, altered_table, + ctx->m_stage); } /* Init online ddl status variables */ @@ -4209,7 +6282,7 @@ oom: DBUG_EXECUTE_IF("create_index_fail", error = DB_DUPLICATE_KEY; - prebuilt->trx->error_key_num = ULINT_UNDEFINED;); + m_prebuilt->trx->error_key_num = ULINT_UNDEFINED;); /* After an error, remove all those index definitions from the dictionary which were defined. */ @@ -4220,13 +6293,13 @@ oom: case DB_SUCCESS: ut_d(mutex_enter(&dict_sys->mutex)); ut_d(dict_table_check_for_dup_indexes( - prebuilt->table, CHECK_PARTIAL_OK)); + m_prebuilt->table, CHECK_PARTIAL_OK)); ut_d(mutex_exit(&dict_sys->mutex)); /* prebuilt->table->n_ref_count can be anything here, given that we hold at most a shared lock on the table. */ goto ok_exit; case DB_DUPLICATE_KEY: - if (prebuilt->trx->error_key_num == ULINT_UNDEFINED + if (m_prebuilt->trx->error_key_num == ULINT_UNDEFINED || ha_alter_info->key_count == 0) { /* This should be the hidden index on FTS_DOC_ID, or there is no PRIMARY KEY in the @@ -4234,27 +6307,27 @@ oom: reporting a bogus duplicate key error. */ dup_key = NULL; } else { - DBUG_ASSERT(prebuilt->trx->error_key_num + DBUG_ASSERT(m_prebuilt->trx->error_key_num < ha_alter_info->key_count); dup_key = &ha_alter_info->key_info_buffer[ - prebuilt->trx->error_key_num]; + m_prebuilt->trx->error_key_num]; } print_keydup_error(altered_table, dup_key, MYF(0)); break; case DB_ONLINE_LOG_TOO_BIG: DBUG_ASSERT(ctx->online); my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0), - (prebuilt->trx->error_key_num == ULINT_UNDEFINED) + (m_prebuilt->trx->error_key_num == ULINT_UNDEFINED) ? FTS_DOC_ID_INDEX_NAME : ha_alter_info->key_info_buffer[ - prebuilt->trx->error_key_num].name); + m_prebuilt->trx->error_key_num].name); break; case DB_INDEX_CORRUPT: my_error(ER_INDEX_CORRUPT, MYF(0), - (prebuilt->trx->error_key_num == ULINT_UNDEFINED) + (m_prebuilt->trx->error_key_num == ULINT_UNDEFINED) ? FTS_DOC_ID_INDEX_NAME : ha_alter_info->key_info_buffer[ - prebuilt->trx->error_key_num].name); + m_prebuilt->trx->error_key_num].name); break; case DB_DECRYPTION_FAILED: { String str; @@ -4266,19 +6339,19 @@ oom: default: my_error_innodb(error, table_share->table_name.str, - prebuilt->table->flags); + m_prebuilt->table->flags); } /* prebuilt->table->n_ref_count can be anything here, given that we hold at most a shared lock on the table. */ - prebuilt->trx->error_info = NULL; + m_prebuilt->trx->error_info = NULL; ctx->trx->error_state = DB_SUCCESS; DBUG_RETURN(true); } /** Free the modification log for online table rebuild. 
-@param table table that was being rebuilt online */ +@param table table that was being rebuilt online */ static void innobase_online_rebuild_log_free( @@ -4288,9 +6361,7 @@ innobase_online_rebuild_log_free( dict_index_t* clust_index = dict_table_get_first_index(table); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); rw_lock_x_lock(&clust_index->lock); @@ -4309,10 +6380,10 @@ innobase_online_rebuild_log_free( /** Rollback a secondary index creation, drop the indexes with temparary index prefix -@param user_table InnoDB table -@param table the TABLE -@param locked TRUE=table locked, FALSE=may need to do a lazy drop -@param trx the transaction +@param user_table InnoDB table +@param table the TABLE +@param locked TRUE=table locked, FALSE=may need to do a lazy drop +@param trx the transaction */ static MY_ATTRIBUTE((nonnull)) void @@ -4342,11 +6413,11 @@ for inplace_alter_table() and thus might be higher than during prepare_inplace_alter_table(). (E.g concurrent writes were blocked during prepare, but might not be during commit). -@param ha_alter_info Data used during in-place alter. -@param table the TABLE -@param prebuilt the prebuilt struct -@retval true Failure -@retval false Success +@param ha_alter_info Data used during in-place alter. +@param table the TABLE +@param prebuilt the prebuilt struct +@retval true Failure +@retval false Success */ inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool @@ -4370,10 +6441,11 @@ rollback_inplace_alter_table( goto func_exit; } + trx_start_for_ddl(ctx->trx, TRX_DICT_OP_INDEX); row_mysql_lock_data_dictionary(ctx->trx); if (ctx->need_rebuild()) { - dberr_t err; + dberr_t err = DB_SUCCESS; ulint flags = ctx->new_table->flags; /* DML threads can access ctx->new_table via the @@ -4383,7 +6455,7 @@ rollback_inplace_alter_table( /* Since the FTS index specific auxiliary tables has not yet registered with "table->fts" by fts_add_index(), we will need explicitly delete them here */ - if (DICT_TF2_FLAG_IS_SET(ctx->new_table, DICT_TF2_FTS)) { + if (dict_table_has_fts_index(ctx->new_table)) { err = innobase_drop_fts_index_table( ctx->new_table, ctx->trx); @@ -4396,18 +6468,7 @@ rollback_inplace_alter_table( } } - /* Drop the table. */ - dict_table_close(ctx->new_table, TRUE, FALSE); - -#if defined UNIV_DEBUG || defined UNIV_DDL_DEBUG - /* Nobody should have initialized the stats of the - newly created table yet. When this is the case, we - know that it has not been added for background stats - gathering. */ - ut_a(!ctx->new_table->stat_initialized); -#endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */ - - err = row_merge_drop_table(ctx->trx, ctx->new_table); + dict_table_close_and_drop(ctx->trx, ctx->new_table); switch (err) { case DB_SUCCESS: @@ -4422,8 +6483,6 @@ rollback_inplace_alter_table( & Alter_inplace_info::ADD_PK_INDEX)); DBUG_ASSERT(ctx->new_table == prebuilt->table); - trx_start_for_ddl(ctx->trx, TRX_DICT_OP_INDEX); - innobase_rollback_sec_index( prebuilt->table, table, FALSE, ctx->trx); } @@ -4460,8 +6519,7 @@ func_exit: commit_inplace_alter_table(). */ for (ulint i = 0; i < ctx->num_to_drop_index; i++) { dict_index_t* index = ctx->drop_index[i]; - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); - + DBUG_ASSERT(index->is_committed()); index->to_be_dropped = 0; } @@ -4475,11 +6533,11 @@ func_exit: } /** Drop a FOREIGN KEY constraint from the data dictionary tables. 
-@param trx data dictionary transaction -@param table_name Table name in MySQL -@param foreign_id Foreign key constraint identifier -@retval true Failure -@retval false Success */ +@param trx data dictionary transaction +@param table_name Table name in MySQL +@param foreign_id Foreign key constraint identifier +@retval true Failure +@retval false Success */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_drop_foreign_try( @@ -4493,9 +6551,7 @@ innobase_drop_foreign_try( DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX); ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); /* Drop the constraint from the data dictionary. */ static const char sql[] = @@ -4528,26 +6584,27 @@ innobase_drop_foreign_try( } /** Rename a column in the data dictionary tables. -@param user_table InnoDB table that was being altered -@param trx data dictionary transaction -@param table_name Table name in MySQL -@param nth_col 0-based index of the column -@param from old column name -@param to new column name -@param new_clustered whether the table has been rebuilt -@retval true Failure -@retval false Success */ +@param[in] user_table InnoDB table that was being altered +@param[in] trx data dictionary transaction +@param[in] table_name Table name in MySQL +@param[in] nth_col 0-based index of the column +@param[in] from old column name +@param[in] to new column name +@param[in] new_clustered whether the table has been rebuilt +@param[in] is_virtual whether it is a virtual column +@retval true Failure +@retval false Success */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool innobase_rename_column_try( -/*=======================*/ const dict_table_t* user_table, trx_t* trx, const char* table_name, ulint nth_col, const char* from, const char* to, - bool new_clustered) + bool new_clustered, + bool is_virtual) { pars_info_t* info; dberr_t error; @@ -4557,9 +6614,7 @@ innobase_rename_column_try( DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX); ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); ut_ad(mutex_own(&dict_sys->mutex)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); if (new_clustered) { goto rename_foreign; @@ -4707,138 +6762,291 @@ rename_foreign: pars_info_add_str_literal(info, "old", from); pars_info_add_str_literal(info, "new", to); - error = que_eval_sql( - info, - "PROCEDURE RENAME_SYS_FOREIGN_R_PROC () IS\n" - "BEGIN\n" - "UPDATE SYS_FOREIGN_COLS\n" - "SET REF_COL_NAME=:new\n" - "WHERE ID=:id AND POS=:nth\n" - "AND REF_COL_NAME=:old;\n" - "END;\n", - FALSE, trx); + error = que_eval_sql( + info, + "PROCEDURE RENAME_SYS_FOREIGN_R_PROC () IS\n" + "BEGIN\n" + "UPDATE SYS_FOREIGN_COLS\n" + "SET REF_COL_NAME=:new\n" + "WHERE ID=:id AND POS=:nth\n" + "AND REF_COL_NAME=:old;\n" + "END;\n", + FALSE, trx); + + if (error != DB_SUCCESS) { + goto err_exit; + } + foreign_modified = true; + } + + if (foreign_modified) { + fk_evict.push_back(foreign); + } + } + + if (new_clustered) { + std::for_each(fk_evict.begin(), fk_evict.end(), + dict_foreign_remove_from_cache); + } + + trx->op_info = ""; + DBUG_RETURN(false); +} + +/** Rename columns in the data dictionary tables. +@param ha_alter_info Data used during in-place alter. 
+@param ctx In-place ALTER TABLE context +@param table the TABLE +@param trx data dictionary transaction +@param table_name Table name in MySQL +@retval true Failure +@retval false Success */ +static MY_ATTRIBUTE((nonnull, warn_unused_result)) +bool +innobase_rename_columns_try( +/*========================*/ + Alter_inplace_info* ha_alter_info, + ha_innobase_inplace_ctx*ctx, + const TABLE* table, + trx_t* trx, + const char* table_name) +{ + List_iterator_fast cf_it( + ha_alter_info->alter_info->create_list); + uint i = 0; + ulint num_v = 0; + + DBUG_ASSERT(ctx); + DBUG_ASSERT(ha_alter_info->handler_flags + & Alter_inplace_info::ALTER_COLUMN_NAME); + + for (Field** fp = table->field; *fp; fp++, i++) { + bool is_virtual = innobase_is_v_fld(*fp); + if (!((*fp)->flags & FIELD_IS_RENAMED) || !((*fp)->stored_in_db())) { + + goto processed_field; + } + + cf_it.rewind(); + while (Create_field* cf = cf_it++) { + if (cf->field == *fp) { + ulint col_n = is_virtual + ? dict_create_v_col_pos( + num_v, i) + : i - num_v; + + if (innobase_rename_column_try( + ctx->old_table, trx, table_name, + col_n, + cf->field->field_name, + cf->field_name, + ctx->need_rebuild(), + is_virtual)) { + return(true); + } + goto processed_field; + } + } + + ut_error; +processed_field: + if (is_virtual) { + num_v++; + } + + continue; + } + + return(false); +} + +/** Enlarge a column in the data dictionary tables. +@param user_table InnoDB table that was being altered +@param trx data dictionary transaction +@param table_name Table name in MySQL +@param nth_col 0-based index of the column +@param new_len new column length, in bytes +@retval true Failure +@retval false Success */ +static __attribute__((nonnull, warn_unused_result)) +bool +innobase_enlarge_column_try( +/*========================*/ + const dict_table_t* user_table, + trx_t* trx, + const char* table_name, + ulint nth_col, + ulint new_len) +{ + pars_info_t* info; + dberr_t error; + + DBUG_ENTER("innobase_enlarge_column_try"); + + DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX); + ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); + ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); + ut_ad(dict_table_get_nth_col(user_table, nth_col)->len < new_len); +#ifdef UNIV_DEBUG + switch (dict_table_get_nth_col(user_table, nth_col)->mtype) { + case DATA_MYSQL: + /* NOTE: we could allow this when !(prtype & DATA_BINARY_TYPE) + and ROW_FORMAT is not REDUNDANT and mbminlenid); + pars_info_add_int4_literal(info, "nth", nth_col); + pars_info_add_int4_literal(info, "new", new_len); + + trx->op_info = "resizing column in SYS_COLUMNS"; - if (error != DB_SUCCESS) { - goto err_exit; - } - foreign_modified = true; - } + error = que_eval_sql( + info, + "PROCEDURE RESIZE_SYS_COLUMNS_PROC () IS\n" + "BEGIN\n" + "UPDATE SYS_COLUMNS SET LEN=:new\n" + "WHERE TABLE_ID=:tableid AND POS=:nth;\n" + "END;\n", + FALSE, trx); - if (foreign_modified) { - fk_evict.push_back(foreign); - } - } + DBUG_EXECUTE_IF("ib_resize_column_error", + error = DB_OUT_OF_FILE_SPACE;); - if (new_clustered) { - std::for_each(fk_evict.begin(), fk_evict.end(), - dict_foreign_remove_from_cache); + trx->op_info = ""; + trx->error_state = DB_SUCCESS; + + if (error != DB_SUCCESS) { + my_error_innodb(error, table_name, 0); + DBUG_RETURN(true); } - trx->op_info = ""; DBUG_RETURN(false); } -/** Rename columns in the data dictionary tables. -@param ha_alter_info Data used during in-place alter. 
-@param ctx In-place ALTER TABLE context -@param table the TABLE -@param trx data dictionary transaction -@param table_name Table name in MySQL -@retval true Failure -@retval false Success */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +/** Enlarge columns in the data dictionary tables. +@param ha_alter_info Data used during in-place alter. +@param table the TABLE +@param user_table InnoDB table that was being altered +@param trx data dictionary transaction +@param table_name Table name in MySQL +@retval true Failure +@retval false Success */ +static __attribute__((nonnull, warn_unused_result)) bool -innobase_rename_columns_try( -/*========================*/ +innobase_enlarge_columns_try( +/*=========================*/ Alter_inplace_info* ha_alter_info, - ha_innobase_inplace_ctx*ctx, const TABLE* table, + const dict_table_t* user_table, trx_t* trx, const char* table_name) { List_iterator_fast cf_it( ha_alter_info->alter_info->create_list); - uint i = 0; - - DBUG_ASSERT(ctx); - DBUG_ASSERT(ha_alter_info->handler_flags - & Alter_inplace_info::ALTER_COLUMN_NAME); + ulint i = 0; for (Field** fp = table->field; *fp; fp++, i++) { - if (!((*fp)->flags & FIELD_IS_RENAMED) || !((*fp)->stored_in_db())) { - continue; - } - cf_it.rewind(); while (Create_field* cf = cf_it++) { if (cf->field == *fp) { - if (innobase_rename_column_try( - ctx->old_table, trx, table_name, i, - cf->field->field_name, - cf->field_name, - ctx->need_rebuild())) { + if ((*fp)->is_equal(cf) + == IS_EQUAL_PACK_LENGTH + && innobase_enlarge_column_try( + user_table, trx, table_name, + i, cf->length)) { return(true); } - goto processed_field; + + break; } } - - ut_error; -processed_field: - continue; } return(false); } -/** Rename columns in the data dictionary cache +/** Rename or enlarge columns in the data dictionary cache as part of commit_cache_norebuild(). -@param ha_alter_info Data used during in-place alter. -@param table the TABLE -@param user_table InnoDB table that was being altered */ +@param ha_alter_info Data used during in-place alter. +@param table the TABLE +@param user_table InnoDB table that was being altered */ static MY_ATTRIBUTE((nonnull)) void -innobase_rename_columns_cache( -/*==========================*/ +innobase_rename_or_enlarge_columns_cache( +/*=====================================*/ Alter_inplace_info* ha_alter_info, const TABLE* table, dict_table_t* user_table) { if (!(ha_alter_info->handler_flags - & Alter_inplace_info::ALTER_COLUMN_NAME)) { + & (Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH + | Alter_inplace_info::ALTER_COLUMN_NAME))) { return; } List_iterator_fast cf_it( ha_alter_info->alter_info->create_list); - uint i = 0; + uint i = 0; + ulint num_v = 0; for (Field** fp = table->field; *fp; fp++, i++) { - if (!((*fp)->flags & FIELD_IS_RENAMED)) { - continue; - } + bool is_virtual = innobase_is_v_fld(*fp); cf_it.rewind(); while (Create_field* cf = cf_it++) { - if (cf->field == *fp) { - dict_mem_table_col_rename(user_table, i, - cf->field->field_name, - cf->field_name); - goto processed_field; + if (cf->field != *fp) { + continue; + } + + ulint col_n = is_virtual ? 
num_v : i - num_v; + + if ((*fp)->is_equal(cf) == IS_EQUAL_PACK_LENGTH) { + if (is_virtual) { + dict_table_get_nth_v_col( + user_table, col_n)->m_col.len + = cf->length; + } else { + dict_table_get_nth_col( + user_table, col_n)->len + = cf->length; + } + } + + if ((*fp)->flags & FIELD_IS_RENAMED) { + dict_mem_table_col_rename( + user_table, col_n, + cf->field->field_name, + cf->field_name, is_virtual); } + + break; } - ut_error; -processed_field: - continue; + if (is_virtual) { + num_v++; + } } } /** Get the auto-increment value of the table on commit. -@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param altered_table MySQL table that is being altered -@param old_table MySQL table as it is before the ALTER operation +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param altered_table MySQL table that is being altered +@param old_table MySQL table as it is before the ALTER operation @return the next auto-increment value (0 if not present) */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulonglong @@ -4874,11 +7082,9 @@ commit_get_autoinc( Field* autoinc_field = old_table->found_next_number_field; - KEY* autoinc_key = - old_table->key_info + old_table->s->next_number_index; - dict_index_t* index = dict_table_get_index_on_name( - ctx->old_table, autoinc_key->name); + dict_index_t* index = dict_table_get_index_on_first_col( + ctx->old_table, autoinc_field->field_index); max_autoinc = ha_alter_info->create_info->auto_increment_value; @@ -4894,8 +7100,9 @@ commit_get_autoinc( ulonglong col_max_value; ulonglong offset; - col_max_value = innobase_get_int_col_max_value( - old_table->found_next_number_field); + col_max_value = innobase_get_int_col_max_value(autoinc_field); + // JAN: TODO: MySQL 5.7 + //col_max_value = autoinc_field->get_max_int_value(); offset = ctx->prebuilt->autoinc_offset; max_autoinc = innobase_next_autoinc( @@ -4917,12 +7124,12 @@ commit_get_autoinc( /** Add or drop foreign key constraints to the data dictionary tables, but do not touch the data dictionary cache. -@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param trx Data dictionary transaction -@param table_name Table name in MySQL -@retval true Failure -@retval false Success +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param trx Data dictionary transaction +@param table_name Table name in MySQL +@retval true Failure +@retval false Success */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) bool @@ -4949,7 +7156,7 @@ innobase_update_foreign_try( || fk->foreign_table == ctx->old_table); dberr_t error = dict_create_add_foreign_id( - &foreign_id, ctx->old_table->name, fk); + &foreign_id, ctx->old_table->name.m_name, fk); if (error != DB_SUCCESS) { my_error(ER_TOO_LONG_IDENT, MYF(0), @@ -4977,7 +7184,7 @@ innobase_update_foreign_try( names, while the columns in ctx->old_table have not been renamed yet. 
*/ error = dict_create_add_foreign_to_dictionary( - (dict_table_t*)ctx->old_table,ctx->old_table->name, fk, trx); + (dict_table_t*)ctx->old_table,ctx->old_table->name.m_name, fk, trx); DBUG_EXECUTE_IF( "innodb_test_cannot_add_fk_system", @@ -5020,6 +7227,8 @@ innobase_update_foreign_cache( DBUG_ENTER("innobase_update_foreign_cache"); + ut_ad(mutex_own(&dict_sys->mutex)); + user_table = ctx->old_table; /* Discard the added foreign keys, because we will @@ -5051,17 +7260,23 @@ innobase_update_foreign_cache( /* Load the old or added foreign keys from the data dictionary and prevent the table from being evicted from the data dictionary cache (work around the lack of WL#6049). */ - err = dict_load_foreigns(user_table->name, + dict_names_t fk_tables; + + err = dict_load_foreigns(user_table->name.m_name, ctx->col_names, false, true, - DICT_ERR_IGNORE_NONE); + DICT_ERR_IGNORE_NONE, + fk_tables); if (err == DB_CANNOT_ADD_CONSTRAINT) { + fk_tables.clear(); + /* It is possible there are existing foreign key are loaded with "foreign_key checks" off, so let's retry the loading with charset_check is off */ - err = dict_load_foreigns(user_table->name, + err = dict_load_foreigns(user_table->name.m_name, ctx->col_names, false, false, - DICT_ERR_IGNORE_NONE); + DICT_ERR_IGNORE_NONE, + fk_tables); /* The load with "charset_check" off is successful, warn the user that the foreign key has loaded with mis-matched @@ -5073,25 +7288,46 @@ innobase_update_foreign_cache( ER_ALTER_INFO, "Foreign key constraints for table '%s'" " are loaded with charset check off", - user_table->name); - + user_table->name.m_name); } } + /* For complete loading of foreign keys, all associated tables must + also be loaded. */ + while (err == DB_SUCCESS && !fk_tables.empty()) { + dict_table_t* table = dict_load_table( + fk_tables.front(), true, DICT_ERR_IGNORE_NONE); + + if (table == NULL) { + table_name_t table_name; + table_name.m_name = const_cast( + fk_tables.front()); + + err = DB_TABLE_NOT_FOUND; + ib::error() + << "Failed to load table '" << table_name + << "' which has a foreign key constraint with" + << " table '" << user_table->name << "'."; + break; + } + + fk_tables.pop_front(); + } + DBUG_RETURN(err); } /** Commit the changes made during prepare_inplace_alter_table() and inplace_alter_table() inside the data dictionary tables, when rebuilding the table. 
-@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param altered_table MySQL table that is being altered -@param old_table MySQL table as it is before the ALTER operation -@param trx Data dictionary transaction -@param table_name Table name in MySQL -@retval true Failure -@retval false Success +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param altered_table MySQL table that is being altered +@param old_table MySQL table as it is before the ALTER operation +@param trx Data dictionary transaction +@param table_name Table name in MySQL +@retval true Failure +@retval false Success */ inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool @@ -5113,18 +7349,15 @@ commit_try_rebuild( DBUG_ASSERT(!(ha_alter_info->handler_flags & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); - DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); for (dict_index_t* index = dict_table_get_first_index(rebuilt_table); index; index = dict_table_get_next_index(index)) { DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(index->is_committed()); if (dict_index_is_corrupted(index)) { - my_error(ER_INDEX_CORRUPT, MYF(0), - index->name); + my_error(ER_INDEX_CORRUPT, MYF(0), index->name()); DBUG_RETURN(true); } } @@ -5140,7 +7373,7 @@ commit_try_rebuild( for (ulint i = 0; i < ctx->num_to_drop_index; i++) { dict_index_t* index = ctx->drop_index[i]; DBUG_ASSERT(index->table == user_table); - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(index->is_committed()); DBUG_ASSERT(index->to_be_dropped); index->to_be_dropped = 0; } @@ -5151,8 +7384,12 @@ commit_try_rebuild( if (ctx->online) { DEBUG_SYNC_C("row_log_table_apply2_before"); + error = row_log_table_apply( - ctx->thr, user_table, altered_table); + ctx->thr, user_table, altered_table, + static_cast( + ha_alter_info->handler_ctx)->m_stage); + ulint err_key = thr_get_trx(ctx->thr)->error_key_num; switch (error) { @@ -5216,12 +7453,12 @@ commit_try_rebuild( user_table, rebuilt_table, ctx->tmp_name, trx); /* We must be still holding a table handle. */ - DBUG_ASSERT(user_table->n_ref_count >= 1); + DBUG_ASSERT(user_table->get_ref_count() >= 1); DBUG_EXECUTE_IF("ib_ddl_crash_after_rename", DBUG_SUICIDE();); DBUG_EXECUTE_IF("ib_rebuild_cannot_rename", error = DB_ERROR;); - if (user_table->n_ref_count > 1) { + if (user_table->get_ref_count() > 1) { /* This should only occur when an innodb_memcached connection with innodb_api_enable_mdl=off was started before commit_inplace_alter_table() locked the data @@ -5239,11 +7476,11 @@ commit_try_rebuild( case DB_SUCCESS: DBUG_RETURN(false); case DB_TABLESPACE_EXISTS: - ut_a(rebuilt_table->n_ref_count == 1); + ut_a(rebuilt_table->get_ref_count() == 1); my_error(ER_TABLESPACE_EXISTS, MYF(0), ctx->tmp_name); DBUG_RETURN(true); case DB_DUPLICATE_KEY: - ut_a(rebuilt_table->n_ref_count == 1); + ut_a(rebuilt_table->get_ref_count() == 1); my_error(ER_TABLE_EXISTS_ERROR, MYF(0), ctx->tmp_name); DBUG_RETURN(true); default: @@ -5254,7 +7491,7 @@ commit_try_rebuild( /** Apply the changes made during commit_try_rebuild(), to the data dictionary cache and the file system. 
-@param ctx In-place ALTER TABLE context */ +@param ctx In-place ALTER TABLE context */ inline MY_ATTRIBUTE((nonnull)) void commit_cache_rebuild( @@ -5264,12 +7501,13 @@ commit_cache_rebuild( dberr_t error; DBUG_ENTER("commit_cache_rebuild"); + DEBUG_SYNC_C("commit_cache_rebuild"); DBUG_ASSERT(ctx->need_rebuild()); DBUG_ASSERT(dict_table_is_discarded(ctx->old_table) == dict_table_is_discarded(ctx->new_table)); const char* old_name = mem_heap_strdup( - ctx->heap, ctx->old_table->name); + ctx->heap, ctx->old_table->name.m_name); /* We already committed and redo logged the renames, so this must succeed. */ @@ -5287,21 +7525,36 @@ commit_cache_rebuild( /** Store the column number of the columns in a list belonging to indexes which are not being dropped. @param[in] ctx In-place ALTER TABLE context -@param[out] drop_col_list list which will be set, containing columns - which is part of index being dropped */ +@param[in, out] drop_col_list list which will be set, containing columns + which is part of index being dropped +@param[in, out] drop_v_col_list list which will be set, containing + virtual columns which is part of index + being dropped */ static void get_col_list_to_be_dropped( - ha_innobase_inplace_ctx* ctx, - std::set& drop_col_list) + const ha_innobase_inplace_ctx* ctx, + std::set& drop_col_list, + std::set& drop_v_col_list) { for (ulint index_count = 0; index_count < ctx->num_to_drop_index; index_count++) { - dict_index_t* index = ctx->drop_index[index_count]; + const dict_index_t* index = ctx->drop_index[index_count]; for (ulint col = 0; col < index->n_user_defined_cols; col++) { - ulint col_no = dict_index_get_nth_col_no(index, col); - drop_col_list.insert(col_no); + const dict_col_t* idx_col + = dict_index_get_nth_col(index, col); + + if (dict_col_is_virtual(idx_col)) { + const dict_v_col_t* v_col + = reinterpret_cast< + const dict_v_col_t*>(idx_col); + drop_v_col_list.insert(v_col->v_pos); + + } else { + ulint col_no = dict_col_get_no(idx_col); + drop_col_list.insert(col_no); + } } } } @@ -5311,13 +7564,15 @@ dropped, it checks if the column number of the column is same as col_no argument passed. @param[in] table table object @param[in] col_no column number of the column which is to be checked +@param[in] is_v if this is a virtual column @retval true column exists @retval false column does not exist. */ static bool check_col_exists_in_indexes( const dict_table_t* table, - ulint col_no) + ulint col_no, + bool is_v) { for (dict_index_t* index = dict_table_get_first_index(table); index; index = dict_table_get_next_index(index)) { @@ -5326,11 +7581,20 @@ check_col_exists_in_indexes( continue; } - for (ulint col = 0; col < index->n_user_defined_cols; col++) { + for (ulint i = 0; i < index->n_user_defined_cols; i++) { + const dict_col_t* idx_col + = dict_index_get_nth_col(index, i); + + if (is_v && dict_col_is_virtual(idx_col)) { + const dict_v_col_t* v_col = reinterpret_cast< + const dict_v_col_t*>(idx_col); + if (v_col->v_pos == col_no) { + return(true); + } + } - ulint index_col_no = dict_index_get_nth_col_no( - index, col); - if (col_no == index_col_no) { + if (!is_v && !dict_col_is_virtual(idx_col) + && dict_col_get_no(idx_col) == col_no) { return(true); } } @@ -5342,13 +7606,13 @@ check_col_exists_in_indexes( /** Commit the changes made during prepare_inplace_alter_table() and inplace_alter_table() inside the data dictionary tables, when not rebuilding the table. 
-@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param old_table MySQL table as it is before the ALTER operation -@param trx Data dictionary transaction -@param table_name Table name in MySQL -@retval true Failure -@retval false Success +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param old_table MySQL table as it is before the ALTER operation +@param trx Data dictionary transaction +@param table_name Table name in MySQL +@retval true Failure +@retval false Success */ inline MY_ATTRIBUTE((nonnull, warn_unused_result)) bool @@ -5356,6 +7620,7 @@ commit_try_norebuild( /*=================*/ Alter_inplace_info* ha_alter_info, ha_innobase_inplace_ctx*ctx, + TABLE* altered_table, const TABLE* old_table, trx_t* trx, const char* table_name) @@ -5367,13 +7632,40 @@ commit_try_norebuild( & Alter_inplace_info::DROP_FOREIGN_KEY) || ctx->num_to_drop_fk > 0); DBUG_ASSERT(ctx->num_to_drop_fk - == ha_alter_info->alter_info->drop_list.elements); + == ha_alter_info->alter_info->drop_list.elements + || ctx->num_to_drop_vcol + == ha_alter_info->alter_info->drop_list.elements); + + + std::set drop_list; + std::set v_drop_list; + std::set::iterator col_no; + + /* Check if the column, part of an index to be dropped is part of any + other index which is not being dropped. If it so, then set the ord_part + of the column to 0. */ + get_col_list_to_be_dropped(ctx, drop_list, v_drop_list); + + for (col_no = drop_list.begin(); col_no != drop_list.end(); ++col_no) { + if (!check_col_exists_in_indexes(ctx->new_table, + *col_no, false)) { + ctx->new_table->cols[*col_no].ord_part = 0; + } + } + + for (col_no = v_drop_list.begin(); + col_no != v_drop_list.end(); ++col_no) { + if (!check_col_exists_in_indexes(ctx->new_table, + *col_no, true)) { + ctx->new_table->v_cols[*col_no].m_col.ord_part = 0; + } + } for (ulint i = 0; i < ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); - DBUG_ASSERT(*index->name == TEMP_INDEX_PREFIX); + DBUG_ASSERT(!index->is_committed()); if (dict_index_is_corrupted(index)) { /* Report a duplicate key error for the index that was @@ -5387,7 +7679,7 @@ commit_try_norebuild( with a detailed reason once WL#6379 has been implemented. */ my_error(ER_DUP_UNKNOWN_IN_INDEX, - MYF(0), index->name + 1); + MYF(0), index->name()); DBUG_RETURN(true); } } @@ -5398,17 +7690,27 @@ commit_try_norebuild( dberr_t error; - /* We altered the table in place. */ - /* Lose the TEMP_INDEX_PREFIX. */ + /* We altered the table in place. Mark the indexes as committed. */ for (ulint i = 0; i < ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); - DBUG_ASSERT(*index->name - == TEMP_INDEX_PREFIX); + DBUG_ASSERT(!index->is_committed()); error = row_merge_rename_index_to_add( trx, ctx->new_table->id, index->id); - if (error != DB_SUCCESS) { + switch (error) { + case DB_SUCCESS: + break; + case DB_TOO_MANY_CONCURRENT_TRXS: + /* If we wrote some undo log here, then the + persistent data dictionary for this table may + probably be corrupted. This is because a + 'trigger' on SYS_INDEXES could already have invoked + btr_free_if_exists(), which cannot be rolled back. 
*/ + DBUG_ASSERT(trx->undo_no == 0); + my_error(ER_TOO_MANY_CONCURRENT_TRXS, MYF(0)); + DBUG_RETURN(true); + default: sql_print_error( "InnoDB: rename index to add: %lu\n", (ulong) error); @@ -5420,14 +7722,11 @@ commit_try_norebuild( } /* Drop any indexes that were requested to be dropped. - Rename them to TEMP_INDEX_PREFIX in the data - dictionary first. We do not bother to rename - index->name in the dictionary cache, because the index - is about to be freed after row_merge_drop_indexes_dict(). */ + Flag them in the data dictionary first. */ for (ulint i = 0; i < ctx->num_to_drop_index; i++) { dict_index_t* index = ctx->drop_index[i]; - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(index->is_committed()); DBUG_ASSERT(index->table == ctx->new_table); DBUG_ASSERT(index->to_be_dropped); @@ -5444,20 +7743,52 @@ commit_try_norebuild( } } - if (!(ha_alter_info->handler_flags - & Alter_inplace_info::ALTER_COLUMN_NAME)) { - DBUG_RETURN(false); + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ALTER_COLUMN_NAME) + && innobase_rename_columns_try(ha_alter_info, ctx, old_table, + trx, table_name)) { + DBUG_RETURN(true); + } + + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) + && innobase_enlarge_columns_try(ha_alter_info, old_table, + ctx->old_table, trx, table_name)) { + DBUG_RETURN(true); + } + + /* JAN: TODO: MySQL 5.7 + if ((ha_alter_info->handler_flags + & Alter_inplace_info::RENAME_INDEX) + && rename_indexes_in_data_dictionary(ctx, ha_alter_info, trx)) { + DBUG_RETURN(true); + } + + if ((ha_alter_info->handler_flags + & Alter_inplace_info::DROP_VIRTUAL_COLUMN) + && innobase_drop_virtual_try( + ha_alter_info, altered_table, old_table, + ctx->old_table, trx)) { + DBUG_RETURN(true); + } + + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ADD_VIRTUAL_COLUMN) + && innobase_add_virtual_try( + ha_alter_info, altered_table, old_table, + ctx->old_table, trx)) { + DBUG_RETURN(true); } + */ - DBUG_RETURN(innobase_rename_columns_try(ha_alter_info, ctx, - old_table, trx, table_name)); + DBUG_RETURN(false); } /** Commit the changes to the data dictionary cache after a successful commit_try_norebuild() call. -@param ctx In-place ALTER TABLE context -@param table the TABLE before the ALTER -@param trx Data dictionary transaction object +@param ctx In-place ALTER TABLE context +@param table the TABLE before the ALTER +@param trx Data dictionary transaction object (will be started and committed) @return whether all replacements were found for dropped indexes */ inline MY_ATTRIBUTE((nonnull, warn_unused_result)) @@ -5474,26 +7805,12 @@ commit_cache_norebuild( DBUG_ASSERT(!ctx->need_rebuild()); - std::set drop_list; - std::set::const_iterator col_it; - - /* Check if the column, part of an index to be dropped is part of any - other index which is not being dropped. If it so, then set the ord_part - of the column to 0. 
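Whether a column's ord_part can be cleared is just a set-membership question: collect the columns referenced by the indexes being dropped, then keep the flag for any of them that is still referenced by a surviving index. A standalone sketch of that check (hypothetical structures, with columns addressed by their ordinal number; not the dictionary code):

#include <set>
#include <vector>

struct Column   { unsigned no; bool ord_part; };
struct IndexDef { bool to_be_dropped; std::vector<unsigned> col_nos; };

// Clear ord_part for columns referenced only by indexes being dropped.
// cols is assumed to be indexed by column number.
void clear_unreferenced_ord_part(std::vector<Column>& cols,
                                 const std::vector<IndexDef>& indexes) {
    std::set<unsigned> dropped_cols;
    for (const auto& idx : indexes) {
        if (idx.to_be_dropped) {
            dropped_cols.insert(idx.col_nos.begin(), idx.col_nos.end());
        }
    }
    for (unsigned col_no : dropped_cols) {
        bool still_used = false;
        for (const auto& idx : indexes) {
            if (idx.to_be_dropped) continue;
            for (unsigned c : idx.col_nos) {
                if (c == col_no) { still_used = true; break; }
            }
            if (still_used) break;
        }
        if (!still_used) cols[col_no].ord_part = false;
    }
}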
*/ - get_col_list_to_be_dropped(ctx, drop_list); - - for(col_it = drop_list.begin(); col_it != drop_list.end(); ++col_it) { - if (!check_col_exists_in_indexes(ctx->new_table, *col_it)) { - ctx->new_table->cols[*col_it].ord_part = 0; - } - } - for (ulint i = 0; i < ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; DBUG_ASSERT(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE); - DBUG_ASSERT(*index->name == TEMP_INDEX_PREFIX); - index->name++; + DBUG_ASSERT(!index->is_committed()); + index->set_committed(true); } if (ctx->num_to_drop_index) { @@ -5508,7 +7825,7 @@ commit_cache_norebuild( for (ulint i = 0; i < ctx->num_to_drop_index; i++) { dict_index_t* index = ctx->drop_index[i]; - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(index->is_committed()); DBUG_ASSERT(index->table == ctx->new_table); DBUG_ASSERT(index->to_be_dropped); @@ -5532,7 +7849,7 @@ commit_cache_norebuild( for (ulint i = 0; i < ctx->num_to_drop_index; i++) { dict_index_t* index = ctx->drop_index[i]; - DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX); + DBUG_ASSERT(index->is_committed()); DBUG_ASSERT(index->table == ctx->new_table); if (index->type & DICT_FTS) { @@ -5549,17 +7866,25 @@ commit_cache_norebuild( trx_commit_for_mysql(trx); } + ctx->new_table->fts_doc_id_index + = ctx->new_table->fts + ? dict_table_get_index_on_name( + ctx->new_table, FTS_DOC_ID_INDEX_NAME) + : NULL; + DBUG_ASSERT((ctx->new_table->fts == NULL) + == (ctx->new_table->fts_doc_id_index == NULL)); + DBUG_RETURN(found); } /** Adjust the persistent statistics after non-rebuilding ALTER TABLE. Remove statistics for dropped indexes, add statistics for created indexes and rename statistics for renamed indexes. -@param ha_alter_info Data used during in-place alter -@param ctx In-place ALTER TABLE context -@param altered_table MySQL table that is being altered -@param table_name Table name in MySQL -@param thd MySQL connection +@param ha_alter_info Data used during in-place alter +@param ctx In-place ALTER TABLE context +@param altered_table MySQL table that is being altered +@param table_name Table name in MySQL +@param thd MySQL connection */ static void @@ -5580,7 +7905,19 @@ alter_stats_norebuild( DBUG_VOID_RETURN; } - /* TODO: This will not drop the (unused) statistics for + /* Delete corresponding rows from the stats table. We do this + in a separate transaction from trx, because lock waits are not + allowed in a data dictionary transaction. (Lock waits are possible + on the statistics table, because it is directly accessible by users, + not covered by the dict_operation_lock.) + + Because the data dictionary changes were already committed, orphaned + rows may be left in the statistics table if the system crashes. + + FIXME: each change to the statistics tables is being committed in a + separate transaction, meaning that the operation is not atomic + + FIXME: This will not drop the (unused) statistics for FTS_DOC_ID_INDEX if it was a hidden index, dropped together with the last renamining FULLTEXT index. 
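The persistent statistics referred to here are ordinary rows in mysql.innodb_index_stats keyed by database, table and index name, so removing the statistics for one dropped index amounts to a single DELETE run in its own short transaction, which is roughly what the dict_stats_drop_index() call below boils down to. A hedged sketch of that step; the exec_sql callback is a stand-in for however the statement is executed, not a real server API:

#include <functional>
#include <string>

// Delete the persistent statistics rows of one dropped index.
// exec_sql is assumed to run the statement in its own transaction.
bool drop_index_stats(
    const std::function<bool(const std::string&)>& exec_sql,
    const std::string& db,
    const std::string& table,
    const std::string& index) {
    return exec_sql(
        "DELETE FROM mysql.innodb_index_stats"
        " WHERE database_name = '" + db + "'"
        " AND table_name = '" + table + "'"
        " AND index_name = '" + index + "';");
}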
*/ for (i = 0; i < ha_alter_info->index_drop_count; i++) { @@ -5595,7 +7932,7 @@ alter_stats_norebuild( char errstr[1024]; if (dict_stats_drop_index( - ctx->new_table->name, key->name, + ctx->new_table->name.m_name, key->name, errstr, sizeof errstr) != DB_SUCCESS) { push_warning(thd, Sql_condition::WARN_LEVEL_WARN, @@ -5603,6 +7940,31 @@ alter_stats_norebuild( } } + /* JAN: TODO: MySQL 5.7 + for (i = 0; i < ha_alter_info->index_rename_count; i++) { + KEY_PAIR* pair = &ha_alter_info->index_rename_buffer[i]; + dberr_t err; + + err = dict_stats_rename_index(ctx->new_table, + pair->old_key->name, + pair->new_key->name); + + if (err != DB_SUCCESS) { + push_warning_printf( + thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ERROR_ON_RENAME, + "Error renaming an index of table '%s'" + " from '%s' to '%s' in InnoDB persistent" + " statistics storage: %s", + table_name, + pair->old_key->name, + pair->new_key->name, + ut_strerr(err)); + } + } + */ + for (i = 0; i < ctx->num_to_add_index; i++) { dict_index_t* index = ctx->add_index[i]; DBUG_ASSERT(index->table == ctx->new_table); @@ -5619,9 +7981,9 @@ alter_stats_norebuild( /** Adjust the persistent statistics after rebuilding ALTER TABLE. Remove statistics for dropped indexes, add statistics for created indexes and rename statistics for renamed indexes. -@param table InnoDB table that was rebuilt by ALTER TABLE -@param table_name Table name in MySQL -@param thd MySQL connection +@param table InnoDB table that was rebuilt by ALTER TABLE +@param table_name Table name in MySQL +@param thd MySQL connection */ static void @@ -5638,17 +8000,30 @@ alter_stats_rebuild( DBUG_VOID_RETURN; } - dberr_t ret; +#ifndef DBUG_OFF + bool ibd_file_missing_orig = false; +#endif /* DBUG_OFF */ + + DBUG_EXECUTE_IF( + "ib_rename_index_fail2", + ibd_file_missing_orig = table->ibd_file_missing; + table->ibd_file_missing = TRUE; + ); + + dberr_t ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT); - ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT); + DBUG_EXECUTE_IF( + "ib_rename_index_fail2", + table->ibd_file_missing = ibd_file_missing_orig; + ); if (ret != DB_SUCCESS) { push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, - "Error updating stats for table '%s' " - "after table rebuild: %s", + "Error updating stats for table '%s'" + " after table rebuild: %s", table_name, ut_strerr(ret)); } @@ -5673,14 +8048,14 @@ during this operation will be the same as for inplace_alter_table() and thus might be higher than during prepare_inplace_alter_table(). (E.g concurrent writes were blocked during prepare, but might not be during commit). -@param altered_table TABLE object for new version of table. -@param ha_alter_info Structure describing changes to be done +@param altered_table TABLE object for new version of table. +@param ha_alter_info Structure describing changes to be done by ALTER TABLE and holding data used during in-place alter. -@param commit true => Commit, false => Rollback. -@retval true Failure -@retval false Success +@param commit true => Commit, false => Rollback. 
+@retval true Failure +@retval false Success */ -UNIV_INTERN + bool ha_innobase::commit_inplace_alter_table( /*====================================*/ @@ -5689,31 +8064,39 @@ ha_innobase::commit_inplace_alter_table( bool commit) { dberr_t error; - ha_innobase_inplace_ctx* ctx0 - = static_cast + ha_innobase_inplace_ctx*ctx0; + struct mtr_buf_copy_t logs; + + ctx0 = static_cast (ha_alter_info->handler_ctx); + #ifndef DBUG_OFF - uint crash_inject_count = 1; - uint crash_fail_inject_count = 1; - uint failure_inject_count = 1; -#endif + uint crash_inject_count = 1; + uint crash_fail_inject_count = 1; + uint failure_inject_count = 1; +#endif /* DBUG_OFF */ DBUG_ENTER("commit_inplace_alter_table"); DBUG_ASSERT(!srv_read_only_mode); - DBUG_ASSERT(!ctx0 || ctx0->prebuilt == prebuilt); - DBUG_ASSERT(!ctx0 || ctx0->old_table == prebuilt->table); + DBUG_ASSERT(!ctx0 || ctx0->prebuilt == m_prebuilt); + DBUG_ASSERT(!ctx0 || ctx0->old_table == m_prebuilt->table); DEBUG_SYNC_C("innodb_commit_inplace_alter_table_enter"); DEBUG_SYNC_C("innodb_commit_inplace_alter_table_wait"); + if (ctx0 != NULL && ctx0->m_stage != NULL) { + ctx0->m_stage->begin_phase_end(); + } + if (!commit) { /* A rollback is being requested. So far we may at most have created some indexes. If any indexes were to be dropped, they would actually be dropped in this method if commit=true. */ - DBUG_RETURN(rollback_inplace_alter_table( - ha_alter_info, table, prebuilt)); + const bool ret = rollback_inplace_alter_table( + ha_alter_info, table, m_prebuilt); + DBUG_RETURN(ret); } if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) { @@ -5731,13 +8114,13 @@ ha_innobase::commit_inplace_alter_table( if (ha_alter_info->group_commit_ctx) { ctx_array = ha_alter_info->group_commit_ctx; } else { - ctx_single[0] = ctx0; - ctx_single[1] = NULL; - ctx_array = ctx_single; + ctx_single[0] = ctx0; + ctx_single[1] = NULL; + ctx_array = ctx_single; } DBUG_ASSERT(ctx0 == ctx_array[0]); - ut_ad(prebuilt->table == ctx0->old_table); + ut_ad(m_prebuilt->table == ctx0->old_table); ha_alter_info->group_commit_ctx = NULL; /* Free the ctx->trx of other partitions, if any. We will only @@ -5755,12 +8138,12 @@ ha_innobase::commit_inplace_alter_table( } } - trx_start_if_not_started_xa(prebuilt->trx); + trx_start_if_not_started_xa(m_prebuilt->trx, true); for (inplace_alter_handler_ctx** pctx = ctx_array; *pctx; pctx++) { ha_innobase_inplace_ctx* ctx = static_cast(*pctx); - DBUG_ASSERT(ctx->prebuilt->trx == prebuilt->trx); + DBUG_ASSERT(ctx->prebuilt->trx == m_prebuilt->trx); /* Exclusively lock the table, to ensure that no other transaction is holding locks on the table while we @@ -5770,8 +8153,8 @@ ha_innobase::commit_inplace_alter_table( transactions collected during crash recovery could be holding InnoDB locks only, not MySQL locks. 
*/ - error = row_merge_lock_table( - prebuilt->trx, ctx->old_table, LOCK_X); + dberr_t error = row_merge_lock_table( + m_prebuilt->trx, ctx->old_table, LOCK_X); if (error != DB_SUCCESS) { my_error_innodb( @@ -5780,7 +8163,7 @@ ha_innobase::commit_inplace_alter_table( } } - DEBUG_SYNC(user_thd, "innodb_alter_commit_after_lock_table"); + DEBUG_SYNC(m_user_thd, "innodb_alter_commit_after_lock_table"); const bool new_clustered = ctx0->need_rebuild(); trx_t* trx = ctx0->trx; @@ -5809,7 +8192,7 @@ ha_innobase::commit_inplace_alter_table( if (!trx) { DBUG_ASSERT(!new_clustered); - trx = innobase_trx_allocate(user_thd); + trx = innobase_trx_allocate(m_user_thd); } trx_start_for_ddl(trx, TRX_DICT_OP_INDEX); @@ -5817,6 +8200,8 @@ ha_innobase::commit_inplace_alter_table( or lock waits can happen in it during the data dictionary operation. */ row_mysql_lock_data_dictionary(trx); + ut_ad(log_append_on_checkpoint(NULL) == NULL); + /* Prevent the background statistics collection from accessing the tables. */ for (;;) { @@ -5861,7 +8246,7 @@ ha_innobase::commit_inplace_alter_table( if (ctx->need_rebuild()) { ctx->tmp_name = dict_mem_create_temporary_tablename( - ctx->heap, ctx->new_table->name, + ctx->heap, ctx->new_table->name.m_name, ctx->new_table->id); fail = commit_try_rebuild( @@ -5869,7 +8254,7 @@ ha_innobase::commit_inplace_alter_table( trx, table_share->table_name.str); } else { fail = commit_try_norebuild( - ha_alter_info, ctx, table, trx, + ha_alter_info, ctx, altered_table, table, trx, table_share->table_name.str); } DBUG_INJECT_CRASH("ib_commit_inplace_crash", @@ -5878,8 +8263,11 @@ ha_innobase::commit_inplace_alter_table( { /* Generate a dynamic dbug text. */ char buf[32]; - ut_snprintf(buf, sizeof buf, "ib_commit_inplace_fail_%u", + + ut_snprintf(buf, sizeof buf, + "ib_commit_inplace_fail_%u", failure_inject_count++); + DBUG_EXECUTE_IF(buf, my_error(ER_INTERNAL_ERROR, MYF(0), "Injected error!"); @@ -5907,17 +8295,15 @@ ha_innobase::commit_inplace_alter_table( DBUG_ASSERT(ctx->need_rebuild()); /* Check for any possible problems for any file operations that will be performed in - commit_cache_rebuild(), and if none, generate - the redo log for these operations. */ - error = fil_mtr_rename_log(ctx->old_table, - ctx->new_table, - ctx->tmp_name, &mtr); - if (error != DB_SUCCESS) { - /* Out of memory or a problem will occur - when renaming files. */ + commit_cache_rebuild(). */ + if (!fil_mtr_rename_log(ctx->old_table, + ctx->new_table, + ctx->tmp_name, &mtr)) { + /* Out of memory. */ + mtr.set_log_mode(MTR_LOG_NO_REDO); + mtr_commit(&mtr); + trx_rollback_for_mysql(trx); fail = true; - my_error_innodb(error, ctx->old_table->name, - ctx->old_table->flags); } DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); @@ -5933,20 +8319,33 @@ ha_innobase::commit_inplace_alter_table( DBUG_SUICIDE();); ut_ad(!trx->fts_trx); - if (fail) { - mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); - mtr_commit(&mtr); - trx_rollback_for_mysql(trx); - } else { - /* The following call commits the - mini-transaction, making the data dictionary - transaction committed at mtr.end_lsn. The - transaction becomes 'durable' by the time when - log_buffer_flush_to_disk() returns. In the - logical sense the commit in the file-based - data structures happens here. */ + /* The following call commits the + mini-transaction, making the data dictionary + transaction committed at mtr.end_lsn. The + transaction becomes 'durable' by the time when + log_buffer_flush_to_disk() returns. 
In the + logical sense the commit in the file-based + data structures happens here. */ + if (!fail) { ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); - ut_ad(trx->insert_undo || trx->update_undo); + ut_ad(trx_is_rseg_updated(trx)); + + if (mtr.get_log()->size() > 0) { + ut_ad(*mtr.get_log()->front()->begin() + == MLOG_FILE_RENAME2); + + /* Append the MLOG_FILE_RENAME2 + records on checkpoint, as a separate + mini-transaction before the one that + contains the MLOG_CHECKPOINT marker. */ + static const byte multi + = MLOG_MULTI_REC_END; + + mtr.get_log()->for_each_block(logs); + logs.m_buf.push(&multi, sizeof multi); + + log_append_on_checkpoint(&logs.m_buf); + } trx_commit_low(trx, &mtr); } @@ -5956,6 +8355,7 @@ ha_innobase::commit_inplace_alter_table( and the .frm files must be swapped manually by the administrator. No loss of data. */ DBUG_EXECUTE_IF("innodb_alter_commit_crash_after_commit", + log_make_checkpoint_at(LSN_MAX, TRUE); log_buffer_flush_to_disk(); DBUG_SUICIDE();); } @@ -5985,20 +8385,10 @@ ha_innobase::commit_inplace_alter_table( if (fail) { if (new_clustered) { - dict_table_close(ctx->new_table, - TRUE, FALSE); - -#if defined UNIV_DEBUG || defined UNIV_DDL_DEBUG - /* Nobody should have initialized the - stats of the newly created table - yet. When this is the case, we know - that it has not been added for - background stats gathering. */ - ut_a(!ctx->new_table->stat_initialized); -#endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */ - trx_start_for_ddl(trx, TRX_DICT_OP_TABLE); - row_merge_drop_table(trx, ctx->new_table); + + dict_table_close_and_drop(trx, ctx->new_table); + trx_commit_for_mysql(trx); ctx->new_table = NULL; } else { @@ -6033,15 +8423,18 @@ ha_innobase::commit_inplace_alter_table( implemented yet. */ ctx->old_table->to_be_dropped = true; + DBUG_PRINT("to_be_dropped", + ("table: %s", ctx->old_table->name.m_name)); + /* Rename the tablespace files. */ commit_cache_rebuild(ctx); - error = innobase_update_foreign_cache(ctx, user_thd); + error = innobase_update_foreign_cache(ctx, m_user_thd); if (error != DB_SUCCESS) { goto foreign_fail; } } else { - error = innobase_update_foreign_cache(ctx, user_thd); + error = innobase_update_foreign_cache(ctx, m_user_thd); if (error != DB_SUCCESS) { foreign_fail: @@ -6051,34 +8444,36 @@ foreign_fail: kill and restart the server, but the *.frm file has not been replaced yet. */ - my_error(ER_CANNOT_ADD_FOREIGN, - MYF(0)); - sql_print_error( - "InnoDB: dict_load_foreigns()" - " returned %u for %s", - (unsigned) error, - thd_query_string(user_thd) - ->str); - ut_ad(0); + push_warning_printf( + m_user_thd, + Sql_condition::WARN_LEVEL_WARN, + ER_ALTER_INFO, + "InnoDB: Could not add foreign" + " key constraints."); } else { if (!commit_cache_norebuild( ctx, table, trx)) { - ut_a(!prebuilt->trx->check_foreigns); + ut_a(!m_prebuilt->trx->check_foreigns); } - innobase_rename_columns_cache( + innobase_rename_or_enlarge_columns_cache( ha_alter_info, table, ctx->new_table); + + rename_indexes_in_cache(ctx, ha_alter_info); } } DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } + log_append_on_checkpoint(NULL); + /* Invalidate the index translation table. In partitioned - tables, there is one TABLE_SHARE (and also only one TABLE) - covering all partitions. */ - share->idx_trans_tbl.index_count = 0; + tables, there is no share. 
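The rebuild branch above copies the mini-transaction's MLOG_FILE_RENAME2 records with for_each_block(), appends an MLOG_MULTI_REC_END byte, and registers the copy to be re-emitted at the next checkpoint as a self-contained record group. A toy model of just the copy-and-terminate step; snapshot_for_checkpoint, the vector buffer and the terminator value are illustrative assumptions, not the real redo-log format:

// Toy model of "copy the mtr log and close it with a multi-record
// terminator"; the constant and buffer layout are placeholders.
#include <cstdint>
#include <vector>

static const uint8_t MLOG_MULTI_REC_END_BYTE = 31;   // placeholder value

// Copy the pending records and append the terminator so the copy can be
// replayed later as one standalone mini-transaction.
std::vector<uint8_t> snapshot_for_checkpoint(const std::vector<uint8_t>& mtr_log)
{
        std::vector<uint8_t> copy(mtr_log);           // for_each_block() analogue
        copy.push_back(MLOG_MULTI_REC_END_BYTE);      // logs.m_buf.push(&multi, ...)
        return copy;
}

int main()
{
        std::vector<uint8_t> mtr_log = {1, 2, 3};     // pretend rename records
        std::vector<uint8_t> copy = snapshot_for_checkpoint(mtr_log);
        return copy.back() == MLOG_MULTI_REC_END_BYTE ? 0 : 1;
}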
*/ + if (m_share) { + m_share->idx_trans_tbl.index_count = 0; + } if (trx == ctx0->trx) { ctx0->trx = NULL; @@ -6110,8 +8505,31 @@ foreign_fail: DBUG_RETURN(true); } + if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) { + + trx_commit_for_mysql(m_prebuilt->trx); + + if (btr_search_enabled) { + btr_search_disable(false); + btr_search_enable(); + } + + char tb_name[FN_REFLEN]; + ut_strcpy(tb_name, m_prebuilt->table->name.m_name); + + tb_name[strlen(m_prebuilt->table->name.m_name)] = 0; + + dict_table_close(m_prebuilt->table, true, false); + dict_table_remove_from_cache(m_prebuilt->table); + m_prebuilt->table = dict_table_open_on_name( + tb_name, TRUE, TRUE, DICT_ERR_IGNORE_NONE); + row_mysql_unlock_data_dictionary(trx); + trx_free_for_mysql(trx); + MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE); + DBUG_RETURN(false); + } /* Release the table locks. */ - trx_commit_for_mysql(prebuilt->trx); + trx_commit_for_mysql(m_prebuilt->trx); DBUG_EXECUTE_IF("ib_ddl_crash_after_user_trx_commit", DBUG_SUICIDE();); @@ -6169,15 +8587,20 @@ foreign_fail: char errstr[1024]; - DBUG_ASSERT(0 == strcmp(ctx->old_table->name, + DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name, ctx->tmp_name)); + DBUG_EXECUTE_IF( + "ib_rename_index_fail3", + DBUG_SET("+d,innodb_report_deadlock"); + ); + if (dict_stats_drop_table( - ctx->new_table->name, + ctx->new_table->name.m_name, errstr, sizeof(errstr)) != DB_SUCCESS) { push_warning_printf( - user_thd, + m_user_thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, "Deleting persistent statistics" @@ -6187,13 +8610,28 @@ foreign_fail: errstr); } + DBUG_EXECUTE_IF( + "ib_rename_index_fail3", + DBUG_SET("-d,innodb_report_deadlock"); + ); + DBUG_EXECUTE_IF("ib_ddl_crash_before_commit", DBUG_SUICIDE();); - trx_t* const user_trx = prebuilt->trx; + ut_ad(m_prebuilt != ctx->prebuilt + || ctx == ctx0); + bool update_own_prebuilt = + (m_prebuilt == ctx->prebuilt); + trx_t* const user_trx = m_prebuilt->trx; row_prebuilt_free(ctx->prebuilt, TRUE); + if (ctx->new_table->n_v_cols + && ctx->old_table->vc_templ) { + refresh_share_vtempl( + altered_table, ctx->new_table, + ctx->old_table->vc_templ->share_name); + } /* Drop the copy of the old table, which was renamed to ctx->tmp_name at the atomic DDL transaction commit. If the system crashes @@ -6206,9 +8644,12 @@ foreign_fail: /* Rebuild the prebuilt object. */ ctx->prebuilt = row_create_prebuilt( ctx->new_table, altered_table->s->reclength); - trx_start_if_not_started(user_trx); + if (update_own_prebuilt) { + m_prebuilt = ctx->prebuilt; + } + trx_start_if_not_started(user_trx, true); user_trx->will_lock++; - prebuilt->trx = user_trx; + m_prebuilt->trx = user_trx; } DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); @@ -6231,7 +8672,7 @@ foreign_fail: alter_stats_rebuild( ctx->new_table, table->s->table_name.str, - user_thd); + m_user_thd); DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } @@ -6245,39 +8686,85 @@ foreign_fail: alter_stats_norebuild( ha_alter_info, ctx, altered_table, - table->s->table_name.str, user_thd); + table->s->table_name.str, m_user_thd); DBUG_INJECT_CRASH("ib_commit_inplace_crash", crash_inject_count++); } } + /* We don't support compression for the system tablespace nor + the temporary tablespace. Only because they are shared tablespaces. + There is no other technical reason. */ + + innobase_parse_hint_from_comment( + m_user_thd, m_prebuilt->table, altered_table->s); + /* TODO: Also perform DROP TABLE and DROP INDEX after the MDL downgrade. 
*/ #ifndef DBUG_OFF dict_index_t* clust_index = dict_table_get_first_index( - prebuilt->table); + ctx0->prebuilt->table); DBUG_ASSERT(!clust_index->online_log); DBUG_ASSERT(dict_index_get_online_status(clust_index) == ONLINE_INDEX_COMPLETE); - for (dict_index_t* index = dict_table_get_first_index( - prebuilt->table); + for (dict_index_t* index = clust_index; index; index = dict_table_get_next_index(index)) { DBUG_ASSERT(!index->to_be_dropped); } #endif /* DBUG_OFF */ - MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE); DBUG_RETURN(false); } + +/** Helper class for in-place alter, see handler.h */ +class ha_innopart_inplace_ctx : public inplace_alter_handler_ctx +{ +/* Only used locally in this file, so have everything public for +conveniance. */ +public: + /** Total number of partitions. */ + uint m_tot_parts; + /** Array of inplace contexts for all partitions. */ + inplace_alter_handler_ctx** ctx_array; + /** Array of prebuilt for all partitions. */ + row_prebuilt_t** prebuilt_array; + + ha_innopart_inplace_ctx(THD *thd, uint tot_parts) + : inplace_alter_handler_ctx(), + m_tot_parts(tot_parts), + ctx_array(), + prebuilt_array() + {} + + ~ha_innopart_inplace_ctx() + { + if (ctx_array) { + for (uint i = 0; i < m_tot_parts; i++) { + delete ctx_array[i]; + } + ut_free(ctx_array); + } + if (prebuilt_array) { + /* First entry is the original prebuilt! */ + for (uint i = 1; i < m_tot_parts; i++) { + /* Don't close the tables. */ + prebuilt_array[i]->table = NULL; + row_prebuilt_free(prebuilt_array[i], false); + } + ut_free(prebuilt_array); + } + } +}; + /** -@param thd - the session -@param start_value - the lower bound -@param max_value - the upper bound (inclusive) */ -UNIV_INTERN +@param thd the session +@param start_value the lower bound +@param max_value the upper bound (inclusive) */ + ib_sequence_t::ib_sequence_t( THD* thd, ulonglong start_value, @@ -6314,7 +8801,7 @@ ib_sequence_t::ib_sequence_t( /** Postfix increment @return the next value to insert */ -UNIV_INTERN + ulonglong ib_sequence_t::operator++(int) UNIV_NOTHROW { diff --git a/storage/innobase/handler/handler0alter_innopart.cc b/storage/innobase/handler/handler0alter_innopart.cc new file mode 100644 index 00000000000..0f2d5c7e576 --- /dev/null +++ b/storage/innobase/handler/handler0alter_innopart.cc @@ -0,0 +1,307 @@ +/* JAN: TODO: MySQL 5.7 InnoDB partitioning. */ + +/** Prepare inplace alter table. +Allows InnoDB to update internal structures with concurrent +writes blocked (provided that check_if_supported_inplace_alter() +did not return HA_ALTER_INPLACE_NO_LOCK). +This will be invoked before inplace_alter_table(). +@param[in] altered_table TABLE object for new version of table. +@param[in] ha_alter_info Structure describing changes to be done +by ALTER TABLE and holding data used during in-place alter. +@retval true Failure. +@retval false Success. */ +bool +ha_innopart::prepare_inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info) +{ + THD* thd; + ha_innopart_inplace_ctx* ctx_parts; + bool res = true; + DBUG_ENTER("ha_innopart::prepare_inplace_alter_table"); + DBUG_ASSERT(ha_alter_info->handler_ctx == NULL); + + thd = ha_thd(); + + /* Clean up all ins/upd nodes. */ + clear_ins_upd_nodes(); + /* Based on Sql_alloc class, return NULL for new on failure. 
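The ha_innopart_inplace_ctx destructor above frees every per-partition context but deliberately skips prebuilt_array[0], because that slot is the handler's own prebuilt and is still owned by the caller. A simplified sketch of the same ownership rule, using plain new/delete and invented PartitionedAlterCtx/PartCtx/Prebuilt types instead of ut_malloc and row_prebuilt_free:

// Sketch: the destructor owns everything except the first prebuilt slot,
// which still belongs to the handler.
struct PartCtx { /* per-partition alter context */ };
struct Prebuilt { /* per-partition row access state */ };

class PartitionedAlterCtx {
public:
        PartitionedAlterCtx(unsigned tot_parts)
                : m_tot_parts(tot_parts),
                  m_ctx(new PartCtx*[tot_parts]()),
                  m_prebuilt(new Prebuilt*[tot_parts]()) {}

        ~PartitionedAlterCtx()
        {
                for (unsigned i = 0; i < m_tot_parts; i++) {
                        delete m_ctx[i];        // owned for every partition
                }
                /* Slot 0 is the handler's own prebuilt: do not free it. */
                for (unsigned i = 1; i < m_tot_parts; i++) {
                        delete m_prebuilt[i];
                }
                delete[] m_ctx;
                delete[] m_prebuilt;
        }

        unsigned        m_tot_parts;
        PartCtx**       m_ctx;
        Prebuilt**      m_prebuilt;
};

int main()
{
        Prebuilt handler_prebuilt;                   // owned by the handler
        PartitionedAlterCtx ctx(3);
        ctx.m_prebuilt[0] = &handler_prebuilt;       // not owned by ctx
        for (unsigned i = 1; i < ctx.m_tot_parts; i++) {
                ctx.m_prebuilt[i] = new Prebuilt();  // owned, freed in ~ctx
                ctx.m_ctx[i] = new PartCtx();
        }
        ctx.m_ctx[0] = new PartCtx();
        return 0;
}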
*/ + ctx_parts = new ha_innopart_inplace_ctx(thd, m_tot_parts); + if (!ctx_parts) { + DBUG_RETURN(HA_ALTER_ERROR); + } + + uint ctx_array_size = sizeof(inplace_alter_handler_ctx*) + * (m_tot_parts + 1); + ctx_parts->ctx_array = + static_cast( + ut_malloc(ctx_array_size, + mem_key_partitioning)); + if (!ctx_parts->ctx_array) { + DBUG_RETURN(HA_ALTER_ERROR); + } + + /* Set all to NULL, including the terminating one. */ + memset(ctx_parts->ctx_array, 0, ctx_array_size); + + ctx_parts->prebuilt_array = static_cast( + ut_malloc(sizeof(row_prebuilt_t*) + * m_tot_parts, + mem_key_partitioning)); + if (!ctx_parts->prebuilt_array) { + DBUG_RETURN(HA_ALTER_ERROR); + } + /* For the first partition use the current prebuilt. */ + ctx_parts->prebuilt_array[0] = m_prebuilt; + /* Create new prebuilt for the rest of the partitions. + It is needed for the current implementation of + ha_innobase::commit_inplace_alter_table(). */ + for (uint i = 1; i < m_tot_parts; i++) { + row_prebuilt_t* tmp_prebuilt; + tmp_prebuilt = row_create_prebuilt( + m_part_share->get_table_part(i), + table_share->reclength); + /* Use same trx as original prebuilt. */ + tmp_prebuilt->trx = m_prebuilt->trx; + ctx_parts->prebuilt_array[i] = tmp_prebuilt; + } + + for (uint i = 0; i < m_tot_parts; i++) { + m_prebuilt = ctx_parts->prebuilt_array[i]; + m_prebuilt_ptr = ctx_parts->prebuilt_array + i; + ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; + set_partition(i); + res = ha_innobase::prepare_inplace_alter_table(altered_table, + ha_alter_info); + update_partition(i); + ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; + if (res) { + break; + } + } + m_prebuilt = ctx_parts->prebuilt_array[0]; + m_prebuilt_ptr = &m_prebuilt; + ha_alter_info->handler_ctx = ctx_parts; + ha_alter_info->group_commit_ctx = ctx_parts->ctx_array; + DBUG_RETURN(res); +} + +/** Inplace alter table. +Alter the table structure in-place with operations +specified using Alter_inplace_info. +The level of concurrency allowed during this operation depends +on the return value from check_if_supported_inplace_alter(). +@param[in] altered_table TABLE object for new version of table. +@param[in] ha_alter_info Structure describing changes to be done +by ALTER TABLE and holding data used during in-place alter. +@retval true Failure. +@retval false Success. */ +bool +ha_innopart::inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info) +{ + bool res = true; + ha_innopart_inplace_ctx* ctx_parts; + + ctx_parts = static_cast( + ha_alter_info->handler_ctx); + for (uint i = 0; i < m_tot_parts; i++) { + m_prebuilt = ctx_parts->prebuilt_array[i]; + ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; + set_partition(i); + res = ha_innobase::inplace_alter_table(altered_table, + ha_alter_info); + ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx); + ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; + if (res) { + break; + } + } + m_prebuilt = ctx_parts->prebuilt_array[0]; + ha_alter_info->handler_ctx = ctx_parts; + return(res); +} + +/** Commit or rollback inplace alter table. +Commit or rollback the changes made during +prepare_inplace_alter_table() and inplace_alter_table() inside +the storage engine. Note that the allowed level of concurrency +during this operation will be the same as for +inplace_alter_table() and thus might be higher than during +prepare_inplace_alter_table(). (E.g concurrent writes were +blocked during prepare, but might not be during commit). +@param[in] altered_table TABLE object for new version of table. 
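Both prepare_inplace_alter_table() and inplace_alter_table() above run the non-partitioned ha_innobase code once per partition by temporarily swapping in that partition's prebuilt and handler_ctx, reading the (possibly replaced) context back, and restoring partition 0 at the end. A minimal sketch of that swap-delegate-restore loop; Handler, AlterInfo and the rest are placeholder names, not the real classes:

// Sketch of the per-partition delegation pattern.
struct Ctx {};
struct Prebuilt {};
struct AlterInfo { Ctx* handler_ctx; };

struct Handler {
        Prebuilt*  m_prebuilt;      // currently active partition's state
        Prebuilt** m_parts;         // one prebuilt per partition
        Ctx**      m_ctx;           // one alter context per partition
        unsigned   m_tot_parts;

        // Stand-in for the single-table ha_innobase:: call on one partition.
        bool alter_one_partition(AlterInfo* info)
        {
                (void) info;        // would run the single-table ALTER here
                return false;       // false == success, as in the handler API
        }

        bool alter_all_partitions(AlterInfo* info)
        {
                bool failed = false;

                for (unsigned i = 0; i < m_tot_parts && !failed; i++) {
                        m_prebuilt = m_parts[i];        // activate partition i
                        info->handler_ctx = m_ctx[i];   // hand it its own context
                        failed = alter_one_partition(info);
                        m_ctx[i] = info->handler_ctx;   // callee may replace it
                }

                m_prebuilt = m_parts[0];                // always restore partition 0
                return failed;
        }
};

int main()
{
        Prebuilt p0, p1;
        Prebuilt* parts[] = { &p0, &p1 };
        Ctx c0, c1;
        Ctx* ctxs[] = { &c0, &c1 };
        AlterInfo info = { nullptr };
        Handler h = { &p0, parts, ctxs, 2 };
        return h.alter_all_partitions(&info) ? 1 : 0;
}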
+@param[in] ha_alter_info Structure describing changes to be done +by ALTER TABLE and holding data used during in-place alter. +@param[in] commit true => Commit, false => Rollback. +@retval true Failure. +@retval false Success. */ +bool +ha_innopart::commit_inplace_alter_table( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info, + bool commit) +{ + bool res = false; + ha_innopart_inplace_ctx* ctx_parts; + + ctx_parts = static_cast( + ha_alter_info->handler_ctx); + ut_ad(ctx_parts); + ut_ad(ctx_parts->prebuilt_array); + ut_ad(ctx_parts->prebuilt_array[0] == m_prebuilt); + if (commit) { + /* Commit is done through first partition (group commit). */ + ut_ad(ha_alter_info->group_commit_ctx == ctx_parts->ctx_array); + ha_alter_info->handler_ctx = ctx_parts->ctx_array[0]; + set_partition(0); + res = ha_innobase::commit_inplace_alter_table(altered_table, + ha_alter_info, + commit); + ut_ad(res || !ha_alter_info->group_commit_ctx); + goto end; + } + /* Rollback is done for each partition. */ + for (uint i = 0; i < m_tot_parts; i++) { + m_prebuilt = ctx_parts->prebuilt_array[i]; + ha_alter_info->handler_ctx = ctx_parts->ctx_array[i]; + set_partition(i); + if (ha_innobase::commit_inplace_alter_table(altered_table, + ha_alter_info, commit)) { + res = true; + } + ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx); + ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx; + } +end: + /* Move the ownership of the new tables back to + the m_part_share. */ + ha_innobase_inplace_ctx* ctx; + for (uint i = 0; i < m_tot_parts; i++) { + /* TODO: Fix to only use one prebuilt (i.e. make inplace + alter partition aware instead of using multiple prebuilt + copies... */ + ctx = static_cast( + ctx_parts->ctx_array[i]); + if (ctx) { + m_part_share->set_table_part(i, ctx->prebuilt->table); + ctx->prebuilt->table = NULL; + ctx_parts->prebuilt_array[i] = ctx->prebuilt; + } + } + /* The above juggling of prebuilt must be reset here. */ + m_prebuilt = ctx_parts->prebuilt_array[0]; + m_prebuilt->table = m_part_share->get_table_part(0); + ha_alter_info->handler_ctx = ctx_parts; + return(res); +} + +/** Notify the storage engine that the table structure (.frm) has +been updated. + +ha_partition allows inplace operations that also upgrades the engine +if it supports partitioning natively. So if this is the case then +we will remove the .par file since it is not used with ha_innopart +(we use the internal data dictionary instead). */ +void +ha_innopart::notify_table_changed() +{ + char tmp_par_path[FN_REFLEN + 1]; + strxnmov(tmp_par_path, FN_REFLEN, table->s->normalized_path.str, + ".par", NullS); + + if (my_access(tmp_par_path, W_OK) == 0) + { + my_delete(tmp_par_path, MYF(0)); + } +} + +/** Check if supported inplace alter table. +@param[in] altered_table Altered MySQL table. +@param[in] ha_alter_info Information about inplace operations to do. +@return Lock level, not supported or error */ +enum_alter_inplace_result +ha_innopart::check_if_supported_inplace_alter( + TABLE* altered_table, + Alter_inplace_info* ha_alter_info) +{ + DBUG_ENTER("ha_innopart::check_if_supported_inplace_alter"); + DBUG_ASSERT(ha_alter_info->handler_ctx == NULL); + + /* Not supporting these for partitioned tables yet! */ + + /* FK not yet supported. 
*/ + if (ha_alter_info->handler_flags + & (Alter_inplace_info::ADD_FOREIGN_KEY + | Alter_inplace_info::DROP_FOREIGN_KEY)) { + + ha_alter_info->unsupported_reason = innobase_get_err_msg( + ER_FOREIGN_KEY_ON_PARTITIONED); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + /* FTS not yet supported either. */ + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ADD_INDEX)) { + + for (uint i = 0; i < ha_alter_info->index_add_count; i++) { + const KEY* key = + &ha_alter_info->key_info_buffer[ + ha_alter_info->index_add_buffer[i]]; + if (key->flags & HA_FULLTEXT) { + DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK + & ~(HA_FULLTEXT + | HA_PACK_KEY + | HA_GENERATED_KEY + | HA_BINARY_PACK_KEY))); + ha_alter_info->unsupported_reason = + innobase_get_err_msg( + ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + } + } + /* We cannot allow INPLACE to change order of KEY partitioning fields! */ + if ((ha_alter_info->handler_flags + & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER) + && !m_part_info->same_key_column_order( + &ha_alter_info->alter_info->create_list)) { + + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + /* Cannot allow INPLACE for drop and create PRIMARY KEY if partition is + on Primary Key - PARTITION BY KEY() */ + if ((ha_alter_info->handler_flags + & (Alter_inplace_info::ADD_PK_INDEX + | Alter_inplace_info::DROP_PK_INDEX))) { + + /* Check partition by key(). */ + if ((m_part_info->part_type == HASH_PARTITION) + && m_part_info->list_of_part_fields + && m_part_info->part_field_list.is_empty()) { + + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + + /* Check sub-partition by key(). */ + if ((m_part_info->subpart_type == HASH_PARTITION) + && m_part_info->list_of_subpart_fields + && m_part_info->subpart_field_list.is_empty()) { + + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + } + } + + /* Check for PK and UNIQUE should already be done when creating the + new table metadata. + (fix_partition_info/check_primary_key+check_unique_key) */ + + set_partition(0); + enum_alter_inplace_result res = + ha_innobase::check_if_supported_inplace_alter(altered_table, + ha_alter_info); + + DBEUG_RETURN(res); +} + diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 0bb10f435af..3d7124cdd78 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -25,20 +25,16 @@ Created July 18, 2007 Vasil Dimov Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) *******************************************************/ +#include "ha_prototypes.h" +#include +#include #include "univ.i" -#include #include +#include +#include -#include -#include -#include -#include -#include #include "i_s.h" -#include -#include - #include "btr0pcur.h" #include "btr0types.h" #include "dict0dict.h" @@ -48,7 +44,6 @@ Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) #include "ibuf0ibuf.h" #include "dict0mem.h" #include "dict0types.h" -#include "ha_prototypes.h" #include "srv0start.h" #include "trx0i_s.h" #include "trx0trx.h" @@ -63,6 +58,9 @@ Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) #include "sync0arr.h" #include "fil0fil.h" #include "fil0crypt.h" +#include "fsp0sysspace.h" +#include "ut0new.h" +#include "dict0crea.h" /** structure associates a name string with a file page type and/or buffer page state. 
 */
+ if (ha_alter_info->handler_flags
+ & (Alter_inplace_info::ADD_FOREIGN_KEY
+ | Alter_inplace_info::DROP_FOREIGN_KEY)) {
+
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_FOREIGN_KEY_ON_PARTITIONED);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ /* FTS not yet supported either. */
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_INDEX)) {
+
+ for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
+ const KEY* key =
+ &ha_alter_info->key_info_buffer[
+ ha_alter_info->index_add_buffer[i]];
+ if (key->flags & HA_FULLTEXT) {
+ DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK
+ & ~(HA_FULLTEXT
+ | HA_PACK_KEY
+ | HA_GENERATED_KEY
+ | HA_BINARY_PACK_KEY)));
+ ha_alter_info->unsupported_reason =
+ innobase_get_err_msg(
+ ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+ }
+ /* We cannot allow INPLACE to change order of KEY partitioning fields! */
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER)
+ && !m_part_info->same_key_column_order(
+ &ha_alter_info->alter_info->create_list)) {
+
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* Cannot allow INPLACE for drop and create PRIMARY KEY if partition is
+ on Primary Key - PARTITION BY KEY() */
+ if ((ha_alter_info->handler_flags
+ & (Alter_inplace_info::ADD_PK_INDEX
+ | Alter_inplace_info::DROP_PK_INDEX))) {
+
+ /* Check partition by key(). */
+ if ((m_part_info->part_type == HASH_PARTITION)
+ && m_part_info->list_of_part_fields
+ && m_part_info->part_field_list.is_empty()) {
+
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* Check sub-partition by key(). */
+ if ((m_part_info->subpart_type == HASH_PARTITION)
+ && m_part_info->list_of_subpart_fields
+ && m_part_info->subpart_field_list.is_empty()) {
+
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+
+ /* Check for PK and UNIQUE should already be done when creating the
+ new table metadata.
+ (fix_partition_info/check_primary_key+check_unique_key) */
+
+ set_partition(0);
+ enum_alter_inplace_result res =
+ ha_innobase::check_if_supported_inplace_alter(altered_table,
+ ha_alter_info);
+
+ DBUG_RETURN(res);
+}
+ diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 0bb10f435af..3d7124cdd78 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -25,20 +25,16 @@ Created July 18, 2007 Vasil Dimov Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) *******************************************************/ +#include "ha_prototypes.h" +#include +#include #include "univ.i" -#include #include +#include +#include -#include -#include -#include -#include -#include #include "i_s.h" -#include -#include - #include "btr0pcur.h" #include "btr0types.h" #include "dict0dict.h" @@ -48,7 +44,6 @@ Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) #include "ibuf0ibuf.h" #include "dict0mem.h" #include "dict0types.h" -#include "ha_prototypes.h" #include "srv0start.h" #include "trx0i_s.h" #include "trx0trx.h" @@ -63,6 +58,9 @@ Modified Dec 29, 2014 Jan Lindström (Added sys_semaphore_waits) #include "sync0arr.h" #include "fil0fil.h" #include "fil0crypt.h" +#include "fsp0sysspace.h" +#include "ut0new.h" +#include "dict0crea.h" /** structure associates a name string with a file page type and/or buffer page state.
*/ @@ -72,17 +70,28 @@ struct buf_page_desc_t{ ulint type_value; /*!< Page type or page state */ }; -/** Change buffer B-tree page */ -#define I_S_PAGE_TYPE_IBUF (FIL_PAGE_TYPE_LAST + 1) - -/** Any states greater than I_S_PAGE_TYPE_IBUF would be treated as -unknown. */ -#define I_S_PAGE_TYPE_UNKNOWN (I_S_PAGE_TYPE_IBUF + 1) - /** We also define I_S_PAGE_TYPE_INDEX as the Index Page's position in i_s_page_type[] array */ #define I_S_PAGE_TYPE_INDEX 1 +/** Any unassigned FIL_PAGE_TYPE will be treated as unknown. */ +#define I_S_PAGE_TYPE_UNKNOWN FIL_PAGE_TYPE_UNKNOWN + +/** R-tree index page */ +#define I_S_PAGE_TYPE_RTREE (FIL_PAGE_TYPE_LAST + 1) + +/** Change buffer B-tree page */ +#define I_S_PAGE_TYPE_IBUF (FIL_PAGE_TYPE_LAST + 2) + +#define I_S_PAGE_TYPE_LAST I_S_PAGE_TYPE_IBUF + +#define I_S_PAGE_TYPE_BITS 4 + +/* Check if we can hold all page types */ +#if I_S_PAGE_TYPE_LAST >= 1 << I_S_PAGE_TYPE_BITS +# error i_s_page_type[] is too large +#endif + /** Name string for File Page Types */ static buf_page_desc_t i_s_page_type[] = { {"ALLOCATED", FIL_PAGE_TYPE_ALLOCATED}, @@ -98,16 +107,12 @@ static buf_page_desc_t i_s_page_type[] = { {"BLOB", FIL_PAGE_TYPE_BLOB}, {"COMPRESSED_BLOB", FIL_PAGE_TYPE_ZBLOB}, {"COMPRESSED_BLOB2", FIL_PAGE_TYPE_ZBLOB2}, + {"UNKNOWN", I_S_PAGE_TYPE_UNKNOWN}, + {"RTREE_INDEX", I_S_PAGE_TYPE_RTREE}, {"IBUF_INDEX", I_S_PAGE_TYPE_IBUF}, {"PAGE COMPRESSED", FIL_PAGE_PAGE_COMPRESSED}, - {"UNKNOWN", I_S_PAGE_TYPE_UNKNOWN} }; -/* Check if we can hold all page type in a 4 bit value */ -#if I_S_PAGE_TYPE_UNKNOWN > 1<<4 -# error "i_s_page_type[] is too large" -#endif - /** This structure defines information we will fetch from pages currently cached in the buffer pool. It will be used to populate table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE */ @@ -132,7 +137,7 @@ struct buf_page_info_t{ unsigned zip_ssize:PAGE_ZIP_SSIZE_BITS; /*!< Compressed page size */ unsigned page_state:BUF_PAGE_STATE_BITS; /*!< Page state */ - unsigned page_type:4; /*!< Page type */ + unsigned page_type:I_S_PAGE_TYPE_BITS; /*!< Page type */ unsigned num_recs:UNIV_PAGE_SIZE_SHIFT_MAX-2; /*!< Number of records on Page */ unsigned data_size:UNIV_PAGE_SIZE_SHIFT_MAX; @@ -191,7 +196,7 @@ Common function to fill any of the dynamic tables: INFORMATION_SCHEMA.innodb_trx INFORMATION_SCHEMA.innodb_locks INFORMATION_SCHEMA.innodb_lock_waits -@return 0 on success */ +@return 0 on success */ static int trx_i_s_common_fill_table( @@ -202,7 +207,7 @@ trx_i_s_common_fill_table( /*******************************************************************//** Unbind a dynamic INFORMATION_SCHEMA table. -@return 0 on success */ +@return 0 on success */ static int i_s_common_deinit( @@ -211,7 +216,7 @@ i_s_common_deinit( /*******************************************************************//** Auxiliary function to store time_t value in MYSQL_TYPE_DATETIME field. -@return 0 on success */ +@return 0 on success */ static int field_store_time_t( @@ -237,12 +242,15 @@ field_store_time_t( memset(&my_time, 0, sizeof(my_time)); } + /* JAN: TODO: MySQL 5.7 + return(field->store_time(&my_time, MYSQL_TIMESTAMP_DATETIME)); + */ return(field->store_time(&my_time)); } /*******************************************************************//** Auxiliary function to store char* value in MYSQL_TYPE_STRING field. 
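The page-type hunk above widens the page_type bitfield to I_S_PAGE_TYPE_BITS and keeps a preprocessor guard so the largest synthetic code (I_S_PAGE_TYPE_IBUF) still fits. The same invariant expressed as a standalone C++11 static_assert; FIL_PAGE_TYPE_LAST_SKETCH is a placeholder value, only the relationship between the constants is taken from the hunk:

// Sketch: a 4-bit field holds values 0..15, so the largest code used by
// the INFORMATION_SCHEMA page-type mapping must stay below 1 << 4.
const unsigned FIL_PAGE_TYPE_LAST_SKETCH = 13;                  // placeholder
const unsigned I_S_PAGE_TYPE_RTREE = FIL_PAGE_TYPE_LAST_SKETCH + 1;
const unsigned I_S_PAGE_TYPE_IBUF  = FIL_PAGE_TYPE_LAST_SKETCH + 2;
const unsigned I_S_PAGE_TYPE_LAST  = I_S_PAGE_TYPE_IBUF;
const unsigned I_S_PAGE_TYPE_BITS  = 4;

static_assert(I_S_PAGE_TYPE_LAST < (1u << I_S_PAGE_TYPE_BITS),
              "page_type bitfield is too narrow for i_s_page_type[]");

struct buf_page_info_sketch {
        unsigned page_type : I_S_PAGE_TYPE_BITS;  // matches the widened bitfield
};

int main()
{
        buf_page_info_sketch info;
        info.page_type = I_S_PAGE_TYPE_IBUF;      // largest code still fits
        return info.page_type == I_S_PAGE_TYPE_IBUF ? 0 : 1;
}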
-@return 0 on success */ +@return 0 on success */ int field_store_string( /*===============*/ @@ -269,7 +277,7 @@ field_store_string( /*******************************************************************//** Store the name of an index in a MYSQL_TYPE_VARCHAR field. Handles the names of incomplete secondary indexes. -@return 0 on success */ +@return 0 on success */ static int field_store_index_name( @@ -287,7 +295,7 @@ field_store_index_name( /* Since TEMP_INDEX_PREFIX is not a valid UTF8, we need to convert it to something else. */ - if (index_name[0] == TEMP_INDEX_PREFIX) { + if (*index_name == *TEMP_INDEX_PREFIX_STR) { char buf[NAME_LEN + 1]; buf[0] = '?'; memcpy(buf + 1, index_name + 1, strlen(index_name)); @@ -308,7 +316,7 @@ field_store_index_name( /*******************************************************************//** Auxiliary function to store ulint value in MYSQL_TYPE_LONGLONG field. If the value is ULINT_UNDEFINED then the field it set to NULL. -@return 0 on success */ +@return 0 on success */ int field_store_ulint( /*==============*/ @@ -555,7 +563,7 @@ static ST_FIELD_INFO innodb_trx_fields_info[] = /*******************************************************************//** Read data from cache buffer and fill the INFORMATION_SCHEMA.innodb_trx table with it. -@return 0 on success */ +@return 0 on success */ static int fill_innodb_trx_from_cache( @@ -692,10 +700,6 @@ fill_innodb_trx_from_cache( OK(fields[IDX_TRX_ADAPTIVE_HASH_LATCHED]->store( static_cast(row->trx_has_search_latch))); - /* trx_adaptive_hash_timeout */ - OK(fields[IDX_TRX_ADAPTIVE_HASH_TIMEOUT]->store( - (longlong) row->trx_search_latch_timeout, true)); - /* trx_is_read_only*/ OK(fields[IDX_TRX_READ_ONLY]->store( (longlong) row->trx_is_read_only, true)); @@ -713,7 +717,7 @@ fill_innodb_trx_from_cache( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.innodb_trx -@return 0 on success */ +@return 0 on success */ static int innodb_trx_init( @@ -886,7 +890,7 @@ static ST_FIELD_INFO innodb_locks_fields_info[] = /*******************************************************************//** Read data from cache buffer and fill the INFORMATION_SCHEMA.innodb_locks table with it. -@return 0 on success */ +@return 0 on success */ static int fill_innodb_locks_from_cache( @@ -941,7 +945,7 @@ fill_innodb_locks_from_cache( bufend = innobase_convert_name(buf, sizeof(buf), row->lock_table, strlen(row->lock_table), - thd, TRUE); + thd); OK(fields[IDX_LOCK_TABLE]->store( buf, static_cast(bufend - buf), system_charset_info)); @@ -978,7 +982,7 @@ fill_innodb_locks_from_cache( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.innodb_locks -@return 0 on success */ +@return 0 on success */ static int innodb_locks_init( @@ -1091,7 +1095,7 @@ static ST_FIELD_INFO innodb_lock_waits_fields_info[] = /*******************************************************************//** Read data from cache buffer and fill the INFORMATION_SCHEMA.innodb_lock_waits table with it. 
-@return 0 on success */ +@return 0 on success */ static int fill_innodb_lock_waits_from_cache( @@ -1161,7 +1165,7 @@ fill_innodb_lock_waits_from_cache( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.innodb_lock_waits -@return 0 on success */ +@return 0 on success */ static int innodb_lock_waits_init( @@ -1234,7 +1238,7 @@ Common function to fill any of the dynamic tables: INFORMATION_SCHEMA.innodb_trx INFORMATION_SCHEMA.innodb_locks INFORMATION_SCHEMA.innodb_lock_waits -@return 0 on success */ +@return 0 on success */ static int trx_i_s_common_fill_table( @@ -1272,10 +1276,8 @@ trx_i_s_common_fill_table( if (trx_i_s_cache_is_truncated(cache)) { - /* XXX show warning to user if possible */ - fprintf(stderr, "Warning: data in %s truncated due to " - "memory limit of %d bytes\n", table_name, - TRX_I_S_MEM_LIMIT); + ib::warn() << "Data in " << table_name << " truncated due to" + " memory limit of " << TRX_I_S_MEM_LIMIT << " bytes"; } ret = 0; @@ -1307,14 +1309,11 @@ trx_i_s_common_fill_table( } } else { - - /* huh! what happened!? */ - fprintf(stderr, - "InnoDB: trx_i_s_common_fill_table() was " - "called to fill unknown table: %s.\n" - "This function only knows how to fill " - "innodb_trx, innodb_locks and " - "innodb_lock_waits tables.\n", table_name); + ib::error() << "trx_i_s_common_fill_table() was" + " called to fill unknown table: " << table_name << "." + " This function only knows how to fill" + " innodb_trx, innodb_locks and" + " innodb_lock_waits tables."; ret = 1; } @@ -1394,7 +1393,7 @@ static ST_FIELD_INFO i_s_cmp_fields_info[] = /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmp or innodb_cmp_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_fill_low( @@ -1454,7 +1453,7 @@ i_s_cmp_fill_low( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmp. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_fill( @@ -1468,7 +1467,7 @@ i_s_cmp_fill( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmp_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_reset_fill( @@ -1482,7 +1481,7 @@ i_s_cmp_reset_fill( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmp. -@return 0 on success */ +@return 0 on success */ static int i_s_cmp_init( @@ -1500,7 +1499,7 @@ i_s_cmp_init( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmp_reset. -@return 0 on success */ +@return 0 on success */ static int i_s_cmp_reset_init( @@ -1699,7 +1698,7 @@ static ST_FIELD_INFO i_s_cmp_per_index_fields_info[] = Fill the dynamic table information_schema.innodb_cmp_per_index or information_schema.innodb_cmp_per_index_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_per_index_fill_low( @@ -1804,7 +1803,7 @@ i_s_cmp_per_index_fill_low( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmp_per_index. 
-@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_per_index_fill( @@ -1818,7 +1817,7 @@ i_s_cmp_per_index_fill( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmp_per_index_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmp_per_index_reset_fill( @@ -1832,7 +1831,7 @@ i_s_cmp_per_index_reset_fill( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmp_per_index. -@return 0 on success */ +@return 0 on success */ static int i_s_cmp_per_index_init( @@ -1850,7 +1849,7 @@ i_s_cmp_per_index_init( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmp_per_index_reset. -@return 0 on success */ +@return 0 on success */ static int i_s_cmp_per_index_reset_init( @@ -2023,7 +2022,7 @@ static ST_FIELD_INFO i_s_cmpmem_fields_info[] = /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmpmem or innodb_cmpmem_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmpmem_fill_low( @@ -2097,7 +2096,7 @@ i_s_cmpmem_fill_low( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmpmem. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmpmem_fill( @@ -2111,7 +2110,7 @@ i_s_cmpmem_fill( /*******************************************************************//** Fill the dynamic table information_schema.innodb_cmpmem_reset. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_cmpmem_reset_fill( @@ -2125,7 +2124,7 @@ i_s_cmpmem_reset_fill( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmpmem. -@return 0 on success */ +@return 0 on success */ static int i_s_cmpmem_init( @@ -2143,7 +2142,7 @@ i_s_cmpmem_init( /*******************************************************************//** Bind the dynamic table information_schema.innodb_cmpmem_reset. -@return 0 on success */ +@return 0 on success */ static int i_s_cmpmem_reset_init( @@ -2419,7 +2418,7 @@ static ST_FIELD_INFO innodb_metrics_fields_info[] = /**********************************************************************//** Fill the information schema metrics table. -@return 0 on success */ +@return 0 on success */ static int i_s_metrics_fill( @@ -2692,7 +2691,7 @@ i_s_metrics_fill( /*******************************************************************//** Function to fill information schema metrics tables. -@return 0 on success */ +@return 0 on success */ static int i_s_metrics_fill_table( @@ -2714,7 +2713,7 @@ i_s_metrics_fill_table( } /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.innodb_metrics -@return 0 on success */ +@return 0 on success */ static int innodb_metrics_init( @@ -2798,7 +2797,7 @@ static ST_FIELD_INFO i_s_stopword_fields_info[] = /*******************************************************************//** Fill the dynamic table information_schema.innodb_ft_default_stopword. 
-@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_stopword_fill( @@ -2830,7 +2829,7 @@ i_s_stopword_fill( /*******************************************************************//** Bind the dynamic table information_schema.innodb_ft_default_stopword. -@return 0 on success */ +@return 0 on success */ static int i_s_stopword_init( @@ -2914,7 +2913,7 @@ static ST_FIELD_INFO i_s_fts_doc_fields_info[] = /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_DELETED or INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_deleted_generic_fill( @@ -2982,7 +2981,7 @@ i_s_fts_deleted_generic_fill( /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_DELETED -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_deleted_fill( @@ -2998,7 +2997,7 @@ i_s_fts_deleted_fill( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_FT_DELETED -@return 0 on success */ +@return 0 on success */ static int i_s_fts_deleted_init( @@ -3065,7 +3064,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_deleted = /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_being_deleted_fill( @@ -3081,7 +3080,7 @@ i_s_fts_being_deleted_fill( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_FT_BEING_DELETED -@return 0 on success */ +@return 0 on success */ static int i_s_fts_being_deleted_init( @@ -3210,7 +3209,7 @@ static ST_FIELD_INFO i_s_fts_index_fields_info[] = /*******************************************************************//** Go through the Doc Node and its ilist, fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED for one FTS index on the table. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_index_cache_fill_one_index( @@ -3234,7 +3233,7 @@ i_s_fts_index_cache_fill_one_index( index_charset = index_cache->charset; conv_str.f_len = system_charset_info->mbmaxlen * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + conv_str.f_str = static_cast(ut_malloc_nokey(conv_str.f_len)); conv_str.f_n_char = 0; /* Go through each word in the index cache */ @@ -3320,7 +3319,7 @@ i_s_fts_index_cache_fill_one_index( } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_index_cache_fill( @@ -3370,7 +3369,7 @@ i_s_fts_index_cache_fill( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE -@return 0 on success */ +@return 0 on success */ static int i_s_fts_index_cache_init( @@ -3438,7 +3437,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_index_cache = /*******************************************************************//** Go through a FTS index auxiliary table, fetch its rows and fill FTS word cache structure. 
-@return DB_SUCCESS on success, otherwise error code */ +@return DB_SUCCESS on success, otherwise error code */ static dberr_t i_s_fts_index_table_fill_selected( @@ -3455,6 +3454,7 @@ i_s_fts_index_table_fill_selected( que_t* graph; dberr_t error; fts_fetch_t fetch; + char table_name[MAX_FULL_NAME_LEN]; info = pars_info_create(); @@ -3475,14 +3475,16 @@ i_s_fts_index_table_fill_selected( FTS_INIT_INDEX_TABLE(&fts_table, fts_get_suffix(selected), FTS_INDEX_TABLE, index); + fts_get_table_name(&fts_table, table_name); + pars_info_bind_id(info, true, "table_name", table_name); graph = fts_parse_sql( &fts_table, info, "DECLARE FUNCTION my_func;\n" "DECLARE CURSOR c IS" - " SELECT word, doc_count, first_doc_id, last_doc_id, " - "ilist\n" - " FROM %s WHERE word >= :word;\n" + " SELECT word, doc_count, first_doc_id, last_doc_id," + " ilist\n" + " FROM $table_name WHERE word >= :word;\n" "BEGIN\n" "\n" "OPEN c;\n" @@ -3494,7 +3496,7 @@ i_s_fts_index_table_fill_selected( "END LOOP;\n" "CLOSE c;"); - for(;;) { + for (;;) { error = fts_eval_sql(trx, graph); if (error == DB_SUCCESS) { @@ -3504,17 +3506,14 @@ i_s_fts_index_table_fill_selected( } else { fts_sql_rollback(trx); - ut_print_timestamp(stderr); - if (error == DB_LOCK_WAIT_TIMEOUT) { - fprintf(stderr, " InnoDB: Warning: " - "lock wait timeout reading " - "FTS index. Retrying!\n"); + ib::warn() << "Lock wait timeout reading" + " FTS index. Retrying!"; trx->error_state = DB_SUCCESS; } else { - fprintf(stderr, " InnoDB: Error: %d " - "while reading FTS index.\n", error); + ib::error() << "Error occurred while reading" + " FTS index: " << ut_strerr(error); break; } } @@ -3642,18 +3641,16 @@ i_s_fts_index_table_fill_one_fetch( word_str)); OK(fields[I_S_FTS_FIRST_DOC_ID]->store( - (longlong) node->first_doc_id, - true)); + longlong(node->first_doc_id), true)); OK(fields[I_S_FTS_LAST_DOC_ID]->store( - (longlong) node->last_doc_id, - true)); + longlong(node->last_doc_id), true)); OK(fields[I_S_FTS_DOC_COUNT]->store( static_cast(node->doc_count))); OK(fields[I_S_FTS_ILIST_DOC_ID]->store( - (longlong) doc_id, true)); + longlong(doc_id), true)); OK(fields[I_S_FTS_ILIST_DOC_POS]->store( static_cast(pos))); @@ -3677,7 +3674,7 @@ i_s_fts_index_table_fill_one_fetch( /*******************************************************************//** Go through a FTS index and its auxiliary tables, fetch rows in each table and fill INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE. 
-@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_index_table_fill_one_index( @@ -3688,7 +3685,6 @@ i_s_fts_index_table_fill_one_index( { ib_vector_t* words; mem_heap_t* heap; - fts_string_t word; CHARSET_INFO* index_charset; fts_string_t conv_str; dberr_t error; @@ -3702,21 +3698,21 @@ i_s_fts_index_table_fill_one_index( words = ib_vector_create(ib_heap_allocator_create(heap), sizeof(fts_word_t), 256); - word.f_str = NULL; - word.f_len = 0; - word.f_n_char = 0; - index_charset = fts_index_get_charset(index); conv_str.f_len = system_charset_info->mbmaxlen * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + conv_str.f_str = static_cast(ut_malloc_nokey(conv_str.f_len)); conv_str.f_n_char = 0; /* Iterate through each auxiliary table as described in fts_index_selector */ - for (ulint selected = 0; fts_index_selector[selected].value; - selected++) { - bool has_more = false; + for (ulint selected = 0; selected < FTS_NUM_AUX_INDEX; selected++) { + fts_string_t word; + bool has_more = false; + + word.f_str = NULL; + word.f_len = 0; + word.f_n_char = 0; do { /* Fetch from index */ @@ -3739,7 +3735,7 @@ i_s_fts_index_table_fill_one_index( /* Prepare start point for next fetch */ last_word = static_cast(ib_vector_last(words)); ut_ad(last_word != NULL); - fts_utf8_string_dup(&word, &last_word->text, heap); + fts_string_dup(&word, &last_word->text, heap); } /* Fill into tables */ @@ -3761,7 +3757,7 @@ func_exit: } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_index_table_fill( @@ -3805,7 +3801,7 @@ i_s_fts_index_table_fill( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE -@return 0 on success */ +@return 0 on success */ static int i_s_fts_index_table_init( @@ -3904,7 +3900,7 @@ static const char* fts_config_key[] = { /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_CONFIG -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_fts_config_fill( @@ -4003,7 +3999,7 @@ i_s_fts_config_fill( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_FT_CONFIG -@return 0 on success */ +@return 0 on success */ static int i_s_fts_config_init( @@ -4068,6 +4064,287 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_config = STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_GAMMA), }; +/* Fields of the dynamic table INNODB_TEMP_TABLE_INFO. 
*/ +static ST_FIELD_INFO i_s_innodb_temp_table_info_fields_info[] = +{ +#define IDX_TEMP_TABLE_ID 0 + {STRUCT_FLD(field_name, "TABLE_ID"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define IDX_TEMP_TABLE_NAME 1 + {STRUCT_FLD(field_name, "NAME"), + STRUCT_FLD(field_length, MAX_TABLE_UTF8_LEN), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define IDX_TEMP_TABLE_N_COLS 2 + {STRUCT_FLD(field_name, "N_COLS"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define IDX_TEMP_TABLE_SPACE_ID 3 + {STRUCT_FLD(field_name, "SPACE"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define IDX_TEMP_TABLE_PTT 4 + {STRUCT_FLD(field_name, "PER_TABLE_TABLESPACE"), + STRUCT_FLD(field_length, 64), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define IDX_TEMP_TABLE_IS_COMPRESSED 5 + {STRUCT_FLD(field_name, "IS_COMPRESSED"), + STRUCT_FLD(field_length, 64), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + END_OF_ST_FIELD_INFO +}; + +struct temp_table_info_t{ + table_id_t m_table_id; + char m_table_name[MAX_TABLE_UTF8_LEN]; + unsigned m_n_cols; + unsigned m_space_id; + char m_per_table_tablespace[64]; + char m_is_compressed[64]; +}; + +typedef std::vector > + temp_table_info_cache_t; + +/*******************************************************************//** +Fill Information Schema table INNODB_TEMP_TABLE_INFO for a particular +temp-table +@return 0 on success, 1 on failure */ +static +int +i_s_innodb_temp_table_info_fill( +/*=============================*/ + THD* thd, /*!< in: thread */ + TABLE_LIST* tables, /*!< in/out: tables + to fill */ + const temp_table_info_t* info) /*!< in: temp-table + information */ +{ + TABLE* table; + Field** fields; + + DBUG_ENTER("i_s_innodb_temp_table_info_fill"); + + table = tables->table; + + fields = table->field; + + OK(fields[IDX_TEMP_TABLE_ID]->store((double) info->m_table_id)); + + OK(field_store_string(fields[IDX_TEMP_TABLE_NAME], info->m_table_name)); + + OK(fields[IDX_TEMP_TABLE_N_COLS]->store(info->m_n_cols)); + + OK(fields[IDX_TEMP_TABLE_SPACE_ID]->store(info->m_space_id)); + + OK(field_store_string( + fields[IDX_TEMP_TABLE_PTT], info->m_per_table_tablespace)); + + OK(field_store_string( + fields[IDX_TEMP_TABLE_IS_COMPRESSED], info->m_is_compressed)); + + DBUG_RETURN(schema_table_store_record(thd, table)); +} + +/*******************************************************************//** +Populate current table information to cache */ +static +void +innodb_temp_table_populate_cache( +/*=============================*/ + const dict_table_t* table, /*! 
in: table */ + temp_table_info_t* cache) /*! in/out: populate data in this + cache */ +{ + cache->m_table_id = table->id; + + char db_utf8[MAX_DB_UTF8_LEN]; + char table_utf8[MAX_TABLE_UTF8_LEN]; + + dict_fs2utf8(table->name.m_name, + db_utf8, sizeof(db_utf8), + table_utf8, sizeof(table_utf8)); + strcpy(cache->m_table_name, table_utf8); + + cache->m_n_cols = table->n_cols; + + cache->m_space_id = table->space; + + if (fsp_is_system_temporary(table->space)) { + strcpy(cache->m_per_table_tablespace, "FALSE"); + } else { + strcpy(cache->m_per_table_tablespace, "TRUE"); + } + + if (dict_table_page_size(table).is_compressed()) { + strcpy(cache->m_is_compressed, "TRUE"); + } else { + strcpy(cache->m_is_compressed, "FALSE"); + } +} + +/*******************************************************************//** +This function will iterate over all available table and will fill +stats for temp-tables to INNODB_TEMP_TABLE_INFO. +@return 0 on success, 1 on failure */ +static +int +i_s_innodb_temp_table_info_fill_table( +/*===================================*/ + THD* thd, /*!< in: thread */ + TABLE_LIST* tables, /*!< in/out: tables to fill */ + Item* ) /*!< in: condition (ignored) */ +{ + int status = 0; + dict_table_t* table = NULL; + + DBUG_ENTER("i_s_innodb_temp_table_info_fill_table"); + + /* Only allow the PROCESS privilege holder to access the stats */ + if (check_global_access(thd, PROCESS_ACL)) { + DBUG_RETURN(0); + } + + /* First populate all temp-table info by acquiring dict_sys->mutex. + Note: Scan is being done on NON-LRU list which mainly has system + table entries and temp-table entries. This means 2 things: list + is smaller so processing would be faster and most of the data + is relevant */ + temp_table_info_cache_t all_temp_info_cache; + all_temp_info_cache.reserve(UT_LIST_GET_LEN(dict_sys->table_non_LRU)); + + mutex_enter(&dict_sys->mutex); + for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); + table != NULL; + table = UT_LIST_GET_NEXT(table_LRU, table)) { + + if (!dict_table_is_temporary(table)) { + continue; + } + + temp_table_info_t current_temp_table_info; + + innodb_temp_table_populate_cache( + table, ¤t_temp_table_info); + + all_temp_info_cache.push_back(current_temp_table_info); + } + mutex_exit(&dict_sys->mutex); + + /* Now populate the info to MySQL table */ + temp_table_info_cache_t::const_iterator end = all_temp_info_cache.end(); + for (temp_table_info_cache_t::const_iterator it + = all_temp_info_cache.begin(); + it != end; + it++) { + status = i_s_innodb_temp_table_info_fill(thd, tables, &(*it)); + if (status) { + break; + } + } + + DBUG_RETURN(status); +} + +/*******************************************************************//** +Bind the dynamic table INFORMATION_SCHEMA.INNODB_TEMP_TABLE_INFO. 
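i_s_innodb_temp_table_info_fill_table() above copies the interesting fields of every temporary table into a local cache while dict_sys->mutex is held, then releases the mutex before pushing rows to the client. A generic sketch of that snapshot-then-publish pattern; it uses std::mutex and invented TempTableRow/fill_temp_table_info names rather than the InnoDB dictionary structures:

// Sketch: take a consistent snapshot under the lock, then do the slow
// row-emission work without holding it.
#include <mutex>
#include <string>
#include <vector>
#include <iostream>

struct TempTableRow {
        long long   table_id;
        std::string name;
        unsigned    n_cols;
};

std::mutex                dict_mutex;            // stands in for dict_sys->mutex
std::vector<TempTableRow> temp_tables_source;    // stands in for table_non_LRU

void fill_temp_table_info()
{
        std::vector<TempTableRow> snapshot;
        {
                std::lock_guard<std::mutex> guard(dict_mutex);
                snapshot = temp_tables_source;   // cheap copy under the lock
        }
        // Emit rows with the lock already released.
        for (const TempTableRow& row : snapshot) {
                std::cout << row.table_id << '\t' << row.name
                          << '\t' << row.n_cols << '\n';
        }
}

int main()
{
        temp_tables_source.push_back({42, "#sql_tmp_1", 3});
        fill_temp_table_info();
        return 0;
}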
+@return 0 on success, 1 on failure */ +static +int +i_s_innodb_temp_table_info_init( +/*=============================*/ + void* p) /*!< in/out: table schema object */ +{ + ST_SCHEMA_TABLE* schema; + + DBUG_ENTER("i_s_innodb_temp_table_info_init"); + + schema = reinterpret_cast(p); + + schema->fields_info = i_s_innodb_temp_table_info_fields_info; + schema->fill_table = i_s_innodb_temp_table_info_fill_table; + + DBUG_RETURN(0); +} + +struct st_maria_plugin i_s_innodb_temp_table_info = +{ + /* the plugin type (a MYSQL_XXX_PLUGIN value) */ + /* int */ + STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN), + + /* pointer to type-specific plugin descriptor */ + /* void* */ + STRUCT_FLD(info, &i_s_info), + + /* plugin name */ + /* const char* */ + STRUCT_FLD(name, "INNODB_TEMP_TABLE_INFO"), + + /* plugin author (for SHOW PLUGINS) */ + /* const char* */ + STRUCT_FLD(author, plugin_author), + + /* general descriptive text (for SHOW PLUGINS) */ + /* const char* */ + STRUCT_FLD(descr, "InnoDB Temp Table Stats"), + + /* the plugin license (PLUGIN_LICENSE_XXX) */ + /* int */ + STRUCT_FLD(license, PLUGIN_LICENSE_GPL), + + /* the function to invoke when plugin is loaded */ + /* int (*)(void*); */ + STRUCT_FLD(init, i_s_innodb_temp_table_info_init), + + /* the function to invoke when plugin is unloaded */ + /* int (*)(void*); */ + STRUCT_FLD(deinit, i_s_common_deinit), + + /* plugin version (for SHOW PLUGINS) */ + /* unsigned int */ + STRUCT_FLD(version, INNODB_VERSION_SHORT), + + /* struct st_mysql_show_var* */ + STRUCT_FLD(status_vars, NULL), + + /* struct st_mysql_sys_var** */ + STRUCT_FLD(system_vars, NULL), + + /* Maria extension */ + STRUCT_FLD(version_info, INNODB_VERSION_STR), + STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_BETA), +}; + /* Fields of the dynamic table INNODB_BUFFER_POOL_STATS. */ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] = { @@ -4365,7 +4642,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] = /*******************************************************************//** Fill Information Schema table INNODB_BUFFER_POOL_STATS for a particular buffer pool -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_stats_fill( @@ -4502,7 +4779,7 @@ i_s_innodb_stats_fill( /*******************************************************************//** This is the function that loops through each buffer pool and fetch buffer pool stats to information schema table: I_S_INNODB_BUFFER_POOL_STATS -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_stats_fill_table( @@ -4522,7 +4799,7 @@ i_s_innodb_buffer_stats_fill_table( DBUG_RETURN(0); } - pool_info = (buf_pool_info_t*) mem_zalloc( + pool_info = (buf_pool_info_t*) ut_zalloc_nokey( srv_buf_pool_instances * sizeof *pool_info); /* Walk through each buffer pool */ @@ -4542,14 +4819,14 @@ i_s_innodb_buffer_stats_fill_table( } } - mem_free(pool_info); + ut_free(pool_info); DBUG_RETURN(status); } /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_BUFFER_POOL_STATS. 
-@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_pool_stats_init( @@ -4806,7 +5083,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_page_fields_info[] = /*******************************************************************//** Fill Information Schema table INNODB_BUFFER_PAGE with information cached in the buf_page_info_t array -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_page_fill( @@ -4897,7 +5174,7 @@ i_s_innodb_buffer_page_fill( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), - thd, TRUE); + thd); OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store( table_name, @@ -5002,14 +5279,14 @@ i_s_innodb_set_page_type( ulint page_type, /*!< in: page type */ const byte* frame) /*!< in: buffer frame */ { - if (page_type == FIL_PAGE_INDEX) { + if (fil_page_type_is_index(page_type)) { const page_t* page = (const page_t*) frame; page_info->index_id = btr_page_get_index_id(page); - /* FIL_PAGE_INDEX is a bit special, its value - is defined as 17855, so we cannot use FIL_PAGE_INDEX - to index into i_s_page_type[] array, its array index + /* FIL_PAGE_INDEX and FIL_PAGE_RTREE are a bit special, + their values are defined as 17855 and 17854, so we cannot + use them to index into i_s_page_type[] array, its array index in the i_s_page_type[] array is I_S_PAGE_TYPE_INDEX (1) for index pages or I_S_PAGE_TYPE_IBUF for change buffer index pages */ @@ -5017,6 +5294,8 @@ i_s_innodb_set_page_type( == static_cast(DICT_IBUF_ID_MIN + IBUF_SPACE_ID)) { page_info->page_type = I_S_PAGE_TYPE_IBUF; + } else if (page_type == FIL_PAGE_RTREE) { + page_info->page_type = I_S_PAGE_TYPE_RTREE; } else { page_info->page_type = I_S_PAGE_TYPE_INDEX; } @@ -5078,9 +5357,9 @@ i_s_innodb_buffer_page_get_info( const byte* frame; ulint page_type; - page_info->space_id = buf_page_get_space(bpage); + page_info->space_id = bpage->id.space(); - page_info->page_num = buf_page_get_page_no(bpage); + page_info->page_num = bpage->id.page_no(); page_info->flush_type = bpage->flush_type; @@ -5132,7 +5411,7 @@ i_s_innodb_buffer_page_get_info( /*******************************************************************//** This is the function that goes through each block of the buffer pool and fetch information to information schema tables: INNODB_BUFFER_PAGE. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_fill_buffer_pool( @@ -5146,13 +5425,13 @@ i_s_innodb_fill_buffer_pool( mem_heap_t* heap; DBUG_ENTER("i_s_innodb_fill_buffer_pool"); - RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); heap = mem_heap_create(10000); /* Go through each chunk of buffer pool. 
Currently, we only have one single chunk for each buffer pool */ - for (ulint n = 0; n < buf_pool->n_chunks; n++) { + for (ulint n = 0; + n < ut_min(buf_pool->n_chunks, buf_pool->n_chunks_new); n++) { const buf_block_t* block; ulint n_blocks; buf_page_info_t* info_buffer; @@ -5170,7 +5449,7 @@ i_s_innodb_fill_buffer_pool( /* we cache maximum MAX_BUF_INFO_CACHED number of buffer page info */ num_to_process = ut_min(chunk_size, - MAX_BUF_INFO_CACHED); + (ulint)MAX_BUF_INFO_CACHED); mem_size = num_to_process * sizeof(buf_page_info_t); @@ -5222,7 +5501,7 @@ i_s_innodb_fill_buffer_pool( /*******************************************************************//** Fill page information for pages in InnoDB buffer pool to the dynamic table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_page_fill_table( @@ -5235,6 +5514,8 @@ i_s_innodb_buffer_page_fill_table( DBUG_ENTER("i_s_innodb_buffer_page_fill_table"); + RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); + /* deny access to user without PROCESS privilege */ if (check_global_access(thd, PROCESS_ACL)) { DBUG_RETURN(0); @@ -5261,7 +5542,7 @@ i_s_innodb_buffer_page_fill_table( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_page_init( @@ -5517,7 +5798,7 @@ static ST_FIELD_INFO i_s_innodb_buf_page_lru_fields_info[] = /*******************************************************************//** Fill Information Schema table INNODB_BUFFER_PAGE_LRU with information cached in the buf_page_info_t array -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buf_page_lru_fill( @@ -5611,7 +5892,7 @@ i_s_innodb_buf_page_lru_fill( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), - thd, TRUE); + thd); OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store( table_name, @@ -5699,7 +5980,7 @@ i_s_innodb_buf_page_lru_fill( /*******************************************************************//** This is the function that goes through buffer pool's LRU list and fetch information to INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU. 
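i_s_innodb_fill_buffer_pool() above caches at most MAX_BUF_INFO_CACHED page descriptors per pass, so the buffer-pool mutex is released while rows are sent to the client and memory use stays bounded. A simplified sketch of that bounded-batch scan; PageInfo, MAX_BATCH, publish() and the std::mutex are stand-ins for the real buffer-pool structures:

// Sketch: scan a large array in fixed-size batches; copy one batch under
// the lock, release it, publish the batch, repeat.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <mutex>
#include <vector>

struct PageInfo { unsigned space_id; unsigned page_no; };

const std::size_t MAX_BATCH = 10000;    // analogue of MAX_BUF_INFO_CACHED

std::mutex            pool_mutex;       // analogue of the buffer-pool mutex
std::vector<PageInfo> pool_pages;       // analogue of one buffer-pool chunk

void publish(const std::vector<PageInfo>& batch)
{
        std::cout << "batch of " << batch.size() << " pages\n";
}

void scan_buffer_pool()
{
        std::size_t offset = 0;
        for (;;) {
                std::vector<PageInfo> batch;
                {
                        std::lock_guard<std::mutex> guard(pool_mutex);
                        if (offset >= pool_pages.size()) {
                                break;          // guard released on exit
                        }
                        std::size_t end = std::min(pool_pages.size(),
                                                   offset + MAX_BATCH);
                        batch.assign(pool_pages.begin() + offset,
                                     pool_pages.begin() + end);
                        offset = end;
                }
                publish(batch);                 // no mutex held here
        }
}

int main()
{
        for (unsigned i = 0; i < 25000; i++) {
                pool_pages.push_back({0, i});
        }
        scan_buffer_pool();     // publishes batches of 10000, 10000, 5000
        return 0;
}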
-@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_fill_buffer_lru( @@ -5716,7 +5997,6 @@ i_s_innodb_fill_buffer_lru( ulint lru_len; DBUG_ENTER("i_s_innodb_fill_buffer_lru"); - RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); /* Obtain buf_pool mutex before allocate info_buffer, since UT_LIST_GET_LEN(buf_pool->LRU) could change */ @@ -5727,6 +6007,10 @@ i_s_innodb_fill_buffer_lru( /* Print error message if malloc fail */ info_buffer = (buf_page_info_t*) my_malloc( lru_len * sizeof *info_buffer, MYF(MY_WME)); + /* JAN: TODO: MySQL 5.7 PSI + info_buffer = (buf_page_info_t*) my_malloc(PSI_INSTRUMENT_ME, + lru_len * sizeof *info_buffer, MYF(MY_WME)); + */ if (!info_buffer) { status = 1; @@ -5769,7 +6053,7 @@ exit: /*******************************************************************//** Fill page information for pages in InnoDB buffer pool to the dynamic table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buf_page_lru_fill_table( @@ -5782,6 +6066,8 @@ i_s_innodb_buf_page_lru_fill_table( DBUG_ENTER("i_s_innodb_buf_page_lru_fill_table"); + RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); + /* deny access to any users that do not hold PROCESS_ACL */ if (check_global_access(thd, PROCESS_ACL)) { DBUG_RETURN(0); @@ -5808,7 +6094,7 @@ i_s_innodb_buf_page_lru_fill_table( /*******************************************************************//** Bind the dynamic table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE_LRU. -@return 0 on success, 1 on failure */ +@return 0 on success, 1 on failure */ static int i_s_innodb_buffer_page_lru_init( @@ -5878,7 +6164,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_buffer_page_lru = /*******************************************************************//** Unbind a dynamic INFORMATION_SCHEMA table. -@return 0 on success */ +@return 0 on success */ static int i_s_common_deinit( @@ -5968,13 +6254,22 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] = STRUCT_FLD(old_name, ""), STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, +#define SYS_TABLES_SPACE_TYPE 8 + {STRUCT_FLD(field_name, "SPACE_TYPE"), + STRUCT_FLD(field_length, 10), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + END_OF_ST_FIELD_INFO }; /**********************************************************************//** Populate information_schema.innodb_sys_tables table with information from SYS_TABLES. 
-@return 0 on success */ +@return 0 on success */ static int i_s_dict_fill_sys_tables( @@ -5983,31 +6278,41 @@ i_s_dict_fill_sys_tables( dict_table_t* table, /*!< in: table */ TABLE* table_to_fill) /*!< in/out: fill this table */ { - Field** fields; - ulint compact = DICT_TF_GET_COMPACT(table->flags); - ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(table->flags); - ulint zip_size = dict_tf_get_zip_size(table->flags); - const char* file_format; - const char* row_format; + Field** fields; + ulint compact = DICT_TF_GET_COMPACT(table->flags); + ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS( + table->flags); + const page_size_t& page_size = dict_tf_get_page_size(table->flags); + const char* file_format; + const char* row_format; + const char* space_type; file_format = trx_sys_file_format_id_to_name(atomic_blobs); if (!compact) { row_format = "Redundant"; } else if (!atomic_blobs) { row_format = "Compact"; - } else if DICT_TF_GET_ZIP_SSIZE(table->flags) { + } else if (DICT_TF_GET_ZIP_SSIZE(table->flags)) { row_format = "Compressed"; } else { row_format = "Dynamic"; } + if (is_system_tablespace(table->space)) { + space_type = "System"; + } else if (DICT_TF_HAS_SHARED_SPACE(table->flags)) { + space_type = "General"; + } else { + space_type = "Single"; + } + DBUG_ENTER("i_s_dict_fill_sys_tables"); fields = table_to_fill->field; OK(fields[SYS_TABLES_ID]->store(longlong(table->id), TRUE)); - OK(field_store_string(fields[SYS_TABLES_NAME], table->name)); + OK(field_store_string(fields[SYS_TABLES_NAME], table->name.m_name)); OK(fields[SYS_TABLES_FLAG]->store(table->flags)); @@ -6019,8 +6324,12 @@ i_s_dict_fill_sys_tables( OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format)); - OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store( - static_cast(zip_size))); + OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(static_cast( + page_size.is_compressed() + ? page_size.physical() + : 0))); + + OK(field_store_string(fields[SYS_TABLES_SPACE_TYPE], space_type)); OK(schema_table_store_record(thd, table_to_fill)); @@ -6052,7 +6361,7 @@ i_s_sys_tables_fill_table( } heap = mem_heap_create(1000); - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); mtr_start(&mtr); rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); @@ -6258,17 +6567,20 @@ static ST_FIELD_INFO innodb_sys_tablestats_fields_info[] = END_OF_ST_FIELD_INFO }; -/**********************************************************************//** -Populate information_schema.innodb_sys_tablestats table with information +/** Populate information_schema.innodb_sys_tablestats table with information from SYS_TABLES. 
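
As a standalone sketch (not part of the patch), the i_s_dict_fill_sys_tables() hunk above derives ROW_FORMAT and the new SPACE_TYPE column purely from the table flags and the space id. The decision tree, with the flag predicates reduced to plain booleans and all names hypothetical (the real code uses DICT_TF_GET_COMPACT, DICT_TF_HAS_ATOMIC_BLOBS, DICT_TF_GET_ZIP_SSIZE, is_system_tablespace() and DICT_TF_HAS_SHARED_SPACE), could look like:

#include <cstdio>

// Illustrative only: the real predicates come from dict0mem.h / fsp0fsp.h.
struct table_info {
	bool compact;        // DICT_TF_GET_COMPACT(flags)
	bool atomic_blobs;   // DICT_TF_HAS_ATOMIC_BLOBS(flags)
	bool zip_ssize;      // DICT_TF_GET_ZIP_SSIZE(flags) != 0
	bool system_space;   // is_system_tablespace(space)
	bool shared_space;   // DICT_TF_HAS_SHARED_SPACE(flags)
};

static const char* row_format(const table_info& t)
{
	if (!t.compact)      return "Redundant";
	if (!t.atomic_blobs) return "Compact";
	if (t.zip_ssize)     return "Compressed";
	return "Dynamic";
}

static const char* space_type(const table_info& t)
{
	if (t.system_space) return "System";
	if (t.shared_space) return "General";
	return "Single";
}

int main()
{
	table_info t = {true, true, false, false, false};
	std::printf("%s / %s\n", row_format(t), space_type(t)); // prints: Dynamic / Single
	return 0;
}
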
-@return 0 on success */ +@param[in] thd thread ID +@param[in,out] table table +@param[in] ref_count table reference count +@param[in,out] table_to_fill fill this table +@return 0 on success */ static int i_s_dict_fill_sys_tablestats( -/*=========================*/ - THD* thd, /*!< in: thread */ - dict_table_t* table, /*!< in: table */ - TABLE* table_to_fill) /*!< in/out: fill this table */ + THD* thd, + dict_table_t* table, + ulint ref_count, + TABLE* table_to_fill) { Field** fields; @@ -6278,7 +6590,8 @@ i_s_dict_fill_sys_tablestats( OK(fields[SYS_TABLESTATS_ID]->store(longlong(table->id), TRUE)); - OK(field_store_string(fields[SYS_TABLESTATS_NAME], table->name)); + OK(field_store_string(fields[SYS_TABLESTATS_NAME], + table->name.m_name)); dict_table_stats_lock(table, RW_S_LATCH); @@ -6315,7 +6628,7 @@ i_s_dict_fill_sys_tablestats( OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, TRUE)); OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store( - static_cast(table->n_ref_count))); + static_cast(ref_count))); OK(schema_table_store_record(thd, table_to_fill)); @@ -6357,6 +6670,7 @@ i_s_sys_tables_fill_table_stats( while (rec) { const char* err_msg; dict_table_t* table_rec; + ulint ref_count; /* Fetch the dict_table_t structure corresponding to this SYS_TABLES record */ @@ -6364,10 +6678,11 @@ i_s_sys_tables_fill_table_stats( heap, rec, &table_rec, DICT_TABLE_LOAD_FROM_CACHE, &mtr); + ref_count = table_rec->get_ref_count(); mutex_exit(&dict_sys->mutex); if (!err_msg) { - i_s_dict_fill_sys_tablestats(thd, table_rec, + i_s_dict_fill_sys_tablestats(thd, table_rec, ref_count, tables->table); } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, @@ -6527,6 +6842,15 @@ static ST_FIELD_INFO innodb_sysindex_fields_info[] = STRUCT_FLD(old_name, ""), STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, +#define SYS_INDEX_MERGE_THRESHOLD 7 + {STRUCT_FLD(field_name, "MERGE_THRESHOLD"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, 0), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + END_OF_ST_FIELD_INFO }; @@ -6569,6 +6893,8 @@ i_s_dict_fill_sys_indexes( OK(fields[SYS_INDEX_SPACE]->store(index->space)); + OK(fields[SYS_INDEX_MERGE_THRESHOLD]->store(index->merge_threshold)); + OK(schema_table_store_record(thd, table_to_fill)); DBUG_RETURN(0); @@ -6786,6 +7112,8 @@ i_s_dict_fill_sys_columns( const char* col_name, /*!< in: column name */ dict_col_t* column, /*!< in: dict_col_t struct holding more column information */ + ulint nth_v_col, /*!< in: virtual column, its + sequence number (nth virtual col) */ TABLE* table_to_fill) /*!< in/out: fill this table */ { Field** fields; @@ -6794,11 +7122,16 @@ i_s_dict_fill_sys_columns( fields = table_to_fill->field; - OK(fields[SYS_COLUMN_TABLE_ID]->store(longlong(table_id), TRUE)); + OK(fields[SYS_COLUMN_TABLE_ID]->store((longlong) table_id, TRUE)); OK(field_store_string(fields[SYS_COLUMN_NAME], col_name)); - OK(fields[SYS_COLUMN_POSITION]->store(column->ind)); + if (dict_col_is_virtual(column)) { + ulint pos = dict_create_v_col_pos(nth_v_col, column->ind); + OK(fields[SYS_COLUMN_POSITION]->store(pos)); + } else { + OK(fields[SYS_COLUMN_POSITION]->store(column->ind)); + } OK(fields[SYS_COLUMN_MTYPE]->store(column->mtype)); @@ -6846,18 +7179,20 @@ i_s_sys_columns_fill_table( const char* err_msg; dict_col_t column_rec; table_id_t table_id; + ulint nth_v_col; /* populate a dict_col_t structure with information from a SYS_COLUMNS row */ 
err_msg = dict_process_sys_columns_rec(heap, rec, &column_rec, - &table_id, &col_name); + &table_id, &col_name, + &nth_v_col); mtr_commit(&mtr); mutex_exit(&dict_sys->mutex); if (!err_msg) { i_s_dict_fill_sys_columns(thd, table_id, col_name, - &column_rec, + &column_rec, nth_v_col, tables->table); } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, @@ -6949,6 +7284,213 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_columns = STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_GAMMA), }; +/** SYS_VIRTUAL **************************************************/ +/** Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_VIRTUAL */ +static ST_FIELD_INFO innodb_sys_virtual_fields_info[] = +{ +#define SYS_VIRTUAL_TABLE_ID 0 + {STRUCT_FLD(field_name, "TABLE_ID"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_VIRTUAL_POS 1 + {STRUCT_FLD(field_name, "POS"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_VIRTUAL_BASE_POS 2 + {STRUCT_FLD(field_name, "BASE_POS"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + + END_OF_ST_FIELD_INFO +}; + +/** Function to populate the information_schema.innodb_sys_virtual with +related information +param[in] thd thread +param[in] table_id table ID +param[in] pos virtual column position +param[in] base_pos base column position +param[in,out] table_to_fill fill this table +@return 0 on success */ +static +int +i_s_dict_fill_sys_virtual( + THD* thd, + table_id_t table_id, + ulint pos, + ulint base_pos, + TABLE* table_to_fill) +{ + Field** fields; + + DBUG_ENTER("i_s_dict_fill_sys_virtual"); + + fields = table_to_fill->field; + + OK(fields[SYS_VIRTUAL_TABLE_ID]->store((longlong) table_id, TRUE)); + + OK(fields[SYS_VIRTUAL_POS]->store(pos)); + + OK(fields[SYS_VIRTUAL_BASE_POS]->store(base_pos)); + + OK(schema_table_store_record(thd, table_to_fill)); + + DBUG_RETURN(0); +} + +/** Function to fill information_schema.innodb_sys_virtual with information +collected by scanning SYS_VIRTUAL table. 
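
As a hedged illustration, both the SYS_COLUMNS hunk above (dict_create_v_col_pos()) and the new INNODB_SYS_VIRTUAL table expose an encoded POS for virtual columns rather than the raw column index. The encoding itself is not shown in these hunks; assuming a scheme in which the upper 16 bits carry the virtual-column sequence number plus one and the lower 16 bits the original position (an assumption, as are all names below), an encode/decode sketch looks like:

#include <cassert>
#include <cstdint>

// Assumed encoding, not taken from this patch:
// POS = ((nth_v_col + 1) << 16) | col_ind.
static uint32_t encode_v_col_pos(uint32_t nth_v_col, uint32_t col_ind)
{
	return ((nth_v_col + 1) << 16) | (col_ind & 0xFFFF);
}

static uint32_t v_col_seq(uint32_t pos)   { return (pos >> 16) - 1; }
static uint32_t v_col_index(uint32_t pos) { return pos & 0xFFFF; }

int main()
{
	// First virtual column of a table, sitting at table position 3.
	uint32_t pos = encode_v_col_pos(0, 3);
	assert(v_col_seq(pos) == 0);
	assert(v_col_index(pos) == 3);
	return 0;
}
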
+param[in] thd thread +param[in,out] tables tables to fill +param[in] item condition (not used) +@return 0 on success */ +static +int +i_s_sys_virtual_fill_table( + THD* thd, + TABLE_LIST* tables, + Item* ) +{ + btr_pcur_t pcur; + const rec_t* rec; + ulint pos; + ulint base_pos; + mem_heap_t* heap; + mtr_t mtr; + + DBUG_ENTER("i_s_sys_virtual_fill_table"); + RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); + + /* deny access to user without PROCESS_ACL privilege */ + if (check_global_access(thd, PROCESS_ACL)) { + DBUG_RETURN(0); + } + + heap = mem_heap_create(1000); + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + + rec = dict_startscan_system(&pcur, &mtr, SYS_VIRTUAL); + + while (rec) { + const char* err_msg; + table_id_t table_id; + + /* populate a dict_col_t structure with information from + a SYS_VIRTUAL row */ + err_msg = dict_process_sys_virtual_rec(heap, rec, + &table_id, &pos, + &base_pos); + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + + if (!err_msg) { + i_s_dict_fill_sys_virtual(thd, table_id, pos, base_pos, + tables->table); + } else { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + ER_CANT_FIND_SYSTEM_REC, "%s", + err_msg); + } + + mem_heap_empty(heap); + + /* Get the next record */ + mutex_enter(&dict_sys->mutex); + mtr_start(&mtr); + rec = dict_getnext_system(&pcur, &mtr); + } + + mtr_commit(&mtr); + mutex_exit(&dict_sys->mutex); + mem_heap_free(heap); + + DBUG_RETURN(0); +} + +/** Bind the dynamic table INFORMATION_SCHEMA.innodb_sys_virtual +param[in,out] p table schema object +@return 0 on success */ +static +int +innodb_sys_virtual_init( + void* p) +{ + ST_SCHEMA_TABLE* schema; + + DBUG_ENTER("innodb_sys_virtual_init"); + + schema = (ST_SCHEMA_TABLE*) p; + + schema->fields_info = innodb_sys_virtual_fields_info; + schema->fill_table = i_s_sys_virtual_fill_table; + + DBUG_RETURN(0); +} + +struct st_maria_plugin i_s_innodb_sys_virtual = +{ + /* the plugin type (a MYSQL_XXX_PLUGIN value) */ + /* int */ + STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN), + + /* pointer to type-specific plugin descriptor */ + /* void* */ + STRUCT_FLD(info, &i_s_info), + + /* plugin name */ + /* const char* */ + STRUCT_FLD(name, "INNODB_SYS_VIRTUAL"), + + /* plugin author (for SHOW PLUGINS) */ + /* const char* */ + STRUCT_FLD(author, plugin_author), + + /* general descriptive text (for SHOW PLUGINS) */ + /* const char* */ + STRUCT_FLD(descr, "InnoDB SYS_VIRTUAL"), + + /* the plugin license (PLUGIN_LICENSE_XXX) */ + /* int */ + STRUCT_FLD(license, PLUGIN_LICENSE_GPL), + + /* the function to invoke when plugin is loaded */ + /* int (*)(void*); */ + STRUCT_FLD(init, innodb_sys_virtual_init), + + /* the function to invoke when plugin is unloaded */ + /* int (*)(void*); */ + STRUCT_FLD(deinit, i_s_common_deinit), + + /* plugin version (for SHOW PLUGINS) */ + /* unsigned int */ + STRUCT_FLD(version, INNODB_VERSION_SHORT), + + /* struct st_mysql_show_var* */ + STRUCT_FLD(status_vars, NULL), + + /* struct st_mysql_sys_var** */ + STRUCT_FLD(system_vars, NULL), + + /* Maria extension */ + STRUCT_FLD(version_info, INNODB_VERSION_STR), + STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_BETA), +}; /** SYS_FIELDS ***************************************************/ /* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_FIELDS */ static ST_FIELD_INFO innodb_sys_fields_fields_info[] = @@ -7003,7 +7545,7 @@ i_s_dict_fill_sys_fields( fields = table_to_fill->field; - OK(fields[SYS_FIELD_INDEX_ID]->store(longlong(index_id), TRUE)); + 
OK(fields[SYS_FIELD_INDEX_ID]->store((longlong) index_id, TRUE)); OK(field_store_string(fields[SYS_FIELD_NAME], field->name)); @@ -7668,6 +8210,51 @@ static ST_FIELD_INFO innodb_sys_tablespaces_fields_info[] = STRUCT_FLD(old_name, ""), STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, +#define SYS_TABLESPACES_SPACE_TYPE 7 + {STRUCT_FLD(field_name, "SPACE_TYPE"), + STRUCT_FLD(field_length, 10), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_TABLESPACES_FS_BLOCK_SIZE 8 + {STRUCT_FLD(field_name, "FS_BLOCK_SIZE"), + STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_TABLESPACES_FILE_SIZE 9 + {STRUCT_FLD(field_name, "FILE_SIZE"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_TABLESPACES_ALLOC_SIZE 10 + {STRUCT_FLD(field_name, "ALLOCATED_SIZE"), + STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS), + STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, MY_I_S_UNSIGNED), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + +#define SYS_TABLESPACES_COMPRESSION 11 + {STRUCT_FLD(field_name, "COMPRESSION"), + STRUCT_FLD(field_length, MAX_COMPRESSION_LEN + 1), + STRUCT_FLD(field_type, MYSQL_TYPE_STRING), + STRUCT_FLD(value, 0), + STRUCT_FLD(field_flags, 0), + STRUCT_FLD(old_name, ""), + STRUCT_FLD(open_method, SKIP_OPEN_TABLE)}, + END_OF_ST_FIELD_INFO }; @@ -7686,50 +8273,115 @@ i_s_dict_fill_sys_tablespaces( ulint flags, /*!< in: tablespace flags */ TABLE* table_to_fill) /*!< in/out: fill this table */ { - Field** fields; - ulint atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(flags); - ulint page_size = fsp_flags_get_page_size(flags); - ulint zip_size = fsp_flags_get_zip_size(flags); - const char* file_format; - const char* row_format; + Field** fields; + ulint atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(flags); + bool is_compressed = FSP_FLAGS_GET_ZIP_SSIZE(flags); + const char* file_format; + const char* row_format; + const page_size_t page_size(flags); + const char* space_type; DBUG_ENTER("i_s_dict_fill_sys_tablespaces"); file_format = trx_sys_file_format_id_to_name(atomic_blobs); - if (!atomic_blobs) { + if (is_system_tablespace(space)) { row_format = "Compact or Redundant"; - } else if DICT_TF_GET_ZIP_SSIZE(flags) { + } else if (fsp_is_shared_tablespace(flags) && !is_compressed) { + file_format = "Any"; + row_format = "Any"; + } else if (is_compressed) { row_format = "Compressed"; - } else { + } else if (atomic_blobs) { row_format = "Dynamic"; + } else { + row_format = "Compact or Redundant"; + } + + if (is_system_tablespace(space)) { + space_type = "System"; + } else if (fsp_is_shared_tablespace(flags)) { + space_type = "General"; + } else { + space_type = "Single"; } fields = table_to_fill->field; - OK(fields[SYS_TABLESPACES_SPACE]->store( - static_cast(space))); + OK(fields[SYS_TABLESPACES_SPACE]->store(space, true)); OK(field_store_string(fields[SYS_TABLESPACES_NAME], name)); - OK(fields[SYS_TABLESPACES_FLAGS]->store( - static_cast(flags))); + 
OK(fields[SYS_TABLESPACES_FLAGS]->store(flags, true)); OK(field_store_string(fields[SYS_TABLESPACES_FILE_FORMAT], file_format)); - OK(field_store_string(fields[SYS_TABLESPACES_ROW_FORMAT], - row_format)); + OK(field_store_string(fields[SYS_TABLESPACES_ROW_FORMAT], row_format)); OK(fields[SYS_TABLESPACES_PAGE_SIZE]->store( - static_cast(page_size))); + univ_page_size.physical(), true)); OK(fields[SYS_TABLESPACES_ZIP_PAGE_SIZE]->store( - static_cast(zip_size))); + page_size.is_compressed() + ? page_size.physical() + : 0, true)); + + OK(field_store_string(fields[SYS_TABLESPACES_SPACE_TYPE], + space_type)); + + char* filename = fil_make_filepath(NULL, name, IBD, false); + + os_file_stat_t stat; + os_file_size_t file; + + memset(&file, 0xff, sizeof(file)); + memset(&stat, 0x0, sizeof(stat)); + + if (filename != NULL) { + + file = os_file_get_size(filename); + + /* Get the file system (or Volume) block size. */ + dberr_t err = os_file_get_status(filename, &stat, false, false); + + switch(err) { + case DB_FAIL: + ib::warn() + << "File '" << filename << "', failed to get " + << "stats"; + break; + + case DB_SUCCESS: + case DB_NOT_FOUND: + break; + + default: + ib::error() + << "File '" << filename << "' " + << ut_strerr(err); + break; + } + + ut_free(filename); + } + + OK(fields[SYS_TABLESPACES_FS_BLOCK_SIZE]->store(stat.block_size, true)); + + OK(fields[SYS_TABLESPACES_FILE_SIZE]->store(file.m_total_size, true)); + + OK(fields[SYS_TABLESPACES_ALLOC_SIZE]->store(file.m_alloc_size, true)); + + Compression::Type type = fil_get_compression(space); + + OK(field_store_string( + fields[SYS_TABLESPACES_COMPRESSION], + Compression::to_string(type))); OK(schema_table_store_record(thd, table_to_fill)); DBUG_RETURN(0); } + /*******************************************************************//** Function to populate INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES table. 
Loop through each record in SYS_TABLESPACES, and extract the column @@ -7760,9 +8412,10 @@ i_s_sys_tablespaces_fill_table( mutex_enter(&dict_sys->mutex); mtr_start(&mtr); - rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); + for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); + rec != NULL; + rec = dict_getnext_system(&pcur, &mtr)) { - while (rec) { const char* err_msg; ulint space; const char* name; @@ -7790,7 +8443,6 @@ i_s_sys_tablespaces_fill_table( /* Get the next record */ mutex_enter(&dict_sys->mutex); mtr_start(&mtr); - rec = dict_getnext_system(&pcur, &mtr); } mtr_commit(&mtr); @@ -8720,9 +9372,10 @@ i_s_innodb_mutexes_fill_table( DBUG_RETURN(0); } - mutex_enter(&mutex_list_mutex); + // mutex_enter(&mutex_list_mutex); - for (mutex = UT_LIST_GET_FIRST(mutex_list); mutex != NULL; + /* JAN: TODO: FIXME: + for (mutex = UT_LIST_GET_FIRST(os_mutex_list); mutex != NULL; mutex = UT_LIST_GET_NEXT(list, mutex)) { if (mutex->count_os_wait == 0) { continue; @@ -8755,6 +9408,7 @@ i_s_innodb_mutexes_fill_table( } mutex_exit(&mutex_list_mutex); + */ mutex_enter(&rw_lock_list_mutex); @@ -8770,7 +9424,7 @@ i_s_innodb_mutexes_fill_table( continue; } - OK(field_store_string(fields[MUTEXES_NAME], lock->lock_name)); + //OK(field_store_string(fields[MUTEXES_NAME], lock->lock_name)); OK(field_store_string(fields[MUTEXES_CREATE_FILE], innobase_basename(lock->cfile_name))); OK(field_store_ulint(fields[MUTEXES_CREATE_LINE], lock->cline)); OK(field_store_ulint(fields[MUTEXES_OS_WAITS], (longlong)lock->count_os_wait)); @@ -8783,7 +9437,7 @@ i_s_innodb_mutexes_fill_table( my_snprintf(buf1, sizeof buf1, "combined %s", innobase_basename(block_lock->cfile_name)); - OK(field_store_string(fields[MUTEXES_NAME], block_lock->lock_name)); + //OK(field_store_string(fields[MUTEXES_NAME], block_lock->lock_name)); OK(field_store_string(fields[MUTEXES_CREATE_FILE], buf1)); OK(field_store_ulint(fields[MUTEXES_CREATE_LINE], block_lock->cline)); OK(field_store_ulint(fields[MUTEXES_OS_WAITS], (longlong)block_lock_oswait_count)); @@ -9141,3 +9795,147 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_semaphore_waits = STRUCT_FLD(version_info, INNODB_VERSION_STR), STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_GAMMA), }; + +/** Fill handlerton based INFORMATION_SCHEMA.FILES table. +@param[in,out] thd thread/connection descriptor +@param[in,out] tables information schema tables to fill +@retval 0 for success +@retval HA_ERR_OUT_OF_MEM when running out of memory +@return nonzero for failure */ +int +i_s_files_table_fill( + THD* thd, + TABLE_LIST* tables) +{ + TABLE* table_to_fill = tables->table; + Field** fields = table_to_fill->field; + /* Use this class so that if the OK() macro returns, + fil_space_release() is called. */ + FilSpace space; + + DBUG_ENTER("i_s_files_table_fill"); + + /* Gather information reportable to information_schema.files + for the first or next file in fil_system. */ + for (const fil_node_t* node = fil_node_next(NULL); + node != NULL; + node = fil_node_next(node)) { + const char* type = "TABLESPACE"; + const char* space_name; + /** Buffer to build file-per-table tablespace names. + Even though a space_id is often stored in a ulint, it cannot + be larger than 1<<32-1, which is 10 numeric characters. 
*/ + char file_per_table_name[ + sizeof("innodb_file_per_table_1234567890")]; + uintmax_t avail_space; + ulint extent_pages; + ulint extend_pages; + + space = node->space; + fil_type_t purpose = space()->purpose; + + switch (purpose) { + case FIL_TYPE_LOG: + /* Do not report REDO LOGs to I_S.FILES */ + space = NULL; + continue; + case FIL_TYPE_TABLESPACE: + if (!is_system_tablespace(space()->id) + && space()->id <= srv_undo_tablespaces_open) { + type = "UNDO LOG"; + break; + } /* else fall through for TABLESPACE */ + case FIL_TYPE_IMPORT: + /* 'IMPORTING'is a status. The type is TABLESPACE. */ + break; + case FIL_TYPE_TEMPORARY: + type = "TEMPORARY"; + break; + }; + + page_size_t page_size(space()->flags); + + /* Single-table tablespaces are assigned to a schema. */ + if (!is_predefined_tablespace(space()->id) + && !FSP_FLAGS_GET_SHARED(space()->flags)) { + /* Their names will be like "test/t1" */ + ut_ad(NULL != strchr(space()->name, '/')); + + /* File-per-table tablespace names are generated + internally and certain non-file-system-allowed + characters are expanded which can make the space + name too long. In order to avoid that problem, + use a modified tablespace name. + Since we are not returning dbname and tablename, + the user must match the space_id to i_s_table.space + in order find the single table that is in it or the + schema it belongs to. */ + ut_snprintf( + file_per_table_name, + sizeof(file_per_table_name), + "innodb_file_per_table_" ULINTPF, + space()->id); + space_name = file_per_table_name; + } else { + /* Only file-per-table space names contain '/'. + This is not file-per-table . */ + ut_ad(NULL == strchr(space()->name, '/')); + + space_name = space()->name; + } + + init_fill_schema_files_row(table_to_fill); + + OK(field_store_ulint(fields[IS_FILES_FILE_ID], + space()->id)); + OK(field_store_string(fields[IS_FILES_FILE_NAME], + node->name)); + OK(field_store_string(fields[IS_FILES_FILE_TYPE], + type)); + OK(field_store_string(fields[IS_FILES_TABLESPACE_NAME], + space_name)); + OK(field_store_string(fields[IS_FILES_ENGINE], + "InnoDB")); + OK(field_store_ulint(fields[IS_FILES_FREE_EXTENTS], + space()->free_len)); + + extent_pages = fsp_get_extent_size_in_pages(page_size); + + OK(field_store_ulint(fields[IS_FILES_TOTAL_EXTENTS], + space()->size_in_header / extent_pages)); + OK(field_store_ulint(fields[IS_FILES_EXTENT_SIZE], + extent_pages * page_size.physical())); + OK(field_store_ulint(fields[IS_FILES_INITIAL_SIZE], + node->init_size * page_size.physical())); + + if (node->max_size >= ULINT_MAX) { + fields[IS_FILES_MAXIMUM_SIZE]->set_null(); + } else { + OK(field_store_ulint(fields[IS_FILES_MAXIMUM_SIZE], + node->max_size * page_size.physical())); + } + if (space()->id == srv_sys_space.space_id()) { + extend_pages = srv_sys_space.get_increment(); + } else if (space()->id == srv_tmp_space.space_id()) { + extend_pages = srv_tmp_space.get_increment(); + } else { + extend_pages = fsp_get_pages_to_extend_ibd( + page_size, node->size); + } + + OK(field_store_ulint(fields[IS_FILES_AUTOEXTEND_SIZE], + extend_pages * page_size.physical())); + + avail_space = fsp_get_available_space_in_free_extents(space()); + OK(field_store_ulint(fields[IS_FILES_DATA_FREE], + static_cast(avail_space * 1024))); + OK(field_store_string(fields[IS_FILES_STATUS], + (purpose == FIL_TYPE_IMPORT) + ? 
"IMPORTING" : "NORMAL")); + + schema_table_store_record(thd, table_to_fill); + space = NULL; + } + + DBUG_RETURN(0); +} diff --git a/storage/innobase/handler/i_s.h b/storage/innobase/handler/i_s.h index 979d9d80a7f..eb076ec802e 100644 --- a/storage/innobase/handler/i_s.h +++ b/storage/innobase/handler/i_s.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyrigth (c) 2014, 2015, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under @@ -50,6 +50,7 @@ extern struct st_maria_plugin i_s_innodb_ft_config; extern struct st_maria_plugin i_s_innodb_buffer_page; extern struct st_maria_plugin i_s_innodb_buffer_page_lru; extern struct st_maria_plugin i_s_innodb_buffer_stats; +extern struct st_maria_plugin i_s_innodb_temp_table_info; extern struct st_maria_plugin i_s_innodb_sys_tables; extern struct st_maria_plugin i_s_innodb_sys_tablestats; extern struct st_maria_plugin i_s_innodb_sys_indexes; @@ -60,10 +61,22 @@ extern struct st_maria_plugin i_s_innodb_sys_foreign_cols; extern struct st_maria_plugin i_s_innodb_sys_tablespaces; extern struct st_maria_plugin i_s_innodb_sys_datafiles; extern struct st_maria_plugin i_s_innodb_mutexes; +extern struct st_maria_plugin i_s_innodb_sys_virtual; extern struct st_maria_plugin i_s_innodb_tablespaces_encryption; extern struct st_maria_plugin i_s_innodb_tablespaces_scrubbing; extern struct st_maria_plugin i_s_innodb_sys_semaphore_waits; +/** Fill handlerton based INFORMATION_SCHEMA.FILES table. +@param[in,out] thd thread/connection descriptor +@param[in,out] tables information schema tables to fill +@retval 0 for success +@retval HA_ERR_OUT_OF_MEM when running out of memory +@return nonzero for failure */ +int +i_s_files_table_fill( + THD *thd, + TABLE_LIST *tables); + /** maximum number of buffer page info we would cache. */ #define MAX_BUF_INFO_CACHED 10000 diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 0a2140c4a29..bfdaefc3271 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -24,10 +24,14 @@ Insert buffer Created 7/19/1997 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + #include "ibuf0ibuf.h" +#include "sync0sync.h" +#include "btr0sea.h" #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG -UNIV_INTERN my_bool srv_ibuf_disable_background_merge; +my_bool srv_ibuf_disable_background_merge; #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ /** Number of bits describing a single page */ @@ -54,14 +58,13 @@ UNIV_INTERN my_bool srv_ibuf_disable_background_merge; #include "btr0pcur.h" #include "btr0btr.h" #include "row0upd.h" -#include "sync0sync.h" #include "dict0boot.h" #include "fut0lst.h" #include "lock0lock.h" #include "log0recv.h" #include "que0que.h" #include "srv0start.h" /* srv_shutdown_state */ -#include "ha_prototypes.h" +#include "fsp0sysspace.h" #include "rem0cmp.h" /* STRUCTURE OF AN INSERT BUFFER RECORD @@ -190,25 +193,16 @@ level 2 i/o. However, if an OS thread does the i/o handling for itself, i.e., it uses synchronous aio, it can access any pages, as long as it obeys the access order rules. */ -/** Table name for the insert buffer. */ -#define IBUF_TABLE_NAME "SYS_IBUF_TABLE" - /** Operations that can currently be buffered. 
*/ -UNIV_INTERN ibuf_use_t ibuf_use = IBUF_USE_ALL; +ibuf_use_t ibuf_use = IBUF_USE_ALL; #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG /** Flag to control insert buffer debugging. */ -UNIV_INTERN uint ibuf_debug; +uint ibuf_debug; #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ /** The insert buffer control structure */ -UNIV_INTERN ibuf_t* ibuf = NULL; - -#ifdef UNIV_PFS_MUTEX -UNIV_INTERN mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key; -UNIV_INTERN mysql_pfs_key_t ibuf_mutex_key; -UNIV_INTERN mysql_pfs_key_t ibuf_bitmap_mutex_key; -#endif /* UNIV_PFS_MUTEX */ +ibuf_t* ibuf = NULL; #ifdef UNIV_IBUF_COUNT_DEBUG /** Number of tablespaces in the ibuf_counts array */ @@ -219,27 +213,23 @@ UNIV_INTERN mysql_pfs_key_t ibuf_bitmap_mutex_key; /** Buffered entry counts for file pages, used in debugging */ static ulint ibuf_counts[IBUF_COUNT_N_SPACES][IBUF_COUNT_N_PAGES]; -/******************************************************************//** -Checks that the indexes to ibuf_counts[][] are within limits. */ +/** Checks that the indexes to ibuf_counts[][] are within limits. +@param[in] page_id page id */ UNIV_INLINE void ibuf_count_check( -/*=============*/ - ulint space_id, /*!< in: space identifier */ - ulint page_no) /*!< in: page number */ + const page_id_t& page_id) { - if (space_id < IBUF_COUNT_N_SPACES && page_no < IBUF_COUNT_N_PAGES) { + if (page_id.space() < IBUF_COUNT_N_SPACES + && page_id.page_no() < IBUF_COUNT_N_PAGES) { return; } - fprintf(stderr, - "InnoDB: UNIV_IBUF_COUNT_DEBUG limits space_id and page_no\n" - "InnoDB: and breaks crash recovery.\n" - "InnoDB: space_id=%lu, should be 0<=space_id<%lu\n" - "InnoDB: page_no=%lu, should be 0<=page_no<%lu\n", - (ulint) space_id, (ulint) IBUF_COUNT_N_SPACES, - (ulint) page_no, (ulint) IBUF_COUNT_N_PAGES); - ut_error; + ib::fatal() << "UNIV_IBUF_COUNT_DEBUG limits space_id and page_no" + " and breaks crash recovery. space_id=" << page_id.space() + << ", should be 0<=space_id<" << IBUF_COUNT_N_SPACES + << ". 
page_no=" << page_id.page_no() + << ", should be 0<=page_no<" << IBUF_COUNT_N_PAGES; } #endif @@ -300,31 +290,31 @@ static ib_mutex_t ibuf_mutex; static ib_mutex_t ibuf_bitmap_mutex; /** The area in pages from which contract looks for page numbers for merge */ -#define IBUF_MERGE_AREA 8UL +const ulint IBUF_MERGE_AREA = 8; /** Inside the merge area, pages which have at most 1 per this number less buffered entries compared to maximum volume that can buffered for a single page are merged along with the page whose buffer became full */ -#define IBUF_MERGE_THRESHOLD 4 +const ulint IBUF_MERGE_THRESHOLD = 4; /** In ibuf_contract at most this number of pages is read to memory in one batch, in order to merge the entries for them in the insert buffer */ -#define IBUF_MAX_N_PAGES_MERGED IBUF_MERGE_AREA +const ulint IBUF_MAX_N_PAGES_MERGED = IBUF_MERGE_AREA; /** If the combined size of the ibuf trees exceeds ibuf->max_size by this many pages, we start to contract it in connection to inserts there, using non-synchronous contract */ -#define IBUF_CONTRACT_ON_INSERT_NON_SYNC 0 +const ulint IBUF_CONTRACT_ON_INSERT_NON_SYNC = 0; /** If the combined size of the ibuf trees exceeds ibuf->max_size by this many pages, we start to contract it in connection to inserts there, using synchronous contract */ -#define IBUF_CONTRACT_ON_INSERT_SYNC 5 +const ulint IBUF_CONTRACT_ON_INSERT_SYNC = 5; /** If the combined size of the ibuf trees exceeds ibuf->max_size by this many pages, we start to contract it synchronous contract, but do not insert */ -#define IBUF_CONTRACT_DO_NOT_INSERT 10 +const ulint IBUF_CONTRACT_DO_NOT_INSERT = 10; /* TODO: how to cope with drop table if there are records in the insert buffer for the indexes of the table? Is there actually any problem, @@ -341,8 +331,8 @@ ibuf_enter( /*=======*/ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ut_ad(!mtr->inside_ibuf); - mtr->inside_ibuf = TRUE; + ut_ad(!mtr->is_inside_ibuf()); + mtr->enter_ibuf(); } /******************************************************************//** @@ -354,8 +344,8 @@ ibuf_exit( /*======*/ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ut_ad(mtr->inside_ibuf); - mtr->inside_ibuf = FALSE; + ut_ad(mtr->is_inside_ibuf()); + mtr->exit_ibuf(); } /**************************************************************//** @@ -374,7 +364,7 @@ ibuf_btr_pcur_commit_specify_mtr( /******************************************************************//** Gets the ibuf header page and x-latches it. -@return insert buffer header page */ +@return insert buffer header page */ static page_t* ibuf_header_page_get( @@ -387,7 +377,9 @@ ibuf_header_page_get( page_t* page = NULL; block = buf_page_get( - IBUF_SPACE_ID, 0, FSP_IBUF_HEADER_PAGE_NO, RW_X_LATCH, mtr); + page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO), + univ_page_size, RW_X_LATCH, mtr); + if (!block->page.encrypted) { buf_block_dbg_add_level(block, SYNC_IBUF_HEADER); @@ -399,8 +391,8 @@ ibuf_header_page_get( } /******************************************************************//** -Gets the root page and x-latches it. -@return insert buffer tree root page */ +Gets the root page and sx-latches it. 
+@return insert buffer tree root page */ static page_t* ibuf_tree_root_get( @@ -413,10 +405,12 @@ ibuf_tree_root_get( ut_ad(ibuf_inside(mtr)); ut_ad(mutex_own(&ibuf_mutex)); - mtr_x_lock(dict_index_get_lock(ibuf->index), mtr); + mtr_sx_lock(dict_index_get_lock(ibuf->index), mtr); + /* only segment list access is exclusive each other */ block = buf_page_get( - IBUF_SPACE_ID, 0, FSP_IBUF_TREE_ROOT_PAGE_NO, RW_X_LATCH, mtr); + page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO), + univ_page_size, RW_SX_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); @@ -430,57 +424,54 @@ ibuf_tree_root_get( } #ifdef UNIV_IBUF_COUNT_DEBUG -/******************************************************************//** -Gets the ibuf count for a given page. + +/** Gets the ibuf count for a given page. +@param[in] page_id page id @return number of entries in the insert buffer currently buffered for this page */ -UNIV_INTERN ulint ibuf_count_get( -/*===========*/ - ulint space, /*!< in: space id */ - ulint page_no)/*!< in: page number */ + const page_id_t& page_id) { - ibuf_count_check(space, page_no); + ibuf_count_check(page_id); - return(ibuf_counts[space][page_no]); + return(ibuf_counts[page_id.space()][page_id.page_no()]); } -/******************************************************************//** -Sets the ibuf count for a given page. */ +/** Sets the ibuf count for a given page. +@param[in] page_id page id +@param[in] val value to set */ static void ibuf_count_set( -/*===========*/ - ulint space, /*!< in: space id */ - ulint page_no,/*!< in: page number */ - ulint val) /*!< in: value to set */ + const page_id_t& page_id, + ulint val) { - ibuf_count_check(space, page_no); + ibuf_count_check(page_id); ut_a(val < UNIV_PAGE_SIZE); - ibuf_counts[space][page_no] = val; + ibuf_counts[page_id.space()][page_id.page_no()] = val; } #endif /******************************************************************//** Closes insert buffer and frees the data structures. */ -UNIV_INTERN void ibuf_close(void) /*============*/ { mutex_free(&ibuf_pessimistic_insert_mutex); - memset(&ibuf_pessimistic_insert_mutex, - 0x0, sizeof(ibuf_pessimistic_insert_mutex)); mutex_free(&ibuf_mutex); - memset(&ibuf_mutex, 0x0, sizeof(ibuf_mutex)); mutex_free(&ibuf_bitmap_mutex); - memset(&ibuf_bitmap_mutex, 0x0, sizeof(ibuf_mutex)); - mem_free(ibuf); + dict_table_t* ibuf_table = ibuf->index->table; + rw_lock_free(&ibuf->index->lock); + dict_mem_index_free(ibuf->index); + dict_mem_table_free(ibuf_table); + + ut_free(ibuf); ibuf = NULL; } @@ -491,15 +482,14 @@ static void ibuf_size_update( /*=============*/ - const page_t* root, /*!< in: ibuf tree root */ - mtr_t* mtr) /*!< in: mtr */ + const page_t* root) /*!< in: ibuf tree root */ { ut_ad(mutex_own(&ibuf_mutex)); ibuf->free_list_len = flst_get_len(root + PAGE_HEADER - + PAGE_BTR_IBUF_FREE_LIST, mtr); + + PAGE_BTR_IBUF_FREE_LIST); - ibuf->height = 1 + btr_page_get_level(root, mtr); + ibuf->height = 1 + btr_page_get_level_low(root); /* the '1 +' is the ibuf header page */ ibuf->size = ibuf->seg_size - (1 + ibuf->free_list_len); @@ -509,21 +499,17 @@ ibuf_size_update( Creates the insert buffer data structure at a database startup and initializes the data structures for the insert buffer. 
@return DB_SUCCESS or failure */ -UNIV_INTERN dberr_t ibuf_init_at_db_start(void) /*=======================*/ { page_t* root; mtr_t mtr; - dict_table_t* table; - mem_heap_t* heap; - dict_index_t* index; ulint n_used; page_t* header_page; dberr_t error= DB_SUCCESS; - ibuf = static_cast(mem_zalloc(sizeof(ibuf_t))); + ibuf = static_cast(ut_zalloc_nokey(sizeof(ibuf_t))); /* At startup we intialize ibuf to have a maximum of CHANGE_BUFFER_DEFAULT_SIZE in terms of percentage of the @@ -533,21 +519,18 @@ ibuf_init_at_db_start(void) ibuf->max_size = ((buf_pool_get_curr_size() / UNIV_PAGE_SIZE) * CHANGE_BUFFER_DEFAULT_SIZE) / 100; - mutex_create(ibuf_pessimistic_insert_mutex_key, - &ibuf_pessimistic_insert_mutex, - SYNC_IBUF_PESS_INSERT_MUTEX); + mutex_create(LATCH_ID_IBUF, &ibuf_mutex); - mutex_create(ibuf_mutex_key, - &ibuf_mutex, SYNC_IBUF_MUTEX); + mutex_create(LATCH_ID_IBUF_BITMAP, &ibuf_bitmap_mutex); - mutex_create(ibuf_bitmap_mutex_key, - &ibuf_bitmap_mutex, SYNC_IBUF_BITMAP_MUTEX); + mutex_create(LATCH_ID_IBUF_PESSIMISTIC_INSERT, + &ibuf_pessimistic_insert_mutex); mtr_start(&mtr); - mutex_enter(&ibuf_mutex); + mtr_x_lock_space(IBUF_SPACE_ID, &mtr); - mtr_x_lock(fil_space_get_latch(IBUF_SPACE_ID, NULL), &mtr); + mutex_enter(&ibuf_mutex); header_page = ibuf_header_page_get(&mtr); @@ -567,50 +550,37 @@ ibuf_init_at_db_start(void) buf_block_t* block; block = buf_page_get( - IBUF_SPACE_ID, 0, FSP_IBUF_TREE_ROOT_PAGE_NO, - RW_X_LATCH, &mtr); + page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO), + univ_page_size, RW_X_LATCH, &mtr); + buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE); root = buf_block_get_frame(block); } - ibuf_size_update(root, &mtr); + ibuf_size_update(root); mutex_exit(&ibuf_mutex); ibuf->empty = page_is_empty(root); ibuf_mtr_commit(&mtr); - heap = mem_heap_create(450); - - /* Use old-style record format for the insert buffer. */ - table = dict_mem_table_create(IBUF_TABLE_NAME, IBUF_SPACE_ID, 1, 0, 0); - - dict_mem_table_add_col(table, heap, "DUMMY_COLUMN", DATA_BINARY, 0, 0); - - table->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID; - - dict_table_add_to_cache(table, FALSE, heap); - mem_heap_free(heap); - - index = dict_mem_index_create( - IBUF_TABLE_NAME, "CLUST_IND", - IBUF_SPACE_ID, DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 1); - - dict_mem_index_add_field(index, "DUMMY_COLUMN", 0); - - index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID; - - error = dict_index_add_to_cache(table, index, - FSP_IBUF_TREE_ROOT_PAGE_NO, FALSE); - ut_a(error == DB_SUCCESS); - - ibuf->index = dict_table_get_first_index(table); + ibuf->index = dict_mem_index_create( + "innodb_change_buffer", "CLUST_IND", + IBUF_SPACE_ID, DICT_CLUSTERED | DICT_IBUF, 1); + ibuf->index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID; + ibuf->index->table = dict_mem_table_create( + "innodb_change_buffer", IBUF_SPACE_ID, 1, 0, 0, 0); + ibuf->index->n_uniq = REC_MAX_N_FIELDS; + rw_lock_create(index_tree_rw_lock_key, &ibuf->index->lock, + SYNC_IBUF_INDEX_TREE); + ibuf->index->search_info = btr_search_info_create(ibuf->index->heap); + ibuf->index->page = FSP_IBUF_TREE_ROOT_PAGE_NO; + ut_d(ibuf->index->cached = TRUE); return (error); } /*********************************************************************//** Updates the max_size value for ibuf. */ -UNIV_INTERN void ibuf_max_size_update( /*=================*/ @@ -628,7 +598,6 @@ ibuf_max_size_update( #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Initializes an ibuf bitmap page. 
*/ -UNIV_INTERN void ibuf_bitmap_page_init( /*==================*/ @@ -637,21 +606,14 @@ ibuf_bitmap_page_init( { page_t* page; ulint byte_offset; - ulint zip_size = buf_block_get_zip_size(block); - - ut_a(ut_is_2pow(zip_size)); page = buf_block_get_frame(block); fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP); /* Write all zeros to the bitmap */ - if (!zip_size) { - byte_offset = UT_BITS_IN_BYTES(UNIV_PAGE_SIZE - * IBUF_BITS_PER_PAGE); - } else { - byte_offset = UT_BITS_IN_BYTES(zip_size * IBUF_BITS_PER_PAGE); - } + byte_offset = UT_BITS_IN_BYTES(block->page.size.physical() + * IBUF_BITS_PER_PAGE); memset(page + IBUF_BITMAP, 0, byte_offset); @@ -664,8 +626,7 @@ ibuf_bitmap_page_init( /*********************************************************************//** Parses a redo log record of an ibuf bitmap page init. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* ibuf_parse_bitmap_init( /*===================*/ @@ -674,7 +635,8 @@ ibuf_parse_bitmap_init( buf_block_t* block, /*!< in: block or NULL */ mtr_t* mtr) /*!< in: mtr or NULL */ { - ut_ad(ptr && end_ptr); + ut_ad(ptr != NULL); + ut_ad(end_ptr != NULL); if (block) { ibuf_bitmap_page_init(block, mtr); @@ -685,47 +647,49 @@ ibuf_parse_bitmap_init( #ifndef UNIV_HOTBACKUP # ifdef UNIV_DEBUG /** Gets the desired bits for a given page from a bitmap page. -@param page in: bitmap page -@param offset in: page whose bits to get -@param zs in: compressed page size in bytes; 0 for uncompressed pages -@param bit in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... -@param mtr in: mini-transaction holding an x-latch on the bitmap page -@return value of bits */ -# define ibuf_bitmap_page_get_bits(page, offset, zs, bit, mtr) \ - ibuf_bitmap_page_get_bits_low(page, offset, zs, \ +@param[in] page bitmap page +@param[in] page_id page id whose bits to get +@param[in] page_size page id whose bits to get +@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... +@param[in,out] mtr mini-transaction holding an x-latch on the +bitmap page +@return value of bits */ +# define ibuf_bitmap_page_get_bits(page, page_id, page_size, bit, mtr) \ + ibuf_bitmap_page_get_bits_low(page, page_id, page_size, \ MTR_MEMO_PAGE_X_FIX, mtr, bit) # else /* UNIV_DEBUG */ /** Gets the desired bits for a given page from a bitmap page. -@param page in: bitmap page -@param offset in: page whose bits to get -@param zs in: compressed page size in bytes; 0 for uncompressed pages -@param bit in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... -@param mtr in: mini-transaction holding an x-latch on the bitmap page -@return value of bits */ -# define ibuf_bitmap_page_get_bits(page, offset, zs, bit, mtr) \ - ibuf_bitmap_page_get_bits_low(page, offset, zs, bit) +@param[in] page bitmap page +@param[in] page_id page id whose bits to get +@param[in] page_size page id whose bits to get +@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... +@param[in,out] mtr mini-transaction holding an x-latch on the +bitmap page +@return value of bits */ +# define ibuf_bitmap_page_get_bits(page, page_id, page_size, bit, mtr) \ + ibuf_bitmap_page_get_bits_low(page, page_id, page_size, bit) # endif /* UNIV_DEBUG */ -/********************************************************************//** -Gets the desired bits for a given page from a bitmap page. -@return value of bits */ +/** Gets the desired bits for a given page from a bitmap page. 
+@param[in] page bitmap page +@param[in] page_id page id whose bits to get +@param[in] page_size page size +@param[in] latch_type MTR_MEMO_PAGE_X_FIX, MTR_MEMO_BUF_FIX, ... +@param[in,out] mtr mini-transaction holding latch_type on the +bitmap page +@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... +@return value of bits */ UNIV_INLINE ulint ibuf_bitmap_page_get_bits_low( -/*==========================*/ - const page_t* page, /*!< in: bitmap page */ - ulint page_no,/*!< in: page whose bits to get */ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ + const page_t* page, + const page_id_t& page_id, + const page_size_t& page_size, #ifdef UNIV_DEBUG - ulint latch_type, - /*!< in: MTR_MEMO_PAGE_X_FIX, - MTR_MEMO_BUF_FIX, ... */ - mtr_t* mtr, /*!< in: mini-transaction holding latch_type - on the bitmap page */ + ulint latch_type, + mtr_t* mtr, #endif /* UNIV_DEBUG */ - ulint bit) /*!< in: IBUF_BITMAP_FREE, - IBUF_BITMAP_BUFFERED, ... */ + ulint bit) { ulint byte_offset; ulint bit_offset; @@ -736,16 +700,10 @@ ibuf_bitmap_page_get_bits_low( #if IBUF_BITS_PER_PAGE % 2 # error "IBUF_BITS_PER_PAGE % 2 != 0" #endif - ut_ad(ut_is_2pow(zip_size)); ut_ad(mtr_memo_contains_page(mtr, page, latch_type)); - if (!zip_size) { - bit_offset = (page_no % UNIV_PAGE_SIZE) * IBUF_BITS_PER_PAGE - + bit; - } else { - bit_offset = (page_no & (zip_size - 1)) * IBUF_BITS_PER_PAGE - + bit; - } + bit_offset = (page_id.page_no() % page_size.physical()) + * IBUF_BITS_PER_PAGE + bit; byte_offset = bit_offset / 8; bit_offset = bit_offset % 8; @@ -765,19 +723,22 @@ ibuf_bitmap_page_get_bits_low( return(value); } -/********************************************************************//** -Sets the desired bit for a given page in a bitmap page. */ +/** Sets the desired bit for a given page in a bitmap page. +@param[in,out] page bitmap page +@param[in] page_id page id whose bits to set +@param[in] page_size page size +@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... +@param[in] val value to set +@param[in,out] mtr mtr containing an x-latch to the bitmap page */ static void ibuf_bitmap_page_set_bits( -/*======================*/ - page_t* page, /*!< in: bitmap page */ - ulint page_no,/*!< in: page whose bits to set */ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint bit, /*!< in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... 
*/ - ulint val, /*!< in: value to set */ - mtr_t* mtr) /*!< in: mtr containing an x-latch to the bitmap page */ + page_t* page, + const page_id_t& page_id, + const page_size_t& page_size, + ulint bit, + ulint val, + mtr_t* mtr) { ulint byte_offset; ulint bit_offset; @@ -787,20 +748,15 @@ ibuf_bitmap_page_set_bits( #if IBUF_BITS_PER_PAGE % 2 # error "IBUF_BITS_PER_PAGE % 2 != 0" #endif - ut_ad(ut_is_2pow(zip_size)); ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr->is_named_space(page_id.space())); #ifdef UNIV_IBUF_COUNT_DEBUG ut_a((bit != IBUF_BITMAP_BUFFERED) || (val != FALSE) - || (0 == ibuf_count_get(page_get_space_id(page), - page_no))); + || (0 == ibuf_count_get(page_id))); #endif - if (!zip_size) { - bit_offset = (page_no % UNIV_PAGE_SIZE) * IBUF_BITS_PER_PAGE - + bit; - } else { - bit_offset = (page_no & (zip_size - 1)) * IBUF_BITS_PER_PAGE - + bit; - } + + bit_offset = (page_id.page_no() % page_size.physical()) + * IBUF_BITS_PER_PAGE + bit; byte_offset = bit_offset / 8; bit_offset = bit_offset % 8; @@ -824,52 +780,48 @@ ibuf_bitmap_page_set_bits( MLOG_1BYTE, mtr); } -/********************************************************************//** -Calculates the bitmap page number for a given page number. -@return the bitmap page number where the file page is mapped */ +/** Calculates the bitmap page number for a given page number. +@param[in] page_id page id +@param[in] page_size page size +@return the bitmap page id where the file page is mapped */ UNIV_INLINE -ulint +const page_id_t ibuf_bitmap_page_no_calc( -/*=====================*/ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint page_no) /*!< in: tablespace page number */ + const page_id_t& page_id, + const page_size_t& page_size) { - ut_ad(ut_is_2pow(zip_size)); + ulint bitmap_page_no; - if (!zip_size) { - return(FSP_IBUF_BITMAP_OFFSET - + (page_no & ~(UNIV_PAGE_SIZE - 1))); - } else { - return(FSP_IBUF_BITMAP_OFFSET - + (page_no & ~(zip_size - 1))); - } + bitmap_page_no = FSP_IBUF_BITMAP_OFFSET + + (page_id.page_no() & ~(page_size.physical() - 1)); + + return(page_id_t(page_id.space(), bitmap_page_no)); } -/********************************************************************//** -Gets the ibuf bitmap page where the bits describing a given file page are +/** Gets the ibuf bitmap page where the bits describing a given file page are stored. 
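
The rewritten ibuf_bitmap_page_no_calc() and ibuf_bitmap_page_get_bits_low()/ibuf_bitmap_page_set_bits() above map a page id to its bitmap page and to a byte/bit position inside that page. A self-contained sketch of the addressing, assuming 4 change-buffer bits per page and a bitmap page offset of 1 within each page_size-sized group (both constants and all names below are assumptions, not taken from these hunks):

#include <cstdio>

// Assumed constants; the real ones live in ibuf0ibuf.h / fsp0fsp.h.
static const unsigned long BITS_PER_PAGE_SKETCH = 4;
static const unsigned long BITMAP_OFFSET_SKETCH = 1;

// Which bitmap page describes page_no, for a given physical page size?
static unsigned long bitmap_page_no(unsigned long page_no, unsigned long physical)
{
	return BITMAP_OFFSET_SKETCH + (page_no & ~(physical - 1));
}

// Byte and bit offset of one of a page's bits inside its bitmap page.
static void bitmap_bit_addr(unsigned long page_no, unsigned long physical,
			    unsigned long bit,
			    unsigned long* byte_offset, unsigned long* bit_offset)
{
	unsigned long off = (page_no % physical) * BITS_PER_PAGE_SKETCH + bit;
	*byte_offset = off / 8;
	*bit_offset  = off % 8;
}

int main()
{
	const unsigned long physical = 16384; // 16 KiB page
	unsigned long byte_off, bit_off;

	// bit 0 stands in for the "free" bits of the page (assumed value).
	bitmap_bit_addr(100000, physical, 0, &byte_off, &bit_off);
	std::printf("bitmap page %lu, byte %lu, bit %lu\n",
		    bitmap_page_no(100000, physical), byte_off, bit_off);
	return 0;
}
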
+@param[in] page_id page id of the file page +@param[in] page_size page size of the file page +@param[in] file file name +@param[in] line line where called +@param[in,out] mtr mini-transaction @return bitmap page where the file page is mapped, that is, the bitmap page containing the descriptor bits for the file page; the bitmap page is x-latched */ static page_t* ibuf_bitmap_get_map_page_func( -/*==========================*/ - ulint space, /*!< in: space id of the file page */ - ulint page_no,/*!< in: page number of the file page */ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr) /*!< in: mtr */ + const page_id_t& page_id, + const page_size_t& page_size, + const char* file, + ulint line, + mtr_t* mtr) { buf_block_t* block = NULL; dberr_t err = DB_SUCCESS; - block = buf_page_get_gen(space, zip_size, - ibuf_bitmap_page_no_calc(zip_size, page_no), - RW_X_LATCH, NULL, BUF_GET, + block = buf_page_get_gen(ibuf_bitmap_page_no_calc(page_id, page_size), + page_size, RW_X_LATCH, NULL, BUF_GET, file, line, mtr, &err); if (err != DB_SUCCESS) { @@ -881,18 +833,16 @@ ibuf_bitmap_get_map_page_func( return(buf_block_get_frame(block)); } -/********************************************************************//** -Gets the ibuf bitmap page where the bits describing a given file page are +/** Gets the ibuf bitmap page where the bits describing a given file page are stored. +@param[in] page_id page id of the file page +@param[in] page_size page size of the file page +@param[in,out] mtr mini-transaction @return bitmap page where the file page is mapped, that is, the bitmap page containing the descriptor bits for the file page; the bitmap page -is x-latched -@param space in: space id of the file page -@param page_no in: page number of the file page -@param zip_size in: compressed page size in bytes; 0 for uncompressed pages -@param mtr in: mini-transaction */ -#define ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr) \ - ibuf_bitmap_get_map_page_func(space, page_no, zip_size, \ +is x-latched */ +#define ibuf_bitmap_get_map_page(page_id, page_size, mtr) \ + ibuf_bitmap_get_map_page_func(page_id, page_size, \ __FILE__, __LINE__, mtr) /************************************************************************//** @@ -904,8 +854,6 @@ UNIV_INLINE void ibuf_set_free_bits_low( /*===================*/ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ const buf_block_t* block, /*!< in: index page; free bits are set if the index is non-clustered and page level is 0 */ @@ -913,29 +861,24 @@ ibuf_set_free_bits_low( mtr_t* mtr) /*!< in/out: mtr */ { page_t* bitmap_page; - ulint space; - ulint page_no; + + ut_ad(mtr->is_named_space(block->page.id.space())); if (!page_is_leaf(buf_block_get_frame(block))) { return; } - space = buf_block_get_space(block); - page_no = buf_block_get_page_no(block); - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr); -#ifdef UNIV_IBUF_DEBUG -# if 0 - fprintf(stderr, - "Setting space %lu page %lu free bits to %lu should be %lu\n", - space, page_no, val, - ibuf_index_page_calc_free(zip_size, block)); -# endif + bitmap_page = ibuf_bitmap_get_map_page(block->page.id, + block->page.size, mtr); - ut_a(val <= ibuf_index_page_calc_free(zip_size, block)); +#ifdef UNIV_IBUF_DEBUG + ut_a(val <= ibuf_index_page_calc_free(block)); #endif /* UNIV_IBUF_DEBUG */ - ibuf_bitmap_page_set_bits(bitmap_page, page_no, 
zip_size, - IBUF_BITMAP_FREE, val, mtr); + + ibuf_bitmap_page_set_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_FREE, val, mtr); } /************************************************************************//** @@ -943,7 +886,6 @@ Sets the free bit of the page in the ibuf bitmap. This is done in a separate mini-transaction, hence this operation does not restrict further work to only ibuf bitmap operations, which would result if the latch to the bitmap page were kept. */ -UNIV_INTERN void ibuf_set_free_bits_func( /*====================*/ @@ -959,9 +901,6 @@ ibuf_set_free_bits_func( mtr_t mtr; page_t* page; page_t* bitmap_page; - ulint space; - ulint page_no; - ulint zip_size; page = buf_block_get_frame(block); @@ -971,18 +910,32 @@ ibuf_set_free_bits_func( } mtr_start(&mtr); + const fil_space_t* space = mtr.set_named_space(block->page.id.space()); + + bitmap_page = ibuf_bitmap_get_map_page(block->page.id, + block->page.size, &mtr); - space = buf_block_get_space(block); - page_no = buf_block_get_page_no(block); - zip_size = buf_block_get_zip_size(block); - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, &mtr); + switch (space->purpose) { + case FIL_TYPE_LOG: + ut_ad(0); + break; + case FIL_TYPE_TABLESPACE: + /* Avoid logging while fixing up truncate of table. */ + if (!srv_is_tablespace_truncated(block->page.id.space())) { + break; + } + /* fall through */ + case FIL_TYPE_TEMPORARY: + case FIL_TYPE_IMPORT: + mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO); + } #ifdef UNIV_IBUF_DEBUG if (max_val != ULINT_UNDEFINED) { ulint old_val; old_val = ibuf_bitmap_page_get_bits( - bitmap_page, page_no, zip_size, + bitmap_page, block->page.id, IBUF_BITMAP_FREE, &mtr); # if 0 if (old_val != max_val) { @@ -998,13 +951,16 @@ ibuf_set_free_bits_func( # if 0 fprintf(stderr, "Setting page no %lu free bits to %lu should be %lu\n", page_get_page_no(page), val, - ibuf_index_page_calc_free(zip_size, block)); + ibuf_index_page_calc_free(block)); # endif - ut_a(val <= ibuf_index_page_calc_free(zip_size, block)); + ut_a(val <= ibuf_index_page_calc_free(block)); #endif /* UNIV_IBUF_DEBUG */ - ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size, - IBUF_BITMAP_FREE, val, &mtr); + + ibuf_bitmap_page_set_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_FREE, val, &mtr); + mtr_commit(&mtr); } @@ -1017,7 +973,6 @@ buffer bitmap must never exceed the free space on a page. It is safe to decrement or reset the bits in the bitmap in a mini-transaction that is committed before the mini-transaction that affects the free space. */ -UNIV_INTERN void ibuf_reset_free_bits( /*=================*/ @@ -1036,7 +991,6 @@ thread until mtr is committed. NOTE: The free bits in the insert buffer bitmap must never exceed the free space on a page. It is safe to set the free bits in the same mini-transaction that updated the page. */ -UNIV_INTERN void ibuf_update_free_bits_low( /*======================*/ @@ -1052,17 +1006,19 @@ ibuf_update_free_bits_low( ulint after; ut_a(!buf_block_get_page_zip(block)); + ut_ad(mtr->is_named_space(block->page.id.space())); - before = ibuf_index_page_calc_free_bits(0, max_ins_size); + before = ibuf_index_page_calc_free_bits(block->page.size.logical(), + max_ins_size); - after = ibuf_index_page_calc_free(0, block); + after = ibuf_index_page_calc_free(block); /* This approach cannot be used on compressed pages, since the computed value of "before" often does not match the current state of the bitmap. 
This is because the free space may increase or decrease when a compressed page is reorganized. */ if (before != after) { - ibuf_set_free_bits_low(0, block, after, mtr); + ibuf_set_free_bits_low(block, after, mtr); } } @@ -1074,7 +1030,6 @@ thread until mtr is committed. NOTE: The free bits in the insert buffer bitmap must never exceed the free space on a page. It is safe to set the free bits in the same mini-transaction that updated the page. */ -UNIV_INTERN void ibuf_update_free_bits_zip( /*======================*/ @@ -1082,21 +1037,15 @@ ibuf_update_free_bits_zip( mtr_t* mtr) /*!< in/out: mtr */ { page_t* bitmap_page; - ulint space; - ulint page_no; - ulint zip_size; ulint after; - space = buf_block_get_space(block); - page_no = buf_block_get_page_no(block); - zip_size = buf_block_get_zip_size(block); - ut_a(page_is_leaf(buf_block_get_frame(block))); - ut_a(zip_size); + ut_a(block->page.size.is_compressed()); - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr); + bitmap_page = ibuf_bitmap_get_map_page(block->page.id, + block->page.size, mtr); - after = ibuf_index_page_calc_free_zip(zip_size, block); + after = ibuf_index_page_calc_free_zip(block); if (after == 0) { /* We move the page to the front of the buffer pool LRU list: @@ -1107,8 +1056,9 @@ ibuf_update_free_bits_zip( buf_page_make_young(&block->page); } - ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size, - IBUF_BITMAP_FREE, after, mtr); + ibuf_bitmap_page_set_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_FREE, after, mtr); } /**********************************************************************//** @@ -1118,73 +1068,72 @@ virtually prevent any further operations until mtr is committed. NOTE: The free bits in the insert buffer bitmap must never exceed the free space on a page. It is safe to set the free bits in the same mini-transaction that updated the pages. */ -UNIV_INTERN void ibuf_update_free_bits_for_two_pages_low( /*====================================*/ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ buf_block_t* block1, /*!< in: index page */ buf_block_t* block2, /*!< in: index page */ mtr_t* mtr) /*!< in: mtr */ { ulint state; + ut_ad(mtr->is_named_space(block1->page.id.space())); + ut_ad(block1->page.id.space() == block2->page.id.space()); + /* As we have to x-latch two random bitmap pages, we have to acquire the bitmap mutex to prevent a deadlock with a similar operation performed by another OS thread. */ mutex_enter(&ibuf_bitmap_mutex); - state = ibuf_index_page_calc_free(zip_size, block1); + state = ibuf_index_page_calc_free(block1); - ibuf_set_free_bits_low(zip_size, block1, state, mtr); + ibuf_set_free_bits_low(block1, state, mtr); - state = ibuf_index_page_calc_free(zip_size, block2); + state = ibuf_index_page_calc_free(block2); - ibuf_set_free_bits_low(zip_size, block2, state, mtr); + ibuf_set_free_bits_low(block2, state, mtr); mutex_exit(&ibuf_bitmap_mutex); } -/**********************************************************************//** -Returns TRUE if the page is one of the fixed address ibuf pages. -@return TRUE if a fixed address ibuf i/o page */ +/** Returns TRUE if the page is one of the fixed address ibuf pages. 
+@param[in] page_id page id +@param[in] page_size page size +@return TRUE if a fixed address ibuf i/o page */ UNIV_INLINE ibool ibuf_fixed_addr_page( -/*=================*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint page_no)/*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size) { - return((space == IBUF_SPACE_ID && page_no == IBUF_TREE_ROOT_PAGE_NO) - || ibuf_bitmap_page(zip_size, page_no)); + return((page_id.space() == IBUF_SPACE_ID + && page_id.page_no() == IBUF_TREE_ROOT_PAGE_NO) + || ibuf_bitmap_page(page_id, page_size)); } -/***********************************************************************//** -Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages. -Must not be called when recv_no_ibuf_operations==TRUE. -@return TRUE if level 2 or level 3 page */ -UNIV_INTERN +/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages. +Must not be called when recv_no_ibuf_operations==true. +@param[in] page_id page id +@param[in] page_size page size +@param[in] x_latch FALSE if relaxed check (avoid latching the +bitmap page) +@param[in] file file name +@param[in] line line where called +@param[in,out] mtr mtr which will contain an x-latch to the +bitmap page if the page is not one of the fixed address ibuf pages, or NULL, +in which case a new transaction is created. +@return TRUE if level 2 or level 3 page */ ibool ibuf_page_low( -/*==========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes, or 0 */ - ulint page_no,/*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size, #ifdef UNIV_DEBUG - ibool x_latch,/*!< in: FALSE if relaxed check - (avoid latching the bitmap page) */ + ibool x_latch, #endif /* UNIV_DEBUG */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr) /*!< in: mtr which will contain an - x-latch to the bitmap page if the page - is not one of the fixed address ibuf - pages, or NULL, in which case a new - transaction is created. */ + const char* file, + ulint line, + mtr_t* mtr) { ibool ret; mtr_t local_mtr; @@ -1193,15 +1142,15 @@ ibuf_page_low( ut_ad(!recv_no_ibuf_operations); ut_ad(x_latch || mtr == NULL); - if (ibuf_fixed_addr_page(space, zip_size, page_no)) { + if (ibuf_fixed_addr_page(page_id, page_size)) { return(TRUE); - } else if (space != IBUF_SPACE_ID) { + } else if (page_id.space() != IBUF_SPACE_ID) { return(FALSE); } - ut_ad(fil_space_get_type(IBUF_SPACE_ID) == FIL_TABLESPACE); + ut_ad(fil_space_get_type(IBUF_SPACE_ID) == FIL_TYPE_TABLESPACE); #ifdef UNIV_DEBUG if (!x_latch) { @@ -1217,16 +1166,17 @@ ibuf_page_low( not be modified by any other thread. Nobody should be calling ibuf_add_free_page() or ibuf_remove_free_page() while the page is linked to the insert buffer b-tree. 
*/ + dberr_t err = DB_SUCCESS; + + buf_block_t* block = buf_page_get_gen( + ibuf_bitmap_page_no_calc(page_id, page_size), + page_size, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH, + file, line, &local_mtr, &err); - bitmap_page = buf_block_get_frame( - buf_page_get_gen( - space, zip_size, - ibuf_bitmap_page_no_calc(zip_size, page_no), - RW_NO_LATCH, NULL, BUF_GET_NO_LATCH, - file, line, &local_mtr)); + bitmap_page = buf_block_get_frame(block); ret = ibuf_bitmap_page_get_bits_low( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, page_size, MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF); mtr_commit(&local_mtr); @@ -1239,10 +1189,10 @@ ibuf_page_low( mtr_start(mtr); } - bitmap_page = ibuf_bitmap_get_map_page_func(space, page_no, zip_size, + bitmap_page = ibuf_bitmap_get_map_page_func(page_id, page_size, file, line, mtr); - ret = ibuf_bitmap_page_get_bits(bitmap_page, page_no, zip_size, + ret = ibuf_bitmap_page_get_bits(bitmap_page, page_id, page_size, IBUF_BITMAP_IBUF, mtr); if (mtr == &local_mtr) { @@ -1260,7 +1210,7 @@ ibuf_page_low( /********************************************************************//** Returns the page number field of an ibuf record. -@return page number */ +@return page number */ static ulint ibuf_rec_get_page_no_func( @@ -1298,7 +1248,7 @@ ibuf_rec_get_page_no_func( /********************************************************************//** Returns the space id field of an ibuf record. For < 4.1.x format records returns 0. -@return space id */ +@return space id */ static ulint ibuf_rec_get_space_func( @@ -1421,7 +1371,7 @@ ibuf_rec_get_info_func( /****************************************************************//** Returns the operation type field of an ibuf record. -@return operation type */ +@return operation type */ static ibuf_op_t ibuf_rec_get_op_type_func( @@ -1458,7 +1408,6 @@ Read the first two bytes from a record's fourth field (counter field in new records; something else in older records). @return "counter" field, or ULINT_UNDEFINED if for some reason it can't be read */ -UNIV_INTERN ulint ibuf_rec_get_counter( /*=================*/ @@ -1496,16 +1445,8 @@ ibuf_add_ops( { ulint i; -#ifndef HAVE_ATOMIC_BUILTINS - ut_ad(mutex_own(&ibuf_mutex)); -#endif /* !HAVE_ATOMIC_BUILTINS */ - for (i = 0; i < IBUF_OP_COUNT; i++) { -#ifdef HAVE_ATOMIC_BUILTINS os_atomic_increment_ulint(&arr[i], ops[i]); -#else /* HAVE_ATOMIC_BUILTINS */ - arr[i] += ops[i]; -#endif /* HAVE_ATOMIC_BUILTINS */ } } @@ -1537,7 +1478,7 @@ ibuf_print_ops( /********************************************************************//** Creates a dummy index for inserting a record to a non-clustered index. -@return dummy index */ +@return dummy index */ static dict_index_t* ibuf_dummy_index_create( @@ -1549,7 +1490,7 @@ ibuf_dummy_index_create( dict_index_t* index; table = dict_mem_table_create("IBUF_DUMMY", - DICT_HDR_SPACE, n, + DICT_HDR_SPACE, n, 0, comp ? DICT_TF_COMPACT : 0, 0); index = dict_mem_index_create("IBUF_DUMMY", "IBUF_DUMMY", @@ -1695,7 +1636,7 @@ ibuf_build_entry_from_ibuf_rec_func( /******************************************************************//** Get the data size. -@return size of fields */ +@return size of fields */ UNIV_INLINE ulint ibuf_rec_get_size( @@ -1819,7 +1760,7 @@ non-clustered index. NOTE that the original entry must be kept because we copy pointers to its fields. 
-@return own: entry to insert into an ibuf index tree */ +@return own: entry to insert into an ibuf index tree */ static dtuple_t* ibuf_entry_build( @@ -1981,7 +1922,7 @@ ibuf_entry_build( /*********************************************************************//** Builds a search tuple used to search buffered inserts for an index page. This is for >= 4.1.x format records. -@return own: search tuple */ +@return own: search tuple */ static dtuple_t* ibuf_search_tuple_build( @@ -2034,7 +1975,7 @@ ibuf_search_tuple_build( /*********************************************************************//** Checks if there are enough pages in the free list of the ibuf tree that we dare to start a pessimistic insert to the insert buffer. -@return TRUE if enough free pages in list */ +@return TRUE if enough free pages in list */ UNIV_INLINE ibool ibuf_data_enough_free_for_insert(void) @@ -2054,7 +1995,7 @@ ibuf_data_enough_free_for_insert(void) /*********************************************************************//** Checks if there are enough pages in the free list of the ibuf tree that we should remove them and free to the file space management. -@return TRUE if enough free pages in list */ +@return TRUE if enough free pages in list */ UNIV_INLINE ibool ibuf_data_too_much_free(void) @@ -2068,7 +2009,7 @@ ibuf_data_too_much_free(void) /*********************************************************************//** Allocates a new page from the ibuf file segment and adds it to the free list. -@return TRUE on success, FALSE if no space left */ +@return TRUE on success, FALSE if no space left */ static ibool ibuf_add_free_page(void) @@ -2076,20 +2017,17 @@ ibuf_add_free_page(void) { mtr_t mtr; page_t* header_page; - ulint flags; - ulint zip_size; buf_block_t* block; page_t* page; page_t* root; page_t* bitmap_page; mtr_start(&mtr); + fil_space_t* space = mtr.set_sys_modified(); /* Acquire the fsp latch before the ibuf header, obeying the latching order */ - mtr_x_lock(fil_space_get_latch(IBUF_SPACE_ID, &flags), &mtr); - zip_size = fsp_flags_get_zip_size(flags); - + mtr_x_lock(&space->latch, &mtr); header_page = ibuf_header_page_get(&mtr); /* Allocate a new page: NOTE that if the page has been a part of a @@ -2134,14 +2072,15 @@ ibuf_add_free_page(void) /* Set the bit indicating that this page is now an ibuf tree page (level 2 page) */ - bitmap_page = ibuf_bitmap_get_map_page( - IBUF_SPACE_ID, buf_block_get_page_no(block), zip_size, &mtr); + const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no()); + const page_size_t page_size(space->flags); + + bitmap_page = ibuf_bitmap_get_map_page(page_id, page_size, &mtr); mutex_exit(&ibuf_mutex); - ibuf_bitmap_page_set_bits( - bitmap_page, buf_block_get_page_no(block), zip_size, - IBUF_BITMAP_IBUF, TRUE, &mtr); + ibuf_bitmap_page_set_bits(bitmap_page, page_id, page_size, + IBUF_BITMAP_IBUF, TRUE, &mtr); ibuf_mtr_commit(&mtr); @@ -2158,20 +2097,19 @@ ibuf_remove_free_page(void) mtr_t mtr; mtr_t mtr2; page_t* header_page; - ulint flags; - ulint zip_size; ulint page_no; page_t* page; page_t* root; page_t* bitmap_page; mtr_start(&mtr); + fil_space_t* space = mtr.set_sys_modified(); + const page_size_t page_size(space->flags); /* Acquire the fsp latch before the ibuf header, obeying the latching order */ - mtr_x_lock(fil_space_get_latch(IBUF_SPACE_ID, &flags), &mtr); - zip_size = fsp_flags_get_zip_size(flags); + mtr_x_lock(&space->latch, &mtr); header_page = ibuf_header_page_get(&mtr); /* Prevent pessimistic inserts to insert buffer trees for a while */ @@ -2212,11 +2150,11 
@@ ibuf_remove_free_page(void) page from it. */ fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER, - IBUF_SPACE_ID, page_no, &mtr); + IBUF_SPACE_ID, page_no, false, &mtr); + + const page_id_t page_id(IBUF_SPACE_ID, page_no); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - buf_page_reset_file_page_was_freed(IBUF_SPACE_ID, page_no); -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + ut_d(buf_page_reset_file_page_was_freed(page_id)); ibuf_enter(&mtr); @@ -2230,8 +2168,7 @@ ibuf_remove_free_page(void) { buf_block_t* block; - block = buf_page_get( - IBUF_SPACE_ID, 0, page_no, RW_X_LATCH, &mtr); + block = buf_page_get(page_id, univ_page_size, RW_X_LATCH, &mtr); buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE); @@ -2251,17 +2188,16 @@ ibuf_remove_free_page(void) /* Set the bit indicating that this page is no more an ibuf tree page (level 2 page) */ - bitmap_page = ibuf_bitmap_get_map_page( - IBUF_SPACE_ID, page_no, zip_size, &mtr); + bitmap_page = ibuf_bitmap_get_map_page(page_id, page_size, &mtr); mutex_exit(&ibuf_mutex); ibuf_bitmap_page_set_bits( - bitmap_page, page_no, zip_size, IBUF_BITMAP_IBUF, FALSE, &mtr); + bitmap_page, page_id, page_size, IBUF_BITMAP_IBUF, FALSE, + &mtr); + + ut_d(buf_page_set_file_page_was_freed(page_id)); -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG - buf_page_set_file_page_was_freed(IBUF_SPACE_ID, page_no); -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ ibuf_mtr_commit(&mtr); } @@ -2269,17 +2205,11 @@ ibuf_remove_free_page(void) Frees excess pages from the ibuf free list. This function is called when an OS thread calls fsp services to allocate a new file segment, or a new page to a file segment, and the thread did not own the fsp latch before this call. */ -UNIV_INTERN void ibuf_free_excess_pages(void) /*========================*/ { - ulint i; - -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(fil_space_get_latch(IBUF_SPACE_ID, NULL), - RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(fil_space_get_latch(IBUF_SPACE_ID, NULL), RW_LOCK_X)); ut_ad(rw_lock_get_x_lock_count( fil_space_get_latch(IBUF_SPACE_ID, NULL)) == 1); @@ -2298,7 +2228,7 @@ ibuf_free_excess_pages(void) /* Free at most a few pages at a time, so that we do not delay the requested service too much */ - for (i = 0; i < 4; i++) { + for (ulint i = 0; i < 4; i++) { ibool too_much_free; @@ -2319,7 +2249,7 @@ ibuf_free_excess_pages(void) ibuf_get_merge_page_nos_func(contract,rec,mtr,ids,vers,pages,n_stored) #else /* UNIV_DEBUG */ # define ibuf_get_merge_page_nos(contract,rec,mtr,ids,vers,pages,n_stored) \ - ibuf_get_merge_page_nos_func(contract,rec,ids,vers,pages,n_stored) + ibuf_get_merge_page_nos_func(contract,rec,ids,vers, pages,n_stored) #endif /* UNIV_DEBUG */ /*********************************************************************//** @@ -2339,12 +2269,12 @@ ibuf_get_merge_page_nos_func( mtr_t* mtr, /*!< in: mini-transaction holding rec */ #endif /* UNIV_DEBUG */ ulint* space_ids,/*!< in/out: space id's of the pages */ - ib_int64_t* space_versions,/*!< in/out: tablespace version - timestamps; used to prevent reading in old - pages after DISCARD + IMPORT tablespace */ ulint* page_nos,/*!< in/out: buffer for at least IBUF_MAX_N_PAGES_MERGED many page numbers; the page numbers are in an ascending order */ + ib_uint64_t* space_versions,/*!< in/out: tablespace version + timestamps; used to prevent reading in old + pages after DISCARD + IMPORT tablespace */ ulint* n_stored)/*!< out: number of page numbers stored to page_nos in this function */ { 
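/* [Editor's note: the sketch below is illustrative material added for this
   write-up; it is not part of the patch above and not the authoritative
   InnoDB implementation.  The surrounding hunks repeatedly resolve an index
   page to its change-buffer bitmap page via ibuf_bitmap_get_map_page() and
   then read or set that page's bits with ibuf_bitmap_page_get_bits() /
   ibuf_bitmap_page_set_bits(), now keyed by (page_id, page_size) instead of
   the old (space, page_no, zip_size) triple.  Assuming the usual layout --
   one bitmap page at offset 1 of every page_size-sized group of pages, and
   four bitmap bits per described page -- the arithmetic behind those calls
   looks roughly like this.  The constant names and values here are the
   editor's simplification, not definitions taken from the source tree. */

#include <cstdint>
#include <cstdio>

namespace ibuf_sketch {

/* Assumed layout constants (see the hedging note above). */
constexpr uint32_t BITS_PER_PAGE      = 4;	/* FREE: 2 bits, BUFFERED: 1, IBUF: 1 */
constexpr uint32_t IBUF_BITMAP_OFFSET = 1;	/* bitmap page within each group */

/* Page number (in the same tablespace) of the bitmap page that describes
page_no, when each bitmap page covers pages_per_group pages. */
uint32_t bitmap_page_no(uint32_t page_no, uint32_t pages_per_group)
{
	return(page_no - page_no % pages_per_group + IBUF_BITMAP_OFFSET);
}

/* First bit, within that bitmap page's bit array, of the four bits that
describe page_no. */
uint32_t bitmap_bit_offset(uint32_t page_no, uint32_t pages_per_group)
{
	return((page_no % pages_per_group) * BITS_PER_PAGE);
}

}  /* namespace ibuf_sketch */

int main()
{
	/* With 16 KiB pages a bitmap page covers 16384 pages, so page 40000
	is described by bitmap page 32769 (= 32768 + 1), bits 28928..28931. */
	const uint32_t pages_per_group = 16384;
	const uint32_t page_no         = 40000;

	std::printf("bitmap page %u, first bit %u\n",
		    ibuf_sketch::bitmap_page_no(page_no, pages_per_group),
		    ibuf_sketch::bitmap_bit_offset(page_no, pages_per_group));
	return(0);
}
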
@@ -2366,7 +2296,8 @@ ibuf_get_merge_page_nos_func( *n_stored = 0; - limit = ut_min(IBUF_MAX_N_PAGES_MERGED, buf_pool_get_curr_size() / 4); + limit = ut_min(IBUF_MAX_N_PAGES_MERGED, + buf_pool_get_curr_size() / 4); if (page_rec_is_supremum(rec)) { @@ -2436,16 +2367,23 @@ ibuf_get_merge_page_nos_func( } else { rec_page_no = ibuf_rec_get_page_no(mtr, rec); rec_space_id = ibuf_rec_get_space(mtr, rec); - /* In the system tablespace, the smallest + /* In the system tablespace the smallest possible secondary index leaf page number is - bigger than IBUF_TREE_ROOT_PAGE_NO (4). In - other tablespaces, the clustered index tree is - created at page 3, which makes page 4 the - smallest possible secondary index leaf page - (and that only after DROP INDEX). */ - ut_ad(rec_page_no - > (ulint) IBUF_TREE_ROOT_PAGE_NO - - (rec_space_id != 0)); + bigger than FSP_DICT_HDR_PAGE_NO (7). + In all tablespaces, pages 0 and 1 are reserved + for the allocation bitmap and the change + buffer bitmap. In file-per-table tablespaces, + a file segment inode page will be created at + page 2 and the clustered index tree is created + at page 3. So for file-per-table tablespaces, + page 4 is the smallest possible secondary + index leaf page. CREATE TABLESPACE also initially + uses pages 2 and 3 for the first created table, + but that table may be dropped, allowing page 2 + to be reused for a secondary index leaf page. + To keep this assertion simple, just + make sure the page is >= 2. */ + ut_ad(rec_page_no >= FSP_FIRST_INODE_PAGE_NO); } #ifdef UNIV_IBUF_DEBUG @@ -2512,7 +2450,7 @@ ibuf_get_merge_page_nos_func( /*******************************************************************//** Get the matching records for space id. -@return current rec or NULL */ +@return current rec or NULL */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) const rec_t* ibuf_get_user_rec( @@ -2544,13 +2482,13 @@ ibuf_get_merge_pages( ulint limit, /*!< in: max page numbers to read */ ulint* pages, /*!< out: pages read */ ulint* spaces, /*!< out: spaces read */ - ib_int64_t* versions,/*!< out: space versions read */ + ib_uint64_t* versions,/*!< out: space versions read */ ulint* n_pages,/*!< out: number of pages read */ mtr_t* mtr) /*!< in: mini transaction */ { const rec_t* rec; ulint volume = 0; - ib_int64_t version = fil_space_get_version(space); + ib_uint64_t version = fil_space_get_version(space); ut_a(space != ULINT_UNDEFINED); @@ -2596,7 +2534,7 @@ ibuf_merge_pages( ulint sum_sizes; ulint page_nos[IBUF_MAX_N_PAGES_MERGED]; ulint space_ids[IBUF_MAX_N_PAGES_MERGED]; - ib_int64_t space_versions[IBUF_MAX_N_PAGES_MERGED]; + ib_uint64_t space_versions[IBUF_MAX_N_PAGES_MERGED]; *n_pages = 0; @@ -2604,8 +2542,12 @@ ibuf_merge_pages( /* Open a cursor to a randomly chosen leaf of the tree, at a random position within the leaf */ + bool available; - btr_pcur_open_at_rnd_pos(ibuf->index, BTR_SEARCH_LEAF, &pcur, &mtr); + available = btr_pcur_open_at_rnd_pos(ibuf->index, BTR_SEARCH_LEAF, + &pcur, &mtr); + /* No one should make this index unavailable when server is running */ + ut_a(available); ut_ad(page_validate(btr_pcur_get_page(&pcur), ibuf->index)); @@ -2627,8 +2569,8 @@ ibuf_merge_pages( sum_sizes = ibuf_get_merge_page_nos(TRUE, btr_pcur_get_rec(&pcur), &mtr, - space_ids, space_versions, - page_nos, n_pages); + space_ids, + page_nos, space_versions, n_pages); #if 0 /* defined UNIV_IBUF_DEBUG */ fprintf(stderr, "Ibuf contract sync %lu pages %lu volume %lu\n", sync, *n_pages, sum_sizes); @@ -2660,6 +2602,8 @@ ibuf_merge_space( ut_ad(space < 
SRV_LOG_SPACE_FIRST_ID); + ut_ad(space < SRV_LOG_SPACE_FIRST_ID); + ibuf_mtr_start(&mtr); /* Position the cursor on the first matching record. */ @@ -2675,7 +2619,7 @@ ibuf_merge_space( ulint sum_sizes = 0; ulint pages[IBUF_MAX_N_PAGES_MERGED]; ulint spaces[IBUF_MAX_N_PAGES_MERGED]; - ib_int64_t versions[IBUF_MAX_N_PAGES_MERGED]; + ib_uint64_t versions[IBUF_MAX_N_PAGES_MERGED]; if (page_is_empty(btr_pcur_get_page(&pcur))) { /* If a B-tree page is empty, it must be the root page @@ -2693,8 +2637,7 @@ ibuf_merge_space( &pcur, space, IBUF_MAX_N_PAGES_MERGED, &pages[0], &spaces[0], &versions[0], &n_pages, &mtr); - ib_logf(IB_LOG_LEVEL_INFO,"\n Size of pages merged %lu" - ,sum_sizes); + ib::info() << "Size of pages merged " << sum_sizes; } @@ -2724,19 +2667,17 @@ ibuf_merge_space( @param[out] n_pages number of pages merged @param[in] sync whether the caller waits for the issued reads to complete +@param[in] space_id tablespace for which to merge, or +ULINT_UNDEFINED for all tablespaces @return a lower limit for the combined size in bytes of entries which will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) ulint ibuf_merge( -/*=======*/ - ulint* n_pages, /*!< out: number of pages to - which merged */ - bool sync) /*!< in: TRUE if the caller - wants to wait for the issued - read with the highest - tablespace address to complete */ + ulint* n_pages, + bool sync, + ulint space_id) { *n_pages = 0; @@ -2751,8 +2692,10 @@ ibuf_merge( } else if (ibuf_debug) { return(0); #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ - } else { + } else if (space_id == ULINT_UNDEFINED) { return(ibuf_merge_pages(n_pages, sync)); + } else { + return(ibuf_merge_space(space_id)); } } @@ -2764,10 +2707,7 @@ will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ static ulint ibuf_contract( -/*==========*/ - bool sync) /*!< in: TRUE if the caller wants to wait for the - issued read with the highest tablespace address - to complete */ + bool sync) { ulint n_pages; @@ -2778,18 +2718,15 @@ ibuf_contract( @param[in] full If true, do a full contraction based on PCT_IO(100). If false, the size of contract batch is determined based on the current size of the change buffer. +@param[in] space_id tablespace for which to contract, or +ULINT_UNDEFINED to contract for all tablespaces @return a lower limit for the combined size in bytes of entries which will be merged from ibuf trees to the pages read, 0 if ibuf is empty */ -UNIV_INTERN ulint ibuf_merge_in_background( -/*=====================*/ - bool full) /*!< in: TRUE if the caller wants to - do a full contract based on PCT_IO(100). - If FALSE then the size of contract - batch is determined based on the - current size of the ibuf tree. */ + bool full, + ulint space_id) { ulint sum_bytes = 0; ulint sum_pages = 0; @@ -2797,7 +2734,7 @@ ibuf_merge_in_background( ulint n_pages; #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG - if (srv_ibuf_disable_background_merge) { + if (srv_ibuf_disable_background_merge && space_id == ULINT_UNDEFINED) { return(0); } #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ @@ -2834,7 +2771,7 @@ ibuf_merge_in_background( while (sum_pages < n_pages) { ulint n_bytes; - n_bytes = ibuf_merge(&n_pag2, false); + n_bytes = ibuf_merge(&n_pag2, false, space_id); if (n_bytes == 0) { return(sum_bytes); @@ -2890,7 +2827,7 @@ ibuf_contract_after_insert( /*********************************************************************//** Determine if an insert buffer record has been encountered already. 
-@return TRUE if a new record, FALSE if possible duplicate */ +@return TRUE if a new record, FALSE if possible duplicate */ static ibool ibuf_get_volume_buffered_hash( @@ -2933,7 +2870,8 @@ ibuf_get_volume_buffered_hash( #else /* UNIV_DEBUG */ # define ibuf_get_volume_buffered_count(mtr,rec,hash,size,n_recs) \ ibuf_get_volume_buffered_count_func(rec,hash,size,n_recs) -#endif +#endif /* UNIV_DEBUG */ + /*********************************************************************//** Update the estimate of the number of records on a page, and get the space taken by merging the buffered record to the index page. @@ -3144,12 +3082,11 @@ ibuf_get_volume_buffered( buf_block_t* block; block = buf_page_get( - IBUF_SPACE_ID, 0, prev_page_no, RW_X_LATCH, - mtr); + page_id_t(IBUF_SPACE_ID, prev_page_no), + univ_page_size, RW_X_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE); - prev_page = buf_block_get_frame(block); ut_ad(page_validate(prev_page, ibuf->index)); } @@ -3217,12 +3154,11 @@ count_later: buf_block_t* block; block = buf_page_get( - IBUF_SPACE_ID, 0, next_page_no, RW_X_LATCH, - mtr); + page_id_t(IBUF_SPACE_ID, next_page_no), + univ_page_size, RW_X_LATCH, mtr); buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE); - next_page = buf_block_get_frame(block); ut_ad(page_validate(next_page, ibuf->index)); } @@ -3259,7 +3195,6 @@ count_later: /*********************************************************************//** Reads the biggest tablespace id from the high end of the insert buffer tree and updates the counter in fil_system. */ -UNIV_INTERN void ibuf_update_max_tablespace_id(void) /*===============================*/ @@ -3383,12 +3318,12 @@ ibuf_get_entry_counter_low_func( #else /* UNIV_DEBUG */ # define ibuf_get_entry_counter(space,page_no,rec,mtr,exact_leaf) \ ibuf_get_entry_counter_func(space,page_no,rec,exact_leaf) -#endif +#endif /* UNIV_DEBUG */ /****************************************************************//** Calculate the counter field for an entry based on the current last record in ibuf for (space, page_no). -@return the counter field, or ULINT_UNDEFINED +@return the counter field, or ULINT_UNDEFINED if we should abort this insertion to ibuf */ static ulint @@ -3434,28 +3369,32 @@ ibuf_get_entry_counter_func( } } -/*********************************************************************//** -Buffer an operation in the insert/delete buffer, instead of doing it +/** Buffer an operation in the insert/delete buffer, instead of doing it directly to the disk page, if this is possible. 
-@return DB_SUCCESS, DB_STRONG_FAIL or other error */ +@param[in] mode BTR_MODIFY_PREV or BTR_MODIFY_TREE +@param[in] op operation type +@param[in] no_counter TRUE=use 5.0.3 format; FALSE=allow delete +buffering +@param[in] entry index entry to insert +@param[in] entry_size rec_get_converted_size(index, entry) +@param[in,out] index index where to insert; must not be unique +or clustered +@param[in] page_id page id where to insert +@param[in] page_size page size +@param[in,out] thr query thread +@return DB_SUCCESS, DB_STRONG_FAIL or other error */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t ibuf_insert_low( -/*============*/ - ulint mode, /*!< in: BTR_MODIFY_PREV or BTR_MODIFY_TREE */ - ibuf_op_t op, /*!< in: operation type */ - ibool no_counter, - /*!< in: TRUE=use 5.0.3 format; - FALSE=allow delete buffering */ - const dtuple_t* entry, /*!< in: index entry to insert */ - ulint entry_size, - /*!< in: rec_get_converted_size(index, entry) */ - dict_index_t* index, /*!< in: index where to insert; must not be - unique or clustered */ - ulint space, /*!< in: space id where to insert */ - ulint zip_size,/*!< in: compressed page size in bytes, or 0 */ - ulint page_no,/*!< in: page number where to insert */ - que_thr_t* thr) /*!< in: query thread */ + ulint mode, + ibuf_op_t op, + ibool no_counter, + const dtuple_t* entry, + ulint entry_size, + dict_index_t* index, + const page_id_t& page_id, + const page_size_t& page_size, + que_thr_t* thr) { big_rec_t* dummy_big_rec; btr_pcur_t pcur; @@ -3474,15 +3413,15 @@ ibuf_insert_low( dberr_t err; ibool do_merge; ulint space_ids[IBUF_MAX_N_PAGES_MERGED]; - ib_int64_t space_versions[IBUF_MAX_N_PAGES_MERGED]; ulint page_nos[IBUF_MAX_N_PAGES_MERGED]; + ib_uint64_t space_versions[IBUF_MAX_N_PAGES_MERGED]; ulint n_stored; mtr_t mtr; mtr_t bitmap_mtr; ut_a(!dict_index_is_clust(index)); + ut_ad(!dict_index_is_spatial(index)); ut_ad(dtuple_check_typed(entry)); - ut_ad(ut_is_2pow(zip_size)); ut_ad(!no_counter || op == IBUF_OP_INSERT); ut_a(op < IBUF_OP_COUNT); @@ -3521,14 +3460,14 @@ ibuf_insert_low( value just before actually inserting the entry.) */ ibuf_entry = ibuf_entry_build( - op, index, entry, space, page_no, + op, index, entry, page_id.space(), page_id.page_no(), no_counter ? ULINT_UNDEFINED : 0xFFFF, heap); /* Open a cursor to the insert buffer tree to calculate if we can add the new entry to it without exceeding the free space limit for the page. */ - if (mode == BTR_MODIFY_TREE) { + if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) { for (;;) { mutex_enter(&ibuf_pessimistic_insert_mutex); mutex_enter(&ibuf_mutex); @@ -3541,7 +3480,7 @@ ibuf_insert_low( mutex_exit(&ibuf_mutex); mutex_exit(&ibuf_pessimistic_insert_mutex); - if (UNIV_UNLIKELY(!ibuf_add_free_page())) { + if (!ibuf_add_free_page()) { mem_heap_free(heap); return(DB_STRONG_FAIL); @@ -3557,14 +3496,15 @@ ibuf_insert_low( /* Find out the volume of already buffered inserts for the same index page */ min_n_recs = 0; - buffered = ibuf_get_volume_buffered(&pcur, space, page_no, + buffered = ibuf_get_volume_buffered(&pcur, + page_id.space(), + page_id.page_no(), op == IBUF_OP_DELETE ? &min_n_recs : NULL, &mtr); if (op == IBUF_OP_DELETE - && (min_n_recs < 2 - || buf_pool_watch_occurred(space, page_no))) { + && (min_n_recs < 2 || buf_pool_watch_occurred(page_id))) { /* The page could become empty after the record is deleted, or the page has been read in to the buffer pool. Refuse to buffer the operation. 
*/ @@ -3583,7 +3523,7 @@ ibuf_insert_low( until after the IBUF_OP_DELETE has been buffered. */ fail_exit: - if (mode == BTR_MODIFY_TREE) { + if (BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) { mutex_exit(&ibuf_mutex); mutex_exit(&ibuf_pessimistic_insert_mutex); } @@ -3602,17 +3542,19 @@ fail_exit: and done mtr_commit(&mtr) to release the latch. */ #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a((buffered == 0) || ibuf_count_get(space, page_no)); + ut_a((buffered == 0) || ibuf_count_get(page_id)); #endif ibuf_mtr_start(&bitmap_mtr); + bitmap_mtr.set_named_space(page_id.space()); - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, - zip_size, &bitmap_mtr); + bitmap_page = ibuf_bitmap_get_map_page(page_id, page_size, + &bitmap_mtr); /* We check if the index page is suitable for buffered entries */ - if (buf_page_peek(space, page_no) - || lock_rec_expl_exist_on_page(space, page_no)) { + if (buf_page_peek(page_id) + || lock_rec_expl_exist_on_page(page_id.space(), + page_id.page_no())) { ibuf_mtr_commit(&bitmap_mtr); goto fail_exit; @@ -3620,11 +3562,11 @@ fail_exit: if (op == IBUF_OP_INSERT) { ulint bits = ibuf_bitmap_page_get_bits( - bitmap_page, page_no, zip_size, IBUF_BITMAP_FREE, + bitmap_page, page_id, page_size, IBUF_BITMAP_FREE, &bitmap_mtr); if (buffered + entry_size + page_dir_calc_reserved_space(1) - > ibuf_index_page_calc_free_from_bits(zip_size, bits)) { + > ibuf_index_page_calc_free_from_bits(page_size, bits)) { /* Release the bitmap page latch early. */ ibuf_mtr_commit(&bitmap_mtr); @@ -3633,8 +3575,8 @@ fail_exit: ibuf_get_merge_page_nos(FALSE, btr_pcur_get_rec(&pcur), &mtr, - space_ids, space_versions, - page_nos, &n_stored); + space_ids, + page_nos, space_versions, &n_stored); goto fail_exit; } @@ -3645,7 +3587,8 @@ fail_exit: insert. This can change the insert position, which can result in the need to abort in some cases. */ ulint counter = ibuf_get_entry_counter( - space, page_no, btr_pcur_get_rec(&pcur), &mtr, + page_id.space(), page_id.page_no(), + btr_pcur_get_rec(&pcur), &mtr, btr_pcur_get_btr_cur(&pcur)->low_match < IBUF_REC_FIELD_METADATA); dfield_t* field; @@ -3666,11 +3609,11 @@ fail_exit: buffered entries for this index page, if the bit is not set yet */ old_bit_value = ibuf_bitmap_page_get_bits( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, page_size, IBUF_BITMAP_BUFFERED, &bitmap_mtr); if (!old_bit_value) { - ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size, + ibuf_bitmap_page_set_bits(bitmap_page, page_id, page_size, IBUF_BITMAP_BUFFERED, TRUE, &bitmap_mtr); } @@ -3686,11 +3629,10 @@ fail_exit: ibuf_entry, &ins_rec, &dummy_big_rec, 0, thr, &mtr); block = btr_cur_get_block(cursor); - ut_ad(buf_block_get_space(block) == IBUF_SPACE_ID); + ut_ad(block->page.id.space() == IBUF_SPACE_ID); /* If this is the root page, update ibuf->empty. 
*/ - if (UNIV_UNLIKELY(buf_block_get_page_no(block) - == FSP_IBUF_TREE_ROOT_PAGE_NO)) { + if (block->page.id.page_no() == FSP_IBUF_TREE_ROOT_PAGE_NO) { const page_t* root = buf_block_get_frame(block); ut_ad(page_get_space_id(root) == IBUF_SPACE_ID); @@ -3700,11 +3642,12 @@ fail_exit: ibuf->empty = page_is_empty(root); } } else { - ut_ad(mode == BTR_MODIFY_TREE); + ut_ad(BTR_LATCH_MODE_WITHOUT_INTENTION(mode) + == BTR_MODIFY_TREE); - /* We acquire an x-latch to the root page before the insert, + /* We acquire an sx-latch to the root page before the insert, because a pessimistic insert releases the tree x-latch, - which would cause the x-latching of the root after that to + which would cause the sx-latching of the root after that to break the latching order. */ root = ibuf_tree_root_get(&mtr); @@ -3724,12 +3667,12 @@ fail_exit: } mutex_exit(&ibuf_pessimistic_insert_mutex); - ibuf_size_update(root, &mtr); + ibuf_size_update(root); mutex_exit(&ibuf_mutex); ibuf->empty = page_is_empty(root); block = btr_cur_get_block(cursor); - ut_ad(buf_block_get_space(block) == IBUF_SPACE_ID); + ut_ad(block->page.id.space() == IBUF_SPACE_ID); } if (offsets_heap) { @@ -3745,13 +3688,12 @@ fail_exit: func_exit: #ifdef UNIV_IBUF_COUNT_DEBUG if (err == DB_SUCCESS) { - fprintf(stderr, - "Incrementing ibuf count of space %lu page %lu\n" - "from %lu by 1\n", space, page_no, - ibuf_count_get(space, page_no)); - ibuf_count_set(space, page_no, - ibuf_count_get(space, page_no) + 1); + ib::info() << "Incrementing ibuf count of page " << page_id + << " from " << ibuf_count_get(space, page_no) + << " by 1"; + + ibuf_count_set(page_id, ibuf_count_get(page_id) + 1); } #endif @@ -3760,7 +3702,8 @@ func_exit: mem_heap_free(heap); - if (err == DB_SUCCESS && mode == BTR_MODIFY_TREE) { + if (err == DB_SUCCESS + && BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE) { ibuf_contract_after_insert(entry_size); } @@ -3775,22 +3718,24 @@ func_exit: return(err); } -/*********************************************************************//** -Buffer an operation in the insert/delete buffer, instead of doing it +/** Buffer an operation in the insert/delete buffer, instead of doing it directly to the disk page, if this is possible. Does not do it if the index is clustered or unique. 
-@return TRUE if success */ -UNIV_INTERN +@param[in] op operation type +@param[in] entry index entry to insert +@param[in,out] index index where to insert +@param[in] page_id page id where to insert +@param[in] page_size page size +@param[in,out] thr query thread +@return TRUE if success */ ibool ibuf_insert( -/*========*/ - ibuf_op_t op, /*!< in: operation type */ - const dtuple_t* entry, /*!< in: index entry to insert */ - dict_index_t* index, /*!< in: index where to insert */ - ulint space, /*!< in: space id where to insert */ - ulint zip_size,/*!< in: compressed page size in bytes, or 0 */ - ulint page_no,/*!< in: page number where to insert */ - que_thr_t* thr) /*!< in: query thread */ + ibuf_op_t op, + const dtuple_t* entry, + dict_index_t* index, + const page_id_t& page_id, + const page_size_t& page_size, + que_thr_t* thr) { dberr_t err; ulint entry_size; @@ -3800,11 +3745,11 @@ ibuf_insert( ibuf_use_t use = ibuf_use; DBUG_ENTER("ibuf_insert"); - DBUG_PRINT("ibuf", ("op: %d, space: %ld, page_no: %ld", - op, space, page_no)); + DBUG_PRINT("ibuf", ("op: %d, space: " UINT32PF ", page_no: " UINT32PF, + op, page_id.space(), page_id.page_no())); ut_ad(dtuple_check_typed(entry)); - ut_ad(ut_is_2pow(zip_size)); + ut_ad(page_id.space() != srv_tmp_space.space_id()); ut_a(!dict_index_is_clust(index)); @@ -3876,11 +3821,11 @@ check_watch: buf_pool_watch_set(space, page_no). */ { - buf_page_t* bpage; - buf_pool_t* buf_pool = buf_pool_get(space, page_no); - bpage = buf_page_get_also_watch(buf_pool, space, page_no); + buf_pool_t* buf_pool = buf_pool_get(page_id); + buf_page_t* bpage + = buf_page_get_also_watch(buf_pool, page_id); - if (UNIV_LIKELY_NULL(bpage)) { + if (bpage != NULL) { /* A buffer pool watch has been set or the page has been read into the buffer pool. Do not buffer the request. If a purge operation @@ -3903,11 +3848,11 @@ skip_watch: err = ibuf_insert_low(BTR_MODIFY_PREV, op, no_counter, entry, entry_size, - index, space, zip_size, page_no, thr); + index, page_id, page_size, thr); if (err == DB_FAIL) { - err = ibuf_insert_low(BTR_MODIFY_TREE, op, no_counter, - entry, entry_size, - index, space, zip_size, page_no, thr); + err = ibuf_insert_low(BTR_MODIFY_TREE | BTR_LATCH_FOR_INSERT, + op, no_counter, entry, entry_size, + index, page_id, page_size, thr); } if (err == DB_SUCCESS) { @@ -3943,9 +3888,6 @@ ibuf_insert_to_index_page_low( after which to insert the buffered entry */ { const page_t* page; - ulint space; - ulint page_no; - ulint zip_size; const page_t* bitmap_page; ulint old_bits; rec_t* rec; @@ -3977,34 +3919,27 @@ ibuf_insert_to_index_page_low( page = buf_block_get_frame(block); - ut_print_timestamp(stderr); + ib::error() << "Insert buffer insert fails; page free " + << page_get_max_insert_size(page, 1) << ", dtuple size " + << rec_get_converted_size(index, entry, 0); - fprintf(stderr, - " InnoDB: Error: Insert buffer insert fails;" - " page free %lu, dtuple size %lu\n", - (ulong) page_get_max_insert_size(page, 1), - (ulong) rec_get_converted_size(index, entry, 0)); fputs("InnoDB: Cannot insert index record ", stderr); dtuple_print(stderr, entry); fputs("\nInnoDB: The table where this index record belongs\n" "InnoDB: is now probably corrupt. 
Please run CHECK TABLE on\n" "InnoDB: that table.\n", stderr); - space = page_get_space_id(page); - zip_size = buf_block_get_zip_size(block); - page_no = page_get_page_no(page); + bitmap_page = ibuf_bitmap_get_map_page(block->page.id, + block->page.size, mtr); + old_bits = ibuf_bitmap_page_get_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_FREE, mtr); - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr); - old_bits = ibuf_bitmap_page_get_bits(bitmap_page, page_no, zip_size, - IBUF_BITMAP_FREE, mtr); + ib::error() << "page " << block->page.id << ", size " + << block->page.size.physical() << ", bitmap bits " << old_bits; - fprintf(stderr, - "InnoDB: space %lu, page %lu, zip_size %lu, bitmap bits %lu\n", - (ulong) space, (ulong) page_no, - (ulong) zip_size, (ulong) old_bits); + ib::error() << BUG_REPORT_MSG; - fputs("InnoDB: Submit a detailed bug report" - " to http://bugs.mysql.com\n", stderr); ut_ad(0); DBUG_RETURN(NULL); } @@ -4031,59 +3966,50 @@ ibuf_insert_to_index_page( DBUG_ENTER("ibuf_insert_to_index_page"); - DBUG_PRINT("ibuf", ("page_no: %ld", buf_block_get_page_no(block))); - DBUG_PRINT("ibuf", ("index name: %s", index->name)); - DBUG_PRINT("ibuf", ("online status: %d", - dict_index_get_online_status(index))); + DBUG_PRINT("ibuf", ("page " UINT32PF ":" UINT32PF, + block->page.id.space(), + block->page.id.page_no())); + ut_ad(!dict_index_is_online_ddl(index));// this is an ibuf_dummy index ut_ad(ibuf_inside(mtr)); ut_ad(dtuple_check_typed(entry)); ut_ad(!buf_block_align(page)->index); + ut_ad(mtr->is_named_space(block->page.id.space())); if (UNIV_UNLIKELY(dict_table_is_comp(index->table) != (ibool)!!page_is_comp(page))) { - fputs("InnoDB: Trying to insert a record from" - " the insert buffer to an index page\n" - "InnoDB: but the 'compact' flag does not match!\n", - stderr); + ib::warn() << "Trying to insert a record from the insert" + " buffer to an index page but the 'compact' flag does" + " not match!"; goto dump; } rec = page_rec_get_next(page_get_infimum_rec(page)); if (page_rec_is_supremum(rec)) { - fputs("InnoDB: Trying to insert a record from" - " the insert buffer to an index page\n" - "InnoDB: but the index page is empty!\n", - stderr); + ib::warn() << "Trying to insert a record from the insert" + " buffer to an index page but the index page" + " is empty!"; goto dump; } - if (UNIV_UNLIKELY(rec_get_n_fields(rec, index) - != dtuple_get_n_fields(entry))) { - fputs("InnoDB: Trying to insert a record from" - " the insert buffer to an index page\n" - "InnoDB: but the number of fields does not match!\n", - stderr); + if (!rec_n_fields_is_sane(index, rec, entry)) { + ib::warn() << "Trying to insert a record from the insert" + " buffer to an index page but the number of fields" + " does not match!"; + rec_print(stderr, rec, index); dump: - buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH); - dtuple_print(stderr, entry); ut_ad(0); - fputs("InnoDB: The table where where" - " this index record belongs\n" - "InnoDB: is now probably corrupt." - " Please run CHECK TABLE on\n" - "InnoDB: your tables.\n" - "InnoDB: Submit a detailed bug report to" - " http://bugs.mysql.com!\n", stderr); + ib::warn() << "The table where this index record belongs" + " is now probably corrupt. Please run CHECK TABLE on" + " your tables. 
" << BUG_REPORT_MSG; DBUG_VOID_RETURN; } - low_match = page_cur_search(block, index, entry, - PAGE_CUR_LE, &page_cur); + low_match = page_cur_search(block, index, entry, &page_cur); heap = mem_heap_create( sizeof(upd_t) @@ -4141,12 +4067,12 @@ dump: just write dummy trx_id(0), roll_ptr(0) */ btr_cur_update_in_place_log(BTR_KEEP_SYS_FLAG, rec, index, update, 0, 0, mtr); + DBUG_EXECUTE_IF( "crash_after_log_ibuf_upd_inplace", log_buffer_flush_to_disk(); - ib_logf(IB_LOG_LEVEL_INFO, - "Wrote log record for ibuf update in " - "place operation"); + ib::info() << "Wrote log record for ibuf" + " update in place operation"; DBUG_SUICIDE(); ); @@ -4214,8 +4140,7 @@ ibuf_set_del_mark( ut_ad(ibuf_inside(mtr)); ut_ad(dtuple_check_typed(entry)); - low_match = page_cur_search( - block, index, entry, PAGE_CUR_LE, &page_cur); + low_match = page_cur_search(block, index, entry, &page_cur); if (low_match == dtuple_get_n_fields(entry)) { rec_t* rec; @@ -4242,22 +4167,18 @@ ibuf_set_del_mark( const buf_block_t* block = page_cur_get_block(&page_cur); - ut_print_timestamp(stderr); - fputs(" InnoDB: unable to find a record to delete-mark\n", - stderr); + ib::error() << "Unable to find a record to delete-mark"; fputs("InnoDB: tuple ", stderr); dtuple_print(stderr, entry); fputs("\n" "InnoDB: record ", stderr); rec_print(stderr, page_cur_get_rec(&page_cur), index); - fprintf(stderr, "\nspace %u offset %u" - " (%u records, index id %llu)\n" - "InnoDB: Submit a detailed bug report" - " to http://bugs.mysql.com\n", - (unsigned) buf_block_get_space(block), - (unsigned) buf_block_get_page_no(block), - (unsigned) page_get_n_recs(page), - (ulonglong) btr_page_get_index_id(page)); + + ib::error() << "page " << block->page.id << " (" + << page_get_n_recs(page) << " records, index id " + << btr_page_get_index_id(page) << ")."; + + ib::error() << BUG_REPORT_MSG; ut_ad(0); } } @@ -4279,9 +4200,9 @@ ibuf_delete( ut_ad(ibuf_inside(mtr)); ut_ad(dtuple_check_typed(entry)); + ut_ad(!dict_index_is_spatial(index)); - low_match = page_cur_search( - block, index, entry, PAGE_CUR_LE, &page_cur); + low_match = page_cur_search(block, index, entry, &page_cur); if (low_match == dtuple_get_n_fields(entry)) { page_zip_des_t* page_zip= buf_block_get_page_zip(block); @@ -4306,20 +4227,18 @@ ibuf_delete( & rec_get_info_bits(rec, page_is_comp(page)))) { /* Refuse to purge the last record or a record that has not been marked for deletion. 
*/ - ut_print_timestamp(stderr); - fputs(" InnoDB: unable to purge a record\n", - stderr); + ib::error() << "Unable to purge a record"; fputs("InnoDB: tuple ", stderr); dtuple_print(stderr, entry); fputs("\n" "InnoDB: record ", stderr); rec_print_new(stderr, rec, offsets); - fprintf(stderr, "\nspace %u offset %u" + fprintf(stderr, "\nspace " UINT32PF " offset " UINT32PF " (%u records, index id %llu)\n" "InnoDB: Submit a detailed bug report" " to http://bugs.mysql.com\n", - (unsigned) buf_block_get_space(block), - (unsigned) buf_block_get_page_no(block), + block->page.id.space(), + block->page.id.page_no(), (unsigned) page_get_n_recs(page), (ulonglong) btr_page_get_index_id(page)); @@ -4358,7 +4277,7 @@ ibuf_delete( /*********************************************************************//** Restores insert buffer tree cursor position -@return TRUE if the position was restored; FALSE if not */ +@return TRUE if the position was restored; FALSE if not */ static MY_ATTRIBUTE((nonnull)) ibool ibuf_restore_pos( @@ -4373,7 +4292,8 @@ ibuf_restore_pos( position is to be restored */ mtr_t* mtr) /*!< in/out: mini-transaction */ { - ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_MODIFY_TREE); + ut_ad(mode == BTR_MODIFY_LEAF + || BTR_LATCH_MODE_WITHOUT_INTENTION(mode) == BTR_MODIFY_TREE); if (btr_pcur_restore_position(mode, pcur, mtr)) { @@ -4386,13 +4306,11 @@ ibuf_restore_pos( entry. Do not complain. */ ibuf_btr_pcur_commit_specify_mtr(pcur, mtr); } else { - fprintf(stderr, - "InnoDB: ERROR: Submit the output to" - " http://bugs.mysql.com\n" - "InnoDB: ibuf cursor restoration fails!\n" - "InnoDB: ibuf record inserted to page %lu:%lu\n", - (ulong) space, (ulong) page_no); - fflush(stderr); + ib::error() << "ibuf cursor restoration fails!." + " ibuf record inserted to page " + << space << ":" << page_no; + + ib::error() << BUG_REPORT_MSG; rec_print_old(stderr, btr_pcur_get_rec(pcur)); rec_print_old(stderr, pcur->old_rec); @@ -4400,10 +4318,8 @@ ibuf_restore_pos( rec_print_old(stderr, page_rec_get_next(btr_pcur_get_rec(pcur))); - fflush(stderr); - ibuf_btr_pcur_commit_specify_mtr(pcur, mtr); - ut_ad(0); + ib::fatal() << "Failed to restore ibuf position."; } return(FALSE); @@ -4413,7 +4329,7 @@ ibuf_restore_pos( Deletes from ibuf the record on which pcur is positioned. If we have to resort to a pessimistic delete, this function commits mtr and closes the cursor. -@return TRUE if mtr was committed and pcur closed in this operation */ +@return TRUE if mtr was committed and pcur closed in this operation */ static MY_ATTRIBUTE((warn_unused_result)) ibool ibuf_delete_rec( @@ -4446,8 +4362,9 @@ ibuf_delete_rec( an assertion failure after crash recovery. 
*/ btr_cur_set_deleted_flag_for_ibuf( btr_pcur_get_rec(pcur), NULL, TRUE, mtr); + ibuf_mtr_commit(mtr); - log_write_up_to(LSN_MAX, LOG_WAIT_ALL_GROUPS, TRUE); + log_write_up_to(LSN_MAX, true); DBUG_SUICIDE(); } #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ @@ -4455,6 +4372,8 @@ ibuf_delete_rec( success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), 0, mtr); + const page_id_t page_id(space, page_no); + if (success) { if (page_is_empty(btr_pcur_get_page(pcur))) { /* If a B-tree page is empty, it must be the root page @@ -4473,13 +4392,13 @@ ibuf_delete_rec( } #ifdef UNIV_IBUF_COUNT_DEBUG - fprintf(stderr, - "Decrementing ibuf count of space %lu page %lu\n" - "from %lu by 1\n", space, page_no, - ibuf_count_get(space, page_no)); - ibuf_count_set(space, page_no, - ibuf_count_get(space, page_no) - 1); -#endif + ib::info() << "Decrementing ibuf count of space " << space + << " page " << page_no << " from " + << ibuf_count_get(page_id) << " by 1"; + + ibuf_count_set(page_id, ibuf_count_get(page_id) - 1); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + return(FALSE); } @@ -4501,115 +4420,106 @@ ibuf_delete_rec( mutex_enter(&ibuf_mutex); if (!ibuf_restore_pos(space, page_no, search_tuple, - BTR_MODIFY_TREE, pcur, mtr)) { + BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE, + pcur, mtr)) { mutex_exit(&ibuf_mutex); - ut_ad(mtr->state == MTR_COMMITTED); + ut_ad(mtr->has_committed()); goto func_exit; } root = ibuf_tree_root_get(mtr); btr_cur_pessimistic_delete(&err, TRUE, btr_pcur_get_btr_cur(pcur), 0, - RB_NONE, mtr); + false, mtr); ut_a(err == DB_SUCCESS); #ifdef UNIV_IBUF_COUNT_DEBUG - ibuf_count_set(space, page_no, ibuf_count_get(space, page_no) - 1); -#endif - ibuf_size_update(root, mtr); + ibuf_count_set(page_id, ibuf_count_get(page_id) - 1); +#endif /* UNIV_IBUF_COUNT_DEBUG */ + + ibuf_size_update(root); mutex_exit(&ibuf_mutex); ibuf->empty = page_is_empty(root); ibuf_btr_pcur_commit_specify_mtr(pcur, mtr); func_exit: - ut_ad(mtr->state == MTR_COMMITTED); + ut_ad(mtr->has_committed()); btr_pcur_close(pcur); return(TRUE); } -/*********************************************************************//** -When an index page is read from a disk to the buffer pool, this function +/** When an index page is read from a disk to the buffer pool, this function applies any buffered operations to the page and deletes the entries from the insert buffer. If the page is not read, but created in the buffer pool, this function deletes its buffered entries from the insert buffer; there can exist entries for such a page if the page belonged to an index which -subsequently was dropped. */ -UNIV_INTERN +subsequently was dropped. 
+@param[in,out] block if page has been read from disk, +pointer to the page x-latched, else NULL +@param[in] page_id page id of the index page +@param[in] update_ibuf_bitmap normally this is set to TRUE, but +if we have deleted or are deleting the tablespace, then we naturally do not +want to update a non-existent bitmap page */ void ibuf_merge_or_delete_for_page( -/*==========================*/ - buf_block_t* block, /*!< in: if page has been read from - disk, pointer to the page x-latched, - else NULL */ - ulint space, /*!< in: space id of the index page */ - ulint page_no,/*!< in: page number of the index page */ - ulint zip_size,/*!< in: compressed page size in bytes, - or 0 */ - ibool update_ibuf_bitmap)/*!< in: normally this is set - to TRUE, but if we have deleted or are - deleting the tablespace, then we - naturally do not want to update a - non-existent bitmap page */ + buf_block_t* block, + const page_id_t& page_id, + const page_size_t* page_size, + ibool update_ibuf_bitmap) { mem_heap_t* heap; btr_pcur_t pcur; dtuple_t* search_tuple; #ifdef UNIV_IBUF_DEBUG ulint volume = 0; -#endif +#endif /* UNIV_IBUF_DEBUG */ page_zip_des_t* page_zip = NULL; - ibool tablespace_being_deleted = FALSE; - ibool corruption_noticed = FALSE; + fil_space_t* space = NULL; + bool corruption_noticed = false; mtr_t mtr; /* Counts for merged & discarded operations. */ ulint mops[IBUF_OP_COUNT]; ulint dops[IBUF_OP_COUNT]; - ut_ad(!block || buf_block_get_space(block) == space); - ut_ad(!block || buf_block_get_page_no(block) == page_no); - ut_ad(!block || buf_block_get_zip_size(block) == zip_size); - ut_ad(!block || buf_block_get_io_fix(block) == BUF_IO_READ); + ut_ad(block == NULL || page_id.equals_to(block->page.id)); + ut_ad(block == NULL || buf_block_get_io_fix(block) == BUF_IO_READ); if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE - || trx_sys_hdr_page(space, page_no)) { + || trx_sys_hdr_page(page_id) + || fsp_is_system_temporary(page_id.space())) { return; } - /* We cannot refer to zip_size in the following, because - zip_size is passed as ULINT_UNDEFINED (it is unknown) when - buf_read_ibuf_merge_pages() is merging (discarding) changes - for a dropped tablespace. When block != NULL or - update_ibuf_bitmap is specified, the zip_size must be known. - That is why we will repeat the check below, with zip_size in - place of 0. Passing zip_size as 0 assumes that the + /* We cannot refer to page_size in the following, because it is passed + as NULL (it is unknown) when buf_read_ibuf_merge_pages() is merging + (discarding) changes for a dropped tablespace. When block != NULL or + update_ibuf_bitmap is specified, then page_size must be known. + That is why we will repeat the check below, with page_size in + place of univ_page_size. Passing univ_page_size assumes that the uncompressed page size always is a power-of-2 multiple of the compressed page size. */ - if (ibuf_fixed_addr_page(space, 0, page_no) - || fsp_descr_page(0, page_no)) { + if (ibuf_fixed_addr_page(page_id, univ_page_size) + || fsp_descr_page(page_id, univ_page_size)) { return; } - if (UNIV_LIKELY(update_ibuf_bitmap)) { - ut_a(ut_is_2pow(zip_size)); + if (update_ibuf_bitmap) { + + ut_ad(page_size != NULL); - if (ibuf_fixed_addr_page(space, zip_size, page_no) - || fsp_descr_page(zip_size, page_no)) { + if (ibuf_fixed_addr_page(page_id, *page_size) + || fsp_descr_page(page_id, *page_size)) { return; } - /* If the following returns FALSE, we get the counter - incremented, and must decrement it when we leave this - function. 
When the counter is > 0, that prevents tablespace - from being dropped. */ + space = fil_space_acquire(page_id.space()); - tablespace_being_deleted = fil_inc_pending_ops(space, true); - - if (UNIV_UNLIKELY(tablespace_being_deleted)) { + if (space == NULL) { /* Do not try to read the bitmap page from space; just delete the ibuf records for the page */ @@ -4622,12 +4532,12 @@ ibuf_merge_or_delete_for_page( ibuf_mtr_start(&mtr); bitmap_page = ibuf_bitmap_get_map_page( - space, page_no, zip_size, &mtr); + page_id, *page_size, &mtr); if (bitmap_page && fil_page_get_type(bitmap_page) != FIL_PAGE_TYPE_ALLOCATED) { bitmap_bits = ibuf_bitmap_page_get_bits( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, *page_size, IBUF_BITMAP_BUFFERED, &mtr); } @@ -4636,25 +4546,23 @@ ibuf_merge_or_delete_for_page( if (!bitmap_bits) { /* No inserts buffered for this page */ - if (!tablespace_being_deleted) { - fil_decr_pending_ops(space); - } - + fil_space_release(space); return; } } - } else if (block - && (ibuf_fixed_addr_page(space, zip_size, page_no) - || fsp_descr_page(zip_size, page_no))) { + } else if (block != NULL + && (ibuf_fixed_addr_page(page_id, *page_size) + || fsp_descr_page(page_id, *page_size))) { return; } heap = mem_heap_create(512); - search_tuple = ibuf_search_tuple_build(space, page_no, heap); + search_tuple = ibuf_search_tuple_build( + page_id.space(), page_id.page_no(), heap); - if (block) { + if (block != NULL) { /* Move the ownership of the x-latch on the page to this OS thread, so that we can acquire a second x-latch on it. This is needed for the insert operations to the index page to pass @@ -4663,50 +4571,23 @@ ibuf_merge_or_delete_for_page( rw_lock_x_lock_move_ownership(&(block->lock)); page_zip = buf_block_get_page_zip(block); - if (UNIV_UNLIKELY(fil_page_get_type(block->frame) - != FIL_PAGE_INDEX) - || UNIV_UNLIKELY(!page_is_leaf(block->frame))) { - - page_t* bitmap_page; - - corruption_noticed = TRUE; - - ut_print_timestamp(stderr); - - ibuf_mtr_start(&mtr); - - fputs(" InnoDB: Dump of the ibuf bitmap page:\n", - stderr); + if (!fil_page_index_page_check(block->frame) + || !page_is_leaf(block->frame)) { - bitmap_page = ibuf_bitmap_get_map_page(space, page_no, - zip_size, &mtr); - buf_page_print(bitmap_page, 0, - BUF_PAGE_PRINT_NO_CRASH); - ibuf_mtr_commit(&mtr); - - fputs("\nInnoDB: Dump of the page:\n", stderr); + corruption_noticed = true; - buf_page_print(block->frame, 0, - BUF_PAGE_PRINT_NO_CRASH); + ib::error() << "Corruption in the tablespace. Bitmap" + " shows insert buffer records to page " + << page_id << " though the page type is " + << fil_page_get_type(block->frame) + << ", which is not an index leaf page. We try" + " to resolve the problem by skipping the" + " insert buffer merge for this page. Please" + " run CHECK TABLE on your tables to determine" + " if they are corrupt after this."; - fprintf(stderr, - "InnoDB: Error: corruption in the tablespace." - " Bitmap shows insert\n" - "InnoDB: buffer records to page n:o %lu" - " though the page\n" - "InnoDB: type is %lu, which is" - " not an index leaf page!\n" - "InnoDB: We try to resolve the problem" - " by skipping the insert buffer\n" - "InnoDB: merge for this page." 
- " Please run CHECK TABLE on your tables\n" - "InnoDB: to determine if they are corrupt" - " after this.\n\n" - "InnoDB: Please submit a detailed bug report" - " to http://bugs.mysql.com\n\n", - (ulong) page_no, - (ulong) - fil_page_get_type(block->frame)); + ib::error() << "Please submit a detailed bug" + " report to http://bugs.mysql.com"; ut_ad(0); } } @@ -4723,9 +4604,11 @@ loop: ibuf->index, search_tuple, PAGE_CUR_GE, BTR_MODIFY_LEAF, &pcur, &mtr); - if (block) { + if (block != NULL) { ibool success; + mtr.set_named_space(page_id.space()); + success = buf_page_get_known_nowait( RW_X_LATCH, block, BUF_KEEP_OLD, __FILE__, __LINE__, &mtr); @@ -4739,6 +4622,8 @@ loop: the block is io-fixed. Other threads must not try to latch an io-fixed block. */ buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE); + } else if (update_ibuf_bitmap) { + mtr.set_named_space(page_id.space()); } if (!btr_pcur_is_on_user_rec(&pcur)) { @@ -4755,10 +4640,10 @@ loop: rec = btr_pcur_get_rec(&pcur); /* Check if the entry is for this index page */ - if (ibuf_rec_get_page_no(&mtr, rec) != page_no - || ibuf_rec_get_space(&mtr, rec) != space) { + if (ibuf_rec_get_page_no(&mtr, rec) != page_id.page_no() + || ibuf_rec_get_space(&mtr, rec) != page_id.space()) { - if (block) { + if (block != NULL) { page_header_reset_last_insert( block->frame, page_zip, &mtr); } @@ -4766,11 +4651,11 @@ loop: goto reset_bit; } - if (UNIV_UNLIKELY(corruption_noticed)) { + if (corruption_noticed) { fputs("InnoDB: Discarding record\n ", stderr); rec_print_old(stderr, rec); fputs("\nInnoDB: from the insert buffer!\n\n", stderr); - } else if (block && !rec_get_deleted_flag(rec, 0)) { + } else if (block != NULL && !rec_get_deleted_flag(rec, 0)) { /* Now we have at pcur a record which should be applied on the index page; NOTE that the call below copies pointers to fields in rec, and we must @@ -4822,8 +4707,9 @@ loop: ut_ad(rec == btr_pcur_get_rec(&pcur)); ut_ad(page_rec_is_user_rec(rec)); ut_ad(ibuf_rec_get_page_no(&mtr, rec) - == page_no); - ut_ad(ibuf_rec_get_space(&mtr, rec) == space); + == page_id.page_no()); + ut_ad(ibuf_rec_get_space(&mtr, rec) + == page_id.space()); /* Mark the change buffer record processed, so that it will not be merged again in case @@ -4839,6 +4725,7 @@ loop: ibuf_btr_pcur_commit_specify_mtr(&pcur, &mtr); ibuf_mtr_start(&mtr); + mtr.set_named_space(page_id.space()); success = buf_page_get_known_nowait( RW_X_LATCH, block, @@ -4853,12 +4740,13 @@ loop: buf_block_dbg_add_level( block, SYNC_IBUF_TREE_NODE); - if (!ibuf_restore_pos(space, page_no, + if (!ibuf_restore_pos(page_id.space(), + page_id.page_no(), search_tuple, BTR_MODIFY_LEAF, &pcur, &mtr)) { - ut_ad(mtr.state == MTR_COMMITTED); + ut_ad(mtr.has_committed()); mops[op]++; ibuf_dummy_index_free(dummy_index); goto loop; @@ -4877,12 +4765,12 @@ loop: } /* Delete the record from ibuf */ - if (ibuf_delete_rec(space, page_no, &pcur, search_tuple, - &mtr)) { + if (ibuf_delete_rec(page_id.space(), page_id.page_no(), + &pcur, search_tuple, &mtr)) { /* Deletion was pessimistic and mtr was committed: we start from the beginning again */ - ut_ad(mtr.state == MTR_COMMITTED); + ut_ad(mtr.has_committed()); goto loop; } else if (btr_pcur_is_after_last_on_page(&pcur)) { ibuf_mtr_commit(&mtr); @@ -4893,27 +4781,26 @@ loop: } reset_bit: - if (UNIV_LIKELY(update_ibuf_bitmap)) { + if (update_ibuf_bitmap) { page_t* bitmap_page; - bitmap_page = ibuf_bitmap_get_map_page( - space, page_no, zip_size, &mtr); + bitmap_page = ibuf_bitmap_get_map_page(page_id, *page_size, + &mtr); 
ibuf_bitmap_page_set_bits( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, *page_size, IBUF_BITMAP_BUFFERED, FALSE, &mtr); - if (block) { + if (block != NULL) { ulint old_bits = ibuf_bitmap_page_get_bits( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, *page_size, IBUF_BITMAP_FREE, &mtr); - ulint new_bits = ibuf_index_page_calc_free( - zip_size, block); + ulint new_bits = ibuf_index_page_calc_free(block); if (old_bits != new_bits) { ibuf_bitmap_page_set_bits( - bitmap_page, page_no, zip_size, + bitmap_page, page_id, *page_size, IBUF_BITMAP_FREE, new_bits, &mtr); } } @@ -4923,37 +4810,24 @@ reset_bit: btr_pcur_close(&pcur); mem_heap_free(heap); -#ifdef HAVE_ATOMIC_BUILTINS os_atomic_increment_ulint(&ibuf->n_merges, 1); ibuf_add_ops(ibuf->n_merged_ops, mops); ibuf_add_ops(ibuf->n_discarded_ops, dops); -#else /* HAVE_ATOMIC_BUILTINS */ - /* Protect our statistics keeping from race conditions */ - mutex_enter(&ibuf_mutex); - - ibuf->n_merges++; - ibuf_add_ops(ibuf->n_merged_ops, mops); - ibuf_add_ops(ibuf->n_discarded_ops, dops); - - mutex_exit(&ibuf_mutex); -#endif /* HAVE_ATOMIC_BUILTINS */ - if (update_ibuf_bitmap && !tablespace_being_deleted) { - - fil_decr_pending_ops(space); + if (space != NULL) { + fil_space_release(space); } #ifdef UNIV_IBUF_COUNT_DEBUG - ut_a(ibuf_count_get(space, page_no) == 0); + ut_a(ibuf_count_get(page_id) == 0); #endif } /*********************************************************************//** Deletes all entries in the insert buffer for a given space id. This is used -in DISCARD TABLESPACE and IMPORT TABLESPACE. +in DISCARD TABLESPACE, IMPORT TABLESPACE and TRUNCATE TABLESPACE. NOTE: this does not update the page free bitmaps in the space. The space will become CORRUPT when you call this function! */ -UNIV_INTERN void ibuf_delete_for_discarded_space( /*============================*/ @@ -5013,7 +4887,7 @@ loop: /* Deletion was pessimistic and mtr was committed: we start from the beginning again */ - ut_ad(mtr.state == MTR_COMMITTED); + ut_ad(mtr.has_committed()); goto loop; } @@ -5029,22 +4903,14 @@ leave_loop: ibuf_mtr_commit(&mtr); btr_pcur_close(&pcur); -#ifdef HAVE_ATOMIC_BUILTINS - ibuf_add_ops(ibuf->n_discarded_ops, dops); -#else /* HAVE_ATOMIC_BUILTINS */ - /* Protect our statistics keeping from race conditions */ - mutex_enter(&ibuf_mutex); ibuf_add_ops(ibuf->n_discarded_ops, dops); - mutex_exit(&ibuf_mutex); -#endif /* HAVE_ATOMIC_BUILTINS */ mem_heap_free(heap); } /******************************************************************//** Looks if the insert buffer is empty. -@return true if empty */ -UNIV_INTERN +@return true if empty */ bool ibuf_is_empty(void) /*===============*/ @@ -5068,7 +4934,6 @@ ibuf_is_empty(void) /******************************************************************//** Prints info of ibuf. */ -UNIV_INTERN void ibuf_print( /*=======*/ @@ -5098,7 +4963,7 @@ ibuf_print( #ifdef UNIV_IBUF_COUNT_DEBUG for (i = 0; i < IBUF_COUNT_N_SPACES; i++) { for (j = 0; j < IBUF_COUNT_N_PAGES; j++) { - ulint count = ibuf_count_get(i, j); + ulint count = ibuf_count_get(page_id_t(i, j, 0)); if (count > 0) { fprintf(stderr, @@ -5116,24 +4981,23 @@ ibuf_print( /******************************************************************//** Checks the insert buffer bitmaps on IMPORT TABLESPACE. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t ibuf_check_bitmap_on_import( /*========================*/ const trx_t* trx, /*!< in: transaction */ ulint space_id) /*!< in: tablespace identifier */ { - ulint zip_size; - ulint page_size; ulint size; ulint page_no; ut_ad(space_id); ut_ad(trx->mysql_thd); - zip_size = fil_space_get_zip_size(space_id); + bool found; + const page_size_t& page_size + = fil_space_get_page_size(space_id, &found); - if (zip_size == ULINT_UNDEFINED) { + if (!found) { return(DB_TABLE_NOT_FOUND); } @@ -5145,9 +5009,13 @@ ibuf_check_bitmap_on_import( mutex_enter(&ibuf_mutex); - page_size = zip_size ? zip_size : UNIV_PAGE_SIZE; + /* The two bitmap pages (allocation bitmap and ibuf bitmap) repeat + every page_size pages. For example if page_size is 16 KiB, then the + two bitmap pages repeat every 16 KiB * 16384 = 256 MiB. In the loop + below page_no is measured in number of pages since the beginning of + the space, as usual. */ - for (page_no = 0; page_no < size; page_no += page_size) { + for (page_no = 0; page_no < size; page_no += page_size.physical()) { mtr_t mtr; page_t* bitmap_page; ulint i; @@ -5164,14 +5032,40 @@ ibuf_check_bitmap_on_import( ibuf_enter(&mtr); bitmap_page = ibuf_bitmap_get_map_page( - space_id, page_no, zip_size, &mtr); + page_id_t(space_id, page_no), page_size, &mtr); + + if (buf_page_is_zeroes(bitmap_page, page_size)) { + /* This means we got all-zero page instead of + ibuf bitmap page. The subsequent page should be + all-zero pages. */ +#ifdef UNIV_DEBUG + for (ulint curr_page = page_no + 1; + curr_page < page_size.physical(); curr_page++) { + + buf_block_t* block = buf_page_get( + page_id_t(space_id, curr_page), + page_size, + RW_S_LATCH, &mtr); + page_t* page = buf_block_get_frame(block); + ut_ad(buf_page_is_zeroes(page, page_size)); + } +#endif /* UNIV_DEBUG */ + ibuf_exit(&mtr); + mtr_commit(&mtr); + continue; + } + + for (i = FSP_IBUF_BITMAP_OFFSET + 1; + i < page_size.physical(); + i++) { - for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < page_size; i++) { const ulint offset = page_no + i; + const page_id_t cur_page_id(space_id, offset); + if (ibuf_bitmap_page_get_bits( - bitmap_page, offset, zip_size, - IBUF_BITMAP_IBUF, &mtr)) { + bitmap_page, cur_page_id, page_size, + IBUF_BITMAP_IBUF, &mtr)) { mutex_exit(&ibuf_mutex); ibuf_exit(&mtr); @@ -5190,7 +5084,7 @@ ibuf_check_bitmap_on_import( } if (ibuf_bitmap_page_get_bits( - bitmap_page, offset, zip_size, + bitmap_page, cur_page_id, page_size, IBUF_BITMAP_BUFFERED, &mtr)) { ib_errf(trx->mysql_thd, @@ -5205,7 +5099,7 @@ ibuf_check_bitmap_on_import( slightly corrupted tables can be imported and dumped. Clear the bit. */ ibuf_bitmap_page_set_bits( - bitmap_page, offset, zip_size, + bitmap_page, cur_page_id, page_size, IBUF_BITMAP_BUFFERED, FALSE, &mtr); } } @@ -5217,4 +5111,39 @@ ibuf_check_bitmap_on_import( mutex_exit(&ibuf_mutex); return(DB_SUCCESS); } + +/** Updates free bits and buffered bits for bulk loaded page. +@param[in] block index page +@param[in] reset flag if reset free val */ +void +ibuf_set_bitmap_for_bulk_load( + buf_block_t* block, + bool reset) +{ + page_t* bitmap_page; + mtr_t mtr; + ulint free_val; + + ut_a(page_is_leaf(buf_block_get_frame(block))); + + free_val = ibuf_index_page_calc_free(block); + + mtr_start(&mtr); + mtr.set_named_space(block->page.id.space()); + + bitmap_page = ibuf_bitmap_get_map_page(block->page.id, + block->page.size, &mtr); + + free_val = reset ? 
0 : ibuf_index_page_calc_free(block); + ibuf_bitmap_page_set_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_FREE, free_val, &mtr); + + ibuf_bitmap_page_set_bits( + bitmap_page, block->page.id, block->page.size, + IBUF_BITMAP_BUFFERED, FALSE, &mtr); + + mtr_commit(&mtr); +} + #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/api0api.h b/storage/innobase/include/api0api.h index 500bf4fe3b2..127f8e1949e 100644 --- a/storage/innobase/include/api0api.h +++ b/storage/innobase/include/api0api.h @@ -30,16 +30,11 @@ InnoDB Native API #include "db0err.h" #include -#ifdef _MSC_VER -#define strncasecmp _strnicmp -#define strcasecmp _stricmp -#endif - -#if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER) +#if defined(__GNUC__) #define UNIV_NO_IGNORE MY_ATTRIBUTE ((warn_unused_result)) #else #define UNIV_NO_IGNORE -#endif /* __GNUC__ && __GNUC__ > 2 && !__INTEL_COMPILER */ +#endif /* __GNUC__ */ /* See comment about ib_bool_t as to why the two macros are unsigned long. */ /** The boolean value of "true" used internally within InnoDB */ @@ -53,7 +48,11 @@ typedef enum dberr_t ib_err_t; /** Representation of a byte within InnoDB */ typedef unsigned char ib_byte_t; /** Representation of an unsigned long int within InnoDB */ +#ifdef _WIN64 +typedef unsigned __int64 ib_ulint_t; +#else typedef unsigned long int ib_ulint_t; +#endif /* _WIN64 */ /* We assume C99 support except when using VisualStudio. */ #if !defined(_MSC_VER) @@ -324,25 +323,6 @@ typedef struct { ib_charset_t* charset; /*!< Column charset */ } ib_col_meta_t; -/* Note: Must be in sync with trx0trx.h */ -/** @enum ib_trx_state_t The transaction state can be queried using the -ib_trx_state() function. The InnoDB deadlock monitor can roll back a -transaction and users should be prepared for this, especially where there -is high contention. The way to determine the state of the transaction is to -query it's state and check. */ -typedef enum { - IB_TRX_NOT_STARTED, /*!< Has not started yet, the - transaction has not ben started yet.*/ - - IB_TRX_ACTIVE, /*!< The transaction is currently - active and needs to be either - committed or rolled back. */ - - IB_TRX_COMMITTED_IN_MEMORY, /*!< Not committed to disk yet */ - - IB_TRX_PREPARED /*!< Support for 2PC/XA */ -} ib_trx_state_t; - /* Note: Must be in sync with trx0trx.h */ /** @enum ib_trx_level_t Transaction isolation levels */ typedef enum { @@ -416,11 +396,11 @@ typedef struct ib_cursor_t* ib_crsr_t; This function is used to compare two data fields for which the data type is such that we must use the client code to compare them. -@param col_meta column meta data -@param p1 key +@param col_meta column meta data +@param p1 key @oaram p1_len key length -@param p2 second key -@param p2_len second key length +@param p2 second key +@param p2_len second key length @return 1, 0, -1, if a is greater, equal, less than b, respectively */ typedef int (*ib_client_cmp_t)( @@ -433,18 +413,6 @@ typedef int (*ib_client_cmp_t)( /* This should be the same as univ.i */ /** Represents SQL_NULL length */ #define IB_SQL_NULL 0xFFFFFFFF -/** The number of system columns in a row. */ -#define IB_N_SYS_COLS 3 - -/** The maximum length of a text column. */ -#define MAX_TEXT_LEN 4096 - -/* MySQL uses 3 byte UTF-8 encoding. */ -/** The maximum length of a column name in a table schema. */ -#define IB_MAX_COL_NAME_LEN (64 * 3) - -/** The maximum length of a table name (plus database name). 
*/ -#define IB_MAX_TABLE_NAME_LEN (64 * 3) * 2 /*****************************************************************//** Start a transaction that's been rolled back. This special function @@ -453,8 +421,7 @@ a transaction. While the transaction has been rolled back the handle is still valid and can be reused by calling this function. If you don't want to reuse the transaction handle then you can free the handle by calling ib_trx_release(). -@return innobase txn handle */ - +@return innobase txn handle */ ib_err_t ib_trx_start( /*=========*/ @@ -469,8 +436,7 @@ ib_trx_start( /*****************************************************************//** Begin a transaction. This will allocate a new transaction handle and put the transaction in the active state. -@return innobase txn handle */ - +@return innobase txn handle */ ib_trx_t ib_trx_begin( /*=========*/ @@ -480,21 +446,6 @@ ib_trx_begin( ib_bool_t auto_commit); /*!< in: auto commit after each single DML */ -/*****************************************************************//** -Query the transaction's state. This function can be used to check for -the state of the transaction in case it has been rolled back by the -InnoDB deadlock detector. Note that when a transaction is selected as -a victim for rollback, InnoDB will always return an appropriate error -code indicating this. @see DB_DEADLOCK, @see DB_LOCK_TABLE_FULL and -@see DB_LOCK_WAIT_TIMEOUT -@return transaction state */ - -ib_trx_state_t -ib_trx_state( -/*=========*/ - ib_trx_t ib_trx); /*!< in: trx handle */ - - /*****************************************************************//** Check if the transaction is read_only */ ib_u32_t @@ -506,8 +457,7 @@ ib_trx_read_only( Release the resources of the transaction. If the transaction was selected as a victim by InnoDB and rolled back then use this function to free the transaction handle. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_trx_release( /*===========*/ @@ -516,8 +466,7 @@ ib_trx_release( /*****************************************************************//** Commit a transaction. This function will release the schema latches too. It will also free the transaction handle. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_trx_commit( /*==========*/ @@ -526,8 +475,7 @@ ib_trx_commit( /*****************************************************************//** Rollback a transaction. This function will release the schema latches too. It will also free the transaction handle. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_trx_rollback( /*============*/ @@ -535,8 +483,7 @@ ib_trx_rollback( /*****************************************************************//** Open an InnoDB table and return a cursor handle to it. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_open_table_using_id( /*==========================*/ @@ -545,22 +492,9 @@ ib_cursor_open_table_using_id( can be NULL */ ib_crsr_t* ib_crsr); /*!< out,own: InnoDB cursor */ -/*****************************************************************//** -Open an InnoDB index and return a cursor handle to it. 
-@return DB_SUCCESS or err code */ - -ib_err_t -ib_cursor_open_index_using_id( -/*==========================*/ - ib_id_u64_t index_id, /*!< in: index id of index to open */ - ib_trx_t ib_trx, /*!< in: Current transaction handle - can be NULL */ - ib_crsr_t* ib_crsr); /*!< out: InnoDB cursor */ - /*****************************************************************//** Open an InnoDB secondary index cursor and return a cursor handle to it. @return DB_SUCCESS or err code */ - ib_err_t ib_cursor_open_index_using_name( /*============================*/ @@ -572,8 +506,7 @@ ib_cursor_open_index_using_name( /*****************************************************************//** Open an InnoDB table by name and return a cursor handle to it. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_open_table( /*=================*/ @@ -584,26 +517,15 @@ ib_cursor_open_table( /*****************************************************************//** Reset the cursor. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_reset( /*============*/ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */ - -/*****************************************************************//** -set a cursor trx to NULL*/ - -void -ib_cursor_clear_trx( -/*================*/ - ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */ - /*****************************************************************//** Close an InnoDB table and free the cursor. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_close( /*============*/ @@ -612,7 +534,6 @@ ib_cursor_close( /*****************************************************************//** Close the table, decrement n_ref_count count. @return DB_SUCCESS or err code */ - ib_err_t ib_cursor_close_table( /*==================*/ @@ -621,7 +542,6 @@ ib_cursor_close_table( /*****************************************************************//** update the cursor with new transactions and also reset the cursor @return DB_SUCCESS or err code */ - ib_err_t ib_cursor_new_trx( /*==============*/ @@ -631,26 +551,15 @@ ib_cursor_new_trx( /*****************************************************************//** Commit the transaction in a cursor @return DB_SUCCESS or err code */ - ib_err_t ib_cursor_commit_trx( /*=================*/ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor */ ib_trx_t ib_trx); /*!< in: transaction */ -/********************************************************************//** -Open a table using the table name, if found then increment table ref count. -@return table instance if found */ - -void* -ib_open_table_by_name( -/*==================*/ - const char* name); /*!< in: table name to lookup */ - /*****************************************************************//** Insert a row to a table. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_insert_row( /*=================*/ @@ -659,8 +568,7 @@ ib_cursor_insert_row( /*****************************************************************//** Update a row in a table. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_update_row( /*=================*/ @@ -670,8 +578,7 @@ ib_cursor_update_row( /*****************************************************************//** Delete a row in a table. 
-@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_delete_row( /*=================*/ @@ -679,8 +586,7 @@ ib_cursor_delete_row( /*****************************************************************//** Read current row. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_read_row( /*===============*/ @@ -691,26 +597,15 @@ ib_cursor_read_row( /*****************************************************************//** Move cursor to the first record in the table. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_first( /*============*/ ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */ -/*****************************************************************//** -Move cursor to the last record in the table. -@return DB_SUCCESS or err code */ - -ib_err_t -ib_cursor_last( -/*===========*/ - ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */ - /*****************************************************************//** Move cursor to the next record in the table. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_next( /*===========*/ @@ -718,8 +613,7 @@ ib_cursor_next( /*****************************************************************//** Search for key. -@return DB_SUCCESS or err code */ - +@return DB_SUCCESS or err code */ ib_err_t ib_cursor_moveto( /*=============*/ @@ -729,7 +623,6 @@ ib_cursor_moveto( /*****************************************************************//** Set the match mode for ib_cursor_move(). */ - void ib_cursor_set_match_mode( /*=====================*/ @@ -738,8 +631,7 @@ ib_cursor_set_match_mode( /*****************************************************************//** Set a column of the tuple. Make a copy using the tuple's heap. -@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_col_set_value( /*=============*/ @@ -752,8 +644,7 @@ ib_col_set_value( /*****************************************************************//** Get the size of the data available in the column the tuple. -@return bytes avail or IB_SQL_NULL */ - +@return bytes avail or IB_SQL_NULL */ ib_ulint_t ib_col_get_len( /*===========*/ @@ -762,8 +653,7 @@ ib_col_get_len( /*****************************************************************//** Copy a column value from the tuple. -@return bytes copied or IB_SQL_NULL */ - +@return bytes copied or IB_SQL_NULL */ ib_ulint_t ib_col_copy_value( /*==============*/ @@ -774,8 +664,7 @@ ib_col_copy_value( /*************************************************************//** Read a signed int 8 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i8( /*=============*/ @@ -785,8 +674,7 @@ ib_tuple_read_i8( /*************************************************************//** Read an unsigned int 8 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u8( /*=============*/ @@ -796,8 +684,7 @@ ib_tuple_read_u8( /*************************************************************//** Read a signed int 16 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i16( /*==============*/ @@ -807,8 +694,7 @@ ib_tuple_read_i16( /*************************************************************//** Read an unsigned int 16 bit column from an InnoDB tuple. 
-@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u16( /*==============*/ @@ -818,8 +704,7 @@ ib_tuple_read_u16( /*************************************************************//** Read a signed int 32 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i32( /*==============*/ @@ -829,8 +714,7 @@ ib_tuple_read_i32( /*************************************************************//** Read an unsigned int 32 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u32( /*==============*/ @@ -840,8 +724,7 @@ ib_tuple_read_u32( /*************************************************************//** Read a signed int 64 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_i64( /*==============*/ @@ -851,8 +734,7 @@ ib_tuple_read_i64( /*************************************************************//** Read an unsigned int 64 bit column from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_u64( /*==============*/ @@ -862,8 +744,7 @@ ib_tuple_read_u64( /*****************************************************************//** Get a column value pointer from the tuple. -@return NULL or pointer to buffer */ - +@return NULL or pointer to buffer */ const void* ib_col_get_value( /*=============*/ @@ -872,8 +753,7 @@ ib_col_get_value( /*****************************************************************//** Get a column type, length and attributes from the tuple. -@return len of column data */ - +@return len of column data */ ib_ulint_t ib_col_get_meta( /*============*/ @@ -883,8 +763,7 @@ ib_col_get_meta( /*****************************************************************//** "Clear" or reset an InnoDB tuple. We free the heap and recreate the tuple. -@return new tuple, or NULL */ - +@return new tuple, or NULL */ ib_tpl_t ib_tuple_clear( /*============*/ @@ -894,8 +773,7 @@ ib_tuple_clear( Create a new cluster key search tuple and copy the contents of the secondary index key tuple columns that refer to the cluster index record to the cluster key. It does a deep copy of the column data. -@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_tuple_get_cluster_key( /*=====================*/ @@ -903,21 +781,9 @@ ib_tuple_get_cluster_key( ib_tpl_t* ib_dst_tpl, /*!< out,own: destination tuple */ const ib_tpl_t ib_src_tpl); /*!< in: source tuple */ -/*****************************************************************//** -Copy the contents of source tuple to destination tuple. The tuples -must be of the same type and belong to the same table/index. -@return DB_SUCCESS or error code */ - -ib_err_t -ib_tuple_copy( -/*==========*/ - ib_tpl_t ib_dst_tpl, /*!< in: destination tuple */ - const ib_tpl_t ib_src_tpl); /*!< in: source tuple */ - /*****************************************************************//** Create an InnoDB tuple used for index/table search. @return tuple for current index */ - ib_tpl_t ib_sec_search_tuple_create( /*=======================*/ @@ -925,8 +791,7 @@ ib_sec_search_tuple_create( /*****************************************************************//** Create an InnoDB tuple used for index/table search. 
-@return tuple for current index */ - +@return tuple for current index */ ib_tpl_t ib_sec_read_tuple_create( /*=====================*/ @@ -934,8 +799,7 @@ ib_sec_read_tuple_create( /*****************************************************************//** Create an InnoDB tuple used for table key operations. -@return tuple for current table */ - +@return tuple for current table */ ib_tpl_t ib_clust_search_tuple_create( /*=========================*/ @@ -943,8 +807,7 @@ ib_clust_search_tuple_create( /*****************************************************************//** Create an InnoDB tuple for table row operations. -@return tuple for current table */ - +@return tuple for current table */ ib_tpl_t ib_clust_read_tuple_create( /*=======================*/ @@ -952,8 +815,7 @@ ib_clust_read_tuple_create( /*****************************************************************//** Return the number of user columns in the tuple definition. -@return number of user columns */ - +@return number of user columns */ ib_ulint_t ib_tuple_get_n_user_cols( /*=====================*/ @@ -961,8 +823,7 @@ ib_tuple_get_n_user_cols( /*****************************************************************//** Return the number of columns in the tuple definition. -@return number of columns */ - +@return number of columns */ ib_ulint_t ib_tuple_get_n_cols( /*================*/ @@ -970,7 +831,6 @@ ib_tuple_get_n_cols( /*****************************************************************//** Destroy an InnoDB tuple. */ - void ib_tuple_delete( /*============*/ @@ -979,8 +839,7 @@ ib_tuple_delete( /*****************************************************************//** Truncate a table. The cursor handle will be closed and set to NULL on success. -@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_truncate( /*===============*/ @@ -990,29 +849,16 @@ ib_cursor_truncate( /*****************************************************************//** Get a table id. -@return DB_SUCCESS if found */ - +@return DB_SUCCESS if found */ ib_err_t ib_table_get_id( /*============*/ const char* table_name, /*!< in: table to find */ ib_id_u64_t* table_id); /*!< out: table id if found */ -/*****************************************************************//** -Get an index id. -@return DB_SUCCESS if found */ - -ib_err_t -ib_index_get_id( -/*============*/ - const char* table_name, /*!< in: find index for this table */ - const char* index_name, /*!< in: index to find */ - ib_id_u64_t* index_id); /*!< out: index id if found */ - /*****************************************************************//** Check if cursor is positioned. -@return IB_TRUE if positioned */ - +@return IB_TRUE if positioned */ ib_bool_t ib_cursor_is_positioned( /*====================*/ @@ -1022,7 +868,6 @@ ib_cursor_is_positioned( Checks if the data dictionary is latched in exclusive mode by a user transaction. @return TRUE if exclusive latch */ - ib_bool_t ib_schema_lock_is_exclusive( /*========================*/ @@ -1030,8 +875,7 @@ ib_schema_lock_is_exclusive( /*****************************************************************//** Lock an InnoDB cursor/table. -@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_lock( /*===========*/ @@ -1040,8 +884,7 @@ ib_cursor_lock( /*****************************************************************//** Set the Lock an InnoDB table using the table id. 
-@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_table_lock( /*===========*/ @@ -1051,8 +894,7 @@ ib_table_lock( /*****************************************************************//** Set the Lock mode of the cursor. -@return DB_SUCCESS or error code */ - +@return DB_SUCCESS or error code */ ib_err_t ib_cursor_set_lock_mode( /*====================*/ @@ -1061,111 +903,13 @@ ib_cursor_set_lock_mode( /*****************************************************************//** Set need to access clustered index record flag. */ - void ib_cursor_set_cluster_access( /*=========================*/ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */ -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_i8( -/*==============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i8_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_i16( -/*=================*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i16_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_i32( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i32_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_i64( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_i64_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_u8( -/*==============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u8_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_u16( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u16_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. 
-@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_u32( -/*=================*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u32_t val); /*!< in: value to write */ - -/*****************************************************************//** -Write an integer value to a column. Integers are stored in big-endian -format and will need to be converted from the host format. -@return DB_SUCESS or error */ - -ib_err_t -ib_tuple_write_u64( -/*===============*/ - ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */ - int col_no, /*!< in: column number */ - ib_u64_t val); /*!< in: value to write */ - /*****************************************************************//** Inform the cursor that it's the start of an SQL statement. */ - void ib_cursor_stmt_begin( /*=================*/ @@ -1173,8 +917,7 @@ ib_cursor_stmt_begin( /*****************************************************************//** Write a double value to a column. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_write_double( /*==================*/ @@ -1184,8 +927,7 @@ ib_tuple_write_double( /*************************************************************//** Read a double column value from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_double( /*=================*/ @@ -1195,8 +937,7 @@ ib_tuple_read_double( /*****************************************************************//** Write a float value to a column. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_write_float( /*=================*/ @@ -1206,8 +947,7 @@ ib_tuple_write_float( /*************************************************************//** Read a float value from an InnoDB tuple. -@return DB_SUCCESS or error */ - +@return DB_SUCCESS or error */ ib_err_t ib_tuple_read_float( /*================*/ @@ -1218,7 +958,6 @@ ib_tuple_read_float( /*****************************************************************//** Get a column type, length and attributes from the tuple. @return len of column data */ - const char* ib_col_get_name( /*============*/ @@ -1228,7 +967,6 @@ ib_col_get_name( /*****************************************************************//** Get an index field name from the cursor. @return name of the field */ - const char* ib_get_idx_field_name( /*==================*/ @@ -1238,28 +976,15 @@ ib_get_idx_field_name( /*****************************************************************//** Truncate a table. @return DB_SUCCESS or error code */ - ib_err_t ib_table_truncate( /*==============*/ const char* table_name, /*!< in: table name */ ib_id_u64_t* table_id); /*!< out: new table id */ -/*****************************************************************//** -Frees a possible InnoDB trx object associated with the current THD. -@return DB_SUCCESS or error number */ - -ib_err_t -ib_close_thd( -/*=========*/ - void* thd); /*!< in: handle to the MySQL - thread of the user whose resources - should be free'd */ - /*****************************************************************//** Get generic configure status @return configure status*/ - int ib_cfg_get_cfg(); /*============*/ @@ -1274,28 +999,16 @@ ib_cursor_set_memcached_sync( ib_crsr_t ib_crsr, /*!< in: cursor */ ib_bool_t flag); /*!< in: true for increasing */ -/*****************************************************************//** -Check whether the table name conforms to our requirements. Currently -we only do a simple check for the presence of a '/'. 
-@return DB_SUCCESS or err code */ - -ib_err_t -ib_table_name_check( -/*================*/ - const char* name); /*!< in: table name to check */ - /*****************************************************************//** Return isolation configuration set by "innodb_api_trx_level" @return trx isolation level*/ - -ib_trx_state_t +ib_trx_level_t ib_cfg_trx_level(); /*==============*/ /*****************************************************************//** Return configure value for background commit interval (in seconds) @return background commit interval (in seconds) */ - ib_ulint_t ib_cfg_bk_commit_interval(); /*=======================*/ @@ -1303,10 +1016,18 @@ ib_cfg_bk_commit_interval(); /*****************************************************************//** Get a trx start time. @return trx start_time */ - ib_u64_t ib_trx_get_start_time( /*==================*/ ib_trx_t ib_trx); /*!< in: transaction */ +/*****************************************************************//** +Wrapper of ut_strerr() which converts an InnoDB error number to a +human readable text message. +@return string, describing the error */ +const char* +ib_ut_strerr( +/*=========*/ + ib_err_t num); /*!< in: error number */ + #endif /* api0api_h */ diff --git a/storage/innobase/include/api0misc.h b/storage/innobase/include/api0misc.h index fcd748390d1..84ac3d622a9 100644 --- a/storage/innobase/include/api0misc.h +++ b/storage/innobase/include/api0misc.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2008, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -49,9 +49,8 @@ extern ulong ib_bk_commit_interval; /******************************************************************** Handles user errors and lock waits detected by the database engine. -@return TRUE if it was a lock wait and we should continue running +@return TRUE if it was a lock wait and we should continue running the query thread */ -UNIV_INTERN ibool ib_handle_errors( /*=============*/ @@ -66,8 +65,7 @@ ib_handle_errors( /************************************************************************* Sets a lock on a table. -@return error code or DB_SUCCESS */ -UNIV_INTERN +@return error code or DB_SUCCESS */ dberr_t ib_trx_lock_table_with_retry( /*=========================*/ diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index bf3f4a76301..e350e01ff5b 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -35,6 +35,7 @@ Created 6/2/1994 Heikki Tuuri #include "page0cur.h" #include "mtr0mtr.h" #include "btr0types.h" +#include "gis0type.h" #ifndef UNIV_HOTBACKUP /** Maximum record size which can be stored on a page, without using the @@ -67,7 +68,11 @@ enum btr_latch_mode { /** Search the previous record. */ BTR_SEARCH_PREV = 35, /** Modify the previous record. */ - BTR_MODIFY_PREV = 36 + BTR_MODIFY_PREV = 36, + /** Start searching the entire B-tree. */ + BTR_SEARCH_TREE = 37, + /** Continue searching the entire B-tree. */ + BTR_CONT_SEARCH_TREE = 38 }; /* BTR_INSERT, BTR_DELETE and BTR_DELETE_MARK are mutually exclusive. */ @@ -98,18 +103,47 @@ buffer when the record is not in the buffer pool. 
*/ already holding an S latch on the index tree */ #define BTR_ALREADY_S_LATCHED 16384 +/** In the case of BTR_MODIFY_TREE, the caller specifies the intention +to insert record only. It is used to optimize block->lock range.*/ +#define BTR_LATCH_FOR_INSERT 32768 + +/** In the case of BTR_MODIFY_TREE, the caller specifies the intention +to delete record only. It is used to optimize block->lock range.*/ +#define BTR_LATCH_FOR_DELETE 65536 + +/** This flag is for undo insert of rtree. For rtree, we need this flag +to find proper rec to undo insert.*/ +#define BTR_RTREE_UNDO_INS 131072 + +/** In the case of BTR_MODIFY_LEAF, the caller intends to allocate or +free the pages of externally stored fields. */ +#define BTR_MODIFY_EXTERNAL 262144 + +/** Try to delete mark the record at the searched position when the +record is in spatial index */ +#define BTR_RTREE_DELETE_MARK 524288 + #define BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode) \ ((latch_mode) & ~(BTR_INSERT \ | BTR_DELETE_MARK \ + | BTR_RTREE_UNDO_INS \ + | BTR_RTREE_DELETE_MARK \ | BTR_DELETE \ | BTR_ESTIMATE \ | BTR_IGNORE_SEC_UNIQUE \ - | BTR_ALREADY_S_LATCHED)) + | BTR_ALREADY_S_LATCHED \ + | BTR_LATCH_FOR_INSERT \ + | BTR_LATCH_FOR_DELETE \ + | BTR_MODIFY_EXTERNAL)) + +#define BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode) \ + ((latch_mode) & ~(BTR_LATCH_FOR_INSERT \ + | BTR_LATCH_FOR_DELETE \ + | BTR_MODIFY_EXTERNAL)) #endif /* UNIV_HOTBACKUP */ /**************************************************************//** Report that an index page is corrupted. */ -UNIV_INTERN void btr_corruption_report( /*==================*/ @@ -128,95 +162,9 @@ btr_corruption_report( } #ifndef UNIV_HOTBACKUP -#ifdef UNIV_BLOB_DEBUG -# include "ut0rbt.h" -/** An index->blobs entry for keeping track of off-page column references */ -struct btr_blob_dbg_t -{ - unsigned blob_page_no:32; /*!< first BLOB page number */ - unsigned ref_page_no:32; /*!< referring page number */ - unsigned ref_heap_no:16; /*!< referring heap number */ - unsigned ref_field_no:10; /*!< referring field number */ - unsigned owner:1; /*!< TRUE if BLOB owner */ - unsigned always_owner:1; /*!< TRUE if always - has been the BLOB owner; - reset to TRUE on B-tree - page splits and merges */ - unsigned del:1; /*!< TRUE if currently - delete-marked */ -}; - -/**************************************************************//** -Add a reference to an off-page column to the index->blobs map. */ -UNIV_INTERN -void -btr_blob_dbg_add_blob( -/*==================*/ - const rec_t* rec, /*!< in: clustered index record */ - ulint field_no, /*!< in: number of off-page column */ - ulint page_no, /*!< in: start page of the column */ - dict_index_t* index, /*!< in/out: index tree */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Display the references to off-page columns. -This function is to be called from a debugger, -for example when a breakpoint on ut_dbg_assertion_failed is hit. */ -UNIV_INTERN -void -btr_blob_dbg_print( -/*===============*/ - const dict_index_t* index) /*!< in: index tree */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Check that there are no references to off-page columns from or to -the given page. Invoked when freeing or clearing a page. 
-@return TRUE when no orphan references exist */ -UNIV_INTERN -ibool -btr_blob_dbg_is_empty( -/*==================*/ - dict_index_t* index, /*!< in: index */ - ulint page_no) /*!< in: page number */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - -/**************************************************************//** -Modify the 'deleted' flag of a record. */ -UNIV_INTERN -void -btr_blob_dbg_set_deleted_flag( -/*==========================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ - ibool del) /*!< in: TRUE=deleted, FALSE=exists */ - MY_ATTRIBUTE((nonnull)); /**************************************************************//** -Change the ownership of an off-page column. */ -UNIV_INTERN -void -btr_blob_dbg_owner( -/*===============*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: rec_get_offs(rec, index) */ - ulint i, /*!< in: ith field in rec */ - ibool own) /*!< in: TRUE=owned, FALSE=disowned */ - MY_ATTRIBUTE((nonnull)); -/** Assert that there are no BLOB references to or from the given page. */ -# define btr_blob_dbg_assert_empty(index, page_no) \ - ut_a(btr_blob_dbg_is_empty(index, page_no)) -#else /* UNIV_BLOB_DEBUG */ -# define btr_blob_dbg_add_blob(rec, field_no, page, index, ctx) ((void) 0) -# define btr_blob_dbg_set_deleted_flag(rec, index, offsets, del)((void) 0) -# define btr_blob_dbg_owner(rec, index, offsets, i, val) ((void) 0) -# define btr_blob_dbg_assert_empty(index, page_no) ((void) 0) -#endif /* UNIV_BLOB_DEBUG */ - -/**************************************************************//** -Gets the root node of a tree and x-latches it. -@return root page, x-latched */ -UNIV_INTERN +Gets the root node of a tree and sx-latches it for segment access. +@return root page, sx-latched */ page_t* btr_root_get( /*=========*/ @@ -227,7 +175,6 @@ btr_root_get( /**************************************************************//** Checks and adjusts the root node of a tree during IMPORT TABLESPACE. @return error code, or DB_SUCCESS */ -UNIV_INTERN dberr_t btr_root_adjust_on_import( /*======================*/ @@ -238,70 +185,70 @@ btr_root_adjust_on_import( Gets the height of the B-tree (the level of the root, when the leaf level is assumed to be 0). The caller must hold an S or X latch on the index. -@return tree height (level of the root) */ -UNIV_INTERN +@return tree height (level of the root) */ ulint btr_height_get( /*===========*/ dict_index_t* index, /*!< in: index tree */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/**************************************************************//** -Gets a buffer page and declares its latching order level. */ + +/** Gets a buffer page and declares its latching order level. 
+@param[in] page_id page id +@param[in] mode latch mode +@param[in] file file name +@param[in] line line where called +@param[in] index index tree, may be NULL if it is not an insert buffer +tree +@param[in,out] mtr mini-transaction +@return block */ UNIV_INLINE buf_block_t* btr_block_get_func( -/*===============*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no, /*!< in: page number */ - ulint mode, /*!< in: latch mode */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - dict_index_t* index, /*!< in: index tree, may be NULL - if it is not an insert buffer tree */ - mtr_t* mtr); /*!< in/out: mini-transaction */ -# ifdef UNIV_SYNC_DEBUG + const page_id_t& page_id, + const page_size_t& page_size, + ulint mode, + const char* file, + ulint line, + dict_index_t* index, + mtr_t* mtr); + +# ifdef UNIV_DEBUG /** Gets a buffer page and declares its latching order level. -@param space tablespace identifier -@param zip_size compressed page size in bytes or 0 for uncompressed pages -@param page_no page number -@param mode latch mode -@param index index tree, may be NULL if not the insert buffer tree -@param mtr mini-transaction handle +@param page_id tablespace/page identifier +@param page_size page size +@param mode latch mode +@param index index tree, may be NULL if not the insert buffer tree +@param mtr mini-transaction handle @return the block descriptor */ -# define btr_block_get(space,zip_size,page_no,mode,index,mtr) \ - btr_block_get_func(space,zip_size,page_no,mode, \ - __FILE__,__LINE__,index,mtr) -# else /* UNIV_SYNC_DEBUG */ +# define btr_block_get(page_id, page_size, mode, index, mtr) \ + btr_block_get_func(page_id, page_size, mode, \ + __FILE__, __LINE__, (dict_index_t*)index, mtr) +# else /* UNIV_DEBUG */ /** Gets a buffer page and declares its latching order level. -@param space tablespace identifier -@param zip_size compressed page size in bytes or 0 for uncompressed pages -@param page_no page number -@param mode latch mode -@param idx index tree, may be NULL if not the insert buffer tree -@param mtr mini-transaction handle +@param page_id tablespace/page identifier +@param page_size page size +@param mode latch mode +@param index index tree, may be NULL if not the insert buffer tree +@param mtr mini-transaction handle @return the block descriptor */ -# define btr_block_get(space,zip_size,page_no,mode,idx,mtr) \ - btr_block_get_func(space,zip_size,page_no,mode, \ - __FILE__,__LINE__,idx,mtr) -# endif /* UNIV_SYNC_DEBUG */ +# define btr_block_get(page_id, page_size, mode, index, mtr) \ + btr_block_get_func(page_id, page_size, mode, __FILE__, __LINE__, (dict_index_t*)index, mtr) +# endif /* UNIV_DEBUG */ /** Gets a buffer page and declares its latching order level. 
-@param space tablespace identifier -@param zip_size compressed page size in bytes or 0 for uncompressed pages -@param page_no page number -@param mode latch mode -@param idx index tree, may be NULL if not the insert buffer tree -@param mtr mini-transaction handle +@param page_id tablespace/page identifier +@param page_size page size +@param mode latch mode +@param index index tree, may be NULL if not the insert buffer tree +@param mtr mini-transaction handle @return the uncompressed page frame */ -# define btr_page_get(space,zip_size,page_no,mode,idx,mtr) \ - buf_block_get_frame(btr_block_get(space,zip_size,page_no, \ - mode,idx,mtr)) +# define btr_page_get(page_id, page_size, mode, index, mtr) \ + buf_block_get_frame(btr_block_get(page_id, page_size, \ + mode, index, mtr)) #endif /* !UNIV_HOTBACKUP */ /**************************************************************//** Gets the index id field of a page. -@return index id */ +@return index id */ UNIV_INLINE index_id_t btr_page_get_index_id( @@ -311,7 +258,7 @@ btr_page_get_index_id( #ifndef UNIV_HOTBACKUP /********************************************************//** Gets the node level field in an index page. -@return level, leaf level == 0 */ +@return level, leaf level == 0 */ UNIV_INLINE ulint btr_page_get_level_low( @@ -321,7 +268,7 @@ btr_page_get_level_low( #define btr_page_get_level(page, mtr) btr_page_get_level_low(page) /********************************************************//** Gets the next index page number. -@return next page number */ +@return next page number */ UNIV_INLINE ulint btr_page_get_next( @@ -331,7 +278,7 @@ btr_page_get_next( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************//** Gets the previous index page number. -@return prev page number */ +@return prev page number */ UNIV_INLINE ulint btr_page_get_prev( @@ -339,30 +286,6 @@ btr_page_get_prev( const page_t* page, /*!< in: index page */ mtr_t* mtr) /*!< in: mini-transaction handle */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*************************************************************//** -Gets pointer to the previous user record in the tree. It is assumed -that the caller has appropriate latches on the page and its neighbor. -@return previous user record, NULL if there is none */ -UNIV_INTERN -rec_t* -btr_get_prev_user_rec( -/*==================*/ - rec_t* rec, /*!< in: record on leaf level */ - mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if - needed, also to the previous page */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*************************************************************//** -Gets pointer to the next user record in the tree. It is assumed -that the caller has appropriate latches on the page and its neighbor. -@return next user record, NULL if there is none */ -UNIV_INTERN -rec_t* -btr_get_next_user_rec( -/*==================*/ - rec_t* rec, /*!< in: record on leaf level */ - mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if - needed, also to the next page */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); /**************************************************************//** Releases the latch on a leaf page and bufferunfixes it. */ UNIV_INLINE @@ -380,7 +303,7 @@ NOTE: the offsets array must contain all offsets for the record since we read the last field according to offsets and assume that it contains the child page number. In other words offsets must have been retrieved with rec_get_offsets(n_fields=ULINT_UNDEFINED). 
-@return child node address */ +@return child node address */ UNIV_INLINE ulint btr_node_ptr_get_child_page_no( @@ -388,52 +311,55 @@ btr_node_ptr_get_child_page_no( const rec_t* rec, /*!< in: node pointer record */ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); -/************************************************************//** -Creates the root node for a new index tree. -@return page number of the created root, FIL_NULL if did not succeed */ -UNIV_INTERN + +/** Create the root node for a new index tree. +@param[in] type type of the index +@param[in] space space where created +@param[in] page_size page size +@param[in] index_id index id +@param[in] index index, or NULL when applying TRUNCATE +log record during recovery +@param[in] btr_redo_create_info used for applying TRUNCATE log +@param[in] mtr mini-transaction handle +record during recovery +@return page number of the created root, FIL_NULL if did not succeed */ ulint btr_create( -/*=======*/ - ulint type, /*!< in: type of the index */ - ulint space, /*!< in: space where created */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - index_id_t index_id,/*!< in: index id */ - dict_index_t* index, /*!< in: index */ - mtr_t* mtr) /*!< in: mini-transaction handle */ - MY_ATTRIBUTE((nonnull)); -/************************************************************//** -Frees a B-tree except the root page, which MUST be freed after this -by calling btr_free_root. */ -UNIV_INTERN + ulint type, + ulint space, + const page_size_t& page_size, + index_id_t index_id, + dict_index_t* index, + const btr_create_t* btr_redo_create_info, + mtr_t* mtr); + +/** Free a persistent index tree if it exists. +@param[in] page_id root page id +@param[in] page_size page size +@param[in] index_id PAGE_INDEX_ID contents +@param[in,out] mtr mini-transaction */ void -btr_free_but_not_root( -/*==================*/ - ulint space, /*!< in: space where created */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint root_page_no); /*!< in: root page number */ -/************************************************************//** -Frees the B-tree root page. Other tree MUST already have been freed. */ -UNIV_INTERN +btr_free_if_exists( + const page_id_t& page_id, + const page_size_t& page_size, + index_id_t index_id, + mtr_t* mtr); + +/** Free an index tree in a temporary tablespace or during TRUNCATE TABLE. +@param[in] page_id root page id +@param[in] page_size page size */ void -btr_free_root( -/*==========*/ - ulint space, /*!< in: space where created */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint root_page_no, /*!< in: root page number */ - mtr_t* mtr) /*!< in/out: mini-transaction */ - MY_ATTRIBUTE((nonnull)); +btr_free( + const page_id_t& page_id, + const page_size_t& page_size); + /*************************************************************//** Makes tree one level higher by splitting the root, and inserts the tuple. It is assumed that mtr contains an x-latch on the tree. NOTE that the operation of this function must always succeed, we cannot reverse it: therefore enough free disk space must be guaranteed to be available before this function is called. 
-@return inserted record */ -UNIV_INTERN +@return inserted record */ rec_t* btr_root_raise_and_insert( /*======================*/ @@ -448,7 +374,7 @@ btr_root_raise_and_insert( const dtuple_t* tuple, /*!< in: tuple to insert */ ulint n_ext, /*!< in: number of externally stored columns */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull(2,3,4,7), warn_unused_result)); + __attribute__((warn_unused_result)); /*************************************************************//** Reorganizes an index page. @@ -460,7 +386,6 @@ IBUF_BITMAP_FREE is unaffected by reorganization. @retval true if the operation was successful @retval false if it is a compressed page, and recompression failed */ -UNIV_INTERN bool btr_page_reorganize_low( /*====================*/ @@ -486,7 +411,6 @@ IBUF_BITMAP_FREE is unaffected by reorganization. @retval true if the operation was successful @retval false if it is a compressed page, and recompression failed */ -UNIV_INTERN bool btr_page_reorganize( /*================*/ @@ -497,8 +421,7 @@ btr_page_reorganize( /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to left. -@return TRUE if split recommended */ -UNIV_INTERN +@return TRUE if split recommended */ ibool btr_page_get_split_rec_to_left( /*===========================*/ @@ -510,8 +433,7 @@ btr_page_get_split_rec_to_left( /*************************************************************//** Decides if the page should be split at the convergence point of inserts converging to right. -@return TRUE if split recommended */ -UNIV_INTERN +@return TRUE if split recommended */ ibool btr_page_get_split_rec_to_right( /*============================*/ @@ -520,6 +442,7 @@ btr_page_get_split_rec_to_right( the first record on upper half page, or NULL if tuple should be first */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /*************************************************************//** Splits an index page to halves and inserts the tuple. It is assumed that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is @@ -529,7 +452,6 @@ free disk space (2 pages) must be guaranteed to be available before this function is called. @return inserted record */ -UNIV_INTERN rec_t* btr_page_split_and_insert( /*======================*/ @@ -543,11 +465,10 @@ btr_page_split_and_insert( const dtuple_t* tuple, /*!< in: tuple to insert */ ulint n_ext, /*!< in: number of externally stored columns */ mtr_t* mtr) /*!< in: mtr */ - __attribute__((nonnull(2,3,4,7), warn_unused_result)); + __attribute__((warn_unused_result)); /*******************************************************//** Inserts a data tuple to a tree on a non-leaf level. It is assumed that mtr holds an x-latch on the tree. */ -UNIV_INTERN void btr_insert_on_non_leaf_level_func( /*==============================*/ @@ -558,13 +479,12 @@ btr_insert_on_non_leaf_level_func( const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in: mtr */ - MY_ATTRIBUTE((nonnull)); + MY_ATTRIBUTE((nonnull(4,5))); # define btr_insert_on_non_leaf_level(f,i,l,t,m) \ btr_insert_on_non_leaf_level_func(f,i,l,t,__FILE__,__LINE__,m) #endif /* !UNIV_HOTBACKUP */ /****************************************************************//** Sets a record as the predefined minimum record. 
*/ -UNIV_INTERN void btr_set_min_rec_mark( /*=================*/ @@ -574,7 +494,6 @@ btr_set_min_rec_mark( #ifndef UNIV_HOTBACKUP /*************************************************************//** Deletes on the upper level the node pointer to a page. */ -UNIV_INTERN void btr_node_ptr_delete( /*================*/ @@ -585,8 +504,7 @@ btr_node_ptr_delete( #ifdef UNIV_DEBUG /************************************************************//** Checks that the node pointer to a page is appropriate. -@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool btr_check_node_ptr( /*===============*/ @@ -604,8 +522,7 @@ level lifts the records of the page to the father page, thus reducing the tree height. It is assumed that mtr holds an x-latch on the tree and on the page. If cursor is on the leaf level, mtr must also hold x-latches to the brothers, if they exist. -@return TRUE on success */ -UNIV_INTERN +@return TRUE on success */ ibool btr_compress( /*=========*/ @@ -621,7 +538,6 @@ btr_compress( Discards a page from a B-tree. This is used to remove the last record from a B-tree page: the whole page must be removed at the same time. This cannot be used for the root page, which is allowed to be empty. */ -UNIV_INTERN void btr_discard_page( /*=============*/ @@ -633,8 +549,7 @@ btr_discard_page( /****************************************************************//** Parses the redo log record for setting an index record as the predefined minimum record. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_parse_set_min_rec_mark( /*=======================*/ @@ -646,8 +561,7 @@ btr_parse_set_min_rec_mark( MY_ATTRIBUTE((nonnull(1,2), warn_unused_result)); /***********************************************************//** Parses a redo log record of reorganizing a page. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_parse_page_reorganize( /*======================*/ @@ -661,8 +575,7 @@ btr_parse_page_reorganize( #ifndef UNIV_HOTBACKUP /**************************************************************//** Gets the number of pages in a B-tree. -@return number of pages, or ULINT_UNDEFINED if the index is unavailable */ -UNIV_INTERN +@return number of pages, or ULINT_UNDEFINED if the index is unavailable */ ulint btr_get_size( /*=========*/ @@ -693,7 +606,6 @@ that the caller has made the reservation for free extents! @retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -UNIV_INTERN buf_block_t* btr_page_alloc( /*===========*/ @@ -712,7 +624,6 @@ btr_page_alloc( /**************************************************************//** Frees a file page used in an index tree. NOTE: cannot free field external storage pages because the page must contain info on its level. */ -UNIV_INTERN void btr_page_free( /*==========*/ @@ -721,19 +632,39 @@ btr_page_free( mtr_t* mtr) /*!< in: mtr */ MY_ATTRIBUTE((nonnull)); /**************************************************************//** +Creates a new index page (not the root, and also not +used in page reorganization). @see btr_page_empty(). 
*/ +void +btr_page_create( +/*============*/ + buf_block_t* block, /*!< in/out: page to be created */ + page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */ + dict_index_t* index, /*!< in: index */ + ulint level, /*!< in: the B-tree level of the page */ + mtr_t* mtr); /*!< in: mtr */ +/**************************************************************//** Frees a file page used in an index tree. Can be used also to BLOB -external storage pages, because the page level 0 can be given as an -argument. */ -UNIV_INTERN +external storage pages. */ void btr_page_free_low( /*==============*/ dict_index_t* index, /*!< in: index tree */ buf_block_t* block, /*!< in: block to be freed, x-latched */ - ulint level, /*!< in: page level */ + ulint level, /*!< in: page level (ULINT_UNDEFINED=BLOB) */ bool blob, /*!< in: blob page */ mtr_t* mtr) /*!< in: mtr */ __attribute__((nonnull)); +/**************************************************************//** +Gets the root node of a tree and x- or s-latches it. +@return root page, x- or s-latched */ +buf_block_t* +btr_root_block_get( +/*===============*/ + const dict_index_t* index, /*!< in: index tree */ + ulint mode, /*!< in: either RW_S_LATCH + or RW_X_LATCH */ + mtr_t* mtr); /*!< in: mtr */ + /*************************************************************//** Reorganizes an index page. @@ -784,8 +715,7 @@ btr_print_index( /************************************************************//** Checks the size and number of fields in a record based on the definition of the index. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool btr_index_rec_validate( /*===================*/ @@ -802,31 +732,10 @@ UNIV_INTERN dberr_t btr_validate_index( /*===============*/ - dict_index_t* index, /*!< in: index */ - const trx_t* trx) /*!< in: transaction or 0 */ - MY_ATTRIBUTE((nonnull(1), warn_unused_result)); - -#ifdef UNIV_SYNC_DEBUG -/*************************************************************//** -Removes a page from the level list of pages. -@param space in: space where removed -@param zip_size in: compressed page size in bytes, or 0 for uncompressed -@param page in/out: page to remove -@param index in: index tree -@param mtr in/out: mini-transaction */ -# define btr_level_list_remove(space,zip_size,page,index,mtr) \ - btr_level_list_remove_func(space,zip_size,page,index,mtr) -#else /* UNIV_SYNC_DEBUG */ -/*************************************************************//** -Removes a page from the level list of pages. -@param space in: space where removed -@param zip_size in: compressed page size in bytes, or 0 for uncompressed -@param page in/out: page to remove -@param index in: index tree -@param mtr in/out: mini-transaction */ -# define btr_level_list_remove(space,zip_size,page,index,mtr) \ - btr_level_list_remove_func(space,zip_size,page,index,mtr) -#endif /* UNIV_SYNC_DEBUG */ + dict_index_t* index, /*!< in: index */ + const trx_t* trx, /*!< in: transaction or 0 */ + bool lockout)/*!< in: true if X-latch index is intended */ + __attribute__((warn_unused_result)); /*************************************************************//** Removes a page from the level list of pages. 
*/ @@ -835,11 +744,19 @@ void btr_level_list_remove_func( /*=======================*/ ulint space, /*!< in: space where removed */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ + const page_size_t& page_size,/*!< in: page size */ page_t* page, /*!< in/out: page to remove */ dict_index_t* index, /*!< in: index tree */ mtr_t* mtr); /*!< in/out: mini-transaction */ +/*************************************************************//** +Removes a page from the level list of pages. +@param space in: space where removed +@param zip_size in: compressed page size in bytes, or 0 for uncompressed +@param page in/out: page to remove +@param index in: index tree +@param mtr in/out: mini-transaction */ +# define btr_level_list_remove(space,zip_size,page,index,mtr) \ + btr_level_list_remove_func(space,zip_size,page,index,mtr) /*************************************************************//** If page is the only on its level, this function moves its records to the diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic index 64b3d5a0975..58a0c6755b1 100644 --- a/storage/innobase/include/btr0btr.ic +++ b/storage/innobase/include/btr0btr.ic @@ -36,28 +36,31 @@ Created 6/2/1994 Heikki Tuuri in btr_page_set_level and btr_page_get_level_low */ -/**************************************************************//** -Gets a buffer page and declares its latching order level. */ +/** Gets a buffer page and declares its latching order level. +@param[in] page_id page id +@param[in] mode latch mode +@param[in] file file name +@param[in] line line where called +@param[in] index index tree, may be NULL if it is not an insert buffer +tree +@param[in,out] mtr mini-transaction +@return block */ UNIV_INLINE buf_block_t* btr_block_get_func( -/*===============*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no, /*!< in: page number */ - ulint mode, /*!< in: latch mode */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - dict_index_t* index, /*!< in: index tree, may be NULL - if it is not an insert buffer tree */ - mtr_t* mtr) /*!< in/out: mtr */ + const page_id_t& page_id, + const page_size_t& page_size, + ulint mode, + const char* file, + ulint line, + dict_index_t* index, + mtr_t* mtr) { buf_block_t* block; - dberr_t err; + dberr_t err=DB_SUCCESS; - block = buf_page_get_gen(space, zip_size, page_no, mode, - NULL, BUF_GET, file, line, mtr, &err); + block = buf_page_get_gen( + page_id, page_size, mode, NULL, BUF_GET, file, line, mtr, &err); if (err == DB_DECRYPTION_FAILED) { index->table->is_encrypted = true; @@ -100,7 +103,7 @@ btr_page_set_index_id( /**************************************************************//** Gets the index id field of a page. -@return index id */ +@return index id */ UNIV_INLINE index_id_t btr_page_get_index_id( @@ -113,7 +116,7 @@ btr_page_get_index_id( #ifndef UNIV_HOTBACKUP /********************************************************//** Gets the node level field in an index page. 
-@return level, leaf level == 0 */ +@return level, leaf level == 0 */ UNIV_INLINE ulint btr_page_get_level_low( @@ -143,7 +146,8 @@ btr_page_set_level( ulint level, /*!< in: level, leaf level == 0 */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ut_ad(page && mtr); + ut_ad(page != NULL); + ut_ad(mtr != NULL); ut_ad(level <= BTR_MAX_NODE_LEVEL); if (page_zip) { @@ -159,7 +163,7 @@ btr_page_set_level( /********************************************************//** Gets the next index page number. -@return next page number */ +@return next page number */ UNIV_INLINE ulint btr_page_get_next( @@ -170,10 +174,7 @@ btr_page_get_next( { ut_ad(page != NULL); ut_ad(mtr != NULL); -#ifndef UNIV_INNOCHECKSUM - ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX) - || mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_S_FIX)); -#endif /* UNIV_INNOCHECKSUM */ + return(mach_read_from_4(page + FIL_PAGE_NEXT)); } @@ -202,7 +203,7 @@ btr_page_set_next( /********************************************************//** Gets the previous index page number. -@return prev page number */ +@return prev page number */ UNIV_INLINE ulint btr_page_get_prev( @@ -245,7 +246,7 @@ NOTE: the offsets array must contain all offsets for the record since we read the last field according to offsets and assume that it contains the child page number. In other words offsets must have been retrieved with rec_get_offsets(n_fields=ULINT_UNDEFINED). -@return child node address */ +@return child node address */ UNIV_INLINE ulint btr_node_ptr_get_child_page_no( @@ -266,15 +267,7 @@ btr_node_ptr_get_child_page_no( ut_ad(len == 4); page_no = mach_read_from_4(field); - - if (page_no == 0) { - fprintf(stderr, - "InnoDB: a nonsensical page number 0" - " in a node ptr record at offset %lu\n", - (ulong) page_offset(rec)); - buf_page_print(page_align(rec), 0, 0); - ut_ad(0); - } + ut_ad(page_no > 1); return(page_no); } @@ -290,12 +283,27 @@ btr_leaf_page_release( BTR_MODIFY_LEAF */ mtr_t* mtr) /*!< in: mtr */ { - ut_ad(latch_mode == BTR_SEARCH_LEAF || latch_mode == BTR_MODIFY_LEAF); + ut_ad(latch_mode == BTR_SEARCH_LEAF + || latch_mode == BTR_MODIFY_LEAF + || latch_mode == BTR_NO_LATCHES); + ut_ad(!mtr_memo_contains(mtr, block, MTR_MEMO_MODIFY)); - mtr_memo_release(mtr, block, - latch_mode == BTR_SEARCH_LEAF - ? MTR_MEMO_PAGE_S_FIX - : MTR_MEMO_PAGE_X_FIX); + ulint mode; + switch (latch_mode) { + case BTR_SEARCH_LEAF: + mode = MTR_MEMO_PAGE_S_FIX; + break; + case BTR_MODIFY_LEAF: + mode = MTR_MEMO_PAGE_X_FIX; + break; + case BTR_NO_LATCHES: + mode = MTR_MEMO_BUF_FIX; + break; + default: + ut_a(0); + } + + mtr->memo_release(block, mode); } #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h new file mode 100644 index 00000000000..64ffa89c0ae --- /dev/null +++ b/storage/innobase/include/btr0bulk.h @@ -0,0 +1,392 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/********************************************************************//** +@file include/btr0bulk.h +The B-tree bulk load + +Created 03/11/2014 Shaohua Wang +*************************************************************************/ + +#ifndef btr0bulk_h +#define btr0bulk_h + +#include "dict0dict.h" +#include "page0cur.h" +#include "ut0new.h" + +#include + +/** Innodb B-tree index fill factor for bulk load. */ +extern long innobase_fill_factor; + +/* +The proper function call sequence of PageBulk is as below: +-- PageBulk::init +-- PageBulk::insert +-- PageBulk::finish +-- PageBulk::compress(COMPRESSED table only) +-- PageBulk::pageSplit(COMPRESSED table only) +-- PageBulk::commit +*/ + +class PageBulk +{ +public: + /** Constructor + @param[in] index B-tree index + @param[in] page_no page number + @param[in] level page level + @param[in] trx_id transaction id + @param[in] observer flush observer */ + PageBulk( + dict_index_t* index, + trx_id_t trx_id, + ulint page_no, + ulint level, + FlushObserver* observer) + : + m_heap(NULL), + m_index(index), + m_mtr(NULL), + m_trx_id(trx_id), + m_block(NULL), + m_page(NULL), + m_page_zip(NULL), + m_cur_rec(NULL), + m_page_no(page_no), + m_level(level), + m_is_comp(dict_table_is_comp(index->table)), + m_heap_top(NULL), + m_rec_no(0), + m_free_space(0), + m_reserved_space(0), +#ifdef UNIV_DEBUG + m_total_data(0), +#endif /* UNIV_DEBUG */ + m_modify_clock(0), + m_flush_observer(observer), + m_err(DB_SUCCESS) + { + ut_ad(!dict_index_is_spatial(m_index)); + } + + /** Deconstructor */ + ~PageBulk() + { + mem_heap_free(m_heap); + } + + /** Initialize members and allocate page if needed and start mtr. + Note: must be called and only once right after constructor. + @return error code */ + dberr_t init(); + + /** Insert a record in the page. + @param[in] rec record + @param[in] offsets record offsets */ + void insert(const rec_t* rec, ulint* offsets); + + /** Mark end of insertion to the page. Scan all records to set page + dirs, and set page header members. */ + void finish(); + + /** Commit mtr for a page + @param[in] success Flag whether all inserts succeed. */ + void commit(bool success); + + /** Compress if it is compressed table + @return true compress successfully or no need to compress + @return false compress failed. */ + bool compress(); + + /** Check whether the record needs to be stored externally. + @return true + @return false */ + bool needExt(const dtuple_t* tuple, ulint rec_size); + + /** Store external record + @param[in] big_rec external recrod + @param[in] offsets record offsets + @return error code */ + dberr_t storeExt(const big_rec_t* big_rec, ulint* offsets); + + /** Get node pointer + @return node pointer */ + dtuple_t* getNodePtr(); + + /** Get split rec in the page. We split a page in half when compresssion + fails, and the split rec should be copied to the new page. + @return split rec */ + rec_t* getSplitRec(); + + /** Copy all records after split rec including itself. + @param[in] rec split rec */ + void copyIn(rec_t* split_rec); + + /** Remove all records after split rec including itself. 
+ @param[in] rec split rec */ + void copyOut(rec_t* split_rec); + + /** Set next page + @param[in] next_page_no next page no */ + void setNext(ulint next_page_no); + + /** Set previous page + @param[in] prev_page_no previous page no */ + void setPrev(ulint prev_page_no); + + /** Release block by commiting mtr */ + inline void release(); + + /** Start mtr and latch block */ + inline dberr_t latch(); + + /** Check if required space is available in the page for the rec + to be inserted. We check fill factor & padding here. + @param[in] length required length + @return true if space is available */ + inline bool isSpaceAvailable(ulint rec_size); + + /** Get page no */ + ulint getPageNo() + { + return(m_page_no); + } + + /** Get page level */ + ulint getLevel() + { + return(m_level); + } + + /** Get record no */ + ulint getRecNo() + { + return(m_rec_no); + } + + /** Get page */ + page_t* getPage() + { + return(m_page); + } + + /** Get page zip */ + page_zip_des_t* getPageZip() + { + return(m_page_zip); + } + + dberr_t getError() + { + return(m_err); + } + + /* Memory heap for internal allocation */ + mem_heap_t* m_heap; + +private: + /** The index B-tree */ + dict_index_t* m_index; + + /** The min-transaction */ + mtr_t* m_mtr; + + /** The transaction id */ + trx_id_t m_trx_id; + + /** The buffer block */ + buf_block_t* m_block; + + /** The page */ + page_t* m_page; + + /** The page zip descriptor */ + page_zip_des_t* m_page_zip; + + /** The current rec, just before the next insert rec */ + rec_t* m_cur_rec; + + /** The page no */ + ulint m_page_no; + + /** The page level in B-tree */ + ulint m_level; + + /** Flag: is page in compact format */ + const bool m_is_comp; + + /** The heap top in page for next insert */ + byte* m_heap_top; + + /** User record no */ + ulint m_rec_no; + + /** The free space left in the page */ + ulint m_free_space; + + /** The reserved space for fill factor */ + ulint m_reserved_space; + + /** The padding space for compressed page */ + ulint m_padding_space; + +#ifdef UNIV_DEBUG + /** Total data in the page */ + ulint m_total_data; +#endif /* UNIV_DEBUG */ + + /** The modify clock value of the buffer block + when the block is re-pinned */ + ib_uint64_t m_modify_clock; + + /** Flush observer */ + FlushObserver* m_flush_observer; + + /** Operation result DB_SUCCESS or error code */ + dberr_t m_err; +}; + +typedef std::vector > + page_bulk_vector; + +class BtrBulk +{ +public: + /** Constructor + @param[in] index B-tree index + @param[in] trx_id transaction id + @param[in] observer flush observer */ + BtrBulk( + dict_index_t* index, + trx_id_t trx_id, + FlushObserver* observer) + : + m_heap(NULL), + m_index(index), + m_trx_id(trx_id), + m_flush_observer(observer) + { + ut_ad(m_flush_observer != NULL); +#ifdef UNIV_DEBUG + fil_space_inc_redo_skipped_count(m_index->space); +#endif /* UNIV_DEBUG */ + } + + /** Destructor */ + ~BtrBulk() + { + mem_heap_free(m_heap); + UT_DELETE(m_page_bulks); + +#ifdef UNIV_DEBUG + fil_space_dec_redo_skipped_count(m_index->space); +#endif /* UNIV_DEBUG */ + } + + /** Initialization + Note: must be called right after constructor. */ + void init() + { + ut_ad(m_heap == NULL); + m_heap = mem_heap_create(1000); + + m_page_bulks = UT_NEW_NOKEY(page_bulk_vector()); + } + + /** Insert a tuple + @param[in] tuple tuple to insert. + @return error code */ + dberr_t insert(dtuple_t* tuple) + { + return(insert(tuple, 0)); + } + + /** Btree bulk load finish. 
We commit the last page in each level + and copy the last page in top level to the root page of the index + if no error occurs. + @param[in] err whether bulk load was successful until now + @return error code */ + dberr_t finish(dberr_t err); + + /** Release all latches */ + void release(); + + /** Re-latch all latches */ + void latch(); + +private: + /** Insert a tuple to a page in a level + @param[in] tuple tuple to insert + @param[in] level B-tree level + @return error code */ + dberr_t insert(dtuple_t* tuple, ulint level); + + /** Split a page + @param[in] page_bulk page to split + @param[in] next_page_bulk next page + @return error code */ + dberr_t pageSplit(PageBulk* page_bulk, + PageBulk* next_page_bulk); + + /** Commit(finish) a page. We set next/prev page no, compress a page of + compressed table and split the page if compression fails, insert a node + pointer to father page if needed, and commit mini-transaction. + @param[in] page_bulk page to commit + @param[in] next_page_bulk next page + @param[in] insert_father flag whether need to insert node ptr + @return error code */ + dberr_t pageCommit(PageBulk* page_bulk, + PageBulk* next_page_bulk, + bool insert_father); + + /** Abort a page when an error occurs + @param[in] page_bulk page bulk object + Note: we should call pageAbort for a PageBulk object, which is not in + m_page_bulks after pageCommit, and we will commit or abort PageBulk + objects in function "finish". */ + void pageAbort(PageBulk* page_bulk) + { + page_bulk->commit(false); + } + + /** Log free check */ + void logFreeCheck(); + +private: + /** Memory heap for allocation */ + mem_heap_t* m_heap; + + /** B-tree index */ + dict_index_t* m_index; + + /** Transaction id */ + trx_id_t m_trx_id; + + /** Root page level */ + ulint m_root_level; + + /** Flush observer */ + FlushObserver* m_flush_observer; + + /** Page cursor vector for all level */ + page_bulk_vector* m_page_bulks; +}; + +#endif diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index aa799e0fc00..f582f04733c 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -30,6 +30,7 @@ Created 10/16/1994 Heikki Tuuri #include "dict0dict.h" #include "page0cur.h" #include "btr0types.h" +#include "gis0type.h" /** Mode flags for btr_cur operations; these can be ORed */ enum { @@ -52,6 +53,13 @@ enum { BTR_KEEP_IBUF_BITMAP = 32 }; +/* btr_cur_latch_leaves() returns latched blocks and savepoints. */ +struct btr_latch_leaves_t { + /* left block, target block and right block */ + buf_block_t* blocks[3]; + ulint savepoints[3]; +}; + #ifndef UNIV_HOTBACKUP #include "que0types.h" #include "row0types.h" @@ -63,7 +71,7 @@ enum { #ifdef UNIV_DEBUG /*********************************************************//** Returns the page cursor component of a tree cursor. -@return pointer to page cursor component */ +@return pointer to page cursor component */ UNIV_INLINE page_cur_t* btr_cur_get_page_cur( @@ -71,7 +79,7 @@ btr_cur_get_page_cur( const btr_cur_t* cursor);/*!< in: tree cursor */ /*********************************************************//** Returns the buffer block on which the tree cursor is positioned. -@return pointer to buffer block */ +@return pointer to buffer block */ UNIV_INLINE buf_block_t* btr_cur_get_block( @@ -79,7 +87,7 @@ btr_cur_get_block( const btr_cur_t* cursor);/*!< in: tree cursor */ /*********************************************************//** Returns the record pointer of a tree cursor. 
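The PageBulk/BtrBulk comments above spell out the intended call sequence for the new bulk loader (construct, init, insert, finish). The following is a minimal caller-side sketch using only the BtrBulk members declared in btr0bulk.h above; the index, transaction id, flush observer and the tuple source next_sorted_tuple() are assumptions added for illustration and are not part of this patch.

/* Illustrative sketch of the BtrBulk call sequence described above.
   next_sorted_tuple() is a hypothetical producer of dtuple_t* entries
   in key order; it is not part of this patch. */
dtuple_t* next_sorted_tuple();

static dberr_t
bulk_load_sketch(
        dict_index_t*   index,          /* in: index to build */
        trx_id_t        trx_id,         /* in: transaction id */
        FlushObserver*  observer)       /* in: flush observer, must not be NULL */
{
        BtrBulk bulk(index, trx_id, observer);

        /* init() must be called once, right after the constructor. */
        bulk.init();

        dberr_t err = DB_SUCCESS;

        for (dtuple_t* tuple = next_sorted_tuple();
             tuple != NULL && err == DB_SUCCESS;
             tuple = next_sorted_tuple()) {
                err = bulk.insert(tuple);
        }

        /* finish() commits the last page on each level and, on success,
        copies the top level page to the index root. */
        return(bulk.finish(err));
}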
-@return pointer to record */ +@return pointer to record */ UNIV_INLINE rec_t* btr_cur_get_rec( @@ -92,22 +100,15 @@ btr_cur_get_rec( #endif /* UNIV_DEBUG */ /*********************************************************//** Returns the compressed page on which the tree cursor is positioned. -@return pointer to compressed page, or NULL if the page is not compressed */ +@return pointer to compressed page, or NULL if the page is not compressed */ UNIV_INLINE page_zip_des_t* btr_cur_get_page_zip( /*=================*/ btr_cur_t* cursor);/*!< in: tree cursor */ /*********************************************************//** -Invalidates a tree cursor by setting record pointer to NULL. */ -UNIV_INLINE -void -btr_cur_invalidate( -/*===============*/ - btr_cur_t* cursor);/*!< in: tree cursor */ -/*********************************************************//** Returns the page of a tree cursor. -@return pointer to page */ +@return pointer to page */ UNIV_INLINE page_t* btr_cur_get_page( @@ -115,8 +116,8 @@ btr_cur_get_page( btr_cur_t* cursor);/*!< in: tree cursor */ /*********************************************************//** Returns the index of a cursor. -@param cursor b-tree cursor -@return index */ +@param cursor b-tree cursor +@return index */ #define btr_cur_get_index(cursor) ((cursor)->index) /*********************************************************//** Positions a tree cursor at a given record. */ @@ -128,6 +129,26 @@ btr_cur_position( rec_t* rec, /*!< in: record in tree */ buf_block_t* block, /*!< in: buffer block of rec */ btr_cur_t* cursor);/*!< in: cursor */ + +/** Optimistically latches the leaf page or pages requested. +@param[in] block guessed buffer block +@param[in] modify_clock modify clock value +@param[in,out] latch_mode BTR_SEARCH_LEAF, ... +@param[in,out] cursor cursor +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@return true if success */ +bool +btr_cur_optimistic_latch_leaves( + buf_block_t* block, + ib_uint64_t modify_clock, + ulint* latch_mode, + btr_cur_t* cursor, + const char* file, + ulint line, + mtr_t* mtr); + /********************************************************************//** Searches an index tree and positions a tree cursor on a given level. NOTE: n_fields_cmp in tuple must be set so that it cannot be compared @@ -135,7 +156,6 @@ to node pointer page number fields on the upper levels of the tree! Note that if mode is PAGE_CUR_LE, which is used in inserts, then cursor->up_match and cursor->low_match both will have sensible values. If mode is PAGE_CUR_GE, then up_match will a have a sensible value. */ -UNIV_INTERN dberr_t btr_cur_search_to_nth_level( /*========================*/ @@ -144,7 +164,7 @@ btr_cur_search_to_nth_level( const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in tuple must be set so that it cannot get compared to the node ptr page number field! */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; NOTE that if the search is made using a unique prefix of a record, mode should be PAGE_CUR_LE, not PAGE_CUR_GE, as the latter may end up on @@ -164,15 +184,48 @@ btr_cur_search_to_nth_level( to protect the record! */ btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is s- or x-latched, but see also above! 
*/ - ulint has_search_latch,/*!< in: latch mode the caller - currently has on btr_search_latch: + ulint has_search_latch, + /*!< in: latch mode the caller + currently has on search system: RW_S_LATCH, or 0 */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ mtr_t* mtr); /*!< in: mtr */ + +/** Searches an index tree and positions a tree cursor on a given level. +This function will avoid placing latches the travesal path and so +should be used only for cases where-in latching is not needed. + +@param[in] index index +@param[in] level the tree level of search +@param[in] tuple data tuple; Note: n_fields_cmp in compared + to the node ptr page node field +@param[in] mode PAGE_CUR_L, .... + Insert should always be made using PAGE_CUR_LE + to search the position. +@param[in,out] cursor tree cursor; points to record of interest. +@param[in] file file name +@param[in[ line line where called from +@param[in,out] mtr mtr +@param[in] mark_dirty + if true then mark the block as dirty +@return DB_SUCCESS or error code */ +dberr_t +btr_cur_search_to_nth_level_with_no_latch( + dict_index_t* index, + ulint level, + const dtuple_t* tuple, + page_cur_mode_t mode, + btr_cur_t* cursor, + const char* file, + ulint line, + mtr_t* mtr, + bool mark_dirty = true) + __attribute__((warn_unused_result)); + /*****************************************************************//** -Opens a cursor at either end of an index. */ -UNIV_INTERN +Opens a cursor at either end of an index. +@return DB_SUCCESS or error code */ dberr_t btr_cur_open_at_index_side_func( /*============================*/ @@ -187,12 +240,44 @@ btr_cur_open_at_index_side_func( ulint line, /*!< in: line where called */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull)); + #define btr_cur_open_at_index_side(f,i,l,c,lv,m) \ btr_cur_open_at_index_side_func(f,i,l,c,lv,__FILE__,__LINE__,m) + +/** Opens a cursor at either end of an index. +Avoid taking latches on buffer, just pin (by incrementing fix_count) +to keep them in buffer pool. This mode is used by intrinsic table +as they are not shared and so there is no need of latching. +@param[in] from_left true if open to low end, false if open + to high end. +@param[in] index index +@param[in] latch_mode latch mode +@param[in,out] cursor cursor +@param[in] file file name +@param[in] line line where called +@param[in,out] mtr mini transaction +@return DB_SUCCESS or error code +*/ +dberr_t +btr_cur_open_at_index_side_with_no_latch_func( + bool from_left, + dict_index_t* index, + btr_cur_t* cursor, + ulint level, + const char* file, + ulint line, + mtr_t* mtr) + __attribute__((warn_unused_result)); + +#define btr_cur_open_at_index_side_with_no_latch(f,i,c,lv,m) \ + btr_cur_open_at_index_side_with_no_latch_func( \ + f,i,c,lv,__FILE__,__LINE__,m) + /**********************************************************************//** -Positions a cursor at a randomly chosen position within a B-tree. */ -UNIV_INTERN -void +Positions a cursor at a randomly chosen position within a B-tree. +@return true if the index is available and we have put the cursor, false +if the index is unavailable */ +bool btr_cur_open_at_rnd_pos_func( /*=========================*/ dict_index_t* index, /*!< in: index */ @@ -209,8 +294,7 @@ It is assumed that mtr holds an x-latch on the page. The operation does not succeed if there is too little space on the page. If there is just one record on the page, the insert will always succeed; this is to prevent trying to split a page with just one record. 
-@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */ dberr_t btr_cur_optimistic_insert( /*======================*/ @@ -241,8 +325,7 @@ Performs an insert on a page of an index tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the insert is made on the leaf level, to avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. -@return DB_SUCCESS or error number */ -UNIV_INTERN +@return DB_SUCCESS or error number */ dberr_t btr_cur_pessimistic_insert( /*=======================*/ @@ -273,13 +356,12 @@ an update-in-place. @retval false if out of space; IBUF_BITMAP_FREE will be reset outside mtr if the page was recompressed -@retval true if enough place; +@retval true if enough place; IMPORTANT: The caller will have to update IBUF_BITMAP_FREE if this is a secondary index leaf page. This has to be done either within the same mini-transaction, or by invoking ibuf_reset_free_bits() before mtr_commit(mtr). */ -UNIV_INTERN bool btr_cur_update_alloc_zip_func( /*==========================*/ @@ -307,7 +389,6 @@ Updates a record when the update causes no size changes in its fields. @retval DB_SUCCESS on success @retval DB_ZIP_OVERFLOW if there is not enough space left on the compressed page (IBUF_BITMAP_FREE was reset outside mtr) */ -UNIV_INTERN dberr_t btr_cur_update_in_place( /*====================*/ @@ -328,7 +409,6 @@ btr_cur_update_in_place( MY_ATTRIBUTE((warn_unused_result, nonnull)); /***********************************************************//** Writes a redo log record of updating a record in-place. */ -UNIV_INTERN void btr_cur_update_in_place_log( /*========================*/ @@ -351,7 +431,6 @@ so that tree compression is recommended. @retval DB_UNDERFLOW if the page would become too empty @retval DB_ZIP_OVERFLOW if there is not enough space left on the compressed page */ -UNIV_INTERN dberr_t btr_cur_optimistic_update( /*======================*/ @@ -377,8 +456,7 @@ Performs an update of a record on a page of a tree. It is assumed that mtr holds an x-latch on the tree and on the cursor page. If the update is made on the leaf level, to avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t btr_cur_pessimistic_update( /*=======================*/ @@ -396,9 +474,10 @@ btr_cur_pessimistic_update( big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to be stored externally by the caller, or NULL */ - const upd_t* update, /*!< in: update vector; this is allowed also - contain trx id and roll ptr fields, but - the values in update vector have no effect */ + upd_t* update, /*!< in/out: update vector; this is allowed to + also contain trx id and roll ptr fields. + Non-updated columns that are moved offpage will + be appended to this. */ ulint cmpl_info,/*!< in: compiler info on secondary index updates */ que_thr_t* thr, /*!< in: query thread */ @@ -411,22 +490,22 @@ Marks a clustered index record deleted. Writes an undo log record to undo log on this delete marking. Writes in the trx id field the id of the deleting transaction, and in the roll ptr field pointer to the undo log record created. 
-@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ dberr_t btr_cur_del_mark_set_clust_rec( /*===========================*/ + ulint flags, /*!< in: undo logging and locking flags */ buf_block_t* block, /*!< in/out: buffer block of the record */ rec_t* rec, /*!< in/out: record */ dict_index_t* index, /*!< in: clustered index of the record */ const ulint* offsets,/*!< in: rec_get_offsets(rec) */ que_thr_t* thr, /*!< in: query thread */ + const dtuple_t* entry, /*!< in: dtuple for the deleting record */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************//** Sets a secondary index record delete mark to TRUE or FALSE. -@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ -UNIV_INTERN +@return DB_SUCCESS, DB_LOCK_WAIT, or error number */ dberr_t btr_cur_del_mark_set_sec_rec( /*=========================*/ @@ -442,8 +521,7 @@ that mtr holds an x-latch on the tree and on the cursor page. To avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. NOTE: it is assumed that the caller has reserved enough free extents so that the compression will always succeed if done! -@return TRUE if compression occurred */ -UNIV_INTERN +@return TRUE if compression occurred */ ibool btr_cur_compress_if_useful( /*=======================*/ @@ -458,8 +536,7 @@ btr_cur_compress_if_useful( Removes the record on which the tree cursor is positioned. It is assumed that the mtr has an x-latch on the page where the cursor is positioned, but no latch on the whole tree. -@return TRUE if success, i.e., the page did not become too empty */ -UNIV_INTERN +@return TRUE if success, i.e., the page did not become too empty */ ibool btr_cur_optimistic_delete_func( /*===========================*/ @@ -489,8 +566,7 @@ or if it is the only page on the level. It is assumed that mtr holds an x-latch on the tree and on the cursor page. To avoid deadlocks, mtr must also own x-latches to brothers of page, if those brothers exist. -@return TRUE if compression occurred */ -UNIV_INTERN +@return TRUE if compression occurred */ ibool btr_cur_pessimistic_delete( /*=======================*/ @@ -508,14 +584,13 @@ btr_cur_pessimistic_delete( stays valid: it points to successor of deleted record on function exit */ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ + bool rollback,/*!< in: performing rollback? */ mtr_t* mtr) /*!< in: mtr */ MY_ATTRIBUTE((nonnull)); #endif /* !UNIV_HOTBACKUP */ /***********************************************************//** Parses a redo log record of updating a record in-place. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_update_in_place( /*==========================*/ @@ -527,8 +602,7 @@ btr_cur_parse_update_in_place( /****************************************************************//** Parses the redo log record for delete marking or unmarking of a clustered index record. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_del_mark_set_clust_rec( /*=================================*/ @@ -540,8 +614,7 @@ btr_cur_parse_del_mark_set_clust_rec( /****************************************************************//** Parses the redo log record for delete marking or unmarking of a secondary index record. 
-@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* btr_cur_parse_del_mark_set_sec_rec( /*===============================*/ @@ -550,19 +623,22 @@ btr_cur_parse_del_mark_set_sec_rec( page_t* page, /*!< in/out: page or NULL */ page_zip_des_t* page_zip);/*!< in/out: compressed page, or NULL */ #ifndef UNIV_HOTBACKUP -/*******************************************************************//** -Estimates the number of rows in a given index range. -@return estimated number of rows */ -UNIV_INTERN -ib_int64_t + +/** Estimates the number of rows in a given index range. +@param[in] index index +@param[in] tuple1 range start, may also be empty tuple +@param[in] mode1 search mode for range start +@param[in] tuple2 range end, may also be empty tuple +@param[in] mode2 search mode for range end +@return estimated number of rows */ +int64_t btr_estimate_n_rows_in_range( -/*=========================*/ - dict_index_t* index, /*!< in: index */ - const dtuple_t* tuple1, /*!< in: range start, may also be empty tuple */ - ulint mode1, /*!< in: search mode for range start */ - const dtuple_t* tuple2, /*!< in: range end, may also be empty tuple */ - ulint mode2, /*!< in: search mode for range end */ - trx_t* trx); /*!< in: trx */ + dict_index_t* index, + const dtuple_t* tuple1, + page_cur_mode_t mode1, + const dtuple_t* tuple2, + page_cur_mode_t mode2); + /*******************************************************************//** Estimates the number of different key values in a given index, for each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index). @@ -571,9 +647,10 @@ The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed index->stat_n_sample_sizes[]. If innodb_stats_method is nulls_ignored, we also record the number of non-null values for each prefix and stored the estimates in -array index->stat_n_non_null_key_vals. */ -UNIV_INTERN -void +array index->stat_n_non_null_key_vals. +@return true if the index is available and we get the estimated numbers, +false if the index is unavailable. */ +bool btr_estimate_number_of_different_key_vals( /*======================================*/ dict_index_t* index); /*!< in: index */ @@ -582,7 +659,6 @@ btr_estimate_number_of_different_key_vals( @param[in] rec record @param[in] offsets array returned by rec_get_offsets() @return externally stored part, in units of a database page */ - ulint btr_rec_get_externally_stored_len( const rec_t* rec, @@ -593,7 +669,6 @@ Marks non-updated off-page fields as disowned by this record. The ownership must be transferred to the updated record which is inserted elsewhere in the index tree. In purge only the owner of externally stored field is allowed to free the field. */ -UNIV_INTERN void btr_cur_disown_inherited_fields( /*============================*/ @@ -613,7 +688,9 @@ enum blob_op { /** Store off-page columns for an insert by update */ BTR_STORE_INSERT_UPDATE, /** Store off-page columns for an update */ - BTR_STORE_UPDATE + BTR_STORE_UPDATE, + /** Store off-page columns for a freshly inserted record by bulk */ + BTR_STORE_INSERT_BULK }; /*******************************************************************//** @@ -631,32 +708,31 @@ Stores the fields in big_rec_vec to the tablespace and puts pointers to them in rec. The extern flags in rec will have to be set beforehand. The fields are stored on pages allocated from leaf node file segment of the index tree. 
-@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ -UNIV_INTERN +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ dberr_t btr_store_big_rec_extern_fields( /*============================*/ - dict_index_t* index, /*!< in: index of rec; the index tree - MUST be X-latched */ - buf_block_t* rec_block, /*!< in/out: block containing rec */ - rec_t* rec, /*!< in/out: record */ - const ulint* offsets, /*!< in: rec_get_offsets(rec, index); - the "external storage" flags in offsets - will not correspond to rec when - this function returns */ + btr_pcur_t* pcur, /*!< in/out: a persistent cursor. if + btr_mtr is restarted, then this can + be repositioned. */ + const upd_t* upd, /*!< in: update vector */ + ulint* offsets, /*!< in/out: rec_get_offsets() on + pcur. the "external storage" flags + in offsets will correctly correspond + to rec when this function returns */ const big_rec_t*big_rec_vec, /*!< in: vector containing fields to be stored externally */ - mtr_t* btr_mtr, /*!< in: mtr containing the - latches to the clustered index */ + mtr_t* btr_mtr, /*!< in/out: mtr containing the + latches to the clustered index. can be + committed and restarted. */ enum blob_op op) /*! in: operation code */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /*******************************************************************//** Frees the space in an externally stored field to the file space management if the field in data is owned the externally stored field, in a rollback we may have the additional condition that the field must not be inherited. */ -UNIV_INTERN void btr_free_externally_stored_field( /*=============================*/ @@ -677,69 +753,68 @@ btr_free_externally_stored_field( to rec, or NULL if rec == NULL */ ulint i, /*!< in: field number of field_ref; ignored if rec == NULL */ - enum trx_rb_ctx rb_ctx, /*!< in: rollback context */ - mtr_t* local_mtr); /*!< in: mtr containing the latch to - data an an X-latch to the index - tree */ -/*******************************************************************//** -Copies the prefix of an externally stored field of a record. The -clustered index record must be protected by a lock or a page latch. + bool rollback, /*!< in: performing rollback? */ + mtr_t* local_mtr); /*!< in: mtr containing the latch */ +/** Copies the prefix of an externally stored field of a record. +The clustered index record must be protected by a lock or a page latch. 
+@param[out] buf the field, or a prefix of it +@param[in] len length of buf, in bytes +@param[in] page_size BLOB page size +@param[in] data 'internally' stored part of the field +containing also the reference to the external part; must be protected by +a lock or a page latch +@param[in] local_len length of data, in bytes @return the length of the copied field, or 0 if the column was being or has been deleted */ -UNIV_INTERN ulint btr_copy_externally_stored_field_prefix( -/*====================================*/ - byte* buf, /*!< out: the field, or a prefix of it */ - ulint len, /*!< in: length of buf, in bytes */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - const byte* data, /*!< in: 'internally' stored part of the - field containing also the reference to - the external part; must be protected by - a lock or a page latch */ - ulint local_len,/*!< in: length of data, in bytes */ - trx_t* trx); /*!< in: transaction handle */ -/*******************************************************************//** -Copies an externally stored field of a record to mem heap. The -clustered index record must be protected by a lock or a page latch. + byte* buf, + ulint len, + const page_size_t& page_size, + const byte* data, + ulint local_len); + +/** Copies an externally stored field of a record to mem heap. +The clustered index record must be protected by a lock or a page latch. +@param[out] len length of the whole field +@param[in] data 'internally' stored part of the field +containing also the reference to the external part; must be protected by +a lock or a page latch +@param[in] page_size BLOB page size +@param[in] local_len length of data +@param[in,out] heap mem heap @return the whole field copied to heap */ -UNIV_INTERN byte* btr_copy_externally_stored_field( -/*=============================*/ - ulint* len, /*!< out: length of the whole field */ - const byte* data, /*!< in: 'internally' stored part of the - field containing also the reference to - the external part; must be protected by - a lock or a page latch */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - ulint local_len,/*!< in: length of data */ - mem_heap_t* heap, /*!< in: mem heap */ - trx_t* trx); /*!< in: transaction handle */ -/*******************************************************************//** -Copies an externally stored field of a record to mem heap. -@return the field copied to heap, or NULL if the field is incomplete */ -UNIV_INTERN + ulint* len, + const byte* data, + const page_size_t& page_size, + ulint local_len, + mem_heap_t* heap); + +/** Copies an externally stored field of a record to mem heap. 
+@param[in] rec record in a clustered index; must be +protected by a lock or a page latch +@param[in] offset array returned by rec_get_offsets() +@param[in] page_size BLOB page size +@param[in] no field number +@param[out] len length of the field +@param[in,out] heap mem heap +@return the field copied to heap, or NULL if the field is incomplete */ byte* btr_rec_copy_externally_stored_field( -/*=================================*/ - const rec_t* rec, /*!< in: record in a clustered index; - must be protected by a lock or a page latch */ - const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ - ulint zip_size,/*!< in: nonzero=compressed BLOB page size, - zero for uncompressed BLOBs */ - ulint no, /*!< in: field number */ - ulint* len, /*!< out: length of the field */ - mem_heap_t* heap, /*!< in: mem heap */ - trx_t* trx); /*!< in: transaction handle */ + const rec_t* rec, + const ulint* offsets, + const page_size_t& page_size, + ulint no, + ulint* len, + mem_heap_t* heap); + /*******************************************************************//** Flags the data tuple fields that are marked as extern storage in the update vector. We use this function to remember which fields we must mark as extern storage in a record inserted for an update. -@return number of flagged external columns */ -UNIV_INTERN +@return number of flagged external columns */ ulint btr_push_update_extern_fields( /*==========================*/ @@ -750,38 +825,74 @@ btr_push_update_extern_fields( /***********************************************************//** Sets a secondary index record's delete mark to the given value. This function is only used by the insert buffer merge mechanism. */ -UNIV_INTERN void btr_cur_set_deleted_flag_for_ibuf( /*==============================*/ rec_t* rec, /*!< in/out: record */ page_zip_des_t* page_zip, /*!< in/out: compressed page corresponding to rec, or NULL - when the tablespace is - uncompressed */ + when the tablespace is uncompressed */ ibool val, /*!< in: value to set */ mtr_t* mtr); /*!< in/out: mini-transaction */ + +/******************************************************//** +The following function is used to set the deleted bit of a record. */ +UNIV_INLINE +void +btr_rec_set_deleted_flag( +/*=====================*/ + rec_t* rec, /*!< in/out: physical record */ + page_zip_des_t* page_zip,/*!< in/out: compressed page (or NULL) */ + ulint flag); /*!< in: nonzero if delete marked */ + +/** Latches the leaf page or pages requested. +@param[in] block leaf page where the search converged +@param[in] page_id page id of the leaf +@param[in] latch_mode BTR_SEARCH_LEAF, ... +@param[in] cursor cursor +@param[in] mtr mini-transaction +@return blocks and savepoints which actually latched. */ +btr_latch_leaves_t +btr_cur_latch_leaves( + buf_block_t* block, + const page_id_t& page_id, + const page_size_t& page_size, + ulint latch_mode, + btr_cur_t* cursor, + mtr_t* mtr); + /*######################################################################*/ /** In the pessimistic delete, if the page data size drops below this limit, merging it to a neighbor is tried */ -#define BTR_CUR_PAGE_COMPRESS_LIMIT (UNIV_PAGE_SIZE / 2) +#define BTR_CUR_PAGE_COMPRESS_LIMIT(index) \ + ((UNIV_PAGE_SIZE * (ulint)((index)->merge_threshold)) / 100) /** A slot in the path array. We store here info on a search path down the tree. Each slot contains data on a single level of the tree. 
*/ +struct btr_path_t { + /* Assume a page like: + records: (inf, a, b, c, d, sup) + index of the record: 0, 1, 2, 3, 4, 5 + */ + + /** Index of the record where the page cursor stopped on this level + (index in alphabetical order). Value ULINT_UNDEFINED denotes array + end. In the above example, if the search stopped on record 'c', then + nth_rec will be 3. */ + ulint nth_rec; + + /** Number of the records on the page, not counting inf and sup. + In the above example n_recs will be 4. */ + ulint n_recs; -struct btr_path_t{ - ulint nth_rec; /*!< index of the record - where the page cursor stopped on - this level (index in alphabetical - order); value ULINT_UNDEFINED - denotes array end */ - ulint n_recs; /*!< number of records on the page */ - ulint page_no; /*!< no of the page containing the record */ - ulint page_level; /*!< level of the page, if later we fetch - the page under page_no and it is no different - level then we know that the tree has been - reorganized */ + /** Number of the page containing the record. */ + ulint page_no; + + /** Level of the page. If later we fetch the page under page_no + and it is no different level then we know that the tree has been + reorganized. */ + ulint page_level; }; #define BTR_PATH_ARRAY_N_SLOTS 250 /*!< size of path array (in slots) */ @@ -858,7 +969,7 @@ struct btr_cur_t { other search modes; see also the NOTE in up_match! */ ulint low_bytes; /*!< number of matched bytes to the - right at the time cursor positioned; + left at the time cursor positioned; only used internally in searches: not defined after the search */ ulint n_fields; /*!< prefix length used in a hash @@ -872,8 +983,22 @@ struct btr_cur_t { rows in range, we store in this array information of the path through the tree */ + rtr_info_t* rtr_info; /*!< rtree search info */ + btr_cur_t():thr(NULL), rtr_info(NULL) {} + /* default values */ }; +/******************************************************//** +The following function is used to set the deleted bit of a record. */ +UNIV_INLINE +void +btr_rec_set_deleted_flag( +/*=====================*/ + rec_t* rec, /*!< in/out: physical record */ + page_zip_des_t* page_zip,/*!< in/out: compressed page (or NULL) */ + ulint flag); /*!< in: nonzero if delete marked */ + + /** If pessimistic delete fails because of lack of file space, there is still a good change of success a little later. Try this many times. */ diff --git a/storage/innobase/include/btr0cur.ic b/storage/innobase/include/btr0cur.ic index 43ee3304c0e..45c0d59a8aa 100644 --- a/storage/innobase/include/btr0cur.ic +++ b/storage/innobase/include/btr0cur.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,7 +39,7 @@ if (btr_cur_limit_optimistic_insert_debug > 1\ #ifdef UNIV_DEBUG /*********************************************************//** Returns the page cursor component of a tree cursor. -@return pointer to page cursor component */ +@return pointer to page cursor component */ UNIV_INLINE page_cur_t* btr_cur_get_page_cur( @@ -51,7 +51,7 @@ btr_cur_get_page_cur( /*********************************************************//** Returns the buffer block on which the tree cursor is positioned. 
-@return pointer to buffer block */ +@return pointer to buffer block */ UNIV_INLINE buf_block_t* btr_cur_get_block( @@ -63,7 +63,7 @@ btr_cur_get_block( /*********************************************************//** Returns the record pointer of a tree cursor. -@return pointer to record */ +@return pointer to record */ UNIV_INLINE rec_t* btr_cur_get_rec( @@ -76,7 +76,7 @@ btr_cur_get_rec( /*********************************************************//** Returns the compressed page on which the tree cursor is positioned. -@return pointer to compressed page, or NULL if the page is not compressed */ +@return pointer to compressed page, or NULL if the page is not compressed */ UNIV_INLINE page_zip_des_t* btr_cur_get_page_zip( @@ -86,20 +86,9 @@ btr_cur_get_page_zip( return(buf_block_get_page_zip(btr_cur_get_block(cursor))); } -/*********************************************************//** -Invalidates a tree cursor by setting record pointer to NULL. */ -UNIV_INLINE -void -btr_cur_invalidate( -/*===============*/ - btr_cur_t* cursor) /*!< in: tree cursor */ -{ - page_cur_invalidate(&(cursor->page_cur)); -} - /*********************************************************//** Returns the page of a tree cursor. -@return pointer to page */ +@return pointer to page */ UNIV_INLINE page_t* btr_cur_get_page( @@ -130,7 +119,7 @@ btr_cur_position( /*********************************************************************//** Checks if compressing an index page where a btr cursor is placed makes sense. -@return TRUE if compression is recommended */ +@return TRUE if compression is recommended */ UNIV_INLINE ibool btr_cur_compress_recommendation( @@ -140,15 +129,17 @@ btr_cur_compress_recommendation( { const page_t* page; - ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor), - MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_is_block_fix( + mtr, btr_cur_get_block(cursor), + MTR_MEMO_PAGE_X_FIX, cursor->index->table)); page = btr_cur_get_page(cursor); LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page) * 2, return(FALSE)); - if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT) + if ((page_get_data_size(page) + < BTR_CUR_PAGE_COMPRESS_LIMIT(cursor->index)) || ((btr_page_get_next(page, mtr) == FIL_NULL) && (btr_page_get_prev(page, mtr) == FIL_NULL))) { @@ -167,7 +158,7 @@ btr_cur_compress_recommendation( /*********************************************************************//** Checks if the record on which the cursor is placed can be deleted without making tree compression necessary (or, recommended). -@return TRUE if can be deleted without recommended compression */ +@return TRUE if can be deleted without recommended compression */ UNIV_INLINE ibool btr_cur_can_delete_without_compress( @@ -183,7 +174,8 @@ btr_cur_can_delete_without_compress( page = btr_cur_get_page(cursor); - if ((page_get_data_size(page) - rec_size < BTR_CUR_PAGE_COMPRESS_LIMIT) + if ((page_get_data_size(page) - rec_size + < BTR_CUR_PAGE_COMPRESS_LIMIT(cursor->index)) || ((btr_page_get_next(page, mtr) == FIL_NULL) && (btr_page_get_prev(page, mtr) == FIL_NULL)) || (page_get_n_recs(page) < 2)) { @@ -211,6 +203,7 @@ btr_blob_op_is_update( { switch (op) { case BTR_STORE_INSERT: + case BTR_STORE_INSERT_BULK: return(FALSE); case BTR_STORE_INSERT_UPDATE: case BTR_STORE_UPDATE: @@ -220,4 +213,23 @@ btr_blob_op_is_update( ut_ad(0); return(FALSE); } + +/******************************************************//** +The following function is used to set the deleted bit of a record. 
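The compress limit used just above is no longer a fixed half page; it is derived from the per-index merge_threshold. A small worked example of the arithmetic follows, assuming the default 16KB UNIV_PAGE_SIZE and the default merge_threshold of 50; both values describe a default configuration and are not something this patch sets.

/* Worked example of the new limit, assuming the default 16KB page size. */
enum { EXAMPLE_PAGE_SIZE = 16384 };

static ulint
example_compress_limit(ulint merge_threshold)   /* in: 1..50, default 50 */
{
        /* Same arithmetic as BTR_CUR_PAGE_COMPRESS_LIMIT(index). */
        return((EXAMPLE_PAGE_SIZE * merge_threshold) / 100);
}

/* example_compress_limit(50) == 8192 bytes, i.e. half the page, which
   matches the old fixed BTR_CUR_PAGE_COMPRESS_LIMIT of UNIV_PAGE_SIZE / 2.
   example_compress_limit(30) == 4915 bytes, so a page must become emptier
   before a merge with a neighbour is attempted. */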
*/ +UNIV_INLINE +void +btr_rec_set_deleted_flag( +/*=====================*/ + rec_t* rec, /*!< in/out: physical record */ + page_zip_des_t* page_zip,/*!< in/out: compressed page (or NULL) */ + ulint flag) /*!< in: nonzero if delete marked */ +{ + if (page_rec_is_comp(rec)) { + rec_set_deleted_flag_new(rec, page_zip, flag); + } else { + ut_ad(!page_zip); + rec_set_deleted_flag_old(rec, flag); + } +} + #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index dafe14ce556..02f4faf24a5 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -34,22 +34,24 @@ Created 2/23/1996 Heikki Tuuri #include "btr0cur.h" #include "btr0btr.h" #include "btr0types.h" +#include "gis0rtree.h" /* Relative positions for a stored cursor position */ -#define BTR_PCUR_ON 1 -#define BTR_PCUR_BEFORE 2 -#define BTR_PCUR_AFTER 3 +enum btr_pcur_pos_t { + BTR_PCUR_ON = 1, + BTR_PCUR_BEFORE = 2, + BTR_PCUR_AFTER = 3, /* Note that if the tree is not empty, btr_pcur_store_position does not use the following, but only uses the above three alternatives, where the position is stored relative to a specific record: this makes implementation of a scroll cursor easier */ -#define BTR_PCUR_BEFORE_FIRST_IN_TREE 4 /* in an empty tree */ -#define BTR_PCUR_AFTER_LAST_IN_TREE 5 /* in an empty tree */ + BTR_PCUR_BEFORE_FIRST_IN_TREE = 4, /* in an empty tree */ + BTR_PCUR_AFTER_LAST_IN_TREE = 5 /* in an empty tree */ +}; /**************************************************************//** Allocates memory for a persistent cursor object and initializes the cursor. -@return own: persistent cursor */ -UNIV_INTERN +@return own: persistent cursor */ btr_pcur_t* btr_pcur_create_for_mysql(void); /*============================*/ @@ -57,7 +59,6 @@ btr_pcur_create_for_mysql(void); /**************************************************************//** Resets a persistent cursor object, freeing ::old_rec_buf if it is allocated and resetting the other members to their initial values. */ -UNIV_INTERN void btr_pcur_reset( /*===========*/ @@ -65,14 +66,12 @@ btr_pcur_reset( /**************************************************************//** Frees the memory for a persistent cursor object. */ -UNIV_INTERN void btr_pcur_free_for_mysql( /*====================*/ btr_pcur_t* cursor); /*!< in, own: persistent cursor */ /**************************************************************//** Copies the stored position of a pcur to another pcur. */ -UNIV_INTERN void btr_pcur_copy_stored_position( /*==========================*/ @@ -87,6 +86,14 @@ void btr_pcur_init( /*==========*/ btr_pcur_t* pcur); /*!< in: persistent cursor */ + +/** Free old_rec_buf. +@param[in] pcur Persistent cursor holding old_rec to be freed. */ +UNIV_INLINE +void +btr_pcur_free( + btr_pcur_t* pcur); + /**************************************************************//** Initializes and opens a persistent cursor to an index tree. It should be closed with btr_pcur_close. 
*/ @@ -97,7 +104,7 @@ btr_pcur_open_low( dict_index_t* index, /*!< in: index */ ulint level, /*!< in: level in the btree */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; NOTE that if the search is made using a unique prefix of a record, mode should be PAGE_CUR_LE, not PAGE_CUR_GE, as the latter @@ -119,7 +126,7 @@ btr_pcur_open_with_no_init_func( /*============================*/ dict_index_t* index, /*!< in: index */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; NOTE that if the search is made using a unique prefix of a record, mode should be PAGE_CUR_LE, not PAGE_CUR_GE, as the latter @@ -131,8 +138,9 @@ btr_pcur_open_with_no_init_func( page, but assume that the caller uses his btr search latch to protect the record! */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ - ulint has_search_latch,/*!< in: latch mode the caller - currently has on btr_search_latch: + ulint has_search_latch, + /*!< in: latch mode the caller + currently has on search system: RW_S_LATCH, or 0 */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ @@ -181,13 +189,12 @@ PAGE_CUR_LE, on the last user record. If no such user record exists, then in the first case sets the cursor after last in tree, and in the latter case before first in tree. The latching mode must be BTR_SEARCH_LEAF or BTR_MODIFY_LEAF. */ -UNIV_INTERN void btr_pcur_open_on_user_rec_func( /*===========================*/ dict_index_t* index, /*!< in: index */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ... */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ... */ ulint latch_mode, /*!< in: BTR_SEARCH_LEAF or BTR_MODIFY_LEAF */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent @@ -198,9 +205,11 @@ btr_pcur_open_on_user_rec_func( #define btr_pcur_open_on_user_rec(i,t,md,l,c,m) \ btr_pcur_open_on_user_rec_func(i,t,md,l,c,__FILE__,__LINE__,m) /**********************************************************************//** -Positions a cursor at a randomly chosen position within a B-tree. */ +Positions a cursor at a randomly chosen position within a B-tree. +@return true if the index is available and we have put the cursor, false +if the index is unavailable */ UNIV_INLINE -void +bool btr_pcur_open_at_rnd_pos_func( /*==========================*/ dict_index_t* index, /*!< in: index */ @@ -235,7 +244,6 @@ cursor data structure, or just setting a flag if the cursor id before the first in an EMPTY tree, or after the last in an EMPTY tree. NOTE that the page where the cursor is positioned must not be empty if the index tree is not totally empty! */ -UNIV_INTERN void btr_pcur_store_position( /*====================*/ @@ -256,7 +264,6 @@ restores to before first or after the last in the tree. @return TRUE if the cursor position was stored when it was on a user record and it can be restored on a user record whose ordering fields are identical to the ones of the original user record */ -UNIV_INTERN ibool btr_pcur_restore_position_func( /*===========================*/ @@ -269,7 +276,7 @@ btr_pcur_restore_position_func( btr_pcur_restore_position_func(l,cur,__FILE__,__LINE__,mtr) /*********************************************************//** Gets the rel_pos field for a cursor whose position has been stored. -@return BTR_PCUR_ON, ... */ +@return BTR_PCUR_ON, ... 
*/ UNIV_INLINE ulint btr_pcur_get_rel_pos( @@ -289,7 +296,7 @@ btr_pcur_commit_specify_mtr( /*********************************************************//** Moves the persistent cursor to the next record in the tree. If no records are left, the cursor stays 'after last in tree'. -@return TRUE if the cursor was not after last in tree */ +@return TRUE if the cursor was not after last in tree */ UNIV_INLINE ibool btr_pcur_move_to_next( @@ -300,8 +307,7 @@ btr_pcur_move_to_next( /*********************************************************//** Moves the persistent cursor to the previous record in the tree. If no records are left, the cursor stays 'before first in tree'. -@return TRUE if the cursor was not before first in tree */ -UNIV_INTERN +@return TRUE if the cursor was not before first in tree */ ibool btr_pcur_move_to_prev( /*==================*/ @@ -319,7 +325,7 @@ btr_pcur_move_to_last_on_page( /*********************************************************//** Moves the persistent cursor to the next user record in the tree. If no user records are left, the cursor ends up 'after last in tree'. -@return TRUE if the cursor moved forward, ending on a user record */ +@return TRUE if the cursor moved forward, ending on a user record */ UNIV_INLINE ibool btr_pcur_move_to_next_user_rec( @@ -332,7 +338,6 @@ Moves the persistent cursor to the first record on the next page. Releases the latch on the current page, and bufferunfixes it. Note that there must not be modifications on the current page, as then the x-latch can be released only in mtr_commit. */ -UNIV_INTERN void btr_pcur_move_to_next_page( /*=======================*/ @@ -349,7 +354,6 @@ The alphabetical position of the cursor is guaranteed to be sensible on return, but it may happen that the cursor is not positioned on the last record of any page, because the structure of the tree may have changed while the cursor had no latches. */ -UNIV_INTERN void btr_pcur_move_backward_from_page( /*=============================*/ @@ -359,7 +363,7 @@ btr_pcur_move_backward_from_page( #ifdef UNIV_DEBUG /*********************************************************//** Returns the btr cursor component of a persistent cursor. -@return pointer to btr cursor component */ +@return pointer to btr cursor component */ UNIV_INLINE btr_cur_t* btr_pcur_get_btr_cur( @@ -367,7 +371,7 @@ btr_pcur_get_btr_cur( const btr_pcur_t* cursor); /*!< in: persistent cursor */ /*********************************************************//** Returns the page cursor component of a persistent cursor. -@return pointer to page cursor component */ +@return pointer to page cursor component */ UNIV_INLINE page_cur_t* btr_pcur_get_page_cur( @@ -375,7 +379,7 @@ btr_pcur_get_page_cur( const btr_pcur_t* cursor); /*!< in: persistent cursor */ /*********************************************************//** Returns the page of a persistent cursor. -@return pointer to the page */ +@return pointer to the page */ UNIV_INLINE page_t* btr_pcur_get_page( @@ -383,7 +387,7 @@ btr_pcur_get_page( const btr_pcur_t* cursor);/*!< in: persistent cursor */ /*********************************************************//** Returns the buffer block of a persistent cursor. -@return pointer to the block */ +@return pointer to the block */ UNIV_INLINE buf_block_t* btr_pcur_get_block( @@ -391,7 +395,7 @@ btr_pcur_get_block( const btr_pcur_t* cursor);/*!< in: persistent cursor */ /*********************************************************//** Returns the record of a persistent cursor. 
-@return pointer to the record */ +@return pointer to the record */ UNIV_INLINE rec_t* btr_pcur_get_rec( @@ -493,53 +497,53 @@ enum pcur_pos_t { selects, updates, and deletes. */ struct btr_pcur_t{ - btr_cur_t btr_cur; /*!< a B-tree cursor */ - ulint latch_mode; /*!< see TODO note below! - BTR_SEARCH_LEAF, BTR_MODIFY_LEAF, - BTR_MODIFY_TREE, or BTR_NO_LATCHES, - depending on the latching state of - the page and tree where the cursor is - positioned; BTR_NO_LATCHES means that - the cursor is not currently positioned: - we say then that the cursor is - detached; it can be restored to - attached if the old position was - stored in old_rec */ - ulint old_stored; /*!< BTR_PCUR_OLD_STORED - or BTR_PCUR_OLD_NOT_STORED */ - rec_t* old_rec; /*!< if cursor position is stored, - contains an initial segment of the - latest record cursor was positioned - either on, before, or after */ - ulint old_n_fields; /*!< number of fields in old_rec */ - ulint rel_pos; /*!< BTR_PCUR_ON, BTR_PCUR_BEFORE, or - BTR_PCUR_AFTER, depending on whether - cursor was on, before, or after the - old_rec record */ - buf_block_t* block_when_stored;/* buffer block when the position was - stored */ - ib_uint64_t modify_clock; /*!< the modify clock value of the - buffer block when the cursor position - was stored */ - enum pcur_pos_t pos_state; /*!< btr_pcur_store_position() and - btr_pcur_restore_position() state. */ - ulint search_mode; /*!< PAGE_CUR_G, ... */ - trx_t* trx_if_known; /*!< the transaction, if we know it; - otherwise this field is not defined; - can ONLY BE USED in error prints in - fatal assertion failures! */ + /** a B-tree cursor */ + btr_cur_t btr_cur; + /** see TODO note below! + BTR_SEARCH_LEAF, BTR_MODIFY_LEAF, BTR_MODIFY_TREE or BTR_NO_LATCHES, + depending on the latching state of the page and tree where the cursor + is positioned; BTR_NO_LATCHES means that the cursor is not currently + positioned: + we say then that the cursor is detached; it can be restored to + attached if the old position was stored in old_rec */ + ulint latch_mode; + /** true if old_rec is stored */ + bool old_stored; + /** if cursor position is stored, contains an initial segment of the + latest record cursor was positioned either on, before or after */ + rec_t* old_rec; + /** number of fields in old_rec */ + ulint old_n_fields; + /** BTR_PCUR_ON, BTR_PCUR_BEFORE, or BTR_PCUR_AFTER, depending on + whether cursor was on, before, or after the old_rec record */ + enum btr_pcur_pos_t rel_pos; + /** buffer block when the position was stored */ + buf_block_t* block_when_stored; + /** the modify clock value of the buffer block when the cursor position + was stored */ + ib_uint64_t modify_clock; + /** the withdraw clock value of the buffer pool when the cursor + position was stored */ + ulint withdraw_clock; + /** btr_pcur_store_position() and btr_pcur_restore_position() state. */ + enum pcur_pos_t pos_state; + /** PAGE_CUR_G, ... */ + page_cur_mode_t search_mode; + /** the transaction, if we know it; otherwise this field is not defined; + can ONLY BE USED in error prints in fatal assertion failures! */ + trx_t* trx_if_known; /*-----------------------------*/ /* NOTE that the following fields may possess dynamically allocated memory which should be freed if not needed anymore! 
*/ - byte* old_rec_buf; /*!< NULL, or a dynamically allocated - buffer for old_rec */ - ulint buf_size; /*!< old_rec_buf size if old_rec_buf - is not NULL */ -}; + /** NULL, or a dynamically allocated buffer for old_rec */ + byte* old_rec_buf; + /** old_rec_buf size if old_rec_buf is not NULL */ + ulint buf_size; -#define BTR_PCUR_OLD_STORED 908467085 -#define BTR_PCUR_OLD_NOT_STORED 122766467 + /** Return the index of this persistent cursor */ + dict_index_t* index() const { return(btr_cur.index); } +}; #ifndef UNIV_NONINL #include "btr0pcur.ic" diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic index 1cd13824542..6cd968b4682 100644 --- a/storage/innobase/include/btr0pcur.ic +++ b/storage/innobase/include/btr0pcur.ic @@ -1,6 +1,7 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2015, 2016, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,7 +27,7 @@ Created 2/23/1996 Heikki Tuuri /*********************************************************//** Gets the rel_pos field for a cursor whose position has been stored. -@return BTR_PCUR_ON, ... */ +@return BTR_PCUR_ON, ... */ UNIV_INLINE ulint btr_pcur_get_rel_pos( @@ -35,7 +36,7 @@ btr_pcur_get_rel_pos( { ut_ad(cursor); ut_ad(cursor->old_rec); - ut_ad(cursor->old_stored == BTR_PCUR_OLD_STORED); + ut_ad(cursor->old_stored); ut_ad(cursor->pos_state == BTR_PCUR_WAS_POSITIONED || cursor->pos_state == BTR_PCUR_IS_POSITIONED); @@ -45,7 +46,7 @@ btr_pcur_get_rel_pos( #ifdef UNIV_DEBUG /*********************************************************//** Returns the btr cursor component of a persistent cursor. -@return pointer to btr cursor component */ +@return pointer to btr cursor component */ UNIV_INLINE btr_cur_t* btr_pcur_get_btr_cur( @@ -58,7 +59,7 @@ btr_pcur_get_btr_cur( /*********************************************************//** Returns the page cursor component of a persistent cursor. -@return pointer to page cursor component */ +@return pointer to page cursor component */ UNIV_INLINE page_cur_t* btr_pcur_get_page_cur( @@ -70,7 +71,7 @@ btr_pcur_get_page_cur( /*********************************************************//** Returns the page of a persistent cursor. -@return pointer to the page */ +@return pointer to the page */ UNIV_INLINE page_t* btr_pcur_get_page( @@ -84,7 +85,7 @@ btr_pcur_get_page( /*********************************************************//** Returns the buffer block of a persistent cursor. -@return pointer to the block */ +@return pointer to the block */ UNIV_INLINE buf_block_t* btr_pcur_get_block( @@ -98,7 +99,7 @@ btr_pcur_get_block( /*********************************************************//** Returns the record of a persistent cursor. 
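The reworked btr_pcur_t above replaces the BTR_PCUR_OLD_STORED/BTR_PCUR_OLD_NOT_STORED magic values with a plain bool old_stored, gives rel_pos and search_mode proper enum types, and adds an index() accessor. A minimal caller sketch, assuming only the members declared above; the helper name pcur_has_restorable_position is illustrative, not part of the patch:

#include "btr0pcur.h"

/* Check whether a persistent cursor has a stored position that could be
restored, using the bool old_stored flag and the new index() accessor. */
static bool
pcur_has_restorable_position(const btr_pcur_t* pcur)
{
	/* old_stored is now a plain bool; no comparison against the removed
	BTR_PCUR_OLD_STORED constant is needed. */
	if (!pcur->old_stored || pcur->old_rec == NULL) {
		return(false);
	}

	/* index() replaces reaching into btr_cur.index at call sites. */
	return(pcur->index() != NULL);
}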
-@return pointer to the record */ +@return pointer to the record */ UNIV_INLINE rec_t* btr_pcur_get_rec( @@ -260,7 +261,7 @@ btr_pcur_move_to_next_on_page( page_cur_move_to_next(btr_pcur_get_page_cur(cursor)); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; } /*********************************************************//** @@ -276,7 +277,7 @@ btr_pcur_move_to_prev_on_page( page_cur_move_to_prev(btr_pcur_get_page_cur(cursor)); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; } /*********************************************************//** @@ -294,13 +295,13 @@ btr_pcur_move_to_last_on_page( page_cur_set_after_last(btr_pcur_get_block(cursor), btr_pcur_get_page_cur(cursor)); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; } /*********************************************************//** Moves the persistent cursor to the next user record in the tree. If no user records are left, the cursor ends up 'after last in tree'. -@return TRUE if the cursor moved forward, ending on a user record */ +@return TRUE if the cursor moved forward, ending on a user record */ UNIV_INLINE ibool btr_pcur_move_to_next_user_rec( @@ -311,7 +312,7 @@ btr_pcur_move_to_next_user_rec( { ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; loop: if (btr_pcur_is_after_last_on_page(cursor)) { @@ -336,7 +337,7 @@ loop: /*********************************************************//** Moves the persistent cursor to the next record in the tree. If no records are left, the cursor stays 'after last in tree'. -@return TRUE if the cursor was not after last in tree */ +@return TRUE if the cursor was not after last in tree */ UNIV_INLINE ibool btr_pcur_move_to_next( @@ -348,7 +349,7 @@ btr_pcur_move_to_next( ut_ad(cursor->pos_state == BTR_PCUR_IS_POSITIONED); ut_ad(cursor->latch_mode != BTR_NO_LATCHES); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; if (btr_pcur_is_after_last_on_page(cursor)) { @@ -396,9 +397,21 @@ btr_pcur_init( /*==========*/ btr_pcur_t* pcur) /*!< in: persistent cursor */ { - pcur->old_stored = BTR_PCUR_OLD_NOT_STORED; + pcur->old_stored = false; pcur->old_rec_buf = NULL; pcur->old_rec = NULL; + + pcur->btr_cur.rtr_info = NULL; +} + +/** Free old_rec_buf. +@param[in] pcur Persistent cursor holding old_rec to be freed. 
*/ +UNIV_INLINE +void +btr_pcur_free( + btr_pcur_t* pcur) +{ + ut_free(pcur->old_rec_buf); } /**************************************************************//** @@ -411,7 +424,7 @@ btr_pcur_open_low( dict_index_t* index, /*!< in: index */ ulint level, /*!< in: level in the btree */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; NOTE that if the search is made using a unique prefix of a record, mode should be PAGE_CUR_LE, not PAGE_CUR_GE, as the latter @@ -424,6 +437,7 @@ btr_pcur_open_low( mtr_t* mtr) /*!< in: mtr */ { btr_cur_t* btr_cursor; + dberr_t err = DB_SUCCESS; /* Initialize the cursor */ @@ -436,8 +450,33 @@ btr_pcur_open_low( btr_cursor = btr_pcur_get_btr_cur(cursor); - btr_cur_search_to_nth_level(index, level, tuple, mode, latch_mode, - btr_cursor, 0, file, line, mtr); + ut_ad(!dict_index_is_spatial(index)); + + if (dict_table_is_intrinsic(index->table)) { + ut_ad((latch_mode & BTR_MODIFY_LEAF) + || (latch_mode & BTR_SEARCH_LEAF) + || (latch_mode & BTR_MODIFY_TREE)); + err = btr_cur_search_to_nth_level_with_no_latch( + index, level, tuple, mode, btr_cursor, + file, line, mtr, + (((latch_mode & BTR_MODIFY_LEAF) + || (latch_mode & BTR_MODIFY_TREE)) ? true : false)); + } else { + err = btr_cur_search_to_nth_level( + index, level, tuple, mode, latch_mode, + btr_cursor, 0, file, line, mtr); + } + + if (err != DB_SUCCESS) { + ib::warn() << " Error code: " << err + << " btr_pcur_open_low " + << " level: " << level + << " called from file: " + << file << " line: " << line + << " table: " << index->table->name + << " index: " << index->name; + } + cursor->pos_state = BTR_PCUR_IS_POSITIONED; cursor->trx_if_known = NULL; @@ -452,7 +491,7 @@ btr_pcur_open_with_no_init_func( /*============================*/ dict_index_t* index, /*!< in: index */ const dtuple_t* tuple, /*!< in: tuple on which search done */ - ulint mode, /*!< in: PAGE_CUR_L, ...; + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; NOTE that if the search is made using a unique prefix of a record, mode should be PAGE_CUR_LE, not PAGE_CUR_GE, as the latter @@ -464,8 +503,9 @@ btr_pcur_open_with_no_init_func( page, but assume that the caller uses his btr search latch to protect the record! */ btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ - ulint has_search_latch,/*!< in: latch mode the caller - currently has on btr_search_latch: + ulint has_search_latch, + /*!< in: latch mode the caller + currently has on search system: RW_S_LATCH, or 0 */ const char* file, /*!< in: file name */ ulint line, /*!< in: line where called */ @@ -474,19 +514,29 @@ btr_pcur_open_with_no_init_func( btr_cur_t* btr_cursor; dberr_t err = DB_SUCCESS; - cursor->latch_mode = latch_mode; + cursor->latch_mode = BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode); cursor->search_mode = mode; /* Search with the tree cursor */ btr_cursor = btr_pcur_get_btr_cur(cursor); - err = btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode, - btr_cursor, has_search_latch, - file, line, mtr); + if (dict_table_is_intrinsic(index->table)) { + ut_ad((latch_mode & BTR_MODIFY_LEAF) + || (latch_mode & BTR_SEARCH_LEAF)); + err = btr_cur_search_to_nth_level_with_no_latch( + index, 0, tuple, mode, btr_cursor, + file, line, mtr, + ((latch_mode & BTR_MODIFY_LEAF) ? 
true : false)); + } else { + err = btr_cur_search_to_nth_level( + index, 0, tuple, mode, latch_mode, btr_cursor, + has_search_latch, file, line, mtr); + } + cursor->pos_state = BTR_PCUR_IS_POSITIONED; - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; cursor->trx_if_known = NULL; return err; @@ -518,11 +568,18 @@ btr_pcur_open_at_index_side( btr_pcur_init(pcur); } - err = btr_cur_open_at_index_side(from_left, index, latch_mode, - btr_pcur_get_btr_cur(pcur), level, mtr); + if (dict_table_is_intrinsic(index->table)) { + err = btr_cur_open_at_index_side_with_no_latch( + from_left, index, + btr_pcur_get_btr_cur(pcur), level, mtr); + } else { + err = btr_cur_open_at_index_side( + from_left, index, latch_mode, + btr_pcur_get_btr_cur(pcur), level, mtr); + } pcur->pos_state = BTR_PCUR_IS_POSITIONED; - pcur->old_stored = BTR_PCUR_OLD_NOT_STORED; + pcur->old_stored = false; pcur->trx_if_known = NULL; @@ -530,9 +587,11 @@ btr_pcur_open_at_index_side( } /**********************************************************************//** -Positions a cursor at a randomly chosen position within a B-tree. */ +Positions a cursor at a randomly chosen position within a B-tree. +@return true if the index is available and we have put the cursor, false +if the index is unavailable */ UNIV_INLINE -void +bool btr_pcur_open_at_rnd_pos_func( /*==========================*/ dict_index_t* index, /*!< in: index */ @@ -549,13 +608,17 @@ btr_pcur_open_at_rnd_pos_func( btr_pcur_init(cursor); - btr_cur_open_at_rnd_pos_func(index, latch_mode, - btr_pcur_get_btr_cur(cursor), - file, line, mtr); + bool available; + + available = btr_cur_open_at_rnd_pos_func(index, latch_mode, + btr_pcur_get_btr_cur(cursor), + file, line, mtr); cursor->pos_state = BTR_PCUR_IS_POSITIONED; - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; cursor->trx_if_known = NULL; + + return(available); } /**************************************************************//** @@ -576,18 +639,20 @@ btr_pcur_close( /*===========*/ btr_pcur_t* cursor) /*!< in: persistent cursor */ { - if (cursor->old_rec_buf != NULL) { - - mem_free(cursor->old_rec_buf); + ut_free(cursor->old_rec_buf); - cursor->old_rec = NULL; - cursor->old_rec_buf = NULL; + if (cursor->btr_cur.rtr_info) { + rtr_clean_rtr_info(cursor->btr_cur.rtr_info, true); + cursor->btr_cur.rtr_info = NULL; } + cursor->old_rec = NULL; + cursor->old_rec_buf = NULL; cursor->btr_cur.page_cur.rec = NULL; cursor->btr_cur.page_cur.block = NULL; + cursor->old_rec = NULL; - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; cursor->latch_mode = BTR_NO_LATCHES; cursor->pos_state = BTR_PCUR_NOT_POSITIONED; @@ -608,5 +673,5 @@ btr_pcur_move_before_first_on_page( page_cur_set_before_first(btr_pcur_get_block(cursor), btr_pcur_get_page_cur(cursor)); - cursor->old_stored = BTR_PCUR_OLD_NOT_STORED; + cursor->old_stored = false; } diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h index e25e2a4d49c..659944f327e 100644 --- a/storage/innobase/include/btr0sea.h +++ b/storage/innobase/include/btr0sea.h @@ -34,59 +34,54 @@ Created 2/17/1996 Heikki Tuuri #include "mtr0mtr.h" #include "ha0ha.h" -/*****************************************************************//** -Creates and initializes the adaptive search system at a database start. */ -UNIV_INTERN +/** Creates and initializes the adaptive search system at a database start. +@param[in] hash_size hash table size. 
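btr_pcur_open_at_rnd_pos_func() above now returns bool rather than void, so callers can detect an unavailable index. A minimal sketch of a caller adapted to the new return value, assuming the btr_pcur_open_at_rnd_pos() convenience macro from this header; the function name and surrounding logic are illustrative:

#include "btr0pcur.h"

/* Position a cursor on a random leaf record, if the index is available.
The caller is assumed to have started the mini-transaction. */
static bool
sample_random_position(dict_index_t* index, btr_pcur_t* pcur, mtr_t* mtr)
{
	if (!btr_pcur_open_at_rnd_pos(index, BTR_SEARCH_LEAF, pcur, mtr)) {
		/* The index was not available; the cursor is not positioned. */
		return(false);
	}

	/* ... inspect btr_pcur_get_rec(pcur) here ... */

	return(true);
}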
*/ void -btr_search_sys_create( -/*==================*/ - ulint hash_size); /*!< in: hash index hash table size */ -/*****************************************************************//** -Frees the adaptive search system at a database shutdown. */ -UNIV_INTERN +btr_search_sys_create(ulint hash_size); + +/** Resize hash index hash table. +@param[in] hash_size hash index hash table size */ void -btr_search_sys_free(void); -/*=====================*/ +btr_search_sys_resize(ulint hash_size); -/********************************************************************//** -Disable the adaptive hash search system and empty the index. */ -UNIV_INTERN +/** Frees the adaptive search system at a database shutdown. */ void -btr_search_disable(void); -/*====================*/ -/********************************************************************//** -Enable the adaptive hash search system. */ -UNIV_INTERN +btr_search_sys_free(); + +/** Disable the adaptive hash search system and empty the index. +@param need_mutex need to acquire dict_sys->mutex */ void -btr_search_enable(void); -/*====================*/ +btr_search_disable( + bool need_mutex); +/** Enable the adaptive hash search system. */ +void +btr_search_enable(); /********************************************************************//** Returns search info for an index. -@return search info; search mutex reserved */ +@return search info; search mutex reserved */ UNIV_INLINE btr_search_t* btr_search_get_info( /*================*/ dict_index_t* index) /*!< in: index */ MY_ATTRIBUTE((nonnull)); -/*****************************************************************//** -Creates and initializes a search info struct. -@return own: search info struct */ -UNIV_INTERN + +/** Creates and initializes a search info struct. +@param[in] heap heap where created. +@return own: search info struct */ btr_search_t* -btr_search_info_create( -/*===================*/ - mem_heap_t* heap); /*!< in: heap where created */ -/*****************************************************************//** -Returns the value of ref_count. The value is protected by -btr_search_latch. -@return ref_count value. */ -UNIV_INTERN +btr_search_info_create(mem_heap_t* heap); + +/** Returns the value of ref_count. The value is protected by latch. +@param[in] info search info +@param[in] index index identifier +@return ref_count value. */ ulint btr_search_info_get_ref_count( -/*==========================*/ - btr_search_t* info); /*!< in: search info. */ + btr_search_t* info, + dict_index_t* index); + /*********************************************************************//** Updates the search info. */ UNIV_INLINE @@ -95,108 +90,180 @@ btr_search_info_update( /*===================*/ dict_index_t* index, /*!< in: index of the cursor */ btr_cur_t* cursor);/*!< in: cursor which was just positioned */ -/******************************************************************//** -Tries to guess the right search position based on the hash search info + +/** Tries to guess the right search position based on the hash search info of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts, and the function returns TRUE, then cursor->up_match and cursor->low_match both have sensible values. -@return TRUE if succeeded */ -UNIV_INTERN +@param[in,out] index index +@param[in,out] info index search info +@param[in] tuple logical record +@param[in] mode PAGE_CUR_L, .... 
+@param[in] latch_mode BTR_SEARCH_LEAF, ...; + NOTE that only if has_search_latch is 0, we will + have a latch set on the cursor page, otherwise + we assume the caller uses his search latch + to protect the record! +@param[out] cursor tree cursor +@param[in] has_search_latch + latch mode the caller currently has on + search system: RW_S/X_LATCH or 0 +@param[in] mtr mini transaction +@return TRUE if succeeded */ ibool btr_search_guess_on_hash( -/*=====================*/ - dict_index_t* index, /*!< in: index */ - btr_search_t* info, /*!< in: index search info */ - const dtuple_t* tuple, /*!< in: logical record */ - ulint mode, /*!< in: PAGE_CUR_L, ... */ - ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ... */ - btr_cur_t* cursor, /*!< out: tree cursor */ - ulint has_search_latch,/*!< in: latch mode the caller - currently has on btr_search_latch: - RW_S_LATCH, RW_X_LATCH, or 0 */ - mtr_t* mtr); /*!< in: mtr */ -/********************************************************************//** -Moves or deletes hash entries for moved records. If new_page is already hashed, -then the hash index for page, if any, is dropped. If new_page is not hashed, -and page is hashed, then a new hash index is built to new_page with the same -parameters as page (this often happens when a page is split). */ -UNIV_INTERN + dict_index_t* index, + btr_search_t* info, + const dtuple_t* tuple, + ulint mode, + ulint latch_mode, + btr_cur_t* cursor, + ulint has_search_latch, + mtr_t* mtr); + +/** Moves or deletes hash entries for moved records. If new_page is already +hashed, then the hash index for page, if any, is dropped. If new_page is not +hashed, and page is hashed, then a new hash index is built to new_page with the +same parameters as page (this often happens when a page is split). +@param[in,out] new_block records are copied to this page. +@param[in,out] block index page from which record are copied, and the + copied records will be deleted from this page. +@param[in,out] index record descriptor */ void btr_search_move_or_delete_hash_entries( -/*===================================*/ - buf_block_t* new_block, /*!< in: records are copied - to this page */ - buf_block_t* block, /*!< in: index page from which - records were copied, and the - copied records will be deleted - from this page */ - dict_index_t* index); /*!< in: record descriptor */ -/********************************************************************//** -Drops a page hash index. */ -UNIV_INTERN + buf_block_t* new_block, + buf_block_t* block, + dict_index_t* index); + +/** Drop any adaptive hash index entries that point to an index page. +@param[in,out] block block containing index page, s- or x-latched, or an + index page for which we know that + block->buf_fix_count == 0 or it is an index page which + has already been removed from the buf_pool->page_hash + i.e.: it is in state BUF_BLOCK_REMOVE_HASH */ void -btr_search_drop_page_hash_index( -/*============================*/ - buf_block_t* block); /*!< in: block containing index page, - s- or x-latched, or an index page - for which we know that - block->buf_fix_count == 0 */ -/********************************************************************//** -Drops a possible page hash index when a page is evicted from the buffer pool -or freed in a file segment. */ -UNIV_INTERN +btr_search_drop_page_hash_index(buf_block_t* block); + +/** Drop any adaptive hash index entries that may point to an index +page that may be in the buffer pool, when a page is evicted from the +buffer pool or freed in a file segment. 
+@param[in] page_id page id +@param[in] page_size page size */ void btr_search_drop_page_hash_when_freed( -/*=================================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint page_no); /*!< in: page number */ -/********************************************************************//** -Updates the page hash index when a single record is inserted on a page. */ -UNIV_INTERN + const page_id_t& page_id, + const page_size_t& page_size); + +/** Updates the page hash index when a single record is inserted on a page. +@param[in] cursor cursor which was positioned to the place to insert + using btr_cur_search_, and the new record has been + inserted next to the cursor. */ void -btr_search_update_hash_node_on_insert( -/*==================================*/ - btr_cur_t* cursor);/*!< in: cursor which was positioned to the +btr_search_update_hash_node_on_insert(btr_cur_t* cursor); + +/** Updates the page hash index when a single record is inserted on a page. +@param[in] cursor cursor which was positioned to the place to insert using btr_cur_search_..., and the new record has been inserted next to the cursor */ -/********************************************************************//** -Updates the page hash index when a single record is inserted on a page. */ -UNIV_INTERN void -btr_search_update_hash_on_insert( -/*=============================*/ - btr_cur_t* cursor);/*!< in: cursor which was positioned to the - place to insert using btr_cur_search_..., - and the new record has been inserted next - to the cursor */ -/********************************************************************//** -Updates the page hash index when a single record is deleted from a page. */ -UNIV_INTERN +btr_search_update_hash_on_insert(btr_cur_t* cursor); + +/** Updates the page hash index when a single record is deleted from a page. +@param[in] cursor cursor which was positioned on the record to delete + using btr_cur_search_, the record is not yet deleted.*/ void -btr_search_update_hash_on_delete( -/*=============================*/ - btr_cur_t* cursor);/*!< in: cursor which was positioned on the - record to delete using btr_cur_search_..., - the record is not yet deleted */ -#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG -/********************************************************************//** -Validates the search system. -@return TRUE if ok */ -UNIV_INTERN -ibool -btr_search_validate(void); -/*======================*/ -#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */ +btr_search_update_hash_on_delete(btr_cur_t* cursor); + +/** Validates the search system. +@return true if ok */ +bool +btr_search_validate(); + +/** X-Lock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_x_lock(const dict_index_t* index); + +/** X-Unlock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_x_unlock(const dict_index_t* index); + +/** Lock all search latches in exclusive mode. */ +UNIV_INLINE +void +btr_search_x_lock_all(); + +/** Unlock all search latches from exclusive mode. 
*/ +UNIV_INLINE +void +btr_search_x_unlock_all(); + +/** S-Lock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_s_lock(const dict_index_t* index); + +/** S-Unlock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_s_unlock(const dict_index_t* index); + +/** Lock all search latches in shared mode. */ +UNIV_INLINE +void +btr_search_s_lock_all(); + +#ifdef UNIV_DEBUG +/** Check if thread owns all the search latches. +@param[in] mode lock mode check +@retval true if owns all of them +@retval false if does not own some of them */ +UNIV_INLINE +bool +btr_search_own_all(ulint mode); + +/** Check if thread owns any of the search latches. +@param[in] mode lock mode check +@retval true if owns any of them +@retval false if owns no search latch */ +UNIV_INLINE +bool +btr_search_own_any(ulint mode); +#endif /* UNIV_DEBUG */ + +/** Unlock all search latches from shared mode. */ +UNIV_INLINE +void +btr_search_s_unlock_all(); + +/** Get the latch based on index attributes. +A latch is selected from an array of latches using pair of index-id, space-id. +@param[in] index index handler +@return latch */ +UNIV_INLINE +rw_lock_t* +btr_get_search_latch(const dict_index_t* index); + +/** Get the hash-table based on index attributes. +A table is selected from an array of tables using pair of index-id, space-id. +@param[in] index index handler +@return hash table */ +UNIV_INLINE +hash_table_t* +btr_get_search_table(const dict_index_t* index); /** The search info struct in an index */ struct btr_search_t{ ulint ref_count; /*!< Number of blocks in this index tree that have search index built i.e. block->index points to this index. - Protected by btr_search_latch except + Protected by search latch except when during initialization in btr_search_info_create(). */ @@ -205,6 +272,8 @@ struct btr_search_t{ the machine word, i.e., they cannot be turned into bit-fields. */ buf_block_t* root_guess;/*!< the root page frame when it was last time fetched, or NULL */ + ulint withdraw_clock; /*!< the withdraw clock value of the buffer + pool when root_guess was stored */ ulint hash_analysis; /*!< when this exceeds BTR_SEARCH_HASH_ANALYSIS, the hash analysis starts; this is reset if no @@ -248,11 +317,14 @@ struct btr_search_t{ /** The hash index system */ struct btr_search_sys_t{ - hash_table_t* hash_index; /*!< the adaptive hash index, + hash_table_t** hash_tables; /*!< the adaptive hash tables, mapping dtuple_fold values to rec_t pointers on index pages */ }; +/** Latches protecting access to adaptive hash index. */ +extern rw_lock_t** btr_search_latches; + /** The adaptive hash index */ extern btr_search_sys_t* btr_search_sys; diff --git a/storage/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic index 0bd869be136..4fd76810ea0 100644 --- a/storage/innobase/include/btr0sea.ic +++ b/storage/innobase/include/btr0sea.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,7 +29,6 @@ Created 2/17/1996 Heikki Tuuri /*********************************************************************//** Updates the search info. 
*/ -UNIV_INTERN void btr_search_info_update_slow( /*========================*/ @@ -38,7 +37,7 @@ btr_search_info_update_slow( /********************************************************************//** Returns search info for an index. -@return search info; search mutex reserved */ +@return search info; search mutex reserved */ UNIV_INLINE btr_search_t* btr_search_get_info( @@ -57,13 +56,14 @@ btr_search_info_update( dict_index_t* index, /*!< in: index of the cursor */ btr_cur_t* cursor) /*!< in: cursor which was just positioned */ { - btr_search_t* info; + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_S)); + ut_ad(!rw_lock_own(btr_get_search_latch(index), RW_LOCK_X)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED)); - ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + if (dict_index_is_spatial(index) || !btr_search_enabled) { + return; + } + btr_search_t* info; info = btr_search_get_info(index); info->hash_analysis++; @@ -80,3 +80,142 @@ btr_search_info_update( btr_search_info_update_slow(info, cursor); } + +/** X-Lock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_x_lock(const dict_index_t* index) +{ + rw_lock_x_lock(btr_get_search_latch(index)); +} + +/** X-Unlock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_x_unlock(const dict_index_t* index) +{ + rw_lock_x_unlock(btr_get_search_latch(index)); +} + +/** Lock all search latches in exclusive mode. */ +UNIV_INLINE +void +btr_search_x_lock_all() +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + rw_lock_x_lock(btr_search_latches[i]); + } +} + +/** Unlock all search latches from exclusive mode. */ +UNIV_INLINE +void +btr_search_x_unlock_all() +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + rw_lock_x_unlock(btr_search_latches[i]); + } +} + +/** S-Lock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_s_lock(const dict_index_t* index) +{ + rw_lock_s_lock(btr_get_search_latch(index)); +} + +/** S-Unlock the search latch (corresponding to given index) +@param[in] index index handler */ +UNIV_INLINE +void +btr_search_s_unlock(const dict_index_t* index) +{ + rw_lock_s_unlock(btr_get_search_latch(index)); +} + +/** Lock all search latches in shared mode. */ +UNIV_INLINE +void +btr_search_s_lock_all() +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + rw_lock_s_lock(btr_search_latches[i]); + } +} + +/** Unlock all search latches from shared mode. */ +UNIV_INLINE +void +btr_search_s_unlock_all() +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + rw_lock_s_unlock(btr_search_latches[i]); + } +} + +#ifdef UNIV_DEBUG +/** Check if thread owns all the search latches. +@param[in] mode lock mode check +@retval true if owns all of them +@retval false if does not own some of them */ +UNIV_INLINE +bool +btr_search_own_all(ulint mode) +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + if (!rw_lock_own(btr_search_latches[i], mode)) { + return(false); + } + } + return(true); +} + +/** Check if thread owns any of the search latches. 
+@param[in] mode lock mode check +@retval true if owns any of them +@retval false if owns no search latch */ +UNIV_INLINE +bool +btr_search_own_any(ulint mode) +{ + for (ulint i = 0; i < btr_ahi_parts; ++i) { + if (rw_lock_own(btr_search_latches[i], mode)) { + return(true); + } + } + return(false); +} +#endif /* UNIV_DEBUG */ + +/** Get the adaptive hash search index latch for a b-tree. +@param[in] index b-tree index +@return latch */ +UNIV_INLINE +rw_lock_t* +btr_get_search_latch(const dict_index_t* index) +{ + ut_ad(index != NULL); + + ulint ifold = ut_fold_ulint_pair(index->id, index->space); + + return(btr_search_latches[ifold % btr_ahi_parts]); +} + +/** Get the hash-table based on index attributes. +A table is selected from an array of tables using pair of index-id, space-id. +@param[in] index index handler +@return hash table */ +UNIV_INLINE +hash_table_t* +btr_get_search_table(const dict_index_t* index) +{ + ut_ad(index != NULL); + + ulint ifold = ut_fold_ulint_pair(index->id, index->space); + + return(btr_search_sys->hash_tables[ifold % btr_ahi_parts]); +} diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h index 04b69d8145c..734b33e4221 100644 --- a/storage/innobase/include/btr0types.h +++ b/storage/innobase/include/btr0types.h @@ -31,6 +31,7 @@ Created 2/17/1996 Heikki Tuuri #include "rem0types.h" #include "page0types.h" #include "sync0rw.h" +#include "page0size.h" /** Persistent cursor */ struct btr_pcur_t; @@ -39,165 +40,51 @@ struct btr_cur_t; /** B-tree search information for the adaptive hash index */ struct btr_search_t; -#ifndef UNIV_HOTBACKUP - -/** @brief The latch protecting the adaptive search system - -This latch protects the -(1) hash index; -(2) columns of a record to which we have a pointer in the hash index; - -but does NOT protect: - -(3) next record offset field in a record; -(4) next or previous records on the same page. - -Bear in mind (3) and (4) when using the hash index. -*/ -extern rw_lock_t* btr_search_latch_temp; - -#endif /* UNIV_HOTBACKUP */ - -/** The latch protecting the adaptive search system */ -#define btr_search_latch (*btr_search_latch_temp) - -/** Flag: has the search system been enabled? -Protected by btr_search_latch. */ +/** Is search system enabled. +Search system is protected by array of latches. */ extern char btr_search_enabled; -#ifdef UNIV_BLOB_DEBUG -# include "buf0types.h" -/** An index->blobs entry for keeping track of off-page column references */ -struct btr_blob_dbg_t; - -/** Insert to index->blobs a reference to an off-page column. -@param index the index tree -@param b the reference -@param ctx context (for logging) */ -UNIV_INTERN -void -btr_blob_dbg_rbt_insert( -/*====================*/ - dict_index_t* index, /*!< in/out: index tree */ - const btr_blob_dbg_t* b, /*!< in: the reference */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); - -/** Remove from index->blobs a reference to an off-page column. -@param index the index tree -@param b the reference -@param ctx context (for logging) */ -UNIV_INTERN -void -btr_blob_dbg_rbt_delete( -/*====================*/ - dict_index_t* index, /*!< in/out: index tree */ - const btr_blob_dbg_t* b, /*!< in: the reference */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); - -/**************************************************************//** -Add to index->blobs any references to off-page columns from a record. 
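With the adaptive hash index split into btr_ahi_parts partitions, btr_get_search_latch() and btr_get_search_table() above select one partition by folding (index id, space id), and the btr_search_s/x_lock wrappers latch only that partition. A minimal sketch of reading per-index search info under the partition latch, assuming the declarations above; the helper name is illustrative:

#include "btr0sea.h"

/* Read the AHI block reference count of one index while holding only the
S-latch of the partition that covers this index. */
static ulint
ahi_ref_count_of(dict_index_t* index)
{
	btr_search_s_lock(index);	/* latches one of btr_ahi_parts latches */

	btr_search_t*	info = btr_search_get_info(index);
	ulint		ref_count = info->ref_count;

	btr_search_s_unlock(index);

	return(ref_count);
}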
-@return number of references added */ -UNIV_INTERN -ulint -btr_blob_dbg_add_rec( -/*=================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: offsets */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Remove from index->blobs any references to off-page columns from a record. -@return number of references removed */ -UNIV_INTERN -ulint -btr_blob_dbg_remove_rec( -/*====================*/ - const rec_t* rec, /*!< in: record */ - dict_index_t* index, /*!< in/out: index */ - const ulint* offsets,/*!< in: offsets */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Count and add to index->blobs any references to off-page columns -from records on a page. -@return number of references added */ -UNIV_INTERN -ulint -btr_blob_dbg_add( -/*=============*/ - const page_t* page, /*!< in: rewritten page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Count and remove from index->blobs any references to off-page columns -from records on a page. -Used when reorganizing a page, before copying the records. -@return number of references removed */ -UNIV_INTERN -ulint -btr_blob_dbg_remove( -/*================*/ - const page_t* page, /*!< in: b-tree page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); -/**************************************************************//** -Restore in index->blobs any references to off-page columns -Used when page reorganize fails due to compressed page overflow. */ -UNIV_INTERN -void -btr_blob_dbg_restore( -/*=================*/ - const page_t* npage, /*!< in: page that failed to compress */ - const page_t* page, /*!< in: copy of original page */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx) /*!< in: context (for logging) */ - MY_ATTRIBUTE((nonnull)); - -/** Operation that processes the BLOB references of an index record -@param[in] rec record on index page -@param[in/out] index the index tree of the record -@param[in] offsets rec_get_offsets(rec,index) -@param[in] ctx context (for logging) -@return number of BLOB references processed */ -typedef ulint (*btr_blob_dbg_op_f) -(const rec_t* rec,dict_index_t* index,const ulint* offsets,const char* ctx); - -/**************************************************************//** -Count and process all references to off-page columns on a page. 
-@return number of references processed */ -UNIV_INTERN -ulint -btr_blob_dbg_op( -/*============*/ - const page_t* page, /*!< in: B-tree leaf page */ - const rec_t* rec, /*!< in: record to start from - (NULL to process the whole page) */ - dict_index_t* index, /*!< in/out: index */ - const char* ctx, /*!< in: context (for logging) */ - const btr_blob_dbg_op_f op) /*!< in: operation on records */ - MY_ATTRIBUTE((nonnull(1,3,4,5))); -#else /* UNIV_BLOB_DEBUG */ -# define btr_blob_dbg_add_rec(rec, index, offsets, ctx) ((void) 0) -# define btr_blob_dbg_add(page, index, ctx) ((void) 0) -# define btr_blob_dbg_remove_rec(rec, index, offsets, ctx) ((void) 0) -# define btr_blob_dbg_remove(page, index, ctx) ((void) 0) -# define btr_blob_dbg_restore(npage, page, index, ctx) ((void) 0) -# define btr_blob_dbg_op(page, rec, index, ctx, op) ((void) 0) -#endif /* UNIV_BLOB_DEBUG */ +/** Number of adaptive hash index partition. */ +extern ulong btr_ahi_parts; /** The size of a reference to data stored on a different page. The reference is stored at the end of the prefix of the field in the index record. */ -#define BTR_EXTERN_FIELD_REF_SIZE 20 - -/** A BLOB field reference full of zero, for use in assertions and tests. -Initially, BLOB field references are set to zero, in -dtuple_convert_big_rec(). */ -extern const byte field_ref_zero[BTR_EXTERN_FIELD_REF_SIZE]; +#define BTR_EXTERN_FIELD_REF_SIZE FIELD_REF_SIZE + +/** If the data don't exceed the size, the data are stored locally. */ +#define BTR_EXTERN_LOCAL_STORED_MAX_SIZE \ + (BTR_EXTERN_FIELD_REF_SIZE * 2) + +/** The information is used for creating a new index tree when +applying TRUNCATE log record during recovery */ +struct btr_create_t { + + explicit btr_create_t(const byte* const ptr) + : + format_flags(), + n_fields(), + field_len(), + fields(ptr), + trx_id_pos(ULINT_UNDEFINED) + { + /* Do nothing */ + } + + /** Page format */ + ulint format_flags; + + /** Numbr of index fields */ + ulint n_fields; + + /** The length of the encoded meta-data */ + ulint field_len; + + /** Field meta-data, encoded. */ + const byte* const fields; + + /** Position of trx-id column. */ + ulint trx_id_pos; +}; #endif diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h index 7fc4408505d..c2c100e83e6 100644 --- a/storage/innobase/include/buf0buddy.h +++ b/storage/innobase/include/buf0buddy.h @@ -39,7 +39,7 @@ Allocate a block. The thread calling this function must hold buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex. The buf_pool->mutex may be released and reacquired. This function should only be used for allocating compressed page frames. -@return allocated block, never NULL */ +@return allocated block, never NULL */ UNIV_INLINE byte* buf_buddy_alloc( @@ -70,6 +70,24 @@ buf_buddy_free( up to UNIV_PAGE_SIZE */ MY_ATTRIBUTE((nonnull)); +/** Reallocate a block. +@param[in] buf_pool buffer pool instance +@param[in] buf block to be reallocated, must be pointed +to by the buffer pool +@param[in] size block size, up to UNIV_PAGE_SIZE +@retval false if failed because of no free blocks. */ +bool +buf_buddy_realloc( + buf_pool_t* buf_pool, + void* buf, + ulint size); + +/** Combine all pairs of free buddies. 
+@param[in] buf_pool buffer pool instance */ +void +buf_buddy_condense_free( + buf_pool_t* buf_pool); + #ifndef UNIV_NONINL # include "buf0buddy.ic" #endif diff --git a/storage/innobase/include/buf0buddy.ic b/storage/innobase/include/buf0buddy.ic index 4352ebe8945..2b6d76df009 100644 --- a/storage/innobase/include/buf0buddy.ic +++ b/storage/innobase/include/buf0buddy.ic @@ -30,15 +30,12 @@ Created December 2006 by Marko Makela #include "buf0buf.h" #include "buf0buddy.h" -#include "ut0ut.h" -#include "sync0sync.h" /**********************************************************************//** Allocate a block. The thread calling this function must hold buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex. The buf_pool_mutex may be released and reacquired. -@return allocated block, never NULL */ -UNIV_INTERN +@return allocated block, never NULL */ void* buf_buddy_alloc_low( /*================*/ @@ -54,7 +51,6 @@ buf_buddy_alloc_low( /**********************************************************************//** Deallocate a block. */ -UNIV_INTERN void buf_buddy_free_low( /*===============*/ @@ -67,7 +63,7 @@ buf_buddy_free_low( /**********************************************************************//** Get the index of buf_pool->zip_free[] for a given block size. -@return index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */ +@return index of buf_pool->zip_free[], or BUF_BUDDY_SIZES */ UNIV_INLINE ulint buf_buddy_get_slot( @@ -91,7 +87,7 @@ Allocate a block. The thread calling this function must hold buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex. The buf_pool->mutex may be released and reacquired. This function should only be used for allocating compressed page frames. -@return allocated block, never NULL */ +@return allocated block, never NULL */ UNIV_INLINE byte* buf_buddy_alloc( diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index c737f3a6f1d..6e147ce95c5 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -31,6 +31,7 @@ Created 11/5/1995 Heikki Tuuri #include "fil0fil.h" #include "mtr0types.h" #include "buf0types.h" +#ifndef UNIV_INNOCHECKSUM #include "hash0hash.h" #include "ut0byte.h" #include "page0types.h" @@ -38,6 +39,11 @@ Created 11/5/1995 Heikki Tuuri #include "ut0rbt.h" #include "os0proc.h" #include "log0log.h" +#include "srv0srv.h" +#include + +// Forward declaration +struct fil_addr_t; /** @name Modes for buf_page_get_gen */ /* @{ */ @@ -85,18 +91,25 @@ Created 11/5/1995 Heikki Tuuri extern buf_pool_t* buf_pool_ptr; /*!< The buffer pools of the database */ + +extern volatile bool buf_pool_withdrawing; /*!< true when withdrawing buffer + pool pages might cause page relocation */ + +extern volatile ulint buf_withdraw_clock; /*!< the clock is incremented + every time a pointer to a page may + become obsolete */ + #ifdef UNIV_DEBUG -extern ibool buf_debug_prints;/*!< If this is set TRUE, the program - prints info whenever read or flush - occurs */ +extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing + buffer pool is not allowed. 
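buf_pool_withdrawing and buf_withdraw_clock above exist so that code caching raw block pointers (for example btr_pcur_t::block_when_stored and btr_search_t::root_guess, which both gain a withdraw_clock field in this patch) can detect that a buffer pool resize may have relocated the block. A minimal sketch of that staleness check, assuming only the two variables declared above; the helper name is an assumption:

#include "buf0buf.h"

/* A cached block pointer can only still be trusted if no withdrawal is in
progress and the withdraw clock has not advanced since the pointer and the
clock value were stored together. */
static bool
cached_block_pointer_usable(ulint stored_withdraw_clock)
{
	return(!buf_pool_withdrawing
	       && stored_withdraw_clock == buf_withdraw_clock);
}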
*/ #endif /* UNIV_DEBUG */ -extern ulint srv_buf_pool_instances; -extern ulint srv_buf_pool_curr_size; #else /* !UNIV_HOTBACKUP */ extern buf_block_t* back_block1; /*!< first block, for --apply-log */ extern buf_block_t* back_block2; /*!< second block, for page reorganize */ #endif /* !UNIV_HOTBACKUP */ +#endif /* !UNIV_INNOCHECKSUM */ +#ifndef UNIV_INNOCHECKSUM /** @brief States of a control block @see buf_page_t @@ -204,6 +217,127 @@ struct buf_pools_list_size_t { ulint flush_list_bytes; /*!< flush_list size in bytes */ }; +/** Page identifier. */ +class page_id_t { +public: + + /** Constructor from (space, page_no). + @param[in] space tablespace id + @param[in] page_no page number */ + page_id_t(ulint space, ulint page_no) + : + m_space(static_cast(space)), + m_page_no(static_cast(page_no)), + m_fold(ULINT_UNDEFINED) + { + ut_ad(space <= 0xFFFFFFFFU); + ut_ad(page_no <= 0xFFFFFFFFU); + } + + /** Retrieve the tablespace id. + @return tablespace id */ + inline ib_uint32_t space() const + { + return(m_space); + } + + /** Retrieve the page number. + @return page number */ + inline ib_uint32_t page_no() const + { + return(m_page_no); + } + + /** Retrieve the fold value. + @return fold value */ + inline ulint fold() const + { + /* Initialize m_fold if it has not been initialized yet. */ + if (m_fold == ULINT_UNDEFINED) { + m_fold = (m_space << 20) + m_space + m_page_no; + ut_ad(m_fold != ULINT_UNDEFINED); + } + + return(m_fold); + } + + /** Copy the values from a given page_id_t object. + @param[in] src page id object whose values to fetch */ + inline void copy_from(const page_id_t& src) + { + m_space = src.space(); + m_page_no = src.page_no(); + m_fold = src.fold(); + } + + /** Reset the values from a (space, page_no). + @param[in] space tablespace id + @param[in] page_no page number */ + inline void reset(ulint space, ulint page_no) + { + m_space = static_cast(space); + m_page_no = static_cast(page_no); + m_fold = ULINT_UNDEFINED; + + ut_ad(space <= 0xFFFFFFFFU); + ut_ad(page_no <= 0xFFFFFFFFU); + } + + /** Reset the page number only. + @param[in] page_no page number */ + inline void set_page_no(ulint page_no) + { + m_page_no = static_cast(page_no); + m_fold = ULINT_UNDEFINED; + + ut_ad(page_no <= 0xFFFFFFFFU); + } + + /** Check if a given page_id_t object is equal to the current one. + @param[in] a page_id_t object to compare + @return true if equal */ + inline bool equals_to(const page_id_t& a) const + { + return(a.space() == m_space && a.page_no() == m_page_no); + } + +private: + + /** Tablespace id. */ + ib_uint32_t m_space; + + /** Page number. */ + ib_uint32_t m_page_no; + + /** A fold value derived from m_space and m_page_no, + used in hashing. */ + mutable ulint m_fold; + + /* Disable implicit copying. */ + void operator=(const page_id_t&); + + /** Declare the overloaded global operator<< as a friend of this + class. Refer to the global declaration for further details. Print + the given page_id_t object. + @param[in,out] out the output stream + @param[in] page_id the page_id_t object to be printed + @return the output stream */ + friend + std::ostream& + operator<<( + std::ostream& out, + const page_id_t& page_id); +}; + +/** Print the given page_id_t object. 
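The page_id_t class above packs the (space, page number) pair that used to be passed around as separate ulints, and lazily caches a fold value for the page hash. A minimal usage sketch, assuming only the class as declared above; the concrete space and page numbers are arbitrary:

#include "buf0buf.h"

/* Construct, compare and fold page identifiers. */
static void
page_id_usage()
{
	page_id_t	id(1, 42);		/* space 1, page 42 */

	ut_ad(id.space() == 1);
	ut_ad(id.page_no() == 42);

	page_id_t	other(1, 42);
	ut_ad(id.equals_to(other));
	ut_ad(id.fold() == other.fold());	/* fold is computed and cached lazily */

	other.set_page_no(43);			/* resets the cached fold */
	ut_ad(!id.equals_to(other));
}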
+@param[in,out] out the output stream +@param[in] page_id the page_id_t object to be printed +@return the output stream */ +std::ostream& +operator<<( + std::ostream& out, + const page_id_t& page_id); + #ifndef UNIV_HOTBACKUP /********************************************************************//** Acquire mutex on all buffer pool instances */ @@ -221,8 +355,7 @@ buf_pool_mutex_exit_all(void); /********************************************************************//** Creates the buffer pool. -@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */ -UNIV_INTERN +@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */ dberr_t buf_pool_init( /*=========*/ @@ -231,42 +364,62 @@ buf_pool_init( /********************************************************************//** Frees the buffer pool at shutdown. This must not be invoked before freeing all mutexes. */ -UNIV_INTERN void buf_pool_free( /*==========*/ ulint n_instances); /*!< in: numbere of instances to free */ +/** Determines if a block is intended to be withdrawn. +@param[in] buf_pool buffer pool instance +@param[in] block pointer to control block +@retval true if will be withdrawn */ +bool +buf_block_will_withdrawn( + buf_pool_t* buf_pool, + const buf_block_t* block); + +/** Determines if a frame is intended to be withdrawn. +@param[in] buf_pool buffer pool instance +@param[in] ptr pointer to a frame +@retval true if will be withdrawn */ +bool +buf_frame_will_withdrawn( + buf_pool_t* buf_pool, + const byte* ptr); + +/** Resize the buffer pool based on srv_buf_pool_size from +srv_buf_pool_old_size. */ +void +buf_pool_resize(); + +/** This is the thread for resizing buffer pool. It waits for an event and +when waked up either performs a resizing and sleeps again. +@param[in] arg a dummy parameter required by os_thread_create. +@return this function does not return, calls os_thread_exit() +*/ +extern "C" +os_thread_ret_t +DECLARE_THREAD(buf_resize_thread)( +/*==============================*/ + void* arg); /*!< in: a dummy parameter + required by os_thread_create */ + /********************************************************************//** Clears the adaptive hash index on all pages in the buffer pool. */ -UNIV_INTERN void buf_pool_clear_hash_index(void); /*===========================*/ -/********************************************************************//** -Relocate a buffer control block. Relocates the block on the LRU list -and in buf_pool->page_hash. Does not relocate bpage->list. -The caller must take care of relocating bpage->list. */ -UNIV_INTERN -void -buf_relocate( -/*=========*/ - buf_page_t* bpage, /*!< in/out: control block being relocated; - buf_page_get_state(bpage) must be - BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */ - buf_page_t* dpage) /*!< in/out: destination control block */ - MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the current size of buffer buf_pool in bytes. -@return size in bytes */ +@return size in bytes */ UNIV_INLINE ulint buf_pool_get_curr_size(void); /*========================*/ /*********************************************************************//** Gets the current size of buffer buf_pool in frames. -@return size in pages */ +@return size in pages */ UNIV_INLINE ulint buf_pool_get_n_pages(void); @@ -274,8 +427,7 @@ buf_pool_get_n_pages(void); /********************************************************************//** Gets the smallest oldest_modification lsn for any page in the pool. 
Returns zero if all modified pages have been flushed to disk. -@return oldest modification in pool, zero if none */ -UNIV_INTERN +@return oldest modification in pool, zero if none */ lsn_t buf_pool_get_oldest_modification(void); /*==================================*/ @@ -299,8 +451,7 @@ buf_page_free_descriptor( /********************************************************************//** Allocates a buffer block. -@return own: the allocated block, in state BUF_BLOCK_MEMORY */ -UNIV_INTERN +@return own: the allocated block, in state BUF_BLOCK_MEMORY */ buf_block_t* buf_block_alloc( /*============*/ @@ -317,7 +468,7 @@ buf_block_free( #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Copies contents of a buffer frame to a given buffer. -@return buf */ +@return buf */ UNIV_INLINE byte* buf_frame_copy( @@ -329,23 +480,21 @@ buf_frame_copy( NOTE! The following macros should be used instead of buf_page_get_gen, to improve debugging. Only values RW_S_LATCH and RW_X_LATCH are allowed in LA! */ -#define buf_page_get(SP, ZS, OF, LA, MTR) buf_page_get_gen(\ - SP, ZS, OF, LA, NULL,\ - BUF_GET, __FILE__, __LINE__, MTR) +#define buf_page_get(ID, SIZE, LA, MTR) \ + buf_page_get_gen(ID, SIZE, LA, NULL, BUF_GET, __FILE__, __LINE__, MTR, NULL) /**************************************************************//** Use these macros to bufferfix a page with no latching. Remember not to read the contents of the page unless you know it is safe. Do not modify the contents of the page! We have separated this case, because it is error-prone programming not to set a latch, and it should be used with care. */ -#define buf_page_get_with_no_latch(SP, ZS, OF, MTR) buf_page_get_gen(\ - SP, ZS, OF, RW_NO_LATCH, NULL,\ - BUF_GET_NO_LATCH, __FILE__, __LINE__, MTR) +#define buf_page_get_with_no_latch(ID, SIZE, MTR) \ + buf_page_get_gen(ID, SIZE, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH, \ + __FILE__, __LINE__, MTR, NULL) /********************************************************************//** This is the general function used to get optimistic access to a database page. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ ibool buf_page_optimistic_get( /*====================*/ @@ -358,8 +507,7 @@ buf_page_optimistic_get( /********************************************************************//** This is used to get access to a known database page, when no waiting can be done. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ ibool buf_page_get_known_nowait( /*======================*/ @@ -370,96 +518,98 @@ buf_page_get_known_nowait( ulint line, /*!< in: line where called */ mtr_t* mtr); /*!< in: mini-transaction */ -/*******************************************************************//** -Given a tablespace id and page number tries to get that page. If the +/** Given a tablespace id and page number tries to get that page. If the page is not in the buffer pool it is not loaded and NULL is returned. -Suitable for using when holding the lock_sys_t::mutex. */ -UNIV_INTERN +Suitable for using when holding the lock_sys_t::mutex. 
+@param[in] page_id page id +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@return pointer to a page or NULL */ buf_block_t* buf_page_try_get_func( -/*==================*/ - ulint space_id,/*!< in: tablespace id */ - ulint page_no,/*!< in: page number */ - ulint rw_latch, /*!< in: RW_S_LATCH, RW_X_LATCH */ - bool possibly_freed, /*!< in: don't mind if page is freed */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr); /*!< in: mini-transaction */ - -/** Tries to get a page. If the page is not in the buffer pool it is -not loaded. Suitable for using when holding the lock_sys_t::mutex. -@param space_id in: tablespace id -@param page_no in: page number -@param mtr in: mini-transaction -@return the page if in buffer pool, NULL if not */ -#define buf_page_try_get(space_id, page_no, mtr) \ - buf_page_try_get_func(space_id, page_no, RW_S_LATCH, false, \ - __FILE__, __LINE__, mtr); - -/********************************************************************//** -Get read access to a compressed page (usually of type + const page_id_t& page_id, + const char* file, + ulint line, + mtr_t* mtr); + +/** Tries to get a page. +If the page is not in the buffer pool it is not loaded. Suitable for using +when holding the lock_sys_t::mutex. +@param[in] page_id page identifier +@param[in] mtr mini-transaction +@return the page if in buffer pool, NULL if not */ +#define buf_page_try_get(page_id, mtr) \ + buf_page_try_get_func((page_id), __FILE__, __LINE__, mtr); + +/** Get read access to a compressed page (usually of type FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2). The page must be released with buf_page_release_zip(). NOTE: the page is not protected by any latch. Mutual exclusion has to be implemented at a higher level. In other words, all possible accesses to a given page through this function must be protected by the same set of mutexes or latches. -@return pointer to the block, or NULL if not compressed */ -UNIV_INTERN +@param[in] page_id page id +@param[in] page_size page size +@return pointer to the block */ buf_page_t* buf_page_get_zip( -/*=============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size */ - ulint offset);/*!< in: page number */ -/********************************************************************//** -This is the general function used to get access to a database page. -@return pointer to the block or NULL */ -UNIV_INTERN + const page_id_t& page_id, + const page_size_t& page_size); + +/** This is the general function used to get access to a database page. 
+@param[in] page_id page id +@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH +@param[in] guess guessed block or NULL +@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL, +BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH +@param[in] file file name +@param[in] line line where called +@param[in] mtr mini-transaction +@param[out] err DB_SUCCESS or error code +@param[in] dirty_with_no_latch + mark page as dirty even if page + is being pinned without any latch +@return pointer to the block or NULL */ buf_block_t* buf_page_get_gen( -/*=============*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - ulint offset, /*!< in: page number */ - ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ - buf_block_t* guess, /*!< in: guessed block or NULL */ - ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL, - BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH or - BUF_GET_IF_IN_POOL_OR_WATCH */ - const char* file, /*!< in: file name */ - ulint line, /*!< in: line where called */ - mtr_t* mtr, /*!< in: mini-transaction */ - dberr_t* err = NULL); /*!< out: error code */ -/********************************************************************//** -Initializes a page to the buffer buf_pool. The page is usually not read + const page_id_t& page_id, + const page_size_t& page_size, + ulint rw_latch, + buf_block_t* guess, + ulint mode, + const char* file, + ulint line, + mtr_t* mtr, + dberr_t* err, + bool dirty_with_no_latch = false); + +/** Initializes a page to the buffer buf_pool. The page is usually not read from a file even if it cannot be found in the buffer buf_pool. This is one of the functions which perform to a block a state transition NOT_USED => FILE_PAGE (the other is buf_page_get_gen). -@return pointer to the block, page bufferfixed */ -UNIV_INTERN +@param[in] page_id page id +@param[in] page_size page size +@param[in] mtr mini-transaction +@return pointer to the block, page bufferfixed */ buf_block_t* buf_page_create( -/*============*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space in units of - a page */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - mtr_t* mtr); /*!< in: mini-transaction handle */ + const page_id_t& page_id, + const page_size_t& page_size, + mtr_t* mtr); + #else /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. */ -UNIV_INTERN + +/** Inits a page to the buffer buf_pool, for use in mysqlbackup --restore. +@param[in] page_id page id +@param[in] page_size page size +@param[in,out] block block to init */ void buf_page_init_for_backup_restore( -/*=============================*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space - in units of a page */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - buf_block_t* block); /*!< in: block to init */ + const page_id_t& page_id, + const page_size_t& page_size, + buf_block_t* block); + #endif /* !UNIV_HOTBACKUP */ #ifndef UNIV_HOTBACKUP @@ -471,12 +621,11 @@ buf_page_release_zip( /*=================*/ buf_page_t* bpage); /*!< in: buffer block */ /********************************************************************//** -Decrements the bufferfix count of a buffer control block and releases -a latch, if specified. */ +Releases a latch, if specified. 
*/ UNIV_INLINE void -buf_page_release( -/*=============*/ +buf_page_release_latch( +/*=====================*/ buf_block_t* block, /*!< in: buffer block */ ulint rw_latch); /*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ @@ -484,53 +633,47 @@ buf_page_release( Moves a page to the start of the buffer pool LRU list. This high-level function can be used to prevent an important page from slipping out of the buffer pool. */ -UNIV_INTERN void buf_page_make_young( /*================*/ buf_page_t* bpage); /*!< in: buffer block of a file page */ -/********************************************************************//** -Returns TRUE if the page can be found in the buffer pool hash table. +/** Returns TRUE if the page can be found in the buffer pool hash table. NOTE that it is possible that the page is not yet read from disk, though. - -@return TRUE if found in the page hash table */ +@param[in] page_id page id +@return TRUE if found in the page hash table */ UNIV_INLINE ibool buf_page_peek( -/*==========*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: page number */ -#if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG -/********************************************************************//** -Sets file_page_was_freed TRUE if the page is found in the buffer pool. + const page_id_t& page_id); + +#ifdef UNIV_DEBUG + +/** Sets file_page_was_freed TRUE if the page is found in the buffer pool. This function should be called when we free a file page and want the debug version to check that it is not accessed any more unless reallocated. -@return control block if found in page hash table, otherwise NULL */ -UNIV_INTERN +@param[in] page_id page id +@return control block if found in page hash table, otherwise NULL */ buf_page_t* buf_page_set_file_page_was_freed( -/*=============================*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: page number */ -/********************************************************************//** -Sets file_page_was_freed FALSE if the page is found in the buffer pool. + const page_id_t& page_id); + +/** Sets file_page_was_freed FALSE if the page is found in the buffer pool. This function should be called when we free a file page and want the debug version to check that it is not accessed any more unless reallocated. -@return control block if found in page hash table, otherwise NULL */ -UNIV_INTERN +@param[in] page_id page id +@return control block if found in page hash table, otherwise NULL */ buf_page_t* buf_page_reset_file_page_was_freed( -/*===============================*/ - ulint space, /*!< in: space id */ - ulint offset); /*!< in: page number */ -#endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ + const page_id_t& page_id); + +#endif /* UNIV_DEBUG */ /********************************************************************//** Reads the freed_page_clock of a buffer block. -@return freed_page_clock */ +@return freed_page_clock */ UNIV_INLINE ulint buf_page_get_freed_page_clock( @@ -539,7 +682,7 @@ buf_page_get_freed_page_clock( MY_ATTRIBUTE((pure)); /********************************************************************//** Reads the freed_page_clock of a buffer block. -@return freed_page_clock */ +@return freed_page_clock */ UNIV_INLINE ulint buf_block_get_freed_page_clock( @@ -553,7 +696,7 @@ meaning that it is not in danger of getting evicted and also implying that it has been accessed recently. Note that this is for heuristics only and does not reserve buffer pool mutex. 
-@return TRUE if block is close to MRU end of LRU */ +@return TRUE if block is close to MRU end of LRU */ UNIV_INLINE ibool buf_page_peek_if_young( @@ -563,7 +706,7 @@ buf_page_peek_if_young( Recommends a move of a block to the start of the LRU list if there is danger of dropping from the buffer pool. NOTE: does not reserve the buffer pool mutex. -@return TRUE if should be made younger */ +@return TRUE if should be made younger */ UNIV_INLINE ibool buf_page_peek_if_too_old( @@ -572,7 +715,7 @@ buf_page_peek_if_too_old( /********************************************************************//** Gets the youngest modification log sequence number for a frame. Returns zero if not file page or no modification occurred yet. -@return newest modification to page */ +@return newest modification to page */ UNIV_INLINE lsn_t buf_page_get_newest_modification( @@ -591,7 +734,7 @@ buf_block_modify_clock_inc( /********************************************************************//** Returns the value of the modify clock. The caller must have an s-lock or x-lock on the block. -@return value */ +@return value */ UNIV_INLINE ib_uint64_t buf_block_get_modify_clock( @@ -603,67 +746,97 @@ UNIV_INLINE void buf_block_buf_fix_inc_func( /*=======================*/ -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG const char* file, /*!< in: file name */ ulint line, /*!< in: line */ -# endif /* UNIV_SYNC_DEBUG */ +# endif /* UNIV_DEBUG */ buf_block_t* block) /*!< in/out: block to bufferfix */ MY_ATTRIBUTE((nonnull)); -/*******************************************************************//** -Increments the bufferfix count. */ +/** Increments the bufferfix count. +@param[in,out] bpage block to bufferfix +@return the count */ UNIV_INLINE -void +ulint buf_block_fix( -/*===========*/ - buf_block_t* block); /*!< in/out: block to bufferfix */ + buf_page_t* bpage); -/*******************************************************************//** -Increments the bufferfix count. */ +/** Increments the bufferfix count. +@param[in,out] block block to bufferfix +@return the count */ UNIV_INLINE -void +ulint +buf_block_fix( + buf_block_t* block); + +/** Decrements the bufferfix count. +@param[in,out] bpage block to bufferunfix +@return the remaining buffer-fix count */ +UNIV_INLINE +ulint buf_block_unfix( -/*===========*/ - buf_block_t* block); /*!< in/out: block to bufferfix */ + buf_page_t* bpage); +/** Decrements the bufferfix count. +@param[in,out] block block to bufferunfix +@return the remaining buffer-fix count */ +UNIV_INLINE +ulint +buf_block_unfix( + buf_block_t* block); -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG /** Increments the bufferfix count. -@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ +@param[in,out] b block to bufferfix +@param[in] f file name where requested +@param[in] l line number where requested */ # define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(f,l,b) -# else /* UNIV_SYNC_DEBUG */ +# else /* UNIV_DEBUG */ /** Increments the bufferfix count. 
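/* Illustrative sketch, assuming the declarations above: buf_block_fix() and
buf_block_unfix() now return the resulting buffer-fix count, so callers can
assert on it. pin_block_example() is a hypothetical helper, not an InnoDB
function. */
static void
pin_block_example(buf_block_t* block)
{
	/* A buffer-fixed block cannot be relocated or evicted. */
	ulint	fix_count = buf_block_fix(block);
	ut_ad(fix_count > 0);

	/* ... use the block without latching its frame ... */

	fix_count = buf_block_unfix(block);
	(void) fix_count;	/* remaining buffer-fix count */
}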
-@param b in/out: block to bufferfix -@param f in: file name where requested -@param l in: line number where requested */ +@param[in,out] b block to bufferfix +@param[in] f file name where requested +@param[in] l line number where requested */ # define buf_block_buf_fix_inc(b,f,l) buf_block_buf_fix_inc_func(b) -# endif /* UNIV_SYNC_DEBUG */ +# endif /* UNIV_DEBUG */ #else /* !UNIV_HOTBACKUP */ # define buf_block_modify_clock_inc(block) ((void) 0) #endif /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Checks if a page is corrupt. -@return TRUE if corrupted */ -UNIV_INTERN -ibool -buf_page_is_corrupted( -/*==================*/ - bool check_lsn, /*!< in: true if we need to check the - and complain about the LSN */ - const byte* read_buf, /*!< in: a database page */ - ulint zip_size) /*!< in: size of compressed page; - 0 for uncompressed pages */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/********************************************************************//** -Checks if a page is all zeroes. -@return TRUE if the page is all zeroes */ +#endif /* !UNIV_INNOCHECKSUM */ + +/** Checks if a page contains only zeroes. +@param[in] read_buf database page +@param[in] page_size page size +@return true if page is filled with zeroes */ bool buf_page_is_zeroes( -/*===============*/ - const byte* read_buf, /*!< in: a database page */ - const ulint zip_size); /*!< in: size of compressed page; - 0 for uncompressed pages */ + const byte* read_buf, + const page_size_t& page_size); + +/** Checks if a page is corrupt. +@param[in] check_lsn true if we need to check and complain about +the LSN +@param[in] read_buf database page +@param[in] page_size page size +@param[in] skip_checksum if true, skip checksum +@param[in] page_no page number of given read_buf +@param[in] strict_check true if strict-check option is enabled +@param[in] is_log_enabled true if log option is enabled +@param[in] log_file file pointer to log_file +@return TRUE if corrupted */ +ibool +buf_page_is_corrupted( + bool check_lsn, + const byte* read_buf, + const page_size_t& page_size, + bool skip_checksum +#ifdef UNIV_INNOCHECKSUM + ,uintmax_t page_no, + bool strict_check, + bool is_log_enabled, + FILE* log_file +#endif /* UNIV_INNOCHECKSUM */ +) __attribute__((warn_unused_result)); + +#ifndef UNIV_INNOCHECKSUM #ifndef UNIV_HOTBACKUP /**********************************************************************//** Gets the space id, page offset, and byte offset within page of a @@ -678,7 +851,7 @@ buf_ptr_get_fsp_addr( /**********************************************************************//** Gets the hash value of a block. This can be used in searches in the lock hash table. -@return lock hash value */ +@return lock hash value */ UNIV_INLINE ulint buf_block_get_lock_hash_val( @@ -689,8 +862,7 @@ buf_block_get_lock_hash_val( /*********************************************************************//** Finds a block in the buffer pool that points to a given compressed page. -@return buffer block pointing to the compressed page, or NULL */ -UNIV_INTERN +@return buffer block pointing to the compressed page, or NULL */ buf_block_t* buf_pool_contains_zip( /*==================*/ @@ -711,8 +883,7 @@ buf_frame_align( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /*********************************************************************//** Validates the buffer pool data structure. 
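/* Illustrative sketch, assuming the declarations above: a server-side call
of buf_page_is_corrupted(); the extra arguments are compiled in only for the
innochecksum tool (UNIV_INNOCHECKSUM). check_frame_example() is a
hypothetical helper, not an InnoDB function. */
static bool
check_frame_example(const byte* read_buf, const page_size_t& page_size)
{
	if (buf_page_is_zeroes(read_buf, page_size)) {
		/* An all-zero page is not reported as corrupted. */
		return(false);
	}

	/* check_lsn=true also validates the page LSN field;
	skip_checksum=false verifies the stored checksum. */
	return(buf_page_is_corrupted(true, read_buf, page_size, false));
}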
-@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool buf_validate(void); /*==============*/ @@ -720,7 +891,6 @@ buf_validate(void); #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /*********************************************************************//** Prints info of the buffer pool data structure. */ -UNIV_INTERN void buf_print(void); /*============*/ @@ -733,23 +903,20 @@ enum buf_page_print_flags { BUF_PAGE_PRINT_NO_FULL = 2 }; -/********************************************************************//** -Prints a page to stderr. */ -UNIV_INTERN +/** Prints a page to stderr. +@param[in] read_buf a database page +@param[in] page_size page size +@param[in] flags 0 or BUF_PAGE_PRINT_NO_CRASH or +BUF_PAGE_PRINT_NO_FULL */ void buf_page_print( -/*===========*/ - const byte* read_buf, /*!< in: a database page */ - ulint zip_size, /*!< in: compressed page size, or - 0 for uncompressed pages */ - ulint flags) /*!< in: 0 or - BUF_PAGE_PRINT_NO_CRASH or - BUF_PAGE_PRINT_NO_FULL */ - UNIV_COLD MY_ATTRIBUTE((nonnull)); + const byte* read_buf, + const page_size_t& page_size, + ulint flags); + /********************************************************************//** Decompress a block. -@return TRUE if successful */ -UNIV_INTERN +@return TRUE if successful */ ibool buf_zip_decompress( /*===============*/ @@ -759,22 +926,19 @@ buf_zip_decompress( #ifdef UNIV_DEBUG /*********************************************************************//** Returns the number of latched pages in the buffer pool. -@return number of latched pages */ -UNIV_INTERN +@return number of latched pages */ ulint buf_get_latched_pages_number(void); /*==============================*/ #endif /* UNIV_DEBUG */ /*********************************************************************//** Returns the number of pending buf pool read ios. -@return number of pending read I/O operations */ -UNIV_INTERN +@return number of pending read I/O operations */ ulint buf_get_n_pending_read_ios(void); /*============================*/ /*********************************************************************//** Prints info of the buffer i/o. */ -UNIV_INTERN void buf_print_io( /*=========*/ @@ -783,7 +947,6 @@ buf_print_io( Collect buffer pool stats information for a buffer pool. Also record aggregated stats if there are more than one buffer pool in the server */ -UNIV_INTERN void buf_stats_get_pool_info( /*====================*/ @@ -794,36 +957,31 @@ buf_stats_get_pool_info( /*********************************************************************//** Returns the ratio in percents of modified pages in the buffer pool / database pages in the buffer pool. -@return modified page percentage ratio */ -UNIV_INTERN +@return modified page percentage ratio */ double buf_get_modified_ratio_pct(void); /*============================*/ /**********************************************************************//** Refreshes the statistics used to print per-second averages. */ -UNIV_INTERN void buf_refresh_io_stats( /*=================*/ buf_pool_t* buf_pool); /*!< buffer pool instance */ /**********************************************************************//** Refreshes the statistics used to print per-second averages. */ -UNIV_INTERN void buf_refresh_io_stats_all(void); /*=================*/ /*********************************************************************//** Asserts that all file pages in the buffer are in a replaceable state. 
-@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool buf_all_freed(void); /*===============*/ /*********************************************************************//** Checks that there currently are no pending i/o-operations for the buffer pool. -@return number of pending i/o operations */ -UNIV_INTERN +@return number of pending i/o operations */ ulint buf_pool_check_no_pending_io(void); /*==============================*/ @@ -831,7 +989,6 @@ buf_pool_check_no_pending_io(void); Invalidates the file pages in the buffer pool when an archive recovery is completed. All the file pages buffered must be in a replaceable state when this function is called: not latched and not modified. */ -UNIV_INTERN void buf_pool_invalidate(void); /*=====================*/ @@ -841,7 +998,7 @@ buf_pool_invalidate(void); --------------------------- LOWER LEVEL ROUTINES ------------------------- =========================================================================*/ -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG /*********************************************************************//** Adds latch level info for the rw-lock protecting the buffer frame. This should be called in the debug version after a successful latching of a @@ -852,13 +1009,13 @@ buf_block_dbg_add_level( /*====================*/ buf_block_t* block, /*!< in: buffer page where we have acquired latch */ - ulint level); /*!< in: latching order level */ -#else /* UNIV_SYNC_DEBUG */ + latch_level_t level); /*!< in: latching order level */ +#else /* UNIV_DEBUG */ # define buf_block_dbg_add_level(block, level) /* nothing */ -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ /*********************************************************************//** Gets the state of a block. -@return state */ +@return state */ UNIV_INLINE enum buf_page_state buf_page_get_state( @@ -876,7 +1033,7 @@ buf_get_state_name( block */ /*********************************************************************//** Gets the state of a block. -@return state */ +@return state */ UNIV_INLINE enum buf_page_state buf_block_get_state( @@ -901,7 +1058,7 @@ buf_block_set_state( enum buf_page_state state); /*!< in: state */ /*********************************************************************//** Determines if a block is mapped to a tablespace. -@return TRUE if mapped */ +@return TRUE if mapped */ UNIV_INLINE ibool buf_page_in_file( @@ -911,7 +1068,7 @@ buf_page_in_file( #ifndef UNIV_HOTBACKUP /*********************************************************************//** Determines if a block should be on unzip_LRU list. -@return TRUE if block belongs to unzip_LRU */ +@return TRUE if block belongs to unzip_LRU */ UNIV_INLINE ibool buf_page_belongs_to_unzip_LRU( @@ -921,9 +1078,9 @@ buf_page_belongs_to_unzip_LRU( /*********************************************************************//** Gets the mutex of a block. -@return pointer to mutex protecting bpage */ +@return pointer to mutex protecting bpage */ UNIV_INLINE -ib_mutex_t* +BPageMutex* buf_page_get_mutex( /*===============*/ const buf_page_t* bpage) /*!< in: pointer to control block */ @@ -931,7 +1088,7 @@ buf_page_get_mutex( /*********************************************************************//** Get the flush type of a page. 
-@return flush type */ +@return flush type */ UNIV_INLINE buf_flush_t buf_page_get_flush_type( @@ -946,18 +1103,19 @@ buf_page_set_flush_type( /*====================*/ buf_page_t* bpage, /*!< in: buffer page */ buf_flush_t flush_type); /*!< in: flush type */ -/*********************************************************************//** -Map a block to a file page. */ + +/** Map a block to a file page. +@param[in,out] block pointer to control block +@param[in] page_id page id */ UNIV_INLINE void buf_block_set_file_page( -/*====================*/ - buf_block_t* block, /*!< in/out: pointer to control block */ - ulint space, /*!< in: tablespace id */ - ulint page_no);/*!< in: page number */ + buf_block_t* block, + const page_id_t& page_id); + /*********************************************************************//** Gets the io_fix state of a block. -@return io_fix state */ +@return io_fix state */ UNIV_INLINE enum buf_io_fix buf_page_get_io_fix( @@ -966,7 +1124,7 @@ buf_page_get_io_fix( MY_ATTRIBUTE((pure)); /*********************************************************************//** Gets the io_fix state of a block. -@return io_fix state */ +@return io_fix state */ UNIV_INLINE enum buf_io_fix buf_block_get_io_fix( @@ -1022,7 +1180,7 @@ buf_page_can_relocate( /*********************************************************************//** Determine if a block has been flagged old. -@return TRUE if old */ +@return TRUE if old */ UNIV_INLINE ibool buf_page_is_old( @@ -1039,7 +1197,7 @@ buf_page_set_old( ibool old); /*!< in: old */ /*********************************************************************//** Determine the time of first access of a block in the buffer pool. -@return ut_time_ms() at the time of first access, 0 if not accessed */ +@return ut_time_ms() at the time of first access, 0 if not accessed */ UNIV_INLINE unsigned buf_page_is_accessed( @@ -1057,8 +1215,8 @@ buf_page_set_accessed( /*********************************************************************//** Gets the buf_block_t handle of a buffered file block if an uncompressed page frame exists, or NULL. Note: even though bpage is not declared a -const we don't update its value. It is safe to make this pure. -@return control block, or NULL */ +const we don't update its value. +@return control block, or NULL */ UNIV_INLINE buf_block_t* buf_page_get_block( @@ -1069,70 +1227,17 @@ buf_page_get_block( #ifdef UNIV_DEBUG /*********************************************************************//** Gets a pointer to the memory frame of a block. -@return pointer to the frame */ +@return pointer to the frame */ UNIV_INLINE buf_frame_t* buf_block_get_frame( /*================*/ const buf_block_t* block) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); + __attribute__((warn_unused_result)); #else /* UNIV_DEBUG */ # define buf_block_get_frame(block) (block)->frame #endif /* UNIV_DEBUG */ -/*********************************************************************//** -Gets the space id of a block. -@return space id */ -UNIV_INLINE -ulint -buf_page_get_space( -/*===============*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); -/*********************************************************************//** -Gets the space id of a block. 
-@return space id */ -UNIV_INLINE -ulint -buf_block_get_space( -/*================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); -/*********************************************************************//** -Gets the page number of a block. -@return page number */ -UNIV_INLINE -ulint -buf_page_get_page_no( -/*=================*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); -/*********************************************************************//** -Gets the page number of a block. -@return page number */ -UNIV_INLINE -ulint -buf_block_get_page_no( -/*==================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); -/*********************************************************************//** -Gets the compressed page size of a block. -@return compressed page size, or 0 */ -UNIV_INLINE -ulint -buf_page_get_zip_size( -/*==================*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); -/*********************************************************************//** -Gets the compressed page size of a block. -@return compressed page size, or 0 */ -UNIV_INLINE -ulint -buf_block_get_zip_size( -/*===================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ - MY_ATTRIBUTE((pure)); + /*********************************************************************//** Gets the compressed page descriptor corresponding to an uncompressed page if applicable. */ @@ -1141,8 +1246,7 @@ if applicable. */ #ifndef UNIV_HOTBACKUP /*******************************************************************//** Gets the block to whose frame the pointer is pointing to. -@return pointer to block, never NULL */ -UNIV_INTERN +@return pointer to block, never NULL */ buf_block_t* buf_block_align( /*============*/ @@ -1150,21 +1254,20 @@ buf_block_align( /********************************************************************//** Find out if a pointer belongs to a buf_block_t. It can be a pointer to the buf_block_t itself or a member of it -@return TRUE if ptr belongs to a buf_block_t struct */ -UNIV_INTERN +@return TRUE if ptr belongs to a buf_block_t struct */ ibool buf_pointer_is_block_field( /*=======================*/ const void* ptr); /*!< in: pointer not dereferenced */ /** Find out if a pointer corresponds to a buf_block_t::mutex. -@param m in: mutex candidate -@return TRUE if m is a buf_block_t::mutex */ +@param m in: mutex candidate +@return TRUE if m is a buf_block_t::mutex */ #define buf_pool_is_block_mutex(m) \ buf_pointer_is_block_field((const void*)(m)) /** Find out if a pointer corresponds to a buf_block_t::lock. -@param l in: rw-lock candidate -@return TRUE if l is a buf_block_t::lock */ +@param l in: rw-lock candidate +@return TRUE if l is a buf_block_t::lock */ #define buf_pool_is_block_lock(l) \ buf_pointer_is_block_field((const void*)(l)) @@ -1172,15 +1275,15 @@ buf_pointer_is_block_field( /*********************************************************************//** Gets the compressed page descriptor corresponding to an uncompressed page if applicable. 
-@return compressed page descriptor, or NULL */ +@return compressed page descriptor, or NULL */ UNIV_INLINE const page_zip_des_t* buf_frame_get_page_zip( /*===================*/ const byte* ptr); /*!< in: pointer to the page */ #endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */ -/********************************************************************//** -Function which inits a page for read to the buffer buf_pool. If the page is + +/** Inits a page for read to the buffer buf_pool. If the page is (1) already in buf_pool, or (2) if we specify to read only ibuf pages and the page is not an ibuf page, or (3) if the space is deleted or being deleted, @@ -1188,25 +1291,23 @@ then this function does nothing. Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock on the buffer frame. The io-handler must take care that the flag is cleared and the lock released later. -@return pointer to the block or NULL */ -UNIV_INTERN +@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED +@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ... +@param[in] page_id page id +@param[in] unzip TRUE=request uncompressed page +@return pointer to the block or NULL */ buf_page_t* buf_page_init_for_read( -/*===================*/ - dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */ - ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size, or 0 */ - ibool unzip, /*!< in: TRUE=request uncompressed page */ - ib_int64_t tablespace_version,/*!< in: prevents reading from a wrong - version of the tablespace in case we have done - DISCARD + IMPORT */ - ulint offset);/*!< in: page number */ + dberr_t* err, + ulint mode, + const page_id_t& page_id, + const page_size_t& page_size, + ibool unzip); + /********************************************************************//** Completes an asynchronous read or write request of a file page to or from the buffer pool. @return true if successful */ -UNIV_INTERN bool buf_page_io_complete( /*=================*/ @@ -1214,19 +1315,8 @@ buf_page_io_complete( bool evict = false);/*!< in: whether or not to evict the page from LRU list. */ /********************************************************************//** -Calculates a folded value of a file page address to use in the page hash -table. -@return the folded value */ -UNIV_INLINE -ulint -buf_page_address_fold( -/*==================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: offset of the page within space */ - MY_ATTRIBUTE((const)); -/********************************************************************//** Calculates the index of a buffer pool to the buf_pool[] array. -@return the position of the buffer pool in buf_pool[] */ +@return the position of the buffer pool in buf_pool[] */ UNIV_INLINE ulint buf_pool_index( @@ -1249,15 +1339,15 @@ buf_pool_t* buf_pool_from_block( /*================*/ const buf_block_t* block); /*!< in: block */ -/******************************************************************//** -Returns the buffer pool instance given space and offset of page + +/** Returns the buffer pool instance given a page id. 
+@param[in] page_id page id @return buffer pool */ UNIV_INLINE buf_pool_t* buf_pool_get( -/*==========*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: offset of the page within space */ + const page_id_t& page_id); + /******************************************************************//** Returns the buffer pool instance given its array index @return buffer pool */ @@ -1267,71 +1357,64 @@ buf_pool_from_array( /*================*/ ulint index); /*!< in: array index to get buffer pool instance from */ -/******************************************************************//** -Returns the control block of a file page, NULL if not found. -@return block, NULL if not found */ + +/** Returns the control block of a file page, NULL if not found. +@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@return block, NULL if not found */ UNIV_INLINE buf_page_t* buf_page_hash_get_low( -/*==================*/ - buf_pool_t* buf_pool,/*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space */ - ulint fold); /*!< in: buf_page_address_fold(space, offset) */ -/******************************************************************//** -Returns the control block of a file page, NULL if not found. + buf_pool_t* buf_pool, + const page_id_t& page_id); + +/** Returns the control block of a file page, NULL if not found. If the block is found and lock is not NULL then the appropriate page_hash lock is acquired in the specified lock mode. Otherwise, mode value is ignored. It is up to the caller to release the lock. If the block is found and the lock is NULL then the page_hash lock is released by this function. -@return block, NULL if not found, or watch sentinel (if watch is true) */ +@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@param[in,out] lock lock of the page hash acquired if bpage is +found, NULL otherwise. If NULL is passed then the hash_lock is released by +this function. +@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if +lock == NULL +@param[in] watch if true, return watch sentinel also. +@return pointer to the bpage or NULL; if NULL, lock is also NULL or +a watch sentinel. */ UNIV_INLINE buf_page_t* buf_page_hash_get_locked( -/*=====================*/ - /*!< out: pointer to the bpage, - or NULL; if NULL, hash_lock - is also NULL. */ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - rw_lock_t** lock, /*!< in/out: lock of the page - hash acquired if bpage is - found. NULL otherwise. If NULL - is passed then the hash_lock - is released by this function */ - ulint lock_mode, /*!< in: RW_LOCK_EX or - RW_LOCK_SHARED. Ignored if - lock == NULL */ - bool watch = false); /*!< in: if true, return watch - sentinel also. */ -/******************************************************************//** -Returns the control block of a file page, NULL if not found. + buf_pool_t* buf_pool, + const page_id_t& page_id, + rw_lock_t** lock, + ulint lock_mode, + bool watch = false); + +/** Returns the control block of a file page, NULL if not found. If the block is found and lock is not NULL then the appropriate page_hash lock is acquired in the specified lock mode. Otherwise, mode value is ignored. It is up to the caller to release the lock. If the block is found and the lock is NULL then the page_hash lock is released by this function. 
-@return block, NULL if not found */ +@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@param[in,out] lock lock of the page hash acquired if bpage is +found, NULL otherwise. If NULL is passed then the hash_lock is released by +this function. +@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if +lock == NULL +@return pointer to the block or NULL; if NULL, lock is also NULL. */ UNIV_INLINE buf_block_t* buf_block_hash_get_locked( -/*=====================*/ - /*!< out: pointer to the bpage, - or NULL; if NULL, hash_lock - is also NULL. */ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - rw_lock_t** lock, /*!< in/out: lock of the page - hash acquired if bpage is - found. NULL otherwise. If NULL - is passed then the hash_lock - is released by this function */ - ulint lock_mode); /*!< in: RW_LOCK_EX or - RW_LOCK_SHARED. Ignored if - lock == NULL */ + buf_pool_t* buf_pool, + const page_id_t& page_id, + rw_lock_t** lock, + ulint lock_mode); + /* There are four different ways we can try to get a bpage or block from the page hash: 1) Caller already holds the appropriate page hash lock: in the case call @@ -1339,75 +1422,70 @@ buf_page_hash_get_low() function. 2) Caller wants to hold page hash lock in x-mode 3) Caller wants to hold page hash lock in s-mode 4) Caller doesn't want to hold page hash lock */ -#define buf_page_hash_get_s_locked(b, s, o, l) \ - buf_page_hash_get_locked(b, s, o, l, RW_LOCK_SHARED) -#define buf_page_hash_get_x_locked(b, s, o, l) \ - buf_page_hash_get_locked(b, s, o, l, RW_LOCK_EX) -#define buf_page_hash_get(b, s, o) \ - buf_page_hash_get_locked(b, s, o, NULL, 0) -#define buf_page_get_also_watch(b, s, o) \ - buf_page_hash_get_locked(b, s, o, NULL, 0, true) - -#define buf_block_hash_get_s_locked(b, s, o, l) \ - buf_block_hash_get_locked(b, s, o, l, RW_LOCK_SHARED) -#define buf_block_hash_get_x_locked(b, s, o, l) \ - buf_block_hash_get_locked(b, s, o, l, RW_LOCK_EX) -#define buf_block_hash_get(b, s, o) \ - buf_block_hash_get_locked(b, s, o, NULL, 0) +#define buf_page_hash_get_s_locked(b, page_id, l) \ + buf_page_hash_get_locked(b, page_id, l, RW_LOCK_S) +#define buf_page_hash_get_x_locked(b, page_id, l) \ + buf_page_hash_get_locked(b, page_id, l, RW_LOCK_X) +#define buf_page_hash_get(b, page_id) \ + buf_page_hash_get_locked(b, page_id, NULL, 0) +#define buf_page_get_also_watch(b, page_id) \ + buf_page_hash_get_locked(b, page_id, NULL, 0, true) + +#define buf_block_hash_get_s_locked(b, page_id, l) \ + buf_block_hash_get_locked(b, page_id, l, RW_LOCK_S) +#define buf_block_hash_get_x_locked(b, page_id, l) \ + buf_block_hash_get_locked(b, page_id, l, RW_LOCK_X) +#define buf_block_hash_get(b, page_id) \ + buf_block_hash_get_locked(b, page_id, NULL, 0) /*********************************************************************//** Gets the current length of the free list of buffer blocks. -@return length of the free list */ -UNIV_INTERN +@return length of the free list */ ulint buf_get_free_list_len(void); /*=======================*/ /********************************************************************//** Determine if a block is a sentinel for a buffer pool watch. 
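/* Illustrative sketch, assuming the macros above: a page-hash lookup with
the page_id_t-based interface. On a hit the page_hash lock is returned held
in s-mode and must be released by the caller. lookup_example() is a
hypothetical helper, not an InnoDB function. */
static buf_page_t*
lookup_example(const page_id_t& page_id)
{
	buf_pool_t*	buf_pool = buf_pool_get(page_id);
	rw_lock_t*	hash_lock = NULL;

	buf_page_t*	bpage = buf_page_hash_get_s_locked(
		buf_pool, page_id, &hash_lock);

	if (bpage != NULL) {
		/* ... inspect bpage under the page_hash lock ... */
		rw_lock_s_unlock(hash_lock);
	}

	return(bpage);
}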
-@return TRUE if a sentinel for a buffer pool watch, FALSE if not */ -UNIV_INTERN +@return TRUE if a sentinel for a buffer pool watch, FALSE if not */ ibool buf_pool_watch_is_sentinel( /*=======================*/ - buf_pool_t* buf_pool, /*!< buffer pool instance */ + const buf_pool_t* buf_pool, /*!< buffer pool instance */ const buf_page_t* bpage) /*!< in: block */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/****************************************************************//** -Add watch for the given page to be read in. Caller must have the buffer pool + +/** Add watch for the given page to be read in. Caller must have +appropriate hash_lock for the bpage. This function may release the +hash_lock and reacquire it. +@param[in] page_id page id +@param[in,out] hash_lock hash_lock currently latched @return NULL if watch set, block if the page is in the buffer pool */ -UNIV_INTERN buf_page_t* buf_pool_watch_set( -/*===============*/ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - ulint fold) /*!< in: buf_page_address_fold(space, offset) */ + const page_id_t& page_id, + rw_lock_t** hash_lock) MY_ATTRIBUTE((warn_unused_result)); -/****************************************************************//** -Stop watching if the page has been read in. -buf_pool_watch_set(space,offset) must have returned NULL before. */ -UNIV_INTERN + +/** Stop watching if the page has been read in. +buf_pool_watch_set(space,offset) must have returned NULL before. +@param[in] page_id page id */ void buf_pool_watch_unset( -/*=================*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: page number */ -/****************************************************************//** -Check if the page has been read in. + const page_id_t& page_id); + +/** Check if the page has been read in. This may only be called after buf_pool_watch_set(space,offset) has returned NULL and before invoking buf_pool_watch_unset(space,offset). -@return FALSE if the given page was not read in, TRUE if it was */ -UNIV_INTERN +@param[in] page_id page id +@return FALSE if the given page was not read in, TRUE if it was */ ibool buf_pool_watch_occurred( -/*====================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) MY_ATTRIBUTE((warn_unused_result)); + /********************************************************************//** Get total buffer pool statistics. */ -UNIV_INTERN void buf_get_total_list_len( /*===================*/ @@ -1416,7 +1494,6 @@ buf_get_total_list_len( ulint* flush_list_len);/*!< out: length of all flush lists */ /********************************************************************//** Get total list size in bytes from all buffer pools. */ -UNIV_INTERN void buf_get_total_list_size_in_bytes( /*=============================*/ @@ -1424,7 +1501,6 @@ buf_get_total_list_size_in_bytes( in all buffer pools */ /********************************************************************//** Get total buffer pool statistics. */ -UNIV_INTERN void buf_get_total_stat( /*===============*/ @@ -1440,15 +1516,33 @@ buf_get_nth_chunk_block( ulint n, /*!< in: nth chunk in the buffer pool */ ulint* chunk_size); /*!< in: chunk size */ -/********************************************************************//** -Calculate the checksum of a page from compressed table and update the page. */ -UNIV_INTERN +/** Verify the possibility that a stored page is not in buffer pool. 
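/* Illustrative sketch, assuming the declarations above and the
buf_page_hash_lock_get() macro defined later in this header: the watch
protocol with the page_id_t interface. The caller holds the page_hash lock
in x-mode; buf_pool_watch_set() may release and reacquire it through the
in/out hash_lock argument. watch_example() is a hypothetical helper; the
real callers live in the change buffer code. */
static void
watch_example(const page_id_t& page_id)
{
	buf_pool_t*	buf_pool = buf_pool_get(page_id);
	rw_lock_t*	hash_lock = buf_page_hash_lock_get(buf_pool, page_id);

	rw_lock_x_lock(hash_lock);

	/* Returns NULL if the watch was set, or the block if the page
	is already in the buffer pool. */
	buf_page_t*	bpage = buf_pool_watch_set(page_id, &hash_lock);

	rw_lock_x_unlock(hash_lock);

	if (bpage == NULL) {
		/* ... later: was the page read in meanwhile? ... */
		if (buf_pool_watch_occurred(page_id)) {
			/* the watched page appeared in the pool */
		}
		buf_pool_watch_unset(page_id);
	}
}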
+@param[in] withdraw_clock withdraw clock when stored the page +@retval true if the page might be relocated */ +UNIV_INLINE +bool +buf_pool_is_obsolete( + ulint withdraw_clock); + +/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit, +if needed. +@param[in] size size in bytes +@return aligned size */ +UNIV_INLINE +ulint +buf_pool_size_align( + ulint size); + +/** Calculate the checksum of a page from compressed table and update the +page. +@param[in,out] page page to update +@param[in] size compressed page size +@param[in] lsn LSN to stamp on the page */ void buf_flush_update_zip_checksum( -/*==========================*/ - buf_frame_t* page, /*!< in/out: Page to update */ - ulint zip_size, /*!< in: Compressed page size */ - lsn_t lsn); /*!< in: Lsn to stamp on the page */ + buf_frame_t* page, + ulint size, + lsn_t lsn); #endif /* !UNIV_HOTBACKUP */ @@ -1528,7 +1622,8 @@ for compressed and uncompressed frames */ /** Number of bits used for buffer page states. */ #define BUF_PAGE_STATE_BITS 3 -struct buf_page_t{ +class buf_page_t { +public: /** @name General fields None of these bit-fields must be modified without holding buf_page_get_mutex() [buf_block_t::mutex or @@ -1537,36 +1632,21 @@ struct buf_page_t{ by buf_pool->mutex. */ /* @{ */ - ib_uint32_t space; /*!< tablespace id; also protected - by buf_pool->mutex. */ - ib_uint32_t offset; /*!< page number; also protected - by buf_pool->mutex. */ - /** count of how manyfold this block is currently bufferfixed */ -#ifdef PAGE_ATOMIC_REF_COUNT - ib_uint32_t buf_fix_count; + /** Page id. Protected by buf_pool mutex. */ + page_id_t id; - /** type of pending I/O operation; also protected by - buf_pool->mutex for writes only @see enum buf_io_fix */ - byte io_fix; + /** Page size. Protected by buf_pool mutex. */ + page_size_t size; - byte state; -#else - unsigned buf_fix_count:19; + /** Count of how manyfold this block is currently bufferfixed. */ + ib_uint32_t buf_fix_count; /** type of pending I/O operation; also protected by - buf_pool->mutex for writes only @see enum buf_io_fix */ - unsigned io_fix:2; + buf_pool->mutex for writes only */ + buf_io_fix io_fix; - /*!< state of the control block; also protected by buf_pool->mutex. - State transitions from BUF_BLOCK_READY_FOR_USE to BUF_BLOCK_MEMORY - need not be protected by buf_page_get_mutex(). @see enum buf_page_state. - State changes that are relevant to page_hash are additionally protected - by the appropriate page_hash mutex i.e.: if a page is in page_hash or - is being added to/removed from page_hash then the corresponding changes - must also be protected by page_hash mutex. */ - unsigned state:BUF_PAGE_STATE_BITS; - -#endif /* PAGE_ATOMIC_REF_COUNT */ + /** Block state. @see buf_page_in_file */ + buf_page_state state; #ifndef UNIV_HOTBACKUP unsigned flush_type:2; /*!< if this block is currently being @@ -1592,6 +1672,7 @@ struct buf_page_t{ if written again we check is TRIM operation needed. 
*/ + ulint space; /*!< space id */ unsigned key_version; /*!< key version for this block */ bool page_encrypted; /*!< page is page encrypted */ bool page_compressed;/*!< page is page compressed */ @@ -1631,7 +1712,7 @@ struct buf_page_t{ in one of the following lists in buf_pool: - - BUF_BLOCK_NOT_USED: free + - BUF_BLOCK_NOT_USED: free, withdraw - BUF_BLOCK_FILE_PAGE: flush_list - BUF_BLOCK_ZIP_DIRTY: flush_list - BUF_BLOCK_ZIP_PAGE: zip_clean @@ -1667,6 +1748,9 @@ struct buf_page_t{ should hold: in_free_list == (state == BUF_BLOCK_NOT_USED) */ #endif /* UNIV_DEBUG */ + + FlushObserver* flush_observer; /*!< flush observer */ + lsn_t newest_modification; /*!< log sequence number of the youngest modification to @@ -1714,13 +1798,13 @@ struct buf_page_t{ 0 if the block was never accessed in the buffer pool. Protected by block mutex */ -# if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG +# ifdef UNIV_DEBUG ibool file_page_was_freed; /*!< this is set to TRUE when fsp frees a page in buffer pool; protected by buf_pool->zip_mutex or buf_block_t::mutex. */ -# endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */ +# endif /* UNIV_DEBUG */ #endif /* !UNIV_HOTBACKUP */ }; @@ -1740,6 +1824,8 @@ struct buf_block_t{ aligned to an address divisible by UNIV_PAGE_SIZE */ #ifndef UNIV_HOTBACKUP + BPageLock lock; /*!< read-write lock of the buffer + frame */ UT_LIST_NODE_T(buf_block_t) unzip_LRU; /*!< node of the decompressed LRU list; a block is in the unzip_LRU list @@ -1749,15 +1835,8 @@ struct buf_block_t{ ibool in_unzip_LRU_list;/*!< TRUE if the page is in the decompressed LRU list; used in debugging */ + ibool in_withdraw_list; #endif /* UNIV_DEBUG */ - ib_mutex_t mutex; /*!< mutex protecting this block: - state (also protected by the buffer - pool mutex), io_fix, buf_fix_count, - and accessed; we introduce this new - mutex in InnoDB-5.1 to relieve - contention on the buffer pool mutex */ - rw_lock_t lock; /*!< read-write lock of the buffer - frame */ unsigned lock_hash_val:32;/*!< hashed value of the page address in the record lock hash table; protected by buf_block_t::lock @@ -1765,15 +1844,6 @@ struct buf_block_t{ in buf_page_get_gen(), buf_page_init_for_read() and buf_page_create()) */ - ibool check_index_page_at_flush; - /*!< TRUE if we know that this is - an index page, and want the database - to check its consistency before flush; - note that there may be pages in the - buffer pool which are index pages, - but this flag is not set because - we do not keep track of all pages; - NOT protected by any mutex */ /* @} */ /** @name Optimistic search field */ /* @{ */ @@ -1796,11 +1866,12 @@ struct buf_block_t{ ulint n_hash_helps; /*!< counter which controls building of a new hash index for the page */ - ulint n_fields; /*!< recommended prefix length for hash + volatile ulint n_bytes; /*!< recommended prefix length for hash + search: number of bytes in + an incomplete last field */ + volatile ulint n_fields; /*!< recommended prefix length for hash search: number of full fields */ - ulint n_bytes; /*!< recommended prefix: number of bytes - in an incomplete field */ - ibool left_side; /*!< TRUE or FALSE, depending on + volatile bool left_side; /*!< true or false, depending on whether the leftmost record of several records with the same prefix should be indexed in the hash index */ @@ -1808,7 +1879,7 @@ struct buf_block_t{ /** @name Hash search fields These 5 fields may only be modified when we have - an x-latch on btr_search_latch AND + an x-latch on search system AND - we are holding an s-latch or 
x-latch on buf_block_t::lock or - we know that buf_block_t::buf_fix_count == 0. @@ -1816,7 +1887,7 @@ struct buf_block_t{ in the buffer pool in buf0buf.cc. Another exception is that assigning block->index = NULL - is allowed whenever holding an x-latch on btr_search_latch. */ + is allowed whenever holding an x-latch on search system. */ /* @{ */ @@ -1839,8 +1910,17 @@ struct buf_block_t{ complete, though: there may have been hash collisions, record deletions, etc. */ + bool made_dirty_with_no_latch; + /*!< true if block has been made dirty + without acquiring X/SX latch as the + block belongs to temporary tablespace + and block is always accessed by a + single thread. */ + bool skip_flush_check; + /*!< Skip check in buf_dblwr_check_block + during bulk load, protected by lock.*/ /* @} */ -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG /** @name Debug fields */ /* @{ */ rw_lock_t debug_latch; /*!< in the debug version, each thread @@ -1849,16 +1929,23 @@ struct buf_block_t{ debug utilities in sync0rw */ /* @} */ # endif + BPageMutex mutex; /*!< mutex protecting this block: + state (also protected by the buffer + pool mutex), io_fix, buf_fix_count, + and accessed; we introduce this new + mutex in InnoDB-5.1 to relieve + contention on the buffer pool mutex */ #endif /* !UNIV_HOTBACKUP */ }; /** Check if a buf_block_t object is in a valid state -@param block buffer block -@return TRUE if valid */ +@param block buffer block +@return TRUE if valid */ #define buf_block_state_valid(block) \ (buf_block_get_state(block) >= BUF_BLOCK_NOT_USED \ && (buf_block_get_state(block) <= BUF_BLOCK_REMOVE_HASH)) + #ifndef UNIV_HOTBACKUP /**********************************************************************//** Compute the hash fold value for blocks in buf_pool->zip_hash. */ @@ -1890,7 +1977,7 @@ public: virtual ~HazardPointer() {} /** Get current value */ - buf_page_t* get() + buf_page_t* get() const { ut_ad(mutex_own(m_mutex)); return(m_hp); @@ -1920,7 +2007,7 @@ protected: /** Buffer pool instance */ const buf_pool_t* m_buf_pool; -#if UNIV_DEBUG +#ifdef UNIV_DEBUG /** mutex that protects access to the m_hp. */ const ib_mutex_t* m_mutex; #endif /* UNIV_DEBUG */ @@ -2074,15 +2161,14 @@ struct buf_pool_t{ /** @name General fields */ /* @{ */ - ib_mutex_t mutex; /*!< Buffer pool mutex of this + BufPoolMutex mutex; /*!< Buffer pool mutex of this instance */ - ib_mutex_t zip_mutex; /*!< Zip mutex of this buffer + BufPoolZipMutex zip_mutex; /*!< Zip mutex of this buffer pool instance, protects compressed only pages (of type buf_page_t, not buf_block_t */ ulint instance_no; /*!< Array index of this buffer pool instance */ - ulint old_pool_size; /*!< Old pool size in bytes */ ulint curr_pool_size; /*!< Current pool size in bytes */ ulint LRU_old_ratio; /*!< Reserve this much of the buffer pool for "old" blocks */ @@ -2093,9 +2179,19 @@ struct buf_pool_t{ #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ulint mutex_exit_forbidden; /*!< Forbid release mutex */ #endif - ulint n_chunks; /*!< number of buffer pool chunks */ + ut_allocator allocator; /*!< Allocator used for + allocating memory for the the "chunks" + member. 
*/ + volatile ulint n_chunks; /*!< number of buffer pool chunks */ + volatile ulint n_chunks_new; /*!< new number of buffer pool chunks */ buf_chunk_t* chunks; /*!< buffer pool chunks */ + buf_chunk_t* chunks_old; /*!< old buffer pool chunks to be freed + after resizing buffer pool */ ulint curr_size; /*!< current pool size in pages */ + ulint old_size; /*!< previous pool size in pages */ + ulint read_ahead_area;/*!< size in pages of the area which + the read-ahead algorithms read if + invoked */ hash_table_t* page_hash; /*!< hash table of buf_page_t or buf_block_t file pages, buf_page_in_file() == TRUE, @@ -2107,6 +2203,8 @@ struct buf_pool_t{ page_hash mutex. Lookups can happen while holding the buf_pool->mutex or the relevant page_hash mutex. */ + hash_table_t* page_hash_old; /*!< old pointer to page_hash to be + freed after resizing buffer pool */ hash_table_t* zip_hash; /*!< hash table of buf_block_t blocks whose frames are allocated to the zip buddy system, @@ -2130,7 +2228,7 @@ struct buf_pool_t{ /* @{ */ - ib_mutex_t flush_list_mutex;/*!< mutex protecting the + FlushListMutex flush_list_mutex;/*!< mutex protecting the flush list access. This mutex protects flush_list, flush_rbt and bpage::list pointers when @@ -2197,6 +2295,15 @@ struct buf_pool_t{ /*!< base node of the free block list */ + UT_LIST_BASE_NODE_T(buf_page_t) withdraw; + /*!< base node of the withdraw + block list. It is only used during + shrinking buffer pool size, not to + reuse the blocks will be removed */ + + ulint withdraw_target;/*!< target length of withdraw + block list, when withdrawing */ + /** "hazard pointer" used during scan of LRU while doing LRU list batch. Protected by buf_pool::mutex */ LRUHp lru_hp; @@ -2211,6 +2318,7 @@ struct buf_pool_t{ UT_LIST_BASE_NODE_T(buf_page_t) LRU; /*!< base node of the LRU list */ + buf_page_t* LRU_old; /*!< pointer to the about LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV oldest blocks in the LRU list; @@ -2258,6 +2366,15 @@ struct buf_pool_t{ /* @} */ }; +/** Print the given buf_pool_t object. +@param[in,out] out the output stream +@param[in] buf_pool the buf_pool_t object to be printed +@return the output stream */ +std::ostream& +operator<<( + std::ostream& out, + const buf_pool_t& buf_pool); + /** @name Accessors for buf_pool->mutex. Use these instead of accessing buf_pool->mutex directly. */ /* @{ */ @@ -2265,77 +2382,79 @@ Use these instead of accessing buf_pool->mutex directly. */ /** Test if a buffer pool mutex is owned. */ #define buf_pool_mutex_own(b) mutex_own(&b->mutex) /** Acquire a buffer pool mutex. */ -#define buf_pool_mutex_enter(b) do { \ - ut_ad(!mutex_own(&b->zip_mutex)); \ - mutex_enter(&b->mutex); \ +#define buf_pool_mutex_enter(b) do { \ + ut_ad(!(b)->zip_mutex.is_owned()); \ + mutex_enter(&(b)->mutex); \ } while (0) /** Test if flush list mutex is owned. */ -#define buf_flush_list_mutex_own(b) mutex_own(&b->flush_list_mutex) +#define buf_flush_list_mutex_own(b) mutex_own(&(b)->flush_list_mutex) /** Acquire the flush list mutex. */ -#define buf_flush_list_mutex_enter(b) do { \ - mutex_enter(&b->flush_list_mutex); \ +#define buf_flush_list_mutex_enter(b) do { \ + mutex_enter(&(b)->flush_list_mutex); \ } while (0) /** Release the flush list mutex. */ -# define buf_flush_list_mutex_exit(b) do { \ - mutex_exit(&b->flush_list_mutex); \ +# define buf_flush_list_mutex_exit(b) do { \ + mutex_exit(&(b)->flush_list_mutex); \ } while (0) + /** Test if block->mutex is owned. 
*/ -#define buf_block_mutex_own(b) mutex_own(&(b)->mutex) +#define buf_page_mutex_own(b) (b)->mutex.is_owned() /** Acquire the block->mutex. */ -#define buf_block_mutex_enter(b) do { \ +#define buf_page_mutex_enter(b) do { \ mutex_enter(&(b)->mutex); \ } while (0) /** Release the trx->mutex. */ -#define buf_block_mutex_exit(b) do { \ - mutex_exit(&(b)->mutex); \ +#define buf_page_mutex_exit(b) do { \ + (b)->mutex.exit(); \ } while (0) /** Get appropriate page_hash_lock. */ -# define buf_page_hash_lock_get(b, f) \ - hash_get_lock(b->page_hash, f) +# define buf_page_hash_lock_get(buf_pool, page_id) \ + hash_get_lock((buf_pool)->page_hash, (page_id).fold()) + +/** If not appropriate page_hash_lock, relock until appropriate. */ +# define buf_page_hash_lock_s_confirm(hash_lock, buf_pool, page_id)\ + hash_lock_s_confirm(hash_lock, (buf_pool)->page_hash, (page_id).fold()) -#ifdef UNIV_SYNC_DEBUG +# define buf_page_hash_lock_x_confirm(hash_lock, buf_pool, page_id)\ + hash_lock_x_confirm(hash_lock, (buf_pool)->page_hash, (page_id).fold()) + +#ifdef UNIV_DEBUG /** Test if page_hash lock is held in s-mode. */ -# define buf_page_hash_lock_held_s(b, p) \ - rw_lock_own(buf_page_hash_lock_get(b, \ - buf_page_address_fold(p->space, \ - p->offset)), \ - RW_LOCK_SHARED) +# define buf_page_hash_lock_held_s(buf_pool, bpage) \ + rw_lock_own(buf_page_hash_lock_get((buf_pool), (bpage)->id), RW_LOCK_S) /** Test if page_hash lock is held in x-mode. */ -# define buf_page_hash_lock_held_x(b, p) \ - rw_lock_own(buf_page_hash_lock_get(b, \ - buf_page_address_fold(p->space, \ - p->offset)), \ - RW_LOCK_EX) +# define buf_page_hash_lock_held_x(buf_pool, bpage) \ + rw_lock_own(buf_page_hash_lock_get((buf_pool), (bpage)->id), RW_LOCK_X) /** Test if page_hash lock is held in x or s-mode. */ -# define buf_page_hash_lock_held_s_or_x(b, p) \ - (buf_page_hash_lock_held_s(b, p) \ - || buf_page_hash_lock_held_x(b, p)) +# define buf_page_hash_lock_held_s_or_x(buf_pool, bpage)\ + (buf_page_hash_lock_held_s((buf_pool), (bpage)) \ + || buf_page_hash_lock_held_x((buf_pool), (bpage))) -# define buf_block_hash_lock_held_s(b, p) \ - buf_page_hash_lock_held_s(b, &(p->page)) +# define buf_block_hash_lock_held_s(buf_pool, block) \ + buf_page_hash_lock_held_s((buf_pool), &(block)->page) -# define buf_block_hash_lock_held_x(b, p) \ - buf_page_hash_lock_held_x(b, &(p->page)) +# define buf_block_hash_lock_held_x(buf_pool, block) \ + buf_page_hash_lock_held_x((buf_pool), &(block)->page) -# define buf_block_hash_lock_held_s_or_x(b, p) \ - buf_page_hash_lock_held_s_or_x(b, &(p->page)) -#else /* UNIV_SYNC_DEBUG */ +# define buf_block_hash_lock_held_s_or_x(buf_pool, block) \ + buf_page_hash_lock_held_s_or_x((buf_pool), &(block)->page) +#else /* UNIV_DEBUG */ # define buf_page_hash_lock_held_s(b, p) (TRUE) # define buf_page_hash_lock_held_x(b, p) (TRUE) # define buf_page_hash_lock_held_s_or_x(b, p) (TRUE) # define buf_block_hash_lock_held_s(b, p) (TRUE) # define buf_block_hash_lock_held_x(b, p) (TRUE) # define buf_block_hash_lock_held_s_or_x(b, p) (TRUE) -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /** Forbid the release of the buffer pool mutex. */ @@ -2416,6 +2535,12 @@ struct CheckInLRUList { { ut_a(elem->in_LRU_list); } + + static void validate(const buf_pool_t* buf_pool) + { + CheckInLRUList check; + ut_list_validate(buf_pool->LRU, check); + } }; /** Functor to validate the LRU list. 
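/* Illustrative sketch, assuming the macros above: the block mutex is now
taken through the buf_page_mutex_*() wrappers instead of the former
buf_block_mutex_*() names. inspect_block_example() is a hypothetical helper,
not an InnoDB function. */
static void
inspect_block_example(buf_block_t* block)
{
	buf_page_mutex_enter(block);
	ut_ad(buf_page_mutex_own(block));

	/* io_fix and buf_fix_count are protected by this mutex. */
	enum buf_io_fix	io_fix = buf_block_get_io_fix(block);
	(void) io_fix;

	buf_page_mutex_exit(block);
}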
*/ @@ -2424,6 +2549,12 @@ struct CheckInFreeList { { ut_a(elem->in_free_list); } + + static void validate(const buf_pool_t* buf_pool) + { + CheckInFreeList check; + ut_list_validate(buf_pool->free, check); + } }; struct CheckUnzipLRUAndLRUList { @@ -2432,11 +2563,18 @@ struct CheckUnzipLRUAndLRUList { ut_a(elem->page.in_LRU_list); ut_a(elem->in_unzip_LRU_list); } + + static void validate(const buf_pool_t* buf_pool) + { + CheckUnzipLRUAndLRUList check; + ut_list_validate(buf_pool->unzip_LRU, check); + } }; #endif /* UNIV_DEBUG || defined UNIV_BUF_DEBUG */ #ifndef UNIV_NONINL #include "buf0buf.ic" #endif +#endif /* !UNIV_INNOCHECKSUM */ #endif diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index e77c5a84202..bd75c7a3d22 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2014, 2015, MariaDB Corporation. @@ -36,13 +36,19 @@ Created 11/5/1995 Heikki Tuuri #include "buf0flu.h" #include "buf0lru.h" #include "buf0rea.h" +#include "sync0debug.h" +#include "fsp0types.h" +#include "ut0new.h" /** A chunk of buffers. The buffer pool is allocated in chunks. */ struct buf_chunk_t{ - ulint mem_size; /*!< allocated size of the chunk */ ulint size; /*!< size of frames[] and blocks[] */ - void* mem; /*!< pointer to the memory area which + unsigned char* mem; /*!< pointer to the memory area which was allocated for the frames */ + ut_new_pfx_t mem_pfx; /*!< Auxiliary structure, describing + "mem". It is filled by the allocator's + alloc method and later passed to the + deallocate method. */ buf_block_t* blocks; /*!< array of buffer control blocks */ }; @@ -59,7 +65,7 @@ buf_pool_get_curr_size(void) /********************************************************************//** Calculates the index of a buffer pool to the buf_pool[] array. -@return the position of the buffer pool in buf_pool[] */ +@return the position of the buffer pool in buf_pool[] */ UNIV_INLINE ulint buf_pool_index( @@ -112,7 +118,7 @@ buf_pool_get_n_pages(void) /********************************************************************//** Reads the freed_page_clock of a buffer block. -@return freed_page_clock */ +@return freed_page_clock */ UNIV_INLINE ulint buf_page_get_freed_page_clock( @@ -125,7 +131,7 @@ buf_page_get_freed_page_clock( /********************************************************************//** Reads the freed_page_clock of a buffer block. -@return freed_page_clock */ +@return freed_page_clock */ UNIV_INLINE ulint buf_block_get_freed_page_clock( @@ -141,7 +147,7 @@ meaning that it is not in danger of getting evicted and also implying that it has been accessed recently. Note that this is for heuristics only and does not reserve buffer pool mutex. -@return TRUE if block is close to MRU end of LRU */ +@return TRUE if block is close to MRU end of LRU */ UNIV_INLINE ibool buf_page_peek_if_young( @@ -162,7 +168,7 @@ buf_page_peek_if_young( Recommends a move of a block to the start of the LRU list if there is danger of dropping from the buffer pool. NOTE: does not reserve the buffer pool mutex. 
-@return TRUE if should be made younger */ +@return TRUE if should be made younger */ UNIV_INLINE ibool buf_page_peek_if_too_old( @@ -179,6 +185,12 @@ buf_page_peek_if_too_old( } else if (buf_LRU_old_threshold_ms && bpage->old) { unsigned access_time = buf_page_is_accessed(bpage); + /* It is possible that the below comparison returns an + unexpected result. 2^32 milliseconds pass in about 50 days, + so if the difference between ut_time_ms() and access_time + is e.g. 50 days + 15 ms, then the below will behave as if + it is 15 ms. This is known and fixing it would require to + increase buf_page_t::access_time from 32 to 64 bits. */ if (access_time > 0 && ((ib_uint32_t) (ut_time_ms() - access_time)) >= buf_LRU_old_threshold_ms) { @@ -195,14 +207,14 @@ buf_page_peek_if_too_old( /*********************************************************************//** Gets the state of a block. -@return state */ +@return state */ UNIV_INLINE enum buf_page_state buf_page_get_state( /*===============*/ const buf_page_t* bpage) /*!< in: pointer to the control block */ { - enum buf_page_state state = (enum buf_page_state) bpage->state; + enum buf_page_state state = bpage->state; #ifdef UNIV_DEBUG switch (state) { @@ -224,7 +236,7 @@ buf_page_get_state( } /*********************************************************************//** Gets the state of a block. -@return state */ +@return state */ UNIV_INLINE enum buf_page_state buf_block_get_state( @@ -341,7 +353,7 @@ buf_block_set_state( /*********************************************************************//** Determines if a block is mapped to a tablespace. -@return TRUE if mapped */ +@return TRUE if mapped */ UNIV_INLINE ibool buf_page_in_file( @@ -369,7 +381,7 @@ buf_page_in_file( #ifndef UNIV_HOTBACKUP /*********************************************************************//** Determines if a block should be on unzip_LRU list. -@return TRUE if block belongs to unzip_LRU */ +@return TRUE if block belongs to unzip_LRU */ UNIV_INLINE ibool buf_page_belongs_to_unzip_LRU( @@ -384,23 +396,22 @@ buf_page_belongs_to_unzip_LRU( /*********************************************************************//** Gets the mutex of a block. -@return pointer to mutex protecting bpage */ +@return pointer to mutex protecting bpage */ UNIV_INLINE -ib_mutex_t* +BPageMutex* buf_page_get_mutex( /*===============*/ const buf_page_t* bpage) /*!< in: pointer to control block */ { + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + switch (buf_page_get_state(bpage)) { case BUF_BLOCK_POOL_WATCH: ut_error; return(NULL); case BUF_BLOCK_ZIP_PAGE: - case BUF_BLOCK_ZIP_DIRTY: { - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - + case BUF_BLOCK_ZIP_DIRTY: return(&buf_pool->zip_mutex); - } default: return(&((buf_block_t*) bpage)->mutex); } @@ -408,7 +419,7 @@ buf_page_get_mutex( /*********************************************************************//** Get the flush type of a page. -@return flush type */ +@return flush type */ UNIV_INLINE buf_flush_t buf_page_get_flush_type( @@ -443,24 +454,22 @@ buf_page_set_flush_type( ut_ad(buf_page_get_flush_type(bpage) == flush_type); } -/*********************************************************************//** -Map a block to a file page. */ +/** Map a block to a file page. 
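The new comment above documents a 32-bit wrap-around in the access_time heuristic. The following stand-alone snippet (plain integers standing in for ut_time_ms() and buf_page_t::access_time, which are not defined here) shows the truncation it describes:

#include <cstdint>
#include <iostream>

int main()
{
    // Stand-ins for ut_time_ms() and the 32-bit buf_page_t::access_time field.
    uint32_t access_time = 1000;                              // time of first access
    uint64_t now_ms = access_time + 4294967296ULL + 15;       // 2^32 ms (~49.7 days) + 15 ms later

    uint64_t true_age = now_ms - access_time;                 // 4294967311 ms
    uint32_t seen_age = (uint32_t) (now_ms - access_time);    // 15 ms after truncation to 32 bits

    // A threshold check of the form "seen_age >= buf_LRU_old_threshold_ms" therefore
    // treats an access that is about 50 days old as if it happened 15 ms ago.
    std::cout << true_age << " ms vs " << seen_age << " ms\n";
    return 0;
}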
+@param[in,out] block pointer to control block +@param[in] page_id page id */ UNIV_INLINE void buf_block_set_file_page( -/*====================*/ - buf_block_t* block, /*!< in/out: pointer to control block */ - ulint space, /*!< in: tablespace id */ - ulint page_no)/*!< in: page number */ + buf_block_t* block, + const page_id_t& page_id) { buf_block_set_state(block, BUF_BLOCK_FILE_PAGE); - block->page.space = static_cast(space); - block->page.offset = static_cast(page_no); + block->page.id.copy_from(page_id); } /*********************************************************************//** Gets the io_fix state of a block. -@return io_fix state */ +@return io_fix state */ UNIV_INLINE enum buf_io_fix buf_page_get_io_fix( @@ -469,7 +478,8 @@ buf_page_get_io_fix( { ut_ad(bpage != NULL); - enum buf_io_fix io_fix = (enum buf_io_fix) bpage->io_fix; + enum buf_io_fix io_fix = bpage->io_fix; + #ifdef UNIV_DEBUG switch (io_fix) { case BUF_IO_NONE: @@ -485,7 +495,7 @@ buf_page_get_io_fix( /*********************************************************************//** Gets the io_fix state of a block. -@return io_fix state */ +@return io_fix state */ UNIV_INLINE enum buf_io_fix buf_block_get_io_fix( @@ -507,7 +517,7 @@ buf_page_set_io_fix( #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ ut_ad(mutex_own(buf_page_get_mutex(bpage))); bpage->io_fix = io_fix; @@ -544,7 +554,7 @@ buf_page_set_sticky( #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE); @@ -562,7 +572,7 @@ buf_page_unset_sticky( #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN); @@ -581,7 +591,7 @@ buf_page_can_relocate( #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ ut_ad(mutex_own(buf_page_get_mutex(bpage))); ut_ad(buf_page_in_file(bpage)); ut_ad(bpage->in_LRU_list); @@ -592,7 +602,7 @@ buf_page_can_relocate( /*********************************************************************//** Determine if a block has been flagged old. -@return TRUE if old */ +@return TRUE if old */ UNIV_INLINE ibool buf_page_is_old( @@ -602,7 +612,7 @@ buf_page_is_old( #ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); ut_ad(buf_pool_mutex_own(buf_pool)); -#endif +#endif /* UNIV_DEBUG */ ut_ad(buf_page_in_file(bpage)); return(bpage->old); @@ -646,7 +656,7 @@ buf_page_set_old( /*********************************************************************//** Determine the time of first access of a block in the buffer pool. -@return ut_time_ms() at the time of first access, 0 if not accessed */ +@return ut_time_ms() at the time of first access, 0 if not accessed */ UNIV_INLINE unsigned buf_page_is_accessed( @@ -683,7 +693,7 @@ buf_page_set_accessed( /*********************************************************************//** Gets the buf_block_t handle of a buffered file block if an uncompressed page frame exists, or NULL. 
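The hunk above is one of many in this patch that replace a (space, page_no) pair with a page_id_t value. A rough sketch of the interface relied on here (copy_from(), equals_to(), fold()); the fold formula is borrowed from the buf_page_address_fold() helper that is removed later in this diff, not taken from the real class:

#include <iostream>

typedef unsigned long ulint;

class page_id_lite {
public:
    page_id_lite(ulint space, ulint page_no)
        : m_space(space), m_page_no(page_no) {}

    ulint space() const   { return m_space; }
    ulint page_no() const { return m_page_no; }

    // Folded value used to pick a page_hash cell / hash lock.
    ulint fold() const { return (m_space << 20) + m_space + m_page_no; }

    bool equals_to(const page_id_lite& other) const
    { return m_space == other.m_space && m_page_no == other.m_page_no; }

    void copy_from(const page_id_lite& other)
    { m_space = other.m_space; m_page_no = other.m_page_no; }

private:
    ulint m_space;
    ulint m_page_no;
};

int main()
{
    page_id_lite id(5, 42);
    std::cout << "fold(5, 42) = " << id.fold() << "\n";
    return 0;
}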
-@return control block, or NULL */ +@return control block, or NULL */ UNIV_INLINE buf_block_t* buf_page_get_block( @@ -705,7 +715,7 @@ buf_page_get_block( #ifdef UNIV_DEBUG /*********************************************************************//** Gets a pointer to the memory frame of a block. -@return pointer to the frame */ +@return pointer to the frame */ UNIV_INLINE buf_frame_t* buf_block_get_frame( @@ -742,50 +752,6 @@ ok: } #endif /* UNIV_DEBUG */ -/*********************************************************************//** -Gets the space id of a block. -@return space id */ -UNIV_INLINE -ulint -buf_page_get_space( -/*===============*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ -{ - ut_ad(bpage); - ut_a(buf_page_in_file(bpage)); - - return(bpage->space); -} - -/*********************************************************************//** -Gets the space id of a block. -@return space id */ -UNIV_INLINE -ulint -buf_block_get_space( -/*================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ -{ - ut_ad(block); - ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - - return(block->page.space); -} - -/*********************************************************************//** -Gets the page number of a block. -@return page number */ -UNIV_INLINE -ulint -buf_page_get_page_no( -/*=================*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ -{ - ut_ad(bpage); - ut_a(buf_page_in_file(bpage)); - - return(bpage->offset); -} /*********************************************************************** FIXME_FTS Gets the frame the pointer is pointing to. */ UNIV_INLINE @@ -804,53 +770,12 @@ buf_frame_align( return(frame); } -/*********************************************************************//** -Gets the page number of a block. -@return page number */ -UNIV_INLINE -ulint -buf_block_get_page_no( -/*==================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ -{ - ut_ad(block); - ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - - return(block->page.offset); -} - -/*********************************************************************//** -Gets the compressed page size of a block. -@return compressed page size, or 0 */ -UNIV_INLINE -ulint -buf_page_get_zip_size( -/*==================*/ - const buf_page_t* bpage) /*!< in: pointer to the control block */ -{ - return(bpage->zip.ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << bpage->zip.ssize : 0); -} - -/*********************************************************************//** -Gets the compressed page size of a block. -@return compressed page size, or 0 */ -UNIV_INLINE -ulint -buf_block_get_zip_size( -/*===================*/ - const buf_block_t* block) /*!< in: pointer to the control block */ -{ - return(block->page.zip.ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << block->page.zip.ssize : 0); -} - #ifndef UNIV_HOTBACKUP #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG /*********************************************************************//** Gets the compressed page descriptor corresponding to an uncompressed page if applicable. -@return compressed page descriptor, or NULL */ +@return compressed page descriptor, or NULL */ UNIV_INLINE const page_zip_des_t* buf_frame_get_page_zip( @@ -885,7 +810,7 @@ buf_ptr_get_fsp_addr( /**********************************************************************//** Gets the hash value of the page the pointer is pointing to. This can be used in searches in the lock hash table. 
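The zip-size getters removed above encode the compressed page size in a small shift value, zip.ssize. A worked example of that encoding, assuming UNIV_ZIP_SIZE_MIN is 1024 (the constant may differ between versions):

#include <iostream>

int main()
{
    const unsigned long UNIV_ZIP_SIZE_MIN = 1024;   // assumed value

    for (unsigned ssize = 0; ssize <= 5; ssize++) {
        unsigned long zip_size =
            ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << ssize : 0;
        std::cout << "ssize=" << ssize << " -> " << zip_size << " bytes\n";
    }
    // Prints 0, 1024, 2048, 4096, 8192, 16384: the shift encodes powers of two
    // starting at 1KB, and 0 doubles as the "not compressed" marker.
    return 0;
}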
-@return lock hash value */ +@return lock hash value */ UNIV_INLINE ulint buf_block_get_lock_hash_val( @@ -894,10 +819,9 @@ buf_block_get_lock_hash_val( { ut_ad(block); ut_ad(buf_page_in_file(&block->page)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_EXCLUSIVE) - || rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_X) + || rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_S)); + return(block->lock_hash_val); } @@ -912,8 +836,8 @@ buf_page_alloc_descriptor(void) { buf_page_t* bpage; - bpage = (buf_page_t*) ut_malloc(sizeof *bpage); - ut_d(memset(bpage, 0, sizeof *bpage)); + bpage = (buf_page_t*) ut_zalloc_nokey(sizeof *bpage); + ut_ad(bpage); UNIV_MEM_ALLOC(bpage, sizeof *bpage); return(bpage); @@ -942,13 +866,13 @@ buf_block_free( buf_pool_mutex_enter(buf_pool); - mutex_enter(&block->mutex); + buf_page_mutex_enter(block); ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE); buf_LRU_block_free_non_file_page(block); - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); buf_pool_mutex_exit(buf_pool); } @@ -956,7 +880,7 @@ buf_block_free( /*********************************************************************//** Copies contents of a buffer frame to a given buffer. -@return buf */ +@return buf */ UNIV_INLINE byte* buf_frame_copy( @@ -972,24 +896,10 @@ buf_frame_copy( } #ifndef UNIV_HOTBACKUP -/********************************************************************//** -Calculates a folded value of a file page address to use in the page hash -table. -@return the folded value */ -UNIV_INLINE -ulint -buf_page_address_fold( -/*==================*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: offset of the page within space */ -{ - return((space << 20) + space + offset); -} - /********************************************************************//** Gets the youngest modification log sequence number for a frame. Returns zero if not file page or no modification occurred yet. -@return newest modification to page */ +@return newest modification to page */ UNIV_INLINE lsn_t buf_page_get_newest_modification( @@ -998,7 +908,7 @@ buf_page_get_newest_modification( page frame */ { lsn_t lsn; - ib_mutex_t* block_mutex = buf_page_get_mutex(bpage); + BPageMutex* block_mutex = buf_page_get_mutex(bpage); mutex_enter(block_mutex); @@ -1023,13 +933,17 @@ buf_block_modify_clock_inc( /*=======================*/ buf_block_t* block) /*!< in: block */ { -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG buf_pool_t* buf_pool = buf_pool_from_bpage((buf_page_t*) block); - ut_ad((buf_pool_mutex_own(buf_pool) - && (block->page.buf_fix_count == 0)) - || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE)); -#endif /* UNIV_SYNC_DEBUG */ + /* No latch is acquired if block belongs to intrinsic table. */ + if (!fsp_is_system_temporary(block->page.id.space())) { + ut_ad((buf_pool_mutex_own(buf_pool) + && (block->page.buf_fix_count == 0)) + || rw_lock_own_flagged(&block->lock, + RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX)); + } +#endif /* UNIV_DEBUG */ block->modify_clock++; } @@ -1037,38 +951,45 @@ buf_block_modify_clock_inc( /********************************************************************//** Returns the value of the modify clock. The caller must have an s-lock or x-lock on the block. 
-@return value */ +@return value */ UNIV_INLINE ib_uint64_t buf_block_get_modify_clock( /*=======================*/ buf_block_t* block) /*!< in: block */ { -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED) - || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE)); -#endif /* UNIV_SYNC_DEBUG */ +#ifdef UNIV_DEBUG + /* No latch is acquired if block belongs to intrinsic table. */ + if (!fsp_is_system_temporary(block->page.id.space())) { + ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S) + || rw_lock_own(&(block->lock), RW_LOCK_X) + || rw_lock_own(&(block->lock), RW_LOCK_SX)); + } +#endif /* UNIV_DEBUG */ return(block->modify_clock); } -/*******************************************************************//** -Increments the bufferfix count. */ +/** Increments the bufferfix count. +@param[in,out] bpage block to bufferfix +@return the count */ UNIV_INLINE -void +ulint buf_block_fix( -/*===========*/ - buf_block_t* block) /*!< in/out: block to bufferfix */ + buf_page_t* bpage) { -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_increment_uint32(&block->page.buf_fix_count, 1); -#else - ib_mutex_t* block_mutex = buf_page_get_mutex(&block->page); + return(os_atomic_increment_uint32(&bpage->buf_fix_count, 1)); +} - mutex_enter(block_mutex); - ++block->page.buf_fix_count; - mutex_exit(block_mutex); -#endif /* PAGE_ATOMIC_REF_COUNT */ +/** Increments the bufferfix count. +@param[in,out] block block to bufferfix +@return the count */ +UNIV_INLINE +ulint +buf_block_fix( + buf_block_t* block) +{ + return(buf_block_fix(&block->page)); } /*******************************************************************//** @@ -1077,47 +998,48 @@ UNIV_INLINE void buf_block_buf_fix_inc_func( /*=======================*/ -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG const char* file, /*!< in: file name */ ulint line, /*!< in: line */ -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ buf_block_t* block) /*!< in/out: block to bufferfix */ { -#ifdef UNIV_SYNC_DEBUG - ibool ret; - - ret = rw_lock_s_lock_nowait(&(block->debug_latch), file, line); - ut_a(ret); -#endif /* UNIV_SYNC_DEBUG */ - -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_increment_uint32(&block->page.buf_fix_count, 1); -#else - ut_ad(mutex_own(&block->mutex)); +#ifdef UNIV_DEBUG + /* No debug latch is acquired if block belongs to system temporary. + Debug latch is not of much help if access to block is single + threaded. */ + if (!fsp_is_system_temporary(block->page.id.space())) { + ibool ret; + ret = rw_lock_s_lock_nowait(&block->debug_latch, file, line); + ut_a(ret); + } +#endif /* UNIV_DEBUG */ - ++block->page.buf_fix_count; -#endif /* PAGE_ATOMIC_REF_COUNT */ + buf_block_fix(block); } -/*******************************************************************//** -Decrements the bufferfix count. */ +/** Decrements the bufferfix count. +@param[in,out] bpage block to bufferunfix +@return the remaining buffer-fix count */ UNIV_INLINE -void +ulint buf_block_unfix( -/*============*/ - buf_block_t* block) /*!< in/out: block to bufferunfix */ + buf_page_t* bpage) { - ut_ad(block->page.buf_fix_count > 0); - -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_decrement_uint32(&block->page.buf_fix_count, 1); -#else - ib_mutex_t* block_mutex = buf_page_get_mutex(&block->page); + ulint count = os_atomic_decrement_uint32(&bpage->buf_fix_count, 1); + ut_ad(count + 1 != 0); + return(count); +} - mutex_enter(block_mutex); - --block->page.buf_fix_count; - mutex_exit(block_mutex); -#endif /* PAGE_ATOMIC_REF_COUNT */ +/** Decrements the bufferfix count. 
+@param[in,out] block block to bufferunfix +@return the remaining buffer-fix count */ +UNIV_INLINE +ulint +buf_block_unfix( + buf_block_t* block) +{ + return(buf_block_unfix(&block->page)); } /*******************************************************************//** @@ -1128,39 +1050,34 @@ buf_block_buf_fix_dec( /*==================*/ buf_block_t* block) /*!< in/out: block to bufferunfix */ { - ut_ad(block->page.buf_fix_count > 0); - -#ifdef PAGE_ATOMIC_REF_COUNT - os_atomic_decrement_uint32(&block->page.buf_fix_count, 1); -#else - mutex_enter(&block->mutex); - --block->page.buf_fix_count; - mutex_exit(&block->mutex); -#endif /* PAGE_ATOMIC_REF_COUNT */ + buf_block_unfix(block); -#ifdef UNIV_SYNC_DEBUG - rw_lock_s_unlock(&block->debug_latch); -#endif +#ifdef UNIV_DEBUG + /* No debug latch is acquired if block belongs to system temporary. + Debug latch is not of much help if access to block is single + threaded. */ + if (!fsp_is_system_temporary(block->page.id.space())) { + rw_lock_s_unlock(&block->debug_latch); + } +#endif /* UNIV_DEBUG */ } -/******************************************************************//** -Returns the buffer pool instance given space and offset of page +/** Returns the buffer pool instance given a page id. +@param[in] page_id page id @return buffer pool */ UNIV_INLINE buf_pool_t* buf_pool_get( -/*==========*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: offset of the page within space */ + const page_id_t& page_id) { - ulint fold; - ulint index; - ulint ignored_offset; + /* 2log of BUF_READ_AHEAD_AREA (64) */ + ulint ignored_page_no = page_id.page_no() >> 6; - ignored_offset = offset >> 6; /* 2log of BUF_READ_AHEAD_AREA (64)*/ - fold = buf_page_address_fold(space, ignored_offset); - index = fold % srv_buf_pool_instances; - return(&buf_pool_ptr[index]); + page_id_t id(page_id.space(), ignored_page_no); + + ulint i = id.fold() % srv_buf_pool_instances; + + return(&buf_pool_ptr[i]); } /******************************************************************//** @@ -1178,103 +1095,98 @@ buf_pool_from_array( return(&buf_pool_ptr[index]); } -/******************************************************************//** -Returns the control block of a file page, NULL if not found. -@return block, NULL if not found */ +/** Returns the control block of a file page, NULL if not found. 
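buf_pool_get() above picks a buffer pool instance by dropping the low 6 bits of the page number, so that one 64-page read-ahead area always maps to a single instance, and then folding the result. The same arithmetic in isolation, with an assumed instance count and the stand-in fold formula from the earlier page_id sketch:

#include <iostream>

typedef unsigned long ulint;

static ulint fold(ulint space, ulint page_no)
{ return (space << 20) + space + page_no; }

static ulint buf_pool_index_for(ulint space, ulint page_no, ulint n_instances)
{
    ulint ignored_page_no = page_no >> 6;   // 2log of BUF_READ_AHEAD_AREA (64)
    return fold(space, ignored_page_no) % n_instances;
}

int main()
{
    const ulint n_instances = 8;   // e.g. innodb_buffer_pool_instances=8

    // Pages 0..63 of the same tablespace land in the same instance;
    // page 64 starts the next read-ahead area and may land elsewhere.
    std::cout << buf_pool_index_for(7, 0,  n_instances) << " "
              << buf_pool_index_for(7, 63, n_instances) << " "
              << buf_pool_index_for(7, 64, n_instances) << "\n";
    return 0;
}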
+@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@return block, NULL if not found */ UNIV_INLINE buf_page_t* buf_page_hash_get_low( -/*==================*/ - buf_pool_t* buf_pool,/*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: offset of the page within space */ - ulint fold) /*!< in: buf_page_address_fold(space, offset) */ + buf_pool_t* buf_pool, + const page_id_t& page_id) { buf_page_t* bpage; -#ifdef UNIV_SYNC_DEBUG - ulint hash_fold; +#ifdef UNIV_DEBUG rw_lock_t* hash_lock; - hash_fold = buf_page_address_fold(space, offset); - ut_ad(hash_fold == fold); - - hash_lock = hash_get_lock(buf_pool->page_hash, fold); - ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX) - || rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold()); + ut_ad(rw_lock_own(hash_lock, RW_LOCK_X) + || rw_lock_own(hash_lock, RW_LOCK_S)); +#endif /* UNIV_DEBUG */ /* Look for the page in the hash table */ - HASH_SEARCH(hash, buf_pool->page_hash, fold, buf_page_t*, bpage, + HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*, + bpage, ut_ad(bpage->in_page_hash && !bpage->in_zip_hash && buf_page_in_file(bpage)), - bpage->space == space && bpage->offset == offset); + page_id.equals_to(bpage->id)); if (bpage) { ut_a(buf_page_in_file(bpage)); ut_ad(bpage->in_page_hash); ut_ad(!bpage->in_zip_hash); + ut_ad(buf_pool_from_bpage(bpage) == buf_pool); } return(bpage); } -/******************************************************************//** -Returns the control block of a file page, NULL if not found. +/** Returns the control block of a file page, NULL if not found. If the block is found and lock is not NULL then the appropriate page_hash lock is acquired in the specified lock mode. Otherwise, mode value is ignored. It is up to the caller to release the lock. If the block is found and the lock is NULL then the page_hash lock is released by this function. -@return block, NULL if not found, or watch sentinel (if watch is true) */ +@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@param[in,out] lock lock of the page hash acquired if bpage is +found, NULL otherwise. If NULL is passed then the hash_lock is released by +this function. +@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if +lock == NULL +@param[in] watch if true, return watch sentinel also. +@return pointer to the bpage or NULL; if NULL, lock is also NULL or +a watch sentinel. */ UNIV_INLINE buf_page_t* buf_page_hash_get_locked( -/*=====================*/ - /*!< out: pointer to the bpage, - or NULL; if NULL, hash_lock - is also NULL. */ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - rw_lock_t** lock, /*!< in/out: lock of the page - hash acquired if bpage is - found. NULL otherwise. If NULL - is passed then the hash_lock - is released by this function */ - ulint lock_mode, /*!< in: RW_LOCK_EX or - RW_LOCK_SHARED. Ignored if - lock == NULL */ - bool watch) /*!< in: if true, return watch - sentinel also. 
*/ + buf_pool_t* buf_pool, + const page_id_t& page_id, + rw_lock_t** lock, + ulint lock_mode, + bool watch) { buf_page_t* bpage = NULL; - ulint fold; rw_lock_t* hash_lock; - ulint mode = RW_LOCK_SHARED; + ulint mode = RW_LOCK_S; if (lock != NULL) { *lock = NULL; - ut_ad(lock_mode == RW_LOCK_EX - || lock_mode == RW_LOCK_SHARED); + ut_ad(lock_mode == RW_LOCK_X + || lock_mode == RW_LOCK_S); mode = lock_mode; } - fold = buf_page_address_fold(space, offset); - hash_lock = hash_get_lock(buf_pool->page_hash, fold); + hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold()); -#ifdef UNIV_SYNC_DEBUG - ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX) - && !rw_lock_own(hash_lock, RW_LOCK_SHARED)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X) + && !rw_lock_own(hash_lock, RW_LOCK_S)); - if (mode == RW_LOCK_SHARED) { + if (mode == RW_LOCK_S) { rw_lock_s_lock(hash_lock); + + /* If not own buf_pool_mutex, page_hash can be changed. */ + hash_lock = hash_lock_s_confirm( + hash_lock, buf_pool->page_hash, page_id.fold()); } else { rw_lock_x_lock(hash_lock); + /* If not own buf_pool_mutex, page_hash can be changed. */ + hash_lock = hash_lock_x_confirm( + hash_lock, buf_pool->page_hash, page_id.fold()); } - bpage = buf_page_hash_get_low(buf_pool, space, offset, fold); + bpage = buf_page_hash_get_low(buf_pool, page_id); if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) { if (!watch) { @@ -1284,8 +1196,7 @@ buf_page_hash_get_locked( } ut_ad(buf_page_in_file(bpage)); - ut_ad(offset == bpage->offset); - ut_ad(space == bpage->space); + ut_ad(page_id.equals_to(bpage->id)); if (lock == NULL) { /* The caller wants us to release the page_hash lock */ @@ -1297,7 +1208,7 @@ buf_page_hash_get_locked( } unlock_and_exit: - if (mode == RW_LOCK_SHARED) { + if (mode == RW_LOCK_S) { rw_lock_s_unlock(hash_lock); } else { rw_lock_x_unlock(hash_lock); @@ -1306,52 +1217,46 @@ exit: return(bpage); } -/******************************************************************//** -Returns the control block of a file page, NULL if not found. +/** Returns the control block of a file page, NULL if not found. If the block is found and lock is not NULL then the appropriate page_hash lock is acquired in the specified lock mode. Otherwise, mode value is ignored. It is up to the caller to release the lock. If the block is found and the lock is NULL then the page_hash lock is released by this function. -@return block, NULL if not found */ +@param[in] buf_pool buffer pool instance +@param[in] page_id page id +@param[in,out] lock lock of the page hash acquired if bpage is +found, NULL otherwise. If NULL is passed then the hash_lock is released by +this function. +@param[in] lock_mode RW_LOCK_X or RW_LOCK_S. Ignored if +lock == NULL +@return pointer to the block or NULL; if NULL, lock is also NULL. */ UNIV_INLINE buf_block_t* buf_block_hash_get_locked( -/*=====================*/ - /*!< out: pointer to the bpage, - or NULL; if NULL, hash_lock - is also NULL. */ - buf_pool_t* buf_pool, /*!< buffer pool instance */ - ulint space, /*!< in: space id */ - ulint offset, /*!< in: page number */ - rw_lock_t** lock, /*!< in/out: lock of the page - hash acquired if bpage is - found. NULL otherwise. If NULL - is passed then the hash_lock - is released by this function */ - ulint lock_mode) /*!< in: RW_LOCK_EX or - RW_LOCK_SHARED. 
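The hunk above adds hash_lock_s_confirm()/hash_lock_x_confirm() calls right after acquiring a page_hash lock because, as the new comments note, page_hash can change while the caller does not hold buf_pool_mutex. A simplified illustration of that confirm-and-retry idiom, using generic types rather than the real hash_table_t API:

#include <mutex>
#include <vector>

struct hash_table_lite {
    std::vector<std::mutex>* cell_locks;    // replaced wholesale on resize

    std::mutex* lock_for(unsigned long fold) const
    { return &(*cell_locks)[fold % cell_locks->size()]; }
};

// Lock the cell covering `fold`, re-confirming after each acquisition that the
// lock we slept on is still the one protecting this fold value.
std::mutex* lock_cell_confirmed(const hash_table_lite& table, unsigned long fold)
{
    std::mutex* lock = table.lock_for(fold);
    for (;;) {
        lock->lock();
        std::mutex* latest = table.lock_for(fold);
        if (latest == lock) {
            return lock;            // still the right lock; keep it
        }
        lock->unlock();             // the table changed under us; retry
        lock = latest;
    }
}

int main()
{
    std::vector<std::mutex> locks(16);
    hash_table_lite table = { &locks };
    std::mutex* l = lock_cell_confirmed(table, 12345);
    l->unlock();
    return 0;
}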
Ignored if - lock == NULL */ + buf_pool_t* buf_pool, + const page_id_t& page_id, + rw_lock_t** lock, + ulint lock_mode) { buf_page_t* bpage = buf_page_hash_get_locked(buf_pool, - space, - offset, + page_id, lock, lock_mode); buf_block_t* block = buf_page_get_block(bpage); - if (block) { + if (block != NULL) { + ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); -#ifdef UNIV_SYNC_DEBUG ut_ad(!lock || rw_lock_own(*lock, lock_mode)); -#endif /* UNIV_SYNC_DEBUG */ + return(block); } else if (bpage) { /* It is not a block. Just a bpage */ ut_ad(buf_page_in_file(bpage)); if (lock) { - if (lock_mode == RW_LOCK_SHARED) { + if (lock_mode == RW_LOCK_S) { rw_lock_s_unlock(*lock); } else { rw_lock_x_unlock(*lock); @@ -1366,23 +1271,19 @@ buf_block_hash_get_locked( return(NULL); } -/********************************************************************//** -Returns TRUE if the page can be found in the buffer pool hash table. - +/** Returns TRUE if the page can be found in the buffer pool hash table. NOTE that it is possible that the page is not yet read from disk, though. - -@return TRUE if found in the page hash table */ +@param[in] page_id page id +@return TRUE if found in the page hash table */ UNIV_INLINE ibool buf_page_peek( -/*==========*/ - ulint space, /*!< in: space id */ - ulint offset) /*!< in: page number */ + const page_id_t& page_id) { - buf_pool_t* buf_pool = buf_pool_get(space, offset); + buf_pool_t* buf_pool = buf_pool_get(page_id); - return(buf_page_hash_get(buf_pool, space, offset) != NULL); + return(buf_page_hash_get(buf_pool, page_id) != NULL); } /********************************************************************//** @@ -1393,19 +1294,27 @@ buf_page_release_zip( /*=================*/ buf_page_t* bpage) /*!< in: buffer block */ { - buf_block_t* block; - - block = (buf_block_t*) bpage; + ut_ad(bpage); + ut_a(bpage->buf_fix_count > 0); switch (buf_page_get_state(bpage)) { case BUF_BLOCK_FILE_PAGE: -#ifdef UNIV_SYNC_DEBUG - rw_lock_s_unlock(&block->debug_latch); -#endif /* UNUV_SYNC_DEBUG */ +#ifdef UNIV_DEBUG + { + /* No debug latch is acquired if block belongs to system + temporary. Debug latch is not of much help if access to block + is single threaded. */ + buf_block_t* block = reinterpret_cast(bpage); + if (!fsp_is_system_temporary(block->page.id.space())) { + rw_lock_s_unlock(&block->debug_latch); + } + } /* Fall through */ +#endif /* UNIV_DEBUG */ + case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_DIRTY: - buf_block_unfix(block); + buf_block_unfix(reinterpret_cast(bpage)); return; case BUF_BLOCK_POOL_WATCH: @@ -1420,31 +1329,34 @@ buf_page_release_zip( } /********************************************************************//** -Decrements the bufferfix count of a buffer control block and releases -a latch, if specified. */ +Releases a latch, if specified. */ UNIV_INLINE void -buf_page_release( -/*=============*/ +buf_page_release_latch( +/*===================*/ buf_block_t* block, /*!< in: buffer block */ ulint rw_latch) /*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */ { - ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); +#ifdef UNIV_DEBUG + /* No debug latch is acquired if block belongs to system + temporary. Debug latch is not of much help if access to block + is single threaded. 
*/ + if (!fsp_is_system_temporary(block->page.id.space())) { + rw_lock_s_unlock(&block->debug_latch); + } +#endif /* UNIV_DEBUG */ -#ifdef UNIV_SYNC_DEBUG - rw_lock_s_unlock(&(block->debug_latch)); -#endif if (rw_latch == RW_S_LATCH) { - rw_lock_s_unlock(&(block->lock)); + rw_lock_s_unlock(&block->lock); + } else if (rw_latch == RW_SX_LATCH) { + rw_lock_sx_unlock(&block->lock); } else if (rw_latch == RW_X_LATCH) { - rw_lock_x_unlock(&(block->lock)); + rw_lock_x_unlock(&block->lock); } - - buf_block_unfix(block); } -#ifdef UNIV_SYNC_DEBUG +#ifdef UNIV_DEBUG /*********************************************************************//** Adds latch level info for the rw-lock protecting the buffer frame. This should be called in the debug version after a successful latching of a @@ -1455,12 +1367,12 @@ buf_block_dbg_add_level( /*====================*/ buf_block_t* block, /*!< in: buffer page where we have acquired latch */ - ulint level) /*!< in: latching order level */ + latch_level_t level) /*!< in: latching order level */ { - sync_thread_add_level(&block->lock, level, FALSE); + sync_check_lock(&block->lock, level); } -#endif /* UNIV_SYNC_DEBUG */ +#endif /* UNIV_DEBUG */ /********************************************************************//** Acquire mutex on all buffer pool instances. */ UNIV_INLINE @@ -1468,12 +1380,9 @@ void buf_pool_mutex_enter_all(void) /*==========================*/ { - ulint i; + for (ulint i = 0; i < srv_buf_pool_instances; ++i) { + buf_pool_t* buf_pool = buf_pool_from_array(i); - for (i = 0; i < srv_buf_pool_instances; i++) { - buf_pool_t* buf_pool; - - buf_pool = buf_pool_from_array(i); buf_pool_mutex_enter(buf_pool); } } @@ -1512,6 +1421,37 @@ buf_get_nth_chunk_block( return(chunk->blocks); } +/** Verify the possibility that a stored page is not in buffer pool. +@param[in] withdraw_clock withdraw clock when stored the page +@retval true if the page might be relocated */ +UNIV_INLINE +bool +buf_pool_is_obsolete( + ulint withdraw_clock) +{ + return(buf_pool_withdrawing + || buf_withdraw_clock != withdraw_clock); +} + +/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit, +if needed. +@param[in] size size in bytes +@return aligned size */ +UNIV_INLINE +ulint +buf_pool_size_align( + ulint size) +{ + const ulint m = srv_buf_pool_instances * srv_buf_pool_chunk_unit; + size = ut_max(size, srv_buf_pool_min_size); + + if (size % m == 0) { + return(size); + } else { + return((size / m + 1) * m); + } +} + /********************************************************************//** Get buf frame. */ UNIV_INLINE @@ -1530,5 +1470,4 @@ buf_page_get_frame( return ((buf_block_t*) bpage)->frame; } } - #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/buf0checksum.h b/storage/innobase/include/buf0checksum.h index 6818345f965..684c378e066 100644 --- a/storage/innobase/include/buf0checksum.h +++ b/storage/innobase/include/buf0checksum.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. 
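buf_pool_size_align() above rounds a requested buffer pool size up to a multiple of srv_buf_pool_instances * srv_buf_pool_chunk_unit. A worked example of that rounding, with assumed example values for the instance count, chunk unit and minimum size:

#include <algorithm>
#include <iostream>

typedef unsigned long long ulint64;

static ulint64 size_align(ulint64 size, ulint64 instances, ulint64 chunk_unit,
                          ulint64 min_size)
{
    const ulint64 m = instances * chunk_unit;
    size = std::max(size, min_size);
    return (size % m == 0) ? size : (size / m + 1) * m;
}

int main()
{
    const ulint64 MB = 1024 * 1024;

    // 8 instances x 128MB chunk unit => sizes are rounded up to 1024MB multiples.
    std::cout << size_align(1000 * MB, 8, 128 * MB, 5 * MB) / MB << " MB\n"; // 1024 MB
    std::cout << size_align(1024 * MB, 8, 128 * MB, 5 * MB) / MB << " MB\n"; // 1024 MB
    std::cout << size_align(1500 * MB, 8, 128 * MB, 5 * MB) / MB << " MB\n"; // 2048 MB
    return 0;
}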
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -33,23 +33,25 @@ Created Aug 11, 2011 Vasil Dimov /** Magic value to use instead of checksums when they are disabled */ #define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL -/********************************************************************//** -Calculates a page CRC32 which is stored to the page when it is written -to a file. Note that we must be careful to calculate the same value on -32-bit and 64-bit architectures. -@return checksum */ -UNIV_INTERN -ib_uint32_t +/** Calculates the CRC32 checksum of a page. The value is stored to the page +when it is written to a file and also checked for a match when reading from +the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian +variants. Note that we must be careful to calculate the same value on 32-bit +and 64-bit architectures. +@param[in] page buffer page (UNIV_PAGE_SIZE bytes) +@param[in] use_legacy_big_endian if true then use big endian +byteorder when converting byte strings to integers +@return checksum */ +uint32_t buf_calc_page_crc32( -/*================*/ - const byte* page); /*!< in: buffer page */ + const byte* page, + bool use_legacy_big_endian = false); /********************************************************************//** Calculates a page checksum which is stored to the page when it is written to a file. Note that we must be careful to calculate the same value on 32-bit and 64-bit architectures. -@return checksum */ -UNIV_INTERN +@return checksum */ ulint buf_calc_page_new_checksum( /*=======================*/ @@ -62,8 +64,7 @@ checksum. NOTE: we must first store the new formula checksum to FIL_PAGE_SPACE_OR_CHKSUM before calculating and storing this old checksum because this takes that field as an input! -@return checksum */ -UNIV_INTERN +@return checksum */ ulint buf_calc_page_old_checksum( /*=======================*/ @@ -71,8 +72,7 @@ buf_calc_page_old_checksum( /********************************************************************//** Return a printable string describing the checksum algorithm. -@return algorithm name */ -UNIV_INTERN +@return algorithm name */ const char* buf_checksum_algorithm_name( /*========================*/ diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index a62a6400d97..1f8f23edd4e 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,6 +29,7 @@ Created 2011/12/19 Inaam Rana #include "univ.i" #include "ut0byte.h" #include "log0log.h" +#include "buf0types.h" #include "log0recv.h" #ifndef UNIV_HOTBACKUP @@ -40,9 +41,10 @@ extern ibool buf_dblwr_being_created; /****************************************************************//** Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ -UNIV_INTERN -void +doublewrite buffer is placed on the trx system header page. +@return true if successful, false if not. 
*/ +__attribute__((warn_unused_result)) +bool buf_dblwr_create(void); /*==================*/ @@ -51,29 +53,24 @@ At a database startup initializes the doublewrite buffer memory structure if we already have a doublewrite buffer created in the data files. If we are upgrading to an InnoDB version which supports multiple tablespaces, then this function performs the necessary update operations. If we are in a crash -recovery, this function loads the pages from double write buffer into memory. */ -void +recovery, this function loads the pages from double write buffer into memory. +@return DB_SUCCESS or error code */ +dberr_t buf_dblwr_init_or_load_pages( -/*=========================*/ os_file_t file, - char* path, - bool load_corrupt_pages); + const char* path); -/****************************************************************//** -Process the double write buffer pages. */ +/** Process and remove the double write buffer pages for all tablespaces. */ void buf_dblwr_process(void); -/*===================*/ /****************************************************************//** frees doublewrite buffer. */ -UNIV_INTERN void buf_dblwr_free(void); /*================*/ /********************************************************************//** Updates the doublewrite buffer when an IO request is completed. */ -UNIV_INTERN void buf_dblwr_update( /*=============*/ @@ -83,7 +80,6 @@ buf_dblwr_update( Determines if a page number is located inside the doublewrite buffer. @return TRUE if the location is inside the two blocks of the doublewrite buffer */ -UNIV_INTERN ibool buf_dblwr_page_inside( /*==================*/ @@ -92,18 +88,23 @@ buf_dblwr_page_inside( Posts a buffer page for writing. If the doublewrite memory buffer is full, calls buf_dblwr_flush_buffered_writes and waits for for free space to appear. */ -UNIV_INTERN void buf_dblwr_add_to_batch( /*====================*/ buf_page_t* bpage); /*!< in: buffer block to write */ + +/********************************************************************//** +Flush a batch of writes to the datafiles that have already been +written to the dblwr buffer on disk. */ +void +buf_dblwr_sync_datafiles(); + /********************************************************************//** Flushes possible buffered writes from the doublewrite memory buffer to disk, and also wakes up the aio thread if simulated aio is used. It is very important to call this function after a batch of writes has been posted, and also when we may have to wait for a page latch! Otherwise a deadlock of threads can occur. */ -UNIV_INTERN void buf_dblwr_flush_buffered_writes(void); /*=================================*/ @@ -115,7 +116,6 @@ flushes in the doublewrite buffer are in use we wait here for one to become free. We are guaranteed that a slot will become free because any thread that is using a slot must also release the slot before leaving this function. */ -UNIV_INTERN void buf_dblwr_write_single_page( /*========================*/ diff --git a/storage/innobase/include/buf0dump.h b/storage/innobase/include/buf0dump.h index c704a8e97e0..3dbddfa6bf5 100644 --- a/storage/innobase/include/buf0dump.h +++ b/storage/innobase/include/buf0dump.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -33,7 +33,6 @@ Wakes up the buffer pool dump/load thread and instructs it to start a dump. This function is called by MySQL code via buffer_pool_dump_now() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_dump_start(); /*============*/ @@ -43,7 +42,6 @@ Wakes up the buffer pool dump/load thread and instructs it to start a load. This function is called by MySQL code via buffer_pool_load_now() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_load_start(); /*============*/ @@ -52,7 +50,6 @@ buf_load_start(); Aborts a currently running buffer pool load. This function is called by MySQL code via buffer_pool_load_abort() and it should return immediately because the whole MySQL is frozen during its execution. */ -UNIV_INTERN void buf_load_abort(); /*============*/ @@ -62,7 +59,7 @@ This is the main thread for buffer pool dump/load. It waits for an event and when waked up either performs a dump or load and sleeps again. @return this function does not return, it calls os_thread_exit() */ -extern "C" UNIV_INTERN +extern "C" os_thread_ret_t DECLARE_THREAD(buf_dump_thread)( /*============================*/ diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h index f1ca1039ccb..1d38c679f81 100644 --- a/storage/innobase/include/buf0flu.h +++ b/storage/innobase/include/buf0flu.h @@ -31,11 +31,15 @@ Created 11/5/1995 Heikki Tuuri #include "ut0byte.h" #include "log0log.h" #ifndef UNIV_HOTBACKUP -#include "mtr0types.h" #include "buf0types.h" /** Flag indicating if the page_cleaner is in active state. */ -extern ibool buf_page_cleaner_is_active; +extern bool buf_page_cleaner_is_active; + +/** Event to synchronise with the flushing. */ +extern os_event_t buf_flush_event; + +class ut_stage_alter_t; /** Event to synchronise with the flushing. */ extern os_event_t buf_flush_event; @@ -50,7 +54,6 @@ struct flush_counters_t { /********************************************************************//** Remove a block from the flush list of modified blocks. */ -UNIV_INTERN void buf_flush_remove( /*=============*/ @@ -59,7 +62,6 @@ buf_flush_remove( Relocates a buffer control block on the flush_list. Note that it is assumed that the contents of bpage has already been copied to dpage. */ -UNIV_INTERN void buf_flush_relocate_on_flush_list( /*=============================*/ @@ -67,22 +69,25 @@ buf_flush_relocate_on_flush_list( buf_page_t* dpage); /*!< in/out: destination block */ /********************************************************************//** Updates the flush system data structures when a write is completed. */ -UNIV_INTERN void buf_flush_write_complete( /*=====================*/ buf_page_t* bpage); /*!< in: pointer to the block in question */ #endif /* !UNIV_HOTBACKUP */ -/********************************************************************//** -Initializes a page for writing to the tablespace. */ -UNIV_INTERN +/** Initialize a page for writing to the tablespace. 
+@param[in] block buffer block; NULL if bypassing the buffer pool +@param[in,out] page page frame +@param[in,out] page_zip_ compressed page, or NULL if uncompressed +@param[in] newest_lsn newest modification LSN to the page +@param[in] skip_checksum whether to disable the page checksum */ void buf_flush_init_for_writing( -/*=======================*/ - byte* page, /*!< in/out: page */ - void* page_zip_, /*!< in/out: compressed page, or NULL */ - lsn_t newest_lsn); /*!< in: newest modification lsn - to the page */ + const buf_block_t* block, + byte* page, + void* page_zip_, + lsn_t newest_lsn, + bool skip_checksum); + #ifndef UNIV_HOTBACKUP # if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG /********************************************************************//** @@ -91,7 +96,6 @@ NOTE: buf_pool->mutex and block->mutex must be held upon entering this function, and they will be released by this function after flushing. This is loosely based on buf_flush_batch() and buf_flush_page(). @return TRUE if the page was flushed and the mutexes released */ -UNIV_INTERN ibool buf_flush_page_try( /*===============*/ @@ -99,28 +103,46 @@ buf_flush_page_try( buf_block_t* block) /*!< in/out: buffer control block */ MY_ATTRIBUTE((nonnull, warn_unused_result)); # endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ -/*******************************************************************//** -This utility flushes dirty blocks from the end of the flush list of -all buffer pool instances. +/** Do flushing batch of a given type. NOTE: The calling thread is not allowed to own any latches on pages! +@param[in,out] buf_pool buffer pool instance +@param[in] type flush type +@param[in] min_n wished minimum mumber of blocks flushed +(it is not guaranteed that the actual number is that big, though) +@param[in] lsn_limit in the case BUF_FLUSH_LIST all blocks whose +oldest_modification is smaller than this should be flushed (if their number +does not exceed min_n), otherwise ignored +@param[out] n the number of pages which were processed is +passed back to caller. Ignored if NULL +@retval true if a batch was queued successfully. +@retval false if another batch of same type was already running. */ +bool +buf_flush_do_batch( + buf_pool_t* buf_pool, + buf_flush_t type, + ulint min_n, + lsn_t lsn_limit, + flush_counters_t* n); + +/** This utility flushes dirty blocks from the end of the flush list of all +buffer pool instances. +NOTE: The calling thread is not allowed to own any latches on pages! +@param[in] min_n wished minimum mumber of blocks flushed (it is +not guaranteed that the actual number is that big, though) +@param[in] lsn_limit in the case BUF_FLUSH_LIST all blocks whose +oldest_modification is smaller than this should be flushed (if their number +does not exceed min_n), otherwise ignored +@param[out] n_processed the number of pages which were processed is +passed back to caller. Ignored if NULL. @return true if a batch was queued successfully for each buffer pool instance. 
false if another batch of same type was already running in at least one of the buffer pool instance */ -UNIV_INTERN bool -buf_flush_list( -/*===========*/ - ulint min_n, /*!< in: wished minimum mumber of blocks - flushed (it is not guaranteed that the - actual number is that big, though) */ - lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all - blocks whose oldest_modification is - smaller than this should be flushed - (if their number does not exceed - min_n), otherwise ignored */ - ulint* n_processed); /*!< out: the number of pages - which were processed is passed - back to caller. Ignored if NULL */ +buf_flush_lists( + ulint min_n, + lsn_t lsn_limit, + ulint* n_processed); + /******************************************************************//** This function picks up a single page from the tail of the LRU list, flushes it (if it is dirty), removes it from page_hash and LRU @@ -128,26 +150,31 @@ list and puts it on the free list. It is called from user threads when they are unable to find a replaceable page at the tail of the LRU list i.e.: when the background LRU flushing in the page_cleaner thread is not fast enough to keep pace with the workload. -@return TRUE if success. */ -UNIV_INTERN -ibool +@return true if success. */ +bool buf_flush_single_page_from_LRU( /*===========================*/ buf_pool_t* buf_pool); /*!< in/out: buffer pool instance */ /******************************************************************//** Waits until a flush batch of the given type ends */ -UNIV_INTERN void buf_flush_wait_batch_end( /*=====================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ buf_flush_t type); /*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */ +/** +Waits until a flush batch of the given lsn ends +@param[in] new_oldest target oldest_modified_lsn to wait for */ + +void +buf_flush_wait_flushed( + lsn_t new_oldest); + /******************************************************************//** Waits until a flush batch of the given type ends. This is called by a thread that only wants to wait for a flush to end but doesn't do any flushing itself. */ -UNIV_INTERN void buf_flush_wait_batch_end_wait_only( /*===============================*/ @@ -162,8 +189,13 @@ UNIV_INLINE void buf_flush_note_modification( /*========================*/ - buf_block_t* block, /*!< in: block which is modified */ - mtr_t* mtr); /*!< in: mtr */ + buf_block_t* block, /*!< in: block which is modified */ + lsn_t start_lsn, /*!< in: start lsn of the first mtr in a + set of mtr's */ + lsn_t end_lsn, /*!< in: end lsn of the last mtr in the + set of mtr's */ + FlushObserver* observer); /*!< in: flush observer */ + /********************************************************************//** This function should be called when recovery has modified a buffer page. */ UNIV_INLINE @@ -178,8 +210,7 @@ buf_flush_recv_note_modification( /********************************************************************//** Returns TRUE if the file page block is immediately suitable for replacement, i.e., transition FILE_PAGE => NOT_USED allowed. -@return TRUE if can replace immediately */ -UNIV_INTERN +@return TRUE if can replace immediately */ ibool buf_flush_ready_for_replace( /*========================*/ @@ -187,14 +218,28 @@ buf_flush_ready_for_replace( buf_page_in_file(bpage) and in the LRU list */ /******************************************************************//** page_cleaner thread tasked with flushing dirty pages from the buffer -pools. As of now we'll have only one instance of this thread. +pools. 
As of now we'll have only one coordinator of this thread. +@return a dummy parameter */ +extern "C" +os_thread_ret_t +DECLARE_THREAD(buf_flush_page_cleaner_coordinator)( +/*===============================================*/ + void* arg); /*!< in: a dummy parameter required by + os_thread_create */ +/******************************************************************//** +Worker thread of page_cleaner. @return a dummy parameter */ -extern "C" UNIV_INTERN +extern "C" os_thread_ret_t -DECLARE_THREAD(buf_flush_page_cleaner_thread)( +DECLARE_THREAD(buf_flush_page_cleaner_worker)( /*==========================================*/ void* arg); /*!< in: a dummy parameter required by os_thread_create */ +/******************************************************************//** +Initialize page_cleaner. */ +void +buf_flush_page_cleaner_init(void); +/*=============================*/ /*********************************************************************//** Clears up tail of the LRU lists: * Put replaceable pages at the tail of LRU to the free list @@ -202,13 +247,11 @@ Clears up tail of the LRU lists: The depth to which we scan each buffer pool is controlled by dynamic config parameter innodb_LRU_scan_depth. @return total pages flushed */ -UNIV_INTERN ulint -buf_flush_LRU_tail(void); -/*====================*/ +buf_flush_LRU_lists(void); +/*=====================*/ /*********************************************************************//** Wait for any possible LRU flushes that are in progress to end. */ -UNIV_INTERN void buf_flush_wait_LRU_batch_end(void); /*==============================*/ @@ -216,8 +259,7 @@ buf_flush_wait_LRU_batch_end(void); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /******************************************************************//** Validates the flush list. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool buf_flush_validate( /*===============*/ @@ -228,14 +270,12 @@ buf_flush_validate( Initialize the red-black tree to speed up insertions into the flush_list during recovery process. Should be called at the start of recovery process before any page has been read/written. */ -UNIV_INTERN void buf_flush_init_flush_rbt(void); /*==========================*/ /********************************************************************//** Frees up the red-black tree. */ -UNIV_INTERN void buf_flush_free_flush_rbt(void); /*==========================*/ @@ -246,10 +286,9 @@ NOTE: in simulated aio we must call os_aio_simulated_wake_handler_threads after we have posted a batch of writes! NOTE: buf_pool->mutex and buf_page_get_mutex(bpage) must be held upon entering this function, and they will be released by this -function if it returns true. -@return TRUE if the page was flushed */ -UNIV_INTERN -bool +function. +@return TRUE if page was flushed */ +ibool buf_flush_page( /*===========*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ @@ -258,8 +297,7 @@ buf_flush_page( bool sync); /*!< in: true if sync IO request */ /********************************************************************//** Returns true if the block is modified and ready for flushing. 
-@return true if can flush immediately */ -UNIV_INTERN +@return true if can flush immediately */ bool buf_flush_ready_for_flush( /*======================*/ @@ -268,26 +306,116 @@ buf_flush_ready_for_flush( buf_flush_t flush_type)/*!< in: type of flush */ MY_ATTRIBUTE((warn_unused_result)); -#ifdef UNIV_DEBUG /******************************************************************//** Check if there are any dirty pages that belong to a space id in the flush list in a particular buffer pool. -@return number of dirty pages present in a single buffer pool */ -UNIV_INTERN +@return number of dirty pages present in a single buffer pool */ ulint buf_pool_get_dirty_pages_count( /*===========================*/ buf_pool_t* buf_pool, /*!< in: buffer pool */ - ulint id); /*!< in: space id to check */ + ulint id, /*!< in: space id to check */ + FlushObserver* observer); /*!< in: flush observer to check */ /******************************************************************//** Check if there are any dirty pages that belong to a space id in the flush list. -@return count of dirty pages present in all the buffer pools */ -UNIV_INTERN +@return count of dirty pages present in all the buffer pools */ ulint buf_flush_get_dirty_pages_count( /*============================*/ - ulint id); /*!< in: space id to check */ -#endif /* UNIV_DEBUG */ + ulint id, /*!< in: space id to check */ + FlushObserver* observer); /*!< in: flush observer to check */ + +/*******************************************************************//** +Synchronously flush dirty blocks from the end of the flush list of all buffer +pool instances. +NOTE: The calling thread is not allowed to own any latches on pages! */ +void +buf_flush_sync_all_buf_pools(void); +/*==============================*/ + +/** Request IO burst and wake page_cleaner up. +@param[in] lsn_limit upper limit of LSN to be flushed */ +void +buf_flush_request_force( + lsn_t lsn_limit); + +/** We use FlushObserver to track flushing of non-redo logged pages in bulk +create index(BtrBulk.cc).Since we disable redo logging during a index build, +we need to make sure that all dirty pages modifed by the index build are +flushed to disk before any redo logged operations go to the index. */ + +class FlushObserver { +public: + /** Constructor + @param[in] space_id table space id + @param[in] trx trx instance + @param[in] stage performance schema accounting object, + used by ALTER TABLE. It is passed to log_preflush_pool_modified_pages() + for accounting. */ + FlushObserver(ulint space_id, trx_t* trx, ut_stage_alter_t* stage); + + /** Deconstructor */ + ~FlushObserver(); + + /** Check pages have been flushed and removed from the flush list + in a buffer pool instance. + @pram[in] instance_no buffer pool instance no + @return true if the pages were removed from the flush list */ + bool is_complete(ulint instance_no) + { + return(m_flushed->at(instance_no) == m_removed->at(instance_no) + || m_interrupted); + } + + /** Interrupt observer not to wait. */ + void interrupted() + { + m_interrupted = true; + } + + /** Check whether trx is interrupted + @return true if trx is interrupted */ + bool check_interrupted(); + + /** Flush dirty pages. 
*/ + void flush(); + + /** Notify observer of flushing a page + @param[in] buf_pool buffer pool instance + @param[in] bpage buffer page to flush */ + void notify_flush( + buf_pool_t* buf_pool, + buf_page_t* bpage); + + /** Notify observer of removing a page from flush list + @param[in] buf_pool buffer pool instance + @param[in] bpage buffer page flushed */ + void notify_remove( + buf_pool_t* buf_pool, + buf_page_t* bpage); +private: + /** Table space id */ + ulint m_space_id; + + /** Trx instance */ + trx_t* m_trx; + + /** Performance schema accounting object, used by ALTER TABLE. + If not NULL, then stage->begin_phase_flush() will be called initially, + specifying the number of pages to be attempted to be flushed and + subsequently, stage->inc() will be called for each page we attempt to + flush. */ + ut_stage_alter_t* m_stage; + + /* Flush request sent */ + std::vector* m_flushed; + + /* Flush request finished */ + std::vector* m_removed; + + /* True if the operation was interrupted. */ + bool m_interrupted; +}; #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic index a763cd115fe..ecb98e32619 100644 --- a/storage/innobase/include/buf0flu.ic +++ b/storage/innobase/include/buf0flu.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,21 +27,21 @@ Created 11/5/1995 Heikki Tuuri #include "buf0buf.h" #include "mtr0mtr.h" #include "srv0srv.h" +#include "fsp0types.h" /********************************************************************//** Inserts a modified block into the flush list. */ -UNIV_INTERN void buf_flush_insert_into_flush_list( /*=============================*/ buf_pool_t* buf_pool, /*!< buffer pool instance */ buf_block_t* block, /*!< in/out: block which is modified */ lsn_t lsn); /*!< in: oldest modification */ + /********************************************************************//** Inserts a modified block into the flush list in the right sorted position. This function is used by recovery, because there the modifications do not necessarily come in the order of lsn's. */ -UNIV_INTERN void buf_flush_insert_sorted_into_flush_list( /*====================================*/ @@ -57,40 +57,49 @@ UNIV_INLINE void buf_flush_note_modification( /*========================*/ - buf_block_t* block, /*!< in: block which is modified */ - mtr_t* mtr) /*!< in: mtr */ + buf_block_t* block, /*!< in: block which is modified */ + lsn_t start_lsn, /*!< in: start lsn of the mtr that + modified this block */ + lsn_t end_lsn, /*!< in: end lsn of the mtr that + modified this block */ + FlushObserver* observer) /*!< in: flush observer */ { - buf_pool_t* buf_pool = buf_pool_from_block(block); - - ut_ad(!srv_read_only_mode); - ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - ut_ad(block->page.buf_fix_count > 0); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ +#ifdef UNIV_DEBUG + { + /* Allow write to proceed to shared temporary tablespace + in read-only mode. 
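The reworked buf_flush_note_modification() above advances newest_modification to the mini-transaction's end LSN and adds the block to the flush list only on its first modification, keyed by the start LSN of that first mtr. A compact, lock-free model of that bookkeeping (the real code does this under the block and flush-list mutexes and also records the flush observer):

#include <cstdint>
#include <iostream>
#include <list>

struct page_lite {
    uint64_t oldest_modification = 0;   // 0 means "clean, not on the flush list"
    uint64_t newest_modification = 0;
};

static std::list<page_lite*> flush_list;  // kept in (roughly) oldest-first order

static void note_modification(page_lite& page, uint64_t start_lsn, uint64_t end_lsn)
{
    page.newest_modification = end_lsn;

    if (page.oldest_modification == 0) {
        page.oldest_modification = start_lsn;   // first time dirty: join the list
        flush_list.push_back(&page);
    }
    // else: already on the flush list; its position (oldest LSN) does not move.
}

int main()
{
    page_lite page;
    note_modification(page, 100, 120);
    note_modification(page, 130, 180);
    std::cout << page.oldest_modification << " "
              << page.newest_modification << "\n";   // 100 180
    return 0;
}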
*/ + ut_ad(!srv_read_only_mode + || fsp_is_system_temporary(block->page.id.space())); + ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); + ut_ad(block->page.buf_fix_count > 0); + + buf_pool_t* buf_pool = buf_pool_from_block(block); + + ut_ad(!buf_pool_mutex_own(buf_pool)); + ut_ad(!buf_flush_list_mutex_own(buf_pool)); + } +#endif /* UNIV_DEBUG */ - ut_ad(!buf_pool_mutex_own(buf_pool)); - ut_ad(!buf_flush_list_mutex_own(buf_pool)); - ut_ad(!mtr->made_dirty || log_flush_order_mutex_own()); + mutex_enter(&block->mutex); - ut_ad(mtr->start_lsn != 0); - ut_ad(mtr->modifications); + ut_ad(block->page.newest_modification <= end_lsn); + block->page.newest_modification = end_lsn; - mutex_enter(&block->mutex); - ut_ad(block->page.newest_modification <= mtr->end_lsn); + /* Don't allow to set flush observer from non-null to null, + or from one observer to another. */ + ut_ad(block->page.flush_observer == NULL + || block->page.flush_observer == observer); + block->page.flush_observer = observer; - block->page.newest_modification = mtr->end_lsn; + if (block->page.oldest_modification == 0) { + buf_pool_t* buf_pool = buf_pool_from_block(block); - if (!block->page.oldest_modification) { - ut_a(mtr->made_dirty); - ut_ad(log_flush_order_mutex_own()); - buf_flush_insert_into_flush_list( - buf_pool, block, mtr->start_lsn); + buf_flush_insert_into_flush_list(buf_pool, block, start_lsn); } else { - ut_ad(block->page.oldest_modification <= mtr->start_lsn); + ut_ad(block->page.oldest_modification <= start_lsn); } - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); srv_stats.buf_pool_write_requests.inc(); } @@ -107,33 +116,36 @@ buf_flush_recv_note_modification( lsn_t end_lsn) /*!< in: end lsn of the last mtr in the set of mtr's */ { - buf_pool_t* buf_pool = buf_pool_from_block(block); +#ifdef UNIV_DEBUG + { + ut_ad(!srv_read_only_mode); + ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); + ut_ad(block->page.buf_fix_count > 0); - ut_ad(!srv_read_only_mode); - ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); - ut_ad(block->page.buf_fix_count > 0); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + buf_pool_t* buf_pool = buf_pool_from_block(block); - ut_ad(!buf_pool_mutex_own(buf_pool)); - ut_ad(!buf_flush_list_mutex_own(buf_pool)); - ut_ad(log_flush_order_mutex_own()); + ut_ad(!buf_pool_mutex_own(buf_pool)); + ut_ad(!buf_flush_list_mutex_own(buf_pool)); - ut_ad(start_lsn != 0); - ut_ad(block->page.newest_modification <= end_lsn); + ut_ad(start_lsn != 0); + ut_ad(block->page.newest_modification <= end_lsn); + } +#endif /* UNIV_DEBUG */ + + buf_page_mutex_enter(block); - mutex_enter(&block->mutex); block->page.newest_modification = end_lsn; if (!block->page.oldest_modification) { + buf_pool_t* buf_pool = buf_pool_from_block(block); + buf_flush_insert_sorted_into_flush_list( buf_pool, block, start_lsn); } else { ut_ad(block->page.oldest_modification <= start_lsn); } - mutex_exit(&block->mutex); + buf_page_mutex_exit(block); } #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h index a7a65df33aa..0cbd77878ec 100644 --- a/storage/innobase/include/buf0lru.h +++ b/storage/innobase/include/buf0lru.h @@ -38,8 +38,7 @@ struct trx_t; Returns TRUE if less than 25 % of the buffer pool is available. This can be used in heuristics to prevent huge transactions eating up the whole buffer pool for their locks. 
-@return TRUE if less than 25 % of buffer pool left */ -UNIV_INTERN +@return TRUE if less than 25 % of buffer pool left */ ibool buf_LRU_buf_pool_running_out(void); /*==============================*/ @@ -56,7 +55,6 @@ Flushes all dirty pages or removes all pages belonging to a given tablespace. A PROBLEM: if readahead is being started, what guarantees that it will not try to read in pages after this operation has completed? */ -UNIV_INTERN void buf_LRU_flush_or_remove_pages( /*==========================*/ @@ -68,7 +66,6 @@ buf_LRU_flush_or_remove_pages( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /********************************************************************//** Insert a compressed block into buf_pool->zip_clean in the LRU order. */ -UNIV_INTERN void buf_LRU_insert_zip_clean( /*=====================*/ @@ -86,7 +83,6 @@ accessible via bpage. The caller must hold buf_pool->mutex and must not hold any buf_page_get_mutex() when calling this function. @return true if freed, false otherwise. */ -UNIV_INTERN bool buf_LRU_free_page( /*==============*/ @@ -96,21 +92,19 @@ buf_LRU_free_page( MY_ATTRIBUTE((nonnull)); /******************************************************************//** Try to free a replaceable block. -@return TRUE if found and freed */ -UNIV_INTERN -ibool +@return true if found and freed */ +bool buf_LRU_scan_and_free_block( /*========================*/ buf_pool_t* buf_pool, /*!< in: buffer pool instance */ - ibool scan_all) /*!< in: scan whole LRU list - if TRUE, otherwise scan only + bool scan_all) /*!< in: scan whole LRU list + if true, otherwise scan only 'old' blocks. */ MY_ATTRIBUTE((nonnull,warn_unused_result)); /******************************************************************//** Returns a free block from the buf_pool. The block is taken off the free list. If it is empty, returns NULL. -@return a free control block, or NULL if the buf_block->free list is empty */ -UNIV_INTERN +@return a free control block, or NULL if the buf_block->free list is empty */ buf_block_t* buf_LRU_get_free_only( /*==================*/ @@ -138,8 +132,7 @@ we put it to free list to be used. * scan LRU list even if buf_pool->try_LRU_scan is not set * iteration > 1: * same as iteration 1 but sleep 10ms -@return the free control block, in state BUF_BLOCK_READY_FOR_USE */ -UNIV_INTERN +@return the free control block, in state BUF_BLOCK_READY_FOR_USE */ buf_block_t* buf_LRU_get_free_block( /*===================*/ @@ -148,25 +141,21 @@ buf_LRU_get_free_block( /******************************************************************//** Determines if the unzip_LRU list should be used for evicting a victim instead of the general LRU list. -@return TRUE if should use unzip_LRU */ -UNIV_INTERN +@return TRUE if should use unzip_LRU */ ibool buf_LRU_evict_from_unzip_LRU( /*=========================*/ buf_pool_t* buf_pool); /******************************************************************//** Puts a block back to the free list. */ -UNIV_INTERN void buf_LRU_block_free_non_file_page( /*=============================*/ buf_block_t* block); /*!< in: block, must not contain a file page */ /******************************************************************//** -Adds a block to the LRU list. Please make sure that the zip_size is -already set into the page zip when invoking the function, so that we -can get correct zip_size from the buffer page when adding a block -into LRU */ -UNIV_INTERN +Adds a block to the LRU list. 
Please make sure that the page_size is +already set when invoking the function, so that we can get correct +page_size from the buffer page when adding a block into LRU */ void buf_LRU_add_block( /*==============*/ @@ -177,7 +166,6 @@ buf_LRU_add_block( the start regardless of this parameter */ /******************************************************************//** Adds a block to the LRU list of decompressed zip pages. */ -UNIV_INTERN void buf_unzip_LRU_add_block( /*====================*/ @@ -186,23 +174,20 @@ buf_unzip_LRU_add_block( of the list, else put to the start */ /******************************************************************//** Moves a block to the start of the LRU list. */ -UNIV_INTERN void buf_LRU_make_block_young( /*=====================*/ buf_page_t* bpage); /*!< in: control block */ /******************************************************************//** Moves a block to the end of the LRU list. */ -UNIV_INTERN void buf_LRU_make_block_old( /*===================*/ buf_page_t* bpage); /*!< in: control block */ /**********************************************************************//** Updates buf_pool->LRU_old_ratio. -@return updated old_pct */ -UNIV_INTERN -ulint +@return updated old_pct */ +uint buf_LRU_old_ratio_update( /*=====================*/ uint old_pct,/*!< in: Reserve this percentage of @@ -213,14 +198,12 @@ buf_LRU_old_ratio_update( /********************************************************************//** Update the historical stats that we are collecting for LRU eviction policy at the end of each interval. */ -UNIV_INTERN void buf_LRU_stat_update(void); /*=====================*/ /******************************************************************//** Remove one page from LRU list and put it to free list */ -UNIV_INTERN void buf_LRU_free_one_page( /*==================*/ @@ -231,7 +214,6 @@ buf_LRU_free_one_page( /******************************************************************//** Adjust LRU hazard pointers if needed. */ - void buf_LRU_adjust_hp( /*==============*/ @@ -241,8 +223,7 @@ buf_LRU_adjust_hp( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Validates the LRU list. -@return TRUE */ -UNIV_INTERN +@return TRUE */ ibool buf_LRU_validate(void); /*==================*/ @@ -250,7 +231,6 @@ buf_LRU_validate(void); #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Prints the LRU list. */ -UNIV_INTERN void buf_LRU_print(void); /*===============*/ diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h index 10714031710..f2ec11f1783 100644 --- a/storage/innobase/include/buf0rea.h +++ b/storage/innobase/include/buf0rea.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2015, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under @@ -28,36 +28,38 @@ Created 11/5/1995 Heikki Tuuri #define buf0rea_h #include "univ.i" +#include "buf0buf.h" #include "buf0types.h" -/********************************************************************//** -High-level function which reads a page asynchronously from a file to the +/** High-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there. Sets the io_fix flag and sets an exclusive lock on the buffer frame. The flag is cleared and the x-lock released by the i/o-handler thread. +@param[in] page_id page id +@param[in] page_size page size @return TRUE if page has been read in, FALSE in case of failure */ -UNIV_INTERN ibool buf_read_page( -/*==========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes, or 0 */ - ulint offset, /*!< in: page number */ - buf_page_t** bpage);/*!< out: page */ + const page_id_t& page_id, + const page_size_t& page_size, + buf_page_t** bpage); + /********************************************************************//** High-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there. Sets the io_fix flag and sets an exclusive lock on the buffer frame. The flag is cleared and the x-lock released by the i/o-handler thread. +@param[in] page_id page id +@param[in] page_size page size +@param[in] sync true if synchronous aio is desired @return TRUE if page has been read in, FALSE in case of failure */ -UNIV_INTERN ibool -buf_read_page_async( -/*================*/ - ulint space, /*!< in: space id */ - ulint offset);/*!< in: page number */ -/********************************************************************//** -Applies a random read-ahead in buf_pool if there are at least a threshold +buf_read_page_background( + const page_id_t& page_id, + const page_size_t& page_size, + bool sync); + +/** Applies a random read-ahead in buf_pool if there are at least a threshold value of accessed pages from the random read-ahead area. Does not read any page, not even the one at the position (space, offset), if the read-ahead mechanism is not activated. NOTE 1: the calling thread may own latches on @@ -66,23 +68,20 @@ end up waiting for these latches! NOTE 2: the calling thread must want access to the page given: this rule is set to prevent unintended read-aheads performed by ibuf routines, a situation which could result in a deadlock if the OS does not support asynchronous i/o. +@param[in] page_id page id of a page which the current thread +wants to access +@param[in] page_size page size +@param[in] inside_ibuf TRUE if we are inside ibuf routine @return number of page read requests issued; NOTE that if we read ibuf pages, it may happen that the page at the given page number does not -get read even if we return a positive value! -@return number of page read requests issued */ -UNIV_INTERN +get read even if we return a positive value! 
*/ ulint buf_read_ahead_random( -/*==================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes, - or 0 */ - ulint offset, /*!< in: page number of a page which - the current thread wants to access */ - ibool inside_ibuf); /*!< in: TRUE if we are inside ibuf - routine */ -/********************************************************************//** -Applies linear read-ahead if in the buf_pool the page is a border page of + const page_id_t& page_id, + const page_size_t& page_size, + ibool inside_ibuf); + +/** Applies linear read-ahead if in the buf_pool the page is a border page of a linear read-ahead area and all the pages in the area have been accessed. Does not read any page if the read-ahead mechanism is not activated. Note that the algorithm looks at the 'natural' adjacent successor and @@ -104,20 +103,20 @@ latches! NOTE 3: the calling thread must want access to the page given: this rule is set to prevent unintended read-aheads performed by ibuf routines, a situation which could result in a deadlock if the OS does not support asynchronous io. -@return number of page read requests issued */ -UNIV_INTERN +@param[in] page_id page id; see NOTE 3 above +@param[in] page_size page size +@param[in] inside_ibuf TRUE if we are inside ibuf routine +@return number of page read requests issued */ ulint buf_read_ahead_linear( -/*==================*/ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes, or 0 */ - ulint offset, /*!< in: page number; see NOTE 3 above */ - ibool inside_ibuf); /*!< in: TRUE if we are inside ibuf routine */ + const page_id_t& page_id, + const page_size_t& page_size, + ibool inside_ibuf); + /********************************************************************//** Issues read requests for pages which the ibuf module wants to read in, in order to contract the insert buffer tree. Technically, this function is like a read-ahead function. */ -UNIV_INTERN void buf_read_ibuf_merge_pages( /*======================*/ @@ -127,7 +126,7 @@ buf_read_ibuf_merge_pages( to get read in, before this function returns */ const ulint* space_ids, /*!< in: array of space ids */ - const ib_int64_t* space_versions,/*!< in: the spaces must have + const ib_uint64_t* space_versions,/*!< in: the spaces must have this version number (timestamp), otherwise we discard the read; we use this @@ -140,40 +139,31 @@ buf_read_ibuf_merge_pages( array */ ulint n_stored); /*!< in: number of elements in the arrays */ -/********************************************************************//** -Issues read requests for pages which recovery wants to read in. */ -UNIV_INTERN + +/** Issues read requests for pages which recovery wants to read in. 
+@param[in] sync true if the caller wants this function to wait +for the highest address page to get read in, before this function returns +@param[in] space_id tablespace id +@param[in] page_nos array of page numbers to read, with the +highest page number the last in the array +@param[in] n_stored number of page numbers in the array */ + void buf_read_recv_pages( -/*================*/ - ibool sync, /*!< in: TRUE if the caller - wants this function to wait - for the highest address page - to get read in, before this - function returns */ - ulint space, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in - bytes, or 0 */ - const ulint* page_nos, /*!< in: array of page numbers - to read, with the highest page - number the last in the - array */ - ulint n_stored); /*!< in: number of page numbers - in the array */ + bool sync, + ulint space_id, + const ulint* page_nos, + ulint n_stored); /** The size in pages of the area which the read-ahead algorithms read if invoked */ -#define BUF_READ_AHEAD_AREA(b) \ - ut_min(64, ut_2_power_up((b)->curr_size / 32)) +#define BUF_READ_AHEAD_AREA(b) ((b)->read_ahead_area) /** @name Modes used in read-ahead @{ */ /** read only pages belonging to the insert buffer tree */ #define BUF_READ_IBUF_PAGES_ONLY 131 /** read any page */ #define BUF_READ_ANY_PAGE 132 -/** read any page, but ignore (return an error) if a page does not exist -instead of crashing like BUF_READ_ANY_PAGE does */ -#define BUF_READ_IGNORE_NONEXISTENT_PAGES 1024 /* @} */ #endif diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h index 11bbc9b5c8a..102b831ec61 100644 --- a/storage/innobase/include/buf0types.h +++ b/storage/innobase/include/buf0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,12 +26,11 @@ Created 11/17/1995 Heikki Tuuri #ifndef buf0types_h #define buf0types_h -#if defined(INNODB_PAGE_ATOMIC_REF_COUNT) && defined(HAVE_ATOMIC_BUILTINS) -#define PAGE_ATOMIC_REF_COUNT -#endif /* INNODB_PAGE_ATOMIC_REF_COUNT && HAVE_ATOMIC_BUILTINS */ +#include "os0event.h" +#include "ut0ut.h" /** Buffer page (uncompressed or compressed) */ -struct buf_page_t; +class buf_page_t; /** Buffer block for which an uncompressed page exists */ struct buf_block_t; /** Buffer pool chunk comprising buf_block_t */ @@ -44,6 +43,8 @@ struct buf_pool_stat_t; struct buf_buddy_stat_t; /** Doublewrite memory struct */ struct buf_dblwr_t; +/** Flush observer for bulk create index */ +class FlushObserver; /** A buffer frame. 
@see page_t */ typedef byte buf_frame_t; @@ -96,6 +97,24 @@ enum srv_checksum_algorithm_t { when reading */ }; +inline +bool +is_checksum_strict(srv_checksum_algorithm_t algo) +{ + return(algo == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32 + || algo == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB + || algo == SRV_CHECKSUM_ALGORITHM_STRICT_NONE); +} + +inline +bool +is_checksum_strict(ulint algo) +{ + return(algo == SRV_CHECKSUM_ALGORITHM_STRICT_CRC32 + || algo == SRV_CHECKSUM_ALGORITHM_STRICT_INNODB + || algo == SRV_CHECKSUM_ALGORITHM_STRICT_NONE); +} + /** Parameters of binary buddy system for compressed pages (buf0buddy.h) */ /* @{ */ /** Zip shift value for the smallest page size */ @@ -117,4 +136,16 @@ this must be equal to UNIV_PAGE_SIZE */ #define BUF_BUDDY_HIGH (BUF_BUDDY_LOW << BUF_BUDDY_SIZES) /* @} */ +#ifndef UNIV_INNOCHECKSUM + +#include "ut0mutex.h" +#include "sync0rw.h" + +typedef ib_bpmutex_t BPageMutex; +typedef ib_mutex_t BufPoolMutex; +typedef ib_mutex_t FlushListMutex; +typedef BPageMutex BufPoolZipMutex; +typedef rw_lock_t BPageLock; +#endif /* !UNIV_INNOCHECKSUM */ + #endif /* buf0types.h */ diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h index 1d954bfc07c..24b0a791535 100644 --- a/storage/innobase/include/data0data.h +++ b/storage/innobase/include/data0data.h @@ -33,14 +33,17 @@ Created 5/30/1994 Heikki Tuuri #include "mem0mem.h" #include "dict0types.h" +#include + /** Storage for overflow data in a big record, that is, a clustered index record which needs external storage of data fields */ struct big_rec_t; +struct upd_t; #ifdef UNIV_DEBUG /*********************************************************************//** Gets pointer to the type struct of SQL data field. -@return pointer to the type struct */ +@return pointer to the type struct */ UNIV_INLINE dtype_t* dfield_get_type( @@ -49,7 +52,7 @@ dfield_get_type( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets pointer to the data in a field. -@return pointer to data */ +@return pointer to data */ UNIV_INLINE void* dfield_get_data( @@ -71,7 +74,7 @@ dfield_set_type( MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets length of field data. -@return length of data; UNIV_SQL_NULL if SQL null data */ +@return length of data; UNIV_SQL_NULL if SQL null data */ UNIV_INLINE ulint dfield_get_len( @@ -89,7 +92,7 @@ dfield_set_len( MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Determines if a field is SQL NULL -@return nonzero if SQL null data */ +@return nonzero if SQL null data */ UNIV_INLINE ulint dfield_is_null( @@ -98,7 +101,7 @@ dfield_is_null( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Determines if a field is externally stored -@return nonzero if externally stored */ +@return nonzero if externally stored */ UNIV_INLINE ulint dfield_is_ext( @@ -124,6 +127,15 @@ dfield_set_data( ulint len) /*!< in: length or UNIV_SQL_NULL */ MY_ATTRIBUTE((nonnull(1))); /*********************************************************************//** +Sets pointer to the data and length in a field. */ +UNIV_INLINE +void +dfield_write_mbr( +/*=============*/ + dfield_t* field, /*!< in: field */ + const double* mbr) /*!< in: data */ + __attribute__((nonnull(1))); +/*********************************************************************//** Sets a data field to SQL NULL. 
*/ UNIV_INLINE void @@ -146,7 +158,7 @@ UNIV_INLINE void dfield_copy_data( /*=============*/ - dfield_t* field1, /*!< out: field to copy to */ + dfield_t* field1, /*!< out: field to copy to */ const dfield_t* field2) /*!< in: field to copy from */ MY_ATTRIBUTE((nonnull)); /*********************************************************************//** @@ -172,7 +184,7 @@ dfield_dup( Tests if two data fields are equal. If len==0, tests the data length and content for equality. If len>0, tests the first len bytes of the content for equality. -@return TRUE if both fields are NULL or if they are equal */ +@return TRUE if both fields are NULL or if they are equal */ UNIV_INLINE ibool dfield_datas_are_binary_equal( @@ -184,7 +196,7 @@ dfield_datas_are_binary_equal( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Tests if dfield data length and content is equal to the given. -@return TRUE if equal */ +@return TRUE if equal */ UNIV_INLINE ibool dfield_data_is_binary_equal( @@ -196,29 +208,47 @@ dfield_data_is_binary_equal( #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Gets number of fields in a data tuple. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dtuple_get_n_fields( /*================*/ const dtuple_t* tuple) /*!< in: tuple */ MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Gets number of virtual fields in a data tuple. +@param[in] tuple dtuple to check +@return number of fields */ +UNIV_INLINE +ulint +dtuple_get_n_v_fields( + const dtuple_t* tuple); + #ifdef UNIV_DEBUG -/*********************************************************************//** -Gets nth field of a tuple. -@return nth field */ +/** Gets nth field of a tuple. +@param[in] tuple tuple +@param[in] n index of field +@return nth field */ UNIV_INLINE dfield_t* dtuple_get_nth_field( -/*=================*/ - const dtuple_t* tuple, /*!< in: tuple */ - ulint n); /*!< in: index of field */ + const dtuple_t* tuple, + ulint n); +/** Gets nth virtual field of a tuple. +@param[in] tuple tuple +@oaran[in] n the nth field to get +@return nth field */ +UNIV_INLINE +dfield_t* +dtuple_get_nth_v_field( + const dtuple_t* tuple, + ulint n); #else /* UNIV_DEBUG */ # define dtuple_get_nth_field(tuple, n) ((tuple)->fields + (n)) +# define dtuple_get_nth_v_field(tuple, n) ((tuple)->fields + (tuple)->n_fields + (n)) #endif /* UNIV_DEBUG */ /*********************************************************************//** Gets info bits in a data tuple. -@return info bits */ +@return info bits */ UNIV_INLINE ulint dtuple_get_info_bits( @@ -236,7 +266,7 @@ dtuple_set_info_bits( MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets number of fields used in record comparisons. -@return number of fields used in comparisons in rem0cmp.* */ +@return number of fields used in comparisons in rem0cmp.* */ UNIV_INLINE ulint dtuple_get_n_fields_cmp( @@ -259,25 +289,28 @@ creating a new dtuple_t object */ #define DTUPLE_EST_ALLOC(n_fields) \ (sizeof(dtuple_t) + (n_fields) * sizeof(dfield_t)) -/**********************************************************//** -Creates a data tuple from an already allocated chunk of memory. +/** Creates a data tuple from an already allocated chunk of memory. The size of the chunk must be at least DTUPLE_EST_ALLOC(n_fields). The default value for number of fields used in record comparisons for this tuple is n_fields. 
-@return created tuple (inside buf) */ +@param[in,out] buf buffer to use +@param[in] buf_size buffer size +@param[in] n_fields number of field +@param[in] n_v_fields number of fields on virtual columns +@return created tuple (inside buf) */ UNIV_INLINE dtuple_t* dtuple_create_from_mem( -/*===================*/ - void* buf, /*!< in, out: buffer to use */ - ulint buf_size, /*!< in: buffer size */ - ulint n_fields) /*!< in: number of fields */ + void* buf, + ulint buf_size, + ulint n_fields, + ulint n_v_fields) MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Creates a data tuple to a memory heap. The default value for number of fields used in record comparisons for this tuple is n_fields. -@return own: created tuple */ +@return own: created tuple */ UNIV_INLINE dtuple_t* dtuple_create( @@ -288,20 +321,56 @@ dtuple_create( ulint n_fields)/*!< in: number of fields */ MY_ATTRIBUTE((nonnull, malloc)); + +/** Initialize the virtual field data in a dtuple_t +@param[in,out] vrow dtuple contains the virtual fields */ +UNIV_INLINE +void +dtuple_init_v_fld( + const dtuple_t* vrow); + +/** Duplicate the virtual field data in a dtuple_t +@param[in,out] vrow dtuple contains the virtual fields +@param[in] heap heap memory to use */ +UNIV_INLINE +void +dtuple_dup_v_fld( + const dtuple_t* vrow, + mem_heap_t* heap); + +/** Creates a data tuple with possible virtual columns to a memory heap. +@param[in] heap memory heap where the tuple is created +@param[in] n_fields number of fields +@param[in] n_v_fields number of fields on virtual col +@return own: created tuple */ +UNIV_INLINE +dtuple_t* +dtuple_create_with_vcol( + mem_heap_t* heap, + ulint n_fields, + ulint n_v_fields); + /*********************************************************************//** Sets number of fields used in a tuple. Normally this is set in dtuple_create, but if you want later to set it smaller, you can use this. */ -UNIV_INTERN void dtuple_set_n_fields( /*================*/ dtuple_t* tuple, /*!< in: tuple */ ulint n_fields) /*!< in: number of fields */ MY_ATTRIBUTE((nonnull)); +/** Copies a data tuple's virtaul fields to another. This is a shallow copy; +@param[in,out] d_tuple destination tuple +@param[in] s_tuple source tuple */ +UNIV_INLINE +void +dtuple_copy_v_fields( + dtuple_t* d_tuple, + const dtuple_t* s_tuple); /*********************************************************************//** Copies a data tuple to another. This is a shallow copy; if a deep copy is desired, dfield_dup() will have to be invoked on each field. -@return own: copy of tuple */ +@return own: copy of tuple */ UNIV_INLINE dtuple_t* dtuple_copy( @@ -313,7 +382,7 @@ dtuple_copy( /**********************************************************//** The following function returns the sum of data lengths of a tuple. The space occupied by the field structs or the tuple struct is not counted. -@return sum of data lens */ +@return sum of data lens */ UNIV_INLINE ulint dtuple_get_data_size( @@ -323,37 +392,37 @@ dtuple_get_data_size( MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Computes the number of externally stored fields in a data tuple. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dtuple_get_n_ext( /*=============*/ const dtuple_t* tuple) /*!< in: tuple */ MY_ATTRIBUTE((nonnull)); -/************************************************************//** -Compare two data tuples, respecting the collation of character fields. 
-@return 1, 0 , -1 if tuple1 is greater, equal, less, respectively, -than tuple2 */ -UNIV_INTERN +/** Compare two data tuples. +@param[in] tuple1 first data tuple +@param[in] tuple2 second data tuple +@return positive, 0, negative if tuple1 is greater, equal, less, than tuple2, +respectively */ int dtuple_coll_cmp( -/*============*/ - const dtuple_t* tuple1, /*!< in: tuple 1 */ - const dtuple_t* tuple2) /*!< in: tuple 2 */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/************************************************************//** -Folds a prefix given as the number of fields of a tuple. -@return the folded value */ + const dtuple_t* tuple1, + const dtuple_t* tuple2) + __attribute__((warn_unused_result)); +/** Fold a prefix given as the number of fields of a tuple. +@param[in] tuple index record +@param[in] n_fields number of complete fields to fold +@param[in] n_bytes number of bytes to fold in the last field +@param[in] index_id index tree ID +@return the folded value */ UNIV_INLINE ulint dtuple_fold( -/*========*/ - const dtuple_t* tuple, /*!< in: the tuple */ - ulint n_fields,/*!< in: number of complete fields to fold */ - ulint n_bytes,/*!< in: number of bytes to fold in an - incomplete last field */ - index_id_t tree_id)/*!< in: index tree id */ - MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); + const dtuple_t* tuple, + ulint n_fields, + ulint n_bytes, + index_id_t tree_id) + __attribute__((warn_unused_result)); /*******************************************************************//** Sets types of fields binary in a tuple. */ UNIV_INLINE @@ -365,7 +434,7 @@ dtuple_set_types_binary( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Checks if a dtuple contains an SQL null value. -@return TRUE if some field is SQL null */ +@return TRUE if some field is SQL null */ UNIV_INLINE ibool dtuple_contains_null( @@ -374,8 +443,7 @@ dtuple_contains_null( MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data field is typed. Asserts an error if not. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dfield_check_typed( /*===============*/ @@ -383,8 +451,7 @@ dfield_check_typed( MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data tuple is typed. Asserts an error if not. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_check_typed( /*===============*/ @@ -392,8 +459,7 @@ dtuple_check_typed( MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************//** Checks that a data tuple is typed. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_check_typed_no_assert( /*=========================*/ @@ -403,8 +469,7 @@ dtuple_check_typed_no_assert( /**********************************************************//** Validates the consistency of a tuple which must be complete, i.e, all fields must have been set. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtuple_validate( /*============*/ @@ -413,7 +478,6 @@ dtuple_validate( #endif /* UNIV_DEBUG */ /*************************************************************//** Pretty prints a dfield value according to its data type. */ -UNIV_INTERN void dfield_print( /*=========*/ @@ -422,7 +486,6 @@ dfield_print( /*************************************************************//** Pretty prints a dfield value according to its data type. 
Also the hex string is printed if a string contains non-printable characters. */ -UNIV_INTERN void dfield_print_also_hex( /*==================*/ @@ -430,13 +493,41 @@ dfield_print_also_hex( MY_ATTRIBUTE((nonnull)); /**********************************************************//** The following function prints the contents of a tuple. */ -UNIV_INTERN void dtuple_print( /*=========*/ FILE* f, /*!< in: output stream */ const dtuple_t* tuple) /*!< in: tuple */ MY_ATTRIBUTE((nonnull)); + +/** Print the contents of a tuple. +@param[out] o output stream +@param[in] field array of data fields +@param[in] n number of data fields */ +void +dfield_print( + std::ostream& o, + const dfield_t* field, + ulint n); +/** Print the contents of a tuple. +@param[out] o output stream +@param[in] tuple data tuple */ +void +dtuple_print( + std::ostream& o, + const dtuple_t* tuple); + +/** Print the contents of a tuple. +@param[out] o output stream +@param[in] tuple data tuple */ +inline +std::ostream& +operator<<(std::ostream& o, const dtuple_t& tuple) +{ + dtuple_print(o, &tuple); + return(o); +} + /**************************************************************//** Moves parts of long fields in entry to the big record vector so that the size of tuple drops below the maximum record size allowed in the @@ -445,20 +536,19 @@ to determine uniquely the insertion place of the tuple in the index. @return own: created big record vector, NULL if we are not able to shorten the entry enough, i.e., if there are too many fixed-length or short fields in entry or the index is clustered */ -UNIV_INTERN big_rec_t* dtuple_convert_big_rec( /*===================*/ dict_index_t* index, /*!< in: index */ + upd_t* upd, /*!< in/out: update vector */ dtuple_t* entry, /*!< in/out: index entry */ ulint* n_ext) /*!< in/out: number of externally stored columns */ - MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,4), malloc, warn_unused_result)); /**************************************************************//** Puts back to entry the data stored in vector. Note that to ensure the fields in entry can accommodate the data, vector must have been created from entry with dtuple_convert_big_rec. */ -UNIV_INTERN void dtuple_convert_back_big_rec( /*========================*/ @@ -482,9 +572,15 @@ dtuple_big_rec_free( /** Structure for an SQL data field */ struct dfield_t{ void* data; /*!< pointer to data */ - unsigned ext:1; /*!< TRUE=externally stored, FALSE=local */ - unsigned len:32; /*!< data length; UNIV_SQL_NULL if SQL null */ + unsigned ext; /*!< TRUE=externally stored, FALSE=local */ + unsigned len; /*!< data length; UNIV_SQL_NULL if SQL null */ dtype_t type; /*!< type of data */ + + /** Create a deep copy of this object + @param[in] heap the memory heap in which the clone will be + created. + @return the cloned object. */ + dfield_t* clone(mem_heap_t* heap); }; /** Structure for an SQL data tuple of fields (logical record) */ @@ -502,6 +598,8 @@ struct dtuple_t { default value in dtuple creation is the same value as n_fields */ dfield_t* fields; /*!< fields */ + ulint n_v_fields; /*!< number of virtual fields */ + dfield_t* v_fields; /*!< fields on virtual column */ UT_LIST_NODE_T(dtuple_t) tuple_list; /*!< data tuples can be linked into a list using this field */ @@ -513,8 +611,20 @@ struct dtuple_t { #endif /* UNIV_DEBUG */ }; + /** A slot for a field in a big rec vector */ struct big_rec_field_t { + + /** Constructor. 
+ @param[in] field_no_ the field number + @param[in] len_ the data length + @param[in] data_ the data */ + big_rec_field_t(ulint field_no_, ulint len_, const void* data_) + : field_no(field_no_), + len(len_), + data(data_) + {} + ulint field_no; /*!< field number in record */ ulint len; /*!< stored data length, in bytes */ const void* data; /*!< stored data */ @@ -525,8 +635,36 @@ clustered index record which needs external storage of data fields */ struct big_rec_t { mem_heap_t* heap; /*!< memory heap from which allocated */ + const ulint capacity; /*!< fields array size */ ulint n_fields; /*!< number of stored fields */ big_rec_field_t*fields; /*!< stored fields */ + + /** Constructor. + @param[in] max the capacity of the array of fields. */ + explicit big_rec_t(const ulint max) + : heap(0), + capacity(max), + n_fields(0), + fields(0) + {} + + /** Append one big_rec_field_t object to the end of array of fields */ + void append(const big_rec_field_t& field) + { + ut_ad(n_fields < capacity); + fields[n_fields] = field; + n_fields++; + } + + /** Allocate a big_rec_t object in the given memory heap, and for + storing n_fld number of fields. + @param[in] heap memory heap in which this object is allocated + @param[in] n_fld maximum number of fields that can be stored in + this object + @return the allocated object */ + static big_rec_t* alloc( + mem_heap_t* heap, + ulint n_fld); }; #ifndef UNIV_NONINL diff --git a/storage/innobase/include/data0data.ic b/storage/innobase/include/data0data.ic index 11499ab928c..c6c155c6b61 100644 --- a/storage/innobase/include/data0data.ic +++ b/storage/innobase/include/data0data.ic @@ -25,6 +25,7 @@ Created 5/30/1994 Heikki Tuuri #include "mem0mem.h" #include "ut0rnd.h" +#include "btr0types.h" #ifdef UNIV_DEBUG /** Dummy variable to catch access to uninitialized fields. In the @@ -34,7 +35,7 @@ extern byte data_error; /*********************************************************************//** Gets pointer to the type struct of SQL data field. -@return pointer to the type struct */ +@return pointer to the type struct */ UNIV_INLINE dtype_t* dfield_get_type( @@ -65,7 +66,7 @@ dfield_set_type( #ifdef UNIV_DEBUG /*********************************************************************//** Gets pointer to the data in a field. -@return pointer to data */ +@return pointer to data */ UNIV_INLINE void* dfield_get_data( @@ -82,7 +83,7 @@ dfield_get_data( /*********************************************************************//** Gets length of field data. 
-@return length of data; UNIV_SQL_NULL if SQL null data */ +@return length of data; UNIV_SQL_NULL if SQL null data */ UNIV_INLINE ulint dfield_get_len( @@ -111,12 +112,12 @@ dfield_set_len( #endif /* UNIV_VALGRIND_DEBUG */ field->ext = 0; - field->len = len; + field->len = static_cast(len); } /*********************************************************************//** Determines if a field is SQL NULL -@return nonzero if SQL null data */ +@return nonzero if SQL null data */ UNIV_INLINE ulint dfield_is_null( @@ -130,7 +131,7 @@ dfield_is_null( /*********************************************************************//** Determines if a field is externally stored -@return nonzero if externally stored */ +@return nonzero if externally stored */ UNIV_INLINE ulint dfield_is_ext( @@ -138,6 +139,7 @@ dfield_is_ext( const dfield_t* field) /*!< in: field */ { ut_ad(field); + ut_ad(!field->ext || field->len >= BTR_EXTERN_FIELD_REF_SIZE); return(field->ext); } @@ -172,7 +174,31 @@ dfield_set_data( #endif /* UNIV_VALGRIND_DEBUG */ field->data = (void*) data; field->ext = 0; - field->len = len; + field->len = static_cast(len); +} + +/*********************************************************************//** +Sets pointer to the data and length in a field. */ +UNIV_INLINE +void +dfield_write_mbr( +/*=============*/ + dfield_t* field, /*!< in: field */ + const double* mbr) /*!< in: data */ +{ + ut_ad(field); + +#ifdef UNIV_VALGRIND_DEBUG + if (len != UNIV_SQL_NULL) UNIV_MEM_ASSERT_RW(data, len); +#endif /* UNIV_VALGRIND_DEBUG */ + field->ext = 0; + + for (int i = 0; i < SPDIMS * 2; i++) { + mach_double_write(static_cast(field->data) + + i * sizeof(double), mbr[i]); + } + + field->len = DATA_MBR_LEN; } /*********************************************************************//** @@ -235,7 +261,7 @@ dfield_dup( Tests if two data fields are equal. If len==0, tests the data length and content for equality. If len>0, tests the first len bytes of the content for equality. -@return TRUE if both fields are NULL or if they are equal */ +@return TRUE if both fields are NULL or if they are equal */ UNIV_INLINE ibool dfield_datas_are_binary_equal( @@ -262,7 +288,7 @@ dfield_datas_are_binary_equal( /*********************************************************************//** Tests if dfield data length and content is equal to the given. -@return TRUE if equal */ +@return TRUE if equal */ UNIV_INLINE ibool dfield_data_is_binary_equal( @@ -279,7 +305,7 @@ dfield_data_is_binary_equal( /*********************************************************************//** Gets info bits in a data tuple. -@return info bits */ +@return info bits */ UNIV_INLINE ulint dtuple_get_info_bits( @@ -307,7 +333,7 @@ dtuple_set_info_bits( /*********************************************************************//** Gets number of fields used in record comparisons. -@return number of fields used in comparisons in rem0cmp.* */ +@return number of fields used in comparisons in rem0cmp.* */ UNIV_INLINE ulint dtuple_get_n_fields_cmp( @@ -337,7 +363,7 @@ dtuple_set_n_fields_cmp( /*********************************************************************//** Gets number of fields in a data tuple. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dtuple_get_n_fields( @@ -349,48 +375,85 @@ dtuple_get_n_fields( return(tuple->n_fields); } +/** Gets the number of virtual fields in a data tuple. 
+@param[in] tuple dtuple to check +@return number of fields */ +UNIV_INLINE +ulint +dtuple_get_n_v_fields( + const dtuple_t* tuple) +{ + ut_ad(tuple); + + return(tuple->n_v_fields); +} #ifdef UNIV_DEBUG -/*********************************************************************//** -Gets nth field of a tuple. -@return nth field */ +/** Gets nth field of a tuple. +@param[in] tuple tuple +@param[in] n index of field +@return nth field */ UNIV_INLINE dfield_t* dtuple_get_nth_field( -/*=================*/ - const dtuple_t* tuple, /*!< in: tuple */ - ulint n) /*!< in: index of field */ + const dtuple_t* tuple, + ulint n) { ut_ad(tuple); ut_ad(n < tuple->n_fields); return((dfield_t*) tuple->fields + n); } +/** Gets nth virtual field of a tuple. +@param[in] tuple tuple +@oaran[in] n the nth field to get +@return nth field */ +UNIV_INLINE +dfield_t* +dtuple_get_nth_v_field( + const dtuple_t* tuple, + ulint n) +{ + ut_ad(tuple); + ut_ad(n < tuple->n_v_fields); + + return(static_cast(tuple->v_fields + n)); +} #endif /* UNIV_DEBUG */ -/**********************************************************//** -Creates a data tuple from an already allocated chunk of memory. +/** Creates a data tuple from an already allocated chunk of memory. The size of the chunk must be at least DTUPLE_EST_ALLOC(n_fields). The default value for number of fields used in record comparisons for this tuple is n_fields. -@return created tuple (inside buf) */ +@param[in,out] buf buffer to use +@param[in] buf_size buffer size +@param[in] n_fields number of field +@param[in] n_v_fields number of fields on virtual columns +@return created tuple (inside buf) */ UNIV_INLINE dtuple_t* dtuple_create_from_mem( -/*===================*/ - void* buf, /*!< in, out: buffer to use */ - ulint buf_size, /*!< in: buffer size */ - ulint n_fields) /*!< in: number of fields */ + void* buf, + ulint buf_size, + ulint n_fields, + ulint n_v_fields) { dtuple_t* tuple; + ulint n_t_fields = n_fields + n_v_fields; ut_ad(buf != NULL); - ut_a(buf_size >= DTUPLE_EST_ALLOC(n_fields)); + ut_a(buf_size >= DTUPLE_EST_ALLOC(n_t_fields)); tuple = (dtuple_t*) buf; tuple->info_bits = 0; tuple->n_fields = n_fields; + tuple->n_v_fields = n_v_fields; tuple->n_fields_cmp = n_fields; tuple->fields = (dfield_t*) &tuple[1]; + if (n_v_fields > 0) { + tuple->v_fields = &tuple->fields[n_fields]; + } else { + tuple->v_fields = NULL; + } #ifdef UNIV_DEBUG tuple->magic_n = DATA_TUPLE_MAGIC_N; @@ -398,26 +461,61 @@ dtuple_create_from_mem( { /* In the debug version, initialize fields to an error value */ ulint i; - for (i = 0; i < n_fields; i++) { + for (i = 0; i < n_t_fields; i++) { dfield_t* field; - field = dtuple_get_nth_field(tuple, i); + if (i >= n_fields) { + field = dtuple_get_nth_v_field( + tuple, i - n_fields); + } else { + field = dtuple_get_nth_field(tuple, i); + } dfield_set_len(field, UNIV_SQL_NULL); field->data = &data_error; dfield_get_type(field)->mtype = DATA_ERROR; + dfield_get_type(field)->prtype = DATA_ERROR; } } #endif - UNIV_MEM_ASSERT_W(tuple->fields, n_fields * sizeof *tuple->fields); - UNIV_MEM_INVALID(tuple->fields, n_fields * sizeof *tuple->fields); + UNIV_MEM_ASSERT_W(tuple->fields, n_t_fields * sizeof *tuple->fields); + UNIV_MEM_INVALID(tuple->fields, n_t_fields * sizeof *tuple->fields); return(tuple); } +/** Duplicate the virtual field data in a dtuple_t +@param[in,out] vrow dtuple contains the virtual fields +@param[in] heap heap memory to use */ +UNIV_INLINE +void +dtuple_dup_v_fld( + const dtuple_t* vrow, + mem_heap_t* heap) +{ + for (ulint i = 0; i < 
vrow->n_v_fields; i++) { + dfield_t* dfield = dtuple_get_nth_v_field(vrow, i); + dfield_dup(dfield, heap); + } +} + +/** Initialize the virtual field data in a dtuple_t +@param[in,out] vrow dtuple contains the virtual fields */ +UNIV_INLINE +void +dtuple_init_v_fld( + const dtuple_t* vrow) +{ + for (ulint i = 0; i < vrow->n_v_fields; i++) { + dfield_t* dfield = dtuple_get_nth_v_field(vrow, i); + dfield_get_type(dfield)->mtype = DATA_MISSING; + dfield_set_len(dfield, UNIV_SQL_NULL); + } +} + /**********************************************************//** Creates a data tuple to a memory heap. The default value for number of fields used in record comparisons for this tuple is n_fields. -@return own: created tuple */ +@return own: created tuple */ UNIV_INLINE dtuple_t* dtuple_create( @@ -426,6 +524,21 @@ dtuple_create( is created, DTUPLE_EST_ALLOC(n_fields) bytes will be allocated from this heap */ ulint n_fields) /*!< in: number of fields */ +{ + return(dtuple_create_with_vcol(heap, n_fields, 0)); +} + +/** Creates a data tuple with virtual columns to a memory heap. +@param[in] heap memory heap where the tuple is created +@param[in] n_fields number of fields +@param[in] n_v_fields number of fields on virtual col +@return own: created tuple */ +UNIV_INLINE +dtuple_t* +dtuple_create_with_vcol( + mem_heap_t* heap, + ulint n_fields, + ulint n_v_fields) { void* buf; ulint buf_size; @@ -433,18 +546,37 @@ dtuple_create( ut_ad(heap); - buf_size = DTUPLE_EST_ALLOC(n_fields); + buf_size = DTUPLE_EST_ALLOC(n_fields + n_v_fields); buf = mem_heap_alloc(heap, buf_size); - tuple = dtuple_create_from_mem(buf, buf_size, n_fields); + tuple = dtuple_create_from_mem(buf, buf_size, n_fields, n_v_fields); return(tuple); } +/** Copies a data tuple's virtual fields to another. This is a shallow copy; +@param[in,out] d_tuple destination tuple +@param[in] s_tuple source tuple */ +UNIV_INLINE +void +dtuple_copy_v_fields( + dtuple_t* d_tuple, + const dtuple_t* s_tuple) +{ + + ulint n_v_fields = dtuple_get_n_v_fields(d_tuple); + ut_ad(n_v_fields == dtuple_get_n_v_fields(s_tuple)); + + for (ulint i = 0; i < n_v_fields; i++) { + dfield_copy(dtuple_get_nth_v_field(d_tuple, i), + dtuple_get_nth_v_field(s_tuple, i)); + } +} + /*********************************************************************//** Copies a data tuple to another. This is a shallow copy; if a deep copy is desired, dfield_dup() will have to be invoked on each field. -@return own: copy of tuple */ +@return own: copy of tuple */ UNIV_INLINE dtuple_t* dtuple_copy( @@ -454,7 +586,9 @@ dtuple_copy( where the tuple is created */ { ulint n_fields = dtuple_get_n_fields(tuple); - dtuple_t* new_tuple = dtuple_create(heap, n_fields); + ulint n_v_fields = dtuple_get_n_v_fields(tuple); + dtuple_t* new_tuple = dtuple_create_with_vcol( + heap, n_fields, n_v_fields); ulint i; for (i = 0; i < n_fields; i++) { @@ -462,6 +596,11 @@ dtuple_copy( dtuple_get_nth_field(tuple, i)); } + for (i = 0; i < n_v_fields; i++) { + dfield_copy(dtuple_get_nth_v_field(new_tuple, i), + dtuple_get_nth_v_field(tuple, i)); + } + return(new_tuple); } @@ -469,7 +608,7 @@ dtuple_copy( The following function returns the sum of data lengths of a tuple. The space occupied by the field structs or the tuple struct is not counted. Neither is possible space in externally stored parts of the field. 
-@return sum of data lengths */ +@return sum of data lengths */ UNIV_INLINE ulint dtuple_get_data_size( @@ -506,7 +645,7 @@ dtuple_get_data_size( /*********************************************************************//** Computes the number of externally stored fields in a data tuple. -@return number of externally stored fields */ +@return number of externally stored fields */ UNIV_INLINE ulint dtuple_get_n_ext( @@ -546,18 +685,19 @@ dtuple_set_types_binary( } } -/************************************************************//** -Folds a prefix given as the number of fields of a tuple. -@return the folded value */ +/** Fold a prefix given as the number of fields of a tuple. +@param[in] tuple index record +@param[in] n_fields number of complete fields to fold +@param[in] n_bytes number of bytes to fold in the last field +@param[in] index_id index tree ID +@return the folded value */ UNIV_INLINE ulint dtuple_fold( -/*========*/ - const dtuple_t* tuple, /*!< in: the tuple */ - ulint n_fields,/*!< in: number of complete fields to fold */ - ulint n_bytes,/*!< in: number of bytes to fold in an - incomplete last field */ - index_id_t tree_id)/*!< in: index tree id */ + const dtuple_t* tuple, + ulint n_fields, + ulint n_bytes, + index_id_t tree_id) { const dfield_t* field; ulint i; @@ -616,7 +756,7 @@ data_write_sql_null( /**********************************************************************//** Checks if a dtuple contains an SQL null value. -@return TRUE if some field is SQL null */ +@return TRUE if some field is SQL null */ UNIV_INLINE ibool dtuple_contains_null( diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h index 111664b0b52..00073dfca2c 100644 --- a/storage/innobase/include/data0type.h +++ b/storage/innobase/include/data0type.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,19 +29,15 @@ Created 1/16/1996 Heikki Tuuri #include "univ.i" extern ulint data_mysql_default_charset_coll; -#define DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL 8 #define DATA_MYSQL_BINARY_CHARSET_COLL 63 /* SQL data type struct */ struct dtype_t; -/* SQL Like operator comparison types */ +/** SQL Like operator comparison types */ enum ib_like_t { - IB_LIKE_EXACT, /* e.g. STRING */ - IB_LIKE_PREFIX, /* e.g., STRING% */ - IB_LIKE_SUFFIX, /* e.g., %STRING */ - IB_LIKE_SUBSTR, /* e.g., %STRING% */ - IB_LIKE_REGEXP /* Future */ + IB_LIKE_EXACT, /**< e.g. STRING */ + IB_LIKE_PREFIX /**< e.g., STRING% */ }; /*-------------------------------------------*/ @@ -79,8 +75,29 @@ binary strings */ DATA_VARMYSQL for all character sets, and the charset-collation for tables created with it can also be latin1_swedish_ci */ + +/* DATA_POINT&DATA_VAR_POINT are for standard geometry datatype 'point' and +DATA_GEOMETRY include all other standard geometry datatypes as described in +OGC standard(line_string, polygon, multi_point, multi_polygon, +multi_line_string, geometry_collection, geometry). +Currently, geometry data is stored in the standard Well-Known Binary(WKB) +format (http://www.opengeospatial.org/standards/sfa). 
+We use BLOB as underlying datatype for DATA_GEOMETRY and DATA_VAR_POINT +while CHAR for DATA_POINT */ +#define DATA_GEOMETRY 14 /* geometry datatype of variable length */ +/* The following two are disabled temporarily, we won't create them in +get_innobase_type_from_mysql_type(). +TODO: We will enable DATA_POINT/them when we come to the fixed-length POINT +again. */ +#define DATA_POINT 15 /* geometry datatype of fixed length POINT */ +#define DATA_VAR_POINT 16 /* geometry datatype of variable length + POINT, used when we want to store POINT + as BLOB internally */ #define DATA_MTYPE_MAX 63 /* dtype_store_for_order_and_null_size() requires the values are <= 63 */ + +#define DATA_MTYPE_CURRENT_MIN DATA_VARCHAR /* minimum value of mtype */ +#define DATA_MTYPE_CURRENT_MAX DATA_VAR_POINT /* maximum value of mtype */ /*-------------------------------------------*/ /* The 'PRECISE TYPE' of a column */ /* @@ -149,6 +166,10 @@ be less than 256 */ #define DATA_N_SYS_COLS 3 /* number of system columns defined above */ +#define DATA_ITT_N_SYS_COLS 2 + /* number of system columns for intrinsic + temporary table */ + #define DATA_FTS_DOC_ID 3 /* Used as FTS DOC ID column */ #define DATA_SYS_PRTYPE_MASK 0xF /* mask to extract the above from prtype */ @@ -166,10 +187,15 @@ be less than 256 */ In earlier versions this was set for some BLOB columns. */ +#define DATA_GIS_MBR 2048 /* Used as GIS MBR column */ +#define DATA_MBR_LEN SPDIMS * 2 * sizeof(double) /* GIS MBR length*/ + #define DATA_LONG_TRUE_VARCHAR 4096 /* this is ORed to the precise data type when the column is true VARCHAR where MySQL uses 2 bytes to store the data len; for shorter VARCHARs MySQL uses only 1 byte */ +#define DATA_VIRTUAL 8192 /* Virtual column */ + /*-------------------------------------------*/ /* This many bytes we need to store the type information affecting the @@ -183,6 +209,15 @@ store the charset-collation number; one byte is left unused, though */ /* Maximum multi-byte character length in bytes, plus 1 */ #define DATA_MBMAX 5 +/* For DATA_POINT of dimension 2, the length of value in btree is always 25, +which is the summary of: +SRID_SIZE(4) + WKB_HEADER_SIZE(1+4) + POINT_DATA_SIZE(8*2). +So the length of physical record or POINT KEYs on btree are 25. +GIS_TODO: When we support multi-dimensions DATA_POINT, we should get the +length from corresponding column or index definition, instead of this MACRO +*/ +#define DATA_POINT_LEN 25 + /* Pack mbminlen, mbmaxlen to mbminmaxlen. */ #define DATA_MBMINMAXLEN(mbminlen, mbmaxlen) \ ((mbmaxlen) * DATA_MBMAX + (mbminlen)) @@ -194,6 +229,30 @@ because in GCC it returns a long. */ /* Get mbmaxlen from mbminmaxlen. */ #define DATA_MBMAXLEN(mbminmaxlen) ((ulint) ((mbminmaxlen) / DATA_MBMAX)) +/* For checking if a geom_type is POINT */ +#define DATA_POINT_MTYPE(mtype) ((mtype) == DATA_POINT \ + || (mtype) == DATA_VAR_POINT) + +/* For checking if mtype is GEOMETRY datatype */ +#define DATA_GEOMETRY_MTYPE(mtype) (DATA_POINT_MTYPE(mtype) \ + || (mtype) == DATA_GEOMETRY) + +/* For checking if mtype is BLOB or GEOMETRY, since we use BLOB as +the underling datatype of GEOMETRY(not DATA_POINT) data. */ +#define DATA_LARGE_MTYPE(mtype) ((mtype) == DATA_BLOB \ + || (mtype) == DATA_VAR_POINT \ + || (mtype) == DATA_GEOMETRY) + +/* For checking if data type is big length data type. */ +#define DATA_BIG_LEN_MTYPE(len, mtype) ((len) > 255 || DATA_LARGE_MTYPE(mtype)) + +/* For checking if the column is a big length column. 
*/ +#define DATA_BIG_COL(col) DATA_BIG_LEN_MTYPE((col)->len, (col)->mtype) + +/* For checking if data type is large binary data type. */ +#define DATA_LARGE_BINARY(mtype,prtype) ((mtype) == DATA_GEOMETRY || \ + ((mtype) == DATA_BLOB && !((prtype) & DATA_BINARY_TYPE))) + /* We now support 15 bits (up to 32767) collation number */ #define MAX_CHAR_COLL_NUM 32767 @@ -203,7 +262,7 @@ because in GCC it returns a long. */ #ifndef UNIV_HOTBACKUP /*********************************************************************//** Gets the MySQL type code from a dtype. -@return MySQL type code; this is NOT an InnoDB type code! */ +@return MySQL type code; this is NOT an InnoDB type code! */ UNIV_INLINE ulint dtype_get_mysql_type( @@ -213,8 +272,7 @@ dtype_get_mysql_type( Determine how many bytes the first n characters of the given string occupy. If the string is shorter than n characters, returns the number of bytes the characters in the string occupy. -@return length of the prefix, in bytes */ -UNIV_INTERN +@return length of the prefix, in bytes */ ulint dtype_get_at_most_n_mbchars( /*========================*/ @@ -231,8 +289,7 @@ dtype_get_at_most_n_mbchars( /*********************************************************************//** Checks if a data main type is a string type. Also a BLOB is considered a string type. -@return TRUE if string type */ -UNIV_INTERN +@return TRUE if string type */ ibool dtype_is_string_type( /*=================*/ @@ -241,8 +298,7 @@ dtype_is_string_type( Checks if a type is a binary string type. Note that for tables created with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For those DATA_BLOB columns this function currently returns FALSE. -@return TRUE if binary string type */ -UNIV_INTERN +@return TRUE if binary string type */ ibool dtype_is_binary_string_type( /*========================*/ @@ -253,8 +309,7 @@ Checks if a type is a non-binary string type. That is, dtype_is_string_type is TRUE and dtype_is_binary_string_type is FALSE. Note that for tables created with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For those DATA_BLOB columns this function currently returns TRUE. -@return TRUE if non-binary string type */ -UNIV_INTERN +@return TRUE if non-binary string type */ ibool dtype_is_non_binary_string_type( /*============================*/ @@ -280,7 +335,7 @@ dtype_copy( const dtype_t* type2); /*!< in: type struct to copy from */ /*********************************************************************//** Gets the SQL main data type. -@return SQL main data type */ +@return SQL main data type */ UNIV_INLINE ulint dtype_get_mtype( @@ -288,7 +343,7 @@ dtype_get_mtype( const dtype_t* type); /*!< in: data type */ /*********************************************************************//** Gets the precise data type. -@return precise data type */ +@return precise data type */ UNIV_INLINE ulint dtype_get_prtype( @@ -309,7 +364,7 @@ dtype_get_mblen( multi-byte character */ /*********************************************************************//** Gets the MySQL charset-collation code for MySQL string types. -@return MySQL charset-collation code */ +@return MySQL charset-collation code */ UNIV_INLINE ulint dtype_get_charset_coll( @@ -319,7 +374,6 @@ dtype_get_charset_coll( Forms a precise type from the < 4.1.2 format precise type plus the charset-collation code. 
@return precise type, including the charset-collation code */ -UNIV_INTERN ulint dtype_form_prtype( /*==============*/ @@ -330,7 +384,7 @@ dtype_form_prtype( Determines if a MySQL string type is a subset of UTF-8. This function may return false negatives, in case further character-set collation codes are introduced in MySQL later. -@return TRUE if a subset of UTF-8 */ +@return TRUE if a subset of UTF-8 */ UNIV_INLINE ibool dtype_is_utf8( @@ -339,7 +393,7 @@ dtype_is_utf8( #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Gets the type length. -@return fixed length of the type, in bytes, or 0 if variable-length */ +@return fixed length of the type, in bytes, or 0 if variable-length */ UNIV_INLINE ulint dtype_get_len( @@ -377,19 +431,10 @@ dtype_set_mbminmaxlen( ulint mbmaxlen); /*!< in: maximum length of a char, in bytes, or 0 if this is not a character type */ -/*********************************************************************//** -Gets the padding character code for the type. -@return padding character code, or ULINT_UNDEFINED if no padding specified */ -UNIV_INLINE -ulint -dtype_get_pad_char( -/*===============*/ - ulint mtype, /*!< in: main type */ - ulint prtype); /*!< in: precise type */ #endif /* !UNIV_HOTBACKUP */ /***********************************************************************//** Returns the size of a fixed size data type, 0 if not a fixed size type. -@return fixed size, or 0 */ +@return fixed size, or 0 */ UNIV_INLINE ulint dtype_get_fixed_size_low( @@ -403,7 +448,7 @@ dtype_get_fixed_size_low( #ifndef UNIV_HOTBACKUP /***********************************************************************//** Returns the minimum size of a data type. -@return minimum size */ +@return minimum size */ UNIV_INLINE ulint dtype_get_min_size_low( @@ -416,7 +461,7 @@ dtype_get_min_size_low( /***********************************************************************//** Returns the maximum size of a data type. Note: types in system tables may be incomplete and return incorrect information. -@return maximum size */ +@return maximum size */ UNIV_INLINE ulint dtype_get_max_size_low( @@ -427,7 +472,7 @@ dtype_get_max_size_low( /***********************************************************************//** Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a type. For fixed length types it is the fixed length of the type, otherwise 0. -@return SQL null storage size in ROW_FORMAT=REDUNDANT */ +@return SQL null storage size in ROW_FORMAT=REDUNDANT */ UNIV_INLINE ulint dtype_get_sql_null_size( @@ -486,15 +531,13 @@ dtype_sql_name( /*********************************************************************//** Validates a data type structure. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dtype_validate( /*===========*/ const dtype_t* type); /*!< in: type struct to validate */ /*********************************************************************//** Prints a data type structure. */ -UNIV_INTERN void dtype_print( /*========*/ diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index d489bef89a8..57770ec0e17 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. 
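As a quick sanity check on the new type constants introduced in the data0type.h hunk above, here is a small standalone sketch that re-states the mbminlen/mbmaxlen packing macros and the DATA_POINT_LEN arithmetic exactly as they appear in the hunk. The ulint typedef and the main() harness are illustrative stand-ins rather than InnoDB code, and SPDIMS (defined elsewhere in the GIS sources) is assumed to be 2 for 2-D geometry.

#include <cassert>
#include <cstdio>

typedef unsigned long ulint;   /* illustrative stand-in for InnoDB's ulint */

#define DATA_MBMAX 5           /* maximum multi-byte char length, plus 1 */
/* Pack mbminlen, mbmaxlen into one value, as in data0type.h above. */
#define DATA_MBMINMAXLEN(mbminlen, mbmaxlen) \
	((mbmaxlen) * DATA_MBMAX + (mbminlen))
/* Recover mbmaxlen from the packed value. */
#define DATA_MBMAXLEN(mbminmaxlen) ((ulint) ((mbminmaxlen) / DATA_MBMAX))

int main()
{
	/* A utf8mb4-style charset stores 1..4 bytes per character. */
	ulint packed = DATA_MBMINMAXLEN(1, 4);            /* 4 * 5 + 1 = 21 */
	assert(DATA_MBMAXLEN(packed) == 4);

	/* DATA_POINT_LEN: SRID(4) + WKB header(1 + 4) + two doubles(8 * 2) = 25 */
	assert(4 + (1 + 4) + 2 * sizeof(double) == 25);

	/* DATA_MBR_LEN above is SPDIMS * 2 * sizeof(double); assuming
	SPDIMS == 2 for 2-D geometry, the stored MBR occupies 32 bytes. */
	printf("packed mbminmaxlen = %lu\n", packed);
	return 0;
}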
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -23,15 +23,13 @@ Data types Created 1/16/1996 Heikki Tuuri *******************************************************/ -#include /* strlen() */ - #include "mach0data.h" #ifndef UNIV_HOTBACKUP # include "ha_prototypes.h" /*********************************************************************//** Gets the MySQL charset-collation code for MySQL string types. -@return MySQL charset-collation code */ +@return MySQL charset-collation code */ UNIV_INLINE ulint dtype_get_charset_coll( @@ -45,7 +43,7 @@ dtype_get_charset_coll( Determines if a MySQL string type is a subset of UTF-8. This function may return false negatives, in case further character-set collation codes are introduced in MySQL later. -@return TRUE if a subset of UTF-8 */ +@return TRUE if a subset of UTF-8 */ UNIV_INLINE ibool dtype_is_utf8( @@ -68,7 +66,7 @@ dtype_is_utf8( /*********************************************************************//** Gets the MySQL type code from a dtype. -@return MySQL type code; this is NOT an InnoDB type code! */ +@return MySQL type code; this is NOT an InnoDB type code! */ UNIV_INLINE ulint dtype_get_mysql_type( @@ -180,7 +178,7 @@ dtype_copy( /*********************************************************************//** Gets the SQL main data type. -@return SQL main data type */ +@return SQL main data type */ UNIV_INLINE ulint dtype_get_mtype( @@ -194,7 +192,7 @@ dtype_get_mtype( /*********************************************************************//** Gets the precise data type. -@return precise data type */ +@return precise data type */ UNIV_INLINE ulint dtype_get_prtype( @@ -208,7 +206,7 @@ dtype_get_prtype( /*********************************************************************//** Gets the type length. -@return fixed length of the type, in bytes, or 0 if variable-length */ +@return fixed length of the type, in bytes, or 0 if variable-length */ UNIV_INLINE ulint dtype_get_len( @@ -248,45 +246,6 @@ dtype_get_mbmaxlen( return(DATA_MBMAXLEN(type->mbminmaxlen)); } -/*********************************************************************//** -Gets the padding character code for a type. -@return padding character code, or ULINT_UNDEFINED if no padding specified */ -UNIV_INLINE -ulint -dtype_get_pad_char( -/*===============*/ - ulint mtype, /*!< in: main type */ - ulint prtype) /*!< in: precise type */ -{ - switch (mtype) { - case DATA_FIXBINARY: - case DATA_BINARY: - if (dtype_get_charset_coll(prtype) - == DATA_MYSQL_BINARY_CHARSET_COLL) { - /* Starting from 5.0.18, do not pad - VARBINARY or BINARY columns. */ - return(ULINT_UNDEFINED); - } - /* Fall through */ - case DATA_CHAR: - case DATA_VARCHAR: - case DATA_MYSQL: - case DATA_VARMYSQL: - /* Space is the padding character for all char and binary - strings, and starting from 5.0.3, also for TEXT strings. */ - - return(0x20); - case DATA_BLOB: - if (!(prtype & DATA_BINARY_TYPE)) { - return(0x20); - } - /* Fall through */ - default: - /* No padding specified */ - return(ULINT_UNDEFINED); - } -} - /**********************************************************************//** Stores for a type the information which determines its alphabetical ordering and the storage size of an SQL NULL value. 
This is the >= 4.1.x storage @@ -309,7 +268,7 @@ dtype_new_store_for_order_and_null_size( ut_ad(type); ut_ad(type->mtype >= DATA_VARCHAR); - ut_ad(type->mtype <= DATA_MYSQL); + ut_ad(type->mtype <= DATA_MTYPE_MAX); buf[0] = (byte)(type->mtype & 0xFFUL); @@ -483,6 +442,9 @@ dtype_sql_name( case DATA_BINARY: ut_snprintf(name, name_sz, "VARBINARY(%u)", len); break; + case DATA_GEOMETRY: + ut_snprintf(name, name_sz, "GEOMETRY"); + break; case DATA_BLOB: switch (len) { case 9: @@ -513,7 +475,7 @@ dtype_sql_name( /***********************************************************************//** Returns the size of a fixed size data type, 0 if not a fixed size type. -@return fixed size, or 0 */ +@return fixed size, or 0 */ UNIV_INLINE ulint dtype_get_fixed_size_low( @@ -548,6 +510,7 @@ dtype_get_fixed_size_low( case DATA_INT: case DATA_FLOAT: case DATA_DOUBLE: + case DATA_POINT: return(len); case DATA_MYSQL: #ifndef UNIV_HOTBACKUP @@ -579,6 +542,8 @@ dtype_get_fixed_size_low( case DATA_BINARY: case DATA_DECIMAL: case DATA_VARMYSQL: + case DATA_VAR_POINT: + case DATA_GEOMETRY: case DATA_BLOB: return(0); default: @@ -591,7 +556,7 @@ dtype_get_fixed_size_low( #ifndef UNIV_HOTBACKUP /***********************************************************************//** Returns the minimum size of a data type. -@return minimum size */ +@return minimum size */ UNIV_INLINE ulint dtype_get_min_size_low( @@ -625,6 +590,7 @@ dtype_get_min_size_low( case DATA_INT: case DATA_FLOAT: case DATA_DOUBLE: + case DATA_POINT: return(len); case DATA_MYSQL: if (prtype & DATA_BINARY_TYPE) { @@ -647,6 +613,8 @@ dtype_get_min_size_low( case DATA_BINARY: case DATA_DECIMAL: case DATA_VARMYSQL: + case DATA_VAR_POINT: + case DATA_GEOMETRY: case DATA_BLOB: return(0); default: @@ -659,7 +627,7 @@ dtype_get_min_size_low( /***********************************************************************//** Returns the maximum size of a data type. Note: types in system tables may be incomplete and return incorrect information. -@return maximum size */ +@return maximum size */ UNIV_INLINE ulint dtype_get_max_size_low( @@ -679,7 +647,10 @@ dtype_get_max_size_low( case DATA_BINARY: case DATA_DECIMAL: case DATA_VARMYSQL: + case DATA_POINT: return(len); + case DATA_VAR_POINT: + case DATA_GEOMETRY: case DATA_BLOB: break; default: @@ -693,7 +664,7 @@ dtype_get_max_size_low( /***********************************************************************//** Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a type. For fixed length types it is the fixed length of the type, otherwise 0. -@return SQL null storage size in ROW_FORMAT=REDUNDANT */ +@return SQL null storage size in ROW_FORMAT=REDUNDANT */ UNIV_INLINE ulint dtype_get_sql_null_size( diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index feac81af98e..5297d6b0daf 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2015, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under @@ -27,6 +27,7 @@ Created 5/24/1996 Heikki Tuuri #ifndef db0err_h #define db0err_h +/* Do not include univ.i because univ.i includes this. 
*/ enum dberr_t { DB_SUCCESS_LOCKED_REC = 9, /*!< like DB_SUCCESS, but a new @@ -64,7 +65,8 @@ enum dberr_t { which is referenced */ DB_CANNOT_ADD_CONSTRAINT, /*!< adding a foreign key constraint to a table failed */ - DB_CORRUPTION, /*!< data structure corruption noticed */ + DB_CORRUPTION, /*!< data structure corruption + noticed */ DB_CANNOT_DROP_CONSTRAINT, /*!< dropping a foreign key constraint from a table failed */ DB_NO_SAVEPOINT, /*!< no savepoint exists with the given @@ -124,20 +126,49 @@ enum dberr_t { DB_ONLINE_LOG_TOO_BIG, /*!< Modification log grew too big during online index creation */ - DB_IO_ERROR, /*!< Generic IO error */ DB_IDENTIFIER_TOO_LONG, /*!< Identifier name too long */ DB_FTS_EXCEED_RESULT_CACHE_LIMIT, /*!< FTS query memory exceeds result cache limit */ - DB_TEMP_FILE_WRITE_FAILURE, /*!< Temp file write failure */ + DB_TEMP_FILE_WRITE_FAIL, /*!< Temp file write failure */ + DB_CANT_CREATE_GEOMETRY_OBJECT, /*!< Cannot create specified Geometry + data object */ + DB_CANNOT_OPEN_FILE, /*!< Cannot open a file */ DB_FTS_TOO_MANY_WORDS_IN_PHRASE, /*< Too many words in a phrase */ - DB_TOO_BIG_FOR_REDO, /* Record length greater than 10% - of redo log */ + + DB_TABLESPACE_TRUNCATED, /*!< tablespace was truncated */ DB_DECRYPTION_FAILED, /* Tablespace encrypted and decrypt operation failed because of missing key management plugin, or missing or incorrect key or incorret AES method or algorithm. */ + + DB_IO_ERROR = 100, /*!< Generic IO error */ + + DB_IO_DECOMPRESS_FAIL, /*!< Failure to decompress a page + after reading it from disk */ + + DB_IO_NO_PUNCH_HOLE, /*!< Punch hole not supported by + InnoDB */ + + DB_IO_NO_PUNCH_HOLE_FS, /*!< The file system doesn't support + punch hole */ + + DB_IO_NO_PUNCH_HOLE_TABLESPACE, /*!< The tablespace doesn't support + punch hole */ + + DB_IO_PARTIAL_FAILED, /*!< Partial IO request failed */ + + DB_FORCED_ABORT, /*!< Transaction was forced to rollback + by a higher priority transaction */ + + DB_TABLE_CORRUPT, /*!< Table/clustered index is + corrupted */ + + DB_WRONG_FILE_NAME, /*!< Invalid Filename */ + + DB_COMPUTE_VALUE_FAILED, /*!< Compute generated value failed */ + /* The following are partial failure codes */ DB_FAIL = 1000, DB_OVERFLOW, @@ -146,22 +177,18 @@ enum dberr_t { DB_ZIP_OVERFLOW, DB_RECORD_NOT_FOUND = 1500, DB_END_OF_INDEX, - DB_DICT_CHANGED, /*!< Some part of table dictionary has - changed. Such as index dropped or - foreign key dropped */ - + DB_NOT_FOUND, /*!< Generic error code for "Not found" + type of errors */ - /* The following are API only error codes. */ + /* The following are API only error codes. */ DB_DATA_MISMATCH = 2000, /*!< Column update or read failed because the types mismatch */ - DB_SCHEMA_NOT_LOCKED, /*!< If an API function expects the + DB_SCHEMA_NOT_LOCKED /*!< If an API function expects the schema to be locked in exclusive mode and if it's not then that API function will return this error code */ - DB_NOT_FOUND /*!< Generic error code for "Not found" - type of errors */ }; #endif diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h index 477e1150f43..5884ba4bcc2 100644 --- a/storage/innobase/include/dict0boot.h +++ b/storage/innobase/include/dict0boot.h @@ -39,41 +39,42 @@ typedef byte dict_hdr_t; /**********************************************************************//** Gets a pointer to the dictionary header and x-latches its page. 
-@return pointer to the dictionary header, page x-latched */ -UNIV_INTERN +@return pointer to the dictionary header, page x-latched */ dict_hdr_t* dict_hdr_get( /*=========*/ mtr_t* mtr); /*!< in: mtr */ /**********************************************************************//** Returns a new table, index, or space id. */ -UNIV_INTERN void dict_hdr_get_new_id( /*================*/ - table_id_t* table_id, /*!< out: table id - (not assigned if NULL) */ - index_id_t* index_id, /*!< out: index id - (not assigned if NULL) */ - ulint* space_id); /*!< out: space id - (not assigned if NULL) */ + table_id_t* table_id, /*!< out: table id + (not assigned if NULL) */ + index_id_t* index_id, /*!< out: index id + (not assigned if NULL) */ + ulint* space_id, /*!< out: space id + (not assigned if NULL) */ + const dict_table_t* table, /*!< in: table */ + bool disable_redo); /*!< in: if true and table + object is NULL + then disable-redo */ /**********************************************************************//** Writes the current value of the row id counter to the dictionary header file page. */ -UNIV_INTERN void dict_hdr_flush_row_id(void); /*=======================*/ /**********************************************************************//** Returns a new row id. -@return the new id */ +@return the new id */ UNIV_INLINE row_id_t dict_sys_get_new_row_id(void); /*=========================*/ /**********************************************************************//** Reads a row id from a record or other 6-byte stored form. -@return row id */ +@return row id */ UNIV_INLINE row_id_t dict_sys_read_row_id( @@ -91,7 +92,6 @@ dict_sys_write_row_id( Initializes the data dictionary memory structures when the database is started. This function is also called when the data dictionary is created. @return DB_SUCCESS or error code. */ -UNIV_INTERN dberr_t dict_boot(void) /*===========*/ @@ -100,7 +100,6 @@ dict_boot(void) /*****************************************************************//** Creates and initializes the data dictionary at the server bootstrap. @return DB_SUCCESS or error code. 
*/ -UNIV_INTERN dberr_t dict_create(void) /*=============*/ @@ -221,7 +220,8 @@ enum dict_col_sys_indexes_enum { DICT_COL__SYS_INDEXES__TYPE = 4, DICT_COL__SYS_INDEXES__SPACE = 5, DICT_COL__SYS_INDEXES__PAGE_NO = 6, - DICT_NUM_COLS__SYS_INDEXES = 7 + DICT_COL__SYS_INDEXES__MERGE_THRESHOLD = 7, + DICT_NUM_COLS__SYS_INDEXES = 8 }; /* The field numbers in the SYS_INDEXES clustered index */ enum dict_fld_sys_indexes_enum { @@ -234,7 +234,8 @@ enum dict_fld_sys_indexes_enum { DICT_FLD__SYS_INDEXES__TYPE = 6, DICT_FLD__SYS_INDEXES__SPACE = 7, DICT_FLD__SYS_INDEXES__PAGE_NO = 8, - DICT_NUM_FIELDS__SYS_INDEXES = 9 + DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD = 9, + DICT_NUM_FIELDS__SYS_INDEXES = 10 }; /* The columns in SYS_FIELDS */ enum dict_col_sys_fields_enum { @@ -325,6 +326,23 @@ enum dict_fld_sys_datafiles_enum { DICT_NUM_FIELDS__SYS_DATAFILES = 4 }; +/* The columns in SYS_VIRTUAL */ +enum dict_col_sys_virtual_enum { + DICT_COL__SYS_VIRTUAL__TABLE_ID = 0, + DICT_COL__SYS_VIRTUAL__POS = 1, + DICT_COL__SYS_VIRTUAL__BASE_POS = 2, + DICT_NUM_COLS__SYS_VIRTUAL = 3 +}; +/* The field numbers in the SYS_VIRTUAL clustered index */ +enum dict_fld_sys_virtual_enum { + DICT_FLD__SYS_VIRTUAL__TABLE_ID = 0, + DICT_FLD__SYS_VIRTUAL__POS = 1, + DICT_FLD__SYS_VIRTUAL__BASE_POS = 2, + DICT_FLD__SYS_VIRTUAL__DB_TRX_ID = 3, + DICT_FLD__SYS_VIRTUAL__DB_ROLL_PTR = 4, + DICT_NUM_FIELDS__SYS_VIRTUAL = 5 +}; + /* A number of the columns above occur in multiple tables. These are the length of thos fields. */ #define DICT_FLD_LEN_SPACE 4 diff --git a/storage/innobase/include/dict0boot.ic b/storage/innobase/include/dict0boot.ic index 2b156a4f672..e40c3f844e3 100644 --- a/storage/innobase/include/dict0boot.ic +++ b/storage/innobase/include/dict0boot.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -25,7 +25,7 @@ Created 4/18/1996 Heikki Tuuri /**********************************************************************//** Returns a new row id. -@return the new id */ +@return the new id */ UNIV_INLINE row_id_t dict_sys_get_new_row_id(void) @@ -33,7 +33,7 @@ dict_sys_get_new_row_id(void) { row_id_t id; - mutex_enter(&(dict_sys->mutex)); + mutex_enter(&dict_sys->mutex); id = dict_sys->row_id; @@ -44,14 +44,14 @@ dict_sys_get_new_row_id(void) dict_sys->row_id++; - mutex_exit(&(dict_sys->mutex)); + mutex_exit(&dict_sys->mutex); return(id); } /**********************************************************************//** Reads a row id from a record or other 6-byte stored form. -@return row id */ +@return row id */ UNIV_INLINE row_id_t dict_sys_read_row_id( diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h index 150c76b2e65..7915d694c2d 100644 --- a/storage/innobase/include/dict0crea.h +++ b/storage/innobase/include/dict0crea.h @@ -33,97 +33,155 @@ Created 1/8/1996 Heikki Tuuri #include "row0types.h" #include "mtr0mtr.h" #include "fil0crypt.h" +#include "fsp0space.h" /*********************************************************************//** Creates a table create graph. 
-@return own: table create node */ -UNIV_INTERN +@return own: table create node */ tab_node_t* tab_create_graph_create( /*====================*/ - dict_table_t* table, /*!< in: table to create, built as a memory data - structure */ - mem_heap_t* heap, /*!< in: heap where created */ - bool commit, /*!< in: true if the commit node should be - added to the query graph */ - fil_encryption_t mode, /*!< in: encryption mode */ - ulint key_id);/*!< in: encryption key_id */ -/*********************************************************************//** -Creates an index create graph. -@return own: index create node */ -UNIV_INTERN + dict_table_t* table, /*!< in: table to create, built as + a memory data structure */ + mem_heap_t* heap, /*!< in: heap where created */ + fil_encryption_t mode, /*!< in: encryption mode */ + ulint key_id); /*!< in: encryption key_id */ + +/** Creates an index create graph. +@param[in] index index to create, built as a memory data structure +@param[in,out] heap heap where created +@param[in] add_v new virtual columns added in the same clause with + add index +@return own: index create node */ ind_node_t* ind_create_graph_create( -/*====================*/ - dict_index_t* index, /*!< in: index to create, built as a memory data - structure */ - mem_heap_t* heap, /*!< in: heap where created */ - bool commit);/*!< in: true if the commit node should be - added to the query graph */ + dict_index_t* index, + mem_heap_t* heap, + const dict_add_v_col_t* add_v); + /***********************************************************//** Creates a table. This is a high-level function used in SQL execution graphs. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* dict_create_table_step( /*===================*/ - que_thr_t* thr); /*!< in: query thread */ + que_thr_t* thr); /*!< in: query thread */ + +/** Builds a tablespace to store various objects. +@param[in,out] tablespace Tablespace object describing what to build. +@return DB_SUCCESS or error code. */ +dberr_t +dict_build_tablespace( + Tablespace* tablespace); + +/** Builds a tablespace to contain a table, using file-per-table=1. +@param[in,out] table Table to build in its own tablespace. +@param[in] node Table create node +@return DB_SUCCESS or error code */ +dberr_t +dict_build_tablespace_for_table( + dict_table_t* table, + tab_node_t* node); + +/** Assign a new table ID and put it into the table cache and the transaction. +@param[in,out] table Table that needs an ID +@param[in,out] trx Transaction */ +void +dict_table_assign_new_id( + dict_table_t* table, + trx_t* trx); + /***********************************************************//** Creates an index. This is a high-level function used in SQL execution graphs. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* dict_create_index_step( /*===================*/ - que_thr_t* thr); /*!< in: query thread */ + que_thr_t* thr); /*!< in: query thread */ + +/***************************************************************//** +Builds an index definition but doesn't update sys_table. +@return DB_SUCCESS or error code */ +void +dict_build_index_def( +/*=================*/ + const dict_table_t* table, /*!< in: table */ + dict_index_t* index, /*!< in/out: index */ + trx_t* trx); /*!< in/out: InnoDB transaction + handle */ +/***************************************************************//** +Creates an index tree for the index if it is not a member of a cluster. 
+Don't update SYSTEM TABLES. +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +dberr_t +dict_create_index_tree( +/*===================*/ + dict_index_t* index, /*!< in/out: index */ + const trx_t* trx); /*!< in: InnoDB transaction handle */ + /*******************************************************************//** -Truncates the index tree associated with a row in SYS_INDEXES table. +Recreate the index tree associated with a row in SYS_INDEXES table. @return new root page number, or FIL_NULL on failure */ -UNIV_INTERN ulint -dict_truncate_index_tree( -/*=====================*/ - dict_table_t* table, /*!< in: the table the index belongs to */ - ulint space, /*!< in: 0=truncate, - nonzero=create the index tree in the - given tablespace */ - btr_pcur_t* pcur, /*!< in/out: persistent cursor pointing to - record in the clustered index of - SYS_INDEXES table. The cursor may be - repositioned in this call. */ - mtr_t* mtr); /*!< in: mtr having the latch - on the record page. The mtr may be - committed and restarted in this call. */ +dict_recreate_index_tree( +/*======================*/ + const dict_table_t* table, /*!< in: the table the index + belongs to */ + btr_pcur_t* pcur, /*!< in/out: persistent cursor pointing + to record in the clustered index of + SYS_INDEXES table. The cursor may be + repositioned in this call. */ + mtr_t* mtr); /*!< in: mtr having the latch + on the record page. The mtr may be + committed and restarted in this call. */ + +/** Drop the index tree associated with a row in SYS_INDEXES table. +@param[in,out] rec SYS_INDEXES record +@param[in,out] pcur persistent cursor on rec +@param[in,out] mtr mini-transaction +@return whether freeing the B-tree was attempted */ +bool +dict_drop_index_tree( + rec_t* rec, + btr_pcur_t* pcur, + mtr_t* mtr); + +/***************************************************************//** +Creates an index tree for the index if it is not a member of a cluster. +Don't update SYSTEM TABLES. +@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */ +dberr_t +dict_create_index_tree_in_mem( +/*==========================*/ + dict_index_t* index, /*!< in/out: index */ + const trx_t* trx); /*!< in: InnoDB transaction handle */ + /*******************************************************************//** -Drops the index tree associated with a row in SYS_INDEXES table. */ -UNIV_INTERN +Truncates the index tree but don't update SYSTEM TABLES. +@return DB_SUCCESS or error */ +dberr_t +dict_truncate_index_tree_in_mem( +/*============================*/ + dict_index_t* index); /*!< in/out: index */ + +/*******************************************************************//** +Drops the index tree but don't update SYS_INDEXES table. */ void -dict_drop_index_tree( -/*=================*/ - rec_t* rec, /*!< in/out: record in the clustered index - of SYS_INDEXES table */ - mtr_t* mtr); /*!< in: mtr having the latch on the record page */ +dict_drop_index_tree_in_mem( +/*========================*/ + const dict_index_t* index, /*!< in: index */ + ulint page_no);/*!< in: index page-no */ + /****************************************************************//** Creates the foreign key constraints system tables inside InnoDB at server bootstrap or server start if they are not found or are not of the right form. 
-@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_create_or_check_foreign_constraint_tables(void); /*================================================*/ -/********************************************************************//** -Construct foreign key constraint defintion from data dictionary information. -*/ -UNIV_INTERN -char* -dict_foreign_def_get( -/*=================*/ - dict_foreign_t* foreign,/*!< in: foreign */ - trx_t* trx); /*!< in: trx */ - /********************************************************************//** Generate a foreign key constraint name when it was not named by the user. A generated constraint has a name of the format dbname/tablename_ibfk_NUMBER, @@ -133,11 +191,10 @@ UNIV_INLINE dberr_t dict_create_add_foreign_id( /*=======================*/ - ulint* id_nr, /*!< in/out: number to use in id generation; - incremented if used */ - const char* name, /*!< in: table name */ - dict_foreign_t* foreign)/*!< in/out: foreign key */ - MY_ATTRIBUTE((nonnull)); + ulint* id_nr, /*!< in/out: number to use in id + generation; incremented if used */ + const char* name, /*!< in: table name */ + dict_foreign_t* foreign); /*!< in/out: foreign key */ /** Adds the given set of foreign key objects to the dictionary tables in the database. This function does not modify the dictionary cache. The @@ -149,7 +206,6 @@ the dictionary tables local_fk_set belong to @param[in,out] trx transaction @return error code or DB_SUCCESS */ -UNIV_INTERN dberr_t dict_create_add_foreigns_to_dictionary( /*===================================*/ @@ -161,30 +217,48 @@ dict_create_add_foreigns_to_dictionary( Creates the tablespaces and datafiles system tables inside InnoDB at server bootstrap or server start if they are not found or are not of the right form. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_create_or_check_sys_tablespace(void); /*=====================================*/ -/********************************************************************//** -Add a single tablespace definition to the data dictionary tables in the -database. -@return error code or DB_SUCCESS */ -UNIV_INTERN +/** Creates the virtual column system tables inside InnoDB +at server bootstrap or server start if they are not found or are +not of the right form. +@return DB_SUCCESS or error code */ dberr_t -dict_create_add_tablespace_to_dictionary( -/*=====================================*/ - ulint space, /*!< in: tablespace id */ - const char* name, /*!< in: tablespace name */ - ulint flags, /*!< in: tablespace flags */ - const char* path, /*!< in: tablespace path */ - trx_t* trx, /*!< in: transaction */ - bool commit); /*!< in: if true then commit the - transaction */ +dict_create_or_check_sys_virtual(); + +/** Put a tablespace definition into the data dictionary, +replacing what was there previously. +@param[in] space Tablespace id +@param[in] name Tablespace name +@param[in] flags Tablespace flags +@param[in] path Tablespace path +@param[in] trx Transaction +@param[in] commit If true, commit the transaction +@return error code or DB_SUCCESS */ +dberr_t +dict_replace_tablespace_in_dictionary( + ulint space_id, + const char* name, + ulint flags, + const char* path, + trx_t* trx, + bool commit); + +/** Delete records from SYS_TABLESPACES and SYS_DATAFILES associated +with a particular tablespace ID. 
+@param[in] space Tablespace ID +@param[in,out] trx Current transaction +@return DB_SUCCESS if OK, dberr_t if the operation failed */ +dberr_t +dict_delete_tablespace_and_datafiles( + ulint space, + trx_t* trx); + /********************************************************************//** Add a foreign key definition to the data dictionary tables. -@return error code or DB_SUCCESS */ -UNIV_INTERN +@return error code or DB_SUCCESS */ dberr_t dict_create_add_foreign_to_dictionary( /*==================================*/ @@ -206,65 +280,102 @@ dict_foreign_def_get( /* Table create node structure */ struct tab_node_t{ - que_common_t common; /*!< node type: QUE_NODE_TABLE_CREATE */ - dict_table_t* table; /*!< table to create, built as a memory data - structure with dict_mem_... functions */ - ins_node_t* tab_def; /* child node which does the insert of - the table definition; the row to be inserted - is built by the parent node */ - ins_node_t* col_def; /* child node which does the inserts of - the column definitions; the row to be inserted - is built by the parent node */ - commit_node_t* commit_node; - /* child node which performs a commit after - a successful table creation */ + que_common_t common; /*!< node type: QUE_NODE_TABLE_CREATE */ + dict_table_t* table; /*!< table to create, built as a + memory data structure with + dict_mem_... functions */ + ins_node_t* tab_def; /*!< child node which does the insert of + the table definition; the row to be + inserted is built by the parent node */ + ins_node_t* col_def; /*!< child node which does the inserts + of the column definitions; the row to + be inserted is built by the parent + node */ + ins_node_t* v_col_def; /*!< child node which does the inserts + of the sys_virtual row definitions; + the row to be inserted is built by + the parent node */ /*----------------------*/ /* Local storage for this graph node */ - ulint state; /*!< node execution state */ - ulint col_no; /*!< next column definition to insert */ + ulint state; /*!< node execution state */ + ulint col_no; /*!< next column definition to insert */ ulint key_id; /*!< encryption key_id */ fil_encryption_t mode; /*!< encryption mode */ - mem_heap_t* heap; /*!< memory heap used as auxiliary storage */ + ulint base_col_no; /*!< next base column to insert */ + mem_heap_t* heap; /*!< memory heap used as auxiliary + storage */ }; /* Table create node states */ #define TABLE_BUILD_TABLE_DEF 1 #define TABLE_BUILD_COL_DEF 2 -#define TABLE_COMMIT_WORK 3 +#define TABLE_BUILD_V_COL_DEF 3 #define TABLE_ADD_TO_CACHE 4 #define TABLE_COMPLETED 5 /* Index create node struct */ struct ind_node_t{ - que_common_t common; /*!< node type: QUE_NODE_INDEX_CREATE */ - dict_index_t* index; /*!< index to create, built as a memory data - structure with dict_mem_... functions */ - ins_node_t* ind_def; /* child node which does the insert of - the index definition; the row to be inserted - is built by the parent node */ - ins_node_t* field_def; /* child node which does the inserts of - the field definitions; the row to be inserted - is built by the parent node */ - commit_node_t* commit_node; - /* child node which performs a commit after - a successful index creation */ + que_common_t common; /*!< node type: QUE_NODE_INDEX_CREATE */ + dict_index_t* index; /*!< index to create, built as a + memory data structure with + dict_mem_... 
functions */ + ins_node_t* ind_def; /*!< child node which does the insert of + the index definition; the row to be + inserted is built by the parent node */ + ins_node_t* field_def; /*!< child node which does the inserts + of the field definitions; the row to + be inserted is built by the parent + node */ /*----------------------*/ /* Local storage for this graph node */ - ulint state; /*!< node execution state */ - ulint page_no;/* root page number of the index */ - dict_table_t* table; /*!< table which owns the index */ - dtuple_t* ind_row;/* index definition row built */ - ulint field_no;/* next field definition to insert */ - mem_heap_t* heap; /*!< memory heap used as auxiliary storage */ + ulint state; /*!< node execution state */ + ulint page_no; /* root page number of the index */ + dict_table_t* table; /*!< table which owns the index */ + dtuple_t* ind_row; /* index definition row built */ + ulint field_no; /* next field definition to insert */ + mem_heap_t* heap; /*!< memory heap used as auxiliary + storage */ + const dict_add_v_col_t* + add_v; /*!< new virtual columns that being + added along with an add index call */ }; +/** Compose a column number for a virtual column, stored in the "POS" field +of Sys_columns. The column number includes both its virtual column sequence +(the "nth" virtual column) and its actual column position in original table +@param[in] v_pos virtual column sequence +@param[in] col_pos column position in original table definition +@return composed column position number */ +UNIV_INLINE +ulint +dict_create_v_col_pos( + ulint v_pos, + ulint col_pos); + +/** Get the column number for a virtual column (the column position in +original table), stored in the "POS" field of Sys_columns +@param[in] pos virtual column position +@return column position in original table */ +UNIV_INLINE +ulint +dict_get_v_col_mysql_pos( + ulint pos); + +/** Get a virtual column sequence (the "nth" virtual column) for a +virtual column, stord in the "POS" field of Sys_columns +@param[in] pos virtual column position +@return virtual column sequence */ +UNIV_INLINE +ulint +dict_get_v_col_pos( + ulint pos); + /* Index create node states */ #define INDEX_BUILD_INDEX_DEF 1 #define INDEX_BUILD_FIELD_DEF 2 #define INDEX_CREATE_INDEX_TREE 3 -#define INDEX_COMMIT_WORK 4 -#define INDEX_ADD_TO_CACHE 5 +#define INDEX_ADD_TO_CACHE 4 #ifndef UNIV_NONINL #include "dict0crea.ic" diff --git a/storage/innobase/include/dict0crea.ic b/storage/innobase/include/dict0crea.ic index 1cbaa47032b..565e4ed1a8c 100644 --- a/storage/innobase/include/dict0crea.ic +++ b/storage/innobase/include/dict0crea.ic @@ -23,13 +23,14 @@ Database object creation Created 1/8/1996 Heikki Tuuri *******************************************************/ +#include "ha_prototypes.h" + #include "mem0mem.h" /*********************************************************************//** Checks if a table name contains the string "/#sql" which denotes temporary tables in MySQL. 
@return true if temporary table */ -UNIV_INTERN bool row_is_mysql_tmp_table_name( /*========================*/ @@ -52,6 +53,8 @@ dict_create_add_foreign_id( const char* name, /*!< in: table name */ dict_foreign_t* foreign)/*!< in/out: foreign key */ { + DBUG_ENTER("dict_create_add_foreign_id"); + if (foreign->id == NULL) { /* Generate a new constraint id */ ulint namelen = strlen(name); @@ -87,12 +90,57 @@ dict_create_add_foreign_id( if (innobase_check_identifier_length( strchr(id,'/') + 1)) { - return(DB_IDENTIFIER_TOO_LONG); + DBUG_RETURN(DB_IDENTIFIER_TOO_LONG); } } foreign->id = id; + + DBUG_PRINT("dict_create_add_foreign_id", + ("generated foreign id: %s", id)); } - return(DB_SUCCESS); + + DBUG_RETURN(DB_SUCCESS); +} + +/** Compose a column number for a virtual column, stored in the "POS" field +of Sys_columns. The column number includes both its virtual column sequence +(the "nth" virtual column) and its actual column position in original table +@param[in] v_pos virtual column sequence +@param[in] col_pos column position in original table definition +@return composed column position number */ +UNIV_INLINE +ulint +dict_create_v_col_pos( + ulint v_pos, + ulint col_pos) +{ + ut_ad(v_pos <= REC_MAX_N_FIELDS); + ut_ad(col_pos <= REC_MAX_N_FIELDS); + + return(((v_pos + 1) << 16) + col_pos); +} + +/** Get the column number for a virtual column (the column position in +original table), stored in the "POS" field of Sys_columns +@param[in] pos virtual column position +@return column position in original table */ +UNIV_INLINE +ulint +dict_get_v_col_mysql_pos( + ulint pos) +{ + return(pos & 0xFFFF); } +/** Get a virtual column sequence (the "nth" virtual column) for a +virtual column, stord in the "POS" field of Sys_columns +@param[in] pos virtual column position +@return virtual column sequence */ +UNIV_INLINE +ulint +dict_get_v_col_pos( + ulint pos) +{ + return((pos >> 16) - 1); +} diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 42f93b5a889..af7b91f9662 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -29,20 +29,22 @@ Created 1/8/1996 Heikki Tuuri #define dict0dict_h #include "univ.i" -#include "db0err.h" -#include "dict0types.h" -#include "dict0mem.h" -#include "data0type.h" #include "data0data.h" +#include "data0type.h" +#include "dict0mem.h" +#include "dict0types.h" +#include "fsp0fsp.h" +#include "fsp0sysspace.h" +#include "hash0hash.h" #include "mem0mem.h" #include "rem0types.h" +#include "row0types.h" +#include "trx0types.h" +#include "ut0byte.h" #include "ut0mem.h" -#include "ut0lst.h" -#include "hash0hash.h" +#include "ut0new.h" #include "ut0rnd.h" -#include "ut0byte.h" -#include "trx0types.h" -#include "row0types.h" +#include #include "fsp0fsp.h" #include "dict0pagecompress.h" @@ -50,20 +52,10 @@ extern bool innodb_table_stats_not_found; extern bool innodb_index_stats_not_found; #ifndef UNIV_HOTBACKUP -# include "sync0sync.h" # include "sync0rw.h" -/******************************************************************//** -Makes all characters in a NUL-terminated UTF-8 string lower case. */ -UNIV_INTERN -void -dict_casedn_str( -/*============*/ - char* a) /*!< in/out: string to put in lower case */ - MY_ATTRIBUTE((nonnull)); /********************************************************************//** Get the database name length in a table name. 
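The SYS_COLUMNS "POS" encoding for virtual columns shown in the dict0crea.ic hunk above packs two values into a single number: the virtual column sequence in the upper 16 bits (offset by one) and the original table position in the lower 16 bits. The following round-trip sketch restates the three inline helpers from the hunk as plain functions; the unsigned long stand-in for ulint and the main() harness are for illustration only.

#include <cassert>

typedef unsigned long ulint;   /* illustrative stand-in for InnoDB's ulint */

/* Compose SYS_COLUMNS.POS for the v_pos-th virtual column that sits at
col_pos in the original table definition (dict_create_v_col_pos above). */
static ulint create_v_col_pos(ulint v_pos, ulint col_pos)
{
	return(((v_pos + 1) << 16) + col_pos);
}

/* Recover the column position in the original table definition
(dict_get_v_col_mysql_pos above). */
static ulint get_v_col_mysql_pos(ulint pos)
{
	return(pos & 0xFFFF);
}

/* Recover the virtual column sequence, i.e. the "nth" virtual column
(dict_get_v_col_pos above). */
static ulint get_v_col_pos(ulint pos)
{
	return((pos >> 16) - 1);
}

int main()
{
	/* Third virtual column (sequence 2), column 7 in the table definition. */
	ulint pos = create_v_col_pos(2, 7);

	assert(get_v_col_mysql_pos(pos) == 7);
	assert(get_v_col_pos(pos) == 2);
	return 0;
}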
-@return database name length */ -UNIV_INTERN +@return database name length */ ulint dict_get_db_name_len( /*=================*/ @@ -75,7 +67,6 @@ Open a table from its database and table name, this is currently used by foreign constraint parser to get the referenced table. @return complete table name with database and table name, allocated from heap memory passed in */ -UNIV_INTERN char* dict_get_referenced_table( /*======================*/ @@ -88,7 +79,6 @@ dict_get_referenced_table( mem_heap_t* heap); /*!< in: heap memory */ /*********************************************************************//** Frees a foreign key struct. */ - void dict_foreign_free( /*==============*/ @@ -98,7 +88,6 @@ Finds the highest [number] for foreign key constraints of the table. Looks only at the >= 4.0.18-format id's, which are of the form databasename/tablename_ibfk_[number]. @return highest number, 0 if table has no new format foreign key constraints */ -UNIV_INTERN ulint dict_table_get_highest_foreign_id( /*==============================*/ @@ -106,8 +95,7 @@ dict_table_get_highest_foreign_id( memory cache */ /********************************************************************//** Return the end of table name where we have removed dbname and '/'. -@return table name */ -UNIV_INTERN +@return table name */ const char* dict_remove_db_name( /*================*/ @@ -130,8 +118,7 @@ enum dict_table_op_t { /**********************************************************************//** Returns a table object based on table id. -@return table, NULL if does not exist */ -UNIV_INTERN +@return table, NULL if does not exist */ dict_table_t* dict_table_open_on_id( /*==================*/ @@ -152,7 +139,6 @@ dict_table_open_on_index_id( __attribute__((warn_unused_result)); /********************************************************************//** Decrements the count of open handles to a table. */ -UNIV_INTERN void dict_table_close( /*=============*/ @@ -162,22 +148,22 @@ dict_table_close( indexes after an aborted online index creation */ MY_ATTRIBUTE((nonnull)); +/*********************************************************************//** +Closes the only open handle to a table and drops a table while assuring +that dict_sys->mutex is held the whole time. This assures that the table +is not evicted after the close when the count of open handles goes to zero. +Because dict_sys->mutex is held, we do not need to call +dict_table_prevent_eviction(). */ +void +dict_table_close_and_drop( +/*======================*/ + trx_t* trx, /*!< in: data dictionary transaction */ + dict_table_t* table); /*!< in/out: table */ /**********************************************************************//** Inits the data dictionary module. */ -UNIV_INTERN void dict_init(void); -/*===========*/ -/********************************************************************//** -Gets the space id of every table of the data dictionary and makes a linear -list and a hash table of them to the data dictionary cache. This function -can be called at database startup if we did not need to do a crash recovery. -In crash recovery we must scan the space id's from the .ibd files in MySQL -database directories. */ -UNIV_INTERN -void -dict_load_space_id_list(void); -/*=========================*/ + /*********************************************************************//** Gets the minimum number of bytes per character. 
@return minimum multi-byte char size, in bytes */ @@ -215,8 +201,8 @@ void dict_col_copy_type( /*===============*/ const dict_col_t* col, /*!< in: column */ - dtype_t* type) /*!< out: data type */ - MY_ATTRIBUTE((nonnull)); + dtype_t* type); /*!< out: data type */ + /**********************************************************************//** Determine bytes of column prefix to be stored in the undo log. Please note if the table format is UNIV_FORMAT_A (< UNIV_FORMAT_B), no prefix @@ -230,11 +216,23 @@ dict_max_field_len_store_undo( const dict_col_t* col) /*!< in: column which index prefix is based on */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** Determine maximum bytes of a virtual column need to be stored +in the undo log. +@param[in] table dict_table_t for the table +@param[in] col_no virtual column number +@return maximum bytes of virtual column to be stored in the undo log */ +UNIV_INLINE +ulint +dict_max_v_field_len_store_undo( + dict_table_t* table, + ulint col_no); + #endif /* !UNIV_HOTBACKUP */ #ifdef UNIV_DEBUG /*********************************************************************//** Assert that a column and a data type match. -@return TRUE */ +@return TRUE */ UNIV_INLINE ibool dict_col_type_assert_equal( @@ -246,7 +244,7 @@ dict_col_type_assert_equal( #ifndef UNIV_HOTBACKUP /***********************************************************************//** Returns the minimum size of the column. -@return minimum size */ +@return minimum size */ UNIV_INLINE ulint dict_col_get_min_size( @@ -255,7 +253,7 @@ dict_col_get_min_size( MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Returns the maximum size of the column. -@return maximum size */ +@return maximum size */ UNIV_INLINE ulint dict_col_get_max_size( @@ -264,7 +262,7 @@ dict_col_get_max_size( MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** Returns the size of a fixed size column, 0 if not a fixed size column. -@return fixed size, or 0 */ +@return fixed size, or 0 */ UNIV_INLINE ulint dict_col_get_fixed_size( @@ -275,7 +273,7 @@ dict_col_get_fixed_size( /***********************************************************************//** Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column. For fixed length types it is the fixed length of the type, otherwise 0. -@return SQL null storage size in ROW_FORMAT=REDUNDANT */ +@return SQL null storage size in ROW_FORMAT=REDUNDANT */ UNIV_INLINE ulint dict_col_get_sql_null_size( @@ -285,7 +283,7 @@ dict_col_get_sql_null_size( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the column number. -@return col->ind, table column position (starting from 0) */ +@return col->ind, table column position (starting from 0) */ UNIV_INLINE ulint dict_col_get_no( @@ -301,11 +299,22 @@ dict_col_get_clust_pos( const dict_col_t* col, /*!< in: table column */ const dict_index_t* clust_index) /*!< in: clustered index */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** Gets the column position in the given index. +@param[in] col table column +@param[in] index index to be searched for column +@return position of column in the given index. 
*/ +UNIV_INLINE +ulint +dict_col_get_index_pos( + const dict_col_t* col, + const dict_index_t* index) + __attribute__((nonnull, warn_unused_result)); + /****************************************************************//** If the given column name is reserved for InnoDB system columns, return TRUE. -@return TRUE if name is reserved */ -UNIV_INTERN +@return TRUE if name is reserved */ ibool dict_col_name_is_reserved( /*======================*/ @@ -313,7 +322,6 @@ dict_col_name_is_reserved( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Acquire the autoinc lock. */ -UNIV_INTERN void dict_table_autoinc_lock( /*====================*/ @@ -321,7 +329,6 @@ dict_table_autoinc_lock( MY_ATTRIBUTE((nonnull)); /********************************************************************//** Unconditionally set the autoinc counter. */ -UNIV_INTERN void dict_table_autoinc_initialize( /*==========================*/ @@ -331,14 +338,12 @@ dict_table_autoinc_initialize( /** Store autoinc value when the table is evicted. @param[in] table table evicted */ -UNIV_INTERN void dict_table_autoinc_store( const dict_table_t* table); /** Restore autoinc value when the table is loaded. @param[in] table table loaded */ -UNIV_INTERN void dict_table_autoinc_restore( dict_table_t* table); @@ -346,8 +351,7 @@ dict_table_autoinc_restore( /********************************************************************//** Reads the next autoinc value (== autoinc counter value), 0 if not yet initialized. -@return value for a new row, or 0 */ -UNIV_INTERN +@return value for a new row, or 0 */ ib_uint64_t dict_table_autoinc_read( /*====================*/ @@ -356,7 +360,6 @@ dict_table_autoinc_read( /********************************************************************//** Updates the autoinc counter if the value supplied is greater than the current value. */ -UNIV_INTERN void dict_table_autoinc_update_if_greater( /*=================================*/ @@ -366,7 +369,6 @@ dict_table_autoinc_update_if_greater( MY_ATTRIBUTE((nonnull)); /********************************************************************//** Release the autoinc lock. */ -UNIV_INTERN void dict_table_autoinc_unlock( /*======================*/ @@ -375,7 +377,6 @@ dict_table_autoinc_unlock( #endif /* !UNIV_HOTBACKUP */ /**********************************************************************//** Adds system columns to a table object. */ -UNIV_INTERN void dict_table_add_system_columns( /*==========================*/ @@ -383,9 +384,14 @@ dict_table_add_system_columns( mem_heap_t* heap) /*!< in: temporary heap */ MY_ATTRIBUTE((nonnull)); #ifndef UNIV_HOTBACKUP +/** Mark if table has big rows. +@param[in,out] table table handler */ +void +dict_table_set_big_rows( + dict_table_t* table) + __attribute__((nonnull)); /**********************************************************************//** Adds a table object to the dictionary cache. */ -UNIV_INTERN void dict_table_add_to_cache( /*====================*/ @@ -395,7 +401,6 @@ dict_table_add_to_cache( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a table object from the dictionary cache. */ -UNIV_INTERN void dict_table_remove_from_cache( /*=========================*/ @@ -403,7 +408,6 @@ dict_table_remove_from_cache( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a table object from the dictionary cache. 
*/ -UNIV_INTERN void dict_table_remove_from_cache_low( /*=============================*/ @@ -412,8 +416,7 @@ dict_table_remove_from_cache_low( to make room in the table LRU list */ /**********************************************************************//** Renames a table object. -@return TRUE if success */ -UNIV_INTERN +@return TRUE if success */ dberr_t dict_table_rename_in_cache( /*=======================*/ @@ -424,19 +427,19 @@ dict_table_rename_in_cache( to preserve the original table name in constraints which reference it */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/**********************************************************************//** -Removes an index from the dictionary cache. */ -UNIV_INTERN + +/** Removes an index from the dictionary cache. +@param[in,out] table table whose index to remove +@param[in,out] index index to remove, this object is destroyed and must not +be accessed by the caller afterwards */ void dict_index_remove_from_cache( -/*=========================*/ - dict_table_t* table, /*!< in/out: table */ - dict_index_t* index) /*!< in, own: index */ - MY_ATTRIBUTE((nonnull)); + dict_table_t* table, + dict_index_t* index); + /**********************************************************************//** Change the id of a table object in the dictionary cache. This is used in DISCARD TABLESPACE. */ -UNIV_INTERN void dict_table_change_id_in_cache( /*==========================*/ @@ -445,7 +448,6 @@ dict_table_change_id_in_cache( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. */ -UNIV_INTERN void dict_foreign_remove_from_cache( /*===========================*/ @@ -456,8 +458,7 @@ Adds a foreign key constraint object to the dictionary cache. May free the object if there already is an object with the same identifier in. At least one of foreign table or referenced table must already be in the dictionary cache! -@return DB_SUCCESS or error code */ -UNIV_INTERN +@return DB_SUCCESS or error code */ dberr_t dict_foreign_add_to_cache( /*======================*/ @@ -474,8 +475,7 @@ dict_foreign_add_to_cache( MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /*********************************************************************//** Checks if a table is referenced by foreign keys. -@return TRUE if table is referenced by a foreign key */ -UNIV_INTERN +@return TRUE if table is referenced by a foreign key */ ibool dict_table_is_referenced_by_foreign_key( /*====================================*/ @@ -485,7 +485,6 @@ dict_table_is_referenced_by_foreign_key( Replace the index passed in with another equivalent index in the foreign key lists of the table. @return whether all replacements were found */ -UNIV_INTERN bool dict_foreign_replace_index( /*=======================*/ @@ -498,7 +497,6 @@ dict_foreign_replace_index( /**********************************************************************//** Determines whether a string starts with the specified keyword. 
@return TRUE if str starts with keyword */ -UNIV_INTERN ibool dict_str_starts_with_keyword( /*=========================*/ @@ -506,40 +504,38 @@ dict_str_starts_with_keyword( const char* str, /*!< in: string to scan for keyword */ const char* keyword) /*!< in: keyword to look for */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*********************************************************************//** -Scans a table create SQL string and adds to the data dictionary +/** Scans a table create SQL string and adds to the data dictionary the foreign key constraints declared in the string. This function should be called after the indexes for a table have been created. Each foreign key constraint must be accompanied with indexes in bot participating tables. The indexes are allowed to contain more fields than mentioned in the constraint. -@return error code or DB_SUCCESS */ -UNIV_INTERN + +@param[in] trx transaction +@param[in] sql_string table create statement where + foreign keys are declared like: + FOREIGN KEY (a, b) REFERENCES table2(c, d), + table2 can be written also with the database + name before it: test.table2; the default + database id the database of parameter name +@param[in] sql_length length of sql_string +@param[in] name table full name in normalized form +@param[in] reject_fks if TRUE, fail with error code + DB_CANNOT_ADD_CONSTRAINT if any + foreign keys are found. +@return error code or DB_SUCCESS */ dberr_t dict_create_foreign_constraints( -/*============================*/ - trx_t* trx, /*!< in: transaction */ - const char* sql_string, /*!< in: table create statement where - foreign keys are declared like: - FOREIGN KEY (a, b) REFERENCES - table2(c, d), table2 can be written - also with the database - name before it: test.table2; the - default database id the database of - parameter name */ - size_t sql_length, /*!< in: length of sql_string */ - const char* name, /*!< in: table full name in the - normalized form - database_name/table_name */ - ibool reject_fks) /*!< in: if TRUE, fail with error - code DB_CANNOT_ADD_CONSTRAINT if - any foreign keys are found. */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + trx_t* trx, + const char* sql_string, + size_t sql_length, + const char* name, + ibool reject_fks) + __attribute__((warn_unused_result)); /**********************************************************************//** Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement. @return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the constraint id does not match */ -UNIV_INTERN dberr_t dict_foreign_parse_drop_constraints( /*================================*/ @@ -557,27 +553,25 @@ Returns a table object and increments its open handle count. NOTE! This is a high-level function to be used mainly from outside the 'dict' directory. Inside this directory dict_table_get_low is usually the appropriate function. 
-@return table, NULL if does not exist */ -UNIV_INTERN +@param[in] table_name Table name +@param[in] dict_locked TRUE=data dictionary locked +@param[in] try_drop TRUE=try to drop any orphan indexes after + an aborted online index creation +@param[in] ignore_err error to be ignored when loading the table +@return table, NULL if does not exist */ dict_table_t* dict_table_open_on_name( -/*====================*/ - const char* table_name, /*!< in: table name */ - ibool dict_locked, /*!< in: TRUE=data dictionary locked */ - ibool try_drop, /*!< in: TRUE=try to drop any orphan - indexes after an aborted online - index creation */ - dict_err_ignore_t - ignore_err) /*!< in: error to be ignored when - loading the table */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + const char* table_name, + ibool dict_locked, + ibool try_drop, + dict_err_ignore_t ignore_err) + __attribute__((warn_unused_result)); /*********************************************************************//** Tries to find an index whose first fields are the columns in the array, in the same order and is not marked for deletion and is not the same as types_idx. -@return matching index, NULL if not found */ -UNIV_INTERN +@return matching index, NULL if not found */ dict_index_t* dict_foreign_find_index( /*====================*/ @@ -612,13 +606,13 @@ dict_foreign_find_index( Returns a column's name. @return column name. NOTE: not guaranteed to stay valid if table is modified in any way (columns added, etc.). */ -UNIV_INTERN const char* dict_table_get_col_name( /*====================*/ const dict_table_t* table, /*!< in: table */ ulint col_nr) /*!< in: column number */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /**********************************************************************//** Returns a column's name. @return column name. NOTE: not guaranteed to stay valid if table is @@ -630,17 +624,29 @@ dict_table_get_col_name_for_mysql( const dict_table_t* table, /*!< in: table */ const char* col_name)/*!< in: MySQL table column name */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** Returns a virtual column's name. +@param[in] table table object +@param[in] col_nr virtual column number(nth virtual column) +@return column name. */ +const char* +dict_table_get_v_col_name( + const dict_table_t* table, + ulint col_nr); + /**********************************************************************//** -Prints a table data. */ -UNIV_INTERN -void -dict_table_print( -/*=============*/ - dict_table_t* table) /*!< in: table */ - MY_ATTRIBUTE((nonnull)); +@param[in] col_name column name +@param[in] col_nr column number guessed, 0 as default +@return column number if the table has the specified column, +otherwise table->n_def */ +ulint +dict_table_has_column( + const dict_table_t* table, + const char* col_name, + ulint col_nr = 0); + /**********************************************************************//** Outputs info on foreign keys of a table. */ -UNIV_INTERN std::string dict_print_info_on_foreign_keys( /*============================*/ @@ -653,29 +659,17 @@ dict_print_info_on_foreign_keys( /**********************************************************************//** Outputs info on a foreign key of a table in a format suitable for CREATE TABLE. 
*/ -UNIV_INTERN std::string dict_print_info_on_foreign_key_in_create_format( /*============================================*/ trx_t* trx, /*!< in: transaction */ dict_foreign_t* foreign, /*!< in: foreign key constraint */ ibool add_newline); /*!< in: whether to add a newline */ -/********************************************************************//** -Displays the names of the index and the table. */ -UNIV_INTERN -void -dict_index_name_print( -/*==================*/ - FILE* file, /*!< in: output stream */ - const trx_t* trx, /*!< in: transaction */ - const dict_index_t* index) /*!< in: index to print */ - MY_ATTRIBUTE((nonnull(1,3))); /*********************************************************************//** Tries to find an index whose first fields are the columns in the array, in the same order and is not marked for deletion and is not the same as types_idx. -@return matching index, NULL if not found */ -UNIV_INTERN +@return matching index, NULL if not found */ bool dict_foreign_qualify_index( /*====================*/ @@ -709,7 +703,7 @@ dict_foreign_qualify_index( #ifdef UNIV_DEBUG /********************************************************************//** Gets the first index on the table (the clustered index). -@return index, NULL if none exists */ +@return index, NULL if none exists */ UNIV_INLINE dict_index_t* dict_table_get_first_index( @@ -718,7 +712,7 @@ dict_table_get_first_index( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the last index on the table. -@return index, NULL if none exists */ +@return index, NULL if none exists */ UNIV_INLINE dict_index_t* dict_table_get_last_index( @@ -727,7 +721,7 @@ dict_table_get_last_index( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the next index on the table. -@return index, NULL if none left */ +@return index, NULL if none left */ UNIV_INLINE dict_index_t* dict_table_get_next_index( @@ -756,16 +750,26 @@ do { \ /********************************************************************//** Check whether the index is the clustered index. -@return nonzero for clustered index, zero for other indexes */ +@return nonzero for clustered index, zero for other indexes */ UNIV_INLINE ulint dict_index_is_clust( /*================*/ const dict_index_t* index) /*!< in: index */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); + +/** Check if index is auto-generated clustered index. +@param[in] index index + +@return true if index is auto-generated clustered index. */ +UNIV_INLINE +bool +dict_index_is_auto_gen_clust( + const dict_index_t* index); + /********************************************************************//** Check whether the index is unique. -@return nonzero for unique index, zero for other indexes */ +@return nonzero for unique index, zero for other indexes */ UNIV_INLINE ulint dict_index_is_unique( @@ -773,8 +777,24 @@ dict_index_is_unique( const dict_index_t* index) /*!< in: index */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** +Check whether the index is a Spatial Index. +@return nonzero for Spatial Index, zero for other indexes */ +UNIV_INLINE +ulint +dict_index_is_spatial( +/*==================*/ + const dict_index_t* index) /*!< in: index */ + __attribute__((warn_unused_result)); +/** Check whether the index contains a virtual column. 
+@param[in] index index +@return nonzero for index on virtual column, zero for other indexes */ +UNIV_INLINE +ulint +dict_index_has_virtual( + const dict_index_t* index); +/********************************************************************//** Check whether the index is the insert buffer tree. -@return nonzero for insert buffer, zero for other indexes */ +@return nonzero for insert buffer, zero for other indexes */ UNIV_INLINE ulint dict_index_is_ibuf( @@ -783,7 +803,7 @@ dict_index_is_ibuf( MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** Check whether the index is a secondary index or the insert buffer tree. -@return nonzero for insert buffer, zero for other indexes */ +@return nonzero for insert buffer, zero for other indexes */ UNIV_INLINE ulint dict_index_is_sec_or_ibuf( @@ -791,30 +811,39 @@ dict_index_is_sec_or_ibuf( const dict_index_t* index) /*!< in: index */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); -/************************************************************************ -Gets the all the FTS indexes for the table. NOTE: must not be called for -tables which do not have an FTS-index. */ -UNIV_INTERN +/** Get all the FTS indexes on a table. +@param[in] table table +@param[out] indexes all FTS indexes on this table +@return number of FTS indexes */ ulint dict_table_get_all_fts_indexes( -/*===========================*/ - /* out: number of indexes collected */ - dict_table_t* table, /* in: table */ - ib_vector_t* indexes)/* out: vector for collecting FTS indexes */ - MY_ATTRIBUTE((nonnull)); + const dict_table_t* table, + ib_vector_t* indexes); + /********************************************************************//** -Gets the number of user-defined columns in a table in the dictionary -cache. -@return number of user-defined (e.g., not ROW_ID) columns of a table */ +Gets the number of user-defined non-virtual columns in a table in the +dictionary cache. +@return number of user-defined (e.g., not ROW_ID) non-virtual +columns of a table */ UNIV_INLINE ulint dict_table_get_n_user_cols( /*=======================*/ const dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); +/** Gets the number of user-defined virtual and non-virtual columns in a table +in the dictionary cache. +@param[in] table table +@return number of user-defined (e.g., not ROW_ID) columns of a table */ +UNIV_INLINE +ulint +dict_table_get_n_tot_u_cols( + const dict_table_t* table); /********************************************************************//** -Gets the number of system columns in a table in the dictionary cache. -@return number of system (e.g., ROW_ID) columns of a table */ +Gets the number of system columns in a table. +For intrinsic table on ROW_ID column is added for all other +tables TRX_ID and ROLL_PTR are all also appeneded. +@return number of system (e.g., ROW_ID) columns of a table */ UNIV_INLINE ulint dict_table_get_n_sys_cols( @@ -822,18 +851,35 @@ dict_table_get_n_sys_cols( const dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); /********************************************************************//** -Gets the number of all columns (also system) in a table in the dictionary -cache. -@return number of columns of a table */ +Gets the number of all non-virtual columns (also system) in a table +in the dictionary cache. 
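As a rough usage sketch (not part of the patch), the existing and newly added index predicates are meant to be combined along these lines when classifying an index; the classification function itself is invented for the example, only the predicate names come from the declarations above.

/* Illustrative only: map an index to a human-readable kind using the
predicates declared above. */
static const char*
index_kind(const dict_index_t* index)
{
	if (dict_index_is_clust(index)) {
		/* type == DICT_CLUSTERED with no other bits set means the
		clustered index was generated by InnoDB itself. */
		return(dict_index_is_auto_gen_clust(index)
		       ? "auto-generated clustered" : "clustered");
	}

	if (dict_index_is_spatial(index)) {
		return("spatial");
	}

	if (dict_index_has_virtual(index)) {
		return(dict_index_is_unique(index)
		       ? "unique, on virtual column(s)"
		       : "on virtual column(s)");
	}

	return(dict_index_is_unique(index) ? "unique secondary" : "secondary");
}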
+@return number of columns of a table */ UNIV_INLINE ulint dict_table_get_n_cols( /*==================*/ const dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); + +/** Gets the number of virtual columns in a table in the dictionary cache. +@param[in] table the table to check +@return number of virtual columns of a table */ +UNIV_INLINE +ulint +dict_table_get_n_v_cols( + const dict_table_t* table); + +/** Check if a table has indexed virtual columns +@param[in] table the table to check +@return true is the table has indexed virtual columns */ +UNIV_INLINE +bool +dict_table_has_indexed_v_cols( + const dict_table_t* table); + /********************************************************************//** Gets the approximately estimated number of rows in the table. -@return estimated number of rows */ +@return estimated number of rows */ UNIV_INLINE ib_uint64_t dict_table_get_n_rows( @@ -860,10 +906,21 @@ dict_table_n_rows_dec( /*==================*/ dict_table_t* table) /*!< in/out: table */ MY_ATTRIBUTE((nonnull)); + + +/** Get nth virtual column +@param[in] table target table +@param[in] col_nr column number in MySQL Table definition +@return dict_v_col_t ptr */ +dict_v_col_t* +dict_table_get_nth_v_col_mysql( + const dict_table_t* table, + ulint col_nr); + #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth column of a table. -@return pointer to column object */ +@return pointer to column object */ UNIV_INLINE dict_col_t* dict_table_get_nth_col( @@ -871,9 +928,18 @@ dict_table_get_nth_col( const dict_table_t* table, /*!< in: table */ ulint pos) /*!< in: position of column */ MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Gets the nth virtual column of a table. +@param[in] table table +@param[in] pos position of virtual column +@return pointer to virtual column object */ +UNIV_INLINE +dict_v_col_t* +dict_table_get_nth_v_col( + const dict_table_t* table, + ulint pos); /********************************************************************//** Gets the given system column of a table. -@return pointer to column object */ +@return pointer to column object */ UNIV_INLINE dict_col_t* dict_table_get_sys_col( @@ -882,14 +948,17 @@ dict_table_get_sys_col( ulint sys) /*!< in: DATA_ROW_ID, ... */ MY_ATTRIBUTE((nonnull, warn_unused_result)); #else /* UNIV_DEBUG */ -#define dict_table_get_nth_col(table, pos) \ +#define dict_table_get_nth_col(table, pos) \ ((table)->cols + (pos)) -#define dict_table_get_sys_col(table, sys) \ -((table)->cols + (table)->n_cols + (sys) - DATA_N_SYS_COLS) +#define dict_table_get_sys_col(table, sys) \ +((table)->cols + (table)->n_cols + (sys) \ + - (dict_table_get_n_sys_cols(table))) +/* Get nth virtual columns */ +#define dict_table_get_nth_v_col(table, pos) ((table)->v_cols + (pos)) #endif /* UNIV_DEBUG */ /********************************************************************//** Gets the given system column number of a table. -@return column number */ +@return column number */ UNIV_INLINE ulint dict_table_get_sys_col_no( @@ -900,7 +969,7 @@ dict_table_get_sys_col_no( #ifndef UNIV_HOTBACKUP /********************************************************************//** Returns the minimum data size of an index record. 
-@return minimum data size in bytes */ +@return minimum data size in bytes */ UNIV_INLINE ulint dict_index_get_min_size( @@ -910,16 +979,17 @@ dict_index_get_min_size( #endif /* !UNIV_HOTBACKUP */ /********************************************************************//** Check whether the table uses the compact page format. -@return TRUE if table uses the compact page format */ +@return TRUE if table uses the compact page format */ UNIV_INLINE ibool dict_table_is_comp( /*===============*/ const dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /********************************************************************//** Determine the file format of a table. -@return file format version */ +@return file format version */ UNIV_INLINE ulint dict_table_get_format( @@ -928,67 +998,94 @@ dict_table_get_format( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Determine the file format from a dict_table_t::flags. -@return file format version */ +@return file format version */ UNIV_INLINE ulint dict_tf_get_format( /*===============*/ ulint flags) /*!< in: dict_table_t::flags */ MY_ATTRIBUTE((warn_unused_result)); -/********************************************************************//** -Set the various values in a dict_table_t::flags pointer. */ +/** Set the various values in a dict_table_t::flags pointer. +@param[in,out] flags, Pointer to a 4 byte Table Flags +@param[in] format, File Format +@param[in] zip_ssize Zip Shift Size +@param[in] use_data_dir Table uses DATA DIRECTORY +@param[in] shared_space Table uses a General Shared Tablespace */ UNIV_INLINE void dict_tf_set( -/*========*/ - ulint* flags, /*!< in/out: table */ - rec_format_t format, /*!< in: file format */ - ulint zip_ssize, /*!< in: zip shift size */ - bool remote_path, /*!< in: table uses DATA DIRECTORY - */ - bool page_compressed,/*!< in: table uses page compressed - pages */ - ulint page_compression_level, /*!< in: table page compression - level */ - ulint atomic_writes) /*!< in: table atomic - writes option value*/ - __attribute__((nonnull)); -/********************************************************************//** -Convert a 32 bit integer table flags to the 32 bit integer that is -written into the tablespace header at the offset FSP_SPACE_FLAGS and is -also stored in the fil_space_t::flags field. The following chart shows -the translation of the low order bit. Other bits are the same. + ulint* flags, + rec_format_t format, + ulint zip_ssize, + bool use_data_dir, + bool shared_space, + bool page_compressed, + ulint page_compression_level, + ulint atomic_writes); + +/** Initialize a dict_table_t::flags pointer. +@param[in] compact, Table uses Compact or greater +@param[in] zip_ssize Zip Shift Size (log 2 minus 9) +@param[in] atomic_blobs Table uses Compressed or Dynamic +@param[in] data_dir Table uses DATA DIRECTORY +@param[in] shared_space Table uses a General Shared Tablespace */ +UNIV_INLINE +ulint +dict_tf_init( + bool compact, + ulint zip_ssize, + bool atomic_blobs, + bool data_dir, + bool shared_space, + bool page_compressed, + ulint page_compression_level, + ulint atomic_writes); + +/** Convert a 32 bit integer table flags to the 32 bit FSP Flags. +Fsp Flags are written into the tablespace header at the offset +FSP_SPACE_FLAGS and are also stored in the fil_space_t::flags field. +The following chart shows the translation of the low order bit. +Other bits are the same. 
========================= Low order bit ========================== | REDUNDANT | COMPACT | COMPRESSED | DYNAMIC dict_table_t::flags | 0 | 1 | 1 | 1 fil_space_t::flags | 0 | 0 | 1 | 1 ================================================================== -@return tablespace flags (fil_space_t::flags) */ -UNIV_INLINE +@param[in] table_flags dict_table_t::flags +@param[in] is_temp whether the tablespace is temporary +@return tablespace flags (fil_space_t::flags) */ ulint dict_tf_to_fsp_flags( -/*=================*/ - ulint flags) /*!< in: dict_table_t::flags */ + ulint table_flags, + bool is_temp) MY_ATTRIBUTE((const)); -/********************************************************************//** -Extract the compressed page size from table flags. -@return compressed page size, or 0 if not compressed */ -UNIV_INLINE + +/** Extract the page size from table flags. +@param[in] flags flags +@return compressed page size, or 0 if not compressed */ +UNIV_INLINE +const page_size_t +dict_tf_get_page_size( + ulint flags) +__attribute__((const)); + +/** Determine the extent size (in pages) for the given table +@param[in] table the table whose extent size is being + calculated. +@return extent size in pages (256, 128 or 64) */ ulint -dict_tf_get_zip_size( -/*=================*/ - ulint flags) /*!< in: flags */ - __attribute__((const)); +dict_table_extent_size( + const dict_table_t* table); -/********************************************************************//** -Check whether the table uses the compressed compact page format. -@return compressed page size, or 0 if not compressed */ +/** Get the table page size. +@param[in] table table +@return compressed page size, or 0 if not compressed */ UNIV_INLINE -ulint -dict_table_zip_size( -/*================*/ - const dict_table_t* table) /*!< in: table */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); +const page_size_t +dict_table_page_size( + const dict_table_t* table) + __attribute__((warn_unused_result)); + #ifndef UNIV_HOTBACKUP /*********************************************************************//** Obtain exclusive locks on all index trees of the table. This is to prevent @@ -1011,8 +1108,7 @@ dict_table_x_unlock_indexes( /********************************************************************//** Checks if a column is in the ordering columns of the clustered index of a table. Column prefixes are treated like whole columns. -@return TRUE if the column, or its prefix, is in the clustered key */ -UNIV_INTERN +@return TRUE if the column, or its prefix, is in the clustered key */ ibool dict_table_col_in_clustered_key( /*============================*/ @@ -1028,11 +1124,21 @@ dict_table_has_fts_index( /*=====================*/ dict_table_t* table) /*!< in: table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Copies types of virtual columns contained in table to tuple and sets all +fields of the tuple to the SQL NULL value. This function should +be called right after dtuple_create(). +@param[in,out] tuple data tuple +@param[in] table table +*/ +void +dict_table_copy_v_types( + dtuple_t* tuple, + const dict_table_t* table); + /*******************************************************************//** Copies types of columns contained in table to tuple and sets all fields of the tuple to the SQL NULL value. This function should be called right after dtuple_create(). 
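A small, self-contained restatement of the low-order-bit chart above; the four row-format names and the two bit columns are taken directly from the chart, the rest of the snippet is just scaffolding for the example.

#include <cstdio>

/* Low-order bit of dict_table_t::flags versus fil_space_t::flags: only
ROW_FORMAT=REDUNDANT clears the table-flags bit, and only the formats that
store long columns off-page with atomic blobs (COMPRESSED, DYNAMIC) set the
tablespace-flags bit.  All other bits translate unchanged. */
struct row_format_low_bits {
	const char*	name;
	unsigned	dict_bit0;	/* low bit of dict_table_t::flags */
	unsigned	fsp_bit0;	/* low bit of fil_space_t::flags  */
};

int main()
{
	static const row_format_low_bits formats[] = {
		{"REDUNDANT",  0, 0},
		{"COMPACT",    1, 0},
		{"COMPRESSED", 1, 1},
		{"DYNAMIC",    1, 1},
	};

	for (const row_format_low_bits& f : formats) {
		std::printf("%-10s  dict bit0=%u  fsp bit0=%u\n",
			    f.name, f.dict_bit0, f.fsp_bit0);
	}

	return(0);
}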
*/ -UNIV_INTERN void dict_table_copy_types( /*==================*/ @@ -1043,7 +1149,6 @@ dict_table_copy_types( Wait until all the background threads of the given table have exited, i.e., bg_threads == 0. Note: bg_threads_mutex must be reserved when calling this. */ -UNIV_INTERN void dict_table_wait_for_bg_threads_to_exit( /*===================================*/ @@ -1055,8 +1160,7 @@ dict_table_wait_for_bg_threads_to_exit( Looks for an index with the given id. NOTE that we do not reserve the dictionary mutex: this function is for emergency purposes like printing info of a corrupt database page! -@return index or NULL if not found from cache */ -UNIV_INTERN +@return index or NULL if not found from cache */ dict_index_t* dict_index_find_on_id_low( /*======================*/ @@ -1067,41 +1171,56 @@ Make room in the table cache by evicting an unused table. The unused table should not be part of FK relationship and currently not used in any user transaction. There is no guarantee that it will remove a table. @return number of tables evicted. */ -UNIV_INTERN ulint dict_make_room_in_cache( /*====================*/ ulint max_tables, /*!< in: max tables allowed in cache */ ulint pct_check); /*!< in: max percent to check */ -/**********************************************************************//** -Adds an index to the dictionary cache. -@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ -UNIV_INTERN + +#define BIG_ROW_SIZE 1024 + +/** Adds an index to the dictionary cache. +@param[in] table table on which the index is +@param[in] index index; NOTE! The index memory + object is freed in this function! +@param[in] page_no root page number of the index +@param[in] strict TRUE=refuse to create the index + if records could be too big to fit in + an B-tree page +@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ dberr_t dict_index_add_to_cache( -/*====================*/ - dict_table_t* table, /*!< in: table on which the index is */ - dict_index_t* index, /*!< in, own: index; NOTE! The index memory - object is freed in this function! */ - ulint page_no,/*!< in: root page number of the index */ - ibool strict) /*!< in: TRUE=refuse to create the index - if records could be too big to fit in - an B-tree page */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/**********************************************************************//** -Removes an index from the dictionary cache. */ -UNIV_INTERN -void -dict_index_remove_from_cache( -/*=========================*/ - dict_table_t* table, /*!< in/out: table */ - dict_index_t* index) /*!< in, own: index */ - MY_ATTRIBUTE((nonnull)); + dict_table_t* table, + dict_index_t* index, + ulint page_no, + ibool strict) + __attribute__((warn_unused_result)); + +/** Adds an index to the dictionary cache, with possible indexing newly +added column. +@param[in] table table on which the index is +@param[in] index index; NOTE! The index memory + object is freed in this function! 
+@param[in] add_v new virtual column that being added along with + an add index call +@param[in] page_no root page number of the index +@param[in] strict TRUE=refuse to create the index + if records could be too big to fit in + an B-tree page +@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */ +dberr_t +dict_index_add_to_cache_w_vcol( + dict_table_t* table, + dict_index_t* index, + const dict_add_v_col_t* add_v, + ulint page_no, + ibool strict) + __attribute__((warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ /********************************************************************//** Gets the number of fields in the internal representation of an index, including fields added by the dictionary system. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_fields( @@ -1115,7 +1234,7 @@ Gets the number of fields in the internal representation of an index that uniquely determine the position of an index entry in the index, if we do not take multiversioning into account: in the B-tree use the value returned by dict_index_get_n_unique_in_tree. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_unique( @@ -1127,7 +1246,7 @@ dict_index_get_n_unique( Gets the number of fields in the internal representation of an index which uniquely determine the position of an index entry in the index, if we also take multiversioning into account. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_unique_in_tree( @@ -1135,12 +1254,28 @@ dict_index_get_n_unique_in_tree( const dict_index_t* index) /*!< in: an internal representation of index (in the dictionary cache) */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** The number of fields in the nonleaf page of spatial index, except +the page no field. */ +#define DICT_INDEX_SPATIAL_NODEPTR_SIZE 1 +/** +Gets the number of fields on nonleaf page level in the internal representation +of an index which uniquely determine the position of an index entry in the +index, if we also take multiversioning into account. Note, it doesn't +include page no field. +@param[in] index index +@return number of fields */ +UNIV_INLINE +ulint +dict_index_get_n_unique_in_tree_nonleaf( + const dict_index_t* index) + __attribute__((nonnull, warn_unused_result)); /********************************************************************//** Gets the number of user-defined ordering fields in the index. In the internal representation we add the row id to the ordering fields to make all indexes unique, but this function returns the number of fields the user defined in the index as ordering fields. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_ordering_defined_by_user( @@ -1151,7 +1286,7 @@ dict_index_get_n_ordering_defined_by_user( #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth field of an index. -@return pointer to field object */ +@return pointer to field object */ UNIV_INLINE dict_field_t* dict_index_get_nth_field( @@ -1164,7 +1299,7 @@ dict_index_get_nth_field( #endif /* UNIV_DEBUG */ /********************************************************************//** Gets pointer to the nth column in an index. 
-@return column */ +@return column */ UNIV_INLINE const dict_col_t* dict_index_get_nth_col( @@ -1174,7 +1309,7 @@ dict_index_get_nth_col( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Gets the column number of the nth field in an index. -@return column number */ +@return column number */ UNIV_INLINE ulint dict_index_get_nth_col_no( @@ -1192,33 +1327,42 @@ dict_index_get_nth_col_pos( /*=======================*/ const dict_index_t* index, /*!< in: index */ ulint n, /*!< in: column number */ - ulint* prefix_col_pos) /*!< out: col num if prefix */ - __attribute__((nonnull(1), warn_unused_result)); -/********************************************************************//** -Looks for column n in an index. + ulint* prefix_col_pos) /*!< out: col num if prefix + */ + __attribute__((warn_unused_result)); +/** Looks for column n in an index. +@param[in] index index +@param[in] n column number +@param[in] inc_prefix true=consider column prefixes too +@param[in] is_virtual true==virtual column @return position in internal representation of the index; ULINT_UNDEFINED if not contained */ -UNIV_INTERN ulint dict_index_get_nth_col_or_prefix_pos( -/*=================================*/ const dict_index_t* index, /*!< in: index */ ulint n, /*!< in: column number */ - ibool inc_prefix, /*!< in: TRUE=consider + bool inc_prefix, /*!< in: TRUE=consider column prefixes too */ - ulint* prefix_col_pos) /*!< out: col num if prefix */ + bool is_virtual, /*!< in: is a virtual column + */ + ulint* prefix_col_pos) /*!< out: col num if prefix + */ + __attribute__((warn_unused_result)); - __attribute__((nonnull(1), warn_unused_result)); /********************************************************************//** Returns TRUE if the index contains a column or a prefix of that column. -@return TRUE if contains the column or its prefix */ -UNIV_INTERN +@param[in] index index +@param[in] n column number +@param[in] is_virtual whether it is a virtual col +@return TRUE if contains the column or its prefix */ ibool dict_index_contains_col_or_prefix( /*==============================*/ const dict_index_t* index, /*!< in: index */ - ulint n) /*!< in: column number */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + ulint n, /*!< in: column number */ + bool is_virtual) + /*!< in: whether it is a virtual col */ + __attribute__((warn_unused_result)); /********************************************************************//** Looks for a matching field in an index. The column has to be the same. The column in index must be complete, or must contain a prefix longer than the @@ -1226,7 +1370,6 @@ column in index2. That is, we must be able to construct the prefix in index2 from the prefix in index. @return position in internal representation of the index; ULINT_UNDEFINED if not contained */ -UNIV_INTERN ulint dict_index_get_nth_field_pos( /*=========================*/ @@ -1236,17 +1379,17 @@ dict_index_get_nth_field_pos( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Looks for column n position in the clustered index. 
-@return position in internal representation of the clustered index */ -UNIV_INTERN +@return position in internal representation of the clustered index */ ulint dict_table_get_nth_col_pos( /*=======================*/ const dict_table_t* table, /*!< in: table */ - ulint n) /*!< in: column number */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + ulint n, /*!< in: column number */ + ulint* prefix_col_pos) /*!< out: col num if prefix */ + __attribute__((nonnull, warn_unused_result)); /********************************************************************//** Returns the position of a system column in an index. -@return position, ULINT_UNDEFINED if not contained */ +@return position, ULINT_UNDEFINED if not contained */ UNIV_INLINE ulint dict_index_get_sys_col_pos( @@ -1256,7 +1399,6 @@ dict_index_get_sys_col_pos( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*******************************************************************//** Adds a column to index. */ -UNIV_INTERN void dict_index_add_col( /*===============*/ @@ -1268,7 +1410,6 @@ dict_index_add_col( #ifndef UNIV_HOTBACKUP /*******************************************************************//** Copies types of fields contained in index to tuple. */ -UNIV_INTERN void dict_index_copy_types( /*==================*/ @@ -1280,7 +1421,7 @@ dict_index_copy_types( #endif /* !UNIV_HOTBACKUP */ /*********************************************************************//** Gets the field column. -@return field->col, pointer to the table column */ +@return field->col, pointer to the table column */ UNIV_INLINE const dict_col_t* dict_field_get_col( @@ -1291,8 +1432,7 @@ dict_field_get_col( /**********************************************************************//** Returns an index object if it is found in the dictionary cache. Assumes that dict_sys->mutex is already being held. -@return index, NULL if not found */ -UNIV_INTERN +@return index, NULL if not found */ dict_index_t* dict_index_get_if_in_cache_low( /*===========================*/ @@ -1301,8 +1441,7 @@ dict_index_get_if_in_cache_low( #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG /**********************************************************************//** Returns an index object if it is found in the dictionary cache. -@return index, NULL if not found */ -UNIV_INTERN +@return index, NULL if not found */ dict_index_t* dict_index_get_if_in_cache( /*=======================*/ @@ -1313,8 +1452,7 @@ dict_index_get_if_in_cache( /**********************************************************************//** Checks that a tuple has n_fields_cmp value in a sensible range, so that no comparison can occur with the page number field in a node pointer. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool dict_index_check_search_tuple( /*==========================*/ @@ -1332,7 +1470,6 @@ enum check_name { }; /**********************************************************************//** Check for duplicate index entries in a table [using the index name] */ -UNIV_INTERN void dict_table_check_for_dup_indexes( /*=============================*/ @@ -1344,8 +1481,7 @@ dict_table_check_for_dup_indexes( #endif /* UNIV_DEBUG */ /**********************************************************************//** Builds a node pointer out of a physical record and a page number. 
-@return own: node pointer */ -UNIV_INTERN +@return own: node pointer */ dtuple_t* dict_index_build_node_ptr( /*======================*/ @@ -1362,8 +1498,7 @@ dict_index_build_node_ptr( /**********************************************************************//** Copies an initial segment of a physical record, long enough to specify an index entry uniquely. -@return pointer to the prefix record */ -UNIV_INTERN +@return pointer to the prefix record */ rec_t* dict_index_copy_rec_order_prefix( /*=============================*/ @@ -1377,8 +1512,7 @@ dict_index_copy_rec_order_prefix( MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** Builds a typed data tuple out of a physical record. -@return own: data tuple */ -UNIV_INTERN +@return own: data tuple */ dtuple_t* dict_index_build_data_tuple( /*========================*/ @@ -1389,7 +1523,7 @@ dict_index_build_data_tuple( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the space id of the root of the index tree. -@return space id */ +@return space id */ UNIV_INLINE ulint dict_index_get_space( @@ -1407,7 +1541,7 @@ dict_index_set_space( MY_ATTRIBUTE((nonnull)); /*********************************************************************//** Gets the page number of the root of the index tree. -@return page number */ +@return page number */ UNIV_INLINE ulint dict_index_get_page( @@ -1416,7 +1550,7 @@ dict_index_get_page( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Gets the read-write lock of the index tree. -@return read-write lock */ +@return read-write lock */ UNIV_INLINE rw_lock_t* dict_index_get_lock( @@ -1427,7 +1561,7 @@ dict_index_get_lock( Returns free space reserved for future updates of records. This is relevant only in the case of many consecutive inserts, as updates which make the records bigger might fragment the index. -@return number of free bytes on page, reserved for updates */ +@return number of free bytes on page, reserved for updates */ UNIV_INLINE ulint dict_index_get_space_reserve(void); @@ -1468,7 +1602,6 @@ dict_index_is_online_ddl( MY_ATTRIBUTE((nonnull, warn_unused_result)); /*********************************************************************//** Calculates the minimum record length in an index. */ -UNIV_INTERN ulint dict_index_calc_min_rec_len( /*========================*/ @@ -1476,7 +1609,6 @@ dict_index_calc_min_rec_len( MY_ATTRIBUTE((nonnull, warn_unused_result)); /********************************************************************//** Reserves the dictionary system mutex for MySQL. */ -UNIV_INTERN void dict_mutex_enter_for_mysql_func(const char * file, ulint line); /*============================*/ @@ -1486,7 +1618,6 @@ dict_mutex_enter_for_mysql_func(const char * file, ulint line); /********************************************************************//** Releases the dictionary system mutex for MySQL. */ -UNIV_INTERN void dict_mutex_exit_for_mysql(void); /*===========================*/ @@ -1497,7 +1628,6 @@ or from a thread that has not shared the table object with other threads. @param[in,out] table table whose stats latch to create @param[in] enabled if false then the latch is disabled and dict_table_stats_lock()/unlock() become noop on this table. 
*/ - void dict_table_stats_latch_create( dict_table_t* table, @@ -1507,33 +1637,29 @@ dict_table_stats_latch_create( This function is only called from either single threaded environment or from a thread that has not shared the table object with other threads. @param[in,out] table table whose stats latch to destroy */ - void dict_table_stats_latch_destroy( dict_table_t* table); -/**********************************************************************//** -Lock the appropriate latch to protect a given table's statistics. -table->id is used to pick the corresponding latch from a global array of -latches. */ -UNIV_INTERN +/** Lock the appropriate latch to protect a given table's statistics. +@param[in] table table whose stats to lock +@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ void dict_table_stats_lock( -/*==================*/ - dict_table_t* table, /*!< in: table */ - ulint latch_mode); /*!< in: RW_S_LATCH or RW_X_LATCH */ -/**********************************************************************//** -Unlock the latch that has been locked by dict_table_stats_lock() */ -UNIV_INTERN + dict_table_t* table, + ulint latch_mode); + +/** Unlock the latch that has been locked by dict_table_stats_lock(). +@param[in] table table whose stats to unlock +@param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ void dict_table_stats_unlock( -/*====================*/ - dict_table_t* table, /*!< in: table */ - ulint latch_mode); /*!< in: RW_S_LATCH or RW_X_LATCH */ + dict_table_t* table, + ulint latch_mode); + /********************************************************************//** Checks if the database name in two table names is the same. -@return TRUE if same db name */ -UNIV_INTERN +@return TRUE if same db name */ ibool dict_tables_have_same_db( /*=====================*/ @@ -1542,29 +1668,23 @@ dict_tables_have_same_db( const char* name2) /*!< in: table name in the form dbname '/' tablename */ MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*********************************************************************//** -Removes an index from the cache */ -UNIV_INTERN -void -dict_index_remove_from_cache( -/*=========================*/ - dict_table_t* table, /*!< in/out: table */ - dict_index_t* index) /*!< in, own: index */ - MY_ATTRIBUTE((nonnull)); -/**********************************************************************//** -Get index by name -@return index, NULL if does not exist */ -UNIV_INTERN +/** Get an index by name. +@param[in] table the table where to look for the index +@param[in] name the index name to look for +@param[in] committed true=search for committed, +false=search for uncommitted +@return index, NULL if does not exist */ dict_index_t* dict_table_get_index_on_name( -/*=========================*/ - dict_table_t* table, /*!< in: table */ - const char* name) /*!< in: name of the index to find */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/**********************************************************************//** -Looks for an index with the given id given a table instance. -@return index or NULL */ -UNIV_INTERN + dict_table_t* table, + const char* name, + bool committed=true) + MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Get an index by name. 
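The intended pairing of the re-documented dict_table_stats_lock()/dict_table_stats_unlock() calls, sketched under the assumption that shared access is enough for the caller; the stat_n_rows field read inside the critical section is a stand-in for whichever protected statistics field is needed.

/* Illustrative only: take the per-table statistics latch in shared mode,
read one statistic, and release the latch.  RW_S_LATCH and RW_X_LATCH are
the two accepted modes, as documented above. */
static ib_uint64_t
read_stat_under_latch(dict_table_t* table)
{
	ib_uint64_t	n_rows;

	dict_table_stats_lock(table, RW_S_LATCH);

	n_rows = table->stat_n_rows;	/* any protected stats field */

	dict_table_stats_unlock(table, RW_S_LATCH);

	return(n_rows);
}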
+@param[in] table the table where to look for the index +@param[in] name the index name to look for +@param[in] committed true=search for committed, +false=search for uncommitted */ dict_index_t* dict_table_find_index_on_id( /*========================*/ @@ -1572,16 +1692,18 @@ dict_table_find_index_on_id( index_id_t id) /*!< in: index id */ __attribute__((nonnull, warn_unused_result)); /**********************************************************************//** -In case there is more than one index with the same name return the index -with the min(id). -@return index, NULL if does not exist */ -UNIV_INTERN -dict_index_t* -dict_table_get_index_on_name_and_min_id( -/*====================================*/ - dict_table_t* table, /*!< in: table */ - const char* name) /*!< in: name of the index to find */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); +@return index, NULL if does not exist */ +inline +const dict_index_t* +dict_table_get_index_on_name( + const dict_table_t* table, + const char* name, + bool committed=true) +{ + return(dict_table_get_index_on_name( + const_cast(table), name, committed)); +} + /*************************************************************** Check whether a column exists in an FTS index. */ UNIV_INLINE @@ -1591,27 +1713,36 @@ dict_table_is_fts_column( /* out: ULINT_UNDEFINED if no match else the offset within the vector */ ib_vector_t* indexes,/* in: vector containing only FTS indexes */ - ulint col_no) /* in: col number to search for */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + ulint col_no, /* in: col number to search for */ + bool is_virtual)/*!< in: whether it is a virtual column */ + __attribute__((warn_unused_result)); +/**********************************************************************//** +Prevent table eviction by moving a table to the non-LRU list from the +LRU list if it is not already there. */ +UNIV_INLINE +void +dict_table_prevent_eviction( +/*========================*/ + dict_table_t* table) /*!< in: table to prevent eviction */ + __attribute__((nonnull)); /**********************************************************************//** Move a table to the non LRU end of the LRU list. */ -UNIV_INTERN void dict_table_move_from_lru_to_non_lru( /*================================*/ dict_table_t* table) /*!< in: table to move from LRU to non-LRU */ MY_ATTRIBUTE((nonnull)); -/**********************************************************************//** -Move a table to the LRU list from the non-LRU list. */ -UNIV_INTERN -void -dict_table_move_from_non_lru_to_lru( -/*================================*/ - dict_table_t* table) /*!< in: table to move from non-LRU to LRU */ - MY_ATTRIBUTE((nonnull)); +/** Looks for an index with the given id given a table instance. +@param[in] table table instance +@param[in] id index id +@return index or NULL */ +dict_index_t* +dict_table_find_index_on_id( + const dict_table_t* table, + index_id_t id) + MY_ATTRIBUTE((nonnull(1))); /**********************************************************************//** Move to the most recently used segment of the LRU list. 
*/ -UNIV_INTERN void dict_move_to_mru( /*=============*/ @@ -1625,19 +1756,20 @@ constraint */ /* Buffers for storing detailed information about the latest foreign key and unique key errors */ -extern FILE* dict_foreign_err_file; -extern ib_mutex_t dict_foreign_err_mutex; /* mutex protecting the buffers */ +extern FILE* dict_foreign_err_file; +extern ib_mutex_t dict_foreign_err_mutex; /* mutex protecting the + foreign key error messages */ /** the dictionary system */ extern dict_sys_t* dict_sys; /** the data dictionary rw-latch protecting dict_sys */ -extern rw_lock_t dict_operation_lock; +extern rw_lock_t* dict_operation_lock; typedef std::map autoinc_map_t; /* Dictionary system struct */ struct dict_sys_t{ - ib_mutex_t mutex; /*!< mutex protecting the data + DictSysMutex mutex; /*!< mutex protecting the data dictionary; protects also the disk-based dictionary system tables; this mutex serializes CREATE TABLE @@ -1654,13 +1786,14 @@ struct dict_sys_t{ on name */ hash_table_t* table_id_hash; /*!< hash table of the tables, based on id */ - ulint size; /*!< varying space in bytes occupied + lint size; /*!< varying space in bytes occupied by the data dictionary table and index objects */ dict_table_t* sys_tables; /*!< SYS_TABLES table */ dict_table_t* sys_columns; /*!< SYS_COLUMNS table */ dict_table_t* sys_indexes; /*!< SYS_INDEXES table */ dict_table_t* sys_fields; /*!< SYS_FIELDS table */ + dict_table_t* sys_virtual; /*!< SYS_VIRTUAL table */ /*=============================*/ UT_LIST_BASE_NODE_T(dict_table_t) @@ -1676,12 +1809,9 @@ struct dict_sys_t{ /** dummy index for ROW_FORMAT=REDUNDANT supremum and infimum records */ extern dict_index_t* dict_ind_redundant; -/** dummy index for ROW_FORMAT=COMPACT supremum and infimum records */ -extern dict_index_t* dict_ind_compact; /**********************************************************************//** -Inits dict_ind_redundant and dict_ind_compact. */ -UNIV_INTERN +Inits dict_ind_redundant. */ void dict_ind_init(void); /*===============*/ @@ -1701,7 +1831,7 @@ struct dict_col_meta_t { }; /* This struct is used for checking whether a given table exists and -whether it has a predefined schema (number of columns and columns names +whether it has a predefined schema (number of columns and column names and types) */ struct dict_table_schema_t { const char* table_name; /* the name of the table whose @@ -1729,7 +1859,6 @@ types. The order of the columns does not matter. The caller must own the dictionary mutex. dict_table_schema_check() @{ @return DB_SUCCESS if the table exists and contains the necessary columns */ -UNIV_INTERN dberr_t dict_table_schema_check( /*====================*/ @@ -1748,7 +1877,6 @@ Converts a database and table name from filesystem encoding (e.g. d@i1b/a@q1b@1Kc, same format as used in dict_table_t::name) in two strings in UTF8 encoding (e.g. dцb and aюbØc). The output buffers must be at least MAX_DB_UTF8_LEN and MAX_TABLE_UTF8_LEN bytes. */ -UNIV_INTERN void dict_fs2utf8( /*=========*/ @@ -1760,16 +1888,19 @@ dict_fs2utf8( size_t table_utf8_size)/*!< in: table_utf8 size */ MY_ATTRIBUTE((nonnull)); +/** Resize the hash tables besed on the current buffer pool size. */ +void +dict_resize(); + /**********************************************************************//** Closes the data dictionary module. */ -UNIV_INTERN void dict_close(void); /*============*/ #ifndef UNIV_HOTBACKUP /**********************************************************************//** Check whether the table is corrupted. 
-@return nonzero for corrupted table, zero for valid tables */ +@return nonzero for corrupted table, zero for valid tables */ UNIV_INLINE ulint dict_table_is_corrupted( @@ -1779,7 +1910,7 @@ dict_table_is_corrupted( /**********************************************************************//** Check whether the index is corrupted. -@return nonzero for corrupted index, zero for valid indexes */ +@return nonzero for corrupted index, zero for valid indexes */ UNIV_INLINE ulint dict_index_is_corrupted( @@ -1791,7 +1922,6 @@ dict_index_is_corrupted( /**********************************************************************//** Flags an index and table corrupted both in the data dictionary cache and in the system table SYS_INDEXES. */ -UNIV_INTERN void dict_set_corrupted( /*===============*/ @@ -1800,41 +1930,61 @@ dict_set_corrupted( const char* ctx) /*!< in: context */ UNIV_COLD MY_ATTRIBUTE((nonnull)); -/**********************************************************************//** -Flags an index corrupted in the data dictionary cache only. This +/** Flags an index corrupted in the data dictionary cache only. This is used mostly to mark a corrupted index when index's own dictionary -is corrupted, and we force to load such index for repair purpose */ -UNIV_INTERN +is corrupted, and we force to load such index for repair purpose +@param[in,out] index index that is corrupted */ void dict_set_corrupted_index_cache_only( -/*================================*/ - dict_index_t* index, /*!< in/out: index */ - dict_table_t* table) /*!< in/out: table */ - MY_ATTRIBUTE((nonnull)); + dict_index_t* index); /**********************************************************************//** Flags a table with specified space_id corrupted in the table dictionary cache. @return TRUE if successful */ -UNIV_INTERN ibool dict_set_corrupted_by_space( /*========================*/ ulint space_id); /*!< in: space ID */ -/********************************************************************//** -Validate the table flags. -@return true if valid. */ +/** Sets merge_threshold in the SYS_INDEXES +@param[in,out] index index +@param[in] merge_threshold value to set */ +void +dict_index_set_merge_threshold( + dict_index_t* index, + ulint merge_threshold); + +#ifdef UNIV_DEBUG +/** Sets merge_threshold for all indexes in dictionary cache for debug. +@param[in] merge_threshold_all value to set for all indexes */ +void +dict_set_merge_threshold_all_debug( + uint merge_threshold_all); +#endif /* UNIV_DEBUG */ + +/** Validate the table flags. +@param[in] flags Table flags +@return true if valid. */ UNIV_INLINE bool dict_tf_is_valid( -/*=============*/ - ulint flags) /*!< in: table flags */ - MY_ATTRIBUTE((warn_unused_result)); + ulint flags); + +/** Validate both table flags and table flags2 and make sure they +are compatible. +@param[in] flags Table flags +@param[in] flags2 Table flags2 +@return true if valid. */ +UNIV_INLINE +bool +dict_tf2_is_valid( + ulint flags, + ulint flags2); /********************************************************************//** Check if the tablespace for the table has been discarded. -@return true if the tablespace has been discarded. */ +@return true if the tablespace has been discarded. */ UNIV_INLINE bool dict_table_is_discarded( @@ -1844,7 +1994,7 @@ dict_table_is_discarded( /********************************************************************//** Check if it is a temporary table. -@return true if temporary table flag is set. */ +@return true if temporary table flag is set. 
*/ UNIV_INLINE bool dict_table_is_temporary( @@ -1852,11 +2002,73 @@ dict_table_is_temporary( const dict_table_t* table) /*!< in: table to check */ MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); +/** Check whether the table is intrinsic. +An intrinsic table is a special kind of temporary table that +is invisible to the end user. It is created internally by the MySQL server +layer or other module connected to InnoDB in order to gather and use data +as part of a larger task. Since access to it must be as fast as possible, +it does not need UNDO semantics, system fields DB_TRX_ID & DB_ROLL_PTR, +doublewrite, checksum, insert buffer, use of the shared data dictionary, +locking, or even a transaction. In short, these are not ACID tables at all, +just temporary + +@param[in] table table to check +@return true if intrinsic table flag is set. */ +UNIV_INLINE +bool +dict_table_is_intrinsic( + const dict_table_t* table) + __attribute__((warn_unused_result)); + +/** Check whether locking is disabled for this table. +Currently this is done for intrinsic table as their visibility is limited +to the connection only. + +@param[in] table table to check +@return true if locking is disabled. */ +UNIV_INLINE +bool +dict_table_is_locking_disabled( + const dict_table_t* table) + __attribute__((warn_unused_result)); + +/********************************************************************//** +Turn-off redo-logging if temporary table. */ +UNIV_INLINE +void +dict_disable_redo_if_temporary( +/*===========================*/ + const dict_table_t* table, /*!< in: table to check */ + mtr_t* mtr); /*!< out: mini-transaction */ + +/** Get table session row-id and increment the row-id counter for next use. +@param[in,out] table table handler +@return next table local row-id. */ +UNIV_INLINE +row_id_t +dict_table_get_next_table_sess_row_id( + dict_table_t* table); + +/** Get table session trx-id and increment the trx-id counter for next use. +@param[in,out] table table handler +@return next table local trx-id. */ +UNIV_INLINE +trx_id_t +dict_table_get_next_table_sess_trx_id( + dict_table_t* table); + +/** Get current session trx-id. +@param[in] table table handler +@return table local trx-id. */ +UNIV_INLINE +trx_id_t +dict_table_get_curr_table_sess_trx_id( + const dict_table_t* table); + #ifndef UNIV_HOTBACKUP /*********************************************************************//** This function should be called whenever a page is successfully compressed. Updates the compression padding information. */ -UNIV_INTERN void dict_index_zip_success( /*===================*/ @@ -1865,7 +2077,6 @@ dict_index_zip_success( /*********************************************************************//** This function should be called whenever a page compression attempt fails. Updates the compression padding information. */ -UNIV_INTERN void dict_index_zip_failure( /*===================*/ @@ -1874,7 +2085,6 @@ dict_index_zip_failure( /*********************************************************************//** Return the optimal page size, for which page will likely compress. @return page size beyond which page may not compress*/ -UNIV_INTERN ulint dict_index_zip_pad_optimal_page_size( /*=================================*/ @@ -1884,11 +2094,18 @@ dict_index_zip_pad_optimal_page_size( /*************************************************************//** Convert table flag to row format string. 
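A hedged sketch of the guard pattern the intrinsic-table predicates above enable: connection-private tables skip locking, and temporary tables skip redo logging through dict_disable_redo_if_temporary(). The lock_table_for_dml() stub is hypothetical; only the two dict_* calls come from this header.

/* Illustrative only. */
static void
lock_table_for_dml(dict_table_t*)
{
	/* Hypothetical stand-in for the caller's normal locking. */
}

static void
prepare_table_for_dml(dict_table_t* table, mtr_t* mtr)
{
	/* Intrinsic tables are visible to one connection only, so locking
	is reported as disabled for them and can be skipped. */
	if (!dict_table_is_locking_disabled(table)) {
		lock_table_for_dml(table);
	}

	/* Temporary tables need no redo logging; this switches it off for
	the given mini-transaction. */
	dict_disable_redo_if_temporary(table, mtr);
}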
@return row format name */ -UNIV_INTERN const char* dict_tf_to_row_format_string( /*=========================*/ ulint table_flag); /*!< in: row format setting */ +/****************************************************************//** +Return maximum size of the node pointer record. +@return maximum size of the record in bytes */ +ulint +dict_index_node_ptr_max_size( +/*=========================*/ + const dict_index_t* index) /*!< in: index */ + __attribute__((warn_unused_result)); /*****************************************************************//** Get index by first field of the index @return index which is having first field matches @@ -1900,6 +2117,43 @@ dict_table_get_index_on_first_col( const dict_table_t* table, /*!< in: table */ ulint col_index); /*!< in: position of column in table */ +/** Check if a column is a virtual column +@param[in] col column +@return true if it is a virtual column, false otherwise */ +UNIV_INLINE +bool +dict_col_is_virtual( + const dict_col_t* col); + +/** encode number of columns and number of virtual columns in one +4 bytes value. We could do this because the number of columns in +InnoDB is limited to 1017 +@param[in] n_col number of non-virtual column +@param[in] n_v_col number of virtual column +@return encoded value */ +UNIV_INLINE +ulint +dict_table_encode_n_col( + ulint n_col, + ulint n_v_col); + +/** Decode number of virtual and non-virtual columns in one 4 bytes value. +@param[in] encoded encoded value +@param[in,out] n_col number of non-virtual column +@param[in,out] n_v_col number of virtual column */ +UNIV_INLINE +void +dict_table_decode_n_col( + ulint encoded, + ulint* n_col, + ulint* n_v_col); + +/** Look for any dictionary objects that are found in the given tablespace. +@param[in] space Tablespace ID to search for. +@return true if tablespace is empty. */ +bool +dict_tablespace_is_empty( + ulint space_id); #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index 3d2f0dff0da..9aa25b0a9c5 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -30,7 +30,8 @@ Created 1/8/1996 Heikki Tuuri #include "rem0types.h" #include "fsp0fsp.h" #include "srv0srv.h" -#include "sync0rw.h" /* RW_S_LATCH */ +#include "sync0rw.h" +#include "fsp0sysspace.h" /*********************************************************************//** Gets the minimum number of bytes per character. @@ -89,12 +90,23 @@ dict_col_copy_type( type->len = col->len; type->mbminmaxlen = col->mbminmaxlen; } +/** Check if a column is a virtual column +@param[in] col column +@return true if it is a virtual column, false otherwise */ +UNIV_INLINE +bool +dict_col_is_virtual( + const dict_col_t* col) +{ + return(col->prtype & DATA_VIRTUAL); +} + #endif /* !UNIV_HOTBACKUP */ #ifdef UNIV_DEBUG /*********************************************************************//** Assert that a column and a data type match. -@return TRUE */ +@return TRUE */ UNIV_INLINE ibool dict_col_type_assert_equal( @@ -119,7 +131,7 @@ dict_col_type_assert_equal( #ifndef UNIV_HOTBACKUP /***********************************************************************//** Returns the minimum size of the column. -@return minimum size */ +@return minimum size */ UNIV_INLINE ulint dict_col_get_min_size( @@ -131,7 +143,7 @@ dict_col_get_min_size( } /***********************************************************************//** Returns the maximum size of the column. 
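A self-contained sketch of the kind of packing that dict_table_encode_n_col()/dict_table_decode_n_col() describe. The header only states that both counts fit in one 4-byte value because InnoDB limits a table to 1017 columns; the 16-bit split below is an assumption made for the example, not necessarily the exact layout used.

#include <cassert>
#include <cstdint>

/* Hypothetical packing: non-virtual column count in the low 16 bits,
virtual column count in the next 16 bits.  1017 < 2^16, so both counts
fit with room to spare. */
static std::uint32_t
encode_n_col_example(std::uint32_t n_col, std::uint32_t n_v_col)
{
	return(n_col | (n_v_col << 16));
}

static void
decode_n_col_example(
	std::uint32_t	encoded,
	std::uint32_t*	n_col,
	std::uint32_t*	n_v_col)
{
	*n_col = encoded & 0xFFFF;
	*n_v_col = encoded >> 16;
}

int main()
{
	std::uint32_t	n_col;
	std::uint32_t	n_v_col;

	decode_n_col_example(encode_n_col_example(1017, 5),
			     &n_col, &n_v_col);

	assert(n_col == 1017 && n_v_col == 5);

	return(0);
}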
-@return maximum size */ +@return maximum size */ UNIV_INLINE ulint dict_col_get_max_size( @@ -143,7 +155,7 @@ dict_col_get_max_size( #endif /* !UNIV_HOTBACKUP */ /***********************************************************************//** Returns the size of a fixed size column, 0 if not a fixed size column. -@return fixed size, or 0 */ +@return fixed size, or 0 */ UNIV_INLINE ulint dict_col_get_fixed_size( @@ -157,7 +169,7 @@ dict_col_get_fixed_size( /***********************************************************************//** Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column. For fixed length types it is the fixed length of the type, otherwise 0. -@return SQL null storage size in ROW_FORMAT=REDUNDANT */ +@return SQL null storage size in ROW_FORMAT=REDUNDANT */ UNIV_INLINE ulint dict_col_get_sql_null_size( @@ -170,7 +182,7 @@ dict_col_get_sql_null_size( /*********************************************************************//** Gets the column number. -@return col->ind, table column position (starting from 0) */ +@return col->ind, table column position (starting from 0) */ UNIV_INLINE ulint dict_col_get_no( @@ -208,11 +220,36 @@ dict_col_get_clust_pos( return(ULINT_UNDEFINED); } +/** Gets the column position in the given index. +@param[in] col table column +@param[in] index index to be searched for column +@return position of column in the given index. */ +UNIV_INLINE +ulint +dict_col_get_index_pos( + const dict_col_t* col, + const dict_index_t* index) +{ + ulint i; + + ut_ad(col); + + for (i = 0; i < index->n_def; i++) { + const dict_field_t* field = &index->fields[i]; + + if (!field->prefix_len && field->col == col) { + return(i); + } + } + + return(ULINT_UNDEFINED); +} + #ifndef UNIV_HOTBACKUP #ifdef UNIV_DEBUG /********************************************************************//** Gets the first index on the table (the clustered index). -@return index, NULL if none exists */ +@return index, NULL if none exists */ UNIV_INLINE dict_index_t* dict_table_get_first_index( @@ -227,7 +264,7 @@ dict_table_get_first_index( /********************************************************************//** Gets the last index on the table. -@return index, NULL if none exists */ +@return index, NULL if none exists */ UNIV_INLINE dict_index_t* dict_table_get_last_index( @@ -243,7 +280,7 @@ dict_table_get_last_index( /********************************************************************//** Gets the next index on the table. -@return index, NULL if none left */ +@return index, NULL if none left */ UNIV_INLINE dict_index_t* dict_table_get_next_index( @@ -260,7 +297,7 @@ dict_table_get_next_index( /********************************************************************//** Check whether the index is the clustered index. -@return nonzero for clustered index, zero for other indexes */ +@return nonzero for clustered index, zero for other indexes */ UNIV_INLINE ulint dict_index_is_clust( @@ -272,9 +309,22 @@ dict_index_is_clust( return(index->type & DICT_CLUSTERED); } + +/** Check if index is auto-generated clustered index. +@param[in] index index + +@return true if index is auto-generated clustered index. */ +UNIV_INLINE +bool +dict_index_is_auto_gen_clust( + const dict_index_t* index) +{ + return(index->type == DICT_CLUSTERED); +} + /********************************************************************//** Check whether the index is unique. 
-@return nonzero for unique index, zero for other indexes */ +@return nonzero for unique index, zero for other indexes */ UNIV_INLINE ulint dict_index_is_unique( @@ -288,38 +338,52 @@ dict_index_is_unique( } /********************************************************************//** -Check whether the index is the insert buffer tree. -@return nonzero for insert buffer, zero for other indexes */ +Check whether the index is a Spatial Index. +@return nonzero for Spatial Index, zero for other indexes */ UNIV_INLINE ulint -dict_index_is_ibuf( -/*===============*/ +dict_index_is_spatial( +/*==================*/ const dict_index_t* index) /*!< in: index */ { ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(index->type & DICT_IBUF); + return(index->type & DICT_SPATIAL); +} + +/** Check whether the index contains a virtual column +@param[in] index index +@return nonzero for the index has virtual column, zero for other indexes */ +UNIV_INLINE +ulint +dict_index_has_virtual( + const dict_index_t* index) +{ + ut_ad(index); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); + + return(index->type & DICT_VIRTUAL); } /********************************************************************//** -Check whether the index is an universal index tree. -@return nonzero for universal tree, zero for other indexes */ +Check whether the index is the insert buffer tree. +@return nonzero for insert buffer, zero for other indexes */ UNIV_INLINE ulint -dict_index_is_univ( +dict_index_is_ibuf( /*===============*/ const dict_index_t* index) /*!< in: index */ { ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - return(index->type & DICT_UNIVERSAL); + return(index->type & DICT_IBUF); } /********************************************************************//** Check whether the index is a secondary index or the insert buffer tree. -@return nonzero for insert buffer, zero for other indexes */ +@return nonzero for insert buffer, zero for other indexes */ UNIV_INLINE ulint dict_index_is_sec_or_ibuf( @@ -337,9 +401,10 @@ dict_index_is_sec_or_ibuf( } /********************************************************************//** -Gets the number of user-defined columns in a table in the dictionary -cache. -@return number of user-defined (e.g., not ROW_ID) columns of a table */ +Gets the number of user-defined non-virtual columns in a table in the +dictionary cache. +@return number of user-defined (e.g., not ROW_ID) non-virtual +columns of a table */ UNIV_INLINE ulint dict_table_get_n_user_cols( @@ -349,12 +414,29 @@ dict_table_get_n_user_cols( ut_ad(table); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - return(table->n_cols - DATA_N_SYS_COLS); + return(table->n_cols - dict_table_get_n_sys_cols(table)); } +/** Gets the number of user-defined virtual and non-virtual columns in a table +in the dictionary cache. +@param[in] table table +@return number of user-defined (e.g., not ROW_ID) columns of a table */ +UNIV_INLINE +ulint +dict_table_get_n_tot_u_cols( + const dict_table_t* table) +{ + ut_ad(table); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + return(dict_table_get_n_user_cols(table) + + dict_table_get_n_v_cols(table)); +} /********************************************************************//** -Gets the number of system columns in a table in the dictionary cache. -@return number of system (e.g., ROW_ID) columns of a table */ +Gets the number of system columns in a table. +For intrinsic table on ROW_ID column is added for all other +tables TRX_ID and ROLL_PTR are all also appeneded. 
+@return number of system (e.g., ROW_ID) columns of a table */ UNIV_INLINE ulint dict_table_get_n_sys_cols( @@ -363,15 +445,15 @@ dict_table_get_n_sys_cols( { ut_ad(table); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - ut_ad(table->cached); - return(DATA_N_SYS_COLS); + return(dict_table_is_intrinsic(table) + ? DATA_ITT_N_SYS_COLS : DATA_N_SYS_COLS); } /********************************************************************//** -Gets the number of all columns (also system) in a table in the dictionary -cache. -@return number of columns of a table */ +Gets the number of all non-virtual columns (also system) in a table +in the dictionary cache. +@return number of non-virtual columns of a table */ UNIV_INLINE ulint dict_table_get_n_cols( @@ -384,9 +466,42 @@ dict_table_get_n_cols( return(table->n_cols); } +/** Gets the number of virtual columns in a table in the dictionary cache. +@param[in] table the table to check +@return number of virtual columns of a table */ +UNIV_INLINE +ulint +dict_table_get_n_v_cols( + const dict_table_t* table) +{ + ut_ad(table); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + return(table->n_v_cols); +} + +/** Check if a table has indexed virtual columns +@param[in] table the table to check +@return true is the table has indexed virtual columns */ +UNIV_INLINE +bool +dict_table_has_indexed_v_cols( + const dict_table_t* table) +{ + + for (ulint i = 0; i < table->n_v_cols; i++) { + const dict_v_col_t* col = dict_table_get_nth_v_col(table, i); + if (col->m_col.ord_part) { + return(true); + } + } + + return(false); +} + /********************************************************************//** Gets the approximately estimated number of rows in the table. -@return estimated number of rows */ +@return estimated number of rows */ UNIV_INLINE ib_uint64_t dict_table_get_n_rows( @@ -437,7 +552,7 @@ dict_table_n_rows_dec( #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth column of a table. -@return pointer to column object */ +@return pointer to column object */ UNIV_INLINE dict_col_t* dict_table_get_nth_col( @@ -452,9 +567,26 @@ dict_table_get_nth_col( return((dict_col_t*) (table->cols) + pos); } +/** Gets the nth virtual column of a table. +@param[in] table table +@param[in] pos position of virtual column +@return pointer to virtual column object */ +UNIV_INLINE +dict_v_col_t* +dict_table_get_nth_v_col( + const dict_table_t* table, + ulint pos) +{ + ut_ad(table); + ut_ad(pos < table->n_v_def); + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + + return(static_cast(table->v_cols) + pos); +} + /********************************************************************//** Gets the given system column of a table. -@return pointer to column object */ +@return pointer to column object */ UNIV_INLINE dict_col_t* dict_table_get_sys_col( @@ -465,11 +597,12 @@ dict_table_get_sys_col( dict_col_t* col; ut_ad(table); - ut_ad(sys < DATA_N_SYS_COLS); + ut_ad(sys < dict_table_get_n_sys_cols(table)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); col = dict_table_get_nth_col(table, table->n_cols - - DATA_N_SYS_COLS + sys); + - dict_table_get_n_sys_cols(table) + + sys); ut_ad(col->mtype == DATA_SYS); ut_ad(col->prtype == (sys | DATA_NOT_NULL)); @@ -479,7 +612,7 @@ dict_table_get_sys_col( /********************************************************************//** Gets the given system column number of a table. 
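A standalone sketch (not part of the patch) of the column accounting used above: table->n_cols counts non-virtual columns including the system columns appended at the tail, virtual columns are tracked separately, and a system column's slot is computed from the tail. DATA_N_SYS_COLS is 3 (ROW_ID, TRX_ID, ROLL_PTR); the smaller count for intrinsic tables is shown as 1 purely for illustration, since DATA_ITT_N_SYS_COLS is not defined in this hunk:

#include <cassert>
#include <iostream>

struct table_t {
    unsigned n_cols;    // non-virtual columns, system columns included
    unsigned n_v_cols;  // virtual columns, tracked separately
    bool     intrinsic;
};

// 3 = ROW_ID, TRX_ID, ROLL_PTR; the intrinsic count is an assumption here.
unsigned n_sys_cols(const table_t& t)   { return t.intrinsic ? 1 : 3; }
unsigned n_user_cols(const table_t& t)  { return t.n_cols - n_sys_cols(t); }
unsigned n_tot_u_cols(const table_t& t) { return n_user_cols(t) + t.n_v_cols; }

// Mirrors dict_table_get_sys_col_no(): system columns sit at the end of the
// column array, after all user-defined non-virtual columns.
unsigned sys_col_no(const table_t& t, unsigned sys) {
    assert(sys < n_sys_cols(t));
    return t.n_cols - n_sys_cols(t) + sys;
}

int main() {
    table_t t{5 /* 2 user + 3 system */, 1 /* one virtual column */, false};
    std::cout << n_user_cols(t) << " " << n_tot_u_cols(t) << " "
              << sys_col_no(t, 0) << "\n";   // prints: 2 3 2
}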
-@return column number */ +@return column number */ UNIV_INLINE ulint dict_table_get_sys_col_no( @@ -488,15 +621,15 @@ dict_table_get_sys_col_no( ulint sys) /*!< in: DATA_ROW_ID, ... */ { ut_ad(table); - ut_ad(sys < DATA_N_SYS_COLS); + ut_ad(sys < dict_table_get_n_sys_cols(table)); ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - return(table->n_cols - DATA_N_SYS_COLS + sys); + return(table->n_cols - dict_table_get_n_sys_cols(table) + sys); } /********************************************************************//** Check whether the table uses the compact page format. -@return TRUE if table uses the compact page format */ +@return TRUE if table uses the compact page format */ UNIV_INLINE ibool dict_table_is_comp( @@ -526,77 +659,44 @@ dict_table_has_fts_index( return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS)); } -/********************************************************************//** -Validate the table flags. -@return true if valid. */ +/** Validate the table flags. +@param[in] flags Table flags +@return true if valid. */ UNIV_INLINE bool dict_tf_is_valid( -/*=============*/ - ulint flags) /*!< in: table flags */ + ulint flags) { - ulint compact = DICT_TF_GET_COMPACT(flags); + bool compact = DICT_TF_GET_COMPACT(flags); ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); + bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); + bool data_dir = DICT_TF_HAS_DATA_DIR(flags); + bool shared_space = DICT_TF_HAS_SHARED_SPACE(flags); ulint unused = DICT_TF_GET_UNUSED(flags); - ulint page_compression = DICT_TF_GET_PAGE_COMPRESSION(flags); + bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(flags); ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags); - ulint data_dir = DICT_TF_HAS_DATA_DIR(flags); ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(flags); + bool flags_corrupt = false; /* Make sure there are no bits that we do not know about. */ if (unused != 0) { - fprintf(stderr, - "InnoDB: Error: table unused flags are %ld" - " in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - unused, - compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - ); - - return(false); + flags_corrupt = true; + } - } else if (atomic_blobs) { - /* Barracuda row formats COMPRESSED and DYNAMIC build on - the page structure introduced for the COMPACT row format - by allowing keys in secondary indexes to be made from - data stored off-page in the clustered index. */ + if (atomic_blobs) { + /* Barracuda row formats COMPRESSED and DYNAMIC both use + atomic_blobs, which build on the page structure introduced + for the COMPACT row format by allowing keys in secondary + indexes to be made from data stored off-page in the + clustered index. 
*/ if (!compact) { - fprintf(stderr, - "InnoDB: Error: table compact flags are %ld" - " in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - compact, compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - ); - return(false); + flags_corrupt = true; } } else if (zip_ssize) { - /* Antelope does not support COMPRESSED row format. */ - fprintf(stderr, - "InnoDB: Error: table flags are %ld" - " in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - flags, compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - ); - return(false); + flags_corrupt = true; } if (zip_ssize) { @@ -607,20 +707,7 @@ dict_tf_is_valid( if (!compact || !atomic_blobs || zip_ssize > PAGE_ZIP_SSIZE_MAX) { - - fprintf(stderr, - "InnoDB: Error: table compact flags are %ld in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - flags, - compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - - ); - return(false); + flags_corrupt = true; } } @@ -631,48 +718,72 @@ dict_tf_is_valid( if (!compact || !page_compression || !atomic_blobs) { - - fprintf(stderr, - "InnoDB: Error: table flags are %ld in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - flags, compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - ); - return(false); + flags_corrupt = true; } } if (atomic_writes) { if(atomic_writes > ATOMIC_WRITES_OFF) { - - fprintf(stderr, - "InnoDB: Error: table flags are %ld in the data dictionary and are corrupted\n" - "InnoDB: Error: data dictionary flags are\n" - "InnoDB: compact %ld atomic_blobs %ld\n" - "InnoDB: unused %ld data_dir %ld zip_ssize %ld\n" - "InnoDB: page_compression %ld page_compression_level %ld\n" - "InnoDB: atomic_writes %ld\n", - flags, compact, atomic_blobs, unused, data_dir, zip_ssize, - page_compression, page_compression_level, atomic_writes - ); - return(false); + flags_corrupt = true; } } - /* CREATE TABLE ... DATA DIRECTORY is supported for any row format, - so the DATA_DIR flag is compatible with all other table flags. */ + /* HAS_DATA_DIR and SHARED_SPACE are mutually exclusive. 
*/ + if (data_dir && shared_space) { + flags_corrupt = true; + } + + if (flags_corrupt) { + ib::error() + << "InnoDB: Error: table unused flags are:" << flags + << " in the data dictionary and are corrupted:" + << " compact:" << compact + << " atomic_blobs:" << atomic_blobs + << " unused:" << unused + << " data_dir:" << data_dir + << " zip_ssize:" << zip_ssize + << " page_compression:" << page_compression + << " page_compression_level:" << page_compression_level + << " atomic_writes:" << atomic_writes + << " shared_space:" << shared_space; + return (false); + } else { + return(true); + } +} + +/** Validate both table flags and table flags2 and make sure they +are compatible. +@param[in] flags Table flags +@param[in] flags2 Table flags2 +@return true if valid. */ +UNIV_INLINE +bool +dict_tf2_is_valid( + ulint flags, + ulint flags2) +{ + if (!dict_tf_is_valid(flags)) { + return(false); + } + + if ((flags2 & DICT_TF2_UNUSED_BIT_MASK) != 0) { + return(false); + } + + bool file_per_table = ((flags2 & DICT_TF2_USE_FILE_PER_TABLE) != 0); + bool shared_space = DICT_TF_HAS_SHARED_SPACE(flags); + if (file_per_table && shared_space) { + return(false); + } return(true); } /********************************************************************//** Validate a SYS_TABLES TYPE field and return it. -@return Same as input after validating it as a SYS_TABLES TYPE field. +@return Same as input after validating it as a SYS_TABLES TYPE field. If there is an error, return ULINT_UNDEFINED. */ UNIV_INLINE ulint @@ -686,7 +797,7 @@ dict_sys_tables_type_validate( ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(type); ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(type); ulint unused = DICT_TF_GET_UNUSED(type); - ulint page_compression = DICT_TF_GET_PAGE_COMPRESSION(type); + bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(type); ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type); ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(type); @@ -701,16 +812,17 @@ dict_sys_tables_type_validate( if (redundant) { if (zip_ssize || atomic_blobs) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=Redundant, zip_ssize %lu atomic_blobs %lu\n", - zip_ssize, atomic_blobs); + ib::error() + << "SYS_TABLES::TYPE=Redundant, zip_ssize:" << zip_ssize + << " atomic_blobs:" << atomic_blobs; return(ULINT_UNDEFINED); } } /* Make sure there are no bits that we do not know about. */ if (unused) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, unused %lu\n", - type, unused); + ib::error() + << "SYS_TABLES::TYPE=" << type << " unused:" << unused; return(ULINT_UNDEFINED); } @@ -725,8 +837,9 @@ dict_sys_tables_type_validate( } else if (zip_ssize) { /* Antelope does not support COMPRESSED format. */ - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, zip_ssize %lu\n", - type, zip_ssize); + ib::error() + << "SYS_TABLES::TYPE=" << type << "zip_ssize:" << zip_ssize; + return(ULINT_UNDEFINED); } @@ -736,15 +849,17 @@ dict_sys_tables_type_validate( should be in N_COLS, but we already know about the low_order_bit and DICT_N_COLS_COMPACT flags. */ if (!atomic_blobs) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, zip_ssize %lu atomic_blobs %lu\n", - type, zip_ssize, atomic_blobs); + ib::error() << "SYS_TABLES::TYPE=" << type + << " zip_ssize:" << zip_ssize + << " atomic_blobs:" << atomic_blobs; return(ULINT_UNDEFINED); } /* Validate that the number is within allowed range. 
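The rewritten dict_tf_is_valid() above collects every inconsistency into a single flags_corrupt flag and reports once, and the new dict_tf2_is_valid() additionally rejects unknown flags2 bits and the file-per-table plus shared-tablespace combination. A standalone sketch of those checks (not part of the patch; the bit values below are invented for the sketch, not the real DICT_TF/DICT_TF2 layout):

#include <cstdint>
#include <iostream>

// Invented bit assignments, standing in for the real DICT_TF_* / DICT_TF2_*.
constexpr std::uint32_t TF_COMPACT         = 1u << 0;
constexpr std::uint32_t TF_ATOMIC_BLOBS    = 1u << 1;
constexpr std::uint32_t TF_DATA_DIR        = 1u << 2;
constexpr std::uint32_t TF_SHARED_SPACE    = 1u << 3;
constexpr std::uint32_t TF2_FILE_PER_TABLE = 1u << 0;
constexpr std::uint32_t TF2_KNOWN_MASK     = 0x1u;   // all flags2 bits we know about

// Collect-then-report: every inconsistency only sets `corrupt`, and a single
// diagnostic is emitted at the end, as dict_tf_is_valid() now does.
bool tf_is_valid(std::uint32_t flags) {
    bool corrupt = false;
    if ((flags & TF_ATOMIC_BLOBS) && !(flags & TF_COMPACT)) corrupt = true;
    if ((flags & TF_DATA_DIR) && (flags & TF_SHARED_SPACE)) corrupt = true;
    if (corrupt) {
        std::cerr << "table flags are corrupted: 0x" << std::hex << flags << "\n";
        return false;
    }
    return true;
}

// Shape of dict_tf2_is_valid(): flags must be valid on their own, flags2 must
// contain no unknown bits, and FILE_PER_TABLE excludes a shared tablespace.
bool tf2_is_valid(std::uint32_t flags, std::uint32_t flags2) {
    if (!tf_is_valid(flags)) return false;
    if ((flags2 & ~TF2_KNOWN_MASK) != 0) return false;
    const bool file_per_table = (flags2 & TF2_FILE_PER_TABLE) != 0;
    const bool shared_space   = (flags & TF_SHARED_SPACE) != 0;
    return !(file_per_table && shared_space);
}

int main() {
    std::cout << tf2_is_valid(TF_COMPACT | TF_ATOMIC_BLOBS, TF2_FILE_PER_TABLE)  // 1
              << tf2_is_valid(TF_COMPACT | TF_SHARED_SPACE, TF2_FILE_PER_TABLE)  // 0
              << "\n";
}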
*/ if (zip_ssize > PAGE_ZIP_SSIZE_MAX) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, zip_ssize %lu max %d\n", - type, zip_ssize, PAGE_ZIP_SSIZE_MAX); + ib::error() << "SYS_TABLES::TYPE=" << type + << " zip_ssize:" << zip_ssize + << " max:" << PAGE_ZIP_SSIZE_MAX; return(ULINT_UNDEFINED); } } @@ -752,7 +867,7 @@ dict_sys_tables_type_validate( /* There is nothing to validate for the data_dir field. CREATE TABLE ... DATA DIRECTORY is supported for any row format, so the DATA_DIR flag is compatible with any other - table flags. However, it is not used with TEMPORARY tables.*/ + table flags. However, it is not used with TEMPORARY tables. */ if (page_compression || page_compression_level) { /* page compressed row format must have low_order_bit and @@ -761,17 +876,19 @@ dict_sys_tables_type_validate( low_order_bit and DICT_N_COLS_COMPACT flags. */ if (!atomic_blobs || !page_compression) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, page_compression %lu page_compression_level %lu\n" - "InnoDB: Error: atomic_blobs %lu\n", - type, page_compression, page_compression_level, atomic_blobs); + ib::error() << "SYS_TABLES::TYPE=" << type + << " page_compression:" << page_compression + << " page_compression_level:" << page_compression_level + << " atomic_blobs:" << atomic_blobs; + return(ULINT_UNDEFINED); } } /* Validate that the atomic writes number is within allowed range. */ if (atomic_writes > ATOMIC_WRITES_OFF) { - fprintf(stderr, "InnoDB: Error: SYS_TABLES::TYPE=%lu, atomic_writes %lu\n", - type, atomic_writes); + ib::error() << "SYS_TABLES::TYPE=" << type + << " atomic_writes:" << atomic_writes; return(ULINT_UNDEFINED); } @@ -783,7 +900,7 @@ dict_sys_tables_type_validate( Determine the file format from dict_table_t::flags The low order bit will be zero for REDUNDANT and 1 for COMPACT. For any other row_format, file_format is > 0 and DICT_TF_COMPACT will also be set. -@return file format version */ +@return file format version */ UNIV_INLINE rec_format_t dict_tf_get_rec_format( @@ -809,7 +926,7 @@ dict_tf_get_rec_format( /********************************************************************//** Determine the file format from a dict_table_t::flags. -@return file format version */ +@return file format version */ UNIV_INLINE ulint dict_tf_get_format( @@ -825,7 +942,7 @@ dict_tf_get_format( /********************************************************************//** Determine the file format of a table. -@return file format version */ +@return file format version */ UNIV_INLINE ulint dict_table_get_format( @@ -837,26 +954,29 @@ dict_table_get_format( return(dict_tf_get_format(table->flags)); } -/********************************************************************//** -Set the file format and zip size in a dict_table_t::flags. If zip size -is not needed, it should be 0. */ +/** Set the various values in a dict_table_t::flags pointer. 
+@param[in,out] flags, Pointer to a 4 byte Table Flags +@param[in] format File Format +@param[in] zip_ssize Zip Shift Size +@param[in] use_data_dir Table uses DATA DIRECTORY +@param[in] atomic_writes Does table use atomic writes +@param[in] shared_space Table uses a General Shared Tablespace +@param[in] page_compressed Table uses page compression +@param[in] page_compression_level Page compression level +@param[in] atomic_writes Table uses atomic writes */ UNIV_INLINE void dict_tf_set( /*========*/ - ulint* flags, /*!< in/out: table flags */ - rec_format_t format, /*!< in: file format */ - ulint zip_ssize, /*!< in: zip shift size */ - bool use_data_dir, /*!< in: table uses DATA DIRECTORY - */ - bool page_compressed,/*!< in: table uses page compressed - pages */ - ulint page_compression_level, /*!< in: table page compression - level */ - ulint atomic_writes) /*!< in: table atomic writes setup */ -{ - atomic_writes_t awrites = (atomic_writes_t)atomic_writes; - + ulint* flags, + rec_format_t format, + ulint zip_ssize, + bool use_data_dir, + bool shared_space, + bool page_compressed, + ulint page_compression_level, + ulint atomic_writes) +{ switch (format) { case REC_FORMAT_REDUNDANT: *flags = 0; @@ -878,6 +998,14 @@ dict_tf_set( break; } + if (use_data_dir) { + *flags |= (1 << DICT_TF_POS_DATA_DIR); + } + + if (shared_space) { + *flags |= (1 << DICT_TF_POS_SHARED_SPACE); + } + if (page_compressed) { *flags |= (1 << DICT_TF_POS_ATOMIC_BLOBS) | (1 << DICT_TF_POS_PAGE_COMPRESSION) @@ -888,69 +1016,71 @@ dict_tf_set( ut_ad(dict_tf_get_page_compression_level(*flags) == page_compression_level); } - *flags |= (atomic_writes << DICT_TF_POS_ATOMIC_WRITES); - ut_a(dict_tf_get_atomic_writes(*flags) == awrites); - - if (use_data_dir) { - *flags |= (1 << DICT_TF_POS_DATA_DIR); + if (atomic_writes) { + *flags |= (atomic_writes << DICT_TF_POS_ATOMIC_WRITES); + ut_a(dict_tf_get_atomic_writes(*flags) == atomic_writes); } } -/********************************************************************//** -Convert a 32 bit integer table flags to the 32 bit integer that is -written into the tablespace header at the offset FSP_SPACE_FLAGS and is -also stored in the fil_space_t::flags field. The following chart shows -the translation of the low order bit. Other bits are the same. -========================= Low order bit ========================== - | REDUNDANT | COMPACT | COMPRESSED | DYNAMIC -dict_table_t::flags | 0 | 1 | 1 | 1 -fil_space_t::flags | 0 | 0 | 1 | 1 -================================================================== -@return tablespace flags (fil_space_t::flags) */ +/** Initialize a dict_table_t::flags pointer. +@param[in] compact, Table uses Compact or greater +@param[in] zip_ssize Zip Shift Size (log 2 minus 9) +@param[in] atomic_blobs Table uses Compressed or Dynamic +@param[in] data_dir Table uses DATA DIRECTORY +@param[in] shared_space Table uses a General Shared Tablespace +@param[in] page_compression Table uses page compression +@param[in] page_compression_level used compression level +@param[in] atomic_writes Table atomic writes option */ UNIV_INLINE ulint -dict_tf_to_fsp_flags( -/*=================*/ - ulint table_flags) /*!< in: dict_table_t::flags */ -{ - ulint fsp_flags; - ulint page_compression = DICT_TF_GET_PAGE_COMPRESSION(table_flags); - ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(table_flags); - ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(table_flags); - - DBUG_EXECUTE_IF("dict_tf_to_fsp_flags_failure", - return(ULINT_UNDEFINED);); - - /* Adjust bit zero. 
*/ - fsp_flags = DICT_TF_HAS_ATOMIC_BLOBS(table_flags) ? 1 : 0; +dict_tf_init( + bool compact, + ulint zip_ssize, + bool atomic_blobs, + bool data_dir, + bool shared_space, + bool page_compressed, + ulint page_compression_level, + ulint atomic_writes) +{ + ulint flags = 0; + + if (compact) { + flags |= DICT_TF_COMPACT; + } - /* ZIP_SSIZE and ATOMIC_BLOBS are at the same position. */ - fsp_flags |= table_flags & DICT_TF_MASK_ZIP_SSIZE; - fsp_flags |= table_flags & DICT_TF_MASK_ATOMIC_BLOBS; + if (zip_ssize) { + flags |= (zip_ssize << DICT_TF_POS_ZIP_SSIZE); + } - /* In addition, tablespace flags also contain the page size. */ - fsp_flags |= fsp_flags_set_page_size(fsp_flags, UNIV_PAGE_SIZE); + if (atomic_blobs) { + flags |= (1 << DICT_TF_POS_ATOMIC_BLOBS); + } - /* The DATA_DIR flag is in a different position in fsp_flag */ - fsp_flags |= DICT_TF_HAS_DATA_DIR(table_flags) - ? FSP_FLAGS_MASK_DATA_DIR : 0; + if (data_dir) { + flags |= (1 << DICT_TF_POS_DATA_DIR); + } - /* In addition, tablespace flags also contain if the page - compression is used for this table. */ - fsp_flags |= FSP_FLAGS_SET_PAGE_COMPRESSION(fsp_flags, page_compression); + if (shared_space) { + flags |= (1 << DICT_TF_POS_SHARED_SPACE); + } - /* In addition, tablespace flags also contain page compression level - if page compression is used for this table. */ - fsp_flags |= FSP_FLAGS_SET_PAGE_COMPRESSION_LEVEL(fsp_flags, page_compression_level); + if (page_compressed) { + flags |= (1 << DICT_TF_POS_ATOMIC_BLOBS) + | (1 << DICT_TF_POS_PAGE_COMPRESSION) + | (page_compression_level << DICT_TF_POS_PAGE_COMPRESSION_LEVEL); - /* In addition, tablespace flags also contain flag if atomic writes - is used for this table */ - fsp_flags |= FSP_FLAGS_SET_ATOMIC_WRITES(fsp_flags, atomic_writes); + ut_ad(zip_ssize == 0); + ut_ad(dict_tf_get_page_compression(flags) == TRUE); + ut_ad(dict_tf_get_page_compression_level(flags) == page_compression_level); + } - ut_a(fsp_flags_is_valid(fsp_flags)); - ut_a(dict_tf_verify_flags(table_flags, fsp_flags)); + if (atomic_writes) { + flags |= (atomic_writes << DICT_TF_POS_ATOMIC_WRITES); + ut_a(dict_tf_get_atomic_writes(flags) == atomic_writes); + } - return(fsp_flags); + return(flags); } /********************************************************************//** @@ -962,7 +1092,7 @@ Other bits are the same. SYS_TABLES.TYPE | 1 | 1 | 1 dict_table_t::flags | 0 | 1 | 1 ================================================================== -@return ulint containing SYS_TABLES.TYPE */ +@return ulint containing SYS_TABLES.TYPE */ UNIV_INLINE ulint dict_sys_tables_type_to_tf( @@ -984,9 +1114,9 @@ dict_sys_tables_type_to_tf( | DICT_TF_MASK_PAGE_COMPRESSION | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL | DICT_TF_MASK_ATOMIC_WRITES + | DICT_TF_MASK_SHARED_SPACE); - ); - + ut_ad(!DICT_TF_GET_ZIP_SSIZE(flags) || DICT_TF_HAS_ATOMIC_BLOBS(flags)); return(flags); } @@ -999,7 +1129,7 @@ the low order bit. Other bits are the same. 
dict_table_t::flags | 0 | 1 | 1 SYS_TABLES.TYPE | 1 | 1 | 1 ================================================================== -@return ulint containing SYS_TABLES.TYPE */ +@return ulint containing SYS_TABLES.TYPE */ UNIV_INLINE ulint dict_tf_to_sys_tables_type( @@ -1020,43 +1150,46 @@ dict_tf_to_sys_tables_type( | DICT_TF_MASK_DATA_DIR | DICT_TF_MASK_PAGE_COMPRESSION | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL - | DICT_TF_MASK_ATOMIC_WRITES); + | DICT_TF_MASK_ATOMIC_WRITES + | DICT_TF_MASK_SHARED_SPACE); return(type); } -/********************************************************************//** -Extract the compressed page size from dict_table_t::flags. -These flags are in memory, so assert that they are valid. -@return compressed page size, or 0 if not compressed */ +/** Extract the page size info from table flags. +@param[in] flags flags +@return a structure containing the compressed and uncompressed +page sizes and a boolean indicating if the page is compressed. */ UNIV_INLINE -ulint -dict_tf_get_zip_size( -/*=================*/ - ulint flags) /*!< in: flags */ +const page_size_t +dict_tf_get_page_size( + ulint flags) { - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - ulint zip_size = (zip_ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize - : 0); + const ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); + + if (zip_ssize == 0) { + return(univ_page_size); + } + + const ulint zip_size = (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize; ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); - return(zip_size); + return(page_size_t(zip_size, univ_page_size.logical(), true)); } -/********************************************************************//** -Check whether the table uses the compressed compact page format. -@return compressed page size, or 0 if not compressed */ +/** Get the table page size. +@param[in] table table +@return a structure containing the compressed and uncompressed +page sizes and a boolean indicating if the page is compressed */ UNIV_INLINE -ulint -dict_table_zip_size( -/*================*/ - const dict_table_t* table) /*!< in: table */ +const page_size_t +dict_table_page_size( + const dict_table_t* table) { - ut_ad(table); + ut_ad(table != NULL); - return(dict_tf_get_zip_size(table->flags)); + return(dict_tf_get_page_size(table->flags)); } #ifndef UNIV_HOTBACKUP @@ -1073,7 +1206,7 @@ dict_table_x_lock_indexes( dict_index_t* index; ut_a(table); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); /* Loop through each index of the table and lock them */ for (index = dict_table_get_first_index(table); @@ -1094,7 +1227,7 @@ dict_table_x_unlock_indexes( dict_index_t* index; ut_a(table); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); for (index = dict_table_get_first_index(table); index != NULL; @@ -1107,7 +1240,7 @@ dict_table_x_unlock_indexes( /********************************************************************//** Gets the number of fields in the internal representation of an index, including fields added by the dictionary system. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_fields( @@ -1127,7 +1260,7 @@ Gets the number of fields in the internal representation of an index that uniquely determine the position of an index entry in the index, if we do not take multiversioning into account: in the B-tree use the value returned by dict_index_get_n_unique_in_tree. 
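dict_tf_get_page_size() above maps the ZIP_SSIZE field to a physical page size: zero means uncompressed, otherwise the size is (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize. A standalone sketch (not part of the patch); the 1024-byte minimum zip size and the 16K logical page size are assumptions taken from InnoDB's usual defaults, not from this hunk:

#include <iostream>

constexpr unsigned ZIP_SIZE_MIN      = 1024;    // assumed UNIV_ZIP_SIZE_MIN
constexpr unsigned LOGICAL_PAGE_SIZE = 16384;   // assumed default UNIV_PAGE_SIZE

struct page_size {
    unsigned physical;
    unsigned logical;
    bool     compressed;
};

// Mirrors dict_tf_get_page_size(): zip_ssize == 0 means "not compressed";
// otherwise physical = (ZIP_SIZE_MIN >> 1) << zip_ssize, i.e.
// 1 -> 1K, 2 -> 2K, 3 -> 4K, 4 -> 8K, 5 -> 16K.
page_size page_size_from_zip_ssize(unsigned zip_ssize) {
    if (zip_ssize == 0) {
        return {LOGICAL_PAGE_SIZE, LOGICAL_PAGE_SIZE, false};
    }
    const unsigned zip = (ZIP_SIZE_MIN >> 1) << zip_ssize;
    return {zip, LOGICAL_PAGE_SIZE, true};
}

int main() {
    for (unsigned ssize = 0; ssize <= 5; ssize++) {
        const page_size p = page_size_from_zip_ssize(ssize);
        std::cout << "zip_ssize=" << ssize << " physical=" << p.physical
                  << " compressed=" << p.compressed << "\n";
    }
}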
-@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_unique( @@ -1146,7 +1279,7 @@ dict_index_get_n_unique( Gets the number of fields in the internal representation of an index which uniquely determine the position of an index entry in the index, if we also take multiversioning into account. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_unique_in_tree( @@ -1166,12 +1299,38 @@ dict_index_get_n_unique_in_tree( return(dict_index_get_n_fields(index)); } +/** +Gets the number of fields on nonleaf page level in the internal representation +of an index which uniquely determine the position of an index entry in the +index, if we also take multiversioning into account. Note, it doesn't +include page no field. +@param[in] index index +@return number of fields */ +UNIV_INLINE +ulint +dict_index_get_n_unique_in_tree_nonleaf( + const dict_index_t* index) +{ + ut_ad(index != NULL); + ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); + ut_ad(index->cached); + + if (dict_index_is_spatial(index)) { + /* For spatial index, on non-leaf page, we have only + 2 fields(mbr+page_no). So, except page no field, + there's one field there. */ + return(DICT_INDEX_SPATIAL_NODEPTR_SIZE); + } else { + return(dict_index_get_n_unique_in_tree(index)); + } +} + /********************************************************************//** Gets the number of user-defined ordering fields in the index. In the internal representation of clustered indexes we add the row id to the ordering fields to make a clustered index unique, but this function returns the number of fields the user defined in the index as ordering fields. -@return number of fields */ +@return number of fields */ UNIV_INLINE ulint dict_index_get_n_ordering_defined_by_user( @@ -1185,7 +1344,7 @@ dict_index_get_n_ordering_defined_by_user( #ifdef UNIV_DEBUG /********************************************************************//** Gets the nth field of an index. -@return pointer to field object */ +@return pointer to field object */ UNIV_INLINE dict_field_t* dict_index_get_nth_field( @@ -1203,7 +1362,7 @@ dict_index_get_nth_field( /********************************************************************//** Returns the position of a system column in an index. -@return position, ULINT_UNDEFINED if not contained */ +@return position, ULINT_UNDEFINED if not contained */ UNIV_INLINE ulint dict_index_get_sys_col_pos( @@ -1213,7 +1372,7 @@ dict_index_get_sys_col_pos( { ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); - ut_ad(!dict_index_is_univ(index)); + ut_ad(!dict_index_is_ibuf(index)); if (dict_index_is_clust(index)) { @@ -1223,13 +1382,12 @@ dict_index_get_sys_col_pos( } return(dict_index_get_nth_col_pos( - index, dict_table_get_sys_col_no(index->table, type), - NULL)); + index, dict_table_get_sys_col_no(index->table, type), NULL)); } /*********************************************************************//** Gets the field column. -@return field->col, pointer to the table column */ +@return field->col, pointer to the table column */ UNIV_INLINE const dict_col_t* dict_field_get_col( @@ -1243,7 +1401,7 @@ dict_field_get_col( /********************************************************************//** Gets pointer to the nth column in an index. 
-@return column */ +@return column */ UNIV_INLINE const dict_col_t* dict_index_get_nth_col( @@ -1256,7 +1414,7 @@ dict_index_get_nth_col( /********************************************************************//** Gets the column number the nth field in an index. -@return column number */ +@return column number */ UNIV_INLINE ulint dict_index_get_nth_col_no( @@ -1279,14 +1437,14 @@ dict_index_get_nth_col_pos( ulint n, /*!< in: column number */ ulint* prefix_col_pos) /*!< out: col num if prefix */ { - return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE, + return(dict_index_get_nth_col_or_prefix_pos(index, n, false, false, prefix_col_pos)); } #ifndef UNIV_HOTBACKUP /********************************************************************//** Returns the minimum data size of an index record. -@return minimum data size in bytes */ +@return minimum data size in bytes */ UNIV_INLINE ulint dict_index_get_min_size( @@ -1306,7 +1464,7 @@ dict_index_get_min_size( /*********************************************************************//** Gets the space id of the root of the index tree. -@return space id */ +@return space id */ UNIV_INLINE ulint dict_index_get_space( @@ -1336,7 +1494,7 @@ dict_index_set_space( /*********************************************************************//** Gets the page number of the root of the index tree. -@return page number */ +@return page number */ UNIV_INLINE ulint dict_index_get_page( @@ -1351,7 +1509,7 @@ dict_index_get_page( /*********************************************************************//** Gets the read-write lock of the index tree. -@return read-write lock */ +@return read-write lock */ UNIV_INLINE rw_lock_t* dict_index_get_lock( @@ -1368,7 +1526,7 @@ dict_index_get_lock( Returns free space reserved for future updates of records. This is relevant only in the case of many consecutive inserts, as updates which make the records bigger might fragment the index. -@return number of free bytes on page, reserved for updates */ +@return number of free bytes on page, reserved for updates */ UNIV_INLINE ulint dict_index_get_space_reserve(void) @@ -1420,9 +1578,8 @@ dict_index_set_online_status( enum online_index_status status) /*!< in: status */ { ut_ad(!(index->type & DICT_FTS)); -#ifdef UNIV_SYNC_DEBUG - ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX)); -#endif /* UNIV_SYNC_DEBUG */ + ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_X)); + #ifdef UNIV_DEBUG switch (dict_index_get_online_status(index)) { case ONLINE_INDEX_COMPLETE: @@ -1482,7 +1639,8 @@ ulint dict_table_is_fts_column( /*=====================*/ ib_vector_t* indexes,/*!< in: vector containing only FTS indexes */ - ulint col_no) /*!< in: col number to search for */ + ulint col_no, /*!< in: col number to search for */ + bool is_virtual) /*!< in: whether it is a virtual column */ { ulint i; @@ -1492,7 +1650,8 @@ dict_table_is_fts_column( index = (dict_index_t*) ib_vector_getp(indexes, i); - if (dict_index_contains_col_or_prefix(index, col_no)) { + if (dict_index_contains_col_or_prefix( + index, col_no, is_virtual)) { return(i); } @@ -1526,9 +1685,55 @@ dict_max_field_len_store_undo( return(prefix_len); } +/** Determine maximum bytes of a virtual column need to be stored +in the undo log. 
+@param[in] table dict_table_t for the table +@param[in] col_no virtual column number +@return maximum bytes of virtual column to be stored in the undo log */ +UNIV_INLINE +ulint +dict_max_v_field_len_store_undo( + dict_table_t* table, + ulint col_no) +{ + const dict_col_t* col + = &dict_table_get_nth_v_col(table, col_no)->m_col; + ulint max_log_len; + + /* This calculation conforms to the non-virtual column + maximum log length calculation: + 1) for UNIV_FORMAT_A, upto REC_ANTELOPE_MAX_INDEX_COL_LEN + for UNIV_FORMAT_B, upto col->max_prefix or + 2) REC_VERSION_56_MAX_INDEX_COL_LEN, whichever is less */ + if (dict_table_get_format(table) >= UNIV_FORMAT_B) { + max_log_len = (col->max_prefix > 0) + ? col->max_prefix + : DICT_MAX_FIELD_LEN_BY_FORMAT(table); + } else { + max_log_len = REC_ANTELOPE_MAX_INDEX_COL_LEN; + } + + return(max_log_len); +} + +/**********************************************************************//** +Prevent table eviction by moving a table to the non-LRU list from the +LRU list if it is not already there. */ +UNIV_INLINE +void +dict_table_prevent_eviction( +/*========================*/ + dict_table_t* table) /*!< in: table to prevent eviction */ +{ + ut_ad(mutex_own(&dict_sys->mutex)); + if (table->can_be_evicted) { + dict_table_move_from_lru_to_non_lru(table); + } +} + /********************************************************************//** Check whether the table is corrupted. -@return nonzero for corrupted table, zero for valid tables */ +@return nonzero for corrupted table, zero for valid tables */ UNIV_INLINE ulint dict_table_is_corrupted( @@ -1543,7 +1748,7 @@ dict_table_is_corrupted( /********************************************************************//** Check whether the index is corrupted. -@return nonzero for corrupted index, zero for valid indexes */ +@return nonzero for corrupted index, zero for valid indexes */ UNIV_INLINE ulint dict_index_is_corrupted( @@ -1559,7 +1764,7 @@ dict_index_is_corrupted( /********************************************************************//** Check if the tablespace for the table has been discarded. -@return true if the tablespace has been discarded. */ +@return true if the tablespace has been discarded. */ UNIV_INLINE bool dict_table_is_discarded( @@ -1571,7 +1776,7 @@ dict_table_is_discarded( /********************************************************************//** Check if it is a temporary table. -@return true if temporary table flag is set. */ +@return true if temporary table flag is set. */ UNIV_INLINE bool dict_table_is_temporary( @@ -1581,6 +1786,88 @@ dict_table_is_temporary( return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)); } +/** Check whether the table is intrinsic. +An intrinsic table is a special kind of temporary table that +is invisible to the end user. It can be created internally by InnoDB, the MySQL +server layer or other modules connected to InnoDB in order to gather and use +data as part of a larger task. Since access to it must be as fast as possible, +it does not need UNDO semantics, system fields DB_TRX_ID & DB_ROLL_PTR, +doublewrite, checksum, insert buffer, use of the shared data dictionary, +locking, or even a transaction. In short, these are not ACID tables at all, +just temporary data stored and manipulated during a larger process. + +@param[in] table table to check +@return true if intrinsic table flag is set. 
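A standalone sketch (not part of the patch) of the length decision in dict_max_v_field_len_store_undo() above, using the well-known 767-byte (Antelope) and 3072-byte (Barracuda) index column limits as stand-ins for REC_ANTELOPE_MAX_INDEX_COL_LEN and DICT_MAX_FIELD_LEN_BY_FORMAT():

#include <iostream>

constexpr unsigned ANTELOPE_MAX_INDEX_COL_LEN  = 767;
constexpr unsigned BARRACUDA_MAX_INDEX_COL_LEN = 3072;

enum class row_format { antelope, barracuda };

// Mirrors the branch above: Barracuda tables log either the column's declared
// max_prefix or the format maximum; Antelope tables always use the 767 limit.
unsigned max_v_field_len_store_undo(row_format fmt, unsigned max_prefix) {
    if (fmt == row_format::barracuda) {
        return max_prefix > 0 ? max_prefix : BARRACUDA_MAX_INDEX_COL_LEN;
    }
    return ANTELOPE_MAX_INDEX_COL_LEN;
}

int main() {
    std::cout << max_v_field_len_store_undo(row_format::barracuda, 0)   << "\n"   // 3072
              << max_v_field_len_store_undo(row_format::barracuda, 255) << "\n"   // 255
              << max_v_field_len_store_undo(row_format::antelope, 0)    << "\n";  // 767
}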
*/ +UNIV_INLINE +bool +dict_table_is_intrinsic( + const dict_table_t* table) +{ + return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_INTRINSIC)); +} + +/** Check whether locking is disabled for this table. +Currently this is done for intrinsic table as their visibility is limited +to the connection only. + +@param[in] table table to check +@return true if locking is disabled. */ +UNIV_INLINE +bool +dict_table_is_locking_disabled( + const dict_table_t* table) +{ + return(dict_table_is_intrinsic(table)); +} + +/********************************************************************//** +Turn-off redo-logging if temporary table. */ +UNIV_INLINE +void +dict_disable_redo_if_temporary( +/*===========================*/ + const dict_table_t* table, /*!< in: table to check */ + mtr_t* mtr) /*!< out: mini-transaction */ +{ + if (dict_table_is_temporary(table)) { + mtr_set_log_mode(mtr, MTR_LOG_NO_REDO); + } +} + +/** Check if the table is found is a file_per_table tablespace. +This test does not use table flags2 since some REDUNDANT tables in the +system tablespace may have garbage in the MIX_LEN field where flags2 is +stored. These garbage MIX_LEN fields were written before v3.23.52. +A patch was added to v3.23.52 which initializes the MIX_LEN field to 0. +Since file-per-table tablespaces were added in 4.1, any SYS_TABLES +record with a non-zero space ID will have a reliable MIX_LEN field. +However, this test does not use flags2 from SYS_TABLES.MIX_LEN. Instead, +assume that if the tablespace is not a predefined system tablespace and it +is not a general shared tablespace, then it must be file-per-table. +Also, during ALTER TABLE, the DICT_TF2_USE_FILE_PER_TABLE flag may not be +set on one of the file-per-table tablespaces. +This test cannot be done on a table in the process of being created +because the space_id will be zero until the tablespace is created. +@param[in] table An existing open table to check +@return true if this table was created as a file-per-table tablespace. */ +UNIV_INLINE +bool +dict_table_is_file_per_table( + const dict_table_t* table) /*!< in: table to check */ +{ + bool is_file_per_table = + !is_system_tablespace(table->space) + && !DICT_TF_HAS_SHARED_SPACE(table->flags); + + /* If the table is file-per-table and it is not redundant, then + it should have the flags2 bit for DICT_TF2_USE_FILE_PER_TABLE. */ + ut_ad(!is_file_per_table + || !DICT_TF_GET_COMPACT(table->flags) + || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_FILE_PER_TABLE)); + + return(is_file_per_table ); +} + /**********************************************************************//** Get index by first field of the index @return index which is having first field matches @@ -1589,8 +1876,8 @@ UNIV_INLINE dict_index_t* dict_table_get_index_on_first_col( /*==============================*/ - const dict_table_t* table, /*!< in: table */ - ulint col_index) /*!< in: position of column + const dict_table_t* table, /*!< in: table */ + ulint col_index) /*!< in: position of column in table */ { ut_ad(col_index < table->n_cols); @@ -1608,4 +1895,112 @@ dict_table_get_index_on_first_col( return(0); } +/** Get table session row-id and increment the row-id counter for next use. +@param[in,out] table table handler +@return next table session row-id. */ +UNIV_INLINE +row_id_t +dict_table_get_next_table_sess_row_id( + dict_table_t* table) +{ + return(++table->sess_row_id); +} + +/** Get table session trx-id and increment the trx-id counter for next use. +@param[in,out] table table handler +@return next table session trx-id. 
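The file-per-table test described above reduces to two conditions. A standalone sketch (not part of the patch); treating only space 0 as a system tablespace is a simplification, since InnoDB also reserves a few other IDs:

#include <iostream>

// Simplification: only space 0 (the system tablespace) is modeled.
bool is_system_tablespace(unsigned space_id) { return space_id == 0; }

// Mirrors dict_table_is_file_per_table(): a table is file-per-table exactly
// when it is neither in a system tablespace nor in a general shared
// tablespace (the SHARED_SPACE table flag).
bool is_file_per_table(unsigned space_id, bool shared_space_flag) {
    return !is_system_tablespace(space_id) && !shared_space_flag;
}

int main() {
    std::cout << is_file_per_table(0, false)    // 0: system tablespace
              << is_file_per_table(23, false)   // 1: its own .ibd file
              << is_file_per_table(23, true)    // 0: general tablespace
              << "\n";
}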
*/ +UNIV_INLINE +trx_id_t +dict_table_get_next_table_sess_trx_id( + dict_table_t* table) +{ + return(++table->sess_trx_id); +} + +/** Get current session trx-id. +@param[in] table table handler +@return table session trx-id. */ +UNIV_INLINE +trx_id_t +dict_table_get_curr_table_sess_trx_id( + const dict_table_t* table) +{ + return(table->sess_trx_id); +} + +/** Get reference count. +@return current value of n_ref_count */ +inline +ulint +dict_table_t::get_ref_count() const +{ + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(this)); + return(n_ref_count); +} + +/** Acquire the table handle. */ +inline +void +dict_table_t::acquire() +{ + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(this)); + ++n_ref_count; +} + +/** Release the table handle. */ +inline +void +dict_table_t::release() +{ + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(this)); + ut_ad(n_ref_count > 0); + --n_ref_count; +} + +/** Check if tablespace name is "innodb_general". +@param[in] tablespace_name tablespace name +@retval true if name is "innodb_general" +@retval false if name is not "innodb_general" */ +inline +bool +dict_table_has_temp_general_tablespace_name( + const char* tablespace_name) { + + return(tablespace_name != NULL + && strncmp(tablespace_name, general_space_name, + strlen(general_space_name)) == 0); +} + +/** Encode the number of columns and number of virtual columns in a +4 bytes value. We could do this because the number of columns in +InnoDB is limited to 1017 +@param[in] n_col number of non-virtual column +@param[in] n_v_col number of virtual column +@return encoded value */ +UNIV_INLINE +ulint +dict_table_encode_n_col( + ulint n_col, + ulint n_v_col) +{ + return(n_col + (n_v_col<<16)); +} + +/** decode number of virtual and non-virtual columns in one 4 bytes value. +@param[in] encoded encoded value +@param[in,out] n_col number of non-virtual column +@param[in,out] n_v_col number of virtual column */ +UNIV_INLINE +void +dict_table_decode_n_col( + ulint encoded, + ulint* n_col, + ulint* n_v_col) +{ + + ulint num = encoded & ~DICT_N_COLS_COMPACT; + *n_v_col = num >> 16; + *n_col = num & 0xFFFF; +} + #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h index dcbc3de8e94..8e62022de85 100644 --- a/storage/innobase/include/dict0load.h +++ b/storage/innobase/include/dict0load.h @@ -33,6 +33,12 @@ Created 4/24/1996 Heikki Tuuri #include "ut0byte.h" #include "mem0mem.h" #include "btr0types.h" +#include "ut0new.h" + +#include + +/** A stack of table names related through foreign key constraints */ +typedef std::deque > dict_names_t; /** enum that defines all system table IDs. @see SYSTEM_TABLE_NAME[] */ enum dict_system_id_t { @@ -44,6 +50,7 @@ enum dict_system_id_t { SYS_FOREIGN_COLS, SYS_TABLESPACES, SYS_DATAFILES, + SYS_VIRTUAL, /* This must be last item. Defines the number of system tables. */ SYS_NUM_SYSTEM_TABLES @@ -58,57 +65,37 @@ enum dict_table_info_t { is in the cache, if so, return it */ }; -/** Check type for dict_check_tablespaces_and_store_max_id() */ -enum dict_check_t { - /** No user tablespaces have been opened - (no crash recovery, no transactions recovered). */ - DICT_CHECK_NONE_LOADED = 0, - /** Some user tablespaces may have been opened - (no crash recovery; recovered table locks for transactions). */ - DICT_CHECK_SOME_LOADED, - /** All user tablespaces have been opened (crash recovery). */ - DICT_CHECK_ALL_LOADED -}; +/** Check each tablespace found in the data dictionary. 
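dict_table_encode_n_col() and dict_table_decode_n_col() above pack the virtual column count into the upper 16 bits of a 4-byte value that also carries the DICT_N_COLS_COMPACT marker. A standalone round-trip sketch (not part of the patch); the marker bit is assumed to be bit 31 here for illustration:

#include <cassert>
#include <cstdint>
#include <iostream>

// Assumed stand-in for DICT_N_COLS_COMPACT, the format marker stored in the
// same word as the column counts.
constexpr std::uint32_t N_COLS_COMPACT = 1u << 31;

// Virtual column count in the upper 16 bits, non-virtual count in the lower
// 16 bits; InnoDB caps a table at 1017 columns, so 16 bits is plenty.
std::uint32_t encode_n_col(std::uint32_t n_col, std::uint32_t n_v_col) {
    return n_col + (n_v_col << 16);
}

void decode_n_col(std::uint32_t encoded,
                  std::uint32_t* n_col, std::uint32_t* n_v_col) {
    const std::uint32_t num = encoded & ~N_COLS_COMPACT;  // strip the marker bit
    *n_v_col = num >> 16;
    *n_col   = num & 0xFFFF;
}

int main() {
    const std::uint32_t enc = encode_n_col(12, 3) | N_COLS_COMPACT;
    std::uint32_t n_col, n_v_col;
    decode_n_col(enc, &n_col, &n_v_col);
    assert(n_col == 12 && n_v_col == 3);
    std::cout << n_col << " non-virtual, " << n_v_col << " virtual\n";
}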
+Look at each table defined in SYS_TABLES that has a space_id > 0. +If the tablespace is not yet in the fil_system cache, look up the +tablespace in SYS_DATAFILES to ensure the correct path. -/********************************************************************//** -In a crash recovery we already have all the tablespace objects created. -This function compares the space id information in the InnoDB data dictionary -to what we already read with fil_load_single_table_tablespaces(). - -In a normal startup, we create the tablespace objects for every table in -InnoDB's data dictionary, if the corresponding .ibd file exists. -We also scan the biggest space id, and store it to fil_system. */ -UNIV_INTERN +In a crash recovery we already have some tablespace objects created from +processing the REDO log. Any other tablespace in SYS_TABLESPACES not +previously used in recovery will be opened here. We will compare the +space_id information in the data dictionary to what we find in the +tablespace file. In addition, more validation will be done if recovery +was needed and force_recovery is not set. + +We also scan the biggest space id, and store it to fil_system. +@param[in] validate true if recovery was needed */ void dict_check_tablespaces_and_store_max_id( -/*====================================*/ - dict_check_t dict_check); /*!< in: how to check */ + bool validate); + /********************************************************************//** Finds the first table name in the given database. @return own: table name, NULL if does not exist; the caller must free the memory in the string! */ -UNIV_INTERN char* dict_get_first_table_name_in_db( /*============================*/ const char* name); /*!< in: database name which ends to '/' */ /********************************************************************//** -Loads a table definition from a SYS_TABLES record to dict_table_t. -Does not load any columns or indexes. -@return error message, or NULL on success */ -UNIV_INTERN -const char* -dict_load_table_low( -/*================*/ - const char* name, /*!< in: table name */ - const rec_t* rec, /*!< in: SYS_TABLES record */ - dict_table_t** table); /*!< out,own: table, or NULL */ -/********************************************************************//** Loads a table column definition from a SYS_COLUMNS record to dict_table_t. 
@return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_column_low( /*=================*/ @@ -122,14 +109,36 @@ dict_load_column_low( or NULL if table != NULL */ table_id_t* table_id, /*!< out: table id */ const char** col_name, /*!< out: column name */ - const rec_t* rec); /*!< in: SYS_COLUMNS record */ + const rec_t* rec, /*!< in: SYS_COLUMNS record */ + ulint* nth_v_col); /*!< out: if not NULL, this + records the "n" of "nth" virtual + column */ + +/** Loads a virtual column "mapping" (to base columns) information +from a SYS_VIRTUAL record +@param[in,out] table table +@param[in,out] heap memory heap +@param[in,out] column mapped base column's dict_column_t +@param[in,out] table_id table id +@param[in,out] pos virtual column position +@param[in,out] base_pos base column position +@param[in] rec SYS_VIRTUAL record +@return error message, or NULL on success */ +const char* +dict_load_virtual_low( + dict_table_t* table, + mem_heap_t* heap, + dict_col_t** column, + table_id_t* table_id, + ulint* pos, + ulint* base_pos, + const rec_t* rec); /********************************************************************//** Loads an index definition from a SYS_INDEXES record to dict_index_t. If allocate=TRUE, we will create a dict_index_t structure and fill it accordingly. If allocated=FALSE, the dict_index_t will be supplied by the caller and filled with information read from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_index_low( /*================*/ @@ -147,7 +156,6 @@ dict_load_index_low( Loads an index field definition from a SYS_FIELDS record to dict_index_t. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_load_field_low( /*================*/ @@ -170,44 +178,50 @@ Using the table->heap, copy the null-terminated filepath into table->data_dir_path and put a null byte before the extension. This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path. Make this data directory path only if it has not yet been saved. */ -UNIV_INTERN void dict_save_data_dir_path( /*====================*/ dict_table_t* table, /*!< in/out: table */ char* filepath); /*!< in: filepath of tablespace */ -/*****************************************************************//** -Make sure the data_file_name is saved in dict_table_t if needed. Try to -read it from the file dictionary first, then from SYS_DATAFILES. */ -UNIV_INTERN + +/** Make sure the data_file_name is saved in dict_table_t if needed. +Try to read it from the fil_system first, then from SYS_DATAFILES. +@param[in] table Table object +@param[in] dict_mutex_own true if dict_sys->mutex is owned already */ void dict_get_and_save_data_dir_path( -/*============================*/ - dict_table_t* table, /*!< in/out: table */ - bool dict_mutex_own); /*!< in: true if dict_sys->mutex - is owned already */ -/********************************************************************//** -Loads a table definition and also all its index definitions, and also + dict_table_t* table, + bool dict_mutex_own); + +/** Make sure the tablespace name is saved in dict_table_t if needed. +Try to read it from the file dictionary first, then from SYS_TABLESPACES. +@param[in] table Table object +@param[in] dict_mutex_own) true if dict_sys->mutex is owned already */ +void +dict_get_and_save_space_name( + dict_table_t* table, + bool dict_mutex_own); + +/** Loads a table definition and also all its index definitions, and also the cluster definition if the table is a member in a cluster. 
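dict_load_virtual_low() above reads a SYS_VIRTUAL row carrying a virtual column position and one base column position. A standalone sketch (not part of the patch) of how such rows fold into a per-virtual-column list of base column positions; the record layout and data below are invented for the sketch:

#include <iostream>
#include <map>
#include <vector>

// One SYS_VIRTUAL row, simplified: the virtual column's position and the
// position of one base column it depends on.
struct sys_virtual_rec {
    unsigned long long table_id;
    unsigned pos;       // position of the virtual column
    unsigned base_pos;  // position of one base column it depends on
};

int main() {
    // e.g. a virtual column at pos 5 computed from columns 1 and 3, and
    // another at pos 6 computed from column 2.
    const std::vector<sys_virtual_rec> recs = {
        {42, 5, 1}, {42, 5, 3}, {42, 6, 2},
    };

    // Loading replays the rows into a per-virtual-column base list, which is
    // what the dictionary cache needs to evaluate the virtual column later.
    std::map<unsigned, std::vector<unsigned>> base_cols;
    for (const sys_virtual_rec& r : recs) {
        base_cols[r.pos].push_back(r.base_pos);
    }

    for (const auto& entry : base_cols) {
        std::cout << "virtual col " << entry.first << " depends on:";
        for (unsigned b : entry.second) std::cout << " " << b;
        std::cout << "\n";
    }
}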
Also loads all foreign key constraints where the foreign key is in the table or where a foreign key references columns in this table. +@param[in] name Table name in the dbname/tablename format +@param[in] cached true=add to cache, false=do not +@param[in] ignore_err Error to be ignored when loading + table and its index definition @return table, NULL if does not exist; if the table is stored in an -.ibd file, but the file does not exist, then we set the -ibd_file_missing flag TRUE in the table object we return */ -UNIV_INTERN +.ibd file, but the file does not exist, then we set the ibd_file_missing +flag in the table object we return. */ dict_table_t* dict_load_table( -/*============*/ - const char* name, /*!< in: table name in the - databasename/tablename format */ - ibool cached, /*!< in: TRUE=add to cache, FALSE=do not */ + const char* name, + bool cached, dict_err_ignore_t ignore_err); - /*!< in: error to be ignored when loading - table and its indexes' definition */ + /***********************************************************************//** Loads a table object based on the table id. -@return table; NULL if table does not exist */ -UNIV_INTERN +@return table; NULL if table does not exist */ dict_table_t* dict_load_table_on_id( /*==================*/ @@ -218,7 +232,6 @@ dict_load_table_on_id( This function is called when the database is booted. Loads system table index definitions except for the clustered index which is added to the dictionary cache at booting before calling this function. */ -UNIV_INTERN void dict_load_sys_table( /*================*/ @@ -226,11 +239,13 @@ dict_load_sys_table( /***********************************************************************//** Loads foreign key constraints where the table is either the foreign key holder or where the table is referenced by a foreign key. Adds these -constraints to the data dictionary. Note that we know that the dictionary -cache already contains all constraints where the other relevant table is -already in the dictionary cache. -@return DB_SUCCESS or error code */ -UNIV_INTERN +constraints to the data dictionary. + +The foreign key constraint is loaded only if the referenced table is also +in the dictionary cache. If the referenced table is not in dictionary +cache, then it is added to the output parameter (fk_tables). + +@return DB_SUCCESS or error code */ dberr_t dict_load_foreigns( /*===============*/ @@ -242,20 +257,16 @@ dict_load_foreigns( chained by FK */ bool check_charsets, /*!< in: whether to check charset compatibility */ - dict_err_ignore_t ignore_err) /*!< in: error to be ignored */ - MY_ATTRIBUTE((nonnull(1), warn_unused_result)); -/********************************************************************//** -Prints to the standard output information on all tables found in the data -dictionary system table. */ -UNIV_INTERN -void -dict_print(void); -/*============*/ + dict_err_ignore_t ignore_err, /*!< in: error to be ignored */ + dict_names_t& fk_tables) /*!< out: stack of table names + which must be loaded + subsequently to load all the + foreign key constraints. */ + __attribute__((nonnull(1), warn_unused_result)); /********************************************************************//** This function opens a system table, and return the first record. 
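dict_load_foreigns() above no longer assumes referenced tables are already cached; missing names go into the fk_tables deque so the caller can load them afterwards instead of recursing. A standalone sketch of that loop (not part of the patch; the toy catalog and names are invented):

#include <deque>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

// Toy catalog: each table lists the tables it references via foreign keys.
static const std::map<std::string, std::vector<std::string> > kForeignKeys = {
    {"db/child",       {"db/parent"}},
    {"db/parent",      {"db/grandparent"}},
    {"db/grandparent", {}},
};

// Loading one table may discover referenced tables that are not yet in the
// cache; their names are pushed onto a deque (like dict_names_t) and loaded
// in later iterations, so the dictionary code never recurses.
void load_table_and_fk_closure(const std::string& name,
                               std::set<std::string>& cache) {
    std::deque<std::string> fk_tables;
    fk_tables.push_back(name);
    while (!fk_tables.empty()) {
        const std::string next = fk_tables.front();
        fk_tables.pop_front();
        if (!cache.insert(next).second) {
            continue;  // already in the dictionary cache
        }
        std::cout << "loaded " << next << "\n";
        for (const std::string& ref : kForeignKeys.at(next)) {
            if (cache.count(ref) == 0) {
                fk_tables.push_back(ref);  // defer, do not recurse
            }
        }
    }
}

int main() {
    std::set<std::string> cache;
    load_table_and_fk_closure("db/child", cache);
}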
-@return first record of the system table */ -UNIV_INTERN +@return first record of the system table */ const rec_t* dict_startscan_system( /*==================*/ @@ -265,8 +276,7 @@ dict_startscan_system( dict_system_id_t system_id); /*!< in: which system table to open */ /********************************************************************//** This function get the next system table record as we scan the table. -@return the record if found, NULL if end of scan. */ -UNIV_INTERN +@return the record if found, NULL if end of scan. */ const rec_t* dict_getnext_system( /*================*/ @@ -278,7 +288,6 @@ This function processes one SYS_TABLES record and populate the dict_table_t struct for the table. Extracted out of dict_print() to be used by both monitor table output and information schema innodb_sys_tables output. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_tables_rec_and_mtr_commit( /*=======================================*/ @@ -296,7 +305,6 @@ This function parses a SYS_INDEXES record and populate a dict_index_t structure with the information from the record. For detail information about SYS_INDEXES fields, please refer to dict_boot() function. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_indexes_rec( /*=========================*/ @@ -309,7 +317,6 @@ dict_process_sys_indexes_rec( This function parses a SYS_COLUMNS record and populate a dict_column_t structure with the information from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_columns_rec( /*=========================*/ @@ -317,12 +324,29 @@ dict_process_sys_columns_rec( const rec_t* rec, /*!< in: current SYS_COLUMNS rec */ dict_col_t* column, /*!< out: dict_col_t to be filled */ table_id_t* table_id, /*!< out: table id */ - const char** col_name); /*!< out: column name */ + const char** col_name, /*!< out: column name */ + ulint* nth_v_col); /*!< out: if virtual col, this is + records its sequence number */ + +/** This function parses a SYS_VIRTUAL record and extract virtual column +information +@param[in,out] heap heap memory +@param[in] rec current SYS_COLUMNS rec +@param[in,out] table_id table id +@param[in,out] pos virtual column position +@param[in,out] base_pos base column position +@return error message, or NULL on success */ +const char* +dict_process_sys_virtual_rec( + mem_heap_t* heap, + const rec_t* rec, + table_id_t* table_id, + ulint* pos, + ulint* base_pos); /********************************************************************//** This function parses a SYS_FIELDS record and populate a dict_field_t structure with the information from the record. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_fields_rec( /*========================*/ @@ -338,7 +362,6 @@ This function parses a SYS_FOREIGN record and populate a dict_foreign_t structure with the information from the record. For detail information about SYS_FOREIGN fields, please refer to dict_load_foreign() function @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_foreign_rec( /*=========================*/ @@ -350,7 +373,6 @@ dict_process_sys_foreign_rec( This function parses a SYS_FOREIGN_COLS record and extract necessary information from the record and return to caller. 
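The dict_startscan_system()/dict_getnext_system() pair above, together with the dict_process_sys_*_rec() parsers, implements a scan-and-parse loop over the SYS_* tables. A heavily simplified standalone sketch of that control flow (not part of the patch; cursors, mini-transactions and the real record format are not modeled):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for a SYS_TABLES record.
struct sys_rec { std::string name; };

// Parsers return an error message string, or NULL on success, matching the
// convention of the dict_process_sys_*_rec() functions.
const char* process_sys_tables_rec(const sys_rec& rec, std::string* table_name) {
    if (rec.name.empty()) {
        return "SYS_TABLES record is empty";
    }
    *table_name = rec.name;
    return nullptr;
}

int main() {
    // The scan pattern: fetch the first record, process it, fetch the next,
    // until the "cursor" is exhausted; bad records are reported and skipped.
    const std::vector<sys_rec> sys_tables = {{"db/t1"}, {""}, {"db/t2"}};
    for (std::size_t i = 0; i < sys_tables.size(); i++) {
        std::string name;
        if (const char* err = process_sys_tables_rec(sys_tables[i], &name)) {
            std::cerr << "skipping record " << i << ": " << err << "\n";
            continue;
        }
        std::cout << "table: " << name << "\n";
    }
}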
@return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_foreign_col_rec( /*=============================*/ @@ -365,7 +387,6 @@ dict_process_sys_foreign_col_rec( This function parses a SYS_TABLESPACES record, extracts necessary information from the record and returns to caller. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_tablespaces( /*=========================*/ @@ -378,7 +399,6 @@ dict_process_sys_tablespaces( This function parses a SYS_DATAFILES record, extracts necessary information from the record and returns to caller. @return error message, or NULL on success */ -UNIV_INTERN const char* dict_process_sys_datafiles( /*=======================*/ @@ -386,40 +406,29 @@ dict_process_sys_datafiles( const rec_t* rec, /*!< in: current SYS_DATAFILES rec */ ulint* space, /*!< out: pace id */ const char** path); /*!< out: datafile path */ -/********************************************************************//** -Get the filepath for a spaceid from SYS_DATAFILES. This function provides -a temporary heap which is used for the table lookup, but not for the path. -The caller must free the memory for the path returned. This function can -return NULL if the space ID is not found in SYS_DATAFILES, then the caller -will assume that the ibd file is in the normal datadir. -@return own: A copy of the first datafile found in SYS_DATAFILES.PATH for -the given space ID. NULL if space ID is zero or not found. */ -UNIV_INTERN -char* -dict_get_first_path( -/*================*/ - ulint space, /*!< in: space id */ - const char* name); /*!< in: tablespace name */ -/********************************************************************//** -Update the record for space_id in SYS_TABLESPACES to this filepath. -@return DB_SUCCESS if OK, dberr_t if the insert failed */ -UNIV_INTERN + +/** Update the record for space_id in SYS_TABLESPACES to this filepath. +@param[in] space_id Tablespace ID +@param[in] filepath Tablespace filepath +@return DB_SUCCESS if OK, dberr_t if the insert failed */ dberr_t dict_update_filepath( -/*=================*/ - ulint space_id, /*!< in: space id */ - const char* filepath); /*!< in: filepath */ -/********************************************************************//** -Insert records into SYS_TABLESPACES and SYS_DATAFILES. -@return DB_SUCCESS if OK, dberr_t if the insert failed */ -UNIV_INTERN + ulint space_id, + const char* filepath); + +/** Replace records in SYS_TABLESPACES and SYS_DATAFILES associated with +the given space_id using an independent transaction. 
+@param[in] space_id Tablespace ID +@param[in] name Tablespace name +@param[in] filepath First filepath +@param[in] fsp_flags Tablespace flags +@return DB_SUCCESS if OK, dberr_t if the insert failed */ dberr_t -dict_insert_tablespace_and_filepath( -/*================================*/ - ulint space, /*!< in: space id */ - const char* name, /*!< in: talespace name */ - const char* filepath, /*!< in: filepath */ - ulint fsp_flags); /*!< in: tablespace flags */ +dict_replace_tablespace_and_filepath( + ulint space_id, + const char* name, + const char* filepath, + ulint fsp_flags); #ifndef UNIV_NONINL #include "dict0load.ic" diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index f964447fb8f..98ce7d3bd33 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -41,12 +41,13 @@ Created 1/8/1996 Heikki Tuuri # include "sync0rw.h" #endif /* !UNIV_HOTBACKUP */ #include "ut0mem.h" -#include "ut0lst.h" #include "ut0rnd.h" #include "ut0byte.h" #include "hash0hash.h" #include "trx0types.h" #include "fts0fts.h" +#include "buf0buf.h" +#include "gis0type.h" #include "os0once.h" #include "fil0fil.h" #include @@ -62,17 +63,20 @@ struct ib_rbt_t; /** Type flags of an index: OR'ing of the flags is allowed to define a combination of types */ /* @{ */ -#define DICT_CLUSTERED 1 /*!< clustered index */ +#define DICT_CLUSTERED 1 /*!< clustered index; for other than + auto-generated clustered indexes, + also DICT_UNIQUE will be set */ #define DICT_UNIQUE 2 /*!< unique index */ -#define DICT_UNIVERSAL 4 /*!< index which can contain records from any - other index */ #define DICT_IBUF 8 /*!< insert buffer tree */ #define DICT_CORRUPT 16 /*!< bit to store the corrupted flag in SYS_INDEXES.TYPE */ #define DICT_FTS 32 /* FTS index; can't be combined with the other flags */ +#define DICT_SPATIAL 64 /* SPATIAL index; can't be combined with the + other flags */ +#define DICT_VIRTUAL 128 /* Index on Virtual column */ -#define DICT_IT_BITS 6 /*!< number of bits used for +#define DICT_IT_BITS 8 /*!< number of bits used for SYS_INDEXES.TYPE */ /* @} */ @@ -115,20 +119,31 @@ the Compact page format is used, i.e ROW_FORMAT != REDUNDANT */ /** Width of the COMPACT flag */ #define DICT_TF_WIDTH_COMPACT 1 + /** Width of the ZIP_SSIZE flag */ #define DICT_TF_WIDTH_ZIP_SSIZE 4 + /** Width of the ATOMIC_BLOBS flag. The Antelope file formats broke up BLOB and TEXT fields, storing the first 768 bytes in the clustered index. -Brracuda row formats store the whole blob or text field off-page atomically. +Barracuda row formats store the whole blob or text field off-page atomically. Secondary indexes are created from this external data using row_ext_t to cache the BLOB prefixes. */ #define DICT_TF_WIDTH_ATOMIC_BLOBS 1 + /** If a table is created with the MYSQL option DATA DIRECTORY and innodb-file-per-table, an older engine will not be able to find that table. This flag prevents older engines from attempting to open the table and allows InnoDB to update_create_info() accordingly. */ #define DICT_TF_WIDTH_DATA_DIR 1 +/** Width of the SHARED tablespace flag. +It is used to identify tables that exist inside a shared general tablespace. +If a table is created with the TABLESPACE=tsname option, an older engine will +not be able to find that table. This flag prevents older engines from attempting +to open the table and allows InnoDB to quickly find the tablespace. 
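DICT_UNIVERSAL (value 4) is retired, and the two new index types push SYS_INDEXES.TYPE from 6 to 8 bits. A small standalone check of the widened bit field, with the constant values copied from the definitions above; the surrounding dictionary structures are not reproduced:

    #include <cassert>
    #include <cstdio>

    /* Index type bits, values copied from the header. */
    enum {
        DICT_CLUSTERED = 1,
        DICT_UNIQUE    = 2,
        DICT_IBUF      = 8,
        DICT_CORRUPT   = 16,
        DICT_FTS       = 32,
        DICT_SPATIAL   = 64,
        DICT_VIRTUAL   = 128
    };
    static const unsigned DICT_IT_BITS = 8; /* width of SYS_INDEXES.TYPE in dict_index_t */

    int main()
    {
        /* A user-defined primary key: clustered and unique. */
        unsigned pk_type = DICT_CLUSTERED | DICT_UNIQUE;

        /* A secondary index covering a virtual column. */
        unsigned vidx_type = DICT_VIRTUAL;

        /* OR of every defined bit still fits in the 8-bit field
           (a capacity check only, not a meaningful type value). */
        unsigned all = DICT_CLUSTERED | DICT_UNIQUE | DICT_IBUF | DICT_CORRUPT
                     | DICT_FTS | DICT_SPATIAL | DICT_VIRTUAL;
        assert(all < (1u << DICT_IT_BITS));

        std::printf("pk=%#x vidx=%#x all=%#x\n", pk_type, vidx_type, all);
        return 0;
    }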
*/ + +#define DICT_TF_WIDTH_SHARED_SPACE 1 + /** Width of the page compression flag */ @@ -148,14 +163,15 @@ DEFAULT=0, ON = 1, OFF = 2 #define DICT_TF_WIDTH_ATOMIC_WRITES 2 /** Width of all the currently known table flags */ -#define DICT_TF_BITS (DICT_TF_WIDTH_COMPACT \ - + DICT_TF_WIDTH_ZIP_SSIZE \ - + DICT_TF_WIDTH_ATOMIC_BLOBS \ - + DICT_TF_WIDTH_DATA_DIR \ - + DICT_TF_WIDTH_PAGE_COMPRESSION \ - + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \ - + DICT_TF_WIDTH_ATOMIC_WRITES \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION \ +#define DICT_TF_BITS (DICT_TF_WIDTH_COMPACT \ + + DICT_TF_WIDTH_ZIP_SSIZE \ + + DICT_TF_WIDTH_ATOMIC_BLOBS \ + + DICT_TF_WIDTH_DATA_DIR \ + + DICT_TF_WIDTH_SHARED_SPACE \ + + DICT_TF_WIDTH_PAGE_COMPRESSION \ + + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \ + + DICT_TF_WIDTH_ATOMIC_WRITES \ + + DICT_TF_WIDTH_PAGE_ENCRYPTION \ + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) /** A mask of all the known/used bits in table flags */ @@ -172,9 +188,12 @@ DEFAULT=0, ON = 1, OFF = 2 /** Zero relative shift position of the DATA_DIR field */ #define DICT_TF_POS_DATA_DIR (DICT_TF_POS_ATOMIC_BLOBS \ + DICT_TF_WIDTH_ATOMIC_BLOBS) +/** Zero relative shift position of the SHARED TABLESPACE field */ +#define DICT_TF_POS_SHARED_SPACE (DICT_TF_POS_DATA_DIR \ + + DICT_TF_WIDTH_DATA_DIR) /** Zero relative shift position of the PAGE_COMPRESSION field */ -#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_DATA_DIR \ - + DICT_TF_WIDTH_DATA_DIR) +#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_SHARED_SPACE \ + + DICT_TF_WIDTH_SHARED_SPACE) /** Zero relative shift position of the PAGE_COMPRESSION_LEVEL field */ #define DICT_TF_POS_PAGE_COMPRESSION_LEVEL (DICT_TF_POS_PAGE_COMPRESSION \ + DICT_TF_WIDTH_PAGE_COMPRESSION) @@ -183,12 +202,12 @@ DEFAULT=0, ON = 1, OFF = 2 + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL) /** Zero relative shift position of the PAGE_ENCRYPTION field */ #define DICT_TF_POS_PAGE_ENCRYPTION (DICT_TF_POS_ATOMIC_WRITES \ - + DICT_TF_WIDTH_ATOMIC_WRITES) + + DICT_TF_WIDTH_ATOMIC_WRITES) /** Zero relative shift position of the PAGE_ENCRYPTION_KEY field */ #define DICT_TF_POS_PAGE_ENCRYPTION_KEY (DICT_TF_POS_PAGE_ENCRYPTION \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION) + + DICT_TF_WIDTH_PAGE_ENCRYPTION) #define DICT_TF_POS_UNUSED (DICT_TF_POS_PAGE_ENCRYPTION_KEY \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) + + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) /** Bit mask of the COMPACT field */ #define DICT_TF_MASK_COMPACT \ @@ -206,6 +225,10 @@ DEFAULT=0, ON = 1, OFF = 2 #define DICT_TF_MASK_DATA_DIR \ ((~(~0U << DICT_TF_WIDTH_DATA_DIR)) \ << DICT_TF_POS_DATA_DIR) +/** Bit mask of the SHARED_SPACE field */ +#define DICT_TF_MASK_SHARED_SPACE \ + ((~(~0U << DICT_TF_WIDTH_SHARED_SPACE)) \ + << DICT_TF_POS_SHARED_SPACE) /** Bit mask of the PAGE_COMPRESSION field */ #define DICT_TF_MASK_PAGE_COMPRESSION \ ((~(~0 << DICT_TF_WIDTH_PAGE_COMPRESSION)) \ @@ -239,10 +262,14 @@ DEFAULT=0, ON = 1, OFF = 2 #define DICT_TF_HAS_ATOMIC_BLOBS(flags) \ ((flags & DICT_TF_MASK_ATOMIC_BLOBS) \ >> DICT_TF_POS_ATOMIC_BLOBS) -/** Return the value of the ATOMIC_BLOBS field */ +/** Return the value of the DATA_DIR field */ #define DICT_TF_HAS_DATA_DIR(flags) \ ((flags & DICT_TF_MASK_DATA_DIR) \ >> DICT_TF_POS_DATA_DIR) +/** Return the value of the SHARED_SPACE field */ +#define DICT_TF_HAS_SHARED_SPACE(flags) \ + ((flags & DICT_TF_MASK_SHARED_SPACE) \ + >> DICT_TF_POS_SHARED_SPACE) /** Return the value of the PAGE_COMPRESSION field */ #define DICT_TF_GET_PAGE_COMPRESSION(flags) \ ((flags & DICT_TF_MASK_PAGE_COMPRESSION) \ @@ -278,21 +305,26 @@ ROW_FORMAT=REDUNDANT. 
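The SHARED_SPACE flag is spliced in between DATA_DIR and PAGE_COMPRESSION, so every later shift position moves up by one bit. A standalone sketch of the width/position/mask arithmetic; it assumes the COMPACT field starts at bit 0, since its POS macro is defined earlier in the header and not shown here:

    #include <cassert>
    #include <cstdio>

    /* Field widths as declared above. */
    static const unsigned W_COMPACT      = 1;
    static const unsigned W_ZIP_SSIZE    = 4;
    static const unsigned W_ATOMIC_BLOBS = 1;
    static const unsigned W_DATA_DIR     = 1;
    static const unsigned W_SHARED_SPACE = 1;

    /* Zero-relative shift positions, derived cumulatively; COMPACT at bit 0
       is assumed for this sketch. */
    static const unsigned POS_COMPACT      = 0;
    static const unsigned POS_ZIP_SSIZE    = POS_COMPACT + W_COMPACT;
    static const unsigned POS_ATOMIC_BLOBS = POS_ZIP_SSIZE + W_ZIP_SSIZE;
    static const unsigned POS_DATA_DIR     = POS_ATOMIC_BLOBS + W_ATOMIC_BLOBS;
    static const unsigned POS_SHARED_SPACE = POS_DATA_DIR + W_DATA_DIR;

    /* Same mask construction the header uses: 'width' one-bits shifted into place. */
    static unsigned mask(unsigned width, unsigned pos)
    {
        return (~(~0U << width)) << pos;
    }

    int main()
    {
        unsigned flags = 0;

        /* Mark the table as living in a shared general tablespace. */
        flags |= 1u << POS_SHARED_SPACE;

        /* Equivalent of DICT_TF_HAS_SHARED_SPACE(flags). */
        unsigned has_shared = (flags & mask(W_SHARED_SPACE, POS_SHARED_SPACE))
                              >> POS_SHARED_SPACE;
        assert(has_shared == 1);

        /* DATA_DIR sits at bit 6, so SHARED_SPACE lands at bit 7. */
        std::printf("POS_DATA_DIR=%u POS_SHARED_SPACE=%u has_shared=%u\n",
                    POS_DATA_DIR, POS_SHARED_SPACE, has_shared);
        return 0;
    }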
InnoDB engines do not check these flags for unknown bits in order to protect backward incompatibility. */ /* @{ */ /** Total number of bits in table->flags2. */ -#define DICT_TF2_BITS 7 -#define DICT_TF2_BIT_MASK ~(~0U << DICT_TF2_BITS) +#define DICT_TF2_BITS 8 +#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS) +#define DICT_TF2_BIT_MASK ~DICT_TF2_UNUSED_BIT_MASK /** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */ #define DICT_TF2_TEMPORARY 1 + /** The table has an internal defined DOC ID column */ #define DICT_TF2_FTS_HAS_DOC_ID 2 + /** The table has an FTS index */ #define DICT_TF2_FTS 4 + /** Need to add Doc ID column for FTS index build. This is a transient bit for index build */ #define DICT_TF2_FTS_ADD_DOC_ID 8 + /** This bit is used during table creation to indicate that it will use its own tablespace instead of the system tablespace. */ -#define DICT_TF2_USE_TABLESPACE 16 +#define DICT_TF2_USE_FILE_PER_TABLE 16 /** Set when we discard/detach the tablespace */ #define DICT_TF2_DISCARDED 32 @@ -300,15 +332,22 @@ use its own tablespace instead of the system tablespace. */ /** This bit is set if all aux table names (both common tables and index tables) of a FTS table are in HEX format. */ #define DICT_TF2_FTS_AUX_HEX_NAME 64 + +/** Intrinsic table bit +Intrinsic table is table created internally by MySQL modules viz. Optimizer, +FTS, etc.... Intrinsic table has all the properties of the normal table except +it is not created by user and so not visible to end-user. */ +#define DICT_TF2_INTRINSIC 128 + /* @} */ -#define DICT_TF2_FLAG_SET(table, flag) \ +#define DICT_TF2_FLAG_SET(table, flag) \ (table->flags2 |= (flag)) -#define DICT_TF2_FLAG_IS_SET(table, flag) \ +#define DICT_TF2_FLAG_IS_SET(table, flag) \ (table->flags2 & (flag)) -#define DICT_TF2_FLAG_UNSET(table, flag) \ +#define DICT_TF2_FLAG_UNSET(table, flag) \ (table->flags2 &= ~(flag)) /** Tables could be chained together with Foreign key constraint. When @@ -329,15 +368,17 @@ before proceeds. */ /**********************************************************************//** Creates a table memory object. -@return own: table object */ -UNIV_INTERN +@return own: table object */ dict_table_t* dict_mem_table_create( /*==================*/ const char* name, /*!< in: table name */ ulint space, /*!< in: space where the clustered index of the table is placed */ - ulint n_cols, /*!< in: number of columns */ + ulint n_cols, /*!< in: total number of columns + including virtual and non-virtual + columns */ + ulint n_v_cols, /*!< in: number of virtual columns */ ulint flags, /*!< in: table flags */ ulint flags2); /*!< in: table flags2 */ /**********************************************************************//** @@ -350,14 +391,12 @@ dict_mem_table_is_system( char *name); /*!< in: table name */ /****************************************************************//** Free a table memory object. */ -UNIV_INTERN void dict_mem_table_free( /*================*/ dict_table_t* table); /*!< in: table */ /**********************************************************************//** Adds a column definition to a table. */ -UNIV_INTERN void dict_mem_table_add_col( /*===================*/ @@ -368,21 +407,44 @@ dict_mem_table_add_col( ulint prtype, /*!< in: precise type */ ulint len) /*!< in: precision */ MY_ATTRIBUTE((nonnull(1))); +/** Adds a virtual column definition to a table. +@param[in,out] table table +@param[in] heap temporary memory heap, or NULL. It is + used to store name when we have not finished + adding all columns. 
When all columns are + added, the whole name will copy to memory from + table->heap +@param[in] name column name +@param[in] mtype main datatype +@param[in] prtype precise type +@param[in] len length +@param[in] pos position in a table +@param[in] num_base number of base columns +@return the virtual column definition */ +dict_v_col_t* +dict_mem_table_add_v_col( + dict_table_t* table, + mem_heap_t* heap, + const char* name, + ulint mtype, + ulint prtype, + ulint len, + ulint pos, + ulint num_base); /**********************************************************************//** Renames a column of a table in the data dictionary cache. */ -UNIV_INTERN void dict_mem_table_col_rename( /*======================*/ dict_table_t* table, /*!< in/out: table */ unsigned nth_col,/*!< in: column index */ const char* from, /*!< in: old column name */ - const char* to) /*!< in: new column name */ - MY_ATTRIBUTE((nonnull)); + const char* to, /*!< in: new column name */ + bool is_virtual); + /*!< in: if this is a virtual column */ /**********************************************************************//** This function populates a dict_col_t memory structure with supplied information. */ -UNIV_INTERN void dict_mem_fill_column_struct( /*========================*/ @@ -411,8 +473,7 @@ dict_mem_fill_index_struct( ulint n_fields); /*!< in: number of fields */ /**********************************************************************//** Creates an index memory object. -@return own: index object */ -UNIV_INTERN +@return own: index object */ dict_index_t* dict_mem_index_create( /*==================*/ @@ -428,7 +489,6 @@ dict_mem_index_create( Adds a field definition to an index. NOTE: does not take a copy of the column name if the field is a column. The memory occupied by the column name may be released only after publishing the index. */ -UNIV_INTERN void dict_mem_index_add_field( /*=====================*/ @@ -439,15 +499,13 @@ dict_mem_index_add_field( INDEX (textcol(25)) */ /**********************************************************************//** Frees an index memory object. */ -UNIV_INTERN void dict_mem_index_free( /*================*/ dict_index_t* index); /*!< in: index */ /**********************************************************************//** Creates and initializes a foreign constraint memory object. -@return own: foreign constraint struct */ -UNIV_INTERN +@return own: foreign constraint struct */ dict_foreign_t* dict_mem_foreign_create(void); /*=========================*/ @@ -457,7 +515,6 @@ Sets the foreign_table_name_lookup pointer based on the value of lower_case_table_names. If that is 0 or 1, foreign_table_name_lookup will point to foreign_table_name. If 2, then another string is allocated from the heap and set to lower case. */ -UNIV_INTERN void dict_mem_foreign_table_name_lookup_set( /*===================================*/ @@ -469,7 +526,6 @@ Sets the referenced_table_name_lookup pointer based on the value of lower_case_table_names. If that is 0 or 1, referenced_table_name_lookup will point to referenced_table_name. If 2, then another string is allocated from the heap and set to lower case. */ -UNIV_INTERN void dict_mem_referenced_table_name_lookup_set( /*======================================*/ @@ -488,7 +544,6 @@ reasonably unique temporary file name. 
@param[in] dbtab Table name in the form database/table name @param[in] id Table id @return A unique temporary tablename suitable for InnoDB use */ -UNIV_INTERN char* dict_mem_create_temporary_tablename( mem_heap_t* heap, @@ -496,10 +551,59 @@ dict_mem_create_temporary_tablename( table_id_t id); /** Initialize dict memory variables */ - void dict_mem_init(void); +/** SQL identifier name wrapper for pretty-printing */ +class id_name_t +{ +public: + /** Default constructor */ + id_name_t() + : m_name() + {} + /** Constructor + @param[in] name identifier to assign */ + explicit id_name_t( + const char* name) + : m_name(name) + {} + + /** Assignment operator + @param[in] name identifier to assign */ + id_name_t& operator=( + const char* name) + { + m_name = name; + return(*this); + } + + /** Implicit type conversion + @return the name */ + operator const char*() const + { + return(m_name); + } + + /** Explicit type conversion + @return the name */ + const char* operator()() const + { + return(m_name); + } + +private: + /** The name in internal representation */ + const char* m_name; +}; + +/** Table name wrapper for pretty-printing */ +struct table_name_t +{ + /** The name in internal representation */ + char* m_name; +}; + /** Data structure for a column in a table */ struct dict_col_t{ /*----------------------*/ @@ -545,6 +649,53 @@ struct dict_col_t{ 3072 for Barracuda table */ }; +/** Index information put in a list of virtual column structure. Index +id and virtual column position in the index will be logged. +There can be multiple entries for a given index, with a different position. */ +struct dict_v_idx_t { + /** active index on the column */ + dict_index_t* index; + + /** position in this index */ + ulint nth_field; +}; + +/** Index list to put in dict_v_col_t */ +typedef std::list > dict_v_idx_list; + +/** Data structure for a virtual column in a table */ +struct dict_v_col_t{ + /** column structure */ + dict_col_t m_col; + + /** array of base column ptr */ + dict_col_t** base_col; + + /** number of base column */ + ulint num_base; + + /** column pos in table */ + ulint v_pos; + + /** Virtual index list, and column position in the index, + the allocated memory is not from table->heap, nor it is + tracked by dict_sys->size */ + dict_v_idx_list* v_indexes; + +}; + +/** Data structure for newly added virtual column in a table */ +struct dict_add_v_col_t{ + /** number of new virtual column */ + ulint n_v_col; + + /** column structures */ + const dict_v_col_t* v_col; + + /** new col names */ + const char** v_col_name; +}; + /** @brief DICT_ANTELOPE_MAX_INDEX_COL_LEN is measured in bytes and is the maximum indexed column length (or indexed prefix length) in ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT. Also, in any format, @@ -582,7 +733,7 @@ be REC_VERSION_56_MAX_INDEX_COL_LEN (3072) bytes */ /** Data structure for a field in an index */ struct dict_field_t{ dict_col_t* col; /*!< pointer to the table column */ - const char* name; /*!< name of the column */ + id_name_t name; /*!< name of the column */ unsigned prefix_len:12; /*!< 0 or the length of the column prefix in bytes in a MySQL index of type, e.g., INDEX (textcol(25)); @@ -634,12 +785,11 @@ extern ulong zip_failure_threshold_pct; compression failures */ extern ulong zip_pad_max; -/** Data structure to hold information about how much space in +/** Data structure to hold information about about how much space in an uncompressed page should be left as padding to avoid compression failures. 
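id_name_t is a non-owning wrapper around a char pointer whose main job is to make identifier printing explicit. A standalone sketch of how it behaves, with the class body abridged from the definition above; the sample names are made up:

    #include <cstdio>
    #include <cstring>

    /* Abridged copy of the wrapper defined above: it stores only the pointer,
       so the wrapped string must outlive the wrapper. */
    class id_name_t {
    public:
        id_name_t() : m_name() {}
        explicit id_name_t(const char* name) : m_name(name) {}
        id_name_t& operator=(const char* name) { m_name = name; return *this; }
        operator const char*() const { return m_name; }     /* implicit */
        const char* operator()() const { return m_name; }   /* explicit */
    private:
        const char* m_name;
    };

    int main()
    {
        id_name_t index_name("PRIMARY");

        /* Implicit conversion lets it flow into existing const char* APIs. */
        std::printf("index: %s\n", static_cast<const char*>(index_name));

        /* operator() makes the conversion visible at the call site. */
        if (std::strcmp(index_name(), "PRIMARY") == 0) {
            std::printf("matches PRIMARY\n");
        }

        index_name = "GEN_CLUST_INDEX";   /* reassignment just swaps the pointer */
        std::printf("index: %s\n", index_name());
        return 0;
    }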
This estimate is based on a self-adapting heuristic. */ struct zip_pad_info_t { - os_fast_mutex_t* - mutex; /*!< mutex protecting the info */ + SysMutex* mutex; /*!< mutex protecting the info */ ulint pad; /*!< number of bytes used as pad */ ulint success;/*!< successful compression ops during current round */ @@ -656,22 +806,124 @@ struct zip_pad_info_t { a certain index.*/ #define STAT_DEFRAG_DATA_SIZE_N_SAMPLE 10 +/** If key is fixed length key then cache the record offsets on first +computation. This will help save computation cycle that generate same +redundant data. */ +class rec_cache_t +{ +public: + /** Constructor */ + rec_cache_t() + : + rec_size(), + offsets(), + sz_of_offsets(), + fixed_len_key(), + offsets_cached(), + key_has_null_cols() + { + /* Do Nothing. */ + } + +public: + /** Record size. (for fixed length key record size is constant) */ + ulint rec_size; + + /** Holds reference to cached offsets for record. */ + ulint* offsets; + + /** Size of offset array */ + uint32_t sz_of_offsets; + + /** If true, then key is fixed length key. */ + bool fixed_len_key; + + /** If true, then offset has been cached for re-use. */ + bool offsets_cached; + + /** If true, then key part can have columns that can take + NULL values. */ + bool key_has_null_cols; +}; + +/** Cache position of last inserted or selected record by caching record +and holding reference to the block where record resides. +Note: We don't commit mtr and hold it beyond a transaction lifetime as this is +a special case (intrinsic table) that are not shared accross connection. */ +class last_ops_cur_t +{ +public: + /** Constructor */ + last_ops_cur_t() + : + rec(), + block(), + mtr(), + disable_caching(), + invalid() + { + /* Do Nothing. */ + } + + /* Commit mtr and re-initialize cache record and block to NULL. */ + void release() + { + if (mtr.is_active()) { + mtr_commit(&mtr); + } + rec = NULL; + block = NULL; + invalid = false; + } + +public: + /** last inserted/selected record. */ + rec_t* rec; + + /** block where record reside. */ + buf_block_t* block; + + /** active mtr that will be re-used for next insert/select. */ + mtr_t mtr; + + /** disable caching. (disabled when table involves blob/text.) */ + bool disable_caching; + + /** If index structure is undergoing structural change viz. + split then invalidate the cached position as it would be no more + remain valid. Will be re-cached on post-split insert. */ + bool invalid; +}; + +/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default +system clustered index when there is no primary key. */ +const char innobase_index_reserve_name[] = "GEN_CLUST_INDEX"; + +/* Estimated number of offsets in records (based on columns) +to start with. */ +#define OFFS_IN_REC_NORMAL_SIZE 100 + /** Data structure for an index. Most fields will be initialized to 0, NULL or FALSE in dict_mem_index_create(). 
*/ struct dict_index_t{ index_id_t id; /*!< id of the index */ mem_heap_t* heap; /*!< memory heap */ - const char* name; /*!< index name */ + id_name_t name; /*!< index name */ const char* table_name;/*!< table name */ dict_table_t* table; /*!< back pointer to table */ #ifndef UNIV_HOTBACKUP unsigned space:32; /*!< space where the index tree is placed */ unsigned page:32;/*!< index tree root page number */ + unsigned merge_threshold:6; + /*!< In the pessimistic delete, if the page + data size drops below this limit in percent, + merging it to a neighbor is tried */ +# define DICT_INDEX_MERGE_THRESHOLD_DEFAULT 50 #endif /* !UNIV_HOTBACKUP */ unsigned type:DICT_IT_BITS; /*!< index type (DICT_CLUSTERED, DICT_UNIQUE, - DICT_UNIVERSAL, DICT_IBUF, DICT_CORRUPT) */ + DICT_IBUF, DICT_CORRUPT) */ #define MAX_KEY_LENGTH_BITS 12 unsigned trx_id_offset:MAX_KEY_LENGTH_BITS; /*!< position of the trx id column @@ -685,6 +937,18 @@ struct dict_index_t{ /*!< number of columns the user defined to be in the index: in the internal representation we add more columns */ + unsigned allow_duplicates:1; + /*!< if true, allow duplicate values + even if index is created with unique + constraint */ + unsigned nulls_equal:1; + /*!< if true, SQL NULL == SQL NULL */ + unsigned disable_ahi:1; + /*!< in true, then disable AHI. + Currently limited to intrinsic + temporary table as index id is not + unqiue for such table which is one of the + validation criterion for ahi. */ unsigned n_uniq:10;/*!< number of fields from the beginning which are enough to determine an index entry uniquely */ @@ -703,7 +967,21 @@ struct dict_index_t{ by dict_operation_lock and dict_sys->mutex. Other changes are protected by index->lock. */ + unsigned uncommitted:1; + /*!< a flag that is set for secondary indexes + that have not been committed to the + data dictionary yet */ + +#ifdef UNIV_DEBUG + uint32_t magic_n;/*!< magic number */ +/** Value of dict_index_t::magic_n */ +# define DICT_INDEX_MAGIC_N 76789786 +#endif dict_field_t* fields; /*!< array of field descriptions */ + st_mysql_ftparser* + parser; /*!< fulltext parser plugin */ + bool is_ngram; + /*!< true if it's ngram parser */ #ifndef UNIV_HOTBACKUP UT_LIST_NODE_T(dict_index_t) indexes;/*!< list of indexes of the table */ @@ -764,26 +1042,48 @@ struct dict_index_t{ /* in which slot the next sample should be saved. */ /* @} */ - rw_lock_t lock; /*!< read-write lock protecting the - upper levels of the index tree */ + last_ops_cur_t* last_ins_cur; + /*!< cache the last insert position. + Currently limited to auto-generated + clustered index on intrinsic table only. */ + last_ops_cur_t* last_sel_cur; + /*!< cache the last selected position + Currently limited to intrinsic table only. */ + rec_cache_t rec_cache; + /*!< cache the field that needs to be + re-computed on each insert. + Limited to intrinsic table as this is common + share and can't be used without protection + if table is accessible to multiple-threads. */ + rtr_ssn_t rtr_ssn;/*!< Node sequence number for RTree */ + rtr_info_track_t* + rtr_track;/*!< tracking all R-Tree search cursors */ trx_id_t trx_id; /*!< id of the transaction that created this index, or 0 if the index existed when InnoDB was started up */ zip_pad_info_t zip_pad;/*!< Information about state of compression failures and successes */ + rw_lock_t lock; /*!< read-write lock protecting the + upper levels of the index tree */ + + /** Determine if the index has been committed to the + data dictionary. 
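merge_threshold gives the fill-factor percentage below which a page is considered for merging with a neighbor after a pessimistic delete, defaulting to DICT_INDEX_MERGE_THRESHOLD_DEFAULT (50). A simplified standalone decision helper; the real check lives in the B-tree code, and the page size below is only illustrative:

    #include <cassert>
    #include <cstdio>

    static const unsigned DICT_INDEX_MERGE_THRESHOLD_DEFAULT = 50; /* percent */

    /* Decide whether a page should try to merge with a neighbor: its fill
       factor (in percent) has dropped below the merge threshold. */
    static bool should_try_merge(unsigned long data_size, unsigned long page_size,
                                 unsigned merge_threshold)
    {
        assert(page_size > 0 && merge_threshold <= 100);
        unsigned long fill_pct = data_size * 100 / page_size;
        return fill_pct < merge_threshold;
    }

    int main()
    {
        const unsigned long page = 16384;   /* illustrative 16K page */

        /* About 40% full: below the default 50% threshold, a merge is attempted. */
        std::printf("%d\n", should_try_merge(6553, page, DICT_INDEX_MERGE_THRESHOLD_DEFAULT));
        /* 75% full: no merge attempt. */
        std::printf("%d\n", should_try_merge(12288, page, DICT_INDEX_MERGE_THRESHOLD_DEFAULT));
        return 0;
    }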
+ @return whether the index definition has been committed */ + bool is_committed() const + { + ut_ad(!uncommitted || !(type & DICT_CLUSTERED)); + return(UNIV_LIKELY(!uncommitted)); + } + + /** Flag an index committed or uncommitted. + @param[in] committed whether the index is committed */ + void set_committed(bool committed) + { + ut_ad(!to_be_dropped); + ut_ad(committed || !(type & DICT_CLUSTERED)); + uncommitted = !committed; + } #endif /* !UNIV_HOTBACKUP */ -#ifdef UNIV_BLOB_DEBUG - ib_mutex_t blobs_mutex; - /*!< mutex protecting blobs */ - ib_rbt_t* blobs; /*!< map of (page_no,heap_no,field_no) - to first_blob_page_no; protected by - blobs_mutex; @see btr_blob_dbg_t */ -#endif /* UNIV_BLOB_DEBUG */ -#ifdef UNIV_DEBUG - ulint magic_n;/*!< magic number */ -/** Value of dict_index_t::magic_n */ -# define DICT_INDEX_MAGIC_N 76789786 -#endif }; /** The status of online index creation */ @@ -926,7 +1226,10 @@ struct dict_foreign_matches_id { const char* m_id; }; -typedef std::set dict_foreign_set; +typedef std::set< + dict_foreign_t*, + dict_foreign_compare, + ut_allocator > dict_foreign_set; std::ostream& operator<< (std::ostream& out, const dict_foreign_set& fk_set); @@ -1004,11 +1307,37 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */ #define DICT_FOREIGN_ON_UPDATE_NO_ACTION 32 /*!< ON UPDATE NO ACTION */ /* @} */ +/** Display an identifier. +@param[in,out] s output stream +@param[in] id_name SQL identifier (other than table name) +@return the output stream */ +std::ostream& +operator<<( + std::ostream& s, + const id_name_t& id_name); + +/** Display a table name. +@param[in,out] s output stream +@param[in] table_name table name +@return the output stream */ +std::ostream& +operator<<( + std::ostream& s, + const table_name_t& table_name); + +/** List of locks that different transactions have acquired on a table. This +list has a list node that is embedded in a nested union/structure. We have to +generate a specific template for it. */ + +typedef ut_list_base lock_table_t::*> + table_lock_list_t; + /* This flag is for sync SQL DDL and memcached DML. if table->memcached_sync_count == DICT_TABLE_IN_DDL means there's DDL running on the table, DML from memcached will be blocked. */ #define DICT_TABLE_IN_DDL -1 +struct innodb_col_templ_t; /** These are used when MySQL FRM and InnoDB data dictionary are in inconsistent state. */ typedef enum { @@ -1023,60 +1352,133 @@ typedef enum { /** Data structure for a database table. Most fields will be initialized to 0, NULL or FALSE in dict_mem_table_create(). */ -struct dict_table_t{ +struct dict_table_t { + /** Get reference count. + @return current value of n_ref_count */ + inline ulint get_ref_count() const; + + /** Acquire the table handle. */ + inline void acquire(); - table_id_t id; /*!< id of the table */ - mem_heap_t* heap; /*!< memory heap */ - char* name; /*!< table name */ void* thd; /*!< thd */ fil_space_crypt_t *crypt_data; /*!< crypt data if present */ - const char* dir_path_of_temp_table;/*!< NULL or the directory path - where a TEMPORARY table that was explicitly - created by a user should be placed if - innodb_file_per_table is defined in my.cnf; - in Unix this is usually /tmp/..., in Windows - temp\... */ - char* data_dir_path; /*!< NULL or the directory path - specified by DATA DIRECTORY */ - unsigned space:32; - /*!< space where the clustered index of the - table is placed */ - unsigned flags:DICT_TF_BITS; /*!< DICT_TF_... */ - unsigned flags2:DICT_TF2_BITS; /*!< DICT_TF2_... 
*/ - unsigned ibd_file_missing:1; - /*!< TRUE if this is in a single-table - tablespace and the .ibd file is missing; then - we must return in ha_innodb.cc an error if the - user tries to query such an orphaned table */ - unsigned cached:1;/*!< TRUE if the table object has been added - to the dictionary cache */ - unsigned to_be_dropped:1; - /*!< TRUE if the table is to be dropped, but - not yet actually dropped (could in the bk - drop list); It is turned on at the beginning - of row_drop_table_for_mysql() and turned off - just before we start to update system tables - for the drop. It is protected by - dict_operation_lock */ - unsigned n_def:10;/*!< number of columns defined so far */ - unsigned n_cols:10;/*!< number of columns */ - unsigned can_be_evicted:1; - /*!< TRUE if it's not an InnoDB system table - or a table that has no FK relationships */ - unsigned corrupted:1; - /*!< TRUE if table is corrupted */ - unsigned drop_aborted:1; - /*!< TRUE if some indexes should be dropped - after ONLINE_INDEX_ABORTED - or ONLINE_INDEX_ABORTED_DROPPED */ - dict_col_t* cols; /*!< array of column descriptions */ - const char* col_names; - /*!< Column names packed in a character string - "name1\0name2\0...nameN\0". Until - the string contains n_cols, it will be - allocated from a temporary heap. The final - string will be allocated from table->heap. */ + /** Release the table handle. */ + inline void release(); + + /** Id of the table. */ + table_id_t id; + + /** Memory heap. If you allocate from this heap after the table has + been created then be sure to account the allocation into + dict_sys->size. When closing the table we do something like + dict_sys->size -= mem_heap_get_size(table->heap) and if that is going + to become negative then we would assert. Something like this should do: + old_size = mem_heap_get_size() + mem_heap_alloc() + new_size = mem_heap_get_size() + dict_sys->size += new_size - old_size. */ + mem_heap_t* heap; + + /** Table name. */ + table_name_t name; + + /** NULL or the directory path where a TEMPORARY table that was + explicitly created by a user should be placed if innodb_file_per_table + is defined in my.cnf. In Unix this is usually "/tmp/...", + in Windows "temp\...". */ + const char* dir_path_of_temp_table; + + /** NULL or the directory path specified by DATA DIRECTORY. */ + char* data_dir_path; + + /** NULL or the tablespace name that this table is assigned to, + specified by the TABLESPACE option.*/ + id_name_t tablespace; + + /** Space where the clustered index of the table is placed. */ + uint32_t space; + + /** Stores information about: + 1 row format (redundant or compact), + 2 compressed page size (zip shift size), + 3 whether using atomic blobs, + 4 whether the table has been created with the option DATA DIRECTORY. + Use DICT_TF_GET_COMPACT(), DICT_TF_GET_ZIP_SSIZE(), + DICT_TF_HAS_ATOMIC_BLOBS() and DICT_TF_HAS_DATA_DIR() to parse this + flag. */ + unsigned flags:DICT_TF_BITS; + + /** Stores information about: + 1 whether the table has been created using CREATE TEMPORARY TABLE, + 2 whether the table has an internally defined DOC ID column, + 3 whether the table has a FTS index, + 4 whether DOC ID column need to be added to the FTS index, + 5 whether the table is being created its own tablespace, + 6 whether the table has been DISCARDed, + 7 whether the aux FTS tables names are in hex. + Use DICT_TF2_FLAG_IS_SET() to parse this flag. */ + unsigned flags2:DICT_TF2_BITS; + + /** TRUE if this is in a single-table tablespace and the .ibd file is + missing. 
Then we must return in ha_innodb.cc an error if the user + tries to query such an orphaned table. */ + unsigned ibd_file_missing:1; + + /** TRUE if the table object has been added to the dictionary cache. */ + unsigned cached:1; + + /** TRUE if the table is to be dropped, but not yet actually dropped + (could in the background drop list). It is turned on at the beginning + of row_drop_table_for_mysql() and turned off just before we start to + update system tables for the drop. It is protected by + dict_operation_lock. */ + unsigned to_be_dropped:1; + + /** Number of non-virtual columns defined so far. */ + unsigned n_def:10; + + /** Number of non-virtual columns. */ + unsigned n_cols:10; + + /** Number of total columns (inlcude virtual and non-virtual) */ + unsigned n_t_cols:10; + + /** Number of total columns defined so far. */ + unsigned n_t_def:10; + + /** Number of virtual columns defined so far. */ + unsigned n_v_def:10; + + /** Number of virtual columns. */ + unsigned n_v_cols:10; + + /** TRUE if it's not an InnoDB system table or a table that has no FK + relationships. */ + unsigned can_be_evicted:1; + + /** TRUE if table is corrupted. */ + unsigned corrupted:1; + + /** TRUE if some indexes should be dropped after ONLINE_INDEX_ABORTED + or ONLINE_INDEX_ABORTED_DROPPED. */ + unsigned drop_aborted:1; + + /** Array of column descriptions. */ + dict_col_t* cols; + + /** Array of virtual column descriptions. */ + dict_v_col_t* v_cols; + + /** Column names packed in a character string + "name1\0name2\0...nameN\0". Until the string contains n_cols, it will + be allocated from a temporary heap. The final string will be allocated + from table->heap. */ + const char* col_names; + + /** Virtual column names */ + const char* v_col_names; + bool is_system_db; /*!< True if the table belongs to a system database (mysql, information_schema or @@ -1086,177 +1488,168 @@ struct dict_table_t{ dictionary information and MySQL FRM information mismatch. */ #ifndef UNIV_HOTBACKUP - hash_node_t name_hash; /*!< hash chain node */ - hash_node_t id_hash; /*!< hash chain node */ - UT_LIST_BASE_NODE_T(dict_index_t) - indexes; /*!< list of indexes of the table */ - - dict_foreign_set foreign_set; - /*!< set of foreign key constraints - in the table; these refer to columns - in other tables */ - - dict_foreign_set referenced_set; - /*!< list of foreign key constraints - which refer to this table */ - - UT_LIST_NODE_T(dict_table_t) - table_LRU; /*!< node of the LRU list of tables */ - unsigned fk_max_recusive_level:8; - /*!< maximum recursive level we support when - loading tables chained together with FK - constraints. If exceeds this level, we will - stop loading child table into memory along with - its parent table */ - ulint n_foreign_key_checks_running; - /*!< count of how many foreign key check - operations are currently being performed - on the table: we cannot drop the table while - there are foreign key checks running on - it! */ - trx_id_t def_trx_id; - /*!< transaction id that last touched - the table definition, either when - loading the definition or CREATE - TABLE, or ALTER TABLE (prepare, - commit, and rollback phases) */ - trx_id_t query_cache_inv_trx_id; - /*!< transactions whose trx id is - smaller than this number are not - allowed to store to the MySQL query - cache or retrieve from it; when a trx - with undo logs commits, it sets this - to the value of the trx id counter for - the tables it had an IX lock on */ + /** Hash chain node. */ + hash_node_t name_hash; + + /** Hash chain node. 
*/ + hash_node_t id_hash; + + /** The FTS_DOC_ID_INDEX, or NULL if no fulltext indexes exist */ + dict_index_t* fts_doc_id_index; + + /** List of indexes of the table. */ + UT_LIST_BASE_NODE_T(dict_index_t) indexes; + + /** List of foreign key constraints in the table. These refer to + columns in other tables. */ + UT_LIST_BASE_NODE_T(dict_foreign_t) foreign_list; + + /** List of foreign key constraints which refer to this table. */ + UT_LIST_BASE_NODE_T(dict_foreign_t) referenced_list; + + /** Node of the LRU list of tables. */ + UT_LIST_NODE_T(dict_table_t) table_LRU; + + /** Maximum recursive level we support when loading tables chained + together with FK constraints. If exceeds this level, we will stop + loading child table into memory along with its parent table. */ + unsigned fk_max_recusive_level:8; + + /** Count of how many foreign key check operations are currently being + performed on the table. We cannot drop the table while there are + foreign key checks running on it. */ + ulint n_foreign_key_checks_running; + + /** Transactions whose view low limit is greater than this number are + not allowed to store to the MySQL query cache or retrieve from it. + When a trx with undo logs commits, it sets this to the value of the + current time. */ + trx_id_t query_cache_inv_id; + + /** Transaction id that last touched the table definition. Either when + loading the definition or CREATE TABLE, or ALTER TABLE (prepare, + commit, and rollback phases). */ + trx_id_t def_trx_id; + + /*!< set of foreign key constraints in the table; these refer to + columns in other tables */ + dict_foreign_set foreign_set; + + /*!< set of foreign key constraints which refer to this table */ + dict_foreign_set referenced_set; + #ifdef UNIV_DEBUG - /*----------------------*/ - ibool does_not_fit_in_memory; - /*!< this field is used to specify in - simulations tables which are so big - that disk should be accessed: disk - access is simulated by putting the - thread to sleep for a while; NOTE that - this flag is not stored to the data - dictionary on disk, and the database - will forget about value TRUE if it has - to reload the table definition from - disk */ + /** This field is used to specify in simulations tables which are so + big that disk should be accessed. Disk access is simulated by putting + the thread to sleep for a while. NOTE that this flag is not stored to + the data dictionary on disk, and the database will forget about value + TRUE if it has to reload the table definition from disk. */ + ibool does_not_fit_in_memory; #endif /* UNIV_DEBUG */ - /*----------------------*/ - unsigned big_rows:1; - /*!< flag: TRUE if the maximum length of - a single row exceeds BIG_ROW_SIZE; - initialized in dict_table_add_to_cache() */ - /** Statistics for query optimization */ - /* @{ */ - - volatile os_once::state_t stats_latch_created; - /*!< Creation state of 'stats_latch'. 
*/ - - rw_lock_t* stats_latch; /*!< this latch protects: - dict_table_t::stat_initialized - dict_table_t::stat_n_rows (*) - dict_table_t::stat_clustered_index_size - dict_table_t::stat_sum_of_other_index_sizes - dict_table_t::stat_modified_counter (*) - dict_table_t::indexes*::stat_n_diff_key_vals[] - dict_table_t::indexes*::stat_index_size - dict_table_t::indexes*::stat_n_leaf_pages - (*) those are not always protected for - performance reasons */ - unsigned stat_initialized:1; /*!< TRUE if statistics have - been calculated the first time - after database startup or table creation */ -#define DICT_TABLE_IN_USED -1 - lint memcached_sync_count; - /*!< count of how many handles are opened - to this table from memcached; DDL on the - table is NOT allowed until this count - goes to zero. If it's -1, means there's DDL - on the table, DML from memcached will be - blocked. */ - ib_time_t stats_last_recalc; - /*!< Timestamp of last recalc of the stats */ - ib_uint32_t stat_persistent; - /*!< The two bits below are set in the - ::stat_persistent member and have the following - meaning: - 1. _ON=0, _OFF=0, no explicit persistent stats - setting for this table, the value of the global - srv_stats_persistent is used to determine - whether the table has persistent stats enabled - or not - 2. _ON=0, _OFF=1, persistent stats are - explicitly disabled for this table, regardless - of the value of the global srv_stats_persistent - 3. _ON=1, _OFF=0, persistent stats are - explicitly enabled for this table, regardless - of the value of the global srv_stats_persistent - 4. _ON=1, _OFF=1, not allowed, we assert if - this ever happens. */ -#define DICT_STATS_PERSISTENT_ON (1 << 1) -#define DICT_STATS_PERSISTENT_OFF (1 << 2) - ib_uint32_t stats_auto_recalc; - /*!< The two bits below are set in the - ::stats_auto_recalc member and have - the following meaning: - 1. _ON=0, _OFF=0, no explicit auto recalc - setting for this table, the value of the global - srv_stats_persistent_auto_recalc is used to - determine whether the table has auto recalc - enabled or not - 2. _ON=0, _OFF=1, auto recalc is explicitly - disabled for this table, regardless of the - value of the global - srv_stats_persistent_auto_recalc - 3. _ON=1, _OFF=0, auto recalc is explicitly - enabled for this table, regardless of the - value of the global - srv_stats_persistent_auto_recalc - 4. _ON=1, _OFF=1, not allowed, we assert if - this ever happens. */ -#define DICT_STATS_AUTO_RECALC_ON (1 << 1) -#define DICT_STATS_AUTO_RECALC_OFF (1 << 2) - ulint stats_sample_pages; - /*!< the number of pages to sample for this - table during persistent stats estimation; - if this is 0, then the value of the global - srv_stats_persistent_sample_pages will be - used instead. */ - ib_uint64_t stat_n_rows; - /*!< approximate number of rows in the table; - we periodically calculate new estimates */ - ulint stat_clustered_index_size; - /*!< approximate clustered index size in - database pages */ - ulint stat_sum_of_other_index_sizes; - /*!< other indexes in database pages */ - ib_uint64_t stat_modified_counter; - /*!< when a row is inserted, updated, - or deleted, - we add 1 to this number; we calculate new - estimates for the stat_... 
values for the - table and the indexes when about 1 / 16 of - table has been modified; - also when the estimate operation is - called for MySQL SHOW TABLE STATUS; the - counter is reset to zero at statistics - calculation; this counter is not protected by - any latch, because this is only used for - heuristics */ - -#define BG_STAT_IN_PROGRESS ((byte)(1 << 0)) - /*!< BG_STAT_IN_PROGRESS is set in - stats_bg_flag when the background - stats code is working on this table. The DROP - TABLE code waits for this to be cleared - before proceeding. */ -#define BG_STAT_SHOULD_QUIT ((byte)(1 << 1)) - /*!< BG_STAT_SHOULD_QUIT is set in - stats_bg_flag when DROP TABLE starts - waiting on BG_STAT_IN_PROGRESS to be cleared, - the background stats thread will detect this - and will eventually quit sooner */ + + /** TRUE if the maximum length of a single row exceeds BIG_ROW_SIZE. + Initialized in dict_table_add_to_cache(). */ + unsigned big_rows:1; + + /** Statistics for query optimization. @{ */ + + /** Creation state of 'stats_latch'. */ + volatile os_once::state_t stats_latch_created; + + /** This latch protects: + dict_table_t::stat_initialized, + dict_table_t::stat_n_rows (*), + dict_table_t::stat_clustered_index_size, + dict_table_t::stat_sum_of_other_index_sizes, + dict_table_t::stat_modified_counter (*), + dict_table_t::indexes*::stat_n_diff_key_vals[], + dict_table_t::indexes*::stat_index_size, + dict_table_t::indexes*::stat_n_leaf_pages. + (*) Those are not always protected for + performance reasons. */ + rw_lock_t* stats_latch; + + /** TRUE if statistics have been calculated the first time after + database startup or table creation. */ + unsigned stat_initialized:1; + + /** Timestamp of last recalc of the stats. */ + ib_time_t stats_last_recalc; + + /** The two bits below are set in the 'stat_persistent' member. They + have the following meaning: + 1. _ON=0, _OFF=0, no explicit persistent stats setting for this table, + the value of the global srv_stats_persistent is used to determine + whether the table has persistent stats enabled or not + 2. _ON=0, _OFF=1, persistent stats are explicitly disabled for this + table, regardless of the value of the global srv_stats_persistent + 3. _ON=1, _OFF=0, persistent stats are explicitly enabled for this + table, regardless of the value of the global srv_stats_persistent + 4. _ON=1, _OFF=1, not allowed, we assert if this ever happens. */ + #define DICT_STATS_PERSISTENT_ON (1 << 1) + #define DICT_STATS_PERSISTENT_OFF (1 << 2) + + /** Indicates whether the table uses persistent stats or not. See + DICT_STATS_PERSISTENT_ON and DICT_STATS_PERSISTENT_OFF. */ + ib_uint32_t stat_persistent; + + /** The two bits below are set in the 'stats_auto_recalc' member. They + have the following meaning: + 1. _ON=0, _OFF=0, no explicit auto recalc setting for this table, the + value of the global srv_stats_persistent_auto_recalc is used to + determine whether the table has auto recalc enabled or not + 2. _ON=0, _OFF=1, auto recalc is explicitly disabled for this table, + regardless of the value of the global srv_stats_persistent_auto_recalc + 3. _ON=1, _OFF=0, auto recalc is explicitly enabled for this table, + regardless of the value of the global srv_stats_persistent_auto_recalc + 4. _ON=1, _OFF=1, not allowed, we assert if this ever happens. */ + #define DICT_STATS_AUTO_RECALC_ON (1 << 1) + #define DICT_STATS_AUTO_RECALC_OFF (1 << 2) + + /** Indicates whether the table uses automatic recalc for persistent + stats or not. 
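The stat_persistent member falls back to the global srv_stats_persistent only when neither explicit bit is set. A standalone version of that three-way resolution, using the bit values defined above; the helper name is illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    /* Bit values as defined above. */
    static const uint32_t DICT_STATS_PERSISTENT_ON  = 1 << 1;
    static const uint32_t DICT_STATS_PERSISTENT_OFF = 1 << 2;

    /* Resolve whether persistent stats are effectively enabled for a table,
       following the four cases documented for 'stat_persistent'. */
    static bool stats_is_persistent(uint32_t stat_persistent, bool srv_stats_persistent)
    {
        const bool on  = (stat_persistent & DICT_STATS_PERSISTENT_ON) != 0;
        const bool off = (stat_persistent & DICT_STATS_PERSISTENT_OFF) != 0;

        assert(!(on && off));          /* _ON and _OFF together are not allowed */

        if (on)  return true;          /* explicitly enabled for this table */
        if (off) return false;         /* explicitly disabled for this table */
        return srv_stats_persistent;   /* no table-level setting: global default */
    }

    int main()
    {
        std::printf("%d\n", stats_is_persistent(0, true));                          /* 1 */
        std::printf("%d\n", stats_is_persistent(DICT_STATS_PERSISTENT_OFF, true));  /* 0 */
        std::printf("%d\n", stats_is_persistent(DICT_STATS_PERSISTENT_ON, false));  /* 1 */
        return 0;
    }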
See DICT_STATS_AUTO_RECALC_ON and + DICT_STATS_AUTO_RECALC_OFF. */ + ib_uint32_t stats_auto_recalc; + + /** The number of pages to sample for this table during persistent + stats estimation. If this is 0, then the value of the global + srv_stats_persistent_sample_pages will be used instead. */ + ulint stats_sample_pages; + + /** Approximate number of rows in the table. We periodically calculate + new estimates. */ + ib_uint64_t stat_n_rows; + + /** Approximate clustered index size in database pages. */ + ulint stat_clustered_index_size; + + /** Approximate size of other indexes in database pages. */ + ulint stat_sum_of_other_index_sizes; + + /** How many rows are modified since last stats recalc. When a row is + inserted, updated, or deleted, we add 1 to this number; we calculate + new estimates for the table and the indexes if the table has changed + too much, see row_update_statistics_if_needed(). The counter is reset + to zero at statistics calculation. This counter is not protected by + any latch, because this is only used for heuristics. */ + ib_uint64_t stat_modified_counter; + + /** Background stats thread is not working on this table. */ + #define BG_STAT_NONE 0 + + /** Set in 'stats_bg_flag' when the background stats code is working + on this table. The DROP TABLE code waits for this to be cleared before + proceeding. */ + #define BG_STAT_IN_PROGRESS (1 << 0) + + /** Set in 'stats_bg_flag' when DROP TABLE starts waiting on + BG_STAT_IN_PROGRESS to be cleared. The background stats thread will + detect this and will eventually quit sooner. */ + #define BG_STAT_SHOULD_QUIT (1 << 1) + + /** The state of the background stats thread wrt this table. + See BG_STAT_NONE, BG_STAT_IN_PROGRESS and BG_STAT_SHOULD_QUIT. + Writes are covered by dict_sys->mutex. Dirty reads are possible. */ #define BG_SCRUB_IN_PROGRESS ((byte)(1 << 2)) /*!< BG_SCRUB_IN_PROGRESS is set in stats_bg_flag when the background @@ -1266,96 +1659,124 @@ struct dict_table_t{ #define BG_IN_PROGRESS (BG_STAT_IN_PROGRESS | BG_SCRUB_IN_PROGRESS) - byte stats_bg_flag; - /*!< see BG_STAT_* above. - Writes are covered by dict_sys->mutex. - Dirty reads are possible. */ + byte stats_bg_flag; + bool stats_error_printed; /*!< Has persistent stats error beein already printed for this table ? */ - /* @} */ - /*----------------------*/ - /**!< The following fields are used by the - AUTOINC code. The actual collection of - tables locked during AUTOINC read/write is - kept in trx_t. In order to quickly determine - whether a transaction has locked the AUTOINC - lock we keep a pointer to the transaction - here in the autoinc_trx variable. This is to - avoid acquiring the lock_sys_t::mutex and - scanning the vector in trx_t. - - When an AUTOINC lock has to wait, the - corresponding lock instance is created on - the trx lock heap rather than use the - pre-allocated instance in autoinc_lock below.*/ - /* @{ */ - lock_t* autoinc_lock; - /*!< a buffer for an AUTOINC lock - for this table: we allocate the memory here - so that individual transactions can get it - and release it without a need to allocate - space from the lock heap of the trx: - otherwise the lock heap would grow rapidly - if we do a large insert from a select */ - ib_mutex_t* autoinc_mutex; - /*!< mutex protecting the autoincrement - counter */ + /* @} */ + + /** AUTOINC related members. @{ */ + + /* The actual collection of tables locked during AUTOINC read/write is + kept in trx_t. 
In order to quickly determine whether a transaction has + locked the AUTOINC lock we keep a pointer to the transaction here in + the 'autoinc_trx' member. This is to avoid acquiring the + lock_sys_t::mutex and scanning the vector in trx_t. + When an AUTOINC lock has to wait, the corresponding lock instance is + created on the trx lock heap rather than use the pre-allocated instance + in autoinc_lock below. */ + + /** A buffer for an AUTOINC lock for this table. We allocate the + memory here so that individual transactions can get it and release it + without a need to allocate space from the lock heap of the trx: + otherwise the lock heap would grow rapidly if we do a large insert + from a select. */ + lock_t* autoinc_lock; /** Creation state of autoinc_mutex member */ - volatile os_once::state_t - autoinc_mutex_created; - - ib_uint64_t autoinc;/*!< autoinc counter value to give to the - next inserted row */ - ulong n_waiting_or_granted_auto_inc_locks; - /*!< This counter is used to track the number - of granted and pending autoinc locks on this - table. This value is set after acquiring the - lock_sys_t::mutex but we peek the contents to - determine whether other transactions have - acquired the AUTOINC lock or not. Of course - only one transaction can be granted the - lock but there can be multiple waiters. */ - const trx_t* autoinc_trx; - /*!< The transaction that currently holds the - the AUTOINC lock on this table. - Protected by lock_sys->mutex. */ - fts_t* fts; /* FTS specific state variables */ - /* @} */ - /*----------------------*/ + volatile os_once::state_t autoinc_mutex_created; - ib_quiesce_t quiesce;/*!< Quiescing states, protected by the - dict_index_t::lock. ie. we can only change - the state if we acquire all the latches - (dict_index_t::lock) in X mode of this table's - indexes. */ + /** Mutex protecting the autoincrement counter. */ + ib_mutex_t* autoinc_mutex; - /*----------------------*/ - ulint n_rec_locks; - /*!< Count of the number of record locks on - this table. We use this to determine whether - we can evict the table from the dictionary - cache. It is protected by lock_sys->mutex. */ - ulint n_ref_count; - /*!< count of how many handles are opened - to this table; dropping of the table is - NOT allowed until this count gets to zero; - MySQL does NOT itself check the number of - open handles at drop */ - UT_LIST_BASE_NODE_T(lock_t) - locks; /*!< list of locks on the table; protected - by lock_sys->mutex */ + /** Autoinc counter value to give to the next inserted row. */ + ib_uint64_t autoinc; + + /** This counter is used to track the number of granted and pending + autoinc locks on this table. This value is set after acquiring the + lock_sys_t::mutex but we peek the contents to determine whether other + transactions have acquired the AUTOINC lock or not. Of course only one + transaction can be granted the lock but there can be multiple + waiters. */ + ulong n_waiting_or_granted_auto_inc_locks; + + /** The transaction that currently holds the the AUTOINC lock on this + table. Protected by lock_sys->mutex. */ + const trx_t* autoinc_trx; + + /* @} */ + + /** Count of how many handles are opened to this table from memcached. + DDL on the table is NOT allowed until this count goes to zero. If + it is -1, then there's DDL on the table, DML from memcached will be + blocked. */ + lint memcached_sync_count; + + /** FTS specific state variables. */ + fts_t* fts; + + /** Quiescing states, protected by the dict_index_t::lock. ie. 
we can + only change the state if we acquire all the latches (dict_index_t::lock) + in X mode of this table's indexes. */ + ib_quiesce_t quiesce; + + /** Count of the number of record locks on this table. We use this to + determine whether we can evict the table from the dictionary cache. + It is protected by lock_sys->mutex. */ + ulint n_rec_locks; + +#ifndef UNIV_DEBUG +private: +#endif + /** Count of how many handles are opened to this table. Dropping of the + table is NOT allowed until this count gets to zero. MySQL does NOT + itself check the number of open handles at DROP. */ + ulint n_ref_count; + +public: + /** List of locks on the table. Protected by lock_sys->mutex. */ + table_lock_list_t locks; + + /** Timestamp of the last modification of this table. */ + time_t update_time; + + /** row-id counter for use by intrinsic table for getting row-id. + Given intrinsic table semantics, row-id can be locally maintained + instead of getting it from central generator which involves mutex + locking. */ + ib_uint64_t sess_row_id; + + /** trx_id counter for use by intrinsic table for getting trx-id. + Intrinsic table are not shared so don't need a central trx-id + but just need a increased counter to track consistent view while + proceeding SELECT as part of UPDATE. */ + ib_uint64_t sess_trx_id; #endif /* !UNIV_HOTBACKUP */ ibool is_encrypted; #ifdef UNIV_DEBUG - ulint magic_n;/*!< magic number */ -/** Value of dict_table_t::magic_n */ -# define DICT_TABLE_MAGIC_N 76333786 + /** Value of 'magic_n'. */ + #define DICT_TABLE_MAGIC_N 76333786 + + /** Magic number. */ + ulint magic_n; #endif /* UNIV_DEBUG */ + /** mysql_row_templ_t for base columns used for compute the virtual + columns */ + innodb_col_templ_t* vc_templ; + + /** whether above vc_templ comes from purge allocation */ + bool vc_templ_purge; }; +/*******************************************************************//** +Initialise the table lock list. */ +void +lock_table_lock_list_init( +/*======================*/ + table_lock_list_t* locks); /*!< List to initialise */ + /** A function object to add the foreign key constraint to the referenced set of the referenced table, if it exists in the dictionary cache. */ struct dict_foreign_add_to_referenced_table { @@ -1381,24 +1802,10 @@ dict_table_autoinc_destroy( if (table->autoinc_mutex_created == os_once::DONE && table->autoinc_mutex != NULL) { mutex_free(table->autoinc_mutex); - delete table->autoinc_mutex; + UT_DELETE(table->autoinc_mutex); } } -/** Allocate and init the autoinc latch of a given table. -This function must not be called concurrently on the same table object. -@param[in,out] table_void table whose autoinc latch to create */ -void -dict_table_autoinc_alloc( - void* table_void); - -/** Allocate and init the zip_pad_mutex of a given index. -This function must not be called concurrently on the same index object. -@param[in,out] index_void index whose zip_pad_mutex to create */ -void -dict_index_zip_pad_alloc( - void* index_void); - /** Request for lazy creation of the autoinc latch of a given table. This function is only called from either single threaded environment or from a thread that has not shared the table object with other threads. 
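The autoinc mutex and the zip_pad mutex are now always created lazily through the os_once protocol: the pointer starts out NULL in state NEVER_DONE and the first user performs the one-time construction. A standalone analogue of that protocol built on std::atomic and std::mutex; it mirrors the state machine but is not InnoDB's os_once implementation:

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    /* NEVER_DONE -> IN_PROGRESS -> DONE; only the thread that wins the CAS
       performs the initialization, the others wait for DONE. */
    enum once_state { NEVER_DONE, IN_PROGRESS, DONE };

    static std::atomic<int> state(NEVER_DONE);
    static std::mutex*      lazy_mutex = nullptr;   /* created on first use */

    static void ensure_created()
    {
        int expected = NEVER_DONE;
        if (state.compare_exchange_strong(expected, IN_PROGRESS)) {
            lazy_mutex = new std::mutex();          /* the one-time init */
            state.store(DONE);
        } else {
            while (state.load() != DONE) {
                std::this_thread::yield();          /* wait for the winner */
            }
        }
    }

    int main()
    {
        std::vector<std::thread> threads;
        for (int i = 0; i < 4; i++) {
            threads.emplace_back([] {
                ensure_created();
                std::lock_guard<std::mutex> g(*lazy_mutex);
            });
        }
        for (std::thread& t : threads) t.join();

        std::printf("state=%d mutex=%p\n", state.load(), (void*) lazy_mutex);
        delete lazy_mutex;                          /* mirror of the _destroy path */
        return 0;
    }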
@@ -1408,13 +1815,8 @@ void dict_table_autoinc_create_lazy( dict_table_t* table) { -#ifdef HAVE_ATOMIC_BUILTINS table->autoinc_mutex = NULL; table->autoinc_mutex_created = os_once::NEVER_DONE; -#else /* HAVE_ATOMIC_BUILTINS */ - dict_table_autoinc_alloc(table); - table->autoinc_mutex_created = os_once::DONE; -#endif /* HAVE_ATOMIC_BUILTINS */ } /** Request a lazy creation of dict_index_t::zip_pad::mutex. @@ -1426,13 +1828,8 @@ void dict_index_zip_pad_mutex_create_lazy( dict_index_t* index) { -#ifdef HAVE_ATOMIC_BUILTINS index->zip_pad.mutex = NULL; index->zip_pad.mutex_created = os_once::NEVER_DONE; -#else /* HAVE_ATOMIC_BUILTINS */ - dict_index_zip_pad_alloc(index); - index->zip_pad.mutex_created = os_once::DONE; -#endif /* HAVE_ATOMIC_BUILTINS */ } /** Destroy the zip_pad_mutex of the given index. @@ -1446,8 +1843,8 @@ dict_index_zip_pad_mutex_destroy( { if (index->zip_pad.mutex_created == os_once::DONE && index->zip_pad.mutex != NULL) { - os_fast_mutex_free(index->zip_pad.mutex); - delete index->zip_pad.mutex; + mutex_free(index->zip_pad.mutex); + UT_DELETE(index->zip_pad.mutex); } } @@ -1458,7 +1855,7 @@ void dict_index_zip_pad_unlock( dict_index_t* index) { - os_fast_mutex_unlock(index->zip_pad.mutex); + mutex_exit(index->zip_pad.mutex); } #ifdef UNIV_DEBUG @@ -1474,8 +1871,45 @@ dict_table_autoinc_own( } #endif /* UNIV_DEBUG */ +/** whether a col is used in spatial index or regular index */ +enum col_spatial_status { + /** Not used in gis index. */ + SPATIAL_NONE = 0, + + /** Used in both spatial index and regular index. */ + SPATIAL_MIXED = 1, + + /** Only used in spatial index. */ + SPATIAL_ONLY = 2 +}; + +/** Check whether the col is used in spatial index or regular index. +@param[in] col column to check +@return col_spatial_status */ +inline +col_spatial_status +dict_col_get_spatial_status( + const dict_col_t* col) +{ + col_spatial_status spatial_status = SPATIAL_NONE; + + ut_ad(col->ord_part); + + if (DATA_GEOMETRY_MTYPE(col->mtype)) { + if (col->max_prefix == 0) { + spatial_status = SPATIAL_ONLY; + } else { + /* Any regular index on a geometry column + should have a prefix. */ + spatial_status = SPATIAL_MIXED; + } + } + + return(spatial_status); +} + #ifndef UNIV_NONINL #include "dict0mem.ic" #endif -#endif +#endif /* dict0mem_h */ diff --git a/storage/innobase/include/dict0mem.ic b/storage/innobase/include/dict0mem.ic index 38d51f61789..3269596feb7 100644 --- a/storage/innobase/include/dict0mem.ic +++ b/storage/innobase/include/dict0mem.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -63,11 +63,42 @@ dict_mem_fill_index_struct( #ifndef UNIV_HOTBACKUP index->space = (unsigned int) space; index->page = FIL_NULL; + index->merge_threshold = DICT_INDEX_MERGE_THRESHOLD_DEFAULT; #endif /* !UNIV_HOTBACKUP */ index->table_name = table_name; index->n_fields = (unsigned int) n_fields; /* The '1 +' above prevents allocation of an empty mem block */ + index->allow_duplicates = false; + index->nulls_equal = false; + index->disable_ahi = false; + + new (&index->rec_cache) rec_cache_t(); + + if (heap != NULL) { + index->last_ins_cur = + static_cast(mem_heap_alloc( + heap, sizeof(last_ops_cur_t))); + + new (index->last_ins_cur) last_ops_cur_t(); + + index->last_sel_cur = + static_cast(mem_heap_alloc( + heap, sizeof(last_ops_cur_t))); + + new (index->last_sel_cur) last_ops_cur_t(); + + index->rec_cache.offsets = + static_cast(mem_heap_alloc( + heap, sizeof(ulint) * OFFS_IN_REC_NORMAL_SIZE)); + + index->rec_cache.sz_of_offsets = OFFS_IN_REC_NORMAL_SIZE; + } else { + index->last_ins_cur = NULL; + index->last_sel_cur = NULL; + index->rec_cache.offsets = NULL; + } + #ifdef UNIV_DEBUG index->magic_n = DICT_INDEX_MAGIC_N; #endif /* UNIV_DEBUG */ diff --git a/storage/innobase/include/dict0priv.h b/storage/innobase/include/dict0priv.h index e034662aba0..35548faeb93 100644 --- a/storage/innobase/include/dict0priv.h +++ b/storage/innobase/include/dict0priv.h @@ -26,10 +26,12 @@ Created Fri 2 Jul 2010 13:30:38 EST - Sunny Bains #ifndef dict0priv_h #define dict0priv_h +#include "univ.i" + /**********************************************************************//** Gets a table; loads it to the dictionary cache if necessary. A low-level function. Note: Not to be called from outside dict0*c functions. -@return table, NULL if not found */ +@return table, NULL if not found */ UNIV_INLINE dict_table_t* dict_table_get_low( @@ -38,7 +40,7 @@ dict_table_get_low( /**********************************************************************//** Checks if a table is in the dictionary cache. -@return table, NULL if not found */ +@return table, NULL if not found */ UNIV_INLINE dict_table_t* dict_table_check_if_in_cache_low( @@ -47,7 +49,7 @@ dict_table_check_if_in_cache_low( /**********************************************************************//** Returns a table object based on table id. -@return table, NULL if does not exist */ +@return table, NULL if does not exist */ UNIV_INLINE dict_table_t* dict_table_open_on_id_low( diff --git a/storage/innobase/include/dict0priv.ic b/storage/innobase/include/dict0priv.ic index 983218af78a..fd10c566be6 100644 --- a/storage/innobase/include/dict0priv.ic +++ b/storage/innobase/include/dict0priv.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -31,7 +31,7 @@ Created Wed 13 Oct 2010 16:10:14 EST Sunny Bains /**********************************************************************//** Gets a table; loads it to the dictionary cache if necessary. A low-level function. 
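dict_mem_fill_index_struct() now carves last_ins_cur, last_sel_cur and the offsets cache out of the index heap and constructs them with placement new, because the heap allocator hands back raw, unconstructed bytes. A standalone sketch of that allocate-then-construct pattern, using malloc in place of the heap; the struct and field names are stand-ins:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    /* Stand-in for a small cursor-cache object built inside arena memory. */
    struct cursor_cache_t {
        const void* rec;
        bool        invalid;

        cursor_cache_t() : rec(), invalid() {}   /* value-initialize members */
    };

    int main()
    {
        /* Pretend this came from an arena allocator returning raw bytes. */
        void* raw = std::malloc(sizeof(cursor_cache_t));
        if (raw == nullptr) return 1;

        /* Construct the object in place in the raw storage. */
        cursor_cache_t* cur = new (raw) cursor_cache_t();
        std::printf("rec=%p invalid=%d\n", cur->rec, (int) cur->invalid);

        /* Arena memory is reclaimed wholesale, so only the destructor is run
           explicitly (trivial here), never operator delete. */
        cur->~cursor_cache_t();
        std::free(raw);
        return 0;
    }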
-@return table, NULL if not found */ +@return table, NULL if not found */ UNIV_INLINE dict_table_t* dict_table_get_low( @@ -41,24 +41,22 @@ dict_table_get_low( dict_table_t* table; ut_ad(table_name); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); table = dict_table_check_if_in_cache_low(table_name); if (table && table->corrupted) { - fprintf(stderr, "InnoDB: table"); - ut_print_name(stderr, NULL, TRUE, table->name); + ib::error error; + error << "Table " << table->name << "is corrupted"; if (srv_load_corrupted) { - fputs(" is corrupted, but" - " innodb_force_load_corrupted is set\n", stderr); + error << ", but innodb_force_load_corrupted is set"; } else { - fputs(" is corrupted\n", stderr); return(NULL); } } if (table == NULL) { - table = dict_load_table(table_name, TRUE, DICT_ERR_IGNORE_NONE); + table = dict_load_table(table_name, true, DICT_ERR_IGNORE_NONE); } ut_ad(!table || table->cached); @@ -68,7 +66,7 @@ dict_table_get_low( /**********************************************************************//** Returns a table object based on table id. -@return table, NULL if does not exist */ +@return table, NULL if does not exist */ UNIV_INLINE dict_table_t* dict_table_open_on_id_low( @@ -81,7 +79,7 @@ dict_table_open_on_id_low( dict_table_t* table; ulint fold; - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); /* Look for the table name in the hash table */ fold = ut_fold_ull(table_id); @@ -102,7 +100,7 @@ dict_table_open_on_id_low( /**********************************************************************//** Checks if a table is in the dictionary cache. -@return table, NULL if not found */ +@return table, NULL if not found */ UNIV_INLINE dict_table_t* dict_table_check_if_in_cache_low( @@ -112,15 +110,19 @@ dict_table_check_if_in_cache_low( dict_table_t* table; ulint table_fold; + DBUG_ENTER("dict_table_check_if_in_cache_low"); + DBUG_PRINT("dict_table_check_if_in_cache_low", + ("table: '%s'", table_name)); + ut_ad(table_name); - ut_ad(mutex_own(&(dict_sys->mutex))); + ut_ad(mutex_own(&dict_sys->mutex)); /* Look for the table name in the hash table */ table_fold = ut_fold_string(table_name); HASH_SEARCH(name_hash, dict_sys->table_hash, table_fold, dict_table_t*, table, ut_ad(table->cached), - !strcmp(table->name, table_name)); - return(table); + !strcmp(table->name.m_name, table_name)); + DBUG_RETURN(table); } #endif /*! UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h index 72501bf9429..40f254bd743 100644 --- a/storage/innobase/include/dict0stats.h +++ b/storage/innobase/include/dict0stats.h @@ -28,7 +28,6 @@ Created Jan 06, 2010 Vasil Dimov #include "univ.i" -#include "db0err.h" #include "dict0types.h" #include "trx0types.h" @@ -60,7 +59,6 @@ is relatively quick and is used to calculate transient statistics that are not saved on disk. This was the only way to calculate statistics before the Persistent Statistics feature was introduced. */ -UNIV_INTERN void dict_stats_update_transient( /*========================*/ @@ -133,7 +131,6 @@ dict_stats_deinit( Calculates new estimates for table and index statistics. The statistics are used in query optimization. @return DB_* error code or DB_SUCCESS */ -UNIV_INTERN dberr_t dict_stats_update( /*==============*/ @@ -148,7 +145,6 @@ Removes the information for a particular index's stats from the persistent storage if it exists and if there is data stored for this index. This function creates its own trx and commits it. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_drop_index( /*==================*/ @@ -163,7 +159,6 @@ Removes the statistics for a table and all of its indexes from the persistent storage if it exists and if there is data stored for the table. This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_drop_table( /*==================*/ @@ -174,7 +169,6 @@ dict_stats_drop_table( /*********************************************************************//** Fetches or calculates new estimates for index statistics. */ -UNIV_INTERN void dict_stats_update_for_index( /*========================*/ @@ -185,7 +179,6 @@ dict_stats_update_for_index( Renames a table in InnoDB persistent stats storage. This function creates its own transaction and commits it. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t dict_stats_rename_table( /*====================*/ @@ -194,7 +187,19 @@ dict_stats_rename_table( char* errstr, /*!< out: error string if != DB_SUCCESS is returned */ size_t errstr_sz); /*!< in: errstr size */ - +/*********************************************************************//** +Renames an index in InnoDB persistent stats storage. +This function creates its own transaction and commits it. +@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned +if the persistent stats do not exist. */ +dberr_t +dict_stats_rename_index( +/*====================*/ + const dict_table_t* table, /*!< in: table whose index + is renamed */ + const char* old_index_name, /*!< in: old index name */ + const char* new_index_name) /*!< in: new index name */ + __attribute__((warn_unused_result)); /*********************************************************************//** Save defragmentation result. @return DB_SUCCESS or error code */ @@ -232,4 +237,8 @@ dict_stats_empty_defrag_stats( #include "dict0stats.ic" #endif +#ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS +void test_dict_stats_all(); +#endif /* UNIV_ENABLE_UNIT_TEST_DICT_STATS */ + #endif /* dict0stats_h */ diff --git a/storage/innobase/include/dict0stats.ic b/storage/innobase/include/dict0stats.ic index ec9a9065470..80709091734 100644 --- a/storage/innobase/include/dict0stats.ic +++ b/storage/innobase/include/dict0stats.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -23,10 +23,9 @@ Code used for calculating and manipulating table statistics. Created Jan 23, 2012 Vasil Dimov *******************************************************/ -#include "univ.i" -#include "dict0dict.h" /* dict_table_stats_lock() */ -#include "dict0types.h" /* dict_table_t */ -#include "srv0srv.h" /* srv_stats_persistent, srv_stats_auto_recalc */ +#include "dict0dict.h" +#include "dict0types.h" +#include "srv0srv.h" /*********************************************************************//** Set the persistent statistics flag for a given table. 
This is set only @@ -183,9 +182,9 @@ dict_stats_deinit( /*==============*/ dict_table_t* table) /*!< in/out: table */ { - ut_ad(mutex_own(&dict_sys->mutex)); + ut_ad(mutex_own(&dict_sys->mutex) || dict_table_is_intrinsic(table)); - ut_a(table->n_ref_count == 0); + ut_a(table->get_ref_count() == 0); dict_table_stats_lock(table, RW_X_LATCH); diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h index 34dc4657829..e04d9ab5ab9 100644 --- a/storage/innobase/include/dict0stats_bg.h +++ b/storage/innobase/include/dict0stats_bg.h @@ -28,20 +28,23 @@ Created Apr 26, 2012 Vasil Dimov #include "univ.i" -#include "dict0types.h" /* dict_table_t, table_id_t */ -#include "os0sync.h" /* os_event_t */ -#include "os0thread.h" /* DECLARE_THREAD */ +#include "dict0types.h" +#include "os0event.h" +#include "os0thread.h" /** Event to wake up the stats thread */ extern os_event_t dict_stats_event; +#ifdef HAVE_PSI_INTERFACE +extern mysql_pfs_key_t dict_stats_recalc_pool_mutex_key; +#endif /* HAVE_PSI_INTERFACE */ + /*****************************************************************//** Add a table to the recalc pool, which is processed by the background stats gathering thread. Only the table id is added to the list, so the table can be closed after being enqueued and it will be opened when needed. If the table does not exist later (has been DROPped), then it will be removed from the pool and skipped. */ -UNIV_INTERN void dict_stats_recalc_pool_add( /*=======================*/ @@ -50,7 +53,6 @@ dict_stats_recalc_pool_add( /*****************************************************************//** Delete a given table from the auto recalc pool. dict_stats_recalc_pool_del() */ -UNIV_INTERN void dict_stats_recalc_pool_del( /*=======================*/ @@ -80,7 +82,7 @@ dict_stats_defrag_pool_del( /** Yield the data dictionary latch when waiting for the background thread to stop accessing a table. -@param trx transaction holding the data dictionary locks */ +@param trx transaction holding the data dictionary locks */ #define DICT_STATS_BG_YIELD(trx) do { \ row_mysql_unlock_data_dictionary(trx); \ os_thread_sleep(250000); \ @@ -107,7 +109,6 @@ The background stats thread is guaranteed not to start using the specified table after this function returns and before the caller unlocks the data dictionary because it sets the BG_STAT_IN_PROGRESS bit in table->stats_bg_flag under dict_sys->mutex. */ -UNIV_INTERN void dict_stats_wait_bg_to_stop_using_table( /*===================================*/ @@ -117,7 +118,6 @@ dict_stats_wait_bg_to_stop_using_table( /*****************************************************************//** Initialize global variables needed for the operation of dict_stats_thread(). Must be called before dict_stats_thread() is started. */ -UNIV_INTERN void dict_stats_thread_init(); /*====================*/ @@ -125,7 +125,6 @@ dict_stats_thread_init(); /*****************************************************************//** Free resources allocated by dict_stats_thread_init(), must be called after dict_stats_thread() has exited. */ -UNIV_INTERN void dict_stats_thread_deinit(); /*======================*/ @@ -135,7 +134,7 @@ This is the thread for background stats gathering. It pops tables, from the auto recalc list and proceeds them, eventually recalculating their statistics. 
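As the comments above note, the recalc pool stores only table ids, so a table can be closed after being enqueued and reopened when the background thread gets to it. The standalone sketch below illustrates that shape with invented names (table_id, recalc_pool_add, recalc_pool_get_next); it is not the InnoDB implementation, which uses its own mutex type, a PFS-instrumented key and a dedicated stats thread.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <vector>

using table_id = std::uint64_t;

// Simplified recalc pool: only ids are queued, protected by a mutex.
static std::mutex            pool_mutex;
static std::vector<table_id> recalc_pool;

void recalc_pool_add(table_id id)
{
    std::lock_guard<std::mutex> guard(pool_mutex);
    if (std::find(recalc_pool.begin(), recalc_pool.end(), id)
        == recalc_pool.end()) {
        recalc_pool.push_back(id);      // avoid duplicate entries
    }
}

// Pop the oldest queued id; return false when the pool is empty.
bool recalc_pool_get_next(table_id& id)
{
    std::lock_guard<std::mutex> guard(pool_mutex);
    if (recalc_pool.empty()) {
        return false;
    }
    id = recalc_pool.front();
    recalc_pool.erase(recalc_pool.begin());
    return true;
}

int main()
{
    recalc_pool_add(42);
    recalc_pool_add(42);                // ignored: already queued
    for (table_id id; recalc_pool_get_next(id); ) {
        std::cout << "recalculate statistics for table " << id << "\n";
    }
}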
@return this function does not return, it calls os_thread_exit() */ -extern "C" UNIV_INTERN +extern "C" os_thread_ret_t DECLARE_THREAD(dict_stats_thread)( /*==============================*/ diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h index 35430e8ea62..5e6e5621686 100644 --- a/storage/innobase/include/dict0types.h +++ b/storage/innobase/include/dict0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under @@ -27,15 +27,19 @@ Created 1/8/1996 Heikki Tuuri #ifndef dict0types_h #define dict0types_h +#include + struct dict_sys_t; struct dict_col_t; struct dict_field_t; struct dict_index_t; struct dict_table_t; struct dict_foreign_t; +struct dict_v_col_t; struct ind_node_t; struct tab_node_t; +struct dict_add_v_col_t; /* Space id and page no where the dictionary header resides */ #define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */ @@ -83,10 +87,14 @@ typedef enum { ATOMIC_WRITES_OFF = 2 } atomic_writes_t; +#ifndef UNIV_INNOCHECKSUM +typedef ib_mutex_t DictSysMutex; +#endif /* !UNIV_INNOCHECKSUM */ + /** Prefix for tmp tables, adopted from sql/table.h */ -#define tmp_file_prefix "#sql" -#define tmp_file_prefix_length 4 -#define TEMP_FILE_PREFIX_INNODB "#sql-ib" +#define TEMP_FILE_PREFIX "#sql" +#define TEMP_FILE_PREFIX_LENGTH 4 +#define TEMP_FILE_PREFIX_INNODB "#sql-ib" #define TEMP_TABLE_PREFIX "#sql" #define TEMP_TABLE_PATH_PREFIX "/" TEMP_TABLE_PREFIX diff --git a/storage/innobase/include/dyn0buf.h b/storage/innobase/include/dyn0buf.h new file mode 100644 index 00000000000..7e2995ccabd --- /dev/null +++ b/storage/innobase/include/dyn0buf.h @@ -0,0 +1,505 @@ +/***************************************************************************** + +Copyright (c) 2013, 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/dyn0buf.h +The dynamically allocated buffer implementation + +Created 2013-03-16 Sunny Bains +*******************************************************/ + +#ifndef dyn0buf_h +#define dyn0buf_h + +#include "univ.i" +#include "ut0lst.h" +#include "mem0mem.h" +#include "dyn0types.h" + +/** Class that manages dynamic buffers. It uses a UT_LIST of +dyn_buf_t::block_t instances. We don't use STL containers in +order to avoid the overhead of heap calls. Using a custom memory +allocator doesn't solve the problem either because we have to get +the memory from somewhere. 
We can't use the block_t::m_data as the +backend for the custom allocator because we would like the data in +the blocks to be contiguous. */ +template +class dyn_buf_t { +public: + + class block_t; + + typedef UT_LIST_NODE_T(block_t) block_node_t; + typedef UT_LIST_BASE_NODE_T(block_t) block_list_t; + + class block_t { + public: + + block_t() + { + ut_ad(MAX_DATA_SIZE <= (2 << 15)); + init(); + } + + ~block_t() { } + + /** + Gets the number of used bytes in a block. + @return number of bytes used */ + ulint used() const + __attribute__((warn_unused_result)) + { + return(static_cast(m_used & ~DYN_BLOCK_FULL_FLAG)); + } + + /** + Gets pointer to the start of data. + @return pointer to data */ + byte* start() + __attribute__((warn_unused_result)) + { + return(m_data); + } + + /** + @return start of data - non const version */ + byte* begin() + __attribute__((warn_unused_result)) + { + return(m_data); + } + + /** + @return end of used data - non const version */ + byte* end() + __attribute__((warn_unused_result)) + { + return(begin() + m_used); + } + + /** + @return start of data - const version */ + const byte* begin() const + __attribute__((warn_unused_result)) + { + return(m_data); + } + + /** + @return end of used data - const version */ + const byte* end() const + __attribute__((warn_unused_result)) + { + return(begin() + m_used); + } + + private: + /** + @return pointer to start of reserved space */ + template + Type push(ib_uint32_t size) + { + Type ptr = reinterpret_cast(end()); + + m_used += size; + ut_ad(m_used <= static_cast(MAX_DATA_SIZE)); + + return(ptr); + } + + /** + Grow the stack. */ + void close(const byte* ptr) + { + /* Check that it is within bounds */ + ut_ad(ptr >= begin()); + ut_ad(ptr <= begin() + m_buf_end); + + /* We have done the boundary check above */ + m_used = static_cast(ptr - begin()); + + ut_ad(m_used <= MAX_DATA_SIZE); + ut_d(m_buf_end = 0); + } + + /** + Initialise the block */ + void init() + { + m_used = 0; + ut_d(m_buf_end = 0); + ut_d(m_magic_n = DYN_BLOCK_MAGIC_N); + } + private: +#ifdef UNIV_DEBUG + /** If opened then this is the buffer end offset, else 0 */ + ulint m_buf_end; + + /** Magic number (DYN_BLOCK_MAGIC_N) */ + ulint m_magic_n; +#endif /* UNIV_DEBUG */ + + /** SIZE - sizeof(m_node) + sizeof(m_used) */ + enum { + MAX_DATA_SIZE = SIZE + - sizeof(block_node_t) + + sizeof(ib_uint32_t) + }; + + /** Storage */ + byte m_data[MAX_DATA_SIZE]; + + /** Doubly linked list node. */ + block_node_t m_node; + + /** number of data bytes used in this block; + DYN_BLOCK_FULL_FLAG is set when the block becomes full */ + ib_uint32_t m_used; + + friend class dyn_buf_t; + }; + + enum { MAX_DATA_SIZE = block_t::MAX_DATA_SIZE}; + + /** Default constructor */ + dyn_buf_t() + : + m_heap(), + m_size() + { + UT_LIST_INIT(m_list, &block_t::m_node); + push_back(&m_first_block); + } + + /** Destructor */ + ~dyn_buf_t() + { + erase(); + } + + /** Reset the buffer vector */ + void erase() + { + if (m_heap != NULL) { + mem_heap_free(m_heap); + m_heap = NULL; + + /* Initialise the list and add the first block. */ + UT_LIST_INIT(m_list, &block_t::m_node); + push_back(&m_first_block); + } else { + m_first_block.init(); + ut_ad(UT_LIST_GET_LEN(m_list) == 1); + } + + m_size = 0; + } + + /** + Makes room on top and returns a pointer to a buffer in it. After + copying the elements, the caller must close the buffer using close(). + @param size in bytes of the buffer; MUST be <= MAX_DATA_SIZE! 
+ @return pointer to the buffer */ + byte* open(ulint size) + __attribute__((warn_unused_result)) + { + ut_ad(size > 0); + ut_ad(size <= MAX_DATA_SIZE); + + block_t* block; + + block = has_space(size) ? back() : add_block(); + + ut_ad(block->m_used <= MAX_DATA_SIZE); + ut_d(block->m_buf_end = block->m_used + size); + + return(block->end()); + } + + /** + Closes the buffer returned by open. + @param ptr end of used space */ + void close(const byte* ptr) + { + ut_ad(UT_LIST_GET_LEN(m_list) > 0); + block_t* block = back(); + + m_size -= block->used(); + + block->close(ptr); + + m_size += block->used(); + } + + /** + Makes room on top and returns a pointer to the added element. + The caller must copy the element to the pointer returned. + @param size in bytes of the element + @return pointer to the element */ + template + Type push(ib_uint32_t size) + { + ut_ad(size > 0); + ut_ad(size <= MAX_DATA_SIZE); + + block_t* block; + + block = has_space(size) ? back() : add_block(); + + m_size += size; + + /* See ISO C++03 14.2/4 for why "template" is required. */ + + return(block->template push(size)); + } + + /** + Pushes n bytes. + @param str string to write + @param len string length */ + void push(const byte* ptr, ib_uint32_t len) + { + while (len > 0) { + ib_uint32_t n_copied; + + if (len >= MAX_DATA_SIZE) { + n_copied = MAX_DATA_SIZE; + } else { + n_copied = len; + } + + ::memmove(push(n_copied), ptr, n_copied); + + ptr += n_copied; + len -= n_copied; + } + } + + /** + Returns a pointer to an element in the buffer. const version. + @param pos position of element in bytes from start + @return pointer to element */ + template + const Type at(ulint pos) const + { + block_t* block = const_cast( + const_cast(this)->find(pos)); + + return(reinterpret_cast(block->begin() + pos)); + } + + /** + Returns a pointer to an element in the buffer. non const version. + @param pos position of element in bytes from start + @return pointer to element */ + template + Type at(ulint pos) + { + block_t* block = const_cast(find(pos)); + + return(reinterpret_cast(block->begin() + pos)); + } + + /** + Returns the size of the total stored data. + @return data size in bytes */ + ulint size() const + __attribute__((warn_unused_result)) + { +#ifdef UNIV_DEBUG + ulint total_size = 0; + + for (const block_t* block = UT_LIST_GET_FIRST(m_list); + block != NULL; + block = UT_LIST_GET_NEXT(m_node, block)) { + + total_size += block->used(); + } + + ut_ad(total_size == m_size); +#endif /* UNIV_DEBUG */ + return(m_size); + } + + /** + Iterate over each block and call the functor. + @return false if iteration was terminated. */ + template + bool for_each_block(Functor& functor) const + { + for (const block_t* block = UT_LIST_GET_FIRST(m_list); + block != NULL; + block = UT_LIST_GET_NEXT(m_node, block)) { + + if (!functor(block)) { + return(false); + } + } + + return(true); + } + + /** + Iterate over all the blocks in reverse and call the iterator + @return false if iteration was terminated. 
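To make the intent of dyn_buf_t clearer, here is a self-contained approximation of the same idea: data lives in a chain of fixed-size blocks so that appending never relocates existing bytes, and traversal hands each block to a functor that can stop the iteration early. block_buf, last_with_space and the std::list backing store are inventions of this sketch, not part of the patch; unlike the real class there is no embedded first block, and this sketch fills the remaining space of the last block before adding a new one.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <list>

template <std::size_t BLOCK_SIZE>
class block_buf {
public:
    struct block {
        unsigned char data[BLOCK_SIZE];
        std::size_t   used = 0;
    };

    // Append bytes, spilling into a new block when the last one is full.
    void push(const unsigned char* ptr, std::size_t len)
    {
        while (len > 0) {
            block&      b = last_with_space();
            std::size_t n = std::min(len, BLOCK_SIZE - b.used);
            std::memcpy(b.data + b.used, ptr, n);
            b.used += n;
            ptr += n;
            len -= n;
        }
    }

    // Functor-based traversal, like dyn_buf_t::for_each_block():
    // stop as soon as the functor returns false.
    template <typename Functor>
    bool for_each_block(Functor functor) const
    {
        for (const block& b : m_blocks) {
            if (!functor(b)) {
                return false;
            }
        }
        return true;
    }

private:
    block& last_with_space()
    {
        if (m_blocks.empty() || m_blocks.back().used == BLOCK_SIZE) {
            m_blocks.emplace_back();
        }
        return m_blocks.back();
    }

    std::list<block> m_blocks;
};

int main()
{
    block_buf<8>        buf;
    const unsigned char msg[] = "redo log record bytes";
    buf.push(msg, sizeof(msg) - 1);

    std::size_t total = 0;
    buf.for_each_block([&](const block_buf<8>::block& b) {
        total += b.used;
        return true;                    // keep iterating
    });
    std::cout << "bytes stored: " << total << "\n";
}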
*/ + template + bool for_each_block_in_reverse(Functor& functor) const + { + for (block_t* block = UT_LIST_GET_LAST(m_list); + block != NULL; + block = UT_LIST_GET_PREV(m_node, block)) { + + if (!functor(block)) { + return(false); + } + } + + return(true); + } + + /** + @return the first block */ + block_t* front() + __attribute__((warn_unused_result)) + { + ut_ad(UT_LIST_GET_LEN(m_list) > 0); + return(UT_LIST_GET_FIRST(m_list)); + } + + /** + @return true if m_first_block block was not filled fully */ + bool is_small() const + __attribute__((warn_unused_result)) + { + return(m_heap == NULL); + } + +private: + // Disable copying + dyn_buf_t(const dyn_buf_t&); + dyn_buf_t& operator=(const dyn_buf_t&); + + /** + Add the block to the end of the list*/ + void push_back(block_t* block) + { + block->init(); + + UT_LIST_ADD_LAST(m_list, block); + } + + /** @return the last block in the list */ + block_t* back() + { + return(UT_LIST_GET_LAST(m_list)); + } + + /* + @return true if request can be fullfilled */ + bool has_space(ulint size) const + { + return(back()->m_used + size <= MAX_DATA_SIZE); + } + + /* + @return true if request can be fullfilled */ + bool has_space(ulint size) + { + return(back()->m_used + size <= MAX_DATA_SIZE); + } + + /** Find the block that contains the pos. + @param pos absolute offset, it is updated to make it relative + to the block + @return the block containing the pos. */ + block_t* find(ulint& pos) + { + block_t* block; + + ut_ad(UT_LIST_GET_LEN(m_list) > 0); + + for (block = UT_LIST_GET_FIRST(m_list); + block != NULL; + block = UT_LIST_GET_NEXT(m_node, block)) { + + if (pos < block->used()) { + break; + } + + pos -= block->used(); + } + + ut_ad(block != NULL); + ut_ad(block->used() >= pos); + + return(block); + } + + /** + Allocate and add a new block to m_list */ + block_t* add_block() + { + block_t* block; + + if (m_heap == NULL) { + m_heap = mem_heap_create(sizeof(*block)); + } + + block = reinterpret_cast( + mem_heap_alloc(m_heap, sizeof(*block))); + + push_back(block); + + return(block); + } + +private: + /** Heap to use for memory allocation */ + mem_heap_t* m_heap; + + /** Allocated blocks */ + block_list_t m_list; + + /** Total size used by all blocks */ + ulint m_size; + + /** The default block, should always be the first element. This + is for backwards compatibility and to avoid an extra heap allocation + for small REDO log records */ + block_t m_first_block; +}; + +typedef dyn_buf_t mtr_buf_t; + +/** mtr_buf_t copier */ +struct mtr_buf_copy_t { + /** The copied buffer */ + mtr_buf_t m_buf; + + /** Append a block to the redo log buffer. + @return whether the appending should continue (always true here) */ + bool operator()(const mtr_buf_t::block_t* block) + { + byte* buf = m_buf.open(block->used()); + memcpy(buf, block->begin(), block->used()); + m_buf.close(buf + block->used()); + return(true); + } +}; + +#endif /* dyn0buf_h */ diff --git a/storage/innobase/include/dyn0dyn.h b/storage/innobase/include/dyn0dyn.h deleted file mode 100644 index 1bd10b6bf58..00000000000 --- a/storage/innobase/include/dyn0dyn.h +++ /dev/null @@ -1,199 +0,0 @@ -/***************************************************************************** - -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. - -This program is free software; you can redistribute it and/or modify it under -the terms of the GNU General Public License as published by the Free Software -Foundation; version 2 of the License. 
- -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA - -*****************************************************************************/ - -/**************************************************//** -@file include/dyn0dyn.h -The dynamically allocated array - -Created 2/5/1996 Heikki Tuuri -*******************************************************/ - -#ifndef dyn0dyn_h -#define dyn0dyn_h - -#include "univ.i" -#include "ut0lst.h" -#include "mem0mem.h" - -/** A block in a dynamically allocated array */ -struct dyn_block_t; -/** Dynamically allocated array */ -typedef dyn_block_t dyn_array_t; - -/** This is the initial 'payload' size of a dynamic array; -this must be > MLOG_BUF_MARGIN + 30! */ -#define DYN_ARRAY_DATA_SIZE 512 - -/*********************************************************************//** -Initializes a dynamic array. -@return initialized dyn array */ -UNIV_INLINE -dyn_array_t* -dyn_array_create( -/*=============*/ - dyn_array_t* arr) /*!< in/out memory buffer of - size sizeof(dyn_array_t) */ - MY_ATTRIBUTE((nonnull)); -/************************************************************//** -Frees a dynamic array. */ -UNIV_INLINE -void -dyn_array_free( -/*===========*/ - dyn_array_t* arr) /*!< in,own: dyn array */ - MY_ATTRIBUTE((nonnull)); -/*********************************************************************//** -Makes room on top of a dyn array and returns a pointer to a buffer in it. -After copying the elements, the caller must close the buffer using -dyn_array_close. -@return pointer to the buffer */ -UNIV_INLINE -byte* -dyn_array_open( -/*===========*/ - dyn_array_t* arr, /*!< in: dynamic array */ - ulint size) /*!< in: size in bytes of the buffer; MUST be - smaller than DYN_ARRAY_DATA_SIZE! */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/*********************************************************************//** -Closes the buffer returned by dyn_array_open. */ -UNIV_INLINE -void -dyn_array_close( -/*============*/ - dyn_array_t* arr, /*!< in: dynamic array */ - const byte* ptr) /*!< in: end of used space */ - MY_ATTRIBUTE((nonnull)); -/*********************************************************************//** -Makes room on top of a dyn array and returns a pointer to -the added element. The caller must copy the element to -the pointer returned. -@return pointer to the element */ -UNIV_INLINE -void* -dyn_array_push( -/*===========*/ - dyn_array_t* arr, /*!< in/out: dynamic array */ - ulint size) /*!< in: size in bytes of the element */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/************************************************************//** -Returns pointer to an element in dyn array. -@return pointer to element */ -UNIV_INLINE -void* -dyn_array_get_element( -/*==================*/ - const dyn_array_t* arr, /*!< in: dyn array */ - ulint pos) /*!< in: position of element - in bytes from array start */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -/************************************************************//** -Returns the size of stored data in a dyn array. 
-@return data size in bytes */ -UNIV_INLINE -ulint -dyn_array_get_data_size( -/*====================*/ - const dyn_array_t* arr) /*!< in: dyn array */ - MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); -/************************************************************//** -Gets the first block in a dyn array. -@param arr dyn array -@return first block */ -#define dyn_array_get_first_block(arr) (arr) -/************************************************************//** -Gets the last block in a dyn array. -@param arr dyn array -@return last block */ -#define dyn_array_get_last_block(arr) \ - ((arr)->heap ? UT_LIST_GET_LAST((arr)->base) : (arr)) -/********************************************************************//** -Gets the next block in a dyn array. -@param arr dyn array -@param block dyn array block -@return pointer to next, NULL if end of list */ -#define dyn_array_get_next_block(arr, block) \ - ((arr)->heap ? UT_LIST_GET_NEXT(list, block) : NULL) -/********************************************************************//** -Gets the previous block in a dyn array. -@param arr dyn array -@param block dyn array block -@return pointer to previous, NULL if end of list */ -#define dyn_array_get_prev_block(arr, block) \ - ((arr)->heap ? UT_LIST_GET_PREV(list, block) : NULL) -/********************************************************************//** -Gets the number of used bytes in a dyn array block. -@return number of bytes used */ -UNIV_INLINE -ulint -dyn_block_get_used( -/*===============*/ - const dyn_block_t* block) /*!< in: dyn array block */ - MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); -/********************************************************************//** -Gets pointer to the start of data in a dyn array block. -@return pointer to data */ -UNIV_INLINE -byte* -dyn_block_get_data( -/*===============*/ - const dyn_block_t* block) /*!< in: dyn array block */ - MY_ATTRIBUTE((nonnull, warn_unused_result, pure)); -/********************************************************//** -Pushes n bytes to a dyn array. */ -UNIV_INLINE -void -dyn_push_string( -/*============*/ - dyn_array_t* arr, /*!< in/out: dyn array */ - const byte* str, /*!< in: string to write */ - ulint len) /*!< in: string length */ - MY_ATTRIBUTE((nonnull)); - -/*#################################################################*/ - -/** @brief A block in a dynamically allocated array. -NOTE! Do not access the fields of the struct directly: the definition -appears here only for the compiler to know its size! 
*/ -struct dyn_block_t{ - mem_heap_t* heap; /*!< in the first block this is != NULL - if dynamic allocation has been needed */ - ulint used; /*!< number of data bytes used in this block; - DYN_BLOCK_FULL_FLAG is set when the block - becomes full */ - byte data[DYN_ARRAY_DATA_SIZE]; - /*!< storage for array elements */ - UT_LIST_BASE_NODE_T(dyn_block_t) base; - /*!< linear list of dyn blocks: this node is - used only in the first block */ - UT_LIST_NODE_T(dyn_block_t) list; - /*!< linear list node: used in all blocks */ -#ifdef UNIV_DEBUG - ulint buf_end;/*!< only in the debug version: if dyn - array is opened, this is the buffer - end offset, else this is 0 */ - ulint magic_n;/*!< magic number (DYN_BLOCK_MAGIC_N) */ -#endif -}; - - -#ifndef UNIV_NONINL -#include "dyn0dyn.ic" -#endif - -#endif diff --git a/storage/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic deleted file mode 100644 index f18f2e6dff9..00000000000 --- a/storage/innobase/include/dyn0dyn.ic +++ /dev/null @@ -1,306 +0,0 @@ -/***************************************************************************** - -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. - -This program is free software; you can redistribute it and/or modify it under -the terms of the GNU General Public License as published by the Free Software -Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along with -this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA - -*****************************************************************************/ - -/**************************************************//** -@file include/dyn0dyn.ic -The dynamically allocated array - -Created 2/5/1996 Heikki Tuuri -*******************************************************/ - -/** Value of dyn_block_t::magic_n */ -#define DYN_BLOCK_MAGIC_N 375767 -/** Flag for dyn_block_t::used that indicates a full block */ -#define DYN_BLOCK_FULL_FLAG 0x1000000UL - -/************************************************************//** -Adds a new block to a dyn array. -@return created block */ -UNIV_INTERN -dyn_block_t* -dyn_array_add_block( -/*================*/ - dyn_array_t* arr) /*!< in/out: dyn array */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - -/********************************************************************//** -Gets the number of used bytes in a dyn array block. -@return number of bytes used */ -UNIV_INLINE -ulint -dyn_block_get_used( -/*===============*/ - const dyn_block_t* block) /*!< in: dyn array block */ -{ - ut_ad(block); - - return((block->used) & ~DYN_BLOCK_FULL_FLAG); -} - -/********************************************************************//** -Gets pointer to the start of data in a dyn array block. -@return pointer to data */ -UNIV_INLINE -byte* -dyn_block_get_data( -/*===============*/ - const dyn_block_t* block) /*!< in: dyn array block */ -{ - ut_ad(block); - - return(const_cast(block->data)); -} - -/*********************************************************************//** -Initializes a dynamic array. 
-@return initialized dyn array */ -UNIV_INLINE -dyn_array_t* -dyn_array_create( -/*=============*/ - dyn_array_t* arr) /*!< in/out: memory buffer of - size sizeof(dyn_array_t) */ -{ - ut_ad(arr); -#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG -# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG" -#endif - - arr->heap = NULL; - arr->used = 0; - - ut_d(arr->buf_end = 0); - ut_d(arr->magic_n = DYN_BLOCK_MAGIC_N); - - return(arr); -} - -/************************************************************//** -Frees a dynamic array. */ -UNIV_INLINE -void -dyn_array_free( -/*===========*/ - dyn_array_t* arr) /*!< in: dyn array */ -{ - if (arr->heap != NULL) { - mem_heap_free(arr->heap); - } - - ut_d(arr->magic_n = 0); -} - -/*********************************************************************//** -Makes room on top of a dyn array and returns a pointer to the added element. -The caller must copy the element to the pointer returned. -@return pointer to the element */ -UNIV_INLINE -void* -dyn_array_push( -/*===========*/ - dyn_array_t* arr, /*!< in/out: dynamic array */ - ulint size) /*!< in: size in bytes of the element */ -{ - dyn_block_t* block; - ulint used; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - ut_ad(size <= DYN_ARRAY_DATA_SIZE); - ut_ad(size); - - block = arr; - - if (block->used + size > DYN_ARRAY_DATA_SIZE) { - /* Get the last array block */ - - block = dyn_array_get_last_block(arr); - - if (block->used + size > DYN_ARRAY_DATA_SIZE) { - block = dyn_array_add_block(arr); - } - } - - used = block->used; - - block->used = used + size; - ut_ad(block->used <= DYN_ARRAY_DATA_SIZE); - - return(block->data + used); -} - -/*********************************************************************//** -Makes room on top of a dyn array and returns a pointer to a buffer in it. -After copying the elements, the caller must close the buffer using -dyn_array_close. -@return pointer to the buffer */ -UNIV_INLINE -byte* -dyn_array_open( -/*===========*/ - dyn_array_t* arr, /*!< in: dynamic array */ - ulint size) /*!< in: size in bytes of the buffer; MUST be - smaller than DYN_ARRAY_DATA_SIZE! */ -{ - dyn_block_t* block; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - ut_ad(size <= DYN_ARRAY_DATA_SIZE); - ut_ad(size); - - block = arr; - - if (block->used + size > DYN_ARRAY_DATA_SIZE) { - /* Get the last array block */ - - block = dyn_array_get_last_block(arr); - - if (block->used + size > DYN_ARRAY_DATA_SIZE) { - block = dyn_array_add_block(arr); - ut_a(size <= DYN_ARRAY_DATA_SIZE); - } - } - - ut_ad(block->used <= DYN_ARRAY_DATA_SIZE); - ut_ad(arr->buf_end == 0); - ut_d(arr->buf_end = block->used + size); - - return(block->data + block->used); -} - -/*********************************************************************//** -Closes the buffer returned by dyn_array_open. */ -UNIV_INLINE -void -dyn_array_close( -/*============*/ - dyn_array_t* arr, /*!< in/out: dynamic array */ - const byte* ptr) /*!< in: end of used space */ -{ - dyn_block_t* block; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - - block = dyn_array_get_last_block(arr); - - ut_ad(arr->buf_end + block->data >= ptr); - - block->used = ptr - block->data; - - ut_ad(block->used <= DYN_ARRAY_DATA_SIZE); - - ut_d(arr->buf_end = 0); -} - -/************************************************************//** -Returns pointer to an element in dyn array. 
-@return pointer to element */ -UNIV_INLINE -void* -dyn_array_get_element( -/*==================*/ - const dyn_array_t* arr, /*!< in: dyn array */ - ulint pos) /*!< in: position of element - in bytes from array start */ -{ - const dyn_block_t* block; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - - /* Get the first array block */ - block = dyn_array_get_first_block(arr); - - if (arr->heap != NULL) { - for (;;) { - ulint used = dyn_block_get_used(block); - - if (pos < used) { - break; - } - - pos -= used; - block = UT_LIST_GET_NEXT(list, block); - ut_ad(block); - } - } - - ut_ad(block); - ut_ad(dyn_block_get_used(block) >= pos); - - return(const_cast(block->data) + pos); -} - -/************************************************************//** -Returns the size of stored data in a dyn array. -@return data size in bytes */ -UNIV_INLINE -ulint -dyn_array_get_data_size( -/*====================*/ - const dyn_array_t* arr) /*!< in: dyn array */ -{ - const dyn_block_t* block; - ulint sum = 0; - - ut_ad(arr); - ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N); - - if (arr->heap == NULL) { - - return(arr->used); - } - - /* Get the first array block */ - block = dyn_array_get_first_block(arr); - - while (block != NULL) { - sum += dyn_block_get_used(block); - block = dyn_array_get_next_block(arr, block); - } - - return(sum); -} - -/********************************************************//** -Pushes n bytes to a dyn array. */ -UNIV_INLINE -void -dyn_push_string( -/*============*/ - dyn_array_t* arr, /*!< in/out: dyn array */ - const byte* str, /*!< in: string to write */ - ulint len) /*!< in: string length */ -{ - ulint n_copied; - - while (len > 0) { - if (len > DYN_ARRAY_DATA_SIZE) { - n_copied = DYN_ARRAY_DATA_SIZE; - } else { - n_copied = len; - } - - memcpy(dyn_array_push(arr, n_copied), str, n_copied); - - str += n_copied; - len -= n_copied; - } -} diff --git a/storage/innobase/include/dyn0types.h b/storage/innobase/include/dyn0types.h new file mode 100644 index 00000000000..058a22f46e1 --- /dev/null +++ b/storage/innobase/include/dyn0types.h @@ -0,0 +1,39 @@ +/***************************************************************************** + +Copyright (c) 2013, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/dyn0types.h +The dynamically allocated buffer types and constants + +Created 2013-03-16 Sunny Bains +*******************************************************/ + +#ifndef dyn0types_h +#define dyn0types_h + +/** Value of dyn_block_t::magic_n */ +#define DYN_BLOCK_MAGIC_N 375767 + +/** This is the initial 'payload' size of a dynamic array; +this must be > MLOG_BUF_MARGIN + 30! 
*/ +#define DYN_ARRAY_DATA_SIZE 512 + +/** Flag for dyn_block_t::used that indicates a full block */ +#define DYN_BLOCK_FULL_FLAG 0x1000000UL + +#endif /* dyn0types_h */ diff --git a/storage/innobase/include/eval0eval.h b/storage/innobase/include/eval0eval.h index e3b1e6c16b6..f0e5b4006b6 100644 --- a/storage/innobase/include/eval0eval.h +++ b/storage/innobase/include/eval0eval.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,7 +36,6 @@ Created 12/29/1997 Heikki Tuuri Free the buffer from global dynamic memory for a value of a que_node, if it has been allocated in the above function. The freeing for pushed column values is done in sel_col_prefetch_buf_free. */ -UNIV_INTERN void eval_node_free_val_buf( /*===================*/ @@ -65,7 +64,7 @@ eval_node_set_int_val( lint val); /*!< in: value to set */ /*****************************************************************//** Gets an integer value from an expression node. -@return integer value */ +@return integer value */ UNIV_INLINE lint eval_node_get_int_val( @@ -91,7 +90,7 @@ eval_node_copy_val( que_node_t* node2); /*!< in: node to copy from */ /*****************************************************************//** Gets a iboolean value from a query node. -@return iboolean value */ +@return iboolean value */ UNIV_INLINE ibool eval_node_get_ibool_val( @@ -99,8 +98,7 @@ eval_node_get_ibool_val( que_node_t* node); /*!< in: query graph node */ /*****************************************************************//** Evaluates a comparison node. -@return the result of the comparison */ -UNIV_INTERN +@return the result of the comparison */ ibool eval_cmp( /*=====*/ diff --git a/storage/innobase/include/eval0eval.ic b/storage/innobase/include/eval0eval.ic index e4b1dd08017..2f759301c03 100644 --- a/storage/innobase/include/eval0eval.ic +++ b/storage/innobase/include/eval0eval.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -30,7 +30,6 @@ Created 12/29/1997 Heikki Tuuri /*****************************************************************//** Evaluates a function node. */ -UNIV_INTERN void eval_func( /*======*/ @@ -41,8 +40,7 @@ NOTE that this memory must be explicitly freed when the query graph is freed. If the node already has allocated buffer, that buffer is freed here. NOTE that this is the only function where dynamic memory should be allocated for a query node val field. -@return pointer to allocated buffer */ -UNIV_INTERN +@return pointer to allocated buffer */ byte* eval_node_alloc_val_buf( /*====================*/ @@ -54,7 +52,7 @@ eval_node_alloc_val_buf( /*****************************************************************//** Allocates a new buffer if needed. 
-@return pointer to buffer */ +@return pointer to buffer */ UNIV_INLINE byte* eval_node_ensure_val_buf( @@ -145,7 +143,7 @@ eval_node_set_int_val( /*****************************************************************//** Gets an integer non-SQL null value from an expression node. -@return integer value */ +@return integer value */ UNIV_INLINE lint eval_node_get_int_val( @@ -165,7 +163,7 @@ eval_node_get_int_val( /*****************************************************************//** Gets a iboolean value from a query node. -@return iboolean value */ +@return iboolean value */ UNIV_INLINE ibool eval_node_get_ibool_val( diff --git a/storage/innobase/include/eval0proc.h b/storage/innobase/include/eval0proc.h index 7755fb10343..6705c2c7b64 100644 --- a/storage/innobase/include/eval0proc.h +++ b/storage/innobase/include/eval0proc.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1998, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1998, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -33,7 +33,7 @@ Created 1/20/1998 Heikki Tuuri /**********************************************************************//** Performs an execution step of a procedure node. -@return query thread to run next or NULL */ +@return query thread to run next or NULL */ UNIV_INLINE que_thr_t* proc_step( @@ -41,39 +41,35 @@ proc_step( que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of an if-statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* if_step( /*====*/ que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of a while-statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* while_step( /*=======*/ que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of a for-loop node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* for_step( /*=====*/ que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of an assignment statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* assign_step( /*========*/ que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of a procedure call node. -@return query thread to run next or NULL */ +@return query thread to run next or NULL */ UNIV_INLINE que_thr_t* proc_eval_step( @@ -81,16 +77,14 @@ proc_eval_step( que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of an exit statement node. 
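All of the *_step() routines in this header follow the same convention: perform one unit of work for a query thread and return the thread to run next, or NULL when there is nothing left to run. The toy loop below illustrates only that calling convention; que_thr and while_step_sketch are simplified stand-ins, not the real query graph executor.

#include <iostream>

struct que_thr {
    int remaining_steps;
};

// One unit of work; returns the thread to run next or nullptr when done.
que_thr* while_step_sketch(que_thr* thr)
{
    if (thr->remaining_steps == 0) {
        return nullptr;                 // loop finished, nothing to run
    }
    --thr->remaining_steps;             // execute one iteration
    return thr;                         // same thread continues
}

int main()
{
    que_thr thr{3};
    int     steps = 0;
    // The caller just keeps invoking step functions until one returns NULL.
    while (while_step_sketch(&thr) != nullptr) {
        ++steps;
    }
    std::cout << "executed " << steps << " steps\n";
}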
-@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* exit_step( /*======*/ que_thr_t* thr); /*!< in: query thread */ /**********************************************************************//** Performs an execution step of a return-statement node. -@return query thread to run next or NULL */ -UNIV_INTERN +@return query thread to run next or NULL */ que_thr_t* return_step( /*========*/ diff --git a/storage/innobase/include/eval0proc.ic b/storage/innobase/include/eval0proc.ic index 81418bae2c9..cda3fd7b874 100644 --- a/storage/innobase/include/eval0proc.ic +++ b/storage/innobase/include/eval0proc.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1998, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1998, 2013, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,7 +29,7 @@ Created 1/20/1998 Heikki Tuuri /**********************************************************************//** Performs an execution step of a procedure node. -@return query thread to run next or NULL */ +@return query thread to run next or NULL */ UNIV_INLINE que_thr_t* proc_step( @@ -64,7 +64,7 @@ proc_step( /**********************************************************************//** Performs an execution step of a procedure call node. -@return query thread to run next or NULL */ +@return query thread to run next or NULL */ UNIV_INLINE que_thr_t* proc_eval_step( diff --git a/storage/innobase/include/fil0crypt.h b/storage/innobase/include/fil0crypt.h index fdc413e7520..acac155ef3f 100644 --- a/storage/innobase/include/fil0crypt.h +++ b/storage/innobase/include/fil0crypt.h @@ -128,7 +128,8 @@ fil_space_crypt_t * fil_space_create_crypt_data( /*========================*/ fil_encryption_t encrypt_mode, /*!< in: encryption mode */ - uint key_id); /*!< in: encryption key id */ + uint key_id) /*!< in: encryption key id */ + __attribute__((warn_unused_result)); /********************************************************************* Destroy crypt data */ @@ -211,7 +212,8 @@ UNIV_INTERN bool fil_space_check_encryption_read( /*============================*/ - ulint space); /*!< in: tablespace id */ + ulint space) /*!< in: tablespace id */ + __attribute__((warn_unused_result)); /****************************************************************** Decrypt a page @@ -222,10 +224,11 @@ fil_space_decrypt( /*==============*/ fil_space_crypt_t* crypt_data, /*!< in: crypt data */ byte* tmp_frame, /*!< in: temporary buffer */ - ulint page_size, /*!< in: page size */ + const page_size_t& page_size, /*!< in: page size */ byte* src_frame, /*!< in:out: page buffer */ - dberr_t* err); /*!< in: out: DB_SUCCESS or + dberr_t* err) /*!< in: out: DB_SUCCESS or error code */ + __attribute__((warn_unused_result)); /********************************************************************* Encrypt buffer page @@ -239,8 +242,9 @@ fil_space_encrypt( ulint offset, /*!< in: page no */ lsn_t lsn, /*!< in: page lsn */ byte* src_frame, /*!< in: page frame */ - ulint size, /*!< in: size of data to encrypt */ - byte* dst_frame); /*!< in: where to encrypt to */ + const page_size_t& page_size, /*!< in: page size */ + byte* dst_frame) /*!< in: where to encrypt to */ + __attribute__((warn_unused_result)); /********************************************************************* 
Decrypt buffer page @@ -250,10 +254,10 @@ UNIV_INTERN byte* fil_space_decrypt( /*==============*/ - ulint space, /*!< in: tablespace id */ - byte* src_frame, /*!< in: page frame */ - ulint page_size, /*!< in: size of data to encrypt */ - byte* dst_frame) /*!< in: where to decrypt to */ + ulint space, /*!< in: tablespace id */ + byte* src_frame, /*!< in: page frame */ + const page_size_t& page_size, /*!< in: page size */ + byte* dst_frame) /*!< in: where to decrypt to */ __attribute__((warn_unused_result)); /********************************************************************* @@ -265,8 +269,9 @@ UNIV_INTERN bool fil_space_verify_crypt_checksum( /*============================*/ - const byte* src_frame,/*!< in: page frame */ - ulint zip_size); /*!< in: size of data to encrypt */ + const byte* src_frame,/*!< in: page frame */ + const page_size_t& page_size) /*!< in: page size */ + __attribute__((warn_unused_result)); /********************************************************************* Init threads for key rotation */ @@ -408,9 +413,9 @@ fil_encrypt_buf( ulint offset, /*!< in: Page offset */ lsn_t lsn, /*!< in: lsn */ byte* src_frame, /*!< in: Source page to be encrypted */ - ulint zip_size, /*!< in: compressed size if - row_format compressed */ - byte* dst_frame); /*!< in: outbut buffer */ + const page_size_t& page_size, /*!< in: page size */ + byte* dst_frame) /*!< in: outbut buffer */ + __attribute__((warn_unused_result)); /****************************************************************** Calculate post encryption checksum @@ -420,8 +425,9 @@ UNIV_INTERN ulint fil_crypt_calculate_checksum( /*=========================*/ - ulint zip_size, /*!< in: zip_size or 0 */ - byte* dst_frame); /*!< in: page where to calculate */ + const page_size_t& page_size, /*!< in: page size */ + byte* dst_frame) /*!< in: page where to calculate */ + __attribute__((warn_unused_result)); #ifndef UNIV_NONINL #include "fil0crypt.ic" diff --git a/storage/innobase/include/fil0crypt.ic b/storage/innobase/include/fil0crypt.ic index 5fafa6cd3f0..1a91aee750e 100644 --- a/storage/innobase/include/fil0crypt.ic +++ b/storage/innobase/include/fil0crypt.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2015, MariaDB Corporation. +Copyright (c) 2015, 2016, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -66,3 +66,54 @@ fil_page_encryption_status( } return 0; } + +/*******************************************************************//** +Get current encryption mode from crypt_data. +@return string representation */ +UNIV_INLINE +const char * +fil_crypt_get_mode( +/*===============*/ + const fil_space_crypt_t* crypt_data) +{ + ut_ad(crypt_data != NULL); + + switch(crypt_data->encryption) { + case FIL_SPACE_ENCRYPTION_DEFAULT: + return("Default tablespace encryption mode"); + break; + case FIL_SPACE_ENCRYPTION_ON: + return("Tablespace encrypted"); + break; + case FIL_SPACE_ENCRYPTION_OFF: + return("Tablespace not encrypted"); + break; + default: + ut_error; + } + + return ("NULL"); +} + +/*******************************************************************//** +Get current encryption type from crypt_data. 
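fil_crypt_get_mode() and fil_crypt_get_type() both follow the same enum-to-string pattern: an exhaustive switch with ut_error on a value that should be impossible. A minimal standalone version of that pattern, with invented names (encryption_mode, encryption_mode_name) and abort() standing in for ut_error, looks like this:

#include <cstdlib>
#include <iostream>

enum class encryption_mode {
    DEFAULT_MODE,
    ON,
    OFF
};

static const char* encryption_mode_name(encryption_mode mode)
{
    switch (mode) {
    case encryption_mode::DEFAULT_MODE:
        return "Default tablespace encryption mode";
    case encryption_mode::ON:
        return "Tablespace encrypted";
    case encryption_mode::OFF:
        return "Tablespace not encrypted";
    }
    std::abort();                       // unreachable for valid input
}

int main()
{
    std::cout << encryption_mode_name(encryption_mode::ON) << '\n';
}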
+@return string representation */ +UNIV_INLINE +const char * +fil_crypt_get_type( + const fil_space_crypt_t* crypt_data) +{ + ut_ad(crypt_data != NULL); + switch (crypt_data->type) { + case CRYPT_SCHEME_UNENCRYPTED: + return("scheme unencrypted"); + break; + case CRYPT_SCHEME_1: + return("scheme encrypted"); + break; + default: + ut_error; + } + + return ("NULL"); +} diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 0c7ed7b3ab0..d04355c3912 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -30,28 +30,341 @@ Created 10/25/1995 Heikki Tuuri #ifndef UNIV_INNOCHECKSUM +#include "log0recv.h" #include "dict0types.h" -#include "ut0byte.h" -#include "os0file.h" +#include "page0size.h" #include "hash0hash.h" #ifndef UNIV_HOTBACKUP -#include "sync0rw.h" #include "ibuf0types.h" +#else #include "log0log.h" +#include "os0file.h" +#include "m_string.h" #endif /* !UNIV_HOTBACKUP */ #include +#include + +extern const char general_space_name[]; // Forward declaration struct trx_t; +class page_id_t; +class truncate_t; +struct fil_node_t; struct fil_space_t; +struct btr_create_t; +/* structure containing encryption specification */ +typedef struct fil_space_crypt_struct fil_space_crypt_t; + +typedef std::list > space_name_list_t; + +/** File types */ +enum fil_type_t { + /** temporary tablespace (temporary undo log or tables) */ + FIL_TYPE_TEMPORARY, + /** a tablespace that is being imported (no logging until finished) */ + FIL_TYPE_IMPORT, + /** persistent tablespace (for system, undo log or tables) */ + FIL_TYPE_TABLESPACE, + /** redo log covering changes to files of FIL_TYPE_TABLESPACE */ + FIL_TYPE_LOG +}; + +/** Check if fil_type is any of FIL_TYPE_TEMPORARY, FIL_TYPE_IMPORT +or FIL_TYPE_TABLESPACE. +@param[in] type variable of type fil_type_t +@return true if any of FIL_TYPE_TEMPORARY, FIL_TYPE_IMPORT +or FIL_TYPE_TABLESPACE */ +inline +bool +fil_type_is_data( + fil_type_t type) +{ + return(type == FIL_TYPE_TEMPORARY + || type == FIL_TYPE_IMPORT + || type == FIL_TYPE_TABLESPACE); +} + +struct fil_node_t; + +/** Tablespace or log data space */ +struct fil_space_t { + char* name; /*!< Tablespace name */ + ulint id; /*!< space id */ + ib_uint64_t tablespace_version; + /*!< in DISCARD/IMPORT this timestamp + is used to check if we should ignore + an insert buffer merge request for a + page because it actually was for the + previous incarnation of the space */ + lsn_t max_lsn; + /*!< LSN of the most recent + fil_names_write_if_was_clean(). + Reset to 0 by fil_names_clear(). + Protected by log_sys->mutex. + If and only if this is nonzero, the + tablespace will be in named_spaces. */ + bool stop_ios;/*!< true if we want to rename the + .ibd file of tablespace and want to + stop temporarily posting of new i/o + requests on the file */ + bool stop_new_ops; + /*!< we set this true when we start + deleting a single-table tablespace. + When this is set following new ops + are not allowed: + * read IO request + * ibuf merge + * file flush + Note that we can still possibly have + new write operations because we don't + check this flag when doing flush + batches. */ + bool is_being_truncated; + /*!< this is set to true when we prepare to + truncate a single-table tablespace and its + .ibd file */ +#ifdef UNIV_DEBUG + ulint redo_skipped_count; + /*!< reference count for operations who want + to skip redo log in the file space in order + to make fsp_space_modify_check pass. 
*/ +#endif + fil_type_t purpose;/*!< purpose */ + UT_LIST_BASE_NODE_T(fil_node_t) chain; + /*!< base node for the file chain */ + ulint size; /*!< tablespace file size in pages; + 0 if not known yet */ + ulint size_in_header; + /* FSP_SIZE in the tablespace header; + 0 if not known yet */ + ulint free_len; + /*!< length of the FSP_FREE list */ + ulint free_limit; + /*!< contents of FSP_FREE_LIMIT */ + ulint flags; /*!< tablespace flags; see + fsp_flags_is_valid(), + page_size_t(ulint) (constructor) */ + ulint n_reserved_extents; + /*!< number of reserved free extents for + ongoing operations like B-tree page split */ + ulint n_pending_flushes; /*!< this is positive when flushing + the tablespace to disk; dropping of the + tablespace is forbidden if this is positive */ + ulint n_pending_ops;/*!< this is positive when we + have pending operations against this + tablespace. The pending operations can + be ibuf merges or lock validation code + trying to read a block. + Dropping of the tablespace is forbidden + if this is positive. + Protected by fil_system->mutex. */ + hash_node_t hash; /*!< hash chain node */ + hash_node_t name_hash;/*!< hash chain the name_hash table */ +#ifndef UNIV_HOTBACKUP + rw_lock_t latch; /*!< latch protecting the file space storage + allocation */ +#endif /* !UNIV_HOTBACKUP */ + UT_LIST_NODE_T(fil_space_t) unflushed_spaces; + /*!< list of spaces with at least one unflushed + file we have written to */ + UT_LIST_NODE_T(fil_space_t) named_spaces; + /*!< list of spaces for which MLOG_FILE_NAME + records have been issued */ + bool is_in_unflushed_spaces; + /*!< true if this space is currently in + unflushed_spaces */ + UT_LIST_NODE_T(fil_space_t) space_list; + /*!< list of all spaces */ + + /** Compression algorithm */ + Compression::Type compression_type; -typedef std::list space_name_list_t; + bool printed_compression_failure; + /*!< true if we have already printed + compression failure */ + fil_space_crypt_t* crypt_data; + /* Tablespace crypt information or + NULL */ + bool read_page0; + /*!< true if page 0 of this tablespace + is read */ + + ulint file_block_size;/*!< file system block size */ + + /** Release the reserved free extents. + @param[in] n_reserved number of reserved extents */ + void release_free_extents(ulint n_reserved); + + ulint magic_n;/*!< FIL_SPACE_MAGIC_N */ +}; + +/** Value of fil_space_t::magic_n */ +#define FIL_SPACE_MAGIC_N 89472 + +/** File node of a tablespace or the log data space */ +struct fil_node_t { + fil_space_t* space; /*!< backpointer to the space where this node + belongs */ + char* name; /*!< path to the file */ + bool is_open;/*!< true if file is open */ + os_file_t handle; /*!< OS handle to the file, if file open */ + os_event_t sync_event;/*!< Condition event to group and + serialize calls to fsync */ + bool is_raw_disk;/*!< true if the 'file' is actually a raw + device or a raw disk partition */ + ulint size; /*!< size of the file in database pages, 0 if + not known yet; the possible last incomplete + megabyte may be ignored if space == 0 */ + ulint init_size; + /*!< initial size of the file in database pages, + defaults to FIL_IBD_FILE_INITIAL_SIZE. */ + ulint max_size; + /*!< maximum size of the file in database pages; + 0 if there is no maximum size. 
*/ + ulint n_pending; + /*!< count of pending i/o's on this file; + closing of the file is not allowed if + this is > 0 */ + ulint n_pending_flushes; + /*!< count of pending flushes on this file; + closing of the file is not allowed if + this is > 0 */ + bool being_extended; + /*!< true if the node is currently + being extended. */ + int64_t modification_counter;/*!< when we write to the file we + increment this by one */ + int64_t flush_counter;/*!< up to what + modification_counter value we have + flushed the modifications to disk */ + UT_LIST_NODE_T(fil_node_t) chain; + /*!< link field for the file chain */ + UT_LIST_NODE_T(fil_node_t) LRU; + /*!< link field for the LRU list */ + ulint magic_n;/*!< FIL_NODE_MAGIC_N */ + + /** true if the FS where the file is located supports PUNCH HOLE */ + bool punch_hole; + + /** Block size to use for punching holes */ + ulint block_size; + + /** True if atomic write is enabled for this file */ + bool atomic_write; +}; + +/** Value of fil_node_t::magic_n */ +#define FIL_NODE_MAGIC_N 89389 + +/** Common InnoDB file extentions */ +enum ib_extention { + NO_EXT = 0, + IBD = 1, + ISL = 2, + CFG = 3 +}; +extern const char* dot_ext[]; +#define DOT_IBD dot_ext[IBD] +#define DOT_ISL dot_ext[ISL] +#define DOT_CFG dot_ext[CFG] + +/** Wrapper for a path to a directory. +This folder may or may not yet esist. Since not all directory paths +end in "/", we should only use this for a directory path or a filepath +that has a ".ibd" extension. */ +class Folder +{ +public: + /** Default constructor */ + Folder() : m_folder(NULL) {} + + /** Constructor + @param[in] path pathname (not necessarily NUL-terminated) + @param[in] len length of the path, in bytes */ + Folder(const char* path, size_t len); + + /** Assignment operator + @param[in] folder folder string provided */ + class Folder& operator=(const char* path); + + /** Destructor */ + ~Folder() + { + ut_free(m_folder); + } + + /** Implicit type conversion + @return the wrapped object */ + operator const char*() const + { + return(m_folder); + } + + /** Explicit type conversion + @return the wrapped object */ + const char* operator()() const + { + return(m_folder); + } + + /** return the length of m_folder + @return the length of m_folder */ + size_t len() + { + return m_folder_len; + } + + /** Determine if two folders are equal + @param[in] other folder to compare to + @return whether the folders are equal */ + bool operator==(const Folder& other) const; + + /** Determine if the left folder is the same or an ancestor of + (contains) the right folder. + @param[in] other folder to compare to + @return whether this is the same or an ancestor or the other folder. */ + bool operator>=(const Folder& other) const; + + /** Determine if the left folder is an ancestor of (contains) + the right folder. + @param[in] other folder to compare to + @return whether this is an ancestor of the other folder */ + bool operator>(const Folder& other) const; + + /** Determine if the directory referenced by m_folder exists. + @return whether the directory exists */ + bool exists(); + +private: + /** Build the basic folder name from the path and length provided + @param[in] path pathname (not necessarily NUL-terminated) + @param[in] len length of the path, in bytes */ + void make_path(const char* path, size_t len); + + /** Resolve a relative path in m_folder to an absolute path + in m_abs_path setting m_abs_len. 
*/ + void make_abs_path(); + + /** The wrapped folder string */ + char* m_folder; + + /** Length of m_folder */ + size_t m_folder_len; + + /** A full absolute path to the same file. */ + char m_abs_path[FN_REFLEN + 2]; + + /** Length of m_abs_path to the deepest folder */ + size_t m_abs_len; +}; /** When mysqld is run, the default directory "." is the mysqld datadir, but in the MySQL Embedded Server Library and mysqlbackup it is not the default directory, and we must set the base file path explicitly */ extern const char* fil_path_to_mysql_datadir; +extern Folder folder_mysql_datadir; /** Initial size of a single-table tablespace in pages */ #define FIL_IBD_FILE_INITIAL_SIZE 4 @@ -66,17 +379,15 @@ of the address is FIL_NULL, the address is considered undefined. */ typedef byte fil_faddr_t; /*!< 'type' definition in C: an address stored in a file page is a string of bytes */ -#endif /* !UNIV_INNOCHECKSUM */ - #define FIL_ADDR_PAGE 0 /* first in address is the page offset */ #define FIL_ADDR_BYTE 4 /* then comes 2-byte byte offset within page*/ - +#endif /* !UNIV_INNOCHECKSUM */ #define FIL_ADDR_SIZE 6 /* address size is 6 bytes */ #ifndef UNIV_INNOCHECKSUM /** File space address */ -struct fil_addr_t{ +struct fil_addr_t { ulint page; /*!< page number within a space */ ulint boffset; /*!< byte offset within the page */ }; @@ -135,11 +446,33 @@ extern fil_addr_t fil_addr_null; used to encrypt the page + 32-bit checksum or 64 bits of zero if no encryption */ -#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID 34 /*!< starting from 4.1.x this - contains the space id of the page */ +/** If page type is FIL_PAGE_COMPRESSED then the 8 bytes starting at +FIL_PAGE_FILE_FLUSH_LSN are broken down as follows: */ + +/** Control information version format (u8) */ +static const ulint FIL_PAGE_VERSION = FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION; + +/** Compression algorithm (u8) */ +static const ulint FIL_PAGE_ALGORITHM_V1 = FIL_PAGE_VERSION + 1; + +/** Original page type (u16) */ +static const ulint FIL_PAGE_ORIGINAL_TYPE_V1 = FIL_PAGE_ALGORITHM_V1 + 1; + +/** Original data size in bytes (u16)*/ +static const ulint FIL_PAGE_ORIGINAL_SIZE_V1 = FIL_PAGE_ORIGINAL_TYPE_V1 + 2; + +/** Size after compression (u16) */ +static const ulint FIL_PAGE_COMPRESS_SIZE_V1 = FIL_PAGE_ORIGINAL_SIZE_V1 + 2; + +/** This overloads FIL_PAGE_FILE_FLUSH_LSN for RTREE Split Sequence Number */ +#define FIL_RTREE_SPLIT_SEQ_NUM FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + +/** starting from 4.1.x this contains the space id of the page */ +#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID 34 + #define FIL_PAGE_SPACE_ID FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID -#define FIL_PAGE_DATA 38 /*!< start of the data on the page */ +#define FIL_PAGE_DATA 38U /*!< start of the data on the page */ /* Following are used when page compression is used */ #define FIL_PAGE_COMPRESSED_SIZE 2 /*!< Number of bytes used to store actual payload data size on @@ -161,6 +494,7 @@ extern fil_addr_t fil_addr_null; then encrypted */ #define FIL_PAGE_PAGE_COMPRESSED 34354 /*!< page compressed page */ #define FIL_PAGE_INDEX 17855 /*!< B-tree node */ +#define FIL_PAGE_RTREE 17854 /*!< B-tree node */ #define FIL_PAGE_UNDO_LOG 2 /*!< Undo log page */ #define FIL_PAGE_INODE 3 /*!< Index node */ #define FIL_PAGE_IBUF_FREE_LIST 4 /*!< Insert buffer free list */ @@ -174,20 +508,26 @@ extern fil_addr_t fil_addr_null; #define FIL_PAGE_TYPE_BLOB 10 /*!< Uncompressed BLOB page */ #define FIL_PAGE_TYPE_ZBLOB 11 /*!< First compressed BLOB page */ #define FIL_PAGE_TYPE_ZBLOB2 12 /*!< Subsequent compressed BLOB 
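
The Folder wrapper declared above is used together with folder_mysql_datadir to answer questions such as "does this .ibd path live inside the data directory?". Its comparison operators are only declared in this header, so the sketch below uses a deliberately simplified stand-in that models just the documented semantics (operator>= meaning "same directory or an ancestor of") by prefix comparison on slash-terminated paths; the real class resolves relative paths to absolute ones first.

    #include <iostream>
    #include <string>

    /* Simplified stand-in for the Folder class above: it only models the
       documented operator semantics, not the real implementation. */
    class FolderSketch {
    public:
        explicit FolderSketch(const std::string& path)
            : m_folder(path)
        {
            /* Normalize: ensure a single trailing '/'. */
            if (m_folder.empty() || m_folder[m_folder.size() - 1] != '/') {
                m_folder += '/';
            }
        }

        bool operator==(const FolderSketch& other) const
        {
            return(m_folder == other.m_folder);
        }

        /* "This is the same as, or an ancestor of (contains), other." */
        bool operator>=(const FolderSketch& other) const
        {
            return(other.m_folder.compare(0, m_folder.size(), m_folder) == 0);
        }

    private:
        std::string m_folder;
    };

    int main()
    {
        FolderSketch datadir("/var/lib/mysql");
        FolderSketch table_dir("/var/lib/mysql/test");
        FolderSketch other_dir("/var/lib/mysql-files");

        /* Prints 1 then 0: the trailing '/' keeps "mysql-files" from
           matching as a child of "mysql". */
        std::cout << (datadir >= table_dir)
                  << (datadir >= other_dir) << std::endl;
        return 0;
    }
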
page */ -#define FIL_PAGE_TYPE_COMPRESSED 13 /*!< Compressed page */ -#define FIL_PAGE_TYPE_LAST FIL_PAGE_TYPE_COMPRESSED +#define FIL_PAGE_TYPE_UNKNOWN 13 /*!< In old tablespaces, garbage + in FIL_PAGE_TYPE is replaced with this + value when flushing pages. */ +#define FIL_PAGE_COMPRESSED 14 /*!< Compressed page */ + +/** Used by i_s.cc to index into the text description. */ +#define FIL_PAGE_TYPE_LAST FIL_PAGE_TYPE_UNKNOWN /*!< Last page type */ /* @} */ -/** Space types @{ */ -#define FIL_TABLESPACE 501 /*!< tablespace */ -#define FIL_LOG 502 /*!< redo log */ -/* @} */ +/** macro to check whether the page type is index (Btree or Rtree) type */ +#define fil_page_type_is_index(page_type) \ + (page_type == FIL_PAGE_INDEX || page_type == FIL_PAGE_RTREE) -#ifndef UNIV_INNOCHECKSUM +/** Check whether the page is index page (either regular Btree index or Rtree +index */ +#define fil_page_index_page_check(page) \ + fil_page_type_is_index(fil_page_get_type(page)) -/* structure containing encryption specification */ -typedef struct fil_space_crypt_struct fil_space_crypt_t; +#ifndef UNIV_INNOCHECKSUM /** The number of fsyncs done to the log */ extern ulint fil_n_log_flushes; @@ -200,152 +540,12 @@ extern ulint fil_n_pending_tablespace_flushes; /** Number of files currently open */ extern ulint fil_n_file_opened; -struct fsp_open_info { - ibool success; /*!< Has the tablespace been opened? */ - const char* check_msg; /*!< fil_check_first_page() message */ - ibool valid; /*!< Is the tablespace valid? */ - os_file_t file; /*!< File handle */ - char* filepath; /*!< File path to open */ - lsn_t lsn; /*!< Flushed LSN from header page */ - ulint id; /*!< Space ID */ - ulint flags; /*!< Tablespace flags */ - ulint encryption_error; /*!< if an encryption error occurs */ -#ifdef UNIV_LOG_ARCHIVE - ulint arch_log_no; /*!< latest archived log file number */ -#endif /* UNIV_LOG_ARCHIVE */ - fil_space_crypt_t* crypt_data; /*!< crypt data */ - dict_table_t* table; /*!< table */ -}; - -struct fil_space_t; - -/** File node of a tablespace or the log data space */ -struct fil_node_t { - fil_space_t* space; /*!< backpointer to the space where this node - belongs */ - char* name; /*!< path to the file */ - ibool open; /*!< TRUE if file open */ - os_file_t handle; /*!< OS handle to the file, if file open */ - os_event_t sync_event;/*!< Condition event to group and - serialize calls to fsync */ - ibool is_raw_disk;/*!< TRUE if the 'file' is actually a raw - device or a raw disk partition */ - ulint size; /*!< size of the file in database pages, 0 if - not known yet; the possible last incomplete - megabyte may be ignored if space == 0 */ - ulint n_pending; - /*!< count of pending i/o's on this file; - closing of the file is not allowed if - this is > 0 */ - ulint n_pending_flushes; - /*!< count of pending flushes on this file; - closing of the file is not allowed if - this is > 0 */ - ibool being_extended; - /*!< TRUE if the node is currently - being extended. 
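
The fil_page_type_is_index() and fil_page_index_page_check() macros above accept two page types because spatial indexes store their pages with the new FIL_PAGE_RTREE type. A self-contained sketch using the constants and the macro as defined in this hunk:

    #include <iostream>

    /* Constants and macro re-declared from the hunk above. */
    #define FIL_PAGE_INDEX      17855   /* B-tree node */
    #define FIL_PAGE_RTREE      17854   /* R-tree (spatial index) node */
    #define FIL_PAGE_UNDO_LOG   2       /* Undo log page */

    #define fil_page_type_is_index(page_type) \
        (page_type == FIL_PAGE_INDEX || page_type == FIL_PAGE_RTREE)

    int main()
    {
        std::cout << fil_page_type_is_index(FIL_PAGE_INDEX)     /* 1 */
                  << fil_page_type_is_index(FIL_PAGE_RTREE)     /* 1 */
                  << fil_page_type_is_index(FIL_PAGE_UNDO_LOG)  /* 0 */
                  << std::endl;
        return 0;
    }
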
*/ - ib_int64_t modification_counter;/*!< when we write to the file we - increment this by one */ - ib_int64_t flush_counter;/*!< up to what - modification_counter value we have - flushed the modifications to disk */ - ulint file_block_size;/*!< file system block size */ - UT_LIST_NODE_T(fil_node_t) chain; - /*!< link field for the file chain */ - UT_LIST_NODE_T(fil_node_t) LRU; - /*!< link field for the LRU list */ - ulint magic_n;/*!< FIL_NODE_MAGIC_N */ -}; - -/** Value of fil_node_t::magic_n */ -#define FIL_NODE_MAGIC_N 89389 - -/** Tablespace or log data space: let us call them by a common name space */ -struct fil_space_t { - char* name; /*!< space name = the path to the first file in - it */ - ulint id; /*!< space id */ - ib_int64_t tablespace_version; - /*!< in DISCARD/IMPORT this timestamp - is used to check if we should ignore - an insert buffer merge request for a - page because it actually was for the - previous incarnation of the space */ - ibool mark; /*!< this is set to TRUE at database startup if - the space corresponds to a table in the InnoDB - data dictionary; so we can print a warning of - orphaned tablespaces */ - ibool stop_ios;/*!< TRUE if we want to rename the - .ibd file of tablespace and want to - stop temporarily posting of new i/o - requests on the file */ - ibool stop_new_ops; - /*!< we set this TRUE when we start - deleting a single-table tablespace. - When this is set following new ops - are not allowed: - * read IO request - * ibuf merge - * file flush - Note that we can still possibly have - new write operations because we don't - check this flag when doing flush - batches. */ - ulint purpose;/*!< FIL_TABLESPACE, FIL_LOG, or - FIL_ARCH_LOG */ - UT_LIST_BASE_NODE_T(fil_node_t) chain; - /*!< base node for the file chain */ - ulint size; /*!< space size in pages; 0 if a single-table - tablespace whose size we do not know yet; - last incomplete megabytes in data files may be - ignored if space == 0 */ - ulint flags; /*!< tablespace flags; see - fsp_flags_is_valid(), - fsp_flags_get_zip_size() */ - ulint n_reserved_extents; - /*!< number of reserved free extents for - ongoing operations like B-tree page split */ - ulint n_pending_flushes; /*!< this is positive when flushing - the tablespace to disk; dropping of the - tablespace is forbidden if this is positive */ - ulint n_pending_ops;/*!< this is positive when we - have pending operations against this - tablespace. The pending operations can - be ibuf merges or lock validation code - trying to read a block. 
- Dropping of the tablespace is forbidden - if this is positive */ - hash_node_t hash; /*!< hash chain node */ - hash_node_t name_hash;/*!< hash chain the name_hash table */ -#ifndef UNIV_HOTBACKUP - rw_lock_t latch; /*!< latch protecting the file space storage - allocation */ -#endif /* !UNIV_HOTBACKUP */ - UT_LIST_NODE_T(fil_space_t) unflushed_spaces; - /*!< list of spaces with at least one unflushed - file we have written to */ - bool is_in_unflushed_spaces; - /*!< true if this space is currently in - unflushed_spaces */ - bool printed_compression_failure; - /*!< true if we have already printed - compression failure */ - UT_LIST_NODE_T(fil_space_t) space_list; - /*!< list of all spaces */ - fil_space_crypt_t* crypt_data; - ulint file_block_size;/*!< file system block size */ - ulint magic_n;/*!< FIL_SPACE_MAGIC_N */ -}; - -/** Value of fil_space_t::magic_n */ -#define FIL_SPACE_MAGIC_N 89472 - /** The tablespace memory cache; also the totality of logs (the log data space) is stored here; below we talk about tablespaces, but also the ib_logfiles form a 'space' and it is handled here */ struct fil_system_t { #ifndef UNIV_HOTBACKUP - ib_mutex_t mutex; /*!< The mutex protecting the cache */ + ib_mutex_t mutex; /*!< The mutex protecting the cache */ #endif /* !UNIV_HOTBACKUP */ hash_table_t* spaces; /*!< The hash table of spaces in the system; they are hashed on the space @@ -372,7 +572,7 @@ struct fil_system_t { ulint n_open; /*!< number of files currently open */ ulint max_n_open; /*!< n_open is not allowed to exceed this */ - ib_int64_t modification_counter;/*!< when we write to a file we + int64_t modification_counter;/*!< when we write to a file we increment this by one */ ulint max_assigned_id;/*!< maximum space id in the existing tables, or assigned during the time @@ -380,7 +580,7 @@ struct fil_system_t { startup we scan the data dictionary and set here the maximum of the space id's of the tables there */ - ib_int64_t tablespace_version; + int64_t tablespace_version; /*!< a counter which is incremented for every space object memory creation; every space mem object gets a @@ -390,113 +590,151 @@ struct fil_system_t { request */ UT_LIST_BASE_NODE_T(fil_space_t) space_list; /*!< list of all file spaces */ + UT_LIST_BASE_NODE_T(fil_space_t) named_spaces; + /*!< list of all file spaces + for which a MLOG_FILE_NAME + record has been written since + the latest redo log checkpoint. + Protected only by log_sys->mutex. */ ibool space_id_reuse_warned; /* !< TRUE if fil_space_create() has issued a warning about potential space_id reuse */ }; -/** The tablespace memory cache. This variable is NULL before the module is -initialized. */ -extern fil_system_t* fil_system; - #ifndef UNIV_HOTBACKUP +/** Look up a tablespace. +The caller should hold an InnoDB table lock or a MDL that prevents +the tablespace from being dropped during the operation, +or the caller should be in single-threaded crash recovery mode +(no user connections that could drop tablespaces). +If this is not the case, fil_space_acquire() and fil_space_release() +should be used instead. +@param[in] id tablespace ID +@return tablespace, or NULL if not found */ +fil_space_t* +fil_space_get( + ulint id) + __attribute__((warn_unused_result)); + /*******************************************************************//** Returns the version number of a tablespace, -1 if not found. 
@return version number, -1 if the tablespace does not exist in the memory cache */ UNIV_INTERN -ib_int64_t +ib_uint64_t fil_space_get_version( /*==================*/ ulint id); /*!< in: space id */ -/*******************************************************************//** -Returns the latch of a file space. -@return latch protecting storage allocation */ -UNIV_INTERN +/** Returns the latch of a file space. +@param[in] id space id +@param[out] flags tablespace flags +@return latch protecting storage allocation */ rw_lock_t* fil_space_get_latch( -/*================*/ - ulint id, /*!< in: space id */ - ulint* zip_size);/*!< out: compressed page size, or - 0 for uncompressed tablespaces */ -/*******************************************************************//** -Returns the type of a file space. -@return FIL_TABLESPACE or FIL_LOG */ -UNIV_INTERN -ulint + ulint id, + ulint* flags); + +/** Gets the type of a file space. +@param[in] id tablespace identifier +@return file type */ +fil_type_t fil_space_get_type( -/*===============*/ - ulint id); /*!< in: space id */ + ulint id); + +/** Note that a tablespace has been imported. +It is initially marked as FIL_TYPE_IMPORT so that no logging is +done during the import process when the space ID is stamped to each page. +Now we change it to FIL_SPACE_TABLESPACE to start redo and undo logging. +NOTE: temporary tablespaces are never imported. +@param[in] id tablespace identifier */ +void +fil_space_set_imported( + ulint id); +# ifdef UNIV_DEBUG +/** Determine if a tablespace is temporary. +@param[in] id tablespace identifier +@return whether it is a temporary tablespace */ +bool +fsp_is_temporary(ulint id) +__attribute__((warn_unused_result, pure)); +# endif /* UNIV_DEBUG */ #endif /* !UNIV_HOTBACKUP */ -/*******************************************************************//** -Appends a new file to the chain of files of a space. File must be closed. -@return pointer to the file name, or NULL on error */ -UNIV_INTERN + +/** Append a file to the chain of files of a space. +@param[in] name file name of a file that is not open +@param[in] size file size in entire database blocks +@param[in,out] space tablespace from fil_space_create() +@param[in] is_raw whether this is a raw device or partition +@param[in] atomic_write true if atomic write enabled +@param[in] max_pages maximum number of pages in file, +ULINT_MAX means the file size is unlimited. +@return pointer to the file name +@retval NULL if error */ char* fil_node_create( -/*============*/ - const char* name, /*!< in: file name (file must be closed) */ - ulint size, /*!< in: file size in database blocks, rounded - downwards to an integer */ - ulint id, /*!< in: space id where to append */ - ibool is_raw) /*!< in: TRUE if a raw device or - a raw disk partition */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); -#ifdef UNIV_LOG_ARCHIVE -/****************************************************************//** -Drops files from the start of a file space, so that its size is cut by -the amount given. */ -UNIV_INTERN -void -fil_space_truncate_start( -/*=====================*/ - ulint id, /*!< in: space id */ - ulint trunc_len); /*!< in: truncate by this much; it is an error - if this does not equal to the combined size of - some initial files in the space */ -#endif /* UNIV_LOG_ARCHIVE */ -/*******************************************************************//** -Creates a space memory object and puts it to the 'fil system' hash table. -If there is an error, prints an error message to the .err log. 
-@return TRUE if success */ -UNIV_INTERN -ibool + const char* name, + ulint size, + fil_space_t* space, + bool is_raw, + bool atomic_write, + ulint max_pages = ULINT_MAX) + __attribute__((warn_unused_result)); + +/** Create a space memory object and put it to the fil_system hash table. +The tablespace name is independent from the tablespace file-name. +Error messages are issued to the server log. +@param[in] name tablespace name +@param[in] id tablespace identifier +@param[in] flags tablespace flags +@param[in] purpose tablespace purpose +@return pointer to created tablespace, to be filled in with fil_node_create() +@retval NULL on failure (such as when the same tablespace exists) */ +fil_space_t* fil_space_create( -/*=============*/ - const char* name, /*!< in: space name */ - ulint id, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size, or - 0 for uncompressed tablespaces */ - ulint purpose, /*!< in: FIL_TABLESPACE, or FIL_LOG if log */ - fil_space_crypt_t* crypt_data); /*!< in: crypt data */ + const char* name, /*!< in: space name */ + ulint id, /*!< in: space id */ + ulint flags, + fil_type_t purpose, /*!< in: FIL_TABLESPACE, or FIL_LOG if log */ + fil_space_crypt_t* crypt_data) /*!< in: crypt data */ + __attribute__((warn_unused_result)); /*******************************************************************//** Assigns a new space id for a new single-table tablespace. This works simply by incrementing the global counter. If 4 billion id's is not enough, we may need to recycle id's. -@return TRUE if assigned, FALSE if not */ -UNIV_INTERN -ibool +@return true if assigned, false if not */ +bool fil_assign_new_space_id( /*====================*/ ulint* space_id); /*!< in/out: space id */ -/*******************************************************************//** -Returns the path from the first fil_node_t found for the space ID sent. + +/** Frees a space object from the tablespace memory cache. +Closes the files in the chain but does not delete them. +There must not be any pending i/o's or flushes on the files. +@param[in] id tablespace identifier +@param[in] x_latched whether the caller holds X-mode space->latch +@return true if success */ +bool +fil_space_free( + ulint id, + bool x_latched); + +/** Returns the path from the first fil_node_t found with this space ID. The caller is responsible for freeing the memory allocated here for the value returned. -@return a copy of fil_node_t::path, NULL if space is zero or not found. */ -UNIV_INTERN +@param[in] id Tablespace ID +@return own: A copy of fil_node_t::path, NULL if space ID is zero +or not found. */ char* fil_space_get_first_path( -/*=====================*/ - ulint id); /*!< in: space id */ + ulint id); + /*******************************************************************//** Returns the size of the space in pages. The tablespace must be cached in the memory cache. -@return space size, 0 if space not found */ -UNIV_INTERN +@return space size, 0 if space not found */ ulint fil_space_get_size( /*===============*/ @@ -504,34 +742,44 @@ fil_space_get_size( /*******************************************************************//** Returns the flags of the space. The tablespace must be cached in the memory cache. 
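
As the comment on fil_space_create() above says, the returned tablespace object is "to be filled in with fil_node_create()", i.e. registering the space and attaching its data file are two separate steps. The sketch below shows only that call order; the types and one-line stub bodies are stand-ins so it compiles outside the server, and the real functions return NULL on failure exactly as documented above.

    #include <cstdio>

    /* Stand-in types and stub bodies; only the call order and the
       parameter shapes mirror the declarations above. */
    typedef unsigned long ulint;
    enum fil_type_t {
        FIL_TYPE_TEMPORARY, FIL_TYPE_IMPORT, FIL_TYPE_TABLESPACE, FIL_TYPE_LOG
    };
    struct fil_space_t { const char* name; ulint id; };
    typedef struct fil_space_crypt_struct fil_space_crypt_t;
    static const ulint FIL_IBD_FILE_INITIAL_SIZE = 4;
    static const ulint ULINT_MAX_SKETCH = ~0UL;

    /* Stub: register the in-memory tablespace object (the real function
       may return NULL, e.g. if the space already exists). */
    static fil_space_t* fil_space_create(const char* name, ulint id,
                                         ulint flags, fil_type_t purpose,
                                         fil_space_crypt_t* crypt_data)
    {
        (void) flags; (void) purpose; (void) crypt_data;
        static fil_space_t space;
        space.name = name;
        space.id = id;
        return(&space);
    }

    /* Stub: attach a (closed) data file to the space (the real function
       returns NULL on error). */
    static char* fil_node_create(const char* name, ulint size,
                                 fil_space_t* space, bool is_raw,
                                 bool atomic_write,
                                 ulint max_pages = ULINT_MAX_SKETCH)
    {
        (void) size; (void) space; (void) is_raw;
        (void) atomic_write; (void) max_pages;
        return(const_cast<char*>(name));
    }

    int main()
    {
        /* 1) Register the tablespace object in the cache ... */
        fil_space_t* space = fil_space_create("test/t1", 10, 0,
                                              FIL_TYPE_TABLESPACE, NULL);
        if (space == NULL) {
            return(1);
        }

        /* 2) ... then attach the initial data file to it. */
        if (fil_node_create("./test/t1.ibd", FIL_IBD_FILE_INITIAL_SIZE,
                            space, false, false) == NULL) {
            return(1);
        }

        std::printf("space %lu registered with one file node\n", space->id);
        return(0);
    }
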
-@return flags, ULINT_UNDEFINED if space not found */ -UNIV_INTERN +@return flags, ULINT_UNDEFINED if space not found */ ulint fil_space_get_flags( /*================*/ ulint id); /*!< in: space id */ -/*******************************************************************//** -Returns the compressed page size of the space, or 0 if the space -is not compressed. The tablespace must be cached in the memory cache. -@return compressed page size, ULINT_UNDEFINED if space not found */ -UNIV_INTERN -ulint -fil_space_get_zip_size( -/*===================*/ - ulint id); /*!< in: space id */ -/*******************************************************************//** -Checks if the pair space, page_no refers to an existing page in a tablespace -file space. The tablespace must be cached in the memory cache. -@return TRUE if the address is meaningful */ -UNIV_INTERN -ibool -fil_check_adress_in_tablespace( -/*===========================*/ - ulint id, /*!< in: space id */ - ulint page_no);/*!< in: page number */ + +/** Check if table is mark for truncate. +@param[in] id space id +@return true if tablespace is marked for truncate. */ +bool +fil_space_is_being_truncated( + ulint id); + +/** Open each fil_node_t of a named fil_space_t if not already open. +@param[in] name Tablespace name +@return true if all file nodes are opened. */ +bool +fil_space_open( + const char* name); + +/** Close each fil_node_t of a named fil_space_t if open. +@param[in] name Tablespace name */ +void +fil_space_close( + const char* name); + +/** Returns the page size of the space and whether it is compressed or not. +The tablespace must be cached in the memory cache. +@param[in] id space id +@param[out] found true if tablespace was found +@return page size */ +const page_size_t +fil_space_get_page_size( + ulint id, + bool* found); + /****************************************************************//** Initializes the tablespace memory cache. */ -UNIV_INTERN void fil_init( /*=====*/ @@ -539,7 +787,6 @@ fil_init( ulint max_n_open); /*!< in: max number of open files */ /*******************************************************************//** Initializes the tablespace memory cache. */ -UNIV_INTERN void fil_close(void); /*===========*/ @@ -549,21 +796,18 @@ database server shutdown. This should be called at a server startup after the space objects for the log and the system tablespace have been created. The purpose of this operation is to make sure we never run out of file descriptors if we need to read from the insert buffer or to write to the log. */ -UNIV_INTERN void fil_open_log_and_system_tablespace_files(void); /*==========================================*/ /*******************************************************************//** Closes all open files. There must not be any pending i/o's or not flushed modifications in the files. */ -UNIV_INTERN void fil_close_all_files(void); /*=====================*/ /*******************************************************************//** Closes the redo log files. There must not be any pending i/o's or not flushed modifications in the files. */ -UNIV_INTERN void fil_close_log_files( /*================*/ @@ -571,50 +815,11 @@ fil_close_log_files( /*******************************************************************//** Sets the max tablespace id counter if the given number is bigger than the previous value. 
*/ -UNIV_INTERN void fil_set_max_space_id_if_bigger( /*===========================*/ ulint max_id);/*!< in: maximum known id */ #ifndef UNIV_HOTBACKUP -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no); /*!< in: latest archived log file number */ -/*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. -@retval NULL on success, or if innodb_force_recovery is set -@return pointer to an error message string */ -UNIV_INTERN -const char* -fil_read_first_page( -/*================*/ - os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: tablespace flags */ - ulint* space_id, /*!< out: tablespace ID */ -#ifdef UNIV_LOG_ARCHIVE - ulint* min_arch_log_no, /*!< out: min of archived - log numbers in data files */ - ulint* max_arch_log_no, /*!< out: max of archived - log numbers in data files */ -#endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*!< out: crypt data */ - - __attribute__((warn_unused_result)); /*******************************************************************//** Increments the count of pending operation, if space is not being deleted. @return TRUE if being deleted, and operation should be skipped */ @@ -631,52 +836,183 @@ void fil_decr_pending_ops( /*=================*/ ulint id); /*!< in: space id */ + +/** Acquire a tablespace when it could be dropped concurrently. +Used by background threads that do not necessarily hold proper locks +for concurrency control. +@param[in] id tablespace ID +@return the tablespace, or NULL if missing or being deleted */ +fil_space_t* +fil_space_acquire( + ulint id) + __attribute__((warn_unused_result)); + +/** Acquire a tablespace that may not exist. +Used by background threads that do not necessarily hold proper locks +for concurrency control. +@param[in] id tablespace ID +@return the tablespace, or NULL if missing or being deleted */ +fil_space_t* +fil_space_acquire_silent( + ulint id) + __attribute__((warn_unused_result)); + +/** Release a tablespace acquired with fil_space_acquire(). +@param[in,out] space tablespace to release */ +void +fil_space_release( + fil_space_t* space); + +/** Wrapper with reference-counting for a fil_space_t. */ +class FilSpace +{ +public: + /** Default constructor: Use this when reference counting + is done outside this wrapper. */ + FilSpace() : m_space(NULL) {} + + /** Constructor: Look up the tablespace and increment the + referece count if found. + @param[in] space_id tablespace ID */ + explicit FilSpace(ulint space_id) + : m_space(fil_space_acquire(space_id)) {} + + /** Assignment operator: This assumes that fil_space_acquire() + has already been done for the fil_space_t. The caller must + assign NULL if it calls fil_space_release(). 
+ @param[in] space tablespace to assign */ + class FilSpace& operator=( + fil_space_t* space) + { + /* fil_space_acquire() must have been invoked. */ + ut_ad(space == NULL || space->n_pending_ops > 0); + m_space = space; + return(*this); + } + + /** Destructor - Decrement the reference count if a fil_space_t + is still assigned. */ + ~FilSpace() + { + if (m_space != NULL) { + fil_space_release(m_space); + } + } + + /** Implicit type conversion + @return the wrapped object */ + operator const fil_space_t*() const + { + return(m_space); + } + + /** Explicit type conversion + @return the wrapped object */ + const fil_space_t* operator()() const + { + return(m_space); + } + +private: + /** The wrapped pointer */ + fil_space_t* m_space; +}; + #endif /* !UNIV_HOTBACKUP */ -/*******************************************************************//** -Parses the body of a log record written about an .ibd file operation. That is, -the log record part after the standard (type, space id, page no) header of the -log record. - -If desired, also replays the delete or rename operation if the .ibd file -exists and the space id in it matches. Replays the create operation if a file -at that path does not exist yet. If the database directory for the file to be -created does not exist, then we create the directory, too. - -Note that mysqlbackup --apply-log sets fil_path_to_mysql_datadir to point to -the datadir that we should use in replaying the file operations. -@return end of log record, or NULL if the record was not completely -contained between ptr and end_ptr */ -UNIV_INTERN -byte* -fil_op_log_parse_or_replay( -/*=======================*/ - byte* ptr, /*!< in: buffer containing the log record body, - or an initial segment of it, if the record does - not fir completely between ptr and end_ptr */ - byte* end_ptr, /*!< in: buffer end */ - ulint type, /*!< in: the type of this log record */ - ulint space_id, /*!< in: the space id of the tablespace in - question, or 0 if the log record should - only be parsed but not replayed */ - ulint log_flags); /*!< in: redo log flags - (stored in the page number parameter) */ -/*******************************************************************//** -Deletes a single-table tablespace. The tablespace must be cached in the -memory cache. -@return TRUE if success */ -UNIV_INTERN +/********************************************************//** +Creates the database directory for a table if it does not exist yet. */ +void +fil_create_directory_for_tablename( +/*===============================*/ + const char* name); /*!< in: name in the standard + 'databasename/tablename' format */ +/********************************************************//** +Recreates table indexes by applying +TRUNCATE log record during recovery. +@return DB_SUCCESS or error code */ +dberr_t +fil_recreate_table( +/*===============*/ + ulint space_id, /*!< in: space id */ + ulint format_flags, /*!< in: page format */ + ulint flags, /*!< in: tablespace flags */ + const char* name, /*!< in: table name */ + truncate_t& truncate); /*!< in/out: The information of + TRUNCATE log record */ +/********************************************************//** +Recreates the tablespace and table indexes by applying +TRUNCATE log record during recovery. 
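
The FilSpace class above wraps the fil_space_acquire()/fil_space_release() pair in an RAII guard, so a background thread cannot leak the tablespace reference count on an early return. A sketch of the usage pattern follows; the acquire/release stubs are simplified stand-ins (the real functions take the fil_system mutex and refuse spaces that are being dropped), and the guard is re-declared only in as much detail as the sketch needs.

    #include <cstdio>

    typedef unsigned long ulint;

    /* Stand-ins: the real fil_space_t, fil_space_acquire() and
       fil_space_release() are declared in the hunk above; these stubs
       only maintain the n_pending_ops reference count. */
    struct fil_space_t { ulint id; ulint n_pending_ops; };

    static fil_space_t dummy_space = { 10, 0 };

    static fil_space_t* fil_space_acquire(ulint id)
    {
        if (id != dummy_space.id) {
            return(NULL);   /* missing or being deleted */
        }
        ++dummy_space.n_pending_ops;
        return(&dummy_space);
    }

    static void fil_space_release(fil_space_t* space)
    {
        --space->n_pending_ops;
    }

    /* Guard with the same shape as the FilSpace wrapper above. */
    class FilSpace {
    public:
        explicit FilSpace(ulint space_id)
            : m_space(fil_space_acquire(space_id)) {}

        ~FilSpace()
        {
            if (m_space != NULL) {
                fil_space_release(m_space);
            }
        }

        operator const fil_space_t*() const { return(m_space); }

    private:
        fil_space_t* m_space;
    };

    int main()
    {
        {
            FilSpace space(10);

            if (static_cast<const fil_space_t*>(space) == NULL) {
                return(1);  /* tablespace was dropped concurrently */
            }
            /* ... background work; every return path releases the ref. */
        }

        /* Prints 0: the destructor released the reference. */
        std::printf("pending ops after scope exit: %lu\n",
                    dummy_space.n_pending_ops);
        return(0);
    }
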
+@return DB_SUCCESS or error code */ +dberr_t +fil_recreate_tablespace( +/*====================*/ + ulint space_id, /*!< in: space id */ + ulint format_flags, /*!< in: page format */ + ulint flags, /*!< in: tablespace flags */ + const char* name, /*!< in: table name */ + truncate_t& truncate, /*!< in/out: The information of + TRUNCATE log record */ + lsn_t recv_lsn); /*!< in: the end LSN of + the log record */ +/** Replay a file rename operation if possible. +@param[in] space_id tablespace identifier +@param[in] first_page_no first page number in the file +@param[in] name old file name +@param[in] new_name new file name +@return whether the operation was successfully applied +(the name did not exist, or new_name did not exist and +name was successfully renamed to new_name) */ +bool +fil_op_replay_rename( + ulint space_id, + ulint first_page_no, + const char* name, + const char* new_name) + __attribute__((warn_unused_result)); + +/** Deletes an IBD tablespace, either general or single-table. +The tablespace must be cached in the memory cache. This will delete the +datafile, fil_space_t & fil_node_t entries from the file_system_t cache. +@param[in] space_id Tablespace id +@param[in] buf_remove Specify the action to take on the pages +for this table in the buffer pool. +@return true if success */ dberr_t fil_delete_tablespace( -/*==================*/ - ulint id, /*!< in: space id */ - buf_remove_t buf_remove); /*!< in: specify the action to take - on the tables pages in the buffer - pool */ + ulint id, + buf_remove_t buf_remove); + +/** Truncate the tablespace to needed size. +@param[in] space_id id of tablespace to truncate +@param[in] size_in_pages truncate size. +@return true if truncate was successful. */ +bool +fil_truncate_tablespace( + ulint space_id, + ulint size_in_pages); + +/*******************************************************************//** +Prepare for truncating a single-table tablespace. The tablespace +must be cached in the memory cache. +1) Check pending operations on a tablespace; +2) Remove all insert buffer entries for the tablespace; +@return DB_SUCCESS or error */ +dberr_t +fil_prepare_for_truncate( +/*=====================*/ + ulint id); /*!< in: space id */ +/**********************************************************************//** +Reinitialize the original tablespace header with the same space id +for single tablespace */ +void +fil_reinit_space_header( +/*====================*/ + ulint id, /*!< in: space id */ + ulint size); /*!< in: size in blocks */ /*******************************************************************//** Closes a single-table tablespace. The tablespace must be cached in the memory cache. Free all pages used by the tablespace. -@return DB_SUCCESS or error */ -UNIV_INTERN +@return DB_SUCCESS or error */ dberr_t fil_close_tablespace( /*=================*/ @@ -695,9 +1031,8 @@ memory cache. Discarding is like deleting a tablespace, but 3. When the user does IMPORT TABLESPACE, the tablespace will have the same id as it originally had. - 4. Free all the pages in use by the tablespace if rename=TRUE. -@return DB_SUCCESS or error */ -UNIV_INTERN + 4. Free all the pages in use by the tablespace if rename=true. +@return DB_SUCCESS or error */ dberr_t fil_discard_tablespace( /*===================*/ @@ -719,104 +1054,58 @@ fil_rename_tablespace_check( const char* new_path, bool is_discarded); -/*******************************************************************//** -Renames a single-table tablespace. 
The tablespace must be cached in the -tablespace memory cache. -@return TRUE if success */ -UNIV_INTERN -ibool +/** Rename a single-table tablespace. +The tablespace must exist in the memory cache. +@param[in] id tablespace identifier +@param[in] old_path old file name +@param[in] new_name new table name in the +databasename/tablename format +@param[in] new_path_in new file name, +or NULL if it is located in the normal data directory +@return true if success */ +bool fil_rename_tablespace( -/*==================*/ - const char* old_name_in, /*!< in: old table name in the - standard databasename/tablename - format of InnoDB, or NULL if we - do the rename based on the space - id only */ - ulint id, /*!< in: space id */ - const char* new_name, /*!< in: new table name in the - standard databasename/tablename - format of InnoDB */ - const char* new_path); /*!< in: new full datafile path - if the tablespace is remotely - located, or NULL if it is located - in the normal data directory. */ + ulint id, + const char* old_path, + const char* new_name, + const char* new_path_in); /*******************************************************************//** -Allocates a file name for a single-table tablespace. The string must be freed -by caller with mem_free(). -@return own: file name */ -UNIV_INTERN -char* -fil_make_ibd_name( -/*==============*/ - const char* name, /*!< in: table name or a dir path */ - bool is_full_path); /*!< in: TRUE if it is a dir path */ -/*******************************************************************//** -Allocates a file name for a tablespace ISL file (InnoDB Symbolic Link). -The string must be freed by caller with mem_free(). -@return own: file name */ -UNIV_INTERN -char* -fil_make_isl_name( -/*==============*/ - const char* name); /*!< in: table name */ -/*******************************************************************//** -Creates a new InnoDB Symbolic Link (ISL) file. It is always created -under the 'datadir' of MySQL. The datadir is the directory of a -running mysqld program. We can refer to it by simply using the path '.'. -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_create_link_file( -/*=================*/ - const char* tablename, /*!< in: tablename */ - const char* filepath); /*!< in: pathname of tablespace */ -/*******************************************************************//** -Deletes an InnoDB Symbolic Link (ISL) file. */ -UNIV_INTERN -void -fil_delete_link_file( -/*==================*/ - const char* tablename); /*!< in: name of table */ -/*******************************************************************//** -Reads an InnoDB Symbolic Link (ISL) file. -It is always created under the 'datadir' of MySQL. The name is of the -form {databasename}/{tablename}. and the isl file is expected to be in a -'{databasename}' directory called '{tablename}.isl'. The caller must free -the memory of the null-terminated path returned if it is not null. -@return own: filepath found in link file, NULL if not found. */ -UNIV_INTERN +Allocates and builds a file name from a path, a table or tablespace name +and a suffix. The string must be freed by caller with ut_free(). +@param[in] path NULL or the direcory path or the full path and filename. +@param[in] name NULL if path is full, or Table/Tablespace name +@param[in] suffix NULL or the file extention to use. 
+@return own: file name */ char* -fil_read_link_file( -/*===============*/ - const char* name); /*!< in: tablespace name */ +fil_make_filepath( + const char* path, + const char* name, + ib_extention suffix, + bool strip_name); #include "fil0crypt.h" -/*******************************************************************//** -Creates a new single-table tablespace to a database directory of MySQL. -Database directories are under the 'datadir' of MySQL. The datadir is the -directory of a running mysqld program. We can refer to it by simply the -path '.'. Tables created with CREATE TEMPORARY TABLE we place in the temp -dir of the mysqld server. -@return DB_SUCCESS or error code */ -UNIV_INTERN +/** Creates a new General or Single-Table tablespace +@param[in] space_id Tablespace ID +@param[in] name Tablespace name in dbname/tablename format. +For general tablespaces, the 'dbname/' part may be missing. +@param[in] path Path and filename of the datafile to create. +@param[in] flags Tablespace flags +@param[in] size Initial size of the tablespace file in pages, +must be >= FIL_IBD_FILE_INITIAL_SIZE +@return DB_SUCCESS or error code */ dberr_t -fil_create_new_single_table_tablespace( -/*===================================*/ - ulint space_id, /*!< in: space id */ - const char* tablename, /*!< in: the table name in the usual - databasename/tablename format - of InnoDB */ - const char* dir_path, /*!< in: NULL or a dir path */ - ulint flags, /*!< in: tablespace flags */ - ulint flags2, /*!< in: table flags2 */ - ulint size, /*!< in: the initial size of the - tablespace file in pages, - must be >= FIL_IBD_FILE_INITIAL_SIZE */ +fil_ibd_create( + ulint space_id, + const char* name, + const char* path, + ulint flags, + ulint size, fil_encryption_t mode, /*!< in: encryption mode */ - ulint key_id) /*!< in: encryption key_id */ - __attribute__((nonnull, warn_unused_result)); + ulint key_id) /*!< in: encryption key_id */ + __attribute__((warn_unused_result)); + #ifndef UNIV_HOTBACKUP /********************************************************************//** Tries to open a single-table tablespace and optionally checks the space id is @@ -832,80 +1121,86 @@ If the validate boolean is set, we read the first page of the file and check that the space id in the file is what we expect. We assume that this function runs much faster if no check is made, since accessing the file inode probably is much faster (the OS caches them) than accessing -the first page of the file. This boolean may be initially FALSE, but if +the first page of the file. This boolean may be initially false, but if a remote tablespace is found it will be changed to true. If the fix_dict boolean is set, then it is safe to use an internal SQL statement to update the dictionary tables if they are incorrect. -@return DB_SUCCESS or error code */ -UNIV_INTERN +@param[in] validate true if we should validate the tablespace +@param[in] fix_dict true if the dictionary is available to be fixed +@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_TEMPORARY +@param[in] id tablespace ID +@param[in] flags tablespace flags +@param[in] space_name tablespace name of the datafile +If file-per-table, it is the table name in the databasename/tablename format +@param[in] path_in expected filepath, usually read from dictionary +@return DB_SUCCESS or error code */ dberr_t -fil_open_single_table_tablespace( -/*=============================*/ - bool validate, /*!< in: Do we validate tablespace? */ - bool fix_dict, /*!< in: Can we fix the dictionary? 
*/ - ulint id, /*!< in: space id */ - ulint flags, /*!< in: tablespace flags */ - const char* tablename, /*!< in: table name in the - databasename/tablename format */ - const char* filepath, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ - __attribute__((nonnull(5), warn_unused_result)); +fil_ibd_open( + bool validate, + bool fix_dict, + fil_type_t purpose, + ulint id, + ulint flags, + const char* tablename, + const char* path_in, + dict_table_t* table) /*!< in: table */ + __attribute__((warn_unused_result)); + +enum fil_load_status { + /** The tablespace file(s) were found and valid. */ + FIL_LOAD_OK, + /** The name no longer matches space_id */ + FIL_LOAD_ID_CHANGED, + /** The file(s) were not found */ + FIL_LOAD_NOT_FOUND, + /** The file(s) were not valid */ + FIL_LOAD_INVALID +}; + +/** Open a single-file tablespace and add it to the InnoDB data structures. +@param[in] space_id tablespace ID +@param[in] filename path/to/databasename/tablename.ibd +@param[out] space the tablespace, or NULL on error +@return status of the operation */ +enum fil_load_status +fil_ibd_load( + ulint space_id, + const char* filename, + fil_space_t*& space) + __attribute__((warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ -/********************************************************************//** -At the server startup, if we need crash recovery, scans the database -directories under the MySQL datadir, looking for .ibd files. Those files are -single-table tablespaces. We need to know the space id in each of them so that -we know into which file we should look to check the contents of a page stored -in the doublewrite buffer, also to know where to apply log records where the -space id is != 0. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_load_single_table_tablespaces(void); -/*===================================*/ -/*******************************************************************//** -Returns TRUE if a single-table tablespace does not exist in the memory cache, -or is being deleted there. -@return TRUE if does not exist or is being deleted */ -UNIV_INTERN -ibool -fil_tablespace_deleted_or_being_deleted_in_mem( -/*===========================================*/ - ulint id, /*!< in: space id */ - ib_int64_t version);/*!< in: tablespace_version should be this; if - you pass -1 as the value of this, then this - parameter is ignored */ -/*******************************************************************//** -Returns TRUE if a single-table tablespace exists in the memory cache. -@return TRUE if exists */ -UNIV_INTERN -ibool -fil_tablespace_exists_in_mem( -/*=========================*/ - ulint id); /*!< in: space id */ + +/***********************************************************************//** +A fault-tolerant function that tries to read the next file name in the +directory. We retry 100 times if os_file_readdir_next_file() returns -1. The +idea is to read as much good data as we can and jump over bad data. 
+@return 0 if ok, -1 if error even after the retries, 1 if at the end +of the directory */ +int +fil_file_readdir_next_file( +/*=======================*/ + dberr_t* err, /*!< out: this is set to DB_ERROR if an error + was encountered, otherwise not changed */ + const char* dirname,/*!< in: directory name or path */ + os_file_dir_t dir, /*!< in: directory stream */ + os_file_stat_t* info); /*!< in/out: buffer where the + info is returned */ #ifndef UNIV_HOTBACKUP /*******************************************************************//** -Returns TRUE if a matching tablespace exists in the InnoDB tablespace memory +Returns true if a matching tablespace exists in the InnoDB tablespace memory cache. Note that if we have not done a crash recovery at the database startup, there may be many tablespaces which are not yet in the memory cache. -@return TRUE if a matching tablespace exists in the memory cache */ -UNIV_INTERN -ibool +@return true if a matching tablespace exists in the memory cache */ +bool fil_space_for_table_exists_in_mem( /*==============================*/ ulint id, /*!< in: space id */ const char* name, /*!< in: table name in the standard 'databasename/tablename' format */ - ibool mark_space, /*!< in: in crash recovery, at database - startup we mark all spaces which have - an associated table in the InnoDB - data dictionary, so that - we can print a warning about orphaned - tablespaces */ - ibool print_error_if_does_not_exist, + bool print_error_if_does_not_exist, /*!< in: print detailed error information to the .err log if a matching tablespace is not found from @@ -913,39 +1208,30 @@ fil_space_for_table_exists_in_mem( bool adjust_space, /*!< in: whether to adjust space id when find table space mismatch */ mem_heap_t* heap, /*!< in: heap memory */ - table_id_t table_id); /*!< in: table id */ + table_id_t table_id, /*!< in: table id */ + dict_table_t* table); /*!< in: table or NULL */ #else /* !UNIV_HOTBACKUP */ /********************************************************************//** Extends all tablespaces to the size stored in the space header. During the mysqlbackup --apply-log phase we extended the spaces on-demand so that log records could be appllied, but that may have left spaces still too small compared to the size stored in the space header. */ -UNIV_INTERN void fil_extend_tablespaces_to_stored_len(void); /*======================================*/ #endif /* !UNIV_HOTBACKUP */ -/**********************************************************************//** -Tries to extend a data file so that it would accommodate the number of pages -given. The tablespace must be cached in the memory cache. If the space is big -enough already, does nothing. -@return TRUE if success */ -UNIV_INTERN -ibool -fil_extend_space_to_desired_size( -/*=============================*/ - ulint* actual_size, /*!< out: size of the space after extension; - if we ran out of disk space this may be lower - than the desired size */ - ulint space_id, /*!< in: space id */ - ulint size_after_extend);/*!< in: desired size in pages after the - extension; if the current space size is bigger - than this already, the function does nothing */ +/** Try to extend a tablespace if it is smaller than the specified size. 
+@param[in,out] space tablespace +@param[in] size desired size in pages +@return whether the tablespace is at least as big as requested */ +bool +fil_space_extend( + fil_space_t* space, + ulint size); /*******************************************************************//** Tries to reserve free extents in a file space. -@return TRUE if succeed */ -UNIV_INTERN -ibool +@return true if succeed */ +bool fil_space_reserve_free_extents( /*===========================*/ ulint id, /*!< in: space id */ @@ -953,7 +1239,6 @@ fil_space_reserve_free_extents( ulint n_to_reserve); /*!< in: how many one wants to reserve */ /*******************************************************************//** Releases free extents in a file space. */ -UNIV_INTERN void fil_space_release_free_extents( /*===========================*/ @@ -962,56 +1247,50 @@ fil_space_release_free_extents( /*******************************************************************//** Gets the number of reserved extents. If the database is silent, this number should be zero. */ -UNIV_INTERN ulint fil_space_get_n_reserved_extents( /*=============================*/ ulint id); /*!< in: space id */ -/********************************************************************//** -Reads or writes data. This operation is asynchronous (aio). -@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do -i/o on a tablespace which does not exist */ -UNIV_INTERN + +/** Reads or writes data. This operation could be asynchronous (aio). + +@param[in] type IO context +@param[in] sync true if synchronous aio is desired +@param[in] page_id page id +@param[in] page_size page size +@param[in] byte_offset remainder of offset in bytes; in aio this + must be divisible by the OS block size +@param[in] len how many bytes to read or write; this must + not cross a file boundary; in aio this must + be a block size multiple +@param[in,out] buf buffer where to store read data or from where + to write; in aio this must be appropriately + aligned +@param[in] message message for aio handler if non-sync aio + used, else ignored + +@return DB_SUCCESS, DB_TABLESPACE_DELETED or DB_TABLESPACE_TRUNCATED +if we are trying to do i/o on a tablespace which does not exist */ dberr_t fil_io( -/*===*/ - ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE, - ORed to OS_FILE_LOG, if a log i/o - and ORed to OS_AIO_SIMULATED_WAKE_LATER - if simulated aio and we want to post a - batch of i/os; NOTE that a simulated batch - may introduce hidden chances of deadlocks, - because i/os are not actually handled until - all have been posted: use with great - caution! 
*/ - bool sync, /*!< in: true if synchronous aio is desired */ - ulint space_id, /*!< in: space id */ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint block_offset, /*!< in: offset in number of blocks */ - ulint byte_offset, /*!< in: remainder of offset in bytes; in - aio this must be divisible by the OS block - size */ - ulint len, /*!< in: how many bytes to read or write; this - must not cross a file boundary; in aio this - must be a block size multiple */ - void* buf, /*!< in/out: buffer where to store read data - or from where to write; in aio this must be - appropriately aligned */ - void* message, /*!< in: message for aio handler if non-sync - aio used, else ignored */ - ulint* write_size) /*!< in/out: Actual write size initialized + const IORequest& type, + bool sync, + const page_id_t& page_id, + const page_size_t& page_size, + ulint byte_offset, + ulint len, + void* buf, + void* message, + ulint* write_size); /*!< in/out: Actual write size initialized after fist successfull trim operation for this page and if initialized we do not trim again if actual page size does not decrease. */ - __attribute__((nonnull(8))); /**********************************************************************//** Waits for an aio operation to complete. This function is used to write the handler for completed requests. The aio array of pending requests is divided into segments (see os0file.cc for more info). The thread specifies which segment it wants to wait for. */ -UNIV_INTERN void fil_aio_wait( /*=========*/ @@ -1020,82 +1299,135 @@ fil_aio_wait( /**********************************************************************//** Flushes to disk possible writes cached by the OS. If the space does not exist or is being dropped, does not do anything. */ -UNIV_INTERN void fil_flush( /*======*/ ulint space_id); /*!< in: file space id (this can be a group of log files or a tablespace of the database) */ -/**********************************************************************//** -Flushes to disk writes in file spaces of the given type possibly cached by -the OS. */ -UNIV_INTERN +/** Flush to disk the writes in file spaces of the given type +possibly cached by the OS. +@param[in] purpose FIL_TYPE_TABLESPACE or FIL_TYPE_LOG */ void fil_flush_file_spaces( -/*==================*/ - ulint purpose); /*!< in: FIL_TABLESPACE, FIL_LOG */ + fil_type_t purpose); /******************************************************************//** Checks the consistency of the tablespace cache. -@return TRUE if ok */ -UNIV_INTERN -ibool +@return true if ok */ +bool fil_validate(void); /*==============*/ /********************************************************************//** -Returns TRUE if file address is undefined. -@return TRUE if undefined */ -UNIV_INTERN -ibool +Returns true if file address is undefined. +@return true if undefined */ +bool fil_addr_is_null( /*=============*/ fil_addr_t addr); /*!< in: address */ /********************************************************************//** Get the predecessor of a file page. -@return FIL_PAGE_PREV */ -UNIV_INTERN +@return FIL_PAGE_PREV */ ulint fil_page_get_prev( /*==============*/ const byte* page); /*!< in: file page */ /********************************************************************//** Get the successor of a file page. 
-@return FIL_PAGE_NEXT */ -UNIV_INTERN +@return FIL_PAGE_NEXT */ ulint fil_page_get_next( /*==============*/ const byte* page); /*!< in: file page */ /*********************************************************************//** Sets the file page type. */ -UNIV_INTERN void fil_page_set_type( /*==============*/ byte* page, /*!< in/out: file page */ ulint type); /*!< in: type */ -/*********************************************************************//** -Gets the file page type. -@return type; NOTE that if the type has not been written to page, the -return value not defined */ -UNIV_INTERN +/** Reset the page type. +Data files created before MySQL 5.1 may contain garbage in FIL_PAGE_TYPE. +In MySQL 3.23.53, only undo log pages and index pages were tagged. +Any other pages were written with uninitialized bytes in FIL_PAGE_TYPE. +@param[in] page_id page number +@param[in,out] page page with invalid FIL_PAGE_TYPE +@param[in] type expected page type +@param[in,out] mtr mini-transaction */ +void +fil_page_reset_type( + const page_id_t& page_id, + byte* page, + ulint type, + mtr_t* mtr); +/** Get the file page type. +@param[in] page file page +@return page type */ +inline ulint fil_page_get_type( -/*==============*/ - const byte* page); /*!< in: file page */ + const byte* page) +{ + return(mach_read_from_2(page + FIL_PAGE_TYPE)); +} +/** Check (and if needed, reset) the page type. +Data files created before MySQL 5.1 may contain +garbage in the FIL_PAGE_TYPE field. +In MySQL 3.23.53, only undo log pages and index pages were tagged. +Any other pages were written with uninitialized bytes in FIL_PAGE_TYPE. +@param[in] page_id page number +@param[in,out] page page with possibly invalid FIL_PAGE_TYPE +@param[in] type expected page type +@param[in,out] mtr mini-transaction */ +inline +void +fil_page_check_type( + const page_id_t& page_id, + byte* page, + ulint type, + mtr_t* mtr) +{ + ulint page_type = fil_page_get_type(page); + + if (page_type != type) { + fil_page_reset_type(page_id, page, type, mtr); + } +} + +/** Check (and if needed, reset) the page type. +Data files created before MySQL 5.1 may contain +garbage in the FIL_PAGE_TYPE field. +In MySQL 3.23.53, only undo log pages and index pages were tagged. +Any other pages were written with uninitialized bytes in FIL_PAGE_TYPE. +@param[in,out] block block with possibly invalid FIL_PAGE_TYPE +@param[in] type expected page type +@param[in,out] mtr mini-transaction */ +#define fil_block_check_type(block, type, mtr) \ + fil_page_check_type(block->page.id, block->frame, type, mtr) + +#ifdef UNIV_DEBUG +/** Increase redo skipped of a tablespace. +@param[in] id space id */ +void +fil_space_inc_redo_skipped_count( + ulint id); + +/** Decrease redo skipped of a tablespace. +@param[in] id space id */ +void +fil_space_dec_redo_skipped_count( + ulint id); /*******************************************************************//** -Returns TRUE if a single-table tablespace is being deleted. -@return TRUE if being deleted */ -UNIV_INTERN -ibool -fil_tablespace_is_being_deleted( -/*============================*/ +Check whether a single-table tablespace is redo skipped. +@return true if redo skipped */ +bool +fil_space_is_redo_skipped( +/*======================*/ ulint id); /*!< in: space id */ +#endif /********************************************************************//** Delete the tablespace file and any related files like .cfg. This should not be called for temporary tables. 
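
fil_page_get_type() is now an ordinary inline function reading the 16-bit FIL_PAGE_TYPE field, and fil_page_check_type() repairs stale values left behind by pre-5.1 data files. The sketch below reproduces just the byte-level read; FIL_PAGE_TYPE (offset 24 in the page header) and the big-endian mach_read_from_2() live in other InnoDB headers, so the definitions here are stand-ins written under that assumption.

    #include <cstring>
    #include <iostream>

    typedef unsigned long ulint;
    typedef unsigned char byte;

    /* Stand-ins for definitions from other InnoDB headers: the page type
       is a 2-byte big-endian field at offset 24 of the page header. */
    static const ulint FIL_PAGE_TYPE  = 24;
    static const ulint FIL_PAGE_INDEX = 17855;

    static ulint mach_read_from_2(const byte* b)
    {
        return((static_cast<ulint>(b[0]) << 8) | static_cast<ulint>(b[1]));
    }

    static void mach_write_to_2(byte* b, ulint n)
    {
        b[0] = static_cast<byte>(n >> 8);
        b[1] = static_cast<byte>(n & 0xFF);
    }

    /* Same body as the new inline function in the hunk above. */
    static ulint fil_page_get_type(const byte* page)
    {
        return(mach_read_from_2(page + FIL_PAGE_TYPE));
    }

    int main()
    {
        byte page[16384];               /* a dummy 16 KiB page frame */
        std::memset(page, 0, sizeof(page));

        mach_write_to_2(page + FIL_PAGE_TYPE, FIL_PAGE_INDEX);

        /* Prints 17855, i.e. FIL_PAGE_INDEX. */
        std::cout << fil_page_get_type(page) << std::endl;
        return(0);
    }
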
*/ -UNIV_INTERN void fil_delete_file( /*============*/ @@ -1104,42 +1436,37 @@ fil_delete_file( /** Callback functor. */ struct PageCallback { - /** - Default constructor */ + /** Default constructor */ PageCallback() : - m_zip_size(), - m_page_size(), + m_page_size(0, 0, false), m_filepath() UNIV_NOTHROW {} virtual ~PageCallback() UNIV_NOTHROW {} - /** - Called for page 0 in the tablespace file at the start. - @param file_size - size of the file in bytes - @param block - contents of the first page in the tablespace file - @retval DB_SUCCESS or error code.*/ + /** Called for page 0 in the tablespace file at the start. + @param file_size size of the file in bytes + @param block contents of the first page in the tablespace file + @retval DB_SUCCESS or error code. */ virtual dberr_t init( os_offset_t file_size, const buf_block_t* block) UNIV_NOTHROW = 0; - /** - Called for every page in the tablespace. If the page was not + /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; - @param offset - physical offset within the file - @param block - block read from file, note it is not from the buffer pool + block->frame + UNIV_PAGE_SIZE; + @param offset physical offset within the file + @param block block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ virtual dberr_t operator()( os_offset_t offset, buf_block_t* block) UNIV_NOTHROW = 0; - /** - Set the name of the physical file and the file handle that is used + /** Set the name of the physical file and the file handle that is used to open it for the file that is being iterated over. - @param filename - then physical name of the tablespace file. - @param file - OS file handle */ + @param filename then physical name of the tablespace file. + @param file OS file handle */ void set_file(const char* filename, os_file_t file) UNIV_NOTHROW { m_file = file; @@ -1150,30 +1477,19 @@ struct PageCallback { @return the space id of the tablespace */ virtual ulint get_space_id() const UNIV_NOTHROW = 0; - /** The compressed page size - @return the compressed page size */ - ulint get_zip_size() const - { - return(m_zip_size); - } - - /** - Set the tablespace compressed table size. - @return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ - dberr_t set_zip_size(const buf_frame_t* page) UNIV_NOTHROW; + /** Set the tablespace table size. + @param[in] page a page belonging to the tablespace */ + void set_page_size(const buf_frame_t* page) UNIV_NOTHROW; /** The compressed page size @return the compressed page size */ - ulint get_page_size() const + const page_size_t& get_page_size() const { return(m_page_size); } - /** Compressed table page size */ - ulint m_zip_size; - /** The tablespace page size. */ - ulint m_page_size; + page_size_t m_page_size; /** File handle to the tablespace */ os_file_t m_file; @@ -1189,11 +1505,10 @@ protected: /********************************************************************//** Iterate over all the pages in the tablespace. 
-@param table - the table definiton in the server
-@param n_io_buffers - number of blocks to read and write together
-@param callback - functor that will do the page updates
-@return DB_SUCCESS or error code */
-UNIV_INTERN
+@param table the table definition in the server
+@param n_io_buffers number of blocks to read and write together
+@param callback functor that will do the page updates
+@return DB_SUCCESS or error code */
 dberr_t
 fil_tablespace_iterate(
 /*===================*/
@@ -1202,23 +1517,40 @@ fil_tablespace_iterate(
 	PageCallback&	callback)
 	MY_ATTRIBUTE((nonnull, warn_unused_result));
 
-/*******************************************************************//**
-Checks if a single-table tablespace for a given table name exists in the
-tablespace memory cache.
-@return space id, ULINT_UNDEFINED if not found */
-UNIV_INTERN
+/********************************************************************//**
+Looks for a pre-existing fil_space_t with the given tablespace ID
+and, if found, returns the name and filepath in newly allocated buffers that the caller must free.
+@param[in]	space_id	The tablespace ID to search for.
+@param[out]	name		Name of the tablespace found.
+@param[out]	filepath	The filepath of the first datafile for the tablespace found.
+@return true if tablespace is found, false if not. */
+bool
+fil_space_read_name_and_filepath(
+	ulint	space_id,
+	char**	name,
+	char**	filepath);
+
+/** Convert a file name to a tablespace name.
+@param[in]	filename	directory/databasename/tablename.ibd
+@return database/tablename string, to be freed with ut_free() */
+char*
+fil_path_to_space_name(
+	const char*	filename);
+
+/** Returns the space ID based on the tablespace name.
+The tablespace must be found in the tablespace memory cache.
+This call is made from external to this module, so the mutex is not owned.
+@param[in]	tablespace	Tablespace name
+@return space ID if tablespace found, ULINT_UNDEFINED if not found. */
 ulint
-fil_get_space_id_for_table(
-/*=======================*/
-	const char*	name);	/*!< in: table name in the standard
-				'databasename/tablename' format */
+fil_space_get_id_by_name(
+	const char*	tablespace);
 
 /** Iterate over all the spaces in the space list and fetch the
 tablespace names. It will return a copy of the name that must be
 freed by the caller using: delete[].
 @return DB_SUCCESS if all OK. */
-UNIV_INTERN
 dberr_t
 fil_get_space_names(
 /*================*/
@@ -1226,14 +1558,25 @@ fil_get_space_names(
 /*!< in/out: Vector for collecting the names. */
 	MY_ATTRIBUTE((warn_unused_result));
 
+/** Return the next fil_node_t in the current or next fil_space_t.
+Once started, the caller must keep calling this until it returns NULL.
+fil_space_acquire() and fil_space_release() are invoked here which
+blocks a concurrent operation from dropping the tablespace.
+@param[in]	prev_node	Pointer to the previous fil_node_t.
+If NULL, use the first fil_space_t on fil_system->space_list.
+@return pointer to the next fil_node_t.
+@retval NULL if this was the last file node */ +const fil_node_t* +fil_node_next( + const fil_node_t* prev_node); + /** Generate redo log for swapping two .ibd files @param[in] old_table old table @param[in] new_table new table @param[in] tmp_name temporary table name @param[in,out] mtr mini-transaction -@return innodb error code */ -UNIV_INTERN -dberr_t +@return whether the operation succeeded */ +bool fil_mtr_rename_log( const dict_table_t* old_table, const dict_table_t* new_table, @@ -1241,17 +1584,122 @@ fil_mtr_rename_log( mtr_t* mtr) MY_ATTRIBUTE((nonnull)); -/*******************************************************************//** -Finds the given page_no of the given space id from the double write buffer, -and copies it to the corresponding .ibd file. -@return true if copy was successful, or false. */ +/** Note that a non-predefined persistent tablespace has been modified +by redo log. +@param[in,out] space tablespace */ +void +fil_names_dirty( + fil_space_t* space); + +/** Write MLOG_FILE_NAME records when a non-predefined persistent +tablespace was modified for the first time since the latest +fil_names_clear(). +@param[in,out] space tablespace +@param[in,out] mtr mini-transaction */ +void +fil_names_dirty_and_write( + fil_space_t* space, + mtr_t* mtr); + +/** Set the compression type for the tablespace +@param[in] space Space ID of tablespace for which to set +@param[in] algorithm Text representation of the algorithm +@return DB_SUCCESS or error code */ +dberr_t +fil_set_compression( + ulint space_id, + const char* algorithm) + __attribute__((warn_unused_result)); + +/** +@param[in] space_id Space ID to check +@return the compression algorithm */ +Compression::Type +fil_get_compression( + ulint space_id) + __attribute__((warn_unused_result)); + +/** Write MLOG_FILE_NAME records if a persistent tablespace was modified +for the first time since the latest fil_names_clear(). +@param[in,out] space tablespace +@param[in,out] mtr mini-transaction +@return whether any MLOG_FILE_NAME record was written */ +inline __attribute__((warn_unused_result)) bool -fil_user_tablespace_restore_page( -/*==============================*/ - fsp_open_info* fsp, /* in: contains space id and .ibd - file information */ - ulint page_no); /* in: page_no to obtain from double - write buffer */ +fil_names_write_if_was_clean( + fil_space_t* space, + mtr_t* mtr) +{ + ut_ad(log_mutex_own()); + + if (space == NULL) { + return(false); + } + + const bool was_clean = space->max_lsn == 0; + ut_ad(space->max_lsn <= log_sys->lsn); + space->max_lsn = log_sys->lsn; + + if (was_clean) { + fil_names_dirty_and_write(space, mtr); + } + + return(was_clean); +} + +extern volatile bool recv_recovery_on; + +/** During crash recovery, open a tablespace if it had not been opened +yet, to get valid size and flags. +@param[in,out] space tablespace */ +inline +void +fil_space_open_if_needed( + fil_space_t* space) +{ + ut_ad(recv_recovery_on); + + if (space->size == 0) { + /* Initially, size and flags will be set to 0, + until the files are opened for the first time. + fil_space_get_size() will open the file + and adjust the size and flags. */ +#ifdef UNIV_DEBUG + ulint size = +#endif /* UNIV_DEBUG */ + fil_space_get_size(space->id); + ut_ad(size == space->size); + } +} + +/** On a log checkpoint, reset fil_names_dirty_and_write() flags +and write out MLOG_FILE_NAME and MLOG_CHECKPOINT if needed. 
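/* fil_names_write_if_was_clean() above stamps fil_space_t::max_lsn with the
   current log LSN and emits a file-name record only when the previous value
   was 0, i.e. the tablespace had not been dirtied since the last
   fil_names_clear().  A standalone sketch of that bookkeeping with plain
   stand-ins for fil_space_t and the log system: */
#include <iostream>

struct space_sketch { unsigned long long max_lsn; };  /* 0 == clean */

unsigned long long current_lsn = 1000;                /* stand-in for log_sys->lsn */

/* Returns true when a name record would have to be written. */
bool names_write_if_was_clean(space_sketch& space) {
    const bool was_clean = (space.max_lsn == 0);
    space.max_lsn = current_lsn;
    if (was_clean) {
        std::cout << "write MLOG_FILE_NAME record\n";
    }
    return was_clean;
}

/* fil_names_clear() analogue: forget the dirty mark at a checkpoint. */
void names_clear(space_sketch& space) { space.max_lsn = 0; }

int main() {
    space_sketch space = {0};
    names_write_if_was_clean(space);   /* first change after a checkpoint: logs */
    names_write_if_was_clean(space);   /* already dirty: nothing written */
    names_clear(space);                /* checkpoint */
    names_write_if_was_clean(space);   /* dirty again: logs */
    return 0;
}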
+@param[in] lsn checkpoint LSN +@param[in] do_write whether to always write MLOG_CHECKPOINT +@return whether anything was written to the redo log +@retval false if no flags were set and nothing written +@retval true if anything was written to the redo log */ +bool +fil_names_clear( + lsn_t lsn, + bool do_write); + +#if !defined(NO_FALLOCATE) && defined(UNIV_LINUX) +/** +Try and enable FusionIO atomic writes. +@param[in] file OS file handle +@return true if successful */ +bool +fil_fusionio_enable_atomic_write(os_file_t file); +#endif /* !NO_FALLOCATE && UNIV_LINUX */ + +/** Note that the file system where the file resides doesn't support PUNCH HOLE +@param[in,out] node Node to set */ +void fil_no_punch_hole(fil_node_t* node); + +#ifdef UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH +void test_make_filepath(); +#endif /* UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH */ /*******************************************************************//** Return space flags */ @@ -1333,6 +1781,14 @@ fil_space_get_block_size( #endif /* UNIV_INNOCHECKSUM */ +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ +dberr_t +fil_write_flushed_lsn( + lsn_t lsn); + #ifndef UNIV_INNOCHECKSUM #ifndef UNIV_NONINL #include "fil0fil.ic" diff --git a/storage/innobase/include/fil0fil.ic b/storage/innobase/include/fil0fil.ic index 5654d8f6178..b17bf8213ab 100644 --- a/storage/innobase/include/fil0fil.ic +++ b/storage/innobase/include/fil0fil.ic @@ -57,6 +57,8 @@ fil_get_page_type_name( ulint page_type) /*!< in: FIL_PAGE_TYPE */ { switch(page_type) { + case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED: + return (const char*)"PAGE_COMPRESSED_ENRYPTED"; case FIL_PAGE_PAGE_COMPRESSED: return (const char*)"PAGE_COMPRESSED"; case FIL_PAGE_INDEX: @@ -85,7 +87,7 @@ fil_get_page_type_name( return (const char*)"ZBLOB"; case FIL_PAGE_TYPE_ZBLOB2: return (const char*)"ZBLOB2"; - case FIL_PAGE_TYPE_COMPRESSED: + case FIL_PAGE_COMPRESSED: return (const char*)"ORACLE PAGE COMPRESSED"; default: return (const char*)"PAGE TYPE CORRUPTED"; @@ -102,7 +104,7 @@ fil_node_get_block_size( fil_node_t* node) /*!< in: Node where to get block size */ { - return (node->file_block_size); + return (node->block_size); } /****************************************************************//** @@ -131,7 +133,7 @@ fil_page_type_validate( page_type == FIL_PAGE_TYPE_XDES || page_type == FIL_PAGE_TYPE_BLOB || page_type == FIL_PAGE_TYPE_ZBLOB || - page_type == FIL_PAGE_TYPE_COMPRESSED))) { + page_type == FIL_PAGE_COMPRESSED))) { uint key_version = mach_read_from_4(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED); @@ -165,7 +167,7 @@ fil_page_type_validate( page_type == FIL_PAGE_TYPE_XDES || page_type == FIL_PAGE_TYPE_BLOB || page_type == FIL_PAGE_TYPE_ZBLOB || - page_type == FIL_PAGE_TYPE_COMPRESSED); + page_type == FIL_PAGE_COMPRESSED); return false; } diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h index 99a05f14ffe..4d27a61be64 100644 --- a/storage/innobase/include/fil0pagecompress.h +++ b/storage/innobase/include/fil0pagecompress.h @@ -44,7 +44,7 @@ Returns the page compression flag of the space, or false if the space is not compressed. The tablespace must be cached in the memory cache. 
@return true if page compressed, false if not or space not found */ UNIV_INTERN -ibool +bool fil_space_is_page_compressed( /*=========================*/ ulint id); /*!< in: space id */ @@ -53,7 +53,7 @@ Returns the page compression flag of the space, or false if the space is not compressed. The tablespace must be cached in the memory cache. @return true if page compressed, false if not or space not found */ UNIV_INTERN -ibool +bool fil_space_get_page_compressed( /*=========================*/ fil_space_t* space); /*!< in: space id */ @@ -66,24 +66,6 @@ atomic_writes_t fil_space_get_atomic_writes( /*=========================*/ ulint id); /*!< in: space id */ -/*******************************************************************//** -Find out wheather the page is index page or not -@return true if page type index page, false if not */ -UNIV_INTERN -ibool -fil_page_is_index_page( -/*===================*/ - byte *buf); /*!< in: page */ - -/****************************************************************//** -Get the name of the compression algorithm used for page -compression. -@return compression algorithm name or "UNKNOWN" if not known*/ -UNIV_INTERN -const char* -fil_get_compression_alg_name( -/*=========================*/ - ulint comp_alg); /*! + +/** Types of raw partitions in innodb_data_file_path */ +enum device_t { + SRV_NOT_RAW = 0, /*!< Not a raw partition */ + SRV_NEW_RAW, /*!< A 'newraw' partition, only to be + initialized */ + SRV_OLD_RAW /*!< An initialized raw partition */ +}; + +/** Data file control information. */ +class Datafile { + + friend class Tablespace; + friend class SysTablespace; + +public: + + Datafile() + : + m_name(), + m_filepath(), + m_filename(), + m_handle(OS_FILE_CLOSED), + m_open_flags(OS_FILE_OPEN), + m_size(), + m_order(), + m_type(SRV_NOT_RAW), + m_space_id(ULINT_UNDEFINED), + m_flags(), + m_exists(), + m_is_valid(), + m_first_page_buf(), + m_first_page(), + m_atomic_write(), + m_last_os_error(), + m_file_info(), + m_crypt_info() + { + /* No op */ + } + + Datafile(const char* name, ulint flags, ulint size, ulint order) + : + m_name(mem_strdup(name)), + m_filepath(), + m_filename(), + m_handle(OS_FILE_CLOSED), + m_open_flags(OS_FILE_OPEN), + m_size(size), + m_order(order), + m_type(SRV_NOT_RAW), + m_space_id(ULINT_UNDEFINED), + m_flags(flags), + m_exists(), + m_is_valid(), + m_first_page_buf(), + m_first_page(), + m_atomic_write(), + m_last_os_error(), + m_file_info(), + m_crypt_info() + { + ut_ad(m_name != NULL); + /* No op */ + } + + Datafile(const Datafile& file) + : + m_handle(file.m_handle), + m_open_flags(file.m_open_flags), + m_size(file.m_size), + m_order(file.m_order), + m_type(file.m_type), + m_space_id(file.m_space_id), + m_flags(file.m_flags), + m_exists(file.m_exists), + m_is_valid(file.m_is_valid), + m_first_page_buf(), + m_first_page(), + m_atomic_write(file.m_atomic_write), + m_last_os_error(), + m_file_info(), + m_crypt_info() + { + m_name = mem_strdup(file.m_name); + ut_ad(m_name != NULL); + + if (file.m_filepath != NULL) { + m_filepath = mem_strdup(file.m_filepath); + ut_a(m_filepath != NULL); + set_filename(); + } else { + m_filepath = NULL; + m_filename = NULL; + } + } + + virtual ~Datafile() + { + shutdown(); + } + + Datafile& operator=(const Datafile& file) + { + ut_a(this != &file); + + ut_ad(m_name == NULL); + m_name = mem_strdup(file.m_name); + ut_a(m_name != NULL); + + m_size = file.m_size; + m_order = file.m_order; + m_type = file.m_type; + + ut_a(m_handle == OS_FILE_CLOSED); + m_handle = file.m_handle; + + m_exists = 
file.m_exists; + m_is_valid = file.m_is_valid; + m_open_flags = file.m_open_flags; + m_space_id = file.m_space_id; + m_flags = file.m_flags; + m_last_os_error = 0; + + if (m_filepath != NULL) { + ut_free(m_filepath); + m_filepath = NULL; + m_filename = NULL; + } + + if (file.m_filepath != NULL) { + m_filepath = mem_strdup(file.m_filepath); + ut_a(m_filepath != NULL); + set_filename(); + } + + /* Do not make a copy of the first page, + it should be reread if needed */ + m_first_page_buf = NULL; + m_first_page = NULL; + /* Do not copy crypt info it is read from first page */ + m_crypt_info = NULL; + + m_atomic_write = file.m_atomic_write; + + return(*this); + } + + /** Initialize the name and flags of this datafile. + @param[in] name tablespace name, will be copied + @param[in] flags tablespace flags */ + void init(const char* name, ulint flags); + + /** Release the resources. */ + virtual void shutdown(); + + /** Open a data file in read-only mode to check if it exists + so that it can be validated. + @param[in] strict whether to issue error messages + @return DB_SUCCESS or error code */ + virtual dberr_t open_read_only(bool strict); + + /** Open a data file in read-write mode during start-up so that + doublewrite pages can be restored and then it can be validated. + @param[in] read_only_mode if true, then readonly mode checks + are enforced. + @return DB_SUCCESS or error code */ + virtual dberr_t open_read_write(bool read_only_mode) + __attribute__((warn_unused_result)); + + /** Initialize OS specific file info. */ + void init_file_info(); + + /** Close a data file. + @return DB_SUCCESS or error code */ + dberr_t close(); + + /** Make a full filepath from a directory path and a filename. + Prepend the dirpath to filename using the extension given. + If dirpath is NULL, prepend the default datadir to filepath. + Store the result in m_filepath. + @param[in] dirpath directory path + @param[in] filename filename or filepath + @param[in] ext filename extension */ + void make_filepath( + const char* dirpath, + const char* filename, + ib_extention ext); + + /** Set the filepath by duplicating the filepath sent in */ + void set_filepath(const char* filepath); + + /** Allocate and set the datafile or tablespace name in m_name. + If a name is provided, use it; else if the datafile is file-per-table, + extract a file-per-table tablespace name from m_filepath; else it is a + general tablespace, so just call it that for now. The value of m_name + will be freed in the destructor. + @param[in] name Tablespace Name if known, NULL if not */ + void set_name(const char* name); + + /** Validates the datafile and checks that it conforms with + the expected space ID and flags. The file should exist and be + successfully opened in order for this function to validate it. + @param[in] space_id The expected tablespace ID. + @param[in] flags The expected tablespace flags. + @retval DB_SUCCESS if tablespace is valid, DB_ERROR if not. + m_is_valid is also set true on success, else false. */ + dberr_t validate_to_dd( + ulint space_id, + ulint flags) + __attribute__((warn_unused_result)); + + /** Validates this datafile for the purpose of recovery. + The file should exist and be successfully opened. We initially + open it in read-only mode because we just want to read the SpaceID. + However, if the first page is corrupt and needs to be restored + from the doublewrite buffer, we will reopen it in write mode and + ry to restore that page. + @retval DB_SUCCESS if tablespace is valid, DB_ERROR if not. 
+ m_is_valid is also set true on success, else false. */ + dberr_t validate_for_recovery() + __attribute__((warn_unused_result)); + + /** Checks the consistency of the first page of a datafile when the + tablespace is opened. This occurs before the fil_space_t is created + so the Space ID found here must not already be open. + m_is_valid is set true on success, else false. + @param[out] flush_lsn contents of FIL_PAGE_FILE_FLUSH_LSN + (only valid for the first file of the system tablespace) + @retval DB_SUCCESS on if the datafile is valid + @retval DB_CORRUPTION if the datafile is not readable + @retval DB_TABLESPACE_EXISTS if there is a duplicate space_id */ + dberr_t validate_first_page(lsn_t* flush_lsn = 0) + __attribute__((warn_unused_result)); + + /** Get Datafile::m_name. + @return m_name */ + const char* name() const + { + return(m_name); + } + + /** Get Datafile::m_filepath. + @return m_filepath */ + const char* filepath() const + { + return(m_filepath); + } + + /** Get Datafile::m_handle. + @return m_handle */ + os_file_t handle() const + { + return(m_handle); + } + + /** Get Datafile::m_order. + @return m_order */ + ulint order() const + { + return(m_order); + } + + /** Get Datafile::m_space_id. + @return m_space_id */ + ulint space_id() const + { + return(m_space_id); + } + + /** Get Datafile::m_flags. + @return m_flags */ + ulint flags() const + { + return(m_flags); + } + + /** + @return true if m_handle is open, false if not */ + bool is_open() const + { + return(m_handle != OS_FILE_CLOSED); + } + + /** Get Datafile::m_is_valid. + @return m_is_valid */ + bool is_valid() const + { + return(m_is_valid); + } + + /** Get the last OS error reported + @return m_last_os_error */ + ulint last_os_error() const + { + return(m_last_os_error); + } + + fil_space_crypt_t* get_crypt_info() const + { + return(m_crypt_info); + } + + /** Test if the filepath provided looks the same as this filepath + by string comparison. If they are two different paths to the same + file, same_as() will be used to show that after the files are opened. + @param[in] other filepath to compare with + @retval true if it is the same filename by char comparison + @retval false if it looks different */ + bool same_filepath_as(const char* other) const; + + /** Test if another opened datafile is the same file as this object. + @param[in] other Datafile to compare with + @return true if it is the same file, else false */ + bool same_as(const Datafile& other) const; + +private: + /** Free the filepath buffer. */ + void free_filepath(); + + /** Set the filename pointer to the start of the file name + in the filepath. */ + void set_filename() + { + if (m_filepath == NULL) { + return; + } + + char* last_slash = strrchr(m_filepath, OS_PATH_SEPARATOR); + + m_filename = last_slash ? last_slash + 1 : m_filepath; + } + + /** Create/open a data file. + @param[in] read_only_mode if true, then readonly mode checks + are enforced. + @return DB_SUCCESS or error code */ + dberr_t open_or_create(bool read_only_mode) + __attribute__((warn_unused_result)); + + /** Reads a few significant fields from the first page of the + datafile, which must already be open. + @param[in] read_only_mode if true, then readonly mode checks + are enforced. + @return DB_SUCCESS or DB_IO_ERROR if page cannot be read */ + dberr_t read_first_page(bool read_first_page) + __attribute__((warn_unused_result)); + + /** Free the first page from memory when it is no longer needed. */ + void free_first_page(); + + /** Set the Datafile::m_open_flags. 
+ @param open_flags The Open flags to set. */ + void set_open_flags(os_file_create_t open_flags) + { + m_open_flags = open_flags; + }; + + /** Determine if this datafile is on a Raw Device + @return true if it is a RAW device. */ + bool is_raw_device() + { + return(m_type != SRV_NOT_RAW); + } + + /* DATA MEMBERS */ + + /** Datafile name at the tablespace location. + This is either the basename of the file if an absolute path + was entered, or it is the relative path to the datadir or + Tablespace::m_path. */ + char* m_name; + +protected: + /** Physical file path with base name and extension */ + char* m_filepath; + +private: + /** Determine the space id of the given file descriptor by reading + a few pages from the beginning of the .ibd file. + @return DB_SUCCESS if space id was successfully identified, + else DB_ERROR. */ + dberr_t find_space_id(); + + /** Finds a given page of the given space id from the double write + buffer and copies it to the corresponding .ibd file. + @param[in] page_no Page number to restore + @return DB_SUCCESS if page was restored, else DB_ERROR */ + dberr_t restore_from_doublewrite( + ulint restore_page_no); + + /** Points into m_filepath to the file name with extension */ + char* m_filename; + + /** Open file handle */ + os_file_t m_handle; + + /** Flags to use for opening the data file */ + os_file_create_t m_open_flags; + + /** size in database pages */ + ulint m_size; + + /** ordinal position of this datafile in the tablespace */ + ulint m_order; + + /** The type of the data file */ + device_t m_type; + + /** Tablespace ID. Contained in the datafile header. + If this is a system tablespace, FSP_SPACE_ID is only valid + in the first datafile. */ + ulint m_space_id; + + /** Tablespace flags. Contained in the datafile header. + If this is a system tablespace, FSP_SPACE_FLAGS are only valid + in the first datafile. */ + ulint m_flags; + + /** true if file already existed on startup */ + bool m_exists; + + /* true if the tablespace is valid */ + bool m_is_valid; + + /** Buffer to hold first page */ + byte* m_first_page_buf; + + /** Pointer to the first page held in the buffer above */ + byte* m_first_page; + + /** true if atomic writes enabled for this file */ + bool m_atomic_write; + +protected: + /** Last OS error received so it can be reported if needed. */ + ulint m_last_os_error; + +public: + /** Use the following to determine the uniqueness of this datafile. */ +#ifdef _WIN32 + /* Use fields dwVolumeSerialNumber, nFileIndexLow, nFileIndexHigh. */ + BY_HANDLE_FILE_INFORMATION m_file_info; +#else + /* Use field st_ino. */ + struct stat m_file_info; +#endif /* WIN32 */ + + /** Encryption information */ + fil_space_crypt_t* m_crypt_info; +}; + + +/** Data file control information. */ +class RemoteDatafile : public Datafile +{ +private: + /** Link filename (full path) */ + char* m_link_filepath; + +public: + + RemoteDatafile() + : + m_link_filepath() + { + /* No op - base constructor is called. */ + } + + RemoteDatafile(const char* name, ulint size, ulint order) + : + m_link_filepath() + { + /* No op - base constructor is called. */ + } + + ~RemoteDatafile() + { + shutdown(); + } + + /** Release the resources. */ + void shutdown(); + + /** Get the link filepath. + @return m_link_filepath */ + const char* link_filepath() const + { + return(m_link_filepath); + } + + /** Set the link filepath. Use default datadir, the base name of + the path provided without its suffix, plus DOT_ISL. + @param[in] path filepath which contains a basename to use. 
+ If NULL, use m_name as the basename. */ + void set_link_filepath(const char* path); + + /** Create a link filename based on the contents of m_name, + open that file, and read the contents into m_filepath. + @retval DB_SUCCESS if remote linked tablespace file is opened and read. + @retval DB_CANNOT_OPEN_FILE if the link file does not exist. */ + dberr_t open_link_file(); + + /** Delete an InnoDB Symbolic Link (ISL) file. */ + void delete_link_file(void); + + /** Open a handle to the file linked to in an InnoDB Symbolic Link file + in read-only mode so that it can be validated. + @param[in] strict whether to issue error messages + @return DB_SUCCESS or error code */ + dberr_t open_read_only(bool strict); + + /** Opens a handle to the file linked to in an InnoDB Symbolic Link + file in read-write mode so that it can be restored from doublewrite + and validated. + @param[in] read_only_mode If true, then readonly mode checks + are enforced. + @return DB_SUCCESS or error code */ + dberr_t open_read_write(bool read_only_mode) + __attribute__((warn_unused_result)); + + /****************************************************************** + Global Static Functions; Cannot refer to data members. + ******************************************************************/ + + /** Creates a new InnoDB Symbolic Link (ISL) file. It is always + created under the 'datadir' of MySQL. The datadir is the directory + of a running mysqld program. We can refer to it by simply using + the path ".". + @param[in] name tablespace name + @param[in] filepath remote filepath of tablespace datafile + @param[in] is_shared true for general tablespace, + false for file-per-table + @return DB_SUCCESS or error code */ + static dberr_t create_link_file( + const char* name, + const char* filepath, + bool is_shared = false); + + /** Delete an InnoDB Symbolic Link (ISL) file by name. + @param[in] name tablespace name */ + static void delete_link_file(const char* name); + + /** Read an InnoDB Symbolic Link (ISL) file by name. + It is always created under the datadir of MySQL. + For file-per-table tablespaces, the isl file is expected to be + in a 'database' directory and called 'tablename.isl'. + For general tablespaces, there will be no 'database' directory. + The 'basename.isl' will be in the datadir. + The caller must free the memory returned if it is not null. + @param[in] link_filepath filepath of the ISL file + @return Filepath of the IBD file read from the ISL file */ + static char* read_link_file( + const char* link_filepath); +}; +#endif /* fsp0file_h */ diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index b9ff05b4bd4..17be27a8ce5 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -33,177 +33,25 @@ Created 12/18/1995 Heikki Tuuri #ifndef UNIV_INNOCHECKSUM -#include "mtr0mtr.h" +#include "fsp0space.h" #include "fut0lst.h" -#include "ut0byte.h" +#include "mtr0mtr.h" #include "page0types.h" +#include "rem0types.h" +#include "ut0byte.h" #endif /* !UNIV_INNOCHECKSUM */ - -/* @defgroup fsp_flags InnoDB Tablespace Flag Constants @{ */ - -/** Width of the POST_ANTELOPE flag */ -#define FSP_FLAGS_WIDTH_POST_ANTELOPE 1 -/** Number of flag bits used to indicate the tablespace zip page size */ -#define FSP_FLAGS_WIDTH_ZIP_SSIZE 4 -/** Width of the ATOMIC_BLOBS flag. The ability to break up a long -column into an in-record prefix and an externally stored part is available -to the two Barracuda row formats COMPRESSED and DYNAMIC. 
*/ -#define FSP_FLAGS_WIDTH_ATOMIC_BLOBS 1 -/** Number of flag bits used to indicate the tablespace page size */ -#define FSP_FLAGS_WIDTH_PAGE_SSIZE 4 -/** Width of the DATA_DIR flag. This flag indicates that the tablespace -is found in a remote location, not the default data directory. */ -#define FSP_FLAGS_WIDTH_DATA_DIR 1 -/** Number of flag bits used to indicate the page compression and compression level */ -#define FSP_FLAGS_WIDTH_PAGE_COMPRESSION 1 -#define FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL 4 - -/** Number of flag bits used to indicate atomic writes for this tablespace */ -#define FSP_FLAGS_WIDTH_ATOMIC_WRITES 2 - -/** Width of all the currently known tablespace flags */ -#define FSP_FLAGS_WIDTH (FSP_FLAGS_WIDTH_POST_ANTELOPE \ - + FSP_FLAGS_WIDTH_ZIP_SSIZE \ - + FSP_FLAGS_WIDTH_ATOMIC_BLOBS \ - + FSP_FLAGS_WIDTH_PAGE_SSIZE \ - + FSP_FLAGS_WIDTH_DATA_DIR \ - + FSP_FLAGS_WIDTH_PAGE_COMPRESSION \ - + FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL \ - + FSP_FLAGS_WIDTH_ATOMIC_WRITES ) - -/** A mask of all the known/used bits in tablespace flags */ -#define FSP_FLAGS_MASK (~(~0 << FSP_FLAGS_WIDTH)) - -/** Zero relative shift position of the POST_ANTELOPE field */ -#define FSP_FLAGS_POS_POST_ANTELOPE 0 -/** Zero relative shift position of the ZIP_SSIZE field */ -#define FSP_FLAGS_POS_ZIP_SSIZE (FSP_FLAGS_POS_POST_ANTELOPE \ - + FSP_FLAGS_WIDTH_POST_ANTELOPE) -/** Zero relative shift position of the ATOMIC_BLOBS field */ -#define FSP_FLAGS_POS_ATOMIC_BLOBS (FSP_FLAGS_POS_ZIP_SSIZE \ - + FSP_FLAGS_WIDTH_ZIP_SSIZE) -/** Note that these need to be before the page size to be compatible with -dictionary */ -/** Zero relative shift position of the PAGE_COMPRESSION field */ -#define FSP_FLAGS_POS_PAGE_COMPRESSION (FSP_FLAGS_POS_ATOMIC_BLOBS \ - + FSP_FLAGS_WIDTH_ATOMIC_BLOBS) -/** Zero relative shift position of the PAGE_COMPRESSION_LEVEL field */ -#define FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL (FSP_FLAGS_POS_PAGE_COMPRESSION \ - + FSP_FLAGS_WIDTH_PAGE_COMPRESSION) -/** Zero relative shift position of the ATOMIC_WRITES field */ -#define FSP_FLAGS_POS_ATOMIC_WRITES (FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL \ - + FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL) -/** Zero relative shift position of the PAGE_SSIZE field */ -#define FSP_FLAGS_POS_PAGE_SSIZE (FSP_FLAGS_POS_ATOMIC_WRITES \ - + FSP_FLAGS_WIDTH_ATOMIC_WRITES) -/** Zero relative shift position of the start of the UNUSED bits */ -#define FSP_FLAGS_POS_DATA_DIR (FSP_FLAGS_POS_PAGE_SSIZE \ - + FSP_FLAGS_WIDTH_PAGE_SSIZE) +#include "fsp0types.h" #define FSP_FLAGS_POS_DATA_DIR_ORACLE (FSP_FLAGS_POS_ATOMIC_BLOBS \ + FSP_FLAGS_WIDTH_ATOMIC_BLOBS \ + FSP_FLAGS_WIDTH_PAGE_SSIZE) -/** Zero relative shift position of the start of the UNUSED bits */ -#define FSP_FLAGS_POS_UNUSED (FSP_FLAGS_POS_DATA_DIR \ - + FSP_FLAGS_WIDTH_DATA_DIR) - -/** Bit mask of the POST_ANTELOPE field */ -#define FSP_FLAGS_MASK_POST_ANTELOPE \ - ((~(~0U << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \ - << FSP_FLAGS_POS_POST_ANTELOPE) -/** Bit mask of the ZIP_SSIZE field */ -#define FSP_FLAGS_MASK_ZIP_SSIZE \ - ((~(~0U << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \ - << FSP_FLAGS_POS_ZIP_SSIZE) -/** Bit mask of the ATOMIC_BLOBS field */ -#define FSP_FLAGS_MASK_ATOMIC_BLOBS \ - ((~(~0U << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \ - << FSP_FLAGS_POS_ATOMIC_BLOBS) -/** Bit mask of the PAGE_SSIZE field */ -#define FSP_FLAGS_MASK_PAGE_SSIZE \ - ((~(~0U << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \ - << FSP_FLAGS_POS_PAGE_SSIZE) -/** Bit mask of the DATA_DIR field */ -#define FSP_FLAGS_MASK_DATA_DIR \ - ((~(~0U << FSP_FLAGS_WIDTH_DATA_DIR)) \ - 
<< FSP_FLAGS_POS_DATA_DIR) /** Bit mask of the DATA_DIR field */ #define FSP_FLAGS_MASK_DATA_DIR_ORACLE \ ((~(~0 << FSP_FLAGS_WIDTH_DATA_DIR)) \ << FSP_FLAGS_POS_DATA_DIR_ORACLE) -/** Bit mask of the PAGE_COMPRESSION field */ -#define FSP_FLAGS_MASK_PAGE_COMPRESSION \ - ((~(~0 << FSP_FLAGS_WIDTH_PAGE_COMPRESSION)) \ - << FSP_FLAGS_POS_PAGE_COMPRESSION) -/** Bit mask of the PAGE_COMPRESSION_LEVEL field */ -#define FSP_FLAGS_MASK_PAGE_COMPRESSION_LEVEL \ - ((~(~0 << FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL)) \ - << FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL) -/** Bit mask of the ATOMIC_WRITES field */ -#define FSP_FLAGS_MASK_ATOMIC_WRITES \ - ((~(~0 << FSP_FLAGS_WIDTH_ATOMIC_WRITES)) \ - << FSP_FLAGS_POS_ATOMIC_WRITES) -/** Return the value of the POST_ANTELOPE field */ -#define FSP_FLAGS_GET_POST_ANTELOPE(flags) \ - ((flags & FSP_FLAGS_MASK_POST_ANTELOPE) \ - >> FSP_FLAGS_POS_POST_ANTELOPE) -/** Return the value of the ZIP_SSIZE field */ -#define FSP_FLAGS_GET_ZIP_SSIZE(flags) \ - ((flags & FSP_FLAGS_MASK_ZIP_SSIZE) \ - >> FSP_FLAGS_POS_ZIP_SSIZE) -/** Return the value of the ATOMIC_BLOBS field */ -#define FSP_FLAGS_HAS_ATOMIC_BLOBS(flags) \ - ((flags & FSP_FLAGS_MASK_ATOMIC_BLOBS) \ - >> FSP_FLAGS_POS_ATOMIC_BLOBS) -/** Return the value of the PAGE_SSIZE field */ -#define FSP_FLAGS_GET_PAGE_SSIZE(flags) \ - ((flags & FSP_FLAGS_MASK_PAGE_SSIZE) \ - >> FSP_FLAGS_POS_PAGE_SSIZE) -/** Return the value of the DATA_DIR field */ -#define FSP_FLAGS_HAS_DATA_DIR(flags) \ - ((flags & FSP_FLAGS_MASK_DATA_DIR) \ - >> FSP_FLAGS_POS_DATA_DIR) #define FSP_FLAGS_HAS_DATA_DIR_ORACLE(flags) \ ((flags & FSP_FLAGS_MASK_DATA_DIR_ORACLE) \ >> FSP_FLAGS_POS_DATA_DIR_ORACLE) -/** Return the contents of the UNUSED bits */ -#define FSP_FLAGS_GET_UNUSED(flags) \ - (flags >> FSP_FLAGS_POS_UNUSED) - -/** Return the value of the PAGE_COMPRESSION field */ -#define FSP_FLAGS_GET_PAGE_COMPRESSION(flags) \ - ((flags & FSP_FLAGS_MASK_PAGE_COMPRESSION) \ - >> FSP_FLAGS_POS_PAGE_COMPRESSION) -/** Return the value of the PAGE_COMPRESSION_LEVEL field */ -#define FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags) \ - ((flags & FSP_FLAGS_MASK_PAGE_COMPRESSION_LEVEL) \ - >> FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL) -/** Return the value of the ATOMIC_WRITES field */ -#define FSP_FLAGS_GET_ATOMIC_WRITES(flags) \ - ((flags & FSP_FLAGS_MASK_ATOMIC_WRITES) \ - >> FSP_FLAGS_POS_ATOMIC_WRITES) - -/** Set a PAGE_SSIZE into the correct bits in a given -tablespace flags. */ -#define FSP_FLAGS_SET_PAGE_SSIZE(flags, ssize) \ - (flags | (ssize << FSP_FLAGS_POS_PAGE_SSIZE)) - -/** Set a PAGE_COMPRESSION into the correct bits in a given -tablespace flags. */ -#define FSP_FLAGS_SET_PAGE_COMPRESSION(flags, compression) \ - (flags | (compression << FSP_FLAGS_POS_PAGE_COMPRESSION)) - -/** Set a PAGE_COMPRESSION_LEVEL into the correct bits in a given -tablespace flags. */ -#define FSP_FLAGS_SET_PAGE_COMPRESSION_LEVEL(flags, level) \ - (flags | (level << FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL)) - -/** Set a ATOMIC_WRITES into the correct bits in a given -tablespace flags. */ -#define FSP_FLAGS_SET_ATOMIC_WRITES(flags, atomics) \ - (flags | (atomics << FSP_FLAGS_POS_ATOMIC_WRITES)) - -/* @} */ /* @defgroup Tablespace Header Constants (moved from fsp0fsp.c) @{ */ @@ -237,7 +85,7 @@ descriptor page, but used only in the first. 
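/* The FSP_FLAGS_* macros removed above all follow one pattern: each field
   has a WIDTH and a POS, the MASK is ((~(~0U << WIDTH)) << POS), and GET
   shifts the masked bits back down.  A standalone rendering of that scheme,
   using the POST_ANTELOPE and ZIP_SSIZE positions as laid out in the
   removed definitions: */
#include <iostream>

const unsigned POS_POST_ANTELOPE = 0, WIDTH_POST_ANTELOPE = 1;
const unsigned POS_ZIP_SSIZE     = 1, WIDTH_ZIP_SSIZE     = 4;

unsigned field_mask(unsigned width, unsigned pos) {
    return (~(~0U << width)) << pos;     /* same construction as the macros */
}

unsigned get_field(unsigned flags, unsigned width, unsigned pos) {
    return (flags & field_mask(width, pos)) >> pos;
}

unsigned set_field(unsigned flags, unsigned value, unsigned pos) {
    return flags | (value << pos);
}

int main() {
    unsigned flags = 0;
    flags = set_field(flags, 1, POS_POST_ANTELOPE);   /* post-Antelope format */
    flags = set_field(flags, 4, POS_ZIP_SSIZE);       /* zip shift-size 4 */

    std::cout << "post_antelope = "
              << get_field(flags, WIDTH_POST_ANTELOPE, POS_POST_ANTELOPE) << "\n";
    std::cout << "zip_ssize = "
              << get_field(flags, WIDTH_ZIP_SSIZE, POS_ZIP_SSIZE) << "\n";
    return 0;
}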
*/ < 64 pages, this number is 64, i.e., we have initialized the space about the first extent, but have not - physically allocted those pages to the + physically allocated those pages to the file */ #define FSP_SPACE_FLAGS 16 /* fsp_space_t.flags, similar to dict_table_t::flags */ @@ -318,9 +166,8 @@ typedef byte fseg_inode_t; (16 + 3 * FLST_BASE_NODE_SIZE \ + FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE) -#define FSP_SEG_INODES_PER_PAGE(zip_size) \ - (((zip_size ? zip_size : UNIV_PAGE_SIZE) \ - - FSEG_ARR_OFFSET - 10) / FSEG_INODE_SIZE) +#define FSP_SEG_INODES_PER_PAGE(page_size) \ + ((page_size.physical() - FSEG_ARR_OFFSET - 10) / FSEG_INODE_SIZE) /* Number of segment inodes which fit on a single page */ @@ -410,57 +257,87 @@ the extent are free and which contain old tuple version to clean. */ #ifndef UNIV_INNOCHECKSUM /**********************************************************************//** Initializes the file space system. */ -UNIV_INTERN void fsp_init(void); /*==========*/ + /**********************************************************************//** Gets the size of the system tablespace from the tablespace header. If we do not have an auto-extending data file, this should be equal to the size of the data files. If there is an auto-extending data file, this can be smaller. -@return size in pages */ -UNIV_INTERN +@return size in pages */ ulint fsp_header_get_tablespace_size(void); /*================================*/ -/**********************************************************************//** -Reads the file space size stored in the header page. -@return tablespace size stored in the space header */ -UNIV_INTERN + +/** Calculate the number of pages to extend a datafile. +We extend single-table and general tablespaces first one extent at a time, +but 4 at a time for bigger tablespaces. It is not enough to extend always +by one extent, because we need to add at least one extent to FSP_FREE. +A single extent descriptor page will track many extents. And the extent +that uses its extent descriptor page is put onto the FSP_FREE_FRAG list. +Extents that do not use their extent descriptor page are added to FSP_FREE. +The physical page size is used to determine how many extents are tracked +on one extent descriptor page. See xdes_calc_descriptor_page(). +@param[in] page_size page_size of the datafile +@param[in] size current number of pages in the datafile +@return number of pages to extend the file. */ ulint -fsp_get_size_low( -/*=============*/ - page_t* page); /*!< in: header page (page 0 in the tablespace) */ +fsp_get_pages_to_extend_ibd( + const page_size_t& page_size, + ulint size); + +/** Calculate the number of physical pages in an extent for this file. +@param[in] page_size page_size of the datafile +@return number of pages in an extent for this file. */ +UNIV_INLINE +ulint +fsp_get_extent_size_in_pages(const page_size_t& page_size) +{ + return(FSP_EXTENT_SIZE * UNIV_PAGE_SIZE / page_size.physical()); +} + /**********************************************************************//** Reads the space id from the first page of a tablespace. -@return space id, ULINT UNDEFINED if error */ -UNIV_INTERN +@return space id, ULINT UNDEFINED if error */ ulint fsp_header_get_space_id( /*====================*/ const page_t* page); /*!< in: first page of a tablespace */ -/**********************************************************************//** -Reads the space flags from the first page of a tablespace. -@return flags */ -UNIV_INTERN + +/** Read a tablespace header field. 
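/* fsp_get_extent_size_in_pages() above keeps an extent a fixed number of
   bytes, so the page count scales with the physical page size.  A quick
   standalone check of that arithmetic, assuming the default 16 KiB logical
   page and a 64-page (1 MiB) extent: */
#include <iostream>

int main() {
    const unsigned long UNIV_PAGE_SIZE_SKETCH  = 16384;  /* assumed default */
    const unsigned long FSP_EXTENT_SIZE_SKETCH = 64;     /* pages per extent at 16 KiB */

    const unsigned long physical[] = {16384, 8192, 4096};
    for (unsigned long p : physical) {
        /* Same formula as the inline function above. */
        std::cout << "physical page " << p << " bytes -> "
                  << FSP_EXTENT_SIZE_SKETCH * UNIV_PAGE_SIZE_SKETCH / p
                  << " pages per extent\n";
    }
    return 0;
}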
+@param[in] page first page of a tablespace +@param[in] field the header field +@return the contents of the header field */ +inline ulint -fsp_header_get_flags( -/*=================*/ - const page_t* page); /*!< in: first page of a tablespace */ -/**********************************************************************//** -Reads the compressed page size from the first page of a tablespace. -@return compressed page size in bytes, or 0 if uncompressed */ -UNIV_INTERN +fsp_header_get_field(const page_t* page, ulint field) +{ + return(mach_read_from_4(FSP_HEADER_OFFSET + field + page)); +} + +/** Read the flags from the tablespace header page. +@param[in] page first page of a tablespace +@return the contents of FSP_SPACE_FLAGS */ +inline ulint -fsp_header_get_zip_size( -/*====================*/ - const page_t* page); /*!< in: first page of a tablespace */ +fsp_header_get_flags(const page_t* page) +{ + return(fsp_header_get_field(page, FSP_SPACE_FLAGS)); +} + +/** Reads the page size from the first page of a tablespace. +@param[in] page first page of a tablespace +@return page size */ +page_size_t +fsp_header_get_page_size( + const page_t* page); + /**********************************************************************//** Writes the space id and flags to a tablespace header. The flags contain row type, physical/compressed page size, and logical/uncompressed page size of the tablespace. */ -UNIV_INTERN void fsp_header_init_fields( /*===================*/ @@ -468,34 +345,35 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ -UNIV_INTERN -void + +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. */ +bool fsp_header_init( -/*============*/ - ulint space, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr); /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr); + /**********************************************************************//** Increases the space size field of a space. */ -UNIV_INTERN void fsp_header_inc_size( /*================*/ - ulint space, /*!< in: space id */ + ulint space_id, /*!< in: space id */ ulint size_inc, /*!< in: size increment in pages */ mtr_t* mtr); /*!< in/out: mini-transaction */ /**********************************************************************//** Creates a new segment. @return the block where the segment header is placed, x-latched, NULL if could not create segment because of lack of space */ -UNIV_INTERN buf_block_t* fseg_create( /*========*/ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint page, /*!< in: page where the segment header is placed: if this is != 0, the page must belong to another segment, if this is 0, a new page will be allocated and it @@ -507,11 +385,10 @@ fseg_create( Creates a new segment. 
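/* fsp_header_get_field() above is a thin wrapper over mach_read_from_4() at
   FSP_HEADER_OFFSET + field.  The standalone sketch below re-implements the
   big-endian read on a mock page; the 38-byte header offset is an assumption
   for illustration, while the FSP_SPACE_FLAGS slot (16) matches the constant
   shown earlier in this header. */
#include <iostream>

const unsigned HEADER_OFFSET_SKETCH = 38;   /* assumed FSP_HEADER_OFFSET */
const unsigned SPACE_FLAGS_SLOT     = 16;   /* FSP_SPACE_FLAGS */

unsigned read_be32(const unsigned char* p) {
    return (unsigned(p[0]) << 24) | (unsigned(p[1]) << 16)
         | (unsigned(p[2]) << 8)  |  unsigned(p[3]);
}

/* Same shape as fsp_header_get_field(): offset into the page header, then a
   big-endian four-byte read. */
unsigned header_get_field(const unsigned char* page, unsigned field) {
    return read_be32(page + HEADER_OFFSET_SKETCH + field);
}

int main() {
    unsigned char page[128] = {0};
    page[HEADER_OFFSET_SKETCH + SPACE_FLAGS_SLOT + 3] = 33;   /* flags = 33 */
    std::cout << "FSP_SPACE_FLAGS = "
              << header_get_field(page, SPACE_FLAGS_SLOT) << "\n";
    return 0;
}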
@return the block where the segment header is placed, x-latched, NULL if could not create segment because of lack of space */ -UNIV_INTERN buf_block_t* fseg_create_general( /*================*/ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint page, /*!< in: page where the segment header is placed: if this is != 0, the page must belong to another segment, if this is 0, a new page will be allocated and it @@ -528,8 +405,7 @@ fseg_create_general( /**********************************************************************//** Calculates the number of pages reserved by a segment, and how many pages are currently used. -@return number of reserved pages */ -UNIV_INTERN +@return number of reserved pages */ ulint fseg_n_reserved_pages( /*==================*/ @@ -540,15 +416,15 @@ fseg_n_reserved_pages( Allocates a single free page from a segment. This function implements the intelligent allocation strategy which tries to minimize file space fragmentation. -@param[in/out] seg_header segment header -@param[in] hint hint of which page would be desirable -@param[in] direction if the new page is needed because +@param[in,out] seg_header segment header +@param[in] hint hint of which page would be desirable +@param[in] direction if the new page is needed because of an index page split, and records are inserted there in order, into which direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR -@param[in/out] mtr mini-transaction -@return X-latched block, or NULL if no page could be allocated */ +@param[in,out] mtr mini-transaction +@return X-latched block, or NULL if no page could be allocated */ #define fseg_alloc_free_page(seg_header, hint, direction, mtr) \ fseg_alloc_free_page_general(seg_header, hint, direction, \ FALSE, mtr, mtr) @@ -560,7 +436,6 @@ fragmentation. @retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded (init_mtr == mtr, or the page was not previously freed in mtr) @retval block (not allocated or initialized) otherwise */ -UNIV_INTERN buf_block_t* fseg_alloc_free_page_general( /*=========================*/ @@ -608,48 +483,56 @@ function we would liberally reserve several 64 page extents for every page split or merge in a B-tree. But we do not want to waste disk space if the table only occupies < 32 pages. That is why we apply different rules in that special case, just ensuring that there are 3 free pages available. -@return TRUE if we were able to make the reservation */ -UNIV_INTERN -ibool +@return TRUE if we were able to make the reservation */ +bool fsp_reserve_free_extents( /*=====================*/ ulint* n_reserved,/*!< out: number of extents actually reserved; if we return TRUE and the tablespace size is < 64 pages, then this can be 0, otherwise it is n_ext */ - ulint space, /*!< in: space id */ + ulint space_id,/*!< in: space id */ ulint n_ext, /*!< in: number of extents to reserve */ - ulint alloc_type,/*!< in: FSP_NORMAL, FSP_UNDO, or FSP_CLEANING */ - mtr_t* mtr); /*!< in: mini-transaction */ -/**********************************************************************//** -This function should be used to get information on how much we still -will be able to insert new data to the database without running out the -tablespace. Only free extents are taken into account and we also subtract -the safety margin required by the above function fsp_reserve_free_extents. 
-@return available space in kB */ -UNIV_INTERN -ullint + fsp_reserve_t alloc_type, + /*!< in: page reservation type */ + mtr_t* mtr); /*!< in/out: mini-transaction */ + +/** Calculate how many KiB of new data we will be able to insert to the +tablespace without running out of space. +@param[in] space_id tablespace ID +@return available space in KiB +@retval UINTMAX_MAX if unknown */ +uintmax_t fsp_get_available_space_in_free_extents( -/*====================================*/ - ulint space); /*!< in: space id */ + ulint space_id); + +/** Calculate how many KiB of new data we will be able to insert to the +tablespace without running out of space. Start with a space object that has +been acquired by the caller who holds it for the calculation, +@param[in] space tablespace object from fil_space_acquire() +@return available space in KiB */ +uintmax_t +fsp_get_available_space_in_free_extents( + const fil_space_t* space); + /**********************************************************************//** Frees a single page of a segment. */ -UNIV_INTERN void fseg_free_page( /*===========*/ fseg_header_t* seg_header, /*!< in: segment header */ - ulint space, /*!< in: space id */ + ulint space_id, /*!< in: space id */ ulint page, /*!< in: page offset */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ mtr_t* mtr); /*!< in/out: mini-transaction */ /**********************************************************************//** Checks if a single page of a segment is free. -@return true if free */ -UNIV_INTERN +@return true if free */ bool fseg_page_is_free( /*==============*/ fseg_header_t* seg_header, /*!< in: segment header */ - ulint space, /*!< in: space id */ + ulint space_id, /*!< in: space id */ ulint page) /*!< in: page offset */ MY_ATTRIBUTE((nonnull, warn_unused_result)); /**********************************************************************//** @@ -657,8 +540,7 @@ Frees part of a segment. This function can be used to free a segment by repeatedly calling this function in different mini-transactions. Doing the freeing in a single mini-transaction might result in too big a mini-transaction. -@return TRUE if freeing completed */ -UNIV_INTERN +@return TRUE if freeing completed */ ibool fseg_free_step( /*===========*/ @@ -666,58 +548,47 @@ fseg_free_step( resides on the first page of the frag list of the segment, this pointer becomes obsolete after the last freeing step */ - mtr_t* mtr); /*!< in/out: mini-transaction */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((warn_unused_result)); /**********************************************************************//** Frees part of a segment. Differs from fseg_free_step because this function leaves the header page unfreed. -@return TRUE if freeing completed, except the header page */ -UNIV_INTERN +@return TRUE if freeing completed, except the header page */ ibool fseg_free_step_not_header( /*======================*/ fseg_header_t* header, /*!< in: segment header which must reside on the first fragment page of the segment */ - mtr_t* mtr); /*!< in/out: mini-transaction */ -/***********************************************************************//** -Checks if a page address is an extent descriptor page address. 
-@return TRUE if a descriptor page */ + bool ahi, /*!< in: whether we may need to drop + the adaptive hash index */ + mtr_t* mtr) /*!< in/out: mini-transaction */ + __attribute__((warn_unused_result)); + +/** Checks if a page address is an extent descriptor page address. +@param[in] page_id page id +@param[in] page_size page size +@return TRUE if a descriptor page */ UNIV_INLINE ibool fsp_descr_page( -/*===========*/ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint page_no);/*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size); + /***********************************************************//** Parses a redo log record of a file page init. -@return end of log record or NULL */ -UNIV_INTERN +@return end of log record or NULL */ byte* fsp_parse_init_file_page( /*=====================*/ byte* ptr, /*!< in: buffer */ byte* end_ptr, /*!< in: buffer end */ buf_block_t* block); /*!< in: block or NULL */ -/*******************************************************************//** -Validates the file space system and its segments. -@return TRUE if ok */ -UNIV_INTERN -ibool -fsp_validate( -/*=========*/ - ulint space); /*!< in: space id */ -/*******************************************************************//** -Prints info of a file space. */ -UNIV_INTERN -void -fsp_print( -/*======*/ - ulint space); /*!< in: space id */ #ifdef UNIV_DEBUG /*******************************************************************//** Validates a segment. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool fseg_validate( /*==========*/ @@ -727,7 +598,6 @@ fseg_validate( #ifdef UNIV_BTR_PRINT /*******************************************************************//** Writes info of a segment. */ -UNIV_INTERN void fseg_print( /*=======*/ @@ -735,42 +605,93 @@ fseg_print( mtr_t* mtr); /*!< in/out: mini-transaction */ #endif /* UNIV_BTR_PRINT */ -/********************************************************************//** -Validate and return the tablespace flags, which are stored in the -tablespace header at offset FSP_SPACE_FLAGS. They should be 0 for -ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. The newer row formats, -COMPRESSED and DYNAMIC, use a file format > Antelope so they should -have a file format number plus the DICT_TF_COMPACT bit set. -@return true if check ok */ +/** Determine if the tablespace is compressed from tablespace flags. +@param[in] flags Tablespace flags +@return true if compressed, false if not compressed */ UNIV_INLINE bool -fsp_flags_is_valid( -/*===============*/ - ulint flags) /*!< in: tablespace flags */ - MY_ATTRIBUTE((warn_unused_result, const)); -/********************************************************************//** -Determine if the tablespace is compressed from dict_table_t::flags. -@return TRUE if compressed, FALSE if not compressed */ -UNIV_INLINE -ibool fsp_flags_is_compressed( -/*====================*/ - ulint flags); /*!< in: tablespace flags */ + ulint flags); -/********************************************************************//** -Calculates the descriptor index within a descriptor page. -@return descriptor index */ +/** Determine if two tablespaces are equivalent or compatible. +@param[in] flags1 First tablespace flags +@param[in] flags2 Second tablespace flags +@return true the flags are compatible, false if not */ +UNIV_INLINE +bool +fsp_flags_are_equal( + ulint flags1, + ulint flags2); + +/** Initialize an FSP flags integer. +@param[in] page_size page sizes in bytes and compression flag. 
+@param[in]	atomic_blobs	Used by Dynamic and Compressed.
+@param[in]	has_data_dir	This tablespace is in a remote location.
+@param[in]	is_shared	This tablespace can be shared by many tables.
+@param[in]	is_temporary	This tablespace is temporary.
+@param[in]	page_compression	This tablespace uses page compression.
+@param[in]	page_compression_level	Page compression level.
+@param[in]	atomic_writes	Atomic writes setting for this tablespace.
+@return tablespace flags after initialization */
+UNIV_INLINE
+ulint
+fsp_flags_init(
+	const page_size_t&	page_size,
+	bool			atomic_blobs,
+	bool			has_data_dir,
+	bool			is_shared,
+	bool			is_temporary,
+	bool			page_compression,
+	ulint			page_compression_level,
+	ulint			atomic_writes);
+
+/** Convert a 32 bit integer tablespace flags to the 32 bit table flags.
+This can only be done for a tablespace that was built as a file-per-table
+tablespace. Note that the fsp_flags cannot show the difference between a
+Compact and Redundant table, so an extra Compact boolean must be supplied.
+			Low order bit
+			| REDUNDANT | COMPACT | COMPRESSED | DYNAMIC
+fil_space_t::flags	|     0     |    0    |     1      |    1
+dict_table_t::flags	|     0     |    1    |     1      |    1
+@param[in]	fsp_flags	fil_space_t::flags
+@param[in]	compact		true if not Redundant row format
+@return tablespace flags (fil_space_t::flags) */
+ulint
+fsp_flags_to_dict_tf(
+	ulint	fsp_flags,
+	bool	compact);
+
+/** Calculates the descriptor index within a descriptor page.
+@param[in]	page_size	page size
+@param[in]	offset		page offset
+@return descriptor index */
 UNIV_INLINE
 ulint
 xdes_calc_descriptor_index(
-/*=======================*/
-	ulint	zip_size,	/*!< in: compressed page size in bytes;
-				0 for uncompressed pages */
-	ulint	offset);	/*!< in: page offset */
+	const page_size_t&	page_size,
+	ulint			offset);
+
+/** Gets a pointer to the extent descriptor of a page.
+The page where the extent descriptor resides is x-locked. If the page offset
+is equal to the free limit of the space, adds new extents from above the free
+limit to the space free list, unless the free limit equals the space size.
+This adding is necessary to make the descriptors defined, as they are
+uninitialized above the free limit.
+@param[in]	space_id	space id
+@param[in]	offset		page offset; if equal to the free limit, we
+try to add new extents to the space free list
+@param[in]	page_size	page size
+@param[in,out]	mtr		mini-transaction
+@return pointer to the extent descriptor, NULL if the page does not
+exist in the space or if the offset exceeds the free limit */
+xdes_t*
+xdes_get_descriptor(
+	ulint			space_id,
+	ulint			offset,
+	const page_size_t&	page_size,
+	mtr_t*			mtr)
+__attribute__((warn_unused_result));
 /**********************************************************************//**
 Gets a descriptor bit of a page.
-@return TRUE if free */
+@return TRUE if free */
 UNIV_INLINE
 ibool
 xdes_get_bit(
@@ -780,37 +701,17 @@ xdes_get_bit(
 	ulint	offset);/*!< in: page offset within extent:
 			0 ... FSP_EXTENT_SIZE - 1 */
-/********************************************************************//**
-Calculates the page where the descriptor of a page resides.
-@return descriptor page offset */
+/** Calculates the page where the descriptor of a page resides.
+@param[in]	page_size	page size
+@param[in]	offset		page offset
+@return descriptor page offset */
 UNIV_INLINE
 ulint
 xdes_calc_descriptor_page(
-/*======================*/
-	ulint	zip_size,	/*!< in: compressed page size in bytes;
-				0 for uncompressed pages */
-	ulint	offset);	/*!< in: page offset */
-
+	const page_size_t&	page_size,
+	ulint			offset);
 #endif /* !UNIV_INNOCHECKSUM */
-/********************************************************************//**
-Extract the zip size from tablespace flags.
A tablespace has only one -physical page size whether that page is compressed or not. -@return compressed page size of the file-per-table tablespace in bytes, -or zero if the table is not compressed. */ -UNIV_INLINE -ulint -fsp_flags_get_zip_size( -/*====================*/ - ulint flags); /*!< in: tablespace flags */ -/********************************************************************//** -Extract the page size from tablespace flags. -@return page size of the tablespace in bytes */ -UNIV_INLINE -ulint -fsp_flags_get_page_size( -/*====================*/ - ulint flags); /*!< in: tablespace flags */ /*********************************************************************/ /* @return offset into fsp header where crypt data is stored */ @@ -818,7 +719,7 @@ UNIV_INTERN ulint fsp_header_get_crypt_offset( /*========================*/ - ulint zip_size, /*!< in: zip_size */ + const page_size_t& page_size,/*!< in: page size */ ulint* max_size); /*!< out: free space after offset */ #define fsp_page_is_free(space,page,mtr) \ @@ -838,7 +739,6 @@ fsp_page_is_free_func( const char *file, ulint line); #endif - #ifndef UNIV_NONINL #include "fsp0fsp.ic" #endif diff --git a/storage/innobase/include/fsp0fsp.ic b/storage/innobase/include/fsp0fsp.ic index 9f09a9d53e1..e204ac3c69f 100644 --- a/storage/innobase/include/fsp0fsp.ic +++ b/storage/innobase/include/fsp0fsp.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2013, SkySQL Ab. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under @@ -26,260 +26,247 @@ Created 12/18/1995 Heikki Tuuri #ifndef UNIV_INNOCHECKSUM -/***********************************************************************//** -Checks if a page address is an extent descriptor page address. -@return TRUE if a descriptor page */ +/** Checks if a page address is an extent descriptor page address. +@param[in] page_id page id +@param[in] page_size page size +@return TRUE if a descriptor page */ UNIV_INLINE ibool fsp_descr_page( -/*===========*/ - ulint zip_size,/*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint page_no)/*!< in: page number */ + const page_id_t& page_id, + const page_size_t& page_size) { - ut_ad(ut_is_2pow(zip_size)); - - if (!zip_size) { - return((page_no & (UNIV_PAGE_SIZE - 1)) == FSP_XDES_OFFSET); - } - - return((page_no & (zip_size - 1)) == FSP_XDES_OFFSET); + return((page_id.page_no() & (page_size.physical() - 1)) + == FSP_XDES_OFFSET); } -/********************************************************************//** -Validate and return the tablespace flags, which are stored in the -tablespace header at offset FSP_SPACE_FLAGS. They should be 0 for -ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. The newer row formats, -COMPRESSED and DYNAMIC, use a file format > Antelope so they should -have a file format number plus the DICT_TF_COMPACT bit set. -@return true if check ok */ +/** Determine if the tablespace is compressed from tablespace flags. 
+@param[in] flags Tablespace flags +@return true if compressed, false if not compressed */ UNIV_INLINE bool -fsp_flags_is_valid( -/*===============*/ - ulint flags) /*!< in: tablespace flags */ +fsp_flags_is_compressed( + ulint flags) { - ulint post_antelope = FSP_FLAGS_GET_POST_ANTELOPE(flags); - ulint zip_ssize = FSP_FLAGS_GET_ZIP_SSIZE(flags); - ulint atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(flags); - ulint page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); - ulint unused = FSP_FLAGS_GET_UNUSED(flags); - ulint page_compression = FSP_FLAGS_GET_PAGE_COMPRESSION(flags); - ulint page_compression_level = FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags); - ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags); - - DBUG_EXECUTE_IF("fsp_flags_is_valid_failure", return(false);); - - /* fsp_flags is zero unless atomic_blobs is set. */ - /* Make sure there are no bits that we do not know about. */ - if (unused != 0 || flags == 1) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted unused %lu\n", - flags, unused); - return(false); - } else if (post_antelope) { - /* The Antelope row formats REDUNDANT and COMPACT did - not use tablespace flags, so this flag and the entire - 4-byte field is zero for Antelope row formats. */ - - if (!atomic_blobs) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_blobs %lu\n", - flags, atomic_blobs); - return(false); - } - } - - if (!atomic_blobs) { - /* Barracuda row formats COMPRESSED and DYNAMIC build on - the page structure introduced for the COMPACT row format - by allowing long fields to be broken into prefix and - externally stored parts. */ - - if (post_antelope || zip_ssize != 0) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted zip_ssize %lu atomic_blobs %lu\n", - flags, zip_ssize, atomic_blobs); - return(false); - } - - } else if (!post_antelope || zip_ssize > PAGE_ZIP_SSIZE_MAX) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted zip_ssize %lu max %d\n", - flags, zip_ssize, PAGE_ZIP_SSIZE_MAX); - return(false); - } else if (page_ssize > UNIV_PAGE_SSIZE_MAX) { - - /* The page size field can be used for any row type, or it may - be zero for an original 16k page size. - Validate the page shift size is within allowed range. */ + return(FSP_FLAGS_GET_ZIP_SSIZE(flags) != 0); +} - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_ssize %lu max %lu\n", - flags, page_ssize, UNIV_PAGE_SSIZE_MAX); - return(false); +#define ACTUAL_SSIZE(ssize) (0 == ssize ? UNIV_PAGE_SSIZE_ORIG : ssize) - } else if (UNIV_PAGE_SIZE != UNIV_PAGE_SIZE_ORIG && !page_ssize) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_ssize %lu max %lu:%d\n", - flags, page_ssize, UNIV_PAGE_SIZE, UNIV_PAGE_SIZE_ORIG); +/** Determine if two tablespaces are equivalent or compatible. 
+@param[in] flags1 First tablespace flags +@param[in] flags2 Second tablespace flags +@return true the flags are compatible, false if not */ +UNIV_INLINE +bool +fsp_flags_are_equal( + ulint flags1, + ulint flags2) +{ + /* If either one of these flags is ULINT_UNDEFINED, + then they are not equal */ + if (flags1 == ULINT_UNDEFINED || flags2 == ULINT_UNDEFINED) { return(false); } - /* Page compression level requires page compression and atomic blobs - to be set */ - if (page_compression_level || page_compression) { - if (!page_compression || !atomic_blobs) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_compression %lu\n" - "InnoDB: Error: page_compression_level %lu atomic_blobs %lu\n", - flags, page_compression, page_compression_level, atomic_blobs); - return(false); - } - } - - if (atomic_writes > ATOMIC_WRITES_OFF) { - fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_writes %lu\n", - flags, atomic_writes); - return (false); + if (!fsp_is_shared_tablespace(flags1) || !fsp_is_shared_tablespace(flags2)) { + /* At least one of these is a single-table tablespaces so all + flags must match. */ + return(flags1 == flags2); } -#if UNIV_FORMAT_MAX != UNIV_FORMAT_B -# error "UNIV_FORMAT_MAX != UNIV_FORMAT_B, Add more validations." -#endif + /* Both are shared tablespaces which can contain all formats. + But they must have the same logical and physical page size. + Once InnoDB can support multiple page sizes together, + the logical page size will not matter. */ + ulint zip_ssize1 = ACTUAL_SSIZE(FSP_FLAGS_GET_ZIP_SSIZE(flags1)); + ulint zip_ssize2 = ACTUAL_SSIZE(FSP_FLAGS_GET_ZIP_SSIZE(flags2)); + ulint page_ssize1 = ACTUAL_SSIZE(FSP_FLAGS_GET_PAGE_SSIZE(flags1)); + ulint page_ssize2 = ACTUAL_SSIZE(FSP_FLAGS_GET_PAGE_SSIZE(flags2)); - /* The DATA_DIR field can be used for any row type so there is - nothing here to validate. */ - - return(true); + return(zip_ssize1 == zip_ssize2 && page_ssize1 == page_ssize2); } -/********************************************************************//** -Determine if the tablespace is compressed from dict_table_t::flags. -@return TRUE if compressed, FALSE if not compressed */ +/** Convert a page size, which is a power of 2, to an ssize, which is +the number of bit shifts from 512 to make that page size. +@param[in] page_size compressed page size in bytes +@return an ssize created from the page size provided. */ UNIV_INLINE -ibool -fsp_flags_is_compressed( -/*====================*/ - ulint flags) /*!< in: tablespace flags */ +ulint +page_size_to_ssize( + ulint page_size) { - return(FSP_FLAGS_GET_ZIP_SSIZE(flags) != 0); -} + ulint ssize; -#endif /* !UNIV_INNOCHECKSUM */ + for (ssize = UNIV_ZIP_SIZE_SHIFT_MIN; + ((ulint) 1 << ssize) < page_size; + ssize++) {}; + + return(ssize - UNIV_ZIP_SIZE_SHIFT_MIN + 1); +} -/********************************************************************//** -Extract the zip size from tablespace flags. -@return compressed page size of the file-per-table tablespace in bytes, -or zero if the table is not compressed. */ +/** Add the compressed page size to the tablespace flags. +@param[in] flags Tablespace flags +@param[in] page_size page sizes in bytes and compression flag. 
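The flag helpers above and below all lean on the "ssize" encoding: an ssize of 0 means the field is unset, otherwise the size in bytes is 512 shifted left by ssize (the "log2 minus 9" form mentioned in the removed comments, assuming the usual 1 KiB minimum zip page size). A minimal standalone sketch of that round trip; the helper names are invented for illustration and are not part of the patch:

#include <cassert>
#include <cstdio>

/* Hypothetical helpers mirroring the encoding used by page_size_to_ssize()
and the removed fsp_flags_get_zip_size(): size == 512 << ssize. */
static unsigned size_to_ssize(unsigned size_in_bytes)
{
	unsigned ssize = 0;
	while ((512u << ssize) < size_in_bytes) {
		++ssize;
	}
	return ssize;
}

static unsigned ssize_to_size(unsigned ssize)
{
	return ssize ? (512u << ssize) : 0;	/* 0 means "not set" */
}

int main()
{
	assert(size_to_ssize(1024)  == 1);	/*  1 KiB zip pages */
	assert(size_to_ssize(8192)  == 4);	/*  8 KiB zip pages */
	assert(size_to_ssize(16384) == 5);	/* 16 KiB pages */
	assert(ssize_to_size(5) == 16384);
	std::printf("ssize(16384) = %u\n", size_to_ssize(16384));
	return 0;
}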
+@return tablespace flags after zip size is added */ UNIV_INLINE ulint -fsp_flags_get_zip_size( -/*===================*/ - ulint flags) /*!< in: tablespace flags */ +fsp_flags_set_zip_size( + ulint flags, + const page_size_t& page_size) { - ulint zip_size = 0; - ulint ssize = FSP_FLAGS_GET_ZIP_SSIZE(flags); + if (!page_size.is_compressed()) { + return(flags); + } - /* Convert from a 'log2 minus 9' to a page size in bytes. */ - if (ssize) { - zip_size = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); + /* Zip size should be a power of 2 between UNIV_ZIP_SIZE_MIN + and UNIV_ZIP_SIZE_MAX */ + ut_ad(page_size.physical() >= UNIV_ZIP_SIZE_MIN); + ut_ad(page_size.physical() <= UNIV_ZIP_SIZE_MAX); + ut_ad(ut_is_2pow(page_size.physical())); - ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX); - } + ulint ssize = page_size_to_ssize(page_size.physical()); + + ut_ad(ssize > 0); + ut_ad(ssize <= UNIV_PAGE_SSIZE_MAX); + + flags |= (ssize << FSP_FLAGS_POS_ZIP_SSIZE); + + ut_ad(fsp_flags_is_valid(flags)); - return(zip_size); + return(flags); } -/********************************************************************//** -Extract the page size from tablespace flags. -@return page size of the tablespace in bytes */ +/** Add the page size to the tablespace flags. +@param[in] flags Tablespace flags +@param[in] page_size page sizes in bytes and compression flag. +@return tablespace flags after page size is added */ UNIV_INLINE ulint -fsp_flags_get_page_size( -/*====================*/ - ulint flags) /*!< in: tablespace flags */ +fsp_flags_set_page_size( + ulint flags, + const page_size_t& page_size) { - ulint page_size = 0; - ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); - - /* Convert from a 'log2 minus 9' to a page size in bytes. */ - if (UNIV_UNLIKELY(ssize)) { - page_size = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); + /* Page size should be a power of two between UNIV_PAGE_SIZE_MIN + and UNIV_PAGE_SIZE */ + ut_ad(page_size.logical() >= UNIV_PAGE_SIZE_MIN); + ut_ad(page_size.logical() <= UNIV_PAGE_SIZE_MAX); + ut_ad(ut_is_2pow(page_size.logical())); + + /* Remove this assert once we add support for different + page size per tablespace. Currently all tablespaces must + have a page size that is equal to innodb-page-size */ + ut_ad(page_size.logical() == UNIV_PAGE_SIZE); + + if (page_size.logical() == UNIV_PAGE_SIZE_ORIG) { + ut_ad(0 == FSP_FLAGS_GET_PAGE_SSIZE(flags)); - ut_ad(page_size <= UNIV_PAGE_SIZE_MAX); } else { - /* If the page size was not stored, then it is the - original 16k. */ - page_size = UNIV_PAGE_SIZE_ORIG; + ulint ssize = page_size_to_ssize(page_size.logical()); + + ut_ad(ssize); + ut_ad(ssize <= UNIV_PAGE_SSIZE_MAX); + + flags |= (ssize << FSP_FLAGS_POS_PAGE_SSIZE); } - return(page_size); -} + ut_ad(fsp_flags_is_valid(flags)); -#ifndef UNIV_INNOCHECKSUM + return(flags); +} -/********************************************************************//** -Add the page size to the tablespace flags. -@return tablespace flags after page size is added */ +/** Initialize an FSP flags integer. +@param[in] page_size page sizes in bytes and compression flag. +@param[in] atomic_blobs Used by Dynammic and Compressed. +@param[in] has_data_dir This tablespace is in a remote location. +@param[in] is_shared This tablespace can be shared by many tables. +@param[in] is_temporary This tablespace is temporary. 
+@param[in] page_compressed Table uses page compression +@param[in] page_compression_level Page compression level +@param[in] atomic_writes Table uses atomic writes +@return tablespace flags after initialization */ UNIV_INLINE ulint -fsp_flags_set_page_size( -/*====================*/ - ulint flags, /*!< in: tablespace flags */ - ulint page_size) /*!< in: page size in bytes */ +fsp_flags_init( + const page_size_t& page_size, + bool atomic_blobs, + bool has_data_dir, + bool is_shared, + bool is_temporary, + bool page_compression, + ulint page_compression_level, + ulint atomic_writes) { - ulint ssize = 0; - ulint shift; + ut_ad(page_size.physical() <= page_size.logical()); + ut_ad(!page_size.is_compressed() || atomic_blobs); - /* Page size should be > UNIV_PAGE_SIZE_MIN */ - ut_ad(page_size >= UNIV_PAGE_SIZE_MIN); - ut_ad(page_size <= UNIV_PAGE_SIZE_MAX); + /* Page size should be a power of two between UNIV_PAGE_SIZE_MIN + and UNIV_PAGE_SIZE, but zip_size may be 0 if not compressed. */ + ulint flags = fsp_flags_set_page_size(0, page_size); - if (page_size == UNIV_PAGE_SIZE_ORIG) { - ut_ad(0 == FSP_FLAGS_GET_PAGE_SSIZE(flags)); - return(flags); + if (atomic_blobs) { + flags |= FSP_FLAGS_MASK_POST_ANTELOPE + | FSP_FLAGS_MASK_ATOMIC_BLOBS; } - for (shift = UNIV_PAGE_SIZE_SHIFT_MAX; - shift >= UNIV_PAGE_SIZE_SHIFT_MIN; - shift--) { - ulint mask = (1 << shift); - if (page_size & mask) { - ut_ad(!(page_size & ~mask)); - ssize = shift - UNIV_ZIP_SIZE_SHIFT_MIN + 1; - break; - } + /* If the zip_size is explicit and different from the default, + compressed row format is implied. */ + flags = fsp_flags_set_zip_size(flags, page_size); + + if (has_data_dir) { + flags |= FSP_FLAGS_MASK_DATA_DIR; } - ut_ad(ssize); - ut_ad(ssize <= UNIV_PAGE_SSIZE_MAX); + /* Shared tablespaces can hold all row formats, so we only mark the + POST_ANTELOPE and ATOMIC_BLOB bits if it is compressed. */ + if (is_shared) { + ut_ad(!has_data_dir); + flags |= FSP_FLAGS_MASK_SHARED; + } - flags = FSP_FLAGS_SET_PAGE_SSIZE(flags, ssize); + if (is_temporary) { + ut_ad(!has_data_dir); + flags |= FSP_FLAGS_MASK_TEMPORARY; + } - ut_ad(fsp_flags_is_valid(flags)); + /* In addition, tablespace flags also contain if the page + compression is used for this table. */ + if (page_compression) { + flags |= FSP_FLAGS_SET_PAGE_COMPRESSION(flags, page_compression); + } + + /* In addition, tablespace flags also contain page compression level + if page compression is used for this table. */ + if (page_compression && page_compression_level) { + flags |= FSP_FLAGS_SET_PAGE_COMPRESSION_LEVEL(flags, page_compression_level); + } + + /* In addition, tablespace flags also contain flag if atomic writes + is used for this table */ + if (atomic_writes) { + flags |= FSP_FLAGS_SET_ATOMIC_WRITES(flags, atomic_writes); + } return(flags); } -/********************************************************************//** -Calculates the descriptor index within a descriptor page. -@return descriptor index */ +/** Calculates the descriptor index within a descriptor page. 
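fsp_flags_init() above assembles the flags word by ORing field masks; the WIDTH/POS/MASK/GET constants it relies on are defined later in fsp0types.h in this same patch. A small self-contained sketch of that pattern, using only the low-order field layout (POST_ANTELOPE at bit 0, ZIP_SSIZE at bits 1-4, ATOMIC_BLOBS at bit 5) purely as an illustration, not as the authoritative definitions:

#include <cassert>

#define W_POST_ANTELOPE	1
#define W_ZIP_SSIZE	4
#define P_POST_ANTELOPE	0
#define P_ZIP_SSIZE	(P_POST_ANTELOPE + W_POST_ANTELOPE)
#define P_ATOMIC_BLOBS	(P_ZIP_SSIZE + W_ZIP_SSIZE)

/* Same shape as the FSP_FLAGS_MASK_* / FSP_FLAGS_GET_* macros: mask the
field out of the flags word, then shift it down to zero-relative. */
#define MASK(width, pos)	((~(~0UL << (width))) << (pos))
#define GET(flags, width, pos)	(((flags) & MASK(width, pos)) >> (pos))

int main()
{
	unsigned long	flags = 0;

	/* A compressed table: POST_ANTELOPE and ATOMIC_BLOBS are set and a
	zip ssize of 4 (8 KiB, see the ssize sketch earlier) is ORed in. */
	flags |= MASK(W_POST_ANTELOPE, P_POST_ANTELOPE);
	flags |= (4UL << P_ZIP_SSIZE);
	flags |= (1UL << P_ATOMIC_BLOBS);

	assert(GET(flags, W_ZIP_SSIZE, P_ZIP_SSIZE) == 4);
	assert(GET(flags, 1, P_ATOMIC_BLOBS) == 1);
	return 0;
}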
+@param[in] page_size page size +@param[in] offset page offset +@return descriptor index */ UNIV_INLINE ulint xdes_calc_descriptor_index( -/*=======================*/ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint offset) /*!< in: page offset */ + const page_size_t& page_size, + ulint offset) { - ut_ad(ut_is_2pow(zip_size)); - - if (zip_size == 0) { - return(ut_2pow_remainder(offset, UNIV_PAGE_SIZE) - / FSP_EXTENT_SIZE); - } else { - return(ut_2pow_remainder(offset, zip_size) / FSP_EXTENT_SIZE); - } + return(ut_2pow_remainder(offset, page_size.physical()) + / FSP_EXTENT_SIZE); } -#endif /* !UNIV_INNOCHECKSUM */ /**********************************************************************//** Gets a descriptor bit of a page. -@return TRUE if free */ +@return TRUE if free */ UNIV_INLINE ibool xdes_get_bit( @@ -303,17 +290,15 @@ xdes_get_bit( bit_index)); } -#ifndef UNIV_INNOCHECKSUM -/********************************************************************//** -Calculates the page where the descriptor of a page resides. -@return descriptor page offset */ +/** Calculates the page where the descriptor of a page resides. +@param[in] page_size page size +@param[in] offset page offset +@return descriptor page offset */ UNIV_INLINE ulint xdes_calc_descriptor_page( -/*======================*/ - ulint zip_size, /*!< in: compressed page size in bytes; - 0 for uncompressed pages */ - ulint offset) /*!< in: page offset */ + const page_size_t& page_size, + ulint offset) { #ifndef DOXYGEN /* Doxygen gets confused by these */ # if UNIV_PAGE_SIZE_MAX <= XDES_ARR_OFFSET \ @@ -335,16 +320,13 @@ xdes_calc_descriptor_page( + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE) * XDES_SIZE); - ut_ad(ut_is_2pow(zip_size)); - - if (zip_size == 0) { - return(ut_2pow_round(offset, UNIV_PAGE_SIZE)); - } else { - ut_ad(zip_size > XDES_ARR_OFFSET - + (zip_size / FSP_EXTENT_SIZE) * XDES_SIZE); - return(ut_2pow_round(offset, zip_size)); +#ifdef UNIV_DEBUG + if (page_size.is_compressed()) { + ut_a(page_size.physical() > XDES_ARR_OFFSET + + (page_size.physical() / FSP_EXTENT_SIZE) * XDES_SIZE); } -} +#endif /* UNIV_DEBUG */ + return(ut_2pow_round(offset, page_size.physical())); +} #endif /* !UNIV_INNOCHECKSUM */ - diff --git a/storage/innobase/include/fsp0pagecompress.h b/storage/innobase/include/fsp0pagecompress.h index 5f943ee2b83..44bdddfa3bf 100644 --- a/storage/innobase/include/fsp0pagecompress.h +++ b/storage/innobase/include/fsp0pagecompress.h @@ -51,7 +51,7 @@ fsp_header_get_compression_level( Determine if the tablespace is page compressed from dict_table_t::flags. @return TRUE if page compressed, FALSE if not compressed */ UNIV_INLINE -ibool +bool fsp_flags_is_page_compressed( /*=========================*/ ulint flags); /*!< in: tablespace flags */ diff --git a/storage/innobase/include/fsp0pagecompress.ic b/storage/innobase/include/fsp0pagecompress.ic index e879aa2c16e..a3971da6772 100644 --- a/storage/innobase/include/fsp0pagecompress.ic +++ b/storage/innobase/include/fsp0pagecompress.ic @@ -29,7 +29,7 @@ Created 11/12/2013 Jan Lindström jan.lindstrom@mariadb.com Determine if the tablespace is page compressed from dict_table_t::flags. 
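To make the two xdes helpers above concrete, here is a worked example assuming 16 KiB physical pages and 64-page (1 MiB) extents; ut_2pow_round() and ut_2pow_remainder() are modelled with plain power-of-two bit masks:

#include <cassert>

int main()
{
	const unsigned long	physical    = 16384;	/* page_size.physical() */
	const unsigned long	extent_size = 64;	/* FSP_EXTENT_SIZE at 16 KiB */
	const unsigned long	page_no     = 70000;	/* arbitrary page in the space */

	/* xdes_calc_descriptor_page(): round the page number down to a
	multiple of the physical page size; that page holds the descriptors. */
	unsigned long	descr_page = page_no & ~(physical - 1);

	/* xdes_calc_descriptor_index(): slot of the page's extent within
	that descriptor page. */
	unsigned long	descr_index = (page_no & (physical - 1)) / extent_size;

	assert(descr_page == 65536);	/* 4th descriptor page of the space */
	assert(descr_index == 69);	/* 70th extent described on that page */
	return 0;
}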
@return TRUE if page compressed, FALSE if not page compressed */ UNIV_INLINE -ibool +bool fsp_flags_is_page_compressed( /*=========================*/ ulint flags) /*!< in: tablespace flags */ @@ -65,7 +65,7 @@ fsp_flags_get_atomic_writes( Find out wheather the page is index page or not @return true if page type index page, false if not */ UNIV_INLINE -ibool +bool fil_page_is_index_page( /*===================*/ byte* buf) /*!< in: page */ @@ -77,10 +77,10 @@ fil_page_is_index_page( Find out wheather the page is page compressed @return true if page is page compressed, false if not */ UNIV_INLINE -ibool +bool fil_page_is_compressed( /*===================*/ - byte* buf) /*!< in: page */ + const byte* buf) /*!< in: page */ { return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED); } @@ -89,10 +89,10 @@ fil_page_is_compressed( Find out wheather the page is page compressed @return true if page is page compressed, false if not */ UNIV_INLINE -ibool +bool fil_page_is_compressed_encrypted( /*=============================*/ - byte* buf) /*!< in: page */ + const byte* buf) /*!< in: page */ { return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED); } @@ -125,7 +125,7 @@ Extract the page compression from space. @return true if space is page compressed, false if space is not found or space is not page compressed. */ UNIV_INLINE -ibool +bool fil_space_is_page_compressed( /*=========================*/ ulint id) /*!< in: space id */ @@ -209,7 +209,7 @@ fil_space_get_atomic_writes( Find out wheather the page is page compressed with lzo method @return true if page is page compressed with lzo method, false if not */ UNIV_INLINE -ibool +bool fil_page_is_lzo_compressed( /*=======================*/ byte* buf) /*!< in: page */ diff --git a/storage/innobase/include/fsp0space.h b/storage/innobase/include/fsp0space.h new file mode 100644 index 00000000000..5fdb12a922f --- /dev/null +++ b/storage/innobase/include/fsp0space.h @@ -0,0 +1,247 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/fsp0space.h +General shared tablespace implementation. + +Created 2013-7-26 by Kevin Lewis +*******************************************************/ + +#ifndef fsp0space_h +#define fsp0space_h + +#include "univ.i" +#include "fsp0file.h" +#include "fsp0fsp.h" +#include "fsp0types.h" +#include "ut0new.h" + +#include + +/** Data structure that contains the information about shared tablespaces. 
+Currently this can be the system tablespace or a temporary table tablespace */ +class Tablespace { + +public: + typedef std::vector > files_t; + + /** Data file information - each Datafile can be accessed globally */ + files_t m_files; + + Tablespace() + : + m_files(), + m_name(), + m_space_id(ULINT_UNDEFINED), + m_path(), + m_flags(), + m_ignore_read_only(false) + { + /* No op */ + } + + virtual ~Tablespace() + { + shutdown(); + ut_ad(m_files.empty()); + ut_ad(m_space_id == ULINT_UNDEFINED); + if (m_name != NULL) { + ut_free(m_name); + m_name = NULL; + } + if (m_path != NULL) { + ut_free(m_path); + m_path = NULL; + } + } + + // Disable copying + Tablespace(const Tablespace&); + Tablespace& operator=(const Tablespace&); + + /** Set tablespace name + @param[in] name tablespace name */ + void set_name(const char* name) + { + ut_ad(m_name == NULL); + m_name = mem_strdup(name); + ut_ad(m_name != NULL); + } + + /** Get tablespace name + @return tablespace name */ + const char* name() const + { + return(m_name); + } + + /** Set tablespace path and filename members. + @param[in] path where tablespace file(s) resides + @param[in] len length of the file path */ + void set_path(const char* path, size_t len) + { + ut_ad(m_path == NULL); + m_path = mem_strdupl(path, len); + ut_ad(m_path != NULL); + + os_normalize_path_for_win(m_path); + } + + /** Set tablespace path and filename members. + @param[in] path where tablespace file(s) resides */ + void set_path(const char* path) + { + set_path(path, strlen(path)); + } + + /** Get tablespace path + @return tablespace path */ + const char* path() const + { + return(m_path); + } + + /** Set the space id of the tablespace + @param[in] space_id tablespace ID to set */ + void set_space_id(ulint space_id) + { + ut_ad(m_space_id == ULINT_UNDEFINED); + m_space_id = space_id; + } + + /** Get the space id of the tablespace + @return m_space_id space id of the tablespace */ + ulint space_id() const + { + return(m_space_id); + } + + /** Set the tablespace flags + @param[in] fsp_flags tablespace flags */ + void set_flags(ulint fsp_flags) + { + ut_ad(fsp_flags_is_valid(fsp_flags)); + m_flags = fsp_flags; + } + + /** Get the tablespace flags + @return m_flags tablespace flags */ + ulint flags() const + { + return(m_flags); + } + + /** Get the tablespace encryption mode + @return m_mode tablespace encryption mode */ + fil_encryption_t encryption_mode() const + { + return (m_mode); + } + + /** Get the tablespace encryption key_id + @return m_key_id tablespace encryption key_id */ + ulint key_id() const + { + return (m_key_id); + } + + /** Set Ignore Read Only Status for tablespace. + @param[in] read_only_status read only status indicator */ + void set_ignore_read_only(bool read_only_status) + { + m_ignore_read_only = read_only_status; + } + + /** Free the memory allocated by the Tablespace object */ + void shutdown(); + + /** + @return ULINT_UNDEFINED if the size is invalid else the sum of sizes */ + ulint get_sum_of_sizes() const; + + /** Open or Create the data files if they do not exist. + @param[in] is_temp whether this is a temporary tablespace + @return DB_SUCCESS or error code */ + dberr_t open_or_create(bool is_temp) + __attribute__((warn_unused_result)); + + /** Delete all the data files. */ + void delete_files(); + + /** Check if two tablespaces have common data file names. + @param[in] other_space Tablespace to check against this. 
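A hypothetical caller of the Tablespace API declared above might look like the following. This assumes the InnoDB build environment; the name, path, space id and flag values are invented for illustration only:

#include "fsp0space.h"

static dberr_t open_example_space(Tablespace& space)
{
	space.set_name("example_space");	/* strdup'ed, freed by ~Tablespace() */
	space.set_path("/var/lib/mysql/");	/* directory only, no filename */
	space.set_space_id(1000);		/* may be set exactly once */
	space.set_flags(0);			/* must pass fsp_flags_is_valid() */

	/* Parse an ADD DATAFILE style path and register the data file. */
	dberr_t	err = space.add_datafile("example_space.ibd");
	if (err != DB_SUCCESS) {
		return(err);
	}

	/* false: this is not the temporary tablespace. */
	return(space.open_or_create(false));
}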
+ @return true if they have the same data filenames and paths */ + bool intersection(const Tablespace* other_space); + + /** Use the ADD DATAFILE path to create a Datafile object and add + it to the front of m_files. Parse the datafile path into a path + and a basename with extension 'ibd'. This datafile_path provided + may be an absolute or relative path, but it must end with the + extension .ibd and have a basename of at least 1 byte. + + Set tablespace m_path member and add a Datafile with the filename. + @param[in] datafile_path full path of the tablespace file. */ + dberr_t add_datafile( + const char* datafile_path); + + /* Return a pointer to the first Datafile for this Tablespace + @return pointer to the first Datafile for this Tablespace*/ + Datafile* first_datafile() + { + ut_a(!m_files.empty()); + return(&m_files.front()); + } + + /** Check if undo tablespace. + @return true if undo tablespace */ + static bool is_undo_tablespace(ulint id); +private: + /** + @param[in] filename Name to lookup in the data files. + @return true if the filename exists in the data files */ + bool find(const char* filename); + + /** Note that the data file was found. + @param[in] file data file object */ + void file_found(Datafile& file); + + /* DATA MEMBERS */ + + /** Name of the tablespace. */ + char* m_name; + + /** Tablespace ID */ + ulint m_space_id; + + /** Path where tablespace files will reside, not including a filename.*/ + char* m_path; + + /** Tablespace flags */ + ulint m_flags; + + /** Encryption mode and key_id */ + fil_encryption_t m_mode; + ulint m_key_id; + +protected: + /** Ignore server read only configuration for this tablespace. */ + bool m_ignore_read_only; +}; + +#endif /* fsp0space_h */ diff --git a/storage/innobase/include/fsp0sysspace.h b/storage/innobase/include/fsp0sysspace.h new file mode 100644 index 00000000000..42778f7d8e6 --- /dev/null +++ b/storage/innobase/include/fsp0sysspace.h @@ -0,0 +1,326 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/**************************************************//** +@file include/fsp0sysspace.h +Multi file, shared, system tablespace implementation. + +Created 2013-7-26 by Kevin Lewis +*******************************************************/ + +#ifndef fsp0sysspace_h +#define fsp0sysspace_h + +#include "univ.i" +#include "fsp0space.h" + +/** If the last data file is auto-extended, we add this many pages to it +at a time. We have to make this public because it is a config variable. */ +extern ulong sys_tablespace_auto_extend_increment; + +#ifdef UNIV_DEBUG +/** Control if extra debug checks need to be done for temporary tablespace. +Default = true that is disable such checks. 
+This variable is not exposed to end-user but still kept as variable for +developer to enable it during debug. */ +extern bool srv_skip_temp_table_checks_debug; +#endif /* UNIV_DEBUG */ + +/** Data structure that contains the information about shared tablespaces. +Currently this can be the system tablespace or a temporary table tablespace */ +class SysTablespace : public Tablespace +{ +public: + + SysTablespace() + : + m_auto_extend_last_file(), + m_last_file_size_max(), + m_created_new_raw(), + m_is_tablespace_full(false), + m_sanity_checks_done(false), + m_crypt_info() + { + /* No op */ + } + + ~SysTablespace() + { + shutdown(); + } + + /** Set tablespace full status + @param[in] is_full true if full */ + void set_tablespace_full_status(bool is_full) + { + m_is_tablespace_full = is_full; + } + + /** Get tablespace full status + @return true if table is full */ + bool get_tablespace_full_status() + { + return(m_is_tablespace_full); + } + + /** Set sanity check status + @param[in] status true if sanity checks are done */ + void set_sanity_check_status(bool status) + { + m_sanity_checks_done = status; + } + + /** Get sanity check status + @return true if sanity checks are done */ + bool get_sanity_check_status() + { + return(m_sanity_checks_done); + } + + /** Parse the input params and populate member variables. + @param filepath path to data files + @param supports_raw true if it supports raw devices + @return true on success parse */ + bool parse_params(const char* filepath, bool supports_raw); + + /** Check the data file specification. + @param[out] create_new_db true if a new database + is to be created + @param[in] min_expected_size expected tablespace + size in bytes + @return DB_SUCCESS if all OK else error code */ + dberr_t check_file_spec( + bool* create_new_db, + ulint min_expected_tablespace_size); + + /** Free the memory allocated by parse() */ + void shutdown(); + + /** Normalize the file size, convert to extents. */ + void normalize(); + + /** + @return true if a new raw device was created. */ + bool created_new_raw() const + { + return(m_created_new_raw); + } + + /** + @return auto_extend value setting */ + ulint can_auto_extend_last_file() const + { + return(m_auto_extend_last_file); + } + + /** Set the last file size. + @param[in] size the size to set */ + void set_last_file_size(ulint size) + { + ut_ad(!m_files.empty()); + m_files.back().m_size = size; + } + + /** Get the size of the last data file in the tablespace + @return the size of the last data file in the array */ + ulint last_file_size() const + { + ut_ad(!m_files.empty()); + return(m_files.back().m_size); + } + + /** + @return the autoextend increment in pages. */ + ulint get_autoextend_increment() const + { + return(sys_tablespace_auto_extend_increment + * ((1024 * 1024) / UNIV_PAGE_SIZE)); + } + + /** + @return next increment size */ + ulint get_increment() const; + + /** Open or create the data files + @param[in] is_temp whether this is a temporary tablespace + @param[in] create_new_db whether we are creating a new database + @param[out] sum_new_sizes sum of sizes of the new files added + @param[out] flush_lsn FIL_PAGE_FILE_FLUSH_LSN of first file + @return DB_SUCCESS or error code */ + dberr_t open_or_create( + bool is_temp, + bool create_new_db, + ulint* sum_new_sizes, + lsn_t* flush_lsn) + __attribute__((warn_unused_result)); + +private: + /** Check the tablespace header for this tablespace. 
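get_autoextend_increment() above converts the configured increment from megabytes to pages. A quick worked check of that arithmetic, assuming the increment is set to 64 (MB) and 16 KiB pages:

#include <cassert>

int main()
{
	const unsigned long	auto_extend_increment_mb = 64;		/* server setting */
	const unsigned long	univ_page_size           = 16384;	/* UNIV_PAGE_SIZE */

	unsigned long	pages = auto_extend_increment_mb
		* ((1024UL * 1024UL) / univ_page_size);

	assert(pages == 4096);	/* 4096 pages * 16 KiB = 64 MiB per extension */
	return 0;
}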
+ @param[out] flushed_lsn the value of FIL_PAGE_FILE_FLUSH_LSN + @return DB_SUCCESS or error code */ + dberr_t read_lsn_and_check_flags(lsn_t* flushed_lsn); + + /** + @return true if the last file size is valid. */ + bool is_valid_size() const + { + return(m_last_file_size_max >= last_file_size()); + } + + /** + @return true if configured to use raw devices */ + bool has_raw_device(); + + /** Note that the data file was not found. + @param[in] file data file object + @param[out] create_new_db true if a new instance to be created + @return DB_SUCESS or error code */ + dberr_t file_not_found(Datafile& file, bool* create_new_db); + + /** Note that the data file was found. + @param[in,out] file data file object + @return true if a new instance to be created */ + bool file_found(Datafile& file); + + /** Create a data file. + @param[in,out] file data file object + @return DB_SUCCESS or error code */ + dberr_t create(Datafile& file); + + /** Create a data file. + @param[in,out] file data file object + @return DB_SUCCESS or error code */ + dberr_t create_file(Datafile& file); + + /** Open a data file. + @param[in,out] file data file object + @return DB_SUCCESS or error code */ + dberr_t open_file(Datafile& file); + + /** Set the size of the file. + @param[in,out] file data file object + @return DB_SUCCESS or error code */ + dberr_t set_size(Datafile& file); + + /** Convert a numeric string that optionally ends in G or M, to a + number containing megabytes. + @param[in] ptr string with a quantity in bytes + @param[out] megs the number in megabytes + @return next character in string */ + static char* parse_units(char* ptr, ulint* megs); + +private: + enum file_status_t { + FILE_STATUS_VOID = 0, /** status not set */ + FILE_STATUS_RW_PERMISSION_ERROR,/** permission error */ + FILE_STATUS_READ_WRITE_ERROR, /** not readable/writable */ + FILE_STATUS_NOT_REGULAR_FILE_ERROR /** not a regular file */ + }; + + /** Verify the size of the physical file + @param[in] file data file object + @return DB_SUCCESS if OK else error code. */ + dberr_t check_size(Datafile& file); + + /** Check if a file can be opened in the correct mode. + @param[in,out] file data file object + @param[out] reason exact reason if file_status check failed. + @return DB_SUCCESS or error code. */ + dberr_t check_file_status( + const Datafile& file, + file_status_t& reason); + + /* DATA MEMBERS */ + + /** if true, then we auto-extend the last data file */ + bool m_auto_extend_last_file; + + /** if != 0, this tells the max size auto-extending may increase the + last data file size */ + ulint m_last_file_size_max; + + /** If the following is true we do not allow + inserts etc. This protects the user from forgetting + the 'newraw' keyword to my.cnf */ + bool m_created_new_raw; + + /** Tablespace full status */ + bool m_is_tablespace_full; + + /** if false, then sanity checks are still pending */ + bool m_sanity_checks_done; + + /** Encryption information */ + fil_space_crypt_t* m_crypt_info; +}; + +/* GLOBAL OBJECTS */ + +/** The control info of the system tablespace. */ +extern SysTablespace srv_sys_space; + +/** The control info of a temporary table shared tablespace. */ +extern SysTablespace srv_tmp_space; + +/** Check if the space_id is for a system-tablespace (shared + temp). +@param[in] id Space ID to check +@return true if id is a system tablespace, false if not. 
*/ +UNIV_INLINE +bool +is_system_tablespace( + ulint id) +{ + return(id == srv_sys_space.space_id() + || id == srv_tmp_space.space_id()); +} + +/** Check if it is a shared tablespace. +@param[in] id Space ID to check +@return true if id is a shared tablespace, false if not. */ +UNIV_INLINE +bool +is_shared_tablespace( + ulint id) +{ + return(is_system_tablespace(id)); +} + +/** Check if shared-system or undo tablespace. +@return true if shared-system or undo tablespace */ +UNIV_INLINE +bool +is_system_or_undo_tablespace( + ulint id) +{ + return(id == srv_sys_space.space_id() + || id <= srv_undo_tablespaces_open); +} + +/** Check if predefined shared tablespace. +@return true if predefined shared tablespace */ +UNIV_INLINE +bool +is_predefined_tablespace( + ulint id) +{ + ut_ad(srv_sys_space.space_id() == TRX_SYS_SPACE); + ut_ad(TRX_SYS_SPACE == 0); + return(id <= srv_undo_tablespaces_open + || id == srv_tmp_space.space_id()); +} +#endif /* fsp0sysspace_h */ diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h index 4f2ca2594cb..33f630a3574 100644 --- a/storage/innobase/include/fsp0types.h +++ b/storage/innobase/include/fsp0types.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,9 +26,9 @@ Created May 26, 2009 Vasil Dimov #ifndef fsp0types_h #define fsp0types_h -#include "univ.i" +#ifndef UNIV_INNOCHECKSUM -#include "fil0fil.h" /* for FIL_PAGE_DATA */ +#include "univ.i" #include "ut0byte.h" /** @name Flags for inserting records in order @@ -42,6 +42,7 @@ fseg_alloc_free_page) */ #define FSP_NO_DIR ((byte)113) /*!< no order */ /* @} */ +#endif /* !UNIV_INNOCHECKSUM */ /** File space extent size in pages page size | file space extent size ----------+----------------------- @@ -51,15 +52,14 @@ page size | file space extent size 32 KiB | 64 pages = 2 MiB 64 KiB | 64 pages = 4 MiB */ -/** File space extent size (one megabyte if default two or four if not) in pages */ -#define FSP_EXTENT_SIZE ((UNIV_PAGE_SIZE <= (16384) ? \ - (1048576U / UNIV_PAGE_SIZE) : \ +#define FSP_EXTENT_SIZE ((UNIV_PAGE_SIZE <= (16384) ? \ + (1048576 / UNIV_PAGE_SIZE) : \ ((UNIV_PAGE_SIZE <= (32768)) ? \ - (2097152U / UNIV_PAGE_SIZE) : \ - (4194304U / UNIV_PAGE_SIZE)))) + (2097152 / UNIV_PAGE_SIZE) : \ + (4194304 / UNIV_PAGE_SIZE)))) -/** File space extent size (four megabytes) in pages for MAX page size */ -#define FSP_EXTENT_SIZE_MAX (4194304U / UNIV_PAGE_SIZE_MAX) +/** File space extent size (four megabyte) in pages for MAX page size */ +#define FSP_EXTENT_SIZE_MAX (4194304 / UNIV_PAGE_SIZE_MAX) /** File space extent size (one megabyte) in pages for MIN page size */ #define FSP_EXTENT_SIZE_MIN (1048576U / UNIV_PAGE_SIZE_MIN) @@ -68,6 +68,7 @@ page size | file space extent size offset */ #define FSEG_PAGE_DATA FIL_PAGE_DATA +#ifndef UNIV_INNOCHECKSUM /** @name File segment header The file segment header points to the inode describing the file segment. 
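The FSP_EXTENT_SIZE expression above picks a 1, 2 or 4 MiB extent depending on the page size; a standalone sketch of the resulting extent sizes in pages:

#include <cassert>

static unsigned long extent_size_in_pages(unsigned long page_size)
{
	return page_size <= 16384
		? 1048576UL / page_size
		: (page_size <= 32768 ? 2097152UL / page_size
				      : 4194304UL / page_size);
}

int main()
{
	assert(extent_size_in_pages(4096)  == 256);	/*  4 KiB pages, 1 MiB extent */
	assert(extent_size_in_pages(16384) == 64);	/* 16 KiB pages, 1 MiB extent */
	assert(extent_size_in_pages(32768) == 64);	/* 32 KiB pages, 2 MiB extent */
	assert(extent_size_in_pages(65536) == 64);	/* 64 KiB pages, 4 MiB extent */
	return 0;
}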
*/ /* @{ */ @@ -82,11 +83,63 @@ typedef byte fseg_header_t; header, in bytes */ /* @} */ -/** Flags for fsp_reserve_free_extents @{ */ -#define FSP_NORMAL 1000000 -#define FSP_UNDO 2000000 -#define FSP_CLEANING 3000000 -/* @} */ +#ifdef UNIV_DEBUG + +struct mtr_t; + +/** A wrapper class to print the file segment header information. */ +class fseg_header +{ +public: + /** Constructor of fseg_header. + @param[in] header the underlying file segment header object + @param[in] mtr the mini-transaction. No redo logs are + generated, only latches are checked within + mini-transaction */ + fseg_header( + const fseg_header_t* header, + mtr_t* mtr) + : + m_header(header), + m_mtr(mtr) + {} + + /** Print the file segment header to the given output stream. + @param[in,out] out the output stream into which the object + is printed. + @retval the output stream into which the object was printed. */ + std::ostream& + to_stream(std::ostream& out) const; +private: + /** The underlying file segment header */ + const fseg_header_t* m_header; + + /** The mini transaction, which is used mainly to check whether + appropriate latches have been taken by the calling thread. */ + mtr_t* m_mtr; +}; + +/* Overloading the global output operator to print a file segment header +@param[in,out] out the output stream into which object will be printed +@param[in] header the file segment header to be printed +@retval the output stream */ +inline +std::ostream& +operator<<( + std::ostream& out, + const fseg_header& header) +{ + return(header.to_stream(out)); +} +#endif /* UNIV_DEBUG */ + +/** Flags for fsp_reserve_free_extents */ +enum fsp_reserve_t { + FSP_NORMAL, /* reservation during normal B-tree operations */ + FSP_UNDO, /* reservation done for undo logging */ + FSP_CLEANING, /* reservation done during purge operations */ + FSP_BLOB /* reservation being done for BLOB insertion */ +}; /* Number of pages described in a single descriptor page: currently each page description takes less than 1 byte; a descriptor page is repeated every @@ -127,4 +180,232 @@ every XDES_DESCRIBED_PER_PAGE pages in every tablespace. */ /*--------------------------------------*/ /* @} */ +/** Validate the tablespace flags. +These flags are stored in the tablespace header at offset FSP_SPACE_FLAGS. +They should be 0 for ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. +The newer row formats, COMPRESSED and DYNAMIC, use a file format > Antelope +so they should have a file format number plus the DICT_TF_COMPACT bit set. +@param[in] flags Tablespace flags +@return true if valid, false if not */ +bool +fsp_flags_is_valid( + ulint flags) + __attribute__((warn_unused_result, const)); + +/** Check if tablespace is system temporary. +@param[in] space_id verify is checksum is enabled for given space. +@return true if tablespace is system temporary. */ +bool +fsp_is_system_temporary( + ulint space_id); + +/** Check if checksum is disabled for the given space. +@param[in] space_id verify is checksum is enabled for given space. +@return true if checksum is disabled for given space. */ +bool +fsp_is_checksum_disabled( + ulint space_id); + +/** Check if tablespace is file-per-table. +@param[in] space_id Tablespace ID +@param[in] fsp_flags Tablespace Flags +@return true if tablespace is file-per-table. */ +bool +fsp_is_file_per_table( + ulint space_id, + ulint fsp_flags); + +#ifdef UNIV_DEBUG +/** Skip some of the sanity checks that are time consuming even in debug mode +and can affect frequent verification runs that are done to ensure stability of +the product. 
+@return true if check should be skipped for given space. */ +bool +fsp_skip_sanity_check( + ulint space_id); +#endif /* UNIV_DEBUG */ + +#endif /* !UNIV_INNOCHECKSUM */ + +/* @defgroup fsp_flags InnoDB Tablespace Flag Constants @{ */ + +/** Width of the POST_ANTELOPE flag */ +#define FSP_FLAGS_WIDTH_POST_ANTELOPE 1 +/** Number of flag bits used to indicate the tablespace zip page size */ +#define FSP_FLAGS_WIDTH_ZIP_SSIZE 4 +/** Width of the ATOMIC_BLOBS flag. The ability to break up a long +column into an in-record prefix and an externally stored part is available +to the two Barracuda row formats COMPRESSED and DYNAMIC. */ +#define FSP_FLAGS_WIDTH_ATOMIC_BLOBS 1 +/** Number of flag bits used to indicate the tablespace page size */ +#define FSP_FLAGS_WIDTH_PAGE_SSIZE 4 +/** Width of the DATA_DIR flag. This flag indicates that the tablespace +is found in a remote location, not the default data directory. */ +#define FSP_FLAGS_WIDTH_DATA_DIR 1 +/** Width of the SHARED flag. This flag indicates that the tablespace +was created with CREATE TABLESPACE and can be shared by multiple tables. */ +#define FSP_FLAGS_WIDTH_SHARED 1 +/** Width of the TEMPORARY flag. This flag indicates that the tablespace +is a temporary tablespace and everything in it is temporary, meaning that +it is for a single client and should be deleted upon startup if it exists. */ +#define FSP_FLAGS_WIDTH_TEMPORARY 1 + +/** Number of flag bits used to indicate the page compression and compression level */ +#define FSP_FLAGS_WIDTH_PAGE_COMPRESSION 1 +#define FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL 4 + +/** Number of flag bits used to indicate atomic writes for this tablespace */ +#define FSP_FLAGS_WIDTH_ATOMIC_WRITES 2 + +/** Width of all the currently known tablespace flags */ +#define FSP_FLAGS_WIDTH (FSP_FLAGS_WIDTH_POST_ANTELOPE \ + + FSP_FLAGS_WIDTH_ZIP_SSIZE \ + + FSP_FLAGS_WIDTH_ATOMIC_BLOBS \ + + FSP_FLAGS_WIDTH_PAGE_SSIZE \ + + FSP_FLAGS_WIDTH_DATA_DIR \ + + FSP_FLAGS_WIDTH_SHARED \ + + FSP_FLAGS_WIDTH_TEMPORARY \ + + FSP_FLAGS_WIDTH_PAGE_COMPRESSION \ + + FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL \ + + FSP_FLAGS_WIDTH_ATOMIC_WRITES ) + +/** A mask of all the known/used bits in tablespace flags */ +#define FSP_FLAGS_MASK (~(~0 << FSP_FLAGS_WIDTH)) + +/** Zero relative shift position of the POST_ANTELOPE field */ +#define FSP_FLAGS_POS_POST_ANTELOPE 0 +/** Zero relative shift position of the ZIP_SSIZE field */ +#define FSP_FLAGS_POS_ZIP_SSIZE (FSP_FLAGS_POS_POST_ANTELOPE \ + + FSP_FLAGS_WIDTH_POST_ANTELOPE) +/** Zero relative shift position of the ATOMIC_BLOBS field */ +#define FSP_FLAGS_POS_ATOMIC_BLOBS (FSP_FLAGS_POS_ZIP_SSIZE \ + + FSP_FLAGS_WIDTH_ZIP_SSIZE) +/** Zero relative shift position of the PAGE_SSIZE field */ +#define FSP_FLAGS_POS_PAGE_SSIZE (FSP_FLAGS_POS_ATOMIC_BLOBS \ + + FSP_FLAGS_WIDTH_ATOMIC_BLOBS) +/** Zero relative shift position of the start of the DATA_DIR bit */ +#define FSP_FLAGS_POS_DATA_DIR (FSP_FLAGS_POS_PAGE_SSIZE \ + + FSP_FLAGS_WIDTH_PAGE_SSIZE) +/** Zero relative shift position of the start of the SHARED bit */ +#define FSP_FLAGS_POS_SHARED (FSP_FLAGS_POS_DATA_DIR \ + + FSP_FLAGS_WIDTH_DATA_DIR) +/** Zero relative shift position of the start of the TEMPORARY bit */ +#define FSP_FLAGS_POS_TEMPORARY (FSP_FLAGS_POS_SHARED \ + + FSP_FLAGS_WIDTH_SHARED) +/** Zero relative shift position of the PAGE_COMPRESSION field */ +#define FSP_FLAGS_POS_PAGE_COMPRESSION (FSP_FLAGS_POS_TEMPORARY \ + + FSP_FLAGS_WIDTH_TEMPORARY) +/** Zero relative shift position of the PAGE_COMPRESSION_LEVEL field */ +#define 
FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL (FSP_FLAGS_POS_PAGE_COMPRESSION \ + + FSP_FLAGS_WIDTH_PAGE_COMPRESSION) +/** Zero relative shift position of the ATOMIC_WRITES field */ +#define FSP_FLAGS_POS_ATOMIC_WRITES (FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL \ + + FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL) +/** Zero relative shift position of the start of the UNUSED bits */ +#define FSP_FLAGS_POS_UNUSED (FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL \ + + FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL) + + +/** Bit mask of the POST_ANTELOPE field */ +#define FSP_FLAGS_MASK_POST_ANTELOPE \ + ((~(~0U << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \ + << FSP_FLAGS_POS_POST_ANTELOPE) +/** Bit mask of the ZIP_SSIZE field */ +#define FSP_FLAGS_MASK_ZIP_SSIZE \ + ((~(~0U << FSP_FLAGS_WIDTH_ZIP_SSIZE)) \ + << FSP_FLAGS_POS_ZIP_SSIZE) +/** Bit mask of the ATOMIC_BLOBS field */ +#define FSP_FLAGS_MASK_ATOMIC_BLOBS \ + ((~(~0U << FSP_FLAGS_WIDTH_ATOMIC_BLOBS)) \ + << FSP_FLAGS_POS_ATOMIC_BLOBS) +/** Bit mask of the PAGE_SSIZE field */ +#define FSP_FLAGS_MASK_PAGE_SSIZE \ + ((~(~0U << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \ + << FSP_FLAGS_POS_PAGE_SSIZE) +/** Bit mask of the DATA_DIR field */ +#define FSP_FLAGS_MASK_DATA_DIR \ + ((~(~0U << FSP_FLAGS_WIDTH_DATA_DIR)) \ + << FSP_FLAGS_POS_DATA_DIR) +/** Bit mask of the SHARED field */ +#define FSP_FLAGS_MASK_SHARED \ + ((~(~0U << FSP_FLAGS_WIDTH_SHARED)) \ + << FSP_FLAGS_POS_SHARED) +/** Bit mask of the TEMPORARY field */ +#define FSP_FLAGS_MASK_TEMPORARY \ + ((~(~0U << FSP_FLAGS_WIDTH_TEMPORARY)) \ + << FSP_FLAGS_POS_TEMPORARY) +/** Bit mask of the PAGE_COMPRESSION field */ +#define FSP_FLAGS_MASK_PAGE_COMPRESSION \ + ((~(~0 << FSP_FLAGS_WIDTH_PAGE_COMPRESSION)) \ + << FSP_FLAGS_POS_PAGE_COMPRESSION) +/** Bit mask of the PAGE_COMPRESSION_LEVEL field */ +#define FSP_FLAGS_MASK_PAGE_COMPRESSION_LEVEL \ + ((~(~0 << FSP_FLAGS_WIDTH_PAGE_COMPRESSION_LEVEL)) \ + << FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL) +/** Bit mask of the ATOMIC_WRITES field */ +#define FSP_FLAGS_MASK_ATOMIC_WRITES \ + ((~(~0 << FSP_FLAGS_WIDTH_ATOMIC_WRITES)) \ + << FSP_FLAGS_POS_ATOMIC_WRITES) + +/** Return the value of the POST_ANTELOPE field */ +#define FSP_FLAGS_GET_POST_ANTELOPE(flags) \ + ((flags & FSP_FLAGS_MASK_POST_ANTELOPE) \ + >> FSP_FLAGS_POS_POST_ANTELOPE) +/** Return the value of the ZIP_SSIZE field */ +#define FSP_FLAGS_GET_ZIP_SSIZE(flags) \ + ((flags & FSP_FLAGS_MASK_ZIP_SSIZE) \ + >> FSP_FLAGS_POS_ZIP_SSIZE) +/** Return the value of the ATOMIC_BLOBS field */ +#define FSP_FLAGS_HAS_ATOMIC_BLOBS(flags) \ + ((flags & FSP_FLAGS_MASK_ATOMIC_BLOBS) \ + >> FSP_FLAGS_POS_ATOMIC_BLOBS) +/** Return the value of the PAGE_SSIZE field */ +#define FSP_FLAGS_GET_PAGE_SSIZE(flags) \ + ((flags & FSP_FLAGS_MASK_PAGE_SSIZE) \ + >> FSP_FLAGS_POS_PAGE_SSIZE) +/** Return the value of the DATA_DIR field */ +#define FSP_FLAGS_HAS_DATA_DIR(flags) \ + ((flags & FSP_FLAGS_MASK_DATA_DIR) \ + >> FSP_FLAGS_POS_DATA_DIR) +/** Return the contents of the SHARED field */ +#define FSP_FLAGS_GET_SHARED(flags) \ + ((flags & FSP_FLAGS_MASK_SHARED) \ + >> FSP_FLAGS_POS_SHARED) +/** Return the contents of the TEMPORARY field */ +#define FSP_FLAGS_GET_TEMPORARY(flags) \ + ((flags & FSP_FLAGS_MASK_TEMPORARY) \ + >> FSP_FLAGS_POS_TEMPORARY) +/** Return the contents of the UNUSED bits */ +#define FSP_FLAGS_GET_UNUSED(flags) \ + (flags >> FSP_FLAGS_POS_UNUSED) +/** Return the value of the PAGE_COMPRESSION field */ +#define FSP_FLAGS_GET_PAGE_COMPRESSION(flags) \ + ((flags & FSP_FLAGS_MASK_PAGE_COMPRESSION) \ + >> FSP_FLAGS_POS_PAGE_COMPRESSION) +/** Return the value 
of the PAGE_COMPRESSION_LEVEL field */ +#define FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags) \ + ((flags & FSP_FLAGS_MASK_PAGE_COMPRESSION_LEVEL) \ + >> FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL) +/** Return the value of the ATOMIC_WRITES field */ +#define FSP_FLAGS_GET_ATOMIC_WRITES(flags) \ + ((flags & FSP_FLAGS_MASK_ATOMIC_WRITES) \ + >> FSP_FLAGS_POS_ATOMIC_WRITES) +/** Use an alias in the code for FSP_FLAGS_GET_SHARED() */ +#define fsp_is_shared_tablespace FSP_FLAGS_GET_SHARED +/* @} */ + +/** Set a PAGE_COMPRESSION into the correct bits in a given +tablespace flags. */ +#define FSP_FLAGS_SET_PAGE_COMPRESSION(flags, compression) \ + (flags | (compression << FSP_FLAGS_POS_PAGE_COMPRESSION)) + +/** Set a PAGE_COMPRESSION_LEVEL into the correct bits in a given +tablespace flags. */ +#define FSP_FLAGS_SET_PAGE_COMPRESSION_LEVEL(flags, level) \ + (flags | (level << FSP_FLAGS_POS_PAGE_COMPRESSION_LEVEL)) + +/** Set a ATOMIC_WRITES into the correct bits in a given +tablespace flags. */ +#define FSP_FLAGS_SET_ATOMIC_WRITES(flags, atomics) \ + (flags | (atomics << FSP_FLAGS_POS_ATOMIC_WRITES)) #endif /* fsp0types_h */ diff --git a/storage/innobase/include/fts0ast.h b/storage/innobase/include/fts0ast.h index 50f62063893..f0f00a40193 100644 --- a/storage/innobase/include/fts0ast.h +++ b/storage/innobase/include/fts0ast.h @@ -26,8 +26,16 @@ Created 2007/03/16/03 Sunny Bains #ifndef INNOBASE_FST0AST_H #define INNOBASE_FST0AST_H -#include "mem0mem.h" #include "ha_prototypes.h" +#include "mem0mem.h" + +#ifdef UNIV_PFS_MEMORY + +#define malloc(A) ut_malloc_nokey(A) +#define free(A) ut_free(A) +#define realloc(P, A) ut_realloc(P, A) + +#endif /* UNIV_PFS_MEMORY */ /* The type of AST Node */ enum fts_ast_type_t { @@ -35,6 +43,10 @@ enum fts_ast_type_t { FTS_AST_NUMB, /*!< Number */ FTS_AST_TERM, /*!< Term (or word) */ FTS_AST_TEXT, /*!< Text string */ + FTS_AST_PARSER_PHRASE_LIST, /*!< Phase for plugin parser + The difference from text type + is that we tokenize text into + term list */ FTS_AST_LIST, /*!< Expression list */ FTS_AST_SUBEXP_LIST /*!< Sub-Expression list */ }; @@ -139,9 +151,8 @@ fts_ast_term_set_wildcard( fts_ast_node_t* node); /*!< in: term to change */ /******************************************************************** Set the proximity attribute of a text node. */ - void -fts_ast_term_set_distance( +fts_ast_text_set_distance( /*======================*/ fts_ast_node_t* node, /*!< in/out: text node */ ulint distance); /*!< in: the text proximity @@ -149,7 +160,6 @@ fts_ast_term_set_distance( /********************************************************************//** Free a fts_ast_node_t instance. @return next node to free */ -UNIV_INTERN fts_ast_node_t* fts_ast_free_node( /*==============*/ @@ -188,7 +198,6 @@ fts_ast_state_free( /******************************************************************//** Traverse the AST - in-order traversal. @return DB_SUCCESS if all went well */ -UNIV_INTERN dberr_t fts_ast_visit( /*==========*/ @@ -206,7 +215,6 @@ Process (nested) sub-expression, create a new result set to store the sub-expression result by processing nodes under current sub-expression list. Merge the sub-expression result with that of parent expression list. 
@return DB_SUCCESS if all went well */ -UNIV_INTERN dberr_t fts_ast_visit_sub_exp( /*==================*/ @@ -216,7 +224,6 @@ fts_ast_visit_sub_exp( MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************** Create a lex instance.*/ -UNIV_INTERN fts_lexer_t* fts_lexer_create( /*=============*/ @@ -226,7 +233,6 @@ fts_lexer_create( MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); /******************************************************************** Free an fts_lexer_t instance.*/ -UNIV_INTERN void fts_lexer_free( /*===========*/ @@ -240,7 +246,6 @@ has one more byte than len @param[in] str pointer to string @param[in] len length of the string @return ast string with NUL-terminator */ -UNIV_INTERN fts_ast_string_t* fts_ast_string_create( const byte* str, @@ -249,7 +254,6 @@ fts_ast_string_create( /** Free an ast string instance @param[in,out] ast_str string to free */ -UNIV_INTERN void fts_ast_string_free( fts_ast_string_t* ast_str); @@ -259,7 +263,6 @@ Translate ast string of type FTS_AST_NUMB to unsigned long by strtoul @param[in] str string to translate @param[in] base the base @return translated number */ -UNIV_INTERN ulint fts_ast_string_to_ul( const fts_ast_string_t* ast_str, @@ -268,7 +271,6 @@ fts_ast_string_to_ul( /** Print the ast string @param[in] str string to print */ -UNIV_INTERN void fts_ast_string_print( const fts_ast_string_t* ast_str); @@ -314,6 +316,9 @@ struct fts_ast_node_t { fts_ast_node_t* next_alloc; /*!< For tracking allocations */ bool visited; /*!< whether this node is already processed */ + /* Used by plugin parser */ + fts_ast_node_t* up_node; /*!< Direct up node */ + bool go_up; /*!< Flag if go one level up */ }; /* To track state during parsing */ @@ -327,8 +332,32 @@ struct fts_ast_state_t { fts_lexer_t* lexer; /*!< Lexer callback + arg */ CHARSET_INFO* charset; /*!< charset used for tokenization */ + /* Used by plugin parser */ + fts_ast_node_t* cur_node; /*!< Current node into which + we add new node */ + int depth; /*!< Depth of parsing state */ }; +/******************************************************************//** +Create an AST term node, makes a copy of ptr for plugin parser +@return node */ +extern +fts_ast_node_t* +fts_ast_create_node_term_for_parser( +/*==========i=====================*/ + void* arg, /*!< in: ast state */ + const char* ptr, /*!< in: term string */ + const ulint len); /*!< in: term string length */ + +/******************************************************************//** +Create an AST phrase list node for plugin parser +@return node */ +extern +fts_ast_node_t* +fts_ast_create_node_phrase_list( +/*============================*/ + void* arg); /*!< in: ast state */ + #ifdef UNIV_DEBUG const char* fts_ast_oper_name_get(fts_ast_oper_t oper); diff --git a/storage/innobase/include/fts0blex.h b/storage/innobase/include/fts0blex.h index d0e4cae0678..da93ab8617d 100644 --- a/storage/innobase/include/fts0blex.h +++ b/storage/innobase/include/fts0blex.h @@ -341,7 +341,7 @@ extern int fts0blex (yyscan_t yyscanner); #undef YY_DECL #endif -#line 73 "fts0blex.l" +#line 74 "fts0blex.l" #line 348 "../include/fts0blex.h" diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 68d4d333245..210b9b700e4 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -26,7 +26,7 @@ Created 2011/09/02 Sunny Bains #ifndef fts0fts_h #define fts0fts_h -#include "univ.i" +#include "ha_prototypes.h" #include "data0type.h" #include 
"data0types.h" @@ -41,6 +41,7 @@ Created 2011/09/02 Sunny Bains #include "ut0wqueue.h" #include "que0types.h" #include "ft_global.h" +#include "mysql/plugin_ftparser.h" /** "NULL" value of a document id. */ #define FTS_NULL_DOC_ID 0 @@ -85,12 +86,16 @@ those defined in mysql file ft_global.h */ #define FTS_BOOL 1 #define FTS_SORTED 2 #define FTS_EXPAND 4 -#define FTS_PROXIMITY 8 -#define FTS_PHRASE 16 -#define FTS_OPT_RANKING 32 +#define FTS_NO_RANKING 8 +#define FTS_PROXIMITY 16 +#define FTS_PHRASE 32 +#define FTS_OPT_RANKING 64 #define FTS_INDEX_TABLE_IND_NAME "FTS_INDEX_TABLE_IND" +/** The number of FTS index partitions for a fulltext idnex */ +#define FTS_NUM_AUX_INDEX 6 + /** Threshold where our optimize thread automatically kicks in */ #define FTS_OPTIMIZE_THRESHOLD 10000000 @@ -98,6 +103,31 @@ those defined in mysql file ft_global.h */ should not exceed FTS_DOC_ID_MAX_STEP */ #define FTS_DOC_ID_MAX_STEP 65535 + +/** Maximum possible Fulltext word length */ +#define FTS_MAX_WORD_LEN HA_FT_MAXBYTELEN + +/** Maximum possible Fulltext word length (in characters) */ +#define FTS_MAX_WORD_LEN_IN_CHAR HA_FT_MAXCHARLEN + +/** Number of columns in FTS AUX Tables */ +#define FTS_DELETED_TABLE_NUM_COLS 1 +#define FTS_CONFIG_TABLE_NUM_COLS 2 +#define FTS_AUX_INDEX_TABLE_NUM_COLS 5 + +/** DELETED_TABLE(doc_id BIGINT UNSIGNED) */ +#define FTS_DELETED_TABLE_COL_LEN 8 +/** CONFIG_TABLE(key CHAR(50), value CHAR(200)) */ +#define FTS_CONFIG_TABLE_KEY_COL_LEN 50 +#define FTS_CONFIG_TABLE_VALUE_COL_LEN 200 + +#define FTS_INDEX_WORD_LEN FTS_MAX_WORD_LEN +#define FTS_INDEX_FIRST_DOC_ID_LEN 8 +#define FTS_INDEX_LAST_DOC_ID_LEN 8 +#define FTS_INDEX_DOC_COUNT_LEN 4 +/* BLOB COLUMN, 0 means VARIABLE SIZE */ +#define FTS_INDEX_ILIST_LEN 0 + /** Variable specifying the FTS parallel sort degree */ extern ulong fts_sort_pll_degree; @@ -150,7 +180,7 @@ do { \ (fts_table)->suffix = m_suffix; \ (fts_table)->type = m_type; \ (fts_table)->table_id = m_table->id; \ - (fts_table)->parent = m_table->name; \ + (fts_table)->parent = m_table->name.m_name; \ (fts_table)->table = m_table; \ } while (0); @@ -159,7 +189,7 @@ do { \ (fts_table)->suffix = m_suffix; \ (fts_table)->type = m_type; \ (fts_table)->table_id = m_index->table->id; \ - (fts_table)->parent = m_index->table->name; \ + (fts_table)->parent = m_index->table->name.m_name; \ (fts_table)->table = m_index->table; \ (fts_table)->index_id = m_index->id; \ } while (0); @@ -306,35 +336,45 @@ enum fts_status { typedef enum fts_status fts_status_t; /** The state of the FTS sub system. */ -struct fts_t { - /*!< mutex protecting bg_threads* and - fts_add_wq. */ - ib_mutex_t bg_threads_mutex; - - ulint bg_threads; /*!< number of background threads - accessing this table */ - - /*!< TRUE if background threads running - should stop themselves */ - ulint fts_status; /*!< Status bit regarding fts - running state */ - - ib_wqueue_t* add_wq; /*!< Work queue for scheduling jobs - for the FTS 'Add' thread, or NULL - if the thread has not yet been - created. Each work item is a - fts_trx_doc_ids_t*. */ - - fts_cache_t* cache; /*!< FTS memory buffer for this table, - or NULL if the table has no FTS - index. */ - - ulint doc_col; /*!< FTS doc id hidden column number - in the CLUSTERED index. */ - - ib_vector_t* indexes; /*!< Vector of FTS indexes, this is - mainly for caching purposes. */ - mem_heap_t* fts_heap; /*!< heap for fts_t allocation */ +class fts_t { +public: + /** fts_t constructor. 
+ @param[in] table table with FTS indexes + @param[in,out] heap memory heap where 'this' is stored */ + fts_t( + const dict_table_t* table, + mem_heap_t* heap); + + /** fts_t destructor. */ + ~fts_t(); + + /** Mutex protecting bg_threads* and fts_add_wq. */ + ib_mutex_t bg_threads_mutex; + + /** Number of background threads accessing this table. */ + ulint bg_threads; + + /** Status bit regarding fts running state. TRUE if background + threads running should stop themselves. */ + ulint fts_status; + + /** Work queue for scheduling jobs for the FTS 'Add' thread, or NULL + if the thread has not yet been created. Each work item is a + fts_trx_doc_ids_t*. */ + ib_wqueue_t* add_wq; + + /** FTS memory buffer for this table, or NULL if the table has no FTS + index. */ + fts_cache_t* cache; + + /** FTS doc id hidden column number in the CLUSTERED index. */ + ulint doc_col; + + /** Vector of FTS indexes, this is mainly for caching purposes. */ + ib_vector_t* indexes; + + /** Heap for fts_t allocation. */ + mem_heap_t* fts_heap; }; struct fts_stopword_t; @@ -366,12 +406,6 @@ extern ulong fts_min_token_size; need a sync to free some memory */ extern bool fts_need_sync; -/** Maximum possible Fulltext word length */ -#define FTS_MAX_WORD_LEN HA_FT_MAXBYTELEN - -/** Maximum possible Fulltext word length (in characters) */ -#define FTS_MAX_WORD_LEN_IN_CHAR HA_FT_MAXCHARLEN - /** Variable specifying the table that has Fulltext index to display its content through information schema table */ extern char* fts_internal_tbl_name; @@ -385,7 +419,6 @@ do { \ /******************************************************************//** Create a FTS cache. */ -UNIV_INTERN fts_cache_t* fts_cache_create( /*=============*/ @@ -394,7 +427,6 @@ fts_cache_create( /******************************************************************//** Create a FTS index cache. @return Index Cache */ -UNIV_INTERN fts_index_cache_t* fts_cache_index_cache_create( /*=========================*/ @@ -405,31 +437,26 @@ fts_cache_index_cache_create( Get the next available document id. This function creates a new transaction to generate the document id. @return DB_SUCCESS if OK */ -UNIV_INTERN dberr_t fts_get_next_doc_id( /*================*/ const dict_table_t* table, /*!< in: table */ - doc_id_t* doc_id) /*!< out: new document id */ - MY_ATTRIBUTE((nonnull)); + doc_id_t* doc_id);/*!< out: new document id */ /*********************************************************************//** Update the next and last Doc ID in the CONFIG table to be the input "doc_id" value (+ 1). We would do so after each FTS index build or table truncate */ -UNIV_INTERN void fts_update_next_doc_id( /*===================*/ trx_t* trx, /*!< in/out: transaction */ const dict_table_t* table, /*!< in: table */ const char* table_name, /*!< in: table name, or NULL */ - doc_id_t doc_id) /*!< in: DOC ID to set */ - MY_ATTRIBUTE((nonnull(2))); + doc_id_t doc_id); /*!< in: DOC ID to set */ /******************************************************************//** Create a new document id . @return DB_SUCCESS if all went well else error */ -UNIV_INTERN dberr_t fts_create_doc_id( /*==============*/ @@ -439,19 +466,17 @@ fts_create_doc_id( value to this row. This is the current row that is being inserted. */ - mem_heap_t* heap) /*!< in: heap */ - MY_ATTRIBUTE((nonnull)); + mem_heap_t* heap); /*!< in: heap */ + /******************************************************************//** Create a new fts_doc_ids_t. @return new fts_doc_ids_t. 
*/ -UNIV_INTERN fts_doc_ids_t* fts_doc_ids_create(void); /*=====================*/ /******************************************************************//** Free a fts_doc_ids_t. */ -UNIV_INTERN void fts_doc_ids_free( /*=============*/ @@ -459,7 +484,6 @@ fts_doc_ids_free( /******************************************************************//** Notify the FTS system about an operation on an FTS-indexed table. */ -UNIV_INTERN void fts_trx_add_op( /*===========*/ @@ -467,13 +491,11 @@ fts_trx_add_op( dict_table_t* table, /*!< in: table */ doc_id_t doc_id, /*!< in: doc id */ fts_row_state state, /*!< in: state of the row */ - ib_vector_t* fts_indexes) /*!< in: FTS indexes affected + ib_vector_t* fts_indexes); /*!< in: FTS indexes affected (NULL=all) */ - MY_ATTRIBUTE((nonnull(1,2))); /******************************************************************//** Free an FTS trx. */ -UNIV_INTERN void fts_trx_free( /*=========*/ @@ -484,7 +506,6 @@ Creates the common ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_create_common_tables( /*=====================*/ @@ -499,7 +520,6 @@ fts_create_common_tables( Wrapper function of fts_create_index_tables_low(), create auxiliary tables for an FTS index @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_create_index_tables( /*====================*/ @@ -512,7 +532,6 @@ Creates the column specific ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_create_index_tables_low( /*========================*/ @@ -525,7 +544,6 @@ fts_create_index_tables_low( MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Add the FTS document id hidden column. */ -UNIV_INTERN void fts_add_doc_id_column( /*==================*/ @@ -538,7 +556,6 @@ Drops the ancillary tables needed for supporting an FTS index on the given table. row_mysql_lock_data_dictionary must have been called before this. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_tables( /*============*/ @@ -550,7 +567,6 @@ fts_drop_tables( The given transaction is about to be committed; do whatever is necessary from the FTS system's POV. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_commit( /*=======*/ @@ -560,7 +576,6 @@ fts_commit( /*******************************************************************//** FTS Query entry point. @return DB_SUCCESS if successful otherwise error code */ -UNIV_INTERN dberr_t fts_query( /*======*/ @@ -577,7 +592,6 @@ fts_query( /******************************************************************//** Retrieve the FTS Relevance Ranking result for doc with doc_id @return the relevance ranking value. */ -UNIV_INTERN float fts_retrieve_ranking( /*=================*/ @@ -587,7 +601,6 @@ fts_retrieve_ranking( /******************************************************************//** FTS Query sort result, returned by fts_query() on fts_ranking_t::rank. */ -UNIV_INTERN void fts_query_sort_result_on_rank( /*==========================*/ @@ -596,7 +609,6 @@ fts_query_sort_result_on_rank( /******************************************************************//** FTS Query free result, returned by fts_query(). 
*/ -UNIV_INTERN void fts_query_free_result( /*==================*/ @@ -605,7 +617,6 @@ fts_query_free_result( /******************************************************************//** Extract the doc id from the FTS hidden column. */ -UNIV_INTERN doc_id_t fts_get_doc_id_from_row( /*====================*/ @@ -613,37 +624,45 @@ fts_get_doc_id_from_row( dtuple_t* row); /*!< in: row whose FTS doc id we want to extract.*/ -/******************************************************************//** -Extract the doc id from the FTS hidden column. */ -UNIV_INTERN +/** Extract the doc id from the record that belongs to index. +@param[in] table table +@param[in] rec record contains FTS_DOC_ID +@param[in] index index of rec +@param[in] heap heap memory +@return doc id that was extracted from rec */ doc_id_t fts_get_doc_id_from_rec( -/*====================*/ - dict_table_t* table, /*!< in: table */ - const rec_t* rec, /*!< in: rec */ - mem_heap_t* heap); /*!< in: heap */ - -/******************************************************************//** -Update the query graph with a new document id. -@return Doc ID used */ -UNIV_INTERN + dict_table_t* table, + const rec_t* rec, + const dict_index_t* index, + mem_heap_t* heap); + +/** Add new fts doc id to the update vector. +@param[in] table the table that contains the FTS index. +@param[in,out] ufield the fts doc id field in the update vector. + No new memory is allocated for this in this + function. +@param[in,out] next_doc_id the fts doc id that has been added to the + update vector. If 0, a new fts doc id is + automatically generated. The memory provided + for this argument will be used by the update + vector. Ensure that the life time of this + memory matches that of the update vector. +@return the fts doc id used in the update vector */ doc_id_t fts_update_doc_id( -/*==============*/ - dict_table_t* table, /*!< in: table */ - upd_field_t* ufield, /*!< out: update node */ - doc_id_t* next_doc_id); /*!< out: buffer for writing */ + dict_table_t* table, + upd_field_t* ufield, + doc_id_t* next_doc_id); /******************************************************************//** FTS initialize. */ -UNIV_INTERN void fts_startup(void); /*==============*/ /******************************************************************//** Signal FTS threads to initiate shutdown. */ -UNIV_INTERN void fts_start_shutdown( /*===============*/ @@ -654,7 +673,6 @@ fts_start_shutdown( /******************************************************************//** Wait for FTS threads to shutdown. */ -UNIV_INTERN void fts_shutdown( /*=========*/ @@ -666,7 +684,6 @@ fts_shutdown( /******************************************************************//** Create an instance of fts_t. @return instance of fts_t */ -UNIV_INTERN fts_t* fts_create( /*=======*/ @@ -675,7 +692,6 @@ fts_create( /**********************************************************************//** Free the FTS resources. */ -UNIV_INTERN void fts_free( /*=====*/ @@ -685,7 +701,6 @@ fts_free( /*********************************************************************//** Run OPTIMIZE on the given table. @return DB_SUCCESS if all OK */ -UNIV_INTERN dberr_t fts_optimize_table( /*===============*/ @@ -694,7 +709,6 @@ fts_optimize_table( /**********************************************************************//** Startup the optimize thread and create the work queue. 
*/ -UNIV_INTERN void fts_optimize_init(void); /*====================*/ @@ -702,7 +716,6 @@ fts_optimize_init(void); /**********************************************************************//** Check whether the work queue is initialized. @return TRUE if optimize queue is initialized. */ -UNIV_INTERN ibool fts_optimize_is_init(void); /*======================*/ @@ -710,7 +723,6 @@ fts_optimize_is_init(void); /****************************************************************//** Drops index ancillary tables for a FTS index @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_index_tables( /*==================*/ @@ -721,7 +733,6 @@ fts_drop_index_tables( /******************************************************************//** Remove the table from the OPTIMIZER's list. We do wait for acknowledgement from the consumer of the message. */ -UNIV_INTERN void fts_optimize_remove_table( /*======================*/ @@ -736,21 +747,18 @@ fts_optimize_request_sync_table( /**********************************************************************//** Signal the optimize thread to prepare for shutdown. */ -UNIV_INTERN void fts_optimize_start_shutdown(void); /*==============================*/ /**********************************************************************//** Inform optimize to clean up. */ -UNIV_INTERN void fts_optimize_end(void); /*===================*/ /**********************************************************************//** Take a FTS savepoint. */ -UNIV_INTERN void fts_savepoint_take( /*===============*/ @@ -760,7 +768,6 @@ fts_savepoint_take( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Refresh last statement savepoint. */ -UNIV_INTERN void fts_savepoint_laststmt_refresh( /*===========================*/ @@ -768,7 +775,6 @@ fts_savepoint_laststmt_refresh( MY_ATTRIBUTE((nonnull)); /**********************************************************************//** Release the savepoint data identified by name. */ -UNIV_INTERN void fts_savepoint_release( /*==================*/ @@ -777,7 +783,6 @@ fts_savepoint_release( /**********************************************************************//** Free the FTS cache. */ -UNIV_INTERN void fts_cache_destroy( /*==============*/ @@ -785,14 +790,12 @@ fts_cache_destroy( /** Clear cache. @param[in,out] cache fts cache */ -UNIV_INTERN void fts_cache_clear( fts_cache_t* cache); /*********************************************************************//** Initialize things in cache. */ -UNIV_INTERN void fts_cache_init( /*===========*/ @@ -800,7 +803,6 @@ fts_cache_init( /*********************************************************************//** Rollback to and including savepoint identified by name. */ -UNIV_INTERN void fts_savepoint_rollback( /*===================*/ @@ -809,7 +811,6 @@ fts_savepoint_rollback( /*********************************************************************//** Rollback to and including savepoint identified by name. */ -UNIV_INTERN void fts_savepoint_rollback_last_stmt( /*=============================*/ @@ -818,7 +819,6 @@ fts_savepoint_rollback_last_stmt( /***********************************************************************//** Drop all orphaned FTS auxiliary tables, those that don't have a parent table or FTS index defined on them. */ -UNIV_INTERN void fts_drop_orphaned_tables(void); /*==========================*/ @@ -827,7 +827,6 @@ fts_drop_orphaned_tables(void); Since we do a horizontal split on the index table, we need to drop all the split tables. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_drop_index_split_tables( /*========================*/ @@ -841,7 +840,6 @@ FTS auxiliary INDEX table and clear the cache at the end. @param[in] unlock_cache whether unlock cache when write node @param[in] wait whether wait for existing sync to finish @return DB_SUCCESS on success, error code on failure. */ -UNIV_INTERN dberr_t fts_sync_table( dict_table_t* table, @@ -851,7 +849,6 @@ fts_sync_table( /****************************************************************//** Free the query graph but check whether dict_sys->mutex is already held */ -UNIV_INTERN void fts_que_graph_free_check_lock( /*==========================*/ @@ -861,7 +858,6 @@ fts_que_graph_free_check_lock( /****************************************************************//** Create an FTS index cache. */ -UNIV_INTERN CHARSET_INFO* fts_index_get_charset( /*==================*/ @@ -870,7 +866,6 @@ fts_index_get_charset( /*********************************************************************//** Get the initial Doc ID by consulting the CONFIG table @return initial Doc ID */ -UNIV_INTERN doc_id_t fts_init_doc_id( /*============*/ @@ -921,15 +916,31 @@ innobase_mysql_fts_get_token( const byte* start, /*!< in: start of text */ const byte* end, /*!< in: one character past end of text */ - fts_string_t* token, /*!< out: token's text */ - ulint* offset); /*!< out: offset to token, - measured as characters from - 'start' */ + fts_string_t* token); /*!< out: token's text */ + +/*************************************************************//** +Get token char size by charset +@return the token size in characters */ +ulint +fts_get_token_size( +/*===============*/ + const CHARSET_INFO* cs, /*!< in: Character set */ + const char* token, /*!< in: token */ + ulint len); /*!< in: token length */ + +/*************************************************************//** +FULLTEXT tokenizer internal in MYSQL_FTPARSER_SIMPLE_MODE +@return 0 if tokenized successfully */ +int +fts_tokenize_document_internal( +/*===========================*/ + MYSQL_FTPARSER_PARAM* param, /*!< in: parser parameter */ + const char* doc, /*!< in: document to tokenize */ + int len); /*!< in: document length */ /*********************************************************************//** Fetch COUNT(*) from specified table. @return the number of rows in the table */ -UNIV_INTERN ulint fts_get_rows_count( /*===============*/ @@ -938,7 +949,6 @@ fts_get_rows_count( /*************************************************************//** Get maximum Doc ID in a table if index "FTS_DOC_ID_INDEX" exists @return max Doc ID or 0 if index "FTS_DOC_ID_INDEX" does not exist */ -UNIV_INTERN doc_id_t fts_get_max_doc_id( /*===============*/ @@ -948,7 +958,6 @@ fts_get_max_doc_id( Check whether user supplied stopword table exists and is of the right format. @return the stopword column charset if qualifies */ -UNIV_INTERN CHARSET_INFO* fts_valid_stopword_table( /*=====================*/ @@ -957,7 +966,6 @@ fts_valid_stopword_table( /****************************************************************//** This function loads specified stopword into FTS cache @return TRUE if success */ -UNIV_INTERN ibool fts_load_stopword( /*==============*/ @@ -976,7 +984,6 @@ fts_load_stopword( /****************************************************************//** Create the vector of fts_get_doc_t instances. 
@return vector of fts_get_doc_t instances */ -UNIV_INTERN ib_vector_t* fts_get_docs_create( /*================*/ @@ -985,7 +992,6 @@ fts_get_docs_create( /****************************************************************//** Read the rows from the FTS index @return DB_SUCCESS if OK */ -UNIV_INTERN dberr_t fts_table_fetch_doc_ids( /*====================*/ @@ -999,7 +1005,6 @@ used. There are documents that have not yet sync-ed to auxiliary tables from the last abnormal server shutdown, we will need to bring such documents into FTS cache before any further operations @return TRUE if all OK */ -UNIV_INTERN ibool fts_init_index( /*===========*/ @@ -1008,7 +1013,6 @@ fts_init_index( have cache lock */ /*******************************************************************//** Add a newly created index in FTS cache */ -UNIV_INTERN void fts_add_index( /*==========*/ @@ -1018,7 +1022,6 @@ fts_add_index( /*******************************************************************//** Drop auxiliary tables related to an FTS index @return DB_SUCCESS or error number */ -UNIV_INTERN dberr_t fts_drop_index( /*===========*/ @@ -1030,7 +1033,6 @@ fts_drop_index( /****************************************************************//** Rename auxiliary tables for all fts indexes of a table @return DB_SUCCESS or error code */ - dberr_t fts_rename_aux_tables( /*==================*/ @@ -1042,10 +1044,21 @@ fts_rename_aux_tables( Check that indexes in the fts->indexes are also present in index cache and table->indexes list @return TRUE if all indexes match */ -UNIV_INTERN ibool fts_check_cached_index( /*===================*/ dict_table_t* table); /*!< in: Table where indexes are dropped */ + +/** Check if all the auxiliary tables associated with an FTS index are in a +consistent state. For now consistency is checked only by ensuring +index->page_no != FIL_NULL +@param[out] base_table table that hosts the fts index +@param[in,out] trx trx handler +@return true if the check certifies the auxiliary tables are sane, false otherwise. */ +bool +fts_is_corrupt( + dict_table_t* base_table, + trx_t* trx); + #endif /*!< fts0fts.h */ diff --git a/storage/innobase/include/fts0opt.h b/storage/innobase/include/fts0opt.h index 92eaf8270d2..a9185ad8df1 100644 --- a/storage/innobase/include/fts0opt.h +++ b/storage/innobase/include/fts0opt.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2001, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2001, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,7 +27,6 @@ Created 2011-02-15 Jimmy Yang /******************************************************************** Callback function to fetch the rows in an FTS INDEX record. */ -UNIV_INTERN ibool fts_optimize_index_fetch_node( /*==========================*/ diff --git a/storage/innobase/include/fts0plugin.h b/storage/innobase/include/fts0plugin.h new file mode 100644 index 00000000000..9bc9b6b9dd7 --- /dev/null +++ b/storage/innobase/include/fts0plugin.h @@ -0,0 +1,50 @@ +/***************************************************************************** + +Copyright (c) 2013, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file include/fts0plugin.h +Full text search plugin header file + +Created 2013/06/04 Shaohua Wang +***********************************************************************/ + +#ifndef INNOBASE_FTS0PLUGIN_H +#define INNOBASE_FTS0PLUGIN_H + +#include "ha_prototypes.h" + +extern struct st_mysql_ftparser fts_default_parser; + +struct fts_ast_state_t; + +#define PARSER_INIT(parser, arg) if (parser->init) { parser->init(arg); } +#define PARSER_DEINIT(parser, arg) if (parser->deinit) { parser->deinit(arg); } + +/******************************************************************//** +Parse an FTS query with a plugin parser. +@return 0 if parsed successfully, non-zero otherwise. */ +int +fts_parse_by_parser( +/*================*/ + ibool mode, /*!< in: query boolean mode */ + uchar* query, /*!< in: query string */ + ulint len, /*!< in: query string length */ + st_mysql_ftparser* parse, /*!< in: fts plugin parser */ + fts_ast_state_t* state); /*!< in: query parser state */ + +#endif /* INNOBASE_FTS0PLUGIN_H */ diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h index 2d4e9d88fd1..f7a2d2c72f2 100644 --- a/storage/innobase/include/fts0priv.h +++ b/storage/innobase/include/fts0priv.h @@ -26,6 +26,7 @@ Created 2011/09/02 Sunny Bains #ifndef INNOBASE_FTS0PRIV_H #define INNOBASE_FTS0PRIV_H +#include "univ.i" #include "dict0dict.h" #include "pars0pars.h" #include "que0que.h" @@ -114,7 +115,6 @@ component. /******************************************************************//** Parse an SQL string. %s is replaced with the table's id. @return query graph */ -UNIV_INTERN que_t* fts_parse_sql( /*==========*/ @@ -122,26 +122,27 @@ fts_parse_sql( pars_info_t* info, /*!< in: info struct, or NULL */ const char* sql) /*!< in: SQL string to evaluate */ MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result)); + /******************************************************************//** Evaluate a parsed SQL statement @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_eval_sql( /*=========*/ trx_t* trx, /*!< in: transaction */ que_t* graph) /*!< in: Parsed statement */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Construct the name of an ancillary FTS table for the given table. -@return own: table name, must be freed with mem_free() */ -UNIV_INTERN -char* +Caller must allocate enough memory (usually of size MAX_FULL_NAME_LEN) +for param 'table_name'. */ +void fts_get_table_name( /*===============*/ - const fts_table_t* - fts_table) /*!< in: FTS aux table info */ - MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); + const fts_table_t* fts_table, /*!< in: FTS aux table info */ + char* table_name); /*!< in/out: aux table name */ + /******************************************************************//** Construct the column specification part of the SQL string for selecting the indexed FTS columns for the given table. 
Adds the necessary bound @@ -157,7 +158,6 @@ Two indexed columns named "subject" and "content": "$sel0, $sel1", info/ids: sel0 -> "subject", sel1 -> "content", @return heap-allocated WHERE string */ -UNIV_INTERN const char* fts_get_select_columns_str( /*=======================*/ @@ -177,7 +177,6 @@ ID */ Fetch document (= a single row's indexed text) with the given document id. @return: DB_SUCCESS if fetch is successful, else error */ -UNIV_INTERN dberr_t fts_doc_fetch_by_doc_id( /*====================*/ @@ -197,7 +196,6 @@ fts_doc_fetch_by_doc_id( Callback function for fetch that stores the text of an FTS document, converting each column to UTF-16. @return always FALSE */ -UNIV_INTERN ibool fts_query_expansion_fetch_doc( /*==========================*/ @@ -207,7 +205,6 @@ fts_query_expansion_fetch_doc( /******************************************************************** Write out a single word's data as new entry/entries in the INDEX table. @return DB_SUCCESS if all OK. */ -UNIV_INTERN dberr_t fts_write_node( /*===========*/ @@ -217,21 +214,37 @@ fts_write_node( fts_string_t* word, /*!< in: word in UTF-8 */ fts_node_t* node) /*!< in: node columns */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** Check fts token +1. for ngram token, check whether the token contains any words in stopwords +2. for non-ngram token, check if it's stopword or less than fts_min_token_size +or greater than fts_max_token_size. +@param[in] token token string +@param[in] stopwords stopwords rb tree +@param[in] is_ngram is ngram parser +@param[in] cs token charset +@retval true if it is not stopword and length in range +@retval false if it is stopword or length not in range */ +bool +fts_check_token( + const fts_string_t* token, + const ib_rbt_t* stopwords, + bool is_ngram, + const CHARSET_INFO* cs); + /*******************************************************************//** Tokenize a document. */ -UNIV_INTERN void fts_tokenize_document( /*==================*/ fts_doc_t* doc, /*!< in/out: document to tokenize */ - fts_doc_t* result) /*!< out: if provided, save + fts_doc_t* result, /*!< out: if provided, save result tokens here */ - MY_ATTRIBUTE((nonnull(1))); + st_mysql_ftparser* parser);/* in: plugin fts parser */ /*******************************************************************//** Continue to tokenize a document. */ -UNIV_INTERN void fts_tokenize_document_next( /*=======================*/ @@ -239,23 +252,21 @@ fts_tokenize_document_next( tokenize */ ulint add_pos, /*!< in: add this position to all tokens from this tokenization */ - fts_doc_t* result) /*!< out: if provided, save + fts_doc_t* result, /*!< out: if provided, save result tokens here */ - MY_ATTRIBUTE((nonnull(1))); + st_mysql_ftparser* parser);/* in: plugin fts parser */ + /******************************************************************//** Initialize a document. */ -UNIV_INTERN void fts_doc_init( /*=========*/ - fts_doc_t* doc) /*!< in: doc to initialize */ - MY_ATTRIBUTE((nonnull)); + fts_doc_t* doc); /*!< in: doc to initialize */ /******************************************************************//** Do a binary search for a doc id in the array @return +ve index if found -ve index where it should be inserted if not found */ -UNIV_INTERN int fts_bsearch( /*========*/ @@ -266,24 +277,21 @@ fts_bsearch( MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Free document. 
*/ -UNIV_INTERN void fts_doc_free( /*=========*/ - fts_doc_t* doc) /*!< in: document */ - MY_ATTRIBUTE((nonnull)); + fts_doc_t* doc); /*!< in: document */ + /******************************************************************//** Free fts_optimizer_word_t instance.*/ -UNIV_INTERN void fts_word_free( /*==========*/ - fts_word_t* word) /*!< in: instance to free.*/ - MY_ATTRIBUTE((nonnull)); + fts_word_t* word); /*!< in: instance to free.*/ + /******************************************************************//** Read the rows from the FTS index @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_index_fetch_nodes( /*==================*/ @@ -292,19 +300,18 @@ fts_index_fetch_nodes( fts_table_t* fts_table, /*!< in: FTS aux table */ const fts_string_t* word, /*!< in: the word to fetch */ - fts_fetch_t* fetch) /*!< in: fetch callback.*/ - MY_ATTRIBUTE((nonnull)); + fts_fetch_t* fetch); /*!< in: fetch callback.*/ + /******************************************************************//** Create a fts_optimizer_word_t instance. @return new instance */ -UNIV_INTERN fts_word_t* fts_word_init( /*==========*/ fts_word_t* word, /*!< in: word to initialize */ byte* utf8, /*!< in: UTF-8 string */ - ulint len) /*!< in: length of string in bytes */ - MY_ATTRIBUTE((nonnull)); + ulint len); /*!< in: length of string in bytes */ + /******************************************************************//** Compare two fts_trx_table_t instances, we actually compare the table id's here. @@ -314,8 +321,8 @@ int fts_trx_table_cmp( /*==============*/ const void* v1, /*!< in: id1 */ - const void* v2) /*!< in: id2 */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + const void* v2); /*!< in: id2 */ + /******************************************************************//** Compare a table id with a trx_table_t table id. @return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ @@ -324,31 +331,28 @@ int fts_trx_table_id_cmp( /*=================*/ const void* p1, /*!< in: id1 */ - const void* p2) /*!< in: id2 */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + const void* p2); /*!< in: id2 */ + /******************************************************************//** Commit a transaction. @return DB_SUCCESS if all OK */ -UNIV_INTERN dberr_t fts_sql_commit( /*===========*/ - trx_t* trx) /*!< in: transaction */ - MY_ATTRIBUTE((nonnull)); + trx_t* trx); /*!< in: transaction */ + /******************************************************************//** Rollback a transaction. @return DB_SUCCESS if all OK */ -UNIV_INTERN dberr_t fts_sql_rollback( /*=============*/ - trx_t* trx) /*!< in: transaction */ - MY_ATTRIBUTE((nonnull)); + trx_t* trx); /*!< in: transaction */ + /******************************************************************//** Parse an SQL string. %s is replaced with the table's id. Don't acquire the dict mutex @return query graph */ -UNIV_INTERN que_t* fts_parse_sql_no_dict_lock( /*=======================*/ @@ -356,11 +360,11 @@ fts_parse_sql_no_dict_lock( pars_info_t* info, /*!< in: parser info */ const char* sql) /*!< in: SQL string to evaluate */ MY_ATTRIBUTE((nonnull(3), malloc, warn_unused_result)); + /******************************************************************//** Get value from config table. 
The caller must ensure that enough space is allocated for value to hold the column contents @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_value( /*=================*/ @@ -368,15 +372,13 @@ fts_config_get_value( fts_table_t* fts_table, /*!< in: the indexed FTS table */ const char* name, /*!< in: get config value for this parameter name */ - fts_string_t* value) /*!< out: value read from + fts_string_t* value); /*!< out: value read from config table */ - MY_ATTRIBUTE((nonnull)); /******************************************************************//** Get value specific to an FTS index from the config table. The caller must ensure that enough space is allocated for value to hold the column contents. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_index_value( /*=======================*/ @@ -387,10 +389,10 @@ fts_config_get_index_value( fts_string_t* value) /*!< out: value read from config table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Set the value in the config table for name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_set_value( /*=================*/ @@ -399,12 +401,11 @@ fts_config_set_value( const char* name, /*!< in: get config value for this parameter name */ const fts_string_t* - value) /*!< in: value to update */ - MY_ATTRIBUTE((nonnull)); + value); /*!< in: value to update */ + /****************************************************************//** Set an ulint value in the config table. @return DB_SUCCESS if all OK else error code */ -UNIV_INTERN dberr_t fts_config_set_ulint( /*=================*/ @@ -413,10 +414,10 @@ fts_config_set_ulint( const char* name, /*!< in: param name */ ulint int_value) /*!< in: value */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Set the value specific to an FTS index in the config table. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_set_index_value( /*=======================*/ @@ -427,10 +428,10 @@ fts_config_set_index_value( fts_string_t* value) /*!< out: value read from config table */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Increment the value in the config table for column name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_increment_value( /*=======================*/ @@ -440,10 +441,10 @@ fts_config_increment_value( for this parameter name */ ulint delta) /*!< in: increment by this much */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Increment the per index value in the config table for column name. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_increment_index_value( /*=============================*/ @@ -451,12 +452,11 @@ fts_config_increment_index_value( dict_index_t* index, /*!< in: FTS index */ const char* name, /*!< in: increment config value for this parameter name */ - ulint delta) /*!< in: increment by this much */ - MY_ATTRIBUTE((nonnull)); + ulint delta); /*!< in: increment by this much */ + /******************************************************************//** Get an ulint value from the config table. 
@return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_index_ulint( /*=======================*/ @@ -465,10 +465,10 @@ fts_config_get_index_ulint( const char* name, /*!< in: param name */ ulint* int_value) /*!< out: value */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Set an ulint value in the config table. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_set_index_ulint( /*=======================*/ @@ -477,22 +477,21 @@ fts_config_set_index_ulint( const char* name, /*!< in: param name */ ulint int_value) /*!< in: value */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Get an ulint value from the config table. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_config_get_ulint( /*=================*/ trx_t* trx, /*!< in: transaction */ fts_table_t* fts_table, /*!< in: the indexed FTS table */ const char* name, /*!< in: param name */ - ulint* int_value) /*!< out: value */ - MY_ATTRIBUTE((nonnull)); + ulint* int_value); /*!< out: value */ + /******************************************************************//** Search cache for word. @return the word node vector if found else NULL */ -UNIV_INTERN const ib_vector_t* fts_cache_find_word( /*================*/ @@ -501,10 +500,10 @@ fts_cache_find_word( const fts_string_t* text) /*!< in: word to search for */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Check cache for deleted doc id. @return TRUE if deleted */ -UNIV_INTERN ibool fts_cache_is_deleted_doc_id( /*========================*/ @@ -512,9 +511,9 @@ fts_cache_is_deleted_doc_id( cache, /*!< in: cache to search */ doc_id_t doc_id) /*!< in: doc id to search for */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Append deleted doc ids to vector and sort the vector. */ -UNIV_INTERN void fts_cache_append_deleted_doc_ids( /*=============================*/ @@ -526,7 +525,6 @@ Wait for the background thread to start. We poll to detect change of state, which is acceptable, since the wait should happen only once during startup. @return true if the thread started else FALSE (i.e. timed out) */ -UNIV_INTERN ibool fts_wait_for_background_thread_to_start( /*====================================*/ @@ -539,7 +537,6 @@ fts_wait_for_background_thread_to_start( /******************************************************************//** Get the total number of words in the FTS for a particular FTS index. @return DB_SUCCESS or error code */ -UNIV_INTERN dberr_t fts_get_total_word_count( /*=====================*/ @@ -551,7 +548,6 @@ fts_get_total_word_count( /******************************************************************//** Search the index specific cache for a particular FTS index. @return the index specific cache else NULL */ -UNIV_INTERN fts_index_cache_t* fts_find_index_cache( /*================*/ @@ -560,10 +556,11 @@ fts_find_index_cache( const dict_index_t* index) /*!< in: index to search for */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Write the table id to the given buffer (including final NUL). Buffer must be at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long. 
-@return number of bytes written */ +@return number of bytes written */ UNIV_INLINE int fts_write_object_id( @@ -584,10 +581,10 @@ fts_read_object_id( ib_id_t* id, /*!< out: a table id */ const char* str) /*!< in: buffer to read from */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Get the table id. @return number of bytes written */ -UNIV_INTERN int fts_get_table_id( /*=============*/ @@ -597,53 +594,49 @@ fts_get_table_id( FTS_AUX_MIN_TABLE_ID_LENGTH bytes long */ MY_ATTRIBUTE((nonnull, warn_unused_result)); + /******************************************************************//** Add the table to add to the OPTIMIZER's list. */ -UNIV_INTERN void fts_optimize_add_table( /*===================*/ - dict_table_t* table) /*!< in: table to add */ - MY_ATTRIBUTE((nonnull)); + dict_table_t* table); /*!< in: table to add */ + /******************************************************************//** Optimize a table. */ -UNIV_INTERN void fts_optimize_do_table( /*==================*/ - dict_table_t* table) /*!< in: table to optimize */ - MY_ATTRIBUTE((nonnull)); + dict_table_t* table); /*!< in: table to optimize */ + /******************************************************************//** Construct the prefix name of an FTS table. -@return own: table name, must be freed with mem_free() */ -UNIV_INTERN +@return own: table name, must be freed with ut_free() */ char* fts_get_table_name_prefix( /*======================*/ const fts_table_t* fts_table) /*!< in: Auxiliary table type */ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); + /******************************************************************//** Add node positions. */ -UNIV_INTERN void fts_cache_node_add_positions( /*=========================*/ fts_cache_t* cache, /*!< in: cache */ fts_node_t* node, /*!< in: word node */ doc_id_t doc_id, /*!< in: doc id */ - ib_vector_t* positions) /*!< in: fts_token_t::positions */ - MY_ATTRIBUTE((nonnull(2,4))); + ib_vector_t* positions); /*!< in: fts_token_t::positions */ /******************************************************************//** Create the config table name for retrieving index specific value. @return index config parameter name */ -UNIV_INTERN char* fts_config_create_index_param_name( /*===============================*/ - const char* param, /*!< in: base name of param */ - const dict_index_t* index) /*!< in: index for config */ + const char* param, /*!< in: base name of param */ + const dict_index_t* index) /*!< in: index for config */ MY_ATTRIBUTE((nonnull, malloc, warn_unused_result)); #ifndef UNIV_NONINL diff --git a/storage/innobase/include/fts0priv.ic b/storage/innobase/include/fts0priv.ic index 88f2d67c7b8..9de3be215dc 100644 --- a/storage/innobase/include/fts0priv.ic +++ b/storage/innobase/include/fts0priv.ic @@ -26,7 +26,7 @@ Created 2011/11/12 Sunny Bains /******************************************************************//** Write the table id to the given buffer (including final NUL). Buffer must be at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long. 
-@return number of bytes written */ +@return number of bytes written */ UNIV_INLINE int fts_write_object_id( @@ -53,10 +53,10 @@ fts_write_object_id( /* Use this to construct old(5.6.14 and 5.7.3) windows ambiguous aux table names */ DBUG_EXECUTE_IF("innodb_test_wrong_windows_fts_aux_table_name", - return(sprintf(str, "%016" PRIu64, id));); + return(sprintf(str, "%016lu", id));); DBUG_EXECUTE_IF("innodb_test_wrong_fts_aux_table_name", - return(sprintf(str, UINT64PFx, id));); + return(sprintf(str, "%016lx", id));); #endif /* _WIN32 */ @@ -66,16 +66,16 @@ fts_write_object_id( // FIXME: Use ut_snprintf(), so does following one. return(sprintf(str, "%016llu", id)); #else /* _WIN32 */ - return(sprintf(str, "%016" PRIu64, id)); + return(sprintf(str, "%016lu", id)); #endif /* _WIN32 */ } - return(sprintf(str, UINT64PFx, id)); + return(sprintf(str, "%016lx", id)); } /******************************************************************//** Read the table id from the string generated by fts_write_object_id(). -@return TRUE if parse successful */ +@return TRUE if parse successful */ UNIV_INLINE ibool fts_read_object_id( @@ -86,12 +86,12 @@ fts_read_object_id( /* NOTE: this func doesn't care about whether current table is set with HEX_NAME, the user of the id read here will check if the id is HEX or DEC and do the right thing with it. */ - return(sscanf(str, UINT64PFx, id) == 1); + return(sscanf(str, "%016lx", id) == 1); } /******************************************************************//** Compare two fts_trx_table_t instances. -@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ +@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ UNIV_INLINE int fts_trx_table_cmp( @@ -99,8 +99,11 @@ fts_trx_table_cmp( const void* p1, /*!< in: id1 */ const void* p2) /*!< in: id2 */ { - const dict_table_t* table1 = (*(const fts_trx_table_t**) p1)->table; - const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table; + const dict_table_t* table1 + = (*static_cast(p1))->table; + + const dict_table_t* table2 + = (*static_cast(p2))->table; return((table1->id > table2->id) ? 1 @@ -119,8 +122,9 @@ fts_trx_table_id_cmp( const void* p1, /*!< in: id1 */ const void* p2) /*!< in: id2 */ { - const ullint* table_id = (const ullint*) p1; - const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table; + const uintmax_t* table_id = static_cast(p1); + const dict_table_t* table2 + = (*static_cast(p2))->table; return((*table_id > table2->id) ? 1 diff --git a/storage/innobase/include/fts0tlex.h b/storage/innobase/include/fts0tlex.h index f91533803e8..49bea8b08d4 100644 --- a/storage/innobase/include/fts0tlex.h +++ b/storage/innobase/include/fts0tlex.h @@ -341,7 +341,7 @@ extern int fts0tlex (yyscan_t yyscanner); #undef YY_DECL #endif -#line 68 "fts0tlex.l" +#line 69 "fts0tlex.l" #line 348 "../include/fts0tlex.h" diff --git a/storage/innobase/include/fts0tokenize.h b/storage/innobase/include/fts0tokenize.h new file mode 100644 index 00000000000..15726aea1de --- /dev/null +++ b/storage/innobase/include/fts0tokenize.h @@ -0,0 +1,188 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. 
+ +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file fts/fts0tokenize.cc +Full Text Search plugin tokenizer refer to MyISAM + +Created 2014/11/17 Shaohua Wang +***********************************************************************/ + +#include "ft_global.h" +#include "mysql/plugin_ftparser.h" +#include "m_ctype.h" + +/* Macros and structs below are from ftdefs.h in MyISAM */ +/** Check a char is true word */ +#define true_word_char(c, ch) ((c) & (_MY_U | _MY_L | _MY_NMR) || (ch) == '_') + +/** Check if a char is misc word */ +#define misc_word_char(X) 0 + +/** Boolean search syntax */ +static const char* fts_boolean_syntax = DEFAULT_FTB_SYNTAX; + +#define FTB_YES (fts_boolean_syntax[0]) +#define FTB_EGAL (fts_boolean_syntax[1]) +#define FTB_NO (fts_boolean_syntax[2]) +#define FTB_INC (fts_boolean_syntax[3]) +#define FTB_DEC (fts_boolean_syntax[4]) +#define FTB_LBR (fts_boolean_syntax[5]) +#define FTB_RBR (fts_boolean_syntax[6]) +#define FTB_NEG (fts_boolean_syntax[7]) +#define FTB_TRUNC (fts_boolean_syntax[8]) +#define FTB_LQUOT (fts_boolean_syntax[10]) +#define FTB_RQUOT (fts_boolean_syntax[11]) + +/** FTS query token */ +typedef struct st_ft_word { + uchar* pos; /*!< word start pointer */ + uint len; /*!< word len */ + double weight; /*!< word weight, unused in innodb */ +} FT_WORD; + +/** Tokenizer for ngram referring to ft_get_word(ft_parser.c) in MyISAM. +Differences: a. code format changed; b. stopword processing removed. +@param[in] cs charset +@param[in,out] start doc start pointer +@param[in,out] end doc end pointer +@param[in,out] word token +@param[in,out] info token info +@retval 0 eof +@retval 1 word found +@retval 2 left bracket +@retval 3 right bracket +@retval 4 stopword found */ +inline +uchar +fts_get_word( + const CHARSET_INFO* cs, + uchar** start, + uchar* end, + FT_WORD* word, + MYSQL_FTPARSER_BOOLEAN_INFO* + info) +{ + uchar* doc = *start; + int ctype; + uint mwc; + uint length; + int mbl; + + info->yesno = (FTB_YES ==' ') ? 1 : (info->quot != 0); + info->weight_adjust = info->wasign = 0; + info->type = FT_TOKEN_EOF; + + while (doc < end) { + for (; doc < end; + doc += (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { + mbl = cs->cset->ctype(cs, &ctype, doc, end); + + if (true_word_char(ctype, *doc)) { + break; + } + + if (*doc == FTB_RQUOT && info->quot) { + *start = doc + 1; + info->type = FT_TOKEN_RIGHT_PAREN; + + return(info->type); + } + + if (!info->quot) { + if (*doc == FTB_LBR + || *doc == FTB_RBR + || *doc == FTB_LQUOT) { + /* param->prev=' '; */ + *start = doc + 1; + if (*doc == FTB_LQUOT) { + info->quot = (char*)1; + } + + info->type = (*doc == FTB_RBR ? 
+ FT_TOKEN_RIGHT_PAREN : + FT_TOKEN_LEFT_PAREN); + + return(info->type); + } + + if (info->prev == ' ') { + if (*doc == FTB_YES) { + info->yesno = +1; + continue; + } else if (*doc == FTB_EGAL) { + info->yesno = 0; + continue; + } else if (*doc == FTB_NO) { + info->yesno = -1; + continue; + } else if (*doc == FTB_INC) { + info->weight_adjust++; + continue; + } else if (*doc == FTB_DEC) { + info->weight_adjust--; + continue; + } else if (*doc == FTB_NEG) { + info->wasign = !info->wasign; + continue; + } + } + } + + info->prev = *doc; + info->yesno = (FTB_YES == ' ') ? 1 : (info->quot != 0); + info->weight_adjust = info->wasign = 0; + } + + mwc = length = 0; + for (word->pos = doc; + doc < end; + length++, doc += (mbl > 0 ? mbl : (mbl < 0 ? -mbl : 1))) { + mbl = cs->cset->ctype(cs, &ctype, doc, end); + + if (true_word_char(ctype, *doc)) { + mwc = 0; + } else if (!misc_word_char(*doc) || mwc) { + break; + } else { + mwc++; + } + } + + /* Be sure *prev is true_word_char. */ + info->prev = 'A'; + word->len = (uint)(doc-word->pos) - mwc; + + if ((info->trunc = (doc < end && *doc == FTB_TRUNC))) { + doc++; + } + + /* We don't check stopword here. */ + *start = doc; + info->type = FT_TOKEN_WORD; + + return(info->type); + } + + if (info->quot) { + *start = doc; + info->type = FT_TOKEN_RIGHT_PAREN; + } + + return(info->type); +} diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h index e495fe72a60..039006265f6 100644 --- a/storage/innobase/include/fts0types.h +++ b/storage/innobase/include/fts0types.h @@ -26,16 +26,17 @@ Created 2007-03-27 Sunny Bains #ifndef INNOBASE_FTS0TYPES_H #define INNOBASE_FTS0TYPES_H +#include "univ.i" +#include "fts0fts.h" +#include "fut0fut.h" +#include "pars0pars.h" #include "que0types.h" #include "ut0byte.h" -#include "fut0fut.h" #include "ut0rbt.h" -#include "fts0fts.h" /** Types used within FTS. */ struct fts_que_t; struct fts_node_t; -struct fts_utf8_str_t; /** Callbacks used within FTS. */ typedef pars_user_func_cb_t fts_sql_callback; @@ -270,6 +271,12 @@ struct fts_doc_t { same lifespan, most notably the vector of token positions */ CHARSET_INFO* charset; /*!< Document's charset info */ + + st_mysql_ftparser* parser; /*!< fts plugin parser */ + + bool is_ngram; /*!< Whether it is a ngram parser */ + + ib_rbt_t* stopwords; /*!< Stopwords */ }; /** A token and its positions within a document. */ @@ -284,33 +291,6 @@ struct fts_token_t { /** It's defined in fts/fts0fts.c */ extern const fts_index_selector_t fts_index_selector[]; -/******************************************************************//** -Compare two UTF-8 strings. */ -UNIV_INLINE -int -fts_utf8_string_cmp( -/*================*/ - /*!< out: - < 0 if n1 < n2, - 0 if n1 == n2, - > 0 if n1 > n2 */ - const void* p1, /*!< in: key */ - const void* p2); /*!< in: node */ - -/******************************************************************//** -Compare two UTF-8 strings, and return match (0) if -passed in "key" value equals or is the prefix of the "node" value. */ -UNIV_INLINE -int -fts_utf8_string_cmp_prefix( -/*=======================*/ - /*!< out: - < 0 if n1 < n2, - 0 if n1 == n2, - > 0 if n1 > n2 */ - const void* p1, /*!< in: key */ - const void* p2); /*!< in: node */ - /******************************************************************//** Compare two fts_trx_row_t instances doc_ids. 
*/ UNIV_INLINE @@ -361,11 +341,11 @@ fts_decode_vlc( incremented by the number of bytes decoded */ /******************************************************************//** -Duplicate an UTF-8 string. */ +Duplicate a string. */ UNIV_INLINE void -fts_utf8_string_dup( -/*================*/ +fts_string_dup( +/*===========*/ /*!< out: < 0 if n1 < n2, 0 if n1 == n2, @@ -396,43 +376,6 @@ fts_encode_int( byte* buf); /*!< in: buffer, must have enough space */ -/******************************************************************//** -Decode a UTF-8 character. - -http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf: - - Scalar Value 1st Byte 2nd Byte 3rd Byte 4th Byte -00000000 0xxxxxxx 0xxxxxxx -00000yyy yyxxxxxx 110yyyyy 10xxxxxx -zzzzyyyy yyxxxxxx 1110zzzz 10yyyyyy 10xxxxxx -000uuuzz zzzzyyyy yyxxxxxx 11110uuu 10zzzzzz 10yyyyyy 10xxxxxx - -This function decodes UTF-8 sequences up to 6 bytes (31 bits). - -On error *ptr will point to the first byte that was not correctly -decoded. This will hopefully help in resyncing the input. */ -UNIV_INLINE -ulint -fts_utf8_decode( -/*============*/ - /*!< out: UTF8_ERROR if *ptr - did not point to a valid - UTF-8 sequence, or the - Unicode code point. */ - const byte** ptr); /*!< in/out: pointer to - UTF-8 string. The - pointer is advanced to - the start of the next - character. */ - -/******************************************************************//** -Lowercase an UTF-8 string. */ -UNIV_INLINE -void -fts_utf8_tolower( -/*=============*/ - fts_string_t* str); /*!< in: string */ - /******************************************************************//** Get the selected FTS aux INDEX suffix. */ UNIV_INLINE @@ -441,34 +384,17 @@ fts_get_suffix( /*===========*/ ulint selected); /*!< in: selected index */ -/******************************************************************** -Get the number of index selectors. */ -UNIV_INLINE -ulint -fts_get_n_selectors(void); -/*=====================*/ - -/******************************************************************//** -Select the FTS auxiliary index for the given string. +/** Select the FTS auxiliary index for the given character. +@param[in] cs charset +@param[in] str string +@param[in] len string length in bytes @return the index to use for the string */ UNIV_INLINE ulint fts_select_index( -/*=============*/ - const CHARSET_INFO* cs, /*!< Charset */ - const byte* str, /*!< in: word string */ - ulint len); /*!< in: string length */ - -/******************************************************************** -Select the next FTS auxiliary index for the given character. -@return the next index to use for character */ -UNIV_INLINE -ulint -fts_select_next_index( -/*==================*/ - const CHARSET_INFO* cs, /*!< Charset */ - const byte* str, /*!< in: string */ - ulint len); /*!< in: string length */ + const CHARSET_INFO* cs, + const byte* str, + ulint len); #ifndef UNIV_NONINL #include "fts0types.ic" diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic index f0dfd023a70..417a1010919 100644 --- a/storage/innobase/include/fts0types.ic +++ b/storage/innobase/include/fts0types.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,23 +26,16 @@ Created 2007-03-27 Sunny Bains #ifndef INNOBASE_FTS0TYPES_IC #define INNOBASE_FTS0TYPES_IC -#include - #include "rem0cmp.h" #include "ha_prototypes.h" -extern const ulint UTF8_ERROR; - -/* Determine if a UTF-8 continuation byte is valid. */ -#define fts_utf8_is_valid(b) (((b) & 0xC0) == 0x80) - /******************************************************************//** -Duplicate an UTF-8 string. +Duplicate a string. @return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ UNIV_INLINE void -fts_utf8_string_dup( -/*================*/ +fts_string_dup( +/*===========*/ fts_string_t* dst, /*!< in: dup to here */ const fts_string_t* src, /*!< in: src string */ mem_heap_t* heap) /*!< in: heap to use */ @@ -103,183 +96,6 @@ fts_update_doc_id_cmp( return((int)(up1->doc_id - up2->doc_id)); } - -/******************************************************************//** -Lowercase an UTF-8 string. */ -UNIV_INLINE -void -fts_utf8_tolower( -/*=============*/ - fts_string_t* str) /*!< in: string */ -{ - innobase_casedn_str((char*) str->f_str); -} - -/******************************************************************//** -Compare two UTF-8 strings. -@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ -UNIV_INLINE -int -fts_utf8_string_cmp( -/*================*/ - const void* p1, /*!< in: key */ - const void* p2) /*!< in: node */ -{ - const fts_string_t* s1 = (const fts_string_t*) p1; - const fts_string_t* s2 = (const fts_string_t*) p2; - - return(cmp_data_data_slow_varchar( - s1->f_str, s1->f_len, s2->f_str, s2->f_len)); -} - -/******************************************************************//** -Compare two UTF-8 strings, and return match (0) if -passed in "key" value equals or is the prefix of the "node" value. -@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */ -UNIV_INLINE -int -fts_utf8_string_cmp_prefix( -/*=======================*/ - const void* p1, /*!< in: key */ - const void* p2) /*!< in: node */ -{ - int result; - ulint len; - - const fts_string_t* s1 = (const fts_string_t*) p1; - const fts_string_t* s2 = (const fts_string_t*) p2; - - len = ut_min(s1->f_len, s2->f_len); - - result = cmp_data_data_slow_varchar(s1->f_str, len, s2->f_str, len); - - if (result) { - return(result); - } - - if (s1->f_len > s2->f_len) { - return(1); - } - - return(0); -} - -/******************************************************************//** -Decode a UTF-8 character. - -http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf: - - Scalar Value 1st Byte 2nd Byte 3rd Byte 4th Byte -00000000 0xxxxxxx 0xxxxxxx -00000yyy yyxxxxxx 110yyyyy 10xxxxxx -zzzzyyyy yyxxxxxx 1110zzzz 10yyyyyy 10xxxxxx -000uuuzz zzzzyyyy yyxxxxxx 11110uuu 10zzzzzz 10yyyyyy 10xxxxxx - -This function decodes UTF-8 sequences up to 6 bytes (31 bits). - -On error *ptr will point to the first byte that was not correctly -decoded. This will hopefully help in resyncing the input. -@return UTF8_ERROR if *ptr did not point to a valid -UTF-8 sequence, or the Unicode code point. */ -UNIV_INLINE -ulint -fts_utf8_decode( -/*============*/ - const byte** ptr) /*!< in/out: pointer to - UTF-8 string. The - pointer is advanced to - the start of the next - character. 
*/ -{ - const byte* p = *ptr; - ulint ch = *p++; -#ifdef UNIV_DEBUG - ulint min_ch; -#endif /* UNIV_DEBUG */ - - if (UNIV_LIKELY(ch < 0x80)) { - /* 0xxxxxxx */ - } else if (UNIV_UNLIKELY(ch < 0xC0)) { - /* A continuation byte cannot start a code. */ - goto err_exit; - } else if (ch < 0xE0) { - /* 110yyyyy 10xxxxxx */ - ch &= 0x1F; - ut_d(min_ch = 0x80); - goto get1; - } else if (ch < 0xF0) { - /* 1110zzzz 10yyyyyy 10xxxxxx */ - ch &= 0x0F; - ut_d(min_ch = 0x800); - goto get2; - } else if (ch < 0xF8) { - /* 11110uuu 10zzzzzz 10yyyyyy 10xxxxxx */ - ch &= 0x07; - ut_d(min_ch = 0x10000); - goto get3; - } else if (ch < 0xFC) { - /* 111110tt 10uuuuuu 10zzzzzz 10yyyyyy 10xxxxxx */ - ch &= 0x03; - ut_d(min_ch = 0x200000); - goto get4; - } else if (ch < 0xFE) { - /* 1111110s 10tttttt 10uuuuuu 10zzzzzz 10yyyyyy 10xxxxxx */ - ut_d(min_ch = 0x4000000); - if (!fts_utf8_is_valid(*p)) { - goto err_exit; - } - ch <<= 6; - ch |= (*p++) & 0x3F; -get4: - if (!fts_utf8_is_valid(*p)) { - goto err_exit; - } - ch <<= 6; - ch |= (*p++) & 0x3F; -get3: - if (!fts_utf8_is_valid(*p)) { - goto err_exit; - } - ch <<= 6; - ch |= (*p++) & 0x3F; -get2: - if (!fts_utf8_is_valid(*p)) { - goto err_exit; - } - ch <<= 6; - ch |= (*p++) & 0x3F; -get1: - if (!fts_utf8_is_valid(*p)) { - goto err_exit; - } - ch <<= 6; - ch |= (*p++) & 0x3F; - - /* The following is needed in the 6-byte case - when ulint is wider than 32 bits. */ - ch &= 0xFFFFFFFF; - - /* The code positions U+D800 to U+DFFF (UTF-16 surrogate pairs) - and U+FFFE and U+FFFF cannot occur in valid UTF-8. */ - - if ( (ch >= 0xD800 && ch <= 0xDFFF) -#ifdef UNIV_DEBUG - || ch < min_ch -#endif /* UNIV_DEBUG */ - || ch == 0xFFFE || ch == 0xFFFF) { - - ch = UTF8_ERROR; - } - } else { -err_exit: - ch = UTF8_ERROR; - } - - *ptr = p; - - return(ch); -} - /******************************************************************//** Get the first character's code position for FTS index partition */ extern @@ -290,16 +106,41 @@ innobase_strnxfrm( const uchar* p2, /*!< in: string */ const ulint len2); /*!< in: string length */ -/******************************************************************//** -Select the FTS auxiliary index for the given character. -@return the index to use for the string */ +/** Check if fts index charset is cjk +@param[in] cs charset +@retval true if the charset is cjk +@retval false if not. */ +UNIV_INLINE +bool +fts_is_charset_cjk( + const CHARSET_INFO* cs) +{ + if (strcmp(cs->name, "gb2312_chinese_ci") == 0 + || strcmp(cs->name, "gbk_chinese_ci") == 0 + || strcmp(cs->name, "big5_chinese_ci") == 0 + || strcmp(cs->name, "gb18030_chinese_ci") == 0 + || strcmp(cs->name, "ujis_japanese_ci") == 0 + || strcmp(cs->name, "sjis_japanese_ci") == 0 + || strcmp(cs->name, "cp932_japanese_ci") == 0 + || strcmp(cs->name, "eucjpms_japanese_ci") == 0 + || strcmp(cs->name, "euckr_korean_ci") == 0) { + return(true); + } else { + return(false); + } +} + +/** Select the FTS auxiliary index for the given character by range. 
+@param[in] cs charset +@param[in] str string +@param[in] len string length +@retval the index to use for the string */ UNIV_INLINE ulint -fts_select_index( -/*=============*/ - const CHARSET_INFO* cs, /*!< in: Charset */ - const byte* str, /*!< in: string */ - ulint len) /*!< in: string length */ +fts_select_index_by_range( + const CHARSET_INFO* cs, + const byte* str, + ulint len) { ulint selected = 0; ulint value = innobase_strnxfrm(cs, str, len); @@ -323,37 +164,64 @@ fts_select_index( return(selected - 1); } -/******************************************************************//** -Select the next FTS auxiliary index for the given character. -@return the next index to use for character */ +/** Select the FTS auxiliary index for the given character by hash. +@param[in] cs charset +@param[in] str string +@param[in] len string length +@retval the index to use for the string */ UNIV_INLINE ulint -fts_select_next_index( -/*==================*/ - const CHARSET_INFO* cs, /*!< in: Charset */ - const byte* str, /*!< in: string */ - ulint len) /*!< in: string length */ +fts_select_index_by_hash( + const CHARSET_INFO* cs, + const byte* str, + ulint len) { - ulint selected = 0; - ulint value = innobase_strnxfrm(cs, str, len); + int char_len; + ulong nr1 = 1; + ulong nr2 = 4; - while (fts_index_selector[selected].value != 0) { + ut_ad(!(str == NULL && len > 0)); - if (fts_index_selector[selected].value == value) { + if (str == NULL || len == 0) { + return 0; + } - return(selected + 1); + /* Get the first char */ + /* JAN: TODO: MySQL 5.7 had + char_len = my_mbcharlen_ptr(cs, reinterpret_cast(str), + reinterpret_cast(str + len)); + */ + char_len = cs->cset->charlen(cs, str, str+len); - } else if (fts_index_selector[selected].value > value) { + ut_ad(static_cast(char_len) <= len); - return(selected); - } + /* Get collation hash code */ + cs->coll->hash_sort(cs, str, char_len, &nr1, &nr2); - ++selected; - } + return(nr1 % FTS_NUM_AUX_INDEX); +} - ut_ad(selected > 0); +/** Select the FTS auxiliary index for the given character. +@param[in] cs charset +@param[in] str string +@param[in] len string length in bytes +@retval the index to use for the string */ +UNIV_INLINE +ulint +fts_select_index( + const CHARSET_INFO* cs, + const byte* str, + ulint len) +{ + ulint selected; - return((ulint) selected); + if (fts_is_charset_cjk(cs)) { + selected = fts_select_index_by_hash(cs, str, len); + } else { + selected = fts_select_index_by_range(cs, str, len); + } + + return(selected); } /******************************************************************//** @@ -367,22 +235,4 @@ fts_get_suffix( return(fts_index_selector[selected].suffix); } -/******************************************************************//** -Get the number of index selectors. -@return The number of selectors */ -UNIV_INLINE -ulint -fts_get_n_selectors(void) -/*=====================*/ -{ - ulint i = 0; - - // FIXME: This is a hack - while (fts_index_selector[i].value != 0) { - ++i; - } - - return(i); -} - #endif /* INNOBASE_FTS0TYPES_IC */ diff --git a/storage/innobase/include/fut0fut.h b/storage/innobase/include/fut0fut.h index 851cdb44cdf..ab04a700d4f 100644 --- a/storage/innobase/include/fut0fut.h +++ b/storage/innobase/include/fut0fut.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. 
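A note on the fts_select_index() rewrite above: instead of scanning fts_index_selector for the next matching weight, the function now asks fts_is_charset_cjk() and then chooses the auxiliary index either by hashing the first character's collation weights (CJK collations) or by its sort-weight range (all other collations). The standalone C++ sketch below mirrors only that dispatch shape; the charset list is copied from the hunk, while NUM_AUX_INDEX and the two toy selectors are simplified stand-ins rather than InnoDB's implementation.

/* --- illustrative sketch, not part of the patch --- */
#include <cstddef>
#include <functional>
#include <string>

static const std::size_t NUM_AUX_INDEX = 6;	/* stand-in for FTS_NUM_AUX_INDEX */

/* Mirrors fts_is_charset_cjk(): these collations take the hash path. */
static bool is_cjk_charset(const std::string& cs_name)
{
	static const char* cjk[] = {
		"gb2312_chinese_ci", "gbk_chinese_ci", "big5_chinese_ci",
		"gb18030_chinese_ci", "ujis_japanese_ci", "sjis_japanese_ci",
		"cp932_japanese_ci", "eucjpms_japanese_ci", "euckr_korean_ci"};

	for (const char* name : cjk) {
		if (cs_name == name) {
			return true;
		}
	}
	return false;
}

/* Toy stand-in for fts_select_index_by_hash(): InnoDB hashes the first
character with the collation's hash_sort(); std::hash is used here only to
show the "hash modulo number of auxiliary indexes" shape. */
static std::size_t select_by_hash(const std::string& word)
{
	return std::hash<std::string>()(word) % NUM_AUX_INDEX;
}

/* Toy stand-in for fts_select_index_by_range(): InnoDB compares the first
character's sort weight against fts_index_selector[]. */
static std::size_t select_by_range(const std::string& word)
{
	unsigned char c = word.empty() ? 0 : static_cast<unsigned char>(word[0]);
	return c % NUM_AUX_INDEX;
}

std::size_t select_fts_aux_index(const std::string& cs_name, const std::string& word)
{
	return is_cjk_charset(cs_name) ? select_by_hash(word)
				       : select_by_range(word);
}
/* --- end of sketch --- */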
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -32,24 +32,28 @@ Created 12/13/1995 Heikki Tuuri #include "fil0fil.h" #include "mtr0mtr.h" -/********************************************************************//** -Gets a pointer to a file address and latches the page. -@return pointer to a byte in a frame; the file page in the frame is +/** Gets a pointer to a file address and latches the page. +@param[in] space space id +@param[in] page_size page size +@param[in] addr file address +@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_SX_LATCH +@param[out] ptr_block file page +@param[in,out] mtr mini-transaction +@return pointer to a byte in (*ptr_block)->frame; the *ptr_block is bufferfixed and latched */ UNIV_INLINE byte* fut_get_ptr( -/*========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - fil_addr_t addr, /*!< in: file address */ - ulint rw_latch, /*!< in: RW_S_LATCH, RW_X_LATCH */ - mtr_t* mtr); /*!< in: mtr handle */ + ulint space, + const page_size_t& page_size, + fil_addr_t addr, + rw_lock_type_t rw_latch, + mtr_t* mtr, + buf_block_t** ptr_block = NULL) + __attribute__((warn_unused_result)); #ifndef UNIV_NONINL #include "fut0fut.ic" #endif -#endif - +#endif /* fut0fut_h */ diff --git a/storage/innobase/include/fut0fut.ic b/storage/innobase/include/fut0fut.ic index b065b10b9ca..6fe031876e6 100644 --- a/storage/innobase/include/fut0fut.ic +++ b/storage/innobase/include/fut0fut.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,31 +26,43 @@ Created 12/13/1995 Heikki Tuuri #include "sync0rw.h" #include "buf0buf.h" -/********************************************************************//** -Gets a pointer to a file address and latches the page. -@return pointer to a byte in a frame; the file page in the frame is +/** Gets a pointer to a file address and latches the page. 
+@param[in] space space id +@param[in] page_size page size +@param[in] addr file address +@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_SX_LATCH +@param[in,out] mtr mini-transaction +@param[out] ptr_block file page +@return pointer to a byte in (*ptr_block)->frame; the *ptr_block is bufferfixed and latched */ UNIV_INLINE byte* fut_get_ptr( -/*========*/ - ulint space, /*!< in: space id */ - ulint zip_size,/*!< in: compressed page size in bytes - or 0 for uncompressed pages */ - fil_addr_t addr, /*!< in: file address */ - ulint rw_latch, /*!< in: RW_S_LATCH, RW_X_LATCH */ - mtr_t* mtr) /*!< in: mtr handle */ + ulint space, + const page_size_t& page_size, + fil_addr_t addr, + rw_lock_type_t rw_latch, + mtr_t* mtr, + buf_block_t** ptr_block) { buf_block_t* block; - byte* ptr; + byte* ptr = NULL; ut_ad(addr.boffset < UNIV_PAGE_SIZE); - ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH)); + ut_ad((rw_latch == RW_S_LATCH) + || (rw_latch == RW_X_LATCH) + || (rw_latch == RW_SX_LATCH)); + + block = buf_page_get(page_id_t(space, addr.page), page_size, + rw_latch, mtr); - block = buf_page_get(space, zip_size, addr.page, rw_latch, mtr); ptr = buf_block_get_frame(block) + addr.boffset; buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK); + if (ptr_block != NULL) { + *ptr_block = block; + } + return(ptr); } diff --git a/storage/innobase/include/fut0lst.h b/storage/innobase/include/fut0lst.h index 90f9a65d4fa..9c980d1358d 100644 --- a/storage/innobase/include/fut0lst.h +++ b/storage/innobase/include/fut0lst.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -26,6 +26,8 @@ Created 11/28/1995 Heikki Tuuri #ifndef fut0lst_h #define fut0lst_h +#ifndef UNIV_INNOCHECKSUM + #include "univ.i" #include "fil0fil.h" @@ -41,11 +43,12 @@ typedef byte flst_node_t; /* The physical size of a list base node in bytes */ #define FLST_BASE_NODE_SIZE (4 + 2 * FIL_ADDR_SIZE) +#endif /* !UNIV_INNOCHECKSUM */ /* The physical size of a list node in bytes */ #define FLST_NODE_SIZE (2 * FIL_ADDR_SIZE) -#ifndef UNIV_HOTBACKUP +#if !defined UNIV_HOTBACKUP && !defined UNIV_INNOCHECKSUM /********************************************************************//** Initializes a list base node. */ UNIV_INLINE @@ -56,7 +59,6 @@ flst_init( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Adds a node as the last node in a list. */ -UNIV_INTERN void flst_add_last( /*==========*/ @@ -65,7 +67,6 @@ flst_add_last( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Adds a node as the first node in a list. */ -UNIV_INTERN void flst_add_first( /*===========*/ @@ -74,7 +75,6 @@ flst_add_first( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Inserts a node after another in a list. */ -UNIV_INTERN void flst_insert_after( /*==============*/ @@ -84,7 +84,6 @@ flst_insert_after( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Inserts a node before another in a list. 
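A note on the fut_get_ptr() change above: the raw zip_size argument is replaced by a const page_size_t&, rw_latch is narrowed to rw_lock_type_t (RW_SX_LATCH is now accepted), and an optional buf_block_t** out-parameter lets the caller keep the latched block. A minimal call sketch, assuming space, page_size, addr and mtr already exist in the caller as they do in the surrounding file-segment code:

/* --- illustrative sketch, not part of the patch --- */
buf_block_t*	block = NULL;

/* Latch the page holding the file address and also keep its block. */
byte*	node = fut_get_ptr(space, page_size, addr, RW_SX_LATCH, &mtr, &block);
/* --- end of sketch --- */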
*/ -UNIV_INTERN void flst_insert_before( /*===============*/ @@ -94,7 +93,6 @@ flst_insert_before( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Removes a node. */ -UNIV_INTERN void flst_remove( /*========*/ @@ -105,7 +103,6 @@ flst_remove( Cuts off the tail of the list, including the node given. The number of nodes which will be removed must be provided by the caller, as this function does not measure the length of the tail. */ -UNIV_INTERN void flst_cut_end( /*=========*/ @@ -118,7 +115,6 @@ flst_cut_end( Cuts off the tail of the list, not including the given node. The number of nodes which will be removed must be provided by the caller, as this function does not measure the length of the tail. */ -UNIV_INTERN void flst_truncate_end( /*==============*/ @@ -126,18 +122,16 @@ flst_truncate_end( flst_node_t* node2, /*!< in: first node not to remove */ ulint n_nodes,/*!< in: number of nodes to remove */ mtr_t* mtr); /*!< in: mini-transaction handle */ -/********************************************************************//** -Gets list length. -@return length */ +/** Get the length of a list. +@param[in] base base node +@return length */ UNIV_INLINE ulint flst_get_len( -/*=========*/ - const flst_base_node_t* base, /*!< in: pointer to base node */ - mtr_t* mtr); /*!< in: mini-transaction handle */ + const flst_base_node_t* base); /********************************************************************//** Gets list first node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_first( @@ -146,7 +140,7 @@ flst_get_first( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Gets list last node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_last( @@ -155,7 +149,7 @@ flst_get_last( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Gets list next node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_next_addr( @@ -164,7 +158,7 @@ flst_get_next_addr( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Gets list prev node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_prev_addr( @@ -182,7 +176,7 @@ flst_write_addr( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Reads a file address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_read_addr( @@ -191,8 +185,7 @@ flst_read_addr( mtr_t* mtr); /*!< in: mini-transaction handle */ /********************************************************************//** Validates a file-based list. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool flst_validate( /*==========*/ @@ -200,7 +193,6 @@ flst_validate( mtr_t* mtr1); /*!< in: mtr */ /********************************************************************//** Prints info of a file-based list. 
*/ -UNIV_INTERN void flst_print( /*=======*/ @@ -212,6 +204,6 @@ flst_print( #include "fut0lst.ic" #endif -#endif /* !UNIV_HOTBACKUP */ +#endif /* !UNIV_HOTBACKUP && !UNIV_INNOCHECKSUM*/ #endif diff --git a/storage/innobase/include/fut0lst.ic b/storage/innobase/include/fut0lst.ic index d18cf21378f..128dc77ed92 100644 --- a/storage/innobase/include/fut0lst.ic +++ b/storage/innobase/include/fut0lst.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -54,7 +54,9 @@ flst_write_addr( mtr_t* mtr) /*!< in: mini-transaction handle */ { ut_ad(faddr && mtr); - ut_ad(mtr_memo_contains_page(mtr, faddr, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, faddr, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); ut_a(addr.page == FIL_NULL || addr.boffset >= FIL_PAGE_DATA); ut_a(ut_align_offset(faddr, UNIV_PAGE_SIZE) >= FIL_PAGE_DATA); @@ -65,7 +67,7 @@ flst_write_addr( /********************************************************************//** Reads a file address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_read_addr( @@ -94,29 +96,29 @@ flst_init( flst_base_node_t* base, /*!< in: pointer to base node */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX)); + ut_ad(mtr_memo_contains_page_flagged(mtr, base, + MTR_MEMO_PAGE_X_FIX + | MTR_MEMO_PAGE_SX_FIX)); mlog_write_ulint(base + FLST_LEN, 0, MLOG_4BYTES, mtr); flst_write_addr(base + FLST_FIRST, fil_addr_null, mtr); flst_write_addr(base + FLST_LAST, fil_addr_null, mtr); } -/********************************************************************//** -Gets list length. -@return length */ +/** Get the length of a list. +@param[in] base base node +@return length */ UNIV_INLINE ulint flst_get_len( -/*=========*/ - const flst_base_node_t* base, /*!< in: pointer to base node */ - mtr_t* mtr) /*!< in: mini-transaction handle */ + const flst_base_node_t* base) { - return(mtr_read_ulint(base + FLST_LEN, MLOG_4BYTES, mtr)); + return(mach_read_from_4(base + FLST_LEN)); } /********************************************************************//** Gets list first node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_first( @@ -129,7 +131,7 @@ flst_get_first( /********************************************************************//** Gets list last node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_last( @@ -142,7 +144,7 @@ flst_get_last( /********************************************************************//** Gets list next node address. -@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_next_addr( @@ -155,7 +157,7 @@ flst_get_next_addr( /********************************************************************//** Gets list prev node address. 
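A note on the flst_get_len() change above: the list length is now read directly from the base node with mach_read_from_4(), so the mini-transaction argument disappears and callers simply stop passing their mtr. A hypothetical caller, before and after:

/* --- illustrative sketch, not part of the patch --- */
ulint	len;

len = flst_get_len(base, mtr);		/* old signature: length read via the mtr */
len = flst_get_len(base);		/* new signature: direct 4-byte read */
/* --- end of sketch --- */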
-@return file address */ +@return file address */ UNIV_INLINE fil_addr_t flst_get_prev_addr( diff --git a/storage/innobase/include/gis0geo.h b/storage/innobase/include/gis0geo.h new file mode 100644 index 00000000000..08895af545e --- /dev/null +++ b/storage/innobase/include/gis0geo.h @@ -0,0 +1,162 @@ +/***************************************************************************** +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software Foundation, +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA +*****************************************************************************/ + +/**************************************************//** +@file gis0geo.h +The r-tree define from MyISAM +*******************************************************/ + +#ifndef _gis0geo_h +#define _gis0geo_h + +#include "my_global.h" +#include "string.h" + +#define SPTYPE HA_KEYTYPE_DOUBLE +#define SPLEN 8 + +/* Since the mbr could be a point or a linestring, in this case, area of +mbr is 0. So, we define this macro for calculating the area increasing +when we need to enlarge the mbr. */ +#define LINE_MBR_WEIGHTS 0.001 + +/* Types of "well-known binary representation" (wkb) format. */ +enum wkbType +{ + wkbPoint = 1, + wkbLineString = 2, + wkbPolygon = 3, + wkbMultiPoint = 4, + wkbMultiLineString = 5, + wkbMultiPolygon = 6, + wkbGeometryCollection = 7 +}; + +/* Byte order of "well-known binary representation" (wkb) format. */ +enum wkbByteOrder +{ + wkbXDR = 0, /* Big Endian */ + wkbNDR = 1 /* Little Endian */ +}; + +/** Get the wkb of default POINT value, which represents POINT(0 0) +if it's of dimension 2, etc. +@param[in] n_dims dimensions +@param[out] wkb wkb buffer for default POINT +@param[in] len length of wkb buffer +@return non-0 indicate the length of wkb of the default POINT, +0 if the buffer is too small */ +uint +get_wkb_of_default_point( + uint n_dims, + uchar* wkb, + uint len); + +/*************************************************************//** +Calculate minimal bounding rectangle (mbr) of the spatial object +stored in "well-known binary representation" (wkb) format. +@return 0 if ok */ +int +rtree_mbr_from_wkb( +/*===============*/ + uchar* wkb, /*!< in: pointer to wkb. */ + uint size, /*!< in: size of wkb. */ + uint n_dims, /*!< in: dimensions. */ + double* mbr); /*!< in/out: mbr. */ + +/* Rtree split node structure. */ +struct rtr_split_node_t +{ + double square; /* square of the mbr.*/ + int n_node; /* which group in.*/ + uchar* key; /* key. */ + double* coords; /* mbr. */ +}; + +/*************************************************************//** +Inline function for reserving coords */ +inline +static +double* +reserve_coords(double **d_buffer, /*!< in/out: buffer. */ + int n_dim) /*!< in: dimensions. */ +/*===========*/ +{ + double *coords = *d_buffer; + (*d_buffer) += n_dim * 2; + return coords; +} + +/*************************************************************//** +Split rtree nodes. 
+Return which group the first rec is in. */ +int +split_rtree_node( +/*=============*/ + rtr_split_node_t* node, /*!< in: split nodes.*/ + int n_entries, /*!< in: entries number.*/ + int all_size, /*!< in: total key's size.*/ + int key_size, /*!< in: key's size.*/ + int min_size, /*!< in: minimal group size.*/ + int size1, /*!< in: size of group.*/ + int size2, /*!< in: initial group sizes */ + double** d_buffer, /*!< in/out: buffer.*/ + int n_dim, /*!< in: dimensions. */ + uchar* first_rec); /*!< in: the first rec. */ + +/*************************************************************//** +Compares two keys a and b depending on nextflag +nextflag can contain these flags: + MBR_INTERSECT(a,b) a overlaps b + MBR_CONTAIN(a,b) a contains b + MBR_DISJOINT(a,b) a disjoint b + MBR_WITHIN(a,b) a within b + MBR_EQUAL(a,b) All coordinates of MBRs are equal + MBR_DATA(a,b) Data reference is the same +Returns 0 on success. */ +int +rtree_key_cmp( +/*==========*/ + page_cur_mode_t mode, /*!< in: compare method. */ + const uchar* b, /*!< in: first key. */ + int b_len, /*!< in: first key len. */ + const uchar* a, /*!< in: second key. */ + int a_len); /*!< in: second key len. */ + +/*************************************************************//** +Calculates MBR_AREA(a+b) - MBR_AREA(a) +Note: when 'a' and 'b' objects are far from each other, +the area increase can be really big, so this function +can return 'inf' as a result. */ +double +rtree_area_increase( + const uchar* a, /*!< in: first mbr. */ + const uchar* b, /*!< in: second mbr. */ + int a_len, /*!< in: mbr length. */ + double* ab_area); /*!< out: increased area. */ + +/** Calculates overlapping area +@param[in] a mbr a +@param[in] b mbr b +@param[in] mbr_len mbr length +@return overlapping area */ +double +rtree_area_overlapping( + const uchar* a, + const uchar* b, + int mbr_len); +#endif diff --git a/storage/innobase/include/gis0rtree.h b/storage/innobase/include/gis0rtree.h new file mode 100644 index 00000000000..316cab888b3 --- /dev/null +++ b/storage/innobase/include/gis0rtree.h @@ -0,0 +1,572 @@ +/***************************************************************************** + +Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
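A note on gis0geo.h above: rtree_area_increase() returns MBR_AREA(a+b) - MBR_AREA(a), i.e. how much rectangle a has to grow to also cover b, and the header warns the value can become huge for distant objects. The toy two-dimensional version below shows the quantity on plain doubles; the real function works on the packed uchar MBR format and arbitrary dimensions.

/* --- illustrative sketch, not part of the patch --- */
#include <cassert>

struct box_t { double xmin, xmax, ymin, ymax; };

/* Area of the smallest rectangle enclosing both a and b, minus the area of a. */
static double area_increase(const box_t& a, const box_t& b)
{
	double xmin = a.xmin < b.xmin ? a.xmin : b.xmin;
	double xmax = a.xmax > b.xmax ? a.xmax : b.xmax;
	double ymin = a.ymin < b.ymin ? a.ymin : b.ymin;
	double ymax = a.ymax > b.ymax ? a.ymax : b.ymax;

	return (xmax - xmin) * (ymax - ymin)
		- (a.xmax - a.xmin) * (a.ymax - a.ymin);
}

int main()
{
	box_t	a = {0.0, 2.0, 0.0, 2.0};	/* area 4 */
	box_t	b = {3.0, 4.0, 3.0, 4.0};

	/* the union box is [0,4] x [0,4], area 16, so the increase is 12 */
	assert(area_increase(a, b) == 12.0);
	return 0;
}
/* --- end of sketch --- */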
+ +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file include gis0rtree.h +R-tree header file + +Created 2013/03/27 Jimmy Yang and Allen Lai +***********************************************************************/ + +#ifndef gis0rtree_h +#define gis0rtree_h + +#include "univ.i" + +#include "data0type.h" +#include "data0types.h" +#include "dict0types.h" +#include "hash0hash.h" +#include "mem0mem.h" +#include "page0page.h" +#include "rem0types.h" +#include "row0types.h" +#include "trx0types.h" +#include "ut0vec.h" +#include "ut0wqueue.h" +#include "que0types.h" +#include "gis0geo.h" +#include "gis0type.h" +#include "btr0types.h" +#include "btr0cur.h" + +/* Whether MBR 'a' contains 'b' */ +#define MBR_CONTAIN_CMP(a, b) \ + ((((b)->xmin >= (a)->xmin) && ((b)->xmax <= (a)->xmax) \ + && ((b)->ymin >= (a)->ymin) && ((b)->ymax <= (a)->ymax))) + +/* Whether MBR 'a' equals to 'b' */ +#define MBR_EQUAL_CMP(a, b) \ + ((((b)->xmin == (a)->xmin) && ((b)->xmax == (a)->xmax)) \ + && (((b)->ymin == (a)->ymin) && ((b)->ymax == (a)->ymax))) + +/* Whether MBR 'a' intersects 'b' */ +#define MBR_INTERSECT_CMP(a, b) \ + ((((b)->xmin <= (a)->xmax) || ((b)->xmax >= (a)->xmin)) \ + && (((b)->ymin <= (a)->ymax) || ((b)->ymax >= (a)->ymin))) + +/* Whether MBR 'a' and 'b' disjoint */ +#define MBR_DISJOINT_CMP(a, b) (!MBR_INTERSECT_CMP(a, b)) + +/* Whether MBR 'a' within 'b' */ +#define MBR_WITHIN_CMP(a, b) \ + ((((b)->xmin <= (a)->xmin) && ((b)->xmax >= (a)->xmax)) \ + && (((b)->ymin <= (a)->ymin) && ((b)->ymax >= (a)->ymax))) + +/* Define it for rtree search mode checking. */ +#define RTREE_SEARCH_MODE(mode) \ + (((mode) >= PAGE_CUR_CONTAIN) && ((mode <= PAGE_CUR_RTREE_LOCATE))) + +/* Geometry data header */ +#define GEO_DATA_HEADER_SIZE 4 +/**********************************************************************//** +Builds a Rtree node pointer out of a physical record and a page number. +@return own: node pointer */ +dtuple_t* +rtr_index_build_node_ptr( +/*=====================*/ + const dict_index_t* index, /*!< in: index */ + const rtr_mbr_t* mbr, /*!< in: mbr of lower page */ + const rec_t* rec, /*!< in: record for which to build node + pointer */ + ulint page_no,/*!< in: page number to put in node + pointer */ + mem_heap_t* heap, /*!< in: memory heap where pointer + created */ + ulint level); /*!< in: level of rec in tree: + 0 means leaf level */ + +/*************************************************************//** +Splits an R-tree index page to halves and inserts the tuple. It is assumed +that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is +released within this function! NOTE that the operation of this +function must always succeed, we cannot reverse it: therefore enough +free disk space (2 pages) must be guaranteed to be available before +this function is called. 
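A note on the MBR_*_CMP macros above: they only require xmin/xmax/ymin/ymax members on their operands, so their behaviour can be checked with a trivial box type. The self-contained check below copies two of the macros verbatim from the hunk; the box type and values are illustrative, not InnoDB's rtr_mbr_t.

/* --- illustrative sketch, not part of the patch --- */
#include <cassert>

struct box_t { double xmin, xmax, ymin, ymax; };

#define MBR_CONTAIN_CMP(a, b)					\
	((((b)->xmin >= (a)->xmin) && ((b)->xmax <= (a)->xmax)	\
	  && ((b)->ymin >= (a)->ymin) && ((b)->ymax <= (a)->ymax)))

#define MBR_WITHIN_CMP(a, b)					\
	((((b)->xmin <= (a)->xmin) && ((b)->xmax >= (a)->xmax))	\
	 && (((b)->ymin <= (a)->ymin) && ((b)->ymax >= (a)->ymax)))

int main()
{
	box_t	outer = {0.0, 10.0, 0.0, 10.0};
	box_t	inner = {2.0, 3.0, 2.0, 3.0};

	assert(MBR_CONTAIN_CMP(&outer, &inner));	/* outer contains inner */
	assert(MBR_WITHIN_CMP(&inner, &outer));		/* inner lies within outer */
	return 0;
}
/* --- end of sketch --- */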
+@return inserted record */ +rec_t* +rtr_page_split_and_insert( +/*======================*/ + ulint flags, /*!< in: undo logging and locking flags */ + btr_cur_t* cursor, /*!< in/out: cursor at which to insert; when the + function returns, the cursor is positioned + on the predecessor of the inserted record */ + ulint** offsets,/*!< out: offsets on inserted record */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + const dtuple_t* tuple, /*!< in: tuple to insert */ + ulint n_ext, /*!< in: number of externally stored columns */ + mtr_t* mtr); /*!< in: mtr */ + +/**************************************************************//** +Sets the child node mbr in a node pointer. */ +UNIV_INLINE +void +rtr_page_cal_mbr( +/*=============*/ + const dict_index_t* index, /*!< in: index */ + const buf_block_t* block, /*!< in: buffer block */ + rtr_mbr_t* mbr, /*!< out: MBR encapsulates the page */ + mem_heap_t* heap); /*!< in: heap for the memory + allocation */ +/*************************************************************//** +Find the next matching record. This function will first exhaust +the copied record listed in the rtr_info->matches vector before +moving to next page +@return true if there is next qualified record found, otherwise(if +exhausted) false */ +bool +rtr_pcur_move_to_next( +/*==================*/ + const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in + tuple must be set so that it cannot get + compared to the node ptr page number field! */ + page_cur_mode_t mode, /*!< in: cursor search mode */ + btr_pcur_t* cursor, /*!< in: persistent cursor; NOTE that the + function may release the page latch */ + ulint cur_level, + /*!< in: current level */ + mtr_t* mtr); /*!< in: mtr */ + +/**************************************************************//** +Restores the stored position of a persistent cursor bufferfixing the page */ +bool +rtr_cur_restore_position_func( +/*==========================*/ + ulint latch_mode, /*!< in: BTR_SEARCH_LEAF, ... */ + btr_cur_t* cursor, /*!< in: detached persistent cursor */ + ulint level, /*!< in: index level */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr); /*!< in: mtr */ +#define rtr_cur_restore_position(l,cur,level,mtr) \ + rtr_cur_restore_position_func(l,cur,level,__FILE__,__LINE__,mtr) + +/****************************************************************//** +Searches the right position in rtree for a page cursor. 
*/ +bool +rtr_cur_search_with_match( +/*======================*/ + const buf_block_t* block, /*!< in: buffer block */ + dict_index_t* index, /*!< in: index descriptor */ + const dtuple_t* tuple, /*!< in: data tuple */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, + PAGE_CUR_LE, PAGE_CUR_G, or + PAGE_CUR_GE */ + page_cur_t* cursor, /*!< in/out: page cursor */ + rtr_info_t* rtr_info);/*!< in/out: search stack */ + +/****************************************************************//** +Calculate the area increased for a new record +@return area increased */ +double +rtr_rec_cal_increase( +/*=================*/ + const dtuple_t* dtuple, /*!< in: data tuple to insert, which + cause area increase */ + const rec_t* rec, /*!< in: physical record which differs from + dtuple in some of the common fields, or which + has an equal number or more fields than + dtuple */ + const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ + double* area); /*!< out: increased area */ + +/****************************************************************//** +Following the right link to find the proper block for insert. +@return the proper block.*/ +dberr_t +rtr_ins_enlarge_mbr( +/*=================*/ + btr_cur_t* cursor, /*!< in: btr cursor */ + que_thr_t* thr, /*!< in: query thread */ + mtr_t* mtr); /*!< in: mtr */ + +/********************************************************************//** +*/ +void +rtr_get_father_node( +/*================*/ + dict_index_t* index, /*!< in: index */ + ulint level, /*!< in: the tree level of search */ + const dtuple_t* tuple, /*!< in: data tuple; NOTE: n_fields_cmp in + tuple must be set so that it cannot get + compared to the node ptr page number field! */ + btr_cur_t* sea_cur,/*!< in: search cursor */ + btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is + s- or x-latched */ + ulint page_no,/*!< in: current page no */ + mtr_t* mtr); /*!< in: mtr */ + +/**************************************************************//** +push a nonleaf index node to the search path */ +UNIV_INLINE +void +rtr_non_leaf_stack_push( +/*====================*/ + rtr_node_path_t* path, /*!< in/out: search path */ + ulint pageno, /*!< in: pageno to insert */ + node_seq_t seq_no, /*!< in: Node sequence num */ + ulint level, /*!< in: index level */ + ulint child_no, /*!< in: child page no */ + btr_pcur_t* cursor, /*!< in: position cursor */ + double mbr_inc); /*!< in: MBR needs to be + enlarged */ + +/**************************************************************//** +push a nonleaf index node to the search path for insertion */ +void +rtr_non_leaf_insert_stack_push( +/*===========================*/ + dict_index_t* index, /*!< in: index descriptor */ + rtr_node_path_t* path, /*!< in/out: search path */ + ulint level, /*!< in: index level */ + const buf_block_t* block, /*!< in: block of the page */ + const rec_t* rec, /*!< in: positioned record */ + double mbr_inc); /*!< in: MBR needs to be + enlarged */ + +/*****************************************************************//** +Allocates a new Split Sequence Number. +@return new SSN id */ +UNIV_INLINE +node_seq_t +rtr_get_new_ssn_id( +/*===============*/ + dict_index_t* index); /*!< in: the index struct */ + +/*****************************************************************//** +Get the current Split Sequence Number. 
+@return current SSN id */ +UNIV_INLINE +node_seq_t +rtr_get_current_ssn_id( +/*===================*/ + dict_index_t* index); /*!< in/out: the index struct */ + +/********************************************************************//** +Create a RTree search info structure */ +rtr_info_t* +rtr_create_rtr_info( +/******************/ + bool need_prdt, /*!< in: Whether predicate lock is + needed */ + bool init_matches, /*!< in: Whether to initiate the + "matches" structure for collecting + matched leaf records */ + btr_cur_t* cursor, /*!< in: tree search cursor */ + dict_index_t* index); /*!< in: index struct */ + +/********************************************************************//** +Update a btr_cur_t with rtr_info */ +void +rtr_info_update_btr( +/******************/ + btr_cur_t* cursor, /*!< in/out: tree cursor */ + rtr_info_t* rtr_info); /*!< in: rtr_info to set to the + cursor */ + +/********************************************************************//** +Update a btr_cur_t with rtr_info */ +void +rtr_init_rtr_info( +/****************/ + rtr_info_t* rtr_info, /*!< in: rtr_info to set to the + cursor */ + bool need_prdt, /*!< in: Whether predicate lock is + needed */ + btr_cur_t* cursor, /*!< in: tree search cursor */ + dict_index_t* index, /*!< in: index structure */ + bool reinit); /*!< in: Whether this is a reinit */ + +/**************************************************************//** +Clean up Rtree cursor */ +void +rtr_clean_rtr_info( +/*===============*/ + rtr_info_t* rtr_info, /*!< in: RTree search info */ + bool free_all); /*!< in: need to free rtr_info itself */ + +/****************************************************************//** +Get the bounding box content from an index record*/ +void +rtr_get_mbr_from_rec( +/*=================*/ + const rec_t* rec, /*!< in: data tuple */ + const ulint* offsets,/*!< in: offsets array */ + rtr_mbr_t* mbr); /*!< out MBR */ + +/****************************************************************//** +Get the bounding box content from a MBR data record */ +void +rtr_get_mbr_from_tuple( +/*===================*/ + const dtuple_t* dtuple, /*!< in: data tuple */ + rtr_mbr* mbr); /*!< out: mbr to fill */ + +#define rtr_page_get_father_node_ptr(of,heap,sea,cur,mtr) \ + rtr_page_get_father_node_ptr_func(of,heap,sea,cur,__FILE__,__LINE__,mtr) + +/************************************************************//** +Returns the upper level node pointer to a R-Tree page. It is assumed +that mtr holds an x-latch on the tree. +@return rec_get_offsets() of the node pointer record */ +ulint* +rtr_page_get_father_node_ptr_func( +/*==============================*/ + ulint* offsets,/*!< in: work area for the return value */ + mem_heap_t* heap, /*!< in: memory heap to use */ + btr_cur_t* sea_cur,/*!< in: search cursor */ + btr_cur_t* cursor, /*!< in: cursor pointing to user record, + out: cursor on node pointer record, + its page x-latched */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr); /*!< in: mtr */ + + +/************************************************************//** +Returns the father block to a page. It is assumed that mtr holds +an X or SX latch on the tree. 
+@return rec_get_offsets() of the node pointer record */ +ulint* +rtr_page_get_father_block( +/*======================*/ + ulint* offsets,/*!< in: work area for the return value */ + mem_heap_t* heap, /*!< in: memory heap to use */ + dict_index_t* index, /*!< in: b-tree index */ + buf_block_t* block, /*!< in: child page in the index */ + mtr_t* mtr, /*!< in: mtr */ + btr_cur_t* sea_cur,/*!< in: search cursor, contains information + about parent nodes in search */ + btr_cur_t* cursor);/*!< out: cursor on node pointer record, + its page x-latched */ +/**************************************************************//** +Store the parent path cursor +@return number of cursor stored */ +ulint +rtr_store_parent_path( +/*==================*/ + const buf_block_t* block, /*!< in: block of the page */ + btr_cur_t* btr_cur,/*!< in/out: persistent cursor */ + ulint latch_mode, + /*!< in: latch_mode */ + ulint level, /*!< in: index level */ + mtr_t* mtr); /*!< in: mtr */ + +/**************************************************************//** +Initializes and opens a persistent cursor to an index tree. It should be +closed with btr_pcur_close. */ +void +rtr_pcur_open_low( +/*==============*/ + dict_index_t* index, /*!< in: index */ + ulint level, /*!< in: level in the btree */ + const dtuple_t* tuple, /*!< in: tuple on which search done */ + page_cur_mode_t mode, /*!< in: PAGE_CUR_L, ...; + NOTE that if the search is made using a unique + prefix of a record, mode should be + PAGE_CUR_LE, not PAGE_CUR_GE, as the latter + may end up on the previous page from the + record! */ + ulint latch_mode,/*!< in: BTR_SEARCH_LEAF, ... */ + btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */ + const char* file, /*!< in: file name */ + ulint line, /*!< in: line where called */ + mtr_t* mtr); /*!< in: mtr */ + +#define rtr_pcur_open(i,t,md,l,c,m) \ + rtr_pcur_open_low(i,0,t,md,l,c,__FILE__,__LINE__,m) + +struct btr_cur_t; + +/*********************************************************//** +Returns the R-Tree node stored in the parent search path +@return pointer to R-Tree cursor component */ +UNIV_INLINE +node_visit_t* +rtr_get_parent_node( +/*================*/ + btr_cur_t* btr_cur, /*!< in: persistent cursor */ + ulint level, /*!< in: index level of buffer page */ + ulint is_insert); /*!< in: whether it is insert */ + +/*********************************************************//** +Returns the R-Tree cursor stored in the parent search path +@return pointer to R-Tree cursor component */ +UNIV_INLINE +btr_pcur_t* +rtr_get_parent_cursor( +/*==================*/ + btr_cur_t* btr_cur, /*!< in: persistent cursor */ + ulint level, /*!< in: index level of buffer page */ + ulint is_insert); /*!< in: whether insert operation */ + +/*************************************************************//** +Copy recs from a page to new_block of rtree. */ +void +rtr_page_copy_rec_list_end_no_locks( +/*================================*/ + buf_block_t* new_block, /*!< in: index page to copy to */ + buf_block_t* block, /*!< in: index page of rec */ + rec_t* rec, /*!< in: record on page */ + dict_index_t* index, /*!< in: record descriptor */ + mem_heap_t* heap, /*!< in/out: heap memory */ + rtr_rec_move_t* rec_move, /*!< in: recording records moved */ + ulint max_move, /*!< in: num of rec to move */ + ulint* num_moved, /*!< out: num of rec to move */ + mtr_t* mtr); /*!< in: mtr */ + +/*************************************************************//** +Copy recs till a specified rec from a page to new_block of rtree. 
*/ +void +rtr_page_copy_rec_list_start_no_locks( +/*==================================*/ + buf_block_t* new_block, /*!< in: index page to copy to */ + buf_block_t* block, /*!< in: index page of rec */ + rec_t* rec, /*!< in: record on page */ + dict_index_t* index, /*!< in: record descriptor */ + mem_heap_t* heap, /*!< in/out: heap memory */ + rtr_rec_move_t* rec_move, /*!< in: recording records moved */ + ulint max_move, /*!< in: num of rec to move */ + ulint* num_moved, /*!< out: num of rec to move */ + mtr_t* mtr); /*!< in: mtr */ + +/****************************************************************//** +Merge 2 mbrs and update the the mbr that cursor is on. */ +dberr_t +rtr_merge_and_update_mbr( +/*=====================*/ + btr_cur_t* cursor, /*!< in/out: cursor */ + btr_cur_t* cursor2, /*!< in: the other cursor */ + ulint* offsets, /*!< in: rec offsets */ + ulint* offsets2, /*!< in: rec offsets */ + page_t* child_page, /*!< in: the child page. */ + buf_block_t* merge_block, /*!< in: page to merge */ + buf_block_t* block, /*!< in: page be merged */ + dict_index_t* index, /*!< in: index */ + mtr_t* mtr); /*!< in: mtr */ + +/*************************************************************//** +Deletes on the upper level the node pointer to a page. */ +void +rtr_node_ptr_delete( +/*================*/ + dict_index_t* index, /*!< in: index tree */ + btr_cur_t* sea_cur,/*!< in: search cursor, contains information + about parent nodes in search */ + buf_block_t* block, /*!< in: page whose node pointer is deleted */ + mtr_t* mtr); /*!< in: mtr */ + +/****************************************************************//** +Check two MBRs are identical or need to be merged */ +bool +rtr_merge_mbr_changed( +/*==================*/ + btr_cur_t* cursor, /*!< in: cursor */ + btr_cur_t* cursor2, /*!< in: the other cursor */ + ulint* offsets, /*!< in: rec offsets */ + ulint* offsets2, /*!< in: rec offsets */ + rtr_mbr_t* new_mbr, /*!< out: MBR to update */ + buf_block_t* merge_block, /*!< in: page to merge */ + buf_block_t* block, /*!< in: page be merged */ + dict_index_t* index); /*!< in: index */ + + +/**************************************************************//** +Update the mbr field of a spatial index row. +@return true if successful */ +bool +rtr_update_mbr_field( +/*=================*/ + btr_cur_t* cursor, /*!< in: cursor pointed to rec.*/ + ulint* offsets, /*!< in: offsets on rec. */ + btr_cur_t* cursor2, /*!< in/out: cursor pointed to rec + that should be deleted. + this cursor is for btr_compress to + delete the merged page's father rec.*/ + page_t* child_page, /*!< in: child page. */ + rtr_mbr_t* new_mbr, /*!< in: the new mbr. */ + rec_t* new_rec, /*!< in: rec to use */ + mtr_t* mtr); /*!< in: mtr */ + +/**************************************************************//** +Check whether a Rtree page is child of a parent page +@return true if there is child/parent relationship */ +bool +rtr_check_same_block( +/*=================*/ + dict_index_t* index, /*!< in: index tree */ + btr_cur_t* cur, /*!< in/out: position at the parent entry + pointing to the child if successful */ + buf_block_t* parentb,/*!< in: parent page to check */ + buf_block_t* childb, /*!< in: child Page */ + mem_heap_t* heap); /*!< in: memory heap */ + +/*********************************************************************//** +Sets pointer to the data and length in a field. 
*/ +UNIV_INLINE +void +rtr_write_mbr( +/*==========*/ + byte* data, /*!< out: data */ + const rtr_mbr_t* mbr); /*!< in: data */ + +/*********************************************************************//** +Sets pointer to the data and length in a field. */ +UNIV_INLINE +void +rtr_read_mbr( +/*==========*/ + const byte* data, /*!< in: data */ + rtr_mbr_t* mbr); /*!< out: data */ + +/**************************************************************//** +Check whether a discarding page is in anyone's search path */ +void +rtr_check_discard_page( +/*===================*/ + dict_index_t* index, /*!< in: index */ + btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on + the root page */ + buf_block_t* block); /*!< in: block of page to be discarded */ + +/********************************************************************//** +Reinitialize a RTree search info */ +UNIV_INLINE +void +rtr_info_reinit_in_cursor( +/************************/ + btr_cur_t* cursor, /*!< in/out: tree cursor */ + dict_index_t* index, /*!< in: index struct */ + bool need_prdt); /*!< in: Whether predicate lock is + needed */ + +/** Estimates the number of rows in a given area. +@param[in] index index +@param[in] tuple range tuple containing mbr, may also be empty tuple +@param[in] mode search mode +@return estimated number of rows */ +int64_t +rtr_estimate_n_rows_in_range( + dict_index_t* index, + const dtuple_t* tuple, + page_cur_mode_t mode); + +#ifndef UNIV_NONINL +#include "gis0rtree.ic" +#endif +#endif /*!< gis0rtree.h */ diff --git a/storage/innobase/include/gis0rtree.ic b/storage/innobase/include/gis0rtree.ic new file mode 100644 index 00000000000..a30db122273 --- /dev/null +++ b/storage/innobase/include/gis0rtree.ic @@ -0,0 +1,274 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file include gis0rtree.h +R-tree Inline code + +Created 2013/03/27 Jimmy Yang and Allen Lai +***********************************************************************/ + +/**************************************************************//** +Sets the child node mbr in a node pointer. 
*/ +UNIV_INLINE +void +rtr_page_cal_mbr( +/*=============*/ + const dict_index_t* index, /*!< in: index */ + const buf_block_t* block, /*!< in: buffer block */ + rtr_mbr_t* rtr_mbr,/*!< out: MBR encapsulates the page */ + mem_heap_t* heap) /*!< in: heap for the memory + allocation */ +{ + page_t* page; + rec_t* rec; + byte* field; + ulint len; + ulint* offsets = NULL; + double bmin, bmax; + double* amin; + double* amax; + ulint inc = 0; + double* mbr; + + rtr_mbr->xmin = DBL_MAX; + rtr_mbr->ymin = DBL_MAX; + rtr_mbr->xmax = -DBL_MAX; + rtr_mbr->ymax = -DBL_MAX; + + mbr = reinterpret_cast(rtr_mbr); + + page = buf_block_get_frame(block); + + rec = page_rec_get_next(page_get_infimum_rec(page)); + offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap); + + do { + /* The mbr address is in the first field. */ + field = rec_get_nth_field(rec, offsets, 0, &len); + + ut_ad(len == DATA_MBR_LEN); + inc = 0; + for (int i = 0; i < SPDIMS; i++) { + bmin = mach_double_read(field + inc); + bmax = mach_double_read(field + inc + sizeof(double)); + + amin = mbr + i * SPDIMS; + amax = mbr + i * SPDIMS + 1; + + if (*amin > bmin) + *amin = bmin; + if (*amax < bmax) + *amax = bmax; + + inc += 2 * sizeof(double); + } + + rec = page_rec_get_next(rec); + + if (rec == NULL) { + break; + } + } while (!page_rec_is_supremum(rec)); +} + +/**************************************************************//** +push a nonleaf index node to the search path */ +UNIV_INLINE +void +rtr_non_leaf_stack_push( +/*====================*/ + rtr_node_path_t* path, /*!< in/out: search path */ + ulint pageno, /*!< in: pageno to insert */ + node_seq_t seq_no, /*!< in: Node sequence num */ + ulint level, /*!< in: index page level */ + ulint child_no, /*!< in: child page no */ + btr_pcur_t* cursor, /*!< in: position cursor */ + double mbr_inc) /*!< in: MBR needs to be + enlarged */ +{ + node_visit_t insert_val; + + insert_val.page_no = pageno; + insert_val.seq_no = seq_no; + insert_val.level = level; + insert_val.child_no = child_no; + insert_val.cursor = cursor; + insert_val.mbr_inc = mbr_inc; + + path->push_back(insert_val); + +#ifdef RTR_SEARCH_DIAGNOSTIC + fprintf(stderr, "INNODB_RTR: Push page %d, level %d, seq %d" + " to search stack \n", + static_cast(pageno), static_cast(level), + static_cast(seq_no)); +#endif /* RTR_SEARCH_DIAGNOSTIC */ +} + +/*****************************************************************//** +Allocates a new Split Sequence Number. +@return new SSN id */ +UNIV_INLINE +node_seq_t +rtr_get_new_ssn_id( +/*===============*/ + dict_index_t* index) /*!< in/out: the index struct */ +{ + node_seq_t ssn; + + mutex_enter(&(index->rtr_ssn.mutex)); + ssn = ++index->rtr_ssn.seq_no; + mutex_exit(&(index->rtr_ssn.mutex)); + + return(ssn); +} +/*****************************************************************//** +Get the current Split Sequence Number. +@return current SSN id */ +UNIV_INLINE +node_seq_t +rtr_get_current_ssn_id( +/*===================*/ + dict_index_t* index) /*!< in: index struct */ +{ + node_seq_t ssn; + + mutex_enter(&(index->rtr_ssn.mutex)); + ssn = index->rtr_ssn.seq_no; + mutex_exit(&(index->rtr_ssn.mutex)); + + return(ssn); +} + +/*********************************************************************//** +Sets pointer to the data and length in a field. 
*/ +UNIV_INLINE +void +rtr_write_mbr( +/*==========*/ + byte* data, /*!< out: data */ + const rtr_mbr_t* mbr) /*!< in: data */ +{ + const double* my_mbr = reinterpret_cast(mbr); + + for (int i = 0; i < SPDIMS * 2; i++) { + mach_double_write(data + i * sizeof(double), my_mbr[i]); + } +} + +/*********************************************************************//** +Sets pointer to the data and length in a field. */ +UNIV_INLINE +void +rtr_read_mbr( +/*==========*/ + const byte* data, /*!< in: data */ + rtr_mbr_t* mbr) /*!< out: MBR */ +{ + for (int i = 0; i < SPDIMS * 2; i++) { + (reinterpret_cast(mbr))[i] = mach_double_read( + data + + i * sizeof(double)); + } +} + +/*********************************************************//** +Returns the R-Tree node stored in the parent search path +@return pointer to R-Tree cursor component in the parent path, +NULL if parent path is empty or index is larger than num of items contained */ +UNIV_INLINE +node_visit_t* +rtr_get_parent_node( +/*================*/ + btr_cur_t* btr_cur, /*!< in: persistent cursor */ + ulint level, /*!< in: index level of buffer page */ + ulint is_insert) /*!< in: whether it is insert */ +{ + ulint num; + ulint tree_height = btr_cur->tree_height; + node_visit_t* found_node = NULL; + + if (level >= tree_height) { + return(NULL); + } + + mutex_enter(&btr_cur->rtr_info->rtr_path_mutex); + + num = btr_cur->rtr_info->parent_path->size(); + + if (!num) { + mutex_exit(&btr_cur->rtr_info->rtr_path_mutex); + return(NULL); + } + + if (is_insert) { + ulint idx = tree_height - level - 1; + ut_ad(idx < num); + + found_node = &(*btr_cur->rtr_info->parent_path)[idx]; + } else { + node_visit_t* node; + + while (num > 0) { + node = &(*btr_cur->rtr_info->parent_path)[num - 1]; + + if (node->level == level) { + found_node = node; + break; + } + num--; + } + } + + mutex_exit(&btr_cur->rtr_info->rtr_path_mutex); + + return(found_node); +} + +/*********************************************************//** +Returns the R-Tree cursor stored in the parent search path +@return pointer to R-Tree cursor component */ +UNIV_INLINE +btr_pcur_t* +rtr_get_parent_cursor( +/*==================*/ + btr_cur_t* btr_cur, /*!< in: persistent cursor */ + ulint level, /*!< in: index level of buffer page */ + ulint is_insert) /*!< in: whether insert operation */ +{ + node_visit_t* found_node = rtr_get_parent_node( + btr_cur, level, is_insert); + + return((found_node) ? found_node->cursor : NULL); +} + +/********************************************************************//** +Reinitialize a R-Tree search info in btr_cur_t */ +UNIV_INLINE +void +rtr_info_reinit_in_cursor( +/************************/ + btr_cur_t* cursor, /*!< in/out: tree cursor */ + dict_index_t* index, /*!< in: index struct */ + bool need_prdt) /*!< in: Whether predicate lock is + needed */ +{ + rtr_clean_rtr_info(cursor->rtr_info, false); + rtr_init_rtr_info(cursor->rtr_info, need_prdt, cursor, index, true); +} diff --git a/storage/innobase/include/gis0type.h b/storage/innobase/include/gis0type.h new file mode 100644 index 00000000000..14c098f9608 --- /dev/null +++ b/storage/innobase/include/gis0type.h @@ -0,0 +1,168 @@ +/***************************************************************************** + +Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. 
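A note on rtr_write_mbr()/rtr_read_mbr() above: an MBR is serialized as SPDIMS * 2 consecutive machine-format doubles. The round-trip sketch below mirrors that layout, with memcpy standing in for mach_double_write()/mach_double_read(), which use InnoDB's canonical byte order.

/* --- illustrative sketch, not part of the patch --- */
#include <cassert>
#include <cstring>

static const int SPDIMS = 2;		/* x and y, as in the R-tree code */

struct mbr_t { double xmin, xmax, ymin, ymax; };

int main()
{
	unsigned char	buf[SPDIMS * 2 * sizeof(double)];
	mbr_t		in  = {1.0, 4.0, 2.0, 8.0};
	mbr_t		out = {0.0, 0.0, 0.0, 0.0};

	const double*	src = reinterpret_cast<const double*>(&in);
	double*		dst = reinterpret_cast<double*>(&out);

	for (int i = 0; i < SPDIMS * 2; i++) {	/* cf. rtr_write_mbr() */
		std::memcpy(buf + i * sizeof(double), &src[i], sizeof(double));
	}

	for (int i = 0; i < SPDIMS * 2; i++) {	/* cf. rtr_read_mbr() */
		std::memcpy(&dst[i], buf + i * sizeof(double), sizeof(double));
	}

	assert(out.xmin == 1.0 && out.xmax == 4.0
	       && out.ymin == 2.0 && out.ymax == 8.0);
	return 0;
}
/* --- end of sketch --- */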
+ +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file include gis0type.h +R-tree header file + +Created 2013/03/27 Jimmy Yang +***********************************************************************/ + +#ifndef gis0type_h +#define gis0type_h + +#include "univ.i" + +#include "buf0buf.h" +#include "data0type.h" +#include "data0types.h" +#include "dict0types.h" +#include "hash0hash.h" +#include "mem0mem.h" +#include "rem0types.h" +#include "row0types.h" +#include "trx0types.h" +#include "ut0vec.h" +#include "ut0wqueue.h" +#include "que0types.h" +#include "gis0geo.h" +#include "ut0new.h" + +#include +#include + +/* Node Sequence Number. Only updated when page splits */ +typedef ib_uint32_t node_seq_t; + +/* RTree internal non-leaf Nodes to be searched, from root to leaf */ +typedef struct node_visit { + ulint page_no; /*!< the page number */ + node_seq_t seq_no; /*!< the SSN (split sequence number */ + ulint level; /*!< the page's index level */ + ulint child_no; /*!< child page num if for parent + recording */ + btr_pcur_t* cursor; /*!< cursor structure if we positioned + FIXME: there is no need to use whole + btr_pcur_t, just the position related + members */ + double mbr_inc; /*!< whether this node needs to be + enlarged for insertion */ +} node_visit_t; + +typedef std::vector > rtr_node_path_t; + +typedef struct rtr_rec { + rec_t* r_rec; /*!< matched record */ + bool locked; /*!< whether the record locked */ +} rtr_rec_t; + +typedef std::vector > rtr_rec_vector; + +/* Structure for matched records on the leaf page */ +typedef struct matched_rec { + byte* bufp; /*!< aligned buffer point */ + byte rec_buf[UNIV_PAGE_SIZE_MAX * 2]; + /*!< buffer used to copy matching rec */ + buf_block_t block; /*!< the shadow buffer block */ + ulint used; /*!< memory used */ + rtr_rec_vector* matched_recs; /*!< vector holding the matching rec */ + ib_mutex_t rtr_match_mutex;/*!< mutex protect the match_recs + vector */ + bool valid; /*!< whether result in matched_recs + or this search is valid (page not + dropped) */ + bool locked; /*!< whether these recs locked */ +} matched_rec_t; + +/* In memory representation of a minimum bounding rectangle */ +typedef struct rtr_mbr { + double xmin; /*!< minimum on x */ + double xmax; /*!< maximum on x */ + double ymin; /*!< minimum on y */ + double ymax; /*!< maximum on y */ +} rtr_mbr_t; + +/* Maximum index level for R-Tree, this is consistent with BTR_MAX_LEVELS */ +#define RTR_MAX_LEVELS 100 + +/* Number of pages we latch at leaf level when there is possible Tree +modification (split, shrink), we always latch left, current +and right pages */ +#define RTR_LEAF_LATCH_NUM 3 + +/** Vectors holding the matching internal pages/nodes and leaf records */ +typedef struct rtr_info{ + rtr_node_path_t*path; /*!< vector holding matching pages */ + rtr_node_path_t*parent_path; + /*!< vector holding parent pages during + search */ + matched_rec_t* matches;/*!< struct holding matching leaf records */ + ib_mutex_t rtr_path_mutex; + /*!< 
+ buf_block_t* tree_blocks[RTR_MAX_LEVELS + RTR_LEAF_LATCH_NUM];
+ /*!< tracking pages that would be locked
+ at leaf level, for future free */
+ ulint tree_savepoints[RTR_MAX_LEVELS + RTR_LEAF_LATCH_NUM];
+ /*!< savepoint used to release latches/blocks
+ on each level and leaf level */
+ rtr_mbr_t mbr; /*!< the search MBR */
+ que_thr_t* thr; /*!< the search thread */
+ mem_heap_t* heap; /*!< memory heap */
+ btr_cur_t* cursor; /*!< cursor used for search */
+ dict_index_t* index; /*!< index it is searching */
+ bool need_prdt_lock;
+ /*!< whether we will need predicate lock
+ the tree */
+ bool need_page_lock;
+ /*!< whether we will need predicate page lock
+ the tree */
+ bool allocated;/*!< whether this structure is allocated or
+ on stack */
+ bool mbr_adj;/*!< whether mbr will need to be enlarged
+ for an insertion operation */
+ bool fd_del; /*!< found deleted row */
+ const dtuple_t* search_tuple;
+ /*!< search tuple being used */
+ page_cur_mode_t search_mode;
+ /*!< current search mode */
+} rtr_info_t;
+
+typedef std::list<rtr_info_t*, ut_allocator<rtr_info_t*> > rtr_info_active;
+
+/* Tracking structure for all ongoing search for an index */
+typedef struct rtr_info_track {
+ rtr_info_active* rtr_active; /*!< Active search info */
+ ib_mutex_t rtr_active_mutex;
+ /*!< mutex to protect
+ rtr_active */
+} rtr_info_track_t;
+
+/* Node Sequence Number and mutex protects it. */
+typedef struct rtree_ssn {
+ ib_mutex_t mutex; /*!< mutex protect the seq num */
+ node_seq_t seq_no; /*!< the SSN (node sequence number) */
+} rtr_ssn_t;
+
+/* This is to record the record movement between pages. Used for corresponding
+lock movement */
+typedef struct rtr_rec_move {
+ rec_t* old_rec; /*!< record being moved in old page */
+ rec_t* new_rec; /*!< new record location */
+ bool moved; /*!< whether locks are moved too */
+} rtr_rec_move_t;
+#endif /*!< gis0rtree.h */
diff --git a/storage/innobase/include/ha0ha.h b/storage/innobase/include/ha0ha.h
index 07ab20ab995..11c12c4ebc3 100644
--- a/storage/innobase/include/ha0ha.h
+++ b/storage/innobase/include/ha0ha.h
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,6 @@ ha_search_and_get_data(
 Looks for an element when we know the pointer to the data and
 updates the pointer to data if found.
 @return TRUE if found */
-UNIV_INTERN
 ibool
 ha_search_and_update_if_found_func(
 /*===============================*/
@@ -62,66 +61,55 @@ ha_search_and_update_if_found_func(
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 /** Looks for an element when we know the pointer to the data
 and updates the pointer to data if found.
-@param table in/out: hash table -@param fold in: folded value of the searched data -@param data in: pointer to the data -@param new_block in: block containing new_data -@param new_data in: new pointer to the data */ +@param table in/out: hash table +@param fold in: folded value of the searched data +@param data in: pointer to the data +@param new_block in: block containing new_data +@param new_data in: new pointer to the data */ # define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \ ha_search_and_update_if_found_func(table,fold,data,new_block,new_data) #else /* UNIV_AHI_DEBUG || UNIV_DEBUG */ /** Looks for an element when we know the pointer to the data and updates the pointer to data if found. -@param table in/out: hash table -@param fold in: folded value of the searched data -@param data in: pointer to the data -@param new_block ignored: block containing new_data -@param new_data in: new pointer to the data */ +@param table in/out: hash table +@param fold in: folded value of the searched data +@param data in: pointer to the data +@param new_block ignored: block containing new_data +@param new_data in: new pointer to the data */ # define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \ ha_search_and_update_if_found_func(table,fold,data,new_data) #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ + /*************************************************************//** Creates a hash table with at least n array cells. The actual number of cells is chosen to be a prime number slightly bigger than n. -@return own: created table */ -UNIV_INTERN +@return own: created table */ hash_table_t* -ha_create_func( -/*===========*/ - ulint n, /*!< in: number of array cells */ -#ifdef UNIV_SYNC_DEBUG - ulint mutex_level, /*!< in: level of the mutexes in the latching - order: this is used in the debug version */ -#endif /* UNIV_SYNC_DEBUG */ - ulint n_mutexes, /*!< in: number of mutexes to protect the +ib_create( +/*======*/ + ulint n, /*!< in: number of array cells */ + latch_id_t id, /*!< in: latch ID */ + ulint n_mutexes,/*!< in: number of mutexes to protect the hash table: must be a power of 2, or 0 */ - ulint type); /*!< in: type of datastructure for which + ulint type); /*!< in: type of datastructure for which the memory heap is going to be used e.g.: MEM_HEAP_FOR_BTR_SEARCH or MEM_HEAP_FOR_PAGE_HASH */ -#ifdef UNIV_SYNC_DEBUG -/** Creates a hash table. -@return own: created table -@param n_c in: number of array cells. The actual number of cells is -chosen to be a slightly bigger prime number. -@param level in: level of the mutexes in the latching order -@param n_m in: number of mutexes to protect the hash table; - must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) -#else /* UNIV_SYNC_DEBUG */ -/** Creates a hash table. -@return own: created table -@param n_c in: number of array cells. The actual number of cells is -chosen to be a slightly bigger prime number. -@param level in: level of the mutexes in the latching order -@param n_m in: number of mutexes to protect the hash table; - must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) -#endif /* UNIV_SYNC_DEBUG */ + +/** Recreate a hash table with at least n array cells. The actual number +of cells is chosen to be a prime number slightly bigger than n. +The new cells are all cleared. The heaps are recreated. +The sync objects are reused. 
+@param[in,out] table hash table to be resuzed (to be freed later) +@param[in] n number of array cells +@return resized new table */ +hash_table_t* +ib_recreate( + hash_table_t* table, + ulint n); /*************************************************************//** Empties a hash table and frees the memory heaps. */ -UNIV_INTERN void ha_clear( /*=====*/ @@ -131,8 +119,7 @@ ha_clear( Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. -@return TRUE if succeed, FALSE if no more memory could be allocated */ -UNIV_INTERN +@return TRUE if succeed, FALSE if no more memory could be allocated */ ibool ha_insert_for_fold_func( /*====================*/ @@ -151,11 +138,11 @@ ha_insert_for_fold_func( Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. -@return TRUE if succeed, FALSE if no more memory could be allocated -@param t in: hash table -@param f in: folded value of data -@param b in: buffer block containing the data -@param d in: data, must not be NULL */ +@return TRUE if succeed, FALSE if no more memory could be allocated +@param t in: hash table +@param f in: folded value of data +@param b in: buffer block containing the data +@param d in: data, must not be NULL */ # define ha_insert_for_fold(t,f,b,d) do { \ ha_insert_for_fold_func(t,f,b,d); \ MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \ @@ -165,11 +152,11 @@ is inserted. Inserts an entry into a hash table. If an entry with the same fold number is found, its node is updated to point to the new data, and no new node is inserted. -@return TRUE if succeed, FALSE if no more memory could be allocated -@param t in: hash table -@param f in: folded value of data -@param b ignored: buffer block containing the data -@param d in: data, must not be NULL */ +@return TRUE if succeed, FALSE if no more memory could be allocated +@param t in: hash table +@param f in: folded value of data +@param b ignored: buffer block containing the data +@param d in: data, must not be NULL */ # define ha_insert_for_fold(t,f,b,d) do { \ ha_insert_for_fold_func(t,f,d); \ MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \ @@ -179,7 +166,7 @@ is inserted. /*********************************************************//** Looks for an element when we know the pointer to the data and deletes it from the hash table if found. -@return TRUE if found */ +@return TRUE if found */ UNIV_INLINE ibool ha_search_and_delete_if_found( @@ -191,7 +178,6 @@ ha_search_and_delete_if_found( /*****************************************************************//** Removes from the chain determined by fold all nodes whose data pointer points to the page given. */ -UNIV_INTERN void ha_remove_all_nodes_to_page( /*========================*/ @@ -201,8 +187,7 @@ ha_remove_all_nodes_to_page( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /*************************************************************//** Validates a given range of the cells in hash table. -@return TRUE if ok */ -UNIV_INTERN +@return TRUE if ok */ ibool ha_validate( /*========*/ @@ -212,7 +197,6 @@ ha_validate( #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */ /*************************************************************//** Prints info of a hash table. 
*/ -UNIV_INTERN void ha_print_info( /*==========*/ @@ -222,12 +206,12 @@ ha_print_info( /** The hash table external chain node */ struct ha_node_t { + ulint fold; /*!< fold value for the data */ ha_node_t* next; /*!< next chain node or NULL if none */ #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG buf_block_t* block; /*!< buffer block containing the data, or NULL */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ const rec_t* data; /*!< pointer to the data */ - ulint fold; /*!< fold value for the data */ }; #ifdef UNIV_DEBUG diff --git a/storage/innobase/include/ha0ha.ic b/storage/innobase/include/ha0ha.ic index c478ff54303..854ff9bc046 100644 --- a/storage/innobase/include/ha0ha.ic +++ b/storage/innobase/include/ha0ha.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,7 +29,6 @@ Created 8/18/1994 Heikki Tuuri /***********************************************************//** Deletes a hash node. */ -UNIV_INTERN void ha_delete_hash_node( /*================*/ @@ -38,7 +37,7 @@ ha_delete_hash_node( /******************************************************************//** Gets a hash node data. -@return pointer to the data */ +@return pointer to the data */ UNIV_INLINE const rec_t* ha_node_get_data( @@ -68,33 +67,33 @@ ha_node_set_data_func( #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /** Sets hash node data. -@param n in: hash chain node -@param b in: buffer block containing the data -@param d in: pointer to the data */ +@param n in: hash chain node +@param b in: buffer block containing the data +@param d in: pointer to the data */ # define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d) #else /* UNIV_AHI_DEBUG || UNIV_DEBUG */ /** Sets hash node data. -@param n in: hash chain node -@param b in: buffer block containing the data -@param d in: pointer to the data */ +@param n in: hash chain node +@param b in: buffer block containing the data +@param d in: pointer to the data */ # define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d) #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ /******************************************************************//** Gets the next node in a hash chain. -@return next node, NULL if none */ +@return next node, NULL if none */ UNIV_INLINE ha_node_t* ha_chain_get_next( /*==============*/ - ha_node_t* node) /*!< in: hash chain node */ + const ha_node_t* node) /*!< in: hash chain node */ { return(node->next); } /******************************************************************//** Gets the first node in a hash chain. 
-@return first node, NULL if none */ +@return first node, NULL if none */ UNIV_INLINE ha_node_t* ha_chain_get_first( @@ -122,9 +121,9 @@ hash_assert_can_modify( if (table->type == HASH_TABLE_SYNC_MUTEX) { ut_ad(mutex_own(hash_get_mutex(table, fold))); } else if (table->type == HASH_TABLE_SYNC_RW_LOCK) { -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG rw_lock_t* lock = hash_get_lock(table, fold); - ut_ad(rw_lock_own(lock, RW_LOCK_EX)); + ut_ad(rw_lock_own(lock, RW_LOCK_X)); # endif } else { ut_ad(table->type == HASH_TABLE_SYNC_NONE); @@ -145,10 +144,10 @@ hash_assert_can_search( if (table->type == HASH_TABLE_SYNC_MUTEX) { ut_ad(mutex_own(hash_get_mutex(table, fold))); } else if (table->type == HASH_TABLE_SYNC_RW_LOCK) { -# ifdef UNIV_SYNC_DEBUG +# ifdef UNIV_DEBUG rw_lock_t* lock = hash_get_lock(table, fold); - ut_ad(rw_lock_own(lock, RW_LOCK_EX) - || rw_lock_own(lock, RW_LOCK_SHARED)); + ut_ad(rw_lock_own(lock, RW_LOCK_X) + || rw_lock_own(lock, RW_LOCK_S)); # endif } else { ut_ad(table->type == HASH_TABLE_SYNC_NONE); @@ -167,20 +166,17 @@ ha_search_and_get_data( hash_table_t* table, /*!< in: hash table */ ulint fold) /*!< in: folded value of the searched data */ { - ha_node_t* node; - hash_assert_can_search(table, fold); ut_ad(btr_search_enabled); - node = ha_chain_get_first(table, fold); + for (const ha_node_t* node = ha_chain_get_first(table, fold); + node != NULL; + node = ha_chain_get_next(node)) { - while (node) { if (node->fold == fold) { return(node->data); } - - node = ha_chain_get_next(node); } return(NULL); @@ -188,7 +184,7 @@ ha_search_and_get_data( /*********************************************************//** Looks for an element when we know the pointer to the data. -@return pointer to the hash table node, NULL if not found in the table */ +@return pointer to the hash table node, NULL if not found in the table */ UNIV_INLINE ha_node_t* ha_search_with_data( @@ -220,7 +216,7 @@ ha_search_with_data( /*********************************************************//** Looks for an element when we know the pointer to the data, and deletes it from the hash table, if found. -@return TRUE if found */ +@return TRUE if found */ UNIV_INLINE ibool ha_search_and_delete_if_found( diff --git a/storage/innobase/include/ha0storage.h b/storage/innobase/include/ha0storage.h index 0073930b502..7dd6d26bad0 100644 --- a/storage/innobase/include/ha0storage.h +++ b/storage/innobase/include/ha0storage.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2014, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -44,7 +44,7 @@ struct ha_storage_t; /*******************************************************************//** Creates a hash storage. If any of the parameters is 0, then a default value is used. -@return own: hash storage */ +@return own: hash storage */ UNIV_INLINE ha_storage_t* ha_storage_create( @@ -62,8 +62,7 @@ data_len bytes need to be allocated) and the size of storage is going to become more than "memlim" then "data" is not added and NULL is returned. To disable this behavior "memlim" can be set to 0, which stands for "no limit". 
-@return pointer to the copy */ -UNIV_INTERN +@return pointer to the copy */ const void* ha_storage_put_memlim( /*==================*/ @@ -74,10 +73,10 @@ ha_storage_put_memlim( /*******************************************************************//** Same as ha_storage_put_memlim() but without memory limit. -@param storage in/out: hash storage -@param data in: data to store -@param data_len in: data length -@return pointer to the copy of the string */ +@param storage in/out: hash storage +@param data in: data to store +@param data_len in: data length +@return pointer to the copy of the string */ #define ha_storage_put(storage, data, data_len) \ ha_storage_put_memlim((storage), (data), (data_len), 0) @@ -85,9 +84,9 @@ Same as ha_storage_put_memlim() but without memory limit. Copies string into the storage and returns a pointer to the copy. If the same string is already present, then pointer to it is returned. Strings are considered to be equal if strcmp(str1, str2) == 0. -@param storage in/out: hash storage -@param str in: string to put -@return pointer to the copy of the string */ +@param storage in/out: hash storage +@param str in: string to put +@return pointer to the copy of the string */ #define ha_storage_put_str(storage, str) \ ((const char*) ha_storage_put((storage), (str), strlen(str) + 1)) @@ -96,10 +95,10 @@ Copies string into the storage and returns a pointer to the copy obeying a memory limit. If the same string is already present, then pointer to it is returned. Strings are considered to be equal if strcmp(str1, str2) == 0. -@param storage in/out: hash storage -@param str in: string to put -@param memlim in: memory limit to obey -@return pointer to the copy of the string */ +@param storage in/out: hash storage +@param str in: string to put +@param memlim in: memory limit to obey +@return pointer to the copy of the string */ #define ha_storage_put_str_memlim(storage, str, memlim) \ ((const char*) ha_storage_put_memlim((storage), (str), \ strlen(str) + 1, (memlim))) @@ -126,7 +125,7 @@ ha_storage_free( /*******************************************************************//** Gets the size of the memory used by a storage. -@return bytes used */ +@return bytes used */ UNIV_INLINE ulint ha_storage_get_size( diff --git a/storage/innobase/include/ha0storage.ic b/storage/innobase/include/ha0storage.ic index 7150ca045ec..e841925d320 100644 --- a/storage/innobase/include/ha0storage.ic +++ b/storage/innobase/include/ha0storage.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2007, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2007, 2013, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,7 +48,7 @@ struct ha_storage_node_t { /*******************************************************************//** Creates a hash storage. If any of the parameters is 0, then a default value is used. -@return own: hash storage */ +@return own: hash storage */ UNIV_INLINE ha_storage_t* ha_storage_create( @@ -127,7 +127,7 @@ ha_storage_free( /*******************************************************************//** Gets the size of the memory used by a storage. 
-@return bytes used */ +@return bytes used */ UNIV_INLINE ulint ha_storage_get_size( diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 12453099ef7..9bf8cd2b281 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -19,33 +19,28 @@ this program; if not, write to the Free Software Foundation, Inc., /*******************************************************************//** @file include/ha_prototypes.h Prototypes for global functions in ha_innodb.cc that are called by -InnoDB C code +InnoDB C code. -Created 5/11/2006 Osku Salerma +NOTE: This header is intended to insulate InnoDB from SQL names and functions. +Do not include any headers other than univ.i into this unless they are very +simple headers. ************************************************************************/ #ifndef HA_INNODB_PROTOTYPES_H #define HA_INNODB_PROTOTYPES_H -#include "my_dbug.h" -#include "mysqld_error.h" -#include "my_compare.h" -#include "my_sys.h" -#include "m_string.h" -#include "debug_sync.h" -#include "my_base.h" +#include "univ.i" -#include "trx0types.h" -#include "m_ctype.h" /* CHARSET_INFO */ +#if !defined UNIV_HOTBACKUP && !defined UNIV_INNOCHECKSUM -// Forward declarations +/* Forward declarations */ +class THD; class Field; struct fts_string_t; /*********************************************************************//** Wrapper around MySQL's copy_and_convert function. -@return number of bytes copied to 'to' */ -UNIV_INTERN +@return number of bytes copied to 'to' */ ulint innobase_convert_string( /*====================*/ @@ -68,8 +63,7 @@ Not more than "buf_size" bytes are written to "buf". The result is always NUL-terminated (provided buf_size > 0) and the number of bytes that were written to "buf" is returned (including the terminating NUL). -@return number of bytes that were written */ -UNIV_INTERN +@return number of bytes that were written */ ulint innobase_raw_format( /*================*/ @@ -83,35 +77,50 @@ innobase_raw_format( /*****************************************************************//** Invalidates the MySQL query cache for the table. */ -UNIV_INTERN void innobase_invalidate_query_cache( /*============================*/ trx_t* trx, /*!< in: transaction which modifies the table */ const char* full_name, /*!< in: concatenation of - database name, null char NUL, + database name, path separator, table name, null char NUL; NOTE that in Windows this is always in LOWER CASE! */ ulint full_name_len); /*!< in: full name length where also the null chars count */ +/** Quote a standard SQL identifier like tablespace, index or column name. +@param[in] file output stream +@param[in] trx InnoDB transaction, or NULL +@param[in] id identifier to quote */ +void +innobase_quote_identifier( + FILE* file, + trx_t* trx, + const char* id); + +/** Quote an standard SQL identifier like tablespace, index or column name. +Return the string as an std:string object. +@param[in] trx InnoDB transaction, or NULL +@param[in] id identifier to quote +@return a std::string with id properly quoted. */ +std::string +innobase_quote_identifier( + trx_t* trx, + const char* id); + /*****************************************************************//** -Convert a table or index name to the MySQL system_charset_info (UTF-8) -and quote it if needed. -@return pointer to the end of buf */ -UNIV_INTERN +Convert a table name to the MySQL system_charset_info (UTF-8). 
+@return pointer to the end of buf */ char* innobase_convert_name( /*==================*/ char* buf, /*!< out: buffer for converted identifier */ ulint buflen, /*!< in: length of buf, in bytes */ - const char* id, /*!< in: identifier to convert */ + const char* id, /*!< in: table name to convert */ ulint idlen, /*!< in: length of id, in bytes */ - THD* thd, /*!< in: MySQL connection thread, or NULL */ - ibool table_id);/*!< in: TRUE=id is a table or database name; - FALSE=id is an index name */ + THD* thd); /*!< in: MySQL connection thread, or NULL */ /******************************************************************//** Returns true if the thread is the replication thread on the slave @@ -119,32 +128,18 @@ server. Used in srv_conc_enter_innodb() to determine if the thread should be allowed to enter InnoDB - the replication thread is treated differently than other threads. Also used in srv_conc_force_exit_innodb(). -@return true if thd is the replication thread */ -UNIV_INTERN +@return true if thd is the replication thread */ ibool thd_is_replication_slave_thread( /*============================*/ THD* thd); /*!< in: thread handle */ -/******************************************************************//** -Gets information on the durability property requested by thread. -Used when writing either a prepare or commit record to the log -buffer. -@return the durability property. */ -UNIV_INTERN -enum durability_properties -thd_requested_durability( -/*=====================*/ - const THD* thd) /*!< in: thread handle */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /******************************************************************//** Returns true if the transaction this thread is processing has edited non-transactional tables. Used by the deadlock detector when deciding which transaction to rollback in case of a deadlock - we try to avoid rolling back transactions that have edited non-transactional tables. -@return true if non-transactional tables have been edited */ -UNIV_INTERN +@return true if non-transactional tables have been edited */ ibool thd_has_edited_nontrans_tables( /*===========================*/ @@ -152,7 +147,6 @@ thd_has_edited_nontrans_tables( /*************************************************************//** Prints info of a THD object (== user session thread) to the given file. */ -UNIV_INTERN void innobase_mysql_print_thd( /*=====================*/ @@ -161,24 +155,6 @@ innobase_mysql_print_thd( uint max_query_len); /*!< in: max query length to print, or 0 to use the default max length */ -/*************************************************************//** -InnoDB uses this function to compare two data fields for which the data type -is such that we must use MySQL code to compare them. -@return 1, 0, -1, if a is greater, equal, less than b, respectively */ -UNIV_INTERN -int -innobase_mysql_cmp( -/*===============*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number, /*!< in: number of the charset */ - const unsigned char* a, /*!< in: data field */ - unsigned int a_length, /*!< in: data field length, - not UNIV_SQL_NULL */ - const unsigned char* b, /*!< in: data field */ - unsigned int b_length) /*!< in: data field length, - not UNIV_SQL_NULL */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /*****************************************************************//** Log code calls this whenever log has been written and/or flushed up to a new position. 
We use this to notify upper layer of a new commit @@ -190,26 +166,20 @@ innobase_mysql_log_notify( ib_uint64_t write_lsn, /*!< in: LSN written to log file */ ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */ -/**************************************************************//** -Converts a MySQL type to an InnoDB type. Note that this function returns +/** Converts a MySQL type to an InnoDB type. Note that this function returns the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. -@return DATA_BINARY, DATA_VARCHAR, ... */ -UNIV_INTERN +@param[out] unsigned_flag DATA_UNSIGNED if an 'unsigned type'; +at least ENUM and SET, and unsigned integer types are 'unsigned types' +@param[in] f MySQL Field +@return DATA_BINARY, DATA_VARCHAR, ... */ ulint get_innobase_type_from_mysql_type( -/*==============================*/ - ulint* unsigned_flag, /*!< out: DATA_UNSIGNED if an - 'unsigned type'; - at least ENUM and SET, - and unsigned integer - types are 'unsigned types' */ - const void* field) /*!< in: MySQL Field */ - MY_ATTRIBUTE((nonnull)); + ulint* unsigned_flag, + const void* field); /******************************************************************//** Get the variable length bounds of the given character set. */ -UNIV_INTERN void innobase_get_cset_width( /*====================*/ @@ -219,8 +189,7 @@ innobase_get_cset_width( /******************************************************************//** Compares NUL-terminated UTF-8 strings case insensitively. -@return 0 if a=b, <0 if a1 if a>b */ -UNIV_INTERN +@return 0 if a=b, <0 if a1 if a>b */ int innobase_strcasecmp( /*================*/ @@ -231,26 +200,22 @@ innobase_strcasecmp( Compares NUL-terminated UTF-8 strings case insensitively. The second string contains wildcards. @return 0 if a match is found, 1 if not */ -UNIV_INTERN int innobase_wildcasecmp( /*=================*/ const char* a, /*!< in: string to compare */ const char* b); /*!< in: wildcard string to compare */ -/******************************************************************//** -Strip dir name from a full path name and return only its file name. +/** Strip dir name from a full path name and return only the file name +@param[in] path_name full path name @return file name or "null" if no file name */ -UNIV_INTERN const char* innobase_basename( -/*==============*/ - const char* path_name); /*!< in: full path name */ + const char* path_name); /******************************************************************//** Returns true if the thread is executing a SELECT statement. -@return true if thd is executing SELECT */ -UNIV_INTERN +@return true if thd is executing SELECT */ ibool thd_is_select( /*==========*/ @@ -258,29 +223,26 @@ thd_is_select( /******************************************************************//** Converts an identifier to a table name. 
*/ -UNIV_INTERN void innobase_convert_from_table_id( /*===========================*/ - struct charset_info_st* cs, /*!< in: the 'from' character set */ - char* to, /*!< out: converted identifier */ - const char* from, /*!< in: identifier to convert */ - ulint len); /*!< in: length of 'to', in bytes; should - be at least 5 * strlen(to) + 1 */ + CHARSET_INFO* cs, /*!< in: the 'from' character set */ + char* to, /*!< out: converted identifier */ + const char* from, /*!< in: identifier to convert */ + ulint len); /*!< in: length of 'to', in bytes; should + be at least 5 * strlen(to) + 1 */ /******************************************************************//** Converts an identifier to UTF-8. */ -UNIV_INTERN void innobase_convert_from_id( /*=====================*/ - struct charset_info_st* cs, /*!< in: the 'from' character set */ - char* to, /*!< out: converted identifier */ - const char* from, /*!< in: identifier to convert */ - ulint len); /*!< in: length of 'to', in bytes; - should be at least 3 * strlen(to) + 1 */ + CHARSET_INFO* cs, /*!< in: the 'from' character set */ + char* to, /*!< out: converted identifier */ + const char* from, /*!< in: identifier to convert */ + ulint len); /*!< in: length of 'to', in bytes; + should be at least 3 * strlen(to) + 1 */ /******************************************************************//** Makes all characters in a NUL-terminated UTF-8 string lower case. */ -UNIV_INTERN void innobase_casedn_str( /*================*/ @@ -300,29 +262,41 @@ int wsrep_innobase_mysql_sort(int mysql_type, uint charset_number, /**********************************************************************//** Determines the connection character set. -@return connection character set */ -UNIV_INTERN -struct charset_info_st* +@return connection character set */ +CHARSET_INFO* innobase_get_charset( /*=================*/ THD* thd); /*!< in: MySQL thread handle */ -/**********************************************************************//** -Determines the current SQL statement. -@return SQL statement string */ -UNIV_INTERN + +/** Determines the current SQL statement. +Thread unsafe, can only be called from the thread owning the THD. +@param[in] thd MySQL thread handle +@param[out] length Length of the SQL statement +@return SQL statement string */ const char* -innobase_get_stmt( -/*==============*/ - THD* thd, /*!< in: MySQL thread handle */ - size_t* length) /*!< out: length of the SQL statement */ - MY_ATTRIBUTE((nonnull)); +innobase_get_stmt_unsafe( + THD* thd, + size_t* length); + +/** Determines the current SQL statement. +Thread safe, can be called from any thread as the string is copied +into the provided buffer. +@param[in] thd MySQL thread handle +@param[out] buf Buffer containing SQL statement +@param[in] buflen Length of provided buffer +@return Length of the SQL statement */ +size_t +innobase_get_stmt_safe( + THD* thd, + char* buf, + size_t buflen); + /******************************************************************//** This function is used to find the storage length in bytes of the first n characters for prefix indexes using a multibyte character set. The function finds charset information and returns length of prefix_len characters in the index field in bytes. 
-@return number of bytes occupied by the first n characters */ -UNIV_INTERN +@return number of bytes occupied by the first n characters */ ulint innobase_get_at_most_n_mbchars( /*===========================*/ @@ -333,20 +307,10 @@ innobase_get_at_most_n_mbchars( ulint data_len, /*!< in: length of the string in bytes */ const char* str); /*!< in: character string */ -/*************************************************************//** -InnoDB index push-down condition check -@return ICP_NO_MATCH, ICP_MATCH, or ICP_OUT_OF_RANGE */ -UNIV_INTERN -enum icp_result -innobase_index_cond( -/*================*/ - void* file) /*!< in/out: pointer to ha_innobase */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); /******************************************************************//** Returns true if the thread supports XA, global value of innodb_supports_xa if thd is NULL. -@return true if thd supports XA */ -UNIV_INTERN +@return true if thd supports XA */ ibool thd_supports_xa( /*============*/ @@ -364,8 +328,7 @@ thd_innodb_tmpdir( /******************************************************************//** Returns the lock wait timeout for the current connection. -@return the lock wait timeout, in seconds */ -UNIV_INTERN +@return the lock wait timeout, in seconds */ ulong thd_lock_wait_timeout( /*==================*/ @@ -373,7 +336,6 @@ thd_lock_wait_timeout( the global innodb_lock_wait_timeout */ /******************************************************************//** Add up the time waited for the lock for the current query. */ -UNIV_INTERN void thd_set_lock_wait_time( /*===================*/ @@ -384,8 +346,7 @@ thd_set_lock_wait_time( Get the current setting of the table_cache_size global parameter. We do a dirty read because for one there is no synchronization object and secondly there is little harm in doing so even if we get a torn read. -@return SQL statement string */ -UNIV_INTERN +@return SQL statement string */ ulint innobase_get_table_cache_size(void); /*===============================*/ @@ -395,39 +356,13 @@ Get the current setting of the lower_case_table_names global parameter from mysqld.cc. We do a dirty read because for one there is no synchronization object and secondly there is little harm in doing so even if we get a torn read. -@return value of lower_case_table_names */ -UNIV_INTERN +@return value of lower_case_table_names */ ulint innobase_get_lower_case_table_names(void); /*=====================================*/ -/*****************************************************************//** -Frees a possible InnoDB trx object associated with the current THD. -@return 0 or error number */ -UNIV_INTERN -int -innobase_close_thd( -/*===============*/ - THD* thd); /*!< in: MySQL thread handle for - which to close the connection */ -/*************************************************************//** -Get the next token from the given string and store it in *token. */ -UNIV_INTERN -ulint -innobase_mysql_fts_get_token( -/*=========================*/ - CHARSET_INFO* charset, /*!< in: Character set */ - const byte* start, /*!< in: start of text */ - const byte* end, /*!< in: one character past end of - text */ - fts_string_t* token, /*!< out: token's text */ - ulint* offset); /*!< out: offset to token, - measured as characters from - 'start' */ - /******************************************************************//** compare two character string case insensitively according to their charset. 
*/ -UNIV_INTERN int innobase_fts_text_case_cmp( /*=======================*/ @@ -435,49 +370,57 @@ innobase_fts_text_case_cmp( const void* p1, /*!< in: key */ const void* p2); /*!< in: node */ -/****************************************************************//** -Get FTS field charset info from the field's prtype -@return charset info */ -UNIV_INTERN -CHARSET_INFO* -innobase_get_fts_charset( -/*=====================*/ - int mysql_type, /*!< in: MySQL type */ - uint charset_number);/*!< in: number of the charset */ /******************************************************************//** Returns true if transaction should be flagged as read-only. -@return true if the thd is marked as read-only */ -UNIV_INTERN -ibool +@return true if the thd is marked as read-only */ +bool thd_trx_is_read_only( /*=================*/ THD* thd); /*!< in/out: thread handle */ +/** +Check if the transaction can be rolled back +@param[in] requestor Session requesting the lock +@param[in] holder Session that holds the lock +@return the session that will be rolled back, null don't care */ + +THD* +thd_trx_arbitrate(THD* requestor, THD* holder); + +/** +@param[in] thd Session to check +@return the priority */ + +int +thd_trx_priority(THD* thd); + /******************************************************************//** Check if the transaction is an auto-commit transaction. TRUE also implies that it is a SELECT (read-only) transaction. -@return true if the transaction is an auto commit read-only transaction. */ -UNIV_INTERN +@return true if the transaction is an auto commit read-only transaction. */ ibool thd_trx_is_auto_commit( /*===================*/ THD* thd); /*!< in: thread handle, or NULL */ +/******************************************************************//** +Get the thread start time. +@return the thread start time in seconds since the epoch. */ +ulint +thd_start_time_in_secs( +/*===================*/ + THD* thd); /*!< in: thread handle, or NULL */ + /*****************************************************************//** -A wrapper function of innobase_convert_name(), convert a table or -index name to the MySQL system_charset_info (UTF-8) and quote it if needed. -@return pointer to the end of buf */ -UNIV_INTERN +A wrapper function of innobase_convert_name(), convert a table name +to the MySQL system_charset_info (UTF-8) and quote it if needed. +@return pointer to the end of buf */ void innobase_format_name( /*==================*/ - char* buf, /*!< out: buffer for converted - identifier */ - ulint buflen, /*!< in: length of buf, in bytes */ - const char* name, /*!< in: index or table name - to format */ - ibool is_index_name) /*!< in: index name */ - MY_ATTRIBUTE((nonnull)); + char* buf, /*!< out: buffer for converted identifier */ + ulint buflen, /*!< in: length of buf, in bytes */ + const char* name); /*!< in: table name to format */ /** Corresponds to Sql_condition:enum_warning_level. */ enum ib_log_level_t { @@ -498,7 +441,6 @@ void push_warning_printf( THD *thd, Sql_condition::enum_warning_level level, uint code, const char *format, ...); */ -UNIV_INTERN void ib_errf( /*====*/ @@ -519,7 +461,6 @@ void push_warning_printf( THD *thd, Sql_condition::enum_warning_level level, uint code, const char *format, ...); */ -UNIV_INTERN void ib_senderrf( /*========*/ @@ -528,22 +469,19 @@ ib_senderrf( ib_uint32_t code, /*!< MySQL error code */ ...); /*!< Args */ -/******************************************************************//** -Write a message to the MySQL log, prefixed with "InnoDB: ". 
-Wrapper around sql_print_information() */ -UNIV_INTERN -void -ib_logf( -/*====*/ - ib_log_level_t level, /*!< in: warning level */ - const char* format, /*!< printf format */ - ...) /*!< Args */ - MY_ATTRIBUTE((format(printf, 2, 3))); +extern const char* TROUBLESHOOTING_MSG; +extern const char* TROUBLESHOOT_DATADICT_MSG; +extern const char* BUG_REPORT_MSG; +extern const char* FORCE_RECOVERY_MSG; +extern const char* ERROR_CREATING_MSG; +extern const char* OPERATING_SYSTEM_ERROR_MSG; +extern const char* FOREIGN_KEY_CONSTRAINTS_MSG; +extern const char* SET_TRANSACTION_MSG; +extern const char* INNODB_PARAMETERS_MSG; /******************************************************************//** Returns the NUL terminated value of glob_hostname. -@return pointer to glob_hostname. */ -UNIV_INTERN +@return pointer to glob_hostname. */ const char* server_get_hostname(); /*=================*/ @@ -551,7 +489,6 @@ server_get_hostname(); /******************************************************************//** Get the error message format string. @return the format string or 0 if not found. */ -UNIV_INTERN const char* innobase_get_err_msg( /*=================*/ @@ -574,8 +511,7 @@ values we want to reserve for multi-value inserts e.g., innobase_next_autoinc() will be called with increment set to 3 where autoinc_lock_mode != TRADITIONAL because we want to reserve 3 values for the multi-value INSERT above. -@return the next value */ -UNIV_INTERN +@return the next value */ ulonglong innobase_next_autoinc( /*==================*/ @@ -586,16 +522,6 @@ innobase_next_autoinc( ulonglong max_value) /*!< in: max value for type */ MY_ATTRIBUTE((pure, warn_unused_result)); -/********************************************************************//** -Get the upper limit of the MySQL integral and floating-point type. -@return maximum allowed value for the field */ -UNIV_INTERN -ulonglong -innobase_get_int_col_max_value( -/*===========================*/ - const Field* field) /*!< in: MySQL field */ - MY_ATTRIBUTE((nonnull, pure, warn_unused_result)); - /********************************************************************** Converts an identifier from my_charset_filename to UTF-8 charset. */ uint @@ -610,7 +536,6 @@ innobase_convert_to_system_charset( Check if the length of the identifier exceeds the maximum allowed. The input to this function is an identifier in charset my_charset_filename. return true when length of identifier is too long. */ -UNIV_INTERN my_bool innobase_check_identifier_length( /*=============================*/ @@ -622,9 +547,9 @@ Converts an identifier from my_charset_filename to UTF-8 charset. */ uint innobase_convert_to_system_charset( /*===============================*/ - char* to, /* out: converted identifier */ - const char* from, /* in: identifier to convert */ - ulint len, /* in: length of 'to', in bytes */ + char* to, /* out: converted identifier */ + const char* from, /* in: identifier to convert */ + ulint len, /* in: length of 'to', in bytes */ uint* errors); /* out: error return */ /********************************************************************** @@ -632,9 +557,9 @@ Converts an identifier from my_charset_filename to UTF-8 charset. 
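The comment on innobase_next_autoinc() above explains why the handler may need to reserve several auto-increment values at once (for example increment = 3 for a three-row multi-value INSERT when autoinc_lock_mode != TRADITIONAL). The sketch below is a deliberately simplified model of that reservation arithmetic, with made-up names; it is not the ha_innodb.cc implementation, which additionally handles cases such as offset > max_value and overflow close to the type limit.

#include <cstdint>

/* Simplified model only (illustrative names, not InnoDB code). Values are
handed out from the progression offset, offset + step, offset + 2*step, ...
Assumes step >= 1 and offset <= max_value. Reserves the next `need` members
of the progression after `current` and returns the first member that is
still free, saturating at max_value. */
inline uint64_t
next_autoinc_sketch(
	uint64_t current,	/* current counter value */
	uint64_t need,		/* how many values to reserve, e.g. 3 */
	uint64_t step,		/* auto_increment_increment */
	uint64_t offset,	/* auto_increment_offset */
	uint64_t max_value)	/* upper limit of the column type */
{
	/* Progression members already consumed (those <= current). */
	uint64_t used = (current >= offset)
		? (current - offset) / step + 1 : 0;

	/* Progression members that fit at or below max_value. */
	uint64_t capacity = (max_value - offset) / step + 1;

	if (used + need >= capacity) {
		return(max_value);	/* not enough room left: saturate */
	}

	return(offset + (used + need) * step);
}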
*/ uint innobase_convert_to_filename_charset( /*=================================*/ - char* to, /* out: converted identifier */ - const char* from, /* in: identifier to convert */ - ulint len); /* in: length of 'to', in bytes */ + char* to, /* out: converted identifier */ + const char* from, /* in: identifier to convert */ + ulint len); /* in: length of 'to', in bytes */ /********************************************************************//** Helper function to push warnings from InnoDB internals to SQL-layer. */ @@ -662,12 +587,51 @@ database name catenated to '/' and table name. An example: test/mytable. On Windows normalization puts both the database name and the table name always to lower case if "set_lower_case" is set to TRUE. */ void -normalize_table_name_low( -/*=====================*/ +normalize_table_name_c_low( +/*=======================*/ char* norm_name, /*!< out: normalized name as a null-terminated string */ const char* name, /*!< in: table name string */ ibool set_lower_case); /*!< in: TRUE if we want to set name to lower case */ +/*************************************************************//** +InnoDB index push-down condition check defined in ha_innodb.cc +@return ICP_NO_MATCH, ICP_MATCH, or ICP_OUT_OF_RANGE */ + +/* JAN: TODO: missing 5.7 header */ +//#include +#include + +ICP_RESULT +innobase_index_cond( +/*================*/ + void* file) /*!< in/out: pointer to ha_innobase */ + __attribute__((warn_unused_result)); + +/******************************************************************//** +Gets information on the durability property requested by thread. +Used when writing either a prepare or commit record to the log +buffer. +@return the durability property. */ + +/* JAN: TODO: missing 5.7 header */ +#ifdef HAVE_DUR_PROP_H +#include +#else +#include +#endif + +enum durability_properties +thd_requested_durability( +/*=====================*/ + const THD* thd) /*!< in: thread handle */ + __attribute__((warn_unused_result)); + +/** Update the system variable with the given value of the InnoDB +buffer pool size. +@param[in] buf_pool_size given value of buffer pool size.*/ +void +innodb_set_buf_pool_size(ulonglong buf_pool_size); +#endif /* !UNIV_HOTBACKUP && !UNIV_INNOCHECKSUM */ #endif /* HA_INNODB_PROTOTYPES_H */ diff --git a/storage/innobase/include/handler0alter.h b/storage/innobase/include/handler0alter.h index 3dd6c99eb6d..1c690839449 100644 --- a/storage/innobase/include/handler0alter.h +++ b/storage/innobase/include/handler0alter.h @@ -23,7 +23,6 @@ Smart ALTER TABLE /*************************************************************//** Copies an InnoDB record to table->record[0]. */ -UNIV_INTERN void innobase_rec_to_mysql( /*==================*/ @@ -36,7 +35,6 @@ innobase_rec_to_mysql( /*************************************************************//** Copies an InnoDB index entry to table->record[0]. */ -UNIV_INTERN void innobase_fields_to_mysql( /*=====================*/ @@ -47,7 +45,6 @@ innobase_fields_to_mysql( /*************************************************************//** Copies an InnoDB row to table->record[0]. */ -UNIV_INTERN void innobase_row_to_mysql( /*==================*/ @@ -58,7 +55,6 @@ innobase_row_to_mysql( /*************************************************************//** Resets table->record[0]. */ -UNIV_INTERN void innobase_rec_reset( /*===============*/ @@ -70,13 +66,12 @@ auto_increment_increment and auto_increment_offset variables. 
*/ struct ib_sequence_t { /** - @param thd - the session - @param start_value - the lower bound - @param max_value - the upper bound (inclusive) */ + @param thd the session + @param start_value the lower bound + @param max_value the upper bound (inclusive) */ ib_sequence_t(THD* thd, ulonglong start_value, ulonglong max_value); - /** - Postfix increment + /** Postfix increment @return the value to insert */ ulonglong operator++(int) UNIV_NOTHROW; diff --git a/storage/innobase/include/hash0hash.h b/storage/innobase/include/hash0hash.h index 9a4077befb1..a7bcee1185b 100644 --- a/storage/innobase/include/hash0hash.h +++ b/storage/innobase/include/hash0hash.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,7 +29,6 @@ Created 5/20/1997 Heikki Tuuri #include "univ.i" #include "mem0mem.h" #ifndef UNIV_HOTBACKUP -# include "sync0sync.h" # include "sync0rw.h" #endif /* !UNIV_HOTBACKUP */ @@ -56,8 +55,7 @@ enum hash_table_sync_t { /*************************************************************//** Creates a hash table with >= n array cells. The actual number of cells is chosen to be a prime number slightly bigger than n. -@return own: created table */ -UNIV_INTERN +@return own: created table */ hash_table_t* hash_create( /*========*/ @@ -67,39 +65,26 @@ hash_create( Creates a sync object array array to protect a hash table. ::sync_obj can be mutexes or rw_locks depening on the type of hash table. */ -UNIV_INTERN void -hash_create_sync_obj_func( -/*======================*/ +hash_create_sync_obj( +/*=================*/ hash_table_t* table, /*!< in: hash table */ - enum hash_table_sync_t type, /*!< in: HASH_TABLE_SYNC_MUTEX + hash_table_sync_t type, /*!< in: HASH_TABLE_SYNC_MUTEX or HASH_TABLE_SYNC_RW_LOCK */ -#ifdef UNIV_SYNC_DEBUG - ulint sync_level,/*!< in: latching order level - of the mutexes: used in the - debug version */ -#endif /* UNIV_SYNC_DEBUG */ + latch_id_t id, /*!< in: mutex/rw_lock ID */ ulint n_sync_obj);/*!< in: number of sync objects, must be a power of 2 */ -#ifdef UNIV_SYNC_DEBUG -# define hash_create_sync_obj(t, s, n, level) \ - hash_create_sync_obj_func(t, s, level, n) -#else /* UNIV_SYNC_DEBUG */ -# define hash_create_sync_obj(t, s, n, level) \ - hash_create_sync_obj_func(t, s, n) -#endif /* UNIV_SYNC_DEBUG */ #endif /* !UNIV_HOTBACKUP */ /*************************************************************//** Frees a hash table. */ -UNIV_INTERN void hash_table_free( /*============*/ hash_table_t* table); /*!< in, own: hash table */ /**************************************************************//** Calculates the hash value from a folded value. -@return hashed value */ +@return hashed value */ UNIV_INLINE ulint hash_calc_hash( @@ -268,7 +253,7 @@ do { \ /************************************************************//** Gets the nth cell in a hash table. -@return pointer to cell */ +@return pointer to cell */ UNIV_INLINE hash_cell_t* hash_get_nth_cell( @@ -286,7 +271,7 @@ hash_table_clear( /*************************************************************//** Returns the number of cells in a hash table. 
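The HASH_MIGRATE() macro above walks every cell of the old table and re-inserts each chain node into the new table according to its fold value; the static_cast added in this change only makes the macro usable with the C++ node types. A stand-alone sketch of the same migration loop follows, for illustration only: it uses a plain modulo instead of hash_calc_hash(), it pushes nodes onto the head of the destination chain whereas InnoDB's HASH_INSERT() appends at the tail, and all names are invented.

#include <cstddef>

/* Illustrative only: singly linked chain node, like the NODE_TYPE/PTR_NAME
pair that HASH_MIGRATE() is instantiated with. */
struct chain_node_sketch_t {
	chain_node_sketch_t*	next;
	unsigned long		fold;	/* precomputed fold value */
};

struct hash_table_sketch_t {
	chain_node_sketch_t**	cells;		/* array of chain heads */
	std::size_t		n_cells;
};

/* Move every node of old_tab into new_tab, recomputing the destination cell
from the node's fold (cf. what HASH_MIGRATE() expands to). */
inline void
hash_migrate_sketch(hash_table_sketch_t* old_tab, hash_table_sketch_t* new_tab)
{
	for (std::size_t i = 0; i < old_tab->n_cells; i++) {
		chain_node_sketch_t* node = old_tab->cells[i];

		while (node != nullptr) {
			chain_node_sketch_t* next = node->next; /* save before relinking */

			/* Head insert into the new cell's chain. */
			std::size_t cell = node->fold % new_tab->n_cells;
			node->next = new_tab->cells[cell];
			new_tab->cells[cell] = node;

			node = next;
		}
		old_tab->cells[i] = nullptr;
	}
}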
-@return number of cells */ +@return number of cells */ UNIV_INLINE ulint hash_get_n_cells( @@ -364,10 +349,12 @@ do {\ cell_count2222 = hash_get_n_cells(OLD_TABLE);\ \ for (i2222 = 0; i2222 < cell_count2222; i2222++) {\ - NODE_TYPE* node2222 = HASH_GET_FIRST((OLD_TABLE), i2222);\ + NODE_TYPE* node2222 = static_cast(\ + HASH_GET_FIRST((OLD_TABLE), i2222));\ \ while (node2222) {\ - NODE_TYPE* next2222 = node2222->PTR_NAME;\ + NODE_TYPE* next2222 = static_cast(\ + node2222->PTR_NAME);\ ulint fold2222 = FOLD_FUNC(node2222);\ \ HASH_INSERT(NODE_TYPE, PTR_NAME, (NEW_TABLE),\ @@ -380,7 +367,7 @@ do {\ /************************************************************//** Gets the sync object index for a fold value in a hash table. -@return index */ +@return index */ UNIV_INLINE ulint hash_get_sync_obj_index( @@ -389,7 +376,7 @@ hash_get_sync_obj_index( ulint fold); /*!< in: fold */ /************************************************************//** Gets the nth heap in a hash table. -@return mem heap */ +@return mem heap */ UNIV_INLINE mem_heap_t* hash_get_nth_heap( @@ -398,7 +385,7 @@ hash_get_nth_heap( ulint i); /*!< in: index of the heap */ /************************************************************//** Gets the heap for a fold value in a hash table. -@return mem heap */ +@return mem heap */ UNIV_INLINE mem_heap_t* hash_get_heap( @@ -407,7 +394,7 @@ hash_get_heap( ulint fold); /*!< in: fold */ /************************************************************//** Gets the nth mutex in a hash table. -@return mutex */ +@return mutex */ UNIV_INLINE ib_mutex_t* hash_get_nth_mutex( @@ -416,7 +403,7 @@ hash_get_nth_mutex( ulint i); /*!< in: index of the mutex */ /************************************************************//** Gets the nth rw_lock in a hash table. -@return rw_lock */ +@return rw_lock */ UNIV_INLINE rw_lock_t* hash_get_nth_lock( @@ -425,7 +412,7 @@ hash_get_nth_lock( ulint i); /*!< in: index of the rw_lock */ /************************************************************//** Gets the mutex for a fold value in a hash table. -@return mutex */ +@return mutex */ UNIV_INLINE ib_mutex_t* hash_get_mutex( @@ -434,16 +421,42 @@ hash_get_mutex( ulint fold); /*!< in: fold */ /************************************************************//** Gets the rw_lock for a fold value in a hash table. -@return rw_lock */ +@return rw_lock */ UNIV_INLINE rw_lock_t* hash_get_lock( /*==========*/ hash_table_t* table, /*!< in: hash table */ ulint fold); /*!< in: fold */ + +/** If not appropriate rw_lock for a fold value in a hash table, +relock S-lock the another rw_lock until appropriate for a fold value. +@param[in] hash_lock latched rw_lock to be confirmed +@param[in] table hash table +@param[in] fold fold value +@return latched rw_lock */ +UNIV_INLINE +rw_lock_t* +hash_lock_s_confirm( + rw_lock_t* hash_lock, + hash_table_t* table, + ulint fold); + +/** If not appropriate rw_lock for a fold value in a hash table, +relock X-lock the another rw_lock until appropriate for a fold value. +@param[in] hash_lock latched rw_lock to be confirmed +@param[in] table hash table +@param[in] fold fold value +@return latched rw_lock */ +UNIV_INLINE +rw_lock_t* +hash_lock_x_confirm( + rw_lock_t* hash_lock, + hash_table_t* table, + ulint fold); + /************************************************************//** Reserves the mutex for a fold value in a hash table. 
*/ -UNIV_INTERN void hash_mutex_enter( /*=============*/ @@ -451,7 +464,6 @@ hash_mutex_enter( ulint fold); /*!< in: fold */ /************************************************************//** Releases the mutex for a fold value in a hash table. */ -UNIV_INTERN void hash_mutex_exit( /*============*/ @@ -459,21 +471,18 @@ hash_mutex_exit( ulint fold); /*!< in: fold */ /************************************************************//** Reserves all the mutexes of a hash table, in an ascending order. */ -UNIV_INTERN void hash_mutex_enter_all( /*=================*/ hash_table_t* table); /*!< in: hash table */ /************************************************************//** Releases all the mutexes of a hash table. */ -UNIV_INTERN void hash_mutex_exit_all( /*================*/ hash_table_t* table); /*!< in: hash table */ /************************************************************//** Releases all but the passed in mutex of a hash table. */ -UNIV_INTERN void hash_mutex_exit_all_but( /*====================*/ @@ -481,7 +490,6 @@ hash_mutex_exit_all_but( ib_mutex_t* keep_mutex); /*!< in: mutex to keep */ /************************************************************//** s-lock a lock for a fold value in a hash table. */ -UNIV_INTERN void hash_lock_s( /*========*/ @@ -489,7 +497,6 @@ hash_lock_s( ulint fold); /*!< in: fold */ /************************************************************//** x-lock a lock for a fold value in a hash table. */ -UNIV_INTERN void hash_lock_x( /*========*/ @@ -497,7 +504,6 @@ hash_lock_x( ulint fold); /*!< in: fold */ /************************************************************//** unlock an s-lock for a fold value in a hash table. */ -UNIV_INTERN void hash_unlock_s( /*==========*/ @@ -506,7 +512,6 @@ hash_unlock_s( ulint fold); /*!< in: fold */ /************************************************************//** unlock x-lock for a fold value in a hash table. */ -UNIV_INTERN void hash_unlock_x( /*==========*/ @@ -514,21 +519,18 @@ hash_unlock_x( ulint fold); /*!< in: fold */ /************************************************************//** Reserves all the locks of a hash table, in an ascending order. */ -UNIV_INTERN void hash_lock_x_all( /*============*/ hash_table_t* table); /*!< in: hash table */ /************************************************************//** Releases all the locks of a hash table, in an ascending order. */ -UNIV_INTERN void hash_unlock_x_all( /*==============*/ hash_table_t* table); /*!< in: hash table */ /************************************************************//** Releases all but passed in lock of a hash table, */ -UNIV_INTERN void hash_unlock_x_all_but( /*==================*/ diff --git a/storage/innobase/include/hash0hash.ic b/storage/innobase/include/hash0hash.ic index 254f3f82e5d..b99ac1eb501 100644 --- a/storage/innobase/include/hash0hash.ic +++ b/storage/innobase/include/hash0hash.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2015, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,7 +27,7 @@ Created 5/20/1997 Heikki Tuuri /************************************************************//** Gets the nth cell in a hash table. 
-@return pointer to cell */ +@return pointer to cell */ UNIV_INLINE hash_cell_t* hash_get_nth_cell( @@ -58,7 +58,7 @@ hash_table_clear( /*************************************************************//** Returns the number of cells in a hash table. -@return number of cells */ +@return number of cells */ UNIV_INLINE ulint hash_get_n_cells( @@ -72,7 +72,7 @@ hash_get_n_cells( /**************************************************************//** Calculates the hash value from a folded value. -@return hashed value */ +@return hashed value */ UNIV_INLINE ulint hash_calc_hash( @@ -88,7 +88,7 @@ hash_calc_hash( #ifndef UNIV_HOTBACKUP /************************************************************//** Gets the sync object index for a fold value in a hash table. -@return index */ +@return index */ UNIV_INLINE ulint hash_get_sync_obj_index( @@ -106,7 +106,7 @@ hash_get_sync_obj_index( /************************************************************//** Gets the nth heap in a hash table. -@return mem heap */ +@return mem heap */ UNIV_INLINE mem_heap_t* hash_get_nth_heap( @@ -124,7 +124,7 @@ hash_get_nth_heap( /************************************************************//** Gets the heap for a fold value in a hash table. -@return mem heap */ +@return mem heap */ UNIV_INLINE mem_heap_t* hash_get_heap( @@ -148,7 +148,7 @@ hash_get_heap( /************************************************************//** Gets the nth mutex in a hash table. -@return mutex */ +@return mutex */ UNIV_INLINE ib_mutex_t* hash_get_nth_mutex( @@ -166,7 +166,7 @@ hash_get_nth_mutex( /************************************************************//** Gets the mutex for a fold value in a hash table. -@return mutex */ +@return mutex */ UNIV_INLINE ib_mutex_t* hash_get_mutex( @@ -186,7 +186,7 @@ hash_get_mutex( /************************************************************//** Gets the nth rw_lock in a hash table. -@return rw_lock */ +@return rw_lock */ UNIV_INLINE rw_lock_t* hash_get_nth_lock( @@ -204,7 +204,7 @@ hash_get_nth_lock( /************************************************************//** Gets the rw_lock for a fold value in a hash table. -@return rw_lock */ +@return rw_lock */ UNIV_INLINE rw_lock_t* hash_get_lock( @@ -222,4 +222,58 @@ hash_get_lock( return(hash_get_nth_lock(table, i)); } + +/** If not appropriate rw_lock for a fold value in a hash table, +relock S-lock the another rw_lock until appropriate for a fold value. +@param[in] hash_lock latched rw_lock to be confirmed +@param[in] table hash table +@param[in] fold fold value +@return latched rw_lock */ +UNIV_INLINE +rw_lock_t* +hash_lock_s_confirm( + rw_lock_t* hash_lock, + hash_table_t* table, + ulint fold) +{ + ut_ad(rw_lock_own(hash_lock, RW_LOCK_S)); + + rw_lock_t* hash_lock_tmp = hash_get_lock(table, fold); + + while (hash_lock_tmp != hash_lock) { + rw_lock_s_unlock(hash_lock); + hash_lock = hash_lock_tmp; + rw_lock_s_lock(hash_lock); + hash_lock_tmp = hash_get_lock(table, fold); + } + + return(hash_lock); +} + +/** If not appropriate rw_lock for a fold value in a hash table, +relock X-lock the another rw_lock until appropriate for a fold value. 
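hash_lock_s_confirm() above captures a common latching pattern: the fold-to-rw_lock mapping can change while the caller is blocked (for instance when the hash table is resized), so after acquiring a lock the code re-resolves the mapping and, if it latched a stale lock, releases it and retries until the lock it holds is the one the fold currently maps to. The sketch below shows the same idea with standard C++17 primitives instead of InnoDB's rw_lock_t; the types and names are illustrative only.

#include <atomic>
#include <cstddef>
#include <shared_mutex>

/* Illustrative only: a fixed pool of locks where the fold->lock mapping
(the number of active locks) may be changed concurrently by a resizer. */
struct lock_pool_sketch_t {
	enum { N_LOCKS_MAX = 64 };
	std::shared_mutex		locks[N_LOCKS_MAX];
	std::atomic<std::size_t>	n_active{8};	/* power of two, <= N_LOCKS_MAX */

	std::shared_mutex* lock_for(std::size_t fold)
	{
		return(&locks[fold & (n_active.load() - 1)]);
	}
};

/* Take the S-lock that currently covers `fold`; if the mapping changed while
we were blocking, drop the stale lock and retry (cf. hash_lock_s_confirm()). */
inline std::shared_mutex*
lock_s_confirm_sketch(lock_pool_sketch_t& pool, std::size_t fold)
{
	std::shared_mutex* latched = pool.lock_for(fold);
	latched->lock_shared();

	std::shared_mutex* wanted = pool.lock_for(fold);

	while (wanted != latched) {
		latched->unlock_shared();	/* we latched a stale lock */
		latched = wanted;
		latched->lock_shared();
		wanted = pool.lock_for(fold);
	}

	return(latched);
}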
+@param[in] hash_lock latched rw_lock to be confirmed +@param[in] table hash table +@param[in] fold fold value +@return latched rw_lock */ +UNIV_INLINE +rw_lock_t* +hash_lock_x_confirm( + rw_lock_t* hash_lock, + hash_table_t* table, + ulint fold) +{ + ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)); + + rw_lock_t* hash_lock_tmp = hash_get_lock(table, fold); + + while (hash_lock_tmp != hash_lock) { + rw_lock_x_unlock(hash_lock); + hash_lock = hash_lock_tmp; + rw_lock_x_lock(hash_lock); + hash_lock_tmp = hash_get_lock(table, fold); + } + + return(hash_lock); +} #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h new file mode 100644 index 00000000000..3ea0687da43 --- /dev/null +++ b/storage/innobase/include/ib0mutex.h @@ -0,0 +1,1166 @@ +/***************************************************************************** + +Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA + +*****************************************************************************/ + +/******************************************************************//** +@file include/ib0mutex.h +Policy based mutexes. + +Created 2013-03-26 Sunny Bains. +***********************************************************************/ + +#ifndef UNIV_INNOCHECKSUM + +#ifndef ib0mutex_h +#define ib0mutex_h + +#include "ut0ut.h" +#include "ut0rnd.h" +#include "os0event.h" + +/** OS mutex for tracking lock/unlock for debugging */ +template